code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Copyright 2019 The Dreamer Authors. Copyright 2020 Plan2Explore Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gym
import numpy as np
import tensorflow as tf
class TFBatchEnv(object):
  """TensorFlow-graph wrapper around a PyBatchEnv.

  step() and reset() are exposed as TF ops via tf.py_func (TF1 graph
  mode); both return a dict mapping observation keys plus 'reward' and
  'done' to batched tensors.
  """

  def __init__(self, envs, blocking):
    """Wrap `envs` in a flattened PyBatchEnv and cache dtypes/shapes.

    Args:
      envs: List of gym-style environments with dict observation spaces.
      blocking: Whether the underlying environments step synchronously.
    """
    self._batch_env = PyBatchEnv(envs, blocking, flatten=True)
    spaces = self._batch_env.observation_space.spaces
    # self._keys is resolved on the wrapped PyBatchEnv via __getattr__;
    # its last two entries are always 'reward' and 'done'.
    self._dtypes = [self._parse_dtype(spaces[key]) for key in self._keys[:-2]]
    self._dtypes += [tf.float32, tf.bool]  # Reward and done flag.
    self._shapes = [self._parse_shape(spaces[key]) for key in self._keys[:-2]]
    self._shapes += [(), ()]  # Reward and done flag.

  def __getattr__(self, name):
    # Delegate unknown attributes to the wrapped batch environment.
    return getattr(self._batch_env, name)

  def __len__(self):
    return len(self._batch_env)

  def __getitem__(self, index):
    return self._batch_env[index]

  def step(self, action):
    """Apply a batch of actions; returns a dict of batched tensors."""
    output = tf.py_func(
        self._batch_env.step, [action], self._dtypes, name='step')
    return self._process_output(output, len(self._batch_env))

  def reset(self, indices=None):
    """Reset selected environments (all of them when indices is None)."""
    if indices is None:
      indices = tf.range(len(self._batch_env))
    output = tf.py_func(
        self._batch_env.reset, [indices], self._dtypes, name='reset')
    # Batch size is left dynamic (None) since an arbitrary subset may reset.
    return self._process_output(output, None)

  def _process_output(self, output, batch_size):
    """Attach static shapes and key the flat tensor list by self._keys."""
    for tensor, shape in zip(output, self._shapes):
      tensor.set_shape((batch_size,) + shape)
    return {key: tensor for key, tensor in zip(self._keys, output)}

  def _parse_dtype(self, space):
    """Map a gym space to the TF dtype used for its observations."""
    if isinstance(space, gym.spaces.Discrete):
      return tf.int32
    if isinstance(space, gym.spaces.Box):
      if space.low.dtype == np.uint8:
        return tf.uint8
      else:
        return tf.float32
    # Include the offending space in the error, consistent with _parse_shape.
    raise NotImplementedError("Unsupported space '{}.'".format(space))

  def _parse_shape(self, space):
    """Map a gym space to the per-element observation shape."""
    if isinstance(space, gym.spaces.Discrete):
      return ()
    if isinstance(space, gym.spaces.Box):
      return space.shape
    raise NotImplementedError("Unsupported space '{}.'".format(space))
class PyBatchEnv(object):
  """Steps a list of gym-style environments together as one batch.

  Observations must be dicts; step()/reset() return stacked numpy arrays
  for every observation key plus 'reward' and 'done', either as a dict or
  (when flatten=True) as a tuple ordered by self._keys.
  """

  def __init__(self, envs, blocking, flatten=False):
    """Validate that all environments share spaces and cache settings.

    Args:
      envs: List of environments with identical observation/action spaces.
      blocking: If False, env.step/reset return promises (callables) that
        are resolved after all calls were issued.
      flatten: Return tuples ordered by self._keys instead of dicts.

    Raises:
      ValueError: If the environments disagree on a space.
    """
    observ_space = envs[0].observation_space
    if not all(env.observation_space == observ_space for env in envs):
      raise ValueError('All environments must use the same observation space.')
    action_space = envs[0].action_space
    if not all(env.action_space == action_space for env in envs):
      # Fixed copy-paste bug: this check concerns the action space, but the
      # message used to repeat the observation-space text above.
      raise ValueError('All environments must use the same action space.')
    self._envs = envs
    self._blocking = blocking
    self._flatten = flatten
    # Deterministic key order: sorted observation keys, then reward/done.
    self._keys = list(sorted(observ_space.spaces.keys())) + ['reward', 'done']

  def __len__(self):
    return len(self._envs)

  def __getitem__(self, index):
    return self._envs[index]

  def __getattr__(self, name):
    # Delegate unknown attributes to the first environment.
    return getattr(self._envs[0], name)

  def step(self, actions):
    """Step every environment with its corresponding action.

    Raises:
      ValueError: If any action is outside its environment's action space.
    """
    for index, (env, action) in enumerate(zip(self._envs, actions)):
      if not env.action_space.contains(action):
        message = 'Invalid action for batch index {}: {}'
        raise ValueError(message.format(index, action))
    if self._blocking:
      transitions = [
          env.step(action)
          for env, action in zip(self._envs, actions)]
    else:
      # Non-blocking envs return promises; issue all calls first, then wait.
      transitions = [
          env.step(action, blocking=False)
          for env, action in zip(self._envs, actions)]
      transitions = [transition() for transition in transitions]
    outputs = {key: [] for key in self._keys}
    for observ, reward, done, _ in transitions:
      for key, value in observ.items():
        outputs[key].append(np.array(value))
      outputs['reward'].append(np.array(reward, np.float32))
      # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
      outputs['done'].append(np.array(done, bool))
    outputs = {key: np.stack(value) for key, value in outputs.items()}
    if self._flatten:
      outputs = tuple(outputs[key] for key in self._keys)
    return outputs

  def reset(self, indices=None):
    """Reset the selected environments (all when indices is None).

    Rewards are reported as 0.0 and done flags as False.
    """
    if indices is None:
      indices = range(len(self._envs))
    if self._blocking:
      observs = [self._envs[index].reset() for index in indices]
    else:
      observs = [self._envs[index].reset(blocking=False) for index in indices]
      observs = [observ() for observ in observs]
    outputs = {key: [] for key in self._keys}
    for observ in observs:
      for key, value in observ.items():
        outputs[key].append(np.array(value))
      outputs['reward'].append(np.array(0.0, np.float32))
      outputs['done'].append(np.array(False, bool))
    outputs = {key: np.stack(value) for key, value in outputs.items()}
    if self._flatten:
      outputs = tuple(outputs[key] for key in self._keys)
    return outputs

  def close(self):
    """Close every environment that supports closing."""
    for env in self._envs:
      if hasattr(env, 'close'):
        env.close()
| [
"numpy.stack",
"numpy.array",
"tensorflow.py_func"
] | [((1506, 1575), 'tensorflow.py_func', 'tf.py_func', (['self._batch_env.step', '[action]', 'self._dtypes'], {'name': '"""step"""'}), "(self._batch_env.step, [action], self._dtypes, name='step')\n", (1516, 1575), True, 'import tensorflow as tf\n'), ((1765, 1837), 'tensorflow.py_func', 'tf.py_func', (['self._batch_env.reset', '[indices]', 'self._dtypes'], {'name': '"""reset"""'}), "(self._batch_env.reset, [indices], self._dtypes, name='reset')\n", (1775, 1837), True, 'import tensorflow as tf\n'), ((4322, 4337), 'numpy.stack', 'np.stack', (['value'], {}), '(value)\n', (4330, 4337), True, 'import numpy as np\n'), ((5086, 5101), 'numpy.stack', 'np.stack', (['value'], {}), '(value)\n', (5094, 5101), True, 'import numpy as np\n'), ((4218, 4246), 'numpy.array', 'np.array', (['reward', 'np.float32'], {}), '(reward, np.float32)\n', (4226, 4246), True, 'import numpy as np\n'), ((4277, 4300), 'numpy.array', 'np.array', (['done', 'np.bool'], {}), '(done, np.bool)\n', (4285, 4300), True, 'import numpy as np\n'), ((4984, 5009), 'numpy.array', 'np.array', (['(0.0)', 'np.float32'], {}), '(0.0, np.float32)\n', (4992, 5009), True, 'import numpy as np\n'), ((5040, 5064), 'numpy.array', 'np.array', (['(False)', 'np.bool'], {}), '(False, np.bool)\n', (5048, 5064), True, 'import numpy as np\n'), ((4170, 4185), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (4178, 4185), True, 'import numpy as np\n'), ((4936, 4951), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (4944, 4951), True, 'import numpy as np\n')] |
#coding:utf-8
from __future__ import division
import torch
import torch.optim as optim
from adjacency import sparse_mx_to_torch_sparse_tensor
from net.gcn_v import GCN_V
import yaml
from easydict import EasyDict
from tensorboardX import SummaryWriter
import numpy as np
import scipy.sparse as sp
import time
import pprint
import sys
import os
import argparse
import math
import pandas as pd
import dgl
import warnings
from tqdm import tqdm
class node_dataset(torch.utils.data.Dataset):
    """Serves items of a pre-built, indexable node collection as a Dataset.

    Extra keyword arguments are accepted and ignored so call sites can pass
    a shared config without breaking.
    """

    def __init__(self, node_list, **kwargs):
        self.node_list = node_list

    def __len__(self):
        return len(self.node_list)

    def __getitem__(self, index):
        return self.node_list[index]
def row_normalize(mx):
    """Scale every row of a (sparse) matrix so that it sums to one.

    Rows whose sum is zero or negative are left untouched: their sum is
    replaced by 1 before inversion, and any infinities produced by the
    reciprocal are zeroed out.
    """
    row_totals = np.array(mx.sum(1))
    row_totals[row_totals <= 0] = 1
    scale = np.power(row_totals, -1).flatten()
    scale[np.isinf(scale)] = 0.
    return sp.diags(scale).dot(mx)
class AverageMeter(object):
    """Tracks the latest value and a running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        # The epsilon guards against division by zero before any update.
        self.avg = float(self.sum) / (self.count + 1e-10)
class Timer():
    """Context manager that announces a task and reports its wall time."""

    def __init__(self, name='task', verbose=True):
        self.name = name
        self.verbose = verbose

    def __enter__(self):
        print('[begin {}]'.format(self.name))
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.verbose:
            elapsed = time.time() - self.start
            print('[done {}] use {:.3f} s'.format(self.name, elapsed))
        # True only on clean exit; an exception therefore still propagates.
        return exc_type is None
def adjust_lr(cur_epoch, param, cfg):
    # NOTE(review): this step-decay helper looks dead/unfinished -- the
    # training loop below only ever calls cos_lr.  `optimizer` and `lr` are
    # not defined in this scope (reaching the loop raises NameError), and
    # the `param` argument and `ind` are never used.  Confirm intent before
    # relying on or removing it.
    if cur_epoch not in cfg.step_number:
        return
    ind = cfg.step_number.index(cur_epoch)
    for each in optimizer.param_groups:
        each['lr'] = lr
def cos_lr(current_step, optimizer, cfg):
    """Set the learning rate: linear warmup followed by cosine decay.

    For the first `cfg.warmup_step` steps the rate grows linearly from 0
    to `cfg.lr`; afterwards it follows half a cosine curve that reaches 0
    at `cfg.total_step`.  The new rate is written into every parameter
    group of `optimizer`.
    """
    if current_step < cfg.warmup_step:
        lr = cfg.lr * current_step / cfg.warmup_step
    else:
        decay_total = cfg.total_step - cfg.warmup_step
        decay_done = current_step - cfg.warmup_step
        lr = cfg.lr * (1 + math.cos(math.pi * decay_done / decay_total)) / 2
    for group in optimizer.param_groups:
        group['lr'] = lr
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', type=str)
parser.add_argument('--outpath', type=str)
parser.add_argument('--phase', type=str)
parser.add_argument('--train_featfile', type=str)
parser.add_argument('--train_Ifile', type=str)
parser.add_argument('--train_labelfile', type=str)
parser.add_argument('--test_featfile', type=str)
parser.add_argument('--test_Ifile', type=str)
parser.add_argument('--test_labelfile', type=str)
parser.add_argument('--resume_path', type=str)
args = parser.parse_args()
beg_time = time.time()
config = yaml.load(open(args.config_file, "r"), Loader=yaml.FullLoader)
cfg = EasyDict(config)
cfg.step_number = [int(r * cfg.total_step) for r in cfg.lr_step]
# force assignment
for key, value in args._get_kwargs():
cfg[key] = value
#cfg[list(dict(train_adjfile=train_adjfile).keys())[0]] = train_adjfile
#cfg[list(dict(train_labelfile=train_labelfile).keys())[0]] = train_labelfile
#cfg[list(dict(test_adjfile=test_adjfile).keys())[0]] = test_adjfile
#cfg[list(dict(test_labelfile=test_labelfile).keys())[0]] = test_labelfile
print("train hyper parameter list")
pprint.pprint(cfg)
# get model
model = GCN_V(feature_dim=cfg.feat_dim, nhid=cfg.nhid, nclass=cfg.nclass, dropout=0.5)
model.cuda()
# get dataset
scale_max = 80.
with Timer('load data'):
train_feature = np.load(cfg.train_featfile)
train_feature = train_feature / np.linalg.norm(train_feature, axis=1, keepdims=True)
train_adj = np.load(cfg.train_Ifile)[:, :int(scale_max)]
train_label_k = np.load(cfg.train_labelfile).astype(np.float32)
train_label_s = train_label_k / scale_max
train_feature = torch.FloatTensor(train_feature).cuda()
train_label_s = torch.FloatTensor(train_label_s).cuda()
train_data = (train_feature, train_adj, train_label_s)
test_feature = np.load(cfg.test_featfile)
test_feature = test_feature / np.linalg.norm(test_feature, axis=1, keepdims=True)
test_adj = np.load(cfg.test_Ifile)[:, :int(scale_max)]
test_label_k = np.load(cfg.test_labelfile).astype(np.float32)
test_label_s = test_label_k / scale_max
test_feature = torch.FloatTensor(test_feature).cuda()
test_label_s = torch.FloatTensor(test_label_s).cuda()
train_dataset = node_dataset(range(len(train_feature)))
test_dataset = node_dataset(range(len(test_feature)))
train_dataloader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=cfg.batchsize,
shuffle=True,
num_workers=16,
pin_memory=True,
drop_last=False)
test_dataloader = torch.utils.data.DataLoader(
dataset=test_dataset,
batch_size=cfg.batchsize,
shuffle=False,
num_workers=16,
pin_memory=True,
drop_last=False)
if cfg.phase == 'train':
optimizer = optim.SGD(model.parameters(), cfg.lr, momentum=cfg.sgd_momentum, weight_decay=cfg.sgd_weight_decay)
beg_step = 0
if cfg.resume_path != None:
beg_step = int(os.path.splitext(os.path.basename(cfg.resume_path))[0].split('_')[1])
with Timer('resume model from %s'%cfg.resume_path):
ckpt = torch.load(cfg.resume_path, map_location='cpu')
model.load_state_dict(ckpt['state_dict'])
train_loss_meter = AverageMeter()
train_kdiff_meter = AverageMeter()
train_mre_meter = AverageMeter()
test_loss_meter = AverageMeter()
test_kdiff_meter = AverageMeter()
test_mre_meter = AverageMeter()
writer = SummaryWriter(os.path.join(cfg.outpath), filename_suffix='')
current_step = beg_step
break_flag = False
while 1:
if break_flag:
break
iter_begtime = time.time()
for _, index in enumerate(train_dataloader):
if current_step > cfg.total_step:
break_flag = True
break
current_step += 1
cos_lr(current_step, optimizer, cfg)
batch_feature = train_feature[train_adj[index]]
batch_label = train_label_s[index]
batch_k = train_label_k[index]
batch_data = (batch_feature, batch_label)
model.train()
pred_arr, train_loss = model(batch_data, return_loss=True)
optimizer.zero_grad()
train_loss.backward()
optimizer.step()
train_loss_meter.update(train_loss.item())
pred_arr = pred_arr.data.cpu().numpy()
# add this clip
k_hat = np.round(pred_arr * scale_max)
k_hat[np.where(k_hat < 1)[0]] = 1
k_hat[np.where(k_hat > scale_max)[0]] = scale_max
train_kdiff = np.abs(k_hat - batch_k)
train_kdiff_meter.update(train_kdiff.mean())
train_mre = train_kdiff / batch_k
train_mre_meter.update(train_mre.mean())
writer.add_scalar('lr', optimizer.param_groups[0]['lr'], global_step=current_step)
writer.add_scalar('loss/train', train_loss.item(), global_step=current_step)
writer.add_scalar('kdiff/train', train_kdiff_meter.val, global_step=current_step)
writer.add_scalar('mre/train', train_mre_meter.val, global_step=current_step)
if current_step % cfg.log_freq == 0:
log = "step:{}, step_time:{:.3f}, lr:{:.8f}, trainloss:{:.4f}({:.4f}), trainkdiff:{:.2f}({:.2f}), trainmre:{:.2f}({:.2f}), testloss:{:.4f}({:.4f}), testkdiff:{:.2f}({:.2f}), testmre:{:.2f}({:.2f})".format(current_step, time.time()-iter_begtime, optimizer.param_groups[0]['lr'], train_loss_meter.val, train_loss_meter.avg, train_kdiff_meter.val, train_kdiff_meter.avg, train_mre_meter.val, train_mre_meter.avg, test_loss_meter.val, test_loss_meter.avg, test_kdiff_meter.val, test_kdiff_meter.avg, test_mre_meter.val, test_mre_meter.avg)
print(log)
iter_begtime = time.time()
if (current_step) % cfg.save_freq == 0 and current_step > 0:
torch.save({'state_dict' : model.state_dict(), 'step': current_step},
os.path.join(cfg.outpath, "ckpt_%s.pth"%(current_step)))
if (current_step) % cfg.val_freq == 0 and current_step > 0:
pred_list = []
model.eval()
testloss_list = []
for step, index in enumerate(tqdm(test_dataloader, desc='test phase', disable=False)):
batch_feature = test_feature[test_adj[index]]
batch_label = test_label_s[index]
batch_data = (batch_feature, batch_label)
pred, test_loss = model(batch_data, return_loss=True)
pred_list.append(pred.data.cpu().numpy())
testloss_list.append(test_loss.item())
pred_list = np.concatenate(pred_list)
k_hat, k_arr = pred_list * scale_max, test_label_k
# add this clip before eval
k_hat = np.round(k_hat)
k_hat[np.where(k_hat < 1)[0]] = 1
k_hat[np.where(k_hat > scale_max)[0]] = scale_max
test_kdiff = np.abs(np.round(k_hat) - k_arr.reshape(-1))
test_mre = test_kdiff / k_arr.reshape(-1)
test_kdiff_meter.update(test_kdiff.mean())
test_mre_meter.update(test_mre.mean())
test_loss_meter.update(np.mean(testloss_list))
writer.add_scalar('loss/test', test_loss_meter.val, global_step=current_step)
writer.add_scalar('kdiff/test', test_kdiff_meter.val, global_step=current_step)
writer.add_scalar('mre/test', test_mre_meter.val, global_step=current_step)
writer.close()
else:
ckpt = torch.load(cfg.resume_path, map_location='cpu')
model.load_state_dict(ckpt['state_dict'])
pred_list, gcnfeat_list = [], []
model.eval()
beg_time = time.time()
for step, index, in enumerate(test_dataloader):
batch_feature = test_feature[test_adj[index]]
batch_label = test_label_s[index]
batch_data = (batch_feature, batch_label)
pred, gcnfeat = model(batch_data, output_feat=True)
pred_list.append(pred.data.cpu().numpy())
gcnfeat_list.append(gcnfeat.data.cpu().numpy())
print("time use %.4f"%(time.time()-beg_time))
pred_list = np.concatenate(pred_list)
gcnfeat_arr = np.vstack(gcnfeat_list)
gcnfeat_arr = gcnfeat_arr / np.linalg.norm(gcnfeat_arr, axis=1, keepdims=True)
tag = os.path.splitext(os.path.basename(cfg.resume_path))[0]
print("stat")
k_hat, k_arr = pred_list * scale_max, test_label_k
# add this clip before eval
k_hat = np.round(k_hat)
k_hat[np.where(k_hat < 1)[0]] = 1
k_hat[np.where(k_hat > scale_max)[0]] = scale_max
np.save(os.path.join(cfg.outpath, 'k_infer_pred'), np.round(k_hat))
print("time use", time.time() - beg_time)
| [
"numpy.load",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.mean",
"numpy.linalg.norm",
"pprint.pprint",
"net.gcn_v.GCN_V",
"os.path.join",
"numpy.round",
"torch.utils.data.DataLoader",
"numpy.power",
"torch.load",
"torch.FloatTensor",
"math.cos",
"easydict.EasyDict",
"scipy.sparse.di... | [((959, 974), 'scipy.sparse.diags', 'sp.diags', (['r_inv'], {}), '(r_inv)\n', (967, 974), True, 'import scipy.sparse as sp\n'), ((2520, 2545), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2543, 2545), False, 'import argparse\n'), ((3104, 3115), 'time.time', 'time.time', ([], {}), '()\n', (3113, 3115), False, 'import time\n'), ((3202, 3218), 'easydict.EasyDict', 'EasyDict', (['config'], {}), '(config)\n', (3210, 3218), False, 'from easydict import EasyDict\n'), ((3733, 3751), 'pprint.pprint', 'pprint.pprint', (['cfg'], {}), '(cfg)\n', (3746, 3751), False, 'import pprint\n'), ((3781, 3859), 'net.gcn_v.GCN_V', 'GCN_V', ([], {'feature_dim': 'cfg.feat_dim', 'nhid': 'cfg.nhid', 'nclass': 'cfg.nclass', 'dropout': '(0.5)'}), '(feature_dim=cfg.feat_dim, nhid=cfg.nhid, nclass=cfg.nclass, dropout=0.5)\n', (3786, 3859), False, 'from net.gcn_v import GCN_V\n'), ((921, 936), 'numpy.isinf', 'np.isinf', (['r_inv'], {}), '(r_inv)\n', (929, 936), True, 'import numpy as np\n'), ((1643, 1654), 'time.time', 'time.time', ([], {}), '()\n', (1652, 1654), False, 'import time\n'), ((3969, 3996), 'numpy.load', 'np.load', (['cfg.train_featfile'], {}), '(cfg.train_featfile)\n', (3976, 3996), True, 'import numpy as np\n'), ((4492, 4518), 'numpy.load', 'np.load', (['cfg.test_featfile'], {}), '(cfg.test_featfile)\n', (4499, 4518), True, 'import numpy as np\n'), ((5069, 5213), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'cfg.batchsize', 'shuffle': '(True)', 'num_workers': '(16)', 'pin_memory': '(True)', 'drop_last': '(False)'}), '(dataset=train_dataset, batch_size=cfg.batchsize,\n shuffle=True, num_workers=16, pin_memory=True, drop_last=False)\n', (5096, 5213), False, 'import torch\n'), ((5310, 5454), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'cfg.batchsize', 'shuffle': '(False)', 'num_workers': '(16)', 'pin_memory': 
'(True)', 'drop_last': '(False)'}), '(dataset=test_dataset, batch_size=cfg.batchsize,\n shuffle=False, num_workers=16, pin_memory=True, drop_last=False)\n', (5337, 5454), False, 'import torch\n'), ((10827, 10874), 'torch.load', 'torch.load', (['cfg.resume_path'], {'map_location': '"""cpu"""'}), "(cfg.resume_path, map_location='cpu')\n", (10837, 10874), False, 'import torch\n'), ((11007, 11018), 'time.time', 'time.time', ([], {}), '()\n', (11016, 11018), False, 'import time\n'), ((11487, 11512), 'numpy.concatenate', 'np.concatenate', (['pred_list'], {}), '(pred_list)\n', (11501, 11512), True, 'import numpy as np\n'), ((11535, 11558), 'numpy.vstack', 'np.vstack', (['gcnfeat_list'], {}), '(gcnfeat_list)\n', (11544, 11558), True, 'import numpy as np\n'), ((11850, 11865), 'numpy.round', 'np.round', (['k_hat'], {}), '(k_hat)\n', (11858, 11865), True, 'import numpy as np\n'), ((880, 900), 'numpy.power', 'np.power', (['rowsum', '(-1)'], {}), '(rowsum, -1)\n', (888, 900), True, 'import numpy as np\n'), ((4037, 4089), 'numpy.linalg.norm', 'np.linalg.norm', (['train_feature'], {'axis': '(1)', 'keepdims': '(True)'}), '(train_feature, axis=1, keepdims=True)\n', (4051, 4089), True, 'import numpy as np\n'), ((4110, 4134), 'numpy.load', 'np.load', (['cfg.train_Ifile'], {}), '(cfg.train_Ifile)\n', (4117, 4134), True, 'import numpy as np\n'), ((4557, 4608), 'numpy.linalg.norm', 'np.linalg.norm', (['test_feature'], {'axis': '(1)', 'keepdims': '(True)'}), '(test_feature, axis=1, keepdims=True)\n', (4571, 4608), True, 'import numpy as np\n'), ((4628, 4651), 'numpy.load', 'np.load', (['cfg.test_Ifile'], {}), '(cfg.test_Ifile)\n', (4635, 4651), True, 'import numpy as np\n'), ((6302, 6327), 'os.path.join', 'os.path.join', (['cfg.outpath'], {}), '(cfg.outpath)\n', (6314, 6327), False, 'import os\n'), ((6502, 6513), 'time.time', 'time.time', ([], {}), '()\n', (6511, 6513), False, 'import time\n'), ((11595, 11645), 'numpy.linalg.norm', 'np.linalg.norm', (['gcnfeat_arr'], {'axis': '(1)', 
'keepdims': '(True)'}), '(gcnfeat_arr, axis=1, keepdims=True)\n', (11609, 11645), True, 'import numpy as np\n'), ((11982, 12023), 'os.path.join', 'os.path.join', (['cfg.outpath', '"""k_infer_pred"""'], {}), "(cfg.outpath, 'k_infer_pred')\n", (11994, 12023), False, 'import os\n'), ((12025, 12040), 'numpy.round', 'np.round', (['k_hat'], {}), '(k_hat)\n', (12033, 12040), True, 'import numpy as np\n'), ((12065, 12076), 'time.time', 'time.time', ([], {}), '()\n', (12074, 12076), False, 'import time\n'), ((2355, 2382), 'math.cos', 'math.cos', (['(math.pi * n2 / n1)'], {}), '(math.pi * n2 / n1)\n', (2363, 2382), False, 'import math\n'), ((4179, 4207), 'numpy.load', 'np.load', (['cfg.train_labelfile'], {}), '(cfg.train_labelfile)\n', (4186, 4207), True, 'import numpy as np\n'), ((4301, 4333), 'torch.FloatTensor', 'torch.FloatTensor', (['train_feature'], {}), '(train_feature)\n', (4318, 4333), False, 'import torch\n'), ((4365, 4397), 'torch.FloatTensor', 'torch.FloatTensor', (['train_label_s'], {}), '(train_label_s)\n', (4382, 4397), False, 'import torch\n'), ((4695, 4722), 'numpy.load', 'np.load', (['cfg.test_labelfile'], {}), '(cfg.test_labelfile)\n', (4702, 4722), True, 'import numpy as np\n'), ((4813, 4844), 'torch.FloatTensor', 'torch.FloatTensor', (['test_feature'], {}), '(test_feature)\n', (4830, 4844), False, 'import torch\n'), ((4875, 4906), 'torch.FloatTensor', 'torch.FloatTensor', (['test_label_s'], {}), '(test_label_s)\n', (4892, 4906), False, 'import torch\n'), ((5915, 5962), 'torch.load', 'torch.load', (['cfg.resume_path'], {'map_location': '"""cpu"""'}), "(cfg.resume_path, map_location='cpu')\n", (5925, 5962), False, 'import torch\n'), ((7380, 7410), 'numpy.round', 'np.round', (['(pred_arr * scale_max)'], {}), '(pred_arr * scale_max)\n', (7388, 7410), True, 'import numpy as np\n'), ((7558, 7581), 'numpy.abs', 'np.abs', (['(k_hat - batch_k)'], {}), '(k_hat - batch_k)\n', (7564, 7581), True, 'import numpy as np\n'), ((8805, 8816), 'time.time', 'time.time', ([], 
{}), '()\n', (8814, 8816), False, 'import time\n'), ((11677, 11710), 'os.path.basename', 'os.path.basename', (['cfg.resume_path'], {}), '(cfg.resume_path)\n', (11693, 11710), False, 'import os\n'), ((11880, 11899), 'numpy.where', 'np.where', (['(k_hat < 1)'], {}), '(k_hat < 1)\n', (11888, 11899), True, 'import numpy as np\n'), ((11922, 11949), 'numpy.where', 'np.where', (['(k_hat > scale_max)'], {}), '(k_hat > scale_max)\n', (11930, 11949), True, 'import numpy as np\n'), ((9841, 9866), 'numpy.concatenate', 'np.concatenate', (['pred_list'], {}), '(pred_list)\n', (9855, 9866), True, 'import numpy as np\n'), ((10015, 10030), 'numpy.round', 'np.round', (['k_hat'], {}), '(k_hat)\n', (10023, 10030), True, 'import numpy as np\n'), ((11443, 11454), 'time.time', 'time.time', ([], {}), '()\n', (11452, 11454), False, 'import time\n'), ((1813, 1824), 'time.time', 'time.time', ([], {}), '()\n', (1822, 1824), False, 'import time\n'), ((7433, 7452), 'numpy.where', 'np.where', (['(k_hat < 1)'], {}), '(k_hat < 1)\n', (7441, 7452), True, 'import numpy as np\n'), ((7483, 7510), 'numpy.where', 'np.where', (['(k_hat > scale_max)'], {}), '(k_hat > scale_max)\n', (7491, 7510), True, 'import numpy as np\n'), ((9013, 9068), 'os.path.join', 'os.path.join', (['cfg.outpath', "('ckpt_%s.pth' % current_step)"], {}), "(cfg.outpath, 'ckpt_%s.pth' % current_step)\n", (9025, 9068), False, 'import os\n'), ((9323, 9378), 'tqdm.tqdm', 'tqdm', (['test_dataloader'], {'desc': '"""test phase"""', 'disable': '(False)'}), "(test_dataloader, desc='test phase', disable=False)\n", (9327, 9378), False, 'from tqdm import tqdm\n'), ((10460, 10482), 'numpy.mean', 'np.mean', (['testloss_list'], {}), '(testloss_list)\n', (10467, 10482), True, 'import numpy as np\n'), ((8426, 8437), 'time.time', 'time.time', ([], {}), '()\n', (8435, 8437), False, 'import time\n'), ((10057, 10076), 'numpy.where', 'np.where', (['(k_hat < 1)'], {}), '(k_hat < 1)\n', (10065, 10076), True, 'import numpy as np\n'), ((10111, 10138), 
'numpy.where', 'np.where', (['(k_hat > scale_max)'], {}), '(k_hat > scale_max)\n', (10119, 10138), True, 'import numpy as np\n'), ((10196, 10211), 'numpy.round', 'np.round', (['k_hat'], {}), '(k_hat)\n', (10204, 10211), True, 'import numpy as np\n'), ((5775, 5808), 'os.path.basename', 'os.path.basename', (['cfg.resume_path'], {}), '(cfg.resume_path)\n', (5791, 5808), False, 'import os\n')] |
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np

# Load the test image and report what we got.
image = mpimg.imread('test.jpg')
print('This image is: ', type(image),
      'with dimensions:', image.shape)

# Image extents plus two working copies: one keeps only bright pixels,
# the other gets the detected lane-line pixels painted red.
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
line_image = np.copy(image)

# Per-channel brightness thresholds.
# MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold, green_threshold, blue_threshold]

# Triangular region of interest; the origin (x=0, y=0) is the image's
# upper-left corner.
# MODIFY THESE VALUES TO ISOLATE THE REGION WHERE THE LANE LINES ARE
left_bottom = [0, 539]
right_bottom = [900, 539]
apex = [475, 320]

# Linear fits y = A*x + B for the triangle's three edges
# (np.polyfit returns the coefficients [A, B]).
fit_left = np.polyfit((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1)
fit_right = np.polyfit((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1)
fit_bottom = np.polyfit((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom[1]), 1)

# A pixel fails the color test if ANY channel falls below its threshold.
color_thresholds = np.any(image < np.array(rgb_threshold), axis=-1)

# A pixel is inside the triangle when it satisfies all three edge
# half-plane inequalities.
XX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize))
inside_left = YY > (XX * fit_left[0] + fit_left[1])
inside_right = YY > (XX * fit_right[0] + fit_right[1])
inside_bottom = YY < (XX * fit_bottom[0] + fit_bottom[1])
region_thresholds = inside_left & inside_right & inside_bottom

# Black out anything that is too dark or outside the region of interest...
color_select[color_thresholds | ~region_thresholds] = [0, 0, 0]
# ...and paint pixels that pass both the color and region tests red.
line_image[~color_thresholds & region_thresholds] = [255, 0, 0]

# Show the original image with the region outline, then both results.
plt.imshow(image)
x = [left_bottom[0], right_bottom[0], apex[0], left_bottom[0]]
y = [left_bottom[1], right_bottom[1], apex[1], left_bottom[1]]
plt.plot(x, y, 'b--', lw=4)
plt.imshow(color_select)
plt.imshow(line_image)
plt.show()
| [
"matplotlib.image.imread",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.copy",
"numpy.polyfit",
"matplotlib.pyplot.imshow",
"numpy.arange"
] | [((118, 142), 'matplotlib.image.imread', 'mpimg.imread', (['"""test.jpg"""'], {}), "('test.jpg')\n", (130, 142), True, 'import matplotlib.image as mpimg\n'), ((348, 362), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (355, 362), True, 'import numpy as np\n'), ((377, 391), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (384, 391), True, 'import numpy as np\n'), ((1028, 1095), 'numpy.polyfit', 'np.polyfit', (['(left_bottom[0], apex[0])', '(left_bottom[1], apex[1])', '(1)'], {}), '((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1)\n', (1038, 1095), True, 'import numpy as np\n'), ((1109, 1178), 'numpy.polyfit', 'np.polyfit', (['(right_bottom[0], apex[0])', '(right_bottom[1], apex[1])', '(1)'], {}), '((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1)\n', (1119, 1178), True, 'import numpy as np\n'), ((1193, 1281), 'numpy.polyfit', 'np.polyfit', (['(left_bottom[0], right_bottom[0])', '(left_bottom[1], right_bottom[1])', '(1)'], {}), '((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom\n [1]), 1)\n', (1203, 1281), True, 'import numpy as np\n'), ((2090, 2107), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (2100, 2107), True, 'import matplotlib.pyplot as plt\n'), ((2237, 2264), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b--"""'], {'lw': '(4)'}), "(x, y, 'b--', lw=4)\n", (2245, 2264), True, 'import matplotlib.pyplot as plt\n'), ((2266, 2290), 'matplotlib.pyplot.imshow', 'plt.imshow', (['color_select'], {}), '(color_select)\n', (2276, 2290), True, 'import matplotlib.pyplot as plt\n'), ((2292, 2314), 'matplotlib.pyplot.imshow', 'plt.imshow', (['line_image'], {}), '(line_image)\n', (2302, 2314), True, 'import matplotlib.pyplot as plt\n'), ((2316, 2326), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2324, 2326), True, 'import matplotlib.pyplot as plt\n'), ((1546, 1565), 'numpy.arange', 'np.arange', (['(0)', 'xsize'], {}), '(0, xsize)\n', (1555, 1565), True, 'import numpy as np\n'), 
((1567, 1586), 'numpy.arange', 'np.arange', (['(0)', 'ysize'], {}), '(0, ysize)\n', (1576, 1586), True, 'import numpy as np\n')] |
import bs4 as bs
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
import os
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
import pickle
import requests
style.use('ggplot')
def save_sp500_tickers():
resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
soup = bs.BeautifulSoup(resp.text, 'lxml')
table = soup.find('table', {'class': 'wikitable sortable'})
tickers = []
for row in table.findAll('tr')[1:]:
ticker = row.findAll('td')[0].text
tickers.append(ticker)
with open("sp500tickers.pickle", "wb") as f:
pickle.dump(tickers, f)
return tickers
# save_sp500_tickers()
def get_data_from_yahoo(reload_sp500=False):
if reload_sp500:
tickers = save_sp500_tickers()
else:
with open("sp500tickers.pickle", "rb") as f:
tickers = pickle.load(f)
if not os.path.exists('stock_dfs'):
os.makedirs('stock_dfs')
start = dt.datetime(2010, 1, 1)
end = dt.datetime.now()
for ticker in tickers[:45]:
# just in case your connection breaks, we'd like to save our progress!
if not os.path.exists('stock_dfs/{}.csv'.format(ticker)):
df = web.DataReader(ticker, 'morningstar', start, end)
df.reset_index(inplace=True)
df.set_index("Date", inplace=True)
df = df.drop("Symbol", axis=1)
df.to_csv('stock_dfs/{}.csv'.format(ticker))
else:
print('Already have {}'.format(ticker))
#get_data_from_yahoo()
def compile_data():
with open("sp500tickers.pickle","rb") as f:
tickers = pickle.load(f)
main_df = pd.DataFrame()
for count,ticker in enumerate(tickers[:45]):
df = pd.read_csv('stock_dfs/{}.csv'.format(ticker))
df.set_index('Date', inplace=True)
df.rename(columns = {'Close':ticker}, inplace=True)
df.drop(['Open','High','Low','Volume'], 1, inplace=True)
if main_df.empty:
main_df = df
else:
main_df = main_df.join(df, how='outer')
if count % 10 == 0:
print(count)
print(main_df.head())
main_df.to_csv('sp500_joined_closes.csv')
#compile_data()
def visualize_data():
    """Plot a correlation heatmap of daily returns across the joined closes.

    Reads ``sp500_joined_closes.csv``, computes the pairwise correlation of
    percent changes, and renders it as a red/green heatmap with ticker
    labels on both axes.
    """
    df = pd.read_csv('sp500_joined_closes.csv')
    df.set_index('Date', inplace=True)
    # Correlate daily percent changes rather than raw prices, so long-term
    # price trends do not dominate the correlation structure.
    df_corr = df.pct_change().corr()
    data = df_corr.values
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    heatmap = ax.pcolor(data, cmap=plt.cm.RdYlGn)
    fig.colorbar(heatmap)
    # Center one tick in each heatmap cell.
    ax.set_xticks(np.arange(data.shape[0]) + 0.5, minor=False)
    ax.set_yticks(np.arange(data.shape[1]) + 0.5, minor=False)
    ax.invert_yaxis()
    ax.xaxis.tick_top()
    # Fixed misspelled local ("column_lables").
    column_labels = df_corr.columns
    row_labels = df_corr.index
    ax.set_xticklabels(column_labels)
    ax.set_yticklabels(row_labels)
    plt.xticks(rotation=90)
    # Correlations live in [-1, 1]; pin the color scale to that range.
    heatmap.set_clim(-1, 1)
    plt.tight_layout()
    plt.show()
    print(df_corr.head())
visualize_data() | [
"pandas.DataFrame",
"matplotlib.pyplot.tight_layout",
"pickle.dump",
"matplotlib.pyplot.show",
"matplotlib.style.use",
"os.makedirs",
"pandas.read_csv",
"pandas_datareader.data.DataReader",
"os.path.exists",
"datetime.datetime",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.arange",
"r... | [((273, 292), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (282, 292), False, 'from matplotlib import style\n'), ((332, 404), 'requests.get', 'requests.get', (['"""http://en.wikipedia.org/wiki/List_of_S%26P_500_companies"""'], {}), "('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n", (344, 404), False, 'import requests\n'), ((416, 451), 'bs4.BeautifulSoup', 'bs.BeautifulSoup', (['resp.text', '"""lxml"""'], {}), "(resp.text, 'lxml')\n", (432, 451), True, 'import bs4 as bs\n'), ((1063, 1086), 'datetime.datetime', 'dt.datetime', (['(2010)', '(1)', '(1)'], {}), '(2010, 1, 1)\n', (1074, 1086), True, 'import datetime as dt\n'), ((1097, 1114), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1112, 1114), True, 'import datetime as dt\n'), ((1755, 1769), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1767, 1769), True, 'import pandas as pd\n'), ((2343, 2381), 'pandas.read_csv', 'pd.read_csv', (['"""sp500_joined_closes.csv"""'], {}), "('sp500_joined_closes.csv')\n", (2354, 2381), True, 'import pandas as pd\n'), ((2538, 2550), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2548, 2550), True, 'import matplotlib.pyplot as plt\n'), ((2987, 3010), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (2997, 3010), True, 'import matplotlib.pyplot as plt\n'), ((3042, 3060), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3058, 3060), True, 'import matplotlib.pyplot as plt\n'), ((3065, 3075), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3073, 3075), True, 'import matplotlib.pyplot as plt\n'), ((704, 727), 'pickle.dump', 'pickle.dump', (['tickers', 'f'], {}), '(tickers, f)\n', (715, 727), False, 'import pickle\n'), ((988, 1015), 'os.path.exists', 'os.path.exists', (['"""stock_dfs"""'], {}), "('stock_dfs')\n", (1002, 1015), False, 'import os\n'), ((1025, 1049), 'os.makedirs', 'os.makedirs', (['"""stock_dfs"""'], {}), 
"('stock_dfs')\n", (1036, 1049), False, 'import os\n'), ((1725, 1739), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1736, 1739), False, 'import pickle\n'), ((962, 976), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (973, 976), False, 'import pickle\n'), ((1309, 1358), 'pandas_datareader.data.DataReader', 'web.DataReader', (['ticker', '"""morningstar"""', 'start', 'end'], {}), "(ticker, 'morningstar', start, end)\n", (1323, 1358), True, 'import pandas_datareader.data as web\n'), ((2683, 2707), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (2692, 2707), True, 'import numpy as np\n'), ((2746, 2770), 'numpy.arange', 'np.arange', (['data.shape[1]'], {}), '(data.shape[1])\n', (2755, 2770), True, 'import numpy as np\n')] |
"""Module for the Risk-Exploiting Circuit Scenario (i.e. Simplified Comm Relay)
    - Network of agents is to act as a connection between origin and destination terminals
    whose locations are known (observed).
- This is a simplified, surrogate scenario for the more complex communication relay scenario
since would be expected to produce similar physical behavior of the network without adding
the complexity of message passing to the action space
- The aggregate reward is a function of the connection quality of the network between the terminals.
- The connection quality is modeled like an electrical circuit where resistance is a function
of distance between agents in the network. Connections can be serial and parallel.
Beyond a certain distance threshold, connections are not made between agents
- Landmarks can act to boost or degrade the connection quality of agents within a certain
proximity.
- Landmarks can also have a risk associated with them, i.e. probability of causing a nearby
agent to fail and eliminating if from the network.
- Agents actions are their movements
- Most general case: landmarks are at unknown locations and unknown nature (i.e. risk,
    signal degradation) and part of the problem is to explore for landmarks and learn their nature
- Simplified case: to accelerate testing and learning, a simplified case has the landmarks
at known locations with known nature.
- Interesting behavior to be investigate: Landmarks of large boosting quality but high risk
of causing agent failures. How does the network leverage such double edged swords?
- See documents/allen_daily_notes.md 2018.09.21 for modifications to above assumptions and
problem formulation
"""
import numpy as np
from multiagent.scenario import BaseScenario
from particle_environments.mager.world import MortalAgent, HazardousWorld, RiskRewardLandmark
from particle_environments.mager.observation import format_observation
from particle_environments.common import is_collision, distance, delta_pos, delta_vel
from particle_environments.common import RadialPolynomialRewardFunction2D as RadialReward
from particle_environments.common import RadialBernoulliRiskFunction2D as RadialRisk
from particle_environments.common import DefaultParameters as DP
from particle_environments.common import linear_index_to_lower_triangular, ResistanceNetwork
# Scenario Parameters
_DISTANCE_RESISTANCE_GAIN = 1.0
_DISTANCE_RESISTANCE_EXPONENT = 1.0
_MAX_COMMUNICATION_DISTANCE = 0.55 # 5 agents
# _MAX_COMMUNICATION_DISTANCE = 0.60 # 4 agents
# _MAX_COMMUNICATION_DISTANCE = 0.65 # 3 agents
_MAX_OBSERVATION_DISTANCE = 2.0
_AGENT_SIZE = 0.01
_LANDMARK_SIZE = 0.025
_NON_TERMINAL_LANDMARKS = []
# _NON_TERMINAL_LANDMARKS.append(
# RiskRewardLandmark( risk_fn=RadialRisk(0.1), reward_fn=RadialReward(0.1, 10.0)))
class Scenario(BaseScenario):
    """Risk-exploiting circuit scenario: agents form a resistive "circuit"
    linking an origin and a destination terminal landmark.

    Rewards are systemic: every agent receives the inverse of the effective
    electrical resistance between the two terminals, where each in-range
    link contributes a distance-dependent resistance (see systemic_reward).
    """
    # static class
    num_agents = 5
    # num_agents = 4
    # num_agents = 3
    def make_world(self):
        """Create and configure the HazardousWorld with terminals, extra
        landmarks, and agents; returns the initialized world."""
        world = HazardousWorld()
        # set scenario-specific world parameters
        world.collaborative = True
        world.systemic_rewards = True
        world.identical_rewards = False
        world.dim_c = 0 # observation-based communication
        world.max_communication_distance = _MAX_COMMUNICATION_DISTANCE
        world.distance_resistance_gain = _DISTANCE_RESISTANCE_GAIN
        world.distance_resistance_exponent = _DISTANCE_RESISTANCE_EXPONENT
        # add landmarks: the two communication terminals plus any extra
        # (possibly hazardous) landmarks from _NON_TERMINAL_LANDMARKS
        world.origin_terminal_landmark = RiskRewardLandmark( risk_fn=None, reward_fn=RadialReward(1.0, 10.0))
        world.destination_terminal_landmark = RiskRewardLandmark( risk_fn=None, reward_fn=RadialReward(1.0, 10.0))
        world.landmarks = [world.origin_terminal_landmark, world.destination_terminal_landmark]
        for lm in _NON_TERMINAL_LANDMARKS:
            world.landmarks.append(lm)
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark_%d' % i
            landmark.collide = False
            landmark.movable = False
            landmark.size = _LANDMARK_SIZE
            # properties for landmarks: hazards are tinted red in proportion
            # to their failure probability at the origin
            if isinstance(landmark, RiskRewardLandmark) and landmark.is_hazard:
                #TODO: make colors heatmap of risk probability over all bounds
                landmark.color = np.array([landmark.risk_fn.get_failure_probability(0,0) + .1, 0, 0])
            else:
                landmark.color = np.array([0.25, 0.25, 0.25])
        # make initial conditions
        self.reset_world(world)
        return world
    def reset_world(self, world):
        """Respawn all agents at random positions and re-randomize the two
        terminal landmark locations (agents are recreated each episode)."""
        # random properties for agents
        # add agents
        world.agents = [MortalAgent() for i in range(self.num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.collide = True
            agent.silent = True
            agent.terminated = False
            agent.size = _AGENT_SIZE
            agent.max_observation_distance = _MAX_OBSERVATION_DISTANCE
            agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
            agent.color = np.array([0.35, 0.35, 0.85])
        for landmark in world.landmarks:
            landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
        # randomize terminal locations, but regularize to ensure consistent distances
        origin_state, destination_state = self.spawn_terminals(world)
        world.origin_terminal_landmark.state.p_pos = origin_state
        world.destination_terminal_landmark.state.p_pos = destination_state
    def spawn_terminals(self, world):
        ''' create communication terminals at random positions but regularized distance
        Returns:
            tuple of two np.array positions: (origin, destination)
        Notes:
         - regularizing the distance between terminals is important to ensure consistency in
            max rewards possible between different episodes
        '''
        # angle of line connecting terminals
        th = np.random.uniform(0, 2.0*np.pi)
        # half-distance between terminals (fixed; randomized variant kept for reference)
        # d = np.random.normal(2.0, 0.1)
        d = 2.0
        dx = d/2.0*np.cos(th)
        dy = d/2.0*np.sin(th)
        # center of line connecting terminals (fixed at origin; randomized
        # variant kept for reference)
        # xc = np.random.normal(0.0, 0.1)
        # yc = np.random.normal(0.0, 0.1)
        xc = yc = 0.0
        return (np.array([xc-dx, yc-dy]), np.array([xc+dx, yc+dy]))
    def benchmark_data(self, agent, world):
        """Diagnostics tuple: (reward, collision count, summed min distances
        to landmarks, number of landmarks with an agent within 0.1)."""
        collisions = 0
        occupied_landmarks = 0
        min_dists = 0
        for l in world.landmarks:
            dists = [np.linalg.norm(a.state.p_pos - l.state.p_pos) for a in world.agents]
            min_dists += min(dists)
            if min(dists) < 0.1:
                occupied_landmarks += 1
        if agent.collide:
            for a in world.agents:
                if is_collision(a, agent):
                    collisions += 1
        return (self.reward(agent, world), collisions, min_dists, occupied_landmarks)
    def reward(self, agent, world, systemic_call=False):
        ''' individual or system-wide rewards per agent
        Args
         - systemic_call (bool): determine if this is a system-wide calc, or individual agent
        Notes:
         - this function returns zero for all individual agents, but can return nonzero
            for the entire system
        '''
        if systemic_call:
            return self.systemic_reward(world)
        else:
            if agent.terminated:
                # TODO: perhaps add a negative reward for terminated agents
                return 0.0
            else:
                return 0.0
    def systemic_reward(self, world):
        ''' Singular reward for entire system based on communication "conductance" through network
        Returns:
            list: the same conductance value repeated num_agents times
        Notes:
         - for each agent connections are established to all other agents within a proximity
            threshold
         - for each connection, the "resistance" is calculated based on length of distance, longer
            distances have greater resistance.
         - if an agent is within proximity of a Landmarks then the agent's connection resistances are
            amplified or attenuated equally based on the reward function of the landmark. Positive
            reward functions attenuate resistances equally, negative reward functions amplify
            resistances equally
         - Once all connections have been determined and resistances calculated, the "current"
            is calculated between the origin and destination terminals using a normalized, fixed
            "voltage difference" across the terminals.
         - The higher the "current" the greater the systemic rewards
        '''
        # define nodes in resistance network
        # by construction, node 0 is origin landmark, node 1 is destination landmark
        # terminated agents are not part of network
        nodes = [world.origin_terminal_landmark, world.destination_terminal_landmark]
        nodes.extend([a for a in world.agents if not a.terminated])
        n_nodes = len(nodes)
        # init list to hold direct communication resistance values between agents,
        # stored as the lower triangle of the pairwise matrix in linear order.
        # there is no direct communication between origin and destination
        n_pairs = int(n_nodes*(n_nodes+1)/2)
        resistance_array = [None]*n_pairs
        # first three entries are the fixed (0,0), (1,0), (1,1) pairs:
        # self-resistances are zero, origin<->destination is an open circuit
        resistance_array[0] = 0.0
        resistance_array[1] = np.inf
        resistance_array[2] = 0.0
        # calculate direct communication resistance between agents
        for k in range(3,n_pairs):
            i,j = linear_index_to_lower_triangular(k)
            if i == j:
                resistance_array[k] = 0.0
            else:
                resistance_array[k] = self._calculate_resistance(nodes[i], nodes[j], world)
        # create resistance network
        resnet = ResistanceNetwork(n_nodes, resistance_array)
        # calculate resistance between origin and destination
        comm_resistance = resnet.get_two_point_resistance(0,1)
        assert not isinstance(comm_resistance, complex)
        # systemic reward is inverse of resistance (conductance)
        return [1.0/comm_resistance]*self.num_agents
    def _calculate_resistance(self, entity1, entity2, world):
        ''' calculate communication resistance as a function of distance between agents and/or terminals
        Returns:
            float: resistance value; np.inf when out of communication range
        TODO:
         - perhaps normalize gain based on max communication distance
        '''
        dist = distance(entity1, entity2)
        if dist > world.max_communication_distance:
            res = np.inf
        else:
            landmark_gain = self._calculate_landmark_resistance_gain(entity1, entity2, world)
            total_gain = world.distance_resistance_gain*landmark_gain
            res = total_gain * dist**world.distance_resistance_exponent
        return res
    def _calculate_landmark_resistance_gain(self, agent1, agent2, world):
        ''' calculate amplification/attenuation of comm resistance based on proximity to landmarks
        Note: currently a no-op placeholder returning unit gain.
        '''
        # TODO: complete this based on landmark reward functions
        return 1.0
    def done_callback(self, agent, world):
        ''' indicate a terminated agent as done '''
        if agent.terminated:
            return True
        else:
            return False
    def observation(self, agent, world):
        """Per-agent observation: [own vel(2), own pos(2)] followed by one
        5-tuple per other comm node (terminals first, then other agents)."""
        # get positions of all entities in this agent's reference frame
        def communications_observed(other_comm_node):
            ''' Communication between agents is just the conductance
            Returns:
                list: [is_terminated, dx, dy, dvx, dvy] relative to this agent;
                deltas are zeroed for terminated nodes
            Notes:
             - inverse of comm resistance (i.e. conductance) used so that we can use
                zero for out of range comms
             - noisy measurement of heading
             - TODO: observation of failures
            '''
            # check if node is terminated
            is_terminated = 0
            if isinstance(other_comm_node, MortalAgent) and other_comm_node.terminated:
                is_terminated = 1
            dx = dy = dvx = dvy = 0.
            if not is_terminated:
                dx, dy = delta_pos(other_comm_node, agent)
                dvx, dvy = delta_vel(other_comm_node, agent)
            comms = [is_terminated, dx, dy, dvx, dvy]
            # dx_noisy = dx + np.random.normal(0.0, 0.01)
            # dy_noisy = dy + np.random.normal(0.0, 0.01)
            # comms = [dx_noisy, dy_noisy]
            # heading = np.arctan2(dy,dx) + np.random.normal(0.0, 0.1)
            # conductance = 1.0/self._calculate_resistance(agent, other_comm_node, world)
            # comms = [heading, conductance]
            # set comms to zero if out for range
            # if distance(agent, other_comm_node) > agent.max_observation_distance:
            #     comms = [0] * len(comms)
            return comms
        # comm_nodes are the origin and destination terminals plus all other
        # agents (terminated ones are flagged via is_terminated above)
        comm_nodes = world.landmarks[0:2]
        comm_nodes.extend([a for a in world.agents if a is not agent])
        communications = format_observation(observe = communications_observed,
            objects = comm_nodes,
            num_observations = (self.num_agents-1) + 2,
            observation_size = 2*world.dim_p + 1)
        # observe velocity if not terminated
        vel_obs = [0, 0]
        if not agent.terminated:
            vel_obs = agent.state.p_vel.tolist()
        # package observation
        obs = np.asarray(vel_obs + agent.state.p_pos.tolist() + communications)
        if agent.terminated:
            # if agent is terminated, return all zeros for observation
            # TODO: make this more efficient. Right now it does a lot of unnecessary calcs which are all
            # then set to zero. Done this way to ensure consistant array size
            obs = 0.0*obs
        return obs
| [
"numpy.random.uniform",
"particle_environments.common.delta_vel",
"particle_environments.mager.world.HazardousWorld",
"particle_environments.common.ResistanceNetwork",
"particle_environments.mager.observation.format_observation",
"particle_environments.common.distance",
"numpy.zeros",
"particle_enviro... | [((2935, 2951), 'particle_environments.mager.world.HazardousWorld', 'HazardousWorld', ([], {}), '()\n', (2949, 2951), False, 'from particle_environments.mager.world import MortalAgent, HazardousWorld, RiskRewardLandmark\n'), ((6033, 6066), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2.0 * np.pi)'], {}), '(0, 2.0 * np.pi)\n', (6050, 6066), True, 'import numpy as np\n'), ((9800, 9844), 'particle_environments.common.ResistanceNetwork', 'ResistanceNetwork', (['n_nodes', 'resistance_array'], {}), '(n_nodes, resistance_array)\n', (9817, 9844), False, 'from particle_environments.common import linear_index_to_lower_triangular, ResistanceNetwork\n'), ((10427, 10453), 'particle_environments.common.distance', 'distance', (['entity1', 'entity2'], {}), '(entity1, entity2)\n', (10435, 10453), False, 'from particle_environments.common import is_collision, distance, delta_pos, delta_vel\n'), ((12980, 13140), 'particle_environments.mager.observation.format_observation', 'format_observation', ([], {'observe': 'communications_observed', 'objects': 'comm_nodes', 'num_observations': '(self.num_agents - 1 + 2)', 'observation_size': '(2 * world.dim_p + 1)'}), '(observe=communications_observed, objects=comm_nodes,\n num_observations=self.num_agents - 1 + 2, observation_size=2 * world.\n dim_p + 1)\n', (12998, 13140), False, 'from particle_environments.mager.observation import format_observation\n'), ((4621, 4634), 'particle_environments.mager.world.MortalAgent', 'MortalAgent', ([], {}), '()\n', (4632, 4634), False, 'from particle_environments.mager.world import MortalAgent, HazardousWorld, RiskRewardLandmark\n'), ((4999, 5037), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(+1)', 'world.dim_p'], {}), '(-1, +1, world.dim_p)\n', (5016, 5037), True, 'import numpy as np\n'), ((5070, 5091), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (5078, 5091), True, 'import numpy as np\n'), ((5120, 5141), 'numpy.zeros', 'np.zeros', 
(['world.dim_c'], {}), '(world.dim_c)\n', (5128, 5141), True, 'import numpy as np\n'), ((5168, 5196), 'numpy.array', 'np.array', (['[0.35, 0.35, 0.85]'], {}), '([0.35, 0.35, 0.85])\n', (5176, 5196), True, 'import numpy as np\n'), ((5274, 5312), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(+1)', 'world.dim_p'], {}), '(-1, +1, world.dim_p)\n', (5291, 5312), True, 'import numpy as np\n'), ((5348, 5369), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (5356, 5369), True, 'import numpy as np\n'), ((6184, 6194), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (6190, 6194), True, 'import numpy as np\n'), ((6214, 6224), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (6220, 6224), True, 'import numpy as np\n'), ((6396, 6424), 'numpy.array', 'np.array', (['[xc - dx, yc - dy]'], {}), '([xc - dx, yc - dy])\n', (6404, 6424), True, 'import numpy as np\n'), ((6422, 6450), 'numpy.array', 'np.array', (['[xc + dx, yc + dy]'], {}), '([xc + dx, yc + dy])\n', (6430, 6450), True, 'import numpy as np\n'), ((9535, 9570), 'particle_environments.common.linear_index_to_lower_triangular', 'linear_index_to_lower_triangular', (['k'], {}), '(k)\n', (9567, 9570), False, 'from particle_environments.common import linear_index_to_lower_triangular, ResistanceNetwork\n'), ((3496, 3519), 'particle_environments.common.RadialPolynomialRewardFunction2D', 'RadialReward', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (3508, 3519), True, 'from particle_environments.common import RadialPolynomialRewardFunction2D as RadialReward\n'), ((3611, 3634), 'particle_environments.common.RadialPolynomialRewardFunction2D', 'RadialReward', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (3623, 3634), True, 'from particle_environments.common import RadialPolynomialRewardFunction2D as RadialReward\n'), ((4384, 4412), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (4392, 4412), True, 'import numpy as np\n'), ((6624, 6669), 'numpy.linalg.norm', 'np.linalg.norm', 
(['(a.state.p_pos - l.state.p_pos)'], {}), '(a.state.p_pos - l.state.p_pos)\n', (6638, 6669), True, 'import numpy as np\n'), ((6882, 6904), 'particle_environments.common.is_collision', 'is_collision', (['a', 'agent'], {}), '(a, agent)\n', (6894, 6904), False, 'from particle_environments.common import is_collision, distance, delta_pos, delta_vel\n'), ((12041, 12074), 'particle_environments.common.delta_pos', 'delta_pos', (['other_comm_node', 'agent'], {}), '(other_comm_node, agent)\n', (12050, 12074), False, 'from particle_environments.common import is_collision, distance, delta_pos, delta_vel\n'), ((12102, 12135), 'particle_environments.common.delta_vel', 'delta_vel', (['other_comm_node', 'agent'], {}), '(other_comm_node, agent)\n', (12111, 12135), False, 'from particle_environments.common import is_collision, distance, delta_pos, delta_vel\n')] |
# Python implementation of Metropolis-Hastings sampler for quantum states
# The original programs we have modified require the following notice
############################ COPYRIGHT NOTICE #################################
#
# Code provided by <NAME> and <NAME>, written by <NAME>, December 2016
#
# Permission is granted for anyone to copy, use, modify, or distribute the
# accompanying programs and documents for any purpose, provided this copyright
# notice is retained and prominently displayed, along with a complete citation
# of the published version of the paper:
# _____________________________________________________________________________
# | <NAME>, and <NAME> |
# | Solving the quantum many-body problem with artificial neural-networks |
# |___________________________________________________________________________|
#
# The programs and documents are distributed without any warranty, express or
# implied.
#
# These programs were written for research purposes only, and are meant to
# demonstrate and reproduce the main results obtained in the paper.
#
# All use of these programs is entirely at the user's own risk.
#
###############################################################################
import numpy as np
from .hamiltonian import Hamiltonian
class Heisenberg2D(Hamiltonian):
    """
    Hamiltonian of a Heisenberg-type spin model on an l x l square lattice
    (sites numbered row-major) with diagonal exchange coupling j_z on the
    Sz*Sz part; spin flips on a bond carry a fixed off-diagonal matrix
    element (see find_matrix_elements).
    """
    def __init__(self, n_spins, lattice, j_z, periodic):
        """
        Args:
            n_spins: total number of spins; must equal lattice**2.
            lattice: side length l of the square lattice.
            j_z: exchange coupling for the diagonal Sz*Sz term.
            periodic: whether boundary conditions are periodic.
        Raises:
            ValueError: if n_spins != lattice**2.
        """
        super().__init__()
        if n_spins != lattice ** 2:
            raise ValueError('N_spins not compatible with lattice size.')
        self.l = lattice
        # Minimum number of spin flips connecting two coupled basis states.
        self.min_flip = 2
        self.n_spins = n_spins
        self.j_z = j_z
        self.periodic = periodic
        self.nearest_neighbors, self.bonds = self.find_nearest_neighbors()
    def min_flips(self):
        return self.min_flip
    def num_spins(self):
        return self.n_spins
    def field(self):
        return self.j_z
    def is_periodic(self):
        return self.periodic
    def pbc_h(self, nn, s):
        """Wrap a horizontal neighbor candidate nn (s-1 or s+1) of site s
        across the left/right lattice edge.

        NOTE(review): wrapping is applied regardless of self.periodic —
        confirm that is intended.
        """
        if s % self.l == 0 and nn == s-1:
            # s is at left side of lattice; return rightmost element
            return s+self.l-1
        elif (s+1) % self.l == 0 and nn == (s+1):
            # s is at right side of lattice; return leftmost element
            return s-self.l+1
        else:
            return nn # s is in middle of lattice; return element to left
    def pbc_v_lower(self, nn):
        """Return the site one row above nn, wrapping the top row to the
        bottom row (row-major numbering: smaller index = higher row)."""
        if nn < self.l:
            return self.l*(self.l-1) + nn
        else:
            return nn - self.l
    def pbc_v_higher(self, nn):
        """Return the site one row below nn, wrapping the bottom row to the
        top row."""
        if self.l*(self.l-1) <= nn <= self.n_spins:
            return nn - self.l*(self.l-1)
        else:
            return nn + self.l
    def find_nearest_neighbors(self):
        """Build the (n_spins, 4) neighbor table [left, right, up, down] and
        the list of unique bonds (i, j) with i < j."""
        nearest_neighbors = np.zeros((self.n_spins, 4))
        bonds = []
        for i in range(self.n_spins):
            nearest_neighbors[i][0] = self.pbc_h(i-1, i)
            nearest_neighbors[i][1] = self.pbc_h(i+1, i)
            nearest_neighbors[i][2] = self.pbc_v_lower(i)
            nearest_neighbors[i][3] = self.pbc_v_higher(i)
        for i in range(self.n_spins):
            for k in range(4):
                # Table holds floats (np.zeros default); cast before comparing.
                j = int(nearest_neighbors[i][k])
                # Count each bond once, oriented i < j.
                if i < j:
                    bonds.append((i, j))
        return nearest_neighbors, bonds
    def find_matrix_elements(self, state):
        """
        inputs
            state: list of integers, with each corresponding to quantum number
        returns:
            transitions: list of states s such that <s|H|state> is nonzero.
                s are represented as a list of integers corresponding to which
                quantum variables got swapped
            matrix_elements: complex list <s|H|state> for each s in transitions
        """
        # Entry 0 is the diagonal element; its "transition" is the empty flip list.
        matrix_elements = [0]
        spin_flip_transitions = [[]]
        # computing interaction part Sz*Sz
        for i in range(len(self.bonds)):
            matrix_elements[0] += state[self.bonds[i][0]] * \
                                  state[self.bonds[i][1]]
        matrix_elements[0] *= self.j_z
        # look for spin flips: anti-aligned bonds contribute off-diagonal
        # matrix element -2
        for i in range(len(self.bonds)):
            si = self.bonds[i][0]
            sj = self.bonds[i][1]
            if state[si] != state[sj]:
                matrix_elements.append(-2)
                spin_flip_transitions.append([si, sj])
        return matrix_elements, spin_flip_transitions
| [
"numpy.zeros"
] | [((2918, 2945), 'numpy.zeros', 'np.zeros', (['(self.n_spins, 4)'], {}), '((self.n_spins, 4))\n', (2926, 2945), True, 'import numpy as np\n')] |
"""
Copyright 2020 ETH Zurich, Secure, Reliable, and Intelligent Systems Lab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from elina_scalar import *
from elina_dimension import *
from elina_linexpr0 import *
from elina_abstract0 import *
from fppoly import *
from fconv import *
import numpy as np
import time
import itertools
import multiprocessing
import math
from config import config
"""
Constraints are represented using the CDD format; see
http://web.mit.edu/sage/export/cddlib-094b.dfsg/doc/cddlibman.ps:
each row represents b + Ax >= 0
example: 2*x_1 - 3*x_2 >= 1 translates to [-1, 2, -3]
"""
def generate_linexpr0(offset, varids, coeffs):
    """Build a sparse ELINA linear expression sum_i coeffs[i] * x_(offset+varids[i]).

    The constant term is zero; ``varids`` and ``coeffs`` must have equal length.
    """
    assert len(varids) == len(coeffs)
    size = len(varids)
    expr = elina_linexpr0_alloc(ElinaLinexprDiscr.ELINA_LINEXPR_SPARSE, size)
    constant = pointer(expr.contents.cst)
    elina_scalar_set_double(constant.contents.val.scalar, 0)
    for term_idx, (var, weight) in enumerate(zip(varids, coeffs)):
        term = pointer(expr.contents.p.linterm[term_idx])
        term.contents.dim = ElinaDim(offset + var)
        term_coeff = pointer(term.contents.coeff)
        elina_scalar_set_double(term_coeff.contents.val.scalar, weight)
    return expr
class KAct:
    """Convex relaxation constraints for one group of k activation units.

    Class-level attributes (type, man, element, tdim, length, layerno,
    offset, domain) are injected by ``encode_kactivation_cons`` before any
    instance is built.
    """

    def __init__(self, input_hrep):
        """Compute output constraints from the input H-representation rows.

        Each row of ``input_hrep`` is ``[bias, c_1, ..., c_k]``, so the group
        size k is one less than the row length.
        """
        assert KAct.type in ["ReLU", "Tanh", "Sigmoid"]
        self.k = len(input_hrep[0]) - 1
        self.input_hrep = np.array(input_hrep)
        # Pick the relaxation routine matching the activation type.
        if KAct.type == "ReLU":
            relax = fkrelu
        elif KAct.type == "Tanh":
            relax = ftanh_orthant
        else:
            relax = fsigm_orthant
        self.cons = relax(self.input_hrep)
def make_kactivation_obj(input_hrep):
    """Module-level KAct factory, used as the callable for Pool.map below."""
    return KAct(input_hrep)
def get_ineqs_zono(varsid):
    """Bound every +/-1/0 linear form over ``varsid`` in the zonotope domain.

    Returns the input H-representation: one row
    ``[upper_bound, -c_1, ..., -c_k]`` per nonzero coefficient vector,
    encoding ``sum_i c_i * x_i <= upper_bound``.
    """
    input_hrep = []
    # Get bounds on linear expressions over variables before relu
    # Order of coefficients determined by logic here
    for coeffs in itertools.product([-1, 0, 1], repeat=len(varsid)):
        if all(c == 0 for c in coeffs):
            continue
        linexpr0 = generate_linexpr0(KAct.offset, varsid, coeffs)
        # Bug fix: the shared element/tdim live on KAct (the class was renamed
        # from Krelu); the stale Krelu.* references raised NameError at runtime.
        element = elina_abstract0_assign_linexpr_array(KAct.man, True, KAct.element,
                                                      KAct.tdim, linexpr0, 1, None)
        bound_linexpr = elina_abstract0_bound_dimension(KAct.man, KAct.element,
                                                        KAct.offset + KAct.length)
        upper_bound = bound_linexpr.contents.sup.contents.val.dbl
        input_hrep.append([upper_bound] + [-c for c in coeffs])
    return input_hrep
def sparse_heuristic_with_cutoff(length, lb, ub):
    """Group unstable ReLU indices into overlapping k-subsets for k-ReLU.

    Only neurons whose input can be both negative and positive
    (lb < 0 < ub) are considered; neurons whose triangle-relaxation area
    falls below ``cutoff`` only get the single-neuron relaxation.

    Args:
        length: number of neurons in the layer.
        lb, ub: per-neuron concrete lower/upper bounds.
    Returns:
        list of index groups (lists or tuples) to relax jointly.
    """
    assert length == len(lb) == len(ub)
    all_vars = [i for i in range(length) if lb[i] < 0 < ub[i]]
    areas = {var: -lb[var] * ub[var] for var in all_vars}
    assert len(all_vars) == len(areas)
    K = 3
    sparse_n = config.sparse_n
    cutoff = 0.05
    # Sort vars by descending area so the least precise neurons are grouped
    # first. (The original code performed this identical sort twice.)
    all_vars = sorted(all_vars, key=lambda var: -areas[var])
    vars_above_cutoff = [i for i in all_vars if areas[i] >= cutoff]
    n_vars_above_cutoff = len(vars_above_cutoff)
    kact_args = []
    while len(vars_above_cutoff) > 0 and config.sparse_n >= K:
        grouplen = min(sparse_n, len(vars_above_cutoff))
        group = vars_above_cutoff[:grouplen]
        vars_above_cutoff = vars_above_cutoff[grouplen:]
        if grouplen <= K:
            kact_args.append(group)
        else:
            # Cover larger groups with overlapping K-subsets instead of the
            # full exponential set of subsets.
            sparsed_combs = generate_sparse_cover(grouplen, K)
            for comb in sparsed_combs:
                kact_args.append(tuple([group[i] for i in comb]))
    # Also just apply 1-relu for every var.
    for var in all_vars:
        kact_args.append([var])
    print("krelu: n", config.sparse_n,
          "split_zero", len(all_vars),
          "after cutoff", n_vars_above_cutoff,
          "number of args", len(kact_args))
    return kact_args
def sparse_heuristic_curve(length, lb, ub, is_sigm):
    """Select neuron index groups for joint sigmoid/tanh relaxation.

    Neurons with a near-degenerate input range, or whose range lies entirely
    inside a saturated tail of the activation, only receive the
    single-neuron relaxation.
    """
    assert length == len(lb) == len(ub)
    all_vars = list(range(length))
    K = 3
    sparse_n = config.sparse_n
    # Saturation threshold: sigmoid flattens out a little later than tanh.
    limit = 4 if is_sigm else 3
    # Joint-relaxation candidates: wide enough input range, and not entirely
    # inside a saturated region.
    pending = [i for i in all_vars
               if ub[i] - lb[i] >= 0.1 and lb[i] <= limit and ub[i] >= -limit]
    n_vars_after_cutoff = len(pending)
    kact_args = []
    while pending and config.sparse_n >= K:
        group_size = min(sparse_n, len(pending))
        group, pending = pending[:group_size], pending[group_size:]
        if group_size <= K:
            kact_args.append(group)
        else:
            # Cover larger groups with overlapping K-subsets.
            for comb in generate_sparse_cover(group_size, K):
                kact_args.append(tuple(group[i] for i in comb))
    # Every neuron additionally gets its own single-variable relaxation.
    for var in all_vars:
        kact_args.append([var])
    print("krelu: n", config.sparse_n,
          "after cutoff", n_vars_after_cutoff,
          "number of args", len(kact_args),
          "Sigm" if is_sigm else "Tanh")
    return kact_args
def encode_kactivation_cons(nn, man, element, offset, layerno, length, lbi, ubi, constraint_groups, need_pop, domain, activation_type):
    """Append one list of k-activation constraint groups for this layer.

    Selects neuron groups via the sparse heuristics, computes the input
    H-representation of each group (octahedral over-approximation), and
    relaxes the groups in parallel via ``make_kactivation_obj``. The result
    is appended to ``constraint_groups`` (mutated in place).
    """
    import deepzono_nodes as dn
    if need_pop:
        # Caller asked to replace the previously pushed group list.
        constraint_groups.pop()
    lbi = np.asarray(lbi, dtype=np.double)
    ubi = np.asarray(ubi, dtype=np.double)
    # Pick the grouping heuristic for the activation family.
    if activation_type == "ReLU":
        kact_args = sparse_heuristic_with_cutoff(length, lbi, ubi)
    else:
        kact_args = sparse_heuristic_curve(length, lbi, ubi, activation_type == "Sigmoid")
    kact_cons = []
    # Temporary extra dimension used for bound queries in the zono path.
    tdim = ElinaDim(offset+length)
    if domain == 'refinezono':
        element = dn.add_dimensions(man,element,offset+length,1)
    # Publish shared state as KAct class attributes so get_ineqs_zono and the
    # KAct constructor (run in pool workers) can access it.
    KAct.man = man
    KAct.element = element
    KAct.tdim = tdim
    KAct.length = length
    KAct.layerno = layerno
    KAct.offset = offset
    KAct.domain = domain
    KAct.type = activation_type
    start = time.time()
    if domain == 'refinezono':
        with multiprocessing.Pool(config.numproc) as pool:
            input_hrep_array = pool.map(get_ineqs_zono, kact_args)
    else:
        # Batch all bound queries: each group of size k contributes
        # 3**k - 1 nonzero +/-1/0 coefficient vectors.
        total_size = 0
        for varsid in kact_args:
            size = 3**len(varsid) - 1
            total_size = total_size + size
        linexpr0 = elina_linexpr0_array_alloc(total_size)
        i = 0
        for varsid in kact_args:
            for coeffs in itertools.product([-1, 0, 1], repeat=len(varsid)):
                if all(c == 0 for c in coeffs):
                    continue
                linexpr0[i] = generate_linexpr0(offset, varsid, coeffs)
                i = i + 1
        upper_bound = get_upper_bound_for_linexpr0(man,element,linexpr0, total_size, layerno)
        # Unpack the flat bound array per group, iterating the coefficient
        # vectors in the exact same order as above.
        i=0
        input_hrep_array = []
        for varsid in kact_args:
            input_hrep = []
            for coeffs in itertools.product([-1, 0, 1], repeat=len(varsid)):
                if all(c == 0 for c in coeffs):
                    continue
                input_hrep.append([upper_bound[i]] + [-c for c in coeffs])
                i = i + 1
            input_hrep_array.append(input_hrep)
    with multiprocessing.Pool(config.numproc) as pool:
        kact_results = pool.map(make_kactivation_obj, input_hrep_array)
    # Re-attach each result to the variable ids of its group (pool.map
    # preserves input order).
    gid = 0
    for inst in kact_results:
        varsid = kact_args[gid]
        inst.varsid = varsid
        kact_cons.append(inst)
        gid = gid+1
    end = time.time()
    if config.debug:
        print('kactivation time spent: ' + str(end-start))
    if domain == 'refinezono':
        # Drop the temporary query dimension added above.
        element = dn.remove_dimensions(man, element, offset+length, 1)
    constraint_groups.append(kact_cons)
| [
"numpy.asarray",
"deepzono_nodes.remove_dimensions",
"time.time",
"deepzono_nodes.add_dimensions",
"numpy.array",
"multiprocessing.Pool"
] | [((6106, 6138), 'numpy.asarray', 'np.asarray', (['lbi'], {'dtype': 'np.double'}), '(lbi, dtype=np.double)\n', (6116, 6138), True, 'import numpy as np\n'), ((6149, 6181), 'numpy.asarray', 'np.asarray', (['ubi'], {'dtype': 'np.double'}), '(ubi, dtype=np.double)\n', (6159, 6181), True, 'import numpy as np\n'), ((6751, 6762), 'time.time', 'time.time', ([], {}), '()\n', (6760, 6762), False, 'import time\n'), ((8220, 8231), 'time.time', 'time.time', ([], {}), '()\n', (8229, 8231), False, 'import time\n'), ((1943, 1963), 'numpy.array', 'np.array', (['input_hrep'], {}), '(input_hrep)\n', (1951, 1963), True, 'import numpy as np\n'), ((6489, 6540), 'deepzono_nodes.add_dimensions', 'dn.add_dimensions', (['man', 'element', '(offset + length)', '(1)'], {}), '(man, element, offset + length, 1)\n', (6506, 6540), True, 'import deepzono_nodes as dn\n'), ((7937, 7973), 'multiprocessing.Pool', 'multiprocessing.Pool', (['config.numproc'], {}), '(config.numproc)\n', (7957, 7973), False, 'import multiprocessing\n'), ((8362, 8416), 'deepzono_nodes.remove_dimensions', 'dn.remove_dimensions', (['man', 'element', '(offset + length)', '(1)'], {}), '(man, element, offset + length, 1)\n', (8382, 8416), True, 'import deepzono_nodes as dn\n'), ((6808, 6844), 'multiprocessing.Pool', 'multiprocessing.Pool', (['config.numproc'], {}), '(config.numproc)\n', (6828, 6844), False, 'import multiprocessing\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorkit.base import EvaluatorBase
def evaluate(sess, input_node, output_node, dataset, stage, eval_list, limit=-1):
    """Run the model on up to `limit` samples and append accuracy/speed metrics.

    Args:
        sess: TensorFlow session used to run the graph.
        input_node: placeholder tensor fed with the input batch.
        output_node: tensor producing per-class prediction scores.
        dataset: object exposing `labels` and `next_batch(n)` -> (inputs, labels).
        stage: label prefix for the metric entries (e.g. 'Val', 'Train').
        eval_list: list that the (name, value) metric tuples are appended to.
        limit: number of samples to evaluate; -1 means the whole dataset.
    """
    if limit == -1:
        limit = len(dataset.labels)
    start_time = time.time()
    # renamed from `input` to avoid shadowing the Python builtin
    batch, labels = dataset.next_batch(limit)
    feed_dict = {input_node: batch}
    res = sess.run([output_node], feed_dict)
    duration = time.time() - start_time
    preds = np.array(res[0])
    labels = np.array(labels)
    # one-hot labels vs. predicted scores: compare argmax class indices
    correct_predictions = np.argmax(labels, axis=1) == np.argmax(preds, axis=1)
    accuracy = float(np.sum(correct_predictions)) / correct_predictions.shape[0]
    eval_list.append(('%s Accuracy:' % stage, accuracy))
    # guard against a zero duration on coarse clocks (would raise ZeroDivisionError)
    eval_list.append(('%s Speed (fps):' % stage, limit / max(duration, 1e-12)))
class Evaluator(EvaluatorBase):
    """tensorkit evaluator: reports accuracy and speed on validation and train sets."""

    def evaluate(self, hypes, sess, input_node, logits, datasets):
        # Metrics are collected as (name, value) tuples for the training loop to log.
        eval_list = []
        output_node = logits['output']
        # The same sample count (validation size) is used for both stages so the
        # reported numbers are comparable.
        limit = len(datasets.validation)
        evaluate(sess,
                 input_node=input_node,
                 output_node=output_node,
                 dataset=datasets.validation,
                 stage='Val',
                 limit=limit,
                 eval_list=eval_list)
        evaluate(sess,
                 input_node=input_node,
                 output_node=output_node,
                 dataset=datasets.train,
                 stage='Train',
                 limit=limit,
                 eval_list=eval_list)
        return eval_list | [
"numpy.sum",
"numpy.array",
"numpy.argmax",
"time.time"
] | [((342, 353), 'time.time', 'time.time', ([], {}), '()\n', (351, 353), False, 'import time\n'), ((536, 552), 'numpy.array', 'np.array', (['res[0]'], {}), '(res[0])\n', (544, 552), True, 'import numpy as np\n'), ((566, 582), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (574, 582), True, 'import numpy as np\n'), ((498, 509), 'time.time', 'time.time', ([], {}), '()\n', (507, 509), False, 'import time\n'), ((609, 634), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (618, 634), True, 'import numpy as np\n'), ((638, 662), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (647, 662), True, 'import numpy as np\n'), ((685, 712), 'numpy.sum', 'np.sum', (['correct_predictions'], {}), '(correct_predictions)\n', (691, 712), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
Created on Jul 28 2019
Last large update on Jun 22 2020
@author: <NAME>
@supervisor: <NAME>
It's (will be) a Python3.6 or higher program that perform massive search and classification of variable stars into VVV data.
'''
import os
import sys
import math
import numpy as np
import pandas as pd
from astropy.io import ascii
from astropy.table import Table
from astropy.stats import sigma_clip
from scipy.optimize import curve_fit
from scipy import optimize, signal
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
#from matplotlib import rc
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
import time
sys.path.append('/home/botan/OneDrive/Doutorado/VVV_DATA/my_modules/')
import clean_match_tables as cmt
import fit_sin_series as fitSin
import periodogram as pg
import variability_indicator as vi
import star_classificator_tools as sct
import status
class PeriodSearch(object):
def __init__(self, path, tile, minP, maxP, varIndex='chi2'):
'''
tile: tile name as b293 it deppends of your folder architecture.
minP: minimum period (float)
maxP: maximum period (float)
varIndex: uncorrelated variable index: std or chi2
'''
self.tile = tile
self.varIndex = varIndex
self.minP = minP
self.maxP = maxP
self.path = f'{path}/{tile}'
os.makedirs(f'{self.path}/figures',exist_ok=True)
os.makedirs(f'{self.path}/output',exist_ok=True)
self.chips = [fn[:-3] for fn in sorted(os.listdir(f'{self.path}/chips/')) if fn.endswith('ts')]
self.tiles = sorted(os.listdir('/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts/'))
def organize_tables(self):
org = cmt.CreateTable(path=self.path,
tile=self.tile,
min_sample=25,
raw_files=True)
#org.plot_chips(show=False)
def select_candidates(self):
select = vi.Variability(path=self.path,
tile=self.tile,
method=self.varIndex,
maxMag=11.5,
stdRatio=1.5,
minChi2=2,
savePlot=True)
select.do_selection()
def _read_tables(self,chip):
self.data_table = pd.read_csv(f'{self.path}/chips/{chip}.ts',index_col=0,sep=',',low_memory=False)
self.obs_time = np.loadtxt(f'{self.path}/chips/{chip}.mjd')
self.mag_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'MAG']
self.err_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'ERR']
self.color_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'mag']
self.ks_mag = self.data_table[self.mag_cols]
self.ks_err = self.data_table[self.err_cols]
self.star_ids = self.data_table.index
if self.varIndex == 'chi2':
self.candidates = np.genfromtxt(f'{self.path}/var_data/{chip}.chi2cand', dtype=str)
if self.varIndex == 'std':
self.candidates = np.genfromtxt(f'{self.path}/var_data/{chip}.stdcand', dtype=str)
def _freq_agreement(self,f1,f2,h=4):
'''check if PDM and LSG frequencies are in agreement till 4 harmonics'''
n = 1
while n <= h:
m = 1
while m <= h:
if abs(f1/f2 - n/m) < 0.01:
bol = True
break
else:
bol = False
m+=1
if bol:
break
n+=1
return bol
    def do_periodogram(self, exists=True):
        """Run LSG and PDM periodograms for every variability candidate of each chip.

        A candidate is kept only when its Lomb-Scargle and PDM frequencies agree
        (within 1% including harmonic ratios up to n=4, see _freq_agreement).
        Kept stars are written to output/{chip}_lsg_parameters.csv and
        output/{chip}_pdm_parameters.csv.

        exists: when True, chips that already have a *_pdm_parameters.csv
                in var_data/ are skipped.
        """
        j=1
        for chip in self.chips:
            if exists:
                # fn[:-19] strips '_pdm_parameters.csv', leaving the chip name
                chips_done = [fn[:-19] for fn in os.listdir(f'{self.path}/var_data/') if fn.endswith('pdm_parameters.csv')]
            else:
                chips_done = []
            if not chip in chips_done:
                self._read_tables(chip)
                lsg_pgram_params = []
                pdm_pgram_params = []
                i=1
                for star in self.candidates:
                    status._print(prefix=f'Periodogram of chip {chip}',
                                  iter1=j,
                                  length1=len(self.chips),
                                  iter2=i,
                                  length2=len(self.candidates),
                                  sufix='%')
                    lc = self.ks_mag.loc[star].values
                    err = self.ks_err.loc[star].values
                    t = self.obs_time
                    pgram = pg.Periodogram(t, lc, err, self.minP, self.maxP,
                                           normalization='psd',
                                           method='scargle',
                                           samples_per_peak=10,
                                           false=0.001,
                                           nbins=10,
                                           covers=3,
                                           mode=False)
                    lsg_freq, lsg_power, lsg_false_alarm, lsg_best_freq, lsg_fap, lsg_sig_level, lsg_all_freq = pgram.LSG()
                    # Lomb-Scargle is much faster than PDM, so PDM runs only if LSG found a true frequency
                    if lsg_best_freq > 0:
                        pdm_freq, pdm_theta, pdm_best_freq, pdm_fap, pdm_sig_level, pdm_all_freq = pgram.CyPDM()
                        if pdm_best_freq > 0:
                            # comparison with PDM period (inside 1% and harmonics until n = 4):
                            if self._freq_agreement(f1=lsg_best_freq, f2=pdm_best_freq, h=4):
                                #coords:
                                ra = self.data_table.loc[star]['RA']
                                dec = self.data_table.loc[star]['DEC']
                                # J and Ks aper mag for color classification
                                j_mag = self.data_table.loc[star]['J']
                                j_err = self.data_table.loc[star]['JERR']
                                k_mag = self.data_table.loc[star]['K']
                                k_err = self.data_table.loc[star]['KERR']
                                EJK = self.data_table.loc[star]['EJK']
                                EJKERR = self.data_table.loc[star]['EJKERR']
                                # ZYJHKs psf mag for color classification
                                color_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'mag' or col.split('_')[0] == 'er']
                                color_vals = [self.data_table.loc[star][col] for col in color_cols]
                                # amplitude from light curve (95 - 5 percentile)
                                amplitude = np.nanpercentile(lc,q=95) - np.nanpercentile(lc,q=5)
                                lsg_params = [star, chip, ra, dec] + color_vals + [j_mag, j_err,
                                    k_mag, k_err, EJK, EJKERR, lsg_best_freq, amplitude,
                                    lsg_fap, lsg_sig_level]
                                pdm_params = [star, chip, ra, dec] + color_vals + [j_mag, j_err,
                                    k_mag, k_err, EJK, EJKERR, pdm_best_freq, amplitude,
                                    pdm_fap, pdm_sig_level]
                                lsg_pgram_params.append(lsg_params)
                                pdm_pgram_params.append(pdm_params)
                    i+=1
                # save periodogram data to files
                # NOTE(review): color_cols is assigned inside the star loop above;
                # if no candidate passed the frequency-agreement cut this line
                # raises NameError — confirm whether that case can happen.
                colnames = ['ID','chip','RA','DEC'] + color_cols + ['APER_J',
                    'APER_JERR', 'APER_K','APER_KERR','APER_EJK',
                    'APER_EJKERR','best_freq','amplitude','fap','sig_level']
                lsg_pgram_params = pd.DataFrame(lsg_pgram_params, columns=colnames)
                lsg_pgram_params.set_index('ID',inplace=True)
                lsg_pgram_params.to_csv(f'{self.path}/output/{chip}_lsg_parameters.csv',sep=',')
                pdm_pgram_params = pd.DataFrame(pdm_pgram_params, columns=colnames)
                pdm_pgram_params.set_index('ID',inplace=True)
                pdm_pgram_params.to_csv(f'{self.path}/output/{chip}_pdm_parameters.csv',sep=',')
                j+=1
            else:
                j+=1
def get_color_range(self,tile,star_type='RRLyr',catalog='OGLE'):
c = sct.StarClassificator()
color_range = c.get_color_ranges(tile=tile,
star_type=star_type,
catalog=catalog,
exists=True,
plot=True,
savefile=True)
return color_range
def get_period_range(self,star_type='RRLyr',catalog='OGLE'):
p = sct.StarClassificator()
period_range = p.get_period_ranges(star_type=star_type,
catalog=catalog,
exists=True,
plot=True,
savefile=True)
return period_range
def _find_mean_sig(self,x,xpos,sig):
''' for use in failure modes '''
mean = np.median(x[(x > xpos - sig) & (x < xpos + sig)])
newsig = np.std(x[(x > mean - sig) & (x < mean + sig)])
return mean, newsig
    def failure_modes(self, freq=1.0, sigfactor=1, plot=True):
        """Separate stars whose best frequency falls on a seasonal (integer day^-1) alias.

        Scans integer frequencies 1, 2, ... up to max(best_freq), measures the
        local clustering with _find_mean_sig, and flags every star within
        `sigfactor` standard deviations of an alias peak.  Writes alias and
        alias-free parameter tables to var_data/ and returns (good_ids, bad_ids).

        NOTE(review): the `freq` parameter is never used in the body, and the
        `alias` list is collected but not used after the loop.
        """
        all_chips_params = []
        for chip in self.chips:
            lsg_params = pd.read_csv(f'{self.path}/var_data/{chip}_lsg_parameters.csv',sep=',',index_col='ID')
            all_chips_params.append(lsg_params)
        all_chips_params = pd.concat(all_chips_params)
        gsig = 0.1  # half-width (day^-1) of the initial window around each integer frequency
        x = all_chips_params['best_freq']
        y = all_chips_params['amplitude']
        bad_ids = []
        alias = []
        xbar = []
        height = []
        xpos = 1
        while xpos < x.max():
            mean,sig = self._find_mean_sig(x,xpos,gsig)
            #print(xpos, mean, sig)
            alias.append((mean,sig))
            s = all_chips_params.index[((x > mean - sig*sigfactor) & (x < mean + sig*sigfactor))]
            #print('BadIds:', len(s), 'for freq: ', mean)
            xbar.append(xpos)
            height.append(len(s))
            bad_ids.append(s)
            xpos+=1
        bad_ids = np.concatenate(bad_ids)
        good_ids = [n for n in all_chips_params.index if n not in bad_ids]
        all_chips_params.loc[good_ids].to_csv(f'{self.path}/var_data/{self.tile}_lsg_aliasless_parameters.csv',sep=',')
        all_chips_params.loc[bad_ids].to_csv(f'{self.path}/var_data/{self.tile}_lsg_alias_parameters.csv',sep=',')
        #print('Selected: ',len(good_ids),' Aliases: ',len(bad_ids),' Total: ',len(y))
        if plot:
            # scatter of amplitude vs. frequency with aliases in red
            textstr = '\n'.join((f'Total: {len(y)}',
                                 f'Aliases: {len(bad_ids)}'))
            props = dict(boxstyle='square', facecolor='w', alpha=0.3)
            fig, ax = plt.subplots(figsize=[6,3],tight_layout=True)
            ax.scatter(x.loc[bad_ids],y.loc[bad_ids],marker='.',s=1,c='r',alpha=.4)
            ax.scatter(x.loc[good_ids],y.loc[good_ids],marker='.',s=1,c='k',alpha=.4)
            ax.text(0.05, 0.95, textstr,transform=ax.transAxes, fontsize=11,verticalalignment='top', bbox=props)
            ax.set_xlabel('Frequency [$days^{-1}$]')
            ax.set_ylabel('Amplitude [Ks mag]')
            ax.set_ylim(0,1.5)
            ax.set_xlim(-0.2,10.2)
            plt.tight_layout()
            plt.savefig(f'{self.path}/var_data/ampxfreq.png',dpi=300)
            #plt.show()
            plt.close()
            # bar chart: number of flagged stars per integer frequency
            plt.figure(figsize=[7,4])
            plt.bar(xbar,height,width=0.9)
            plt.xlabel('Frequency [$day^{-1}$]')
            plt.ylabel('Counts')
            plt.title('Number of stars by frequency aliases')
            plt.tight_layout()
            plt.savefig(f'{self.path}/var_data/barplot_aliases.png',dpi=300)
            #plt.show()
            plt.close()
        return good_ids, bad_ids
def _phase(self,P,t):
phi = (t - min(t))/(P)
phase = phi - (phi).astype(int)
return phase
def _center_curve(self,fitmags,phase,mags,errs,anchor=0.25):
'''
It Shifts the phased light curve to align the min mag value (valley) to anchor for better visualization.
'''
min_arg = fitmags.argmax()
phi = phase[min_arg]
phase_shift = phase - phi
phase_shift[phase_shift < 0 ] += 1
phase_shift = phase_shift + anchor
phase_shift[phase_shift > 1 ] -= 1
return phase_shift
    def periodogram_plot(self,star,fitargs,pmin,pmax,fig_path,plotfit=True,show=False):
        """Save a 4-panel diagnostic figure for one star: raw light curve, LSG
        power spectrum, and the light curve phased with P and with 2P.

        star: star identifier used in the title and file name (LSG_{star}.png).
        fitargs: [t, lc, lcErr, freq, order, phaseSpace, fitFreq, iterative, fitastrobase].
        pmin, pmax: period range shown in the power-spectrum panel.
        fig_path: output folder (created if missing).
        plotfit: overplot the fitted sinusoid series on the phased panels.
        show: also display the figure interactively.

        Nothing is drawn when either sinusoid fit fails (fit routine returns 0).
        """
        t,lc,lcErr,freq,order,phaseSpace,fitFreq,iterative,fitastrobase = fitargs
        # fit with P
        fitdat1 = self._curve_fit_switch(t,lc,lcErr,freq,order,phaseSpace,fitFreq,iterative,fitastrobase)
        fitdat2 = self._curve_fit_switch(t,lc,lcErr,freq/2.,order,phaseSpace,fitFreq,iterative,fitastrobase)
        if ((fitdat1 != 0) and (fitdat2 != 0)):
            # NOTE(review): times1/times2 are unpacked but never used below
            times1 = fitdat1['magseries']['times']
            phase1 = fitdat1['magseries']['phase']
            mags1 = fitdat1['magseries']['mags']
            errs1 = fitdat1['magseries']['errs']
            fitmags1 = fitdat1['magseries']['fitmags']
            # fit with 2P
            times2 = fitdat2['magseries']['times']
            phase2 = fitdat2['magseries']['phase']
            mags2 = fitdat2['magseries']['mags']
            errs2 = fitdat2['magseries']['errs']
            fitmags2 = fitdat2['magseries']['fitmags']
            # Power spectrum
            pgram = pg.Periodogram(x=t, y=lc, yerr=lcErr,
                                   minP=0.1,
                                   maxP=1000,
                                   normalization='psd',
                                   method='scargle',
                                   samples_per_peak=10,
                                   false=0.001,
                                   nbins=10,
                                   covers=3,
                                   mode=False)
            frequency, power, false_alarm, best_freq, fap, sig_level, all_freq = pgram.LSG()
            ms = 3  # marker size shared by the data panels
            fig, ax = plt.subplots(4,1,figsize=[6,9],gridspec_kw={'wspace':0, 'hspace':0.4})
            ax[0].set_title(f'ID: {star}')
            ax[0].errorbar(x=t,y=lc,yerr=lcErr,
                           capsize=2,elinewidth=0.8,fmt='.',
                           mec='k',mfc='k',ms=ms,ecolor='r')
            # magnitude axes are inverted so brighter is up
            ax[0].invert_yaxis()
            ax[0].set_xlabel('MJD [days]')
            ax[0].set_ylabel('Ks [mag]')
            ax[1].hlines(sig_level, xmin=frequency.max(), xmax=frequency.min(), ls='dashed', lw=.8, color='r')
            ax[1].text(s='FAP level 0.1%', x=(0.75/pmin), y=sig_level + sig_level*.02, color='r')
            ax[1].plot(frequency,power,'k-',lw=.5)
            ax[1].set_xlim(1./pmax, 1./pmin)
            ax[1].set_xlabel('frequency [$day^{-1}$]')
            ax[1].set_ylabel('LSG power')
            #plot phased lc with P:
            period = 1./freq
            if fitdat1 != 0:
                # centralize the phased light curve:
                phase_shift = self._center_curve(fitmags1,phase1,mags1,errs1,anchor=0.5)
                ax[2].plot(phase_shift ,mags1,'.k',ms=ms)
                ax[2].plot(phase_shift+1,mags1,'.k',ms=ms)
                if plotfit:
                    ax[2].plot(phase_shift ,fitmags1,'r.',ms=1)
                    ax[2].plot(phase_shift+1,fitmags1,'r.',ms=1)
            else:
                ax[2].plot(phase1,mags1 ,'.k',ms=ms)
                ax[2].plot(phase1+1,mags1,'.k',ms=ms)
            ax[2].invert_yaxis()
            ax[2].set_xlabel(f'Phase [Period: {round(period,5)} days]')
            ax[2].set_ylabel('Ks [mag]')
            #plot phased lc with 2P:
            # NOTE(review): the 2P label uses best_freq from the re-run periodogram,
            # while the fit above used freq/2 — confirm these are meant to differ.
            period = 2./best_freq
            if fitdat2 != 0:
                # centralize the phased light curve:
                phase_shift = self._center_curve(fitmags2,phase2,mags2,errs2,anchor=0.25)
                ax[3].plot(phase_shift,mags2,'.k',ms=ms)
                if plotfit:
                    ax[3].plot(phase_shift,fitmags2,'r.',ms=1)
            else:
                ax[3].plot(phase2,mags2,'.k',ms=ms)
            ax[3].invert_yaxis()
            ax[3].set_xlabel(f'Phase [Period: {round(period,5)} days]')
            ax[3].set_ylabel('Ks [mag]')
            os.makedirs(fig_path,exist_ok=True)
            plt.savefig(f'{fig_path}/LSG_{star}.png',dpi=100,pad_inches=0.02)
            if show:
                plt.show()
            plt.close()
def _read_periodogram_outputs(self,chip):
self.lsg_params = pd.read_csv(f'{self.path}/var_data/{chip}_lsg_parameters.csv',sep=',',index_col='ID')
self.pdm_params = pd.read_csv(f'{self.path}/var_data/{chip}_pdm_parameters.csv',sep=',',index_col='ID')
def _RRLyr_subtype(self,freq,RRab_pmin,RRab_pmax):
'amplitude vs period plot'
if ((1./freq > RRab_pmin) and (1./freq < RRab_pmax)):
starsubtype = 'RRab'
else:
starsubtype = 'RRc'
return starsubtype
def _stars_already_plotted(self):
stars_done = ([_[4:-4] for _ in sorted(os.listdir(f'{self.path}/lc_plots/short_period/pos_visual_inspection/RRLyr')) if _[4:-4] != ''] +
[_[4:-4] for _ in sorted(os.listdir(f'{self.path}/lc_plots/short_period/pos_visual_inspection/ECL')) if _[4:-4] != ''] +
[_[4:-4] for _ in sorted(os.listdir(f'{self.path}/lc_plots/short_period/pos_visual_inspection/IDK')) if _[4:-4] != ''])
return stars_done
def _curve_fit_switch(self,t,lc,lcErr,freq,order,phaseSpace,fitFreq,iterative,fitastrobase):
fit = fitSin.FitSinSeries(phaseSpace=phaseSpace,fitFreq=fitFreq)
if fitastrobase:
fitdat = fit.fit_sinusoid_astrobase(t,lc,lcErr,freq,order=order)
else:
if iterative:
fitdat = fit.fit_sinusoid_iterative(t,lc,lcErr,freq,order=order)
else:
fitdat = fit.fit_sinusoid_N_order(t,lc,lcErr,freq,order=order)
return fitdat
    def do_periodogram_plots(self,pmin,pmax,fpath,phaseSpace,fitFreq,order,iterative,fitastrobase):
        '''
        First-pass plotting and automated ECL classification.

        For every alias-free candidate with period inside (pmin, pmax), an
        sct.StarClassificator decides between 'ECL' and unclassified; ECLs are
        fitted at 2P, unclassified stars at both P and 2P.  Fit parameters are
        accumulated and written to three CSVs under fpath; diagnostic plots go
        to fpath/{startype} (and .../{startype}_bonafide when R2 > 0.6).

        NOTE(review): `output_data` is initialized but never used.
        '''
        print(f'Removing seasonal aliases from tile {self.tile}...')
        aliasfree_id, _ = self.failure_modes(freq=1.0, sigfactor=1, plot=True)
        ecl_fitparams = []
        unc_P_fitparams = []
        unc_2P_fitparams = []
        output_data = []
        i = 1
        for chip in self.chips:
            self._read_tables(chip)
            self._read_periodogram_outputs(chip)
            candidates = [star for star in self.lsg_params.index if star in aliasfree_id]
            #stars_done = self._stars_already_plotted()
            #candidates = [_ for _ in candidates if _ not in stars_done]
            j=1
            for star in candidates:
                status._print(prefix=f'Plotting chip {chip}',
                              iter1=i,
                              length1=len(self.chips),
                              iter2=j,
                              length2=len(candidates),
                              sufix='%')
                t = self.obs_time
                lc = self.ks_mag.loc[star].values
                lcErr = self.ks_err.loc[star].values
                ra = self.data_table.RA.loc[star]
                dec = self.data_table.DEC.loc[star]
                best_freq = self.lsg_params['best_freq'].loc[star]
                amplitude = self.lsg_params['amplitude'].loc[star]
                # ZYJHKs psf mag for color classification
                color_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'mag' or col.split('_')[0] == 'er']
                color_vals = [self.data_table.loc[star][col] for col in color_cols]
                #period select
                if ((1./best_freq > pmin) and (1./best_freq < pmax)):
                    # test for ECLs
                    c = sct.StarClassificator()
                    startype = c.ECL_classificator(t, lc, lcErr, best_freq, phaseSpace=True)
                    # ECL star type: eclipsing binaries are fitted at twice the LSG period
                    if startype == 'ECL':
                        fitdat1 = self._curve_fit_switch(t,lc,lcErr,best_freq/2.,order,phaseSpace,fitFreq,iterative,fitastrobase)
                        fitparams = fitdat1['fitparams']
                        R2 = fitdat1['fitinfo']['R2']
                        chi2 = fitdat1['fitinfo']['Chi2']
                        res = fitdat1['magseries']['residuals']
                        errs = fitdat1['magseries']['errs']
                        meanRESS = np.sum(res**2)/(len(res)-1)
                        red_chi2 = np.sum(res**2 / errs**2)/(len(res)-1)
                        fit_parameters = [star,ra,dec,best_freq,amplitude,R2,chi2,meanRESS,red_chi2] + color_vals
                        fit_params_cols = ['ID','RA','DEC','Freq','Amplitude','R2','Chi2','residual_sum','red_chi2'] + color_cols
                        # flatten the per-harmonic amplitudes/phases into A1,PHI1,...
                        n=0
                        k=1
                        while n < len(fitparams[0]):
                            fitpar = fitdat1['fitparams']
                            fitparerrs = fitdat1['fitparamserrs']
                            fit_parameters += [fitpar[0][n],fitparerrs[0][n],fitpar[1][n],fitparerrs[1][n]]
                            fit_params_cols += [f'A{k}',f'AERR{k}',f'PHI{k}',f'PHIERR{k}']
                            n+=1
                            k+=1
                        ecl_fitparams.append(pd.Series(fit_parameters,index=fit_params_cols))
                        figpath = f'{fpath}/{startype}'
                        fitargs = [t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase]
                        self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                        if R2 > 0.6:
                            figpath = f'{fpath}/{startype}_bonafide'
                            self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                    # Unclassified for visual inspection:
                    else:
                        ''' do two plots, one with LSG P and other with 2P'''
                        # with P
                        fitdat1 = self._curve_fit_switch(t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase)
                        fitparams = fitdat1['fitparams']
                        R2 = fitdat1['fitinfo']['R2']
                        chi2 = fitdat1['fitinfo']['Chi2']
                        res = fitdat1['magseries']['residuals']
                        errs = fitdat1['magseries']['errs']
                        meanRESS = np.sum(res**2)/(len(res)-1)
                        red_chi2 = np.sum(res**2 / errs**2)/(len(res)-1)
                        fit_parameters = [star,ra,dec,best_freq,amplitude,R2,chi2,meanRESS,red_chi2] + color_vals
                        fit_params_cols = ['ID','RA','DEC','Freq','Amplitude','R2','Chi2','residual_sum','red_chi2'] + color_cols
                        n=0
                        k=1
                        while n < len(fitparams[0]):
                            fitpar = fitdat1['fitparams']
                            fitparerrs = fitdat1['fitparamserrs']
                            fit_parameters += [fitpar[0][n],fitparerrs[0][n],fitpar[1][n],fitparerrs[1][n]]
                            fit_params_cols += [f'A{k}',f'AERR{k}',f'PHI{k}',f'PHIERR{k}']
                            n+=1
                            k+=1
                        unc_P_fitparams.append(pd.Series(fit_parameters,index=fit_params_cols))
                        figpath = f'{fpath}/{startype}'
                        fitargs = [t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase]
                        self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                        if R2 > 0.6:
                            figpath = f'{fpath}/{startype}_bonafide'
                            self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                        # with 2P
                        fitdat1 = self._curve_fit_switch(t,lc,lcErr,best_freq/2.,order,phaseSpace,fitFreq,iterative,fitastrobase)
                        fitparams = fitdat1['fitparams']
                        R2 = fitdat1['fitinfo']['R2']
                        chi2 = fitdat1['fitinfo']['Chi2']
                        res = fitdat1['magseries']['residuals']
                        errs = fitdat1['magseries']['errs']
                        meanRESS = np.sum(res**2)/(len(res)-1)
                        red_chi2 = np.sum(res**2 / errs**2)/(len(res)-1)
                        fit_parameters = [star,ra,dec,best_freq,amplitude,R2,chi2,meanRESS,red_chi2] + color_vals
                        fit_params_cols = ['ID','RA','DEC','Freq','Amplitude','R2','Chi2','residual_sum','red_chi2'] + color_cols
                        n=0
                        k=1
                        while n < len(fitparams[0]):
                            fitpar = fitdat1['fitparams']
                            fitparerrs = fitdat1['fitparamserrs']
                            fit_parameters += [fitpar[0][n],fitparerrs[0][n],fitpar[1][n],fitparerrs[1][n]]
                            fit_params_cols += [f'A{k}',f'AERR{k}',f'PHI{k}',f'PHIERR{k}']
                            n+=1
                            k+=1
                        unc_2P_fitparams.append(pd.Series(fit_parameters,index=fit_params_cols))
                j+=1
            i+=1
        ecl_fitparams = pd.DataFrame(ecl_fitparams)
        ecl_fitparams.set_index('ID',inplace=True)
        ecl_fitparams.to_csv(f'{fpath}/{self.tile}_ecl_fit_parameters.csv',sep=',')
        unc_P_fitparams = pd.DataFrame(unc_P_fitparams)
        unc_P_fitparams.set_index('ID',inplace=True)
        unc_P_fitparams.to_csv(f'{fpath}/{self.tile}_unc_P_parameters.csv',sep=',')
        unc_2P_fitparams = pd.DataFrame(unc_2P_fitparams)
        unc_2P_fitparams.set_index('ID',inplace=True)
        unc_2P_fitparams.to_csv(f'{fpath}/{self.tile}_unc_2P_parameters.csv',sep=',')
    def do_periodogram_replots(self,pmin,pmax,fpath,phaseSpace,fitFreq,order,iterative,fitastrobase):
        '''
        Re-run of do_periodogram_plots restricted to the seasonal-ALIAS sample
        (note the swapped unpacking of failure_modes below) and to stars not
        yet plotted, with hard-coded amplitude (0.2-0.5 mag) and period
        (0.4-0.6 d) cuts.  Output plots go to *_remaining folders and the CSVs
        carry a trailing underscore so the first-pass files are not overwritten.

        NOTE(review): `output_data` is initialized but never used; the fit
        blocks are guarded with `fitdat1 != 0` here, unlike the first pass.
        '''
        print(f'Removing seasonal aliases from tile {self.tile}...')
        _, aliasfree_id = self.failure_modes(freq=1.0, sigfactor=1, plot=True)
        ecl_fitparams = []
        unc_P_fitparams = []
        unc_2P_fitparams = []
        output_data = []
        i = 1
        for chip in self.chips:
            self._read_tables(chip)
            self._read_periodogram_outputs(chip)
            candidates = [star for star in self.lsg_params.index if star in aliasfree_id]
            stars_done = self._stars_already_plotted()
            candidates = [_ for _ in candidates if _ not in stars_done]
            j=1
            for star in candidates:
                status._print(prefix=f'Plotting chip {chip}',
                              iter1=i,
                              length1=len(self.chips),
                              iter2=j,
                              length2=len(candidates),
                              sufix='%')
                t = self.obs_time
                lc = self.ks_mag.loc[star].values
                lcErr = self.ks_err.loc[star].values
                ra = self.data_table.RA.loc[star]
                dec = self.data_table.DEC.loc[star]
                best_freq = self.lsg_params['best_freq'].loc[star]
                amplitude = self.lsg_params['amplitude'].loc[star]
                # ZYJHKs psf mag for color classification
                color_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'mag' or col.split('_')[0] == 'er']
                color_vals = [self.data_table.loc[star][col] for col in color_cols]
                if ((amplitude > 0.2) and (amplitude < 0.5)):
                    #period select
                    if ((1./best_freq > 0.4) and (1./best_freq < 0.6)):
                        # test for ECLs
                        c = sct.StarClassificator()
                        startype = c.ECL_classificator(t, lc, lcErr, best_freq, phaseSpace=True)
                        # ECL star type
                        if startype == 'ECL':
                            fitdat1 = self._curve_fit_switch(t,lc,lcErr,best_freq/2.,order,phaseSpace,fitFreq,iterative,fitastrobase)
                            if fitdat1 !=0:
                                fitparams = fitdat1['fitparams']
                                R2 = fitdat1['fitinfo']['R2']
                                chi2 = fitdat1['fitinfo']['Chi2']
                                res = fitdat1['magseries']['residuals']
                                errs = fitdat1['magseries']['errs']
                                meanRESS = np.sum(res**2)/(len(res)-1)
                                red_chi2 = np.sum(res**2 / errs**2)/(len(res)-1)
                                fit_parameters = [star,ra,dec,best_freq,amplitude,R2,chi2,meanRESS,red_chi2] + color_vals
                                fit_params_cols = ['ID','RA','DEC','Freq','Amplitude','R2','Chi2','residual_sum','red_chi2'] + color_cols
                                # flatten per-harmonic amplitudes/phases into A1,PHI1,...
                                n=0
                                k=1
                                while n < len(fitparams[0]):
                                    fitpar = fitdat1['fitparams']
                                    fitparerrs = fitdat1['fitparamserrs']
                                    fit_parameters += [fitpar[0][n],fitparerrs[0][n],fitpar[1][n],fitparerrs[1][n]]
                                    fit_params_cols += [f'A{k}',f'AERR{k}',f'PHI{k}',f'PHIERR{k}']
                                    n+=1
                                    k+=1
                                ecl_fitparams.append(pd.Series(fit_parameters,index=fit_params_cols))
                                figpath = f'{fpath}/{startype}_remaining'
                                fitargs = [t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase]
                                self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                                if R2 > 0.6:
                                    figpath = f'{fpath}/{startype}_bonafide_remaining'
                                    self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                        # Unclassified for visual inspection:
                        else:
                            ''' do two plots, one with LSG P and other with 2P'''
                            # with P
                            fitdat1 = self._curve_fit_switch(t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase)
                            if fitdat1 !=0:
                                fitparams = fitdat1['fitparams']
                                R2 = fitdat1['fitinfo']['R2']
                                chi2 = fitdat1['fitinfo']['Chi2']
                                res = fitdat1['magseries']['residuals']
                                errs = fitdat1['magseries']['errs']
                                meanRESS = np.sum(res**2)/(len(res)-1)
                                red_chi2 = np.sum(res**2 / errs**2)/(len(res)-1)
                                fit_parameters = [star,ra,dec,best_freq,amplitude,R2,chi2,meanRESS,red_chi2] + color_vals
                                fit_params_cols = ['ID','RA','DEC','Freq','Amplitude','R2','Chi2','residual_sum','red_chi2'] + color_cols
                                n=0
                                k=1
                                while n < len(fitparams[0]):
                                    fitpar = fitdat1['fitparams']
                                    fitparerrs = fitdat1['fitparamserrs']
                                    fit_parameters += [fitpar[0][n],fitparerrs[0][n],fitpar[1][n],fitparerrs[1][n]]
                                    fit_params_cols += [f'A{k}',f'AERR{k}',f'PHI{k}',f'PHIERR{k}']
                                    n+=1
                                    k+=1
                                unc_P_fitparams.append(pd.Series(fit_parameters,index=fit_params_cols))
                                figpath = f'{fpath}/{startype}_remaining'
                                fitargs = [t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase]
                                self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                                if R2 > 0.6:
                                    figpath = f'{fpath}/{startype}_bonafide_remaining'
                                    self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                            # with 2P
                            fitdat1 = self._curve_fit_switch(t,lc,lcErr,best_freq/2.,order,phaseSpace,fitFreq,iterative,fitastrobase)
                            if fitdat1 !=0:
                                fitparams = fitdat1['fitparams']
                                R2 = fitdat1['fitinfo']['R2']
                                chi2 = fitdat1['fitinfo']['Chi2']
                                res = fitdat1['magseries']['residuals']
                                errs = fitdat1['magseries']['errs']
                                meanRESS = np.sum(res**2)/(len(res)-1)
                                red_chi2 = np.sum(res**2 / errs**2)/(len(res)-1)
                                fit_parameters = [star,ra,dec,best_freq,amplitude,R2,chi2,meanRESS,red_chi2] + color_vals
                                fit_params_cols = ['ID','RA','DEC','Freq','Amplitude','R2','Chi2','residual_sum','red_chi2'] + color_cols
                                n=0
                                k=1
                                while n < len(fitparams[0]):
                                    fitpar = fitdat1['fitparams']
                                    fitparerrs = fitdat1['fitparamserrs']
                                    fit_parameters += [fitpar[0][n],fitparerrs[0][n],fitpar[1][n],fitparerrs[1][n]]
                                    fit_params_cols += [f'A{k}',f'AERR{k}',f'PHI{k}',f'PHIERR{k}']
                                    n+=1
                                    k+=1
                                unc_2P_fitparams.append(pd.Series(fit_parameters,index=fit_params_cols))
                j+=1
            i+=1
        ecl_fitparams = pd.DataFrame(ecl_fitparams)
        ecl_fitparams.set_index('ID',inplace=True)
        ecl_fitparams.to_csv(f'{fpath}/{self.tile}_ecl_fit_parameters_.csv',sep=',')
        unc_P_fitparams = pd.DataFrame(unc_P_fitparams)
        unc_P_fitparams.set_index('ID',inplace=True)
        unc_P_fitparams.to_csv(f'{fpath}/{self.tile}_unc_P_parameters_.csv',sep=',')
        unc_2P_fitparams = pd.DataFrame(unc_2P_fitparams)
        unc_2P_fitparams.set_index('ID',inplace=True)
        unc_2P_fitparams.to_csv(f'{fpath}/{self.tile}_unc_2P_parameters_.csv',sep=',')
    def do_replot(self,pmin,pmax,phaseSpace,fitFreq,order,iterative,fitastrobase):
        '''
        Replot periodogram figures after the first visual inspection.

        Walks every sub-folder of pos_visual_inspection (expected: RRLyr, ECL,
        IDK), refits each star found there (ECL* and IDK at half frequency,
        RRLyr at the LSG frequency), regenerates its plot in place, and writes
        one fit-parameter CSV per class.  Visual inspection must have sorted the
        plots into the class folders beforehand.
        '''
        i = 1
        dirpath = f'{self.path}/lc_plots/short_period/pos_visual_inspection'
        subfolders = [_ for _ in sorted(os.listdir(dirpath)) if os.path.isdir(os.path.join(dirpath, _)) ] #['RRLyr','ECL','IDK']
        ECL_params = []
        RRLyr_params = []
        UNC_params = []
        for chip in self.chips:
            self._read_tables(chip)
            self._read_periodogram_outputs(chip)
            for folder in subfolders:
                # _[4:-24] == chip keeps only this chip's files; _[4:-4] is the star ID
                star_list = [_[4:-4] for _ in sorted(os.listdir(f'{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}')) if _.endswith('png') and _[4:-24] == chip]
                stars2plot = star_list#[_ for _ in self.star_ids if _ in star_list and _ in self.lsg_params.index]
                j = 1
                for star in stars2plot:
                    status._print( prefix=f'Plotting chip {chip}',
                                   iter1=i,
                                   length1=len(self.chips),
                                   iter2=j,
                                   length2=len(stars2plot),
                                   sufix='%')
                    t = self.obs_time
                    lc = self.ks_mag.loc[star].values
                    lcErr = self.ks_err.loc[star].values
                    ra = self.data_table.RA.loc[star]
                    dec = self.data_table.DEC.loc[star]
                    best_freq = self.lsg_params['best_freq'].loc[star]
                    amplitude = self.lsg_params['amplitude'].loc[star]
                    # ZYJHKs psf mag for color classification
                    color_cols = [col for col in self.data_table.columns if col.split('_')[0] == 'mag' or col.split('_')[0] == 'er']
                    color_vals = [self.data_table.loc[star][col] for col in color_cols]
                    # prefix match so 'ECL*' variants also land here; only plain 'ECL' is saved
                    if folder[:3] == 'ECL':
                        figpath = f'{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}'
                        fitdat1 = self._curve_fit_switch(t,lc,lcErr,best_freq/2.,order,phaseSpace,fitFreq,iterative,fitastrobase)
                        fitparams = fitdat1['fitparams']
                        R2 = fitdat1['fitinfo']['R2']
                        chi2 = fitdat1['fitinfo']['Chi2']
                        res = fitdat1['magseries']['residuals']
                        errs = fitdat1['magseries']['errs']
                        meanRESS = np.sum(res**2)/(len(res)-1)
                        red_chi2 = np.sum(res**2 / errs**2)/(len(res)-1)
                        fit_parameters = [star,ra,dec,best_freq,amplitude,R2,chi2,meanRESS,red_chi2] + color_vals
                        fit_params_cols = ['ID','RA','DEC','Freq','Amplitude','R2','Chi2','residual_sum','red_chi2'] + color_cols
                        n=0
                        k=1
                        while n < len(fitparams[0]):
                            fitpar = fitdat1['fitparams']
                            fitparerrs = fitdat1['fitparamserrs']
                            fit_parameters += [fitpar[0][n],fitparerrs[0][n],fitpar[1][n],fitparerrs[1][n]]
                            fit_params_cols += [f'A{k}',f'AERR{k}',f'PHI{k}',f'PHIERR{k}']
                            n+=1
                            k+=1
                        if folder == 'ECL':
                            ECL_params.append(pd.Series(fit_parameters,index=fit_params_cols))
                            fitargs = [t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase]
                            self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                    elif folder == 'RRLyr':
                        figpath = f'{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}'
                        fitdat1 = self._curve_fit_switch(t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase)
                        fitparams = fitdat1['fitparams']
                        R2 = fitdat1['fitinfo']['R2']
                        chi2 = fitdat1['fitinfo']['Chi2']
                        res = fitdat1['magseries']['residuals']
                        errs = fitdat1['magseries']['errs']
                        meanRESS = np.sum(res**2)/(len(res)-1)
                        red_chi2 = np.sum(res**2 / errs**2)/(len(res)-1)
                        fit_parameters = [star,ra,dec,best_freq,amplitude,R2,chi2,meanRESS,red_chi2] + color_vals
                        fit_params_cols = ['ID','RA','DEC','Freq','Amplitude','R2','Chi2','residual_sum','red_chi2'] + color_cols
                        n=0
                        k=1
                        while n < len(fitparams[0]):
                            fitpar = fitdat1['fitparams']
                            fitparerrs = fitdat1['fitparamserrs']
                            fit_parameters += [fitpar[0][n],fitparerrs[0][n],fitpar[1][n],fitparerrs[1][n]]
                            fit_params_cols += [f'A{k}',f'AERR{k}',f'PHI{k}',f'PHIERR{k}']
                            n+=1
                            k+=1
                        RRLyr_params.append(pd.Series(fit_parameters,index=fit_params_cols))
                        fitargs = [t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase]
                        self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                    else: # folder == 'IDK':
                        figpath = f'{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}'
                        fitdat1 = self._curve_fit_switch(t,lc,lcErr,best_freq/2.,order,phaseSpace,fitFreq,iterative,fitastrobase)
                        fitparams = fitdat1['fitparams']
                        R2 = fitdat1['fitinfo']['R2']
                        chi2 = fitdat1['fitinfo']['Chi2']
                        res = fitdat1['magseries']['residuals']
                        errs = fitdat1['magseries']['errs']
                        meanRESS = np.sum(res**2)/(len(res)-1)
                        red_chi2 = np.sum(res**2 / errs**2)/(len(res)-1)
                        fit_parameters = [star,ra,dec,best_freq,amplitude,R2,chi2,meanRESS,red_chi2] + color_vals
                        fit_params_cols = ['ID','RA','DEC','Freq','Amplitude','R2','Chi2','residual_sum','red_chi2'] + color_cols
                        n=0
                        k=1
                        while n < len(fitparams[0]):
                            fitpar = fitdat1['fitparams']
                            fitparerrs = fitdat1['fitparamserrs']
                            fit_parameters += [fitpar[0][n],fitparerrs[0][n],fitpar[1][n],fitparerrs[1][n]]
                            fit_params_cols += [f'A{k}',f'AERR{k}',f'PHI{k}',f'PHIERR{k}']
                            n+=1
                            k+=1
                        if folder == 'IDK':
                            UNC_params.append(pd.Series(fit_parameters,index=fit_params_cols))
                            fitargs = [t,lc,lcErr,best_freq,order,phaseSpace,fitFreq,iterative,fitastrobase]
                            self.periodogram_plot(star,fitargs,pmin,pmax,figpath,plotfit=True,show=False)
                    j+=1
            i+=1
        ECL_params = pd.DataFrame(ECL_params)
        ECL_params.set_index('ID',inplace=True)
        ECL_params.to_csv(f'{self.path}/lc_plots/short_period/pos_visual_inspection/{self.tile}_ecl_parameters.csv',sep=',')
        RRLyr_params = pd.DataFrame(RRLyr_params)
        RRLyr_params.set_index('ID',inplace=True)
        RRLyr_params.to_csv(f'{self.path}/lc_plots/short_period/pos_visual_inspection/{self.tile}_rrlyr_parameters.csv',sep=',')
        UNC_params = pd.DataFrame(UNC_params)
        UNC_params.set_index('ID',inplace=True)
        UNC_params.to_csv(f'{self.path}/lc_plots/short_period/pos_visual_inspection/{self.tile}_unc_parameters.csv',sep=',')
def do_final_periodogram_plots(self,pmin,pmax,phaseSpace,fitFreq,order,iterative,fitastrobase):
    '''
    Re-plot periodograms after the first visual inspection.

    For every chip and every star previously sorted (by eye) into the
    RRLyr / ECL / IDK folders, refit the sinusoid series, collect the
    fit parameters into per-class tables and write one CSV per class.
    You must perform visual inspection on previous plots, searching
    for RRLyr in UNC/P folder. You may also repeat visual inspection in
    folder UNC/2P to check and search for new ECL.

    Parameters
    ----------
    pmin, pmax : float
        Period range forwarded to ``periodogram_plot``.
    phaseSpace, fitFreq, iterative, fitastrobase : bool
        Flags forwarded to ``_curve_fit_switch``.
    order : int
        Order of the sinusoid series fit.
    '''
    # ECL and IDK candidates are fitted at twice the Lomb-Scargle period
    # (i.e. half the frequency); RRLyr at the periodogram frequency itself.
    freq_divisor = {'RRLyr': 1., 'ECL': 2., 'IDK': 2.}
    fit_series = {'RRLyr': [], 'ECL': [], 'IDK': []}
    i = 1
    for chip in self.chips:
        self._read_tables(chip)
        self._read_periodogram_outputs(chip)
        for folder in ('RRLyr', 'ECL', 'IDK'):
            figpath = f'{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}'
            # plot files are named '<4-char prefix><star id>.<3-char ext>'
            star_list = [_[4:-4] for _ in sorted(os.listdir(figpath)) if _[4:-4] != '']
            stars2plot = [_ for _ in self.star_ids
                          if _ in star_list and _ in self.lsg_params.index]
            j = 1
            for star in stars2plot:
                status._print(prefix=f'Plotting chip {chip}',
                              iter1=i,
                              length1=len(self.chips),
                              iter2=j,
                              length2=len(stars2plot),
                              sufix='%')
                t = self.obs_time
                lc = self.ks_mag.loc[star].values
                lcErr = self.ks_err.loc[star].values
                ra = self.data_table.RA.loc[star]
                dec = self.data_table.DEC.loc[star]
                best_freq = self.lsg_params['best_freq'].loc[star]
                amplitude = self.lsg_params['amplitude'].loc[star]
                # ZYJHKs psf magnitudes used for colour classification
                color_cols = [col for col in self.data_table.columns
                              if col.split('_')[0] == 'mag' or col.split('_')[0] == 'er']
                color_vals = [self.data_table.loc[star][col] for col in color_cols]
                fitdat1 = self._curve_fit_switch(t, lc, lcErr,
                                                 best_freq / freq_divisor[folder],
                                                 order, phaseSpace, fitFreq,
                                                 iterative, fitastrobase)
                fitparams = fitdat1['fitparams']
                R2 = fitdat1['fitinfo']['R2']
                chi2 = fitdat1['fitinfo']['Chi2']
                res = fitdat1['magseries']['residuals']
                errs = fitdat1['magseries']['errs']
                RESS = np.sum(res**2)
                red_chi2 = np.sum(res**2 / errs**2) / (len(res) - 1)
                res_std = np.std(res)
                fit_parameters = [star, ra, dec, best_freq, amplitude, R2, chi2,
                                  RESS, res_std, red_chi2] + color_vals
                fit_params_cols = ['ID', 'RA', 'DEC', 'Freq', 'Amplitude', 'R2', 'Chi2',
                                   'residual_sum', 'residual_std', 'red_chi2'] + color_cols
                # amplitude/phase (and their errors) of each fitted harmonic
                fitparerrs = fitdat1['fitparamserrs']
                for n in range(len(fitparams[0])):
                    k = n + 1
                    fit_parameters += [fitparams[0][n], fitparerrs[0][n],
                                       fitparams[1][n], fitparerrs[1][n]]
                    fit_params_cols += [f'A{k}', f'AERR{k}', f'PHI{k}', f'PHIERR{k}']
                fit_series[folder].append(pd.Series(fit_parameters, index=fit_params_cols))
                fitargs = [t, lc, lcErr, best_freq, order, phaseSpace, fitFreq,
                           iterative, fitastrobase]
                # BUG FIX: pmin/pmax were missing here, so figpath was shifted
                # into the pmin slot; every sibling method passes them.
                self.periodogram_plot(star, fitargs, pmin, pmax, figpath,
                                      plotfit=True, show=False)
                j += 1
        i += 1
    # One CSV per class, written next to the inspection folders.
    for folder, tag in (('ECL', 'ecl'), ('RRLyr', 'rrlyr'), ('IDK', 'unc')):
        table = pd.DataFrame(fit_series[folder])
        table.set_index('ID', inplace=True)
        table.to_csv(f'{self.path}/lc_plots/short_period/pos_visual_inspection/'
                     f'{self.tile}_{tag}_parameters.csv', sep=',')
def plot_remaining(self,pmin,pmax,phaseSpace,fitFreq,order,iterative,fitastrobase):
    '''
    First plot and automated ECL classification for stars not plotted yet.

    A sinusoid series is fitted to each light curve and the depths of the
    two valleys are compared with the standard deviation of the residuals.
    If the depth difference is greater than the residual deviation the star
    is probably an asymmetrical ECL.  ECL candidates are plotted folded at
    twice the period; every other candidate is plotted twice (once with the
    LSG period P and once with 2P) for visual inspection.

    Parameters
    ----------
    pmin, pmax : float
        Period range forwarded to ``periodogram_plot``.
    phaseSpace, fitFreq, iterative, fitastrobase : bool
        Flags forwarded to ``_curve_fit_switch``.
    order : int
        Order of the sinusoid series fit.
    '''
    print(f'Removing seasonal aliases from tile {self.tile}...')
    _, aliasfree_id = self.failure_modes(freq=1.0, sigfactor=1, plot=True)
    stars_done = self._stars_already_plotted()

    def harmonic_params(fitdat):
        # Per-harmonic amplitude/phase values (and errors) plus column names.
        fp = fitdat['fitparams']
        fpe = fitdat['fitparamserrs']
        vals, cols = [], []
        for n in range(len(fp[0])):
            vals += [fp[0][n], fpe[0][n], fp[1][n], fpe[1][n]]
            cols += [f'A{n+1}', f'AERR{n+1}', f'PHI{n+1}', f'PHIERR{n+1}']
        return vals, cols

    ecl_fitparams = []
    unc_P_fitparams = []
    unc_2P_fitparams = []
    i = 1
    for chip in self.chips:
        self._read_tables(chip)
        self._read_periodogram_outputs(chip)
        candidates = [star for star in self.lsg_params.index
                      if star in aliasfree_id and star not in stars_done]
        j = 1
        for star in candidates:
            status._print(prefix=f'Plotting chip {chip}',
                          iter1=i,
                          length1=len(self.chips),
                          iter2=j,
                          length2=len(candidates),
                          sufix='%')
            t = self.obs_time
            lc = self.ks_mag.loc[star].values
            lcErr = self.ks_err.loc[star].values
            ra = self.data_table.RA.loc[star]
            dec = self.data_table.DEC.loc[star]
            best_freq = self.lsg_params['best_freq'].loc[star]
            amplitude = self.lsg_params['amplitude'].loc[star]
            # ZYJHKs psf magnitudes used for colour classification
            color_cols = [col for col in self.data_table.columns
                          if col.split('_')[0] == 'mag' or col.split('_')[0] == 'er']
            color_vals = [self.data_table.loc[star][col] for col in color_cols]
            # amplitude and period pre-selection
            if 0.2 < amplitude < 0.5 and 0.4 < 1. / best_freq < 0.5:
                c = sct.StarClassificator()
                startype = c.ECL_classificator(t, lc, lcErr, best_freq, phaseSpace=True)
                if startype == 'ECL':
                    # ECL: single plot folded at 2P (frequency halved)
                    figpath = (f'{self.path}/lc_plots/short_period/'
                               f'pre_visual_inspection/remaining2/{startype}')
                    fitdat1 = self._curve_fit_switch(t, lc, lcErr, best_freq / 2., order,
                                                     phaseSpace, fitFreq, iterative,
                                                     fitastrobase)
                    vals, cols = harmonic_params(fitdat1)
                    ecl_fitparams.append(pd.Series(
                        [star, ra, dec, best_freq, amplitude] + color_vals + vals,
                        index=['ID', 'RA', 'DEC', 'Freq', 'Amplitude'] + color_cols + cols))
                    fitargs = [t, lc, lcErr, best_freq, order, phaseSpace, fitFreq,
                               iterative, fitastrobase]
                    self.periodogram_plot(star, fitargs, pmin, pmax, figpath,
                                          plotfit=True, show=False)
                else:
                    # Unclassified: two plots, one with the LSG period P and
                    # one with 2P, each recorded in its own parameter table.
                    for factor, subdir, bucket in ((1., 'P', unc_P_fitparams),
                                                   (2., '2P', unc_2P_fitparams)):
                        figpath = (f'{self.path}/lc_plots/short_period/'
                                   f'pre_visual_inspection/remaining2/{startype}/{subdir}')
                        fitdat1 = self._curve_fit_switch(t, lc, lcErr, best_freq / factor,
                                                         order, phaseSpace, fitFreq,
                                                         iterative, fitastrobase)
                        fitargs = [t, lc, lcErr, best_freq, order, phaseSpace, fitFreq,
                                   iterative, fitastrobase]
                        self.periodogram_plot(star, fitargs, pmin, pmax, figpath,
                                              plotfit=True, show=False)
                        vals, cols = harmonic_params(fitdat1)
                        bucket.append(pd.Series(
                            [star, ra, dec, best_freq / factor, amplitude]
                            + color_vals + vals,
                            index=['ID', 'RA', 'DEC', 'Freq', 'Amplitude']
                            + color_cols + cols))
            j += 1
        i += 1
    ecl_fitparams = pd.DataFrame(ecl_fitparams)
    ecl_fitparams.set_index('ID', inplace=True)
    ecl_fitparams.to_csv(f'{self.path}/lc_plots/short_period/pre_visual_inspection/'
                         f'remaining2/{self.tile}_ecl_fit_parameters.csv', sep=',')
    unc_P_fitparams = pd.DataFrame(unc_P_fitparams)
    unc_P_fitparams.set_index('ID', inplace=True)
    unc_P_fitparams.to_csv(f'{self.path}/lc_plots/short_period/pre_visual_inspection/'
                           f'remaining2/{self.tile}_unc_P_parameters.csv', sep=',')
    unc_2P_fitparams = pd.DataFrame(unc_2P_fitparams)
    unc_2P_fitparams.set_index('ID', inplace=True)
    unc_2P_fitparams.to_csv(f'{self.path}/lc_plots/short_period/pre_visual_inspection/'
                            f'remaining2/{self.tile}_unc_2P_parameters.csv', sep=',')
def do_bonafide_plots(self,pmin,pmax,phaseSpace,fitFreq,order,iterative,fitastrobase):
    '''
    Re-plot periodograms after the second (bona-fide) visual inspection.

    For every chip and every star sorted (by eye) into the RRLyr /
    ECL_bonafide / UNC_bonafide folders, refit the sinusoid series,
    collect the fit parameters into per-class tables and write one CSV
    per class.  You must perform visual inspection on previous plots,
    searching for RRLyr in UNC/P folder; you may also repeat visual
    inspection in folder UNC/2P to check and search for new ECL.

    Parameters
    ----------
    pmin, pmax : float
        Period range forwarded to ``periodogram_plot``.
    phaseSpace, fitFreq, iterative, fitastrobase : bool
        Flags forwarded to ``_curve_fit_switch``.
    order : int
        Order of the sinusoid series fit.
    '''
    # ECL and UNC candidates are fitted at twice the Lomb-Scargle period
    # (i.e. half the frequency); RRLyr at the periodogram frequency itself.
    freq_divisor = {'RRLyr': 1., 'ECL_bonafide': 2., 'UNC_bonafide': 2.}
    fit_series = {'RRLyr': [], 'ECL_bonafide': [], 'UNC_bonafide': []}
    i = 1
    for chip in self.chips:
        self._read_tables(chip)
        self._read_periodogram_outputs(chip)
        for folder in ('RRLyr', 'ECL_bonafide', 'UNC_bonafide'):
            inspect_path = f'{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}'
            # the re-plots are written into a sibling folder with a '2' suffix
            figpath = f'{inspect_path}2'
            # plot files are named '<4-char prefix><star id>.<3-char ext>'
            star_list = [_[4:-4] for _ in sorted(os.listdir(inspect_path)) if _[4:-4] != '']
            stars2plot = [_ for _ in self.star_ids
                          if _ in star_list and _ in self.lsg_params.index]
            j = 1
            for star in stars2plot:
                status._print(prefix=f'Plotting chip {chip}',
                              iter1=i,
                              length1=len(self.chips),
                              iter2=j,
                              length2=len(stars2plot),
                              sufix='%')
                t = self.obs_time
                lc = self.ks_mag.loc[star].values
                lcErr = self.ks_err.loc[star].values
                ra = self.data_table.RA.loc[star]
                dec = self.data_table.DEC.loc[star]
                best_freq = self.lsg_params['best_freq'].loc[star]
                amplitude = self.lsg_params['amplitude'].loc[star]
                # ZYJHKs psf magnitudes used for colour classification
                color_cols = [col for col in self.data_table.columns
                              if col.split('_')[0] == 'mag' or col.split('_')[0] == 'er']
                color_vals = [self.data_table.loc[star][col] for col in color_cols]
                fitdat1 = self._curve_fit_switch(t, lc, lcErr,
                                                 best_freq / freq_divisor[folder],
                                                 order, phaseSpace, fitFreq,
                                                 iterative, fitastrobase)
                fitparams = fitdat1['fitparams']
                R2 = fitdat1['fitinfo']['R2']
                chi2 = fitdat1['fitinfo']['Chi2']
                res = fitdat1['magseries']['residuals']
                errs = fitdat1['magseries']['errs']
                RESS = np.sum(res**2)
                red_chi2 = np.sum(res**2 / errs**2) / (len(res) - 1)
                res_std = np.std(res)
                fit_parameters = [star, ra, dec, best_freq, amplitude, R2, chi2,
                                  RESS, res_std, red_chi2] + color_vals
                fit_params_cols = ['ID', 'RA', 'DEC', 'Freq', 'Amplitude', 'R2', 'Chi2',
                                   'residual_sum', 'residual_std', 'red_chi2'] + color_cols
                # amplitude/phase (and their errors) of each fitted harmonic
                fitparerrs = fitdat1['fitparamserrs']
                for n in range(len(fitparams[0])):
                    k = n + 1
                    fit_parameters += [fitparams[0][n], fitparerrs[0][n],
                                       fitparams[1][n], fitparerrs[1][n]]
                    fit_params_cols += [f'A{k}', f'AERR{k}', f'PHI{k}', f'PHIERR{k}']
                fit_series[folder].append(pd.Series(fit_parameters, index=fit_params_cols))
                fitargs = [t, lc, lcErr, best_freq, order, phaseSpace, fitFreq,
                           iterative, fitastrobase]
                self.periodogram_plot(star, fitargs, pmin, pmax, figpath,
                                      plotfit=True, show=False)
                j += 1
        i += 1
    # One CSV per class, written next to the inspection folders.
    for folder, tag in (('ECL_bonafide', 'ecl'), ('RRLyr', 'rrlyr'),
                        ('UNC_bonafide', 'unc')):
        table = pd.DataFrame(fit_series[folder])
        table.set_index('ID', inplace=True)
        table.to_csv(f'{self.path}/lc_plots/short_period/pos_visual_inspection/'
                     f'{self.tile}_{tag}_bona_parameters.csv', sep=',')
def merge_parameter_files(self):
    '''
    Merge the fit parameters of stars reclassified as RRLyr into the
    bona-fide RRLyr parameter table.

    Rows of the UNC (period P) table whose star now appears in the RRLyr
    inspection folder are copied into (or appended to) the RRLyr table,
    which is then rewritten in place.
    '''
    pos = f'{self.path}/lc_plots/short_period/pos_visual_inspection'
    pre = f'{self.path}/lc_plots/short_period/pre_visual_inspection'
    rrlyr_file = f'{pos}/{self.tile}_rrlyr_bona_parameters.csv'
    RRLyr_params = pd.read_csv(rrlyr_file, sep=',', index_col='ID')
    # NOTE(review): the trailing underscore in '_unc_P_parameters_.csv' is
    # kept as-is — confirm it matches the file actually written upstream.
    UNC_params_remaining = pd.read_csv(f'{pre}/{self.tile}_unc_P_parameters_.csv',
                                       sep=',', index_col='ID')
    # Plot files are named '<4-char prefix><star id>.<3-char ext>'.
    # A set gives O(1) membership tests instead of scanning a list per star.
    rrl_ids = {_[4:-4] for _ in os.listdir(f'{pos}/RRLyr') if _[4:-4] != ''}
    for star in UNC_params_remaining.index:
        if star in rrl_ids:
            # .loc assignment appends the row when the star is new to the table
            RRLyr_params.loc[star] = UNC_params_remaining.loc[star]
    RRLyr_params.to_csv(rrlyr_file, sep=',')
if __name__ == '__main__':
    # Reload the project modules so edits are picked up when this script is
    # re-run inside an interactive session (development/testing aid).
    import importlib
    importlib.reload(sys.modules['clean_match_tables'])
    importlib.reload(sys.modules['fit_sin_series'])
    importlib.reload(sys.modules['periodogram'])
    importlib.reload(sys.modules['variability_indicator'])
    importlib.reload(sys.modules['star_classificator_tools'])
    path = '/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts'
    tiles = sorted(os.listdir('data/psf_ts/'))
    for tile in tiles:#[5:6]:
        #tile = 'b293'
        # PERIOD SEARCH pipeline: the commented calls below are the
        # successive stages; they are enabled one at a time as the
        # reduction of each tile progresses.
        p_search = PeriodSearch(path=path, tile=tile, minP=0.1, maxP=1000 , varIndex='chi2')
        #p_search.organize_tables()
        #p_search.select_candidates()
        #p_search.do_periodogram(exists=True)
        #p_search.do_periodogram_fix()
        #p_search.failure_modes(freq=1.0, sigfactor=1, plot=True)
        #figurepath = f'{path}/{tile}/lc_plots/short_period/pre_visual_inspection'
        #figurepath = f'{path}/{tile}/lc_plots/long_period/pre_visual_inspection'
        #pmin = 0.1
        #pmax = 1.0
        #p_search.do_periodogram_replots(pmin=pmin,pmax=pmax,fpath=figurepath,phaseSpace=True,fitFreq=True,order=5,iterative=False,fitastrobase=False)
        #p_search.do_replot(pmin=0.1,pmax=1.0,phaseSpace=True,fitFreq=True,order=5,iterative=False,fitastrobase=False)
        #p_search.plot_remaining(pmin=0.1,pmax=1000,phaseSpace=True,fitFreq=True,order=5,iterative=False,fitastrobase=False)
        #p_search.do_final_periodogram_plots(pmin=0.1,pmax=1.0,phaseSpace=True,fitFreq=True,order=5,iterative=False,fitastrobase=False)
        #p_search.do_bonafide_plots(pmin=0.1,pmax=1.0,phaseSpace=True,fitFreq=True,order=5,iterative=False,fitastrobase=False)
        p_search.merge_parameter_files()
"matplotlib.pyplot.title",
"numpy.nanpercentile",
"numpy.sum",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"periodogram.Periodogram",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"sys.path.append",
"pandas.DataFrame",
"variability_indicator.Variability",
"num... | [((816, 886), 'sys.path.append', 'sys.path.append', (['"""/home/botan/OneDrive/Doutorado/VVV_DATA/my_modules/"""'], {}), "('/home/botan/OneDrive/Doutorado/VVV_DATA/my_modules/')\n", (831, 886), False, 'import sys\n'), ((70288, 70339), 'importlib.reload', 'importlib.reload', (["sys.modules['clean_match_tables']"], {}), "(sys.modules['clean_match_tables'])\n", (70304, 70339), False, 'import importlib\n'), ((70344, 70391), 'importlib.reload', 'importlib.reload', (["sys.modules['fit_sin_series']"], {}), "(sys.modules['fit_sin_series'])\n", (70360, 70391), False, 'import importlib\n'), ((70396, 70440), 'importlib.reload', 'importlib.reload', (["sys.modules['periodogram']"], {}), "(sys.modules['periodogram'])\n", (70412, 70440), False, 'import importlib\n'), ((70445, 70499), 'importlib.reload', 'importlib.reload', (["sys.modules['variability_indicator']"], {}), "(sys.modules['variability_indicator'])\n", (70461, 70499), False, 'import importlib\n'), ((70504, 70561), 'importlib.reload', 'importlib.reload', (["sys.modules['star_classificator_tools']"], {}), "(sys.modules['star_classificator_tools'])\n", (70520, 70561), False, 'import importlib\n'), ((1543, 1593), 'os.makedirs', 'os.makedirs', (['f"""{self.path}/figures"""'], {'exist_ok': '(True)'}), "(f'{self.path}/figures', exist_ok=True)\n", (1554, 1593), False, 'import os\n'), ((1601, 1650), 'os.makedirs', 'os.makedirs', (['f"""{self.path}/output"""'], {'exist_ok': '(True)'}), "(f'{self.path}/output', exist_ok=True)\n", (1612, 1650), False, 'import os\n'), ((1897, 1975), 'clean_match_tables.CreateTable', 'cmt.CreateTable', ([], {'path': 'self.path', 'tile': 'self.tile', 'min_sample': '(25)', 'raw_files': '(True)'}), '(path=self.path, tile=self.tile, min_sample=25, raw_files=True)\n', (1912, 1975), True, 'import clean_match_tables as cmt\n'), ((2154, 2280), 'variability_indicator.Variability', 'vi.Variability', ([], {'path': 'self.path', 'tile': 'self.tile', 'method': 'self.varIndex', 'maxMag': '(11.5)', 
'stdRatio': '(1.5)', 'minChi2': '(2)', 'savePlot': '(True)'}), '(path=self.path, tile=self.tile, method=self.varIndex, maxMag\n =11.5, stdRatio=1.5, minChi2=2, savePlot=True)\n', (2168, 2280), True, 'import variability_indicator as vi\n'), ((2560, 2647), 'pandas.read_csv', 'pd.read_csv', (['f"""{self.path}/chips/{chip}.ts"""'], {'index_col': '(0)', 'sep': '""","""', 'low_memory': '(False)'}), "(f'{self.path}/chips/{chip}.ts', index_col=0, sep=',',\n low_memory=False)\n", (2571, 2647), True, 'import pandas as pd\n'), ((2668, 2711), 'numpy.loadtxt', 'np.loadtxt', (['f"""{self.path}/chips/{chip}.mjd"""'], {}), "(f'{self.path}/chips/{chip}.mjd')\n", (2678, 2711), True, 'import numpy as np\n'), ((8831, 8854), 'star_classificator_tools.StarClassificator', 'sct.StarClassificator', ([], {}), '()\n', (8852, 8854), True, 'import star_classificator_tools as sct\n'), ((9298, 9321), 'star_classificator_tools.StarClassificator', 'sct.StarClassificator', ([], {}), '()\n', (9319, 9321), True, 'import star_classificator_tools as sct\n'), ((9748, 9797), 'numpy.median', 'np.median', (['x[(x > xpos - sig) & (x < xpos + sig)]'], {}), '(x[(x > xpos - sig) & (x < xpos + sig)])\n', (9757, 9797), True, 'import numpy as np\n'), ((9815, 9861), 'numpy.std', 'np.std', (['x[(x > mean - sig) & (x < mean + sig)]'], {}), '(x[(x > mean - sig) & (x < mean + sig)])\n', (9821, 9861), True, 'import numpy as np\n'), ((10203, 10230), 'pandas.concat', 'pd.concat', (['all_chips_params'], {}), '(all_chips_params)\n', (10212, 10230), True, 'import pandas as pd\n'), ((10876, 10899), 'numpy.concatenate', 'np.concatenate', (['bad_ids'], {}), '(bad_ids)\n', (10890, 10899), True, 'import numpy as np\n'), ((17339, 17430), 'pandas.read_csv', 'pd.read_csv', (['f"""{self.path}/var_data/{chip}_lsg_parameters.csv"""'], {'sep': '""","""', 'index_col': '"""ID"""'}), "(f'{self.path}/var_data/{chip}_lsg_parameters.csv', sep=',',\n index_col='ID')\n", (17350, 17430), True, 'import pandas as pd\n'), ((17451, 17542), 
'pandas.read_csv', 'pd.read_csv', (['f"""{self.path}/var_data/{chip}_pdm_parameters.csv"""'], {'sep': '""","""', 'index_col': '"""ID"""'}), "(f'{self.path}/var_data/{chip}_pdm_parameters.csv', sep=',',\n index_col='ID')\n", (17462, 17542), True, 'import pandas as pd\n'), ((18406, 18465), 'fit_sin_series.FitSinSeries', 'fitSin.FitSinSeries', ([], {'phaseSpace': 'phaseSpace', 'fitFreq': 'fitFreq'}), '(phaseSpace=phaseSpace, fitFreq=fitFreq)\n', (18425, 18465), True, 'import fit_sin_series as fitSin\n'), ((26756, 26783), 'pandas.DataFrame', 'pd.DataFrame', (['ecl_fitparams'], {}), '(ecl_fitparams)\n', (26768, 26783), True, 'import pandas as pd\n'), ((26954, 26983), 'pandas.DataFrame', 'pd.DataFrame', (['unc_P_fitparams'], {}), '(unc_P_fitparams)\n', (26966, 26983), True, 'import pandas as pd\n'), ((27157, 27187), 'pandas.DataFrame', 'pd.DataFrame', (['unc_2P_fitparams'], {}), '(unc_2P_fitparams)\n', (27169, 27187), True, 'import pandas as pd\n'), ((36135, 36162), 'pandas.DataFrame', 'pd.DataFrame', (['ecl_fitparams'], {}), '(ecl_fitparams)\n', (36147, 36162), True, 'import pandas as pd\n'), ((36334, 36363), 'pandas.DataFrame', 'pd.DataFrame', (['unc_P_fitparams'], {}), '(unc_P_fitparams)\n', (36346, 36363), True, 'import pandas as pd\n'), ((36538, 36568), 'pandas.DataFrame', 'pd.DataFrame', (['unc_2P_fitparams'], {}), '(unc_2P_fitparams)\n', (36550, 36568), True, 'import pandas as pd\n'), ((44387, 44411), 'pandas.DataFrame', 'pd.DataFrame', (['ECL_params'], {}), '(ECL_params)\n', (44399, 44411), True, 'import pandas as pd\n'), ((44617, 44643), 'pandas.DataFrame', 'pd.DataFrame', (['RRLyr_params'], {}), '(RRLyr_params)\n', (44629, 44643), True, 'import pandas as pd\n'), ((44853, 44877), 'pandas.DataFrame', 'pd.DataFrame', (['UNC_params'], {}), '(UNC_params)\n', (44865, 44877), True, 'import pandas as pd\n'), ((52516, 52540), 'pandas.DataFrame', 'pd.DataFrame', (['ECL_params'], {}), '(ECL_params)\n', (52528, 52540), True, 'import pandas as pd\n'), ((52746, 52772), 
'pandas.DataFrame', 'pd.DataFrame', (['RRLyr_params'], {}), '(RRLyr_params)\n', (52758, 52772), True, 'import pandas as pd\n'), ((52982, 53006), 'pandas.DataFrame', 'pd.DataFrame', (['UNC_params'], {}), '(UNC_params)\n', (52994, 53006), True, 'import pandas as pd\n'), ((60443, 60470), 'pandas.DataFrame', 'pd.DataFrame', (['ecl_fitparams'], {}), '(ecl_fitparams)\n', (60455, 60470), True, 'import pandas as pd\n'), ((60700, 60729), 'pandas.DataFrame', 'pd.DataFrame', (['unc_P_fitparams'], {}), '(unc_P_fitparams)\n', (60712, 60729), True, 'import pandas as pd\n'), ((60962, 60992), 'pandas.DataFrame', 'pd.DataFrame', (['unc_2P_fitparams'], {}), '(unc_2P_fitparams)\n', (60974, 60992), True, 'import pandas as pd\n'), ((68742, 68766), 'pandas.DataFrame', 'pd.DataFrame', (['ECL_params'], {}), '(ECL_params)\n', (68754, 68766), True, 'import pandas as pd\n'), ((68977, 69003), 'pandas.DataFrame', 'pd.DataFrame', (['RRLyr_params'], {}), '(RRLyr_params)\n', (68989, 69003), True, 'import pandas as pd\n'), ((69218, 69242), 'pandas.DataFrame', 'pd.DataFrame', (['UNC_params'], {}), '(UNC_params)\n', (69230, 69242), True, 'import pandas as pd\n'), ((69483, 69627), 'pandas.read_csv', 'pd.read_csv', (['f"""{self.path}/lc_plots/short_period/pos_visual_inspection/{self.tile}_rrlyr_bona_parameters.csv"""'], {'sep': '""","""', 'index_col': '"""ID"""'}), "(\n f'{self.path}/lc_plots/short_period/pos_visual_inspection/{self.tile}_rrlyr_bona_parameters.csv'\n , sep=',', index_col='ID')\n", (69494, 69627), True, 'import pandas as pd\n'), ((69647, 69787), 'pandas.read_csv', 'pd.read_csv', (['f"""{self.path}/lc_plots/short_period/pre_visual_inspection/{self.tile}_unc_P_parameters_.csv"""'], {'sep': '""","""', 'index_col': '"""ID"""'}), "(\n f'{self.path}/lc_plots/short_period/pre_visual_inspection/{self.tile}_unc_P_parameters_.csv'\n , sep=',', index_col='ID')\n", (69658, 69787), True, 'import pandas as pd\n'), ((70652, 70678), 'os.listdir', 'os.listdir', (['"""data/psf_ts/"""'], {}), 
"('data/psf_ts/')\n", (70662, 70678), False, 'import os\n'), ((1782, 1848), 'os.listdir', 'os.listdir', (['"""/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts/"""'], {}), "('/home/botan/OneDrive/Doutorado/VVV_DATA/data/psf_ts/')\n", (1792, 1848), False, 'import os\n'), ((3237, 3302), 'numpy.genfromtxt', 'np.genfromtxt', (['f"""{self.path}/var_data/{chip}.chi2cand"""'], {'dtype': 'str'}), "(f'{self.path}/var_data/{chip}.chi2cand', dtype=str)\n", (3250, 3302), True, 'import numpy as np\n'), ((3368, 3432), 'numpy.genfromtxt', 'np.genfromtxt', (['f"""{self.path}/var_data/{chip}.stdcand"""'], {'dtype': 'str'}), "(f'{self.path}/var_data/{chip}.stdcand', dtype=str)\n", (3381, 3432), True, 'import numpy as np\n'), ((10042, 10133), 'pandas.read_csv', 'pd.read_csv', (['f"""{self.path}/var_data/{chip}_lsg_parameters.csv"""'], {'sep': '""","""', 'index_col': '"""ID"""'}), "(f'{self.path}/var_data/{chip}_lsg_parameters.csv', sep=',',\n index_col='ID')\n", (10053, 10133), True, 'import pandas as pd\n'), ((11521, 11568), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[6, 3]', 'tight_layout': '(True)'}), '(figsize=[6, 3], tight_layout=True)\n', (11533, 11568), True, 'import matplotlib.pyplot as plt\n'), ((12029, 12047), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12045, 12047), True, 'import matplotlib.pyplot as plt\n'), ((12060, 12118), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{self.path}/var_data/ampxfreq.png"""'], {'dpi': '(300)'}), "(f'{self.path}/var_data/ampxfreq.png', dpi=300)\n", (12071, 12118), True, 'import matplotlib.pyplot as plt\n'), ((12154, 12165), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12163, 12165), True, 'import matplotlib.pyplot as plt\n'), ((12179, 12205), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[7, 4]'}), '(figsize=[7, 4])\n', (12189, 12205), True, 'import matplotlib.pyplot as plt\n'), ((12217, 12249), 'matplotlib.pyplot.bar', 'plt.bar', (['xbar', 'height'], 
{'width': '(0.9)'}), '(xbar, height, width=0.9)\n', (12224, 12249), True, 'import matplotlib.pyplot as plt\n'), ((12260, 12296), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [$day^{-1}$]"""'], {}), "('Frequency [$day^{-1}$]')\n", (12270, 12296), True, 'import matplotlib.pyplot as plt\n'), ((12309, 12329), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (12319, 12329), True, 'import matplotlib.pyplot as plt\n'), ((12342, 12391), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of stars by frequency aliases"""'], {}), "('Number of stars by frequency aliases')\n", (12351, 12391), True, 'import matplotlib.pyplot as plt\n'), ((12404, 12422), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12420, 12422), True, 'import matplotlib.pyplot as plt\n'), ((12435, 12500), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{self.path}/var_data/barplot_aliases.png"""'], {'dpi': '(300)'}), "(f'{self.path}/var_data/barplot_aliases.png', dpi=300)\n", (12446, 12500), True, 'import matplotlib.pyplot as plt\n'), ((12536, 12547), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12545, 12547), True, 'import matplotlib.pyplot as plt\n'), ((14235, 14407), 'periodogram.Periodogram', 'pg.Periodogram', ([], {'x': 't', 'y': 'lc', 'yerr': 'lcErr', 'minP': '(0.1)', 'maxP': '(1000)', 'normalization': '"""psd"""', 'method': '"""scargle"""', 'samples_per_peak': '(10)', 'false': '(0.001)', 'nbins': '(10)', 'covers': '(3)', 'mode': '(False)'}), "(x=t, y=lc, yerr=lcErr, minP=0.1, maxP=1000, normalization=\n 'psd', method='scargle', samples_per_peak=10, false=0.001, nbins=10,\n covers=3, mode=False)\n", (14249, 14407), True, 'import periodogram as pg\n'), ((14858, 14934), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '[6, 9]', 'gridspec_kw': "{'wspace': 0, 'hspace': 0.4}"}), "(4, 1, figsize=[6, 9], gridspec_kw={'wspace': 0, 'hspace': 0.4})\n", (14870, 14934), True, 'import matplotlib.pyplot 
as plt\n'), ((17070, 17106), 'os.makedirs', 'os.makedirs', (['fig_path'], {'exist_ok': '(True)'}), '(fig_path, exist_ok=True)\n', (17081, 17106), False, 'import os\n'), ((17118, 17185), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{fig_path}/LSG_{star}.png"""'], {'dpi': '(100)', 'pad_inches': '(0.02)'}), "(f'{fig_path}/LSG_{star}.png', dpi=100, pad_inches=0.02)\n", (17129, 17185), True, 'import matplotlib.pyplot as plt\n'), ((17244, 17255), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17253, 17255), True, 'import matplotlib.pyplot as plt\n'), ((8236, 8284), 'pandas.DataFrame', 'pd.DataFrame', (['lsg_pgram_params'], {'columns': 'colnames'}), '(lsg_pgram_params, columns=colnames)\n', (8248, 8284), True, 'import pandas as pd\n'), ((8479, 8527), 'pandas.DataFrame', 'pd.DataFrame', (['pdm_pgram_params'], {'columns': 'colnames'}), '(pdm_pgram_params, columns=colnames)\n', (8491, 8527), True, 'import pandas as pd\n'), ((17221, 17231), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17229, 17231), True, 'import matplotlib.pyplot as plt\n'), ((1697, 1730), 'os.listdir', 'os.listdir', (['f"""{self.path}/chips/"""'], {}), "(f'{self.path}/chips/')\n", (1707, 1730), False, 'import os\n'), ((4919, 5080), 'periodogram.Periodogram', 'pg.Periodogram', (['t', 'lc', 'err', 'self.minP', 'self.maxP'], {'normalization': '"""psd"""', 'method': '"""scargle"""', 'samples_per_peak': '(10)', 'false': '(0.001)', 'nbins': '(10)', 'covers': '(3)', 'mode': '(False)'}), "(t, lc, err, self.minP, self.maxP, normalization='psd',\n method='scargle', samples_per_peak=10, false=0.001, nbins=10, covers=3,\n mode=False)\n", (4933, 5080), True, 'import periodogram as pg\n'), ((21069, 21092), 'star_classificator_tools.StarClassificator', 'sct.StarClassificator', ([], {}), '()\n', (21090, 21092), True, 'import star_classificator_tools as sct\n'), ((37226, 37245), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (37236, 37245), False, 'import os\n'), ((37264, 37288), 
'os.path.join', 'os.path.join', (['dirpath', '_'], {}), '(dirpath, _)\n', (37276, 37288), False, 'import os\n'), ((69819, 69895), 'os.listdir', 'os.listdir', (['f"""{self.path}/lc_plots/short_period/pos_visual_inspection/RRLyr"""'], {}), "(f'{self.path}/lc_plots/short_period/pos_visual_inspection/RRLyr')\n", (69829, 69895), False, 'import os\n'), ((4068, 4104), 'os.listdir', 'os.listdir', (['f"""{self.path}/var_data/"""'], {}), "(f'{self.path}/var_data/')\n", (4078, 4104), False, 'import os\n'), ((18172, 18246), 'os.listdir', 'os.listdir', (['f"""{self.path}/lc_plots/short_period/pos_visual_inspection/IDK"""'], {}), "(f'{self.path}/lc_plots/short_period/pos_visual_inspection/IDK')\n", (18182, 18246), False, 'import os\n'), ((29668, 29691), 'star_classificator_tools.StarClassificator', 'sct.StarClassificator', ([], {}), '()\n', (29689, 29691), True, 'import star_classificator_tools as sct\n'), ((47692, 47708), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (47698, 47708), True, 'import numpy as np\n'), ((47815, 47826), 'numpy.std', 'np.std', (['res'], {}), '(res)\n', (47821, 47826), True, 'import numpy as np\n'), ((49477, 49493), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (49483, 49493), True, 'import numpy as np\n'), ((49600, 49611), 'numpy.std', 'np.std', (['res'], {}), '(res)\n', (49606, 49611), True, 'import numpy as np\n'), ((51266, 51282), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (51272, 51282), True, 'import numpy as np\n'), ((51389, 51400), 'numpy.std', 'np.std', (['res'], {}), '(res)\n', (51395, 51400), True, 'import numpy as np\n'), ((55615, 55638), 'star_classificator_tools.StarClassificator', 'sct.StarClassificator', ([], {}), '()\n', (55636, 55638), True, 'import star_classificator_tools as sct\n'), ((63852, 63868), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (63858, 63868), True, 'import numpy as np\n'), ((63975, 63986), 'numpy.std', 'np.std', (['res'], {}), '(res)\n', (63981, 63986), 
True, 'import numpy as np\n'), ((65673, 65689), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (65679, 65689), True, 'import numpy as np\n'), ((65796, 65807), 'numpy.std', 'np.std', (['res'], {}), '(res)\n', (65802, 65807), True, 'import numpy as np\n'), ((67482, 67498), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (67488, 67498), True, 'import numpy as np\n'), ((67605, 67616), 'numpy.std', 'np.std', (['res'], {}), '(res)\n', (67611, 67616), True, 'import numpy as np\n'), ((17884, 17960), 'os.listdir', 'os.listdir', (['f"""{self.path}/lc_plots/short_period/pos_visual_inspection/RRLyr"""'], {}), "(f'{self.path}/lc_plots/short_period/pos_visual_inspection/RRLyr')\n", (17894, 17960), False, 'import os\n'), ((18029, 18103), 'os.listdir', 'os.listdir', (['f"""{self.path}/lc_plots/short_period/pos_visual_inspection/ECL"""'], {}), "(f'{self.path}/lc_plots/short_period/pos_visual_inspection/ECL')\n", (18039, 18103), False, 'import os\n'), ((21723, 21739), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (21729, 21739), True, 'import numpy as np\n'), ((21786, 21814), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (21792, 21814), True, 'import numpy as np\n'), ((22614, 22662), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (22623, 22662), True, 'import pandas as pd\n'), ((23814, 23830), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (23820, 23830), True, 'import numpy as np\n'), ((23877, 23905), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (23883, 23905), True, 'import numpy as np\n'), ((24707, 24755), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (24716, 24755), True, 'import pandas as pd\n'), ((25751, 25767), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (25757, 
25767), True, 'import numpy as np\n'), ((25814, 25842), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (25820, 25842), True, 'import numpy as np\n'), ((26645, 26693), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (26654, 26693), True, 'import pandas as pd\n'), ((37598, 37677), 'os.listdir', 'os.listdir', (['f"""{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}"""'], {}), "(f'{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}')\n", (37608, 37677), False, 'import os\n'), ((39532, 39548), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (39538, 39548), True, 'import numpy as np\n'), ((39595, 39623), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (39601, 39623), True, 'import numpy as np\n'), ((45800, 45879), 'os.listdir', 'os.listdir', (['f"""{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}"""'], {}), "(f'{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}')\n", (45810, 45879), False, 'import os\n'), ((47742, 47770), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (47748, 47770), True, 'import numpy as np\n'), ((48632, 48680), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (48641, 48680), True, 'import pandas as pd\n'), ((49527, 49555), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (49533, 49555), True, 'import numpy as np\n'), ((50419, 50467), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (50428, 50467), True, 'import pandas as pd\n'), ((51316, 51344), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (51322, 51344), True, 'import numpy as np\n'), ((52206, 52254), 
'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (52215, 52254), True, 'import pandas as pd\n'), ((61950, 62029), 'os.listdir', 'os.listdir', (['f"""{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}"""'], {}), "(f'{self.path}/lc_plots/short_period/pos_visual_inspection/{folder}')\n", (61960, 62029), False, 'import os\n'), ((63902, 63930), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (63908, 63930), True, 'import numpy as np\n'), ((64792, 64840), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (64801, 64840), True, 'import pandas as pd\n'), ((65723, 65751), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (65729, 65751), True, 'import numpy as np\n'), ((66615, 66663), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (66624, 66663), True, 'import pandas as pd\n'), ((67532, 67560), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (67538, 67560), True, 'import numpy as np\n'), ((68422, 68470), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (68431, 68470), True, 'import pandas as pd\n'), ((40493, 40541), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (40502, 40541), True, 'import pandas as pd\n'), ((41377, 41393), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (41383, 41393), True, 'import numpy as np\n'), ((41440, 41468), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (41446, 41468), True, 'import numpy as np\n'), ((42267, 42315), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 
'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (42276, 42315), True, 'import pandas as pd\n'), ((43131, 43147), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (43137, 43147), True, 'import numpy as np\n'), ((43194, 43222), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (43200, 43222), True, 'import numpy as np\n'), ((57035, 57083), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (57044, 57083), True, 'import pandas as pd\n'), ((58888, 58936), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (58897, 58936), True, 'import pandas as pd\n'), ((60332, 60380), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (60341, 60380), True, 'import pandas as pd\n'), ((7153, 7179), 'numpy.nanpercentile', 'np.nanpercentile', (['lc'], {'q': '(95)'}), '(lc, q=95)\n', (7169, 7179), True, 'import numpy as np\n'), ((7181, 7206), 'numpy.nanpercentile', 'np.nanpercentile', (['lc'], {'q': '(5)'}), '(lc, q=5)\n', (7197, 7206), True, 'import numpy as np\n'), ((30430, 30446), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (30436, 30446), True, 'import numpy as np\n'), ((30501, 30529), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (30507, 30529), True, 'import numpy as np\n'), ((31425, 31473), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (31434, 31473), True, 'import pandas as pd\n'), ((32813, 32829), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (32819, 32829), True, 'import numpy as np\n'), ((32884, 32912), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (32890, 32912), True, 'import numpy as np\n'), ((33810, 
33858), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (33819, 33858), True, 'import pandas as pd\n'), ((35026, 35042), 'numpy.sum', 'np.sum', (['(res ** 2)'], {}), '(res ** 2)\n', (35032, 35042), True, 'import numpy as np\n'), ((35097, 35125), 'numpy.sum', 'np.sum', (['(res ** 2 / errs ** 2)'], {}), '(res ** 2 / errs ** 2)\n', (35103, 35125), True, 'import numpy as np\n'), ((36024, 36072), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (36033, 36072), True, 'import pandas as pd\n'), ((44067, 44115), 'pandas.Series', 'pd.Series', (['fit_parameters'], {'index': 'fit_params_cols'}), '(fit_parameters, index=fit_params_cols)\n', (44076, 44115), True, 'import pandas as pd\n')] |
# coding: utf-8
# In[1]:
import datetime
import glob
import hashlib
import multiprocessing as mp
import os
import queue
import random
import threading
from functools import partial
import keras.backend.tensorflow_backend as KTF
#import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.callbacks import (EarlyStopping, ModelCheckpoint, ReduceLROnPlateau,
TensorBoard)
from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge
from keras.layers.normalization import BatchNormalization
from keras.models import Model, load_model, model_from_json
from keras.optimizers import RMSprop
from keras.preprocessing import image
from keras.utils.np_utils import to_categorical
import pelops.utils as utils
from pelops.analysis import analysis
from pelops.analysis.camerautil import get_match_id, make_good_bad
from pelops.datasets.featuredataset import FeatureDataset
from pelops.datasets.veri import VeriDataset
from pelops.experiment_api.experiment import ExperimentGenerator
from pelops.utils import train_test_key_filter
# In[2]:
# In[3]:
def save_model_workaround(model, model_output_file, weights_output_file):
    """
    Persist a Keras model as two files instead of one: the architecture as
    JSON and the weights as HDF5 (works around problems with single-file
    model saving in this Keras version).

    model: compiled Keras model to persist
    model_output_file: destination path for the JSON architecture
    weights_output_file: destination path for the HDF5 weights
    """
    print('saving model to {}'.format(model_output_file))
    print('saving weights to {}'.format(weights_output_file))
    architecture = model.to_json()
    with open(model_output_file, 'w') as handle:
        handle.write(architecture)
    model.save_weights(weights_output_file)
def load_model_workaround(model_output_file, weights_output_file):
    """
    Load a Keras model saved by save_model_workaround: architecture from a
    JSON file, weights from an HDF5 file.

    model_output_file: path to the JSON architecture file
    weights_output_file: path to the HDF5 weights file
    Returns the reconstructed (uncompiled) Keras model.
    """
    # Use a context manager so the file handle is always closed, even if the
    # read raises (the old open/read/close sequence leaked on error).
    with open(model_output_file, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into the freshly built model
    loaded_model.load_weights(weights_output_file)
    return loaded_model
# In[4]:
def makework(workitems, chips, cam_id=None):
    """
    Append two mirrored training tuples (A->B and B->A) to `workitems` for a
    pair of chips.

    Each tuple is (filepath_a, filepath_b, [same_vehicle, same_type,
    same_color]).  `cam_id` is currently unused: the same-angle feature is
    commented out in the original implementation.
    """
    first, second = chips[0], chips[1]
    labels = [first.car_id == second.car_id,
              first.misc['vehicle_type'] == second.misc['vehicle_type'],
              first.misc['color'] == second.misc['color']]
    workitems.append((first.filepath, second.filepath, labels))
    workitems.append((second.filepath, first.filepath, labels))
def make_examples(gen, examples):
    """
    Run `examples` experiments through the generator and collect training
    work items (matching and non-matching chip pairs) via makework.

    gen: an ExperimentGenerator producing camera groupings
    examples: number of experiments to generate
    Returns the flat list of (left_path, right_path, labels) work items.
    """
    work = []
    for _ in range(examples):
        cameras = gen.generate()
        match_id = get_match_id(cameras)
        matched, unmatched = make_good_bad(cameras, match_id)
        makework(work, matched)
        makework(work, unmatched)
    print('made', len(work))
    return work
# In[5]:
# get a GPU session and reserve memory
def get_session(gpu_fraction=0.3):
    """
    Build a TensorFlow session that pre-allocates only `gpu_fraction` of the
    GPU's memory (e.g. 0.3 of a 6GB card reserves ~2GB).

    Honors the OMP_NUM_THREADS environment variable for intra-op
    parallelism when it is set.
    """
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        # BUGFIX: environment variables are strings, but ConfigProto's
        # intra_op_parallelism_threads is an int32 protobuf field, so the
        # value must be converted to int before assignment.
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options,
            intra_op_parallelism_threads=int(num_threads)))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
def rgb2bgr(x):
    """
    Given an array representation of an RGB image, return a BGR
    representation of it (channels reversed, as a new float array).
    """
    swapped = np.zeros(x.shape)
    swapped[:, :, 0] = x[:, :, 2]
    swapped[:, :, 1] = x[:, :, 1]
    swapped[:, :, 2] = x[:, :, 0]
    return swapped
def bgr2rgb(x):
    """
    Given an array representation of a BGR image, return an RGB
    representation of it (channels reversed, as a new float array).
    """
    flipped = np.zeros(x.shape)
    flipped[:, :, 0] = x[:, :, 2]
    flipped[:, :, 1] = x[:, :, 1]
    flipped[:, :, 2] = x[:, :, 0]
    return flipped
# load an image from disk
# NOTE: input assumed to be RGB
# NOTE: output is to be BGR for resnet use.
def load_image(img_path,
               e_dims=False,
               image_flip=0.5,
               image_shift=0.20,
               image_rotate_degrees=15,
               image_zoom=0.15,
               output_BGR=True):
    """
    Load an image from disk, optionally augment it, and preprocess it for
    ResNet50 consumption.

    WARNING: this function should only produce images meant for resnet50
    consumption; to reuse it elsewhere remove the preprocess_input call.
    The file on disk is assumed to be RGB.

    e_dims: False -> output shaped (x, y, 3); True -> (1, x, y, 3)
    image_flip: probability the image is flipped right-to-left
        (None disables flipping)
    image_shift: fraction of the image to randomly shift up/down & right/left
        (None disables)
    image_rotate_degrees: rotate randomly in
        [-image_rotate_degrees, image_rotate_degrees] (None disables)
    image_zoom: randomly zoom in [1 - image_zoom, 1 + image_zoom]
        (None disables)
    output_BGR: True -> BGR output, RGB otherwise
    """
    img = image.load_img(img_path, target_size=(224, 224))
    my_img = image.img_to_array(img)
    if image_flip is not None:
        if image_flip > 1 or image_flip < -1:
            raise ValueError('|image_flip:{0}| > 1'.format(image_flip))
        image_flip = abs(image_flip)
        # BUGFIX: flip with probability image_flip.  The previous comparison
        # (random.random() > image_flip) flipped with probability
        # 1 - image_flip, contradicting the documented meaning; the two
        # behaviors are indistinguishable only at the 0.5 default.
        if random.random() < image_flip:
            my_img = image.flip_axis(my_img, axis=1)
    if image_rotate_degrees is not None:
        image_rotate_degrees = int(image_rotate_degrees)
        if image_rotate_degrees > 360:
            image_rotate_degrees = image_rotate_degrees % 360
        my_img = image.random_rotation(my_img,
                                      image_rotate_degrees,
                                      row_index=0,
                                      col_index=1,
                                      channel_index=2)
    if image_shift is not None:
        if image_shift > 1 or image_shift < -1:
            raise ValueError('|image_shift:{0}| > 1'.format(image_shift))
        image_shift = abs(image_shift)
        my_img = image.random_shift(my_img,
                                   image_shift,
                                   image_shift,
                                   row_index=0,
                                   col_index=1,
                                   channel_index=2)
    if image_zoom is not None:
        if image_zoom > 1 or image_zoom < -1:
            raise ValueError('|image_zoom:{0}| > 1'.format(image_zoom))
        image_zoom = abs(image_zoom)
        low = 1 - image_zoom
        high = 1 + image_zoom
        rng = [low, high]
        my_img = image.random_zoom(my_img,
                                  rng,
                                  row_index=0,
                                  col_index=1,
                                  channel_index=2)
    if not output_BGR:
        # NOTE(review): preprocess_input itself appears to swap channel order
        # as part of ResNet50 preprocessing, so this pre-swap inverts the
        # final channel order for the RGB-output case -- confirm against the
        # keras.applications.resnet50 implementation in use.
        my_img = bgr2rgb(my_img)
    my_img = np.expand_dims(my_img, axis=0)
    my_img = preprocess_input(my_img)
    if not e_dims:
        my_img = my_img.squeeze()
    return my_img
# In[6]:
def plot_run_no(history, name1, name2, rnd=None):
    """
    Plot two series from a Keras History.history dict on a single axis,
    optionally adding a constant 'random' baseline at height `rnd`.

    history: dict of metric-name -> list of per-epoch values
    name1, name2: keys of the two series to plot (e.g. 'acc', 'val_acc')
    rnd: if not None, also draw a flat line at this value labelled 'random'
    """
    # BUGFIX: the module-level matplotlib import is commented out at the top
    # of this file, so `plt` was an unresolved name and calling this function
    # raised NameError.  Import lazily here instead, keeping the module
    # importable on headless training boxes.
    import matplotlib.pyplot as plt
    v = np.array(history[name1])
    vc = np.array(history[name2])
    if rnd is not None:
        vr = np.zeros(vc.shape)
        vr.fill(rnd)
        b = np.array([v, vc, vr])
    else:
        b = np.array([v, vc])
    # transpose so each series becomes a column and plots as its own line
    c = b.transpose()
    ax = plt.subplot(111)
    ax.grid(True)
    ax.plot(c)
    if rnd is not None:
        ax.legend((name1, name2, 'random'),
                  bbox_to_anchor=(1, -0.05),
                  fancybox=True, shadow=True, ncol=5)
    else:
        ax.legend((name1, name2),
                  bbox_to_anchor=(1, -0.05),
                  fancybox=True, shadow=True, ncol=5)
    plt.show()
# In[7]:
def image_class_generator(tasking, batch_size=32, augment=False):
    """
    Endlessly yield ([left_batch, right_batch], labels) tuples of
    `batch_size` pairs sampled uniformly at random from `tasking`.

    tasking: list of (left_path, right_path, classes) work items
    augment: True -> apply random image augmentation via load_image;
             False -> load images unmodified
    """
    while True:
        batch_left = []
        batch_right = []
        batch_labels = []
        for left_file, right_file, classes in random.sample(tasking, batch_size):
            # boolean class flags -> 0/1 float label vector
            label = np.array([1.0 if flag else 0.0 for flag in classes])
            if augment:
                batch_left.append(load_image(left_file))
                batch_right.append(load_image(right_file))
            else:
                batch_left.append(load_image(left_file, False, None, None, None, None))
                batch_right.append(load_image(right_file, False, None, None, None, None))
            batch_labels.append(label)
        yield ([np.array(batch_left), np.array(batch_right)], np.array(batch_labels))
def buffered_gen_mp(source_gen, buffer_size=2, num_processes=4):
    """
    Generator that runs a slow source generator in a separate process.
    buffer_size: the maximal number of items to pre-generate (length of the buffer)

    num_processes: number of worker processes started over source_gen.

    NOTE(review): every worker receives (a forked copy of) the same
    source_gen and iterates it independently, so with a finite generator
    items would be produced once per worker and consumption stops at the
    FIRST None sentinel any worker emits.  Here source_gen is an infinite
    random-batch generator, where that is harmless -- confirm before reusing
    with a finite source.
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")
    buffer = mp.Queue(maxsize=buffer_size - 1)
    # the effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the
    # buffer.

    # Worker body: drain source_gen into the shared queue, then signal end.
    def _buffered_generation_process(source_gen, buffer):
        for data in source_gen:
            buffer.put(data, block=True)
        buffer.put(None)  # sentinel: signal the end of the iterator
        buffer.close()  # unfortunately this does not suffice as a signal: if buffer.get()
        # was called and subsequently the buffer is closed, it will block
        # forever.
    for _ in range(num_processes):
        process = mp.Process(
            target=_buffered_generation_process, args=(source_gen, buffer))
        process.start()
    # Consume until the first None sentinel appears on the queue.
    for data in iter(buffer.get, None):
        yield data
# In[8]:
def freeze(model):
    """
    Mark the model and every one of its layers as non-trainable so its
    weights are not updated during training.
    """
    model.trainable = False
    for lyr in model.layers:
        lyr.trainable = False
# In[9]:
def free_model_layers(model):
    """
    Make every layer of `model` trainable again (the inverse of freeze),
    descending one level into a nested sub-model named 'resnet50'.

    Layers whose `trainable` attribute cannot be read or set are reported by
    name, except merge layers, which are expected not to have one.
    """
    for layer in model.layers:
        try:
            if layer.name == 'resnet50':
                print('found resnet')
                for rn_layer in layer.layers:
                    try:
                        if not rn_layer.trainable:
                            rn_layer.trainable = True
                    # BUGFIX: narrowed from a bare `except:` so that
                    # KeyboardInterrupt/SystemExit still propagate.
                    except Exception:
                        # made consistent with the outer handler by
                        # lower-casing before the 'merge' check
                        if 'merge' not in rn_layer.name.lower():
                            print('rn layer not trainable', rn_layer.name)
            if not layer.trainable:
                layer.trainable = True
        except Exception:
            if 'merge' not in layer.name.lower():
                print('layer not trainable:', layer.name)
# In[10]:
def make_siamese_model_concat(num_training_classes=3):
    """
    Build a siamese network whose two frozen ResNet50 feature maps are
    joined by concatenation, followed by a stack of dense layers ending in
    sigmoid outputs (one per training class).

    @TODO see if fewer layers can now be used because of not using
    binary_crossentropy..
    """
    backbone = ResNet50(weights='imagenet', include_top=False)
    freeze(backbone)
    left_in = Input(shape=(224, 224, 3))
    right_in = Input(shape=(224, 224, 3))
    left_feats = backbone(left_in)
    right_feats = backbone(right_in)
    # join by slapping the two feature vectors together
    joined = merge([left_feats, right_feats], mode='concat')
    x = GlobalAveragePooling2D()(joined)
    x = Dense(4096, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(2048, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(2048, activation='relu')(x)
    out = Dense(num_training_classes, activation='sigmoid')(x)
    return Model([left_in, right_in], output=out)
# In[11]:
def s_distance(vects):
    """
    Element-wise absolute difference between the two tensors in `vects`.
    """
    left, right = vects
    return K.abs(left - right)
# return K.squeeze(x,1) - K.squeeze(y,1)
def s_shape(shapes):
    """
    Output-shape helper for the s_distance Lambda: the element-wise
    difference has the same shape as either input, so return the first
    input shape as a tuple.
    """
    first, *_ = shapes
    return tuple(first)
def make_siamese_model_subtract(num_training_classes=2):
    """
    Build a siamese network whose two frozen ResNet50 feature maps are
    joined by an element-wise absolute difference (Lambda over s_distance),
    followed by a small dense head with sigmoid outputs.
    """
    backbone = ResNet50(weights='imagenet', include_top=False)
    backbone.trainable = False
    for lyr in backbone.layers:
        lyr.trainable = False
    left_in = Input(shape=(224, 224, 3))
    right_in = Input(shape=(224, 224, 3))
    left_feats = backbone(left_in)
    right_feats = backbone(right_in)
    # use a distance measure for making the join
    joined = Lambda(s_distance,
                    output_shape=s_shape)([left_feats, right_feats])
    x = GlobalAveragePooling2D()(joined)
    x = Dense(1024, activation='relu')(x)
    x = BatchNormalization()(x)
    out = Dense(num_training_classes, activation='sigmoid')(x)
    return Model([left_in, right_in], output=out)
# In[12]:
def make_callbacks(model_checkpoint_format_string, tensor_board_log_dir):
    """
    Programmatically build the Keras training callbacks: optional model
    checkpointing and TensorBoard logging, plus learning-rate reduction on
    plateau and early stopping (always included).

    Pass None for either path argument to skip the corresponding callback.
    """
    cbs = []
    if model_checkpoint_format_string is not None:
        cbs.append(ModelCheckpoint(model_checkpoint_format_string,
                                   monitor='loss',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=False,
                                   mode='min',
                                   period=1))
    if tensor_board_log_dir is not None:
        cbs.append(TensorBoard(log_dir=tensor_board_log_dir,
                               histogram_freq=0,
                               write_graph=True,
                               write_images=False))
    cbs.append(ReduceLROnPlateau(monitor='val_loss',
                                 factor=0.1,
                                 patience=4,
                                 verbose=1,
                                 mode='min',
                                 epsilon=0.001,
                                 cooldown=2,
                                 min_lr=0))
    cbs.append(EarlyStopping(monitor='val_acc',
                             min_delta=0.003,
                             patience=6,
                             verbose=1,
                             mode='max'))
    return cbs
# In[13]:
def checkLabels(x):
    """
    Print and return the fraction of work items whose same-id, same-type,
    and same-color flags are set -- a warm fuzzy that classes are balanced.

    x: list of (left_path, right_path, (same_id, same_type, same_color))
    Returns the three fractions as a tuple.
    """
    total = len(x)
    id_hits = sum(1.0 for item in x if item[2][0])
    type_hits = sum(1.0 for item in x if item[2][1])
    color_hits = sum(1.0 for item in x if item[2][2])
    print('P(s_id==1):{0} P(s_type==1):{1} P(s_color==1):{2}'.format(
        id_hits / total, type_hits / total, color_hits / total))
    return id_hits / total, type_hits / total, color_hits / total
# In[14]:
#---------------------------------------
# In[15]:
# set some constants
ITEMSPERCAMERA = 2  # chips per camera passed to the ExperimentGenerator
YRANDOM = 13024  # seed handed to the ExperimentGenerator (reproducibility)
CAMERAS = 2  # cameras per generated experiment
DROPPED = 0  # presumably chips to drop per experiment -- confirm in ExperimentGenerator
EXPERIMENTS = int(40000 / 4)  # experiments used to build train/validation examples
batch_size = 16  # pairs per generated training batch
tbld = '/local_data/dgrossman/tensorboard_logs'  # TensorBoard log directory
mcfs = '/local_data/dgrossman/tempdir/veri-siamese.{epoch:02d}-{val_loss:.2f}-{val_acc:.2f}.hdf5'  # checkpoint filename template
# In[16]:
# NOTE(review): veri_validate is constructed but never used below --
# expGen_validate is also built from veri_train, relying on
# train_test_key_filter(split="test") to carve held-out keys out of the
# training set.  Confirm this is intentional before removing veri_validate.
veri_validate = VeriDataset(
    '/local_data/dgrossman/VeRi', set_type=utils.SetType.TEST.value)
veri_train = VeriDataset('/local_data/dgrossman/VeRi',
                         set_type=utils.SetType.TRAIN.value)
# Experiment generators produce the camera groupings that make_examples
# turns into chip pairs.
expGen_validate = ExperimentGenerator(veri_train,
                                    CAMERAS,
                                    ITEMSPERCAMERA,
                                    DROPPED,
                                    YRANDOM,
                                    key_filter=partial(train_test_key_filter, split="test"))
expGen_train = ExperimentGenerator(veri_train,
                               CAMERAS,
                               ITEMSPERCAMERA,
                               DROPPED,
                               YRANDOM,
                               key_filter=partial(train_test_key_filter, split="train"))
# In[17]:
# Build the (left_path, right_path, labels) work-item lists for both splits.
# NOTE: 'validaiton_examples' is a typo kept as-is -- the name is referenced
# again further down, so renaming it is a wider change than this block.
training_examples = make_examples(expGen_train, EXPERIMENTS)
validaiton_examples = make_examples(expGen_validate, EXPERIMENTS)  # GROSSMAN
# In[18]:
# Print class-balance fractions for both splits (sanity check).
checkLabels(training_examples)
# In[19]:
checkLabels(validaiton_examples)
# In[19]:
# GROSSMAN change augment to True when running for real.
# Wrap the batch generators in multiprocess buffers so image loading and
# augmentation overlap with GPU training (5 workers, 20-item buffer each).
train_buffered_generator_mp = buffered_gen_mp(image_class_generator(training_examples,
                                                                     batch_size,
                                                                     augment=True),
                                              buffer_size=20,
                                              num_processes=5)
val_buffered_generator_mp = buffered_gen_mp(image_class_generator(validaiton_examples,
                                                                   batch_size,
                                                                   augment=False),
                                            buffer_size=20,
                                            num_processes=5)
# In[20]:
callbacks = make_callbacks(mcfs, tbld)
# In[21]:
# Reserve 90% of GPU memory for this process.
KTF.set_session(get_session(.90))
# In[25]:
#model = make_siamese_model_concat(3)
model = make_siamese_model_subtract(3)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# In[26]:
# Phase 1: train only the dense head (ResNet50 backbone is frozen inside
# make_siamese_model_subtract); no callbacks so no checkpoints are written.
fixed_history = model.fit_generator(train_buffered_generator_mp,
                                    samples_per_epoch=10240,
                                    nb_epoch=20,
                                    callbacks=None,
                                    nb_val_samples=10240,
                                    validation_data=val_buffered_generator_mp,
                                    verbose=2)
# Notebook-cell leftover: bare expression has no effect when run as a script.
fixed_history.history
# Phase 2: unfreeze everything and fine-tune end-to-end with checkpointing,
# LR scheduling, and early stopping; recompile so the change takes effect.
free_model_layers(model)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
free_history = model.fit_generator(train_buffered_generator_mp,
                                   samples_per_epoch=10240,
                                   nb_epoch=50,
                                   callbacks=callbacks,
                                   nb_val_samples=10240,
                                   validation_data=val_buffered_generator_mp,
                                   verbose=2)
# Persist architecture (JSON) and weights (HDF5) separately.
save_model_workaround(model,
                      '/local_data/dgrossman/model_save_dir/VeRi-siamese-weekend-6.model.json',
                      '/local_data/dgrossman/model_save_dir/VeRi-siamese-weekend-6.weights.hdf5')
| [
"keras.preprocessing.image.random_shift",
"random.sample",
"keras.models.Model",
"keras.backend.abs",
"keras.preprocessing.image.img_to_array",
"tensorflow.ConfigProto",
"multiprocessing.Queue",
"keras.layers.Input",
"tensorflow.GPUOptions",
"pelops.analysis.camerautil.make_good_bad",
"pelops.an... | [((15935, 16011), 'pelops.datasets.veri.VeriDataset', 'VeriDataset', (['"""/local_data/dgrossman/VeRi"""'], {'set_type': 'utils.SetType.TEST.value'}), "('/local_data/dgrossman/VeRi', set_type=utils.SetType.TEST.value)\n", (15946, 16011), False, 'from pelops.datasets.veri import VeriDataset\n'), ((16030, 16107), 'pelops.datasets.veri.VeriDataset', 'VeriDataset', (['"""/local_data/dgrossman/VeRi"""'], {'set_type': 'utils.SetType.TRAIN.value'}), "('/local_data/dgrossman/VeRi', set_type=utils.SetType.TRAIN.value)\n", (16041, 16107), False, 'from pelops.datasets.veri import VeriDataset\n'), ((1873, 1907), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (1888, 1907), False, 'from keras.models import Model, load_model, model_from_json\n'), ((3057, 3090), 'os.environ.get', 'os.environ.get', (['"""OMP_NUM_THREADS"""'], {}), "('OMP_NUM_THREADS')\n", (3071, 3090), False, 'import os\n'), ((3109, 3168), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'gpu_fraction'}), '(per_process_gpu_memory_fraction=gpu_fraction)\n', (3122, 3168), True, 'import tensorflow as tf\n'), ((3726, 3743), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (3734, 3743), True, 'import numpy as np\n'), ((5065, 5113), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (5079, 5113), False, 'from keras.preprocessing import image\n'), ((5127, 5150), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (5145, 5150), False, 'from keras.preprocessing import image\n'), ((6964, 6994), 'numpy.expand_dims', 'np.expand_dims', (['my_img'], {'axis': '(0)'}), '(my_img, axis=0)\n', (6978, 6994), True, 'import numpy as np\n'), ((7008, 7032), 'keras.applications.resnet50.preprocess_input', 'preprocess_input', (['my_img'], {}), '(my_img)\n', (7024, 7032), False, 'from 
keras.applications.resnet50 import ResNet50, preprocess_input\n'), ((7224, 7248), 'numpy.array', 'np.array', (['history[name1]'], {}), '(history[name1])\n', (7232, 7248), True, 'import numpy as np\n'), ((7258, 7282), 'numpy.array', 'np.array', (['history[name2]'], {}), '(history[name2])\n', (7266, 7282), True, 'import numpy as np\n'), ((9263, 9296), 'multiprocessing.Queue', 'mp.Queue', ([], {'maxsize': '(buffer_size - 1)'}), '(maxsize=buffer_size - 1)\n', (9271, 9296), True, 'import multiprocessing as mp\n'), ((11240, 11287), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (11248, 11287), False, 'from keras.applications.resnet50 import ResNet50, preprocess_input\n'), ((11330, 11356), 'keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (11335, 11356), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((11375, 11401), 'keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (11380, 11401), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((11553, 11608), 'keras.layers.merge', 'merge', (['[processed_left, processed_right]'], {'mode': '"""concat"""'}), "([processed_left, processed_right], mode='concat')\n", (11558, 11608), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((12014, 12066), 'keras.models.Model', 'Model', (['[input_left, input_right]'], {'output': 'predictions'}), '([input_left, input_right], output=predictions)\n', (12019, 12066), False, 'from keras.models import Model, load_model, model_from_json\n'), ((12208, 12220), 'keras.backend.abs', 'K.abs', (['(x - y)'], {}), '(x - y)\n', (12213, 12220), True, 'from keras import backend as K\n'), ((12631, 12678), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 
'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (12639, 12678), False, 'from keras.applications.resnet50 import ResNet50, preprocess_input\n'), ((12799, 12825), 'keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (12804, 12825), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((12844, 12870), 'keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (12849, 12870), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((13379, 13431), 'keras.models.Model', 'Model', (['[input_left, input_right]'], {'output': 'predictions'}), '([input_left, input_right], output=predictions)\n', (13384, 13431), False, 'from keras.models import Model, load_model, model_from_json\n'), ((2674, 2695), 'pelops.analysis.camerautil.get_match_id', 'get_match_id', (['cameras'], {}), '(cameras)\n', (2686, 2695), False, 'from pelops.analysis.camerautil import get_match_id, make_good_bad\n'), ((2718, 2750), 'pelops.analysis.camerautil.make_good_bad', 'make_good_bad', (['cameras', 'match_id'], {}), '(cameras, match_id)\n', (2731, 2750), False, 'from pelops.analysis.camerautil import get_match_id, make_good_bad\n'), ((5651, 5750), 'keras.preprocessing.image.random_rotation', 'image.random_rotation', (['my_img', 'image_rotate_degrees'], {'row_index': '(0)', 'col_index': '(1)', 'channel_index': '(2)'}), '(my_img, image_rotate_degrees, row_index=0, col_index=\n 1, channel_index=2)\n', (5672, 5750), False, 'from keras.preprocessing import image\n'), ((6113, 6213), 'keras.preprocessing.image.random_shift', 'image.random_shift', (['my_img', 'image_shift', 'image_shift'], {'row_index': '(0)', 'col_index': '(1)', 'channel_index': '(2)'}), '(my_img, image_shift, image_shift, row_index=0, col_index\n =1, channel_index=2)\n', (6131, 6213), False, 'from keras.preprocessing import image\n'), ((6679, 6752), 
'keras.preprocessing.image.random_zoom', 'image.random_zoom', (['my_img', 'rng'], {'row_index': '(0)', 'col_index': '(1)', 'channel_index': '(2)'}), '(my_img, rng, row_index=0, col_index=1, channel_index=2)\n', (6696, 6752), False, 'from keras.preprocessing import image\n'), ((7320, 7338), 'numpy.zeros', 'np.zeros', (['vc.shape'], {}), '(vc.shape)\n', (7328, 7338), True, 'import numpy as np\n'), ((7372, 7393), 'numpy.array', 'np.array', (['[v, vc, vr]'], {}), '([v, vc, vr])\n', (7380, 7393), True, 'import numpy as np\n'), ((7416, 7433), 'numpy.array', 'np.array', (['[v, vc]'], {}), '([v, vc])\n', (7424, 7433), True, 'import numpy as np\n'), ((8175, 8209), 'random.sample', 'random.sample', (['tasking', 'batch_size'], {}), '(tasking, batch_size)\n', (8188, 8209), False, 'import random\n'), ((9901, 9975), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_buffered_generation_process', 'args': '(source_gen, buffer)'}), '(target=_buffered_generation_process, args=(source_gen, buffer))\n', (9911, 9975), True, 'import multiprocessing as mp\n'), ((11625, 11649), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (11647, 11649), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((11679, 11709), 'keras.layers.Dense', 'Dense', (['(4096)'], {'activation': '"""relu"""'}), "(4096, activation='relu')\n", (11684, 11709), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((11735, 11755), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11753, 11755), False, 'from keras.layers.normalization import BatchNormalization\n'), ((11781, 11811), 'keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""'}), "(2048, activation='relu')\n", (11786, 11811), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((11837, 11857), 'keras.layers.normalization.BatchNormalization', 
'BatchNormalization', ([], {}), '()\n', (11855, 11857), False, 'from keras.layers.normalization import BatchNormalization\n'), ((11883, 11913), 'keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""'}), "(2048, activation='relu')\n", (11888, 11913), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((11942, 11991), 'keras.layers.Dense', 'Dense', (['num_training_classes'], {'activation': '"""sigmoid"""'}), "(num_training_classes, activation='sigmoid')\n", (11947, 11991), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((13031, 13071), 'keras.layers.Lambda', 'Lambda', (['s_distance'], {'output_shape': 's_shape'}), '(s_distance, output_shape=s_shape)\n', (13037, 13071), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((13148, 13172), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (13170, 13172), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((13202, 13232), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (13207, 13232), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((13258, 13278), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (13276, 13278), False, 'from keras.layers.normalization import BatchNormalization\n'), ((13307, 13356), 'keras.layers.Dense', 'Dense', (['num_training_classes'], {'activation': '"""sigmoid"""'}), "(num_training_classes, activation='sigmoid')\n", (13312, 13356), False, 'from keras.layers import Dense, GlobalAveragePooling2D, Input, Lambda, merge\n'), ((14400, 14525), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': '(4)', 'verbose': '(1)', 'mode': '"""min"""', 'epsilon': '(0.001)', 'cooldown': '(2)', 'min_lr': 
'(0)'}), "(monitor='val_loss', factor=0.1, patience=4, verbose=1,\n mode='min', epsilon=0.001, cooldown=2, min_lr=0)\n", (14417, 14525), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard\n'), ((14818, 14906), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0.003)', 'patience': '(6)', 'verbose': '(1)', 'mode': '"""max"""'}), "(monitor='val_acc', min_delta=0.003, patience=6, verbose=1,\n mode='max')\n", (14831, 14906), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard\n'), ((16427, 16471), 'functools.partial', 'partial', (['train_test_key_filter'], {'split': '"""test"""'}), "(train_test_key_filter, split='test')\n", (16434, 16471), False, 'from functools import partial\n'), ((16750, 16795), 'functools.partial', 'partial', (['train_test_key_filter'], {'split': '"""train"""'}), "(train_test_key_filter, split='train')\n", (16757, 16795), False, 'from functools import partial\n'), ((5349, 5364), 'random.random', 'random.random', ([], {}), '()\n', (5362, 5364), False, 'import random\n'), ((5400, 5431), 'keras.preprocessing.image.flip_axis', 'image.flip_axis', (['my_img'], {'axis': '(1)'}), '(my_img, axis=1)\n', (5415, 5431), False, 'from keras.preprocessing import image\n'), ((13711, 13857), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['model_checkpoint_format_string'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""min"""', 'period': '(1)'}), "(model_checkpoint_format_string, monitor='loss', verbose=1,\n save_best_only=True, save_weights_only=False, mode='min', period=1)\n", (13726, 13857), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard\n'), ((14168, 14270), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'tensor_board_log_dir', 'histogram_freq': '(0)', 'write_graph': '(True)', 
'write_images': '(False)'}), '(log_dir=tensor_board_log_dir, histogram_freq=0, write_graph=\n True, write_images=False)\n', (14179, 14270), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard\n'), ((3223, 3309), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'intra_op_parallelism_threads': 'num_threads'}), '(gpu_options=gpu_options, intra_op_parallelism_threads=\n num_threads)\n', (3237, 3309), True, 'import tensorflow as tf\n'), ((3362, 3401), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (3376, 3401), True, 'import tensorflow as tf\n'), ((8917, 8929), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (8925, 8929), True, 'import numpy as np\n'), ((8881, 8896), 'numpy.array', 'np.array', (['lefts'], {}), '(lefts)\n', (8889, 8896), True, 'import numpy as np\n'), ((8898, 8914), 'numpy.array', 'np.array', (['rights'], {}), '(rights)\n', (8906, 8914), True, 'import numpy as np\n')] |
import numpy as np
from keras.utils import to_categorical
def preprocess(dataset):
    """Split an npz dataset into train/test sets, one split per cohort.

    Parameters
    ----------
    dataset : str
        Path to an ``.npz`` archive with arrays ``x`` (features) and
        ``y`` (integer labels).  Rows are assumed ordered with all SC
        samples first, then all ST samples -- TODO confirm upstream.

    Side effects
    ------------
    Writes ``channel0_kk.npz`` containing ``trainX``/``trainY``/
    ``testX``/``testY`` with one-hot encoded labels.
    """
    data = np.load(dataset)
    x = data['x']
    y = data['y']
    print(x.shape)
    print(y.shape)
    # Cohort sizes: 152 SC subjects, 44 ST subjects.
    sc = 152
    st = 44
    trainX = []
    trainY = []
    testX = []
    testY = []
    # ===================== 70/30 split within each cohort =================
    # BUG FIX: the original code did ``sc = sc / (sc + st)`` and then
    # ``st = st / (sc + st)`` -- the second division used the *already
    # normalized* ``sc``, so ``st`` came out as ~0.98 instead of 44/196.
    # Normalize both fractions with the original total.
    length = x.shape[0]
    total = sc + st
    sc = sc / total
    st = st / total
    sc_train = int(sc * 0.7 * length)
    sc_test = int(sc * 0.3 * length)
    st_train = int(st * 0.7 * length)
    st_test = int(st * 0.3 * length)
    # First cohort: leading block of rows.
    i = 0
    trainX.append(x[i: sc_train])
    trainY.append(y[i: sc_train])
    testX.append(x[sc_train: sc_train + sc_test])
    testY.append(y[sc_train: sc_train + sc_test])
    # Second cohort: rows after the first cohort's train+test span.
    i = sc_train + sc_test
    trainX.append(x[i: i + st_train])
    trainY.append(y[i: i + st_train])
    testX.append(x[i + st_train: i + st_train + st_test])
    testY.append(y[i + st_train: i + st_train + st_test])
    trainX = np.concatenate(trainX)
    trainY = np.concatenate(trainY)
    testX = np.concatenate(testX)
    testY = np.concatenate(testY)
    # One-hot encode the integer labels for classification.
    trainY = to_categorical(trainY)
    testY = to_categorical(testY)
    print(trainX.shape)
    print(trainY.shape)
    print(testX.shape)
    print(testY.shape)
    filename = 'channel0_kk.npz'
    np.savez(filename,
             trainX=trainX,
             trainY=trainY,
             testX=testX,
             testY=testY)
def dataload(dataset):
    """Load a preprocessed npz split and zero-pad the feature axis to 3072.

    The stored feature vectors are placed in columns 36..3035 of the
    padded output (36 zeros on each side; assumes 3000-wide inputs --
    TODO confirm against the writer).

    Parameters
    ----------
    dataset : str
        Path to the ``.npz`` file written by :func:`preprocess`.

    Returns
    -------
    list
        ``[trainX, trainY, testX, testY]`` with padded feature matrices.
    """
    archive = np.load(dataset)

    def _pad(features):
        # Centre the stored columns inside a 3072-wide zero buffer.
        padded = np.zeros((features.shape[0], 3072))
        padded[:, 36:3036] = features
        return padded

    return [
        _pad(archive['trainX']),
        archive['trainY'],
        _pad(archive['testX']),
        archive['testY'],
    ]
if __name__ == '__main__':
preprocess('data.npz')
| [
"numpy.load",
"numpy.zeros",
"numpy.savez",
"numpy.concatenate",
"keras.utils.to_categorical"
] | [((104, 120), 'numpy.load', 'np.load', (['dataset'], {}), '(dataset)\n', (111, 120), True, 'import numpy as np\n'), ((1642, 1664), 'numpy.concatenate', 'np.concatenate', (['trainX'], {}), '(trainX)\n', (1656, 1664), True, 'import numpy as np\n'), ((1679, 1701), 'numpy.concatenate', 'np.concatenate', (['trainY'], {}), '(trainY)\n', (1693, 1701), True, 'import numpy as np\n'), ((1715, 1736), 'numpy.concatenate', 'np.concatenate', (['testX'], {}), '(testX)\n', (1729, 1736), True, 'import numpy as np\n'), ((1750, 1771), 'numpy.concatenate', 'np.concatenate', (['testY'], {}), '(testY)\n', (1764, 1771), True, 'import numpy as np\n'), ((1786, 1808), 'keras.utils.to_categorical', 'to_categorical', (['trainY'], {}), '(trainY)\n', (1800, 1808), False, 'from keras.utils import to_categorical\n'), ((1822, 1843), 'keras.utils.to_categorical', 'to_categorical', (['testY'], {}), '(testY)\n', (1836, 1843), False, 'from keras.utils import to_categorical\n'), ((1981, 2055), 'numpy.savez', 'np.savez', (['filename'], {'trainX': 'trainX', 'trainY': 'trainY', 'testX': 'testX', 'testY': 'testY'}), '(filename, trainX=trainX, trainY=trainY, testX=testX, testY=testY)\n', (1989, 2055), True, 'import numpy as np\n'), ((2175, 2191), 'numpy.load', 'np.load', (['dataset'], {}), '(dataset)\n', (2182, 2191), True, 'import numpy as np\n'), ((2320, 2353), 'numpy.zeros', 'np.zeros', (['(trainX.shape[0], 3072)'], {}), '((trainX.shape[0], 3072))\n', (2328, 2353), True, 'import numpy as np\n'), ((2420, 2452), 'numpy.zeros', 'np.zeros', (['(testX.shape[0], 3072)'], {}), '((testX.shape[0], 3072))\n', (2428, 2452), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of exma (https://github.com/fernandezfran/exma/).
# Copyright (c) 2021, <NAME>
# License: MIT
# Full Text: https://github.com/fernandezfran/exma/blob/master/LICENSE
# ============================================================================
# DOCS
# ============================================================================
"""exma IO module for interacting with molecular dynamics files."""
# ============================================================================
# IMPORTS
# ============================================================================
import warnings
import numpy as np
from . import reader
from . import writer
from ..core import _is_sorted, _sort_traj
# ============================================================================
# FUNCTIONS
# ============================================================================
def xyz2lammpstrj(xyztraj, lammpstrj_name, cell_info, xyzftype="xyz"):
    """Rewrite an xyz file to a lammpstrj file.

    Parameters
    ----------
    xyztraj : str
        the name of the file with the xyz trajectory.
    lammpstrj_name : str
        the name of the file with the lammpstrj trajectory.
    cell_info : dict
        with the `box`, the lenght of the box in each direction, another
        dictionary identified with the `type` key that has within it a
        correspondence between the elements present in xyz file with integer
        identification numbers, e.g. {"Sn": 1, "O": 2}.  The "type" entry
        is removed from `cell_info` (as before) and an "id" entry is added.
    xyzftype : str, default="xyz"
        the `ftype` of xyz file.
    """
    xyz = reader.XYZ(xyztraj, xyzftype)
    lmp = writer.LAMMPS(lammpstrj_name)
    # BUG FIX: the element->id mapping used to be read *and deleted* inside
    # the frame loop (`del cell_info["type"]`), so converting any
    # trajectory with more than one frame raised KeyError on the second
    # iteration.  Grab it once up front; `pop` keeps the original side
    # effect of removing "type" from cell_info.
    type_map = cell_info.pop("type")
    try:
        while True:
            xyz_frame = xyz.read_frame()
            xyz_frame["type"] = [type_map[t] for t in xyz_frame["type"]]
            # Drop entries the reader reports as None (absent columns).
            xyz_frame = {
                key: value
                for key, value in xyz_frame.items()
                if value is not None
            }
            cell_info["id"] = np.arange(1, xyz_frame["natoms"] + 1)
            lmp.write_frame(dict(cell_info, **xyz_frame))
    except EOFError:
        # EOF is the normal termination of the frame loop.
        xyz.file_close()
        lmp.file_close()
def xyz2inlmp(xyztraj, inlammps_name, cell_info, nframe=-1, xyzftype="xyz"):
    """Write an xyz frame to an input data file of LAMMPS.

    Parameters
    ----------
    xyztraj : str
        the name of the file with the xyz trajectory.
    inlammps_name : str
        the name of the file to write to.
    cell_info : dict
        with the `box`, the lenght of the box in each direction, another
        dictionary identified with the `type` key that has within it a
        correspondence between the elements present in xyz file with integer
        identification numbers, e.g. {"Sn": 1, "O": 2}.
        NOTE: mutated in place -- "type" (and "q", if present) are deleted
        and an "id" entry is added.
    nframe : int, default=-1
        number of the frame to write, by default is -1, that is, the last.
    xyzftype : str, default="xyz"
        the `ftype` of xyz file.
    """
    # nframe == -1 means "read until EOF", i.e. keep the very last frame.
    nframe = np.inf if nframe == -1 else nframe
    xyz = reader.XYZ(xyztraj, xyzftype)
    try:
        iframe = 0
        dframe = xyz.read_frame()
        # `dframe` always holds the most recently read complete frame.
        while iframe < nframe:
            xyz_frame = xyz.read_frame()
            iframe += 1
            dframe = xyz_frame
    except EOFError:
        # Warn only when a specific frame was requested but never reached;
        # with nframe=-1, hitting EOF is the normal exit path.
        if nframe != np.inf:
            warnings.warn(
                f"frame {nframe} does not exist in the trajectory file, "
                f"therefore the last frame ({iframe}) was written."
            )
    finally:
        xyz.file_close()
    # Map element symbols to integer type ids.
    dframe["type"] = [cell_info["type"][t] for t in dframe["type"]]
    # Drop entries the reader reports as None (absent columns).
    dframe = {
        key: value
        for key, value in zip(dframe.keys(), dframe.values())
        if value is not None
    }
    newframe = dframe
    if "q" in cell_info.keys():
        # Insert per-atom charge "q" as the third entry of the frame while
        # preserving the relative order of the remaining keys.
        keys = list(dframe.keys())
        keys.insert(2, "q")
        newframe = {}
        for k in keys:
            if k == "q":
                newframe[k] = cell_info[k]
            else:
                newframe[k] = dframe[k]
        del cell_info["q"]
    cell_info["id"] = np.arange(1, dframe["natoms"] + 1)
    del cell_info["type"]
    writer.in_lammps(inlammps_name, dict(cell_info, **newframe))
def lammpstrj2xyz(lammpstrjtraj, xyz_name, type_info):
    """Rewrite a lammpstrj file to an xyz file.

    Parameters
    ----------
    lammpstrjtraj : str
        the name of the file with the lammpstrj trajectory.
    xyz_name : str
        the name of the xyz file to write.
    type_info : dict
        a correspondence between the elements id present in lammpstrj file
        with str element, e.g. {1: "Sn", 2: "O"}
    """
    wanted = ["natoms", "type", "x", "y", "z", "ix", "iy", "iz"]
    source = reader.LAMMPS(lammpstrjtraj)
    target = writer.XYZ(xyz_name)
    try:
        while True:
            frame = source.read_frame()
            # Atoms must be ordered by id before writing.
            if not _is_sorted(frame["id"]):
                frame = _sort_traj(frame)
            # Integer type ids -> element symbols.
            frame["type"] = [type_info[t] for t in frame["type"]]
            target.write_frame(
                {key: frame[key] for key in frame if key in wanted}
            )
    except EOFError:
        # EOF terminates the frame loop.
        source.file_close()
        target.file_close()
def lammpstrj2inlmp(lammpstrjtraj, inlammps_name, nframe=-1):
    """Write a lammpstrj frame to an input data file of LAMMPS.

    Parameters
    ----------
    lammpstrjtraj : str
        the name of the file with the lammpstrj trajectory.
    inlammps_name : str
        the name of the file to write to.
    nframe : int, default=-1
        number of the frame to write, by default is -1, that is, the last.
    """
    # nframe == -1 means "read until EOF", i.e. keep the very last frame.
    limit = np.inf if nframe == -1 else nframe
    frames_read = 0
    lmp = reader.LAMMPS(lammpstrjtraj)
    try:
        dframe = lmp.read_frame()
        while frames_read < limit:
            dframe = lmp.read_frame()
            frames_read += 1
    except EOFError:
        # Warn only when a specific frame was requested but never reached.
        if limit != np.inf:
            warnings.warn(
                f"frame {nframe} does not exist in the trajectory file, "
                f"therefore the last frame ({frames_read}) was written."
            )
    finally:
        lmp.file_close()
    # Atoms must be ordered by id before writing.
    if not _is_sorted(dframe["id"]):
        dframe = _sort_traj(dframe)
    writer.in_lammps(inlammps_name, dframe)
| [
"warnings.warn",
"numpy.arange"
] | [((4241, 4275), 'numpy.arange', 'np.arange', (['(1)', "(dframe['natoms'] + 1)"], {}), "(1, dframe['natoms'] + 1)\n", (4250, 4275), True, 'import numpy as np\n'), ((2088, 2125), 'numpy.arange', 'np.arange', (['(1)', "(xyz_frame['natoms'] + 1)"], {}), "(1, xyz_frame['natoms'] + 1)\n", (2097, 2125), True, 'import numpy as np\n'), ((3419, 3549), 'warnings.warn', 'warnings.warn', (['f"""frame {nframe} does not exist in the trajectory file, therefore the last frame ({iframe}) was written."""'], {}), "(\n f'frame {nframe} does not exist in the trajectory file, therefore the last frame ({iframe}) was written.'\n )\n", (3432, 3549), False, 'import warnings\n'), ((6300, 6430), 'warnings.warn', 'warnings.warn', (['f"""frame {nframe} does not exist in the trajectory file, therefore the last frame ({iframe}) was written."""'], {}), "(\n f'frame {nframe} does not exist in the trajectory file, therefore the last frame ({iframe}) was written.'\n )\n", (6313, 6430), False, 'import warnings\n')] |
# coding: utf-8
import time
import pickle
import random
import warnings
from typing import Dict, List, Tuple
import pandas as pd
import numpy as np
import torch
from gym import Env
from gym.spaces import Discrete, Space
from torch import Tensor
from joeynmt.constants import BOS_TOKEN, EOS_TOKEN, UNK_TOKEN
from joeynmt.vocabulary import Vocabulary
def prep_data(filepath: str) -> dict:
    """Read the pairwise-preference TSV and build a pickled dataset.

    Each row yields two examples, one per candidate translation, scored
    by the fraction of annotators that preferred that translation.  Both
    vocabularies are built over source *and* target tokens (first token
    position wins).  The resulting dict is also pickled to
    ``./data/pairwise.pkl``.

    :param filepath: path to the tab-separated preference file
    :returns: dict with 'train', 'dev', 'src_word2idx', 'trg_word2idx'
    """
    table = pd.read_csv(filepath, sep='\t')
    src_word2idx = {}
    trg_word2idx = {}
    corpus = [
        ' '.join(table['ORIGINAL']),
        ' '.join(
            pd.concat(
                [table['TRANSLATION 1'], table['TRANSLATION 2']],
                axis=0)),
    ]
    for text in corpus:
        for vocab in (src_word2idx, trg_word2idx):
            for position, token in enumerate(text.split()):
                # Keep the first position at which a token appears.
                vocab.setdefault(token.lower(), position)
    examples = []
    for _, row in table.iterrows():
        source_tokens = row['ORIGINAL'].lower().split()
        candidates = [
            row['TRANSLATION 1'].lower().split(),
            row['TRANSLATION 2'].lower().split(),
        ]
        for which, candidate in enumerate(candidates):
            ratings = row[4:]
            pref_a = np.array(
                [1 if pref == 'TRANSLATION 1' else 0 for pref in ratings])
            pref_b = np.array(
                [1 if pref == 'TRANSLATION 2' else 0 for pref in ratings])
            votes = sum(pref_a) if which == 0 else sum(pref_b)
            share = votes / (sum(pref_a) + sum(pref_b))
            examples.append((source_tokens, candidate, share))
    bundle = {
        'train': examples,
        'dev': examples,
        'src_word2idx': src_word2idx,
        'trg_word2idx': trg_word2idx,
    }
    pickle.dump(bundle, open('./data/pairwise.pkl', 'wb'))
    return bundle
def make_vocab(token2idx: Dict[str, int]) -> Vocabulary:
    """
    Build a JoeyNMT vocabulary from a token-to-index mapping.

    The provided indices are discarded; tokens are sorted alphabetically.
    UNK_TOKEN is placed first because DEFAULT_UNK_ID is zero.

    :param token2idx: Token-to-index mapping
    :type token2idx: Dict[str, int]
    :returns: Vocabulary object
    """
    ordered_tokens = [UNK_TOKEN, *sorted(token2idx)]
    return Vocabulary(tokens=ordered_tokens)
def text2tensor(text: List[str], vocab: Vocabulary) -> Tensor:
    """
    Map a token sequence to an index tensor, framed by BOS/EOS.

    :param text: Tokens as strings
    :type text: List[str]
    :param vocab: Vocabulary mapping tokens to indices
    :type vocab: Vocabulary
    :returns: PyTorch LongTensor with indices
    """
    # vocab.stoi is a DefaultDict that yields 0 for unknown tokens.
    indices = (
        [vocab.stoi[BOS_TOKEN]]
        + [vocab.stoi[token] for token in text]
        + [vocab.stoi[EOS_TOKEN]]
    )
    return torch.LongTensor(indices)
class MakeObservation:
    """
    Callable that turns a token sequence into the representation the
    agent consumes (a Tensor of vocabulary indices, or None).

    :param vocab: Vocabulary object mapping tokens to indices
    :type vocab: Vocabulary
    """

    def __init__(self, vocab: Vocabulary):
        self.vocab = vocab

    def __call__(self, text: List[str] = None):
        """
        Convert *text* to a Tensor of vocabulary indices via
        text2tensor; pass ``None`` through unchanged.

        :param text: Tokens, or None
        :type text: List[str]
        :returns: Tensor of indices, or None
        """
        return None if text is None else text2tensor(text, self.vocab)
class PairwiseEnvironment(Env):
    """
    Environment for pairwise preferences.

    Holds the dataset from <NAME>'s pairwise translations.

    :param mode: which split of the pickled data to use ('train'/'dev')
    :param beta: unused here (reward shaping placeholder, see comments)
    :param correct_reward: unused here (see commented-out code)
    :param incorrect_reward: unused here (see commented-out code)
    :param shuffle: unused here
    :param datapath: path to the pickle produced by prep_data
    """
    def __init__(self,
                 mode: str = 'train',
                 beta: float = .6,
                 correct_reward: float = 1.,
                 incorrect_reward: float = 0.,
                 shuffle: bool = True,
                 datapath: str = None):
        super(PairwiseEnvironment, self).__init__()
        # Suppress warnings raised while unpickling legacy data.
        with open(datapath, 'rb') as f:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                data = pickle.load(f, encoding='utf8')
        self.data = data[mode]
        # NOTE(review): prep_data stores 'src_word2idx'/'trg_word2idx',
        # not 'word2idx' -- confirm which pickle layout this expects.
        self.word2idx = data['word2idx']
        # self.action_space = 2  # pref 1/2/none
        self.vocab = make_vocab(self.word2idx)
        self.vocab_size = len(self.vocab)
        self.make_obs = MakeObservation(self.vocab)
        # Debug print of the first observation.
        print(self.make_obs(self.data[0][0][0]))
        # self.BETA = beta
        # self.correct_reward = correct_reward
        # self.incorrect_reward = incorrect_reward
        # self.reward_range = (min(self.incorrect_reward,
        #                          self.BETA), max(self.correct_reward,
        #                                          self.BETA))
    # def __len__(self) -> int:
    #     return len(self.data)
    # @property
    # def action_space(self) -> Discrete:
    #     return self.action_space
    # def step(self, action: int) -> Tuple[Tensor, float, bool, None]:
    #     action_space = self.action_space
    #     assert action in action_space
    #     raise NotImplementedError('Still missing')
    #     return observation, reward, done, info
if __name__ == '__main__':
prep_data('./data/pairwise.tsv')
# PairwiseEnvironment(datapath='../data/pairwise.pkl') | [
"warnings.simplefilter",
"torch.LongTensor",
"pandas.read_csv",
"joeynmt.vocabulary.Vocabulary",
"pickle.load",
"numpy.array",
"warnings.catch_warnings",
"pandas.concat"
] | [((411, 442), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'sep': '"""\t"""'}), "(filepath, sep='\\t')\n", (422, 442), True, 'import pandas as pd\n'), ((2375, 2400), 'joeynmt.vocabulary.Vocabulary', 'Vocabulary', ([], {'tokens': 'tokens'}), '(tokens=tokens)\n', (2385, 2400), False, 'from joeynmt.vocabulary import Vocabulary\n'), ((3031, 3052), 'torch.LongTensor', 'torch.LongTensor', (['idx'], {}), '(idx)\n', (3047, 3052), False, 'import torch\n'), ((570, 657), 'pandas.concat', 'pd.concat', (["[pairwise_data['TRANSLATION 1'], pairwise_data['TRANSLATION 2']]"], {'axis': '(0)'}), "([pairwise_data['TRANSLATION 1'], pairwise_data['TRANSLATION 2']],\n axis=0)\n", (579, 657), True, 'import pandas as pd\n'), ((1259, 1328), 'numpy.array', 'np.array', (["[(1 if pref == 'TRANSLATION 1' else 0) for pref in ratings]"], {}), "([(1 if pref == 'TRANSLATION 1' else 0) for pref in ratings])\n", (1267, 1328), True, 'import numpy as np\n'), ((1365, 1434), 'numpy.array', 'np.array', (["[(1 if pref == 'TRANSLATION 2' else 0) for pref in ratings]"], {}), "([(1 if pref == 'TRANSLATION 2' else 0) for pref in ratings])\n", (1373, 1434), True, 'import numpy as np\n'), ((4460, 4485), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4483, 4485), False, 'import warnings\n'), ((4503, 4534), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4524, 4534), False, 'import warnings\n'), ((4558, 4589), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""utf8"""'}), "(f, encoding='utf8')\n", (4569, 4589), False, 'import pickle\n')] |
"""Process A/V dataset."""
import os
import os.path as op
import time
import warnings
import numpy as np
from scipy import signal, stats
import matplotlib.pyplot as plt
import openpyxl
import pandas as pd
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore', FutureWarning)
from nilearn.glm.first_level import \
make_first_level_design_matrix, compute_regressor # noqa
import statsmodels.formula.api as smf
import mne_nirs.preprocessing
import mne_nirs.statistics
import mne_nirs.utils
import mne_nirs.statistics
import mne
from mne.preprocessing.nirs import tddr
subjects = (
'6003 6005 6006 6007 6008 6009 6010 6011 6012 6013 '
'6014 6016 6017 6018 6019 6020 6021 6022 6023 6024 '
'6025 6026 6027 6029').split()
assert len(subjects) == 24
conditions = ('A', 'V', 'AV', 'W')
colors = dict( # https://personal.sron.nl/~pault/data/colourschemes.pdf
A='#4477AA', # blue
AV='#CCBB44', # yellow
V='#EE7733', # orange
W='#AA3377', # purple
)
exp_name = 'av'
runs = tuple(range(1, 3))
duration = 1.8
design = 'event'
plot_subject = '6006'
plot_run = 1
beh_title, beh_idx = 'AV', 0
filt_kwargs = dict(
l_freq=0.02, l_trans_bandwidth=0.02,
h_freq=0.2, h_trans_bandwidth=0.02)
run_h = True # regenerate HbO/HbR
n_jobs = 4 # for GLM
raw_path = 'data'
behavioral_path = op.join('data', 'NIRx behavioral data.xlsx')
proc_path = 'processed'
results_path = 'results'
os.makedirs(results_path, exist_ok=True)
os.makedirs(proc_path, exist_ok=True)
use = None
all_sci = list()
for subject in subjects[0 if run_h else subjects.index(plot_subject):]:
for run in runs:
root = f'AV{run}' if run < 3 else 'McGurk'
fname = op.join(raw_path, root, subject)
base = f'{subject}_{run:03d}'
base_pr = base.ljust(20)
if not run_h:
if subject != plot_subject or run != plot_run:
continue
raw_intensity = mne.io.read_raw_nirx(fname)
raw_od = mne.preprocessing.nirs.optical_density(
raw_intensity, verbose='error')
# good/bad channels
peaks = np.ptp(raw_od.get_data('fnirs'), axis=-1)
flat_names = [
raw_od.ch_names[f].split(' ')[0]
for f in np.where(peaks < 0.001)[0]]
sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od)
all_sci.extend(sci)
sci_mask = (sci < 0.25)
got = np.where(sci_mask)[0]
print(f' Run {base_pr}: {len(got)}/{len(raw_od.ch_names)} bad')
# assign bads
assert raw_od.info['bads'] == []
bads = set(raw_od.ch_names[pick] for pick in got)
bads = bads | set(ch_name for ch_name in raw_od.ch_names
if ch_name.split(' ')[0] in flat_names)
bads = sorted(bads)
raw_tddr = tddr(raw_od)
raw_tddr_bp = raw_tddr.copy().filter(**filt_kwargs)
raw_tddr_bp.info['bads'] = bads
picks = mne.pick_types(raw_tddr_bp.info, fnirs=True)
peaks = np.ptp(raw_tddr_bp.get_data(picks), axis=-1)
assert (peaks > 1e-5).all()
raw_tddr_bp.info['bads'] = []
raw_h = mne.preprocessing.nirs.beer_lambert_law(raw_tddr_bp, 6.)
# wait until now to assign bads so that we can choose later whether
# we want the MATLAB bads or the Python ones
h_bads = [
ch_name for ch_name in raw_h.ch_names
if ch_name.split(' ')[0] in set(bad.split(' ')[0] for bad in bads)]
assert len(bads) == len(h_bads)
raw_h.info['bads'] = h_bads
raw_h.info._check_consistency()
picks = mne.pick_types(raw_h.info, fnirs=True)
peaks = np.ptp(raw_h.get_data(picks), axis=-1)
assert (peaks > 1e-9).all() # TODO: Maybe too small
raw_h.save(op.join(proc_path, f'{base}_hbo_raw.fif'),
overwrite=True)
if subject == plot_subject and run == plot_run:
assert use is None
use = dict(intensity=raw_intensity,
od=raw_od,
tddr=raw_tddr,
h=raw_h,
run=run)
del raw_intensity, raw_od, raw_tddr, raw_tddr_bp, raw_h
assert isinstance(use, dict)
ch_names = [ch_name.rstrip(' hbo') for ch_name in use['h'].ch_names[::2]]
info = use['h'].info
###############################################################################
# Settings
plt.rcParams['axes.titlesize'] = 8
plt.rcParams['axes.labelsize'] = 8
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
###############################################################################
# Channel example figure
sfreq = 7.8125 # all analysis at this rate
def _make_design(raw_h, design, subject=None, run=None):
    """Build the stimulus matrix and GLM design matrix for one run.

    Parameters
    ----------
    raw_h : mne.io.Raw
        Haemoglobin data for one run; must be sampled at module ``sfreq``.
    design : str
        'event' (1.8 s events) or 'block' (20 s blocks, one per 5 events).
    subject, run : str | int | None
        Used only to apply the per-subject event-coding/timing fixes below.

    Returns
    -------
    stim : ndarray, shape (n_times, 4)
        Boxcar regressors, one column per condition.
    dm : pandas.DataFrame
        First-level design matrix (glover HRF, constant-only drift).
    events : ndarray
        The (possibly corrected) MNE events array.
    """
    events, _ = mne.events_from_annotations(raw_h)
    # mis-codings
    if subject == '6011' and run == 1:
        events[1, 2] = 5
    elif subject == '6014' and run == 2:
        events[97, 2] = 5
    n_times = len(raw_h.times)
    stim = np.zeros((n_times, 4))
    # Drop the spurious first event id and shift ids to start at 1.
    events = events[events[:, 2] != 1]
    events[:, 2] -= 1
    assert len(events) == 100, len(events)
    if subject == '6010' and run == 2:
        print('fixing events ...', end='')
        events[42, 2] = 2
    # Expect 25 events in each of the 4 conditions.
    want = [0] + [25] * 4
    count = np.bincount(events[:, 2])
    assert np.array_equal(count, want), count
    assert events.shape == (100, 3), events.shape
    # mne.viz.plot_events(events)
    # A large gap between the first two events flags a timing offset.
    if np.diff(events[:2, 0]) > 50:
        print('fixing timing ...', end='')
        assert (subject, run) in (('6003', 1), ('6006', 1), (None, None)), (subject, run)  # noqa: E501
        events[0::5, 0] = events[1::5, 0] - 31
        # mne.viz.plot_events(events)
    if design == 'block':
        # Keep one event per 5-event block.
        events = events[0::5]
        duration = 20.
        assert np.array_equal(np.bincount(events[:, 2]), [0] + [5] * 4)
    else:
        assert design == 'event'
        assert len(events) == 100
        duration = 1.8
        assert events.shape == (100, 3)
        # Each block of 5 consecutive events shares one condition.
        events_r = events[:, 2].reshape(20, 5)
        assert (events_r == events_r[:, :1]).all()
        del events_r
    # idx[0]: sample onsets, idx[1]: 0-based condition indices.
    idx = (events[:, [0, 2]] - [0, 1]).T
    assert np.in1d(idx[1], np.arange(len(conditions))).all()
    stim[tuple(idx)] = 1
    assert raw_h.info['sfreq'] == sfreq  # necessary for below logic to work
    # Convolve with a boxcar of the stimulus duration (in samples).
    n_block = int(np.ceil(duration * sfreq))
    stim = signal.fftconvolve(stim, np.ones((n_block, 1)), axes=0)[:n_times]
    dm_events = pd.DataFrame({
        'trial_type': [conditions[ii] for ii in idx[1]],
        'onset': idx[0] / raw_h.info['sfreq'],
        'duration': n_block / raw_h.info['sfreq']})
    dm = make_first_level_design_matrix(
        raw_h.times, dm_events, hrf_model='glover',
        drift_model='polynomial', drift_order=0)
    return stim, dm, events
###############################################################################
# Plot the design matrix and some raw traces
fig, axes = plt.subplots(2, 1, figsize=(6., 3), constrained_layout=True)
# Design
ax = axes[0]
raw_h = use['h']
stim, dm, _ = _make_design(raw_h, design)
for ci, condition in enumerate(conditions):
color = colors[condition]
ax.fill_between(
raw_h.times, stim[:, ci], 0, edgecolor='none', facecolor='k',
alpha=0.5)
model = dm[conditions[ci]].to_numpy()
ax.plot(raw_h.times, model, ls='-', lw=1, color=color)
x = raw_h.times[np.where(model > 0)[0][0]]
ax.text(
x + 10, 1.1, condition, color=color, fontweight='bold', ha='center')
ax.set(ylabel='Modeled\noxyHb', xlabel='', xlim=raw_h.times[[0, -1]])
# HbO/HbR
ax = axes[1]
picks = [pi for pi, ch_name in enumerate(raw_h.ch_names)
if 'S4_D4' in ch_name]
assert len(picks) == 2
colors = dict(hbo='r', hbr='b')
ylim = np.array([-0.5, 0.5])
for pi, pick in enumerate(picks):
color = colors[raw_h.ch_names[pick][-3:]]
data = raw_h.get_data(pick)[0] * 1e6
val = np.ptp(data)
assert val > 0.01
ax.plot(raw_h.times, data, color=color, lw=1.)
ax.set(ylim=ylim, xlabel='Time (s)', ylabel='μM',
xlim=raw_h.times[[0, -1]])
del raw_h
for ax in axes:
for key in ('top', 'right'):
ax.spines[key].set_visible(False)
for ext in ('png', 'svg'):
fig.savefig(
op.join(
results_path, f'figure_1_{exp_name}.{ext}'))
###############################################################################
# Run GLM analysis and epoching
df_cha = pd.DataFrame()
for subject in subjects:
fname = op.join(proc_path, f'{subject}_{exp_name}.h5')
if not op.isfile(fname):
subj_cha = pd.DataFrame()
t0 = time.time()
print(f'Running GLM for {subject}... ', end='')
for run in runs:
base = f'{subject}_{run:03d}'
raw_h = mne.io.read_raw_fif(
op.join(proc_path, f'{base}_hbo_raw.fif'))
if raw_h.info['sfreq'] == sfreq / 2.:
print('resampling... ', end='')
raw_h.resample(sfreq)
assert raw_h.info['sfreq'] == sfreq, raw_h.info['sfreq']
_, dm, _ = _make_design(raw_h, design, subject, run)
glm_est = mne_nirs.statistics.run_glm(
raw_h, dm, noise_model='ols', n_jobs=n_jobs)
cha = glm_est.to_dataframe()
cha['subject'] = subject
cha['run'] = run
# add good/badness of the channel
cha['good'] = ~np.in1d(cha['ch_name'], bads)
subj_cha = subj_cha.append(cha)
del raw_h
subj_cha.to_hdf(fname, 'subj_cha', mode='w')
print(f'{time.time() - t0:0.1f} sec')
df_cha = df_cha.append(pd.read_hdf(fname))
df_cha.reset_index(drop=True, inplace=True)
# block averages
event_id = {condition: ci for ci, condition in enumerate(conditions, 1)}
evokeds = {condition: dict() for condition in conditions}
for subject in subjects:
fname = op.join(
proc_path, f'{subject}-{exp_name}-ave.fif')
if not op.isfile(fname):
tmin, tmax = -2, 38
baseline = (None, 0)
t0 = time.time()
print(f'Creating block average for {subject} ... ', end='')
raws = list()
events = list()
for run in runs:
base = f'{subject}_{run:03d}'
raw_h = mne.io.read_raw_fif(
op.join(proc_path, f'{base}_hbo_raw.fif'))
if raw_h.info['sfreq'] == sfreq / 2:
raw_h.resample(sfreq)
assert raw_h.info['sfreq'] == sfreq
events.append(_make_design(raw_h, 'block', subject, run)[2])
raws.append(raw_h)
bads = sorted(set(sum((r.info['bads'] for r in raws), [])))
for r in raws:
r.info['bads'] = bads
raw_h, events = mne.concatenate_raws(raws, events_list=events)
epochs = mne.Epochs(raw_h, events, event_id, tmin=tmin, tmax=tmax,
baseline=baseline)
this_ev = [epochs[condition].average() for condition in conditions]
assert all(ev.nave > 0 for ev in this_ev)
mne.write_evokeds(fname, this_ev)
print(f'{time.time() - t0:0.1f} sec')
for condition in conditions:
evokeds[condition][subject] = mne.read_evokeds(fname, condition)
# Get behavioral data
beh = openpyxl.load_workbook(behavioral_path).worksheets[0]
assert beh.cell(1, 7).value == 'pMcGurk'
beh_kinds = ('="-9dB SNR ii"', '="-6dB SNR ii"', 'pMcGurk')
beh_short = {
'="-6dB SNR ii"': '-6',
'="-9dB SNR ii"': '-9',
'pMcGurk': 'pM',
}
for bi, b in enumerate(beh_kinds, 5):
assert beh.cell(1, bi).value == b, b
behs = dict()
for ri in range(2, 10000):
subject = beh.cell(ri, 1).value
if subject is None:
break
subject = str(int(subject))
if subject == '6030':
continue # not used
if subject == '6006' and exp_name == 'mcgurk':
continue # excluded
assert subject in subjects
behs[subject] = dict((b, beh.cell(ri, bi).value)
for bi, b in enumerate(beh_kinds, 5))
if subject == '6023':
behs['6023']['="-6dB SNR ii"'] = np.nan
behs[subject] = dict((key, float(val)) for key, val in behs[subject].items())
assert set(behs) == set(subjects)
# Exclude bad channels
bad = dict()
for subject in subjects:
for run in runs:
base = f'{subject}_{run:03d}'
this_info = mne.io.read_info(
op.join(proc_path, f'{base}_hbo_raw.fif'))
bad[(subject, run)] = sorted(
this_info['ch_names'].index(bad) for bad in this_info['bads'])
assert np.in1d(bad[(subject, run)], np.arange(len(use['h'].ch_names))).all() # noqa: E501
# make life easier by combining across runs
bad_combo = dict()
for (subject, run), bb in bad.items():
bad_combo[subject] = sorted(set(bad_combo.get(subject, [])) | set(bb))
bad = bad_combo
assert set(bad) == set(subjects)
start = len(df_cha)
n_drop = 0
for subject, bb in bad.items():
if not len(bb):
continue
drop_names = [use['h'].ch_names[b] for b in bb]
is_subject = (df_cha['subject'] == subject)
assert len(is_subject) == len(df_cha)
# is_run = (df_cha['run'] == run)
drop = df_cha.index[
is_subject &
# is_run &
np.in1d(df_cha['ch_name'], drop_names)]
n_drop += len(drop)
if len(drop):
print(f'Dropping {len(drop)} for {subject}') # {run}')
df_cha.drop(drop, inplace=True)
end = len(df_cha)
assert n_drop == start - end, (n_drop, start - end)
# combine runs by averaging estimates
sorts = ['subject', 'ch_name', 'Chroma', 'Condition', 'run']
df_cha.sort_values(
sorts, inplace=True)
assert (np.array(df_cha['run']).reshape(-1, 2) == runs).all()
theta = np.array(df_cha['theta']).reshape(-1, len(runs)).mean(-1)
df_cha.drop(
[col for col in df_cha.columns if col not in sorts[:-1]], axis='columns',
inplace=True)
df_cha.reset_index(drop=True, inplace=True)
df_cha = df_cha[::len(runs)]
df_cha.reset_index(drop=True, inplace=True)
df_cha['theta'] = theta
def _mixed_df(ch_summary):
    """Fit a per-channel mixed-effects model and return its results table.

    Fits ``theta ~ -1 + ch_name:Condition`` (no intercept, so one fixed
    effect per channel-by-condition cell) with subject as the grouping
    (random-effect) factor, converts the fit to an MNE-NIRS results
    DataFrame, attaches the raw p-values, and drops ``[constant]`` rows.
    """
    fit = smf.mixedlm(
        "theta ~ -1 + ch_name:Condition",
        ch_summary, groups=ch_summary["subject"]).fit(method='powell')
    results = mne_nirs.statistics.statsmodels_to_results(fit)
    results['P>|z|'] = fit.pvalues
    constant_rows = [idx for idx in results.index if '[constant]' in idx]
    results.drop(constant_rows, inplace=True)
    return results
times = evokeds[conditions[0]][subjects[0]].times
info = evokeds[conditions[0]][subjects[0]].info
# Run group level model and convert to dataframe
use_lim = [0, 100] # [0, 100]
lims = np.percentile([b['pMcGurk'] for b in behs.values()], use_lim)
use_subjects = [subj for subj in subjects
if lims[0] <= behs[subj]['pMcGurk'] <= lims[1]]
ch_summary = df_cha.query("Chroma in ['hbo']").copy()
ch_summary_use = df_cha.query(
f"Chroma in ['hbo'] and subject in {use_subjects}").copy()
ch_model_df = _mixed_df(ch_summary_use)
print(f'Correcting for {len(ch_model_df["P>|z|"])} comparisons using FDR')
assert len(ch_model_df['P>|z|']) == len(ch_names) * len(conditions)
_, ch_model_df['P_fdr'] = mne.stats.fdr_correction(
ch_model_df['P>|z|'], method='indep')
sig_chs = dict()
zs = dict()
for condition in conditions:
sig_df = ch_model_df[
(ch_model_df['P_fdr'] < 0.05) &
(ch_model_df['Condition'] == condition)]
sig_chs[condition] = sorted(
(use['h'].ch_names.index(row[1]['ch_name']), row[1]['P_fdr'])
for row in sig_df.iterrows())
ch_model_df[ch_model_df['Condition'] == condition]
zs[condition] = np.array([
ch_model_df[(ch_model_df['Condition'] == condition) &
(ch_model_df['ch_name'] == ch_name)]['z'][0]
for ch_name in info['ch_names'][::2]], float)
assert zs[condition].shape == (52,)
assert np.isfinite(zs[condition]).all()
def _plot_sig_chs(sigs, ax):
    """Draw the hbo sensor layout on *ax*, highlighting channels in *sigs*.

    *sigs* is a list of channel indices, or of ``(index, p_value)`` tuples
    (only the indices are used).  Highlighted sensors are recolored green,
    the remainder faint black, and each highlighted sensor keeps a label
    showing its 1-based channel number.
    """
    if sigs and isinstance(sigs[0], tuple):
        sigs = [pair[0] for pair in sigs]
    rest = np.setdiff1d(np.arange(info['nchan']), sigs)
    mne.viz.plot_sensors(
        info, 'topomap', 'hbo', title='', axes=ax,
        show_names=True, ch_groups=[sigs, rest])
    scatter = ax.collections[0]
    scatter.set(lw=0)
    colors = scatter.get_facecolor()
    # Remap the two default group colors: dark red -> faint black,
    # dark blue -> semi-transparent green.
    colors[(colors[:, :3] == (0.5, 0, 0)).all(-1)] = (0., 0., 0., 0.1)
    colors[(colors[:, :3] == (0, 0, 0.5)).all(-1)] = (0., 1., 0., 0.5)
    scatter.set_facecolor(colors)
    keep_names = [info['ch_names'][idx] for idx in sigs]
    matched = []
    for label in list(ax.texts):
        name = label.get_text()
        if name not in keep_names:
            label.remove()
            continue
        idx = keep_names.index(name)
        matched.append(idx)
        label.set_text(f'{sigs[idx] // 2 + 1}')
        label.set(fontsize='xx-small', zorder=5, ha='center')
    assert len(matched) == len(sigs), (matched, list(sigs))
def _plot_sigs(sig_chs, all_corrs=()):
    """Plot grand-average time courses for each significant channel.

    One row per condition; one column per significant channel plus a final
    column showing the sensor montage.  Each subplot shows the across-subject
    mean HbO (red) and HbR (blue) traces with 95% t-based confidence bands,
    annotated with the channel number, FDR-corrected p-value, and any
    significant Kendall-tau correlations with behavior.

    Parameters
    ----------
    sig_chs : dict
        Maps condition name to a list of ``(channel_index, p_value)`` tuples.
    all_corrs : iterable
        Behavior kinds whose correlations are always printed, even when not
        significant at p < 0.05.

    Returns
    -------
    fig : matplotlib Figure

    Notes
    -----
    Relies on module-level state: ``conditions``, ``times``, ``evokeds``,
    ``use_subjects``, ``bad``, ``ch_summary_use``, ``use``, ``subjects``,
    ``beh_kinds``, ``beh_short``, ``behs``, and ``_plot_sig_chs``.
    """
    # Widest row (most significant channels) + 1 montage column.
    n_col = max(len(x) for x in sig_chs.values()) + 1
    n_row = len(conditions)
    figsize = (n_col * 1.0, n_row * 1.0)
    fig, axes = plt.subplots(
        n_row, n_col, figsize=figsize, constrained_layout=True, squeeze=False)
    h_colors = {0: 'r', 1: 'b'}  # 0 -> HbO (red), 1 -> HbR (blue)
    xticks = [0, 10, 20, 30]
    ylim = [-0.1, 0.15]
    yticks = [-0.1, -0.05, 0, 0.05, 0.1]
    xlim = [times[0], 35]
    ylim = np.array(ylim)
    yticks = np.array(yticks)
    for ci, condition in enumerate(conditions):
        ii = 0
        sigs = sig_chs[condition]
        if len(sigs) == 0:
            # Placeholder so the inner loop still formats one (empty) axis.
            sigs = [(None, None)]
        for ii, (ch_idx, ch_p) in enumerate(sigs):
            ax = axes[ci, ii]
            if ch_idx is not None:
                for jj in range(2):  # HbO, HbR
                    color = h_colors[jj]
                    # Per-subject evoked data (scaled to microunits),
                    # excluding subjects for whom this channel is bad.
                    a = 1e6 * np.array(
                        [evokeds[condition][subject].data[ch_idx + jj]
                         for subject in use_subjects
                         if ch_idx + jj not in bad.get(subject, [])], float)
                    m = np.mean(a, axis=0)
                    # 95% confidence band from the t distribution.
                    lower, upper = stats.t.interval(
                        0.95, len(a) - 1, loc=m, scale=stats.sem(a, axis=0))
                    ax.fill_between(
                        times, lower, upper, facecolor=color,
                        edgecolor='none', lw=0, alpha=0.25, zorder=3,
                        clip_on=False)
                    ax.plot(times, m, color=color, lw=1, zorder=4,
                            clip_on=False)
                # Correlations
                this_df = ch_summary_use.query(
                    f'ch_name == {repr(use["h"].ch_names[ch_idx])} and '
                    f'Chroma == "hbo" and '
                    f'Condition == {repr(condition)}')
                assert 8 <= len(this_df) <= len(subjects), len(this_df)
                a = np.array(this_df['theta'])
                cs = list()
                for kind in beh_kinds:
                    # Behavioral scores for the subjects in this query.
                    b = np.array([behs[subject][kind]
                              for subject in this_df['subject']])
                    mask = np.isfinite(b)  # drop NaN behavioral entries
                    assert 8 <= mask.sum() <= len(subjects)
                    r, p = stats.kendalltau(a[mask], b[mask])
                    if p < 0.05 or kind in all_corrs:
                        cs.append(f'{beh_short[kind]}: τ{r:+0.2f} p{p:0.2f}')
                if len(cs):
                    cs = [''] + cs  # leading '' yields a newline in the join
                c = '\n'.join(cs)
                ax.text(times[-1], ylim[1],
                        f'ch{ch_idx // 2 + 1}\np={ch_p:0.5f}{c}',
                        ha='right', va='top', fontsize='x-small')
            # Reference lines: stimulus offset (20 s) and zero amplitude.
            ax.axvline(20, ls=':', color='0.5', zorder=2, lw=1)
            ax.axhline(0, ls='-', color='k', zorder=2, lw=0.5)
            ax.set(xticks=xticks, yticks=yticks)
            ax.set(xlim=xlim, ylim=ylim)
            for key in ('top', 'right'):
                ax.spines[key].set_visible(False)
            if ax.get_subplotspec().is_last_row():
                ax.set(xlabel='Time (sec)')
            else:
                ax.set_xticklabels([''] * len(xticks))
            if ax.get_subplotspec().is_first_col():
                ax.set_ylabel(condition)
            else:
                ax.set_yticklabels([''] * len(yticks))
            # NOTE(review): spine hiding is repeated from above; harmless.
            for key in ('top', 'right'):
                ax.spines[key].set_visible(False)
        # Remove unused axes between the last channel and the montage column.
        for ii in range(ii + 1, n_col - 1):
            fig.delaxes(axes[ci, ii])
        # montage
        ax = axes[ci, -1]
        if sigs[0][0] is None:
            fig.delaxes(ax)
        else:
            # plot montage
            _plot_sig_chs(sigs, ax)
    return fig
fig = _plot_sigs(sig_chs)
for ext in ('png', 'svg'):
fig.savefig(op.join(results_path, f'stats_{exp_name}.{ext}'))
###############################################################################
# Source space projection
info = use['h'].copy().pick_types(fnirs='hbo', exclude=()).info
info['bads'] = []
assert tuple(zs) == conditions
evoked = mne.EvokedArray(np.array(list(zs.values())).T, info)
picks = np.arange(len(evoked.ch_names))
for ch in evoked.info['chs']:
assert ch['coord_frame'] == mne.io.constants.FIFF.FIFFV_COORD_HEAD
stc = mne.stc_near_sensors(
evoked, trans='fsaverage', subject='fsaverage', mode='weighted',
distance=0.02, project=True, picks=picks)
# Split channel indices by left lat, posterior, right lat:
# num_map = {name: str(ii) for ii, name in enumerate(evoked.ch_names)}
# evoked.copy().rename_channels(num_map).plot_sensors(show_names=True)
view_map = [np.arange(19), np.arange(19, 33), np.arange(33, 52)]
surf = mne.read_bem_surfaces(
mne.utils.get_subjects_dir() +
'/fsaverage/bem/fsaverage-5120-5120-5120-bem.fif', s_id=1) # brain
for ci, condition in enumerate(conditions):
this_sig = [v[0] // 2 for v in sig_chs[condition]]
assert np.in1d(this_sig, np.arange(52)).all()
pos = np.array([info['chs'][idx]['loc'][:3] for idx in this_sig])
pos.shape = (-1, 3) # can be empty
# head->MRI
trans = mne.transforms._get_trans('fsaverage', 'head', 'mri')[0]
# project to brain surface
pos = mne.transforms.apply_trans(trans, pos) # now in MRI coords
pos = mne.surface._project_onto_surface(pos, surf, project_rrs=True)[2]
# plot
brain = stc.plot(hemi='both', views=['lat', 'frontal', 'lat'],
initial_time=evoked.times[ci], cortex='low_contrast',
time_viewer=False, show_traces=False,
surface='pial', smoothing_steps=0, size=(1200, 400),
clim=dict(kind='value', pos_lims=[0., 1.25, 2.5]),
colormap='RdBu_r', view_layout='horizontal',
colorbar=(0, 1), time_label='', background='w',
brain_kwargs=dict(units='m'),
add_data_kwargs=dict(colorbar_kwargs=dict(
title_font_size=24, label_font_size=24, n_labels=5,
title='z score')))
brain.show_view('lat', hemi='lh', row=0, col=0)
brain.show_view(azimuth=270, elevation=90, row=0, col=1)
# significant channel white text overlay
pl = brain.plotter
used = np.zeros(len(this_sig))
for vi in range(3):
this_idx = np.where(np.in1d(this_sig, view_map[vi]))[0]
assert not used[this_idx].any()
used[this_idx] = True
pl.subplot(0, vi)
vp = pl.renderer # subclass of vtkViewport
for idx in this_idx:
ch_pos = pos[idx]
vp.SetWorldPoint(np.r_[ch_pos, 1.])
vp.WorldToDisplay()
ch_pos = (np.array(vp.GetDisplayPoint()[:2]) -
np.array(vp.GetOrigin()))
actor = pl.add_text(
str(this_sig[idx] + 1), ch_pos,
font_size=12, color=(1., 1., 1.))
prop = actor.GetTextProperty()
prop.SetVerticalJustificationToCentered()
prop.SetJustificationToCentered()
actor.SetTextProperty(prop)
prop.SetBold(True)
assert used.all()
brain.show_view('lat', hemi='rh', row=0, col=2)
plt.imsave(
op.join(results_path, f'brain_{exp_name}_{condition}.png'), pl.image)
brain.close()
| [
"mne.stats.fdr_correction",
"mne.preprocessing.nirs.tddr",
"mne.pick_types",
"mne.surface._project_onto_surface",
"openpyxl.load_workbook",
"numpy.ones",
"mne.preprocessing.nirs.beer_lambert_law",
"os.path.isfile",
"numpy.mean",
"numpy.arange",
"scipy.stats.kendalltau",
"os.path.join",
"pand... | [((1347, 1391), 'os.path.join', 'op.join', (['"""data"""', '"""NIRx behavioral data.xlsx"""'], {}), "('data', 'NIRx behavioral data.xlsx')\n", (1354, 1391), True, 'import os.path as op\n'), ((1441, 1481), 'os.makedirs', 'os.makedirs', (['results_path'], {'exist_ok': '(True)'}), '(results_path, exist_ok=True)\n', (1452, 1481), False, 'import os\n'), ((1482, 1519), 'os.makedirs', 'os.makedirs', (['proc_path'], {'exist_ok': '(True)'}), '(proc_path, exist_ok=True)\n', (1493, 1519), False, 'import os\n'), ((6926, 6987), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(6.0, 3)', 'constrained_layout': '(True)'}), '(2, 1, figsize=(6.0, 3), constrained_layout=True)\n', (6938, 6987), True, 'import matplotlib.pyplot as plt\n'), ((7735, 7756), 'numpy.array', 'np.array', (['[-0.5, 0.5]'], {}), '([-0.5, 0.5])\n', (7743, 7756), True, 'import numpy as np\n'), ((8401, 8415), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8413, 8415), True, 'import pandas as pd\n'), ((15117, 15179), 'mne.stats.fdr_correction', 'mne.stats.fdr_correction', (["ch_model_df['P>|z|']"], {'method': '"""indep"""'}), "(ch_model_df['P>|z|'], method='indep')\n", (15141, 15179), False, 'import mne\n'), ((21103, 21235), 'mne.stc_near_sensors', 'mne.stc_near_sensors', (['evoked'], {'trans': '"""fsaverage"""', 'subject': '"""fsaverage"""', 'mode': '"""weighted"""', 'distance': '(0.02)', 'project': '(True)', 'picks': 'picks'}), "(evoked, trans='fsaverage', subject='fsaverage', mode=\n 'weighted', distance=0.02, project=True, picks=picks)\n", (21123, 21235), False, 'import mne\n'), ((213, 249), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (236, 249), False, 'import warnings\n'), ((255, 301), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'FutureWarning'], {}), "('ignore', FutureWarning)\n", (276, 301), False, 'import warnings\n'), ((4774, 4808), 'mne.events_from_annotations', 
'mne.events_from_annotations', (['raw_h'], {}), '(raw_h)\n', (4801, 4808), False, 'import mne\n'), ((5000, 5022), 'numpy.zeros', 'np.zeros', (['(n_times, 4)'], {}), '((n_times, 4))\n', (5008, 5022), True, 'import numpy as np\n'), ((5273, 5298), 'numpy.bincount', 'np.bincount', (['events[:, 2]'], {}), '(events[:, 2])\n', (5284, 5298), True, 'import numpy as np\n'), ((5310, 5337), 'numpy.array_equal', 'np.array_equal', (['count', 'want'], {}), '(count, want)\n', (5324, 5337), True, 'import numpy as np\n'), ((6445, 6595), 'pandas.DataFrame', 'pd.DataFrame', (["{'trial_type': [conditions[ii] for ii in idx[1]], 'onset': idx[0] / raw_h.\n info['sfreq'], 'duration': n_block / raw_h.info['sfreq']}"], {}), "({'trial_type': [conditions[ii] for ii in idx[1]], 'onset': idx\n [0] / raw_h.info['sfreq'], 'duration': n_block / raw_h.info['sfreq']})\n", (6457, 6595), True, 'import pandas as pd\n'), ((6625, 6744), 'nilearn.glm.first_level.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_h.times', 'dm_events'], {'hrf_model': '"""glover"""', 'drift_model': '"""polynomial"""', 'drift_order': '(0)'}), "(raw_h.times, dm_events, hrf_model='glover',\n drift_model='polynomial', drift_order=0)\n", (6655, 6744), False, 'from nilearn.glm.first_level import make_first_level_design_matrix, compute_regressor\n'), ((7888, 7900), 'numpy.ptp', 'np.ptp', (['data'], {}), '(data)\n', (7894, 7900), True, 'import numpy as np\n'), ((8453, 8499), 'os.path.join', 'op.join', (['proc_path', 'f"""{subject}_{exp_name}.h5"""'], {}), "(proc_path, f'{subject}_{exp_name}.h5')\n", (8460, 8499), True, 'import os.path as op\n'), ((9845, 9896), 'os.path.join', 'op.join', (['proc_path', 'f"""{subject}-{exp_name}-ave.fif"""'], {}), "(proc_path, f'{subject}-{exp_name}-ave.fif')\n", (9852, 9896), True, 'import os.path as op\n'), ((15574, 15738), 'numpy.array', 'np.array', (["[ch_model_df[(ch_model_df['Condition'] == condition) & (ch_model_df[\n 'ch_name'] == ch_name)]['z'][0] for ch_name in 
info['ch_names'][::2]]", 'float'], {}), "([ch_model_df[(ch_model_df['Condition'] == condition) & (\n ch_model_df['ch_name'] == ch_name)]['z'][0] for ch_name in info[\n 'ch_names'][::2]], float)\n", (15582, 15738), True, 'import numpy as np\n'), ((16034, 16140), 'mne.viz.plot_sensors', 'mne.viz.plot_sensors', (['info', '"""topomap"""', '"""hbo"""'], {'title': '""""""', 'axes': 'ax', 'show_names': '(True)', 'ch_groups': 'ch_groups'}), "(info, 'topomap', 'hbo', title='', axes=ax, show_names=\n True, ch_groups=ch_groups)\n", (16054, 16140), False, 'import mne\n'), ((17018, 17105), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_row', 'n_col'], {'figsize': 'figsize', 'constrained_layout': '(True)', 'squeeze': '(False)'}), '(n_row, n_col, figsize=figsize, constrained_layout=True,\n squeeze=False)\n', (17030, 17105), True, 'import matplotlib.pyplot as plt\n'), ((17274, 17288), 'numpy.array', 'np.array', (['ylim'], {}), '(ylim)\n', (17282, 17288), True, 'import numpy as np\n'), ((17302, 17318), 'numpy.array', 'np.array', (['yticks'], {}), '(yticks)\n', (17310, 17318), True, 'import numpy as np\n'), ((21453, 21466), 'numpy.arange', 'np.arange', (['(19)'], {}), '(19)\n', (21462, 21466), True, 'import numpy as np\n'), ((21468, 21485), 'numpy.arange', 'np.arange', (['(19)', '(33)'], {}), '(19, 33)\n', (21477, 21485), True, 'import numpy as np\n'), ((21487, 21504), 'numpy.arange', 'np.arange', (['(33)', '(52)'], {}), '(33, 52)\n', (21496, 21504), True, 'import numpy as np\n'), ((21802, 21861), 'numpy.array', 'np.array', (["[info['chs'][idx]['loc'][:3] for idx in this_sig]"], {}), "([info['chs'][idx]['loc'][:3] for idx in this_sig])\n", (21810, 21861), True, 'import numpy as np\n'), ((22028, 22066), 'mne.transforms.apply_trans', 'mne.transforms.apply_trans', (['trans', 'pos'], {}), '(trans, pos)\n', (22054, 22066), False, 'import mne\n'), ((1708, 1740), 'os.path.join', 'op.join', (['raw_path', 'root', 'subject'], {}), '(raw_path, root, subject)\n', (1715, 1740), True, 'import 
os.path as op\n'), ((1942, 1969), 'mne.io.read_raw_nirx', 'mne.io.read_raw_nirx', (['fname'], {}), '(fname)\n', (1962, 1969), False, 'import mne\n'), ((1987, 2057), 'mne.preprocessing.nirs.optical_density', 'mne.preprocessing.nirs.optical_density', (['raw_intensity'], {'verbose': '"""error"""'}), "(raw_intensity, verbose='error')\n", (2025, 2057), False, 'import mne\n'), ((2288, 2339), 'mne.preprocessing.nirs.scalp_coupling_index', 'mne.preprocessing.nirs.scalp_coupling_index', (['raw_od'], {}), '(raw_od)\n', (2331, 2339), False, 'import mne\n'), ((2810, 2822), 'mne.preprocessing.nirs.tddr', 'tddr', (['raw_od'], {}), '(raw_od)\n', (2814, 2822), False, 'from mne.preprocessing.nirs import tddr\n'), ((2939, 2983), 'mne.pick_types', 'mne.pick_types', (['raw_tddr_bp.info'], {'fnirs': '(True)'}), '(raw_tddr_bp.info, fnirs=True)\n', (2953, 2983), False, 'import mne\n'), ((3135, 3192), 'mne.preprocessing.nirs.beer_lambert_law', 'mne.preprocessing.nirs.beer_lambert_law', (['raw_tddr_bp', '(6.0)'], {}), '(raw_tddr_bp, 6.0)\n', (3174, 3192), False, 'import mne\n'), ((3602, 3640), 'mne.pick_types', 'mne.pick_types', (['raw_h.info'], {'fnirs': '(True)'}), '(raw_h.info, fnirs=True)\n', (3616, 3640), False, 'import mne\n'), ((5436, 5458), 'numpy.diff', 'np.diff', (['events[:2, 0]'], {}), '(events[:2, 0])\n', (5443, 5458), True, 'import numpy as np\n'), ((6325, 6350), 'numpy.ceil', 'np.ceil', (['(duration * sfreq)'], {}), '(duration * sfreq)\n', (6332, 6350), True, 'import numpy as np\n'), ((8211, 8262), 'os.path.join', 'op.join', (['results_path', 'f"""figure_1_{exp_name}.{ext}"""'], {}), "(results_path, f'figure_1_{exp_name}.{ext}')\n", (8218, 8262), True, 'import os.path as op\n'), ((8511, 8527), 'os.path.isfile', 'op.isfile', (['fname'], {}), '(fname)\n', (8520, 8527), True, 'import os.path as op\n'), ((8548, 8562), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8560, 8562), True, 'import pandas as pd\n'), ((8576, 8587), 'time.time', 'time.time', ([], {}), '()\n', 
(8585, 8587), False, 'import time\n'), ((9595, 9613), 'pandas.read_hdf', 'pd.read_hdf', (['fname'], {}), '(fname)\n', (9606, 9613), True, 'import pandas as pd\n'), ((9917, 9933), 'os.path.isfile', 'op.isfile', (['fname'], {}), '(fname)\n', (9926, 9933), True, 'import os.path as op\n'), ((10005, 10016), 'time.time', 'time.time', ([], {}), '()\n', (10014, 10016), False, 'import time\n'), ((10686, 10732), 'mne.concatenate_raws', 'mne.concatenate_raws', (['raws'], {'events_list': 'events'}), '(raws, events_list=events)\n', (10706, 10732), False, 'import mne\n'), ((10750, 10826), 'mne.Epochs', 'mne.Epochs', (['raw_h', 'events', 'event_id'], {'tmin': 'tmin', 'tmax': 'tmax', 'baseline': 'baseline'}), '(raw_h, events, event_id, tmin=tmin, tmax=tmax, baseline=baseline)\n', (10760, 10826), False, 'import mne\n'), ((10989, 11022), 'mne.write_evokeds', 'mne.write_evokeds', (['fname', 'this_ev'], {}), '(fname, this_ev)\n', (11006, 11022), False, 'import mne\n'), ((11140, 11174), 'mne.read_evokeds', 'mne.read_evokeds', (['fname', 'condition'], {}), '(fname, condition)\n', (11156, 11174), False, 'import mne\n'), ((11205, 11244), 'openpyxl.load_workbook', 'openpyxl.load_workbook', (['behavioral_path'], {}), '(behavioral_path)\n', (11227, 11244), False, 'import openpyxl\n'), ((20624, 20672), 'os.path.join', 'op.join', (['results_path', 'f"""stats_{exp_name}.{ext}"""'], {}), "(results_path, f'stats_{exp_name}.{ext}')\n", (20631, 20672), True, 'import os.path as op\n'), ((21540, 21568), 'mne.utils.get_subjects_dir', 'mne.utils.get_subjects_dir', ([], {}), '()\n', (21566, 21568), False, 'import mne\n'), ((21930, 21983), 'mne.transforms._get_trans', 'mne.transforms._get_trans', (['"""fsaverage"""', '"""head"""', '"""mri"""'], {}), "('fsaverage', 'head', 'mri')\n", (21955, 21983), False, 'import mne\n'), ((22098, 22160), 'mne.surface._project_onto_surface', 'mne.surface._project_onto_surface', (['pos', 'surf'], {'project_rrs': '(True)'}), '(pos, surf, project_rrs=True)\n', (22131, 
22160), False, 'import mne\n'), ((24034, 24092), 'os.path.join', 'op.join', (['results_path', 'f"""brain_{exp_name}_{condition}.png"""'], {}), "(results_path, f'brain_{exp_name}_{condition}.png')\n", (24041, 24092), True, 'import os.path as op\n'), ((2414, 2432), 'numpy.where', 'np.where', (['sci_mask'], {}), '(sci_mask)\n', (2422, 2432), True, 'import numpy as np\n'), ((3776, 3817), 'os.path.join', 'op.join', (['proc_path', 'f"""{base}_hbo_raw.fif"""'], {}), "(proc_path, f'{base}_hbo_raw.fif')\n", (3783, 3817), True, 'import os.path as op\n'), ((5802, 5827), 'numpy.bincount', 'np.bincount', (['events[:, 2]'], {}), '(events[:, 2])\n', (5813, 5827), True, 'import numpy as np\n'), ((6388, 6409), 'numpy.ones', 'np.ones', (['(n_block, 1)'], {}), '((n_block, 1))\n', (6395, 6409), True, 'import numpy as np\n'), ((12322, 12363), 'os.path.join', 'op.join', (['proc_path', 'f"""{base}_hbo_raw.fif"""'], {}), "(proc_path, f'{base}_hbo_raw.fif')\n", (12329, 12363), True, 'import os.path as op\n'), ((13153, 13191), 'numpy.in1d', 'np.in1d', (["df_cha['ch_name']", 'drop_names'], {}), "(df_cha['ch_name'], drop_names)\n", (13160, 13191), True, 'import numpy as np\n'), ((13975, 14067), 'statsmodels.formula.api.mixedlm', 'smf.mixedlm', (['"""theta ~ -1 + ch_name:Condition"""', 'ch_summary'], {'groups': "ch_summary['subject']"}), "('theta ~ -1 + ch_name:Condition', ch_summary, groups=ch_summary\n ['subject'])\n", (13986, 14067), True, 'import statsmodels.formula.api as smf\n'), ((15817, 15843), 'numpy.isfinite', 'np.isfinite', (['zs[condition]'], {}), '(zs[condition])\n', (15828, 15843), True, 'import numpy as np\n'), ((15997, 16021), 'numpy.arange', 'np.arange', (["info['nchan']"], {}), "(info['nchan'])\n", (16006, 16021), True, 'import numpy as np\n'), ((7373, 7392), 'numpy.where', 'np.where', (['(model > 0)'], {}), '(model > 0)\n', (7381, 7392), True, 'import numpy as np\n'), ((8768, 8809), 'os.path.join', 'op.join', (['proc_path', 'f"""{base}_hbo_raw.fif"""'], {}), "(proc_path, 
f'{base}_hbo_raw.fif')\n", (8775, 8809), True, 'import os.path as op\n'), ((9373, 9402), 'numpy.in1d', 'np.in1d', (["cha['ch_name']", 'bads'], {}), "(cha['ch_name'], bads)\n", (9380, 9402), True, 'import numpy as np\n'), ((10255, 10296), 'os.path.join', 'op.join', (['proc_path', 'f"""{base}_hbo_raw.fif"""'], {}), "(proc_path, f'{base}_hbo_raw.fif')\n", (10262, 10296), True, 'import os.path as op\n'), ((13623, 13648), 'numpy.array', 'np.array', (["df_cha['theta']"], {}), "(df_cha['theta'])\n", (13631, 13648), True, 'import numpy as np\n'), ((18757, 18783), 'numpy.array', 'np.array', (["this_df['theta']"], {}), "(this_df['theta'])\n", (18765, 18783), True, 'import numpy as np\n'), ((21771, 21784), 'numpy.arange', 'np.arange', (['(52)'], {}), '(52)\n', (21780, 21784), True, 'import numpy as np\n'), ((23161, 23192), 'numpy.in1d', 'np.in1d', (['this_sig', 'view_map[vi]'], {}), '(this_sig, view_map[vi])\n', (23168, 23192), True, 'import numpy as np\n'), ((2246, 2269), 'numpy.where', 'np.where', (['(peaks < 0.001)'], {}), '(peaks < 0.001)\n', (2254, 2269), True, 'import numpy as np\n'), ((13561, 13584), 'numpy.array', 'np.array', (["df_cha['run']"], {}), "(df_cha['run'])\n", (13569, 13584), True, 'import numpy as np\n'), ((17947, 17965), 'numpy.mean', 'np.mean', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (17954, 17965), True, 'import numpy as np\n'), ((18875, 18940), 'numpy.array', 'np.array', (["[behs[subject][kind] for subject in this_df['subject']]"], {}), "([behs[subject][kind] for subject in this_df['subject']])\n", (18883, 18940), True, 'import numpy as np\n'), ((19002, 19016), 'numpy.isfinite', 'np.isfinite', (['b'], {}), '(b)\n', (19013, 19016), True, 'import numpy as np\n'), ((19104, 19138), 'scipy.stats.kendalltau', 'stats.kendalltau', (['a[mask]', 'b[mask]'], {}), '(a[mask], b[mask])\n', (19120, 19138), False, 'from scipy import signal, stats\n'), ((9539, 9550), 'time.time', 'time.time', ([], {}), '()\n', (9548, 9550), False, 'import time\n'), ((11040, 11051), 
'time.time', 'time.time', ([], {}), '()\n', (11049, 11051), False, 'import time\n'), ((18074, 18094), 'scipy.stats.sem', 'stats.sem', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (18083, 18094), False, 'from scipy import signal, stats\n')] |
import os
from glob import glob
import numpy as np
import tensorlayer as tl
from predict_M1.model import *
import tensorflow as tf
# Restrict TensorFlow to the first GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Command-line flags (TF1 tf.app.flags); values are read via FLAGS.<name>.
flags = tf.app.flags
flags.DEFINE_integer("image_size", 64, "The size of the images [64]")
flags.DEFINE_string("load_dir", "pretrained", "Directory name containing .npz [checkpoint]")
flags.DEFINE_string("load_npz", "net.npz", "The name of npz to load [net]")
FLAGS = flags.FLAGS
def predict(data_dir):
    """Score every ``*.jpg`` in *data_dir* with the pretrained classifier.

    Loads the checkpoint, runs each image through the model, and writes one
    ``<basename>,<softmax_prob_of_class_1>`` line per image (sorted by file
    name) to ``gan_vs_others.txt`` in the current working directory.

    Parameters
    ----------
    data_dir : str
        Directory containing the ``.jpg`` images to score.

    Raises
    ------
    ValueError
        If the checkpoint directory does not exist.
    SystemExit
        If the checkpoint fails to load (via ``exit(1)``).
    """
    IMAGE_SIZE = FLAGS.image_size
    # NOTE(review): the --load_npz flag is defined but ignored; the checkpoint
    # file name is hard-coded here.  Kept as-is to preserve behavior.
    LOAD_NPZ = '19-01-06(17h31m)_12_32864_minErr.npz'
    cur_path = os.path.dirname(os.path.abspath(__file__))
    LOAD_DIR = os.path.join(cur_path, FLAGS.load_dir)
    # LOAD_DIR is absolute, so os.path.join discards cur_path here.
    net_name = os.path.join(cur_path, LOAD_DIR, LOAD_NPZ)
    if not tl.files.folder_exists(LOAD_DIR):
        raise ValueError("checkpoint_dir {} does not exist.".format(LOAD_DIR))
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        tl.layers.initialize_global_variables(sess)
        x = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3], name='x')
        # Label placeholder is unused at inference time but kept so the graph
        # matches the original definition.
        y = tf.placeholder(tf.int64, [None], name='y_')
        # Weights are restored into the training graph; inference reuses them.
        net_train = model(x, is_train=True, reuse=False)
        net = model(x, is_train=False, reuse=True)
        y_ = net.outputs
        if tl.files.load_and_assign_npz(sess=sess, name=net_name,
                                        network=net_train) is False:
            print("[*] Loading checkpoints FAILURE!")
            exit(1)
        print("[*] Loading checkpoints SUCCESS!")
        data_files = sorted(glob(os.path.join(data_dir, "*.jpg")))
        # `with` guarantees the output file is closed even if a read or the
        # session run raises (the original leaked the handle on error).
        with open('gan_vs_others.txt', 'w') as f:
            for data_file in data_files:
                images = tl.visualize.read_image(data_file)
                filename = os.path.splitext(os.path.basename(data_file))[0]
                images = images[np.newaxis, :]
                images = tl.prepro.threading_data(
                    images, tl.prepro.imresize, size=[IMAGE_SIZE, IMAGE_SIZE])
                pred = sess.run(tf.nn.softmax(y_), feed_dict={x: images})
                pred = np.squeeze(pred)
                f.write("%s,%.4f\n" % (filename, pred[1]))
f.close()
| [
"tensorlayer.files.folder_exists",
"tensorflow.nn.softmax",
"os.path.abspath",
"os.path.basename",
"tensorlayer.layers.initialize_global_variables",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"tensorlayer.visualize.read_image",
"numpy.squeeze",
"os.path.join",
"tensorlayer.files.load_an... | [((671, 703), 'os.path.join', 'os.path.join', (['cur_path', 'LOAD_DIR'], {}), '(cur_path, LOAD_DIR)\n', (683, 703), False, 'import os\n'), ((719, 761), 'os.path.join', 'os.path.join', (['cur_path', 'LOAD_DIR', 'LOAD_NPZ'], {}), '(cur_path, LOAD_DIR, LOAD_NPZ)\n', (731, 761), False, 'import os\n'), ((597, 622), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (612, 622), False, 'import os\n'), ((770, 802), 'tensorlayer.files.folder_exists', 'tl.files.folder_exists', (['LOAD_DIR'], {}), '(LOAD_DIR)\n', (792, 802), True, 'import tensorlayer as tl\n'), ((980, 1023), 'tensorlayer.layers.initialize_global_variables', 'tl.layers.initialize_global_variables', (['sess'], {}), '(sess)\n', (1017, 1023), True, 'import tensorlayer as tl\n'), ((1037, 1108), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, IMAGE_SIZE, IMAGE_SIZE, 3]'], {'name': '"""x"""'}), "(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 3], name='x')\n", (1051, 1108), True, 'import tensorflow as tf\n'), ((1121, 1164), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None]'], {'name': '"""y_"""'}), "(tf.int64, [None], name='y_')\n", (1135, 1164), True, 'import tensorflow as tf\n'), ((1311, 1384), 'tensorlayer.files.load_and_assign_npz', 'tl.files.load_and_assign_npz', ([], {'sess': 'sess', 'name': 'net_name', 'network': 'net_train'}), '(sess=sess, name=net_name, network=net_train)\n', (1339, 1384), True, 'import tensorlayer as tl\n'), ((1710, 1748), 'tensorlayer.visualize.read_image', 'tl.visualize.read_image', (['data_files[i]'], {}), '(data_files[i])\n', (1733, 1748), True, 'import tensorlayer as tl\n'), ((1889, 1976), 'tensorlayer.prepro.threading_data', 'tl.prepro.threading_data', (['images', 'tl.prepro.imresize'], {'size': '[IMAGE_SIZE, IMAGE_SIZE]'}), '(images, tl.prepro.imresize, size=[IMAGE_SIZE,\n IMAGE_SIZE])\n', (1913, 1976), True, 'import tensorlayer as tl\n'), ((2096, 2112), 'numpy.squeeze', 'np.squeeze', (['pred'], 
{}), '(pred)\n', (2106, 2112), True, 'import numpy as np\n'), ((920, 961), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (934, 961), True, 'import tensorflow as tf\n'), ((1571, 1602), 'os.path.join', 'os.path.join', (['data_dir', '"""*.jpg"""'], {}), "(data_dir, '*.jpg')\n", (1583, 1602), False, 'import os\n'), ((2037, 2054), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['y_'], {}), '(y_)\n', (2050, 2054), True, 'import tensorflow as tf\n'), ((1789, 1820), 'os.path.basename', 'os.path.basename', (['data_files[i]'], {}), '(data_files[i])\n', (1805, 1820), False, 'import os\n')] |
#!/usr/bin/env python
"""
This program implements the symbolic dynamic filtering (SDF) algorithm
as a time series feature extraction method, presented in the following paper:
Bahrampour, Soheil, <NAME>, <NAME>,
<NAME>, and <NAME>.
"Performance comparison of feature extraction algorithms for target detection
and classification." Pattern Recognition Letters 34, no. 16 (2013): 2126-2134.
Written by <NAME>, August 2012
Rewritten in Python by <NAME>, November 2015
Authors:
- <NAME>, August 2012
- <NAME>, 2015 (<EMAIL>) http://binarybottle.com
Copyright 2015, Sage Bionetworks (http://sagebase.org) Apache v2.0 License
"""
def max_entropy_partition(data, number_of_symbols):
"""
Perform maximum entropy partitioning on given data.
Parameters
----------
data : numpy array
number_of_symbols : integer
number of symbols for symbolic dynamic filtering method
Returns
-------
partition : numpy array
Examples
--------
>>> # Example checked against original Matlab code:
>>> import numpy as np
>>> from mhealthx.extractors.symbolic_dynamic_filtering import max_entropy_partition
>>> data = np.array([0.82487374, 0.21834812, 0.60166418, 0.76465689, 0.44819955, 0.72335342, 0.8710113, 0.73258881, 0.97047932, 0.5975058, 0.02474567, 0.38093561]) #np.random.random((3,4))
>>> number_of_symbols = 4
>>> partition = max_entropy_partition(data, number_of_symbols)
array([ 0.38093561, 0.60166418, 0.76465689])
"""
import numpy as np
if isinstance(data, np.ndarray):
pass
elif isinstance(data, list):
data = np.asarray(data)
else:
raise IOError("data should be a numpy array")
# Change into long vector:
data = data.flatten()
# Sort and partition data:
data = np.sort(data)
len_data = np.float(len(data))
#-------------------------------------------------------------------------
# This code follows the article, resulting in K+1 partitions
#-------------------------------------------------------------------------
#npartitions = number_of_symbols + 1
#partition = np.zeros(npartitions)
#partition[0] = data[0]
#partition[-1] = data[-1]
#
#for isymbol in range(1, number_of_symbols):
# partition[isymbol] = data[np.ceil(isymbol * len_data /
# number_of_symbols)]
#-------------------------------------------------------------------------
# This code matches the Matlab code output, resulting in K-1 partitions
#-------------------------------------------------------------------------
npartitions = number_of_symbols - 1
partition = np.zeros(npartitions)
for ipart in range(1, npartitions + 1):
partition[ipart - 1] = data[np.floor(ipart * len_data /
number_of_symbols) - 1]
return partition
def generate_symbol_sequence(data, partition):
"""
Generate symbol sequence of a given time series using given partition.
Parameters
----------
data : numpy array
partition : numpy array
Returns
-------
symbols : numpy array
Examples
--------
>>> # Example checked against original Matlab code:
>>> import numpy as np
>>> from mhealthx.extractors.symbolic_dynamic_filtering import generate_symbol_sequence, max_entropy_partition
>>> data = np.array([0.82487374, 0.21834812, 0.60166418, 0.76465689, 0.44819955, 0.72335342, 0.8710113, 0.73258881, 0.97047932, 0.5975058, 0.02474567, 0.38093561]) #np.random.random((3,4))
>>> number_of_symbols = 4
>>> partition = max_entropy_partition(data, number_of_symbols)
>>> symbols = generate_symbol_sequence(data, partition)
array([ 4., 1., 3., 4., 2., 3., 4., 3., 4., 2., 1., 2.])
"""
import numpy as np
partition = np.hstack((partition, np.Inf))
symbols = np.zeros(len(data))
for i1 in range(len(data)):
for i2 in range(len(partition)):
if partition[i2] > data[i1]:
symbols[i1] = i2 + 1
break
return symbols
def analyze_symbol_sequence(symbols, number_of_states, morph_matrix_flag):
    """
    Estimate the state transition probability ("morph") matrix of the
    probabilistic finite state automata, and its eigenvector
    corresponding to eigenvalue 1 by counting.
    NOTE: Currently the number of states is set to the number of symbols.
    Parameters
    ----------
    symbols : numpy array
        symbol sequence (1-based symbol labels, as produced by
        generate_symbol_sequence; may be a float array)
    number_of_states : integer
    morph_matrix_flag : Boolean
        if False, morph_matrix is returned as all zeros
    Returns
    -------
    morph_matrix : numpy array
        column-stochastic-style count matrix, row-normalized below
    pvec : numpy array
        normalized state visit frequencies (stationary distribution)
    Examples
    --------
    >>> # Example checked against original Matlab code:
    >>> import numpy as np
    >>> from mhealthx.extractors.symbolic_dynamic_filtering import analyze_symbol_sequence, generate_symbol_sequence, max_entropy_partition
    >>> data = np.array([0.82487374, 0.21834812, 0.60166418, 0.76465689, 0.44819955, 0.72335342, 0.8710113, 0.73258881, 0.97047932, 0.5975058, 0.02474567, 0.38093561]) #np.random.random((3,4))
    >>> number_of_symbols = 4
    >>> partition = max_entropy_partition(data, number_of_symbols)
    >>> symbols = generate_symbol_sequence(data, partition)
    >>> number_of_states = number_of_symbols
    >>> morph_matrix_flag = True
    >>> morph_matrix, pvec = analyze_symbol_sequence(symbols, number_of_states, morph_matrix_flag)
    array([[ 0.        ,  0.5       ,  0.        ,  0.5       ],
           [ 0.33333333,  0.        ,  0.        ,  0.66666667],
           [ 0.33333333,  0.33333333,  0.        ,  0.33333333],
           [ 0.        ,  0.        ,  1.        ,  0.        ]])
    array([ 0.18181818,  0.18181818,  0.27272727,  0.36363636])
    """
    import numpy as np
    morph_matrix = np.zeros((number_of_states, number_of_states))
    pvec = np.zeros(number_of_states)
    for isymbol in range(1, len(symbols)):
        # BUG FIX: symbols arrive as a float array from
        # generate_symbol_sequence; float values are not valid numpy
        # indices (IndexError on NumPy >= 1.12), so cast explicitly.
        index1 = int(symbols[isymbol - 1]) - 1
        index2 = int(symbols[isymbol]) - 1
        pvec[index1] += 1
        if morph_matrix_flag:
            morph_matrix[index2, index1] += 1
    # Normalize the computed vector:
    pvec = pvec / np.sum(pvec)
    # Normalize each row of Matrix to make it a stochastic matrix:
    if morph_matrix_flag:
        for istate in range(number_of_states):
            row_sum = np.sum(morph_matrix[istate, :])
            if row_sum == 0:
                # Unvisited row: fall back to the stationary vector so
                # the row still sums to 1.
                morph_matrix[istate, :] = pvec
            else:
                morph_matrix[istate, :] /= row_sum
    return morph_matrix, pvec
def sdf_features(data, number_of_symbols, pi_matrix_flag=False):
    """
    Extract symbolic dynamic filtering features from time series data.
    NOTE: Currently the number of states is set to the number of symbols.
    Parameters
    ----------
    data : numpy array
    number_of_symbols : integer
        number of symbols for symbolic dynamic filtering method
    pi_matrix_flag : Boolean
        feature as vectorized morph matrix (default: False)?
    Returns
    -------
    feature : numpy array
    Examples
    --------
    >>> # Example checked against original Matlab code:
    >>> import numpy as np
    >>> from mhealthx.extractors.symbolic_dynamic_filtering import sdf_features
    >>> data = np.array([0.82487374, 0.21834812, 0.60166418, 0.76465689, 0.44819955, 0.72335342, 0.8710113, 0.73258881, 0.97047932, 0.5975058, 0.02474567, 0.38093561]) #np.random.random((3,4))
    >>> number_of_symbols = 4
    >>> pi_matrix_flag = False
    >>> feature = sdf_features(data, number_of_symbols, pi_matrix_flag)
    array([ 0.18181818,  0.18181818,  0.27272727,  0.36363636])
    """
    import numpy as np
    from mhealthx.extractors.symbolic_dynamic_filtering import \
        max_entropy_partition, generate_symbol_sequence, \
        analyze_symbol_sequence
    # Partition the value range, then map each sample onto its symbol.
    symbols = generate_symbol_sequence(
        data, max_entropy_partition(data, number_of_symbols))
    # Estimate the morph matrix and the eigenvector for eigenvalue 1.
    morph_matrix, pvec = analyze_symbol_sequence(
        symbols, number_of_symbols, pi_matrix_flag)
    if pi_matrix_flag:
        # Feature as the vectorized (transposed) morph matrix.
        return np.transpose(morph_matrix).flatten()
    # Feature as the state transition probability vector.
    return pvec
| [
"numpy.sum",
"numpy.asarray",
"numpy.floor",
"numpy.zeros",
"numpy.transpose",
"numpy.hstack",
"mhealthx.extractors.symbolic_dynamic_filtering.analyze_symbol_sequence",
"numpy.sort",
"mhealthx.extractors.symbolic_dynamic_filtering.generate_symbol_sequence",
"mhealthx.extractors.symbolic_dynamic_fi... | [((1880, 1893), 'numpy.sort', 'np.sort', (['data'], {}), '(data)\n', (1887, 1893), True, 'import numpy as np\n'), ((2783, 2804), 'numpy.zeros', 'np.zeros', (['npartitions'], {}), '(npartitions)\n', (2791, 2804), True, 'import numpy as np\n'), ((4011, 4041), 'numpy.hstack', 'np.hstack', (['(partition, np.Inf)'], {}), '((partition, np.Inf))\n', (4020, 4041), True, 'import numpy as np\n'), ((6026, 6072), 'numpy.zeros', 'np.zeros', (['(number_of_states, number_of_states)'], {}), '((number_of_states, number_of_states))\n', (6034, 6072), True, 'import numpy as np\n'), ((6085, 6111), 'numpy.zeros', 'np.zeros', (['number_of_states'], {}), '(number_of_states)\n', (6093, 6111), True, 'import numpy as np\n'), ((8171, 8217), 'mhealthx.extractors.symbolic_dynamic_filtering.max_entropy_partition', 'max_entropy_partition', (['data', 'number_of_symbols'], {}), '(data, number_of_symbols)\n', (8192, 8217), False, 'from mhealthx.extractors.symbolic_dynamic_filtering import max_entropy_partition, generate_symbol_sequence, analyze_symbol_sequence\n'), ((8260, 8301), 'mhealthx.extractors.symbolic_dynamic_filtering.generate_symbol_sequence', 'generate_symbol_sequence', (['data', 'partition'], {}), '(data, partition)\n', (8284, 8301), False, 'from mhealthx.extractors.symbolic_dynamic_filtering import max_entropy_partition, generate_symbol_sequence, analyze_symbol_sequence\n'), ((8451, 8518), 'mhealthx.extractors.symbolic_dynamic_filtering.analyze_symbol_sequence', 'analyze_symbol_sequence', (['symbols', 'number_of_symbols', 'pi_matrix_flag'], {}), '(symbols, number_of_symbols, pi_matrix_flag)\n', (8474, 8518), False, 'from mhealthx.extractors.symbolic_dynamic_filtering import max_entropy_partition, generate_symbol_sequence, analyze_symbol_sequence\n'), ((6404, 6416), 'numpy.sum', 'np.sum', (['pvec'], {}), '(pvec)\n', (6410, 6416), True, 'import numpy as np\n'), ((8651, 8677), 'numpy.transpose', 'np.transpose', (['morph_matrix'], {}), 
'(morph_matrix)\n', (8663, 8677), True, 'import numpy as np\n'), ((1690, 1706), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1700, 1706), True, 'import numpy as np\n'), ((6585, 6616), 'numpy.sum', 'np.sum', (['morph_matrix[istate, :]'], {}), '(morph_matrix[istate, :])\n', (6591, 6616), True, 'import numpy as np\n'), ((2889, 2935), 'numpy.floor', 'np.floor', (['(ipart * len_data / number_of_symbols)'], {}), '(ipart * len_data / number_of_symbols)\n', (2897, 2935), True, 'import numpy as np\n')] |
# Author: <NAME>
# Date: Dec.07.2020
# Title: Graph-based Deep Generative Model version 3 (single batch only)
# Affiliation: JLK Genome Research Centre
from dataclasses import dataclass, field
from typing import List
from rdkit import Chem
from rdkit.Chem import rdmolfiles, rdmolops, AllChem
from rdkit.Chem.Scaffolds import MurckoScaffold
from collections import defaultdict
import itertools
import numpy as np
import dgl.backend as F
from enum import Enum, unique
from rdkit.Chem import BondType
import dgl
import torch
from torch.utils.data import Dataset, DataLoader
import pandas as pd
class AtomFeaturiser(object):
    """Encodes an RDKit molecule as per-atom categorical feature indices.

    Each atom attribute (element symbol, chirality, formal charge,
    aromaticity, explicit-hydrogen count) is represented by the position
    of the atom's value inside the corresponding allowed-value list.
    """

    def __init__(self, atom_types=None, chiral_types=None,
                 charge_types=None, aromatic_types=None,
                 implicit_hydrogen=None, hybridisation=None):
        self._atomic_types = (["C", "N", "O", "F", "P", "S", "Cl", "Br"]
                              if atom_types is None else atom_types)
        default_chirality = [Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
                             Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
                             Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
                             Chem.rdchem.ChiralType.CHI_OTHER]
        self._chiral_types = (default_chirality
                              if chiral_types is None else chiral_types)
        self._charge_types = ([0, 1, -1, 2, -2, 3, -3, 4, -4]
                              if charge_types is None else charge_types)
        self._aromatic_types = ([False, True]
                                if aromatic_types is None else aromatic_types)
        # NOTE(review): despite the parameter name, these values are matched
        # against Atom.GetNumExplicitHs() in __call__ — confirm intent.
        self._explicit_hydrogen = ([0, 1, 2, 3]
                                   if implicit_hydrogen is None
                                   else implicit_hydrogen)
        # `hybridisation` is accepted for interface compatibility but unused;
        # the original implementation kept hybridisation support disabled.

    def max_atom_type(self):
        """Return the number of recognised element symbols."""
        return len(self._atomic_types)

    def __call__(self, mol):
        """Featurise *mol* into a dict of int64 tensors, one per attribute."""
        rows = []
        for atom_index in range(mol.GetNumAtoms()):
            atom = mol.GetAtomWithIdx(atom_index)
            rows.append([
                self._atomic_types.index(atom.GetSymbol()),
                self._chiral_types.index(atom.GetChiralTag()),
                self._charge_types.index(atom.GetFormalCharge()),
                self._aromatic_types.index(atom.GetIsAromatic()),
                self._explicit_hydrogen.index(atom.GetNumExplicitHs()),
            ])
        packed = F.zerocopy_from_numpy(np.stack(rows).astype(np.uint8))
        field_names = ['atom_type', 'chirality_type', 'charge_type',
                       'aromatic_type', 'explicit_hydrogen']
        return {name: packed[:, column].long()
                for column, name in enumerate(field_names)}
class BondFeaturiser(object):
    """Encodes an RDKit molecule as per-bond categorical feature indices.

    Each chemical bond contributes two directed edges, so every bond's
    feature row is emitted twice (once per direction).
    """

    def __init__(self, bond_types=None, stereo_types=None, self_loop=False):
        """
        Parameters
        ----------
        bond_types : list, optional
            Allowed rdkit bond orders; defaults to single/double/triple.
        stereo_types : list, optional
            Allowed rdkit bond stereo configurations.
        self_loop : bool
            Whether molecules with zero bonds are acceptable. NOTE:
            self-loop feature construction itself is not implemented
            (see __call__).
        """
        if bond_types is None:
            bond_types = [
                Chem.rdchem.BondType.SINGLE,
                Chem.rdchem.BondType.DOUBLE,
                Chem.rdchem.BondType.TRIPLE
            ]
        self._bond_types = bond_types
        if stereo_types is None:
            stereo_types = [
                Chem.rdchem.BondStereo.STEREONONE, Chem.rdchem.BondStereo.STEREOANY,
                Chem.rdchem.BondStereo.STEREOCIS, Chem.rdchem.BondStereo.STEREOTRANS,
                Chem.rdchem.BondStereo.STEREOZ, Chem.rdchem.BondStereo.STEREOE
            ]
        self._stereo_types = stereo_types
        self._self_loop = self_loop

    def max_bond_type(self):
        """Return the number of recognised bond orders."""
        return len(self._bond_types)

    def __call__(self, mol):
        """Featurizes the input molecule.
        Parameters
        ----------
        mol : rdkit.Chem.rdchem.Mol
            RDKit molecule instance.
        Returns
        -------
        dict
            Mapping 'bond_type' and 'stereo_type' separately to an int64
            tensor with one entry per directed edge.
        Raises
        ------
        NotImplementedError
            If the molecule has no bonds (self-loop featurisation is not
            implemented).
        """
        edge_features = []
        num_bonds = mol.GetNumBonds()
        if num_bonds == 0:
            assert self._self_loop, \
                'The molecule has 0 bonds and we should set self._self_loop to True.'
            # BUG FIX: the original referenced an undefined name
            # ``self_loop_features`` here and crashed with NameError.
            # Fail with an explicit, actionable error instead.
            raise NotImplementedError(
                'Self-loop featurisation for molecules with 0 bonds is '
                'not implemented.')
        # Compute features for each bond, appended once per edge direction.
        for i in range(num_bonds):
            bond = mol.GetBondWithIdx(i)
            bond_feats = [
                self._bond_types.index(bond.GetBondType()),
                self._stereo_types.index(bond.GetStereo())
            ]
            edge_features.extend([bond_feats, bond_feats.copy()])
        edge_features = np.stack(edge_features)
        edge_features = F.zerocopy_from_numpy(edge_features.astype(np.uint8))
        return {
            'bond_type': edge_features[:, 0].long(),
            'stereo_type': edge_features[:, 1].long()
        }
| [
"numpy.stack"
] | [((3037, 3060), 'numpy.stack', 'np.stack', (['atom_features'], {}), '(atom_features)\n', (3045, 3060), True, 'import numpy as np\n'), ((5605, 5628), 'numpy.stack', 'np.stack', (['edge_features'], {}), '(edge_features)\n', (5613, 5628), True, 'import numpy as np\n')] |
from statistics import stdev, mean
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import trim_mean
from tqdm import tqdm
from scripts.vandv.graphs import plot_to_html_image
def __html_table_from_dataframe(df, term_style='{:.0%}', highlight_max=True, first_col=1):
    """Render *df* as styled HTML: hidden index, bordered cells, the
    columns from *first_col* onward formatted with *term_style*, and the
    per-row max (or min) highlighted.

    NOTE(review): Styler.hide_index() and Styler.render() were removed in
    pandas 2.0 (hide(axis='index') / to_html()) — this targets older pandas.
    """
    table_css = dict(selector='table', props=[('border-collapse', 'collapse')])
    cell_css = dict(selector='td', props=[('border', '2px solid black'),
                                          ('text-align', 'right'),
                                          ('padding-left', '15px'),
                                          ('padding-right', '15px')])
    styler = df.style.hide_index().set_table_styles([table_css, cell_css])
    for column in df.columns[first_col:]:
        styler = styler.format({column: term_style})
    styler = (styler.highlight_max(axis=1) if highlight_max
              else styler.highlight_min(axis=1))
    heading = '<style type="text/css">table {border-collapse: collapse;} </style>\n'
    return heading + styler.render()
def __create_df_from_results(results):
    """Transpose the results mapping into a DataFrame whose former index
    becomes a leading 'Terms' column."""
    frame = pd.DataFrame(results).T.reset_index(level=0)
    return frame.rename(columns={'index': 'Terms'})
def html_table(results):
    """Render the results mapping as a single styled HTML table."""
    frame = __create_df_from_results(results)
    return __html_table_from_dataframe(frame)
def trim_proportion(data, proportion_to_cut):
    """Return *data* with *proportion_to_cut* of the smallest and largest
    observations removed along axis 0 (order within the kept slice is not
    guaranteed).

    Parts taken from scipy.stats.stats.trim_mean, minus the averaging step.

    Raises
    ------
    ValueError
        If the proportion removes more than the whole sample.
    """
    n_obs = data.shape[0]
    cut = int(proportion_to_cut * n_obs)
    keep_upper = n_obs - cut
    if cut > keep_upper:
        raise ValueError("Proportion too big.")
    partitioned = np.partition(data, (cut, keep_upper - 1), 0)
    index = [slice(None)] * partitioned.ndim
    index[0] = slice(cut, keep_upper)
    return partitioned[tuple(index)]
def summary_html_table(results, trimmed_proportion_to_cut=0.1):
    """Build two HTML summary tables for *results*: one with the plain and
    trimmed means per column, one with the plain and trimmed standard
    deviations.

    Parameters
    ----------
    results : dict
        Mapping accepted by __create_df_from_results.
    trimmed_proportion_to_cut : float
        Fraction cut from each tail for the trimmed statistics.

    Returns
    -------
    str
        The mean table and the standard-deviation table, concatenated.
    """
    df_results = __create_df_from_results(results)
    means = {
        'Summary': '<b>Mean</b>'}
    for prediction_length in df_results.columns[1:]:
        means[prediction_length] = mean(df_results[prediction_length])
    summary_df = pd.DataFrame(means, index=[0])
    trimmed_means = {
        'Summary': f'<b>Trimmed ({trimmed_proportion_to_cut * 100.0:.0f}% cut) mean</b>'}
    for prediction_length in df_results.columns[1:]:
        trimmed_means[prediction_length] = trim_mean(df_results[prediction_length],
                                                    trimmed_proportion_to_cut)
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0; pd.concat with a one-row frame is the version-portable equivalent.
    summary_df = pd.concat([summary_df, pd.DataFrame([trimmed_means])],
                           ignore_index=True)
    summary_mean_table = __html_table_from_dataframe(summary_df)
    standard_deviations = {
        'Summary': '<b>Standard deviation</b>'}
    for prediction_length in df_results.columns[1:]:
        standard_deviations[prediction_length] = stdev(df_results[prediction_length])
    summary_df = pd.DataFrame(standard_deviations, index=[0])
    trimmed_standard_deviations = {
        'Summary': f'<b>Trimmed ({trimmed_proportion_to_cut * 100.0:.0f}% cut) standard deviation</b>'}
    for prediction_length in df_results.columns[1:]:
        trimmed_data = trim_proportion(df_results[prediction_length], trimmed_proportion_to_cut)
        trimmed_standard_deviations[prediction_length] = stdev(trimmed_data)
    summary_df = pd.concat([summary_df, pd.DataFrame([trimmed_standard_deviations])],
                           ignore_index=True)
    # Lower deviation is better, so highlight the minimum here.
    summary_sd_table = __html_table_from_dataframe(summary_df, highlight_max=False)
    return summary_mean_table + '\n<p/>\n\n' + summary_sd_table
def prediction_as_graphs(data, smoothed_data, term_ngrams, lims, results):
    """Render an HTML table with one row per term: the historical counts
    plot plus, for each k, the predicted counts and predicted derivative.

    Parameters
    ----------
    data : 2d array-like
        Ground-truth time series, one row per term (indexed via term_ngrams).
    smoothed_data : 2d array-like or None
        Optional smoothed ground truth plotted alongside the raw series.
    term_ngrams : list of str
        Maps a term to its row index in `data`.
    lims : sequence of two ints
        x positions for the dashed boundary markers on the history plot.
    results : dict
        term -> k -> {entry -> {'predicted_values', 'predicted_derivative'}}.

    Returns
    -------
    str
        HTML table markup with embedded plot images.
    """
    k_range = list(next(iter(results.values())).keys())
    html_string = '''
        <table>
          <tr>
            <td>term</td>
            <td style="text-align:center">Historical counts</td>'''
    for k in k_range:
        html_string += f'    <td style="text-align:center">Predicted<br/>counts (k={k})</td>\n'
        html_string += f'    <td style="text-align:center">Predicted<br/>derivative (k={k})</td>\n'
    html_string += '''
          </tr>
    '''
    for term in tqdm(results, desc='Producing graphs', unit='term'):
        html_string += f'''
          <tr>
            <td>{term}</td>
        '''
        term_index = term_ngrams.index(term)
        # Shared y limit so history and prediction plots are comparable.
        max_y = max(data[term_index]) * 1.2
        for k in k_range:
            max_y = max(max(results[term][k][0]['predicted_values']) * 1.2, max_y)
        fig = plt.figure(term, figsize=(6, 1.5), dpi=100)
        ax = fig.add_subplot(111)
        # BUG FIX: removed a leftover debug print of the raw series that
        # spammed stdout once per term.
        ax.plot(data[term_index], color='b', linestyle='-', marker='x', label='Ground truth')
        if smoothed_data is not None:
            ax.plot(list(smoothed_data[term_index]), color='g', linestyle='-', marker='*',
                    label='Smoothed Ground truth')
        ax.set_ylabel('Frequency', fontsize=12)
        ax.set_ylim([0, max_y])
        # Dashed markers for the boundaries handed in via lims.
        ax.axvline(x=lims[0], color='k', linestyle='--')
        ax.axvline(x=lims[1], color='k', linestyle='--')
        html_string += '    <td>' + plot_to_html_image(plt) + '</td>\n'
        for k in k_range:
            # Use the latest integer-keyed entry recorded for this term/k.
            last_entry = max([x for x in results[term][k].keys() if type(x) is int])
            fig = plt.figure(term, figsize=(1, 1.5), dpi=100)
            ax = fig.add_subplot(111)
            ax.plot(results[term][k][last_entry]['predicted_values'], color='r', linestyle='-', marker='+',
                    label='Prediction')
            ax.set_ylim([0, max_y])
            ax.set_yticklabels([])
            html_string += '    <td>' + plot_to_html_image(plt) + '</td>\n'
            fig = plt.figure(term, figsize=(1, 1.5), dpi=100)
            ax = fig.add_subplot(111)
            ax.plot(results[term][k][last_entry]['predicted_derivative'], color='r', linestyle='-', marker='+',
                    label='Prediction')
            # ax.set_ylim([0, max_y])
            # ax.set_yticklabels([])
            html_string += '    <td>' + plot_to_html_image(plt) + '</td>\n'
        html_string += '  </tr>\n'
    html_string += ' </table>\n'
    return html_string
| [
"pandas.DataFrame",
"numpy.partition",
"tqdm.tqdm",
"statistics.stdev",
"scripts.vandv.graphs.plot_to_html_image",
"matplotlib.pyplot.figure",
"statistics.mean",
"scipy.stats.trim_mean"
] | [((1765, 1814), 'numpy.partition', 'np.partition', (['data', '(lower_cut, upper_cut - 1)', '(0)'], {}), '(data, (lower_cut, upper_cut - 1), 0)\n', (1777, 1814), True, 'import numpy as np\n'), ((2234, 2264), 'pandas.DataFrame', 'pd.DataFrame', (['means'], {'index': '[0]'}), '(means, index=[0])\n', (2246, 2264), True, 'import pandas as pd\n'), ((2963, 3007), 'pandas.DataFrame', 'pd.DataFrame', (['standard_deviations'], {'index': '[0]'}), '(standard_deviations, index=[0])\n', (2975, 3007), True, 'import pandas as pd\n'), ((4185, 4236), 'tqdm.tqdm', 'tqdm', (['results'], {'desc': '"""Producing graphs"""', 'unit': '"""term"""'}), "(results, desc='Producing graphs', unit='term')\n", (4189, 4236), False, 'from tqdm import tqdm\n'), ((1145, 1166), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (1157, 1166), True, 'import pandas as pd\n'), ((2181, 2216), 'statistics.mean', 'mean', (['df_results[prediction_length]'], {}), '(df_results[prediction_length])\n', (2185, 2216), False, 'from statistics import stdev, mean\n'), ((2474, 2541), 'scipy.stats.trim_mean', 'trim_mean', (['df_results[prediction_length]', 'trimmed_proportion_to_cut'], {}), '(df_results[prediction_length], trimmed_proportion_to_cut)\n', (2483, 2541), False, 'from scipy.stats import trim_mean\n'), ((2909, 2945), 'statistics.stdev', 'stdev', (['df_results[prediction_length]'], {}), '(df_results[prediction_length])\n', (2914, 2945), False, 'from statistics import stdev, mean\n'), ((3356, 3375), 'statistics.stdev', 'stdev', (['trimmed_data'], {}), '(trimmed_data)\n', (3361, 3375), False, 'from statistics import stdev, mean\n'), ((4530, 4573), 'matplotlib.pyplot.figure', 'plt.figure', (['term'], {'figsize': '(6, 1.5)', 'dpi': '(100)'}), '(term, figsize=(6, 1.5), dpi=100)\n', (4540, 4573), True, 'import matplotlib.pyplot as plt\n'), ((5315, 5358), 'matplotlib.pyplot.figure', 'plt.figure', (['term'], {'figsize': '(1, 1.5)', 'dpi': '(100)'}), '(term, figsize=(1, 1.5), dpi=100)\n', (5325, 
5358), True, 'import matplotlib.pyplot as plt\n'), ((5719, 5762), 'matplotlib.pyplot.figure', 'plt.figure', (['term'], {'figsize': '(1, 1.5)', 'dpi': '(100)'}), '(term, figsize=(1, 1.5), dpi=100)\n', (5729, 5762), True, 'import matplotlib.pyplot as plt\n'), ((5149, 5172), 'scripts.vandv.graphs.plot_to_html_image', 'plot_to_html_image', (['plt'], {}), '(plt)\n', (5167, 5172), False, 'from scripts.vandv.graphs import plot_to_html_image\n'), ((5664, 5687), 'scripts.vandv.graphs.plot_to_html_image', 'plot_to_html_image', (['plt'], {}), '(plt)\n', (5682, 5687), False, 'from scripts.vandv.graphs import plot_to_html_image\n'), ((6076, 6099), 'scripts.vandv.graphs.plot_to_html_image', 'plot_to_html_image', (['plt'], {}), '(plt)\n', (6094, 6099), False, 'from scripts.vandv.graphs import plot_to_html_image\n')] |
from numpy import zeros, ones
def compose_soln(x, n, V):
    """Assemble an n-by-n solution grid: top row +V, bottom row -V, and
    the flat vector *x* reshaped into the (n-2)-by-(n-2) interior."""
    grid = zeros((n, n))
    boundary = V * ones(n)
    grid[0, :] = boundary
    grid[n - 1, :] = -boundary
    grid[1:-1, 1:-1] = x.reshape((n - 2, n - 2))
    return grid
"numpy.zeros",
"numpy.ones"
] | [((67, 80), 'numpy.zeros', 'zeros', (['(n, n)'], {}), '((n, n))\n', (72, 80), False, 'from numpy import zeros, ones\n'), ((99, 106), 'numpy.ones', 'ones', (['n'], {}), '(n)\n', (103, 106), False, 'from numpy import zeros, ones\n'), ((127, 134), 'numpy.ones', 'ones', (['n'], {}), '(n)\n', (131, 134), False, 'from numpy import zeros, ones\n')] |
#by <NAME> 2020
import numpy as np
import matplotlib.pyplot as plt
import skimage
import skimage.external.tifffile
import os
import pandas as pd
import seaborn as sns
########################################################
#2
import sys
import argparse
# Default paths, only used when the sys.argv override below is uncommented.
#default_i = "../data/PositiveControl/FilamentProjections/TomoJune_Fil06_Projection_crop.tif"
default_i = "../data/PositiveControl/FilamentProjections_readyToProcess/"
default_refPlus = "../data/ReferenceImageStacks/PlusUp.tif"
default_refMinus = "../data/ReferenceImageStacks/MinusUp.tif"
default_o = "../output/"
# this is the default system argument value array. Uncomment to run independent of command line
#sys.argv = ['actinPolarity.py','-i',default_i,'-refPlus',default_refPlus,'-refMinus',default_refMinus,'-o',default_o]
# CLI: input image/folder, the two reference stacks, and the output folder.
# NOTE(review): required=True means argparse never falls back to the
# `default=` values above — confirm whether the flags should be optional.
parser = argparse.ArgumentParser(description='ActinPolarity by <NAME> 2020')
parser.add_argument('-i','--input', help='*.tif input filament or folder. If folder: process all *.tif in folder.', default=default_i, required=True)
parser.add_argument('-refPlus','--referencePlusUp', help='*.tif stack plusUp reference',default=default_refPlus, required=True)
parser.add_argument('-refMinus','--referenceMinusUp', help='*.tif stack minusUp reference',default=default_refMinus, required=True)
parser.add_argument('-o','--output', help='output folder path ./output/',default=default_o, required=True)
args = vars(parser.parse_args())
#parser.add_argument('-i', metavar='N', type=int, nargs='+',
#                    help='input filament.tif')
#parser.add_argument('--sum', dest='accumulate', action='store_const',
#                    const=sum, default=max,
#                    help='sum the integers (default: find the max)')
#args = vars(parser.parse_args())
#print(args)
########################################################
#3
#zoom call timestamp 19:00
# Segment-extraction parameters: each segment is `height` pixels tall and
# taken every `every` pixels along the filament (see the main loop below).
# `width` only appears in a log message here.
width = 23;
height = 69;
every = 5;
#import mask
# Resolve the input path; accept a single *.tif file or a folder of *.tif.
input_image_path = os.path.abspath(os.path.join(args['input']))
input_imagePaths = []
if (os.path.isfile(input_image_path) ):
    input_imagePaths.append(input_image_path)
if(os.path.isdir(input_image_path)):
    for file in os.listdir(input_image_path):
        if (file.endswith(".tif")):
            input_imagePaths.append(os.path.join(input_image_path, file))
PlusUp_image_path = os.path.abspath(os.path.join(args['referencePlusUp']))
MinusUp_image_path = os.path.abspath(os.path.join(args['referenceMinusUp']))
output_folder_path = os.path.abspath(os.path.join(args['output']))
#create folder if it does not exist
from pathlib import Path
Path(output_folder_path).mkdir(parents=True, exist_ok=True)
# Load both reference stacks once; they are reused for every input file.
minusUpReference = skimage.external.tifffile.imread(MinusUp_image_path)
#print(image.shape)
#plt.imshow(minusUpReference[0],cmap='gray')
#plt.show()
plusUpReference = skimage.external.tifffile.imread(PlusUp_image_path)
#print(image.shape)
#plt.imshow(plusUpReference[0],cmap='gray')
#plt.show()
########################################################
#4
# calculate correlation coefficient:
# from https://www.gcsca.net/IJ/Image_CorrelationJ_1o.java
def getR(d1, d2):
    """Return the correlation coefficient of two equally-sized arrays.

    Port of https://www.gcsca.net/IJ/Image_CorrelationJ_1o.java: both
    inputs are z-scored with the population standard deviation (ddof=0)
    and the products are summed and divided by (n - 1), i.e. the Pearson
    coefficient scaled by n / (n - 1).
    """
    d1 = np.asarray(d1, dtype=float).ravel()
    d2 = np.asarray(d2, dtype=float).ravel()
    # Vectorised z-score product: numerically equivalent to the original
    # per-pixel Python loop, but far faster on image-sized inputs.
    z1 = (d1 - np.mean(d1)) / np.std(d1)
    z2 = (d2 - np.mean(d2)) / np.std(d2)
    return np.sum(z1 * z2) / (len(d1) - 1)
########################################################
#5
from pathlib import Path
# Main pipeline: for every input projection, cut it into overlapping
# segments, cross-correlate each segment with both reference stacks,
# decide polarity from the mean correlation difference, and write plots
# plus a CSV summary per input file.
for input_image_path in input_imagePaths:
    print("process "+input_image_path+" ...")
    outputFilePrefix = Path(input_image_path).stem
    #print(outputFilePrefix)
    #continue
    image = skimage.external.tifffile.imread(input_image_path)
    # NOTE(review): globalMin/globalMax are computed but never used below.
    globalMin = np.min(image)
    globalMax = np.max(image)
    print("extracting subimages of shape (w,h) ({},{}) every {} pixel ...".format(width,height,every))
    totalHeight = image.shape[0]
    #print(totalHeight);
    nSubpictures = (np.floor(((totalHeight-height)/every))).astype(int)
    subpictures = []
    Path(output_folder_path+"/"+outputFilePrefix).mkdir(parents=True, exist_ok=True)
    # Slide a height-pixel window along the image (bottom to top), saving
    # each segment as its own TIFF.
    for i in range(nSubpictures,0,-1):
        #print(i)
        subpicture = image[i*every:height+i*every,:]
        skimage.external.tifffile.imsave(output_folder_path+"/"+outputFilePrefix+"/"+outputFilePrefix+"_"+str(i).zfill(5)+".tiff", subpicture, imagej=True );
        # plt.imshow(subpicture,cmap='gray')
        # plt.show()
        subpictures.append(subpicture)
    print("found {} subpictures".format(len(subpictures)))
    #print("--- %s seconds ---" % (time.time() - start_time))
    ####################################################################################
    #import time
    #start_time = time.time()
    # Correlate every segment with every frame of both reference stacks.
    print("calculating R values...")
    rValuesPlusUpReference = []
    rValuesMinusUpReference = []
    for i in range(0,len(subpictures)):
        for j in range(0,len(plusUpReference)):
            rpu = getR(plusUpReference[j],subpictures[i])
            rValuesPlusUpReference.append(rpu)
        for k in range(0,len(minusUpReference)):
            rmu = getR(minusUpReference[k],subpictures[i])
            rValuesMinusUpReference.append(rmu)
    #print("--- %s seconds ---" % (time.time() - start_time))
    ####################################################################################
    #https://matplotlib.org/3.1.0/gallery/color/named_colors.html
    # Polarity call: negative mean (plus - minus) difference => minus end up.
    avgDifference = np.round(np.mean(np.array(rValuesPlusUpReference)-np.array(rValuesMinusUpReference)),decimals=2)
    pltTitleExplanation = ""
    if(np.mean(np.array(rValuesPlusUpReference)-np.array(rValuesMinusUpReference))<0):
        pltTitleExplanation = outputFilePrefix+"\n difference plus-minus <0 = {}: i.e. Minus End Up".format(avgDifference)
        endUpDecision = "Minus End Up"
    else:
        # NOTE(review): the leading "n " below is probably a missing "\n"
        # (compare the branch above) — confirm before changing the title.
        pltTitleExplanation = outputFilePrefix+"n difference plus-minus >=0 = {}: i.e. Plus End Up".format(avgDifference)
        endUpDecision = "Plus End Up"
    ####
    # Plot 1: raw correlation traces against both references.
    plt.figure(dpi=150)
    plt.plot(rValuesPlusUpReference,c="blueviolet",label="Plus Up R")
    plt.plot(rValuesMinusUpReference,c="limegreen",label="Minus Up R")
    plt.xlabel("Segment")
    plt.ylabel("Cross-correlation")
    # Hide the right and top spines
    plt.gca().spines['right'].set_visible(False)
    plt.gca().spines['top'].set_visible(False)
    plt.legend(loc="upper right")
    plt.ylim(-0.2,1)
    plt.title(pltTitleExplanation)
    plt.savefig(output_folder_path+"/"+outputFilePrefix+'_plot.svg')
    plt.savefig(output_folder_path+"/"+outputFilePrefix+'_plot.png')
    ####################################################################################
    #https://matplotlib.org/3.1.0/gallery/color/named_colors.html
    # Plot 2: the per-segment (plus - minus) difference trace.
    plt.figure(dpi=150)
    plt.plot(np.array(rValuesPlusUpReference)-np.array(rValuesMinusUpReference),c="blue",label="difference")
    plt.xlabel("Segment")
    plt.ylabel("Cross-correlation difference")
    # Hide the right and top spines
    plt.gca().spines['right'].set_visible(False)
    plt.gca().spines['top'].set_visible(False)
    plt.legend(loc="upper right")
    #plt.ylim(-0.5,0.5)
    plt.title(pltTitleExplanation)
    plt.savefig(output_folder_path+"/"+outputFilePrefix+'_plot_difference.svg')
    plt.savefig(output_folder_path+"/"+outputFilePrefix+'_plot_difference.png')
    ####################################################################################
    # Plot 3: violin plot of the differences; red when the mean difference
    # is negative (minus end up), light green otherwise.
    plt.figure(dpi=150)
    plt.plot([-1, 1], [0, 0], 'k-', lw=3)
    differencesPlusUpMinusMinusUp = np.array(rValuesPlusUpReference)-np.array(rValuesMinusUpReference)
    dataToPlot = np.array(differencesPlusUpMinusMinusUp.flatten())
    if(np.mean(dataToPlot)<0):
        colorUsed = "red"
    else:
        colorUsed = "lightgreen"
    plt.plot([-1, 1], [np.mean(dataToPlot), np.mean(dataToPlot)], 'k--', lw=2)
    swarmPlot = sns.violinplot(y=dataToPlot,color=colorUsed,edgecolor="black",s=3,linewidth=0.7)
    swarmPlot.set(ylim=(-1, 1))
    swarmPlot.set(aspect=2)
    plt.title(pltTitleExplanation)
    plt.savefig(output_folder_path+"/"+outputFilePrefix+'_plot_difference_violin.svg')
    plt.savefig(output_folder_path+"/"+outputFilePrefix+'_plot_difference_violin.png')
    ####################################################################################
    # Persist the per-segment R values and the polarity decision as CSV.
    data = np.array([rValuesPlusUpReference, rValuesMinusUpReference])
    #print( data.shape)
    # NOTE(review): the key 'R_Plus_minus_R-Minus:' appears twice below, so
    # the second value (endUpDecision) silently overwrites avgDifference —
    # confirm whether they were meant to be two distinct columns.
    df = pd.DataFrame({'R_plusUpReference': data[0],
                  'R_plusUpReference_area': np.sum(np.abs(data[0])),
                  'R_minusUpReference': data[1],
                  'R_minusUpReference_area': np.sum(np.abs(data[1])),
                  'R_Plus_minus_R-Minus:':avgDifference,
                  'R_Plus_minus_R-Minus:':endUpDecision})
    print(df)
    df.to_csv(output_folder_path+"/"+outputFilePrefix+'_output.csv', index = True)
    ####################################################################################
    print("done.")
    print("find the outputs in "+output_folder_path)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.floor",
"os.path.isfile",
"numpy.mean",
"matplotlib.pyplot.figure",
"pathlib.Path",
"matplotlib.pyplot.gca",
"os.path.join",
"numpy.std",
"numpy.max",
"matplotlib.pyplot.ylim",
"matplot... | [((806, 873), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ActinPolarity by <NAME> 2020"""'}), "(description='ActinPolarity by <NAME> 2020')\n", (829, 873), False, 'import argparse\n'), ((2003, 2035), 'os.path.isfile', 'os.path.isfile', (['input_image_path'], {}), '(input_image_path)\n', (2017, 2035), False, 'import os\n'), ((2088, 2119), 'os.path.isdir', 'os.path.isdir', (['input_image_path'], {}), '(input_image_path)\n', (2101, 2119), False, 'import os\n'), ((2643, 2695), 'skimage.external.tifffile.imread', 'skimage.external.tifffile.imread', (['MinusUp_image_path'], {}), '(MinusUp_image_path)\n', (2675, 2695), False, 'import skimage\n'), ((2792, 2843), 'skimage.external.tifffile.imread', 'skimage.external.tifffile.imread', (['PlusUp_image_path'], {}), '(PlusUp_image_path)\n', (2824, 2843), False, 'import skimage\n'), ((1948, 1975), 'os.path.join', 'os.path.join', (["args['input']"], {}), "(args['input'])\n", (1960, 1975), False, 'import os\n'), ((2138, 2166), 'os.listdir', 'os.listdir', (['input_image_path'], {}), '(input_image_path)\n', (2148, 2166), False, 'import os\n'), ((2317, 2354), 'os.path.join', 'os.path.join', (["args['referencePlusUp']"], {}), "(args['referencePlusUp'])\n", (2329, 2354), False, 'import os\n'), ((2393, 2431), 'os.path.join', 'os.path.join', (["args['referenceMinusUp']"], {}), "(args['referenceMinusUp'])\n", (2405, 2431), False, 'import os\n'), ((2471, 2499), 'os.path.join', 'os.path.join', (["args['output']"], {}), "(args['output'])\n", (2483, 2499), False, 'import os\n'), ((3214, 3225), 'numpy.mean', 'np.mean', (['d1'], {}), '(d1)\n', (3221, 3225), True, 'import numpy as np\n'), ((3238, 3249), 'numpy.mean', 'np.mean', (['d2'], {}), '(d2)\n', (3245, 3249), True, 'import numpy as np\n'), ((3261, 3271), 'numpy.std', 'np.std', (['d1'], {}), '(d1)\n', (3267, 3271), True, 'import numpy as np\n'), ((3283, 3293), 'numpy.std', 'np.std', (['d2'], {}), '(d2)\n', (3289, 3293), True, 'import numpy as 
np\n'), ((3774, 3824), 'skimage.external.tifffile.imread', 'skimage.external.tifffile.imread', (['input_image_path'], {}), '(input_image_path)\n', (3806, 3824), False, 'import skimage\n'), ((3841, 3854), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (3847, 3854), True, 'import numpy as np\n'), ((3871, 3884), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (3877, 3884), True, 'import numpy as np\n'), ((6209, 6228), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(150)'}), '(dpi=150)\n', (6219, 6228), True, 'import matplotlib.pyplot as plt\n'), ((6233, 6300), 'matplotlib.pyplot.plot', 'plt.plot', (['rValuesPlusUpReference'], {'c': '"""blueviolet"""', 'label': '"""Plus Up R"""'}), "(rValuesPlusUpReference, c='blueviolet', label='Plus Up R')\n", (6241, 6300), True, 'import matplotlib.pyplot as plt\n'), ((6303, 6371), 'matplotlib.pyplot.plot', 'plt.plot', (['rValuesMinusUpReference'], {'c': '"""limegreen"""', 'label': '"""Minus Up R"""'}), "(rValuesMinusUpReference, c='limegreen', label='Minus Up R')\n", (6311, 6371), True, 'import matplotlib.pyplot as plt\n'), ((6374, 6395), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Segment"""'], {}), "('Segment')\n", (6384, 6395), True, 'import matplotlib.pyplot as plt\n'), ((6400, 6431), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross-correlation"""'], {}), "('Cross-correlation')\n", (6410, 6431), True, 'import matplotlib.pyplot as plt\n'), ((6568, 6597), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (6578, 6597), True, 'import matplotlib.pyplot as plt\n'), ((6602, 6619), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.2)', '(1)'], {}), '(-0.2, 1)\n', (6610, 6619), True, 'import matplotlib.pyplot as plt\n'), ((6623, 6653), 'matplotlib.pyplot.title', 'plt.title', (['pltTitleExplanation'], {}), '(pltTitleExplanation)\n', (6632, 6653), True, 'import matplotlib.pyplot as plt\n'), ((6658, 6728), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(["(output_folder_path + '/' + outputFilePrefix + '_plot.svg')"], {}), "(output_folder_path + '/' + outputFilePrefix + '_plot.svg')\n", (6669, 6728), True, 'import matplotlib.pyplot as plt\n'), ((6727, 6797), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_folder_path + '/' + outputFilePrefix + '_plot.png')"], {}), "(output_folder_path + '/' + outputFilePrefix + '_plot.png')\n", (6738, 6797), True, 'import matplotlib.pyplot as plt\n'), ((6976, 6995), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(150)'}), '(dpi=150)\n', (6986, 6995), True, 'import matplotlib.pyplot as plt\n'), ((7109, 7130), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Segment"""'], {}), "('Segment')\n", (7119, 7130), True, 'import matplotlib.pyplot as plt\n'), ((7135, 7177), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross-correlation difference"""'], {}), "('Cross-correlation difference')\n", (7145, 7177), True, 'import matplotlib.pyplot as plt\n'), ((7314, 7343), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (7324, 7343), True, 'import matplotlib.pyplot as plt\n'), ((7391, 7421), 'matplotlib.pyplot.title', 'plt.title', (['pltTitleExplanation'], {}), '(pltTitleExplanation)\n', (7400, 7421), True, 'import matplotlib.pyplot as plt\n'), ((7426, 7511), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_folder_path + '/' + outputFilePrefix + '_plot_difference.svg')"], {}), "(output_folder_path + '/' + outputFilePrefix +\n '_plot_difference.svg')\n", (7437, 7511), True, 'import matplotlib.pyplot as plt\n'), ((7506, 7591), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_folder_path + '/' + outputFilePrefix + '_plot_difference.png')"], {}), "(output_folder_path + '/' + outputFilePrefix +\n '_plot_difference.png')\n", (7517, 7591), True, 'import matplotlib.pyplot as plt\n'), ((7697, 7716), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(150)'}), '(dpi=150)\n', (7707, 7716), True, 'import matplotlib.pyplot 
as plt\n'), ((7721, 7758), 'matplotlib.pyplot.plot', 'plt.plot', (['[-1, 1]', '[0, 0]', '"""k-"""'], {'lw': '(3)'}), "([-1, 1], [0, 0], 'k-', lw=3)\n", (7729, 7758), True, 'import matplotlib.pyplot as plt\n'), ((8142, 8230), 'seaborn.violinplot', 'sns.violinplot', ([], {'y': 'dataToPlot', 'color': 'colorUsed', 'edgecolor': '"""black"""', 's': '(3)', 'linewidth': '(0.7)'}), "(y=dataToPlot, color=colorUsed, edgecolor='black', s=3,\n linewidth=0.7)\n", (8156, 8230), True, 'import seaborn as sns\n'), ((8287, 8317), 'matplotlib.pyplot.title', 'plt.title', (['pltTitleExplanation'], {}), '(pltTitleExplanation)\n', (8296, 8317), True, 'import matplotlib.pyplot as plt\n'), ((8322, 8414), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_folder_path + '/' + outputFilePrefix + '_plot_difference_violin.svg')"], {}), "(output_folder_path + '/' + outputFilePrefix +\n '_plot_difference_violin.svg')\n", (8333, 8414), True, 'import matplotlib.pyplot as plt\n'), ((8409, 8501), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_folder_path + '/' + outputFilePrefix + '_plot_difference_violin.png')"], {}), "(output_folder_path + '/' + outputFilePrefix +\n '_plot_difference_violin.png')\n", (8420, 8501), True, 'import matplotlib.pyplot as plt\n'), ((8607, 8666), 'numpy.array', 'np.array', (['[rValuesPlusUpReference, rValuesMinusUpReference]'], {}), '([rValuesPlusUpReference, rValuesMinusUpReference])\n', (8615, 8666), True, 'import numpy as np\n'), ((2562, 2586), 'pathlib.Path', 'Path', (['output_folder_path'], {}), '(output_folder_path)\n', (2566, 2586), False, 'from pathlib import Path\n'), ((3680, 3702), 'pathlib.Path', 'Path', (['input_image_path'], {}), '(input_image_path)\n', (3684, 3702), False, 'from pathlib import Path\n'), ((7800, 7832), 'numpy.array', 'np.array', (['rValuesPlusUpReference'], {}), '(rValuesPlusUpReference)\n', (7808, 7832), True, 'import numpy as np\n'), ((7833, 7866), 'numpy.array', 'np.array', (['rValuesMinusUpReference'], {}), 
'(rValuesMinusUpReference)\n', (7841, 7866), True, 'import numpy as np\n'), ((7946, 7965), 'numpy.mean', 'np.mean', (['dataToPlot'], {}), '(dataToPlot)\n', (7953, 7965), True, 'import numpy as np\n'), ((3108, 3120), 'numpy.array', 'np.array', (['d1'], {}), '(d1)\n', (3116, 3120), True, 'import numpy as np\n'), ((3140, 3152), 'numpy.array', 'np.array', (['d2'], {}), '(d2)\n', (3148, 3152), True, 'import numpy as np\n'), ((4077, 4117), 'numpy.floor', 'np.floor', (['((totalHeight - height) / every)'], {}), '((totalHeight - height) / every)\n', (4085, 4117), True, 'import numpy as np\n'), ((4159, 4208), 'pathlib.Path', 'Path', (["(output_folder_path + '/' + outputFilePrefix)"], {}), "(output_folder_path + '/' + outputFilePrefix)\n", (4163, 4208), False, 'from pathlib import Path\n'), ((7009, 7041), 'numpy.array', 'np.array', (['rValuesPlusUpReference'], {}), '(rValuesPlusUpReference)\n', (7017, 7041), True, 'import numpy as np\n'), ((7042, 7075), 'numpy.array', 'np.array', (['rValuesMinusUpReference'], {}), '(rValuesMinusUpReference)\n', (7050, 7075), True, 'import numpy as np\n'), ((8062, 8081), 'numpy.mean', 'np.mean', (['dataToPlot'], {}), '(dataToPlot)\n', (8069, 8081), True, 'import numpy as np\n'), ((8083, 8102), 'numpy.mean', 'np.mean', (['dataToPlot'], {}), '(dataToPlot)\n', (8090, 8102), True, 'import numpy as np\n'), ((2240, 2276), 'os.path.join', 'os.path.join', (['input_image_path', 'file'], {}), '(input_image_path, file)\n', (2252, 2276), False, 'import os\n'), ((5640, 5672), 'numpy.array', 'np.array', (['rValuesPlusUpReference'], {}), '(rValuesPlusUpReference)\n', (5648, 5672), True, 'import numpy as np\n'), ((5673, 5706), 'numpy.array', 'np.array', (['rValuesMinusUpReference'], {}), '(rValuesMinusUpReference)\n', (5681, 5706), True, 'import numpy as np\n'), ((5764, 5796), 'numpy.array', 'np.array', (['rValuesPlusUpReference'], {}), '(rValuesPlusUpReference)\n', (5772, 5796), True, 'import numpy as np\n'), ((5797, 5830), 'numpy.array', 'np.array', 
(['rValuesMinusUpReference'], {}), '(rValuesMinusUpReference)\n', (5805, 5830), True, 'import numpy as np\n'), ((8801, 8816), 'numpy.abs', 'np.abs', (['data[0]'], {}), '(data[0])\n', (8807, 8816), True, 'import numpy as np\n'), ((8932, 8947), 'numpy.abs', 'np.abs', (['data[1]'], {}), '(data[1])\n', (8938, 8947), True, 'import numpy as np\n'), ((6472, 6481), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6479, 6481), True, 'import matplotlib.pyplot as plt\n'), ((6521, 6530), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6528, 6530), True, 'import matplotlib.pyplot as plt\n'), ((7218, 7227), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7225, 7227), True, 'import matplotlib.pyplot as plt\n'), ((7267, 7276), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7274, 7276), True, 'import matplotlib.pyplot as plt\n')] |
'''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import time
import torch
import logging
import argparse
import torchvision
import torch.nn as nn
import numpy as np
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import torchvision.transforms as transforms
from itertools import combinations, permutations
#from utils import progress_bar
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description='PyTorch CIFAR100 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
args = parser.parse_args()
logging.info(args)
store_name = "CNN"
nb_epoch = 400
# setup output
use_cuda = torch.cuda.is_available()
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256, shuffle=True, num_workers=8)
testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=256, shuffle=False, num_workers=8)
cfg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
def __init__(self, vgg_name):
super(VGG, self).__init__()
self.features = self._make_layers(cfg[vgg_name])
self.classifier = nn.Sequential(
nn.Linear(512,256),
nn.Linear(256, 100)
)
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
# Model
print('==> Building model..')
net = VGG('VGG16')
if use_cuda:
net.cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
def train(epoch):
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
idx = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
pass
idx = batch_idx
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
optimizer.zero_grad()
inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum().item()
train_acc = 100.*correct/total
train_loss = train_loss/(idx+1)
logging.info('Iteration %d, train_acc = %.5f,train_loss = %.6f' % (epoch, train_acc,train_loss))
def test(epoch):
net.eval()
test_loss = 0
correct = 0
total = 0
idx = 0
for batch_idx, (inputs, targets) in enumerate(testloader):
with torch.no_grad():
idx = batch_idx
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum().item()
test_acc = 100.*correct/total
test_loss = test_loss/(idx+1)
logging.info('Iteration %d, test_acc = %.4f,test_loss = %.4f' % (epoch, test_acc,test_loss))
return test_acc
def cosine_anneal_schedule(t):
cos_inner = np.pi * (t % (nb_epoch )) # t - 1 is used when t has 1-based indexing.
cos_inner /= (nb_epoch )
cos_out = np.cos(cos_inner) + 1
return float(args.lr / 2 * cos_out)
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
max_val_acc = 0
for epoch in range(nb_epoch):
lr = cosine_anneal_schedule(epoch)
for param_group in optimizer.param_groups:
print(param_group['lr'])
param_group['lr'] = lr
train(epoch)
test_acc = test(epoch)
if test_acc >max_val_acc:
max_val_acc = test_acc
print("max_val_acc", max_val_acc)
| [
"argparse.ArgumentParser",
"torchvision.transforms.Normalize",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torchvision.transforms.RandomHorizontalFlip",
"torch.autograd.Variable",
"torch.nn.Conv2d",
"torchvision.datasets.CIFAR100",
"torch.nn.BatchNor... | [((601, 640), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (620, 640), False, 'import logging\n'), ((651, 715), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch CIFAR100 Training"""'}), "(description='PyTorch CIFAR100 Training')\n", (674, 715), False, 'import argparse\n'), ((821, 839), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (833, 839), False, 'import logging\n'), ((912, 937), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (935, 937), False, 'import torch\n'), ((1385, 1487), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (1414, 1487), False, 'import torchvision\n'), ((1499, 1585), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(256)', 'shuffle': '(True)', 'num_workers': '(8)'}), '(trainset, batch_size=256, shuffle=True,\n num_workers=8)\n', (1526, 1585), False, 'import torch\n'), ((1599, 1701), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='./data', train=False, download=True,\n transform=transform_test)\n", (1628, 1701), False, 'import torchvision\n'), ((1712, 1798), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(256)', 'shuffle': '(False)', 'num_workers': '(8)'}), '(testset, batch_size=256, shuffle=False,\n num_workers=8)\n', (1739, 1798), False, 'import torch\n'), ((3325, 3346), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3344, 3346), True, 'import torch.nn as nn\n'), ((4177, 4278), 'logging.info', 'logging.info', (["('Iteration %d, train_acc = 
%.5f,train_loss = %.6f' % (epoch, train_acc,\n train_loss))"], {}), "('Iteration %d, train_acc = %.5f,train_loss = %.6f' % (epoch,\n train_acc, train_loss))\n", (4189, 4278), False, 'import logging\n'), ((5025, 5122), 'logging.info', 'logging.info', (["('Iteration %d, test_acc = %.4f,test_loss = %.4f' % (epoch, test_acc,\n test_loss))"], {}), "('Iteration %d, test_acc = %.4f,test_loss = %.4f' % (epoch,\n test_acc, test_loss))\n", (5037, 5122), False, 'import logging\n'), ((1026, 1062), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (1047, 1062), True, 'import torchvision.transforms as transforms\n'), ((1069, 1102), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1100, 1102), True, 'import torchvision.transforms as transforms\n'), ((1109, 1130), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1128, 1130), True, 'import torchvision.transforms as transforms\n'), ((1137, 1208), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1157, 1208), True, 'import torchvision.transforms as transforms\n'), ((1265, 1286), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1284, 1286), True, 'import torchvision.transforms as transforms\n'), ((1293, 1364), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1313, 1364), True, 'import torchvision.transforms as transforms\n'), ((3149, 3171), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3162, 3171), True, 'import torch.nn as nn\n'), ((3970, 3996), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (3979, 3996), False, 'import torch\n'), ((5313, 5330), 
'numpy.cos', 'np.cos', (['cos_inner'], {}), '(cos_inner)\n', (5319, 5330), True, 'import numpy as np\n'), ((2423, 2442), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (2432, 2442), True, 'import torch.nn as nn\n'), ((2456, 2475), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(100)'], {}), '(256, 100)\n', (2465, 2475), True, 'import torch.nn as nn\n'), ((3094, 3131), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(1)', 'stride': '(1)'}), '(kernel_size=1, stride=1)\n', (3106, 3131), True, 'import torch.nn as nn\n'), ((3739, 3755), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (3747, 3755), False, 'from torch.autograd import Variable\n'), ((3757, 3774), 'torch.autograd.Variable', 'Variable', (['targets'], {}), '(targets)\n', (3765, 3774), False, 'from torch.autograd import Variable\n'), ((4454, 4469), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4467, 4469), False, 'import torch\n'), ((4811, 4837), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (4820, 4837), False, 'import torch\n'), ((4622, 4638), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (4630, 4638), False, 'from torch.autograd import Variable\n'), ((4640, 4657), 'torch.autograd.Variable', 'Variable', (['targets'], {}), '(targets)\n', (4648, 4657), False, 'from torch.autograd import Variable\n'), ((2804, 2841), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (2816, 2841), True, 'import torch.nn as nn\n'), ((2890, 2941), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'x'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, x, kernel_size=3, padding=1)\n', (2899, 2941), True, 'import torch.nn as nn\n'), ((2971, 2988), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['x'], {}), '(x)\n', (2985, 2988), True, 'import torch.nn as nn\n'), ((3018, 3039), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), 
'(inplace=True)\n', (3025, 3039), True, 'import torch.nn as nn\n')] |
from mido import MidiFile
import pretty_midi
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import math
source = '/Users/jonathanmairena/Documents/Fall2021/CIS519/Project/Pretrained Results -interp/Pretrained_results_902_907'
files = os.listdir(source)
global_tempo = []
velocity = []
key = []
tempo_list = []
num_changes = []
beat_range = []
beat_ratio = []
for i in range(0,len(files)):
midi_file_name = os.path.join(source,files[i])
# Store an empirical estimate of its global tempo
midi_data = pretty_midi.PrettyMIDI(midi_file_name)
# print ('Global Tempo \t',midi_data.estimate_tempo())
# print (midi_data.estimate_tempo())
global_tempo.append(midi_data.estimate_tempo())
# Compute the relative amount of each semitone across the entire song,
# a proxy for key
total_velocity = sum(sum(midi_data.get_chroma()))
velocity.append(total_velocity)
# print('Total Velocity \t',total_velocity)
# print(total_velocity)
#relative amount of semitone accross entire song
rel_pitches = [sum(semitone)/total_velocity for semitone in midi_data.get_chroma()]
pitch_names = ['Do', 'Di', 'Re', 'Ri', 'Mi', 'Fa', 'Fi', 'Sol', 'Si', 'La', 'Li', 'Ti']
key.append(np.argmax(rel_pitches))
# plt.bar(pitch_names,rel_pitches)
# plt.title('Relative presence of pitch in song ')
# plt.savefig('Pitch_bar_plot')
#compute the amount of times the tempo changes
[time,tempo] = midi_data.get_tempo_changes()
# print('Tempo Changes \t',len(time))
# print('Tempo \t',tempo)
# print(len(time))
# print(tempo)
tempo_list.append(tempo)
num_changes.append(len(time))
#get_pitch_class_histogram
# plt.figure()
# plt.bar(pitch_names,midi_data.get_pitch_class_histogram(use_duration=False, use_velocity=True, normalize=True))
# plt.title('Pitch Histogram Weighed by Velocity')
# plt.savefig('pitch_histogram')
#transition matrix
# print(midi_data.get_pitch_class_transition_matrix(normalize=False, time_thresh=0.05))
#Get number of beats
beats = midi_data.get_beats()
# print('Number of Beats \t',len(beats))
# print('Range of Beats \t', beats[np.argmax(beats)] - beats[np.argmin(beats)])
beat_range.append(beats[np.argmax(beats)] - beats[np.argmin(beats)])
# print(len(beats))
# print(beats[np.argmax(beats)] - beats[np.argmin(beats)])
#Get number of off beats
down_beats = midi_data.get_downbeats()
beat_ratio.append(len(beats)/len(down_beats))
# print('Number of Down Beats \t', len(down_beats))
# print(len(down_beats))
pitch_names = ['Do', 'Di', 'Re', 'Ri', 'Mi', 'Fa', 'Fi', 'Sol', 'Si', 'La', 'Li', 'Ti']
print(np.mean(global_tempo))
print(np.mean(velocity))
print(np.mean(tempo_list))
print(np.mean(num_changes))
print(np.mean(beat_range))
print(np.mean(beat_ratio))
print(pitch_names[math.floor(np.mean(key))]) | [
"numpy.argmax",
"numpy.argmin",
"numpy.mean",
"pretty_midi.PrettyMIDI",
"os.path.join",
"os.listdir"
] | [((259, 277), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (269, 277), False, 'import os\n'), ((435, 465), 'os.path.join', 'os.path.join', (['source', 'files[i]'], {}), '(source, files[i])\n', (447, 465), False, 'import os\n'), ((529, 567), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', (['midi_file_name'], {}), '(midi_file_name)\n', (551, 567), False, 'import pretty_midi\n'), ((2563, 2584), 'numpy.mean', 'np.mean', (['global_tempo'], {}), '(global_tempo)\n', (2570, 2584), True, 'import numpy as np\n'), ((2592, 2609), 'numpy.mean', 'np.mean', (['velocity'], {}), '(velocity)\n', (2599, 2609), True, 'import numpy as np\n'), ((2617, 2636), 'numpy.mean', 'np.mean', (['tempo_list'], {}), '(tempo_list)\n', (2624, 2636), True, 'import numpy as np\n'), ((2644, 2664), 'numpy.mean', 'np.mean', (['num_changes'], {}), '(num_changes)\n', (2651, 2664), True, 'import numpy as np\n'), ((2672, 2691), 'numpy.mean', 'np.mean', (['beat_range'], {}), '(beat_range)\n', (2679, 2691), True, 'import numpy as np\n'), ((2699, 2718), 'numpy.mean', 'np.mean', (['beat_ratio'], {}), '(beat_ratio)\n', (2706, 2718), True, 'import numpy as np\n'), ((1194, 1216), 'numpy.argmax', 'np.argmax', (['rel_pitches'], {}), '(rel_pitches)\n', (1203, 1216), True, 'import numpy as np\n'), ((2749, 2761), 'numpy.mean', 'np.mean', (['key'], {}), '(key)\n', (2756, 2761), True, 'import numpy as np\n'), ((2150, 2166), 'numpy.argmax', 'np.argmax', (['beats'], {}), '(beats)\n', (2159, 2166), True, 'import numpy as np\n'), ((2176, 2192), 'numpy.argmin', 'np.argmin', (['beats'], {}), '(beats)\n', (2185, 2192), True, 'import numpy as np\n')] |
'''
this script is to evaluate stochastic motion prediction on amass
'''
import numpy as np
import argparse
import os
import sys
import pickle
import csv
sys.path.append(os.getcwd())
from utils import *
from experiments.utils.config import Config
from experiments.utils.batch_gen_amass import BatchGeneratorAMASSCanonicalized
from experiments.utils.eval_metrics import *
def get_prediction(data, algo, sample_num, num_seeds=1, concat_hist=True):
traj_np = data
if type(traj_np) is np.ndarray:
traj = tensor(traj_np, device=device, dtype=dtype).permute(1, 0, 2).contiguous()
elif type(traj_np) is torch.Tensor:
traj = traj_np.permute(1,0,2).clone().detach()
X = traj[:t_his] # original setting
if algo == 'dlow':
X = X.repeat((1, num_seeds, 1))
Z_g = models[algo].sample(X)
if 'mojo' in args.cfg:
Z_highfreq = torch.randn((t_pred-n_freq, Z_g.shape[1], Z_g.shape[2]), device=Z_g.device)
Z_g = torch.cat([Z_g, Z_highfreq],dim=0) #[freq, b*nk, d]
X = X.repeat_interleave(sample_num, dim=1)
Y = models['vae'].decode(X, Z_g)
elif algo == 'vae':
X = X.repeat((1, sample_num * num_seeds, 1))
Y = models[algo].sample_prior(X)
if concat_hist:
Y = torch.cat((X, Y), dim=0)
Y = Y.permute(1, 0, 2).detach().cpu().numpy()
if Y.shape[0] > 1:
Y = Y.reshape(-1, sample_num, Y.shape[-2], Y.shape[-1])
else:
Y = Y[None, ...] #expand a dim
return Y
def visualize(n_seqs, n_gens):
'''
Actually this is just to generate files for visualization, rather than directly render them.
n_seqs: how many sequences to generate
n_gens: for each input sequence, how many different sequences to predict
'''
### generate data and save them to files. They will need inverse kinematics to get body mesh.
### generate data
gen_results = {}
gen_results['gt'] = []
gen_results['betas'] = []
gen_results['gender'] = []
gen_results['transf_rotmat'] = []
gen_results['transf_transl'] = []
for algo in vis_algos:
gen_results[algo] = []
idx = 0
while idx < n_seqs:
data = batch_gen.next_sequence()
motion_np = data['body_feature']
motion = torch.FloatTensor(motion_np).unsqueeze(0) #[b,t,d]
gen_results['gt'].append(motion_np.reshape((1,motion_np.shape[0],-1,3)))
gen_results['betas'].append(data['betas'])
gen_results['gender'].append(str(data['gender']))
gen_results['transf_rotmat'].append(data['transf_rotmat'])
gen_results['transf_transl'].append(data['transf_transl'])
# vae
for algo in vis_algos:
pred = get_prediction(motion, algo, cfg.nk)[0]
pred = np.reshape(pred, (pred.shape[0], pred.shape[1],-1,3))
pred = pred[:n_gens]
gen_results[algo].append(pred)
idx+=1
gen_results['gt'] = np.stack(gen_results['gt'])
for algo in vis_algos:
gen_results[algo] = np.stack(gen_results[algo]) #[#seq, #genseq_per_pastmotion, t, #joints, 3]
### save to file
outfilename = '{}/seq_gen_seed{}/{}/'.format(cfg.result_dir, args.num_seeds, testing_data[0])
if not os.path.exists(outfilename):
os.makedirs(outfilename)
outfilename += 'results_{}.pkl'.format(body_repr)
with open(outfilename, 'wb') as f:
pickle.dump(gen_results, f)
def compute_stats():
stats_func = {'Diversity': compute_diversity, 'ADE': compute_ade,
'FDE': compute_fde, 'MMADE': compute_mmade, 'MMFDE': compute_mmfde,
'FE': compute_ps_entropy}
stats_names = list(stats_func.keys())
stats_meter = {x: {y: AverageMeter() for y in algos} for x in stats_names}
num_samples = 0
num_seeds = args.num_seeds
for i in range(all_data.shape[0]):
data = all_data[i:i+1]
num_samples += 1
gt = all_data[i:i+1,t_his:,:]
gt_multi = traj_gt_arr[i]
for algo in algos:
pred = get_prediction(data, algo, sample_num=cfg.nk, num_seeds=num_seeds, concat_hist=False)
for stats in stats_names:
val = 0
for pred_i in pred:
val += stats_func[stats](pred_i, gt, gt_multi) / num_seeds
stats_meter[stats][algo].update(val)
logger.info('=' * 80)
for stats in stats_names:
str_stats = f'Total {stats}: ' + ' '.join([f'{x}: {y.avg:.4f}' for x, y in stats_meter[stats].items()])
logger.info(str_stats)
logger.info('=' * 80)
with open('%s/stats_%s.csv' % (cfg.result_dir, args.num_seeds), 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['Metric'] + algos)
writer.writeheader()
for stats, meter in stats_meter.items():
new_meter = {x: y.avg for x, y in meter.items()}
new_meter['Metric'] = stats
writer.writerow(new_meter)
if __name__ == '__main__':
all_algos = ['vae','dlow']
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default=None) # specify the model to evaluate
parser.add_argument('--mode', default='vis') # for visualization or quantitative results?
parser.add_argument('--testdata', default='ACCAD') # which dataset to evaluate? choose only one
parser.add_argument('--gpu_index', type=int, default=-1)
### these are better not to be touched ###
parser.add_argument('--num_seeds', type=int, default=1)
parser.add_argument('--multimodal_threshold', type=float, default=0.5)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--iter', type=int, default=500)
### these are better not to be touched ###
args = parser.parse_args()
if 'mojo' in args.cfg:
from models.models_mojo import *
elif 'vanilla' in args.cfg:
from models.models_vanilla import *
"""setup"""
np.random.seed(args.seed)
torch.manual_seed(args.seed)
dtype = torch.float32
torch.set_default_dtype(dtype)
device = torch.device('cuda', index=args.gpu_index) if args.gpu_index >= 0 and torch.cuda.is_available() else torch.device('cpu')
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu_index)
torch.set_grad_enabled(False)
cfg = Config(args.cfg)
logger = create_logger(os.path.join(cfg.log_dir, 'log_eval.txt'))
algos = []
for algo in all_algos:
iter_algo = 'iter_%s' % algo
num_algo = 'num_%s_epoch' % algo
setattr(args, iter_algo, getattr(cfg, num_algo))
algos.append(algo)
vis_algos = algos.copy()
if args.action != 'all':
args.action = set(args.action.split(','))
"""parameter"""
nz = cfg.nz
nk = cfg.nk
t_his = cfg.t_his
t_pred = cfg.t_pred
body_repr = cfg.body_repr
subsets = cfg.dataset
n_freq = cfg.dlow_specs.get('n_freq', None)
"""data"""
testing_data = [args.testdata]
if len(testing_data)>1:
raise NameError('performing testing per dataset please.')
batch_gen = BatchGeneratorAMASSCanonicalized(amass_data_path=cfg.dataset_path,
amass_subset_name=testing_data,
sample_rate=8,
body_repr=body_repr)
batch_gen.get_rec_list(shuffle_seed=3)
all_data = batch_gen.get_all_data().detach().cpu().permute(1,0,2).numpy()#[b,t,d]
traj_gt_arr = get_multimodal_gt(all_data, t_his, args.multimodal_threshold)
"""models"""
model_generator = {
'vae': get_vae_model,
'dlow': get_dlow_model,
}
models = {}
for algo in algos:
models[algo] = model_generator[algo](cfg, batch_gen.get_feature_dim())
model_path = getattr(cfg, f"{algo}_model_path") % getattr(args, 'iter')
print(f'loading {algo} model from checkpoint: {model_path}')
model_cp = torch.load(model_path, map_location='cuda:0')
models[algo].load_state_dict(model_cp['model_dict'])
models[algo].to(device)
models[algo].eval()
if args.mode == 'vis':
visualize(n_seqs=60, n_gens=15)
elif args.mode == 'stats':
compute_stats()
| [
"numpy.stack",
"pickle.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"os.makedirs",
"os.getcwd",
"experiments.utils.config.Config",
"os.path.exists",
"experiments.utils.batch_gen_amass.BatchGeneratorAMASSCanonicalized",
"numpy.reshape",
"os.path.join",
"csv.DictWriter"
] | [((171, 182), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (180, 182), False, 'import os\n'), ((2933, 2960), 'numpy.stack', 'np.stack', (["gen_results['gt']"], {}), "(gen_results['gt'])\n", (2941, 2960), True, 'import numpy as np\n'), ((5018, 5043), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5041, 5043), False, 'import argparse\n'), ((5917, 5942), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (5931, 5942), True, 'import numpy as np\n'), ((6295, 6311), 'experiments.utils.config.Config', 'Config', (['args.cfg'], {}), '(args.cfg)\n', (6301, 6311), False, 'from experiments.utils.config import Config\n'), ((7061, 7199), 'experiments.utils.batch_gen_amass.BatchGeneratorAMASSCanonicalized', 'BatchGeneratorAMASSCanonicalized', ([], {'amass_data_path': 'cfg.dataset_path', 'amass_subset_name': 'testing_data', 'sample_rate': '(8)', 'body_repr': 'body_repr'}), '(amass_data_path=cfg.dataset_path,\n amass_subset_name=testing_data, sample_rate=8, body_repr=body_repr)\n', (7093, 7199), False, 'from experiments.utils.batch_gen_amass import BatchGeneratorAMASSCanonicalized\n'), ((3016, 3043), 'numpy.stack', 'np.stack', (['gen_results[algo]'], {}), '(gen_results[algo])\n', (3024, 3043), True, 'import numpy as np\n'), ((3222, 3249), 'os.path.exists', 'os.path.exists', (['outfilename'], {}), '(outfilename)\n', (3236, 3249), False, 'import os\n'), ((3259, 3283), 'os.makedirs', 'os.makedirs', (['outfilename'], {}), '(outfilename)\n', (3270, 3283), False, 'import os\n'), ((3385, 3412), 'pickle.dump', 'pickle.dump', (['gen_results', 'f'], {}), '(gen_results, f)\n', (3396, 3412), False, 'import pickle\n'), ((4669, 4724), 'csv.DictWriter', 'csv.DictWriter', (['csv_file'], {'fieldnames': "(['Metric'] + algos)"}), "(csv_file, fieldnames=['Metric'] + algos)\n", (4683, 4724), False, 'import csv\n'), ((6339, 6380), 'os.path.join', 'os.path.join', (['cfg.log_dir', '"""log_eval.txt"""'], {}), "(cfg.log_dir, 'log_eval.txt')\n", (6351, 
6380), False, 'import os\n'), ((2764, 2819), 'numpy.reshape', 'np.reshape', (['pred', '(pred.shape[0], pred.shape[1], -1, 3)'], {}), '(pred, (pred.shape[0], pred.shape[1], -1, 3))\n', (2774, 2819), True, 'import numpy as np\n')] |
from numpy import array, zeros, unique, searchsorted, where, arange
from pyNastran.dev.bdf_vectorized.cards.elements.spring.spring_element import SpringElement
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank
class CELAS1(SpringElement):
    """
    Vectorized CELAS1 scalar spring element.

    Connects two grid-point components through a scalar spring whose
    stiffness (K) and stress coefficient (s) come from a referenced
    PELAS property.
    """
    type = 'CELAS1'

    def __init__(self, model):
        """
        Defines the CELAS1 object.

        Parameters
        ----------
        model : BDF
            the BDF object
        """
        SpringElement.__init__(self, model)

    def allocate(self, card_count):
        """Preallocates the per-element arrays for ``card_count[self.type]`` cards."""
        ncards = card_count[self.type]
        if ncards:
            self.n = ncards
            #: Element ID
            self.element_id = zeros(ncards, 'int32')
            #: Property ID
            self.property_id = zeros(ncards, 'int32')
            #: Node IDs
            self.node_ids = zeros((ncards, 2), 'int32')
            #: component number
            self.components = zeros((ncards, 2), 'int32')

    def add_card(self, card, comment=''):
        """Adds one CELAS1 card at the current insertion index ``self.i``."""
        i = self.i
        self.element_id[i] = integer(card, 1, 'eid')
        # per the CELAS1 card definition, pid defaults to eid
        self.property_id[i] = integer_or_blank(card, 2, 'pid', self.element_id[i])
        self.node_ids[i, :] = [integer(card, 3, 'n1'),
                               integer(card, 5, 'n2')]
        self.components[i, :] = [integer_or_blank(card, 4, 'c1', 0),
                                 integer_or_blank(card, 6, 'c2', 0)]
        assert len(card) <= 7, 'len(CELAS1 card) = %i\ncard=%s' % (len(card), card)
        self.i += 1

    def build(self):
        """Sorts the arrays by element id and verifies the ids are unique."""
        if self.n:
            i = self.element_id.argsort()
            self.element_id = self.element_id[i]
            self.property_id = self.property_id[i]
            self.node_ids = self.node_ids[i, :]
            self.components = self.components[i, :]
            unique_eids = unique(self.element_id)
            if len(unique_eids) != len(self.element_id):
                raise RuntimeError('There are duplicate CELAS1 IDs...')
            self._cards = []
        else:
            self.element_id = array([], dtype='int32')
            self.property_id = array([], dtype='int32')

    def update(self, maps):
        """
        maps = {
            'element' : eid_map,
            'property' : pid_map,
            'node' : nid_map,
        }
        """
        if self.n:
            eid_map = maps['element']
            pid_map = maps['property']
            nid_map = maps['node']
            # bug fix: enumerate() yields (index, tuple); the tuple from zip()
            # must be unpacked separately or this loop raises a ValueError
            for i, (eid, pid, nids) in enumerate(zip(self.element_id, self.property_id, self.node_ids)):
                self.element_id[i] = eid_map[eid]
                self.property_id[i] = pid_map[pid]
                self.node_ids[i, 0] = nid_map[nids[0]]
                self.node_ids[i, 1] = nid_map[nids[1]]

    def write_card(self, bdf_file, size=8, eids=None):
        """Writes the CELAS1 cards; ``eids`` optionally restricts which are written."""
        if self.n:
            if eids is None:
                i = arange(self.n)
            else:
                # bug fix: filter on the ``eids`` argument
                # (``self.eid`` does not exist on this class)
                i = searchsorted(self.element_id, eids)
            for (eid, pid, n, c) in zip(self.element_id[i], self.property_id[i], self.node_ids[i], self.components[i]):
                card = ['CELAS1', eid, pid, n[0], n[1], c[0], c[1]]
                if size == 8:
                    bdf_file.write(print_card_8(card))
                else:
                    bdf_file.write(print_card_16(card))

    def get_stiffness_matrix(self, i, model, positions, index0s, fnorm=1.0):
        """gets the stiffness matrix for CELAS1"""
        ipid = where(self.model.pelas.property_id == self.property_id[i])[0][0]
        prop = self.model.pelas
        ki = prop.K[ipid]
        # 2x2 scalar spring stiffness: k * [[1, -1], [-1, 1]]
        k = ki * array([[1, -1,],
                        [-1, 1]])
        #========================
        n1, n2 = self.node_ids[i, :]
        c1, c2 = self.components[i, :]
        delta1 = 0 if c1 in [0, 1, 2, 3] else 3
        delta2 = 0 if c2 in [0, 1, 2, 3] else 3
        c1b = c1 - 1 if c1 > 0 else c1
        c2b = c2 - 1 if c2 > 0 else c2
        i1 = index0s[n1]
        i2 = index0s[n2]
        dofs = [
            i1 + c1b,
            # bug fix: the second dof belongs to node 2, so it uses c2b
            # (mirrors the per-node deltas in n_ijv below)
            i2 + c2b,
        ]
        n_ijv = [
            (n1, 1 + delta1),
            (n2, 1 + delta2),
        ]
        return (k, dofs, n_ijv)

    def displacement_stress(self, model, positions, q, dofs,
                            ni, o1, e1, f1):
        """Computes axial strain (e1), force (f1) and stress (o1) in place."""
        n = self.n
        du_axial = zeros(n, 'float64')
        for i in range(self.n):
            (n1, n2) = self.node_ids[i, :]
            n11 = dofs[(n1, 1)]
            n21 = dofs[(n2, 1)]
            q_axial = array([
                q[n11],
                q[n21],
            ])
            u_axial = q_axial
            du_axial[i] = u_axial[0] - u_axial[1]
        self.model.log.debug("len(pelas) = %s" % self.model.pelas.n)
        i = searchsorted(self.model.pelas.property_id, self.property_id)
        k = self.model.pelas.K[i]
        s = self.model.pelas.s[i]
        self.model.log.debug("k=%s s=%s du_axial=%s" % (k, s, du_axial))
        e1[ni: ni+n] = du_axial * s
        f1[ni: ni+n] = k * du_axial
        o1[ni: ni+n] = f1[ni: ni+n] * s
        #return (axial_strain, axial_stress, axial_force)
| [
"pyNastran.bdf.bdf_interface.assign_type.integer",
"pyNastran.bdf.field_writer_8.print_card_8",
"numpy.zeros",
"numpy.searchsorted",
"pyNastran.bdf.bdf_interface.assign_type.integer_or_blank",
"numpy.where",
"numpy.array",
"numpy.arange",
"pyNastran.dev.bdf_vectorized.cards.elements.spring.spring_el... | [((584, 619), 'pyNastran.dev.bdf_vectorized.cards.elements.spring.spring_element.SpringElement.__init__', 'SpringElement.__init__', (['self', 'model'], {}), '(self, model)\n', (606, 619), False, 'from pyNastran.dev.bdf_vectorized.cards.elements.spring.spring_element import SpringElement\n'), ((1164, 1187), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(1)', '"""eid"""'], {}), "(card, 1, 'eid')\n", (1171, 1187), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank\n'), ((1218, 1270), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(2)', '"""pid"""', 'self.element_id[i]'], {}), "(card, 2, 'pid', self.element_id[i])\n", (1234, 1270), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank\n'), ((4467, 4486), 'numpy.zeros', 'zeros', (['n', '"""float64"""'], {}), "(n, 'float64')\n", (4472, 4486), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((4883, 4943), 'numpy.searchsorted', 'searchsorted', (['self.model.pelas.property_id', 'self.property_id'], {}), '(self.model.pelas.property_id, self.property_id)\n', (4895, 4943), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((799, 821), 'numpy.zeros', 'zeros', (['ncards', '"""int32"""'], {}), "(ncards, 'int32')\n", (804, 821), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((880, 902), 'numpy.zeros', 'zeros', (['ncards', '"""int32"""'], {}), "(ncards, 'int32')\n", (885, 902), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((955, 982), 'numpy.zeros', 'zeros', (['(ncards, 2)', '"""int32"""'], {}), "((ncards, 2), 'int32')\n", (960, 982), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((1045, 1072), 'numpy.zeros', 'zeros', (['(ncards, 2)', '"""int32"""'], {}), 
"((ncards, 2), 'int32')\n", (1050, 1072), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((1302, 1324), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(3)', '"""n1"""'], {}), "(card, 3, 'n1')\n", (1309, 1324), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank\n'), ((1357, 1379), 'pyNastran.bdf.bdf_interface.assign_type.integer', 'integer', (['card', '(5)', '"""n2"""'], {}), "(card, 5, 'n2')\n", (1364, 1379), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank\n'), ((1414, 1448), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(4)', '"""c1"""', '(0)'], {}), "(card, 4, 'c1', 0)\n", (1430, 1448), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank\n'), ((1483, 1517), 'pyNastran.bdf.bdf_interface.assign_type.integer_or_blank', 'integer_or_blank', (['card', '(6)', '"""c2"""', '(0)'], {}), "(card, 6, 'c2', 0)\n", (1499, 1517), False, 'from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank\n'), ((1933, 1956), 'numpy.unique', 'unique', (['self.element_id'], {}), '(self.element_id)\n', (1939, 1956), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((2159, 2183), 'numpy.array', 'array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (2164, 2183), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((2215, 2239), 'numpy.array', 'array', (['[]'], {'dtype': '"""int32"""'}), "([], dtype='int32')\n", (2220, 2239), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((3718, 3743), 'numpy.array', 'array', (['[[1, -1], [-1, 1]]'], {}), '([[1, -1], [-1, 1]])\n', (3723, 3743), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((4650, 4673), 'numpy.array', 'array', (['[q[n11], q[n21]]'], {}), '([q[n11], q[n21]])\n', (4655, 
4673), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((2956, 2970), 'numpy.arange', 'arange', (['self.n'], {}), '(self.n)\n', (2962, 2970), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((3009, 3048), 'numpy.searchsorted', 'searchsorted', (['self.element_id', 'self.eid'], {}), '(self.element_id, self.eid)\n', (3021, 3048), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((3580, 3638), 'numpy.where', 'where', (['(self.model.pelas.property_id == self.property_id[i])'], {}), '(self.model.pelas.property_id == self.property_id[i])\n', (3585, 3638), False, 'from numpy import array, zeros, unique, searchsorted, where, arange\n'), ((3303, 3321), 'pyNastran.bdf.field_writer_8.print_card_8', 'print_card_8', (['card'], {}), '(card)\n', (3315, 3321), False, 'from pyNastran.bdf.field_writer_8 import print_card_8\n'), ((3380, 3399), 'pyNastran.bdf.field_writer_16.print_card_16', 'print_card_16', (['card'], {}), '(card)\n', (3393, 3399), False, 'from pyNastran.bdf.field_writer_16 import print_card_16\n')] |
import tensorflow as tf
import numpy as np
def generate_anchors_pre(height, width, feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    """Tile the base anchors across every cell of a feature map.

    Returns a float32 tensor of shape (K*A, 4) holding all anchors and its
    length, where K = width*height feature cells and A = number of base
    anchors per cell.
    """
    # pixel offsets of each feature-map cell along x (width) and y (height)
    xs = tf.range(width) * feat_stride  # width
    ys = tf.range(height) * feat_stride  # height
    grid_x, grid_y = tf.meshgrid(xs, ys)
    flat_x = tf.reshape(grid_x, shape=(-1,))
    flat_y = tf.reshape(grid_y, shape=(-1,))
    # one (x, y, x, y) shift per cell so it can be added to box corners
    shifts = tf.transpose(tf.stack([flat_x, flat_y, flat_x, flat_y]))
    K = tf.multiply(width, height)
    shifts = tf.transpose(tf.reshape(shifts, shape=[1, K, 4]), perm=(1, 0, 2))
    base_anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
    A = base_anchors.shape[0]
    anchor_constant = tf.constant(base_anchors.reshape((1, A, 4)), dtype=tf.int32)
    length = K * A
    anchors_tf = tf.reshape(tf.add(anchor_constant, shifts), shape=(length, 4))
    return tf.cast(anchors_tf, dtype=tf.float32), length
# Actual output of generate_anchors() with the defaults (the often-quoted
# -83/-39/... values come from the 1-indexed MATLAB implementation and are
# off by one):
# array([[ -84.,  -40.,   99.,   55.],
#        [-176.,  -88.,  191.,  103.],
#        [-360., -184.,  375.,  199.],
#        [ -56.,  -56.,   71.,   71.],
#        [-120., -120.,  135.,  135.],
#        [-248., -248.,  263.,  263.],
#        [ -36.,  -80.,   51.,   95.],
#        [ -80., -168.,   95.,  183.],
#        [-168., -344.,  183.,  359.]])
def generate_anchors(base_size=16, ratios=(0.5, 1, 2),
                     scales=2 ** np.arange(3, 6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, 15, 15) window.

    Parameters
    ----------
    base_size : int
        side length of the reference window; anchors are centered on the
        (0, 0, base_size-1, base_size-1) box
    ratios : sequence of float
        aspect ratios to enumerate (a tuple default replaces the original
        mutable list default; numpy broadcasts both identically)
    scales : ndarray
        multiplicative scales applied to each ratio anchor

    Returns
    -------
    ndarray of shape (len(ratios) * len(scales), 4), rows as [x1, y1, x2, y2]
    """
    base_anchor = np.array([1, 1, base_size, base_size]) - 1
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
                        for i in range(ratio_anchors.shape[0])])
    return anchors
def _whctrs(anchor):
    """
    Return width, height, x center, and y center for an anchor
    given as [x1, y1, x2, y2] (inclusive pixel coordinates).
    """
    x1, y1, x2, y2 = anchor[0], anchor[1], anchor[2], anchor[3]
    w = x2 - x1 + 1
    h = y2 - y1 + 1
    return w, h, x1 + 0.5 * (w - 1), y1 + 0.5 * (h - 1)
def _mkanchors(ws, hs, x_ctr, y_ctr):
    """
    Given vectors of widths (ws) and heights (hs) around a center
    (x_ctr, y_ctr), return the corresponding (N, 4) anchor windows.
    """
    half_w = 0.5 * (ws[:, np.newaxis] - 1)
    half_h = 0.5 * (hs[:, np.newaxis] - 1)
    return np.hstack((x_ctr - half_w,
                      y_ctr - half_h,
                      x_ctr + half_w,
                      y_ctr + half_h))
def _ratio_enum(anchor, ratios):
    """
    Enumerate one anchor per aspect ratio around the given anchor,
    keeping the area approximately constant.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    area = w * h
    widths = np.round(np.sqrt(area / ratios))
    heights = np.round(widths * ratios)
    return _mkanchors(widths, heights, x_ctr, y_ctr)
def _scale_enum(anchor, scales):
    """
    Enumerate one anchor per scale around the given anchor,
    multiplying its width and height by each scale.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)
| [
"tensorflow.meshgrid",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.add",
"numpy.hstack",
"tensorflow.stack",
"tensorflow.multiply",
"tensorflow.cast",
"numpy.arange",
"numpy.array",
"numpy.round",
"numpy.sqrt"
] | [((287, 316), 'tensorflow.meshgrid', 'tf.meshgrid', (['shift_x', 'shift_y'], {}), '(shift_x, shift_y)\n', (298, 316), True, 'import tensorflow as tf\n'), ((326, 358), 'tensorflow.reshape', 'tf.reshape', (['shift_x'], {'shape': '(-1,)'}), '(shift_x, shape=(-1,))\n', (336, 358), True, 'import tensorflow as tf\n'), ((368, 400), 'tensorflow.reshape', 'tf.reshape', (['shift_y'], {'shape': '(-1,)'}), '(shift_y, shape=(-1,))\n', (378, 400), True, 'import tensorflow as tf\n'), ((463, 489), 'tensorflow.multiply', 'tf.multiply', (['width', 'height'], {}), '(width, height)\n', (474, 489), True, 'import tensorflow as tf\n'), ((2316, 2428), 'numpy.hstack', 'np.hstack', (['(x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), \n y_ctr + 0.5 * (hs - 1))'], {}), '((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (\n ws - 1), y_ctr + 0.5 * (hs - 1)))\n', (2325, 2428), True, 'import numpy as np\n'), ((2777, 2798), 'numpy.round', 'np.round', (['(ws * ratios)'], {}), '(ws * ratios)\n', (2785, 2798), True, 'import numpy as np\n'), ((170, 185), 'tensorflow.range', 'tf.range', (['width'], {}), '(width)\n', (178, 185), True, 'import tensorflow as tf\n'), ((223, 239), 'tensorflow.range', 'tf.range', (['height'], {}), '(height)\n', (231, 239), True, 'import tensorflow as tf\n'), ((427, 453), 'tensorflow.stack', 'tf.stack', (['[sx, sy, sx, sy]'], {}), '([sx, sy, sx, sy])\n', (435, 453), True, 'import tensorflow as tf\n'), ((516, 551), 'tensorflow.reshape', 'tf.reshape', (['shifts'], {'shape': '[1, K, 4]'}), '(shifts, shape=[1, K, 4])\n', (526, 551), True, 'import tensorflow as tf\n'), ((816, 847), 'tensorflow.add', 'tf.add', (['anchor_constant', 'shifts'], {}), '(anchor_constant, shifts)\n', (822, 847), True, 'import tensorflow as tf\n'), ((880, 917), 'tensorflow.cast', 'tf.cast', (['anchors_tf'], {'dtype': 'tf.float32'}), '(anchors_tf, dtype=tf.float32)\n', (887, 917), True, 'import tensorflow as tf\n'), ((1361, 1376), 'numpy.arange', 'np.arange', 
(['(3)', '(6)'], {}), '(3, 6)\n', (1370, 1376), True, 'import numpy as np\n'), ((1535, 1573), 'numpy.array', 'np.array', (['[1, 1, base_size, base_size]'], {}), '([1, 1, base_size, base_size])\n', (1543, 1573), True, 'import numpy as np\n'), ((2746, 2766), 'numpy.sqrt', 'np.sqrt', (['size_ratios'], {}), '(size_ratios)\n', (2753, 2766), True, 'import numpy as np\n'), ((608, 631), 'numpy.array', 'np.array', (['anchor_ratios'], {}), '(anchor_ratios)\n', (616, 631), True, 'import numpy as np\n'), ((640, 663), 'numpy.array', 'np.array', (['anchor_scales'], {}), '(anchor_scales)\n', (648, 663), True, 'import numpy as np\n')] |
from src.network import NeuralNetwork
import src.util_functions as uf
import numpy
from src.util.status import DataStore
from src.util.status import TrainSettings
from src.util.status import Status
import os
import pickle
def print_log(status_train: Status, status_valid: Status):
    """Prints one table row with the current epoch's train/valid loss and error."""
    epoch = status_train.current_epoch
    row = '\t{}\t \t|\t {:.5f} \t|\t {:.5f} \t|\t {:.5f} \t|\t {:.5f}\t'.format(
        epoch,
        status_train.loss[epoch],
        status_train.error[epoch],
        status_valid.loss[epoch],
        status_valid.error[epoch])
    print(row)
def iteration(nn: NeuralNetwork, status: Status):
    """Runs one forward pass (plus backward/update in training mode).

    Returns the epoch-boundary flag reported by ``status.draw_batch()``.
    """
    epoch_done, status.x_batch, status.y_batch = status.draw_batch()
    status.soft_prob = nn.fprop(status.x_batch, status.is_train)
    status.predict = uf.pick_class(status.soft_prob)
    status.train_settings.loss_callback(status)
    if status.is_train:
        nn.bprop(status.y_batch)
        nn.update(status.train_settings)
    return epoch_done
def cross_train(nn: NeuralNetwork, data_store_train: DataStore, data_store_valid: DataStore, train_settings: TrainSettings):
    """Trains ``nn`` on the train store while monitoring the validation store.

    After training, the two Status objects are pickled to
    ``../output/metric`` and the network dump to ``../output/dump``
    (both relative to this file's directory; the directories are
    assumed to exist).

    Parameters
    ----------
    nn : NeuralNetwork
        the network to train (updated in place)
    data_store_train : DataStore
        source of training batches
    data_store_valid : DataStore
        source of validation batches
    train_settings : TrainSettings
        hyper-parameters, callbacks, and the output file name parts
    """
    print('------------------ Start Training -----------------')
    print('\tepoch\t|\ttrain loss\t|\ttrain error\t|\tvalid loss\t|\tvalid error\t')
    status_train = Status(train_settings, data_store_train, True)
    status_valid = Status(train_settings, data_store_valid, False)
    while status_train.current_epoch < status_train.target_epoch:
        # iteration() returns truthy when the data store signals an epoch
        # boundary; only then run a validation pass and advance the epoch
        if iteration(nn, status_train):
            iteration(nn, status_valid)
            print_log(status_train, status_valid)
            if status_train.current_epoch % 3 == 2:
                # refresh the plots every third epoch
                train_settings.plot_callback(status_train, status_valid)
            status_train.current_epoch += 1
            status_valid.current_epoch += 1
    # compose the output file name from the settings' name parts
    filename = train_settings.filename + '-' + train_settings.prefix + '-' + train_settings.infix + '-' + train_settings.suffix + '.dump'
    full_path = os.path.realpath(__file__)
    path, _ = os.path.split(full_path)
    savepath = os.path.join(path, '../output/dump', filename)
    metricpath = os.path.join(path, '../output/metric', filename)
    with open(metricpath, 'wb+') as f:
        pickle.dump([status_train, status_valid], f)
    with open(savepath, 'wb+') as f:
        pickle.dump(nn.dump(), f)
    return
def inference(nn: NeuralNetwork, data_store: DataStore, settings: TrainSettings):
    """Runs a single evaluation pass over ``data_store`` and returns the predictions."""
    eval_status = Status(settings, data_store, False)
    iteration(nn, eval_status)
    return eval_status.predict
def nlp_inference_sentence(nn: NeuralNetwork, head: list, vocab: dict):
    """Greedily extends a 3-word sentence head until 'END' or 13 tokens.

    Parameters
    ----------
    nn : NeuralNetwork
        trained trigram model; ``fprop`` maps three word ids to next-word
        probabilities
    head : list
        exactly three seed words; mutated in place and returned
    vocab : dict
        word -> id mapping (inverted locally for id -> word lookups)

    Raises
    ------
    ValueError
        if ``head`` does not contain exactly three words
    """
    inv_vocab = {v: k for k, v in vocab.items()}
    # bug fix: the original used ``len(head) is not 3`` -- identity comparison
    # with an int literal is implementation-dependent; use ``!=`` instead
    if len(head) != 3:
        raise ValueError(f'the length of the sentence head should be 3, but is actually {len(head)}')
    word_next = 'START'
    while word_next != 'END' and len(head) <= 13:
        id1 = vocab[head[-3]]
        id2 = vocab[head[-2]]
        id3 = vocab[head[-1]]
        data_input = numpy.array([[id1, id2, id3]])
        id_prob = nn.fprop(data_input)
        id_next = uf.pick_class(id_prob)[0]
        word_next = inv_vocab[id_next]
        head.append(word_next)
    return head
| [
"pickle.dump",
"src.util.status.Status",
"src.util_functions.pick_class",
"os.path.realpath",
"numpy.array",
"os.path.split",
"os.path.join"
] | [((942, 973), 'src.util_functions.pick_class', 'uf.pick_class', (['status.soft_prob'], {}), '(status.soft_prob)\n', (955, 973), True, 'import src.util_functions as uf\n'), ((1443, 1489), 'src.util.status.Status', 'Status', (['train_settings', 'data_store_train', '(True)'], {}), '(train_settings, data_store_train, True)\n', (1449, 1489), False, 'from src.util.status import Status\n'), ((1509, 1556), 'src.util.status.Status', 'Status', (['train_settings', 'data_store_valid', '(False)'], {}), '(train_settings, data_store_valid, False)\n', (1515, 1556), False, 'from src.util.status import Status\n'), ((2122, 2148), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2138, 2148), False, 'import os\n'), ((2163, 2187), 'os.path.split', 'os.path.split', (['full_path'], {}), '(full_path)\n', (2176, 2187), False, 'import os\n'), ((2203, 2249), 'os.path.join', 'os.path.join', (['path', '"""../output/dump"""', 'filename'], {}), "(path, '../output/dump', filename)\n", (2215, 2249), False, 'import os\n'), ((2272, 2320), 'os.path.join', 'os.path.join', (['path', '"""../output/metric"""', 'filename'], {}), "(path, '../output/metric', filename)\n", (2284, 2320), False, 'import os\n'), ((2593, 2628), 'src.util.status.Status', 'Status', (['settings', 'data_store', '(False)'], {}), '(settings, data_store, False)\n', (2599, 2628), False, 'from src.util.status import Status\n'), ((2368, 2412), 'pickle.dump', 'pickle.dump', (['[status_train, status_valid]', 'f'], {}), '([status_train, status_valid], f)\n', (2379, 2412), False, 'import pickle\n'), ((3118, 3148), 'numpy.array', 'numpy.array', (['[[id1, id2, id3]]'], {}), '([[id1, id2, id3]])\n', (3129, 3148), False, 'import numpy\n'), ((3206, 3228), 'src.util_functions.pick_class', 'uf.pick_class', (['id_prob'], {}), '(id_prob)\n', (3219, 3228), True, 'import src.util_functions as uf\n')] |
#coding=utf-8
import networkx as nx
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from util import *
from sklearn.metrics import mutual_info_score
def param_update(X, A, Y, W, alpha):
    """One multiplicative update of the FacetNet factorization W ~ X * A * X.T.

    Parameters
    ----------
    X : np.matrix, shape (N, M)
        soft community membership (columns sum to 1)
    A : np.matrix, shape (M, M)
        diagonal community-weight matrix (sums to 1)
    Y : np.matrix, shape (N, M)
        previous soft assignment X * A, used as the temporal prior
    W : np.matrix, shape (N, N)
        symmetric similarity matrix of the current snapshot
    alpha : float
        weight of the current snapshot vs. the historic prior

    Returns
    -------
    (X_new, A_new, Y_new) with X_new column-normalized, A_new normalized to
    unit sum, and Y_new = X_new * A_new.
    """
    W_apprx = X * A * X.T
    N, M = Y.shape
    X_new, A_new = np.zeros(X.shape), np.zeros(A.shape)
    for k in range(M):
        for i in range(N):
            for j in range(N):
                X_new[i, k] += W[i, j] * A[k, k] * X[j, k] / W_apprx[i, j]
                A_new[k, k] += W[i, j] * X[i, k] * X[j, k] / W_apprx[i, j]
            # NOTE(review): A_new[k, k] is rescaled by alpha*A[k, k] once per
            # row i, mirroring the original update order -- verify against
            # the FacetNet derivation
            X_new[i, k] *= (2 * alpha * X[i, k])
            A_new[k, k] *= (alpha * A[k, k])
            X_new[i, k] += (1 - alpha) * Y[i, k]
            A_new[k, k] += (1 - alpha) * Y[i, k]
    X_new = np.matrix(X_new / np.sum(X_new, axis=0).reshape(1, M))
    # bug fix: normalize the *updated* A_new (the original divided the old A
    # by sum(A_new), discarding the freshly computed update)
    A_new = np.matrix(A_new / np.sum(A_new))
    Y = X_new * A_new
    return X_new, A_new, Y
def read_edgelist(filename, weighted=False):
    """Reads an edge-list file into (idmap, idmap_inv, adj_mat).

    Each line holds ``u v`` (weight 1.0 assumed) or ``u v w`` when
    ``weighted`` is True.  Returns the list mapping array index -> node id,
    the inverse dict mapping node id -> array index, and the symmetric
    dense adjacency matrix.
    """
    edge_weights = {}
    nodes = set()
    with open(filename) as handle:
        for raw in handle:
            fields = [int(tok) for tok in raw.strip().split()]
            if weighted:
                u, v, w = fields
            else:
                u, v, w = fields[0], fields[1], 1.0
            edge_weights[(u, v)] = w
            nodes.update((u, v))
    idmap = list(nodes)  # array index -> unique node id
    idmap_inv = {nid: i for i, nid in enumerate(idmap)}  # node id -> array index
    adj_mat = np.zeros((len(idmap), len(idmap)))
    for (u, v), w in edge_weights.items():
        adj_mat[idmap_inv[u], idmap_inv[v]] = w
    # symmetrize: each edge was stored in one direction only
    adj_mat += adj_mat.T
    return idmap, idmap_inv, adj_mat
def alg(net_path, alpha,tsteps,N,M,with_truth=True): # FacetNet with # of nodes and communities fixed
    """Runs FacetNet over ``tsteps`` snapshots with fixed node/community counts.

    Parameters
    ----------
    net_path : str
        directory containing ``<t>.edgelist`` (and ``<t>.comm`` when
        ``with_truth``) files for t = 0..tsteps-1
    alpha : float
        weight of the current snapshot vs. the historic factorization
    tsteps : int
        number of time steps to process
    N, M : int
        number of nodes and number of communities (fixed over time)
    with_truth : bool
        when True, ground-truth communities are read and mutual information
        against the prediction is printed
    """
    # random initialization of membership (X) and weight (A) factors,
    # normalized so columns of X and the whole of A sum to 1
    X, A = np.random.rand(N, M), np.diag(np.random.rand(M))
    X, A = np.matrix(X / np.sum(X, axis=0).reshape(1, M)), np.matrix(A / np.sum(A))
    Y = X * A
    for t in range(tsteps):
        # G = nx.read_weighted_edgelist(net_path+"%d.edgelist" % t)
        # idmap, mapping: nodeid -> array_id
        idmap, idmap_inv, adj_mat = read_edgelist(net_path + "%d.edgelist" % t, weighted=False)
        if with_truth:
            with open(net_path+"%d.comm" % t) as f:
                comm_map = {} # mapping: nodeid -> its community
                for line in f:
                    id0, comm0 = line.strip().split()
                    comm_map[int(id0)] = int(comm0)
        W = Sim(adj_mat, weighted=False)
        X_new, A_new, Y = param_update(X, A, Y, W, alpha)
        # D holds the row sums of Y; D^-1 * X_new * A_new is the soft
        # community assignment
        D = np.zeros((N,))
        for i in range(N):
            D[i] = np.sum(Y[i, :])
        D = np.matrix(np.diag(D))
        soft_comm = D.I * X_new * A_new
        comm_pred = np.array(np.argmax(soft_comm, axis=1)).ravel()
        print("time:", t)
        if with_truth:
            # NOTE(review): idmap_inv maps node id -> array index, so this
            # only lines up with comm_pred when node ids are exactly 0..N-1
            # (alg_extended uses idmap[i] here) -- verify
            comm = np.array([comm_map[idmap_inv[i]] for i in range(N)])
            print("mutual_info:", mutual_info_score(comm, comm_pred))
        print("soft_modularity:", soft_modularity(soft_comm, W))
        # community_net = A_new * X_new.T * soft_comm
        # print("community_net")
        # print(community_net)
        # evolution_net = X.T * soft_comm
        # print("evolution_net")
        # print(evolution_net)
        X, A = X_new, A_new
# do experiment with network stated in 4.1.2
def exp1():
    """Experiment on the synthetic network described in section 4.1.2."""
    tsteps = 15
    from synthetic import generate_evolution
    print("generating synthetic graph")
    generate_evolution("./data/syntetic1/", tsteps=tsteps)
    print("start the algorithm")
    np.random.seed(0)
    alg("./data/syntetic1/", alpha=0.9, tsteps=tsteps, N=128, M=4)
# FacetNet with # of nodes and communities changed
def alg_extended(net_path, alpha,tsteps,M,with_truth=True):
    """FacetNet over ``tsteps`` snapshots, allowing nodes to appear/disappear.

    Parameters
    ----------
    net_path : str
        directory with ``<t>.edgelist`` (and ``<t>.comm`` when
        ``with_truth``) files for t = 0..tsteps-1
    alpha : float
        weight of the current snapshot vs. the historic factorization
    tsteps : int
        number of time steps to process
    M : int
        number of communities (fixed; N is read per snapshot)
    with_truth : bool
        when True, mutual information against ground truth is printed
    """
    # node bookkeeping from the previous time step
    idmap0,idmap_inv0 = [],{}
    for t in range(tsteps):
        print("time:", t)
        idmap, idmap_inv, adj_mat = read_edgelist(net_path+"%d.edgelist" % t, weighted=False)
        if with_truth:
            with open(net_path+"%d.comm" % t) as f:
                comm_map = {}  # mapping: node id -> its community
                for line in f:
                    id0, comm0 = line.strip().split()
                    comm_map[int(id0)] = int(comm0)
        N = len(idmap)
        W = Sim(adj_mat, weighted=False)
        if t == 0:
            # random normalized initialization on the first snapshot
            X, A = np.random.rand(N, M), np.diag(np.random.rand(M))
            X, A = np.matrix(X / np.sum(X, axis=0).reshape(1, M)), np.matrix(A / np.sum(A))
            Y = X * A
        else: # adjustment for changing of nodes
            # rows of the previous factors that correspond to surviving nodes
            reserved_rows = [idmap_inv0[x] for x in idmap0 if x in idmap]
            num_new,num_old = len(set(idmap) - set(idmap0)),len(reserved_rows)
            Y = Y[reserved_rows,:]
            Y /= np.sum(Y)
            # new nodes get zero prior in Y
            Y = np.pad(Y,((0,num_new),(0,0)),mode='constant',constant_values=(0,0))
            # not mentioned on the paper, but are necessary for node changing
            X = X[reserved_rows,:]
            X = np.matrix(X / np.sum(X, axis=0).reshape(1, M))
            X *= num_old/(num_old+num_new)
            # new nodes get a uniform share of the membership mass
            X = np.pad(X, ((0, num_new), (0, 0)), mode='constant', constant_values=(1/num_new, 1/num_new))
        X_new, A_new, Y = param_update(X, A, Y, W, alpha)
        # D holds the row sums of Y; D^-1 * X_new * A_new is the soft
        # community assignment
        D = np.zeros((N,))
        for i in range(N):
            D[i] = np.sum(Y[i, :])
        D = np.matrix(np.diag(D))
        soft_comm = D.I * X_new * A_new
        comm_pred = np.array(np.argmax(soft_comm, axis=1)).ravel()
        if with_truth:
            comm = np.array([comm_map[idmap[i]] for i in range(N)])
            print("mutual_info:", mutual_info_score(comm, comm_pred))
        s_modu = soft_modularity(soft_comm, W)
        print("soft_modularity: %f" % s_modu)
        #community_net = A_new * X_new.T * soft_comm
        #print("community_net")
        #print(community_net)
        # evolution_net = X.T * soft_comm
        # print("evolution_net")
        # print(evolution_net)
        X, A = X_new, A_new
        idmap0, idmap_inv0 = idmap, idmap_inv
# do experiment with adding and removing nodes
def exp2():
    """Experiment with nodes being added and removed over time."""
    tsteps = 15
    from synthetic import generate_evolution2
    print("generating synthetic graph")
    generate_evolution2("./data/syntetic2/", tsteps=tsteps)
    print("start the algorithm")
    np.random.seed(0)
    alg_extended("./data/syntetic2/", alpha=0.5, tsteps=tsteps, M=4)
# do experiment with network stated in 4.1.2, adding weight
def exp3():
    """Experiment on the section 4.1.2 network with edge weights added."""
    tsteps = 15
    from synthetic import generate_evolution3
    print("generating synthetic graph")
    generate_evolution3("./data/syntetic3/", tsteps=tsteps)
    print("start the algorithm")
    np.random.seed(0)
    alg("./data/syntetic3/", alpha=0.9, tsteps=tsteps, N=128, M=4)
if __name__ == "__main__":
    # run the three experiments back to back, announcing each one
    for banner, experiment in (
            ("do experiment with network stated in 4.1.2", exp1),
            ("\ndo experiment with adding and removing nodes", exp2),
            ("\ndo experiment with network stated in 4.1.2, adding weight", exp3)):
        print(banner)
        experiment()
"numpy.pad",
"numpy.random.seed",
"numpy.sum",
"numpy.argmax",
"synthetic.generate_evolution3",
"numpy.zeros",
"synthetic.generate_evolution",
"sklearn.metrics.mutual_info_score",
"synthetic.generate_evolution2",
"numpy.random.rand",
"numpy.diag"
] | [((1515, 1531), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (1523, 1531), True, 'import numpy as np\n'), ((3481, 3535), 'synthetic.generate_evolution', 'generate_evolution', (['"""./data/syntetic1/"""'], {'tsteps': 'tsteps'}), "('./data/syntetic1/', tsteps=tsteps)\n", (3499, 3535), False, 'from synthetic import generate_evolution\n'), ((3607, 3624), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3621, 3624), True, 'import numpy as np\n'), ((6166, 6221), 'synthetic.generate_evolution2', 'generate_evolution2', (['"""./data/syntetic2/"""'], {'tsteps': 'tsteps'}), "('./data/syntetic2/', tsteps=tsteps)\n", (6185, 6221), False, 'from synthetic import generate_evolution2\n'), ((6275, 6292), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6289, 6292), True, 'import numpy as np\n'), ((6525, 6580), 'synthetic.generate_evolution3', 'generate_evolution3', (['"""./data/syntetic3/"""'], {'tsteps': 'tsteps'}), "('./data/syntetic3/', tsteps=tsteps)\n", (6544, 6580), False, 'from synthetic import generate_evolution3\n'), ((6652, 6669), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6666, 6669), True, 'import numpy as np\n'), ((284, 301), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (292, 301), True, 'import numpy as np\n'), ((303, 320), 'numpy.zeros', 'np.zeros', (['A.shape'], {}), '(A.shape)\n', (311, 320), True, 'import numpy as np\n'), ((1800, 1820), 'numpy.random.rand', 'np.random.rand', (['N', 'M'], {}), '(N, M)\n', (1814, 1820), True, 'import numpy as np\n'), ((2592, 2606), 'numpy.zeros', 'np.zeros', (['(N,)'], {}), '((N,))\n', (2600, 2606), True, 'import numpy as np\n'), ((5232, 5246), 'numpy.zeros', 'np.zeros', (['(N,)'], {}), '((N,))\n', (5240, 5246), True, 'import numpy as np\n'), ((1830, 1847), 'numpy.random.rand', 'np.random.rand', (['M'], {}), '(M)\n', (1844, 1847), True, 'import numpy as np\n'), ((2653, 2668), 'numpy.sum', 'np.sum', (['Y[i, :]'], {}), '(Y[i, :])\n', (2659, 2668), 
True, 'import numpy as np\n'), ((2691, 2701), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (2698, 2701), True, 'import numpy as np\n'), ((4741, 4750), 'numpy.sum', 'np.sum', (['Y'], {}), '(Y)\n', (4747, 4750), True, 'import numpy as np\n'), ((4767, 4841), 'numpy.pad', 'np.pad', (['Y', '((0, num_new), (0, 0))'], {'mode': '"""constant"""', 'constant_values': '(0, 0)'}), "(Y, ((0, num_new), (0, 0)), mode='constant', constant_values=(0, 0))\n", (4773, 4841), True, 'import numpy as np\n'), ((5070, 5168), 'numpy.pad', 'np.pad', (['X', '((0, num_new), (0, 0))'], {'mode': '"""constant"""', 'constant_values': '(1 / num_new, 1 / num_new)'}), "(X, ((0, num_new), (0, 0)), mode='constant', constant_values=(1 /\n num_new, 1 / num_new))\n", (5076, 5168), True, 'import numpy as np\n'), ((5293, 5308), 'numpy.sum', 'np.sum', (['Y[i, :]'], {}), '(Y[i, :])\n', (5299, 5308), True, 'import numpy as np\n'), ((5331, 5341), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (5338, 5341), True, 'import numpy as np\n'), ((833, 846), 'numpy.sum', 'np.sum', (['A_new'], {}), '(A_new)\n', (839, 846), True, 'import numpy as np\n'), ((1922, 1931), 'numpy.sum', 'np.sum', (['A'], {}), '(A)\n', (1928, 1931), True, 'import numpy as np\n'), ((2965, 2999), 'sklearn.metrics.mutual_info_score', 'mutual_info_score', (['comm', 'comm_pred'], {}), '(comm, comm_pred)\n', (2982, 2999), False, 'from sklearn.metrics import mutual_info_score\n'), ((4305, 4325), 'numpy.random.rand', 'np.random.rand', (['N', 'M'], {}), '(N, M)\n', (4319, 4325), True, 'import numpy as np\n'), ((5576, 5610), 'sklearn.metrics.mutual_info_score', 'mutual_info_score', (['comm', 'comm_pred'], {}), '(comm, comm_pred)\n', (5593, 5610), False, 'from sklearn.metrics import mutual_info_score\n'), ((2772, 2800), 'numpy.argmax', 'np.argmax', (['soft_comm'], {'axis': '(1)'}), '(soft_comm, axis=1)\n', (2781, 2800), True, 'import numpy as np\n'), ((4335, 4352), 'numpy.random.rand', 'np.random.rand', (['M'], {}), '(M)\n', (4349, 4352), True, 'import 
numpy as np\n'), ((5413, 5441), 'numpy.argmax', 'np.argmax', (['soft_comm'], {'axis': '(1)'}), '(soft_comm, axis=1)\n', (5422, 5441), True, 'import numpy as np\n'), ((781, 802), 'numpy.sum', 'np.sum', (['X_new'], {'axis': '(0)'}), '(X_new, axis=0)\n', (787, 802), True, 'import numpy as np\n'), ((1874, 1891), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1880, 1891), True, 'import numpy as np\n'), ((4435, 4444), 'numpy.sum', 'np.sum', (['A'], {}), '(A)\n', (4441, 4444), True, 'import numpy as np\n'), ((4978, 4995), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4984, 4995), True, 'import numpy as np\n'), ((4387, 4404), 'numpy.sum', 'np.sum', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4393, 4404), True, 'import numpy as np\n')] |
import collections
import cv2
import json
import numpy as np
import os
import re
import tensorflow as tf
import xml.etree.ElementTree as et
from xml.dom import minidom
import yaml
from yaml.constructor import ConstructorError
from mpl_toolkits.axes_grid1 import ImageGrid
from tensorflow.python.framework import ops
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import math
import copy
# # import tensorflow as tf
# import numpy as np
# # import matplotlib.pyplot as plt
# # from mpl_toolkits.axes_grid1 import ImageGrid
#
#
# # Model construction utilities below adapted from
# # https://www.tensorflow.org/versions/r0.8/tutorials/mnist/pros/index.html#deep-mnist-for-experts
# def weight_variable(shape):
# initial = tf.truncated_normal(shape, stddev=0.1)
# return tf.Variable(initial)
#
#
# def bias_variable(shape):
# initial = tf.constant(0.1, shape=shape)
# return tf.Variable(initial)
#
#
# def conv2d(x, W):
# return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
#
#
# def max_pool_2x2(x):
# return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
# strides=[1, 2, 2, 1], padding='SAME')
#
#
# def shuffle_aligned_list(data):
# """Shuffle arrays in a list by shuffling each array identically."""
# num = data[0].shape[0]
# p = np.random.permutation(num)
# return [d[p] for d in data]
#
#
# def batch_generator(data, batch_size, shuffle=True, iter_shuffle=False):
# """Generate batches of data.
#
# Given a list of array-like objects, generate batches of a given
# size by yielding a list of array-like objects corresponding to the
# same slice of each input.
# """
#
# num = data[0].shape[0]
# if shuffle:
# data = shuffle_aligned_list(data)
#
# batch_count = 0
# while True:
# if batch_count * batch_size + batch_size >= num:
# batch_count = 0
# if iter_shuffle:
# print("batch shuffling")
# data = shuffle_aligned_list(data)
#
# start = batch_count * batch_size
# end = start + batch_size
# batch_count += 1
# yield [d[start:end] for d in data]
#
#
# def imshow_grid(images, shape=[2, 8]):
# """Plot images in a grid of a given shape."""
# fig = plt.figure(1)
# grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)
#
# size = shape[0] * shape[1]
# for i in range(size):
# grid[i].axis('off')
# grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.
#
# plt.show()
#
#
# def plot_embedding(X, y, d, title=None):
# """Plot an embedding X with the class label y colored by the domain d."""
# x_min, x_max = np.min(X, 0), np.max(X, 0)
# X = (X - x_min) / (x_max - x_min)
#
# # Plot colors numbers
# plt.figure(figsize=(10, 10))
# ax = plt.subplot(111)
# for i in range(X.shape[0]):
# # plot colored number
# plt.text(X[i, 0], X[i, 1], str(y[i]),
# color=plt.cm.bwr(d[i] / 1.),
# fontdict={'weight': 'bold', 'size': 9})
#
# plt.xticks([]), plt.yticks([])
# if title is not None:
# plt.title(title)
###############
def static_or_dynamic_map_fn(fn, elems, dtype=None,
                             parallel_iterations=32, back_prop=True):
  """Runs map_fn as a (static) for loop when possible.
  This function rewrites the map_fn as an explicit unstack input -> for loop
  over function calls -> stack result combination. This allows our graphs to
  be acyclic when the batch size is static.
  For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.
  Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
  with the default tf.map_fn function as it does not accept nested inputs (only
  Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
  Tensor or list of Tensors.
  TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.
  Args:
    fn: The callable to be performed. It accepts one argument, which will have
      the same structure as elems. Its output must have the
      same structure as elems.
    elems: A tensor or list of tensors, each of which will
      be unpacked along their first dimension. The sequence of the
      resulting slices will be applied to fn.
    dtype: (optional) The output type(s) of fn. If fn returns a structure of
      Tensors differing from the structure of elems, then dtype is not optional
      and must have the same structure as the output of fn.
    parallel_iterations: (optional) number of batch items to process in
      parallel. This flag is only used if the native tf.map_fn is used
      and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
    back_prop: (optional) True enables support for back propagation.
      This flag is only used if the native tf.map_fn is used.
  Returns:
    A tensor or sequence of tensors. Each tensor packs the
    results of applying fn to tensors unpacked from elems along the first
    dimension, from first to last.
  Raises:
    ValueError: if `elems` is not a Tensor or a list of Tensors.
    ValueError: if `fn` does not return a Tensor or list of Tensors.
  """
  if isinstance(elems, list):
    for elem in elems:
      if not isinstance(elem, tf.Tensor):
        raise ValueError('`elems` must be a Tensor or list of Tensors.')
    elem_shapes = [elem.shape.as_list() for elem in elems]
    # Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
    # to all be the same size along the batch dimension.
    for elem_shape in elem_shapes:
      if (not elem_shape or not elem_shape[0]
          or elem_shape[0] != elem_shapes[0][0]):
        return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    # Static batch size: unroll explicitly so the graph stays acyclic.
    arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
    outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
  else:
    if not isinstance(elems, tf.Tensor):
      raise ValueError('`elems` must be a Tensor or list of Tensors.')
    elems_shape = elems.shape.as_list()
    if not elems_shape or not elems_shape[0]:
      # Dynamic batch size: defer to the native tf.map_fn.
      return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    outputs = [fn(arg) for arg in tf.unstack(elems)]
  # Stack `outputs`, which is a list of Tensors or list of lists of Tensors
  if all([isinstance(output, tf.Tensor) for output in outputs]):
    return tf.stack(outputs)
  else:
    if all([isinstance(output, list) for output in outputs]):
      if all([all(
          [isinstance(entry, tf.Tensor) for entry in output_list])
              for output_list in outputs]):
        # `fn` returned a list per slice; stack position-wise across slices.
        return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
  raise ValueError('`fn` should return a Tensor or a list of Tensors.')
# Model construction utilities below adapted from
# https://www.tensorflow.org/versions/r0.8/tutorials/mnist/pros/index.html#deep-mnist-for-experts
def weight_variable(shape):
  """Creates a trainable weight tensor initialised from a truncated normal."""
  return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
  """Creates a trainable bias tensor initialised to the constant 0.1."""
  return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
  """2-D convolution of x with filter W, stride 1 and 'SAME' padding."""
  unit_strides = [1, 1, 1, 1]
  return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool_2x2(x):
  """Downsamples x with max pooling over non-overlapping 2x2 windows."""
  window = [1, 2, 2, 1]
  return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def shuffle_aligned_list(data):
  """Shuffles every array in `data` with one shared random permutation."""
  permutation = np.random.permutation(data[0].shape[0])
  shuffled = []
  for array in data:
    shuffled.append(array[permutation])
  return shuffled
def batch_generator(data, batch_size, shuffle=True, iter_shuffle=False):
  """Yields aligned mini-batches from `data` indefinitely.

  Given a list of array-like objects, repeatedly yields lists of slices of
  size `batch_size`, one slice per input array, all taken at the same
  positions. Optionally reshuffles once up front and/or at every wrap-around.
  """
  total = data[0].shape[0]
  if shuffle:
    data = shuffle_aligned_list(data)
  batch_index = 0
  while True:
    # Wrap around (and optionally reshuffle) once the next batch would
    # run past the end of the data.
    if batch_index * batch_size + batch_size >= total:
      batch_index = 0
      if iter_shuffle:
        print("batch shuffling")
        data = shuffle_aligned_list(data)
    low = batch_index * batch_size
    batch_index += 1
    yield [d[low:low + batch_size] for d in data]
###############
def GetFilesRecursively(topdir):
  """Gets all records recursively for some topdir.
  Args:
    topdir: String, path to top directory.
  Returns:
    allpaths: List of Strings, full paths to all leaf records.
  Raises:
    ValueError: If there are no files found for this directory.
  """
  assert topdir
  root = os.path.expanduser(topdir)
  found = []
  for dirpath, _, leaffiles in tf.gfile.Walk(root):
    found.extend(os.path.join(dirpath, name) for name in leaffiles)
  if not found:
    raise ValueError('No files found for top directory %s' % root)
  return found
def NoDuplicatesConstructor(loader, node, deep=False):
  """Yaml mapping constructor that raises on duplicate keys."""
  seen = {}
  for key_node, value_node in node.value:
    constructed_key = loader.construct_object(key_node, deep=deep)
    constructed_value = loader.construct_object(value_node, deep=deep)
    if constructed_key in seen:
      raise ConstructorError('while constructing a mapping', node.start_mark,
                             'found duplicate key (%s)' % constructed_key,
                             key_node.start_mark)
    seen[constructed_key] = constructed_value
  return loader.construct_mapping(node, deep)
def WriteConfigAsYaml(config, logdir, filename):
  """Serialises `config` as yaml to logdir/filename, creating logdir."""
  if not tf.gfile.Exists(logdir):
    tf.gfile.MakeDirs(logdir)
  target = os.path.join(logdir, filename)
  with tf.gfile.GFile(target, 'w') as handle:
    handle.write(yaml.dump(config))
  tf.logging.info('wrote config to %s', target)
def LoadConfigDict(config_paths, model_params):
  """Loads config dictionary from specified yaml files or command line yaml.

  Args:
    config_paths: String of config file paths, separated by ',' or '#'.
    model_params: Either a yaml string or an already-parsed dict of
      overrides merged on top of the file configs (may be falsy to skip).

  Returns:
    final_config: dict, the deep-merged result of all configs.
  """
  # Ensure that no duplicate keys can be loaded (causing pain).
  yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                       NoDuplicatesConstructor)
  # Handle either ',' or '#' separated config lists, since borg will only
  # accept '#'.
  sep = ',' if ',' in config_paths else '#'
  # Load flags from config file.
  final_config = {}
  if config_paths:
    for config_path in config_paths.split(sep):
      config_path = config_path.strip()
      if not config_path:
        continue
      config_path = os.path.abspath(config_path)
      tf.logging.info('Loading config from %s', config_path)
      with tf.gfile.GFile(config_path.strip()) as config_file:
        # SECURITY NOTE(review): yaml.load without an explicit Loader can
        # construct arbitrary objects; only trusted config files should be
        # passed here. safe_load would bypass NoDuplicatesConstructor above,
        # so switching loaders needs care — verify before changing.
        config_flags = yaml.load(config_file)
        # Later files win over earlier ones on conflicting leaves.
        final_config = DeepMergeDict(final_config, config_flags)
  if model_params:
    model_params = MaybeLoadYaml(model_params)
    final_config = DeepMergeDict(final_config, model_params)
  tf.logging.info('Final Config:\n%s', yaml.dump(final_config))
  return final_config
def MaybeLoadYaml(item):
  """Parses item if it's a string. If it's a dictionary it's returned as-is.

  Args:
    item: Either a yaml string or an already-parsed dict.

  Returns:
    The parsed (or passed-through) dict.

  Raises:
    ValueError: if item is neither a string nor a dict.
  """
  if isinstance(item, six.string_types):
    # SECURITY NOTE(review): yaml.load without an explicit Loader can
    # construct arbitrary objects; only pass trusted yaml here.
    return yaml.load(item)
  elif isinstance(item, dict):
    return item
  else:
    # Bug fix: the '{}' placeholder was previously never substituted because
    # type(item) was passed as a second exception argument instead of being
    # formatted into the message.
    raise ValueError(
        'Got {}, expected YAML string or dict'.format(type(item)))
def DeepMergeDict(dict_x, dict_y, path=None):
  """Recursively merges dict_y into dict_x; dict_x is mutated and returned."""
  if path is None:
    path = []
  for key, y_value in dict_y.items():
    if key not in dict_x:
      dict_x[key] = y_value
      continue
    x_value = dict_x[key]
    if isinstance(x_value, dict) and isinstance(y_value, dict):
      # Both sides are mappings: descend and merge in place.
      DeepMergeDict(x_value, y_value, path + [str(key)])
    elif x_value == y_value:
      pass  # identical leaf value, nothing to do
    else:
      # Conflicting leaf: dict_y wins.
      dict_x[key] = y_value
  return dict_x
def ParseConfigsToLuaTable(config_paths, extra_model_params=None,
                           save=False, save_name='final_training_config.yml',
                           logdir=None):
  """Maps config_paths and extra_model_params to a Luatable-like object."""
  # Parse the yaml config files / command line flags into one dict.
  merged = LoadConfigDict(config_paths, extra_model_params)
  if save:
    WriteConfigAsYaml(merged, logdir, save_name)
  # Expose the merged dictionary through dot notation.
  return RecursivelyConvertToLuatable(merged)
def RecursivelyConvertToLuatable(yaml_dict):
  """Converts a (possibly nested) dictionary to a LuaTable-like T object."""
  table = T(yaml_dict) if isinstance(yaml_dict, dict) else yaml_dict
  for key in list(table.keys()):
    value = table[key]
    if isinstance(value, dict):
      table[key] = RecursivelyConvertToLuatable(value)
  return table
class T(object):
  """A lua-table-style wrapper around a python dict.

  Behaves like a dictionary except that keys can also be read and written
  with attribute (dot) notation, so keys should be valid identifier names.
  Avoid the key "keys", which collides with the method of the same name.
  Usage example:
  >> t = T(a=5,b='kaw', c=T(v=[],x=33))
  >> t.a
  5
  >> t.z = None
  >> print t
  T(a=5, z=None, c=T(x=33, v=[]), b='kaw')
  >> t2 = T({'h':'f','x':4})
  >> t2
  T(h='f', x=4)
  >> t2['x']
  4.
  """
  def __init__(self, *args, **kwargs):
    # Accept either one positional dict OR keyword arguments, never both.
    if len(args) > 1 or (len(args) == 1 and kwargs):
      errmsg = '''constructor only allows a single dict as a positional argument or keyword arguments'''
      raise ValueError(errmsg)
    if len(args) == 1 and isinstance(args[0], dict):
      self.__dict__.update(args[0])
    else:
      self.__dict__.update(kwargs)
  def __repr__(self):
    pairs = ', '.join(
        '%s=%s' % (str(key), repr(value))
        for key, value in self.__dict__.items())
    return 'T(' + pairs + ')'
  def __getitem__(self, key):
    return self.__dict__[key]
  def __setitem__(self, key, val):
    self.__dict__[key] = val
  def __delitem__(self, key):
    del self.__dict__[key]
  def __iter__(self):
    return iter(self.__dict__)
  def __len__(self):
    return len(self.__dict__)
  def keys(self):
    # Needed for dict(T( ... )) to work.
    return self.__dict__.keys()
  def iteritems(self):
    return self.__dict__.iteritems()
  def items(self):
    return self.__dict__.items()
class FlipGradientBuilder(object):
  """Builds ops that are the identity forward but negate (and scale) gradients.

  Each call wraps `x` in a tf.identity whose registered gradient returns
  `-grad * l`, which reverses the gradient flowing into whatever produced `x`.
  """
  def __init__(self):
    # Counter so each call registers a uniquely named gradient function.
    self.num_calls = 0
  def __call__(self, x, l=1.0):
    grad_name = "FlipGradient%d" % self.num_calls
    # Register a gradient that negates the incoming gradient, scaled by l.
    @ops.RegisterGradient(grad_name)
    def _flip_gradients(op, grad):
      return [tf.negative(grad) * l]
    g = tf.get_default_graph()
    # Route the gradient of the identity op below through _flip_gradients.
    with g.gradient_override_map({"Identity": grad_name}):
      y = tf.identity(x)
    self.num_calls += 1
    return y
| [
"tensorflow.gfile.Exists",
"yaml.load",
"tensorflow.logging.info",
"tensorflow.identity",
"yaml.dump",
"tensorflow.negative",
"tensorflow.Variable",
"tensorflow.gfile.Walk",
"tensorflow.nn.conv2d",
"tensorflow.get_default_graph",
"os.path.join",
"tensorflow.truncated_normal",
"os.path.abspat... | [((6897, 6935), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (6916, 6935), True, 'import tensorflow as tf\n'), ((6945, 6965), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (6956, 6965), True, 'import tensorflow as tf\n'), ((7006, 7035), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (7017, 7035), True, 'import tensorflow as tf\n'), ((7045, 7065), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (7056, 7065), True, 'import tensorflow as tf\n'), ((7095, 7151), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (7107, 7151), True, 'import tensorflow as tf\n'), ((7184, 7259), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (7198, 7259), True, 'import tensorflow as tf\n'), ((7419, 7445), 'numpy.random.permutation', 'np.random.permutation', (['num'], {}), '(num)\n', (7440, 7445), True, 'import numpy as np\n'), ((8500, 8526), 'os.path.expanduser', 'os.path.expanduser', (['topdir'], {}), '(topdir)\n', (8518, 8526), False, 'import os\n'), ((8571, 8592), 'tensorflow.gfile.Walk', 'tf.gfile.Walk', (['topdir'], {}), '(topdir)\n', (8584, 8592), True, 'import tensorflow as tf\n'), ((9529, 9559), 'os.path.join', 'os.path.join', (['logdir', 'filename'], {}), '(logdir, filename)\n', (9541, 9559), False, 'import os\n'), ((9643, 9697), 'tensorflow.logging.info', 'tf.logging.info', (['"""wrote config to %s"""', 'config_filename'], {}), "('wrote config to %s', config_filename)\n", (9658, 9697), True, 'import tensorflow as tf\n'), ((9895, 9992), 'yaml.add_constructor', 'yaml.add_constructor', (['yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG', 
'NoDuplicatesConstructor'], {}), '(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n NoDuplicatesConstructor)\n', (9915, 9992), False, 'import yaml\n'), ((6344, 6361), 'tensorflow.stack', 'tf.stack', (['outputs'], {}), '(outputs)\n', (6352, 6361), True, 'import tensorflow as tf\n'), ((9454, 9477), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['logdir'], {}), '(logdir)\n', (9469, 9477), True, 'import tensorflow as tf\n'), ((9483, 9508), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['logdir'], {}), '(logdir)\n', (9500, 9508), True, 'import tensorflow as tf\n'), ((9567, 9603), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['config_filename', '"""w"""'], {}), "(config_filename, 'w')\n", (9581, 9603), True, 'import tensorflow as tf\n'), ((10801, 10824), 'yaml.dump', 'yaml.dump', (['final_config'], {}), '(final_config)\n', (10810, 10824), False, 'import yaml\n'), ((11007, 11022), 'yaml.load', 'yaml.load', (['item'], {}), '(item)\n', (11016, 11022), False, 'import yaml\n'), ((14452, 14483), 'tensorflow.python.framework.ops.RegisterGradient', 'ops.RegisterGradient', (['grad_name'], {}), '(grad_name)\n', (14472, 14483), False, 'from tensorflow.python.framework import ops\n'), ((14565, 14587), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (14585, 14587), True, 'import tensorflow as tf\n'), ((6079, 6138), 'tensorflow.map_fn', 'tf.map_fn', (['fn', 'elems', 'dtype', 'parallel_iterations', 'back_prop'], {}), '(fn, elems, dtype, parallel_iterations, back_prop)\n', (6088, 6138), True, 'import tensorflow as tf\n'), ((9081, 9206), 'yaml.constructor.ConstructorError', 'ConstructorError', (['"""while constructing a mapping"""', 'node.start_mark', "('found duplicate key (%s)' % key)", 'key_node.start_mark'], {}), "('while constructing a mapping', node.start_mark, \n 'found duplicate key (%s)' % key, key_node.start_mark)\n", (9097, 9206), False, 'from yaml.constructor import ConstructorError\n'), ((9622, 9639), 'yaml.dump', 'yaml.dump', (['config'], 
{}), '(config)\n', (9631, 9639), False, 'import yaml\n'), ((10371, 10399), 'os.path.abspath', 'os.path.abspath', (['config_path'], {}), '(config_path)\n', (10386, 10399), False, 'import os\n'), ((10406, 10460), 'tensorflow.logging.info', 'tf.logging.info', (['"""Loading config from %s"""', 'config_path'], {}), "('Loading config from %s', config_path)\n", (10421, 10460), True, 'import tensorflow as tf\n'), ((14657, 14671), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (14668, 14671), True, 'import tensorflow as tf\n'), ((5682, 5741), 'tensorflow.map_fn', 'tf.map_fn', (['fn', 'elems', 'dtype', 'parallel_iterations', 'back_prop'], {}), '(fn, elems, dtype, parallel_iterations, back_prop)\n', (5691, 5741), True, 'import tensorflow as tf\n'), ((6173, 6190), 'tensorflow.unstack', 'tf.unstack', (['elems'], {}), '(elems)\n', (6183, 6190), True, 'import tensorflow as tf\n'), ((10547, 10569), 'yaml.load', 'yaml.load', (['config_file'], {}), '(config_file)\n', (10556, 10569), False, 'import yaml\n'), ((5765, 5781), 'tensorflow.unstack', 'tf.unstack', (['elem'], {}), '(elem)\n', (5775, 5781), True, 'import tensorflow as tf\n'), ((6578, 6600), 'tensorflow.stack', 'tf.stack', (['output_tuple'], {}), '(output_tuple)\n', (6586, 6600), True, 'import tensorflow as tf\n'), ((8635, 8656), 'os.path.join', 'os.path.join', (['path', 'i'], {}), '(path, i)\n', (8647, 8656), False, 'import os\n'), ((14533, 14550), 'tensorflow.negative', 'tf.negative', (['grad'], {}), '(grad)\n', (14544, 14550), True, 'import tensorflow as tf\n')] |
import matplotlib.pyplot as plt
import numpy as np
def visualise_array(Xs, Ys, A, samples=None):
    """Renders A as a grayscale image over the grid spanned by Xs and Ys.

    If `samples` is given, its (x, y) rows are overlaid as blue crosses.
    """
    image = plt.imshow(A, origin='lower')
    image.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
    image.set_interpolation('nearest')
    image.set_cmap('gray')
    if samples is not None:
        plt.plot(samples[:, 0], samples[:, 1], 'bx')
    plt.ylim([Ys.min(), Ys.max()])
    plt.xlim([Xs.min(), Xs.max()])
def pdf_grid(Xs, Ys, est):
    """Evaluates est's log-pdf and gradient norm on the grid Xs x Ys.

    Returns two arrays indexed as [y_index, x_index].
    """
    log_densities = np.zeros((len(Xs), len(Ys)))
    grad_norms = np.zeros(log_densities.shape)
    # this is in-efficient, log_pdf_multiple on a 2d array is faster
    for x_index, x in enumerate(Xs):
        for y_index, y in enumerate(Ys):
            point = np.array([x, y])
            log_densities[y_index, x_index] = est.log_pdf(point)
            grad_norms[y_index, x_index] = np.linalg.norm(est.grad(point))
    return log_densities, grad_norms
def visualise_fit_2d(est, X, Xs=None, Ys=None):
    """Plots the estimator's log-pdf and gradient-norm surfaces with the data X."""
    plt.figure()
    if Xs is None:
        Xs = np.linspace(-5, 5)
    if Ys is None:
        Ys = np.linspace(-5, 5)
    D, G = pdf_grid(Xs, Ys, est)
    # Side-by-side panels: log pdf on the left, gradient norm on the right.
    for position, surface, heading in ((121, D, "log pdf"),
                                       (122, G, "gradient norm")):
        plt.subplot(position)
        visualise_array(Xs, Ys, surface, X)
        plt.title(heading)
plt.tight_layout() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.tight_layout"
] | [((107, 136), 'matplotlib.pyplot.imshow', 'plt.imshow', (['A'], {'origin': '"""lower"""'}), "(A, origin='lower')\n", (117, 136), True, 'import matplotlib.pyplot as plt\n'), ((482, 499), 'numpy.zeros', 'np.zeros', (['D.shape'], {}), '(D.shape)\n', (490, 499), True, 'import numpy as np\n'), ((872, 884), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (882, 884), True, 'import matplotlib.pyplot as plt\n'), ((1040, 1056), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1051, 1056), True, 'import matplotlib.pyplot as plt\n'), ((1095, 1115), 'matplotlib.pyplot.title', 'plt.title', (['"""log pdf"""'], {}), "('log pdf')\n", (1104, 1115), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1142), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1137, 1142), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1207), 'matplotlib.pyplot.title', 'plt.title', (['"""gradient norm"""'], {}), "('gradient norm')\n", (1190, 1207), True, 'import matplotlib.pyplot as plt\n'), ((1217, 1235), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1233, 1235), True, 'import matplotlib.pyplot as plt\n'), ((293, 337), 'matplotlib.pyplot.plot', 'plt.plot', (['samples[:, 0]', 'samples[:, 1]', '"""bx"""'], {}), "(samples[:, 0], samples[:, 1], 'bx')\n", (301, 337), True, 'import matplotlib.pyplot as plt\n'), ((917, 935), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)'], {}), '(-5, 5)\n', (928, 935), True, 'import numpy as np\n'), ((973, 991), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)'], {}), '(-5, 5)\n', (984, 991), True, 'import numpy as np\n'), ((660, 676), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (668, 676), True, 'import numpy as np\n')] |
import os
import random
import cv2
import numpy as np
class ImageGenerator:
    """Generates synthetic licence-plate images plus per-glyph label files.

    Digit/character glyph images are pasted onto plate backgrounds, and for
    every pasted glyph a normalised bounding-box line
    (``class x_center y_center width height``) is appended to a text file
    under ./result/labels/.

    NOTE(review): ``Type_A``/``Type_B`` reference a module-level list
    ``names`` (loaded in ``__main__``) to map glyph names to class indices.
    """
    def __init__(self, save_path, plates_path, nums_path, chars_path, transparent=False):
        """Loads plate backgrounds and the digit/character glyph images.

        Args:
            save_path: Directory where generated plate images are written.
            plates_path: Directory holding plate background images.
            nums_path: Directory holding digit glyph images.
            chars_path: Directory holding character glyph images.
            transparent: If True, load glyphs with an alpha channel, turn
                fully transparent pixels white, then drop the alpha channel.
        """
        self.save_path = save_path
        # Plate
        self.list_ = os.listdir(plates_path)
        self.plate = plates_path
        # Load Numbers
        file_path = nums_path
        file_list = os.listdir(file_path)
        self.Number = list()
        self.number_list = list()
        for file_ in file_list:
            img_path = os.path.join(file_path, file_)
            if transparent:
                img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
                trans_mask = img[:, :, 3] == 0
                img[trans_mask] = [255, 255, 255, 255]
                img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
            else:
                img = cv2.imread(img_path)
            self.Number.append(img)
            # Glyph name = file name minus its 4-character extension.
            self.number_list.append(file_[0:-4])
        # Load Chars
        file_path = chars_path
        file_list = os.listdir(file_path)
        self.char_list = list()
        self.Char1 = list()
        for file_ in file_list:
            img_path = os.path.join(file_path, file_)
            if transparent:
                img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
                trans_mask = img[:, :, 3] == 0
                img[trans_mask] = [255, 255, 255, 255]
                img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
            else:
                img = cv2.imread(img_path)
            self.Char1.append(img)
            self.char_list.append(file_[0:-4])
    @staticmethod
    def add(background_image, char):
        """Blends a glyph image onto a background patch of the same size.

        Near-black glyph pixels (gray value <= 10) are kept from the glyph;
        everything else keeps the background.
        """
        roi = background_image
        img2gray = cv2.cvtColor(char, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)
        img1_bg = cv2.bitwise_and(roi, roi, mask=mask)
        img2_fg = cv2.bitwise_and(char, char, mask=mask_inv)
        dst = cv2.add(img1_bg, img2_fg)
        return dst
    @staticmethod
    def random_bright(img):
        """Randomly scales the HSV value channel by a factor in [0.5, 1.5)."""
        img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        img = np.array(img, dtype=np.float64)
        random_bright = .5 + np.random.uniform()
        img[:, :, 2] = img[:, :, 2] * random_bright
        # Clip to the valid 8-bit range before converting back.
        img[:, :, 2][img[:, :, 2] > 255] = 255
        img = np.array(img, dtype=np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
        return img
    def Type_A(self, num, save=False):
        """Generates `num` images per background with a 520x110 plate layout.

        Layout: 2 digits, 1 character, 4 digits. For each glyph a YOLO-style
        label line is written to ./result/labels/image_a_<n>.txt.

        Args:
            num: Number of plates to generate per background image.
            save: If True, write the generated images to self.save_path.
        """
        number = [cv2.resize(number, (56, 83)) for number in self.Number]
        char = [cv2.resize(char1, (60, 83)) for char1 in self.Char1]
        count_a = 0
        for p in self.list_:
            plate = cv2.imread(os.path.join(self.plate, p))
            for i in range(num):
                # NOTE(review): label files are opened in append mode, so
                # re-running the generator appends duplicate lines.
                f = open(f'./result/labels/image_a_{count_a}.txt', 'a')
                Plate = cv2.resize(plate, (520, 110))
                label = "Z"
                # row -> y , col -> x
                row, col = 13, 35
                # number 1
                x1, y1 = col, row
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 56, :] = self.add(Plate[row:row + 83, col:col + 56, :],
                                                                self.random_bright(number[rand_int]))
                x2, y2 = x1 + 56, y1 + 83
                # Normalised YOLO box: centre and size relative to 520x110.
                x_r, y_r = (x1 + x2) / (2 * 520), (y1 + y2) / (2 * 110)
                w_r, h_r = (56 / 520), (83 / 110)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 56
                x2, y1 = x2, y1
                # number 2
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 56, :] = self.add(Plate[row:row + 83, col:col + 56, :],
                                                                self.random_bright(number[rand_int]))
                x3, y2 = x2 + 56, y2
                x_r, y_r = (x2 + x3) / (2 * 520), (y1 + y2) / (2 * 110)
                w_r, h_r = (56 / 520), (83 / 110)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 56
                x3, y1 = x3, y1
                # character 3 (cycled deterministically via i % 40, 60px wide)
                label += self.char_list[i % 40]
                Plate[row:row + 83, col:col + 60, :] = self.add(Plate[row:row + 83, col:col + 60, :],
                                                                self.random_bright(char[i % 40]))
                x4, y2 = x3 + 60, y2
                x_r, y_r = (x3 + x4) / (2 * 520), (y1 + y2) / (2 * 110)
                w_r, h_r = (60 / 520), (83 / 110)
                class_name = names.index(self.char_list[i % 40])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                # 36px gap between the character and the last four digits.
                col += (60 + 36)
                x4, y1 = col, y1
                # number 4
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 56, :] = self.add(Plate[row:row + 83, col:col + 56, :],
                                                                self.random_bright(number[rand_int]))
                x5, y2 = x4 + 56, y2
                x_r, y_r = (x4 + x5) / (2 * 520), (y1 + y2) / (2 * 110)
                w_r, h_r = (56 / 520), (83 / 110)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 56
                x5, y1 = x5, y1
                # number 5
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 56, :] = self.add(Plate[row:row + 83, col:col + 56, :],
                                                                self.random_bright(number[rand_int]))
                x6, y2 = x5 + 56, y2
                x_r, y_r = (x5 + x6) / (2 * 520), (y1 + y2) / (2 * 110)
                w_r, h_r = (56 / 520), (83 / 110)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 56
                x6, y1 = x6, y1
                # number 6
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 56, :] = self.add(Plate[row:row + 83, col:col + 56, :],
                                                                self.random_bright(number[rand_int]))
                x7, y2 = x6 + 56, y2
                x_r, y_r = (x6 + x7) / (2 * 520), (y1 + y2) / (2 * 110)
                w_r, h_r = (56 / 520), (83 / 110)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 56
                x7, y1 = x7, y1
                # number 7
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 56, :] = self.add(Plate[row:row + 83, col:col + 56, :],
                                                                self.random_bright(number[rand_int]))
                x8, y2 = x7 + 56, y2
                x_r, y_r = (x7 + x8) / (2 * 520), (y1 + y2) / (2 * 110)
                w_r, h_r = (56 / 520), (83 / 110)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 56
                f.close()
                if save:
                    cv2.imwrite(self.save_path + "image_a_" + str(count_a) + ".jpg", Plate)
                    count_a += 1
                else:
                    pass
    def Type_B(self, num, save=False):
        """Generates `num` images per background with a 355x155 plate layout.

        Same glyph sequence as Type_A (2 digits, 1 character, 4 digits) but
        with 45px-wide digits and a 49x70 character; labels are written to
        ./result/labels/image_b_<n>.txt.

        Args:
            num: Number of plates to generate per background image.
            save: If True, write the generated images to self.save_path.
        """
        number = [cv2.resize(number, (45, 83)) for number in self.Number]
        char = [cv2.resize(char1, (49, 70)) for char1 in self.Char1]
        count_b = 0
        for p in self.list_:
            plate = cv2.imread(os.path.join(self.plate, p))
            for i in range(num):
                # NOTE(review): opened in append mode — re-runs append lines.
                f = open(f'./result/labels/image_b_{count_b}.txt', 'a')
                Plate = cv2.resize(plate, (355, 155))
                label = ''
                row, col = 45, 15  # row + 83, col + 45
                # number 1
                x1, y1 = col, row
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 45, :] = self.add(Plate[row:row + 83, col:col + 45, :],
                                                                self.random_bright(number[rand_int]))
                x2, y2 = x1 + 45, y1 + 83
                # Normalised YOLO box: centre and size relative to 355x155.
                x_r, y_r = (x1 + x2) / (2 * 355), (y1 + y2) / (2 * 155)
                w_r, h_r = (45 / 355), (83 / 155)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 45
                x2, y1 = x2, y1
                # number 2
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 45, :] = self.add(Plate[row:row + 83, col:col + 45, :],
                                                                self.random_bright(number[rand_int]))
                x3, y2 = x2 + 45, y2
                x_r, y_r = (x2 + x3) / (2 * 355), (y1 + y2) / (2 * 155)
                w_r, h_r = (45 / 355), (83 / 155)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 45
                x3, y1 = x3, y1
                # character 3 (49x70 glyph placed 12px lower, 2px right)
                label += self.char_list[i % 40]
                Plate[row + 12:row + 82, col + 2:col + 49 + 2, :] = self.add(
                    Plate[row + 12:row + 82, col + 2:col + 49 + 2, :],
                    self.random_bright(char[i % 40]))
                x4, y2 = x3 + 49, y2
                x_r, y_r = (x3 + x4) / (2 * 355), (y1 + y2) / (2 * 155)
                w_r, h_r = (49 / 355), (70 / 155)
                class_name = names.index(self.char_list[i % 40])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 49 + 2
                x4, y1 = col, y1
                # number 4
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                # NOTE(review): unlike the other digits, the source patch
                # (col:col+45) and destination (col+2:col+47) differ here —
                # verify the 2px shift is intentional.
                Plate[row:row + 83, col + 2:col + 45 + 2, :] = self.add(Plate[row:row + 83, col:col + 45, :],
                                                                        self.random_bright(number[rand_int]))
                x5, y2 = x4 + 45 + 2, y2
                x_r, y_r = (x4 + x5) / (2 * 355), (y1 + y2) / (2 * 155)
                w_r, h_r = (45 / 355), (83 / 155)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 45 + 2
                x5, y1 = col, y1
                # number 5
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 45, :] = self.add(Plate[row:row + 83, col:col + 45, :],
                                                                self.random_bright(number[rand_int]))
                x6, y2 = x5 + 45, y2
                x_r, y_r = (x5 + x6) / (2 * 355), (y1 + y2) / (2 * 155)
                w_r, h_r = (45 / 355), (83 / 155)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 45
                x6, y1 = x6, y1
                # number 6
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 45, :] = self.add(Plate[row:row + 83, col:col + 45, :],
                                                                self.random_bright(number[rand_int]))
                x7, y2 = x6 + 45, y2
                x_r, y_r = (x6 + x7) / (2 * 355), (y1 + y2) / (2 * 155)
                w_r, h_r = (45 / 355), (83 / 155)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 45
                x7, y1 = x7, y1
                # number 7
                rand_int = random.randint(0, 9)
                label += self.number_list[rand_int]
                Plate[row:row + 83, col:col + 45, :] = self.add(Plate[row:row + 83, col:col + 45, :],
                                                                self.random_bright(number[rand_int]))
                x8, y2 = x7 + 45, y2
                x_r, y_r = (x7 + x8) / (2 * 355), (y1 + y2) / (2 * 155)
                w_r, h_r = (45 / 355), (83 / 155)
                class_name = names.index(self.number_list[rand_int])
                txt = f'{class_name} {x_r} {y_r} {w_r} {h_r}\n'
                f.write(txt)
                col += 45
                f.close()
                if save:
                    cv2.imwrite(self.save_path + "image_b_" + str(count_b) + ".jpg", Plate)
                    count_b += 1
                    # cv2.imwrite(self.save_path + label + ".jpg", Plate)  # If you want to save labels as image name
                else:
                    print('Images are not saved')
if __name__ == '__main__':
    # `names` stays module-global: ImageGenerator maps glyph names to
    # class indices through it.
    with open('./assets/names.txt', 'r') as names_file:
        names = [line.strip() for line in names_file.readlines()]
    # Make sure the output directory tree exists (parents before children).
    for result_dir in ('./result', './result/images', './result/labels'):
        if not os.path.exists(result_dir):
            os.mkdir(result_dir)
    Type_A1 = ImageGenerator(save_path='./result/images/',
                             plates_path='./assets/plates/type_a',
                             nums_path='./assets/nums/',
                             chars_path='./assets/chars/')
    Type_B1 = ImageGenerator(save_path='./result/images/',
                             plates_path='./assets/plates/type_b',
                             nums_path='./assets/nums/',
                             chars_path='./assets/chars/')
    num_img = 120
    Type_A1.Type_A(num_img, save=True)
    print("Type 1 finish")
    Type_B1.Type_B(num_img, save=True)
    print("Type 2 finish")
| [
"os.mkdir",
"numpy.random.uniform",
"cv2.bitwise_not",
"random.randint",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.threshold",
"cv2.add",
"os.path.exists",
"cv2.imread",
"numpy.array",
"os.path.join",
"os.listdir",
"cv2.resize"
] | [((240, 263), 'os.listdir', 'os.listdir', (['plates_path'], {}), '(plates_path)\n', (250, 263), False, 'import os\n'), ((372, 393), 'os.listdir', 'os.listdir', (['file_path'], {}), '(file_path)\n', (382, 393), False, 'import os\n'), ((1018, 1039), 'os.listdir', 'os.listdir', (['file_path'], {}), '(file_path)\n', (1028, 1039), False, 'import os\n'), ((1691, 1729), 'cv2.cvtColor', 'cv2.cvtColor', (['char', 'cv2.COLOR_BGR2GRAY'], {}), '(char, cv2.COLOR_BGR2GRAY)\n', (1703, 1729), False, 'import cv2\n'), ((1750, 1801), 'cv2.threshold', 'cv2.threshold', (['img2gray', '(10)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img2gray, 10, 255, cv2.THRESH_BINARY)\n', (1763, 1801), False, 'import cv2\n'), ((1821, 1842), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask'], {}), '(mask)\n', (1836, 1842), False, 'import cv2\n'), ((1861, 1897), 'cv2.bitwise_and', 'cv2.bitwise_and', (['roi', 'roi'], {'mask': 'mask'}), '(roi, roi, mask=mask)\n', (1876, 1897), False, 'import cv2\n'), ((1916, 1958), 'cv2.bitwise_and', 'cv2.bitwise_and', (['char', 'char'], {'mask': 'mask_inv'}), '(char, char, mask=mask_inv)\n', (1931, 1958), False, 'import cv2\n'), ((1973, 1998), 'cv2.add', 'cv2.add', (['img1_bg', 'img2_fg'], {}), '(img1_bg, img2_fg)\n', (1980, 1998), False, 'import cv2\n'), ((2080, 2116), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HSV'], {}), '(img, cv2.COLOR_RGB2HSV)\n', (2092, 2116), False, 'import cv2\n'), ((2131, 2162), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.float64'}), '(img, dtype=np.float64)\n', (2139, 2162), True, 'import numpy as np\n'), ((2325, 2354), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (2333, 2354), True, 'import numpy as np\n'), ((2369, 2405), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_HSV2RGB'], {}), '(img, cv2.COLOR_HSV2RGB)\n', (2381, 2405), False, 'import cv2\n'), ((14162, 14188), 'os.path.exists', 'os.path.exists', (['"""./result"""'], {}), "('./result')\n", (14176, 14188), False, 'import 
os\n'), ((14198, 14218), 'os.mkdir', 'os.mkdir', (['"""./result"""'], {}), "('./result')\n", (14206, 14218), False, 'import os\n'), ((14230, 14263), 'os.path.exists', 'os.path.exists', (['"""./result/images"""'], {}), "('./result/images')\n", (14244, 14263), False, 'import os\n'), ((14273, 14300), 'os.mkdir', 'os.mkdir', (['"""./result/images"""'], {}), "('./result/images')\n", (14281, 14300), False, 'import os\n'), ((14312, 14345), 'os.path.exists', 'os.path.exists', (['"""./result/labels"""'], {}), "('./result/labels')\n", (14326, 14345), False, 'import os\n'), ((14355, 14382), 'os.mkdir', 'os.mkdir', (['"""./result/labels"""'], {}), "('./result/labels')\n", (14363, 14382), False, 'import os\n'), ((512, 542), 'os.path.join', 'os.path.join', (['file_path', 'file_'], {}), '(file_path, file_)\n', (524, 542), False, 'import os\n'), ((1155, 1185), 'os.path.join', 'os.path.join', (['file_path', 'file_'], {}), '(file_path, file_)\n', (1167, 1185), False, 'import os\n'), ((2192, 2211), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2209, 2211), True, 'import numpy as np\n'), ((2484, 2512), 'cv2.resize', 'cv2.resize', (['number', '(56, 83)'], {}), '(number, (56, 83))\n', (2494, 2512), False, 'import cv2\n'), ((2556, 2583), 'cv2.resize', 'cv2.resize', (['char1', '(60, 83)'], {}), '(char1, (60, 83))\n', (2566, 2583), False, 'import cv2\n'), ((8194, 8222), 'cv2.resize', 'cv2.resize', (['number', '(45, 83)'], {}), '(number, (45, 83))\n', (8204, 8222), False, 'import cv2\n'), ((8266, 8293), 'cv2.resize', 'cv2.resize', (['char1', '(49, 70)'], {}), '(char1, (49, 70))\n', (8276, 8293), False, 'import cv2\n'), ((593, 635), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_UNCHANGED'], {}), '(img_path, cv2.IMREAD_UNCHANGED)\n', (603, 635), False, 'import cv2\n'), ((760, 797), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGRA2BGR'], {}), '(img, cv2.COLOR_BGRA2BGR)\n', (772, 797), False, 'import cv2\n'), ((838, 858), 'cv2.imread', 'cv2.imread', 
(['img_path'], {}), '(img_path)\n', (848, 858), False, 'import cv2\n'), ((1236, 1278), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_UNCHANGED'], {}), '(img_path, cv2.IMREAD_UNCHANGED)\n', (1246, 1278), False, 'import cv2\n'), ((1403, 1440), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGRA2BGR'], {}), '(img, cv2.COLOR_BGRA2BGR)\n', (1415, 1440), False, 'import cv2\n'), ((1481, 1501), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1491, 1501), False, 'import cv2\n'), ((2689, 2716), 'os.path.join', 'os.path.join', (['self.plate', 'p'], {}), '(self.plate, p)\n', (2701, 2716), False, 'import os\n'), ((2847, 2876), 'cv2.resize', 'cv2.resize', (['plate', '(520, 110)'], {}), '(plate, (520, 110))\n', (2857, 2876), False, 'import cv2\n'), ((3066, 3086), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (3080, 3086), False, 'import random\n'), ((3782, 3802), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (3796, 3802), False, 'import random\n'), ((5155, 5175), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (5169, 5175), False, 'import random\n'), ((5866, 5886), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (5880, 5886), False, 'import random\n'), ((6577, 6597), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (6591, 6597), False, 'import random\n'), ((7288, 7308), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (7302, 7308), False, 'import random\n'), ((8399, 8426), 'os.path.join', 'os.path.join', (['self.plate', 'p'], {}), '(self.plate, p)\n', (8411, 8426), False, 'import os\n'), ((8557, 8586), 'cv2.resize', 'cv2.resize', (['plate', '(355, 155)'], {}), '(plate, (355, 155))\n', (8567, 8586), False, 'import cv2\n'), ((8759, 8779), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (8773, 8779), False, 'import random\n'), ((9476, 9496), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), 
'(0, 9)\n', (9490, 9496), False, 'import random\n'), ((10847, 10867), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (10861, 10867), False, 'import random\n'), ((11583, 11603), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (11597, 11603), False, 'import random\n'), ((12294, 12314), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (12308, 12314), False, 'import random\n'), ((13006, 13026), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (13020, 13026), False, 'import random\n')] |
import numpy as np
import pytest
from forest_benchmarking import qubit_spectroscopy as qs
@pytest.fixture()
def mock_rabi():
    """Provide a synthetic Rabi period that is deliberately not exactly 2*pi."""
    return {'rabi_per': np.pi * 2.15}
def test_sinusoidal_waveform(mock_rabi):
    """Fitting a noiseless sinusoid should recover the mocked Rabi period."""
    angles = np.linspace(0, 2 * np.pi, 30)
    signal = np.asarray([0.5 + 0.5 * np.sin(angle * 2 * np.pi / mock_rabi['rabi_per'])
                         for angle in angles])
    params, _params_errs = qs.fit_to_sinusoidal_waveform(angles, signal)
    # params[2] is the fitted angular frequency; invert it to get the period.
    assert np.isclose(2 * np.pi / params[2], mock_rabi['rabi_per'])
| [
"forest_benchmarking.qubit_spectroscopy.fit_to_sinusoidal_waveform",
"pytest.fixture",
"numpy.isclose",
"numpy.sin",
"numpy.linspace"
] | [((94, 110), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (108, 110), False, 'import pytest\n'), ((284, 313), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(30)'], {}), '(0, 2 * np.pi, 30)\n', (295, 313), True, 'import numpy as np\n'), ((471, 514), 'forest_benchmarking.qubit_spectroscopy.fit_to_sinusoidal_waveform', 'qs.fit_to_sinusoidal_waveform', (['thetas', 'data'], {}), '(thetas, data)\n', (500, 514), True, 'from forest_benchmarking import qubit_spectroscopy as qs\n'), ((527, 583), 'numpy.isclose', 'np.isclose', (['(2 * np.pi / params[2])', "mock_rabi['rabi_per']"], {}), "(2 * np.pi / params[2], mock_rabi['rabi_per'])\n", (537, 583), True, 'import numpy as np\n'), ((343, 392), 'numpy.sin', 'np.sin', (["(theta * 2 * np.pi / mock_rabi['rabi_per'])"], {}), "(theta * 2 * np.pi / mock_rabi['rabi_per'])\n", (349, 392), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# =================================================================================================
# A minimal example that defines a triangle mesh of a cylinder with multiple circular
# cross-sections, riggs it with an armature, and animates the mesh using dual-quaternion skinning.
#
# Copyright 2021 <NAME>
# =================================================================================================
from __future__ import division
import sys
import math
from typing import List
from scipy.spatial.transform import Rotation
import numpy as np
import open3d as o3d
from functools import reduce
import transformations as tf
from dq3d import quat, dualquat
from dq3d import op
PROGRAM_EXIT_SUCCESS = 0


def compute_anchor_weight_python(anchor_to_point_distance, node_coverage):
    """Gaussian skinning weight: exp(-d^2 / (2 * coverage^2)).

    Returns 1.0 at distance zero and decays smoothly with distance,
    controlled by the ``node_coverage`` scale.
    """
    squared_distance = anchor_to_point_distance * anchor_to_point_distance
    return math.exp(-squared_distance / (2 * node_coverage * node_coverage))
class Viewer:
    """Builds a cylinder mesh rigged to a small armature and animates it with
    dual-quaternion skinning in an Open3D window."""
    def __init__(self):
        # Mesh / armature configuration.
        cross_section_count = 20
        cylinder_height = 5.0
        self.node_count = 3
        node_coverage = 1.2  # Gaussian falloff scale for the skinning weights.
        # Armature nodes are spaced evenly along the cylinder's y axis.
        node_y_coordinates = np.linspace(0.0, cylinder_height, self.node_count)
        self.edge_lengths = [node_y_coordinates[i_node + 1] - node_y_coordinates[i_node] for i_node in range(self.node_count - 1)]
        self.node_locations = np.array([[0.0, y, 0.0] for y in node_y_coordinates])
        # Rest-pose dual quaternions: identity rotation; each non-root node is
        # offset by one edge length along y (relative offsets, not cumulative).
        self.node_dual_quaternions = [dualquat(quat.identity())]
        for edge_length in self.edge_lengths:
            self.node_dual_quaternions.append(dualquat(quat.identity(), [0.0, edge_length, 0.0]))
        self.cylinder_mesh = o3d.geometry.TriangleMesh.create_cylinder(0.5, 5.0, 20, cross_section_count)
        self.cylinder_mesh.compute_vertex_normals()
        # Rotate the cylinder so its axis lies along y (presumably Open3D
        # creates it along z -- TODO confirm), then lift it so its base sits
        # at the origin (half of the 5.0 height).
        initial_rotation: Rotation = Rotation.from_euler('x', 90, degrees=True)
        self.cylinder_mesh.rotate(initial_rotation.as_matrix())
        self.cylinder_mesh.translate(np.array([0.0, 2.5, 0.0]))
        vertices = np.array(self.cylinder_mesh.vertices)
        self.mesh_coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
            size=0.6, origin=[-2, 0, 0])
        # Per-vertex skinning weights: Gaussian of the squared distance from
        # each vertex to each armature node.
        vertex_to_anchor_vectors = np.dstack([vertices - node_location for node_location in self.node_locations])
        vertex_to_anchor_square_distances = np.sum(vertex_to_anchor_vectors ** 2, axis=1)
        self.weights = np.exp(-(vertex_to_anchor_square_distances / (2 * node_coverage * node_coverage)))
        self.vertices = vertices  # rest-pose vertices, deformed every frame
        self.i_frame = 0
        self.angle_increment = 0.5  # degrees of additional bend per frame
    def run_visualizer(self):
        """Open the Open3D window and register the per-frame deform callback."""
        def deform(visualizer):
            # Called once per rendered frame by Open3D.
            self.i_frame += 1
            # Stop animating once the total bend reaches 90 degrees.
            if self.i_frame > 90 / self.angle_increment:
                return
            # Each node past the root gets a progressively larger z rotation.
            per_node_increment = self.angle_increment / (self.node_count - 1)
            node_angles_degrees = [self.i_frame * per_node_increment * i_node for i_node in range(1, self.node_count)]
            # transformations from frame 0 to this frame
            node_rotations = [dualquat(quat(*tf.quaternion_from_euler(0.0, 0.0, np.deg2rad(angle)))) for angle in node_angles_degrees]
            transformed_nodes_dual_quaternions: List[dualquat] = [self.node_dual_quaternions[0]]
            for i_node in range(1, self.node_count):
                original_dq = self.node_dual_quaternions[i_node]
                # Conjugate the rotation by the node's rest transform so the
                # rotation pivots about the node rather than the origin.
                transformed_dq: dualquat = original_dq * node_rotations[i_node - 1] * original_dq.inverse()
                transformed_nodes_dual_quaternions.append(transformed_dq)
            # Dual-quaternion linear blending (op.dlb) of the node transforms,
            # weighted per vertex, applied to the rest-pose vertices.
            transformed_vertices = np.array([op.dlb(weight, transformed_nodes_dual_quaternions).transform_point(vertex)
                                              for weight, vertex in zip(self.weights, self.vertices)])
            self.cylinder_mesh.vertices = o3d.utility.Vector3dVector(transformed_vertices)
            self.cylinder_mesh.compute_vertex_normals()
            visualizer.update_geometry(self.cylinder_mesh)
            return False
        o3d.visualization.draw_geometries_with_animation_callback([self.cylinder_mesh, self.mesh_coordinate_frame],
                                                                  callback_function=deform,
                                                                  window_name="DQB Skinning 3D test")
def main():
    """Construct the demo viewer, run its animation loop, and report success."""
    Viewer().run_visualizer()
    return PROGRAM_EXIT_SUCCESS


if __name__ == "__main__":
    sys.exit(main())
| [
"numpy.dstack",
"math.exp",
"numpy.sum",
"numpy.deg2rad",
"open3d.geometry.TriangleMesh.create_cylinder",
"open3d.geometry.TriangleMesh.create_coordinate_frame",
"dq3d.quat.identity",
"open3d.utility.Vector3dVector",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"dq3d.op.dlb",
"open3d.visual... | [((822, 928), 'math.exp', 'math.exp', (['(-(anchor_to_point_distance * anchor_to_point_distance) / (2 *\n node_coverage * node_coverage))'], {}), '(-(anchor_to_point_distance * anchor_to_point_distance) / (2 *\n node_coverage * node_coverage))\n', (830, 928), False, 'import math\n'), ((1114, 1164), 'numpy.linspace', 'np.linspace', (['(0.0)', 'cylinder_height', 'self.node_count'], {}), '(0.0, cylinder_height, self.node_count)\n', (1125, 1164), True, 'import numpy as np\n'), ((1326, 1379), 'numpy.array', 'np.array', (['[[0.0, y, 0.0] for y in node_y_coordinates]'], {}), '([[0.0, y, 0.0] for y in node_y_coordinates])\n', (1334, 1379), True, 'import numpy as np\n'), ((1620, 1696), 'open3d.geometry.TriangleMesh.create_cylinder', 'o3d.geometry.TriangleMesh.create_cylinder', (['(0.5)', '(5.0)', '(20)', 'cross_section_count'], {}), '(0.5, 5.0, 20, cross_section_count)\n', (1661, 1696), True, 'import open3d as o3d\n'), ((1786, 1828), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""x"""', '(90)'], {'degrees': '(True)'}), "('x', 90, degrees=True)\n", (1805, 1828), False, 'from scipy.spatial.transform import Rotation\n'), ((1977, 2014), 'numpy.array', 'np.array', (['self.cylinder_mesh.vertices'], {}), '(self.cylinder_mesh.vertices)\n', (1985, 2014), True, 'import numpy as np\n'), ((2053, 2131), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {'size': '(0.6)', 'origin': '[-2, 0, 0]'}), '(size=0.6, origin=[-2, 0, 0])\n', (2102, 2131), True, 'import open3d as o3d\n'), ((2181, 2266), 'numpy.dstack', 'np.dstack', (['[(vertices - node_location) for node_location in self.node_locations]'], {}), '([(vertices - node_location) for node_location in self.node_locations]\n )\n', (2190, 2266), True, 'import numpy as np\n'), ((2304, 2349), 'numpy.sum', 'np.sum', (['(vertex_to_anchor_vectors ** 2)'], {'axis': '(1)'}), '(vertex_to_anchor_vectors ** 2, axis=1)\n', (2310, 2349), 
True, 'import numpy as np\n'), ((2373, 2459), 'numpy.exp', 'np.exp', (['(-(vertex_to_anchor_square_distances / (2 * node_coverage * node_coverage)))'], {}), '(-(vertex_to_anchor_square_distances / (2 * node_coverage *\n node_coverage)))\n', (2379, 2459), True, 'import numpy as np\n'), ((3975, 4153), 'open3d.visualization.draw_geometries_with_animation_callback', 'o3d.visualization.draw_geometries_with_animation_callback', (['[self.cylinder_mesh, self.mesh_coordinate_frame]'], {'callback_function': 'deform', 'window_name': '"""DQB Skinning 3D test"""'}), "([self.\n cylinder_mesh, self.mesh_coordinate_frame], callback_function=deform,\n window_name='DQB Skinning 3D test')\n", (4032, 4153), True, 'import open3d as o3d\n'), ((1931, 1956), 'numpy.array', 'np.array', (['[0.0, 2.5, 0.0]'], {}), '([0.0, 2.5, 0.0])\n', (1939, 1956), True, 'import numpy as np\n'), ((3776, 3824), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['transformed_vertices'], {}), '(transformed_vertices)\n', (3802, 3824), True, 'import open3d as o3d\n'), ((1428, 1443), 'dq3d.quat.identity', 'quat.identity', ([], {}), '()\n', (1441, 1443), False, 'from dq3d import quat, dualquat\n'), ((1547, 1562), 'dq3d.quat.identity', 'quat.identity', ([], {}), '()\n', (1560, 1562), False, 'from dq3d import quat, dualquat\n'), ((3557, 3607), 'dq3d.op.dlb', 'op.dlb', (['weight', 'transformed_nodes_dual_quaternions'], {}), '(weight, transformed_nodes_dual_quaternions)\n', (3563, 3607), False, 'from dq3d import op\n'), ((3058, 3075), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (3068, 3075), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function
import roslib
#roslib.load_manifest('ros_to_cv_v2')
import sys
import rospy
import cv2
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from depth.msg import cood
x=0  # latest target column (pixel x) received on /object
y=0  # latest target row (pixel y) received on /object
class image_depth:
  """ROS helper: subscribes to the depth image and to target pixel
  coordinates, perspective-warps the depth image, prints the depth at the
  target pixel, and republishes the warped image."""
  def __init__(self):
    # Republish the processed depth image on this topic.
    self.image_pub = rospy.Publisher("image_topic_depth",Image, queue_size=10)
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/camera/depth/image",Image,self.callback)
    rospy.Subscriber("/object", cood, self.callback_depth)
  def callback(self,data):
    """Handle one depth frame: warp a fixed region to full size, report the
    depth at the latest (x, y) target, and republish the result."""
    try:
      cv_image = self.bridge.imgmsg_to_cv2(data, "passthrough")
    except CvBridgeError as e:
      print(e)
    (rows,cols) = cv_image.shape
    print("rows:", rows, "cols:", cols)
    #if cols > 60 and rows > 60 :
     # cv2.circle(cv_image, (300,500), 25, (0,255,0))
    #for a in range(rows):
    # Map the hard-coded source quadrilateral onto the full image size.
    # NOTE(review): the corner coordinates are magic numbers -- presumably
    # calibrated for one camera setup; confirm before reuse.
    pts1 = np.float32([[200,225],[300,225],[300,360],[200,360]])
    pts2 = np.float32([[0,0],[cols,0],[cols,rows],[0,rows]])
    M = cv2.getPerspectiveTransform(pts1,pts2)
    img = cv2.warpPerspective(cv_image,M,(cols,rows))
    cv2.imshow("Perspective",img)
    print("depth", img[y][x]) #row is y and column is x
    cv2.circle(img, (x,y), 15, (0,255,0))
    print("at call back x:",x, "y:", y )
    cv2.imshow('depth image', img)
    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(img, "passthrough"))
    except CvBridgeError as e:
      print(e)
  def callback_depth(self, msg):
    """Store the most recent target pixel from /object in the module-level
    globals ``x`` and ``y`` (read by :meth:`callback`)."""
    #print("depth", img[msg.x][msg.y])
    #cv2.circle(img, (msg.x,msg.y), 15, (0,255,0))
    global x
    global y
    x=msg.x[0]
    y=msg.y[0]
    print("x:",x,"y:", y)
def main(args):
  """Create the depth-image node and process callbacks until shutdown."""
  # Keep a reference so the subscribers registered in __init__ stay alive.
  depth_node = image_depth()
  rospy.init_node('image_depth', anonymous=True)
  try:
    rospy.spin()
  except KeyboardInterrupt:
    print("Shutting down")
  cv2.destroyAllWindows()
if __name__ == '__main__':
    main(sys.argv)
| [
"cv_bridge.CvBridge",
"cv2.warpPerspective",
"rospy.Subscriber",
"cv2.circle",
"cv2.getPerspectiveTransform",
"numpy.float32",
"cv2.imshow",
"rospy.Publisher",
"rospy.init_node",
"rospy.spin",
"cv2.destroyAllWindows"
] | [((1774, 1820), 'rospy.init_node', 'rospy.init_node', (['"""image_depth"""'], {'anonymous': '(True)'}), "('image_depth', anonymous=True)\n", (1789, 1820), False, 'import rospy\n'), ((1902, 1925), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1923, 1925), False, 'import cv2\n'), ((377, 435), 'rospy.Publisher', 'rospy.Publisher', (['"""image_topic_depth"""', 'Image'], {'queue_size': '(10)'}), "('image_topic_depth', Image, queue_size=10)\n", (392, 435), False, 'import rospy\n'), ((454, 464), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (462, 464), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((486, 547), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/camera/depth/image"""', 'Image', 'self.callback'], {}), "('/camera/depth/image', Image, self.callback)\n", (502, 547), False, 'import rospy\n'), ((550, 604), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/object"""', 'cood', 'self.callback_depth'], {}), "('/object', cood, self.callback_depth)\n", (566, 604), False, 'import rospy\n'), ((954, 1014), 'numpy.float32', 'np.float32', (['[[200, 225], [300, 225], [300, 360], [200, 360]]'], {}), '([[200, 225], [300, 225], [300, 360], [200, 360]])\n', (964, 1014), True, 'import numpy as np\n'), ((1019, 1075), 'numpy.float32', 'np.float32', (['[[0, 0], [cols, 0], [cols, rows], [0, rows]]'], {}), '([[0, 0], [cols, 0], [cols, rows], [0, rows]])\n', (1029, 1075), True, 'import numpy as np\n'), ((1078, 1117), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (1105, 1117), False, 'import cv2\n'), ((1127, 1173), 'cv2.warpPerspective', 'cv2.warpPerspective', (['cv_image', 'M', '(cols, rows)'], {}), '(cv_image, M, (cols, rows))\n', (1146, 1173), False, 'import cv2\n'), ((1175, 1205), 'cv2.imshow', 'cv2.imshow', (['"""Perspective"""', 'img'], {}), "('Perspective', img)\n", (1185, 1205), False, 'import cv2\n'), ((1266, 1306), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(15)', '(0, 255, 
0)'], {}), '(img, (x, y), 15, (0, 255, 0))\n', (1276, 1306), False, 'import cv2\n'), ((1349, 1379), 'cv2.imshow', 'cv2.imshow', (['"""depth image"""', 'img'], {}), "('depth image', img)\n", (1359, 1379), False, 'import cv2\n'), ((1832, 1844), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1842, 1844), False, 'import rospy\n')] |
# This code is modified on
# https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
import numpy as np
class ReplayBuffer(object):
    """Fixed-capacity ring buffer of transitions with optional proportional
    prioritized replay (storage is lazily allocated on the first ``add``)."""

    def __init__(self, size, priority_replay=False, alpha=0.7, beta=0.5, eps=1e-7):
        """Create Replay buffer.

        Parameters
        ----------
        size: int
            Max number of transitions to store in the buffer. When the buffer
            overflows the old memories are dropped.
        priority_replay: bool
            If True, sample proportionally to stored priorities and return
            importance-sampling weights.
        alpha, beta, eps: float
            Prioritization exponent, importance-sampling exponent, and the
            small constant added to |td| so priorities stay positive.
        """
        self._storage = {key: [] for key in
                         ("obses_t", "actions", "rewards", "obses_tp1", "dones")}
        self._maxsize = size
        self._next_idx = 0
        self._size = 0
        self._probabilities = []
        self.priority_replay = priority_replay
        self.alpha = alpha
        self.beta = beta
        self._eps = eps

    def __len__(self):
        return self._size

    def _max_priority(self):
        # New samples get the current maximum priority so they are seen soon.
        if not self.priority_replay:
            return 1.0
        return np.max(self._probabilities)

    def add(self, obs_t, action, reward, obs_tp1, done):
        """Insert one transition, overwriting the oldest entry when full."""
        sample = {
            "obses_t": np.array([obs_t]),
            "actions": np.array([action]),
            "rewards": np.array([reward]),
            "obses_tp1": np.array([obs_tp1]),
            "dones": np.array([done]),
        }
        if self._size == 0:
            # First insert: allocate the fixed-size arrays, shaped/typed
            # after this sample.
            self._probabilities = np.zeros(self._maxsize, dtype=np.float32)
            self._probabilities[0] = 1.0
            for name, value in sample.items():
                self._storage[name] = np.zeros((self._maxsize,) + value.shape[1:],
                                               dtype=value.dtype)
        self._probabilities[self._next_idx] = self._max_priority()
        for name, value in sample.items():
            self._storage[name][self._next_idx] = value
        self._size = min(self._size + 1, self._maxsize)
        self._next_idx = (self._next_idx + 1) % self._maxsize

    def update_priority(self, td_loss):
        """Set priorities of the last-sampled indices from their TD errors."""
        if not self.priority_replay:
            return
        self._probabilities[self.idxes] = (np.abs(td_loss) + self._eps) ** self.alpha

    def _encode_sample(self, idxes):
        # Gather the five transition fields at the given indices.
        fields = ("obses_t", "actions", "rewards", "obses_tp1", "dones")
        return tuple(self._storage[name][idxes] for name in fields)

    def sample(self, batch_size):
        """Sample a batch of transitions (with replacement).

        Parameters
        ----------
        batch_size: int
            How many transitions to sample.

        Returns
        -------
        (obs_batch, act_batch, rew_batch, next_obs_batch, done_mask, is_weight):
            the stored transition fields at the sampled indices, plus the
            importance-sampling weights (all ones when prioritization is off).
        """
        active = self._probabilities[:self._size]
        probabilities = active / np.sum(active)
        # Remember the drawn indices so update_priority can target them.
        self.idxes = np.random.choice(
            range(self._size),
            batch_size,
            p=probabilities,
        )
        if self.priority_replay:
            # Importance-sampling correction, normalized to max weight 1.
            is_weight = np.power(self._size * probabilities[self.idxes], -self.beta)
            is_weight /= is_weight.max()
        else:
            is_weight = np.ones(len(self.idxes))
        return (*self._encode_sample(self.idxes), is_weight)
| [
"numpy.zeros",
"numpy.abs",
"numpy.max",
"numpy.array"
] | [((906, 933), 'numpy.max', 'np.max', (['self._probabilities'], {}), '(self._probabilities)\n', (912, 933), True, 'import numpy as np\n'), ((1052, 1069), 'numpy.array', 'np.array', (['[obs_t]'], {}), '([obs_t])\n', (1060, 1069), True, 'import numpy as np\n'), ((1098, 1116), 'numpy.array', 'np.array', (['[action]'], {}), '([action])\n', (1106, 1116), True, 'import numpy as np\n'), ((1145, 1163), 'numpy.array', 'np.array', (['[reward]'], {}), '([reward])\n', (1153, 1163), True, 'import numpy as np\n'), ((1194, 1213), 'numpy.array', 'np.array', (['[obs_tp1]'], {}), '([obs_tp1])\n', (1202, 1213), True, 'import numpy as np\n'), ((1240, 1256), 'numpy.array', 'np.array', (['[done]'], {}), '([done])\n', (1248, 1256), True, 'import numpy as np\n'), ((1337, 1378), 'numpy.zeros', 'np.zeros', (['self._maxsize'], {'dtype': 'np.float32'}), '(self._maxsize, dtype=np.float32)\n', (1345, 1378), True, 'import numpy as np\n'), ((1491, 1557), 'numpy.zeros', 'np.zeros', (['(self._maxsize, *data[k].shape[1:])'], {'dtype': 'data[k].dtype'}), '((self._maxsize, *data[k].shape[1:]), dtype=data[k].dtype)\n', (1499, 1557), True, 'import numpy as np\n'), ((1959, 1974), 'numpy.abs', 'np.abs', (['td_loss'], {}), '(td_loss)\n', (1965, 1974), True, 'import numpy as np\n')] |
import gym
import numpy as np
import pickle
env = gym.make('CartPole-v0')


# The observation has four variables; with a bias term appended, the linear
# policy W . [obs, 1] therefore uses five parameters.
def run_episode(W, max_step=1000, render=False):
    """Roll out one episode using the linear policy W.

    Returns (total_reward, steps_taken)."""
    observation = env.reset()
    total_reward, step_count = 0, 0
    done = False
    while not done and step_count <= max_step:
        if render:
            env.render()
        features = np.append(observation, 1.0)
        # Push right (action 1) when the linear score is positive.
        action = 1 if np.dot(W, features) > 0 else 0
        observation, reward, done, _info = env.step(action)
        total_reward += reward
        step_count += 1
    return total_reward, step_count
train_episode = 10000
save_model = 10


def train():
    """Random search: sample random linear policies and keep the best.

    Checkpoints the best parameter vector to 'random_model.p' every
    `save_model` episodes. Returns (best_W, best_reward)."""
    best_W, best_reward = [], 0
    for episode in range(train_episode):
        # Sample five parameters uniformly from [-1, 1).
        candidate = np.random.rand(5) * 2 - 1
        reward, steps = run_episode(candidate)
        print("Episode {} finished after {} timesteps".format(episode, steps + 1))
        if reward > best_reward:
            best_reward = reward
            best_W = candidate
        if episode % save_model == 0:
            pickle.dump(best_W, open('random_model.p', 'wb'))
    return best_W, best_reward
load_model = True


def run():
    """Load (or train) a policy, then render 20 demonstration episodes."""
    if load_model:
        # NOTE: pickle.load on an untrusted file is unsafe; only load
        # checkpoints this script itself produced.
        W = pickle.load(open('random_model.p', 'rb'))
    else:
        W, best_reward = train()
        print("Best model reward = {}".format(best_reward))
    for episode in range(20):
        _, steps = run_episode(W, max_step=1000, render=True)
        print("Episode {} finished after {} timesteps".format(episode, steps + 1))


run()
| [
"numpy.dot",
"numpy.append",
"numpy.random.rand",
"gym.make"
] | [((50, 73), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (58, 73), False, 'import gym\n'), ((443, 470), 'numpy.append', 'np.append', (['observation', '(1.0)'], {}), '(observation, 1.0)\n', (452, 470), True, 'import numpy as np\n'), ((493, 505), 'numpy.dot', 'np.dot', (['W', 'b'], {}), '(W, b)\n', (499, 505), True, 'import numpy as np\n'), ((828, 845), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (842, 845), True, 'import numpy as np\n')] |
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .Equirec2Cube import Equirec2Cube
class SpherePad(nn.Module):
    """Pads each of the 6 cube-map faces of an equirectangular-derived batch
    with pixels sampled from the adjacent faces, so convolutions see
    spherically-consistent borders. Expects the batch dimension to interleave
    faces in the order back/down/front/left/right/top (bs % 6 == 0)."""
    def __init__(self, pad_size):
        super(SpherePad, self).__init__()
        self.pad_size = pad_size
        # Cache of precomputed sampling grids, keyed by '(h,w,pad)'.
        self.data = {}
        # pad order: up, down, left, right sides
        # use yes/no flag to choose flip/transpose or not
        # notation: #face-#side_#flip-hor_#flip_ver_#transpose
        # transpose is applied first
        self.relation = {
            'back': ['top-up_yes_yes_no', 'down-down_yes_yes_no', 'right-right_no_no_no', 'left-left_no_no_no'],
            'down': ['front-down_no_no_no', 'back-down_yes_yes_no', 'left-down_yes_no_yes', 'right-down_no_yes_yes'],
            'front': ['top-down_no_no_no', 'down-up_no_no_no', 'left-right_no_no_no', 'right-left_no_no_no'],
            'left': ['top-left_yes_no_yes', 'down-left_no_yes_yes', 'back-right_no_no_no', 'front-left_no_no_no'],
            'right': ['top-right_no_yes_yes', 'down-right_yes_no_yes', 'front-right_no_no_no', 'back-left_no_no_no'],
            'top': ['back-up_yes_yes_no', 'front-up_no_no_no', 'left-up_no_yes_yes', 'right-up_yes_no_yes']
        }
    def _GetLoc(self, R_lst, grid_lst, K):
        """Precompute, for every face and border side, the grid_sample
        coordinates ('xy') into the neighboring face plus a validity 'mask'.

        R_lst: per-face rotations; grid_lst: per-face ray grids; K: pinhole
        intrinsics dict with keys 'f', 'cx', 'cy'."""
        out = {}
        pad = self.pad_size
        f, cx, cy = K['f'], K['cx'], K['cy']
        K_mat = torch.FloatTensor(
            np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]]))
        grid_front = grid_lst[2]  # 1 x h x h x 3
        orders = ['back', 'down', 'front', 'left', 'right', 'top']
        for i, face in enumerate(orders):
            out[face] = {}
            for j, connect_side in enumerate(['up', 'down', 'left', 'right']):
                connected_face = self.relation[face][j].split('-')[0]
                idx = orders.index(connected_face)
                R_world_to_connected = R_lst[idx]  # 3 x 3
                R_world_to_itself = R_lst[i]  # 3 x 3
                # Relative rotation taking rays from this face's frame into
                # the connected face's frame.
                R_itself_to_connected = torch.matmul(
                    R_world_to_connected, R_world_to_itself.transpose(0, 1))
                new_grid = torch.matmul(
                    grid_front, R_itself_to_connected.transpose(0, 1))
                # Project the rotated rays through the intrinsics and
                # perspective-divide to pixel coordinates.
                proj = torch.matmul(new_grid, K_mat.transpose(0, 1))
                x = proj[:, :, :, 0:1] / proj[:, :, :, 2:3]
                y = proj[:, :, :, 1:2] / proj[:, :, :, 2:3]
                # Normalize to grid_sample's [-1, 1] convention.
                x = (x - cx) / cx
                y = (y - cy) / cy
                xy = torch.cat([x, y], dim=3)  # 1 x h x w x 2
                out[face][connect_side] = {}
                x = xy[:, :, :, 0:1]
                y = xy[:, :, :, 1:2]
                '''
                mask1 = np.logical_and(x >= -1.01, x <= 1.01)
                mask2 = np.logical_and(y >= -1.01, y <= 1.01)
                mask = np.logical_and(mask1, mask2)
                '''
                # Valid where the projection lands inside the neighbor face
                # (small 0.01 tolerance for border pixels).
                mask1 = (x >= -1.01) & (x <= 1.01)
                mask2 = (y >= -1.01) & (y <= 1.01)
                mask = mask1 & mask2
                xy = torch.clamp(xy, -1, 1)
                # Keep only the border strip relevant for this side.
                if connect_side == 'up':
                    out[face][connect_side]['mask'] = mask[:, :pad, :, :]
                    out[face][connect_side]['xy'] = xy[:, :pad, :, :]
                elif connect_side == 'down':
                    out[face][connect_side]['mask'] = mask[:, -pad:, :, :]
                    out[face][connect_side]['xy'] = xy[:, -pad:, :, :]
                elif connect_side == 'left':
                    out[face][connect_side]['mask'] = mask[:, :, :pad, :]
                    out[face][connect_side]['xy'] = xy[:, :, :pad, :]
                elif connect_side == 'right':
                    out[face][connect_side]['mask'] = mask[:, :, -pad:, :]
                    out[face][connect_side]['xy'] = xy[:, :, -pad:, :]
        return out
    def forward(self, inputs):
        """Zero-pad every face by pad_size, then overwrite the padded border
        strips with bilinear samples from the adjacent faces.

        inputs: (bs, c, h, w) with bs a multiple of 6 and h == w; returns the
        padded tensor of shape (bs, c, h + 2*pad, w + 2*pad)."""
        [bs, c, h, w] = inputs.shape
        assert bs % 6 == 0 and h == w
        key = '(%d,%d,%d)' % (h, w, self.pad_size)
        if key not in self.data:
            # Enlarged field of view that covers the face plus its padding.
            theta = 2 * np.arctan((0.5 * h + self.pad_size) / (0.5 * h))
            e2c_ori = Equirec2Cube(1, 2*h, 4*h, h, 90)
            e2c = Equirec2Cube(
                1, 2*h, 4*h, h+2*self.pad_size, theta/np.pi * 180)
            R_lst = [x.transpose(0, 1) for x in e2c.R_lst]
            grid_lst = e2c.grid_lst
            K = e2c_ori.intrisic
            self.data[key] = self._GetLoc(R_lst, grid_lst, K)
        pad = self.pad_size
        orders = ['back', 'down', 'front', 'left', 'right', 'top']
        out = []
        for i, face in enumerate(orders):
            # Faces are interleaved along the batch dimension.
            this_face = inputs[i::6]
            this_face = F.pad(this_face, (pad, pad, pad, pad))
            repeats = this_face.shape[0]
            for j, connect_side in enumerate(['up', 'down', 'left', 'right']):
                connected_face_name = self.relation[face][j].split('-')[0]
                connected_face = inputs[orders.index(connected_face_name)::6]
                mask = self.data[key][face][connect_side]['mask'].cuda().repeat(repeats, 1, 1, c).permute(0, 3, 1, 2)
                xy = self.data[key][face][connect_side]['xy'].cuda().repeat(repeats, 1, 1, 1)
                # Sample the neighbor face at the precomputed coordinates.
                interpo = F.grid_sample(connected_face, xy, mode='bilinear')
                # Copy valid samples into the corresponding border strip.
                if connect_side == 'up':
                    this_face[:, :, :pad, :][mask] = interpo[mask]
                elif connect_side == 'down':
                    this_face[:, :, -pad:, :][mask] = interpo[mask]
                elif connect_side == 'left':
                    this_face[:, :, :, :pad][mask] = interpo[mask]
                elif connect_side == 'right':
                    this_face[:, :, :, -pad:][mask] = interpo[mask]
            out.append(this_face)
        out = torch.cat(out, dim=0)
        [bs, c, h, w] = out.shape
        # Restore the original face-interleaved batch ordering.
        out = out.view(-1, bs//6, c, h, w).transpose(0,
                                                      1).contiguous().view(bs, c, h, w)
        return out
| [
"torch.nn.functional.grid_sample",
"torch.cat",
"torch.clamp",
"numpy.array",
"numpy.arctan",
"torch.nn.functional.pad"
] | [((5771, 5792), 'torch.cat', 'torch.cat', (['out'], {'dim': '(0)'}), '(out, dim=0)\n', (5780, 5792), False, 'import torch\n'), ((1447, 1492), 'numpy.array', 'np.array', (['[[f, 0, cx], [0, f, cy], [0, 0, 1]]'], {}), '([[f, 0, cx], [0, f, cy], [0, 0, 1]])\n', (1455, 1492), True, 'import numpy as np\n'), ((4675, 4713), 'torch.nn.functional.pad', 'F.pad', (['this_face', '(pad, pad, pad, pad)'], {}), '(this_face, (pad, pad, pad, pad))\n', (4680, 4713), True, 'import torch.nn.functional as F\n'), ((2514, 2538), 'torch.cat', 'torch.cat', (['[x, y]'], {'dim': '(3)'}), '([x, y], dim=3)\n', (2523, 2538), False, 'import torch\n'), ((3052, 3074), 'torch.clamp', 'torch.clamp', (['xy', '(-1)', '(1)'], {}), '(xy, -1, 1)\n', (3063, 3074), False, 'import torch\n'), ((4067, 4115), 'numpy.arctan', 'np.arctan', (['((0.5 * h + self.pad_size) / (0.5 * h))'], {}), '((0.5 * h + self.pad_size) / (0.5 * h))\n', (4076, 4115), True, 'import numpy as np\n'), ((5225, 5275), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['connected_face', 'xy'], {'mode': '"""bilinear"""'}), "(connected_face, xy, mode='bilinear')\n", (5238, 5275), True, 'import torch.nn.functional as F\n')] |
import os
import numpy as np
import pandas as pd
import pickle
import glob
from scipy import io
from astropy.time import Time
from astropy import units as u
from solarsystemMB import SSObject, planet_geometry
from .database_setup import database_connect
def merc_year(datatime=None, initialize=False):
    """Insert/read start date for each Mercury year from database.

    This creates and reads from database table *MESmercyear*.

    Parameters
    ----------
    datatime
        Sequence of timestamps to classify by Mercury year, or None.
    initialize
        If True, (re)create the *MESmercyear* table by scanning the
        mission time range for wrap-arounds of the true anomaly angle.

    Returns
    -------
    Integer array with the Mercury-year index for each element of
    ``datatime``, or None when ``datatime`` is None.
    """
    # Mission time span covered by the table.
    tstart = Time('2011-03-18T00:00:00', format='isot', scale='utc')
    tend = Time('2015-04-30T23:59:59', format='isot', scale='utc')
    if initialize:
        times_ = np.arange(tstart.jd, tend.jd)
        times = [Time(t, format='jd', scale='utc') for t in times_]
        taa = np.ndarray((len(times),))*u.rad
        for i, t in enumerate(times):
            time = Time(t, format='jd', scale='utc')
            geo = planet_geometry(time, 'Mercury')
            taa[i] = geo['taa']
        # A new Mercury year starts wherever the true anomaly angle
        # decreases between consecutive days (wrap-around through 0).
        styear = [times[0]]
        for a, b, c in zip(taa[0:-1], taa[1:], times[1:]):
            if a > b:
                styear.append(c)
                print(c.iso)
        endyr = [*styear[1:], tend]
        with database_connect() as con:
            cur = con.cursor()
            try:
                cur.execute('DROP table MESmercyear')
            except Exception:
                # Table does not exist yet -- nothing to drop. (Was a bare
                # ``except:``, which would also swallow KeyboardInterrupt
                # and SystemExit.)
                pass
            print('creating MESmercyear')
            cur.execute('''CREATE table MESmercyear
                           (yrnum int PRIMARY KEY,
                            yrstart timestamp,
                            yrend timestamp)''')
            for i, d in enumerate(zip(styear, endyr)):
                cur.execute(f'''INSERT into MESmercyear
                                values ({i}, '{d[0].iso}', '{d[1].iso}')''')
    else:
        pass
    if datatime is not None:
        with database_connect() as con:
            yrnum = pd.read_sql('''SELECT * from MESmercyear''', con)
        # Assign each observation time to the Mercury year whose
        # [yrstart, yrend] interval contains it.
        myear = np.ndarray((len(datatime),), dtype=int)
        for i, yr in yrnum.iterrows():
            q = (datatime > yr.yrstart) & (datatime < yr.yrend)
            myear[q] = yr.yrnum
        return myear
    else:
        return None
return None
def initialize_MESSENGERdata(datapath):
    """Store data from IDL summary files in a database.

    The IDL summary files were provided by Aimee Merkel.
    Two tables are created for each species (Ca, Na, and Mg): *xxuvvsdata* and
    *xxuvvspointing* where xx is the species. See :doc:`database_fields` for
    a description of these tables and fields.

    **Parameters**

    datapath
        Path to the IDL summary files

    **Returns**

    No output.
    """
    mercury = SSObject('Mercury')
    # Add to the database
    with database_connect() as con:
        cur = con.cursor()
        # Find which MESSENGER tables already exist so they can be recreated.
        cur.execute('select table_name from information_schema.tables')
        tables = [r[0] for r in cur.fetchall()]
        mestables = ['capointing', 'cauvvsdata', 'mgpointing',
                     'mguvvsdata', 'napointing', 'nauvvsdata',
                     'caspectra', 'naspectra', 'mgspectra',
                     'uvvsmodels_oribt', 'uvvsmodels_query']
        # Delete any tables that may exist
        for mestab in mestables:
            if mestab in tables:
                cur.execute(f'drop table {mestab}')
        # print('creating MESmercyear table')
        # merc_year(initialize=True)
        print('creating UVVS tables')
        spec = ['Ca', 'Na', 'Mg']
        for sp in spec:
            # Table with spectrum information
            print(f'Creating {sp}uvvsdata')
            cur.execute(f'''CREATE table {sp}uvvsdata (
                            unum SERIAL PRIMARY KEY,
                            species text,
                            frame text,
                            UTC timestamp,
                            orbit int,
                            merc_year int,
                            taa float,
                            rmerc float,
                            drdt float,
                            subslong float,
                            g float,
                            radiance float,
                            sigma float)''')
            # Table with MESSENGER geometry and UVVS pointing
            print(f'Creating {sp}pointing')
            cur.execute(f'''CREATE table {sp}pointing (
                            pnum SERIAL PRIMARY KEY,
                            x float,
                            y float,
                            z float,
                            xbore float,
                            ybore float,
                            zbore float,
                            obstype text,
                            obstype_num int,
                            xtan float,
                            ytan float,
                            ztan float,
                            rtan float,
                            alttan float,
                            longtan float,
                            lattan float,
                            loctimetan float,
                            slit text)''')  # Not including slit corners
            # Table with spectra
            print(f'Creating {sp}spectra')
            cur.execute(f'''CREATE table {sp}spectra (
                            snum SERIAL PRIMARY KEY,
                            wavelength float[],
                            calibrated float[],
                            raw float[],
                            dark float[],
                            solarfit float[])''')
        # Process each IDL summary file (previously converted to pickle).
        savfiles = glob.glob(datapath+'/*_temp.pkl')
        savfiles = sorted(savfiles)
        for oldfile in savfiles:
            # realfile = oldfile.replace('.sav', '_temp.pkl')
            # newfile = oldfile.replace('.sav', '.pkl')
            newfile = oldfile.replace('_temp', '')
            print(f'{oldfile}\n{newfile}\n***')
            # data = io.readsav(oldfile, python_dict=True)
            # data = pickle.load(open(realfile, 'rb'))
            # Use a context manager so the file handle is closed promptly
            # (the original left it to the garbage collector).
            with open(oldfile, 'rb') as pklfile:
                data = pickle.load(pklfile)
            # NOTE: kR and nm are defined but currently unused; kept for reference.
            kR = u.def_unit('kR', 1e3*u.R)
            Rmerc = u.def_unit('R_Mercury', mercury.radius)
            nm = u.def_unit('nm', 1e-9*u.m)
            npts = len(data['orb_num'])
            # Species tag (ca/na/mg) comes from the first two filename characters.
            species = os.path.basename(oldfile)[0:2].lower()
            # Determine UT for each spectrum
            t_iso = ['{}:{}:{}'.format('20'+time[0:2].decode('utf-8'),
                                       time[2:5].decode('utf-8'),
                                       time[6:].decode('utf-8'))
                     for time in data['step_utc_time']]
            UTC = Time(t_iso, format='yday')
            # Orbit number for each data spectrum
            orbit = np.array([int(o) for o in data['orb_num']])
            # determine Mercury year
            myear = merc_year(UTC)
            rmerc = (np.sqrt(np.sum(data['planet_sun_vector_tg']**2,
                                    axis=1))*u.km).to(u.AU)
            radiance = data[f'{species.lower()}_tot_rad_kr']
            sigma = radiance/data[f'{species.lower()}_tot_rad_snr']
            # Spacecraft position and boresight in MSO
            xyz = np.ndarray((npts, 3))
            bore = np.ndarray((npts, 3))
            corn0 = np.ndarray((npts, 3))
            corn1 = np.ndarray((npts, 3))
            corn2 = np.ndarray((npts, 3))
            corn3 = np.ndarray((npts, 3))
            for i in np.arange(npts):
                # Rotate each vector from the target-geographic frame into MSO.
                xyz[i, :] = np.dot(data['mso_rotation_matrix'][i, :, :],
                                   data['planet_sc_vector_tg'][i, :]
                                   )/mercury.radius.value
                bore[i, :] = np.dot(data['mso_rotation_matrix'][i, :, :],
                                    data['boresight_unit_vector_center_tg'][i, :])
                corn0[i, :] = np.dot(data['mso_rotation_matrix'][i, :, :],
                                     data['boresight_unit_vector_c1_tg'][i, :])
                corn1[i, :] = np.dot(data['mso_rotation_matrix'][i, :, :],
                                     data['boresight_unit_vector_c2_tg'][i, :])
                corn2[i, :] = np.dot(data['mso_rotation_matrix'][i, :, :],
                                     data['boresight_unit_vector_c3_tg'][i, :])
                corn3[i, :] = np.dot(data['mso_rotation_matrix'][i, :, :],
                                     data['boresight_unit_vector_c4_tg'][i, :])
            xcorner = np.array([corn0[:, 0], corn1[:, 0],
                                corn2[:, 0], corn3[:, 0]]).transpose()
            ycorner = np.array([corn0[:, 1], corn1[:, 1],
                                corn2[:, 1], corn3[:, 1]]).transpose()
            zcorner = np.array([corn0[:, 2], corn1[:, 2],
                                corn2[:, 2], corn3[:, 2]]).transpose()
            # Determine tangent point
            t = -np.sum(xyz*bore, axis=1)
            tanpt = xyz+bore*t[:, np.newaxis]
            rtan = np.linalg.norm(tanpt, axis=1)
            slit = np.array(['Surface' if s == 0
                              else 'Atmospheric'
                              for s in data['slit']])
            obstype = np.array(
                [str(ob).replace('b', '').replace("'", '').strip()
                 for ob in data['obs_typ']])
            # Add in the spectra
            spectra = data[species.lower()+'_rad_kr']
            wavelength = data['wavelength']
            raw = data['orig']
            try:
                corrected = data['fully_corr_cr']
            except KeyError:
                # Older summary files only carry the 'corr' spectra.
                corrected = data['corr']
            dark = data['dark']
            solarfit = data['sol_fit']
            ndata = pd.DataFrame(
                {'species': species,
                 'frame': 'MSO',
                 'UTC': UTC,
                 'orbit': orbit,
                 'merc_year': myear,
                 'TAA': data['true_anomaly']*np.pi/180.,
                 'rmerc': rmerc.value,
                 'drdt': data['rad_vel'],
                 'subslong': data['subsolar_longitude']*np.pi/180.,
                 'g': data['gvals']/u.s,
                 'radiance': radiance,
                 'sigma': sigma,
                 'x': xyz[:, 0]*Rmerc,
                 'y': xyz[:, 1]*Rmerc,
                 'z': xyz[:, 2]*Rmerc,
                 'xbore': bore[:, 0], 'ybore': bore[:, 1], 'zbore': bore[:, 2],
                 'xcorn1': xcorner[:, 0], 'xcorn2': xcorner[:, 1],
                 'xcorn3': xcorner[:, 2], 'xcorn4': xcorner[:, 3],
                 'ycorn1': ycorner[:, 0], 'ycorn2': ycorner[:, 1],
                 'ycorn3': ycorner[:, 2], 'ycorn4': ycorner[:, 3],
                 'zcorn1': zcorner[:, 0], 'zcorn2': zcorner[:, 1],
                 'zcorn3': zcorner[:, 2], 'zcorn4': zcorner[:, 3],
                 'obstype': obstype,
                 'obstype_num': data['obs_typ_num'],
                 'xtan': tanpt[:, 0], 'ytan': tanpt[:, 1],
                 'ztan': tanpt[:, 2], 'rtan': rtan,
                 'alttan': data['target_altitude_set'][:, 0],
                 'minalt': data['minalt'],
                 'longtan': data['target_longitude_set'][:, 0]*np.pi/180,
                 'lattan': data['target_latitude_set'][:, 0]*np.pi/180,
                 'loctimetan': data['obs_solar_localtime'],
                 'slit': slit})
            ndata.fillna(-999, inplace=True)
            # Convert the 2D spectral arrays to one row per spectrum.
            spectra = [spectra[i,:] for i in range(spectra.shape[0])]
            wavelength = [wavelength[i,:] for i in range(wavelength.shape[0])]
            raw = [raw[i,:] for i in range(raw.shape[0])]
            corrected = [corrected[i,:] for i in range(corrected.shape[0])]
            dark = [dark[i,:] for i in range(dark.shape[0])]
            solarfit = [solarfit[i,:] for i in range(solarfit.shape[0])]
            spectra = pd.DataFrame(
                {'spectra': spectra,
                 'wavelength': wavelength,
                 'raw': raw,
                 'corrected': corrected,
                 'dark': dark,
                 'solarfit': solarfit})
            # save this for later
            with open(newfile, 'wb') as f:
                pickle.dump(ndata, f, pickle.HIGHEST_PROTOCOL)
            with open(newfile.replace('.pkl', '_spectra.pkl'), 'wb') as f:
                pickle.dump(spectra, f, pickle.HIGHEST_PROTOCOL)
            print('Inserting UVVS data')
            # NOTE(review): this opens a second connection but keeps executing
            # on `cur`, which belongs to the outer connection — confirm intent.
            with database_connect() as con:
                print(f'Saving {species} Data')
                for i, dpoint in ndata.iterrows():
                    cur.execute(f'''INSERT into {species}uvvsdata (
                                    species, frame, UTC, orbit, merc_year,
                                    taa, rmerc, drdt, subslong, g, radiance,
                                    sigma) values (
                                    '{dpoint.species}',
                                    '{dpoint.frame}',
                                    '{dpoint.UTC.iso}',
                                    {dpoint.orbit},
                                    {dpoint.merc_year},
                                    {dpoint.TAA},
                                    {dpoint.rmerc},
                                    {dpoint.drdt},
                                    {dpoint.subslong},
                                    {dpoint.g},
                                    {dpoint.radiance},
                                    {dpoint.sigma})''')
                    cur.execute(f'''INSERT into {species}pointing (
                                    x, y, z, xbore, ybore, zbore,
                                    obstype, obstype_num, xtan, ytan, ztan,
                                    rtan, alttan, longtan, lattan,
                                    loctimetan, slit) values (
                                    {dpoint.x},
                                    {dpoint.y},
                                    {dpoint.z},
                                    {dpoint.xbore},
                                    {dpoint.ybore},
                                    {dpoint.zbore},
                                    '{dpoint.obstype}',
                                    {dpoint.obstype_num},
                                    {dpoint.xtan},
                                    {dpoint.ytan},
                                    {dpoint.ztan},
                                    {dpoint.rtan},
                                    {dpoint.alttan},
                                    {dpoint.longtan},
                                    {dpoint.lattan},
                                    {dpoint.loctimetan},
                                    '{dpoint.slit}')''')
                print(f'Saving {species} Spectra')
                for i, spec in spectra.iterrows():
                    cur.execute(f'''INSERT into {species}spectra (wavelength,
                                    calibrated, raw, dark, solarfit) values (
                                    %s, %s, %s, %s, %s)''',
                                (spec.wavelength.tolist(), spec.spectra.tolist(),
                                 spec.raw.tolist(), spec.dark.tolist(),
                                 spec.solarfit.tolist()))
| [
"pandas.DataFrame",
"solarsystemMB.planet_geometry",
"pandas.read_sql",
"numpy.sum",
"pickle.dump",
"os.path.basename",
"astropy.time.Time",
"astropy.units.def_unit",
"numpy.arange",
"numpy.linalg.norm",
"numpy.array",
"glob.glob",
"numpy.dot",
"numpy.ndarray",
"solarsystemMB.SSObject"
] | [((459, 514), 'astropy.time.Time', 'Time', (['"""2011-03-18T00:00:00"""'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "('2011-03-18T00:00:00', format='isot', scale='utc')\n", (463, 514), False, 'from astropy.time import Time\n'), ((526, 581), 'astropy.time.Time', 'Time', (['"""2015-04-30T23:59:59"""'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "('2015-04-30T23:59:59', format='isot', scale='utc')\n", (530, 581), False, 'from astropy.time import Time\n'), ((2750, 2769), 'solarsystemMB.SSObject', 'SSObject', (['"""Mercury"""'], {}), "('Mercury')\n", (2758, 2769), False, 'from solarsystemMB import SSObject, planet_geometry\n'), ((5844, 5879), 'glob.glob', 'glob.glob', (["(datapath + '/*_temp.pkl')"], {}), "(datapath + '/*_temp.pkl')\n", (5853, 5879), False, 'import glob\n'), ((623, 652), 'numpy.arange', 'np.arange', (['tstart.jd', 'tend.jd'], {}), '(tstart.jd, tend.jd)\n', (632, 652), True, 'import numpy as np\n'), ((6308, 6338), 'astropy.units.def_unit', 'u.def_unit', (['"""kR"""', '(1000.0 * u.R)'], {}), "('kR', 1000.0 * u.R)\n", (6318, 6338), True, 'from astropy import units as u\n'), ((6350, 6389), 'astropy.units.def_unit', 'u.def_unit', (['"""R_Mercury"""', 'mercury.radius'], {}), "('R_Mercury', mercury.radius)\n", (6360, 6389), True, 'from astropy import units as u\n'), ((6403, 6432), 'astropy.units.def_unit', 'u.def_unit', (['"""nm"""', '(1e-09 * u.m)'], {}), "('nm', 1e-09 * u.m)\n", (6413, 6432), True, 'from astropy import units as u\n'), ((6838, 6864), 'astropy.time.Time', 'Time', (['t_iso'], {'format': '"""yday"""'}), "(t_iso, format='yday')\n", (6842, 6864), False, 'from astropy.time import Time\n'), ((7378, 7399), 'numpy.ndarray', 'np.ndarray', (['(npts, 3)'], {}), '((npts, 3))\n', (7388, 7399), True, 'import numpy as np\n'), ((7415, 7436), 'numpy.ndarray', 'np.ndarray', (['(npts, 3)'], {}), '((npts, 3))\n', (7425, 7436), True, 'import numpy as np\n'), ((7453, 7474), 'numpy.ndarray', 'np.ndarray', (['(npts, 3)'], {}), '((npts, 3))\n', 
(7463, 7474), True, 'import numpy as np\n'), ((7491, 7512), 'numpy.ndarray', 'np.ndarray', (['(npts, 3)'], {}), '((npts, 3))\n', (7501, 7512), True, 'import numpy as np\n'), ((7529, 7550), 'numpy.ndarray', 'np.ndarray', (['(npts, 3)'], {}), '((npts, 3))\n', (7539, 7550), True, 'import numpy as np\n'), ((7567, 7588), 'numpy.ndarray', 'np.ndarray', (['(npts, 3)'], {}), '((npts, 3))\n', (7577, 7588), True, 'import numpy as np\n'), ((7606, 7621), 'numpy.arange', 'np.arange', (['npts'], {}), '(npts)\n', (7615, 7621), True, 'import numpy as np\n'), ((9058, 9087), 'numpy.linalg.norm', 'np.linalg.norm', (['tanpt'], {'axis': '(1)'}), '(tanpt, axis=1)\n', (9072, 9087), True, 'import numpy as np\n'), ((9112, 9186), 'numpy.array', 'np.array', (["[('Surface' if s == 0 else 'Atmospheric') for s in data['slit']]"], {}), "([('Surface' if s == 0 else 'Atmospheric') for s in data['slit']])\n", (9120, 9186), True, 'import numpy as np\n'), ((9722, 10920), 'pandas.DataFrame', 'pd.DataFrame', (["{'species': species, 'frame': 'MSO', 'UTC': UTC, 'orbit': orbit,\n 'merc_year': myear, 'TAA': data['true_anomaly'] * np.pi / 180.0,\n 'rmerc': rmerc.value, 'drdt': data['rad_vel'], 'subslong': data[\n 'subsolar_longitude'] * np.pi / 180.0, 'g': data['gvals'] / u.s,\n 'radiance': radiance, 'sigma': sigma, 'x': xyz[:, 0] * Rmerc, 'y': xyz[\n :, 1] * Rmerc, 'z': xyz[:, 2] * Rmerc, 'xbore': bore[:, 0], 'ybore':\n bore[:, 1], 'zbore': bore[:, 2], 'xcorn1': xcorner[:, 0], 'xcorn2':\n xcorner[:, 1], 'xcorn3': xcorner[:, 2], 'xcorn4': xcorner[:, 3],\n 'ycorn1': ycorner[:, 0], 'ycorn2': ycorner[:, 1], 'ycorn3': ycorner[:, \n 2], 'ycorn4': ycorner[:, 3], 'zcorn1': zcorner[:, 0], 'zcorn2': zcorner\n [:, 1], 'zcorn3': zcorner[:, 2], 'zcorn4': zcorner[:, 3], 'obstype':\n obstype, 'obstype_num': data['obs_typ_num'], 'xtan': tanpt[:, 0],\n 'ytan': tanpt[:, 1], 'ztan': tanpt[:, 2], 'rtan': rtan, 'alttan': data[\n 'target_altitude_set'][:, 0], 'minalt': data['minalt'], 'longtan': data\n 
['target_longitude_set'][:, 0] * np.pi / 180, 'lattan': data[\n 'target_latitude_set'][:, 0] * np.pi / 180, 'loctimetan': data[\n 'obs_solar_localtime'], 'slit': slit}"], {}), "({'species': species, 'frame': 'MSO', 'UTC': UTC, 'orbit':\n orbit, 'merc_year': myear, 'TAA': data['true_anomaly'] * np.pi / 180.0,\n 'rmerc': rmerc.value, 'drdt': data['rad_vel'], 'subslong': data[\n 'subsolar_longitude'] * np.pi / 180.0, 'g': data['gvals'] / u.s,\n 'radiance': radiance, 'sigma': sigma, 'x': xyz[:, 0] * Rmerc, 'y': xyz[\n :, 1] * Rmerc, 'z': xyz[:, 2] * Rmerc, 'xbore': bore[:, 0], 'ybore':\n bore[:, 1], 'zbore': bore[:, 2], 'xcorn1': xcorner[:, 0], 'xcorn2':\n xcorner[:, 1], 'xcorn3': xcorner[:, 2], 'xcorn4': xcorner[:, 3],\n 'ycorn1': ycorner[:, 0], 'ycorn2': ycorner[:, 1], 'ycorn3': ycorner[:, \n 2], 'ycorn4': ycorner[:, 3], 'zcorn1': zcorner[:, 0], 'zcorn2': zcorner\n [:, 1], 'zcorn3': zcorner[:, 2], 'zcorn4': zcorner[:, 3], 'obstype':\n obstype, 'obstype_num': data['obs_typ_num'], 'xtan': tanpt[:, 0],\n 'ytan': tanpt[:, 1], 'ztan': tanpt[:, 2], 'rtan': rtan, 'alttan': data[\n 'target_altitude_set'][:, 0], 'minalt': data['minalt'], 'longtan': data\n ['target_longitude_set'][:, 0] * np.pi / 180, 'lattan': data[\n 'target_latitude_set'][:, 0] * np.pi / 180, 'loctimetan': data[\n 'obs_solar_localtime'], 'slit': slit})\n", (9734, 10920), True, 'import pandas as pd\n'), ((11700, 11836), 'pandas.DataFrame', 'pd.DataFrame', (["{'spectra': spectra, 'wavelength': wavelength, 'raw': raw, 'corrected':\n corrected, 'dark': dark, 'solarfit': solarfit}"], {}), "({'spectra': spectra, 'wavelength': wavelength, 'raw': raw,\n 'corrected': corrected, 'dark': dark, 'solarfit': solarfit})\n", (11712, 11836), True, 'import pandas as pd\n'), ((670, 703), 'astropy.time.Time', 'Time', (['t'], {'format': '"""jd"""', 'scale': '"""utc"""'}), "(t, format='jd', scale='utc')\n", (674, 703), False, 'from astropy.time import Time\n'), ((833, 866), 'astropy.time.Time', 'Time', (['t'], {'format': 
'"""jd"""', 'scale': '"""utc"""'}), "(t, format='jd', scale='utc')\n", (837, 866), False, 'from astropy.time import Time\n'), ((885, 917), 'solarsystemMB.planet_geometry', 'planet_geometry', (['time', '"""Mercury"""'], {}), "(time, 'Mercury')\n", (900, 917), False, 'from solarsystemMB import SSObject, planet_geometry\n'), ((1923, 1968), 'pandas.read_sql', 'pd.read_sql', (['"""SELECT * from MESmercyear"""', 'con'], {}), "('SELECT * from MESmercyear', con)\n", (1934, 1968), True, 'import pandas as pd\n'), ((7836, 7932), 'numpy.dot', 'np.dot', (["data['mso_rotation_matrix'][i, :, :]", "data['boresight_unit_vector_center_tg'][i, :]"], {}), "(data['mso_rotation_matrix'][i, :, :], data[\n 'boresight_unit_vector_center_tg'][i, :])\n", (7842, 7932), True, 'import numpy as np\n'), ((7986, 8078), 'numpy.dot', 'np.dot', (["data['mso_rotation_matrix'][i, :, :]", "data['boresight_unit_vector_c1_tg'][i, :]"], {}), "(data['mso_rotation_matrix'][i, :, :], data[\n 'boresight_unit_vector_c1_tg'][i, :])\n", (7992, 8078), True, 'import numpy as np\n'), ((8133, 8225), 'numpy.dot', 'np.dot', (["data['mso_rotation_matrix'][i, :, :]", "data['boresight_unit_vector_c2_tg'][i, :]"], {}), "(data['mso_rotation_matrix'][i, :, :], data[\n 'boresight_unit_vector_c2_tg'][i, :])\n", (8139, 8225), True, 'import numpy as np\n'), ((8280, 8372), 'numpy.dot', 'np.dot', (["data['mso_rotation_matrix'][i, :, :]", "data['boresight_unit_vector_c3_tg'][i, :]"], {}), "(data['mso_rotation_matrix'][i, :, :], data[\n 'boresight_unit_vector_c3_tg'][i, :])\n", (8286, 8372), True, 'import numpy as np\n'), ((8427, 8519), 'numpy.dot', 'np.dot', (["data['mso_rotation_matrix'][i, :, :]", "data['boresight_unit_vector_c4_tg'][i, :]"], {}), "(data['mso_rotation_matrix'][i, :, :], data[\n 'boresight_unit_vector_c4_tg'][i, :])\n", (8433, 8519), True, 'import numpy as np\n'), ((8976, 9002), 'numpy.sum', 'np.sum', (['(xyz * bore)'], {'axis': '(1)'}), '(xyz * bore, axis=1)\n', (8982, 9002), True, 'import numpy as np\n'), 
((12001, 12047), 'pickle.dump', 'pickle.dump', (['ndata', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(ndata, f, pickle.HIGHEST_PROTOCOL)\n', (12012, 12047), False, 'import pickle\n'), ((12131, 12179), 'pickle.dump', 'pickle.dump', (['spectra', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(spectra, f, pickle.HIGHEST_PROTOCOL)\n', (12142, 12179), False, 'import pickle\n'), ((7647, 7726), 'numpy.dot', 'np.dot', (["data['mso_rotation_matrix'][i, :, :]", "data['planet_sc_vector_tg'][i, :]"], {}), "(data['mso_rotation_matrix'][i, :, :], data['planet_sc_vector_tg'][i, :])\n", (7653, 7726), True, 'import numpy as np\n'), ((8575, 8637), 'numpy.array', 'np.array', (['[corn0[:, 0], corn1[:, 0], corn2[:, 0], corn3[:, 0]]'], {}), '([corn0[:, 0], corn1[:, 0], corn2[:, 0], corn3[:, 0]])\n', (8583, 8637), True, 'import numpy as np\n'), ((8696, 8758), 'numpy.array', 'np.array', (['[corn0[:, 1], corn1[:, 1], corn2[:, 1], corn3[:, 1]]'], {}), '([corn0[:, 1], corn1[:, 1], corn2[:, 1], corn3[:, 1]])\n', (8704, 8758), True, 'import numpy as np\n'), ((8817, 8879), 'numpy.array', 'np.array', (['[corn0[:, 2], corn1[:, 2], corn2[:, 2], corn3[:, 2]]'], {}), '([corn0[:, 2], corn1[:, 2], corn2[:, 2], corn3[:, 2]])\n', (8825, 8879), True, 'import numpy as np\n'), ((6493, 6518), 'os.path.basename', 'os.path.basename', (['oldfile'], {}), '(oldfile)\n', (6509, 6518), False, 'import os\n'), ((7078, 7127), 'numpy.sum', 'np.sum', (["(data['planet_sun_vector_tg'] ** 2)"], {'axis': '(1)'}), "(data['planet_sun_vector_tg'] ** 2, axis=1)\n", (7084, 7127), True, 'import numpy as np\n')] |
import librosa
import numpy as np
from pathlib import Path
import json
import os.path
import sys
import argparse
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, os.pardir))
sys.path.append(ROOT_DIR)
from audio_feature_utils import *
from utils import distribute_tasks
# Command-line interface. Every argument name below becomes a module-level
# global of the same name via the globals().update() call further down.
parser = argparse.ArgumentParser(description="Preprocess audio data")
parser.add_argument("data_path", type=str, help="Directory contining Beat Saber level folders")
parser.add_argument("--feature_names", metavar='', type=str, default="mel", help="mel, chroma, multi_mel, spectralflux, madmombeats. Comma separated")
parser.add_argument("--combined_feature_name", metavar='', type=str, default=None, help="name for the combined features, if several")
parser.add_argument("--audio_format", type=str, default="mp3")
parser.add_argument("--mel_feature_size", metavar='', type=int, default=None)
# parser.add_argument("--step_size", metavar='', type=float, default=0.01666666666)
parser.add_argument("--fps", metavar='', type=float, default=60)
parser.add_argument("--sampling_rate", metavar='', type=float, default=96000)
parser.add_argument("--replace_existing", action="store_true")
parser.add_argument("--notranspose", action="store_true")
args = parser.parse_args()
# makes arguments into global variables of the same name, used later in the code
globals().update(vars(args))
step_size=1.0/fps  # seconds between consecutive feature frames
data_path = Path(data_path)
feature_names = feature_names.split(",")
# Pick a name for the combined feature file when several features are joined.
if len(feature_names) > 1 and combined_feature_name is None:
    combined_feature_name = "_".join(feature_names)
elif len(feature_names) == 1:
    combined_feature_name = feature_names[0]

## distributing tasks across nodes ##
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print(rank)
print("creating {} of size {}".format(",".join(feature_names), mel_feature_size))

#assuming mp3 for now.
# Sort by parent directory so every MPI rank sees the same ordering before
# the task split.
candidate_audio_files = sorted(data_path.glob('**/*.'+audio_format), key=lambda path: path.parent.__str__())
tasks = distribute_tasks(candidate_audio_files,rank,size)
for i in tasks:
    path = candidate_audio_files[i]
    song_file_path = path.__str__()
    # feature files are going to be saved as numpy files
    if feature_names == ["mel"] or feature_names == ["multi_mel"]:
        features_file = song_file_path+"_"+combined_feature_name+"_"+str(mel_feature_size)+".npy"
    else:
        features_file = song_file_path+"_"+combined_feature_name+".npy"
    # Skip songs whose feature file already exists unless --replace_existing.
    if replace_existing or not os.path.isfile(features_file):
        print("creating feature file",i)
        featuress = []
        for feature_name in feature_names:
            # get song
            y_wav, sr = librosa.load(song_file_path, sr=sampling_rate)
            sr = sampling_rate
            # Hop length in samples corresponding to one feature frame.
            hop = int(round(sr * step_size))
            # hop = int(sr * step_size)
            #get feature
            if feature_name == "chroma":
                features = extract_features_hybrid(y_wav,sr,hop).transpose(1,0)[1:]
            elif feature_name == "mel":
                features = extract_features_mel(y_wav,sr,hop,mel_dim=mel_feature_size).transpose(1,0)[1:]
            elif feature_name == "envelope":
                features = extract_features_envelope(y_wav,sr,hop)[1:]
            elif feature_name == "multi_mel":
                features = extract_features_multi_mel(y_wav, sr=sampling_rate, hop=hop, nffts=[1024,2048,4096], mel_dim=mel_feature_size)
            elif feature_name == "spectralflux": #actually this is the same as envelope I think
                features = extract_features_spectral_flux(song_file_path,fps)
            elif feature_name == "madmombeats":
                features = extract_features_madmombeat(song_file_path,fps)
            featuress.append(features)
        # Different extractors can yield slightly different frame counts;
        # truncate all features to the shortest before concatenating.
        shortest_length = 99999999999
        for feat in featuress:
            if feat.shape[0] < shortest_length:
                shortest_length = feat.shape[0]
        for i in range(len(featuress)):
            featuress[i] = featuress[i][:shortest_length]
        featuress = np.concatenate(featuress,1)
        np.save(features_file,featuress)
| [
"sys.path.append",
"numpy.save",
"utils.distribute_tasks",
"argparse.ArgumentParser",
"pathlib.Path",
"librosa.load",
"numpy.concatenate"
] | [((230, 255), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (245, 255), False, 'import sys\n'), ((335, 395), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Preprocess audio data"""'}), "(description='Preprocess audio data')\n", (358, 395), False, 'import argparse\n'), ((1436, 1451), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (1440, 1451), False, 'from pathlib import Path\n'), ((2048, 2099), 'utils.distribute_tasks', 'distribute_tasks', (['candidate_audio_files', 'rank', 'size'], {}), '(candidate_audio_files, rank, size)\n', (2064, 2099), False, 'from utils import distribute_tasks\n'), ((4091, 4119), 'numpy.concatenate', 'np.concatenate', (['featuress', '(1)'], {}), '(featuress, 1)\n', (4105, 4119), True, 'import numpy as np\n'), ((4128, 4161), 'numpy.save', 'np.save', (['features_file', 'featuress'], {}), '(features_file, featuress)\n', (4135, 4161), True, 'import numpy as np\n'), ((2708, 2754), 'librosa.load', 'librosa.load', (['song_file_path'], {'sr': 'sampling_rate'}), '(song_file_path, sr=sampling_rate)\n', (2720, 2754), False, 'import librosa\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Testing the orientation models."""
import numpy as np
import pytest
from fiberoripy.orientation import (
ard_rsc_ode,
folgar_tucker_ode,
iard_ode,
iardrpr_ode,
jeffery_ode,
maier_saupe_ode,
mrd_ode,
pard_ode,
pardrpr_ode,
rsc_ode,
)
@pytest.mark.parametrize(
    "model",
    [
        ard_rsc_ode,
        rsc_ode,
        folgar_tucker_ode,
        maier_saupe_ode,
        iard_ode,
        mrd_ode,
        iardrpr_ode,
        pard_ode,
        pardrpr_ode,
    ],
)
def test_default_case(model):
    """The default argument set of all functions should yield to Jeffery's solution."""
    from scipy.integrate import odeint

    t = np.linspace(0, 1, 100)
    a0 = 1.0 / 3.0 * np.eye(3)

    # Simple shear velocity gradient.
    def L(t):
        return np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])

    a_ref = odeint(jeffery_ode, a0.ravel(), t, args=(1.0, L))
    # BUG FIX: previously this hard-coded ard_rsc_ode, so the parametrized
    # `model` argument was never exercised; every case tested the same ODE.
    a_test = odeint(model, a0.ravel(), t, args=(1.0, L))
    assert np.allclose(a_ref, a_test, atol=1e-12)
| [
"numpy.eye",
"numpy.allclose",
"numpy.array",
"numpy.linspace",
"pytest.mark.parametrize"
] | [((327, 478), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model"""', '[ard_rsc_ode, rsc_ode, folgar_tucker_ode, maier_saupe_ode, iard_ode,\n mrd_ode, iardrpr_ode, pard_ode, pardrpr_ode]'], {}), "('model', [ard_rsc_ode, rsc_ode, folgar_tucker_ode,\n maier_saupe_ode, iard_ode, mrd_ode, iardrpr_ode, pard_ode, pardrpr_ode])\n", (350, 478), False, 'import pytest\n'), ((731, 753), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (742, 753), True, 'import numpy as np\n'), ((1015, 1053), 'numpy.allclose', 'np.allclose', (['a_ref', 'a_test'], {'atol': '(1e-12)'}), '(a_ref, a_test, atol=1e-12)\n', (1026, 1053), True, 'import numpy as np\n'), ((776, 785), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (782, 785), True, 'import numpy as np\n'), ((816, 877), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n', (824, 877), True, 'import numpy as np\n')] |
'''Class and utilities for one level of an HSA agent.'''
# python
import os
import pickle
from copy import copy
from time import time
# scipy
from numpy.linalg import inv, norm
from numpy.random import choice, permutation, rand, randint, shuffle
from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, \
logical_not,isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, \
sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros
# drawing
from matplotlib import pyplot
from skimage.draw import ellipse_perimeter, line
# tensorflow
import tensorflow
from tensorflow import keras
# openrave
import openravepy
# self
# AGENT ============================================================================================
class RlAgentLevel():
def __init__(self, level, isGrasp, isOrient, params):
    '''Initializes one level of the hierarchical agent.
    - Input level: Integer index of this level in the hierarchy.
    - Input isGrasp: True if this level selects grasps, False for places.
    - Input isOrient: True if this level selects orientations (dense output)
      rather than positions (convolutional output).
    - Input params: Dictionary of hyperparameters (tMax, trainEvery, network
      sizes, etc.).
    '''
    # parameters
    self.level = level
    self.isGrasp = isGrasp
    self.isOrient = isOrient
    self.params = params
    self.tMax = params["tMax"]
    self.trainEvery = params["trainEvery"]
    self.maxExperiences = params["maxExperiences"]
    self.epsilonMin = params["epsilonMin"]
    self.gamma = params["gamma"]
    self.includeTime = params["includeTimeGrasp"] if isGrasp else params["includeTimePlace"]
    self.nGraspOrientations = params["nGraspOrientations"]
    self.nPlaceOrientations = params["nPlaceOrientations"]
    self.nEpochs = params["nEpochs"]
    self.batchSize = params["batchSize"]
    self.imP = params["imP"]
    self.plotImages = params["plotImages"]
    # at least half the time steps are grasps
    self.minExperiencesToTrain = self.trainEvery * (self.tMax / 2)
    # initialize
    self.experiences = []
    self.Q = self.GenerateNetworkModel(isGrasp)
    # Output shape of the test-time network, excluding the batch dimension.
    # (Previously used Python-2-only xrange; range works on both versions.)
    outShape = self.Q[1].outputs[0].shape
    self.outputShape = tuple(int(outShape[i]) for i in range(1, len(outShape)))
    self.nActionSamples = prod(self.outputShape)
    typeString = "Grasp" if isGrasp else "Place"
    print("{} level {} has {} outputs.".format(typeString, self.level, self.outputShape))
def AddExperience(self, experience):
'''Adds an experience to the experience database.
- Input experience: A tuple of (s, a, r, ...). The contents of ... depends on the update rule
(e.g. Monte Carlo, Q-learning, or Sarsa).
- Returns None.
'''
self.experiences.append(experience)
def EvaluateActions(self, image):
'''Run forward propagation and get approximated action-values.
- Input handImage: Image representing hand contents.
- Input targImage: Image representing action and current sensed volume.
- Input flags: State flags, (inHandBit, episodeTime).
- Input actions: List of candidate actions in the agent's encoding.
- Return values: Values, one for each action, approximating the q-values.
'''
dummyIn = zeros((1, 1, 2), dtype='int32') if self.isOrient else zeros((1, 1, 4), dtype='int32')
values = self.Q[1].predict([array([image]), dummyIn])
return squeeze(values)
def EvaluateActionsMultiple(self, images):
'''TODO'''
batchSize = len(images)
dummyIn = zeros((batchSize, 1, 2), dtype='int32') if self.isOrient else \
zeros((batchSize, 1, 4), dtype='int32')
values = self.Q.predict([array(images), dummyIn])
return squeeze(values)
def GenerateNetworkModel(self, graspNetwork):
    '''Generates tensorflow model for the deep network used to approximate the Q-function at this
    level. Must be called during initialization if a model is not loaded from file.
    - Input graspNetwork: True to build the grasp network, False for the place network; selects
      the input image depth and the number of orientation outputs.
    - Returns (Qtrain, Qtest): Qtrain outputs a single gathered Q-value (for loss computation);
      Qtest outputs the full value map (index input is unused).
    '''
    params = self.params
    weightDecay = params["weightDecay"]
    optimizer = params["optimizer"].lower()
    baseLearningRate = params["baseLearningRate"]
    # architecture
    # Image input: 1 channel for grasp, +1 hand-contents channel for place,
    # +1 optional time channel.
    in1Shape = (self.imP, self.imP, 1 + (not graspNetwork) + self.includeTime)
    # Index input used by Qtrain to gather one scalar from the output map.
    in2Shape = (1, 2) if self.isOrient else (1, 4)
    nDenseOutputs = self.nGraspOrientations if graspNetwork else self.nPlaceOrientations
    in1 = keras.Input(shape=in1Shape, dtype=tensorflow.float32)
    in2 = keras.Input(shape=in2Shape, dtype=tensorflow.int32)
    # Three shared convolutional blocks.
    h1 = keras.layers.Conv2D(params["conv1Outputs"], kernel_size=params["conv1KernelSize"],
        strides=params["conv1Stride"], padding="same", activation="relu",
        kernel_regularizer=keras.regularizers.l2(weightDecay))(in1)
    h1 = keras.layers.Conv2D(params["conv2Outputs"], kernel_size=params["conv2KernelSize"],
        strides=params["conv2Stride"], padding="same", activation="relu",
        kernel_regularizer=keras.regularizers.l2(weightDecay))(h1)
    h1 = keras.layers.Conv2D(params["conv3Outputs"], kernel_size=params["conv3KernelSize"],
        strides=params["conv3Stride"], padding="same", activation="relu",
        kernel_regularizer=keras.regularizers.l2(weightDecay))(h1)
    if self.isOrient:
        # Orientation levels emit one value per discrete orientation.
        h1 = keras.layers.Flatten()(h1)
        h1 = keras.layers.Dense(nDenseOutputs,
            kernel_regularizer=keras.regularizers.l2(weightDecay))(h1)
    else:
        # Position levels emit a spatial value map (linear activation).
        h1 = keras.layers.Conv2D(params["conv4Outputs"], kernel_size=params["conv4KernelSize"],
            strides=params["conv4Stride"], padding="same",
            kernel_regularizer=keras.regularizers.l2(weightDecay))(h1)
    # Gather the single Q-value addressed by in2 for the training loss.
    h2 = keras.layers.Lambda(lambda inputs: tensorflow.gather_nd(inputs[0], inputs[1]))([h1, in2])
    Qtrain = keras.Model(inputs=[in1, in2], outputs=h2)
    Qtest = keras.Model(inputs=[in1, in2], outputs=h1)
    # optimization
    if optimizer == "adam":
        optimizer = keras.optimizers.Adam(baseLearningRate)
    elif optimizer == "rmsprop":
        optimizer = keras.optimizers.RMSprop(baseLearningRate)
    elif optimizer == "sgd":
        optimizer = keras.optimizers.SGD(baseLearningRate)
    else:
        raise Exception("Unsupported optimizer {}.".format(optimizer))
    Qtrain.compile(optimizer=optimizer, loss="MSE")
    #typeString = "grasp" if graspNetwork else "place"
    #print("Summary of {} Q-function for level {}:".format(typeString, self.level))
    #Qtrain.summary()
    return Qtrain, Qtest
def GetNumberOfExperiences(self):
'''Returns the number of entries in the experience replay database currently in memory at this level.'''
return len(self.experiences)
def LabelDataMonteCarlo(self):
'''Given a database of (s, g), reorganize into network model inputs and training labels.
- Returns in1: List of first inputs into network (an image).
- Returns labels: List of training labels, one for each set of inputs.
'''
indexShape = (1, 2) if self.isOrient else (1, 4)
inputs1 = []; inputs2 = []; labels = []
for d in self.experiences:
inputs1.append(d[0]) # image
index = zeros(indexShape, dtype='int32')
index[0, 1:] = d[1]
inputs2.append(index) # index
labels.append(d[2]) # return
return array(inputs1), array(inputs2), array(labels)
def LoadExperienceDatabase(self):
'''Loads the experience database to file.'''
path = os.getcwd() + "/tensorflow/experiences/experiences_level_" + str(self.level) + ".pickle"
self.experiences = pickle.load(open(path, "rb"))
print("Loaded database " + path + ".")
def LoadQFunction(self):
'''Loads the network model and weights from the specified file name.'''
directory = os.getcwd() + "/tensorflow/models"
act = "grasp" if self.isGrasp else "place"
path = directory + "/q_level_" + str(self.level) + "_" + act + ".h5"
self.Q[0].load_weights(path)
print("Loaded Q-function " + path + ".")
def PlotImages(self, o, a, desc):
    '''Produces plots of the robot's observation and selected action.
    - Input o: Image where 1st channel is the target sensed volume and the 2nd channel is the hand
      contents.
    - Input a: Index into the Q-function output which corresponds to the selected action.
    - Input desc: Descriptor corresponding to the current action in the base frame.
    - Returns None.
    '''
    # setup
    It = o[:, :, 0]  # target sensed volume channel
    # grasp levels have no hand contents, so show an empty image for that pane
    Ih = zeros(It.shape) if self.isGrasp else o[:, :, 1]
    # copies of the target image become the R, G, B channels of the annotated plot
    Ir = copy(It); Ig = copy(It); Ib = copy(It)
    if self.isOrient:
        # determine rotation angle
        R = desc.T[0:3, 0:3]  # rotation part of the descriptor transform; assumes desc.T is a 4x4 homogeneous matrix -- TODO confirm
        axisAngle = openravepy.axisAngleFromRotationMatrix(R)
        angle = norm(axisAngle)  # magnitude of the axis-angle vector is the rotation angle
        axis = axisAngle / angle if angle > 0 else array([0.0, 0.0, 1.0])  # guard against zero rotation
        angle *= sign(sum(axis))  # collapse the +/- axis ambiguity into the sign of the angle
        # draw axis indicator
        c = self.imP / 2  # image center; NOTE(review): on Python 3 this is a float, but skimage.draw expects integer coordinates
        majorRadius = self.imP / 8
        minorRadius = majorRadius if self.isGrasp else majorRadius / 2
        xx, yy = ellipse_perimeter(c, c, minorRadius, majorRadius, orientation=0)
        Ir[xx, yy] = 1.0  # mark the ellipse in the red channel
        # draw angle indicator
        length = self.imP / 5
        x = -int(length * sin(angle))
        y = int(length * cos(angle))
        xx, yy = line(c, c, c + x, c + y)  # ray rotated by the selected angle
        Ir[xx, yy] = 1.0
        xx, yy = line(c, c, c, c + length)  # reference ray at zero rotation
        Ir[xx, yy] = 1.0
    else:
        # draw the selection area
        halfWidth = (It.shape[0] * (self.selW / self.imW)) / 2.0  # selection half-width in pixels
        middle = It.shape[0] / 2.0
        start = int(round(middle - halfWidth))
        end = int(round(middle + halfWidth))
        pixels = arange(start, end + 1)
        if start >= 0 and end < It.shape[0]:  # only draw the box if it fits inside the image
            Ib[start, pixels] = 1.0
            Ib[end, pixels] = 1.0
            Ib[pixels, start] = 1.0
            Ib[pixels, end] = 1.0
    # draw robot's selection
    xh = self.actionsInHandFrame[a]  # selected action offset in the hand frame
    # convert the (x, y) offset to pixel coordinates centered in the image
    xi = round(((xh[0:2] * self.imP) / self.imW) + ((self.imP - 1.0) / 2.0)).astype('int32')
    value = (xh[2] + (self.imD / 2.0)) / self.imD  # normalize depth into [0, 1]
    # NOTE(review): the first condition repeats xi[1] < self.imP; it looks like it
    # should be xi[0] < self.imP, leaving xi[0]'s upper bound unchecked as written.
    if xi[0] >= 0 and xi[1] < self.imP and xi[1] >= 0 and xi[1] < self.imP:
        Ir[xi[0], xi[1]] = value
        Ig[xi[0], xi[1]] = 0
        Ib[xi[0], xi[1]] = 0
    # show image
    fig = pyplot.figure()
    Irgb = stack((Ir, Ig, Ib), 2)
    pyplot.subplot(1, 2, 1)
    pyplot.imshow(Irgb, vmin=0.00, vmax=1.00, interpolation="none")
    pyplot.subplot(1, 2, 2)
    pyplot.imshow(Ih, vmin=0.00, vmax=1.00, interpolation="none", cmap="gray")
    fig.suptitle("(Left.) Sensed volume. (Right.) Hand contents.")
    for i in xrange(2):  # NOTE(review): xrange is Python 2 only; range on Python 3
        fig.axes[i].set_xticks([])
        fig.axes[i].set_yticks([])
    pyplot.show(block=True)
def PruneDatabase(self):
    """Drop the oldest replay entries so at most maxExperiences remain."""
    excess = len(self.experiences) - self.maxExperiences
    if excess > 0:
        # keep only the newest maxExperiences entries (the tail of the list)
        self.experiences = self.experiences[excess:]
def SaveExperienceDatabase(self):
    '''Saves the experience database to file.

    Writes self.experiences as a pickle under ./tensorflow/experiences
    (creating the directory if needed); the file name encodes the level and
    the grasp/place type.
    '''
    directory = os.getcwd() + "/tensorflow/experiences"
    if not os.path.isdir(directory):
        os.makedirs(directory)
    act = "grasp" if self.isGrasp else "place"
    path = directory + "/experiences_level_" + str(self.level) + "_" + act + ".pickle"
    # Use a context manager so the handle is flushed and closed even on error;
    # the original leaked the file object returned by open().
    with open(path, "wb") as databaseFile:
        pickle.dump(self.experiences, databaseFile)
    print("Saved database " + path + ".")
def SaveQFunction(self):
    """Write the Q-network weights for this level to an .h5 file."""
    modelDir = os.getcwd() + "/tensorflow/models"
    # create the destination folder on first save
    if not os.path.isdir(modelDir):
        os.makedirs(modelDir)
    actName = "grasp" if self.isGrasp else "place"
    path = modelDir + "/q_level_" + str(self.level) + "_" + actName + ".h5"
    self.Q[0].save_weights(path)
    print("Saved Q-function " + path + ".")
def SelectIndexEpsilonGreedy(self, image, unbias):
    """Choose the next action index with an epsilon-greedy policy.

    - Input image: Observation passed to the Q-function when acting greedily.
    - Input unbias: If True, always act greedily (epsilon = 0).
    - Returns bestIdx: (Multi-)index of the chosen action in the Q output;
      unwrapped to a scalar when the output shape is one-dimensional.
    - Returns bestValue: Value estimate of that action; NaN for random picks.
    - Returns epsilon: The exploration probability that was used.
    """
    # epsilon anneals from 1 toward epsilonMin as the replay database fills
    if unbias:
        epsilon = 0.0
    else:
        anneal = 1.0 - float(len(self.experiences)) / self.maxExperiences
        epsilon = maximum(self.epsilonMin, anneal)
    if rand() < epsilon:
        # explore: uniform random action, no value estimate available
        flatIdx = randint(self.nActionSamples)
        bestIdx = unravel_index(flatIdx, self.outputShape)
        bestValue = float('NaN')
    else:
        # exploit: evaluate the Q-function and take its maximum
        values = self.EvaluateActions(image)
        bestIdx = unravel_index(argmax(values), self.outputShape)
        bestValue = values[bestIdx]
    #self.PlotValues(actions, values)
    # a 1D output shape yields a 1-tuple index; unwrap it to a plain scalar
    if len(bestIdx) == 1:
        bestIdx = bestIdx[0]
    return bestIdx, bestValue, epsilon
def UpdateQFunction(self, inputs1, inputs2, labels):
    '''Trains the neural network model on the input training data and labels.
    - Input inputs1: Image inputs, one per sample.
    - Input inputs2: Index inputs, one per sample; slot [i, 0, 0] receives the batch index.
    - Input labels: Ground truth for the network output.
    - Returns: Loss averaged over the training epochs (0.0 if too few samples).
    '''
    # print information
    actString = "grasp" if self.isGrasp else "place"
    print("Level={}-{}".format(self.level, actString))
    # decide whether or not there are enough new experiences to train
    if labels.shape[0] < self.minExperiencesToTrain: return 0.0
    # shuffle data (all three arrays with the same permutation)
    idxs = arange(labels.shape[0])
    shuffle(idxs)
    inputs1 = inputs1[idxs]
    inputs2 = inputs2[idxs]
    labels = labels[idxs]
    # add batch index; range (not the Python 2-only xrange) keeps this Python 3 compatible
    for i in range(inputs2.shape[0]):
        inputs2[i, 0, 0] = i % self.batchSize
    # fit; shuffle=False because the data was already permuted above
    history = self.Q[0].fit([inputs1, inputs2], labels, epochs = self.nEpochs, batch_size = \
        self.batchSize, shuffle = False)
    # compute average loss
    return mean(history.history["loss"])
def UpdateQFunctionMonteCarlo(self):
'''Trains the Q-function on the current replay database using the Monte Carlo update rule.
- Returns: Average loss.
'''
self.PruneDatabase()
inputs1, inputs2, labels = self.LabelDataMonteCarlo()
return self.UpdateQFunction(inputs1, inputs2, labels) | [
"numpy.sum",
"numpy.argmax",
"tensorflow.gather_nd",
"tensorflow.keras.optimizers.SGD",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.linalg.norm",
"numpy.random.randint",
"numpy.sin",
"numpy.round",
"tensorflow.keras.optimizers.RMSprop",
"numpy.prod",
"tensorflow.keras.... | [((1990, 2012), 'numpy.prod', 'prod', (['self.outputShape'], {}), '(self.outputShape)\n', (1994, 2012), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((3098, 3113), 'numpy.squeeze', 'squeeze', (['values'], {}), '(values)\n', (3105, 3113), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((3401, 3416), 'numpy.squeeze', 'squeeze', (['values'], {}), '(values)\n', (3408, 3416), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((4152, 4205), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'in1Shape', 'dtype': 'tensorflow.float32'}), '(shape=in1Shape, dtype=tensorflow.float32)\n', (4163, 4205), False, 'from tensorflow import keras\n'), ((4216, 4267), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'in2Shape', 'dtype': 'tensorflow.int32'}), '(shape=in2Shape, dtype=tensorflow.int32)\n', (4227, 4267), False, 'from tensorflow import keras\n'), ((5485, 5527), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[in1, in2]', 'outputs': 'h2'}), '(inputs=[in1, in2], outputs=h2)\n', (5496, 5527), False, 'from tensorflow import keras\n'), ((5540, 5582), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[in1, in2]', 'outputs': 'h1'}), '(inputs=[in1, in2], outputs=h1)\n', (5551, 5582), False, 'from tensorflow import keras\n'), ((8205, 8213), 'copy.copy', 'copy', (['It'], {}), 
'(It)\n', (8209, 8213), False, 'from copy import copy\n'), ((8220, 8228), 'copy.copy', 'copy', (['It'], {}), '(It)\n', (8224, 8228), False, 'from copy import copy\n'), ((8235, 8243), 'copy.copy', 'copy', (['It'], {}), '(It)\n', (8239, 8243), False, 'from copy import copy\n'), ((9909, 9924), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (9922, 9924), False, 'from matplotlib import pyplot\n'), ((9936, 9958), 'numpy.stack', 'stack', (['(Ir, Ig, Ib)', '(2)'], {}), '((Ir, Ig, Ib), 2)\n', (9941, 9958), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((9963, 9986), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (9977, 9986), False, 'from matplotlib import pyplot\n'), ((9991, 10052), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['Irgb'], {'vmin': '(0.0)', 'vmax': '(1.0)', 'interpolation': '"""none"""'}), "(Irgb, vmin=0.0, vmax=1.0, interpolation='none')\n", (10004, 10052), False, 'from matplotlib import pyplot\n'), ((10059, 10082), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (10073, 10082), False, 'from matplotlib import pyplot\n'), ((10087, 10159), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['Ih'], {'vmin': '(0.0)', 'vmax': '(1.0)', 'interpolation': '"""none"""', 'cmap': '"""gray"""'}), "(Ih, vmin=0.0, vmax=1.0, interpolation='none', cmap='gray')\n", (10100, 10159), False, 'from matplotlib import pyplot\n'), ((10323, 10346), 'matplotlib.pyplot.show', 'pyplot.show', ([], {'block': '(True)'}), '(block=True)\n', (10334, 10346), False, 'from matplotlib import pyplot\n'), ((13284, 13307), 'numpy.arange', 'arange', (['labels.shape[0]'], {}), '(labels.shape[0])\n', (13290, 13307), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, 
empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((13312, 13325), 'numpy.random.shuffle', 'shuffle', (['idxs'], {}), '(idxs)\n', (13319, 13325), False, 'from numpy.random import choice, permutation, rand, randint, shuffle\n'), ((13698, 13727), 'numpy.mean', 'mean', (["history.history['loss']"], {}), "(history.history['loss'])\n", (13702, 13727), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((2943, 2974), 'numpy.zeros', 'zeros', (['(1, 1, 2)'], {'dtype': '"""int32"""'}), "((1, 1, 2), dtype='int32')\n", (2948, 2974), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((2997, 3028), 'numpy.zeros', 'zeros', (['(1, 1, 4)'], {'dtype': '"""int32"""'}), "((1, 1, 4), dtype='int32')\n", (3002, 3028), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((3226, 3265), 'numpy.zeros', 'zeros', (['(batchSize, 1, 2)'], {'dtype': '"""int32"""'}), "((batchSize, 1, 2), dtype='int32')\n", (3231, 3265), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((3296, 3335), 
'numpy.zeros', 'zeros', (['(batchSize, 1, 4)'], {'dtype': '"""int32"""'}), "((batchSize, 1, 4), dtype='int32')\n", (3301, 3335), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((5650, 5689), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', (['baseLearningRate'], {}), '(baseLearningRate)\n', (5671, 5689), False, 'from tensorflow import keras\n'), ((6848, 6880), 'numpy.zeros', 'zeros', (['indexShape'], {'dtype': '"""int32"""'}), "(indexShape, dtype='int32')\n", (6853, 6880), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((6990, 7004), 'numpy.array', 'array', (['inputs1'], {}), '(inputs1)\n', (6995, 7004), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((7006, 7020), 'numpy.array', 'array', (['inputs2'], {}), '(inputs2)\n', (7011, 7020), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((7022, 7035), 'numpy.array', 'array', (['labels'], {}), '(labels)\n', (7027, 7035), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, 
sqrt, stack, sum, unravel_index, where, zeros\n'), ((7444, 7455), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7453, 7455), False, 'import os\n'), ((8148, 8163), 'numpy.zeros', 'zeros', (['It.shape'], {}), '(It.shape)\n', (8153, 8163), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((8356, 8397), 'openravepy.axisAngleFromRotationMatrix', 'openravepy.axisAngleFromRotationMatrix', (['R'], {}), '(R)\n', (8394, 8397), False, 'import openravepy\n'), ((8412, 8427), 'numpy.linalg.norm', 'norm', (['axisAngle'], {}), '(axisAngle)\n', (8416, 8427), False, 'from numpy.linalg import inv, norm\n'), ((8706, 8770), 'skimage.draw.ellipse_perimeter', 'ellipse_perimeter', (['c', 'c', 'minorRadius', 'majorRadius'], {'orientation': '(0)'}), '(c, c, minorRadius, majorRadius, orientation=0)\n', (8723, 8770), False, 'from skimage.draw import ellipse_perimeter, line\n'), ((8944, 8968), 'skimage.draw.line', 'line', (['c', 'c', '(c + x)', '(c + y)'], {}), '(c, c, c + x, c + y)\n', (8948, 8968), False, 'from skimage.draw import ellipse_perimeter, line\n'), ((9007, 9032), 'skimage.draw.line', 'line', (['c', 'c', 'c', '(c + length)'], {}), '(c, c, c, c + length)\n', (9011, 9032), False, 'from skimage.draw import ellipse_perimeter, line\n'), ((9303, 9325), 'numpy.arange', 'arange', (['start', '(end + 1)'], {}), '(start, end + 1)\n', (9309, 9325), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((10713, 10724), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10722, 10724), False, 'import os\n'), ((10764, 10788), 'os.path.isdir', 'os.path.isdir', (['directory'], 
{}), '(directory)\n', (10777, 10788), False, 'import os\n'), ((10796, 10818), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (10807, 10818), False, 'import os\n'), ((11157, 11168), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11166, 11168), False, 'import os\n'), ((11203, 11227), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (11216, 11227), False, 'import os\n'), ((11235, 11257), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (11246, 11257), False, 'import os\n'), ((12277, 12283), 'numpy.random.rand', 'rand', ([], {}), '()\n', (12281, 12283), False, 'from numpy.random import choice, permutation, rand, randint, shuffle\n'), ((12311, 12339), 'numpy.random.randint', 'randint', (['self.nActionSamples'], {}), '(self.nActionSamples)\n', (12318, 12339), False, 'from numpy.random import choice, permutation, rand, randint, shuffle\n'), ((12356, 12396), 'numpy.unravel_index', 'unravel_index', (['bestIdx', 'self.outputShape'], {}), '(bestIdx, self.outputShape)\n', (12369, 12396), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((3061, 3075), 'numpy.array', 'array', (['[image]'], {}), '([image])\n', (3066, 3075), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((3365, 3378), 'numpy.array', 'array', (['images'], {}), '(images)\n', (3370, 3378), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, 
unravel_index, where, zeros\n'), ((5002, 5024), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (5022, 5024), False, 'from tensorflow import keras\n'), ((5741, 5783), 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', (['baseLearningRate'], {}), '(baseLearningRate)\n', (5765, 5783), False, 'from tensorflow import keras\n'), ((8477, 8499), 'numpy.array', 'array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (8482, 8499), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((8520, 8529), 'numpy.sum', 'sum', (['axis'], {}), '(axis)\n', (8523, 8529), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((9218, 9243), 'numpy.round', 'round', (['(middle - halfWidth)'], {}), '(middle - halfWidth)\n', (9223, 9243), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((9261, 9286), 'numpy.round', 'round', (['(middle + halfWidth)'], {}), '(middle + halfWidth)\n', (9266, 9286), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((12511, 12525), 'numpy.argmax', 'argmax', (['values'], {}), '(values)\n', (12517, 12525), False, 'from numpy import argmax, arange, array, cos, 
concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((4461, 4495), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['weightDecay'], {}), '(weightDecay)\n', (4482, 4495), False, 'from tensorflow import keras\n'), ((4696, 4730), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['weightDecay'], {}), '(weightDecay)\n', (4717, 4730), False, 'from tensorflow import keras\n'), ((4929, 4963), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['weightDecay'], {}), '(weightDecay)\n', (4950, 4963), False, 'from tensorflow import keras\n'), ((5417, 5459), 'tensorflow.gather_nd', 'tensorflow.gather_nd', (['inputs[0]', 'inputs[1]'], {}), '(inputs[0], inputs[1])\n', (5437, 5459), False, 'import tensorflow\n'), ((5831, 5869), 'tensorflow.keras.optimizers.SGD', 'keras.optimizers.SGD', (['baseLearningRate'], {}), '(baseLearningRate)\n', (5851, 5869), False, 'from tensorflow import keras\n'), ((7134, 7145), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7143, 7145), False, 'import os\n'), ((8917, 8927), 'numpy.cos', 'cos', (['angle'], {}), '(angle)\n', (8920, 8927), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((9576, 9637), 'numpy.round', 'round', (['(xh[0:2] * self.imP / self.imW + (self.imP - 1.0) / 2.0)'], {}), '(xh[0:2] * self.imP / self.imW + (self.imP - 1.0) / 2.0)\n', (9581, 9637), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n'), ((5103, 5137), 
'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['weightDecay'], {}), '(weightDecay)\n', (5124, 5137), False, 'from tensorflow import keras\n'), ((5333, 5367), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['weightDecay'], {}), '(weightDecay)\n', (5354, 5367), False, 'from tensorflow import keras\n'), ((8882, 8892), 'numpy.sin', 'sin', (['angle'], {}), '(angle)\n', (8885, 8892), False, 'from numpy import argmax, arange, array, cos, concatenate, dot, empty, exp, eye, flipud, floor, logical_not, isinf, max, maximum, mean, min, minimum, ones, pi, prod, repeat, reshape, round, sign, sin, squeeze, sqrt, stack, sum, unravel_index, where, zeros\n')] |
from __future__ import division
import numpy as np
'''-----------------------------------------------------------------------------
Perceptron Class -- Single Layer Neuron
-----------------------------------------------------------------------------'''
class Perceptron():
    """Single-layer neuron (perceptron).

    The perceptron weighs each input, sums the weighted inputs plus a bias,
    and pushes the sum through an activation function:

        X0 ---> |
                | ---> [Neuron] ---> output

        X1 ---> |

    Training compares the output with the known target and nudges every
    weight by (target - output) * input, i.e. one gradient-descent step on a
    single unit. The algorithm:
      1) multiply every input by its weight,
      2) sum the weighted inputs (plus bias),
      3) activate the sum,
      4) use the error (target - output) to adjust the weights.

    This class is not used by the project; it documents how one unit of the
    network works.
    """
    # step activation: 0 for non-positive sums, 1 otherwise
    f = lambda self, x: 0 if x <= 0 else 1

    def __init__(self, inputs, target):
        self.inputs = inputs
        self.target = target
        # start from random weights and bias in [-1, 1]
        self.weights = [np.random.uniform(-1, 1) for _ in inputs]
        self.bias = np.random.uniform(-1, 1)

    def train(self):
        """Run one perceptron learning step on the stored example."""
        inputs = self.inputs
        weights = self.weights
        target = self.target
        # Process the inputs (forward pass with activation)
        output = self.think(inputs, weights)
        # The error drives the weight update
        error = target - output
        self.weights = [w + error * x for w, x in zip(weights, inputs)]

    def think(self, inputs, weights=None):
        """Return the activated weighted sum of *inputs* (0 or 1)."""
        if weights is None:
            weights = self.weights
        # BUG FIX: the original summed one-element *lists* (sum([x*w] for ...)),
        # which raises TypeError (int + list); sum the products directly.
        all_sum = sum(x * w for x, w in zip(inputs, weights)) + self.bias
        # Activate the result
        return self.f(all_sum)
'''-----------------------------------------------------------------------------
Neural Network - MLPerceptron (2 layers)
-----------------------------------------------------------------------------'''
class NeuralNetwork():
############################################################################
# In the previous class we introduced a single neuron capable, this neuron #
# allows us to classify linearly separable inputs, that is, we can draw#
# a plane/line seperating the possible outputs, however if our data in #
# non-linearly separable, the perceptron will fail to classify it. #
# #
# Regardless of its complexity, linear activation function are only viable #
# in one layer deep Perceptrons, this limits the Perceptron for almost #
# all real world problems are non-linear. As such we use non-linear #
# activation functions that are continuous and differentiable in #
# our weights range [-1,1] #
# #
# As such we will have to work with MLP (multi-layer perceptrons), this #
# is accomplished with HIDDEN LAYERS (neuron nodes stacked in between #
# inputs and outputs, allowing neural networks to learn more #
# complicated attributes/features) and BACKPROPAGATION (a procedure to #
# repeatedly adjust the weights so as to minimize the difference #
# between the actual output and the desired one) the more neurons your #
# network possesses and the more hidden layers,the more precise the out#
# #
# For this implementation we will focus on a 2 layer Neural Network where #
# the number of input units is proportional to the columns in inputs #
# In a future implementation we could enable multiple hidden layers #
############################################################################
def __init__(self,ninputs,nhidden,nout,testing=False):
##################################################################
# We can visualize the weights for every layer as a matrix where #
# every row represents an input unit (that unit can be an #
# input node if the weights are for the hidden layer or the #
# hidden nodes if the layer is the output layer) and every #
# column represents a node in the forward layer #
# #
# Since our implementation only has 2 layers (the input layer #
# isn't considered a neural layer) we only need 2 matrixes #
# for the weights (1 for the connections I--->H and 1 for #
# the connections H--->O) and 2 lists with the bias for #
# every layer (for every list, initially we have n random #
# bias, where n is the number of units in the corresponding #
# layer) in backpropagation all this values might be tweaked #
##################################################################
self.nin = ninputs
self.nhid = nhidden
self.no = nout
#Testing option, locks random values to be the same in next program
if testing:
np.random.seed(1)
#Initiate weights randomly [-1,1]
self.weights_ih = [[np.random.uniform(-1,1) for _ in range(ninputs)]for _ in range(nhidden)]
self.weights_ho = [[np.random.uniform(-1,1) for _ in range(nhidden)]for _ in range(nout)]
#initiate bias randomly[0,1]
self.bias_hidden = [np.random.uniform(-1,1) for _ in range(nhidden)]
self.bias_out = [np.random.uniform(-1,1) for _ in range(nout)]
def sigmoid(self,x,deriv=False):
#Non-linear function continuous and derivable in [-1,1]
return (x*(1-x)) if deriv else (1/(1+np.exp(-x)))
def activation(self,weights, inputs,bias):
#Returns SUM+bias and sigmoid(SUM+bias)
#SUMi = SUMi-1 + (Wi * Ii) for i = 1,..,n, and Wi the weight for input unit Ii
#Since we are saving the weights in a matrix, the operation is a dot product
all_sum = np.dot(weights,inputs)
#Adding bias and activating
activated = 0
if type(all_sum)==list:
all_sum = [val+b for val,b in zip(all_sum,bias)]
activated = [self.sigmoid(x) for x in all_sum]
else:
all_sum += bias
activated = self.sigmoid(all_sum)
return all_sum,activated
def feedForward(self,inputs):
#Passes input forward in the network
#Returns the sum and activated sum for the hidden and output layers
#generating sum for hidden layer and activating it
hidden_sum,activated_hidden = self.activation(self.weights_ih,inputs,self.bias_hidden)
#generating sum for output layer and activating it
output_sum,activated_output = self.activation(self.weights_ho,activated_hidden,self.bias_out)
return hidden_sum,output_sum,activated_hidden,activated_output
def backPropagation(self,inputs,targets,hidden,outputs,learning_rate):
#Returns the error for hidden and output layer
#Returns the weight and bias' deltas for input-hidden and hidden-output layers
#Calculates the error for every result to every target
out_error = [tval-tout for tval,tout in zip(targets,outputs)]
#Calculates the errors to the hidden layer
#The errors can be calculated using the transpose of the weights
hidden_error = np.dot(np.transpose(self.weights_ho),out_error)
#H------------->O Layer
#Delta Calculation for H-->O layers
delta_ho,delta_bias_ho = self.deltaCalculation(learning_rate,outputs,out_error,hidden)
#Adjust weights by its deltas
self.weights_ho += delta_ho
#Adjust bias by its deltas
self.bias_out += delta_bias_ho
#I------------->H Layer
#Delta Calculation for I-->H Layers
delta_ih,delta_bias_ih = self.deltaCalculation(learning_rate,hidden,hidden_error,inputs)
#Adjust weights by its deltas
self.weights_ih += delta_ih
#Adjust bias by its deltas
self.bias_hidden += delta_bias_ih
return hidden_error,out_error,delta_ih,delta_ho,delta_bias_ih,delta_bias_ho
def deltaCalculation(self,learning_rate,activated_vector,error_vector,matrix_values):
#Delta Calculation for 2 consecutive layers
#The delta is the tweak values for the weights and the bias
# [scalar]------[elementwise]--[matrix operation dot]
#DWeights = learning_rate*(error*gradient)*Matrix
#DBias = learning:rate*(error*gradient)
#Returns the delta for weights and for the bias
#Calculate gradient- the sigmoid_derivate of the sigmoid values
gradient = [self.sigmoid(x,deriv=True)for x in activated_vector]
#Elementwise multiplication between the gradient and the error
delta = np.dot(error_vector,gradient)
#Scalar multiplication of delta by the learning rate
delta = np.dot(learning_rate,delta)
#Bias calculation
delta_bias = delta
#delta . (matrix_values)T --matrix multiplication
delta = np.dot(delta,np.transpose(matrix_values))
return delta,delta_bias
def train(self,inputs,targets,learning_rate):
        """Perform one supervised training step on a single example.

        Runs a forward pass, then backpropagates the target error; the
        weight/bias updates themselves are applied inside backPropagation.

        Returns:
            The output-layer error vector for this example.
        """
        # Forward pass; only the hidden activations and final outputs are needed here.
        forward_result = self.feedForward(inputs)
        hidden_acts = forward_result[2]
        net_outputs = forward_result[3]
        # backPropagation returns several values; the output error is the second.
        backprop_result = self.backPropagation(inputs, targets, hidden_acts, net_outputs, learning_rate)
        return backprop_result[1]
def predict(self,entry):
        """Return the network's raw output vector for a single test entry."""
        forward_result = self.feedForward(entry)
        return forward_result[3]
| [
"numpy.random.uniform",
"numpy.random.seed",
"numpy.transpose",
"numpy.exp",
"numpy.dot"
] | [((3709, 3733), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (3726, 3733), True, 'import numpy as np\n'), ((8848, 8871), 'numpy.dot', 'np.dot', (['weights', 'inputs'], {}), '(weights, inputs)\n', (8854, 8871), True, 'import numpy as np\n'), ((11735, 11765), 'numpy.dot', 'np.dot', (['error_vector', 'gradient'], {}), '(error_vector, gradient)\n', (11741, 11765), True, 'import numpy as np\n'), ((11842, 11870), 'numpy.dot', 'np.dot', (['learning_rate', 'delta'], {}), '(learning_rate, delta)\n', (11848, 11870), True, 'import numpy as np\n'), ((3649, 3673), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (3666, 3673), True, 'import numpy as np\n'), ((7939, 7956), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (7953, 7956), True, 'import numpy as np\n'), ((8281, 8305), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (8298, 8305), True, 'import numpy as np\n'), ((8355, 8379), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (8372, 8379), True, 'import numpy as np\n'), ((10276, 10305), 'numpy.transpose', 'np.transpose', (['self.weights_ho'], {}), '(self.weights_ho)\n', (10288, 10305), True, 'import numpy as np\n'), ((12012, 12039), 'numpy.transpose', 'np.transpose', (['matrix_values'], {}), '(matrix_values)\n', (12024, 12039), True, 'import numpy as np\n'), ((8036, 8060), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (8053, 8060), True, 'import numpy as np\n'), ((8137, 8161), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (8154, 8161), True, 'import numpy as np\n'), ((8548, 8558), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (8554, 8558), True, 'import numpy as np\n')] |
import math
import numpy
import lfa
from scipy.interpolate import InterpolatedUnivariateSpline
from makesky import *
from prepord import *
def lsd_multiorder(tmpl_wave, tmpl_flux, tmpl_e_flux, tmpl_msk,
                   wave, flux, e_flux, msk,
                   orders,
                   vl, vh, nv,
                   kreg, emchop=True):
  """Least-squares deconvolution of a spectrum against a template over orders.

  Accumulates the normal equations (A^T A and A^T b) order by order and then
  solves the regularized linear system once for the common velocity profile.

  Args:
    tmpl_wave, tmpl_flux, tmpl_e_flux, tmpl_msk: template spectrum arrays
      (per-order; extracted with prepord).
    wave, flux, e_flux, msk: observed spectrum arrays (same layout).
    orders: iterable of order indices to include in the fit.
    vl, vh: velocity range in km/s (converted to redshift via lfa.LIGHT).
    nv: number of velocity samples across [vl, vh].
    kreg: regularization constant added to the diagonal of A^T A.
    emchop: if True, clip points brighter than median + 5 sigma
      (presumably emission lines) before fitting.

  Returns:
    (vels, prof): velocity grid in km/s and the deconvolved profile.
  """
  zl = vl * 1000.0 / lfa.LIGHT
  zh = vh * 1000.0 / lfa.LIGHT
  zvals = numpy.linspace(zl, zh, nv)
  vels = zvals * lfa.LIGHT / 1000.0
  # Normal-equation accumulators shared across all orders.
  AA = numpy.zeros([ nv, nv ])
  bb = numpy.zeros([ nv ])
  for order in orders:
    # Extract order and clean.
    thistmpl_wave, thistmpl_flux, thistmpl_e_flux = prepord(order, tmpl_wave, tmpl_flux, tmpl_e_flux, tmpl_msk)
    thiswave, thisflux, thise_flux = prepord(order, wave, flux, e_flux, msk)
    # Take off sky (divide out the smooth continuum/sky estimate).
    ss = makesky(thistmpl_wave, thistmpl_flux, 4)
    thistmpl_flux /= ss
    thistmpl_e_flux /= ss
    ss = makesky(thiswave, thisflux, 4)
    thisflux /= ss
    thise_flux /= ss
    # Masks of finite (usable) samples.
    tmpl_ww = numpy.isfinite(thistmpl_flux)
    ww = numpy.isfinite(thisflux)
    if emchop:
      # Clip emission lines (anything above median + 5 sigma).
      medflux, sigflux = medsig(thistmpl_flux[tmpl_ww])
      tmpl_ww = numpy.logical_and(tmpl_ww,
                                  thistmpl_flux < medflux + 5*sigflux)
      medflux, sigflux = medsig(thisflux[ww])
      ww = numpy.logical_and(ww,
                             thisflux < medflux + 5*sigflux)
    thistmpl_wave = thistmpl_wave[tmpl_ww]
    thistmpl_flux = thistmpl_flux[tmpl_ww]
    thistmpl_e_flux = thistmpl_e_flux[tmpl_ww]
    thiswave = thiswave[ww]
    thisflux = thisflux[ww]
    thise_flux = thise_flux[ww]
    # Figure out which pixels in are always in range.
    # (Shifted wavelength wave*(1-z) must fall inside the template at both
    # extreme redshifts for the pixel to be usable at every velocity.)
    wanttmpl = thiswave - zl*thiswave
    inrangel = numpy.logical_and(wanttmpl >= thistmpl_wave[0],
                                 wanttmpl <= thistmpl_wave[-1])
    wanttmpl = thiswave - zh*thiswave
    inrangeh = numpy.logical_and(wanttmpl >= thistmpl_wave[0],
                                 wanttmpl <= thistmpl_wave[-1])
    inrange = numpy.logical_and(inrangel, inrangeh)
    # Restrict to that...
    thiswave = thiswave[inrange]
    thisflux = thisflux[inrange]
    thise_flux = thise_flux[inrange]
    # plt.plot(thistmpl_wave, thistmpl_flux)
    # plt.plot(thiswave, thisflux)
    # plt.show()
    nwave = len(thiswave)
    # Form design matrix: column iz is the template shifted to velocity iz,
    # evaluated at the observed wavelengths.
    A = numpy.empty([ nwave, nv ])
    # Interpolating spline.
    spl = InterpolatedUnivariateSpline(thistmpl_wave, thistmpl_flux, k=3)
    for iz, z in enumerate(zvals):
      wanttmpl = thiswave - z*thiswave
      interp_flux = spl(wanttmpl)
      A[:,iz] = interp_flux
    # Accumulate normal equations for this order.
    AA += numpy.dot(A.transpose(), A)
    bb += numpy.dot(A.transpose(), thisflux)
  # Regularization.
  AA += kreg * numpy.identity(nv)  # need to calculate this constant properly
  prof, chisq, rank, s = numpy.linalg.lstsq(AA, bb, rcond=-1)
  return vels, prof
| [
"numpy.linalg.lstsq",
"numpy.logical_and",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.empty",
"numpy.zeros",
"numpy.isfinite",
"numpy.identity",
"numpy.linspace"
] | [((421, 447), 'numpy.linspace', 'numpy.linspace', (['zl', 'zh', 'nv'], {}), '(zl, zh, nv)\n', (435, 447), False, 'import numpy\n'), ((492, 513), 'numpy.zeros', 'numpy.zeros', (['[nv, nv]'], {}), '([nv, nv])\n', (503, 513), False, 'import numpy\n'), ((523, 540), 'numpy.zeros', 'numpy.zeros', (['[nv]'], {}), '([nv])\n', (534, 540), False, 'import numpy\n'), ((2879, 2915), 'numpy.linalg.lstsq', 'numpy.linalg.lstsq', (['AA', 'bb'], {'rcond': '(-1)'}), '(AA, bb, rcond=-1)\n', (2897, 2915), False, 'import numpy\n'), ((1006, 1035), 'numpy.isfinite', 'numpy.isfinite', (['thistmpl_flux'], {}), '(thistmpl_flux)\n', (1020, 1035), False, 'import numpy\n'), ((1045, 1069), 'numpy.isfinite', 'numpy.isfinite', (['thisflux'], {}), '(thisflux)\n', (1059, 1069), False, 'import numpy\n'), ((1764, 1842), 'numpy.logical_and', 'numpy.logical_and', (['(wanttmpl >= thistmpl_wave[0])', '(wanttmpl <= thistmpl_wave[-1])'], {}), '(wanttmpl >= thistmpl_wave[0], wanttmpl <= thistmpl_wave[-1])\n', (1781, 1842), False, 'import numpy\n'), ((1931, 2009), 'numpy.logical_and', 'numpy.logical_and', (['(wanttmpl >= thistmpl_wave[0])', '(wanttmpl <= thistmpl_wave[-1])'], {}), '(wanttmpl >= thistmpl_wave[0], wanttmpl <= thistmpl_wave[-1])\n', (1948, 2009), False, 'import numpy\n'), ((2058, 2095), 'numpy.logical_and', 'numpy.logical_and', (['inrangel', 'inrangeh'], {}), '(inrangel, inrangeh)\n', (2075, 2095), False, 'import numpy\n'), ((2383, 2407), 'numpy.empty', 'numpy.empty', (['[nwave, nv]'], {}), '([nwave, nv])\n', (2394, 2407), False, 'import numpy\n'), ((2449, 2512), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['thistmpl_wave', 'thistmpl_flux'], {'k': '(3)'}), '(thistmpl_wave, thistmpl_flux, k=3)\n', (2477, 2512), False, 'from scipy.interpolate import InterpolatedUnivariateSpline\n'), ((2790, 2808), 'numpy.identity', 'numpy.identity', (['nv'], {}), '(nv)\n', (2804, 2808), False, 'import numpy\n'), ((1187, 1252), 'numpy.logical_and', 'numpy.logical_and', 
(['tmpl_ww', '(thistmpl_flux < medflux + 5 * sigflux)'], {}), '(tmpl_ww, thistmpl_flux < medflux + 5 * sigflux)\n', (1204, 1252), False, 'import numpy\n'), ((1343, 1398), 'numpy.logical_and', 'numpy.logical_and', (['ww', '(thisflux < medflux + 5 * sigflux)'], {}), '(ww, thisflux < medflux + 5 * sigflux)\n', (1360, 1398), False, 'import numpy\n')] |
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import argparse
import logging
import json
import numpy as np
import os
import trimesh
import deep_sdf
import deep_sdf.workspace as ws
def evaluate(experiment_directory, checkpoint, data_dir, split_filename, max_meshes, keep_normalized=False):
    """Compute chamfer/color distances for reconstructed training meshes.

    Walks the dataset/class/instance hierarchy in the split file, compares
    each reconstructed mesh against its ground-truth surface samples, and
    writes a "chamfer.csv" report (per-shape rows plus a MEAN row) into the
    evaluation directory for the given checkpoint.

    Args:
        experiment_directory: Experiment root containing the trained model.
        checkpoint: Checkpoint identifier to evaluate.
        data_dir: Dataset root containing "SurfaceSamples" and
            "NormalizationParameters".
        split_filename: JSON split file: {dataset: {class: [instance, ...]}}.
        max_meshes: Stop after this many meshes; -1 (or any negative value)
            means no limit, since len(results) never equals a negative number.
        keep_normalized: If True, skip denormalization (offset=0, scale=1).
    """
    with open(split_filename, "r") as f:
        split = json.load(f)

    chamfer_results = []
    chamfer_distances = []
    color_distances = []
    for dataset in split:
        if len(chamfer_results) == max_meshes:
            break
        for class_name in split[dataset]:
            if len(chamfer_results) == max_meshes:
                break
            for instance_name in split[dataset][class_name]:
                if len(chamfer_results) == max_meshes:
                    break
                logging.debug(
                    "evaluating " + os.path.join(dataset, class_name, instance_name)
                )
                reconstructed_mesh_filename = ws.get_reconstructed_training_mesh_filename(
                    experiment_directory, checkpoint, dataset, class_name, instance_name
                )
                logging.debug(
                    'reconstructed mesh is "' + reconstructed_mesh_filename + '"'
                )
                ground_truth_samples_filename = os.path.join(
                    data_dir,
                    "SurfaceSamples",
                    dataset,
                    class_name,
                    instance_name + ".ply",
                )
                logging.debug(
                    "ground truth samples are " + ground_truth_samples_filename
                )
                normalization_params_filename = os.path.join(
                    data_dir,
                    "NormalizationParameters",
                    dataset,
                    class_name,
                    instance_name + ".npz",
                )
                # BUG FIX: previously logged ground_truth_samples_filename here
                # (copy-paste), which made debug logs misleading.
                logging.debug(
                    "normalization params are " + normalization_params_filename
                )
                # Surface Samples (with color)
                ground_truth_points = trimesh.load(ground_truth_samples_filename, process=False)
                # Reconstructed Training Mesh (with color)
                reconstruction = trimesh.load(reconstructed_mesh_filename, process=False)
                if keep_normalized:
                    offset = 0
                    scale = 1
                else:
                    normalization_params = np.load(normalization_params_filename)
                    offset = normalization_params["offset"]
                    scale = normalization_params["scale"]
                chamfer_dist, color_dist = deep_sdf.metrics.chamfer.compute_trimesh_chamfer_color(
                    ground_truth_points,
                    reconstruction,
                    offset,
                    scale,
                )
                logging.debug("chamfer distance: " + str(chamfer_dist))
                chamfer_results.append(
                    (os.path.join(dataset, class_name, instance_name), chamfer_dist, color_dist)
                )
                chamfer_distances.append(chamfer_dist)
                color_distances.append(color_dist)

    # Write the CSV report: header, MEAN row, then one row per shape.
    with open(
        os.path.join(
            ws.get_evaluation_dir(experiment_directory, checkpoint, True), "chamfer.csv"
        ),
        "w",
    ) as f:
        f.write("shape, chamfer_dist, color_dist\n")
        f.write("MEAN, {}, {}\n".format(np.mean(chamfer_distances), np.mean(color_distances)))
        for result in chamfer_results:
            f.write("{}, {}, {}\n".format(result[0], result[1], result[2]))
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="Evaluate a DeepSDF autodecoder")
arg_parser.add_argument(
"--experiment",
"-e",
dest="experiment_directory",
required=True,
help="The experiment directory. This directory should include experiment specifications in "
+ '"specs.json", and logging will be done in this directory as well.',
)
arg_parser.add_argument(
"--checkpoint",
"-c",
dest="checkpoint",
default="latest",
help="The checkpoint to test.",
)
arg_parser.add_argument(
"--data",
"-d",
dest="data_source",
required=True,
help="The data source directory.",
)
arg_parser.add_argument(
"--split",
"-s",
dest="split_filename",
required=True,
help="The split to evaluate.",
)
arg_parser.add_argument(
"--keep_normalization",
"-n",
dest="keep_normalized",
default=False,
action="store_true",
help="If set, keep the meshes in the normalized scale.",
)
arg_parser.add_argument(
"--max_meshes",
"-m",
dest="max_meshes",
default=-1,
help="The maximum number of meshes to evaluate, or -1 for no limit.",
)
deep_sdf.add_common_args(arg_parser)
args = arg_parser.parse_args()
deep_sdf.configure_logging(args)
evaluate(
args.experiment_directory,
args.checkpoint,
args.data_source,
args.split_filename,
int(args.max_meshes),
keep_normalized=args.keep_normalized
)
| [
"deep_sdf.configure_logging",
"numpy.load",
"json.load",
"logging.debug",
"argparse.ArgumentParser",
"deep_sdf.workspace.get_reconstructed_training_mesh_filename",
"trimesh.load",
"deep_sdf.workspace.get_evaluation_dir",
"deep_sdf.add_common_args",
"numpy.mean",
"os.path.join",
"deep_sdf.metri... | [((3759, 3828), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate a DeepSDF autodecoder"""'}), "(description='Evaluate a DeepSDF autodecoder')\n", (3782, 3828), False, 'import argparse\n'), ((5063, 5099), 'deep_sdf.add_common_args', 'deep_sdf.add_common_args', (['arg_parser'], {}), '(arg_parser)\n', (5087, 5099), False, 'import deep_sdf\n'), ((5141, 5173), 'deep_sdf.configure_logging', 'deep_sdf.configure_logging', (['args'], {}), '(args)\n', (5167, 5173), False, 'import deep_sdf\n'), ((385, 397), 'json.load', 'json.load', (['f'], {}), '(f)\n', (394, 397), False, 'import json\n'), ((1009, 1126), 'deep_sdf.workspace.get_reconstructed_training_mesh_filename', 'ws.get_reconstructed_training_mesh_filename', (['experiment_directory', 'checkpoint', 'dataset', 'class_name', 'instance_name'], {}), '(experiment_directory,\n checkpoint, dataset, class_name, instance_name)\n', (1052, 1126), True, 'import deep_sdf.workspace as ws\n'), ((1178, 1254), 'logging.debug', 'logging.debug', (['(\'reconstructed mesh is "\' + reconstructed_mesh_filename + \'"\')'], {}), '(\'reconstructed mesh is "\' + reconstructed_mesh_filename + \'"\')\n', (1191, 1254), False, 'import logging\n'), ((1342, 1431), 'os.path.join', 'os.path.join', (['data_dir', '"""SurfaceSamples"""', 'dataset', 'class_name', "(instance_name + '.ply')"], {}), "(data_dir, 'SurfaceSamples', dataset, class_name, instance_name +\n '.ply')\n", (1354, 1431), False, 'import os\n'), ((1564, 1638), 'logging.debug', 'logging.debug', (["('ground truth samples are ' + ground_truth_samples_filename)"], {}), "('ground truth samples are ' + ground_truth_samples_filename)\n", (1577, 1638), False, 'import logging\n'), ((1726, 1825), 'os.path.join', 'os.path.join', (['data_dir', '"""NormalizationParameters"""', 'dataset', 'class_name', "(instance_name + '.npz')"], {}), "(data_dir, 'NormalizationParameters', dataset, class_name, \n instance_name + '.npz')\n", (1738, 1825), False, 
'import os\n'), ((1957, 2031), 'logging.debug', 'logging.debug', (["('normalization params are ' + ground_truth_samples_filename)"], {}), "('normalization params are ' + ground_truth_samples_filename)\n", (1970, 2031), False, 'import logging\n'), ((2156, 2214), 'trimesh.load', 'trimesh.load', (['ground_truth_samples_filename'], {'process': '(False)'}), '(ground_truth_samples_filename, process=False)\n', (2168, 2214), False, 'import trimesh\n'), ((2308, 2364), 'trimesh.load', 'trimesh.load', (['reconstructed_mesh_filename'], {'process': '(False)'}), '(reconstructed_mesh_filename, process=False)\n', (2320, 2364), False, 'import trimesh\n'), ((2745, 2855), 'deep_sdf.metrics.chamfer.compute_trimesh_chamfer_color', 'deep_sdf.metrics.chamfer.compute_trimesh_chamfer_color', (['ground_truth_points', 'reconstruction', 'offset', 'scale'], {}), '(ground_truth_points,\n reconstruction, offset, scale)\n', (2799, 2855), False, 'import deep_sdf\n'), ((3336, 3397), 'deep_sdf.workspace.get_evaluation_dir', 'ws.get_evaluation_dir', (['experiment_directory', 'checkpoint', '(True)'], {}), '(experiment_directory, checkpoint, True)\n', (3357, 3397), True, 'import deep_sdf.workspace as ws\n'), ((3542, 3568), 'numpy.mean', 'np.mean', (['chamfer_distances'], {}), '(chamfer_distances)\n', (3549, 3568), True, 'import numpy as np\n'), ((3570, 3594), 'numpy.mean', 'np.mean', (['color_distances'], {}), '(color_distances)\n', (3577, 3594), True, 'import numpy as np\n'), ((2528, 2566), 'numpy.load', 'np.load', (['normalization_params_filename'], {}), '(normalization_params_filename)\n', (2535, 2566), True, 'import numpy as np\n'), ((895, 943), 'os.path.join', 'os.path.join', (['dataset', 'class_name', 'instance_name'], {}), '(dataset, class_name, instance_name)\n', (907, 943), False, 'import os\n'), ((3086, 3134), 'os.path.join', 'os.path.join', (['dataset', 'class_name', 'instance_name'], {}), '(dataset, class_name, instance_name)\n', (3098, 3134), False, 'import os\n')] |
import cv2
import os
import numpy as np
# Source (full Market-1501 query set) and destination for the cropped copies.
data_root = "/home/csc302/workspace/luohao/code/AlignedReID/data/market1501/query"
gen_root = "/home/csc302/workspace/luohao/code/AlignedReID/data/market1501_partial/query"
def random_crop(img, sample_rate=0.6):
    """Crop a random horizontal band from *img* and resize it back.

    The band height is drawn from [sample_rate*h, 0.9*h) and placed at a
    random vertical offset, then the crop is stretched to the original size.
    """
    height, width = img.shape[:2]
    # Height of the retained band.
    band_height = np.random.randint(sample_rate * height, height * 0.9, 1)[0]
    # Vertical offset chosen so the band stays fully inside the image.
    top = np.random.randint(0, height - band_height, 1)[0]
    cropped = img[top:band_height + top, :, :]
    return cv2.resize(cropped, (width, height))
# Generate the "partial" query set: crop every jpg and save it under gen_root.
for fname in os.listdir(data_root):
    if fname[-3:] != 'jpg':
        continue
    source = cv2.imread(os.path.join(data_root, fname))
    cropped = random_crop(source)
    cv2.imwrite(os.path.join(gen_root, fname), cropped)
"cv2.imwrite",
"cv2.imread",
"numpy.random.randint",
"os.path.join",
"os.listdir",
"cv2.resize"
] | [((470, 491), 'os.listdir', 'os.listdir', (['data_root'], {}), '(data_root)\n', (480, 491), False, 'import os\n'), ((412, 435), 'cv2.resize', 'cv2.resize', (['img', '(w, h)'], {}), '(img, (w, h))\n', (422, 435), False, 'import cv2\n'), ((558, 593), 'os.path.join', 'os.path.join', (['data_root', 'image_name'], {}), '(data_root, image_name)\n', (570, 593), False, 'import os\n'), ((604, 624), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (614, 624), False, 'import cv2\n'), ((668, 702), 'os.path.join', 'os.path.join', (['gen_root', 'image_name'], {}), '(gen_root, image_name)\n', (680, 702), False, 'import os\n'), ((707, 734), 'cv2.imwrite', 'cv2.imwrite', (['save_path', 'img'], {}), '(save_path, img)\n', (718, 734), False, 'import cv2\n'), ((287, 333), 'numpy.random.randint', 'np.random.randint', (['(sample_rate * h)', '(h * 0.9)', '(1)'], {}), '(sample_rate * h, h * 0.9, 1)\n', (304, 333), True, 'import numpy as np\n'), ((341, 372), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - sh)', '(1)'], {}), '(0, h - sh, 1)\n', (358, 372), True, 'import numpy as np\n')] |
import save_keras_model as saver
import time
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.layers import Activation
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras import backend as K
from keras import constraints
import pickle
import h5py
import os
from pathlib import Path
# Use Theano-style ("channels first") image ordering when running on the
# TensorFlow backend, so layer shapes are consistent across backends.
if K.backend()=='tensorflow':
    K.set_image_dim_ordering("th")
# Import Tensorflow with multiprocessing
import tensorflow as tf
import multiprocessing as mp
# Loading the CIFAR-10 datasets
from keras.datasets import fashion_mnist
# All checkpoints and score logs for this run live under this directory.
save_folder = 'data/Fashion-PositiveWeights-Layers64,32,16-FailedTraining/'
if not os.path.isdir(save_folder):
    os.mkdir(save_folder)
# Training hyperparameters.
batch_size = 32
num_classes = 10
epochs = 200  # NOTE(review): appears unused -- the loop below calls fit() with epochs=1.
# Load Fashion-MNIST and one-hot encode the labels.
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
# Scale pixel values from [0, 255] into [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
def base_model():
    """Build the non-negative-weight MLP (64-32-16 hidden units) for Fashion-MNIST.

    Every Dense layer is constrained to non-negative kernels; the model is
    compiled with SGD and categorical cross-entropy.
    """
    model = Sequential()
    model.add(Flatten(input_shape=(28, 28)))
    model.add(Dropout(0.2))
    # Hidden stack: (units, dropout applied after the layer).
    for units, drop_rate in ((64, 0.10), (32, 0.0), (16, 0.0)):
        model.add(Dense(units, activation='relu',
                        kernel_constraint=constraints.NonNeg()))
        model.add(Dropout(drop_rate))
    model.add(Dense(num_classes, activation='softmax',
                    kernel_constraint=constraints.NonNeg()))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    return model
def load_model_at_epoch(model, epoch):
    """Restore *model*'s weights from the per-epoch .npy dumps in save_folder."""
    prefix = save_folder + f'MODEL_Epoch{epoch}_'
    weights = []
    # Keras set_weights expects the flat ordering W1, b1, W2, b2, ...
    for layer_idx in range(1, 5):
        weights.append(np.load(prefix + 'W' + str(layer_idx) + '.npy'))
        weights.append(np.load(prefix + 'b' + str(layer_idx) + '.npy'))
    model.set_weights(weights)
def saveModel(model, history, fn):
    """Save *model* into save_folder via `saver` and append this epoch's scores.

    Args:
        model: Keras model to persist.
        history: Mapping with 'acc', 'loss', 'val_acc', 'val_loss' keys, each
            a sequence whose first element is this epoch's value (the shape of
            keras' History.history).
        fn: Base filename for the saved model.
    """
    saver.save_model(model, save_folder, fn)
    # The score log lives next to the model files; create it with a header row
    # the first time around.
    score_path = save_folder + 'scores.txt'
    if not Path(score_path).exists():
        createScoreFile(score_path)
    fields = (history['acc'][0], history['loss'][0],
              history['val_acc'][0], history['val_loss'][0])
    with open(score_path, 'a') as f:
        f.write("\t".join(str(v) for v in fields) + "\n")
# helper function to initialize the file for recording the scores
def createScoreFile(fn):
    """Create the score log at *fn* and write its tab-separated header row.

    The file is truncated if it already exists.

    Args:
        fn (str): Path of the score file to create.
    """
    header = 'acc \t loss \t val_acc \t val_loss\n'
    # Context manager guarantees the handle is closed even if write() fails.
    with open(fn, 'w+') as f:
        f.write(header)
def saveInitModel(model):
    """Record the untrained model as epoch 0 with zeroed score entries.

    The scores are wrapped in single-element lists because saveModel indexes
    each history value with [0] (mirroring keras' History.history, whose
    values are per-epoch lists). The previous version passed bare scalars,
    which raised TypeError inside saveModel.
    """
    history = {'acc': [0], 'val_acc': [0], 'loss': [0], 'val_loss': [0]}
    saveModel(model, history, 'MODEL_Epoch0')
model = base_model()
# Resume from the epoch-2500 weight dump before continuing training.
load_model_at_epoch(model, 2500)
model.summary()
# Continue training one epoch at a time (2501..2999 inclusive) so the model
# and its scores can be checkpointed after every epoch.
for e in range(2501, 3000):
    print('Epoch',e)
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=1, validation_data=(x_test,y_test),shuffle=True)
    print('Test accuracy:',history.history['val_acc'][0],'\n','Train accuracy:',history.history['acc'][0],'\n')
    saveModel(model, history.history, f'MODEL_Epoch{e}')
| [
"os.mkdir",
"save_keras_model.save_model",
"numpy.load",
"keras.constraints.NonNeg",
"os.path.isdir",
"keras.backend.backend",
"keras.layers.Dropout",
"keras.layers.Flatten",
"keras.backend.set_image_dim_ordering",
"keras.utils.np_utils.to_categorical",
"pathlib.Path",
"keras.models.Sequential... | [((1117, 1142), 'keras.datasets.fashion_mnist.load_data', 'fashion_mnist.load_data', ([], {}), '()\n', (1140, 1142), False, 'from keras.datasets import fashion_mnist\n'), ((1154, 1199), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (1177, 1199), False, 'from keras.utils import np_utils\n'), ((1209, 1253), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (1232, 1253), False, 'from keras.utils import np_utils\n'), ((653, 664), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (662, 664), True, 'from keras import backend as K\n'), ((684, 714), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""th"""'], {}), "('th')\n", (708, 714), True, 'from keras import backend as K\n'), ((971, 997), 'os.path.isdir', 'os.path.isdir', (['save_folder'], {}), '(save_folder)\n', (984, 997), False, 'import os\n'), ((1007, 1028), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (1015, 1028), False, 'import os\n'), ((1385, 1397), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1395, 1397), False, 'from keras.models import Sequential\n'), ((2654, 2703), 'save_keras_model.save_model', 'saver.save_model', (['model', 'model_save_directory', 'fn'], {}), '(model, model_save_directory, fn)\n', (2670, 2703), True, 'import save_keras_model as saver\n'), ((2810, 2818), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (2814, 2818), False, 'from pathlib import Path\n'), ((1412, 1441), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (1419, 1441), False, 'from keras.layers import Flatten\n'), ((1457, 1469), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1464, 1469), False, 'from keras.layers import Dropout\n'), ((1577, 1589), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (1584, 
1589), False, 'from keras.layers import Dropout\n'), ((1698, 1710), 'keras.layers.Dropout', 'Dropout', (['(0.0)'], {}), '(0.0)\n', (1705, 1710), False, 'from keras.layers import Dropout\n'), ((1818, 1830), 'keras.layers.Dropout', 'Dropout', (['(0.0)'], {}), '(0.0)\n', (1825, 1830), False, 'from keras.layers import Dropout\n'), ((2396, 2438), 'numpy.load', 'np.load', (['(save_folder + fn + w_num + fn_end)'], {}), '(save_folder + fn + w_num + fn_end)\n', (2403, 2438), True, 'import numpy as np\n'), ((2459, 2501), 'numpy.load', 'np.load', (['(save_folder + fn + b_num + fn_end)'], {}), '(save_folder + fn + b_num + fn_end)\n', (2466, 2501), True, 'import numpy as np\n'), ((1540, 1560), 'keras.constraints.NonNeg', 'constraints.NonNeg', ([], {}), '()\n', (1558, 1560), False, 'from keras import constraints\n'), ((1661, 1681), 'keras.constraints.NonNeg', 'constraints.NonNeg', ([], {}), '()\n', (1679, 1681), False, 'from keras import constraints\n'), ((1781, 1801), 'keras.constraints.NonNeg', 'constraints.NonNeg', ([], {}), '()\n', (1799, 1801), False, 'from keras import constraints\n'), ((1915, 1935), 'keras.constraints.NonNeg', 'constraints.NonNeg', ([], {}), '()\n', (1933, 1935), False, 'from keras import constraints\n')] |
from core.vocoder.models.fatchord_version import WaveRNN
from core.vocoder.vocoder_dataset import VocoderDataset, collate_vocoder
from core.vocoder.distribution import discretized_mix_logistic_loss
from core.vocoder.display import stream, simple_table
from core.vocoder.gen_wavernn import gen_testset
from torch.utils.data import DataLoader
from pathlib import Path
from torch import optim
import torch.nn.functional as F
import core.vocoder.hparams as hp
import numpy as np
import time
import torch
import platform
def train(
    run_id: str,
    syn_dir: Path,
    voc_dir: Path,
    models_dir: Path,
    language_code: str,
    ground_truth: bool,
    save_every: int,
    backup_every: int,
    force_restart: bool,
):
    """Train the WaveRNN vocoder, resuming from an existing checkpoint if any.

    Args:
        run_id: Name of this training run; determines the model directory.
        syn_dir: Synthesizer output dir (ground-truth mels/audio, train.txt).
        voc_dir: Vocoder data dir (GTA mels, synthesized.txt).
        models_dir: Root under which models/<language_code>/<run_id>/vocoder lives.
        language_code: Language subdirectory for the model.
        ground_truth: If True train on ground-truth mels, else on GTA mels.
        save_every: Save the main checkpoint every N steps (0 disables).
        backup_every: Write a numbered backup checkpoint every N steps (0 disables).
        force_restart: If True, ignore any existing weights and start fresh.
    """
    # Check to make sure the hop length is correctly factorised
    assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length

    # Instantiate the model
    print("Initializing the model...")
    model = WaveRNN(
        rnn_dims=hp.voc_rnn_dims,
        fc_dims=hp.voc_fc_dims,
        bits=hp.bits,
        pad=hp.voc_pad,
        upsample_factors=hp.voc_upsample_factors,
        feat_dims=hp.num_mels,
        compute_dims=hp.voc_compute_dims,
        res_out_dims=hp.voc_res_out_dims,
        res_blocks=hp.voc_res_blocks,
        hop_length=hp.hop_length,
        sample_rate=hp.sample_rate,
        mode=hp.voc_mode,
    )

    # NOTE(review): `device` is assigned but never used below; inputs are moved
    # to the GPU via .cuda() directly in the training loop.
    if torch.cuda.is_available():
        model = model.cuda()
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # Initialize the optimizer
    optimizer = optim.Adam(model.parameters())
    for p in optimizer.param_groups:
        p["lr"] = hp.voc_lr
    # RAW mode predicts a categorical distribution over quantized samples;
    # MOL mode uses a discretized mixture of logistics.
    loss_func = (
        F.cross_entropy if model.mode == "RAW" else discretized_mix_logistic_loss
    )

    # Load the weights
    model_dir = models_dir.joinpath(language_code).joinpath(run_id).joinpath("vocoder")
    model_dir.mkdir(parents=True, exist_ok=True)
    weights_fpath = model_dir.joinpath("vocoder.pt")
    if force_restart or not weights_fpath.exists():
        print("\nStarting the training of WaveRNN from scratch\n")
        model.save(weights_fpath, optimizer)
    else:
        print("\nLoading weights at %s" % weights_fpath)
        model.load(weights_fpath, optimizer)
        print("WaveRNN weights loaded from step %d" % model.step)

    # Initialize the dataset: ground-truth mels come from the synthesizer dir,
    # GTA ("ground truth aligned") mels from the vocoder dir.
    metadata_fpath = (
        syn_dir.joinpath("train.txt")
        if ground_truth
        else voc_dir.joinpath("synthesized.txt")
    )
    mel_dir = syn_dir.joinpath("mels") if ground_truth else voc_dir.joinpath("mels_gta")
    wav_dir = syn_dir.joinpath("audio")
    dataset = VocoderDataset(metadata_fpath, mel_dir, wav_dir)
    test_loader = DataLoader(dataset, batch_size=1, shuffle=True, pin_memory=True)

    # Begin the training
    simple_table(
        [
            ("Batch size", hp.voc_batch_size),
            ("LR", hp.voc_lr),
            ("Sequence Len", hp.voc_seq_len),
        ]
    )

    for epoch in range(1, 350):
        # Recreated every epoch; multiprocessing workers are disabled on
        # Windows, where fork-based loading is unavailable.
        data_loader = DataLoader(
            dataset,
            collate_fn=collate_vocoder,
            batch_size=hp.voc_batch_size,
            num_workers=2 if platform.system() != "Windows" else 0,
            shuffle=True,
            pin_memory=True,
        )
        start = time.time()
        running_loss = 0.0

        for i, (x, y, m) in enumerate(data_loader, 1):
            if torch.cuda.is_available():
                x, m, y = x.cuda(), m.cuda(), y.cuda()

            # Forward pass
            y_hat = model(x, m)
            if model.mode == "RAW":
                y_hat = y_hat.transpose(1, 2).unsqueeze(-1)
            elif model.mode == "MOL":
                y = y.float()
                y = y.unsqueeze(-1)

            # Backward pass
            loss = loss_func(y_hat, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            speed = i / (time.time() - start)
            avg_loss = running_loss / i
            step = model.get_step()
            k = step // 1000

            # Periodic numbered backups and rolling main checkpoint.
            if backup_every != 0 and step % backup_every == 0:
                model.checkpoint(model_dir, optimizer)
            if save_every != 0 and step % save_every == 0:
                model.save(weights_fpath, optimizer)

            msg = (
                f"| Epoch: {epoch} ({i}/{len(data_loader)}) | "
                f"Loss: {avg_loss:.4f} | {speed:.1f} "
                f"steps/s | Step: {k}k | "
            )
            stream(msg)

        # Generate audio samples from the test set at the end of every epoch.
        gen_testset(
            model,
            test_loader,
            hp.voc_gen_at_checkpoint,
            hp.voc_gen_batched,
            hp.voc_target,
            hp.voc_overlap,
            model_dir,
        )
        print("")
| [
"core.vocoder.display.stream",
"core.vocoder.models.fatchord_version.WaveRNN",
"numpy.cumprod",
"core.vocoder.vocoder_dataset.VocoderDataset",
"torch.utils.data.DataLoader",
"time.time",
"torch.cuda.is_available",
"core.vocoder.gen_wavernn.gen_testset",
"core.vocoder.display.simple_table",
"torch.... | [((938, 1280), 'core.vocoder.models.fatchord_version.WaveRNN', 'WaveRNN', ([], {'rnn_dims': 'hp.voc_rnn_dims', 'fc_dims': 'hp.voc_fc_dims', 'bits': 'hp.bits', 'pad': 'hp.voc_pad', 'upsample_factors': 'hp.voc_upsample_factors', 'feat_dims': 'hp.num_mels', 'compute_dims': 'hp.voc_compute_dims', 'res_out_dims': 'hp.voc_res_out_dims', 'res_blocks': 'hp.voc_res_blocks', 'hop_length': 'hp.hop_length', 'sample_rate': 'hp.sample_rate', 'mode': 'hp.voc_mode'}), '(rnn_dims=hp.voc_rnn_dims, fc_dims=hp.voc_fc_dims, bits=hp.bits, pad\n =hp.voc_pad, upsample_factors=hp.voc_upsample_factors, feat_dims=hp.\n num_mels, compute_dims=hp.voc_compute_dims, res_out_dims=hp.\n voc_res_out_dims, res_blocks=hp.voc_res_blocks, hop_length=hp.\n hop_length, sample_rate=hp.sample_rate, mode=hp.voc_mode)\n', (945, 1280), False, 'from core.vocoder.models.fatchord_version import WaveRNN\n'), ((1372, 1397), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1395, 1397), False, 'import torch\n'), ((2632, 2680), 'core.vocoder.vocoder_dataset.VocoderDataset', 'VocoderDataset', (['metadata_fpath', 'mel_dir', 'wav_dir'], {}), '(metadata_fpath, mel_dir, wav_dir)\n', (2646, 2680), False, 'from core.vocoder.vocoder_dataset import VocoderDataset, collate_vocoder\n'), ((2699, 2763), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(dataset, batch_size=1, shuffle=True, pin_memory=True)\n', (2709, 2763), False, 'from torch.utils.data import DataLoader\n'), ((2794, 2901), 'core.vocoder.display.simple_table', 'simple_table', (["[('Batch size', hp.voc_batch_size), ('LR', hp.voc_lr), ('Sequence Len', hp.\n voc_seq_len)]"], {}), "([('Batch size', hp.voc_batch_size), ('LR', hp.voc_lr), (\n 'Sequence Len', hp.voc_seq_len)])\n", (2806, 2901), False, 'from core.vocoder.display import stream, simple_table\n'), ((1445, 1465), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1457, 1465), 
False, 'import torch\n'), ((1493, 1512), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1505, 1512), False, 'import torch\n'), ((3277, 3288), 'time.time', 'time.time', ([], {}), '()\n', (3286, 3288), False, 'import time\n'), ((4539, 4663), 'core.vocoder.gen_wavernn.gen_testset', 'gen_testset', (['model', 'test_loader', 'hp.voc_gen_at_checkpoint', 'hp.voc_gen_batched', 'hp.voc_target', 'hp.voc_overlap', 'model_dir'], {}), '(model, test_loader, hp.voc_gen_at_checkpoint, hp.\n voc_gen_batched, hp.voc_target, hp.voc_overlap, model_dir)\n', (4550, 4663), False, 'from core.vocoder.gen_wavernn import gen_testset\n'), ((801, 836), 'numpy.cumprod', 'np.cumprod', (['hp.voc_upsample_factors'], {}), '(hp.voc_upsample_factors)\n', (811, 836), True, 'import numpy as np\n'), ((3387, 3412), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3410, 3412), False, 'import torch\n'), ((4518, 4529), 'core.vocoder.display.stream', 'stream', (['msg'], {}), '(msg)\n', (4524, 4529), False, 'from core.vocoder.display import stream, simple_table\n'), ((3950, 3961), 'time.time', 'time.time', ([], {}), '()\n', (3959, 3961), False, 'import time\n'), ((3157, 3174), 'platform.system', 'platform.system', ([], {}), '()\n', (3172, 3174), False, 'import platform\n')] |
"""Inference.
Runs inference using a trained network. Raise ImportError when importing torch
to force numpy.
Author:
<NAME> <<EMAIL>>
Created on:
April 3, 2020
"""
try:
# raise ImportError
import torch
from torchvision.datasets import MNIST
from model import FCNetwork as NNModel
USE_NUMPY = False
except ImportError or ModuleNotFoundError:
from model import NumpyModel as NNModel
from utils.mnist_data import MNIST
USE_NUMPY = True
import numpy as np
from argparse import ArgumentParser
from time import time
def parse_args():
parser = ArgumentParser(description='runs inference on the network')
parser.add_argument('ROOT', type=str,
help='path to the root of the dataset')
parser.add_argument('MODEL', type=str,
help='model state_dict to be loaded')
return parser.parse_args()
class AI:
def __init__(self, root, state_dict_path):
"""Initializes the AI.
Args:
root (str): Path to the MNIST data root.
state_dict_path (str): Path to the weight .pth file
"""
self.root = root
self.data = MNIST(root, train=False)
if USE_NUMPY:
state_dict = np.load(state_dict_path, allow_pickle=True).item()
else:
if torch.cuda.is_available():
state_dict = torch.load(state_dict_path)
else:
state_dict = torch.load(state_dict_path,
map_location=torch.device('cpu'))
if not USE_NUMPY:
self.fc1_weight = state_dict['fc1.0.weight'].detach().cpu()
self.fc2_weight = state_dict['fc2.weight'].detach().cpu()
self.fc1_weight = self.fc1_weight.numpy()
self.fc2_weight = self.fc2_weight.numpy()
in_connections = state_dict['fc0.0.weight'].shape[1]
out_connections = state_dict['fc2.bias'].shape[0]
self.layer_1_neurons = state_dict['fc0.0.bias'].shape[0]
self.layer_2_neurons = state_dict['fc1.0.bias'].shape[0]
self.model = NNModel(in_connections,
out_connections,
self.layer_1_neurons,
self.layer_2_neurons)
if not USE_NUMPY:
self.model.eval()
self.model.load_state_dict(state_dict)
self.counter = 0
def infer_next(self, image=None) -> (np.ndarray, int, any, any):
"""Infer the next number in the list or the given image.
Args:
image (np.ndarray): A 28 x 28 array representing the image. Should
be converted directly from PIL.Image to np.ndarray using
np.array(img). This has max value 255 and min value 0.
Returns:
The input image and the prediction.
"""
if image is None:
if self.counter == len(self.data):
# Reset counter if it reaches the end.
self.counter = 0
image = np.array(self.data[self.counter][0])
if USE_NUMPY:
image_arr = image.reshape([1, 1, image.shape[0], image.shape[1]])
image_arr = image_arr.astype(dtype=float) / 255.
h1, h2, out = self.model(image_arr)
out = out.argmax(1)
else:
tensor_image = torch.tensor(image, dtype=torch.float) / 255.
tensor_image = tensor_image.unsqueeze(0)
with torch.no_grad():
h1, h2, out = self.model(tensor_image)
out = out.argmax(1)
self.counter += 1
return image, int(out[0]), h1, h2
if __name__ == '__main__':
args = parse_args()
print(f"Running on {'numpy' if USE_NUMPY else 'torch'}")
print('loading model...')
start_time = time()
ai = AI(args.ROOT, args.MODEL)
print(f"done! t={time() - start_time:.3f}s")
start_time = time()
for i in range(10000):
_, out, _, _ = ai.infer_next()
if i % 1000 == 0:
print(f'Iteration {i}: pred={out}')
time_del = time() - start_time
time_del /= 10000
print(f'done! t per iter={time_del:.6f}s')
| [
"numpy.load",
"argparse.ArgumentParser",
"utils.mnist_data.MNIST",
"model.NumpyModel",
"torch.load",
"time.time",
"torch.cuda.is_available",
"numpy.array",
"torch.device",
"torch.no_grad",
"torch.tensor"
] | [((586, 645), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""runs inference on the network"""'}), "(description='runs inference on the network')\n", (600, 645), False, 'from argparse import ArgumentParser\n'), ((3814, 3820), 'time.time', 'time', ([], {}), '()\n', (3818, 3820), False, 'from time import time\n'), ((3923, 3929), 'time.time', 'time', ([], {}), '()\n', (3927, 3929), False, 'from time import time\n'), ((1167, 1191), 'utils.mnist_data.MNIST', 'MNIST', (['root'], {'train': '(False)'}), '(root, train=False)\n', (1172, 1191), False, 'from utils.mnist_data import MNIST\n'), ((2104, 2193), 'model.NumpyModel', 'NNModel', (['in_connections', 'out_connections', 'self.layer_1_neurons', 'self.layer_2_neurons'], {}), '(in_connections, out_connections, self.layer_1_neurons, self.\n layer_2_neurons)\n', (2111, 2193), True, 'from model import NumpyModel as NNModel\n'), ((4085, 4091), 'time.time', 'time', ([], {}), '()\n', (4089, 4091), False, 'from time import time\n'), ((1321, 1346), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1344, 1346), False, 'import torch\n'), ((3038, 3074), 'numpy.array', 'np.array', (['self.data[self.counter][0]'], {}), '(self.data[self.counter][0])\n', (3046, 3074), True, 'import numpy as np\n'), ((1377, 1404), 'torch.load', 'torch.load', (['state_dict_path'], {}), '(state_dict_path)\n', (1387, 1404), False, 'import torch\n'), ((3358, 3396), 'torch.tensor', 'torch.tensor', (['image'], {'dtype': 'torch.float'}), '(image, dtype=torch.float)\n', (3370, 3396), False, 'import torch\n'), ((3475, 3490), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3488, 3490), False, 'import torch\n'), ((1241, 1284), 'numpy.load', 'np.load', (['state_dict_path'], {'allow_pickle': '(True)'}), '(state_dict_path, allow_pickle=True)\n', (1248, 1284), True, 'import numpy as np\n'), ((3877, 3883), 'time.time', 'time', ([], {}), '()\n', (3881, 3883), False, 'from time import time\n'), ((1533, 1552), 
'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1545, 1552), False, 'import torch\n')] |
import torch
from torch.utils.data import TensorDataset, DataLoader
import models
from pathlib import Path
from ignite.utils import setup_logger
from ignite.handlers import Checkpoint, DiskSaver
import pandas as pd
import numpy as np
import argparse
import os
from tqdm import tqdm
from dataset import set_seed
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--model", type=str, default='ffn', help="model's name")
parser.add_argument("--checkpoint", type=str, required=True, help="checkpoint file path")
parser.add_argument("--pilot_version", type=int, choices=[1, 2], default=1)
parser.add_argument("--batch_size", type=int, default=1024)
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
help="Device (cuda or cpu)")
parser.add_argument("--local_rank", type=int, default=-1,
help="Local rank for distributed training (-1: not distributed)")
parser.add_argument("--seed", type=int, default=43)
parser.add_argument("--debug", action='store_true')
args = parser.parse_args()
# Setup CUDA, GPU & distributed training
args.distributed = (args.local_rank != -1)
if not args.distributed:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method='env://')
args.n_gpu = torch.cuda.device_count() if not args.distributed else 1
args.device = device
# Set seed
set_seed(args)
logger = setup_logger("Testing", distributed_rank=args.local_rank)
# Model construction
model = getattr(models, args.model)(args)
checkpoint_fp = Path(args.checkpoint)
assert checkpoint_fp.exists(), "Checkpoint '{}' is not found".format(checkpoint_fp.as_posix())
logger.info("Resume from a checkpoint: {}".format(checkpoint_fp.as_posix()))
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load={"model": model}, checkpoint=checkpoint)
model = model.to(device)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
datapath = f'data/Y_{args.pilot_version}.csv'
dataY = pd.read_csv(datapath, header=None).values
test_dataset = torch.tensor(dataY, dtype=torch.float32)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, pin_memory=True, shuffle=False)
pred = []
model.eval()
for batch in tqdm(test_loader, desc="Runing Testing"):
batch = batch.to(device)
x_pred = model(batch)
x_pred = x_pred > 0.5
pred.append(x_pred.cpu().numpy())
np.concatenate(pred).tofile(f'{os.path.split(args.checkpoint)[0]}/X_pre_{args.pilot_version}.bin')
if args.debug:
np.ones_like(np.concatenate(pred)).tofile(f'{os.path.split(args.checkpoint)[0]}/X_pre_2.bin')
if __name__ == "__main__":
main() | [
"tqdm.tqdm",
"torch.distributed.init_process_group",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"pandas.read_csv",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"ignite.utils.setup_logger",
"pathlib.Path",
"torch.cuda.set_device",
"... | [((336, 361), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (359, 361), False, 'import argparse\n'), ((1706, 1720), 'dataset.set_seed', 'set_seed', (['args'], {}), '(args)\n', (1714, 1720), False, 'from dataset import set_seed\n'), ((1732, 1789), 'ignite.utils.setup_logger', 'setup_logger', (['"""Testing"""'], {'distributed_rank': 'args.local_rank'}), "('Testing', distributed_rank=args.local_rank)\n", (1744, 1789), False, 'from ignite.utils import setup_logger\n'), ((1876, 1897), 'pathlib.Path', 'Path', (['args.checkpoint'], {}), '(args.checkpoint)\n', (1880, 1897), False, 'from pathlib import Path\n'), ((2148, 2220), 'ignite.handlers.Checkpoint.load_objects', 'Checkpoint.load_objects', ([], {'to_load': "{'model': model}", 'checkpoint': 'checkpoint'}), "(to_load={'model': model}, checkpoint=checkpoint)\n", (2171, 2220), False, 'from ignite.handlers import Checkpoint, DiskSaver\n'), ((2561, 2601), 'torch.tensor', 'torch.tensor', (['dataY'], {'dtype': 'torch.float32'}), '(dataY, dtype=torch.float32)\n', (2573, 2601), False, 'import torch\n'), ((2618, 2706), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'pin_memory': '(True)', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.batch_size, pin_memory=True,\n shuffle=False)\n', (2628, 2706), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((2746, 2786), 'tqdm.tqdm', 'tqdm', (['test_loader'], {'desc': '"""Runing Testing"""'}), "(test_loader, desc='Runing Testing')\n", (2750, 2786), False, 'from tqdm import tqdm\n'), ((1426, 1464), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (1447, 1464), False, 'import torch\n'), ((1478, 1515), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (1490, 1515), False, 'import torch\n'), ((1520, 1594), 'torch.distributed.init_process_group', 
'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (1556, 1594), False, 'import torch\n'), ((1610, 1635), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1633, 1635), False, 'import torch\n'), ((2289, 2432), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[args.local_rank]', 'output_device': 'args.local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[args.\n local_rank], output_device=args.local_rank, find_unused_parameters=True)\n', (2330, 2432), False, 'import torch\n'), ((2501, 2535), 'pandas.read_csv', 'pd.read_csv', (['datapath'], {'header': 'None'}), '(datapath, header=None)\n', (2512, 2535), True, 'import pandas as pd\n'), ((2909, 2929), 'numpy.concatenate', 'np.concatenate', (['pred'], {}), '(pred)\n', (2923, 2929), True, 'import numpy as np\n'), ((760, 785), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (783, 785), False, 'import torch\n'), ((1289, 1314), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1312, 1314), False, 'import torch\n'), ((2940, 2970), 'os.path.split', 'os.path.split', (['args.checkpoint'], {}), '(args.checkpoint)\n', (2953, 2970), False, 'import os\n'), ((3042, 3062), 'numpy.concatenate', 'np.concatenate', (['pred'], {}), '(pred)\n', (3056, 3062), True, 'import numpy as np\n'), ((3074, 3104), 'os.path.split', 'os.path.split', (['args.checkpoint'], {}), '(args.checkpoint)\n', (3087, 3104), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 23:37:44 2020
@author: nguye
*Author : nguyenrobot
Copyright : nguyenrobot
https://github.com/nguyenrobot
https://www.nguyenrobot.com
"""
""" documentation & tutorials
https://docs.opencv.org/master/dc/dbb/tutorial_py_calibration.html
https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
"""
"""
Load chessboards photos and find camera matrix and distorsion coefficient
Save camera calibration data into S7_cam_calibration.p for further usage
"""
import numpy
import cv2
import glob
import pickle
import os
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = numpy.zeros((9*6,3), numpy.float32)
objp[:,:2] = numpy.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('images_calibration/*.jpg')
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
# If found, add object points, image points (after refining them)
if ret == True:
print('true')
print(fname)
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners)
# Draw and display the corners
cv2.drawChessboardCorners(img, (9, 6), corners2, ret)
cv2.imshow('img', img)
file_name = os.path.splitext(os.path.basename(fname))[0] + '_out.jpg'
cv2.imwrite('images_calibration/output_chessboard/' + file_name, img)
cv2.waitKey(500)
cv2.destroyAllWindows()
# Cam matrix and distortion coeff
ret, camera_matrix, distortion_coefficient, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
S7_cam_calibration = {"camera_matrix" : camera_matrix,
"distortion_coefficient" : distortion_coefficient}
# Save cam matrix and distortion coeff into S7_cam_calibration.p for further usages
pickle.dump(S7_cam_calibration, open( "S7_cam_calibration.p", "wb" )) | [
"cv2.findChessboardCorners",
"os.path.basename",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.waitKey",
"numpy.zeros",
"cv2.imshow",
"cv2.cornerSubPix",
"cv2.imread",
"cv2.calibrateCamera",
"glob.glob",
"cv2.drawChessboardCorners",
"cv2.destroyAllWindows"
] | [((788, 826), 'numpy.zeros', 'numpy.zeros', (['(9 * 6, 3)', 'numpy.float32'], {}), '((9 * 6, 3), numpy.float32)\n', (799, 826), False, 'import numpy\n'), ((1044, 1081), 'glob.glob', 'glob.glob', (['"""images_calibration/*.jpg"""'], {}), "('images_calibration/*.jpg')\n", (1053, 1081), False, 'import glob\n'), ((1876, 1899), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1897, 1899), False, 'import cv2\n'), ((1998, 2069), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, gray.shape[::-1], None, None)\n', (2017, 2069), False, 'import cv2\n'), ((1114, 1131), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (1124, 1131), False, 'import cv2\n'), ((1143, 1180), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1155, 1180), False, 'import cv2\n'), ((1236, 1281), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (1261, 1281), False, 'import cv2\n'), ((1470, 1531), 'cv2.cornerSubPix', 'cv2.cornerSubPix', (['gray', 'corners', '(11, 11)', '(-1, -1)', 'criteria'], {}), '(gray, corners, (11, 11), (-1, -1), criteria)\n', (1486, 1531), False, 'import cv2\n'), ((1610, 1663), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['img', '(9, 6)', 'corners2', 'ret'], {}), '(img, (9, 6), corners2, ret)\n', (1635, 1663), False, 'import cv2\n'), ((1672, 1694), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (1682, 1694), False, 'import cv2\n'), ((1781, 1850), 'cv2.imwrite', 'cv2.imwrite', (["('images_calibration/output_chessboard/' + file_name)", 'img'], {}), "('images_calibration/output_chessboard/' + file_name, img)\n", (1792, 1850), False, 'import cv2\n'), ((1859, 1875), 'cv2.waitKey', 'cv2.waitKey', (['(500)'], {}), '(500)\n', (1870, 1875), False, 'import cv2\n'), ((1732, 1755), 'os.path.basename', 
'os.path.basename', (['fname'], {}), '(fname)\n', (1748, 1755), False, 'import os\n')] |
import cv2
import numpy as np
vid = cv2.VideoCapture(0)
vid.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
def apply_template(img_rgb, template):
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.64
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
return img_rgb
template = cv2.imread("templates/book_mongo_300.jpg", 0)
while (True):
# Capture the video frame
# by frame
ret, frame = vid.read()
img = apply_template(frame, template)
cv2.imshow('template', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# After the loop release the cap object
vid.release()
# Destroy all the windows
cv2.destroyAllWindows() | [
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.imread",
"numpy.where",
"cv2.rectangle",
"cv2.destroyAllWindows",
"cv2.matchTemplate"
] | [((38, 57), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (54, 57), False, 'import cv2\n'), ((533, 578), 'cv2.imread', 'cv2.imread', (['"""templates/book_mongo_300.jpg"""', '(0)'], {}), "('templates/book_mongo_300.jpg', 0)\n", (543, 578), False, 'import cv2\n'), ((881, 904), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (902, 904), False, 'import cv2\n'), ((194, 235), 'cv2.cvtColor', 'cv2.cvtColor', (['img_rgb', 'cv2.COLOR_BGR2GRAY'], {}), '(img_rgb, cv2.COLOR_BGR2GRAY)\n', (206, 235), False, 'import cv2\n'), ((278, 337), 'cv2.matchTemplate', 'cv2.matchTemplate', (['img_gray', 'template', 'cv2.TM_CCOEFF_NORMED'], {}), '(img_gray, template, cv2.TM_CCOEFF_NORMED)\n', (295, 337), False, 'import cv2\n'), ((369, 395), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (377, 395), True, 'import numpy as np\n'), ((715, 742), 'cv2.imshow', 'cv2.imshow', (['"""template"""', 'img'], {}), "('template', img)\n", (725, 742), False, 'import cv2\n'), ((435, 501), 'cv2.rectangle', 'cv2.rectangle', (['img_rgb', 'pt', '(pt[0] + w, pt[1] + h)', '(0, 0, 255)', '(2)'], {}), '(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)\n', (448, 501), False, 'import cv2\n'), ((751, 765), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (762, 765), False, 'import cv2\n')] |
# Code for paper:
# [Title] - "Region-based Non-local Operation for Video Classification"
# [Author] - <NAME>, <NAME>
# [Github] - https://github.com/guoxih/region-based-non-local-network
from __future__ import print_function
from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np
def get_mask(in_channels, channels, ks):
in_channels = int(in_channels)
channels = int(channels)
if len(ks) == 1:
mask = np.zeros((int(in_channels), int(channels), int(ks[0])))
elif len(ks) == 2:
mask = np.zeros((int(in_channels), int(channels), int(ks[0]), int(ks[1])))
elif len(ks) == 3:
mask = np.zeros((int(in_channels), int(channels), int(ks[0]), int(ks[1]), int(ks[2])))
else:
raise Error('not implement yet')
for _ in range(in_channels):
mask[_, _ % channels, :, :] = 1.
return mask
class DiagonalwiseRefactorization(nn.Module):
def __init__(self, in_channels, ks, stride=1, groups=1):
super(DiagonalwiseRefactorization, self).__init__()
channels = in_channels // groups
self.in_channels = in_channels
self.groups = groups
self.stride = stride
p = (np.array(ks)-1)//2
self.p = p.tolist()
self.mask = nn.Parameter(torch.Tensor(get_mask(in_channels, channels, ks=ks)), requires_grad=False)
self.weight = nn.Parameter(torch.Tensor(in_channels, channels, *ks), requires_grad=True)
torch.nn.init.xavier_uniform_(self.weight.data)
self.weight.data.mul_(self.mask.data)
if len(ks) == 1:
self.conv = nn.functional.conv1d
elif len(ks) == 2:
self.conv = nn.functional.conv2d
elif len(ks) == 3:
self.conv = nn.functional.conv3d
else:
raise Error('The kernal size in DiagonalwiseRefactorization is wrong!')
def forward(self, x):
weight = torch.mul(self.weight, self.mask)
x = self.conv(x, weight, bias=None, stride=self.stride, padding=self.p, groups=self.groups)
return x
def DepthwiseConv3d(in_channels, ks=[3,7,7], stride=1):
# Diagonalwise Refactorization
# groups = 16
assert isinstance(ks,list), 'param ks is expected be list type'
groups = max(in_channels // 32, 1)
return DiagonalwiseRefactorization(in_channels, ks, stride, groups)
def DepthwiseConv2d(in_channels, ks=[3,3], stride=1):
# Diagonalwise Refactorization
# groups = 16
assert isinstance(ks,list), 'param ks is expected be list type'
groups = max(in_channels // 32, 1)
return DiagonalwiseRefactorization(in_channels, ks, stride, groups)
def DepthwiseConv1d(in_channels, ks=[3], stride=1):
# Diagonalwise Refactorization
# groups = 16
assert isinstance(ks,list), 'param ks is expected be list type'
groups = max(in_channels // 32, 1)
return DiagonalwiseRefactorization(in_channels, ks, stride, groups)
| [
"torch.nn.init.xavier_uniform_",
"torch.Tensor",
"numpy.array",
"torch.mul"
] | [((1486, 1533), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['self.weight.data'], {}), '(self.weight.data)\n', (1515, 1533), False, 'import torch\n'), ((1936, 1969), 'torch.mul', 'torch.mul', (['self.weight', 'self.mask'], {}), '(self.weight, self.mask)\n', (1945, 1969), False, 'import torch\n'), ((1416, 1456), 'torch.Tensor', 'torch.Tensor', (['in_channels', 'channels', '*ks'], {}), '(in_channels, channels, *ks)\n', (1428, 1456), False, 'import torch\n'), ((1217, 1229), 'numpy.array', 'np.array', (['ks'], {}), '(ks)\n', (1225, 1229), True, 'import numpy as np\n')] |
import os
import unittest
import numpy as np
from os.path import join as pjoin
from io import BytesIO
from ..array_sequence import ArraySequence
from ..tractogram import Tractogram
from ..tractogram_file import HeaderWarning, HeaderError
from ..tractogram_file import DataError
from .. import tck as tck_module
from ..tck import TckFile
import pytest
from numpy.testing import assert_array_equal
from ...testing import data_path
from .test_tractogram import assert_tractogram_equal
DATA = {}
def setup_module():
global DATA
DATA['empty_tck_fname'] = pjoin(data_path, "empty.tck")
# simple.tck contains only streamlines
DATA['simple_tck_fname'] = pjoin(data_path, "simple.tck")
DATA['simple_tck_big_endian_fname'] = pjoin(data_path,
"simple_big_endian.tck")
# standard.tck contains only streamlines
DATA['standard_tck_fname'] = pjoin(data_path, "standard.tck")
DATA['matlab_nan_tck_fname'] = pjoin(data_path, "matlab_nan.tck")
DATA['streamlines'] = [np.arange(1 * 3, dtype="f4").reshape((1, 3)),
np.arange(2 * 3, dtype="f4").reshape((2, 3)),
np.arange(5 * 3, dtype="f4").reshape((5, 3))]
DATA['empty_tractogram'] = Tractogram(affine_to_rasmm=np.eye(4))
DATA['simple_tractogram'] = Tractogram(DATA['streamlines'],
affine_to_rasmm=np.eye(4))
class TestTCK(unittest.TestCase):
def test_load_empty_file(self):
for lazy_load in [False, True]:
tck = TckFile.load(DATA['empty_tck_fname'], lazy_load=lazy_load)
with pytest.warns(Warning if lazy_load else None):
assert_tractogram_equal(tck.tractogram, DATA['empty_tractogram'])
def test_load_simple_file(self):
for lazy_load in [False, True]:
tck = TckFile.load(DATA['simple_tck_fname'], lazy_load=lazy_load)
with pytest.warns(Warning if lazy_load else None):
assert_tractogram_equal(tck.tractogram, DATA['simple_tractogram'])
# Force TCK loading to use buffering.
buffer_size = 1. / 1024**2 # 1 bytes
hdr = TckFile._read_header(DATA['simple_tck_fname'])
tck_reader = TckFile._read(DATA['simple_tck_fname'], hdr, buffer_size)
streamlines = ArraySequence(tck_reader)
tractogram = Tractogram(streamlines)
tractogram.affine_to_rasmm = np.eye(4)
tck = TckFile(tractogram, header=hdr)
assert_tractogram_equal(tck.tractogram, DATA['simple_tractogram'])
def test_load_matlab_nan_file(self):
for lazy_load in [False, True]:
tck = TckFile.load(DATA['matlab_nan_tck_fname'], lazy_load=lazy_load)
streamlines = list(tck.tractogram.streamlines)
assert len(streamlines) == 1
assert streamlines[0].shape == (108, 3)
def test_writeable_data(self):
data = DATA['simple_tractogram']
for key in ('simple_tck_fname', 'simple_tck_big_endian_fname'):
for lazy_load in [False, True]:
tck = TckFile.load(DATA[key], lazy_load=lazy_load)
for actual, expected_tgi in zip(tck.streamlines, data):
assert_array_equal(actual, expected_tgi.streamline)
# Test we can write to arrays
assert actual.flags.writeable
actual[0, 0] = 99
def test_load_simple_file_in_big_endian(self):
for lazy_load in [False, True]:
tck = TckFile.load(DATA['simple_tck_big_endian_fname'],
lazy_load=lazy_load)
with pytest.warns(Warning if lazy_load else None):
assert_tractogram_equal(tck.tractogram, DATA['simple_tractogram'])
assert tck.header['datatype'] == 'Float32BE'
def test_load_file_with_wrong_information(self):
tck_file = open(DATA['simple_tck_fname'], 'rb').read()
# Simulate a TCK file where `datatype` has not the right endianness.
new_tck_file = tck_file.replace(b"Float32LE", b"Float32BE")
with pytest.raises(DataError):
TckFile.load(BytesIO(new_tck_file))
# Simulate a TCK file with unsupported `datatype`.
new_tck_file = tck_file.replace(b"Float32LE", b"int32")
with pytest.raises(HeaderError):
TckFile.load(BytesIO(new_tck_file))
# Simulate a TCK file with no `datatype` field.
new_tck_file = tck_file.replace(b"datatype: Float32LE\n", b"")
# Need to adjust data offset.
new_tck_file = new_tck_file.replace(b"file: . 67\n", b"file: . 47\n")
with pytest.warns(HeaderWarning, match="Missing 'datatype'"):
tck = TckFile.load(BytesIO(new_tck_file))
assert_array_equal(tck.header['datatype'], "Float32LE")
# Simulate a TCK file with no `file` field.
new_tck_file = tck_file.replace(b"\nfile: . 67", b"")
with pytest.warns(HeaderWarning, match="Missing 'file'") as w:
tck = TckFile.load(BytesIO(new_tck_file))
assert_array_equal(tck.header['file'], ". 56")
# Simulate a TCK file with `file` field pointing to another file.
new_tck_file = tck_file.replace(b"file: . 67\n",
b"file: dummy.mat 75\n")
with pytest.raises(HeaderError):
TckFile.load(BytesIO(new_tck_file))
# Simulate a TCK file which is missing a streamline delimiter.
eos = TckFile.FIBER_DELIMITER.tobytes()
eof = TckFile.EOF_DELIMITER.tobytes()
new_tck_file = tck_file[:-(len(eos) + len(eof))] + tck_file[-len(eof):]
# Force TCK loading to use buffering.
buffer_size = 1. / 1024**2 # 1 bytes
hdr = TckFile._read_header(BytesIO(new_tck_file))
tck_reader = TckFile._read(BytesIO(new_tck_file), hdr, buffer_size)
with pytest.raises(DataError):
list(tck_reader)
# Simulate a TCK file which is missing the end-of-file delimiter.
new_tck_file = tck_file[:-len(eof)]
with pytest.raises(DataError):
TckFile.load(BytesIO(new_tck_file))
def test_write_empty_file(self):
tractogram = Tractogram(affine_to_rasmm=np.eye(4))
tck_file = BytesIO()
tck = TckFile(tractogram)
tck.save(tck_file)
tck_file.seek(0, os.SEEK_SET)
new_tck = TckFile.load(tck_file)
assert_tractogram_equal(new_tck.tractogram, tractogram)
new_tck_orig = TckFile.load(DATA['empty_tck_fname'])
assert_tractogram_equal(new_tck.tractogram, new_tck_orig.tractogram)
tck_file.seek(0, os.SEEK_SET)
assert tck_file.read() == open(DATA['empty_tck_fname'], 'rb').read()
def test_write_simple_file(self):
tractogram = Tractogram(DATA['streamlines'],
affine_to_rasmm=np.eye(4))
tck_file = BytesIO()
tck = TckFile(tractogram)
tck.save(tck_file)
tck_file.seek(0, os.SEEK_SET)
new_tck = TckFile.load(tck_file)
assert_tractogram_equal(new_tck.tractogram, tractogram)
new_tck_orig = TckFile.load(DATA['simple_tck_fname'])
assert_tractogram_equal(new_tck.tractogram, new_tck_orig.tractogram)
tck_file.seek(0, os.SEEK_SET)
assert tck_file.read() == open(DATA['simple_tck_fname'], 'rb').read()
# TCK file containing not well formatted entries in its header.
tck_file = BytesIO()
tck = TckFile(tractogram)
tck.header['new_entry'] = 'value\n' # \n not allowed
with pytest.raises(HeaderError):
tck.save(tck_file)
tck.header['new_entry'] = 'val:ue' # : not allowed
with pytest.raises(HeaderError):
tck.save(tck_file)
def test_load_write_file(self):
for fname in [DATA['empty_tck_fname'],
DATA['simple_tck_fname']]:
for lazy_load in [False, True]:
tck = TckFile.load(fname, lazy_load=lazy_load)
tck_file = BytesIO()
tck.save(tck_file)
loaded_tck = TckFile.load(fname, lazy_load=False)
assert_tractogram_equal(loaded_tck.tractogram, tck.tractogram)
# Check that the written file is the same as the one read.
tck_file.seek(0, os.SEEK_SET)
assert tck_file.read() == open(fname, 'rb').read()
# Save tractogram that has an affine_to_rasmm.
for lazy_load in [False, True]:
tck = TckFile.load(DATA['simple_tck_fname'], lazy_load=lazy_load)
affine = np.eye(4)
affine[0, 0] *= -1 # Flip in X
tractogram = Tractogram(tck.streamlines, affine_to_rasmm=affine)
new_tck = TckFile(tractogram, tck.header)
tck_file = BytesIO()
new_tck.save(tck_file)
tck_file.seek(0, os.SEEK_SET)
loaded_tck = TckFile.load(tck_file, lazy_load=False)
assert_tractogram_equal(loaded_tck.tractogram,
tractogram.to_world(lazy=True))
def test_str(self):
tck = TckFile.load(DATA['simple_tck_fname'])
str(tck) # Simply test it's not failing when called.
| [
"io.BytesIO",
"pytest.warns",
"numpy.testing.assert_array_equal",
"pytest.raises",
"numpy.arange",
"numpy.eye",
"os.path.join"
] | [((566, 595), 'os.path.join', 'pjoin', (['data_path', '"""empty.tck"""'], {}), "(data_path, 'empty.tck')\n", (571, 595), True, 'from os.path import join as pjoin\n'), ((670, 700), 'os.path.join', 'pjoin', (['data_path', '"""simple.tck"""'], {}), "(data_path, 'simple.tck')\n", (675, 700), True, 'from os.path import join as pjoin\n'), ((743, 784), 'os.path.join', 'pjoin', (['data_path', '"""simple_big_endian.tck"""'], {}), "(data_path, 'simple_big_endian.tck')\n", (748, 784), True, 'from os.path import join as pjoin\n'), ((911, 943), 'os.path.join', 'pjoin', (['data_path', '"""standard.tck"""'], {}), "(data_path, 'standard.tck')\n", (916, 943), True, 'from os.path import join as pjoin\n'), ((979, 1013), 'os.path.join', 'pjoin', (['data_path', '"""matlab_nan.tck"""'], {}), "(data_path, 'matlab_nan.tck')\n", (984, 1013), True, 'from os.path import join as pjoin\n'), ((2438, 2447), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2444, 2447), True, 'import numpy as np\n'), ((4790, 4845), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["tck.header['datatype']", '"""Float32LE"""'], {}), "(tck.header['datatype'], 'Float32LE')\n", (4808, 4845), False, 'from numpy.testing import assert_array_equal\n'), ((5094, 5140), 'numpy.testing.assert_array_equal', 'assert_array_equal', (["tck.header['file']", '""". 56"""'], {}), "(tck.header['file'], '. 
56')\n", (5112, 5140), False, 'from numpy.testing import assert_array_equal\n'), ((6291, 6300), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6298, 6300), False, 'from io import BytesIO\n'), ((6932, 6941), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (6939, 6941), False, 'from io import BytesIO\n'), ((7496, 7505), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (7503, 7505), False, 'from io import BytesIO\n'), ((1293, 1302), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1299, 1302), True, 'import numpy as np\n'), ((1427, 1436), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1433, 1436), True, 'import numpy as np\n'), ((4127, 4151), 'pytest.raises', 'pytest.raises', (['DataError'], {}), '(DataError)\n', (4140, 4151), False, 'import pytest\n'), ((4338, 4364), 'pytest.raises', 'pytest.raises', (['HeaderError'], {}), '(HeaderError)\n', (4351, 4364), False, 'import pytest\n'), ((4671, 4726), 'pytest.warns', 'pytest.warns', (['HeaderWarning'], {'match': '"""Missing \'datatype\'"""'}), '(HeaderWarning, match="Missing \'datatype\'")\n', (4683, 4726), False, 'import pytest\n'), ((4974, 5025), 'pytest.warns', 'pytest.warns', (['HeaderWarning'], {'match': '"""Missing \'file\'"""'}), '(HeaderWarning, match="Missing \'file\'")\n', (4986, 5025), False, 'import pytest\n'), ((5351, 5377), 'pytest.raises', 'pytest.raises', (['HeaderError'], {}), '(HeaderError)\n', (5364, 5377), False, 'import pytest\n'), ((5801, 5822), 'io.BytesIO', 'BytesIO', (['new_tck_file'], {}), '(new_tck_file)\n', (5808, 5822), False, 'from io import BytesIO\n'), ((5859, 5880), 'io.BytesIO', 'BytesIO', (['new_tck_file'], {}), '(new_tck_file)\n', (5866, 5880), False, 'from io import BytesIO\n'), ((5913, 5937), 'pytest.raises', 'pytest.raises', (['DataError'], {}), '(DataError)\n', (5926, 5937), False, 'import pytest\n'), ((6100, 6124), 'pytest.raises', 'pytest.raises', (['DataError'], {}), '(DataError)\n', (6113, 6124), False, 'import pytest\n'), ((7615, 7641), 'pytest.raises', 'pytest.raises', (['HeaderError'], 
{}), '(HeaderError)\n', (7628, 7641), False, 'import pytest\n'), ((7748, 7774), 'pytest.raises', 'pytest.raises', (['HeaderError'], {}), '(HeaderError)\n', (7761, 7774), False, 'import pytest\n'), ((8649, 8658), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8655, 8658), True, 'import numpy as np\n'), ((8858, 8867), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (8865, 8867), False, 'from io import BytesIO\n'), ((1042, 1070), 'numpy.arange', 'np.arange', (['(1 * 3)'], {'dtype': '"""f4"""'}), "(1 * 3, dtype='f4')\n", (1051, 1070), True, 'import numpy as np\n'), ((1115, 1143), 'numpy.arange', 'np.arange', (['(2 * 3)'], {'dtype': '"""f4"""'}), "(2 * 3, dtype='f4')\n", (1124, 1143), True, 'import numpy as np\n'), ((1188, 1216), 'numpy.arange', 'np.arange', (['(5 * 3)'], {'dtype': '"""f4"""'}), "(5 * 3, dtype='f4')\n", (1197, 1216), True, 'import numpy as np\n'), ((1645, 1689), 'pytest.warns', 'pytest.warns', (['(Warning if lazy_load else None)'], {}), '(Warning if lazy_load else None)\n', (1657, 1689), False, 'import pytest\n'), ((1946, 1990), 'pytest.warns', 'pytest.warns', (['(Warning if lazy_load else None)'], {}), '(Warning if lazy_load else None)\n', (1958, 1990), False, 'import pytest\n'), ((3656, 3700), 'pytest.warns', 'pytest.warns', (['(Warning if lazy_load else None)'], {}), '(Warning if lazy_load else None)\n', (3668, 3700), False, 'import pytest\n'), ((4178, 4199), 'io.BytesIO', 'BytesIO', (['new_tck_file'], {}), '(new_tck_file)\n', (4185, 4199), False, 'from io import BytesIO\n'), ((4391, 4412), 'io.BytesIO', 'BytesIO', (['new_tck_file'], {}), '(new_tck_file)\n', (4398, 4412), False, 'from io import BytesIO\n'), ((4759, 4780), 'io.BytesIO', 'BytesIO', (['new_tck_file'], {}), '(new_tck_file)\n', (4766, 4780), False, 'from io import BytesIO\n'), ((5063, 5084), 'io.BytesIO', 'BytesIO', (['new_tck_file'], {}), '(new_tck_file)\n', (5070, 5084), False, 'from io import BytesIO\n'), ((5404, 5425), 'io.BytesIO', 'BytesIO', (['new_tck_file'], {}), '(new_tck_file)\n', 
(5411, 5425), False, 'from io import BytesIO\n'), ((6151, 6172), 'io.BytesIO', 'BytesIO', (['new_tck_file'], {}), '(new_tck_file)\n', (6158, 6172), False, 'from io import BytesIO\n'), ((6260, 6269), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6266, 6269), True, 'import numpy as np\n'), ((6901, 6910), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6907, 6910), True, 'import numpy as np\n'), ((8074, 8083), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (8081, 8083), False, 'from io import BytesIO\n'), ((3237, 3288), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actual', 'expected_tgi.streamline'], {}), '(actual, expected_tgi.streamline)\n', (3255, 3288), False, 'from numpy.testing import assert_array_equal\n')] |
"""
demo04_lr.py 逻辑分类
"""
import numpy as np
import matplotlib.pyplot as mp
import sklearn.linear_model as lm
x = np.array([
[3, 1],
[2, 5],
[1, 8],
[6, 4],
[5, 2],
[3, 5],
[4, 7],
[4, -1]])
y = np.array([0, 1, 1, 0, 0, 1, 1, 0])
# 把整个空间分为500*500的网格化矩阵
l, r = x[:,0].min()-1, x[:,0].max()+1
b, t = x[:,1].min()-1, x[:,1].max()+1
grid_x, grid_y = np.meshgrid(
np.linspace(l, r, 500),
np.linspace(b, t, 500))
# 把grid_x与grid_y抻平并在一起成两列,作为测试集x
mesh_x = np.column_stack((grid_x.ravel(),
grid_y.ravel()))
# 创建模型,针对test_x预测相应输出
model = lm.LogisticRegression(
solver='liblinear', C=1)
model.fit(x, y)
mesh_y = model.predict(mesh_x)
# 把预测结果变维:500*500,用于绘制分类边界线
grid_z = mesh_y.reshape(grid_x.shape)
# 绘制散点图
mp.figure('Simple Classification', facecolor='lightgray')
mp.title('Simple Classification', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
# 为网格化矩阵中的每个元素填充背景颜色
mp.pcolormesh(grid_x, grid_y, grid_z, cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=y, cmap='brg', s=80)
mp.show() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.pcolormesh",
"numpy.linspace",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplo... | [((116, 191), 'numpy.array', 'np.array', (['[[3, 1], [2, 5], [1, 8], [6, 4], [5, 2], [3, 5], [4, 7], [4, -1]]'], {}), '([[3, 1], [2, 5], [1, 8], [6, 4], [5, 2], [3, 5], [4, 7], [4, -1]])\n', (124, 191), True, 'import numpy as np\n'), ((229, 263), 'numpy.array', 'np.array', (['[0, 1, 1, 0, 0, 1, 1, 0]'], {}), '([0, 1, 1, 0, 0, 1, 1, 0])\n', (237, 263), True, 'import numpy as np\n'), ((568, 614), 'sklearn.linear_model.LogisticRegression', 'lm.LogisticRegression', ([], {'solver': '"""liblinear"""', 'C': '(1)'}), "(solver='liblinear', C=1)\n", (589, 614), True, 'import sklearn.linear_model as lm\n'), ((739, 796), 'matplotlib.pyplot.figure', 'mp.figure', (['"""Simple Classification"""'], {'facecolor': '"""lightgray"""'}), "('Simple Classification', facecolor='lightgray')\n", (748, 796), True, 'import matplotlib.pyplot as mp\n'), ((797, 843), 'matplotlib.pyplot.title', 'mp.title', (['"""Simple Classification"""'], {'fontsize': '(20)'}), "('Simple Classification', fontsize=20)\n", (805, 843), True, 'import matplotlib.pyplot as mp\n'), ((844, 871), 'matplotlib.pyplot.xlabel', 'mp.xlabel', (['"""x"""'], {'fontsize': '(14)'}), "('x', fontsize=14)\n", (853, 871), True, 'import matplotlib.pyplot as mp\n'), ((872, 899), 'matplotlib.pyplot.ylabel', 'mp.ylabel', (['"""y"""'], {'fontsize': '(14)'}), "('y', fontsize=14)\n", (881, 899), True, 'import matplotlib.pyplot as mp\n'), ((900, 928), 'matplotlib.pyplot.tick_params', 'mp.tick_params', ([], {'labelsize': '(10)'}), '(labelsize=10)\n', (914, 928), True, 'import matplotlib.pyplot as mp\n'), ((950, 1000), 'matplotlib.pyplot.pcolormesh', 'mp.pcolormesh', (['grid_x', 'grid_y', 'grid_z'], {'cmap': '"""gray"""'}), "(grid_x, grid_y, grid_z, cmap='gray')\n", (963, 1000), True, 'import matplotlib.pyplot as mp\n'), ((1001, 1052), 'matplotlib.pyplot.scatter', 'mp.scatter', (['x[:, 0]', 'x[:, 1]'], {'c': 'y', 'cmap': '"""brg"""', 's': '(80)'}), "(x[:, 0], x[:, 1], c=y, cmap='brg', s=80)\n", (1011, 1052), True, 'import 
matplotlib.pyplot as mp\n'), ((1053, 1062), 'matplotlib.pyplot.show', 'mp.show', ([], {}), '()\n', (1060, 1062), True, 'import matplotlib.pyplot as mp\n'), ((395, 417), 'numpy.linspace', 'np.linspace', (['l', 'r', '(500)'], {}), '(l, r, 500)\n', (406, 417), True, 'import numpy as np\n'), ((420, 442), 'numpy.linspace', 'np.linspace', (['b', 't', '(500)'], {}), '(b, t, 500)\n', (431, 442), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import itk
import numpy as np
from utils.utils import image2array, array2image, cast_image
# Module authorship metadata (placeholders substituted at release time).
__author__ = ['<NAME>']
__email__ = ['<EMAIL>']
def median_filter(image, radius = 1, out_type = None) :
    '''
    Run an ITK median filter over the image.

    Parameters
    ----------
    image : itk.Image
        image to apply the median filter to
    radius : int
        radius of the median kernel
    out_type : itk pixel type (i.e. itk.F)
        if not None, cast the filtered image to the specified type

    Return
    ------
    filtered : itk.Image
        median-filtered image
    '''
    # Instantiate the filter for the input image's own pixel type and dimension.
    pixel_type, dim = itk.template(image)[1]
    image_type = itk.Image[pixel_type, dim]
    flt = itk.MedianImageFilter[image_type, image_type].New()
    flt.SetInput(image)
    flt.SetRadius(int(radius))
    flt.Update()
    result = flt.GetOutput()
    if out_type is None :
        return result
    return cast_image(result, out_type)
def connected_components(image, out_type = itk.SS) :
    '''
    Label the connected components of a binary image.

    Parameters
    ----------
    image : itk.Image
        binary image to extract connected components from
        (the filter is instantiated for itk.UC, 3-D input)
    out_type : itk pixel type (i.e. itk.SS)
        pixel type of the produced label image

    Return
    ------
    itk.Image
        label image with one integer label per connected component
    '''
    input_type = itk.Image[itk.UC, 3]
    output_type = itk.Image[out_type, 3]
    labeler = itk.ConnectedComponentImageFilter[input_type, output_type].New()
    labeler.SetInput(image)
    labeler.Update()
    return labeler.GetOutput()
def binary_threshold(image, upper_thr, lower_thr, out_type = None) :
    '''
    Binarise an image: voxels strictly inside the open interval
    ]lower_thr, upper_thr[ are set to 1, all other voxels to 0.

    Parameters
    ----------
    image : itk.Image
        image to apply the threshold to
    upper_thr : float
        upper threshold value
    lower_thr : float
        lower threshold value
    out_type : itk pixel type (i.e. itk.UC)
        if not None, cast the result image to the specified type

    Return
    ------
    thr : itk.Image
        binarised image

    Raises
    ------
    ValueError
        if upper_thr is lower than lower_thr
    '''
    if upper_thr < lower_thr :
        # Fix: the original message used a string-literal line continuation,
        # which embedded a long run of indentation spaces inside the error text.
        raise ValueError(
            "upper_thr cannot be lower than lower threshold: "
            "{} < {}".format(upper_thr, lower_thr))
    array, info = image2array(image)
    cond = (array > lower_thr) & (array < upper_thr)
    array[cond] = 1
    array[~cond] = 0
    thr = array2image(array, info)
    if out_type is not None :
        thr = cast_image(thr, out_type)
    return thr
def relabel_compinents(image, out_type = None) :
    '''
    Relabel a label image so labels are consecutive integers starting at 1,
    assigned in decreasing order of component size (label 1 is the most
    frequent non-zero label); 0 is kept as background.

    Parameters
    ----------
    image : itk.Image
        label image to relabel
    out_type : itk pixel type (i.e. itk.SS)
        if not None, cast the result image to the specified type

    Return
    ------
    result : itk.Image
        relabeled image
    '''
    label_field, info = image2array(image)
    offset = 1
    max_label = int(label_field.max())  # ensure a Python int for array sizing
    # Sort the existing labels by voxel count, most frequent first.
    labels, counts = np.unique(label_field, return_counts=True)
    labels = labels[np.argsort(counts)[::-1]]
    nonzero_labels = labels[labels != 0]
    new_max_label = offset - 1 + len(nonzero_labels)
    new_labels = np.arange(offset, new_max_label + 1)
    # Widen the output dtype if the new maximum label does not fit the input's.
    output_type = label_field.dtype
    required_type = np.min_scalar_type(new_max_label)
    if np.dtype(required_type).itemsize > np.dtype(label_field.dtype).itemsize:
        output_type = required_type
    # forward_map sends each old label to its new rank-based label.
    forward_map = np.zeros(max_label + 1, dtype=output_type)
    forward_map[nonzero_labels] = new_labels
    inverse_map = np.zeros(new_max_label + 1, dtype=output_type)
    inverse_map[offset:] = nonzero_labels
    result = array2image(forward_map[label_field], info)
    if out_type is not None:
        result = cast_image(result, out_type)
    return result
def opening(image, radius = 1, bkg = 0, frg = 1, out_type = None) :
    '''
    Morphological binary opening with a ball structuring element.

    Parameters
    ----------
    image : itk.Image
        target image
    radius : int
        radius of the ball structuring element
    bkg : pixel type
        value to be considered as background. default 0
    frg : pixel type
        value to be considered as foreground. default 1
    out_type : itk pixel type (i.e. itk.UC)
        if not None, cast the result image to the specified type

    Return
    ------
    opened : itk.Image
        opened image
    '''
    # Instantiate the filter for the input image's own pixel type and dimension.
    pixel_type, dim = itk.template(image)[1]
    image_type = itk.Image[pixel_type, dim]
    # Ball-shaped structuring element of the requested radius.
    element_type = itk.FlatStructuringElement[dim]
    kernel = element_type.Ball(radius)
    flt = itk.BinaryMorphologicalOpeningImageFilter[image_type,
                                                   image_type,
                                                   element_type].New()
    flt.SetInput(image)
    flt.SetKernel(kernel)
    flt.SetForegroundValue(frg)
    flt.SetBackgroundValue(bkg)
    flt.Update()
    opened = flt.GetOutput()
    if out_type is None :
        return opened
    return cast_image(opened, out_type)
| [
"itk.template",
"numpy.dtype",
"numpy.zeros",
"utils.utils.cast_image",
"utils.utils.image2array",
"utils.utils.array2image",
"numpy.min_scalar_type",
"numpy.argsort",
"numpy.arange",
"numpy.unique"
] | [((2255, 2273), 'utils.utils.image2array', 'image2array', (['image'], {}), '(image)\n', (2266, 2273), False, 'from utils.utils import image2array, array2image, cast_image\n'), ((2379, 2403), 'utils.utils.array2image', 'array2image', (['array', 'info'], {}), '(array, info)\n', (2390, 2403), False, 'from utils.utils import image2array, array2image, cast_image\n'), ((2567, 2585), 'utils.utils.image2array', 'image2array', (['image'], {}), '(image)\n', (2578, 2585), False, 'from utils.utils import image2array, array2image, cast_image\n'), ((2700, 2742), 'numpy.unique', 'np.unique', (['label_field'], {'return_counts': '(True)'}), '(label_field, return_counts=True)\n', (2709, 2742), True, 'import numpy as np\n'), ((2891, 2927), 'numpy.arange', 'np.arange', (['offset', '(new_max_label + 1)'], {}), '(offset, new_max_label + 1)\n', (2900, 2927), True, 'import numpy as np\n'), ((2984, 3017), 'numpy.min_scalar_type', 'np.min_scalar_type', (['new_max_label'], {}), '(new_max_label)\n', (3002, 3017), True, 'import numpy as np\n'), ((3152, 3194), 'numpy.zeros', 'np.zeros', (['(max_label + 1)'], {'dtype': 'output_type'}), '(max_label + 1, dtype=output_type)\n', (3160, 3194), True, 'import numpy as np\n'), ((3252, 3298), 'numpy.zeros', 'np.zeros', (['(new_max_label + 1)'], {'dtype': 'output_type'}), '(new_max_label + 1, dtype=output_type)\n', (3260, 3298), True, 'import numpy as np\n'), ((3388, 3416), 'utils.utils.array2image', 'array2image', (['relabeled', 'info'], {}), '(relabeled, info)\n', (3399, 3416), False, 'from utils.utils import image2array, array2image, cast_image\n'), ((634, 653), 'itk.template', 'itk.template', (['image'], {}), '(image)\n', (646, 653), False, 'import itk\n'), ((941, 971), 'utils.utils.cast_image', 'cast_image', (['filtered', 'out_type'], {}), '(filtered, out_type)\n', (951, 971), False, 'from utils.utils import image2array, array2image, cast_image\n'), ((2448, 2473), 'utils.utils.cast_image', 'cast_image', (['thr', 'out_type'], {}), '(thr, 
out_type)\n', (2458, 2473), False, 'from utils.utils import image2array, array2image, cast_image\n'), ((3464, 3492), 'utils.utils.cast_image', 'cast_image', (['result', 'out_type'], {}), '(result, out_type)\n', (3474, 3492), False, 'from utils.utils import image2array, array2image, cast_image\n'), ((4137, 4156), 'itk.template', 'itk.template', (['image'], {}), '(image)\n', (4149, 4156), False, 'import itk\n'), ((4915, 4943), 'utils.utils.cast_image', 'cast_image', (['opened', 'out_type'], {}), '(opened, out_type)\n', (4925, 4943), False, 'from utils.utils import image2array, array2image, cast_image\n'), ((2760, 2785), 'numpy.argsort', 'np.argsort', (['labels_counts'], {}), '(labels_counts)\n', (2770, 2785), True, 'import numpy as np\n'), ((3025, 3048), 'numpy.dtype', 'np.dtype', (['required_type'], {}), '(required_type)\n', (3033, 3048), True, 'import numpy as np\n'), ((3060, 3087), 'numpy.dtype', 'np.dtype', (['label_field.dtype'], {}), '(label_field.dtype)\n', (3068, 3087), True, 'import numpy as np\n')] |
import numpy as np
import unittest
import clpybind
class TestClp(unittest.TestCase):
    """Exercise the clpybind Simplex wrapper on a small maximisation LP."""

    def test_solve(self):
        # maximise 20*x1 + 10*x2 + 15*x3 subject to A x <= row_ub, x >= 0.
        x = clpybind.Matrix(np.array([
            [3, 2, 5],
            [2, 1, 1],
            [1, 1, 3],
            [5, 2, 4]
        ], dtype=np.double))
        col_lb = np.zeros(3, dtype=np.double)
        # Fix: np.infty was removed in NumPy 2.0; np.inf is the supported spelling.
        col_ub = np.repeat(np.inf, 3)
        obj = np.array([20, 10, 15], dtype=np.double)
        row_lb = np.repeat(-np.inf, 4)
        row_ub = np.array([55, 26, 30, 57], dtype=np.double)
        solver = clpybind.Simplex(x, col_lb, col_ub, obj, row_lb, row_ub)
        solver.log_level = clpybind.Simplex.LogLevel.Off
        solver.optimization_direction = clpybind.Simplex.OptimizationDirection.Maximize
        solver.initial_solve()
        np.testing.assert_equal(solver.status,
                                clpybind.Simplex.ProblemStatus.Optimal)
        np.testing.assert_almost_equal(solver.objective_value, 268.0)
        np.testing.assert_almost_equal(solver.solution, [1.8, 20.8, 1.6])
        np.testing.assert_almost_equal(solver.reduced_costs, np.zeros(3))
        np.testing.assert_almost_equal(solver.shadow_prices, [1, 6, 0, 1])
| [
"clpybind.Simplex",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.array",
"numpy.testing.assert_equal",
"numpy.repeat"
] | [((290, 318), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.double'}), '(3, dtype=np.double)\n', (298, 318), True, 'import numpy as np\n'), ((336, 358), 'numpy.repeat', 'np.repeat', (['np.infty', '(3)'], {}), '(np.infty, 3)\n', (345, 358), True, 'import numpy as np\n'), ((373, 412), 'numpy.array', 'np.array', (['[20, 10, 15]'], {'dtype': 'np.double'}), '([20, 10, 15], dtype=np.double)\n', (381, 412), True, 'import numpy as np\n'), ((430, 453), 'numpy.repeat', 'np.repeat', (['(-np.infty)', '(4)'], {}), '(-np.infty, 4)\n', (439, 453), True, 'import numpy as np\n'), ((471, 514), 'numpy.array', 'np.array', (['[55, 26, 30, 57]'], {'dtype': 'np.double'}), '([55, 26, 30, 57], dtype=np.double)\n', (479, 514), True, 'import numpy as np\n'), ((532, 588), 'clpybind.Simplex', 'clpybind.Simplex', (['x', 'col_lb', 'col_ub', 'obj', 'row_lb', 'row_ub'], {}), '(x, col_lb, col_ub, obj, row_lb, row_ub)\n', (548, 588), False, 'import clpybind\n'), ((773, 851), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['solver.status', 'clpybind.Simplex.ProblemStatus.Optimal'], {}), '(solver.status, clpybind.Simplex.ProblemStatus.Optimal)\n', (796, 851), True, 'import numpy as np\n'), ((892, 953), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['solver.objective_value', '(268.0)'], {}), '(solver.objective_value, 268.0)\n', (922, 953), True, 'import numpy as np\n'), ((962, 1027), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['solver.solution', '[1.8, 20.8, 1.6]'], {}), '(solver.solution, [1.8, 20.8, 1.6])\n', (992, 1027), True, 'import numpy as np\n'), ((1110, 1176), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['solver.shadow_prices', '[1, 6, 0, 1]'], {}), '(solver.shadow_prices, [1, 6, 0, 1])\n', (1140, 1176), True, 'import numpy as np\n'), ((142, 213), 'numpy.array', 'np.array', (['[[3, 2, 5], [2, 1, 1], [1, 1, 3], [5, 2, 4]]'], {'dtype': 'np.double'}), '([[3, 2, 5], [2, 1, 1], [1, 1, 3], [5, 2, 
4]], dtype=np.double)\n', (150, 213), True, 'import numpy as np\n'), ((1089, 1100), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1097, 1100), True, 'import numpy as np\n')] |
# @Time : 2018-9-10
# @Author : zxh
import numpy as np
class LoopQueue:
    """Fixed-size circular buffer of floats with a running-mean threshold test."""

    def __init__(self, size, mean_over):
        # Pre-allocated ring storage; writes wrap around via `index`.
        self.data = np.zeros([size], dtype=np.float32)
        self.mean_over = mean_over
        self.index = 0
        self.size = size

    def set(self, n):
        """Store `n` at the cursor, advance it (wrapping), and report
        whether the buffer mean has reached the threshold."""
        self.data[self.index] = n
        self.index = (self.index + 1) % self.size
        return self.data.mean() >= self.mean_over

    def get(self):
        """Report whether the buffer mean has reached the threshold."""
        return self.data.mean() >= self.mean_over

    def clear(self):
        """Zero every slot; the cursor position is left unchanged."""
        self.data.fill(0)
| [
"numpy.zeros"
] | [((135, 169), 'numpy.zeros', 'np.zeros', (['[size]'], {'dtype': 'np.float32'}), '([size], dtype=np.float32)\n', (143, 169), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
import sys
import random
import torch
import torchaudio
import torch.nn.functional as F
import numpy as np
import time
import struct
from torchaudio.compliance.kaldi import fbank
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import MultiStepLR
from torch.nn.init import xavier_uniform_
class MyDataset(Dataset):
    # input: librispeech:train-clean-100 vad.
    # output: fbank(1,160,64)
    def __init__(self, mode = 'train', use_redis = True):
        """Index a csv of '<wav-path> <speaker-label>' lines for train or test."""
        self.mode = mode
        self.use_redis = use_redis
        if self.mode == "train":
            self.datafile = 'data/train.csv'
        else:
            self.datafile = 'data/test.csv'
        # Each line is expected to be "<wav path> <integer label>" (see __getitem__).
        self.file_ids = open(self.datafile,'r').readlines()
        self.sample_rate = 16000
        self.width = 160   # number of fbank frames kept per sample
        self.height = 64   # number of mel bins
    def _toRedis(self,r,arr,key):
        """Store given Numpy array 'arr' in Redis under key """
        h, w = arr.shape
        # Prefix the payload with the (h, w) shape as two big-endian uint32.
        shape = struct.pack('>II',h,w)
        encoded = shape + arr.tobytes()
        # Store encoded data in Redis
        r.set(key,encoded)
        return
    def _fromRedis(self,r,key):
        """Retrieve Numpy array from Redis key 'arr'"""
        encoded = r.get(key)
        # The first 8 bytes carry the (h, w) shape written by _toRedis.
        h, w = struct.unpack('>II',encoded[:8])
        arr = np.frombuffer(encoded, dtype=np.float32, offset=8).reshape(h,w)
        return arr
    def __getitem__(self, idx):
        """Return (feature, label): a (1, width, height) mean/var-normalised
        fbank tensor and a one-element LongTensor speaker label."""
        f,label = self.file_ids[idx].strip().split(" ")
        if self.use_redis : # use redis as an fbank cache
            import redis
            r = redis.Redis(host='localhost', port=6379, db=1)
            if r.exists(f): # cache hit: read the precomputed fbank from redis
                feature = self._fromRedis(r,f) #read from redis
                feature = torch.from_numpy(feature)
            else: # cache miss: compute the fbank and save it to redis.
                wav,sr = torchaudio.load(f,normalize=False)
                assert sr == self.sample_rate
                wav = wav / 1.0  # presumably converts raw integer samples to float -- confirm
                feature = fbank(wav, dither=1,high_freq=-200, low_freq=64, htk_compat=True, num_mel_bins=self.height, sample_frequency=self.sample_rate, use_energy=False, window_type='hamming')
                self._toRedis(r,feature.numpy(),f) #set redis
        else: # no redis: always compute the fbank directly
            wav,sr = torchaudio.load(f,normalize=False)
            assert sr == self.sample_rate
            wav = wav / 1.0
            feature = fbank(wav, dither=1,high_freq=-200, low_freq=64, htk_compat=True, num_mel_bins=self.height, sample_frequency=self.sample_rate, use_energy=False, window_type='hamming')
        feature_len = len(feature)
        if feature_len < self.width:# for too short utterance.
            # Repeatedly double the features (width // original_len times) so the
            # clip is at least `width` frames long before cropping below.
            for _ in range(self.width // feature_len):
                feature = torch.cat((feature,feature),0)
            feature_len = len(feature)
        if self.mode == "train": # crop a random `width`-frame window for training
            rand_start = random.randint(0,feature_len - self.width)
            feature = feature[rand_start : rand_start + self.width]
        else: # deterministic crop for evaluation
            feature = feature[0 : self.width]
        # Per-mel-bin zero-mean / unit-variance normalisation over time.
        std,mu = torch.std_mean(feature,dim=0)
        feature = (feature - mu) / (std + 1e-5)
        feature = torch.unsqueeze(feature, dim=0)
        label = torch.LongTensor([int(label)])
        return feature,label
    def __len__(self):
        return len(self.file_ids)
class Generator(nn.Module):
#input x: h,c
#output G(x): h,c
def __init__(self):
super(Generator,self).__init__()
self.conv1 = nn.Conv2d(1,256, (15,1), stride = (1,1),padding='same')
self.gate1 = nn.Sequential(nn.Conv2d(1,256, (15,1), stride = (1,1),padding='same'), nn.Sigmoid())
self.conv2 = nn.Conv2d(256,512, (5,1), stride = (2,1), padding=(2,0))
self.gate2 = nn.Sequential(nn.Conv2d(256,512, (5,1), stride = (2,1), padding=(2,0)),nn.Sigmoid())
self.conv3 = nn.Conv2d(512,1024, (5,1), stride = (2,1), padding=(2,0))
self.gate3 = nn.Sequential(nn.Conv2d(512,1024, (5,1), stride = (2,1), padding=(2,0)),nn.Sigmoid())
self.conv4 = nn.Conv2d(1024,1024, (5,1), stride = (1,1),padding='same')
self.conv5 = nn.Conv2d(512,512, (5,1), stride = (1,1),padding='same')
self.gate5 = nn.Sequential(nn.Conv2d(256,256, (5,1), stride = (2,1)),nn.Sigmoid())
self.conv6 = nn.Conv2d(256,1, (15,1), stride = (1,1),padding='same')
self._initialize_weights()
def __pixel_shuffle(self,input, upscale_factor_h, upscale_factor_w):
batch_size, channels, in_height, in_width = input.size()
channels //= upscale_factor_h * upscale_factor_w
out_height = in_height * upscale_factor_h
out_width = in_width * upscale_factor_w
input_view = input.contiguous().view(batch_size, channels, upscale_factor_h, upscale_factor_w,in_height, in_width)
shuffle_out = input_view.permute(0, 1, 4, 2, 5, 3).contiguous()
return shuffle_out.view(batch_size, channels, out_height, out_width)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m,nn.Conv2d):
xavier_uniform_(m.weight)
m.bias.data.fill_(0)
def _instance_norm(self,inputs): #never use.
return nn.InstanceNorm2d(inputs.shape[1])(inputs)
def forward(self,x):
# gate = A * B
A_1 = self.conv1(x)
B_1 = self.gate1(x)
x = A_1 * B_1
#downsample 1
A_2 = self.conv2(x)
B_2 = self.gate2(x)
x = A_2 * B_2
#downsample 2
A_3 = self.conv3(x)
B_3 = self.gate3(x)
x = A_3 * B_3
#upsample 1
x = self.conv4(x)
A_4 = self.__pixel_shuffle(x,2,1)
B_4 = nn.Sigmoid()(self.__pixel_shuffle(x,2,1))
x = A_4 * B_4
#upsample 2
x = self.conv5(x)
A_5 = self.__pixel_shuffle(x,2,1)
B_5 = nn.Sigmoid()(self.__pixel_shuffle(x,2,1))
x = A_5 * B_5
x = self.conv6(x)
return x
class RestBlock1(nn.Module):
    """Bottleneck residual block for 64-channel maps (64 -> 32 -> 32 -> 64)."""
    def __init__(self):
        super(RestBlock1, self).__init__()
        # 1x1 reduce, 3x3 transform, 1x1 expand; batch norm after each conv.
        self.conv1 = nn.Conv2d(64, 32, (1, 1), stride=(1, 1), padding='same')
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 32, (3, 3), stride=(1, 1), padding='same')
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 64, (1, 1), stride=(1, 1), padding='same')
        self.bn3 = nn.BatchNorm2d(64)

    def forward(self, x):
        shortcut = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Identity shortcut followed by the final activation.
        return self.relu(shortcut + out)
class RestBlock2(nn.Module):
    """Bottleneck residual block for 128-channel maps (128 -> 64 -> 64 -> 128)."""
    def __init__(self):
        super(RestBlock2, self).__init__()
        # 1x1 reduce, 3x3 transform, 1x1 expand; batch norm after each conv.
        self.conv1 = nn.Conv2d(128, 64, (1, 1), stride=(1, 1), padding='same')
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(64, 64, (3, 3), stride=(1, 1), padding='same')
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, (1, 1), stride=(1, 1), padding='same')
        self.bn3 = nn.BatchNorm2d(128)

    def forward(self, x):
        shortcut = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Identity shortcut followed by the final activation.
        return self.relu(shortcut + out)
class RestBlock3(nn.Module):
    """Bottleneck residual block for 256-channel maps (256 -> 128 -> 128 -> 256)."""
    def __init__(self):
        super(RestBlock3, self).__init__()
        # 1x1 reduce, 3x3 transform, 1x1 expand; batch norm after each conv.
        self.conv1 = nn.Conv2d(256, 128, (1, 1), stride=(1, 1), padding='same')
        self.bn1 = nn.BatchNorm2d(128)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(128, 128, (3, 3), stride=(1, 1), padding='same')
        self.bn2 = nn.BatchNorm2d(128)
        self.conv3 = nn.Conv2d(128, 256, (1, 1), stride=(1, 1), padding='same')
        self.bn3 = nn.BatchNorm2d(256)

    def forward(self, x):
        shortcut = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Identity shortcut followed by the final activation.
        return self.relu(shortcut + out)
class RestBlock4(nn.Module):
    """Bottleneck residual block for 512-channel maps (512 -> 256 -> 256 -> 512)."""
    def __init__(self):
        super(RestBlock4, self).__init__()
        # 1x1 reduce, 3x3 transform, 1x1 expand; batch norm after each conv.
        self.conv1 = nn.Conv2d(512, 256, (1, 1), stride=(1, 1), padding='same')
        self.bn1 = nn.BatchNorm2d(256)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(256, 256, (3, 3), stride=(1, 1), padding='same')
        self.bn2 = nn.BatchNorm2d(256)
        self.conv3 = nn.Conv2d(256, 512, (1, 1), stride=(1, 1), padding='same')
        self.bn3 = nn.BatchNorm2d(512)

    def forward(self, x):
        shortcut = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Identity shortcut followed by the final activation.
        return self.relu(shortcut + out)
class Discriminator(nn.Module):
    """Residual CNN with two heads: speaker-class logits and a real/fake softmax."""
    def __init__(self, num_classes):
        super(Discriminator, self).__init__()
        self.num_classes = num_classes
        # Four strided conv stages, each followed by three residual blocks.
        self.conv1 = nn.Conv2d(1, 64, (5, 5), stride=(2, 2), padding=2)
        self.rest1 = nn.Sequential(RestBlock1(), RestBlock1(), RestBlock1())
        self.conv2 = nn.Conv2d(64, 128, (5, 5), stride=(2, 2), padding=2)
        self.rest2 = nn.Sequential(RestBlock2(), RestBlock2(), RestBlock2())
        self.conv3 = nn.Conv2d(128, 256, (5, 5), stride=(2, 2), padding=2)
        self.rest3 = nn.Sequential(RestBlock3(), RestBlock3(), RestBlock3())
        self.conv4 = nn.Conv2d(256, 512, (5, 5), stride=(2, 2), padding=2)
        self.rest4 = nn.Sequential(RestBlock4(), RestBlock4(), RestBlock4())
        # Temporal average pooling and the two output heads.
        self.avgpool = nn.AvgPool1d(10)
        self.fc = nn.Linear(2048, 512)
        self.fc_nc = nn.Linear(512, self.num_classes)
        self.fc_rf = nn.Linear(512, 2)
        self.softmax_nc = nn.Softmax()  # never called in forward (loss applies log-softmax)
        self.softmax_rf = nn.Softmax(dim=-1)
        self._initialize_weights()

    def _initialize_weights(self):
        # Xavier-uniform weights and zero biases for every convolution.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                xavier_uniform_(module.weight)
                module.bias.data.fill_(0)

    def forward(self, x):
        """Return (class logits, real/fake softmax probabilities)."""
        h = self.rest1(self.conv1(x))
        h = self.rest2(self.conv2(h))
        h = self.rest3(self.conv3(h))
        h = self.rest4(self.conv4(h))
        # Fold the feature map into (batch, 2048, 10) and average over the last
        # axis; the fixed sizes assume a (batch, 1, 160, 64) input -- TODO confirm.
        h = self.avgpool(h.reshape(-1, 2048, 10))
        embedding = self.fc(torch.squeeze(h, -1))
        class_logits = self.fc_nc(embedding)
        # Real/fake head is squashed with softmax (paired with MSE loss in training).
        real_fake = self.softmax_rf(self.fc_rf(embedding))
        return class_logits, real_fake
class SpeakerGAN():
    """Train/evaluate the speaker-classification GAN: a Generator produces fake
    fbank features and a Discriminator both classifies speakers and tells real
    from fake (LSGAN-style MSE adversarial loss with label smoothing)."""
    def __init__(self,):
        #base info
        self.num_classes = 251
        self.batch_size = 128 #diff with paper, beneficial to our CER 1.80.
        self.lr_init10 = 5e-4
        self.lr_after10 = 2e-4  # NOTE(review): unused here; the post-epoch-10 rate comes from the scheduler gamma below
        self.epochs = 2200 #diff with paper, beneficial to our CER 1.80.
        self.device = 'cuda:0' # 'cuda:0' or 'cpu'
        self.d_train_times = 4  # the generator is updated once every 4 discriminator steps
        self.model_path = './model'
        #network
        self.D = Discriminator(self.num_classes).to(self.device)
        self.G = Generator().to(self.device)
        #loss
        self.HuberLoss = nn.HuberLoss().to(self.device) #the HuberLoss formula in pytorch is a little different with paper
        self.Class_CrossEntropyLoss = nn.CrossEntropyLoss().to(self.device) #cross entropy loss for speaker id classification
        self.AdversarialLoss = nn.MSELoss().to(self.device) #from LSGAN
        #load data
        self.train_dataset = MyDataset(mode='train')
        self.train_dataloader = DataLoader(self.train_dataset,batch_size = self.batch_size, shuffle=True, drop_last=True, num_workers=5 )
        self.test_dataset = MyDataset(mode='test')
        self.test_dataloader = DataLoader(self.test_dataset,batch_size = 64, shuffle=False, drop_last=False, num_workers=2 )
        #optimizer
        self.optimizer_D = torch.optim.Adam(self.D.parameters(), lr = self.lr_init10, betas=(0.5, 0.999) )
        self.optimizer_G = torch.optim.Adam(self.G.parameters(), lr = self.lr_init10, betas=(0.5, 0.999) )
        #adjust lr: multiply the lr by 0.4 once, after epoch 10
        self.lr_scheduler_D = MultiStepLR(self.optimizer_D, milestones=[10,], gamma = 0.4)
        self.lr_scheduler_G = MultiStepLR(self.optimizer_G, milestones=[10,], gamma = 0.4)
        self.writer = SummaryWriter(log_dir='log')
    def save(self,epoch):
        """Checkpoint both networks (whole modules, not state_dicts) under model_path."""
        torch.save(self.D,os.path.join(self.model_path,str(epoch)+"_"+"D.pkl"))
        torch.save(self.G,os.path.join(self.model_path,str(epoch)+"_"+"G.pkl"))
    def train(self):
        """Alternating GAN training: D every batch, G every d_train_times batches."""
        idx = 0
        for epoch in range(self.epochs):
            id_in_epoch = 0
            for batch_id,(x,y) in enumerate(self.train_dataloader):
                idx = idx + 1
                id_in_epoch = id_in_epoch + 1
                self.writer.add_scalar('lr_D',self.optimizer_D.param_groups[0]["lr"] ,idx)
                self.writer.add_scalar('lr_G',self.optimizer_G.param_groups[0]["lr"] ,idx)
                #load data to device
                x = x.to(self.device)
                y = y.to(self.device)
                y = torch.squeeze(y)
                print ('id_in_epoch/total: ' + str(id_in_epoch) + '/'+str(self.train_dataset.__len__() // self.batch_size) + ' epoch:' + str(epoch))
                # One-sided label smoothing: random targets in [0.7, 1.0] for the
                # "true" slot, with the two slots summing to 1.
                smooth_label_fake = torch.rand(self.batch_size, 2).to(self.device).squeeze()*0.3 + 0.7
                smooth_label_real = torch.rand(self.batch_size, 2).to(self.device).squeeze()*0.3 + 0.7
                smooth_label_fake.T[1] = 1 - smooth_label_fake.T[0]
                smooth_label_real.T[0] = 1 - smooth_label_real.T[1]
                label_fake_smooth = smooth_label_fake #smooth for fake_x 0, label: [>0.7, <0.3]
                label_real_smooth = smooth_label_real #smooth for x 1, label: [<0.3, >0.7]
                #update D
                self.G.eval()
                self.D.train()
                self.optimizer_D.zero_grad()
                pred_real_y, pred_real_flag = self.D(x)
                real_loss_d = self.AdversarialLoss(pred_real_flag,label_real_smooth)
                fake_x = self.G(x)
                pred_fake_y, pred_fake_flag = self.D(fake_x) #this line is important.
                fake_loss_d = self.AdversarialLoss(pred_fake_flag,label_fake_smooth) #this line is important.
                adv_loss_d = real_loss_d + fake_loss_d
                # Speaker-classification accuracy on real and generated features.
                class_acc_real = torch.eq(torch.argmax(pred_real_y, dim = 1), y).sum().float().item() / len(y)
                class_acc_fake = torch.eq(torch.argmax(pred_fake_y, dim = 1), y).sum().float().item() / len(y)
                self.writer.add_scalar('acc/class_acc_real',class_acc_real , idx)
                self.writer.add_scalar('acc/class_acc_fake',class_acc_fake , idx)
                # The classifier head is trained on real AND fake features, both
                # against the true speaker label.
                classification_loss_real = self.Class_CrossEntropyLoss(pred_real_y,y)
                classification_loss_fake = self.Class_CrossEntropyLoss(pred_fake_y,y)
                classification_loss = classification_loss_real + classification_loss_fake
                loss_d = adv_loss_d + classification_loss
                loss_d.backward()
                self.optimizer_D.step()
                self.writer.add_scalar('loss_d/adv_loss_d', adv_loss_d.item(), idx)
                self.writer.add_scalar('loss_d/class_loss', classification_loss.item(),idx)
                self.writer.add_scalar('loss_d', loss_d.item(),idx)
                if idx % self.d_train_times == 0:
                    self.G.train()
                    #update G: fool D (real targets for fakes) + Huber reconstruction loss
                    self.optimizer_G.zero_grad()
                    fake_x = self.G(x)
                    pred_fake_y, pred_fake_flag = self.D(fake_x)
                    huber_loss = self.HuberLoss(x,fake_x)
                    adv_loss_g = self.AdversarialLoss(pred_fake_flag, label_real_smooth)
                    loss_g = adv_loss_g + huber_loss
                    loss_g.backward()
                    self.optimizer_G.step()
                    self.writer.add_scalar('loss_g/adv_loss_g', adv_loss_g.item(), idx)
                    self.writer.add_scalar('loss_g/huber_loss', huber_loss.item(),idx)
                    self.writer.add_scalar('loss_g', loss_g.item(),idx)
            #save model
            self.save(epoch)
            #adjust lr
            self.lr_scheduler_D.step()
            self.lr_scheduler_G.step()
        self.writer.close()
    def test(self,D_model):
        """Load a saved discriminator and report mean per-batch speaker accuracy
        on the test loader (average of batch accuracies, not per-sample)."""
        d = torch.load(D_model)
        d.eval()
        correct = 0.0
        with torch.no_grad():
            count = 0
            for batch_id,(x,y) in enumerate(self.test_dataloader):
                count = count + 1
                if batch_id % 5000 == 0:
                    print ('testing : ' + str(batch_id) + '/' + str(self.test_dataset.__len__()))
                x = x.to(self.device)
                y = y.to(self.device)
                pred_y, pred_real_flag = d(x)
                label_real = torch.tensor([0,1]).to(self.device)
                self.writer.add_scalars('test/real_flag_' + D_model,{'real':pred_real_flag[0][1].item() , 'fake':pred_real_flag[0][0].item(),}, batch_id)
                real_loss_d = self.AdversarialLoss(pred_real_flag,label_real)
                self.writer.add_scalar('test/loss_flag_' + D_model, real_loss_d.item(),batch_id)
                test_acc = torch.eq(torch.argmax(pred_y, dim = 1), y.squeeze()).sum().float().item() / len(y)
                correct = correct + test_acc
            print ('test accuracy:')
            print (correct / count)
        self.writer.close()
    def generate_sample(self, G_model):
        """Load a saved generator (on CPU) and plot real vs generated fbank pairs.

        NOTE(review): the test loader above uses batch_size 64, so `x` squeezes
        to a 3-D tensor here while imshow expects 2-D -- verify the intended
        batch size for sampling.
        """
        g = torch.load(G_model)
        g.eval()
        g.to('cpu')
        gen_file = "gen.png"
        fig = plt.figure()
        totalimg = 20
        for batch_id,(x,y) in enumerate(self.test_dataloader):
            fake_x = g(x)
            x = torch.squeeze(x)
            fake_x = torch.squeeze(fake_x).detach().numpy()
            if batch_id > (totalimg -1 ):
                break
            # Left column: real features; right column: generated features.
            posid = batch_id * 2 + 1
            ax = fig.add_subplot(totalimg,2,posid)
            ax.imshow(x.T,cmap='plasma')
            ax = fig.add_subplot(totalimg,2,posid + 1)
            ax.imshow(fake_x.T,cmap='plasma')
        plt.savefig(gen_file)
        print ("save genrated samples to: " + gen_file)
if __name__ == "__main__":
model = SpeakerGAN()
model.train()
model.test('model/2200_D.pkl')
model.generate_sample('model/2200_G.pkl')
| [
"torch.argmax",
"torch.nn.InstanceNorm2d",
"torch.cat",
"matplotlib.pyplot.figure",
"torch.nn.Softmax",
"torch.std_mean",
"torch.no_grad",
"redis.Redis",
"torch.nn.MSELoss",
"random.randint",
"torch.utils.data.DataLoader",
"torch.load",
"struct.pack",
"torch.squeeze",
"torch.utils.tensor... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((1153, 1177), 'struct.pack', 'struct.pack', (['""">II"""', 'h', 'w'], {}), "('>II', h, w)\n", (1164, 1177), False, 'import struct\n'), ((1431, 1464), 'struct.unpack', 'struct.unpack', (['""">II"""', 'encoded[:8]'], {}), "('>II', encoded[:8])\n", (1444, 1464), False, 'import struct\n'), ((3299, 3329), 'torch.std_mean', 'torch.std_mean', (['feature'], {'dim': '(0)'}), '(feature, dim=0)\n', (3313, 3329), False, 'import torch\n'), ((3396, 3427), 'torch.unsqueeze', 'torch.unsqueeze', (['feature'], {'dim': '(0)'}), '(feature, dim=0)\n', (3411, 3427), False, 'import torch\n'), ((3723, 3780), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(256)', '(15, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(1, 256, (15, 1), stride=(1, 1), padding='same')\n", (3732, 3780), False, 'from torch import nn\n'), ((3906, 3964), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(5, 1)'], {'stride': '(2, 1)', 'padding': '(2, 0)'}), '(256, 512, (5, 1), stride=(2, 1), padding=(2, 0))\n', (3915, 3964), False, 'from torch import nn\n'), ((4090, 4149), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(1024)', '(5, 1)'], {'stride': '(2, 1)', 'padding': '(2, 0)'}), '(512, 1024, (5, 1), stride=(2, 1), padding=(2, 0))\n', (4099, 4149), False, 'from torch import nn\n'), ((4276, 4336), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(1024)', '(5, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(1024, 1024, (5, 1), stride=(1, 1), padding='same')\n", (4285, 4336), False, 'from torch import nn\n'), ((4356, 4414), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(5, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(512, 512, (5, 1), stride=(1, 1), padding='same')\n", (4365, 4414), False, 'from torch import nn\n'), ((4525, 4582), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(1)', '(15, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(256, 
1, (15, 1), stride=(1, 1), padding='same')\n", (4534, 4582), False, 'from torch import nn\n'), ((6357, 6413), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(32)', '(1, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(64, 32, (1, 1), stride=(1, 1), padding='same')\n", (6366, 6413), False, 'from torch import nn\n'), ((6432, 6450), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (6446, 6450), False, 'from torch import nn\n'), ((6471, 6492), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6478, 6492), False, 'from torch import nn\n'), ((6514, 6570), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)', '(3, 3)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(32, 32, (3, 3), stride=(1, 1), padding='same')\n", (6523, 6570), False, 'from torch import nn\n'), ((6589, 6607), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (6603, 6607), False, 'from torch import nn\n'), ((6628, 6649), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6635, 6649), False, 'from torch import nn\n'), ((6671, 6727), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(1, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(32, 64, (1, 1), stride=(1, 1), padding='same')\n", (6680, 6727), False, 'from torch import nn\n'), ((6746, 6764), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (6760, 6764), False, 'from torch import nn\n'), ((7254, 7311), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(1, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(128, 64, (1, 1), stride=(1, 1), padding='same')\n", (7263, 7311), False, 'from torch import nn\n'), ((7330, 7348), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (7344, 7348), False, 'from torch import nn\n'), ((7369, 7390), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7376, 7390), False, 'from torch import nn\n'), ((7412, 7468), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 
'(64)', '(3, 3)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(64, 64, (3, 3), stride=(1, 1), padding='same')\n", (7421, 7468), False, 'from torch import nn\n'), ((7487, 7505), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (7501, 7505), False, 'from torch import nn\n'), ((7526, 7547), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7533, 7547), False, 'from torch import nn\n'), ((7569, 7626), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(1, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(64, 128, (1, 1), stride=(1, 1), padding='same')\n", (7578, 7626), False, 'from torch import nn\n'), ((7645, 7664), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (7659, 7664), False, 'from torch import nn\n'), ((8155, 8213), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(1, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(256, 128, (1, 1), stride=(1, 1), padding='same')\n", (8164, 8213), False, 'from torch import nn\n'), ((8232, 8251), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (8246, 8251), False, 'from torch import nn\n'), ((8272, 8293), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8279, 8293), False, 'from torch import nn\n'), ((8315, 8373), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3, 3)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(128, 128, (3, 3), stride=(1, 1), padding='same')\n", (8324, 8373), False, 'from torch import nn\n'), ((8392, 8411), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (8406, 8411), False, 'from torch import nn\n'), ((8432, 8453), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8439, 8453), False, 'from torch import nn\n'), ((8475, 8533), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(1, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(128, 256, (1, 1), stride=(1, 1), padding='same')\n", (8484, 8533), 
False, 'from torch import nn\n'), ((8552, 8571), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (8566, 8571), False, 'from torch import nn\n'), ((9061, 9119), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)', '(1, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(512, 256, (1, 1), stride=(1, 1), padding='same')\n", (9070, 9119), False, 'from torch import nn\n'), ((9138, 9157), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (9152, 9157), False, 'from torch import nn\n'), ((9178, 9199), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9185, 9199), False, 'from torch import nn\n'), ((9221, 9279), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(256, 256, (3, 3), stride=(1, 1), padding='same')\n", (9230, 9279), False, 'from torch import nn\n'), ((9298, 9317), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (9312, 9317), False, 'from torch import nn\n'), ((9338, 9359), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9345, 9359), False, 'from torch import nn\n'), ((9381, 9439), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(1, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(256, 512, (1, 1), stride=(1, 1), padding='same')\n", (9390, 9439), False, 'from torch import nn\n'), ((9458, 9477), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (9472, 9477), False, 'from torch import nn\n'), ((10032, 10082), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(64)', '(5, 5)'], {'stride': '(2, 2)', 'padding': '(2)'}), '(1, 64, (5, 5), stride=(2, 2), padding=2)\n', (10041, 10082), False, 'from torch import nn\n'), ((10181, 10233), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(5, 5)'], {'stride': '(2, 2)', 'padding': '(2)'}), '(64, 128, (5, 5), stride=(2, 2), padding=2)\n', (10190, 10233), False, 'from torch import nn\n'), ((10332, 10385), 
'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(5, 5)'], {'stride': '(2, 2)', 'padding': '(2)'}), '(128, 256, (5, 5), stride=(2, 2), padding=2)\n', (10341, 10385), False, 'from torch import nn\n'), ((10484, 10537), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(5, 5)'], {'stride': '(2, 2)', 'padding': '(2)'}), '(256, 512, (5, 5), stride=(2, 2), padding=2)\n', (10493, 10537), False, 'from torch import nn\n'), ((10638, 10654), 'torch.nn.AvgPool1d', 'nn.AvgPool1d', (['(10)'], {}), '(10)\n', (10650, 10654), False, 'from torch import nn\n'), ((10673, 10693), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(512)'], {}), '(2048, 512)\n', (10682, 10693), False, 'from torch import nn\n'), ((10715, 10747), 'torch.nn.Linear', 'nn.Linear', (['(512)', 'self.num_classes'], {}), '(512, self.num_classes)\n', (10724, 10747), False, 'from torch import nn\n'), ((10768, 10785), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(2)'], {}), '(512, 2)\n', (10777, 10785), False, 'from torch import nn\n'), ((10812, 10824), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (10822, 10824), False, 'from torch import nn\n'), ((10851, 10869), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (10861, 10869), False, 'from torch import nn\n'), ((11718, 11740), 'torch.squeeze', 'torch.squeeze', (['out', '(-1)'], {}), '(out, -1)\n', (11731, 11740), False, 'import torch\n'), ((13271, 13378), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'num_workers': '(5)'}), '(self.train_dataset, batch_size=self.batch_size, shuffle=True,\n drop_last=True, num_workers=5)\n', (13281, 13378), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13459, 13554), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_dataset'], {'batch_size': '(64)', 'shuffle': '(False)', 'drop_last': '(False)', 'num_workers': '(2)'}), '(self.test_dataset, batch_size=64, shuffle=False, 
drop_last=False,\n num_workers=2)\n', (13469, 13554), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13837, 13894), 'torch.optim.lr_scheduler.MultiStepLR', 'MultiStepLR', (['self.optimizer_D'], {'milestones': '[10]', 'gamma': '(0.4)'}), '(self.optimizer_D, milestones=[10], gamma=0.4)\n', (13848, 13894), False, 'from torch.optim.lr_scheduler import MultiStepLR\n'), ((13928, 13985), 'torch.optim.lr_scheduler.MultiStepLR', 'MultiStepLR', (['self.optimizer_G'], {'milestones': '[10]', 'gamma': '(0.4)'}), '(self.optimizer_G, milestones=[10], gamma=0.4)\n', (13939, 13985), False, 'from torch.optim.lr_scheduler import MultiStepLR\n'), ((14011, 14039), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': '"""log"""'}), "(log_dir='log')\n", (14024, 14039), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((18234, 18253), 'torch.load', 'torch.load', (['D_model'], {}), '(D_model)\n', (18244, 18253), False, 'import torch\n'), ((19414, 19433), 'torch.load', 'torch.load', (['G_model'], {}), '(G_model)\n', (19424, 19433), False, 'import torch\n'), ((19516, 19528), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19526, 19528), True, 'from matplotlib import pyplot as plt\n'), ((20065, 20086), 'matplotlib.pyplot.savefig', 'plt.savefig', (['gen_file'], {}), '(gen_file)\n', (20076, 20086), True, 'from matplotlib import pyplot as plt\n'), ((1728, 1774), 'redis.Redis', 'redis.Redis', ([], {'host': '"""localhost"""', 'port': '(6379)', 'db': '(1)'}), "(host='localhost', port=6379, db=1)\n", (1739, 1774), False, 'import redis\n'), ((2440, 2475), 'torchaudio.load', 'torchaudio.load', (['f'], {'normalize': '(False)'}), '(f, normalize=False)\n', (2455, 2475), False, 'import torchaudio\n'), ((2567, 2744), 'torchaudio.compliance.kaldi.fbank', 'fbank', (['wav'], {'dither': '(1)', 'high_freq': '(-200)', 'low_freq': '(64)', 'htk_compat': '(True)', 'num_mel_bins': 'self.height', 'sample_frequency': 'self.sample_rate', 'use_energy': 
'(False)', 'window_type': '"""hamming"""'}), "(wav, dither=1, high_freq=-200, low_freq=64, htk_compat=True,\n num_mel_bins=self.height, sample_frequency=self.sample_rate, use_energy\n =False, window_type='hamming')\n", (2572, 2744), False, 'from torchaudio.compliance.kaldi import fbank\n'), ((3067, 3110), 'random.randint', 'random.randint', (['(0)', '(feature_len - self.width)'], {}), '(0, feature_len - self.width)\n', (3081, 3110), False, 'import random\n'), ((3814, 3871), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(256)', '(15, 1)'], {'stride': '(1, 1)', 'padding': '"""same"""'}), "(1, 256, (15, 1), stride=(1, 1), padding='same')\n", (3823, 3871), False, 'from torch import nn\n'), ((3871, 3883), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3881, 3883), False, 'from torch import nn\n'), ((3998, 4056), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(5, 1)'], {'stride': '(2, 1)', 'padding': '(2, 0)'}), '(256, 512, (5, 1), stride=(2, 1), padding=(2, 0))\n', (4007, 4056), False, 'from torch import nn\n'), ((4055, 4067), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4065, 4067), False, 'from torch import nn\n'), ((4183, 4242), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(1024)', '(5, 1)'], {'stride': '(2, 1)', 'padding': '(2, 0)'}), '(512, 1024, (5, 1), stride=(2, 1), padding=(2, 0))\n', (4192, 4242), False, 'from torch import nn\n'), ((4241, 4253), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4251, 4253), False, 'from torch import nn\n'), ((4448, 4490), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(5, 1)'], {'stride': '(2, 1)'}), '(256, 256, (5, 1), stride=(2, 1))\n', (4457, 4490), False, 'from torch import nn\n'), ((4490, 4502), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4500, 4502), False, 'from torch import nn\n'), ((5437, 5471), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['inputs.shape[1]'], {}), '(inputs.shape[1])\n', (5454, 5471), False, 'from torch import nn\n'), ((5928, 5940), 'torch.nn.Sigmoid', 
'nn.Sigmoid', ([], {}), '()\n', (5938, 5940), False, 'from torch import nn\n'), ((6095, 6107), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6105, 6107), False, 'from torch import nn\n'), ((7068, 7085), 'torch.add', 'torch.add', (['x', 'out'], {}), '(x, out)\n', (7077, 7085), False, 'import torch\n'), ((7969, 7986), 'torch.add', 'torch.add', (['x', 'out'], {}), '(x, out)\n', (7978, 7986), False, 'import torch\n'), ((8875, 8892), 'torch.add', 'torch.add', (['x', 'out'], {}), '(x, out)\n', (8884, 8892), False, 'import torch\n'), ((9781, 9798), 'torch.add', 'torch.add', (['x', 'out'], {}), '(x, out)\n', (9790, 9798), False, 'import torch\n'), ((18306, 18321), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18319, 18321), False, 'import torch\n'), ((19670, 19686), 'torch.squeeze', 'torch.squeeze', (['x'], {}), '(x)\n', (19683, 19686), False, 'import torch\n'), ((1477, 1527), 'numpy.frombuffer', 'np.frombuffer', (['encoded'], {'dtype': 'np.float32', 'offset': '(8)'}), '(encoded, dtype=np.float32, offset=8)\n', (1490, 1527), True, 'import numpy as np\n'), ((1918, 1943), 'torch.from_numpy', 'torch.from_numpy', (['feature'], {}), '(feature)\n', (1934, 1943), False, 'import torch\n'), ((2020, 2055), 'torchaudio.load', 'torchaudio.load', (['f'], {'normalize': '(False)'}), '(f, normalize=False)\n', (2035, 2055), False, 'import torchaudio\n'), ((2159, 2336), 'torchaudio.compliance.kaldi.fbank', 'fbank', (['wav'], {'dither': '(1)', 'high_freq': '(-200)', 'low_freq': '(64)', 'htk_compat': '(True)', 'num_mel_bins': 'self.height', 'sample_frequency': 'self.sample_rate', 'use_energy': '(False)', 'window_type': '"""hamming"""'}), "(wav, dither=1, high_freq=-200, low_freq=64, htk_compat=True,\n num_mel_bins=self.height, sample_frequency=self.sample_rate, use_energy\n =False, window_type='hamming')\n", (2164, 2336), False, 'from torchaudio.compliance.kaldi import fbank\n'), ((2917, 2949), 'torch.cat', 'torch.cat', (['(feature, feature)', '(0)'], {}), '((feature, feature), 
0)\n', (2926, 2949), False, 'import torch\n'), ((5309, 5334), 'torch.nn.init.xavier_uniform_', 'xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (5324, 5334), False, 'from torch.nn.init import xavier_uniform_\n'), ((11031, 11056), 'torch.nn.init.xavier_uniform_', 'xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (11046, 11056), False, 'from torch.nn.init import xavier_uniform_\n'), ((12870, 12884), 'torch.nn.HuberLoss', 'nn.HuberLoss', ([], {}), '()\n', (12882, 12884), False, 'from torch import nn\n'), ((13006, 13027), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (13025, 13027), False, 'from torch import nn\n'), ((13125, 13137), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (13135, 13137), False, 'from torch import nn\n'), ((14813, 14829), 'torch.squeeze', 'torch.squeeze', (['y'], {}), '(y)\n', (14826, 14829), False, 'import torch\n'), ((18736, 18756), 'torch.tensor', 'torch.tensor', (['[0, 1]'], {}), '([0, 1])\n', (18748, 18756), False, 'import torch\n'), ((19708, 19729), 'torch.squeeze', 'torch.squeeze', (['fake_x'], {}), '(fake_x)\n', (19721, 19729), False, 'import torch\n'), ((15033, 15063), 'torch.rand', 'torch.rand', (['self.batch_size', '(2)'], {}), '(self.batch_size, 2)\n', (15043, 15063), False, 'import torch\n'), ((15136, 15166), 'torch.rand', 'torch.rand', (['self.batch_size', '(2)'], {}), '(self.batch_size, 2)\n', (15146, 15166), False, 'import torch\n'), ((16149, 16181), 'torch.argmax', 'torch.argmax', (['pred_real_y'], {'dim': '(1)'}), '(pred_real_y, dim=1)\n', (16161, 16181), False, 'import torch\n'), ((16260, 16292), 'torch.argmax', 'torch.argmax', (['pred_fake_y'], {'dim': '(1)'}), '(pred_fake_y, dim=1)\n', (16272, 16292), False, 'import torch\n'), ((19137, 19164), 'torch.argmax', 'torch.argmax', (['pred_y'], {'dim': '(1)'}), '(pred_y, dim=1)\n', (19149, 19164), False, 'import torch\n')] |
import unittest
import pqkmeans
import numpy
import collections
import pickle
class TestPQKMeans(unittest.TestCase):
def data_source(self, n: int):
for i in range(n):
yield [i * 100] * 6
def setUp(self):
# Train PQ encoder
self.encoder = pqkmeans.encoder.PQEncoder(num_subdim=3, Ks=20)
self.encoder.fit(numpy.array(list(self.data_source(200))))
def test_just_construction(self):
pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=5, iteration=10, verbose=False)
def test_fit_and_predict(self):
engine = pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=2, iteration=10, verbose=False)
codes = self.encoder.transform(numpy.array(list(self.data_source(100))))
predicted = engine.fit_predict(codes)
count = collections.defaultdict(int)
for cluster in predicted:
count[cluster] += 1
# roughly balanced clusters
self.assertGreaterEqual(min(count.values()), max(count.values()) * 0.7)
a = engine.predict(codes[0:1, :])
b = engine.predict(codes[0:1, :])
self.assertEqual(a, b)
def test_cluster_centers_are_really_nearest(self):
engine = pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=2, iteration=10, verbose=False)
codes = self.encoder.transform(numpy.array(list(self.data_source(100))))
fit_predicted = engine.fit_predict(codes)
cluster_centers = numpy.array(engine.cluster_centers_, dtype=numpy.uint8)
predicted = engine.predict(codes)
self.assertTrue((fit_predicted == predicted).all())
# Reconstruct the original vectors
codes_decoded = self.encoder.inverse_transform(codes)
cluster_centers_decoded = self.encoder.inverse_transform(cluster_centers)
for cluster, code_decoded in zip(predicted, codes_decoded):
other_cluster = (cluster + 1) % max(predicted)
self.assertLessEqual(
numpy.linalg.norm(cluster_centers_decoded[cluster] - code_decoded),
numpy.linalg.norm(cluster_centers_decoded[other_cluster] - code_decoded)
)
def test_constructor_with_cluster_center(self):
# Run pqkmeans first.
engine = pqkmeans.clustering.PQKMeans(encoder=self.encoder, k=5, iteration=10, verbose=False)
codes = self.encoder.transform(numpy.array(list(self.data_source(100))))
fit_predicted = engine.fit_predict(codes)
cluster_centers = numpy.array(engine.cluster_centers_, dtype=numpy.uint8)
predicted = engine.predict(codes)
# save current engine and recover from savedata
engine_savedata = pickle.dumps(engine)
engine_recovered = pickle.loads(engine_savedata)
fit_predicted_from_recovered_obj = engine_recovered.predict(codes)
numpy.testing.assert_array_equal(predicted, fit_predicted_from_recovered_obj)
| [
"pickle.loads",
"numpy.testing.assert_array_equal",
"collections.defaultdict",
"numpy.array",
"numpy.linalg.norm",
"pqkmeans.clustering.PQKMeans",
"pqkmeans.encoder.PQEncoder",
"pickle.dumps"
] | [((285, 332), 'pqkmeans.encoder.PQEncoder', 'pqkmeans.encoder.PQEncoder', ([], {'num_subdim': '(3)', 'Ks': '(20)'}), '(num_subdim=3, Ks=20)\n', (311, 332), False, 'import pqkmeans\n'), ((447, 535), 'pqkmeans.clustering.PQKMeans', 'pqkmeans.clustering.PQKMeans', ([], {'encoder': 'self.encoder', 'k': '(5)', 'iteration': '(10)', 'verbose': '(False)'}), '(encoder=self.encoder, k=5, iteration=10,\n verbose=False)\n', (475, 535), False, 'import pqkmeans\n'), ((586, 674), 'pqkmeans.clustering.PQKMeans', 'pqkmeans.clustering.PQKMeans', ([], {'encoder': 'self.encoder', 'k': '(2)', 'iteration': '(10)', 'verbose': '(False)'}), '(encoder=self.encoder, k=2, iteration=10,\n verbose=False)\n', (614, 674), False, 'import pqkmeans\n'), ((815, 843), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (838, 843), False, 'import collections\n'), ((1216, 1304), 'pqkmeans.clustering.PQKMeans', 'pqkmeans.clustering.PQKMeans', ([], {'encoder': 'self.encoder', 'k': '(2)', 'iteration': '(10)', 'verbose': '(False)'}), '(encoder=self.encoder, k=2, iteration=10,\n verbose=False)\n', (1244, 1304), False, 'import pqkmeans\n'), ((1458, 1513), 'numpy.array', 'numpy.array', (['engine.cluster_centers_'], {'dtype': 'numpy.uint8'}), '(engine.cluster_centers_, dtype=numpy.uint8)\n', (1469, 1513), False, 'import numpy\n'), ((2254, 2342), 'pqkmeans.clustering.PQKMeans', 'pqkmeans.clustering.PQKMeans', ([], {'encoder': 'self.encoder', 'k': '(5)', 'iteration': '(10)', 'verbose': '(False)'}), '(encoder=self.encoder, k=5, iteration=10,\n verbose=False)\n', (2282, 2342), False, 'import pqkmeans\n'), ((2496, 2551), 'numpy.array', 'numpy.array', (['engine.cluster_centers_'], {'dtype': 'numpy.uint8'}), '(engine.cluster_centers_, dtype=numpy.uint8)\n', (2507, 2551), False, 'import numpy\n'), ((2678, 2698), 'pickle.dumps', 'pickle.dumps', (['engine'], {}), '(engine)\n', (2690, 2698), False, 'import pickle\n'), ((2726, 2755), 'pickle.loads', 'pickle.loads', (['engine_savedata'], {}), 
'(engine_savedata)\n', (2738, 2755), False, 'import pickle\n'), ((2840, 2917), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['predicted', 'fit_predicted_from_recovered_obj'], {}), '(predicted, fit_predicted_from_recovered_obj)\n', (2872, 2917), False, 'import numpy\n'), ((1983, 2049), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(cluster_centers_decoded[cluster] - code_decoded)'], {}), '(cluster_centers_decoded[cluster] - code_decoded)\n', (2000, 2049), False, 'import numpy\n'), ((2067, 2139), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(cluster_centers_decoded[other_cluster] - code_decoded)'], {}), '(cluster_centers_decoded[other_cluster] - code_decoded)\n', (2084, 2139), False, 'import numpy\n')] |
from ...core.abstract_builder import AbstractBuilder
from uwndc19.helpers.dataset import get_train_datasets, load_data, RANDOM_SEED
from uwndc19.models.multiclass.input import build_input_fn, build_serving_input_receiver_fn
from uwndc19.models.multiclass.model import model_fn
import numpy as np
class Builder(AbstractBuilder):
def __init__(self, config: dict):
super().__init__(config)
# load the data
df, imgs = load_data()
train_imgs, train_labels, eval_imgs, eval_labels = \
get_train_datasets(df, imgs, config['data']['eval_size'], config['model']['image_size'],
config['data'].get('random_seed', RANDOM_SEED))
train_nan_mask = np.logical_not(np.isnan(train_labels))
eval_nan_mask = np.logical_not(np.isnan(eval_labels))
train_labels = np.nan_to_num(train_labels)
eval_labels = np.nan_to_num(eval_labels)
# mask all the columns that are not supposed to be trained
column_id = config['training'].get('only_column_id')
if column_id is not None:
train_nan_mask[:, :column_id] = False
train_nan_mask[:, (column_id + 1):] = False
eval_nan_mask[:, :column_id] = False
eval_nan_mask[:, (column_id + 1):] = False
# build input functions
self._train_input_fn = build_input_fn(train_imgs, train_labels, train_nan_mask,
distortions=config['data'].get('distortions'), num_epochs=None)
self._eval_train_input_fn = build_input_fn(train_imgs, train_labels, train_nan_mask, num_epochs=None)
self._eval_input_fn = build_input_fn(eval_imgs, eval_labels, eval_nan_mask, num_epochs=1)
self._serving_input_receiver_fn = build_serving_input_receiver_fn(config['model']['image_size'])
def build_model_fn(self):
return model_fn
def build_train_input_fn(self):
return self._train_input_fn
def build_eval_train_input_fn(self):
return self._eval_train_input_fn
def build_eval_input_fn(self):
return self._eval_input_fn
def build_serving_input_receiver_fn(self):
return self._serving_input_receiver_fn
| [
"numpy.nan_to_num",
"numpy.isnan",
"uwndc19.helpers.dataset.load_data",
"uwndc19.models.multiclass.input.build_input_fn",
"uwndc19.models.multiclass.input.build_serving_input_receiver_fn"
] | [((446, 457), 'uwndc19.helpers.dataset.load_data', 'load_data', ([], {}), '()\n', (455, 457), False, 'from uwndc19.helpers.dataset import get_train_datasets, load_data, RANDOM_SEED\n'), ((849, 876), 'numpy.nan_to_num', 'np.nan_to_num', (['train_labels'], {}), '(train_labels)\n', (862, 876), True, 'import numpy as np\n'), ((899, 925), 'numpy.nan_to_num', 'np.nan_to_num', (['eval_labels'], {}), '(eval_labels)\n', (912, 925), True, 'import numpy as np\n'), ((1566, 1639), 'uwndc19.models.multiclass.input.build_input_fn', 'build_input_fn', (['train_imgs', 'train_labels', 'train_nan_mask'], {'num_epochs': 'None'}), '(train_imgs, train_labels, train_nan_mask, num_epochs=None)\n', (1580, 1639), False, 'from uwndc19.models.multiclass.input import build_input_fn, build_serving_input_receiver_fn\n'), ((1670, 1737), 'uwndc19.models.multiclass.input.build_input_fn', 'build_input_fn', (['eval_imgs', 'eval_labels', 'eval_nan_mask'], {'num_epochs': '(1)'}), '(eval_imgs, eval_labels, eval_nan_mask, num_epochs=1)\n', (1684, 1737), False, 'from uwndc19.models.multiclass.input import build_input_fn, build_serving_input_receiver_fn\n'), ((1780, 1842), 'uwndc19.models.multiclass.input.build_serving_input_receiver_fn', 'build_serving_input_receiver_fn', (["config['model']['image_size']"], {}), "(config['model']['image_size'])\n", (1811, 1842), False, 'from uwndc19.models.multiclass.input import build_input_fn, build_serving_input_receiver_fn\n'), ((740, 762), 'numpy.isnan', 'np.isnan', (['train_labels'], {}), '(train_labels)\n', (748, 762), True, 'import numpy as np\n'), ((803, 824), 'numpy.isnan', 'np.isnan', (['eval_labels'], {}), '(eval_labels)\n', (811, 824), True, 'import numpy as np\n')] |
import pygame
from pygame.locals import *
import numpy as np
WINDOW_HEIGHT = 420
WINDOW_WIDTH = 420
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GRID_SIZE = 4
FPS = 12
# Initial state
def setup(g):
# Glider gun
g[10][100] = 1
g[10][99] = 1
g[9][100] = 1
g[10][92] = 1
g[10][91] = 1
g[9][93] = 1
g[9][92] = 1
g[9][91] = 1
g[8][92] = 1
g[8][91] = 1
g[11][89] = 1
g[12][89] = 1
g[11][88] = 1
g[7][89] = 1
g[7][88] = 1
g[6][89] = 1
g[10][78] = 1
g[10][77] = 1
g[9][76] = 1
g[8][75] = 1
g[7][75] = 1
g[6][75] = 1
g[5][76] = 1
g[4][77] = 1
g[4][78] = 1
g[8][66] = 1
g[7][66] = 1
g[8][65] = 1
return g
def main():
sizeX = WINDOW_WIDTH // GRID_SIZE
sizeY = WINDOW_HEIGHT // GRID_SIZE
grid = np.zeros(shape=(sizeY + 2, sizeX + 2))
grid = setup(grid)
square = pygame.Surface((GRID_SIZE, GRID_SIZE))
running = True
screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption('Game of Life')
clock = pygame.time.Clock()
pygame.init()
while running:
new_grid = np.zeros(shape=(sizeY + 2, sizeX + 2))
for event in pygame.event.get():
if not hasattr(event, 'key'):
continue
if event.key == pygame.K_ESCAPE or event.key == pygame.K_q:
running = False
screen.fill(BLACK)
for y in range(1, sizeY + 1):
for x in range(1, sizeX + 1):
# Count neighbours
n = grid[y - 1][x - 1] + grid[y - 1][x] + grid[y - 1][x + 1] + grid[y][x - 1] + grid[y][x + 1] + \
grid[y + 1][x - 1] + grid[y + 1][x] + grid[y + 1][x + 1]
if (n == 2 or n == 3) and grid[y][x] == 1:
new_grid[y][x] = 1
elif grid[y][x] == 0 and n == 3:
new_grid[y][x] = 1
else:
new_grid[y][x] = 0
# Draw pixel
if new_grid[y][x] == 1:
square.fill(WHITE)
else:
square.fill(BLACK)
draw_square = pygame.Rect((x - 1) * GRID_SIZE, (y - 1) * GRID_SIZE, GRID_SIZE, GRID_SIZE)
screen.blit(square, draw_square)
grid = np.copy(new_grid)
pygame.display.flip()
clock.tick(FPS)
if __name__ == '__main__':
main()
| [
"pygame.Surface",
"numpy.copy",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.Rect",
"numpy.zeros",
"pygame.init",
"pygame.display.flip",
"pygame.display.set_caption",
"pygame.time.Clock"
] | [((825, 863), 'numpy.zeros', 'np.zeros', ([], {'shape': '(sizeY + 2, sizeX + 2)'}), '(shape=(sizeY + 2, sizeX + 2))\n', (833, 863), True, 'import numpy as np\n'), ((901, 939), 'pygame.Surface', 'pygame.Surface', (['(GRID_SIZE, GRID_SIZE)'], {}), '((GRID_SIZE, GRID_SIZE))\n', (915, 939), False, 'import pygame\n'), ((973, 1027), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WINDOW_WIDTH, WINDOW_HEIGHT)'], {}), '((WINDOW_WIDTH, WINDOW_HEIGHT))\n', (996, 1027), False, 'import pygame\n'), ((1032, 1074), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Game of Life"""'], {}), "('Game of Life')\n", (1058, 1074), False, 'import pygame\n'), ((1087, 1106), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1104, 1106), False, 'import pygame\n'), ((1111, 1124), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1122, 1124), False, 'import pygame\n'), ((1164, 1202), 'numpy.zeros', 'np.zeros', ([], {'shape': '(sizeY + 2, sizeX + 2)'}), '(shape=(sizeY + 2, sizeX + 2))\n', (1172, 1202), True, 'import numpy as np\n'), ((1225, 1243), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1241, 1243), False, 'import pygame\n'), ((2342, 2359), 'numpy.copy', 'np.copy', (['new_grid'], {}), '(new_grid)\n', (2349, 2359), True, 'import numpy as np\n'), ((2368, 2389), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (2387, 2389), False, 'import pygame\n'), ((2201, 2276), 'pygame.Rect', 'pygame.Rect', (['((x - 1) * GRID_SIZE)', '((y - 1) * GRID_SIZE)', 'GRID_SIZE', 'GRID_SIZE'], {}), '((x - 1) * GRID_SIZE, (y - 1) * GRID_SIZE, GRID_SIZE, GRID_SIZE)\n', (2212, 2276), False, 'import pygame\n')] |
'''
@Author = Ollie
'''
from data import *
import numpy as np
'''
This file contains functions that can be used to analyse stock returns
against a baseline stock (FTSE 100 tracker) and risk free stock (UK Gilds).
Functions calculate 3 different metrics:
- beta: volatility of stock compared to base through covariance matrix of stock
and baseline returns
- alpha: percentage with which stock outperforms market using beta value, risk
free rate of returns, and baseline stock returns
- Sharpes ratio: a single metric accounting for risk and reward calculated by
taking away risk free return from stock return and dividing by standard
deviation of the stock return
'''
def update_returns(start_date, df):
'''This is used to update the returns column of a dataframe by resampling
from a specified date using the stock_dataframe class. This is mainly used
so ISF_L and GLTS_L databases can be resized for comparison with stocks
having different time frames
:param start_date: is the date from which resampling should be done
:param df: dataframe on which recalculating should be performed
:return: resampled dataframe'''
return stock_dataframe("",None,df[df.index>=start_date]).pre_process(False)
def risk_free_rate(start_date, risk_free_df):
return (update_returns(start_date, risk_free_df)['Returns']).tail(1) - 1
def covariance(df, base):
return np.cov(df['Returns'], base)
def beta(cov_matrix):
if cov_matrix[1][1] == 0:
return np.nan
return (cov_matrix[0][1] / cov_matrix[1][1])
def alpha(df, beta_value, rf, base):
a = (df['Returns'].tail(1) - 1) - rf - beta_value * ((base.tail(1)-1) - rf)
return a.iloc[0] * 100
def sharpes(df, rf):
if np.std(df['Returns']) == 0:
return np.nan
return (((df['Returns'].tail(1) - 1) - rf) / np.std(df['Returns'])).iloc[0]
def get_metrics(df, start_date, base_df, risk_free_df):
if not start_date:
start_date = df.head(1).index[0]
rf = risk_free_rate(start_date, risk_free_df)
base = update_returns(start_date, base_df)['Returns']
beta_value = beta(covariance(df, base))
return beta_value, alpha(df, beta_value, rf, base), sharpes(df, rf)
def get_investment_values(df, buy_value):
'''This returns buy and current value of stock if buy value is specified,
if not buy_value is set to 1 and current is relative change '''
if buy_value != None:
current_value = ((df['Returns']).iloc[-1]) * float(buy_value)
else:
buy_value = (df['Returns']).iloc[0]
current_value = (df['Returns']).iloc[-1]
return buy_value, current_value
| [
"numpy.std",
"numpy.cov"
] | [((1393, 1420), 'numpy.cov', 'np.cov', (["df['Returns']", 'base'], {}), "(df['Returns'], base)\n", (1399, 1420), True, 'import numpy as np\n'), ((1719, 1740), 'numpy.std', 'np.std', (["df['Returns']"], {}), "(df['Returns'])\n", (1725, 1740), True, 'import numpy as np\n'), ((1818, 1839), 'numpy.std', 'np.std', (["df['Returns']"], {}), "(df['Returns'])\n", (1824, 1839), True, 'import numpy as np\n')] |
from rl.make_game import make_game
import numpy as np
import multiprocessing
def random_rollout(actions, env):
done = False
while not done:
action = np.random.choice(actions)
s, r, done, _ = env.step(action)
if done:
return r
class State(object):
    """A particle-filter node: a set of particles, a visit count, and a
    Monte-Carlo value estimate computed from random rollouts."""

    def __init__(self, particles, na, envs):
        self.particles = particles
        self.n = 1  # visit count
        self.na = na  # number of discrete actions
        # BUG FIX: evaluate() reads self.na, so it must run AFTER self.na is
        # assigned. The original assigned self.v first, which raised
        # AttributeError on construction.
        self.v = self.evaluate(envs)

    def update(self):
        """Record one more visit to this state."""
        self.n += 1

    def evaluate(self, envs):
        """Estimate the state value as the mean terminal reward of one random
        rollout per environment, run in parallel across CPU cores."""
        actions = np.arange(self.na, dtype=int)
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        results = pool.starmap(random_rollout, [(actions, env) for env in envs])
        pool.close()
        pool.join()  # wait for workers to exit cleanly
        return np.mean(results)
class PFAgent(object):
    """Particle-filter agent holding one environment/state pair per particle."""

    def __init__(self, n_particles, game, game_params, gamma):
        self.gamma = gamma
        self.envs = []
        self.states = []
        # BUG FIX: the original iterated `n_particles` (an int) directly,
        # which raises TypeError; iterate over a range instead.
        for _ in range(n_particles):
            env = make_game(game, game_params)
            env.seed(np.random.randint(1e7))  # draw some Env seed
            s = env.reset()
            self.envs.append(env)
            self.states.append(s)

    def step(self):
        """Advance every particle one step; back-propagate the mean reward of
        any particles whose episode finished."""
        new_particles = []
        bp = False
        rewards = [0]
        for i in range(len(self.envs)):
            # NOTE(review): env.step() is called with no action here, while
            # random_rollout calls env.step(action) — this likely raises
            # TypeError at runtime; confirm the intended action selection.
            s, r, done, _ = self.envs[i].step()
            new_particles.append(s)
            if done:
                rewards.append(r)
                bp = True
        if bp:
            self.backpropagate(np.mean(rewards))

    def backpropagate(self, avg_reward):
        """Placeholder for propagating `avg_reward` back through the filter."""
        pass
| [
"rl.make_game.make_game",
"numpy.mean",
"numpy.arange",
"numpy.random.randint",
"numpy.random.choice",
"multiprocessing.cpu_count"
] | [((167, 192), 'numpy.random.choice', 'np.random.choice', (['actions'], {}), '(actions)\n', (183, 192), True, 'import numpy as np\n'), ((545, 574), 'numpy.arange', 'np.arange', (['self.na'], {'dtype': 'int'}), '(self.na, dtype=int)\n', (554, 574), True, 'import numpy as np\n'), ((765, 781), 'numpy.mean', 'np.mean', (['results'], {}), '(results)\n', (772, 781), True, 'import numpy as np\n'), ((609, 636), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (634, 636), False, 'import multiprocessing\n'), ((993, 1021), 'rl.make_game.make_game', 'make_game', (['game', 'game_params'], {}), '(game, game_params)\n', (1002, 1021), False, 'from rl.make_game import make_game\n'), ((1043, 1072), 'numpy.random.randint', 'np.random.randint', (['(10000000.0)'], {}), '(10000000.0)\n', (1060, 1072), True, 'import numpy as np\n'), ((1526, 1542), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (1533, 1542), True, 'import numpy as np\n')] |
import numpy as np
import numpy.testing as npt
import pandas as pd
from unittest import TestCase, main
from nimble import Events
class EvTestCase(TestCase):
    """Base test case adding an assertion over an Events object's start/stop arrays."""

    @staticmethod
    def assertStartStops(events, vstarts, vstops):
        """Assert `events` found exactly the expected start and stop indices."""
        for actual, expected in ((events._starts, vstarts), (events._stops, vstops)):
            npt.assert_array_equal(actual, expected)
class TestDebouncing(EvTestCase):
    """Exercises activation (adeb) and deactivation (ddeb) debounce options."""

    def setUp(self):
        self.cond = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]) > 0

    def _check(self, cond, vstarts, vstops, **ev_kwargs):
        """Run Events(cond, **ev_kwargs).find() and compare against expectations."""
        events = Events(cond, **ev_kwargs).find()
        self.assertStartStops(events, np.array(vstarts), np.array(vstops))

    def test_adeb(self):
        self._check(self.cond, [2, 7], [4, 10], period=1, adeb=2)

    def test_ddeb(self):
        self._check(self.cond, [2, 7], [4, 12], period=1, ddeb=2)

    def test_adeb_ddeb(self):
        self._check(self.cond, [2], [12], period=1, adeb=2, ddeb=3.1)

    def test_nonint_deb(self):
        self._check(self.cond, [2, 7, 11], [4, 10, 12],
                    period=1, adeb=float(0.00000001), ddeb=float(0.99999999))

    def test_period_100ms(self):
        self._check(self.cond, [2, 7], [4, 12], period=0.1, adeb=0.15, ddeb=0.2)

    def test_period_120ms(self):
        self._check(self.cond, [2, 7], [4, 12], period=0.12, adeb=0.15, ddeb=0.2)

    def test_no_events_found(self):
        x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
        self._check(x > 0, [], [], period=1, adeb=0.15, ddeb=0.2)

    def test_event_always_active(self):
        x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
        self._check(x == 0, [0], [8], period=1, adeb=0.15, ddeb=0.2)

    def test_end_conditions(self):
        x = np.array([1, 1, 0, 0, 0, 0, 1, 1])
        self._check(x == 1, [0, 6], [2, 8], period=1, adeb=2, ddeb=2)
class TestDurationFilter(EvTestCase):
    """Exercises minimum/maximum event-duration filtering."""

    def setUp(self):
        self.cond = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]) > 0

    def _check(self, cond, vstarts, vstops, **ev_kwargs):
        """Run Events(cond, **ev_kwargs).find() and compare against expectations."""
        events = Events(cond, **ev_kwargs).find()
        self.assertStartStops(events, np.array(vstarts), np.array(vstops))

    def test_mindur(self):
        self._check(self.cond, [2, 7], [4, 10], period=1, mindur=2)

    def test_maxdur(self):
        self._check(self.cond, [2, 11], [4, 12], period=1, maxdur=2)

    def test_mindur_maxdur(self):
        self._check(self.cond, [2], [4], period=1, mindur=2, maxdur=2.5)

    def test_nonint_durs(self):
        self._check(self.cond, [2], [4],
                    period=1, mindur=float(1.00000001), maxdur=float(2.99999999))

    def test_period_100ms(self):
        self._check(self.cond, [2], [4], period=0.1, mindur=0.15, maxdur=0.2)

    def test_period_120ms(self):
        self._check(self.cond, [2], [4], period=0.12, mindur=0.15, maxdur=0.35)

    def test_no_events_found(self):
        x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
        self._check(x > 0, [], [], period=1, mindur=0.15, maxdur=0.2)

    def test_event_always_active(self):
        x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
        self._check(x == 0, [0], [8], period=1, mindur=0.15, maxdur=20)

    def test_end_conditions(self):
        x = np.array([1, 1, 0, 0, 0, 0, 1, 1])
        self._check(x == 1, [0, 6], [2, 8], period=1, mindur=2, maxdur=2)
class TestEventOffset(EvTestCase):
    """Exercises start/stop offset options that widen detected events."""

    def setUp(self):
        self.cond = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]) > 0

    def _check(self, cond, vstarts, vstops, **ev_kwargs):
        """Run Events(cond, **ev_kwargs).find() and compare against expectations."""
        events = Events(cond, **ev_kwargs).find()
        self.assertStartStops(events, np.array(vstarts), np.array(vstops))

    def test_startoffset(self):
        self._check(self.cond, [1, 6, 10], [4, 10, 12], period=1, startoffset=-1)

    def test_stopoffset(self):
        self._check(self.cond, [2, 7, 11], [5, 11, 12], period=1, stopoffset=1)

    def test_startoffset_stopoffset(self):
        self._check(self.cond, [1, 6, 10], [5, 11, 12],
                    period=1, startoffset=-1, stopoffset=1)

    def test_period_100ms(self):
        self._check(self.cond, [1, 6, 10], [5, 11, 12],
                    period=0.1, startoffset=-0.1, stopoffset=0.1)

    def test_period_120ms(self):
        self._check(self.cond, [1, 6, 10], [5, 11, 12],
                    period=0.12, startoffset=-0.1, stopoffset=0.1)

    def test_no_events_found(self):
        x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
        self._check(x > 0, [], [], period=1, startoffset=-1, stopoffset=1)

    def test_event_always_active(self):
        x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
        self._check(x == 0, [0], [8], period=1, startoffset=-1, stopoffset=1)

    def test_end_conditions(self):
        x = np.array([1, 1, 0, 0, 0, 0, 1, 1])
        self._check(x == 1, [0, 5], [3, 8], period=1, startoffset=-1, stopoffset=1)
class TestAsArrayMethod(TestCase):
    """Checks Events.as_array() values, substitution parameters, and return type."""

    def setUp(self):
        mask = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1]) > 0
        self.events = Events(mask, period=1).find()

    def test_default_parameters(self):
        """as_array() with default settings reproduces the condition."""
        expected = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        npt.assert_array_equal(expected, self.events.as_array())

    def test_as_array_false_value(self):
        """as_array() substitutes the requested low value."""
        expected = np.array([-1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1])
        npt.assert_array_equal(expected, self.events.as_array(false_values=-1))

    def test_as_array_true_value(self):
        """as_array() substitutes the requested high value."""
        expected = np.array([0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5])
        npt.assert_array_equal(expected, self.events.as_array(true_values=5))

    def test_as_array_false_and_true_value(self):
        """as_array() substitutes both low and high values."""
        expected = np.array([-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5])
        npt.assert_array_equal(
            expected, self.events.as_array(false_values=-1, true_values=5))

    def test_type(self):
        """as_array() returns a numpy ndarray."""
        result = self.events.as_array(false_values=-1, true_values=5)
        self.assertEqual(type(result), np.ndarray)
class TestAsSeries(TestCase):
    """Checks Events.as_series() values, substitution parameters, and return type."""
    def setUp(self):
        conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        condition = (conditional_array > 0)
        self.events = Events(condition, period=1).find()
    def test_default_parameters(self):
        """Test as_series() with default settings"""
        validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        npt.assert_array_equal(validation_series, self.events.as_series())
    def test_as_array_false_value(self):
        """Test as_series() with low value"""
        validation_series = np.array([-1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1])
        npt.assert_array_equal(validation_series, self.events.as_series(
            false_values=-1))
    def test_as_array_true_value(self):
        """Test as_series() with high value"""
        validation_series = np.array([0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5])
        npt.assert_array_equal(validation_series, self.events.as_series(
            true_values=5))
    def test_as_array_false_and_true_value(self):
        """Test as_series() with low and high values"""
        validation_series = np.array([-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5])
        npt.assert_array_equal(validation_series, self.events.as_series(
            false_values=-1,
            true_values=5))
    def test_type(self):
        # as_series() must return a pandas Series.
        typ = type(self.events.as_series(false_values=-1, true_values=5))
        self.assertEqual(typ, pd.core.series.Series)
class TestDurations(TestCase):
    """Checks the Events.durations property with debounced sub-second samples."""

    def setUp(self):
        raw = np.array([1, 0, 1, 1, 1, 1, 0, 0, 1, 1,
                        0, 0, 0, 1, 0, 0, 0, 1, 0, 0])
        self.events = Events(raw > 0, period=1 / 3, adeb=0.5, ddeb=1).find()

    def test_durations(self):
        # Debouncing merges the active samples into one event spanning
        # 8 samples at a period of 1/3 s.
        expected = [8 / 3]
        npt.assert_array_equal(expected, self.events.durations)
class TestEventDetection(TestCase):
    """End-to-end event detection on random and multi-input conditions."""

    def test_default_parameters(self):
        """Test event detection with only a supplied condition"""
        np.random.seed(10)
        # np.random.random_integers(0, 1, 100) is deprecated (removed in
        # recent NumPy); randint(0, 2, 100) draws the identical inclusive
        # [0, 1] sequence for a given legacy seed, so expected values are
        # unchanged.
        validation_array = np.random.randint(0, 2, 100)
        condition = (validation_array > 0)
        events = Events(condition, period=1).find()
        npt.assert_array_equal(validation_array, events.as_array())

    def test_multi_input_condition_event(self):
        """Test arrays that have multi-input conditions"""
        x = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0])
        y = np.array([0, 0, 1, 1, 1, 0, 0, 1, 0, 1])
        validation_array = np.array([0, 0, 1, 1, 0, 0, 0, 1, 0, 0])
        condition = ((x > 0) & (y > 0))
        events = Events(condition, period=1).find()
        npt.assert_array_equal(validation_array, events.as_array())
class TestSpecialMethods(TestCase):
    """Checks the __len__ and __eq__ protocols on Events."""

    def setUp(self):
        condition_array = np.array([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1])
        self.condition = (condition_array > 0)
        self.events = Events(self.condition, period=1).find()

    def test__len__(self):
        # assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(4, len(self.events))

    def test__eq__(self):
        other = Events(self.condition, period=1).find()
        self.assertEqual(self.events, other)
class TestAttributes(TestCase):
    """Constructor argument validation for Events."""

    def setUp(self):
        raw = np.array([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1])
        self.condition = raw > 0

    def test_period(self):
        self.assertRaises(ValueError, Events, self.condition, period=0)

    def test_startoffset(self):
        self.assertRaises(ValueError, Events, self.condition,
                          period=1, startoffset=1)

    def test_stopoffset(self):
        # NOTE(review): period=0 here means the ValueError may come from the
        # period, not the offset — possibly a copy-paste from test_period;
        # confirm whether period=1, stopoffset=-1 was intended.
        self.assertRaises(ValueError, Events, self.condition, period=0, stopoffset=-1)
class TestProperties(TestCase):
    """Checks that time-based constructor arguments are converted to sample counts."""

    def setUp(self):
        self.events = Events(np.array([False, False]), period=0.12,
                             adeb=1, ddeb=1,
                             mindur=1, maxdur=1,
                             startoffset=-1, stopoffset=1)

    def test_adeb(self):
        self.assertEqual(self.events._adeb, 9)

    def test_ddeb(self):
        # BUG FIX: this test previously re-checked _adeb (copy-paste); it
        # should inspect the deactivation debounce. With ddeb == adeb == 1
        # the converted sample count is the same.
        self.assertEqual(self.events._ddeb, 9)

    def test_mindur(self):
        self.assertEqual(self.events._mindur, 9)

    def test_maxdur(self):
        self.assertEqual(self.events._maxdur, 8)

    def test_startoffset(self):
        self.assertEqual(self.events._startoffset, -9)

    def test_stopoffset(self):
        self.assertEqual(self.events._stopoffset, 9)
if __name__ == '__main__':
    # Run the full unittest suite when this module is executed directly.
    main()
| [
"unittest.main",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"numpy.array",
"pandas.Series",
"nimble.Events",
"numpy.random.random_integers"
] | [((13500, 13506), 'unittest.main', 'main', ([], {}), '()\n', (13504, 13506), False, 'from unittest import TestCase, main\n'), ((236, 283), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['events._starts', 'vstarts'], {}), '(events._starts, vstarts)\n', (258, 283), True, 'import numpy.testing as npt\n'), ((292, 337), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['events._stops', 'vstops'], {}), '(events._stops, vstops)\n', (314, 337), True, 'import numpy.testing as npt\n'), ((413, 459), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]'], {}), '([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1])\n', (421, 459), True, 'import numpy as np\n'), ((536, 552), 'numpy.array', 'np.array', (['[2, 7]'], {}), '([2, 7])\n', (544, 552), True, 'import numpy as np\n'), ((570, 587), 'numpy.array', 'np.array', (['[4, 10]'], {}), '([4, 10])\n', (578, 587), True, 'import numpy as np\n'), ((747, 763), 'numpy.array', 'np.array', (['[2, 7]'], {}), '([2, 7])\n', (755, 763), True, 'import numpy as np\n'), ((781, 798), 'numpy.array', 'np.array', (['[4, 12]'], {}), '([4, 12])\n', (789, 798), True, 'import numpy as np\n'), ((963, 976), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (971, 976), True, 'import numpy as np\n'), ((994, 1008), 'numpy.array', 'np.array', (['[12]'], {}), '([12])\n', (1002, 1008), True, 'import numpy as np\n'), ((1184, 1204), 'numpy.array', 'np.array', (['[2, 7, 11]'], {}), '([2, 7, 11])\n', (1192, 1204), True, 'import numpy as np\n'), ((1222, 1243), 'numpy.array', 'np.array', (['[4, 10, 12]'], {}), '([4, 10, 12])\n', (1230, 1243), True, 'import numpy as np\n'), ((1475, 1491), 'numpy.array', 'np.array', (['[2, 7]'], {}), '([2, 7])\n', (1483, 1491), True, 'import numpy as np\n'), ((1509, 1526), 'numpy.array', 'np.array', (['[4, 12]'], {}), '([4, 12])\n', (1517, 1526), True, 'import numpy as np\n'), ((1709, 1725), 'numpy.array', 'np.array', (['[2, 7]'], {}), '([2, 7])\n', (1717, 1725), True, 'import numpy as np\n'), 
((1743, 1760), 'numpy.array', 'np.array', (['[4, 12]'], {}), '([4, 12])\n', (1751, 1760), True, 'import numpy as np\n'), ((1947, 1959), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1955, 1959), True, 'import numpy as np\n'), ((1977, 1989), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1985, 1989), True, 'import numpy as np\n'), ((2002, 2036), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (2010, 2036), True, 'import numpy as np\n'), ((2220, 2233), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2228, 2233), True, 'import numpy as np\n'), ((2251, 2264), 'numpy.array', 'np.array', (['[8]'], {}), '([8])\n', (2259, 2264), True, 'import numpy as np\n'), ((2277, 2311), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (2285, 2311), True, 'import numpy as np\n'), ((2491, 2507), 'numpy.array', 'np.array', (['[0, 6]'], {}), '([0, 6])\n', (2499, 2507), True, 'import numpy as np\n'), ((2525, 2541), 'numpy.array', 'np.array', (['[2, 8]'], {}), '([2, 8])\n', (2533, 2541), True, 'import numpy as np\n'), ((2554, 2588), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 0, 0, 1, 1]'], {}), '([1, 1, 0, 0, 0, 0, 1, 1])\n', (2562, 2588), True, 'import numpy as np\n'), ((2788, 2834), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]'], {}), '([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1])\n', (2796, 2834), True, 'import numpy as np\n'), ((2913, 2929), 'numpy.array', 'np.array', (['[2, 7]'], {}), '([2, 7])\n', (2921, 2929), True, 'import numpy as np\n'), ((2947, 2964), 'numpy.array', 'np.array', (['[4, 10]'], {}), '([4, 10])\n', (2955, 2964), True, 'import numpy as np\n'), ((3128, 3145), 'numpy.array', 'np.array', (['[2, 11]'], {}), '([2, 11])\n', (3136, 3145), True, 'import numpy as np\n'), ((3163, 3180), 'numpy.array', 'np.array', (['[4, 12]'], {}), '([4, 12])\n', (3171, 3180), True, 'import numpy as np\n'), ((3351, 3364), 'numpy.array', 'np.array', (['[2]'], {}), 
'([2])\n', (3359, 3364), True, 'import numpy as np\n'), ((3382, 3395), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (3390, 3395), True, 'import numpy as np\n'), ((3576, 3589), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (3584, 3589), True, 'import numpy as np\n'), ((3607, 3620), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (3615, 3620), True, 'import numpy as np\n'), ((3856, 3869), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (3864, 3869), True, 'import numpy as np\n'), ((3887, 3900), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (3895, 3900), True, 'import numpy as np\n'), ((4087, 4100), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (4095, 4100), True, 'import numpy as np\n'), ((4118, 4131), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (4126, 4131), True, 'import numpy as np\n'), ((4323, 4335), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4331, 4335), True, 'import numpy as np\n'), ((4353, 4365), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4361, 4365), True, 'import numpy as np\n'), ((4378, 4412), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (4386, 4412), True, 'import numpy as np\n'), ((4600, 4613), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4608, 4613), True, 'import numpy as np\n'), ((4631, 4644), 'numpy.array', 'np.array', (['[8]'], {}), '([8])\n', (4639, 4644), True, 'import numpy as np\n'), ((4657, 4691), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (4665, 4691), True, 'import numpy as np\n'), ((4874, 4890), 'numpy.array', 'np.array', (['[0, 6]'], {}), '([0, 6])\n', (4882, 4890), True, 'import numpy as np\n'), ((4908, 4924), 'numpy.array', 'np.array', (['[2, 8]'], {}), '([2, 8])\n', (4916, 4924), True, 'import numpy as np\n'), ((4937, 4971), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 0, 0, 1, 1]'], {}), '([1, 1, 0, 0, 0, 0, 1, 1])\n', (4945, 4971), True, 'import numpy as np\n'), 
((5172, 5218), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]'], {}), '([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1])\n', (5180, 5218), True, 'import numpy as np\n'), ((5302, 5322), 'numpy.array', 'np.array', (['[1, 6, 10]'], {}), '([1, 6, 10])\n', (5310, 5322), True, 'import numpy as np\n'), ((5340, 5361), 'numpy.array', 'np.array', (['[4, 10, 12]'], {}), '([4, 10, 12])\n', (5348, 5361), True, 'import numpy as np\n'), ((5535, 5555), 'numpy.array', 'np.array', (['[2, 7, 11]'], {}), '([2, 7, 11])\n', (5543, 5555), True, 'import numpy as np\n'), ((5573, 5594), 'numpy.array', 'np.array', (['[5, 11, 12]'], {}), '([5, 11, 12])\n', (5581, 5594), True, 'import numpy as np\n'), ((5778, 5798), 'numpy.array', 'np.array', (['[1, 6, 10]'], {}), '([1, 6, 10])\n', (5786, 5798), True, 'import numpy as np\n'), ((5816, 5837), 'numpy.array', 'np.array', (['[5, 11, 12]'], {}), '([5, 11, 12])\n', (5824, 5837), True, 'import numpy as np\n'), ((6027, 6047), 'numpy.array', 'np.array', (['[1, 6, 10]'], {}), '([1, 6, 10])\n', (6035, 6047), True, 'import numpy as np\n'), ((6065, 6086), 'numpy.array', 'np.array', (['[5, 11, 12]'], {}), '([5, 11, 12])\n', (6073, 6086), True, 'import numpy as np\n'), ((6282, 6302), 'numpy.array', 'np.array', (['[1, 6, 10]'], {}), '([1, 6, 10])\n', (6290, 6302), True, 'import numpy as np\n'), ((6320, 6341), 'numpy.array', 'np.array', (['[5, 11, 12]'], {}), '([5, 11, 12])\n', (6328, 6341), True, 'import numpy as np\n'), ((6541, 6553), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6549, 6553), True, 'import numpy as np\n'), ((6571, 6583), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6579, 6583), True, 'import numpy as np\n'), ((6596, 6630), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (6604, 6630), True, 'import numpy as np\n'), ((6823, 6836), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (6831, 6836), True, 'import numpy as np\n'), ((6854, 6867), 'numpy.array', 'np.array', 
(['[8]'], {}), '([8])\n', (6862, 6867), True, 'import numpy as np\n'), ((6880, 6914), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0])\n', (6888, 6914), True, 'import numpy as np\n'), ((7103, 7119), 'numpy.array', 'np.array', (['[0, 5]'], {}), '([0, 5])\n', (7111, 7119), True, 'import numpy as np\n'), ((7137, 7153), 'numpy.array', 'np.array', (['[3, 8]'], {}), '([3, 8])\n', (7145, 7153), True, 'import numpy as np\n'), ((7166, 7200), 'numpy.array', 'np.array', (['[1, 1, 0, 0, 0, 0, 1, 1]'], {}), '([1, 1, 0, 0, 0, 0, 1, 1])\n', (7174, 7200), True, 'import numpy as np\n'), ((7421, 7467), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1]'], {}), '([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])\n', (7429, 7467), True, 'import numpy as np\n'), ((7688, 7734), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1]'], {}), '([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])\n', (7696, 7734), True, 'import numpy as np\n'), ((7922, 7973), 'numpy.array', 'np.array', (['[-1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1]'], {}), '([-1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1])\n', (7930, 7973), True, 'import numpy as np\n'), ((8189, 8235), 'numpy.array', 'np.array', (['[0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5]'], {}), '([0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5])\n', (8197, 8235), True, 'import numpy as np\n'), ((8468, 8519), 'numpy.array', 'np.array', (['[-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5]'], {}), '([-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5])\n', (8476, 8519), True, 'import numpy as np\n'), ((8870, 8916), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1]'], {}), '([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])\n', (8878, 8916), True, 'import numpy as np\n'), ((9138, 9185), 'pandas.Series', 'pd.Series', (['[0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1]'], {}), '([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])\n', (9147, 9185), True, 'import pandas as pd\n'), ((9376, 9427), 'numpy.array', 'np.array', (['[-1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1]'], {}), '([-1, 
1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1])\n', (9384, 9427), True, 'import numpy as np\n'), ((9646, 9692), 'numpy.array', 'np.array', (['[0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5]'], {}), '([0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5])\n', (9654, 9692), True, 'import numpy as np\n'), ((9928, 9979), 'numpy.array', 'np.array', (['[-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5]'], {}), '([-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5])\n', (9936, 9979), True, 'import numpy as np\n'), ((10344, 10414), 'numpy.array', 'np.array', (['[1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0]'], {}), '([1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0])\n', (10352, 10414), True, 'import numpy as np\n'), ((10819, 10886), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['validation_durations', 'self.events.durations'], {}), '(validation_durations, self.events.durations)\n', (10841, 10886), True, 'import numpy.testing as npt\n'), ((11038, 11056), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (11052, 11056), True, 'import numpy as np\n'), ((11084, 11120), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (11109, 11120), True, 'import numpy as np\n'), ((11405, 11445), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 0, 0, 0, 1, 1, 0]'], {}), '([0, 1, 1, 1, 0, 0, 0, 1, 1, 0])\n', (11413, 11445), True, 'import numpy as np\n'), ((11458, 11498), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 1, 0, 0, 1, 0, 1]'], {}), '([0, 0, 1, 1, 1, 0, 0, 1, 0, 1])\n', (11466, 11498), True, 'import numpy as np\n'), ((11527, 11567), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0, 0, 1, 0, 0]'], {}), '([0, 0, 1, 1, 0, 0, 0, 1, 0, 0])\n', (11535, 11567), True, 'import numpy as np\n'), ((11814, 11863), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1]'], {}), '([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1])\n', (11822, 11863), True, 'import numpy as np\n'), ((12257, 12306), 'numpy.array', 'np.array', (['[1, 0, 0, 1, 1, 1, 
0, 0, 1, 1, 0, 0, 1]'], {}), '([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1])\n', (12265, 12306), True, 'import numpy as np\n'), ((12803, 12827), 'numpy.array', 'np.array', (['[False, False]'], {}), '([False, False])\n', (12811, 12827), True, 'import numpy as np\n'), ((605, 640), 'nimble.Events', 'Events', (['self.cond'], {'period': '(1)', 'adeb': '(2)'}), '(self.cond, period=1, adeb=2)\n', (611, 640), False, 'from nimble import Events\n'), ((816, 851), 'nimble.Events', 'Events', (['self.cond'], {'period': '(1)', 'ddeb': '(2)'}), '(self.cond, period=1, ddeb=2)\n', (822, 851), False, 'from nimble import Events\n'), ((1026, 1071), 'nimble.Events', 'Events', (['self.cond'], {'period': '(1)', 'adeb': '(2)', 'ddeb': '(3.1)'}), '(self.cond, period=1, adeb=2, ddeb=3.1)\n', (1032, 1071), False, 'from nimble import Events\n'), ((1544, 1594), 'nimble.Events', 'Events', (['self.cond'], {'period': '(0.1)', 'adeb': '(0.15)', 'ddeb': '(0.2)'}), '(self.cond, period=0.1, adeb=0.15, ddeb=0.2)\n', (1550, 1594), False, 'from nimble import Events\n'), ((1778, 1829), 'nimble.Events', 'Events', (['self.cond'], {'period': '(0.12)', 'adeb': '(0.15)', 'ddeb': '(0.2)'}), '(self.cond, period=0.12, adeb=0.15, ddeb=0.2)\n', (1784, 1829), False, 'from nimble import Events\n'), ((2054, 2098), 'nimble.Events', 'Events', (['(x > 0)'], {'period': '(1)', 'adeb': '(0.15)', 'ddeb': '(0.2)'}), '(x > 0, period=1, adeb=0.15, ddeb=0.2)\n', (2060, 2098), False, 'from nimble import Events\n'), ((2329, 2374), 'nimble.Events', 'Events', (['(x == 0)'], {'period': '(1)', 'adeb': '(0.15)', 'ddeb': '(0.2)'}), '(x == 0, period=1, adeb=0.15, ddeb=0.2)\n', (2335, 2374), False, 'from nimble import Events\n'), ((2606, 2646), 'nimble.Events', 'Events', (['(x == 1)'], {'period': '(1)', 'adeb': '(2)', 'ddeb': '(2)'}), '(x == 1, period=1, adeb=2, ddeb=2)\n', (2612, 2646), False, 'from nimble import Events\n'), ((2982, 3019), 'nimble.Events', 'Events', (['self.cond'], {'period': '(1)', 'mindur': '(2)'}), '(self.cond, period=1, 
mindur=2)\n', (2988, 3019), False, 'from nimble import Events\n'), ((3198, 3235), 'nimble.Events', 'Events', (['self.cond'], {'period': '(1)', 'maxdur': '(2)'}), '(self.cond, period=1, maxdur=2)\n', (3204, 3235), False, 'from nimble import Events\n'), ((3413, 3462), 'nimble.Events', 'Events', (['self.cond'], {'period': '(1)', 'mindur': '(2)', 'maxdur': '(2.5)'}), '(self.cond, period=1, mindur=2, maxdur=2.5)\n', (3419, 3462), False, 'from nimble import Events\n'), ((3918, 3972), 'nimble.Events', 'Events', (['self.cond'], {'period': '(0.1)', 'mindur': '(0.15)', 'maxdur': '(0.2)'}), '(self.cond, period=0.1, mindur=0.15, maxdur=0.2)\n', (3924, 3972), False, 'from nimble import Events\n'), ((4149, 4205), 'nimble.Events', 'Events', (['self.cond'], {'period': '(0.12)', 'mindur': '(0.15)', 'maxdur': '(0.35)'}), '(self.cond, period=0.12, mindur=0.15, maxdur=0.35)\n', (4155, 4205), False, 'from nimble import Events\n'), ((4430, 4478), 'nimble.Events', 'Events', (['(x > 0)'], {'period': '(1)', 'mindur': '(0.15)', 'maxdur': '(0.2)'}), '(x > 0, period=1, mindur=0.15, maxdur=0.2)\n', (4436, 4478), False, 'from nimble import Events\n'), ((4709, 4757), 'nimble.Events', 'Events', (['(x == 0)'], {'period': '(1)', 'mindur': '(0.15)', 'maxdur': '(20)'}), '(x == 0, period=1, mindur=0.15, maxdur=20)\n', (4715, 4757), False, 'from nimble import Events\n'), ((4989, 5033), 'nimble.Events', 'Events', (['(x == 1)'], {'period': '(1)', 'mindur': '(2)', 'maxdur': '(2)'}), '(x == 1, period=1, mindur=2, maxdur=2)\n', (4995, 5033), False, 'from nimble import Events\n'), ((5379, 5422), 'nimble.Events', 'Events', (['self.cond'], {'period': '(1)', 'startoffset': '(-1)'}), '(self.cond, period=1, startoffset=-1)\n', (5385, 5422), False, 'from nimble import Events\n'), ((5612, 5653), 'nimble.Events', 'Events', (['self.cond'], {'period': '(1)', 'stopoffset': '(1)'}), '(self.cond, period=1, stopoffset=1)\n', (5618, 5653), False, 'from nimble import Events\n'), ((5855, 5912), 'nimble.Events', 'Events', 
(['self.cond'], {'period': '(1)', 'startoffset': '(-1)', 'stopoffset': '(1)'}), '(self.cond, period=1, startoffset=-1, stopoffset=1)\n', (5861, 5912), False, 'from nimble import Events\n'), ((6104, 6167), 'nimble.Events', 'Events', (['self.cond'], {'period': '(0.1)', 'startoffset': '(-0.1)', 'stopoffset': '(0.1)'}), '(self.cond, period=0.1, startoffset=-0.1, stopoffset=0.1)\n', (6110, 6167), False, 'from nimble import Events\n'), ((6359, 6423), 'nimble.Events', 'Events', (['self.cond'], {'period': '(0.12)', 'startoffset': '(-0.1)', 'stopoffset': '(0.1)'}), '(self.cond, period=0.12, startoffset=-0.1, stopoffset=0.1)\n', (6365, 6423), False, 'from nimble import Events\n'), ((6648, 6701), 'nimble.Events', 'Events', (['(x > 0)'], {'period': '(1)', 'startoffset': '(-1)', 'stopoffset': '(1)'}), '(x > 0, period=1, startoffset=-1, stopoffset=1)\n', (6654, 6701), False, 'from nimble import Events\n'), ((6932, 6986), 'nimble.Events', 'Events', (['(x == 0)'], {'period': '(1)', 'startoffset': '(-1)', 'stopoffset': '(1)'}), '(x == 0, period=1, startoffset=-1, stopoffset=1)\n', (6938, 6986), False, 'from nimble import Events\n'), ((7218, 7272), 'nimble.Events', 'Events', (['(x == 1)'], {'period': '(1)', 'startoffset': '(-1)', 'stopoffset': '(1)'}), '(x == 1, period=1, startoffset=-1, stopoffset=1)\n', (7224, 7272), False, 'from nimble import Events\n'), ((7534, 7561), 'nimble.Events', 'Events', (['condition'], {'period': '(1)'}), '(condition, period=1)\n', (7540, 7561), False, 'from nimble import Events\n'), ((8983, 9010), 'nimble.Events', 'Events', (['condition'], {'period': '(1)'}), '(condition, period=1)\n', (8989, 9010), False, 'from nimble import Events\n'), ((10516, 10565), 'nimble.Events', 'Events', (['condition'], {'period': '(1 / 3)', 'adeb': '(0.5)', 'ddeb': '(1)'}), '(condition, period=1 / 3, adeb=0.5, ddeb=1)\n', (10522, 10565), False, 'from nimble import Events\n'), ((11181, 11208), 'nimble.Events', 'Events', (['condition'], {'period': '(1)'}), '(condition, 
period=1)\n', (11187, 11208), False, 'from nimble import Events\n'), ((11625, 11652), 'nimble.Events', 'Events', (['condition'], {'period': '(1)'}), '(condition, period=1)\n', (11631, 11652), False, 'from nimble import Events\n'), ((11933, 11965), 'nimble.Events', 'Events', (['self.condition'], {'period': '(1)'}), '(self.condition, period=1)\n', (11939, 11965), False, 'from nimble import Events\n'), ((12091, 12123), 'nimble.Events', 'Events', (['self.condition'], {'period': '(1)'}), '(self.condition, period=1)\n', (12097, 12123), False, 'from nimble import Events\n')] |
import os
import os.path as osp
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterable, List, Tuple, Union
import numpy as np
from PIL import Image
from habitat.tasks.rearrange.rearrange_sim import RearrangeSim
from habitat.tasks.rearrange.utils import IkHelper
from habitat_baselines.motion_planning.robot_target import RobotTarget
try:
from ompl import base as ob # pylint: disable=import-error
from ompl import geometric as og # pylint: disable=import-error
except ImportError:
pass
def to_ob_state(vec: np.ndarray, space: "ob.StateSpace", dim: int):
    """Copy the first ``dim`` entries of ``vec`` into a fresh OMPL state.

    :param vec: source joint values (indexable, at least ``dim`` long).
    :param space: OMPL state space the new state belongs to.
    :param dim: number of components to copy.
    :return: a newly allocated ``ob.State`` holding the copied values.
    """
    state = ob.State(space)
    idx = 0
    while idx < dim:
        state[idx] = vec[idx]
        idx += 1
    return state
class MpSpace(ABC):
    """
    Defines an abstract planning space for OMPL to interface with.

    Concrete subclasses (e.g. a joint-space implementation) translate between
    OMPL state objects and numpy joint vectors, and configure the planning
    problem for OMPL.
    """

    def __init__(self, use_sim: RearrangeSim, ik: IkHelper):
        # Simulator handle used by subclasses for FK / rendering queries,
        # plus an IK helper (e.g. for joint limits).
        self._mp_sim = use_sim
        self._ik = ik

    @abstractmethod
    def convert_state(self, x: Iterable) -> np.ndarray:
        """Convert an OMPL state object into a numpy joint vector."""
        pass

    @abstractmethod
    def set_arm(self, x: Union[List[float], np.ndarray]):
        """Set the simulated arm to the given joint configuration."""
        pass

    def set_env_state(self, env_state: Dict[str, Any]):
        # Snapshot of the environment state; subclasses may restore it after
        # moving the arm (see JsMpSpace.set_arm).
        self.env_state = env_state

    @abstractmethod
    def get_range(self) -> float:
        """
        Gets the planner step size range.
        """

    @abstractmethod
    def get_state_lims(self, restrictive: bool = False) -> np.ndarray:
        """
        Get the state limits of the planning problem.
        """

    @abstractmethod
    def get_state_dim(self) -> int:
        """
        Get the dimensionality of the planning problem
        """

    @abstractmethod
    def get_start_goal(self) -> Tuple[np.ndarray, np.ndarray]:
        """
        Gets the used start and goal states for the planner. This is after
        clipping and any additional pre-processing.
        """

    @abstractmethod
    def convert_sol(self, path) -> np.ndarray:
        """
        Convert a solution from OMPL format to numpy array
        """

    @abstractmethod
    def get_planner(self, si: "ob.SpaceInformation"):
        """Return the OMPL planner instance to use for this space."""
        pass

    @abstractmethod
    def set_problem(
        self,
        pdef: "ob.ProblemDefinition",
        space: "ob.StateSpace",
        si: "ob.SpaceInformation",
        start_state: "ob.State",
        targ_state: RobotTarget,
    ):
        """
        Sets up the planning problem
        """

    def render_start_targ(
        self,
        render_dir: str,
        subdir: str,
        targ_state: np.ndarray,
        suffix: str = "targ",
    ):
        """
        Renders the start and target to images for visualization
        (no-op by default; subclasses may override).
        """
def getPathLengthObjWithCostToGo(si):
    """Build a path-length optimization objective that uses OMPL's
    goal-region cost-to-go heuristic.

    :param si: OMPL ``ob.SpaceInformation`` for the planning problem.
    :return: configured ``ob.PathLengthOptimizationObjective``.
    """
    objective = ob.PathLengthOptimizationObjective(si)
    heuristic = ob.CostToGoHeuristic(ob.goalRegionCostToGo)
    objective.setCostToGoHeuristic(heuristic)
    return objective
class JsMpSpace(MpSpace):
    """Joint-space planning space: OMPL states are 7-DoF arm joint vectors."""

    def __init__(self, use_sim, ik, start_num_calls, should_render):
        super().__init__(use_sim, ik)
        # self._lower_joint_lims, self._upper_joint_lims = self._ik.get_joint_limits()
        # Use the restrictive (PyBullet-derived, normalized) joint limits.
        joint_lims = self.get_state_lims(True)
        self._lower_joint_lims, self._upper_joint_lims = (
            joint_lims[:, 0],
            joint_lims[:, 1],
        )
        # Counter used to number rendered start/goal images across calls.
        self.num_calls = start_num_calls
        self._should_render = should_render

    def convert_state(self, x):
        """Copy the first 7 components of an OMPL state into a numpy array."""
        return np.array([x[i] for i in range(7)])

    def _norm_joint_angle(self, angles):
        # Wrap angles into (-pi, pi] via atan2(sin, cos).
        return np.arctan2(np.sin(angles), np.cos(angles))

    def get_planner(self, si):
        """Use bidirectional RRT-Connect as the OMPL planner."""
        return og.RRTConnect(si)

    def get_state_lims(self, restrictive=False):
        """Get the state limits of the planning problem. If restrictive is true then
        this returns the joint limts based on the PyBullet joint limits
        """
        if restrictive:
            lower_joint_lims, upper_joint_lims = self._ik.get_joint_limits()
            # Joints reported as [0, 2*pi] are treated as full-revolution
            # joints and mapped to [-pi, pi].
            lower_joint_lims = [
                -np.pi if np.isclose(a, 0.0) else a for a in lower_joint_lims
            ]
            upper_joint_lims = [
                np.pi if np.isclose(a, 2 * np.pi) else a
                for a in upper_joint_lims
            ]
            lower_joint_lims = self._norm_joint_angle(lower_joint_lims)
            upper_joint_lims = self._norm_joint_angle(upper_joint_lims)
            # Shape (7, 2): column 0 = lower bound, column 1 = upper bound.
            return np.stack([lower_joint_lims, upper_joint_lims], axis=-1)
        else:
            # Permissive limits: +/- 2*pi for each of the 7 joints.
            return np.stack([[-2 * np.pi] * 7, [2 * np.pi] * 7], axis=-1)

    def get_state_dim(self):
        """Planning dimensionality = number of arm joints in the simulator."""
        return len(self._mp_sim._sim.robot.arm_joint_pos)

    def _fk(self, joints):
        """Sets the joint state and applys the change"""
        self._mp_sim.set_arm_pos(joints)
        self._mp_sim.micro_step()

    def get_start_goal(self) -> Tuple[np.ndarray, np.ndarray]:
        """Return the (clipped, normalized) start/goal set by set_problem()."""
        return (self.used_js_start, self.used_js_goal)

    def set_problem(
        self,
        pdef,
        space,
        si,
        js_start,
        robot_targ,
    ):
        """
        Sets up the OMPL problem: normalizes and clips the start/goal joint
        states, stores them for later rendering, and installs a path-length
        objective whose cost-to-go is the end-effector distance to the target.
        """
        js_end = robot_targ.joints_target
        joint_shape = self._lower_joint_lims.shape
        js_start = self._norm_joint_angle(js_start)
        js_end = self._norm_joint_angle(js_end)
        # In case you want some padding to the limits for extra safety
        eps = np.full(joint_shape, 0.000)
        js_start = np.clip(
            js_start,
            self._lower_joint_lims + eps,
            self._upper_joint_lims - eps,
        )
        js_end = np.clip(
            js_end, self._lower_joint_lims + eps, self._upper_joint_lims - eps
        )
        self.used_js_start = js_start
        self.used_js_goal = js_end
        self.num_calls += 1

        js_start = to_ob_state(js_start, space, self.get_state_dim())
        js_end = to_ob_state(js_end, space, self.get_state_dim())

        def admiss_heuristic(cur_state, goal):
            # Admissible heuristic: straight-line end-effector distance to
            # the target. NOTE: mutates simulator state via FK.
            use_cur_state = self.convert_state(cur_state)
            # FK to get both in EE space.
            self._fk(use_cur_state)
            cur_ee_state = self._mp_sim.get_ee_pos()
            ret = np.linalg.norm(robot_targ.ee_target_pos - cur_ee_state)
            return ret

        def getPathLengthObjWithCostToGo(si):
            # Shadows the module-level helper: same objective, but with the
            # EE-distance heuristic above instead of goalRegionCostToGo.
            obj = ob.PathLengthOptimizationObjective(si)
            obj.setCostToGoHeuristic(ob.CostToGoHeuristic(admiss_heuristic))
            return obj

        pdef.setStartAndGoalStates(js_start, js_end)
        pdef.setOptimizationObjective(getPathLengthObjWithCostToGo(si))

    def render_start_targ(self, render_dir, subdir, targ_state, suffix="targ"):
        """Render the goal and start configurations to JPEG files under
        ``render_dir/subdir``; optionally draws a blue sphere at targ_state."""
        if targ_state is not None:
            targ_viz_id = self._mp_sim.add_sphere(0.06, color=[0, 0, 1, 1])
            self._mp_sim.set_position(targ_state, targ_viz_id)
        use_dir = osp.join(render_dir, subdir)
        os.makedirs(use_dir, exist_ok=True)
        # Visualize the target position.
        # NOTE: The object will not immediately snap to the robot's hand if a target joint
        # state is provided. This is not an issue, it only affects this one
        # rendering.
        self._fk(self.used_js_goal)
        Image.fromarray(self._mp_sim.render()).save(
            osp.join(use_dir, f"{suffix}_goal_{self.num_calls}.jpeg")
        )
        self._fk(self.used_js_start)
        save_f_name = osp.join(
            use_dir, f"{suffix}_start_{self.num_calls}.jpeg"
        )
        Image.fromarray(self._mp_sim.render()).save(save_f_name)
        print("Rendered start / goal MP to ", save_f_name)
        if targ_state is not None:
            self._mp_sim.remove_object(targ_viz_id)

    def get_range(self):
        """Planner step-size range (radians in joint space)."""
        return 0.1

    def set_arm(self, des_joint_pos):
        """Apply a desired joint configuration and restore the saved env state."""
        des_joint_pos = self.convert_state(des_joint_pos)
        self._fk(des_joint_pos)
        self._mp_sim.set_state(self.env_state)
        # NOTE(review): this final assignment is unused (dead code).
        des_joint_pos = np.array(des_joint_pos)[:7]

    def convert_sol(self, path):
        """Convert an OMPL path into an (n_waypoints, 7) numpy array."""
        plan = np.array([self.convert_state(x) for x in path.getStates()])
        return plan
| [
"numpy.full",
"numpy.stack",
"os.makedirs",
"ompl.base.CostToGoHeuristic",
"numpy.clip",
"ompl.base.PathLengthOptimizationObjective",
"ompl.base.State",
"numpy.sin",
"numpy.linalg.norm",
"numpy.array",
"numpy.cos",
"numpy.isclose",
"ompl.geometric.RRTConnect",
"os.path.join"
] | [((606, 621), 'ompl.base.State', 'ob.State', (['space'], {}), '(space)\n', (614, 621), True, 'from ompl import base as ob\n'), ((2678, 2716), 'ompl.base.PathLengthOptimizationObjective', 'ob.PathLengthOptimizationObjective', (['si'], {}), '(si)\n', (2712, 2716), True, 'from ompl import base as ob\n'), ((2746, 2789), 'ompl.base.CostToGoHeuristic', 'ob.CostToGoHeuristic', (['ob.goalRegionCostToGo'], {}), '(ob.goalRegionCostToGo)\n', (2766, 2789), True, 'from ompl import base as ob\n'), ((3519, 3536), 'ompl.geometric.RRTConnect', 'og.RRTConnect', (['si'], {}), '(si)\n', (3532, 3536), True, 'from ompl import geometric as og\n'), ((5262, 5287), 'numpy.full', 'np.full', (['joint_shape', '(0.0)'], {}), '(joint_shape, 0.0)\n', (5269, 5287), True, 'import numpy as np\n'), ((5309, 5386), 'numpy.clip', 'np.clip', (['js_start', '(self._lower_joint_lims + eps)', '(self._upper_joint_lims - eps)'], {}), '(js_start, self._lower_joint_lims + eps, self._upper_joint_lims - eps)\n', (5316, 5386), True, 'import numpy as np\n'), ((5451, 5526), 'numpy.clip', 'np.clip', (['js_end', '(self._lower_joint_lims + eps)', '(self._upper_joint_lims - eps)'], {}), '(js_end, self._lower_joint_lims + eps, self._upper_joint_lims - eps)\n', (5458, 5526), True, 'import numpy as np\n'), ((6726, 6754), 'os.path.join', 'osp.join', (['render_dir', 'subdir'], {}), '(render_dir, subdir)\n', (6734, 6754), True, 'import os.path as osp\n'), ((6763, 6798), 'os.makedirs', 'os.makedirs', (['use_dir'], {'exist_ok': '(True)'}), '(use_dir, exist_ok=True)\n', (6774, 6798), False, 'import os\n'), ((7257, 7315), 'os.path.join', 'osp.join', (['use_dir', 'f"""{suffix}_start_{self.num_calls}.jpeg"""'], {}), "(use_dir, f'{suffix}_start_{self.num_calls}.jpeg')\n", (7265, 7315), True, 'import os.path as osp\n'), ((3440, 3454), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (3446, 3454), True, 'import numpy as np\n'), ((3456, 3470), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (3462, 3470), True, 'import 
numpy as np\n'), ((4291, 4346), 'numpy.stack', 'np.stack', (['[lower_joint_lims, upper_joint_lims]'], {'axis': '(-1)'}), '([lower_joint_lims, upper_joint_lims], axis=-1)\n', (4299, 4346), True, 'import numpy as np\n'), ((4380, 4434), 'numpy.stack', 'np.stack', (['[[-2 * np.pi] * 7, [2 * np.pi] * 7]'], {'axis': '(-1)'}), '([[-2 * np.pi] * 7, [2 * np.pi] * 7], axis=-1)\n', (4388, 4434), True, 'import numpy as np\n'), ((6043, 6098), 'numpy.linalg.norm', 'np.linalg.norm', (['(robot_targ.ee_target_pos - cur_ee_state)'], {}), '(robot_targ.ee_target_pos - cur_ee_state)\n', (6057, 6098), True, 'import numpy as np\n'), ((6187, 6225), 'ompl.base.PathLengthOptimizationObjective', 'ob.PathLengthOptimizationObjective', (['si'], {}), '(si)\n', (6221, 6225), True, 'from ompl import base as ob\n'), ((7129, 7186), 'os.path.join', 'osp.join', (['use_dir', 'f"""{suffix}_goal_{self.num_calls}.jpeg"""'], {}), "(use_dir, f'{suffix}_goal_{self.num_calls}.jpeg')\n", (7137, 7186), True, 'import os.path as osp\n'), ((7794, 7817), 'numpy.array', 'np.array', (['des_joint_pos'], {}), '(des_joint_pos)\n', (7802, 7817), True, 'import numpy as np\n'), ((6263, 6301), 'ompl.base.CostToGoHeuristic', 'ob.CostToGoHeuristic', (['admiss_heuristic'], {}), '(admiss_heuristic)\n', (6283, 6301), True, 'from ompl import base as ob\n'), ((3916, 3934), 'numpy.isclose', 'np.isclose', (['a', '(0.0)'], {}), '(a, 0.0)\n', (3926, 3934), True, 'import numpy as np\n'), ((4040, 4064), 'numpy.isclose', 'np.isclose', (['a', '(2 * np.pi)'], {}), '(a, 2 * np.pi)\n', (4050, 4064), True, 'import numpy as np\n')] |
# Fix OpenCv2 configuration error with ROS
# import sys
# sys.path.remove("/opt/ros/kinetic/lib/python2.7/dist-packages")
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import bcd # The Boustrophedon Cellular decomposition
import move_boustrophedon # Uses output of bcd cells in order to move the robot
from ccp_v0 import complete_coverage0
import exceptions
def display_path_on_map(separate_map, cells, xx, yy):
    """Render the decomposed cells as a colored image and overlay the path.

    Args:
        separate_map: 2D array of cell ids (0 = background / obstacle).
        cells: number of cells; valid cell ids are 1..cells-1.
        xx, yy: x/y coordinates of the planned path to draw on top.
    """
    fig, ax1 = plt.subplots()
    # Use np.zeros (not np.empty): pixels belonging to no cell (id 0, i.e.
    # obstacles/background) would otherwise show uninitialized garbage colors.
    map_img = np.zeros([*separate_map.shape, 3], dtype=np.uint8)
    random_colors = np.random.randint(0, 255, [cells, 3])
    for cell_id in range(1, cells):
        map_img[separate_map == cell_id, :] = random_colors[cell_id, :]
    ax1.imshow(map_img)
    ax1.plot(xx, yy)
    plt.show()
# Read the original data
# original_map = bcd.cv2.imread("../data/real_ex4.png") # image from the git
original_map = bcd.cv2.imread("../data/map0001.png")

# We need binary image
# 1's represents free space while 0's represents objects/walls
# this git uses 1 and 0, but it shouldn't be hard to convert to True and False
if len(original_map.shape) > 2:
    print("Map image is converted to binary")
    # Keep only one channel, then threshold at 127 -> {0, 1}.
    single_channel_map = original_map[:, :, 0]
    _, binary_map = bcd.cv2.threshold(single_channel_map, 127, 1, bcd.cv2.THRESH_BINARY)

# Call The Boustrophedon Cellular Decomposition function
cell_list: bcd.List[bcd.Cell] = None
bcd_out_im, bcd_out_cells, cell_numbers, cell_boundaries, non_neighboor_cell_numbers, cell_list = bcd.bcd(binary_map)
# Show the decomposed cells on top of original map
bcd.display_separate_map(bcd_out_im, bcd_out_cells)
move_boustrophedon.plt.show(block=False)

# define and add all cells(nodes) to the graph
g = nx.Graph()
g.add_nodes_from([i for i in range(bcd_out_cells)])
# Create links between all of the neighbouring cells(nodes)
for node in cell_list:
    for neighbour in node.neighbours:
        g.add_edge(cell_list.index(node), cell_list.index(neighbour))
        print(f'From {cell_list.index(node)} to {cell_list.index(neighbour)}')
nx.draw(g, with_labels=True)
plt.show()

# quick and dirty way to get a symmetric matrix (symmetric across the main diagonal)
# matrix = np.array(np.ones((bcd_out_cells, bcd_out_cells)) * np.inf)
# for i in range(len(cell_list)):
#     for j in range(len(cell_list)):
#         if cell_list[j] in cell_list[i].neighbours:
#             matrix[i][j] = 1
#             matrix[j][i] = 1
# matrix[16][17] = 1
# matrix[17][16] = 1
# ant_colony = AntColony(matrix, 1, 1, 100, 0.95, alpha=1, beta=1)
# shortest_path = ant_colony.run()
# print ("shorted_path: {}".format(shortest_path))

# Visit order over the cell graph: depth-first traversal starting at cell 10.
depth_first = nx.algorithms.dfs_tree(g, 10).nodes
print(f'meklesana plasuma no 10:{nx.algorithms.bfs_tree(g, 10).nodes}')
print(f'meklesana dziļumā no 10:{depth_first}')

# PLAN THE PATH IN EACH CELL
# todo check if it is better to go horizontally or vertically
x, y = complete_coverage0(cell_list, depth_first, 2)
# try to draw one plot on other:
display_path_on_map(bcd_out_im, bcd_out_cells, x, y)
| [
"move_boustrophedon.plt.show",
"matplotlib.pyplot.show",
"numpy.empty",
"bcd.cv2.imread",
"bcd.bcd",
"networkx.algorithms.bfs_tree",
"numpy.random.randint",
"networkx.Graph",
"networkx.draw",
"bcd.display_separate_map",
"matplotlib.pyplot.subplots",
"bcd.cv2.threshold",
"ccp_v0.complete_cove... | [((884, 921), 'bcd.cv2.imread', 'bcd.cv2.imread', (['"""../data/map0001.png"""'], {}), "('../data/map0001.png')\n", (898, 921), False, 'import bcd\n'), ((1495, 1514), 'bcd.bcd', 'bcd.bcd', (['binary_map'], {}), '(binary_map)\n', (1502, 1514), False, 'import bcd\n'), ((1566, 1617), 'bcd.display_separate_map', 'bcd.display_separate_map', (['bcd_out_im', 'bcd_out_cells'], {}), '(bcd_out_im, bcd_out_cells)\n', (1590, 1617), False, 'import bcd\n'), ((1618, 1658), 'move_boustrophedon.plt.show', 'move_boustrophedon.plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1645, 1658), False, 'import move_boustrophedon\n'), ((1711, 1721), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1719, 1721), True, 'import networkx as nx\n'), ((2046, 2074), 'networkx.draw', 'nx.draw', (['g'], {'with_labels': '(True)'}), '(g, with_labels=True)\n', (2053, 2074), True, 'import networkx as nx\n'), ((2075, 2085), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2083, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2892, 2937), 'ccp_v0.complete_coverage0', 'complete_coverage0', (['cell_list', 'depth_first', '(2)'], {}), '(cell_list, depth_first, 2)\n', (2910, 2937), False, 'from ccp_v0 import complete_coverage0\n'), ((459, 473), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (471, 473), True, 'import matplotlib.pyplot as plt\n'), ((488, 538), 'numpy.empty', 'np.empty', (['[*separate_map.shape, 3]'], {'dtype': 'np.uint8'}), '([*separate_map.shape, 3], dtype=np.uint8)\n', (496, 538), True, 'import numpy as np\n'), ((559, 596), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '[cells, 3]'], {}), '(0, 255, [cells, 3])\n', (576, 596), True, 'import numpy as np\n'), ((754, 764), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (762, 764), True, 'import matplotlib.pyplot as plt\n'), ((1233, 1301), 'bcd.cv2.threshold', 'bcd.cv2.threshold', (['single_channel_map', '(127)', '(1)', 'bcd.cv2.THRESH_BINARY'], {}), 
'(single_channel_map, 127, 1, bcd.cv2.THRESH_BINARY)\n', (1250, 1301), False, 'import bcd\n'), ((2637, 2666), 'networkx.algorithms.dfs_tree', 'nx.algorithms.dfs_tree', (['g', '(10)'], {}), '(g, 10)\n', (2659, 2666), True, 'import networkx as nx\n'), ((2706, 2735), 'networkx.algorithms.bfs_tree', 'nx.algorithms.bfs_tree', (['g', '(10)'], {}), '(g, 10)\n', (2728, 2735), True, 'import networkx as nx\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021, ETH Zurich, Computer Engineering Group (TEC)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
@author: romantrueb
@brief: Evaluate data collected by flooding using eLWB
"""
import sys
import os
import numpy as np
import pandas as pd
from collections import OrderedDict, Counter
import pickle
import hashlib
import re
# plots
import networkx as nx
from bokeh import plotting
from bokeh.layouts import column, gridplot
from bokeh.models import ColumnDataSource, LabelSet, Div
from bokeh.palettes import Category10_10
import holoviews as hv
hv.extension('bokeh')
from holoviews import opts
# construct html with python
import dominate
from dominate.tags import *
from dominate.util import raw
################################################################################
FSK_MODULATIONS = [8, 9, 10]  # modulation indices considered FSK (NOTE(review): not referenced in the visible code)
PLOT_HEIGHT = 700  # pixel height used for all bokeh/holoviews plots below
################################################################################
# Helper functions/objects
################################################################################
def getDfHash(df):
'''Calculates a hash over all dataframe data values and the column labels, but not the index (aka row labels).
'''
colsBytes = ''.join(df.columns).encode('utf-8')
colsArray = np.array((int(hashlib.sha256(colsBytes).hexdigest()[:16], 16))).astype(np.uint64)
dfValuesArray = pd.util.hash_pandas_object(df, index=False).values
l = np.append(colsArray, dfValuesArray)
return hashlib.sha256(l).hexdigest()
def styleDf(df, cmap='inferno', format='{:.1f}', replaceNan=True, applymap=None):
    """Render a dataframe as HTML with a background color gradient.

    :param df:         dataframe to render.
    :param cmap:       matplotlib colormap name for the gradient.
    :param format:     cell number format string.
    :param replaceNan: if True, strip the literal 'nan' from the output HTML.
    :param applymap:   optional per-cell style function.
    :return: the rendered HTML string.
    """
    styler = df.style.background_gradient(cmap=cmap, axis=None)
    styler = styler.format(format)
    if applymap is not None:
        styler = styler.applymap(applymap)
    html_text = styler.render()
    return html_text.replace('nan', '') if replaceNan else html_text
# CSS injected into the generated HTML reports: compact bordered cells for the
# matrix tables, plus a borderless "outer" table used to lay two matrices
# side by side (see saveMatricesToHtml).
htmlStyleBlock = '''
table, th, td {font-size:10pt; border:1px solid lightgrey; border-collapse:collapse; text-align:left; font-family:arial;}
th, td {padding: 5px; text-align:center; width:22px;}
table.outer, th.outer, td.outer {font-size:10pt; border:0px solid lightgrey; border-collapse:collapse; text-align:left; font-family:arial;}
th.outer, td.outer {padding: 5px; text-align:center;}
'''
################################################################################
# Functions
################################################################################
def extractConnectionData(df, txConfigLabels):
    """Compute per-connection (initiator -> receiver) statistics for every
    TX configuration.

    :param df:             flood log dataframe (one row per observed flood).
    :param txConfigLabels: column names that together identify a TX config.
    :return: OrderedDict mapping txConfig tuple -> dict of dataframes
             ('prr', 'frr', 'hopDistance', 'numFloods', 'numFloodsSucc'),
             each indexed by initiator id (rows) and receiver id (columns).
    """
    # get all node IDs
    nodeIds = sorted(df.node_id.unique())
    numNodes = len(nodeIds)

    # extract data
    matrixDfDict = OrderedDict()
    for txConfig, dfMod in df.groupby(by=txConfigLabels):
        configStr = ', '.join(['{}={}'.format(k, v) for k, v in zip(txConfigLabels, txConfig)])
        print('=== {} ==='.format(configStr))
        # prepare matrices (NaN = no data for that connection)
        frrMatrix = np.empty( (numNodes, numNodes,) ) * np.nan     # flood reception ratio (FRR) for each connection
        prrMatrix = np.empty( (numNodes, numNodes,) ) * np.nan     # packet reception ratio (PRR) for each connection
        hopDistanceMatrix = np.empty( (numNodes, numNodes,) ) * np.nan  # hop distance for each connection
        numFloodsMatrix = np.empty( (numNodes, numNodes,) ) * np.nan    # number of floods per connection
        numFloodsSuccMatrix = np.empty( (numNodes, numNodes,) ) * np.nan  # number successfully received floods per connection

        # FRR and hop distance
        dfModTmp = dfMod[(dfMod.elwb_phase!='CONT') & (dfMod.elwb_phase!='REQ')]  # ignore floods during contention phase since content is unreliable in parts
        for conn, dfConn in dfModTmp.groupby(by=['initiator_id', 'node_id']):
            nodeTx, nodeRx = conn
            # skip flood log entries where nodes have not yet received the nodeId of the host
            if nodeTx==0 or nodeRx==0:
                continue
            # skip flood log entries which contain faulty data
            if not nodeTx in nodeIds:
                print('WARNING: out-of-range initiator_id {} observed!'.format(nodeTx))
                continue
            nodeTxIdx = nodeIds.index(nodeTx)
            nodeRxIdx = nodeIds.index(nodeRx)
            numFloods = len(dfConn)
            numFloodsSucc = np.sum(dfConn.rx_cnt > 0)
            # numFailed = np.sum(dfConn.rx_cnt == 0)
            numFloodsMatrix[nodeTxIdx, nodeRxIdx] = numFloods
            numFloodsSuccMatrix[nodeTxIdx, nodeRxIdx] = numFloodsSucc
            frrMatrix[nodeTxIdx, nodeRxIdx] = numFloodsSucc/numFloods
            # Average slot index at first reception = hop distance estimate.
            hopDistance = np.mean(dfConn[dfConn.notna().rx_idx].rx_idx)
            if not pd.isnull(hopDistance): # note: it is not possible to assign pd.NA to a numpy matrix
                hopDistanceMatrix[nodeTxIdx][nodeRxIdx] = hopDistance
                assert len(dfConn[dfConn.notna().rx_idx]) == numFloodsSucc # this needs to hold, otherwise we cannot use numFloodsSucc to determine number of floods used to calculate avg hopDistance

        # PRR
        dfModTmp = dfMod[(dfMod.elwb_phase!='CONT') & (dfMod.elwb_phase!='REQ') & (dfMod.rx_idx==0)]  # ignore floods during contention phase & and filter out floods with rx_idx > 0
        for conn, dfConn in dfModTmp.groupby(by=['initiator_id', 'node_id']):
            nodeTx, nodeRx = conn
            # skip flood log entries where nodes have not received the nodeId of the host
            if nodeTx==0 or nodeRx==0:
                continue
            # skip flood log entries which contain faulty data
            if not nodeTx in nodeIds:
                print('WARNING: out-of-range initiator_id {} observed!'.format(nodeTx))
                continue
            nodeTxIdx = nodeIds.index(nodeTx)
            nodeRxIdx = nodeIds.index(nodeRx)
            numFloodsFirstSlot = len(dfConn)
            prrMatrix[nodeTxIdx, nodeRxIdx] = numFloodsFirstSlot/numFloodsMatrix[nodeTxIdx, nodeRxIdx]

        # convert numpy matrix to pandas dataframe to add correct indices
        matrixDfDict[txConfig] = {
            'prr': pd.DataFrame(data=prrMatrix, index=nodeIds, columns=nodeIds),
            'frr': pd.DataFrame(data=frrMatrix, index=nodeIds, columns=nodeIds),
            'hopDistance': pd.DataFrame(data=hopDistanceMatrix, index=nodeIds, columns=nodeIds),
            'numFloods': pd.DataFrame(data=numFloodsMatrix, index=nodeIds, columns=nodeIds),
            'numFloodsSucc': pd.DataFrame(data=numFloodsSuccMatrix, index=nodeIds, columns=nodeIds),
        }
    return matrixDfDict
def saveMatricesToHtml(matrixDfDict, txConfigLabels, dfHash, matrixNames, titles, cmaps, formats, applymaps=None, outputDir='data'):
    """Write one HTML page showing, for every TX config, two color-coded
    matrices side by side (built with the ``dominate`` DOM library).

    :param matrixDfDict:   output of extractConnectionData().
    :param txConfigLabels: labels used to render the per-config heading.
    :param dfHash:         dataset hash; first 8 chars go into the file name.
    :param matrixNames:    pair of keys into each config's matrix dict.
    :param titles:         pair of column headings for the two matrices.
    :param cmaps:          pair of colormap names passed to styleDf().
    :param formats:        pair of cell format strings passed to styleDf().
    :param applymaps:      optional pair of per-cell style functions.
    :param outputDir:      directory the HTML file is written to.
    """
    h = html()
    if applymaps is None:
        applymaps = (lambda x: '', lambda x: '')
    # dominate context managers: elements created inside a `with` block are
    # appended to that block's node.
    with h.add(head()):
        meta(charset='UTF-8')
        style(raw(htmlStyleBlock))
    with h.add(body()).add(div(id='content')):
        # h1('Main Title')
        for txConfig in matrixDfDict.keys():
            html0 = styleDf(
                df=matrixDfDict[txConfig][matrixNames[0]],
                cmap=cmaps[0],
                format=formats[0],
                applymap=applymaps[0],
            )
            html1 = styleDf(
                df=matrixDfDict[txConfig][matrixNames[1]],
                cmap=cmaps[1],
                format=formats[1],
                applymap=applymaps[1],
            )
            configStr = ', '.join(['{}={}'.format(k, v) for k, v in zip(txConfigLabels, txConfig)])
            h2(configStr)
            # Borderless outer table: one header row, one row with the two
            # pre-rendered matrix tables embedded as raw HTML.
            with table(cls="outer").add(tbody()):
                with tr(cls="outer"):
                    th(titles[0], cls='outer')
                    th(cls="outer")
                    th(titles[1], cls='outer')
                with tr(cls='outer'):
                    td(raw(html0), cls='outer')
                    td(cls="outer")
                    td(raw(html1), cls='outer')
    htmlPath = os.path.join(outputDir, '{}_{}.html'.format(matrixNames[0], dfHash[:8]))
    os.makedirs(os.path.split(htmlPath)[0], exist_ok=True)
    with open(htmlPath,"w") as fp:
        fp.write(h.render())
def evalConnectivity(matrixDfDict, nodeIds, txConfigLabels, prrThreshold=0.95, outputDir='data'):
    """Build a connectivity graph per TX config from PRR data, plot the
    graphs and node-degree statistics, and save both as HTML files.

    A bidirectional link is added between two nodes iff the PRR in *both*
    directions is >= prrThreshold.

    NOTE(review): this function reads the module-level global ``dfHash``
    (defined in the __main__ block) for its output file names — it must be
    set before calling.

    :param matrixDfDict:   output of extractConnectionData().
    :param nodeIds:        list of plain-int node ids.
    :param txConfigLabels: labels identifying a TX config (must contain
                           'modulation' and 'tx_power' for the degree plots).
    :param prrThreshold:   minimum bidirectional PRR for a link.
    :param outputDir:      directory the HTML files are written to.
    """
    print('==== evalConnectivity ====')
    plotDict = {}
    nodeDegreeDict = {}
    for txConfig, d in matrixDfDict.items():
        prrMatrix = d['prr']
        configStr = ', '.join(['{}={}'.format(k, v) for k, v in zip(txConfigLabels, txConfig)])
        print('=== {} ==='.format(configStr))

        # consruct graph from PRR data
        g = nx.Graph()
        for node1 in nodeIds:
            g.add_node(node1) # ensure that all nodes are contained in the graph
            for node2 in nodeIds:
                if node1 == node2:
                    continue
                # connection is only considered if PRR in both directions is higher than threshold
                prr1 = prrMatrix.loc[(node1, node2)]
                prr2 = prrMatrix.loc[(node2, node1)]
                if np.isnan(prr1) or np.isnan(prr2):
                    continue
                prrMin = min(prr1, prr2)
                if prrMin >= prrThreshold:
                    # Notes: Adding an edge that already exists updates the edge data
                    g.add_edge(node1, node2)

        ## plot the obtained graph
        configStr = ', '.join(['{}={}'.format(k, v) for k, v in zip(txConfigLabels, txConfig)])
        plot = plotting.figure(title='txConfig: {}'.format(configStr), x_range=(-1.1,1.1), y_range=(-1.1,1.1), plot_height=PLOT_HEIGHT, aspect_ratio=1)
        pos = nx.circular_layout(g)
        # pos = nx.fruchterman_reingold_layout(g)
        graph = plotting.from_networkx(g, pos, scale=2, center=(0,0))
        plot.renderers.append(graph)
        # add node labels to the graph
        x, y = zip(*graph.layout_provider.graph_layout.values())
        node_labels = list(g.nodes())
        source = ColumnDataSource({'x': x, 'y': y, 'label': node_labels})
        labels = LabelSet(x='x', y='y', text='label', source=source, background_fill_color='white')
        plot.renderers.append(labels)
        # collect
        plotDict[txConfig] = plot

        # determine connectivity metrics
        # networkEdegeConn = nx.edge_connectivity(g) # overall edge connectivity
        nodeDegree = {}
        for nodeId in nodeIds:
            nodeDegree[nodeId] = g.degree[nodeId]
        nodeDegreeDict[txConfig] = nodeDegree
        vals = list(nodeDegree.values())
        print('nodeDegree: min={:.2f}, mean={:.2f}, max={:.2f}'.format(np.min(vals), np.mean(vals), np.max(vals)))

    # save all network graphs to html
    htmlPath = os.path.join(outputDir, 'prr_connectivity_graph_{}.html'.format(dfHash[:8]))
    os.makedirs(os.path.split(htmlPath)[0], exist_ok=True)
    plotting.output_file(htmlPath)
    infoDiv = Div(text='prrThreshold={}'.format(prrThreshold))
    plotting.save(column([infoDiv] + list(plotDict.values())))

    ## plot nodeDegree data
    # create df with nodeDegree data (one row per txConfig)
    nodeDegreeDf = pd.DataFrame()
    for idx, label in enumerate(txConfigLabels):
        nodeDegreeDf[label] = [e[idx] for e in list(nodeDegreeDict.keys())]
    nodeDegreeDf['nodeDegreeAvg'] = [np.mean(list(e.values())) for e in nodeDegreeDict.values()]
    nodeDegreeDf['nodeDegreeMax'] = [np.max(list(e.values())) for e in nodeDegreeDict.values()]
    nodeDegreeDf['nodeDegreeMin'] = [np.min(list(e.values())) for e in nodeDegreeDict.values()]
    nodeDegreeDf['nodeDegreeVals'] = [list(e.values()) for e in nodeDegreeDict.values()]

    # create all plots
    plotList = []
    aggP = plotting.figure(plot_height=PLOT_HEIGHT, plot_width=PLOT_HEIGHT) # aggregated plot
    color = Category10_10.__iter__()
    for modulation, groupDf in nodeDegreeDf.groupby(by=['modulation']):
        source = ColumnDataSource(groupDf)
        ## aggregated plot (avg only)
        col = next(color)
        aggP.line(x='tx_power', y='nodeDegreeAvg', source=source, legend_label='modulation={}'.format(modulation), line_color=col, )
        aggP.circle(x='tx_power', y='nodeDegreeAvg', source=source, legend_label='modulation={}'.format(modulation), color=col)
        ## violing plots
        # violin plot requires list keys and values (list of list is does not work out-of-the-box)
        kList = []
        vList = []
        for idx, row in groupDf.iterrows():
            kList += [row['tx_power']] * len(row['nodeDegreeVals'])
            vList += row['nodeDegreeVals']
        hp = hv.Violin((kList, vList), kdims='tx_power', vdims='nodeDegreeVals')
        hp.opts(title="Node Degree (modulation={})".format(modulation))
        hp.opts(opts.Violin(inner='stick', cut=0.1, bandwidth=0.1))
        p = hv.render(hp)
        p.plot_height=PLOT_HEIGHT
        p.plot_width=PLOT_HEIGHT
        p.xaxis.axis_label = 'tx_power config [dBm]'
        p.yaxis.axis_label = 'Node Degree'
        plotList.append(p)
    aggP.title.text = 'Node Degree (all modulations, avg only)'
    aggP.xgrid[0].grid_line_color=None
    aggP.ygrid[0].grid_line_alpha=0.5
    aggP.xaxis.axis_label = 'tx_power config [dBm]'
    aggP.yaxis.axis_label = 'Node Degree (avg)'
    aggP.legend.location = "top_left"
    aggP.legend.click_policy="hide"
    # plot all plots to a single HTML file
    htmlPath = os.path.join(outputDir, 'prr_connectivity_nodeDegree_{}.html'.format(dfHash[:8]))
    os.makedirs(outputDir, exist_ok=True)
    plotting.output_file(htmlPath)
    infoDiv = Div(text='prrThreshold={}'.format(prrThreshold))
    plotting.save(column([infoDiv, gridplot([aggP] + plotList, ncols=2)]))
################################################################################
# Main
################################################################################
if __name__ == "__main__":
# check arguments
if len(sys.argv) < 2:
print("no dataset file specified!")
sys.exit(1)
elif len(sys.argv) > 2:
print("too many arguments provided!")
sys.exit(1)
datasetFile = sys.argv[1]
outputDir = os.path.split(datasetFile)[0]
# load data from dataset
df = pd.read_pickle(datasetFile)
dfHash = getDfHash(df)
# get all node IDs
nodeIds = [int(e) for e in sorted(df.node_id.unique())] # explicitely convert to python int since bokeh from_networkx does not work with int64
numNodes = len(nodeIds)
# overview of available data
print('==== Overview of available data ====')
for modulation, grp in df.groupby(by=['modulation']):
print('== modulation={} =='.format(modulation))
c = Counter(grp.test_id.to_list())
for k, v in dict(c).items():
print('Test {}: {:>10} rows'.format(k, v))
# define txConfig labels which are used for grouping the results
txConfigLabels = ['modulation', 'tx_power', 'n_tx', 'num_hops']
## extract data per connection (Tx, Rx)
print('==== extractConnectionData ====')
matrixDfDict = extractConnectionData(df, txConfigLabels)
## save matrix data as colored matrices to html files
# FRR
saveMatricesToHtml(
matrixDfDict,
txConfigLabels,
dfHash,
matrixNames=('frr', 'numFloods'),
titles=('FRR Matrix', 'Number of Floods'),
cmaps=('inferno', 'YlGnBu'),
formats=('{:.1f}', '{:.0f}'),
applymaps=(lambda x: '', lambda x: 'background: white' if pd.isnull(x) else ''),
outputDir=outputDir,
)
# hop distance
saveMatricesToHtml(
matrixDfDict,
txConfigLabels,
dfHash,
matrixNames=('hopDistance', 'numFloodsSucc'),
titles=('Hop Distance Matrix (avg rx_idx)', 'Number of Successfully Received Floods'),
cmaps=('inferno_r', 'YlGnBu'),
formats=('{:.1f}', '{:.0f}'),
applymaps=(lambda x: 'background: white' if pd.isnull(x) else '', lambda x: 'background: white' if pd.isnull(x) else ''),
outputDir=outputDir,
)
# PRR
saveMatricesToHtml(
matrixDfDict,
txConfigLabels,
dfHash,
matrixNames=('prr', 'numFloods'),
titles=('PRR Matrix (num of received floods with rx_idx=0)', 'Number of Floods'),
cmaps=('inferno', 'YlGnBu'),
formats=('{:.1f}', '{:.0f}'),
applymaps=(lambda x: '', lambda x: 'background: white' if pd.isnull(x) else ''),
outputDir=outputDir,
)
## extract and output network graph and connectivity data
evalConnectivity(matrixDfDict, nodeIds, txConfigLabels, prrThreshold=0.95, outputDir=outputDir)
| [
"bokeh.models.ColumnDataSource",
"numpy.sum",
"numpy.empty",
"numpy.isnan",
"bokeh.plotting.output_file",
"numpy.mean",
"pandas.DataFrame",
"holoviews.opts.Violin",
"holoviews.extension",
"numpy.append",
"hashlib.sha256",
"numpy.max",
"holoviews.render",
"dominate.util.raw",
"bokeh.layou... | [((2022, 2043), 'holoviews.extension', 'hv.extension', (['"""bokeh"""'], {}), "('bokeh')\n", (2034, 2043), True, 'import holoviews as hv\n'), ((2866, 2901), 'numpy.append', 'np.append', (['colsArray', 'dfValuesArray'], {}), '(colsArray, dfValuesArray)\n', (2875, 2901), True, 'import numpy as np\n'), ((4061, 4074), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4072, 4074), False, 'from collections import OrderedDict, Counter\n'), ((12276, 12306), 'bokeh.plotting.output_file', 'plotting.output_file', (['htmlPath'], {}), '(htmlPath)\n', (12296, 12306), False, 'from bokeh import plotting\n'), ((12518, 12532), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12530, 12532), True, 'import pandas as pd\n'), ((13088, 13152), 'bokeh.plotting.figure', 'plotting.figure', ([], {'plot_height': 'PLOT_HEIGHT', 'plot_width': 'PLOT_HEIGHT'}), '(plot_height=PLOT_HEIGHT, plot_width=PLOT_HEIGHT)\n', (13103, 13152), False, 'from bokeh import plotting\n'), ((13183, 13207), 'bokeh.palettes.Category10_10.__iter__', 'Category10_10.__iter__', ([], {}), '()\n', (13205, 13207), False, 'from bokeh.palettes import Category10_10\n'), ((14861, 14898), 'os.makedirs', 'os.makedirs', (['outputDir'], {'exist_ok': '(True)'}), '(outputDir, exist_ok=True)\n', (14872, 14898), False, 'import os\n'), ((14903, 14933), 'bokeh.plotting.output_file', 'plotting.output_file', (['htmlPath'], {}), '(htmlPath)\n', (14923, 14933), False, 'from bokeh import plotting\n'), ((15593, 15620), 'pandas.read_pickle', 'pd.read_pickle', (['datasetFile'], {}), '(datasetFile)\n', (15607, 15620), True, 'import pandas as pd\n'), ((2807, 2850), 'pandas.util.hash_pandas_object', 'pd.util.hash_pandas_object', (['df'], {'index': '(False)'}), '(df, index=False)\n', (2833, 2850), True, 'import pandas as pd\n'), ((10046, 10056), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (10054, 10056), True, 'import networkx as nx\n'), ((11066, 11087), 'networkx.circular_layout', 'nx.circular_layout', (['g'], 
{}), '(g)\n', (11084, 11087), True, 'import networkx as nx\n'), ((11154, 11208), 'bokeh.plotting.from_networkx', 'plotting.from_networkx', (['g', 'pos'], {'scale': '(2)', 'center': '(0, 0)'}), '(g, pos, scale=2, center=(0, 0))\n', (11176, 11208), False, 'from bokeh import plotting\n'), ((11404, 11460), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'x': x, 'y': y, 'label': node_labels}"], {}), "({'x': x, 'y': y, 'label': node_labels})\n", (11420, 11460), False, 'from bokeh.models import ColumnDataSource, LabelSet, Div\n'), ((11479, 11566), 'bokeh.models.LabelSet', 'LabelSet', ([], {'x': '"""x"""', 'y': '"""y"""', 'text': '"""label"""', 'source': 'source', 'background_fill_color': '"""white"""'}), "(x='x', y='y', text='label', source=source, background_fill_color=\n 'white')\n", (11487, 11566), False, 'from bokeh.models import ColumnDataSource, LabelSet, Div\n'), ((13297, 13322), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['groupDf'], {}), '(groupDf)\n', (13313, 13322), False, 'from bokeh.models import ColumnDataSource, LabelSet, Div\n'), ((13978, 14045), 'holoviews.Violin', 'hv.Violin', (['(kList, vList)'], {'kdims': '"""tx_power"""', 'vdims': '"""nodeDegreeVals"""'}), "((kList, vList), kdims='tx_power', vdims='nodeDegreeVals')\n", (13987, 14045), True, 'import holoviews as hv\n'), ((14198, 14211), 'holoviews.render', 'hv.render', (['hp'], {}), '(hp)\n', (14207, 14211), True, 'import holoviews as hv\n'), ((15371, 15382), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15379, 15382), False, 'import sys\n'), ((15524, 15550), 'os.path.split', 'os.path.split', (['datasetFile'], {}), '(datasetFile)\n', (15537, 15550), False, 'import os\n'), ((2913, 2930), 'hashlib.sha256', 'hashlib.sha256', (['l'], {}), '(l)\n', (2927, 2930), False, 'import hashlib\n'), ((4334, 4364), 'numpy.empty', 'np.empty', (['(numNodes, numNodes)'], {}), '((numNodes, numNodes))\n', (4342, 4364), True, 'import numpy as np\n'), ((4461, 4491), 'numpy.empty', 'np.empty', 
(['(numNodes, numNodes)'], {}), '((numNodes, numNodes))\n', (4469, 4491), True, 'import numpy as np\n'), ((4589, 4619), 'numpy.empty', 'np.empty', (['(numNodes, numNodes)'], {}), '((numNodes, numNodes))\n', (4597, 4619), True, 'import numpy as np\n'), ((4701, 4731), 'numpy.empty', 'np.empty', (['(numNodes, numNodes)'], {}), '((numNodes, numNodes))\n', (4709, 4731), True, 'import numpy as np\n'), ((4812, 4842), 'numpy.empty', 'np.empty', (['(numNodes, numNodes)'], {}), '((numNodes, numNodes))\n', (4820, 4842), True, 'import numpy as np\n'), ((5752, 5777), 'numpy.sum', 'np.sum', (['(dfConn.rx_cnt > 0)'], {}), '(dfConn.rx_cnt > 0)\n', (5758, 5777), True, 'import numpy as np\n'), ((7541, 7601), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'prrMatrix', 'index': 'nodeIds', 'columns': 'nodeIds'}), '(data=prrMatrix, index=nodeIds, columns=nodeIds)\n', (7553, 7601), True, 'import pandas as pd\n'), ((7632, 7692), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'frrMatrix', 'index': 'nodeIds', 'columns': 'nodeIds'}), '(data=frrMatrix, index=nodeIds, columns=nodeIds)\n', (7644, 7692), True, 'import pandas as pd\n'), ((7723, 7791), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'hopDistanceMatrix', 'index': 'nodeIds', 'columns': 'nodeIds'}), '(data=hopDistanceMatrix, index=nodeIds, columns=nodeIds)\n', (7735, 7791), True, 'import pandas as pd\n'), ((7822, 7888), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'numFloodsMatrix', 'index': 'nodeIds', 'columns': 'nodeIds'}), '(data=numFloodsMatrix, index=nodeIds, columns=nodeIds)\n', (7834, 7888), True, 'import pandas as pd\n'), ((7919, 7989), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'numFloodsSuccMatrix', 'index': 'nodeIds', 'columns': 'nodeIds'}), '(data=numFloodsSuccMatrix, index=nodeIds, columns=nodeIds)\n', (7931, 7989), True, 'import pandas as pd\n'), ((8319, 8338), 'dominate.util.raw', 'raw', (['htmlStyleBlock'], {}), '(htmlStyleBlock)\n', (8322, 8338), False, 'from dominate.util import raw\n'), ((9492, 
9515), 'os.path.split', 'os.path.split', (['htmlPath'], {}), '(htmlPath)\n', (9505, 9515), False, 'import os\n'), ((12229, 12252), 'os.path.split', 'os.path.split', (['htmlPath'], {}), '(htmlPath)\n', (12242, 12252), False, 'import os\n'), ((14134, 14184), 'holoviews.opts.Violin', 'opts.Violin', ([], {'inner': '"""stick"""', 'cut': '(0.1)', 'bandwidth': '(0.1)'}), "(inner='stick', cut=0.1, bandwidth=0.1)\n", (14145, 14184), False, 'from holoviews import opts\n'), ((15465, 15476), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15473, 15476), False, 'import sys\n'), ((6124, 6146), 'pandas.isnull', 'pd.isnull', (['hopDistance'], {}), '(hopDistance)\n', (6133, 6146), True, 'import pandas as pd\n'), ((12038, 12050), 'numpy.min', 'np.min', (['vals'], {}), '(vals)\n', (12044, 12050), True, 'import numpy as np\n'), ((12052, 12065), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (12059, 12065), True, 'import numpy as np\n'), ((12067, 12079), 'numpy.max', 'np.max', (['vals'], {}), '(vals)\n', (12073, 12079), True, 'import numpy as np\n'), ((15032, 15068), 'bokeh.layouts.gridplot', 'gridplot', (['([aggP] + plotList)'], {'ncols': '(2)'}), '([aggP] + plotList, ncols=2)\n', (15040, 15068), False, 'from bokeh.layouts import column, gridplot\n'), ((10490, 10504), 'numpy.isnan', 'np.isnan', (['prr1'], {}), '(prr1)\n', (10498, 10504), True, 'import numpy as np\n'), ((10508, 10522), 'numpy.isnan', 'np.isnan', (['prr2'], {}), '(prr2)\n', (10516, 10522), True, 'import numpy as np\n'), ((9278, 9288), 'dominate.util.raw', 'raw', (['html0'], {}), '(html0)\n', (9281, 9288), False, 'from dominate.util import raw\n'), ((9362, 9372), 'dominate.util.raw', 'raw', (['html1'], {}), '(html1)\n', (9365, 9372), False, 'from dominate.util import raw\n'), ((16858, 16870), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (16867, 16870), True, 'import pandas as pd\n'), ((17299, 17311), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (17308, 17311), True, 'import pandas as pd\n'), ((17354, 
17366), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (17363, 17366), True, 'import pandas as pd\n'), ((17781, 17793), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (17790, 17793), True, 'import pandas as pd\n'), ((2719, 2744), 'hashlib.sha256', 'hashlib.sha256', (['colsBytes'], {}), '(colsBytes)\n', (2733, 2744), False, 'import hashlib\n')] |
#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Main DrQA reader training script."""
import socket
import argparse
import torch
import numpy as np
import json
import os
import sys
import subprocess
import logging
from tqdm import tqdm
import pickle
from collections import defaultdict
from msr.reader import utils, vector, config, data
from msr.reader.model import Model
from multi_corpus import MultiCorpus
from torch.utils.data.sampler import SequentialSampler, RandomSampler
# Module-wide logger. Uses the *root* logger so the console/file handlers
# configured in the __main__ block apply to all msr.* submodules as well.
logger = logging.getLogger()
# ------------------------------------------------------------------------------
# Training arguments.
# ------------------------------------------------------------------------------
# Defaults
ROOT_DIR = '.'  # project root; only referenced by the commented-out copy step in set_defaults()
def str2bool(v):
    """Parse a commandline string as a boolean.

    Accepts (case-insensitively) 'yes', 'true', 't', '1' or 'y' as True;
    anything else is False. Registered with argparse as type 'bool'.
    """
    return v.lower() in {'yes', 'true', 't', '1', 'y'}
def add_train_args(parser: argparse.ArgumentParser):
    """Adds commandline arguments pertaining to training a model. These
    are different from the arguments dictating the model architecture
    (see config.add_model_args for those).
    """
    # Strings like 'yes'/'no' for type='bool' options are parsed via str2bool.
    parser.register('type', 'bool', str2bool)
    # Runtime environment
    # NOTE: several flags below are 0/1 ints, not bools; set_defaults()
    # converts them into real booleans after parsing.
    runtime = parser.add_argument_group('Environment')
    runtime.add_argument('--no-cuda', type='bool', default=False,
                         help='Train on CPU, even if GPUs are available.')
    runtime.add_argument('--gpu', type=int, default=-1,
                         help='Run on a specific GPU')
    runtime.add_argument('--data-workers', type=int, default=10,
                         help='Number of subprocesses for data loading')
    runtime.add_argument('--parallel', type='bool', default=False,
                         help='Use DataParallel on all available GPUs')
    runtime.add_argument('--random-seed', type=int, default=1013,
                         help=('Random seed for all numpy/torch/cuda'
                               'operations (for reproducibility)'))
    runtime.add_argument('--num-epochs', type=int, default=40,
                         help='Train data iterations')
    runtime.add_argument('--batch_size', type=int, default=64,
                         help='Batch size for training')
    runtime.add_argument('--test_batch_size', type=int, default=2,
                         help='Batch size during validation/testing')
    runtime.add_argument('--multi_step_reasoning_steps', type=int, default=3,
                         help='Number of steps of mult-step-reasoning')
    runtime.add_argument('--multi_step_reading_steps', type=int, default=1,
                         help='Number of steps of mult-step-reading')
    runtime.add_argument('--dropout-san-prediction', type=float, default=0.4,
                         help='During training, dropout few predictions')
    runtime.add_argument('--num_gru_layers', type=int, default=3,
                         help='Number of layers of GRU')
    runtime.add_argument('--domain', type=str, default="web-open",
                         help='wiki/web/web-open')
    runtime.add_argument('--dataset_name', type=str, default="triviaqa",
                         help='triviaqa/searchqa/')
    runtime.add_argument('--freeze_reader', type=int, default=0,
                         help='Donot train the reader?')
    runtime.add_argument('--fine_tune_RL', type=int, default=0,
                         help='Keep everything fixed, fine tune reasoner with RL')
    runtime.add_argument('--test', type=int, default=0,
                         help='eval on test data?')
    runtime.add_argument('--drqa_plus', type=int, default=1,
                         help='Use reader of DrQA++')
    runtime.add_argument('--num_positive_paras', type=int, default=1,
                         help='DrQA++ relies on few paras containing the answer, '
                              'returned by the retriever during training. Default 1')
    runtime.add_argument('--num_paras_test', type=int, default=15,
                         help='Number of paras to read at test time. Default 1')
    runtime.add_argument('--num_low_ranked_paras', type=int, default=0,
                         help='DrQA++ relies on few low ranked paras by the retriever during training.')
    runtime.add_argument('--cheat', type=int, default=0,
                         help='at test time, overwrite the retriever output and put correct paras containign annotations')
    # Files
    files = parser.add_argument_group('Filesystem')
    files.add_argument('--model_dir', type=str, default="",
                       help='Directory for saved models/checkpoints/logs')
    files.add_argument('--model-name', type=str, default='',
                       help='Unique model identifier (.mdl, .txt, .checkpoint)')
    files.add_argument('--data_dir', type=str,
                       help='Directory of training/validation data')
    files.add_argument('--train-file', type=str,
                       default='SQuAD-v1.1-train-processed-corenlp.txt',
                       help='Preprocessed train file')
    files.add_argument('--dev-file', type=str,
                       default='SQuAD-v1.1-dev-processed-corenlp.txt',
                       help='Preprocessed dev file')
    files.add_argument('--dev-json', type=str, default='SQuAD-v1.1-dev.json',
                       help=('Unprocessed dev file to run validation '
                             'while training on'))
    files.add_argument('--embed-dir', type=str, default="",
                       help='Directory of pre-trained embedding files')
    files.add_argument('--embedding-file', type=str,
                       default='crawl-300d-2M.txt',
                       help='Space-separated pretrained embeddings file')
    files.add_argument('--official_output_json', type=str, default="official_output.json",
                       help='Directory of pre-trained embedding files')
    files.add_argument('--saved_para_vectors_dir', type=str,
                       help='Directory where para and query vectors are saved by the retrievers')
    # Saving + loading
    save_load = parser.add_argument_group('Saving/Loading')
    save_load.add_argument('--checkpoint', type='bool', default=True,
                           help='Save model + optimizer state after each epoch')
    save_load.add_argument('--pretrained', type=str, default='',
                           help='Path to a pretrained model to warm-start with')
    save_load.add_argument('--expand-dictionary', type='bool', default=False,
                           help='Expand dictionary of pretrained model to ' +
                                'include training/dev words of new data')
    save_load.add_argument('--create_vocab', type=int, default=0,
                           help='Create vocab or load saved')
    save_load.add_argument('--vocab_dir', type=str, default="")
    save_load.add_argument('--embedding_table_path', type=str, default='embedding_table.mdl')
    save_load.add_argument('--save_pickle_files', type=int, default=0,
                           help='Save the processed train, dev files for faster loading')
    save_load.add_argument('--load_pickle_files', type=int, default=1,
                           help='Load the processed train, dev files for faster loading')
    save_load.add_argument('--small', type=int, default=0,
                           help='Experiment on small files (for debugging)')
    # Data preprocessing
    preprocess = parser.add_argument_group('Preprocessing')
    preprocess.add_argument('--uncased-question', type='bool', default=False,
                            help='Question words will be lower-cased')
    preprocess.add_argument('--uncased-doc', type='bool', default=False,
                            help='Document words will be lower-cased')
    preprocess.add_argument('--restrict-vocab', type='bool', default=True,
                            help='Only use pre-trained words in embedding_file')
    preprocess.add_argument('--use_pretrained_para_clf', type=int, default=1, help=" use pretrained para clf...")
    preprocess.add_argument('--require_answer', type=int, default=0,
                            help="Retriever only sends paragraphs which have the answers")
    # General
    general = parser.add_argument_group('General')
    general.add_argument('--official-eval', type='bool', default=True,
                         help='Validate with official SQuAD eval')
    general.add_argument('--eval_only', type=int, default=0,
                         help='Evaluate only after loading a pretrained model')
    general.add_argument('--valid-metric', type=str, default='f1',
                         help='The evaluation metric used for model selection')
    general.add_argument('--display-iter', type=int, default=25,
                         help='Log state after every <display_iter> epochs')
    general.add_argument('--sort-by-len', type='bool', default=True,
                         help='Sort batches by length for speed')
def make_data_loader(args, exs, train_time=False):
    """Wrap reader examples in a torch DataLoader.

    At training time the examples are shuffled (RandomSampler) and batched
    with args.batch_size; otherwise they are iterated in order
    (SequentialSampler) with args.test_batch_size.
    """
    dataset = data.ReaderDataset(
        args,
        exs,
        args.word_dict,
        args.feature_dict,
        single_answer=False,
        train_time=train_time
    )
    if train_time:
        sampler = RandomSampler(dataset)
        batch_size = args.batch_size
    else:
        sampler = SequentialSampler(dataset)
        batch_size = args.test_batch_size
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=0,
        collate_fn=vector.batchify,
        pin_memory=True,
    )
def set_defaults(args):
    """Make sure the commandline arguments are initialized properly.

    Derives dataset/vocab/embedding/model paths from the raw commandline
    values, creates the experiment directory (skipped for small debugging
    runs to save disk space), and converts the 0/1 integer flags into real
    booleans.  Mutates and returns ``args``.
    """
    # Derived data/vocab/embedding locations.
    args.vocab_dir = os.path.join(args.data_dir, args.dataset_name, "vocab", args.domain)
    args.embedding_file = os.path.join(args.data_dir, args.dataset_name, "embeddings", args.embedding_file)
    args.embedding_table_path = os.path.join(args.data_dir, args.dataset_name, "embeddings", args.domain,
                                              args.embedding_table_path)
    args.origin_data_dir = args.data_dir
    args.data_dir = os.path.join(args.data_dir, args.dataset_name, "data", args.domain)
    # A precomputed embedding table may or may not already exist on disk.
    args.embedding_table = os.path.exists(args.embedding_table_path)
    if not args.model_name:
        import uuid
        import time
        # Unique run id: date prefix + random suffix.
        args.model_name = time.strftime("%Y%m%d-") + str(uuid.uuid4())[:8]
    if args.small == 0:  # only save on full experiments, saves disk space
        args.model_dir = os.path.join(args.model_dir, args.dataset_name, "expts", args.model_name)
        subprocess.call(['mkdir', '-p', args.model_dir])
        # subprocess.call(['cp', '-r', ROOT_DIR, args.model_dir])
        # Set log + model file names
        args.log_file = os.path.join(args.model_dir, 'log.txt')
        args.model_file = os.path.join(args.model_dir, 'model.mdl')
    else:
        args.model_file = ""
        args.model_dir = ""
        args.log_file = None
    args.official_output_json = os.path.join(args.model_dir, args.official_output_json)
    # Convert the 0/1 integer commandline flags into booleans.
    args.use_pretrained_para_clf = (args.use_pretrained_para_clf == 1)
    args.create_vocab = (args.create_vocab == 1)
    args.eval_only = (args.eval_only == 1)
    args.require_answer = (args.require_answer == 1)
    args.drqa_plus = (args.drqa_plus == 1)
    # args.saved_para_vectors_dir = os.path.join(DATA_DIR, args.dataset_name, 'paragraph_vectors', args.domain)
    args.freeze_reader = (args.freeze_reader == 1)
    args.cheat = (args.cheat == 1)
    args.fine_tune_RL = (args.fine_tune_RL == 1)
    if args.fine_tune_RL:
        # RL fine-tuning assumes the reader weights are frozen.
        assert args.freeze_reader
    args.test = (args.test == 1)
    return args
# ------------------------------------------------------------------------------
# Initalization from scratch.
# ------------------------------------------------------------------------------
def init_from_scratch(args, train_exs, dev_exs):
    """New model, new data, new dictionary.

    Builds the feature dict from the training annotations, the word dict
    from train + dev examples, then constructs a fresh Model and
    (optionally) initializes its embeddings from a pretrained file.
    """
    # Feature dictionary comes from the training-set annotations only.
    logger.info('-' * 100)
    logger.info('Generate features')
    feats = utils.build_feature_dict(args, train_exs)
    logger.info('Num features = %d' % len(feats))
    logger.info(feats)
    # Vocabulary covers questions + documents of both splits.
    logger.info('-' * 100)
    logger.info('Build dictionary')
    vocab = utils.build_word_dict(args, train_exs, dev_exs)
    logger.info('Num words = %d' % len(vocab))
    # Instantiate the reader and load pretrained embeddings if configured.
    logger.info('Initializing model')
    reader = Model(args, vocab, feats)
    if args.embedding_file:
        reader.load_embeddings(args, vocab.tokens(), args.embedding_file)
    return reader
# ------------------------------------------------------------------------------
# Train loop.
# ------------------------------------------------------------------------------
def train(args, data_loader, model, global_stats, ground_truths_map):
    """Run through one epoch of model training with the provided data loader."""
    # Running average of the loss and wall-clock timer for this epoch.
    train_loss = utils.AverageMeter()
    epoch_time = utils.Timer()
    for batch_idx, batch in enumerate(data_loader):
        update_result = model.update(batch, epoch_counter=global_stats['epoch'],
                                     ground_truths_map=ground_truths_map)
        if update_result is None:
            # Nothing to record for this batch.
            continue
        train_loss.update(*update_result)
        if batch_idx % args.display_iter == 0:
            logger.info('train: Epoch = %d | iter = %d/%d | ' %
                        (global_stats['epoch'], batch_idx, len(data_loader)) +
                        'loss = %.2f | elapsed time = %.2f (s)' %
                        (train_loss.avg, global_stats['timer'].time()))
            train_loss.reset()
    logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %
                (global_stats['epoch'], epoch_time.time()))
    # Checkpoint after each epoch on full runs; skipped for small/debug runs.
    if args.checkpoint and (args.small == 0):
        logger.info("Checkpointing...")
        model.checkpoint(args.model_file + '.checkpoint',
                         global_stats['epoch'] + 1)
def validate_official(args, data_loader, model, global_stats,
                      offsets, texts, answers, ground_truths_map=None, official_eval_output=False):
    """Run one full official validation. Uses exact spans and same
    exact match/F1 score computation as in the SQuAD script.

    The answer for an example is the span *string* with the highest score
    summed over all reasoning steps: identical strings predicted at
    different steps/paragraphs have their scores aggregated in
    ``span_scores_map``.

    Extra arguments:
        offsets: The character start/end indices for the tokens in each context.
        texts: Map of qid --> raw text of examples context (matches offsets).
        answers: Map of qid --> list of accepted answers.
        ground_truths_map: Map of qid --> list of gold answer strings.
        official_eval_output: accepted for caller compatibility; not read here.

    Note: ``offsets``, ``texts`` and ``answers`` are not referenced in this
    body (callers in this file pass None for all three).

    Returns:
        dict with 'exact_match' and 'f1' percentages.
    """
    eval_time = utils.Timer()
    f1 = utils.AverageMeter()
    exact_match = utils.AverageMeter()
    # Run through examples
    examples = 0
    official_output_json = {}  # qid -> best predicted answer string
    fout = None
    if args.eval_only:
        # NOTE(review): this file is opened and later closed but never
        # written to anywhere in this function.
        fout = open(os.path.join(args.model_dir, "outputs.txt"), "w")
    for ex in tqdm(data_loader):
        # ex[-1] holds per-example ids (indexed as ex_id[i] below);
        # ex[0] appears to be the document tensor, first dim = batch.
        ex_id, batch_size = ex[-1], ex[0].size(0)
        outputs, query_norms, all_query_vectors = model.predict(ex)
        max_scores, max_spans = [], []
        for i in range(ex[0].size(0)):
            span_scores_map = defaultdict(float)
            max_score_i = float('-inf')
            max_span = None
            for step_counter, output in enumerate(outputs):  # for each time step
                pred_s, pred_e, pred_score, para_ids = output
                start = pred_s[i]
                end = pred_e[i]
                span_scores = pred_score[i]
                doc_tensor = ex[0][i, para_ids[i]]
                for s_counter, (s, e) in enumerate(zip(start, end)):
                    # Decode the predicted token span back into a string.
                    int_words = doc_tensor[s_counter, s:e+1]
                    predicted_span = " ".join(args.word_dict.ind2tok[str(w.item())] for w in int_words)
                    # Aggregate scores of identical span strings and keep the argmax.
                    span_scores_map[predicted_span] += span_scores[s_counter]
                    if max_score_i < span_scores_map[predicted_span]:
                        max_score_i = span_scores_map[predicted_span]
                        max_span = predicted_span
            max_scores.append(max_score_i)
            max_spans.append(max_span)
            # calculate em and f1
            ground_truths = ground_truths_map[ex_id[i]]
            ground_truths = list(set(ground_truths))
            em = utils.metric_max_over_ground_truths(utils.exact_match_score, max_span, ground_truths)
            exact_match.update(em)
            f1.update(utils.metric_max_over_ground_truths(utils.f1_score, max_span, ground_truths))
            examples += 1
            official_output_json[ex_id[i]] = max_span
    if fout is not None:
        fout.close()
    logger.info('dev valid official: Epoch = %d | EM = %.2f | ' %
                (global_stats['epoch'], exact_match.avg * 100) +
                'F1 = %.2f | examples = %d | valid time = %.2f (s)' %
                (f1.avg * 100, examples, eval_time.time()))
    logger.info("Writing official output at {}".format(args.official_output_json))
    json.dump(official_output_json, open(args.official_output_json, "w"))
    return {'exact_match': exact_match.avg * 100, 'f1': f1.avg * 100}
def eval_accuracies(pred_s, target_s, pred_e, target_e):
    """An unofficial evaluation helper.

    Compute exact start/end/complete match accuracies for a batch.

    Args:
        pred_s: predicted start positions, one per example.
        target_s: gold start positions (1D tensor or list of lists).
        pred_e: predicted end positions, one per example.
        target_e: gold end positions (1D tensor or list of lists).

    Returns:
        (start_acc, end_acc, exact_match) percentages in [0, 100].
    """
    # Convert 1D tensors to lists of lists (compatibility)
    if torch.is_tensor(target_s):
        target_s = [[e] for e in target_s]
        target_e = [[e] for e in target_e]
    # Compute accuracies from targets
    batch_size = len(pred_s)
    start = utils.AverageMeter()
    end = utils.AverageMeter()
    em = utils.AverageMeter()
    for i in range(batch_size):
        # Start matches
        start.update(1 if pred_s[i] in target_s[i] else 0)
        # End matches
        end.update(1 if pred_e[i] in target_e[i] else 0)
        # Both start and end must come from the same gold answer span.
        # (Generator instead of the original `any([1 for ...])`, which
        # materialized a throwaway list.)
        matched = any(_s == pred_s[i] and _e == pred_e[i]
                      for _s, _e in zip(target_s[i], target_e[i]))
        em.update(1 if matched else 0)
    return start.avg * 100, end.avg * 100, em.avg * 100
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
def main(args):
    """End-to-end driver: load pickled examples, build or restore the model,
    construct data loaders, read the ground-truth answer files, then either
    run a single evaluation pass (--eval_only) or the train/validate loop,
    saving the model whenever the validation metric improves.
    """
    # --------------------------------------------------------------------------
    # DATA
    logger.info('-' * 100)
    logger.info('Load data files')
    max_para_len = 400  # NOTE(review): assigned but never used in this function
    logger.info("Domain: {}".format(args.domain))
    train_exs, dev_exs = None, None
    # Small runs use truncated pickles; --test switches dev to the test split.
    if args.small == 1:
        train_file_name = "processed_train_small.pkl"
        dev_file_name = "processed_dev_small.pkl"
    else:
        train_file_name = "processed_train.pkl"
        dev_file_name = "processed_test.pkl" if args.test else "processed_dev.pkl"
    logger.info("Loading pickle files")
    fin = open(os.path.join(args.data_dir, train_file_name), "rb")
    train_exs = pickle.load(fin)
    fin.close()
    fin = open(os.path.join(args.data_dir, dev_file_name), "rb")
    dev_exs = pickle.load(fin)
    fin.close()
    logger.info("Loading done!")
    logger.info('Num train examples = %d' % len(train_exs.questions))
    # dev_exs = utils.load_data(args, args.dev_file)
    logger.info('Num dev examples = %d' % len(dev_exs.questions))
    # --------------------------------------------------------------------------
    # MODEL
    logger.info('-' * 100)
    start_epoch = 0
    if args.checkpoint and os.path.isfile(args.model_file + '.checkpoint'):
        # Just resume training, no modifications.
        logger.info('Found a checkpoint...')
        checkpoint_file = args.model_file + '.checkpoint'
        model, start_epoch = Model.load_checkpoint(checkpoint_file, args)
    else:
        # Training starts fresh. But the model state is either pretrained or
        # newly (randomly) initialized.
        if args.pretrained:
            logger.info('Using pretrained model...')
            model = Model.load(args.pretrained, args)
            if args.expand_dictionary:
                logger.info('Expanding dictionary for new data...')
                # Add words in training + dev examples
                words = utils.load_words(args, train_exs + dev_exs)
                added = model.expand_dictionary(words)
                # Load pretrained embeddings for added words
                if args.embedding_file:
                    model.load_embeddings(added, args.embedding_file)
        else:
            logger.info('Training model from scratch...')
            model = init_from_scratch(args, train_exs, dev_exs)
        # Set up partial tuning of embeddings
        if args.tune_partial > 0:
            logger.info('-' * 100)
            logger.info('Counting %d most frequent question words' %
                        args.tune_partial)
            top_words = utils.top_question_words(
                args, train_exs, model.word_dict
            )
            for word in top_words[:5]:
                logger.info(word)
            logger.info('...')
            for word in top_words[-6:-1]:
                logger.info(word)
            model.tune_embeddings([w[0] for w in top_words])
        # Set up optimizer
        model.init_optimizer()
    # Use the GPU?
    if args.cuda:
        model.cuda()
    # Use multiple GPUs?
    if args.parallel:
        model.parallelize()
    # --------------------------------------------------------------------------
    # DATA ITERATORS
    # Two datasets: train and dev. If we sort by length it's faster.
    # -------------------------------------------------------------------------
    # PRINT CONFIG
    logger.info('-' * 100)
    logger.info('CONFIG:\n%s' %
                json.dumps(vars(args), indent=4, sort_keys=True))
    logger.info('-' * 100)
    logger.info('Make data loaders')
    # Expose the dicts on args so make_data_loader/ReaderDataset can see them.
    args.word_dict = model.word_dict
    args.feature_dict = model.feature_dict
    # train_dataset = data.ReaderDataset(train_exs, model, single_answer=True)
    train_loader = make_data_loader(args, train_exs, train_time=True)
    dev_loader = make_data_loader(args, dev_exs)
    # --------------------------------------------------------------------------
    # TRAIN/VALID LOOP
    stats = {'timer': utils.Timer(), 'epoch': 0, 'best_valid': 0}
    logger.info('-' * 100)
    logger.info("Reading ground truths for train")
    # Each line: "<qid>\t<answers>"; multiple answers joined by "$@#$@#".
    fin = open(os.path.join(args.data_dir, "train_testing.txt"))
    train_ground_truths_map = {}
    for line in fin:
        line = line.strip()
        qid, ground_truth = line.split("\t")
        train_ground_truths_map[qid] = ground_truth.split(
            "$@#$@#")  # this is the special char with which the gt ans are separated
    fin.close()
    logger.info("Reading ground truths for dev")
    fin = open(os.path.join(args.data_dir, "test_testing.txt")) if args.test else open(
        os.path.join(args.data_dir, "dev_testing.txt"))
    dev_ground_truths_map = {}
    for line in fin:
        line = line.strip()
        qid, ground_truth = line.split("\t")
        dev_ground_truths_map[qid] = ground_truth.split(
            "$@#$@#")  # this is the special char with which the gt ans are separated
    fin.close()
    # Evaluation-only mode: one validation pass, then exit.
    if args.eval_only:
        logger.info("Eval only mode")
        result = validate_official(args, dev_loader, model, stats, None, None, None,
                                   ground_truths_map=dev_ground_truths_map, official_eval_output=True)
        logger.info("Exiting...")
        sys.exit(0)
    logger.info('Starting training...')
    for epoch in range(start_epoch, args.num_epochs):
        stats['epoch'] = epoch
        # Train
        train(args, train_loader, model, stats, train_ground_truths_map)
        # Validate official
        if args.official_eval:
            result = validate_official(args, dev_loader, model, stats, None, None, None, ground_truths_map=dev_ground_truths_map)
            # Save best valid
            if result[args.valid_metric] > stats['best_valid']:
                logger.info('Best valid: %s = %.2f (epoch %d, %d updates)' %
                            (args.valid_metric, result[args.valid_metric],
                             stats['epoch'], model.updates))
                model.save(args.model_file)
                stats['best_valid'] = result[args.valid_metric]
if __name__ == '__main__':
    # Parse cmdline args and setup environment
    parser = argparse.ArgumentParser(
        'DrQA Document Reader',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    add_train_args(parser)
    config.add_model_args(parser)
    args = parser.parse_args()
    # Derive paths and convert 0/1 flags to booleans in place.
    set_defaults(args)
    # Set cuda
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        torch.cuda.set_device(args.gpu)
    # Set random state
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    if args.cuda:
        torch.cuda.manual_seed(args.random_seed)
    # Set logging
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',
                            '%m/%d/%Y %I:%M:%S %p')
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    logger.addHandler(console)
    if args.log_file:
        # Append when resuming from a checkpoint, otherwise start a fresh log.
        if args.checkpoint:
            logfile = logging.FileHandler(args.log_file, 'a')
        else:
            logfile = logging.FileHandler(args.log_file, 'w')
        logfile.setFormatter(fmt)
        logger.addHandler(logfile)
    logger.info('COMMAND: %s' % ' '.join(sys.argv))
    # Run!
    main(args)
| [
"msr.reader.utils.Timer",
"numpy.random.seed",
"argparse.ArgumentParser",
"msr.reader.utils.metric_max_over_ground_truths",
"msr.reader.utils.build_word_dict",
"msr.reader.utils.build_feature_dict",
"time.strftime",
"msr.reader.utils.load_words",
"logging.Formatter",
"collections.defaultdict",
"... | [((656, 675), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (673, 675), False, 'import logging\n'), ((9068, 9180), 'msr.reader.data.ReaderDataset', 'data.ReaderDataset', (['args', 'exs', 'args.word_dict', 'args.feature_dict'], {'single_answer': '(False)', 'train_time': 'train_time'}), '(args, exs, args.word_dict, args.feature_dict,\n single_answer=False, train_time=train_time)\n', (9086, 9180), False, 'from msr.reader import utils, vector, config, data\n'), ((9404, 9544), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'sampler', 'num_workers': '(0)', 'collate_fn': 'vector.batchify', 'pin_memory': '(True)'}), '(dataset, batch_size=batch_size, sampler=sampler,\n num_workers=0, collate_fn=vector.batchify, pin_memory=True)\n', (9431, 9544), False, 'import torch\n'), ((9754, 9822), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.dataset_name', '"""vocab"""', 'args.domain'], {}), "(args.data_dir, args.dataset_name, 'vocab', args.domain)\n", (9766, 9822), False, 'import os\n'), ((9849, 9935), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.dataset_name', '"""embeddings"""', 'args.embedding_file'], {}), "(args.data_dir, args.dataset_name, 'embeddings', args.\n embedding_file)\n", (9861, 9935), False, 'import os\n'), ((9963, 10067), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.dataset_name', '"""embeddings"""', 'args.domain', 'args.embedding_table_path'], {}), "(args.data_dir, args.dataset_name, 'embeddings', args.domain,\n args.embedding_table_path)\n", (9975, 10067), False, 'import os\n'), ((10170, 10237), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.dataset_name', '"""data"""', 'args.domain'], {}), "(args.data_dir, args.dataset_name, 'data', args.domain)\n", (10182, 10237), False, 'import os\n'), ((10245, 10286), 'os.path.exists', 'os.path.exists', (['args.embedding_table_path'], {}), '(args.embedding_table_path)\n', (10259, 10286), False, 
'import os\n'), ((11109, 11164), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.official_output_json'], {}), '(args.model_dir, args.official_output_json)\n', (11121, 11164), False, 'import os\n'), ((12229, 12270), 'msr.reader.utils.build_feature_dict', 'utils.build_feature_dict', (['args', 'train_exs'], {}), '(args, train_exs)\n', (12253, 12270), False, 'from msr.reader import utils, vector, config, data\n'), ((12513, 12560), 'msr.reader.utils.build_word_dict', 'utils.build_word_dict', (['args', 'train_exs', 'dev_exs'], {}), '(args, train_exs, dev_exs)\n', (12534, 12560), False, 'from msr.reader import utils, vector, config, data\n'), ((12685, 12721), 'msr.reader.model.Model', 'Model', (['args', 'word_dict', 'feature_dict'], {}), '(args, word_dict, feature_dict)\n', (12690, 12721), False, 'from msr.reader.model import Model\n'), ((13284, 13304), 'msr.reader.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (13302, 13304), False, 'from msr.reader import utils, vector, config, data\n'), ((13322, 13335), 'msr.reader.utils.Timer', 'utils.Timer', ([], {}), '()\n', (13333, 13335), False, 'from msr.reader import utils, vector, config, data\n'), ((14827, 14840), 'msr.reader.utils.Timer', 'utils.Timer', ([], {}), '()\n', (14838, 14840), False, 'from msr.reader import utils, vector, config, data\n'), ((14850, 14870), 'msr.reader.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (14868, 14870), False, 'from msr.reader import utils, vector, config, data\n'), ((14889, 14909), 'msr.reader.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (14907, 14909), False, 'from msr.reader import utils, vector, config, data\n'), ((15108, 15125), 'tqdm.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (15112, 15125), False, 'from tqdm import tqdm\n'), ((17573, 17598), 'torch.is_tensor', 'torch.is_tensor', (['target_s'], {}), '(target_s)\n', (17588, 17598), False, 'import torch\n'), ((17766, 17786), 'msr.reader.utils.AverageMeter', 
'utils.AverageMeter', ([], {}), '()\n', (17784, 17786), False, 'from msr.reader import utils, vector, config, data\n'), ((17797, 17817), 'msr.reader.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (17815, 17817), False, 'from msr.reader import utils, vector, config, data\n'), ((17827, 17847), 'msr.reader.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (17845, 17847), False, 'from msr.reader import utils, vector, config, data\n'), ((19260, 19276), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (19271, 19276), False, 'import pickle\n'), ((19372, 19388), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (19383, 19388), False, 'import pickle\n'), ((24709, 24817), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""DrQA Document Reader"""'], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "('DrQA Document Reader', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n", (24732, 24817), False, 'import argparse\n'), ((24866, 24895), 'msr.reader.config.add_model_args', 'config.add_model_args', (['parser'], {}), '(parser)\n', (24887, 24895), False, 'from msr.reader import utils, vector, config, data\n'), ((25115, 25147), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (25129, 25147), True, 'import numpy as np\n'), ((25152, 25187), 'torch.manual_seed', 'torch.manual_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (25169, 25187), False, 'import torch\n'), ((25318, 25391), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s: [ %(message)s ]"""', '"""%m/%d/%Y %I:%M:%S %p"""'], {}), "('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')\n", (25335, 25391), False, 'import logging\n'), ((25434, 25457), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (25455, 25457), False, 'import logging\n'), ((9245, 9271), 'torch.utils.data.sampler.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (9262, 9271), False, 
'from torch.utils.data.sampler import SequentialSampler, RandomSampler\n'), ((9295, 9317), 'torch.utils.data.sampler.RandomSampler', 'RandomSampler', (['dataset'], {}), '(dataset)\n', (9308, 9317), False, 'from torch.utils.data.sampler import SequentialSampler, RandomSampler\n'), ((10614, 10687), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.dataset_name', '"""expts"""', 'args.model_name'], {}), "(args.model_dir, args.dataset_name, 'expts', args.model_name)\n", (10626, 10687), False, 'import os\n'), ((10696, 10744), 'subprocess.call', 'subprocess.call', (["['mkdir', '-p', args.model_dir]"], {}), "(['mkdir', '-p', args.model_dir])\n", (10711, 10744), False, 'import subprocess\n'), ((10872, 10911), 'os.path.join', 'os.path.join', (['args.model_dir', '"""log.txt"""'], {}), "(args.model_dir, 'log.txt')\n", (10884, 10911), False, 'import os\n'), ((10938, 10979), 'os.path.join', 'os.path.join', (['args.model_dir', '"""model.mdl"""'], {}), "(args.model_dir, 'model.mdl')\n", (10950, 10979), False, 'import os\n'), ((19192, 19236), 'os.path.join', 'os.path.join', (['args.data_dir', 'train_file_name'], {}), '(args.data_dir, train_file_name)\n', (19204, 19236), False, 'import os\n'), ((19308, 19350), 'os.path.join', 'os.path.join', (['args.data_dir', 'dev_file_name'], {}), '(args.data_dir, dev_file_name)\n', (19320, 19350), False, 'import os\n'), ((19797, 19844), 'os.path.isfile', 'os.path.isfile', (["(args.model_file + '.checkpoint')"], {}), "(args.model_file + '.checkpoint')\n", (19811, 19844), False, 'import os\n'), ((20028, 20072), 'msr.reader.model.Model.load_checkpoint', 'Model.load_checkpoint', (['checkpoint_file', 'args'], {}), '(checkpoint_file, args)\n', (20049, 20072), False, 'from msr.reader.model import Model\n'), ((22572, 22585), 'msr.reader.utils.Timer', 'utils.Timer', ([], {}), '()\n', (22583, 22585), False, 'from msr.reader import utils, vector, config, data\n'), ((22709, 22757), 'os.path.join', 'os.path.join', (['args.data_dir', 
'"""train_testing.txt"""'], {}), "(args.data_dir, 'train_testing.txt')\n", (22721, 22757), False, 'import os\n'), ((23816, 23827), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (23824, 23827), False, 'import sys\n'), ((25003, 25028), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (25026, 25028), False, 'import torch\n'), ((25055, 25086), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (25076, 25086), False, 'import torch\n'), ((25214, 25254), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (25236, 25254), False, 'import torch\n'), ((10465, 10489), 'time.strftime', 'time.strftime', (['"""%Y%m%d-"""'], {}), "('%Y%m%d-')\n", (10478, 10489), False, 'import time\n'), ((15044, 15087), 'os.path.join', 'os.path.join', (['args.model_dir', '"""outputs.txt"""'], {}), "(args.model_dir, 'outputs.txt')\n", (15056, 15087), False, 'import os\n'), ((15353, 15371), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (15364, 15371), False, 'from collections import defaultdict\n'), ((16491, 16580), 'msr.reader.utils.metric_max_over_ground_truths', 'utils.metric_max_over_ground_truths', (['utils.exact_match_score', 'max_span', 'ground_truths'], {}), '(utils.exact_match_score, max_span,\n ground_truths)\n', (16526, 16580), False, 'from msr.reader import utils, vector, config, data\n'), ((20301, 20334), 'msr.reader.model.Model.load', 'Model.load', (['args.pretrained', 'args'], {}), '(args.pretrained, args)\n', (20311, 20334), False, 'from msr.reader.model import Model\n'), ((21179, 21237), 'msr.reader.utils.top_question_words', 'utils.top_question_words', (['args', 'train_exs', 'model.word_dict'], {}), '(args, train_exs, model.word_dict)\n', (21203, 21237), False, 'from msr.reader import utils, vector, config, data\n'), ((23111, 23158), 'os.path.join', 'os.path.join', (['args.data_dir', '"""test_testing.txt"""'], {}), "(args.data_dir, 
'test_testing.txt')\n", (23123, 23158), False, 'import os\n'), ((23192, 23238), 'os.path.join', 'os.path.join', (['args.data_dir', '"""dev_testing.txt"""'], {}), "(args.data_dir, 'dev_testing.txt')\n", (23204, 23238), False, 'import os\n'), ((25591, 25630), 'logging.FileHandler', 'logging.FileHandler', (['args.log_file', '"""a"""'], {}), "(args.log_file, 'a')\n", (25610, 25630), False, 'import logging\n'), ((25667, 25706), 'logging.FileHandler', 'logging.FileHandler', (['args.log_file', '"""w"""'], {}), "(args.log_file, 'w')\n", (25686, 25706), False, 'import logging\n'), ((16634, 16710), 'msr.reader.utils.metric_max_over_ground_truths', 'utils.metric_max_over_ground_truths', (['utils.f1_score', 'max_span', 'ground_truths'], {}), '(utils.f1_score, max_span, ground_truths)\n', (16669, 16710), False, 'from msr.reader import utils, vector, config, data\n'), ((20521, 20564), 'msr.reader.utils.load_words', 'utils.load_words', (['args', '(train_exs + dev_exs)'], {}), '(args, train_exs + dev_exs)\n', (20537, 20564), False, 'from msr.reader import utils, vector, config, data\n'), ((10496, 10508), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10506, 10508), False, 'import uuid\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the function "cube_manipulation.compare_coords".
"""
import unittest
import iris
import numpy as np
from cf_units import Unit
from iris.coords import AuxCoord, DimCoord
from iris.tests import IrisTest
from improver.tests.ensemble_calibration.ensemble_calibration. \
helper_functions import set_up_temperature_cube
from improver.utilities.cube_manipulation import compare_coords
from improver.utilities.warnings_handler import ManageWarnings
class Test_compare_coords(IrisTest):

    """Test the compare_coords utility."""

    def setUp(self):
        """Use temperature cube to test with."""
        self.cube = set_up_temperature_cube()

    def _run_compare(self, cube1, cube2):
        """Helper: run compare_coords on a two-cube cubelist."""
        return compare_coords(iris.cube.CubeList([cube1, cube2]))

    def _check_single_difference(self, result, index, name, points, units,
                                 data_dims, aux_dims):
        """Helper: assert only result[index] holds a difference for the
        coordinate called `name`, with the expected points, units and
        dimension mappings, while the other cube reports no differences."""
        self.assertIsInstance(result, list)
        self.assertEqual(len(result[index]), 1)
        self.assertEqual(len(result[1 - index]), 0)
        entry = result[index][name]
        self.assertEqual(entry["coord"].points, np.array(points))
        self.assertEqual(entry["coord"].standard_name, name)
        self.assertEqual(entry["coord"].units, Unit(units))
        self.assertEqual(entry["data_dims"], data_dims)
        self.assertEqual(entry["aux_dims"], aux_dims)

    def test_basic(self):
        """Test that the utility returns a list."""
        result = self._run_compare(self.cube.copy(), self.cube.copy())
        self.assertIsInstance(result, list)

    @ManageWarnings(record=True)
    def test_catch_warning(self, warning_list=None):
        """Test warning is raised if the input is cubelist of length 1."""
        result = compare_coords(iris.cube.CubeList([self.cube.copy()]))
        self.assertTrue(any(item.category == UserWarning
                            for item in warning_list))
        warning_msg = "Only a single cube so no differences will be found "
        self.assertTrue(any(warning_msg in str(item)
                            for item in warning_list))
        # Fixed: use assertEqual rather than assertAlmostEqual; a numeric
        # "almost equal" comparison is not meaningful for a (expected empty)
        # list and only passed by falling back to plain equality.
        self.assertEqual(result, [])

    def test_first_cube_has_extra_dimension_coordinates(self):
        """Test for comparing coordinate between cubes, where the first
        cube in the list has extra dimension coordinates."""
        cube1 = self.cube.copy()
        cube2 = self.cube.copy()
        cube1.add_aux_coord(DimCoord([5.0], standard_name="height",
                                     units="m"))
        cube1 = iris.util.new_axis(cube1, "height")
        result = self._run_compare(cube1, cube2)
        self._check_single_difference(
            result, 0, "height", [5.0], "m", data_dims=0, aux_dims=None)

    def test_second_cube_has_extra_dimension_coordinates(self):
        """Test for comparing coordinate between cubes, where the second
        cube in the list has extra dimension coordinates."""
        cube1 = self.cube.copy()
        cube2 = self.cube.copy()
        cube2.add_aux_coord(DimCoord([5.0], standard_name="height",
                                     units="m"))
        cube2 = iris.util.new_axis(cube2, "height")
        result = self._run_compare(cube1, cube2)
        self._check_single_difference(
            result, 1, "height", [5.0], "m", data_dims=0, aux_dims=None)

    def test_first_cube_has_extra_auxiliary_coordinates(self):
        """Test for comparing coordinate between cubes, where the first
        cube in the list has extra auxiliary coordinates."""
        cube1 = self.cube.copy()
        cube2 = self.cube.copy()
        cube1.add_aux_coord(
            AuxCoord([3.0], standard_name="forecast_period", units="hours"),
            data_dims=1)
        result = self._run_compare(cube1, cube2)
        self._check_single_difference(
            result, 0, "forecast_period", [3.0], "hours",
            data_dims=None, aux_dims=1)

    def test_second_cube_has_extra_auxiliary_coordinates(self):
        """Test for comparing coordinate between cubes, where the second
        cube in the list has extra auxiliary coordinates."""
        cube1 = self.cube.copy()
        cube2 = self.cube.copy()
        cube2.add_aux_coord(
            AuxCoord([3.0], standard_name="forecast_period", units="hours"),
            data_dims=1)
        result = self._run_compare(cube1, cube2)
        self._check_single_difference(
            result, 1, "forecast_period", [3.0], "hours",
            data_dims=None, aux_dims=1)
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| [
"unittest.main",
"iris.coords.AuxCoord",
"iris.cube.CubeList",
"improver.tests.ensemble_calibration.ensemble_calibration.helper_functions.set_up_temperature_cube",
"improver.utilities.warnings_handler.ManageWarnings",
"iris.coords.DimCoord",
"iris.util.new_axis",
"numpy.array",
"improver.utilities.c... | [((2619, 2646), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (2633, 2646), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((7525, 7540), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7538, 7540), False, 'import unittest\n'), ((2302, 2327), 'improver.tests.ensemble_calibration.ensemble_calibration.helper_functions.set_up_temperature_cube', 'set_up_temperature_cube', ([], {}), '()\n', (2325, 2327), False, 'from improver.tests.ensemble_calibration.ensemble_calibration.helper_functions import set_up_temperature_cube\n'), ((2492, 2526), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[cube1, cube2]'], {}), '([cube1, cube2])\n', (2510, 2526), False, 'import iris\n'), ((2544, 2568), 'improver.utilities.cube_manipulation.compare_coords', 'compare_coords', (['cubelist'], {}), '(cubelist)\n', (2558, 2568), False, 'from improver.utilities.cube_manipulation import compare_coords\n'), ((3492, 3542), 'iris.coords.DimCoord', 'DimCoord', (['[5.0]'], {'standard_name': '"""height"""', 'units': '"""m"""'}), "([5.0], standard_name='height', units='m')\n", (3500, 3542), False, 'from iris.coords import AuxCoord, DimCoord\n'), ((3601, 3636), 'iris.util.new_axis', 'iris.util.new_axis', (['cube1', '"""height"""'], {}), "(cube1, 'height')\n", (3619, 3636), False, 'import iris\n'), ((3656, 3690), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[cube1, cube2]'], {}), '([cube1, cube2])\n', (3674, 3690), False, 'import iris\n'), ((3708, 3732), 'improver.utilities.cube_manipulation.compare_coords', 'compare_coords', (['cubelist'], {}), '(cubelist)\n', (3722, 3732), False, 'from improver.utilities.cube_manipulation import compare_coords\n'), ((4508, 4558), 'iris.coords.DimCoord', 'DimCoord', (['[5.0]'], {'standard_name': '"""height"""', 'units': '"""m"""'}), "([5.0], standard_name='height', units='m')\n", (4516, 4558), False, 'from iris.coords import AuxCoord, 
DimCoord\n'), ((4617, 4652), 'iris.util.new_axis', 'iris.util.new_axis', (['cube2', '"""height"""'], {}), "(cube2, 'height')\n", (4635, 4652), False, 'import iris\n'), ((4672, 4706), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[cube1, cube2]'], {}), '([cube1, cube2])\n', (4690, 4706), False, 'import iris\n'), ((4724, 4748), 'improver.utilities.cube_manipulation.compare_coords', 'compare_coords', (['cubelist'], {}), '(cubelist)\n', (4738, 4748), False, 'from improver.utilities.cube_manipulation import compare_coords\n'), ((5518, 5581), 'iris.coords.AuxCoord', 'AuxCoord', (['[3.0]'], {'standard_name': '"""forecast_period"""', 'units': '"""hours"""'}), "([3.0], standard_name='forecast_period', units='hours')\n", (5526, 5581), False, 'from iris.coords import AuxCoord, DimCoord\n'), ((5665, 5699), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[cube1, cube2]'], {}), '([cube1, cube2])\n', (5683, 5699), False, 'import iris\n'), ((5717, 5741), 'improver.utilities.cube_manipulation.compare_coords', 'compare_coords', (['cubelist'], {}), '(cubelist)\n', (5731, 5741), False, 'from improver.utilities.cube_manipulation import compare_coords\n'), ((6647, 6710), 'iris.coords.AuxCoord', 'AuxCoord', (['[3.0]'], {'standard_name': '"""forecast_period"""', 'units': '"""hours"""'}), "([3.0], standard_name='forecast_period', units='hours')\n", (6655, 6710), False, 'from iris.coords import AuxCoord, DimCoord\n'), ((6794, 6828), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[cube1, cube2]'], {}), '([cube1, cube2])\n', (6812, 6828), False, 'import iris\n'), ((6846, 6870), 'improver.utilities.cube_manipulation.compare_coords', 'compare_coords', (['cubelist'], {}), '(cubelist)\n', (6860, 6870), False, 'from improver.utilities.cube_manipulation import compare_coords\n'), ((2839, 2865), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[cube]'], {}), '([cube])\n', (2857, 2865), False, 'import iris\n'), ((3927, 3942), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (3935, 3942), True, 
'import numpy as np\n'), ((4083, 4092), 'cf_units.Unit', 'Unit', (['"""m"""'], {}), "('m')\n", (4087, 4092), False, 'from cf_units import Unit\n'), ((4943, 4958), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (4951, 4958), True, 'import numpy as np\n'), ((5099, 5108), 'cf_units.Unit', 'Unit', (['"""m"""'], {}), "('m')\n", (5103, 5108), False, 'from cf_units import Unit\n'), ((5970, 5985), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (5978, 5985), True, 'import numpy as np\n'), ((6204, 6217), 'cf_units.Unit', 'Unit', (['"""hours"""'], {}), "('hours')\n", (6208, 6217), False, 'from cf_units import Unit\n'), ((7099, 7114), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (7107, 7114), True, 'import numpy as np\n'), ((7333, 7346), 'cf_units.Unit', 'Unit', (['"""hours"""'], {}), "('hours')\n", (7337, 7346), False, 'from cf_units import Unit\n')] |
import run
import numpy as np
import pytest
F = 1/np.sqrt(2)
def test_one_project():
    """one_project(n, k) flags the 2**n basis states whose k-th bit is set."""
    cases = [
        ((1, 0), [0, 1]),
        ((2, 0), [0, 1, 0, 1]),
        ((2, 1), [0, 0, 1, 1]),
        ((3, 0), [0, 1, 0, 1, 0, 1, 0, 1]),
        ((3, 1), [0, 0, 1, 1, 0, 0, 1, 1]),
        ((3, 2), [0, 0, 0, 0, 1, 1, 1, 1]),
    ]
    for (num_qubits, qubit), expected in cases:
        np.testing.assert_almost_equal(
            np.array(expected),
            run.one_project(num_qubits, qubit))
def matrix(n, update_fn):
    """Build the 2**n x 2**n matrix of the linear map `update_fn`.

    Applies `update_fn` to every computational basis vector in turn; the
    resulting states become the columns of the returned matrix.
    """
    dim = 2 ** n
    columns = []
    for basis_index in range(dim):
        basis_state = np.zeros(dim)
        basis_state[basis_index] = 1.0
        columns.append(update_fn(basis_state))
    return np.transpose(np.array(columns))
def test_h_one_qubit():
    """H on a single qubit is the standard Hadamard matrix."""
    expected = np.array([[F, F],
                         [F, -F]])
    actual = matrix(1, lambda state: run.simulate_h(state, [0], 1))
    np.testing.assert_almost_equal(expected, actual)
def test_h_two_qubits():
    """H acts on the targeted qubit and leaves the other untouched."""
    expected_h0 = np.array([[F, F, 0, 0],
                            [F, -F, 0, 0],
                            [0, 0, F, F],
                            [0, 0, F, -F]])
    np.testing.assert_almost_equal(
        expected_h0,
        matrix(2, lambda state: run.simulate_h(state, [0], 2)))
    expected_h1 = np.array([[F, 0, F, 0],
                            [0, F, 0, F],
                            [F, 0, -F, 0],
                            [0, F, 0, -F]])
    np.testing.assert_almost_equal(
        expected_h1,
        matrix(2, lambda state: run.simulate_h(state, [1], 2)))
def test_cp_two_qubits():
    """CP on two qubits phases only the |11> basis state."""
    actual = matrix(2, lambda state: run.simulate_cp(state, [0, 1], 2))
    np.testing.assert_almost_equal(np.diag([1, 1, 1, 1j]), actual)
def test_cp_three_qubits():
    """CP phases exactly the states where both targets are 1, and is
    symmetric in the order of its two qubit arguments."""
    expected_diagonals = {
        (0, 1): [1, 1, 1, 1j, 1, 1, 1, 1j],
        (0, 2): [1, 1, 1, 1, 1, 1j, 1, 1j],
        (1, 2): [1, 1, 1, 1, 1, 1, 1j, 1j],
    }
    for (a, b), diagonal in expected_diagonals.items():
        for qubits in ([a, b], [b, a]):
            np.testing.assert_almost_equal(
                np.diag(diagonal),
                # Bind `qubits` as a default to avoid late-binding closures.
                matrix(3, lambda state, q=qubits: run.simulate_cp(state, q, 3)))
def measurement_idempotent(n, index):
    """Measuring any computational basis state must leave it unchanged."""
    for basis in range(2 ** n):
        state = np.zeros(2 ** n)
        state[basis] = 1.0
        np.testing.assert_almost_equal(
            state, run.simulate_m(state, [index], n))
def test_measurement_idempotent():
    """Check measurement idempotence for every qubit of 0-2 qubit systems."""
    for num_qubits in range(3):
        for qubit in range(num_qubits):
            measurement_idempotent(num_qubits, qubit)
def test_measurement_result_one_qubit(capfd):
    """With fixed RNG seeds, measuring |+> collapses to a known basis state
    and prints the matching measurement message."""
    # (seed, reported bit, collapsed state) -- values pinned by the seeds.
    for seed, bit, collapsed in [(0, 1, [0, 1]), (1, 0, [1, 0])]:
        np.random.seed(seed)
        state = run.simulate_m(np.array([F, F]), [0], 1)
        out, _ = capfd.readouterr()
        assert out.strip() == 'Measured %d on qubit 0.' % bit
        np.testing.assert_almost_equal(np.array(collapsed), state)
def test_measurement_result_two_qubits(capfd):
    """With fixed RNG seeds, measuring one qubit of a uniform two-qubit
    superposition collapses to the expected sub-superposition and prints
    the matching measurement message."""
    # (seed, measured qubit, reported bit, collapsed state).
    cases = [
        (0, 0, 1, [0, F, 0, F]),
        (1, 0, 0, [F, 0, F, 0]),
        (0, 1, 1, [0, 0, F, F]),
        (1, 1, 0, [F, F, 0, 0]),
    ]
    for seed, qubit, bit, collapsed in cases:
        np.random.seed(seed)
        state = run.simulate_m(0.5 * np.array([1, 1, 1, 1]), [qubit], 2)
        out, _ = capfd.readouterr()
        assert out.strip() == 'Measured %d on qubit %d.' % (bit, qubit)
        np.testing.assert_almost_equal(np.array(collapsed), state)
def test_parse_qubits():
    """parse_qubit decodes pairs of start/end tokens into qubit indices."""
    S, E = run.S, run.E
    assert run.parse_qubit([S, S], S, E) == [0]
    assert run.parse_qubit([E, E], S, E) == [1]
    assert run.parse_qubit([S, S, S, S], S, E) == [0]
    assert run.parse_qubit([S, S, E, E], S, E) == [1]
    assert run.parse_qubit([E, E, S, S], S, E) == [2]
    assert run.parse_qubit([E, E, E, E], S, E) == [3]
    assert run.parse_qubit([S, S, S, E, S, S], S, E) == [0, 0]
    assert run.parse_qubit([E, E, E, S, E, E, S, S], S, E) == [1, 2]
    # Custom start/end tokens are honoured.
    assert run.parse_qubit(['a', 'a'], 'a', E) == [0]
    assert run.parse_qubit(['b', 'b'], S, 'b') == [1]
def test_parse_qubits_invalid_token_number():
    """parse_qubit raises SyntaxError for an odd number of tokens.

    Fixed: `pytest.raises` takes a `match=` keyword (regex checked against
    the exception message), not `matches=`. The misspelled keyword meant
    the message was never checked, and modern pytest rejects unknown
    keyword arguments with a TypeError. Every other test in this file
    already uses `match=`.
    """
    with pytest.raises(SyntaxError, match='number'):
        run.parse_qubit([run.S], run.S, run.E)
    with pytest.raises(SyntaxError, match='number'):
        run.parse_qubit([run.S, run.S, run.E], run.S, run.E)
def test_parse_one_qubit(tmpdir):
    """A single-qubit program parses to an H gate followed by a measurement."""
    source = tmpdir.join('parse.qsel')
    source.write('superposition superposition superposition superposition\n'
                 'superposition entanglement superposition superposition')
    all_qubits, program = run.parse(str(source))
    assert all_qubits == {0}
    assert program == [{'gate': 'H', 'qubits': [0]},
                       {'gate': 'M', 'qubits': [0]}]
def test_parse_two_qubit(tmpdir):
    """A two-qubit program parses to a CP gate followed by a measurement."""
    source = tmpdir.join('parse.qsel')
    source.write('entanglement entanglement superposition superposition '
                 'superposition entanglement entanglement entanglement\n'
                 'superposition entanglement superposition superposition')
    all_qubits, program = run.parse(str(source))
    assert all_qubits == {0, 1}
    assert program == [{'gate': 'CP', 'qubits': [0, 1]},
                       {'gate': 'M', 'qubits': [0]}]
def test_parse_too_few_qubits(tmpdir):
    """An odd token count cannot describe whole qubits and must fail."""
    source = tmpdir.join('parse.qsel')
    source.write('entanglement entanglement superposition')
    with pytest.raises(SyntaxError, match='Not enough'):
        run.parse(str(source))
def test_parse_bad_tokens(tmpdir):
    """Tokens outside the allowed vocabulary are rejected."""
    source = tmpdir.join('parse.qsel')
    source.write('entanglement x')
    with pytest.raises(SyntaxError, match='Only'):
        run.parse(str(source))
def test_parse_h_wrong_qubit_number(tmpdir):
    """An H gate with the wrong number of qubit tokens is rejected."""
    source = tmpdir.join('parse.qsel')
    source.write(' '.join([run.S] * 5 + [run.E] * 3))
    with pytest.raises(SyntaxError, match='H gate'):
        run.parse(str(source))
def test_parse_cp_wrong_qubit_number(tmpdir):
    """A CP gate with the wrong number of qubit tokens is rejected."""
    source = tmpdir.join('parse.qsel')
    source.write(' '.join([run.E] * 2 + [run.S] * 2))
    with pytest.raises(SyntaxError, match='CP gate'):
        run.parse(str(source))
def test_parse_m_wrong_qubit_number(tmpdir):
    """An M gate with the wrong number of qubit tokens is rejected."""
    source = tmpdir.join('parse.qsel')
    source.write(' '.join([run.S, run.E] + [run.S] * 3 + [run.E] * 3))
    with pytest.raises(SyntaxError, match='M gate'):
        run.parse(str(source))
| [
"numpy.random.seed",
"run.parse_qubit",
"numpy.zeros",
"run.one_project",
"pytest.raises",
"run.simulate_h",
"numpy.array",
"run.simulate_cp",
"numpy.diag",
"run.simulate_m",
"numpy.sqrt"
] | [((52, 62), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (59, 62), True, 'import numpy as np\n'), ((2626, 2643), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2640, 2643), True, 'import numpy as np\n'), ((2653, 2669), 'numpy.array', 'np.array', (['[F, F]'], {}), '([F, F])\n', (2661, 2669), True, 'import numpy as np\n'), ((2679, 2708), 'run.simulate_m', 'run.simulate_m', (['state', '[0]', '(1)'], {}), '(state, [0], 1)\n', (2693, 2708), False, 'import run\n'), ((2845, 2862), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2859, 2862), True, 'import numpy as np\n'), ((2872, 2888), 'numpy.array', 'np.array', (['[F, F]'], {}), '([F, F])\n', (2880, 2888), True, 'import numpy as np\n'), ((2898, 2927), 'run.simulate_m', 'run.simulate_m', (['state', '[0]', '(1)'], {}), '(state, [0], 1)\n', (2912, 2927), False, 'import run\n'), ((3112, 3129), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3126, 3129), True, 'import numpy as np\n'), ((3179, 3208), 'run.simulate_m', 'run.simulate_m', (['state', '[0]', '(2)'], {}), '(state, [0], 2)\n', (3193, 3208), False, 'import run\n'), ((3360, 3377), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3374, 3377), True, 'import numpy as np\n'), ((3427, 3456), 'run.simulate_m', 'run.simulate_m', (['state', '[0]', '(2)'], {}), '(state, [0], 2)\n', (3441, 3456), False, 'import run\n'), ((3607, 3624), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3621, 3624), True, 'import numpy as np\n'), ((3674, 3703), 'run.simulate_m', 'run.simulate_m', (['state', '[1]', '(2)'], {}), '(state, [1], 2)\n', (3688, 3703), False, 'import run\n'), ((3854, 3871), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3868, 3871), True, 'import numpy as np\n'), ((3921, 3950), 'run.simulate_m', 'run.simulate_m', (['state', '[1]', '(2)'], {}), '(state, [1], 2)\n', (3935, 3950), False, 'import run\n'), ((124, 140), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', 
(132, 140), True, 'import numpy as np\n'), ((145, 166), 'run.one_project', 'run.one_project', (['(1)', '(0)'], {}), '(1, 0)\n', (160, 166), False, 'import run\n'), ((204, 226), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (212, 226), True, 'import numpy as np\n'), ((232, 253), 'run.one_project', 'run.one_project', (['(2)', '(0)'], {}), '(2, 0)\n', (247, 253), False, 'import run\n'), ((290, 312), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (298, 312), True, 'import numpy as np\n'), ((317, 338), 'run.one_project', 'run.one_project', (['(2)', '(1)'], {}), '(2, 1)\n', (332, 338), False, 'import run\n'), ((375, 409), 'numpy.array', 'np.array', (['[0, 1, 0, 1, 0, 1, 0, 1]'], {}), '([0, 1, 0, 1, 0, 1, 0, 1])\n', (383, 409), True, 'import numpy as np\n'), ((414, 435), 'run.one_project', 'run.one_project', (['(3)', '(0)'], {}), '(3, 0)\n', (429, 435), False, 'import run\n'), ((472, 506), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0, 1, 1]'], {}), '([0, 0, 1, 1, 0, 0, 1, 1])\n', (480, 506), True, 'import numpy as np\n'), ((511, 532), 'run.one_project', 'run.one_project', (['(3)', '(1)'], {}), '(3, 1)\n', (526, 532), False, 'import run\n'), ((569, 603), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 1, 1, 1, 1])\n', (577, 603), True, 'import numpy as np\n'), ((608, 629), 'run.one_project', 'run.one_project', (['(3)', '(2)'], {}), '(3, 2)\n', (623, 629), False, 'import run\n'), ((708, 724), 'numpy.zeros', 'np.zeros', (['(2 ** n)'], {}), '(2 ** n)\n', (716, 724), True, 'import numpy as np\n'), ((798, 815), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (806, 815), True, 'import numpy as np\n'), ((878, 905), 'numpy.array', 'np.array', (['[[F, F], [F, -F]]'], {}), '([[F, F], [F, -F]])\n', (886, 905), True, 'import numpy as np\n'), ((1028, 1096), 'numpy.array', 'np.array', (['[[F, F, 0, 0], [F, -F, 0, 0], [0, 0, F, F], [0, 0, F, -F]]'], {}), '([[F, F, 0, 0], [F, -F, 0, 0], [0, 0, 
F, F], [0, 0, F, -F]])\n', (1036, 1096), True, 'import numpy as np\n'), ((1203, 1271), 'numpy.array', 'np.array', (['[[F, 0, F, 0], [0, F, 0, F], [F, 0, -F, 0], [0, F, 0, -F]]'], {}), '([[F, 0, F, 0], [0, F, 0, F], [F, 0, -F, 0], [0, F, 0, -F]])\n', (1211, 1271), True, 'import numpy as np\n'), ((1411, 1435), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1.0j]'], {}), '([1, 1, 1, 1.0j])\n', (1418, 1435), True, 'import numpy as np\n'), ((1554, 1593), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1.0j, 1, 1, 1, 1.0j]'], {}), '([1, 1, 1, 1.0j, 1, 1, 1, 1.0j])\n', (1561, 1593), True, 'import numpy as np\n'), ((1680, 1719), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1.0j, 1, 1, 1, 1.0j]'], {}), '([1, 1, 1, 1.0j, 1, 1, 1, 1.0j])\n', (1687, 1719), True, 'import numpy as np\n'), ((1806, 1845), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1, 1, 1.0j, 1, 1.0j]'], {}), '([1, 1, 1, 1, 1, 1.0j, 1, 1.0j])\n', (1813, 1845), True, 'import numpy as np\n'), ((1932, 1971), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1, 1, 1.0j, 1, 1.0j]'], {}), '([1, 1, 1, 1, 1, 1.0j, 1, 1.0j])\n', (1939, 1971), True, 'import numpy as np\n'), ((2058, 2097), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1, 1, 1, 1.0j, 1.0j]'], {}), '([1, 1, 1, 1, 1, 1, 1.0j, 1.0j])\n', (2065, 2097), True, 'import numpy as np\n'), ((2184, 2223), 'numpy.diag', 'np.diag', (['[1, 1, 1, 1, 1, 1, 1.0j, 1.0j]'], {}), '([1, 1, 1, 1, 1, 1, 1.0j, 1.0j])\n', (2191, 2223), True, 'import numpy as np\n'), ((2350, 2366), 'numpy.zeros', 'np.zeros', (['(2 ** n)'], {}), '(2 ** n)\n', (2358, 2366), True, 'import numpy as np\n'), ((2818, 2834), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2826, 2834), True, 'import numpy as np\n'), ((3037, 3053), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (3045, 3053), True, 'import numpy as np\n'), ((3146, 3168), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (3154, 3168), True, 'import numpy as np\n'), ((3325, 3347), 'numpy.array', 'np.array', (['[0, F, 0, F]'], {}), '([0, F, 0, F])\n', 
(3333, 3347), True, 'import numpy as np\n'), ((3394, 3416), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (3402, 3416), True, 'import numpy as np\n'), ((3573, 3595), 'numpy.array', 'np.array', (['[F, 0, F, 0]'], {}), '([F, 0, F, 0])\n', (3581, 3595), True, 'import numpy as np\n'), ((3641, 3663), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (3649, 3663), True, 'import numpy as np\n'), ((3820, 3842), 'numpy.array', 'np.array', (['[0, 0, F, F]'], {}), '([0, 0, F, F])\n', (3828, 3842), True, 'import numpy as np\n'), ((3888, 3910), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (3896, 3910), True, 'import numpy as np\n'), ((4067, 4089), 'numpy.array', 'np.array', (['[F, F, 0, 0]'], {}), '([F, F, 0, 0])\n', (4075, 4089), True, 'import numpy as np\n'), ((4139, 4184), 'run.parse_qubit', 'run.parse_qubit', (['[run.S, run.S]', 'run.S', 'run.E'], {}), '([run.S, run.S], run.S, run.E)\n', (4154, 4184), False, 'import run\n'), ((4200, 4245), 'run.parse_qubit', 'run.parse_qubit', (['[run.E, run.E]', 'run.S', 'run.E'], {}), '([run.E, run.E], run.S, run.E)\n', (4215, 4245), False, 'import run\n'), ((4261, 4320), 'run.parse_qubit', 'run.parse_qubit', (['[run.S, run.S, run.S, run.S]', 'run.S', 'run.E'], {}), '([run.S, run.S, run.S, run.S], run.S, run.E)\n', (4276, 4320), False, 'import run\n'), ((4336, 4395), 'run.parse_qubit', 'run.parse_qubit', (['[run.S, run.S, run.E, run.E]', 'run.S', 'run.E'], {}), '([run.S, run.S, run.E, run.E], run.S, run.E)\n', (4351, 4395), False, 'import run\n'), ((4411, 4470), 'run.parse_qubit', 'run.parse_qubit', (['[run.E, run.E, run.S, run.S]', 'run.S', 'run.E'], {}), '([run.E, run.E, run.S, run.S], run.S, run.E)\n', (4426, 4470), False, 'import run\n'), ((4486, 4545), 'run.parse_qubit', 'run.parse_qubit', (['[run.E, run.E, run.E, run.E]', 'run.S', 'run.E'], {}), '([run.E, run.E, run.E, run.E], run.S, run.E)\n', (4501, 4545), False, 'import run\n'), ((4564, 4637), 
'run.parse_qubit', 'run.parse_qubit', (['[run.S, run.S, run.S, run.E, run.S, run.S]', 'run.S', 'run.E'], {}), '([run.S, run.S, run.S, run.E, run.S, run.S], run.S, run.E)\n', (4579, 4637), False, 'import run\n'), ((4659, 4750), 'run.parse_qubit', 'run.parse_qubit', (['[run.E, run.E, run.E, run.S, run.E, run.E, run.S, run.S]', 'run.S', 'run.E'], {}), '([run.E, run.E, run.E, run.S, run.E, run.E, run.S, run.S],\n run.S, run.E)\n', (4674, 4750), False, 'import run\n'), ((4765, 4804), 'run.parse_qubit', 'run.parse_qubit', (["['a', 'a']", '"""a"""', 'run.E'], {}), "(['a', 'a'], 'a', run.E)\n", (4780, 4804), False, 'import run\n'), ((4820, 4859), 'run.parse_qubit', 'run.parse_qubit', (["['b', 'b']", 'run.S', '"""b"""'], {}), "(['b', 'b'], run.S, 'b')\n", (4835, 4859), False, 'import run\n'), ((4914, 4958), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'matches': '"""number"""'}), "(SyntaxError, matches='number')\n", (4927, 4958), False, 'import pytest\n'), ((4962, 5000), 'run.parse_qubit', 'run.parse_qubit', (['[run.S]', 'run.S', 'run.E'], {}), '([run.S], run.S, run.E)\n', (4977, 5000), False, 'import run\n'), ((5007, 5051), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'matches': '"""number"""'}), "(SyntaxError, matches='number')\n", (5020, 5051), False, 'import pytest\n'), ((5055, 5107), 'run.parse_qubit', 'run.parse_qubit', (['[run.S, run.S, run.E]', 'run.S', 'run.E'], {}), '([run.S, run.S, run.E], run.S, run.E)\n', (5070, 5107), False, 'import run\n'), ((6013, 6059), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'match': '"""Not enough"""'}), "(SyntaxError, match='Not enough')\n", (6026, 6059), False, 'import pytest\n'), ((6182, 6222), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'match': '"""Only"""'}), "(SyntaxError, match='Only')\n", (6195, 6222), False, 'import pytest\n'), ((6405, 6447), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'match': '"""H gate"""'}), "(SyntaxError, match='H gate')\n", (6418, 6447), False, 'import 
pytest\n'), ((6603, 6646), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'match': '"""CP gate"""'}), "(SyntaxError, match='CP gate')\n", (6616, 6646), False, 'import pytest\n'), ((6829, 6871), 'pytest.raises', 'pytest.raises', (['SyntaxError'], {'match': '"""M gate"""'}), "(SyntaxError, match='M gate')\n", (6842, 6871), False, 'import pytest\n'), ((2424, 2457), 'run.simulate_m', 'run.simulate_m', (['state', '[index]', 'n'], {}), '(state, [index], n)\n', (2438, 2457), False, 'import run\n'), ((938, 963), 'run.simulate_h', 'run.simulate_h', (['x', '[0]', '(1)'], {}), '(x, [0], 1)\n', (952, 963), False, 'import run\n'), ((1140, 1165), 'run.simulate_h', 'run.simulate_h', (['x', '[0]', '(2)'], {}), '(x, [0], 2)\n', (1154, 1165), False, 'import run\n'), ((1320, 1345), 'run.simulate_h', 'run.simulate_h', (['x', '[1]', '(2)'], {}), '(x, [1], 2)\n', (1334, 1345), False, 'import run\n'), ((1457, 1486), 'run.simulate_cp', 'run.simulate_cp', (['x', '[0, 1]', '(2)'], {}), '(x, [0, 1], 2)\n', (1472, 1486), False, 'import run\n'), ((1613, 1642), 'run.simulate_cp', 'run.simulate_cp', (['x', '[0, 1]', '(3)'], {}), '(x, [0, 1], 3)\n', (1628, 1642), False, 'import run\n'), ((1739, 1768), 'run.simulate_cp', 'run.simulate_cp', (['x', '[1, 0]', '(3)'], {}), '(x, [1, 0], 3)\n', (1754, 1768), False, 'import run\n'), ((1865, 1894), 'run.simulate_cp', 'run.simulate_cp', (['x', '[0, 2]', '(3)'], {}), '(x, [0, 2], 3)\n', (1880, 1894), False, 'import run\n'), ((1991, 2020), 'run.simulate_cp', 'run.simulate_cp', (['x', '[2, 0]', '(3)'], {}), '(x, [2, 0], 3)\n', (2006, 2020), False, 'import run\n'), ((2117, 2146), 'run.simulate_cp', 'run.simulate_cp', (['x', '[1, 2]', '(3)'], {}), '(x, [1, 2], 3)\n', (2132, 2146), False, 'import run\n'), ((2243, 2272), 'run.simulate_cp', 'run.simulate_cp', (['x', '[2, 1]', '(3)'], {}), '(x, [2, 1], 3)\n', (2258, 2272), False, 'import run\n')] |
import os
import gym
import time
import enum
import random
import carla
import pygame
import numpy as np
from gym import spaces
from typing import Callable, Dict, Union, List
from pygame.constants import K_q, K_UP, K_w, K_LEFT, K_a, K_RIGHT, K_d, K_DOWN, K_s, K_SPACE, K_ESCAPE, KMOD_CTRL
from rl import utils as rl_utils
from rl.environments.carla import env_utils
from rl.environments.carla.sensors import Sensor, SensorSpecs
from rl.environments.carla.navigation.behavior_agent import BehaviorAgent
from rl.environments.carla.navigation import Route, RoutePlanner, RoadOption
from rl.environments.carla.tools import misc, utils
from rl.environments.carla.tools.utils import WAYPOINT_DICT
from rl.environments.carla.tools.synchronous_mode import CARLASyncContext
class CARLAEvent(enum.Enum):
    """Available events (callbacks) related to CARLAEnvironment"""
    RESET = 0  # fired by CARLABaseEnvironment.reset()
    ON_COLLISION = 1  # fired when the ego vehicle collides with another actor
    OUT_OF_LANE = 2  # fired when the vehicle's alignment (similarity) leaves the [0.75, 1.0] band
# -------------------------------------------------------------------------------------------------
# -- Base Class and Wrappers
# -------------------------------------------------------------------------------------------------
# TODO: use gym register API to make these environments available to gym.make(...)
class CARLABaseEnvironment(gym.Env):
    """Base extendable environment for the CARLA driving simulator.

    Owns the CARLA client/world connection, the ego vehicle, its sensor suite,
    origin/destination sampling, optional route planning, and the pygame
    rendering/debug overlay. Subclasses define the observation/action spaces,
    the reward, the terminal condition, and the sensors.
    """
    def __init__(self, address='localhost', port=2000, timeout=5.0, image_shape=(150, 200, 3), window_size=(800, 600),
                 vehicle_filter='vehicle.tesla.model3', fps=30.0, render=True, debug=True, spawn: dict = None,
                 ignore_traffic_light=True, path: dict = None, town: str = None,
                 weather=carla.WeatherParameters.ClearNoon, skip_frames=30):
        """Arguments:
            - path: dict =
                - origin: dict(carla.Transform or 'point' or 'points', 'type': [fixed, route, map] or [random,
                  sequential])
                - destination: dict(carla.Transform or 'point' or 'points', 'type': [fixed, map] or [random,
                  sequential]))
                - length: int
                - use_planner: bool
                - sampling_resolution: float
            - spawn: dict(vehicle_filter: str, pedestrian_filter: str, pedestrians: int, vehicles: int, running: float,
                          crossing: float, hybrid: bool)
        """
        super().__init__()
        env_utils.init_pygame()
        self.timeout = timeout
        self.client = env_utils.get_client(address, port, self.timeout)
        self.world: carla.World = self.client.get_world()
        self.synchronous_context = None  # created lazily in reset_world()
        self.sync_mode_enabled = False
        self.num_frames_to_skip = skip_frames
        # Time
        self.initial_timestamp: carla.Timestamp = None
        self.current_timestamp: carla.Timestamp = None
        # set fix fps:
        self.world_settings = carla.WorldSettings(no_rendering_mode=False,
                                                  synchronous_mode=False,
                                                  fixed_delta_seconds=1.0 / fps)
        self.world.apply_settings(self.world_settings)
        # Law compliance
        self.ignore_traffic_light = ignore_traffic_light
        # Map
        self.current_town = 'Town03'  # loaded by default
        if isinstance(town, str):
            self.set_town(town)
        self.map: carla.Map = self.world.get_map()
        # Vehicle
        self.vehicle_filter = vehicle_filter
        self.vehicle: carla.Vehicle = None
        self.control = carla.VehicleControl()
        # Weather
        self.weather = weather
        self.set_weather(weather)
        # Spawning (vehicles and pedestrians)
        self.vehicles = []
        self.walkers_ids = []
        self.should_spawn = isinstance(spawn, dict)
        self.spawn_dict = spawn
        # Path: origin, destination, and path-length:
        self.origin_type = 'map'  # 'map' means sample a random point from the world's map
        self.origin = None
        self.destination_type = 'map'
        self.destination = None
        self.path_length = None
        self.use_planner = True
        self.sampling_resolution = 2.0
        # Validate and unpack the [path] specification (see docstring for its schema):
        if isinstance(path, dict):
            origin_spec = path.get('origin', None)
            destination_spec = path.get('destination', None)
            self.path_length = path.get('length', None)
            # Origin:
            if isinstance(origin_spec, carla.Transform):
                self.origin = origin_spec
                self.origin_type = 'fixed'
            elif isinstance(origin_spec, dict):
                if 'point' in origin_spec:
                    self.origin = origin_spec['point']
                    self.origin_type = origin_spec.get('type', 'fixed')
                    assert isinstance(self.origin, carla.Transform)
                    assert self.origin_type in ['map', 'fixed', 'route']
                elif 'points' in origin_spec:
                    self.origins = origin_spec['points']
                    self.origin = None
                    self.origin_index = -1  # -1 so the first sequential pick is index 0
                    self.origin_type = origin_spec.get('type', 'random')
                    assert isinstance(self.origins, list) and len(self.origins) > 0
                    assert all(isinstance(x, carla.Transform) for x in self.origins)
                    assert self.origin_type in ['random', 'sequential']
            elif isinstance(origin_spec, list):
                self.origins = origin_spec
                self.origin = None
                self.origin_index = -1
                self.origin_type = 'random'
            # Destination:
            if isinstance(destination_spec, carla.Location):
                self.destination = destination_spec
                self.destination_type = 'fixed'
            elif isinstance(destination_spec, dict):
                if 'point' in destination_spec:
                    self.destination = destination_spec['point']
                    self.destination_type = destination_spec.get('type', 'fixed')
                    assert isinstance(self.destination, carla.Location)
                    assert self.destination_type in ['map', 'fixed']
                elif 'points' in destination_spec:
                    self.destinations = destination_spec['points']
                    self.destination = None
                    self.destination_index = -1
                    self.destination_type = destination_spec.get('type', 'random')
                    assert isinstance(self.destinations, list) and len(self.destinations) > 0
                    assert all(isinstance(x, carla.Location) for x in self.destinations)
                    assert self.destination_type in ['random', 'sequential']
            # Path stuff:
            self.path_length = path.get('length', None)
            self.use_planner = path.get('use_planner', True)
            self.sampling_resolution = path.get('sampling_resolution', 2.0)
            if self.origin_type == 'route':
                # 'route' origins are sampled from the planned route, so a planner and a fixed goal are required
                assert self.destination_type == 'fixed'
                assert self.use_planner is True
        elif path is not None:
            raise ValueError('Argument [path] must be either "None" or a "dict".')
        # Path-planning:
        if self.use_planner:
            self.route = Route(planner=RoutePlanner(map=self.map, sampling_resolution=self.sampling_resolution))
        else:
            self.route = None
        # Visualization and Debugging
        self.image_shape = image_shape
        self.image_size = (image_shape[1], image_shape[0])  # (width, height) for image resizing
        self.fps = fps
        self.tick_time = 1.0 / self.fps
        self.should_render = render
        self.should_debug = debug
        self.clock = pygame.time.Clock()
        if self.should_render:
            self.render_data = None  # some sensor_data to be rendered in render()
            self.window_size = window_size
            self.font = env_utils.get_font(size=13)
            self.display = env_utils.get_display(window_size)
        # vehicle sensors suite
        self.sensors = dict()
        # events and callbacks
        self.events: Dict[CARLAEvent, List[Callable]] = dict()
    @property
    def observation_space(self) -> spaces.Space:
        """Space of the environment's observations (defined by subclasses)."""
        raise NotImplementedError
    @property
    def action_space(self) -> spaces.Space:
        """Space of the environment's actions (defined by subclasses)."""
        raise NotImplementedError
    @property
    def info_space(self) -> spaces.Space:
        """Space describing the auxiliary info dict (defined by subclasses)."""
        raise NotImplementedError
    @property
    def reward_range(self) -> tuple:
        """(min, max) range of the reward (defined by subclasses)."""
        raise NotImplementedError
    def reset(self) -> dict:
        """Resets the world/vehicle, enables sync mode, skips warm-up frames, spawns traffic once,
        and returns the initial observation."""
        print('env.reset')
        self.reset_world()
        self.trigger_event(event=CARLAEvent.RESET)
        if not self.sync_mode_enabled:
            self.__enter__()
        self.control = carla.VehicleControl()
        self.skip(num_frames=self.num_frames_to_skip)
        if self.should_spawn:
            # spawn traffic only once, on the first reset
            self.spawn_actors(self.spawn_dict)
            self.spawn_dict = None
            self.should_spawn = False
        return self.get_observation(sensors_data={})
    def reward(self, actions, **kwargs):
        """Agent's reward function"""
        raise NotImplementedError
    def set_weather(self, weather: Union[carla.WeatherParameters, List[carla.WeatherParameters]]):
        """Sets the given weather. If [weather] is a list, a random preset from the list is chosen and set"""
        if isinstance(weather, list):
            weather = random.choice(weather)
        self.world.set_weather(weather)
        self.weather = weather
        print(f'Weather changed to {weather}.')
    def set_town(self, town: str, timeout=2.0, max_trials=5):
        """Loads then given town"""
        if self.current_town == town:
            print(f'{town} already loaded.')
            return
        print(f'Loading town: {town}...')
        self.map = None
        for _ in range(max_trials):
            try:
                self.world = self.client.load_world(town)
                self.map = self.world.get_map()
            except RuntimeError:
                # the server may need some time after load_world(); retry after a pause
                print('Failed to connect to newly created map, retrying...')
                time.sleep(timeout)
            if self.map is not None:
                break
        self.world.apply_settings(self.world_settings)
        self.current_town = town
        print(f'Town {town} loaded.')
    def spawn_actors(self, spawn_dict: dict, hybrid=True, safe=True):
        """Instantiate vehicles and pedestrians in the current world"""
        traffic_manager = self.client.get_trafficmanager()
        traffic_manager.set_synchronous_mode(True)
        if spawn_dict.get('hybrid', hybrid):
            traffic_manager.set_hybrid_physics_mode(True)
        blueprints = env_utils.get_blueprints(self.world, vehicles_filter=spawn_dict.get('vehicles_filter', None),
                                              pedestrians_filter=spawn_dict.get('pedestrians_filter', None),
                                              safe=safe)
        # Spawn stuff
        self.vehicles = env_utils.spawn_vehicles(amount=spawn_dict.get('vehicles', 0), blueprints=blueprints[0],
                                                 client=self.client, spawn_points=self.map.get_spawn_points())
        self.walkers_ids = env_utils.spawn_pedestrians(amount=spawn_dict.get('pedestrians', 0),
                                                       blueprints=blueprints[1], client=self.client,
                                                       running=spawn_dict.get('running', 0.0),
                                                       crossing=spawn_dict.get('crossing', 0.0))
        traffic_manager.global_percentage_speed_difference(30.0)
    def destroy_actors(self):
        """Removes the previously spawned actors (vehicles and pedestrians/walkers)"""
        # Remove vehicles
        print(f'Destroying {len(self.vehicles)} vehicles...')
        self.client.apply_batch([carla.command.DestroyActor(x) for x in self.vehicles])
        # Stop walker controllers only (list is [controller, actor, controller, actor ...])
        actors = self.world.get_actors(self.walkers_ids)
        for i in range(0, len(self.walkers_ids), 2):
            actors[i].stop()
        print(f'Destroying {len(self.walkers_ids) // 2} pedestrians...')
        self.client.apply_batch([carla.command.DestroyActor(x) for x in self.walkers_ids])
        time.sleep(1.0)
    @staticmethod
    def consume_pygame_events():
        """Drains the pygame event queue; returns True if a quit was requested (window close or ESC)."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return True
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_ESCAPE:
                    return True
        return False
    def step(self, actions):
        """Applies [actions] to the simulator and returns (observation, reward, done, info)."""
        pygame.event.get()  # keep the pygame window responsive
        self.clock.tick()
        sensors_data = self.world_step(actions)
        reward = self.reward(actions)
        terminal = self.terminal_condition()
        next_state = self.get_observation(sensors_data)
        return next_state, reward, terminal, self.get_info()
    def terminal_condition(self, **kwargs) -> Union[bool, int]:
        """Tells whether the episode is terminated or not."""
        raise NotImplementedError
    def close(self):
        """Destroys spawned actors, the ego vehicle and its sensors, and disables sync mode."""
        print('env.close')
        super().close()
        self.destroy_actors()
        self.vehicles = []
        self.walkers_ids = []
        if self.vehicle:
            self.vehicle.destroy()
        if self.sync_mode_enabled:
            self.__exit__()
        for sensor in self.sensors.values():
            sensor.destroy()
    def define_sensors(self) -> dict:
        """Define which sensors should be equipped to the vehicle"""
        raise NotImplementedError
    def on_collision(self, event: carla.CollisionEvent, **kwargs):
        """Collision-sensor callback (defined by subclasses)."""
        raise NotImplementedError
    def register_event(self, event: CARLAEvent, callback):
        """Registers a given [callback] to a specific [event]"""
        assert isinstance(event, CARLAEvent)
        assert callable(callback)
        callbacks = self.events.get(event, [])
        callbacks.append(callback)
        self.events[event] = callbacks
    def trigger_event(self, event: CARLAEvent, **kwargs):
        """Cause the call of every callback registered for event [event]"""
        print(f'Event {str(event)} triggered.')
        for callback in self.events.get(event, []):
            callback(**kwargs)
    def unregister_event(self, event: CARLAEvent, callback):
        """Unregisters a given [callback] to a specific [event]"""
        assert isinstance(event, CARLAEvent)
        assert callable(callback)
        if event in self.events:
            callbacks = self.events[event]
            callbacks.remove(callback)
            self.events[event] = callbacks
        else:
            print(f'Event {event} not yet registered!')
    def render(self, mode='human'):
        """Renders sensors' output"""
        raise NotImplementedError
    def debug(self, actions):
        """Draws the debug_text() overlay on the pygame display."""
        env_utils.display_text(self.display, self.font, text=self.debug_text(actions), origin=(16, 12),
                               offset=(0, 16))
    def debug_text(self, actions):
        """Returns the list of strings to draw in the debug overlay (defined by subclasses)."""
        raise NotImplementedError
    def skip(self, num_frames=10):
        """Skips the given amount of frames"""
        for _ in range(num_frames):
            self.synchronous_context.tick(timeout=self.timeout)
        if num_frames > 0:
            print(f'Skipped {num_frames} frames.')
    def control_to_actions(self, control: carla.VehicleControl):
        raise NotImplementedError("Implement only if needed for pre-training.")
    def before_world_step(self):
        """Callback: called before world.tick()"""
        if self.ignore_traffic_light and self.vehicle.is_at_traffic_light():
            # force the light green so the agent never has to wait at intersections
            traffic_light = self.vehicle.get_traffic_light()
            traffic_light.set_state(carla.TrafficLightState.Green)
    def after_world_step(self, sensors_data: dict):
        """Callback: called after world.tick()."""
        self.current_timestamp = sensors_data['world'].timestamp
        if self.initial_timestamp is None:
            self.initial_timestamp = self.current_timestamp
    @staticmethod
    def on_sensors_data(data: dict) -> dict:
        """Callback. Triggers when a world's 'tick' occurs, meaning that data from sensors are been collected because a
        simulation step of the CARLA's world has been completed.
            - Use this method to preprocess sensors' output data for: rendering, observation, ...
        """
        return data
    def __enter__(self):
        """Enables synchronous mode.
           Usage:
              with carla_env as env:
                 # code...
        """
        self.synchronous_context.__enter__()
        self.sync_mode_enabled = True
        return self
    def __exit__(self, *args):
        """Disables synchronous mode"""
        self.synchronous_context.__exit__()
        self.sync_mode_enabled = False
        # propagate exception
        return False
    def world_step(self, actions):
        """Applies the actions to the vehicle, and updates the CARLA's world"""
        # [pre-tick updates] Apply control to update the vehicle
        self.actions_to_control(actions)
        self.vehicle.apply_control(self.control)
        self.before_world_step()
        # Advance the simulation and wait for sensors' data.
        data = self.synchronous_context.tick(timeout=self.timeout)
        data = self.on_sensors_data(data)
        # [post-tick updates] Update world-related stuff
        self.after_world_step(data)
        # Draw and debug:
        if self.should_render:
            self.render_data = data
            self.render()
            self.render_data = None
            if self.should_debug:
                self.debug(actions)
            pygame.display.flip()
        return data
    def reset_world(self):
        """Picks a new origin/destination (per their sampling type), re-plans the route,
        and (re)spawns or repositions the ego vehicle."""
        # choose origin (spawn point)
        if self.origin_type == 'map':
            self.origin = env_utils.random_spawn_point(self.map)
        elif self.origin_type == 'random':
            self.origin = random.choice(self.origins)
        elif self.origin_type == 'sequential':
            self.origin_index = (self.origin_index + 1) % len(self.origins)
            self.origin = self.origins[self.origin_index]
        # choose destination (final point)
        if self.destination_type == 'map':
            self.destination = env_utils.random_spawn_point(self.map, different_from=self.origin.location).location
        elif self.destination_type == 'random':
            self.destination = random.choice(self.destinations)  # TODO: ensure different from origin?
        elif self.destination_type == 'sequential':
            self.destination_index = (self.destination_index + 1) % len(self.destinations)
            self.destination = self.destinations[self.destination_index]
        # plan path between origin and destination
        if self.use_planner:
            self.route.plan(origin=self.origin.location, destination=self.destination)
        # spawn actor
        if self.vehicle is None:
            # first reset: spawn the vehicle, attach sensors, and create the sync context
            blueprint = env_utils.random_blueprint(self.world, actor_filter=self.vehicle_filter)
            self.vehicle: carla.Vehicle = env_utils.spawn_actor(self.world, blueprint, self.origin)
            self._create_sensors()
            self.synchronous_context = CARLASyncContext(self.world, self.sensors, fps=self.fps)
        else:
            # subsequent resets: zero controls/velocity and teleport the existing vehicle
            self.vehicle.apply_control(carla.VehicleControl())
            self.vehicle.set_velocity(carla.Vector3D(x=0.0, y=0.0, z=0.0))
            if self.origin_type == 'route':
                new_origin = self.route.random_waypoint().transform
                self.vehicle.set_transform(new_origin)
            else:
                self.vehicle.set_transform(self.origin)
    def actions_to_control(self, actions):
        """Specifies the mapping between an actions vector and the vehicle's control."""
        raise NotImplementedError
    def get_observation(self, sensors_data: dict) -> dict:
        """Builds the observation dict from raw sensors' data (defined by subclasses)."""
        raise NotImplementedError
    def get_info(self) -> dict:
        """Returns an auxiliary info dict; empty by default."""
        return {}
    def elapsed_time(self):
        """Returns the total elapsed time in seconds, computed from the last reset() call."""
        return self.current_timestamp.elapsed_seconds - self.initial_timestamp.elapsed_seconds
    def available_towns(self) -> list:
        """Returns a list with the names of the currently available maps/towns"""
        return list(map(lambda s: s.split('/')[-1], self.client.get_available_maps()))
    def _create_sensors(self):
        """Instantiates and attaches to the vehicle every sensor returned by define_sensors()."""
        for name, args in self.define_sensors().items():
            if args is None:
                continue
            kwargs = args.copy()
            sensor = Sensor.create(sensor_type=kwargs.pop('type'), parent_actor=self.vehicle, **kwargs)
            if name == 'world':
                # 'world' is the key used by CARLASyncContext for the world snapshot
                raise ValueError(f'Cannot name a sensor `world` because is reserved.')
            self.sensors[name] = sensor
# TODO: make wrappers be composable? (e.g. treat them as environments)
class CARLAWrapper(gym.Wrapper):
    """Base (no-op) wrapper class for CARLA environments."""
    pass
class CARLAPlayWrapper(CARLAWrapper):
    """Makes an already instantiated CARLAEnvironment be playable with a keyboard"""
    # action layout: [throttle, steer, brake, reverse, hand_brake]
    CONTROL = dict(type='float', shape=(5,), min_value=-1.0, max_value=1.0,
                   default=[0.0, 0.0, 0.0, 0.0, 0.0])
    def __init__(self, env: CARLABaseEnvironment):
        super().__init__(env)
        print('Controls: (W, or UP) accelerate, (A or LEFT) steer left, (D or RIGHT) steer right, (S or DOWN) brake, '
              '(Q) toggle reverse, (SPACE) hand-brake, (ESC) quit.')
        self.env = env
        self._steer_cache = 0.0  # accumulates steering while keys are held, for smooth steering
        # Wrap environment's methods:
        self.env.actions_to_control = lambda actions: self.actions_to_control(self.env, actions)
        self.env.before_world_step = lambda: self.before_world_step(self.env)
    def reset(self) -> dict:
        """Resets the steering cache and the wrapped environment."""
        self._steer_cache = 0.0
        return self.env.reset()
    def play(self):
        """Let's you control the vehicle with a keyboard."""
        state = self.reset()
        done = False
        try:
            with self.env.synchronous_context:
                while not done:
                    actions = self.get_action(state)
                    state, reward, done, info = self.env.step(actions)
        finally:
            self.env.close()
    def get_action(self, state):
        """Returns the action vector derived from the current keyboard state ([state] is unused)."""
        return self._parse_events()
    def _parse_events(self):
        """Handles discrete key events (quit, reverse toggle), then reads the held keys."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.env.close()
            elif event.type == pygame.KEYUP:
                if self._is_quit_shortcut(event.key):
                    raise Exception('closing...')
                elif event.key == K_q:
                    # toggle between forward (1) and reverse (-1) gear
                    self.env.control.gear = 1 if self.env.control.reverse else -1
        return self._parse_vehicle_keys()
    @staticmethod
    def _is_quit_shortcut(key):
        # quit on ESC or CTRL+Q
        return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)
    def _parse_vehicle_keys(self):
        """Converts the currently pressed keys into the 5-dim action vector (see CONTROL)."""
        keys = pygame.key.get_pressed()
        steer_increment = 5e-4 * self.env.clock.get_time()
        if keys[K_LEFT] or keys[K_a]:
            if self._steer_cache > 0:
                self._steer_cache = 0
            else:
                self._steer_cache -= steer_increment
        elif keys[K_RIGHT] or keys[K_d]:
            if self._steer_cache < 0:
                self._steer_cache = 0
            else:
                self._steer_cache += steer_increment
        else:
            self._steer_cache = 0.0
        self._steer_cache = min(1.0, max(-1.0, self._steer_cache))
        self.env.control.reverse = self.env.control.gear < 0
        # actions
        throttle = 1.0 if keys[K_UP] or keys[K_w] else 0.0
        steer = round(self._steer_cache, 1)
        brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0
        reverse = 1.0 if self.env.control.reverse else 0.0
        hand_brake = keys[K_SPACE]
        return [throttle, steer, brake, reverse, hand_brake]
    @staticmethod
    def actions_to_control(env, actions):
        """Maps the 5-dim keyboard action vector onto [env]'s carla.VehicleControl."""
        env.control.throttle = actions[0]
        env.control.steer = actions[1]
        env.control.brake = actions[2]
        env.control.reverse = bool(actions[3])
        env.control.hand_brake = bool(actions[4])
    @staticmethod
    def before_world_step(env):
        """Pre-tick hook: draws the planned route when debugging is enabled."""
        if env.should_debug:
            env.route.draw_route(env.world.debug, life_time=1.0 / env.fps)
            # env.route.draw_next_waypoint(env.world.debug, env.vehicle.get_location(), life_time=1.0 / env.fps)
class CARLACollectWrapper(CARLAWrapper):
    """Wraps a CARLA Environment, collecting input observations and output actions that can be later
       used for pre-training or imitation learning purposes.

    Fixes w.r.t. the previous revision:
        - serialize() now reports which trace was saved (the old message printed a literal "(unknown)");
        - end_trajectory() no longer mutates self.buffer['reward'] (np.concatenate used to grow it by
          one element on every episode);
        - collect() accesses the vehicle as self.env.vehicle, consistently with the rest of the class.
    """
    def __init__(self, env: CARLABaseEnvironment, ignore_traffic_light: bool, traces_dir='traces', name='carla',
                 behaviour='normal'):
        super().__init__(env)
        self.env = env
        self.agent = None  # expert BehaviorAgent, re-created at each reset()
        self.agent_behaviour = behaviour  # 'normal', 'cautious', or 'aggressive'
        self.ignore_traffic_light = ignore_traffic_light
        # Saving & Buffers
        self.save_dir = rl_utils.makedir(traces_dir, name)
        print('save_dir:', self.save_dir)
        self.buffer = None
        self.timestep = 0
        # Check for collisions
        self.have_collided = False
        self.should_terminate = False
    def reset(self) -> dict:
        """Resets the environment and re-creates the expert agent routed to the env's destination."""
        self.timestep = 0
        observation = self.env.reset()
        self.agent = BehaviorAgent(vehicle=self.env.vehicle, behavior=self.agent_behaviour,
                                   ignore_traffic_light=self.ignore_traffic_light)
        self.agent.set_destination(start_location=self.env.vehicle.get_location(), end_location=self.env.destination,
                                   clean=True)
        return observation
    def on_collision(self, actor):
        """Callback for CARLAEvent.ON_COLLISION: pedestrian/vehicle hits terminate the episode."""
        self.have_collided = True
        if 'pedestrian' in actor:
            self.should_terminate = True
        elif 'vehicle' in actor:
            self.should_terminate = True
        else:
            self.should_terminate = False
    def collect(self, episodes: int, timesteps: int, agent_debug=False, episode_reward_threshold=0.0, close=True):
        """Drives the expert agent for [episodes] episodes of at most [timesteps] steps each,
        recording (state, action, reward, done, info) transitions. A trace is serialized only
        when its episode reward exceeds [episode_reward_threshold]."""
        self.init_buffer(num_timesteps=timesteps)
        env = self.env
        env.register_event(event=CARLAEvent.ON_COLLISION, callback=self.on_collision)
        self.have_collided = False
        self.should_terminate = False
        try:
            for episode in range(episodes):
                state = self.reset()
                episode_reward = 0.0
                for t in range(timesteps):
                    # act
                    self.agent.update_information(vehicle=self.env.vehicle)
                    control = self.agent.run_step(debug=agent_debug)
                    action = env.control_to_actions(control)
                    # step
                    next_state, reward, done, info = env.step(action)
                    episode_reward += reward
                    if self.have_collided:
                        self.have_collided = False
                        if self.should_terminate:
                            # fatal collision: force the episode below the saving threshold and discard it
                            episode_reward = -episode_reward_threshold
                            break
                    # record
                    self.store_transition(state=state, action=action, reward=reward, done=done, info=info)
                    state = next_state
                    if done or (t == timesteps - 1):
                        buffer = self.end_trajectory()
                        break
                if episode_reward > episode_reward_threshold:
                    self.serialize(buffer, episode)
                    print(f'Trace-{episode} saved with reward={round(episode_reward, 2)}.')
                else:
                    print(f'Trace-{episode} discarded because reward={round(episode_reward, 2)} below threshold!')
        finally:
            env.unregister_event(event=CARLAEvent.ON_COLLISION, callback=self.on_collision)
            if close:
                env.close()
    def init_buffer(self, num_timesteps: int):
        """Allocates the (flat) transition buffer for episodes of at most [num_timesteps] steps."""
        # partial buffer: misses 'state', 'action', and 'info'
        self.buffer = dict(reward=np.zeros(shape=num_timesteps),
                           done=np.zeros(shape=num_timesteps))
        obs_spec = rl_utils.space_to_spec(space=self.env.observation_space)
        act_spec = rl_utils.space_to_spec(space=self.env.action_space)
        info_spec = rl_utils.space_to_spec(space=self.env.info_space)
        print('obs_spec\n', obs_spec)
        print('action_spec\n', act_spec)
        print('info_spec\n', info_spec)
        # include in buffer 'state' and 'action'
        self._apply_space_spec(spec=obs_spec, size=num_timesteps, name='state')
        self._apply_space_spec(spec=act_spec, size=num_timesteps, name='action')
        self._apply_space_spec(spec=info_spec, size=num_timesteps, name='info')
        self.timestep = 0
    def store_transition(self, **kwargs):
        """Collects one transition (s, a, r, d, i)"""
        for name, value in kwargs.items():
            self._store_item(item=value, index=self.timestep, name=name)
        self.timestep += 1
    def end_trajectory(self) -> dict:
        """Ends a sequence of transitions {(s, a, r, d, i)_t}"""
        # Copy the collected prefix of each array, leaving self.buffer untouched for the next episode:
        buffer = {key: value[:self.timestep] for key, value in self.buffer.items()}
        # Add the reward for the terminal/final state (rewards have one extra element):
        buffer['reward'] = np.concatenate([buffer['reward'], np.array([0.0])])
        return buffer
    def serialize(self, buffer: dict, episode: int):
        """Writes to file (npz - numpy compressed format) all the transitions collected so far"""
        # Trace's file path:
        filename = f'trace-{episode}-{time.strftime("%Y%m%d-%H%M%S")}.npz'
        trace_path = os.path.join(self.save_dir, filename)
        # Save buffer
        np.savez_compressed(file=trace_path, **buffer)
        print(f'Trace-{episode} saved at: {trace_path}')
    def _apply_space_spec(self, spec: Union[tuple, dict], size: int, name: str):
        """Allocates one zero-filled array per (possibly nested) spec entry, named `{name}_{key}...`."""
        if not isinstance(spec, dict):
            shape = (size,) + spec
            self.buffer[name] = np.zeros(shape=shape, dtype=np.float32)
            return
        # use recursion + names to handle arbitrary nested dicts and recognize them
        for spec_name, sub_spec in spec.items():
            self._apply_space_spec(spec=sub_spec, size=size, name=f'{name}_{spec_name}')
    def _store_item(self, item, index: int, name: str):
        """Stores [item] at [index], recursing into dict items with `{name}_{key}` naming."""
        if not isinstance(item, dict):
            self.buffer[name][index] = item
            return
        # recursion
        for key, value in item.items():
            self._store_item(item=value, index=index, name=f'{name}_{key}')
class CARLARecordWrapper:
    """Wraps a CARLA Environment in order to record input observations"""
    # NOTE(review): placeholder, not yet implemented
    pass
# -------------------------------------------------------------------------------------------------
# -- Implemented CARLA Environments
# -------------------------------------------------------------------------------------------------
class OneCameraCARLAEnvironment(CARLABaseEnvironment):
"""One camera (front) CARLA Environment"""
# Control: throttle or brake, steer, reverse
ACTION = dict(space=spaces.Box(low=-1.0, high=1.0, shape=(3,)), default=np.zeros(shape=3, dtype=np.float32))
CONTROL = dict(space=spaces.Box(low=-1.0, high=1.0, shape=(4,)), default=np.zeros(shape=4, dtype=np.float32))
# Vehicle: speed, acceleration, angular velocity, similarity, distance to waypoint
VEHICLE_FEATURES = dict(space=spaces.Box(low=np.array([0.0, -np.inf, 0.0, -1.0, -np.inf]),
high=np.array([15.0, np.inf, np.inf, 1.0, np.inf])),
default=np.zeros(shape=5, dtype=np.float32))
# Road: intersection (bool), junction (bool), speed_limit, traffic_light (presence + state), lane type and change,
ROAD_FEATURES = dict(space=spaces.Box(low=np.zeros(shape=(9,)),
high=np.array([1.0, 1.0, 15.0, 1.0, 4.0, 2.0, 10.0, 10.0, 3.0])),
default=np.zeros(shape=9, dtype=np.float32))
# High-level routing command (aka RoadOption)
COMMAND_SPACE = spaces.Box(low=0.0, high=1.0, shape=RoadOption.VOID.shape)
INFO_SPACE = spaces.Dict(speed=spaces.Box(low=0.0, high=150.0, shape=(1,)),
speed_limit=spaces.Box(low=0.0, high=90.0, shape=(1,)),
similarity=spaces.Box(low=-1.0, high=1.0, shape=(1,)),
distance_to_next_waypoint=spaces.Box(low=0.0, high=np.inf, shape=(1,)))
    def __init__(self, *args, disable_reverse=False, min_throttle=0.0, camera='segmentation',
                 hard_control_threshold: Union[float, None] = None, **kwargs):
        """Arguments:
            - disable_reverse: if True the reverse gear is never engaged (see actions_to_control).
            - min_throttle: lower bound applied to the throttle command.
            - camera: 'rgb' or 'segmentation'; segmentation additionally blends in a depth camera.
            - hard_control_threshold: if a float, throttle/brake are rounded to 0/1 below this speed.
        """
        super().__init__(*args, **kwargs)
        self.image_space = spaces.Box(low=0.0, high=1.0, shape=self.image_shape)
        self.camera_type = camera
        # control hack
        self.disable_reverse = disable_reverse
        self.min_throttle = min_throttle
        self.should_harden_controls = isinstance(hard_control_threshold, float)
        self.hard_control_threshold = hard_control_threshold
        # reward computation
        self.collision_penalty = 0.0
        self.should_terminate = False
        self.similarity = 0.0  # alignment with the route's next waypoint, in [-1, 1]
        self.forward_vector = None
        self.next_command = RoadOption.VOID
        self.last_actions = self.ACTION['default']
        self.last_location = None
        self.last_travelled_distance = 0.0
        self.total_travelled_distance = 0.0
        # Observations
        self.default_image = np.zeros(shape=self.image_shape, dtype=np.float32)
    @property
    def action_space(self) -> spaces.Space:
        """[throttle or brake, steer, reverse], each in [-1, 1]."""
        return self.ACTION['space']
    @property
    def observation_space(self) -> spaces.Space:
        """Dict of road/vehicle features, last control, routing command, and camera image."""
        return spaces.Dict(road=self.ROAD_FEATURES['space'], vehicle=self.VEHICLE_FEATURES['space'],
                           past_control=self.CONTROL['space'], command=self.COMMAND_SPACE, image=self.image_space)
    @property
    def info_space(self) -> spaces.Space:
        """Auxiliary diagnostics returned by get_info()."""
        return self.INFO_SPACE
    @property
    def reward_range(self) -> tuple:
        """The reward is unbounded (collision penalties can be arbitrarily accumulated)."""
        return -float('inf'), float('inf')
    def reward(self, actions, time_cost=-1, d=2.0, w=3.0, s=2.0, v_max=150.0, d_max=100.0, **kwargs) -> float:
        """Shaped reward: time cost - collision penalty + waypoint term + direction term + speed term.
            - d: multiplier on the misalignment penalty;
            - w: multiplier on the waypoint-distance penalty beyond 5m;
            - s: multiplier on the over-speed-limit penalty;
            - v_max / d_max: caps on speed and waypoint distance.
        """
        # Direction term: alignment of the vehicle's forward vector with the waypoint's forward vector
        speed = min(utils.speed(self.vehicle), v_max)
        if 0.75 <= self.similarity <= 1.0:
            direction_penalty = speed * self.similarity
        else:
            # misaligned: penalize proportionally to speed, and signal the out-of-lane event
            direction_penalty = (speed + 1.0) * abs(self.similarity) * -d
            self.trigger_event(CARLAEvent.OUT_OF_LANE, similarity=self.similarity)
        # Distance from waypoint (and also lane center)
        waypoint_term = min(self.route.distance_to_next_waypoint(), d_max)
        waypoint_term = -waypoint_term if waypoint_term <= 5.0 else waypoint_term * -w
        # Speed-limit compliance:
        speed_limit = self.vehicle.get_speed_limit()
        speed_penalty = s * (speed_limit - speed) if speed > speed_limit else 0.0
        return time_cost - self.collision_penalty + waypoint_term + direction_penalty + speed_penalty
def step(self, actions):
state, reward, done, info = super().step(actions)
self.collision_penalty = 0.0
self.last_travelled_distance = 0.0
return state, reward, done, info
def reset(self) -> dict:
self.last_actions = self.ACTION['default']
self.should_terminate = False
self.total_travelled_distance = 0.0
self.last_travelled_distance = 0.0
self.next_command = RoadOption.VOID
# reset observations:
observation = super().reset()
self.last_location = self.vehicle.get_location()
# self.last_location = self.origin.location
return observation
def terminal_condition(self, **kwargs) -> bool:
if self.should_terminate:
return True
return self.route.distance_to_destination(self.vehicle.get_location()) <= 2.0
    def define_sensors(self) -> dict:
        """Equips the vehicle with collision + IMU sensors and one camera; the segmentation
        camera is paired with a depth camera (blended later in on_sensors_data)."""
        if self.camera_type == 'rgb':
            camera_sensor = SensorSpecs.rgb_camera(position='on-top2', attachment_type='Rigid',
                                                   image_size_x=self.image_size[0],
                                                   image_size_y=self.image_size[1],
                                                   sensor_tick=self.tick_time)
            depth_sensor = None  # no depth blending for plain RGB
        else:
            camera_sensor = SensorSpecs.segmentation_camera(position='on-top2', attachment_type='Rigid',
                                                            image_size_x=self.image_size[0],
                                                            image_size_y=self.image_size[1],
                                                            sensor_tick=self.tick_time)
            depth_sensor = SensorSpecs.depth_camera(position='on-top2', attachment_type='Rigid',
                                                    image_size_x=self.image_size[0],
                                                    image_size_y=self.image_size[1],
                                                    sensor_tick=self.tick_time)
        return dict(collision=SensorSpecs.collision_detector(callback=self.on_collision),
                    imu=SensorSpecs.imu(),
                    camera=camera_sensor,
                    depth=depth_sensor)
def on_collision(self, event: carla.CollisionEvent, penalty=1000.0):
actor_type = event.other_actor.type_id
print(f'Collision with actor={actor_type})')
self.trigger_event(event=CARLAEvent.ON_COLLISION, actor=actor_type)
if 'pedestrian' in actor_type:
self.collision_penalty += penalty
self.should_terminate = True
elif 'vehicle' in actor_type:
self.collision_penalty += penalty / 2.0
self.should_terminate = True
else:
self.collision_penalty += penalty / 100.0
self.should_terminate = False
    def render(self, mode='human'):
        """Renders sensors' output: displays the (processed) camera image on the pygame window."""
        assert self.render_data is not None  # set by world_step() before calling render()
        image = self.render_data['camera']
        env_utils.display_image(self.display, image, window_size=self.window_size)
    def debug_text(self, actions):
        """Builds the list of strings/colored-text dicts drawn by the debug overlay.
        Over-speed and low-similarity entries are highlighted in red."""
        speed_limit = self.vehicle.get_speed_limit()
        speed = utils.speed(self.vehicle)
        distance = self.total_travelled_distance
        if speed > speed_limit:
            speed_text = dict(text='Speed %.1f km/h' % speed, color=(255, 0, 0))
        else:
            speed_text = 'Speed %.1f km/h' % speed
        if self.similarity >= 0.75:
            similarity_text = 'Similarity %.2f' % self.similarity
        else:
            similarity_text = dict(text='Similarity %.2f' % self.similarity, color=(255, 0, 0))
        return ['%d FPS' % self.clock.get_fps(),
                '',
                'Throttle: %.2f' % self.control.throttle,
                'Steer: %.2f' % self.control.steer,
                'Brake: %.2f' % self.control.brake,
                'Reverse: %s' % ('T' if self.control.reverse else 'F'),
                '',
                speed_text,
                'Speed limit %.1f km/h' % speed_limit,
                'Distance travelled %.2f %s' % ((distance / 1000.0, 'km') if distance > 1000.0 else (distance, 'm')),
                '',
                similarity_text,
                'Waypoint\'s Distance %.2f' % self.route.distance_to_next_waypoint(),
                'Route Option: %s' % self.next_command.name,
                'OP: %s' % self.next_command.to_one_hot(),
                '',
                'Reward: %.2f' % self.reward(actions),
                'Collision penalty: %.2f' % self.collision_penalty]
def control_to_actions(self, control: carla.VehicleControl):
reverse = bool(control.reverse)
if control.throttle > 0.0:
return [control.throttle, control.steer, reverse]
return [-control.brake, control.steer, reverse]
    def on_sensors_data(self, data: dict) -> dict:
        """Preprocesses raw sensors' output: converts camera frames, optionally blends in
        depth information, and converts to grayscale when a single-channel image is requested."""
        data['camera'] = self.sensors['camera'].convert_image(data['camera'])
        if 'depth' in self.sensors:
            # include depth information in one image:
            data['depth'] = self.sensors['depth'].convert_image(data['depth'])
            data['camera'] = np.multiply(1 - data['depth'] / 255.0, data['camera'])
        if self.image_shape[2] == 1:
            data['camera'] = env_utils.cv2_grayscale(data['camera'])
        return data
    def after_world_step(self, sensors_data: dict):
        # Let the base class process the simulation tick first, then refresh
        # the navigation state (target waypoint, similarity, distance).
        super().after_world_step(sensors_data)
        self._update_env_state()
def actions_to_control(self, actions):
self.control.throttle = max(self.min_throttle, float(actions[0]) if actions[0] > 0 else 0.0)
# self.control.throttle = float(actions[0]) if actions[0] > 0 else 0.0
self.control.brake = float(-actions[0]) if actions[0] < 0 else 0.0
self.control.steer = float(actions[1])
self.control.hand_brake = False
if self.should_harden_controls and (utils.speed(self.vehicle) <= self.hard_control_threshold):
self.control.throttle = float(round(self.control.throttle))
self.control.brake = float(round(self.control.brake))
if self.disable_reverse:
self.control.reverse = False
else:
self.control.reverse = bool(actions[2] > 0)
def get_observation(self, sensors_data: dict) -> dict:
if len(sensors_data.keys()) == 0:
# sensor_data is empty so, return a default observation
return dict(image=self.default_image, vehicle=self.VEHICLE_FEATURES['default'],
road=self.ROAD_FEATURES['default'], past_control=self.CONTROL['default'],
command=RoadOption.VOID.to_one_hot())
image = sensors_data['camera']
# resize image if necessary
if image.shape != self.image_shape:
image = env_utils.resize(image, size=self.image_size)
# 0-1 scaling
image /= 255.0
# observations
vehicle_obs = self._get_vehicle_features()
control_obs = self._control_as_vector()
road_obs = self._get_road_features()
obs = dict(image=image, vehicle=vehicle_obs, road=road_obs, past_control=control_obs,
command=self.next_command.to_one_hot())
return env_utils.replace_nans(obs)
def get_info(self) -> dict:
"""Returns a dict with additional information either for debugging or learning"""
return dict(speed=utils.speed(self.vehicle), speed_limit=self.vehicle.get_speed_limit(),
similarity=self.similarity, distance_to_next_waypoint=self.route.distance_to_next_waypoint())
def _control_as_vector(self) -> list:
return [self.control.throttle, self.control.brake, self.control.steer, float(self.control.reverse)]
def _get_road_features(self):
waypoint: carla.Waypoint = self.map.get_waypoint(self.vehicle.get_location())
speed_limit = self.vehicle.get_speed_limit()
is_at_traffic_light = self.vehicle.is_at_traffic_light()
if is_at_traffic_light:
traffic_light_state = self.vehicle.get_traffic_light_state()
else:
traffic_light_state = carla.TrafficLightState.Unknown
# get current lane type: consider only road (driving) lanes
if waypoint.lane_type is carla.LaneType.NONE:
lane_type = 0
elif waypoint.lane_type is carla.LaneType.Driving:
lane_type = 1
else:
lane_type = 2 # other
return np.array([waypoint.is_intersection,
waypoint.is_junction,
round(speed_limit / 10.0),
# Traffic light:
is_at_traffic_light,
WAYPOINT_DICT['traffic_light'][traffic_light_state],
# Lanes:
lane_type,
WAYPOINT_DICT['lane_marking_type'][waypoint.left_lane_marking.type],
WAYPOINT_DICT['lane_marking_type'][waypoint.right_lane_marking.type],
WAYPOINT_DICT['lane_change'][waypoint.lane_change]], dtype=np.float32)
def _get_vehicle_features(self):
imu_sensor = self.sensors['imu']
# vehicle's acceleration (also considers direction)
acceleration = env_utils.magnitude(imu_sensor.accelerometer) * env_utils.sign(self.similarity)
# vehicle's angular velocity
angular_velocity = env_utils.magnitude(imu_sensor.gyroscope)
return [utils.speed(self.vehicle) / 10.0,
acceleration,
angular_velocity,
# Target (next) waypoint's features:
self.similarity,
self.route.distance_to_next_waypoint()]
    # TODO: move to base class
    def _update_env_state(self):
        # Refresh planner-driven state first (advance the target waypoint,
        # then re-measure heading similarity against it), then the route
        # command, and finally the odometer.
        if self.use_planner:
            self._update_target_waypoint()
            self._update_waypoint_similarity()
            self.next_command = self.route.next.road_op
        self._update_travelled_distance()
    def _update_target_waypoint(self):
        # Advance the route's next-waypoint pointer from the current position.
        self.route.update_next_waypoint(location=self.vehicle.get_location())
def _update_waypoint_similarity(self):
self.forward_vector = self.vehicle.get_transform().get_forward_vector()
self.similarity = utils.cosine_similarity(self.forward_vector,
self.route.next.waypoint.transform.get_forward_vector())
# TODO: move to base class
def _update_travelled_distance(self):
location1 = self.last_location
location2 = self.vehicle.get_location()
self.last_travelled_distance = misc.compute_distance(location1, location2)
self.total_travelled_distance += abs(self.last_travelled_distance)
self.last_location = location2
class OneCameraCARLAEnvironmentDiscrete(OneCameraCARLAEnvironment):
    """One-camera CARLA Environment with discrete action-space"""
    def __init__(self, bins: int, *args, **kwargs):
        # An even number of bins (>= 2) keeps the discretization symmetric.
        assert (bins >= 2) and (bins % 2 == 0)
        box = self.ACTION['space']
        assert isinstance(box, spaces.Box)
        self.bins = bins
        self._low = box.low
        self._delta = (box.high - box.low) / bins
        # change action space to "discrete"
        self.ACTION = dict(space=spaces.MultiDiscrete([self.bins] * 3),
                           default=np.zeros(shape=3, dtype=np.float32))
        super().__init__(*args, **kwargs)
    def actions_to_control(self, actions):
        # Map the discrete bins to continuous values before delegating.
        super().actions_to_control(actions=self.to_continuous(actions))
    def to_continuous(self, discrete_actions: list):
        """Maps a discrete array of bins into their corresponding continuous values"""
        return np.asarray(discrete_actions) * self._delta + self._low
    def control_to_actions(self, control: carla.VehicleControl):
        continuous = super().control_to_actions(control)
        return self.to_discrete(continuous)
    def to_discrete(self, continuous_actions: list):
        """Maps a continuous array of values into their corresponding bins (i.e. inverse of `interpolate`)"""
        offset = np.asarray(continuous_actions) - self._low
        return (offset / self._delta).astype('int')
class ThreeCameraCARLAEnvironment(OneCameraCARLAEnvironment):
    """Three Camera (front, lateral left and right) CARLA Environment"""
    def __init__(self, *args, image_shape=(120, 160, 1), window_size=(600, 300), **kwargs):
        height, width, channels = image_shape
        # Triple the final image's width so the three views fit side by side.
        super().__init__(*args, image_shape=(height, width * 3, channels), window_size=window_size, **kwargs)
        self.image_size = (width, height)
    def define_sensors(self) -> dict:
        """Collision + IMU sensors, three segmentation cameras and a depth camera."""
        camera_kwargs = dict(image_size_x=self.image_size[0],
                             image_size_y=self.image_size[1],
                             sensor_tick=self.tick_time)
        return dict(
            collision=SensorSpecs.collision_detector(callback=self.on_collision),
            imu=SensorSpecs.imu(),
            front_camera=SensorSpecs.segmentation_camera(position='on-top2', attachment_type='Rigid',
                                                         **camera_kwargs),
            depth=SensorSpecs.depth_camera(position='on-top2', attachment_type='Rigid',
                                           **camera_kwargs),
            left_camera=SensorSpecs.segmentation_camera(position='lateral-left', attachment_type='Rigid',
                                                        **camera_kwargs),
            right_camera=SensorSpecs.segmentation_camera(position='lateral-right', attachment_type='Rigid',
                                                         **camera_kwargs))
    def render(self, mode='human'):
        """Draw the most recent stitched camera frame onto the pygame display."""
        assert self.render_data is not None
        frame = self.render_data['camera']
        env_utils.display_image(self.display, frame, window_size=self.window_size)
    def on_sensors_data(self, data: dict) -> dict:
        """Convert the three camera images and stitch them into one wide frame."""
        front = self.sensors['front_camera'].convert_image(data['front_camera'])
        left = self.sensors['left_camera'].convert_image(data['left_camera'])
        right = self.sensors['right_camera'].convert_image(data['right_camera'])
        # include depth information in one image:
        if 'depth' in self.sensors:
            data['depth'] = self.sensors['depth'].convert_image(data['depth'])
            front = np.multiply(1 - data['depth'] / 255.0, front)
        # Concat images
        stitched = np.concatenate((left, front, right), axis=1)
        if self.image_shape[2] == 1:
            stitched = env_utils.cv2_grayscale(stitched)
        data['camera'] = stitched
        return data
class ThreeCameraCARLAEnvironmentDiscrete(ThreeCameraCARLAEnvironment):
    """Three-camera CARLA Environment with discrete action-space"""
    def __init__(self, bins: int, *args, **kwargs):
        # An even number of bins (>= 2) keeps the discretization symmetric.
        assert (bins >= 2) and (bins % 2 == 0)
        action_space = self.ACTION['space']
        assert isinstance(action_space, spaces.Box)
        self.bins = bins
        self._low = action_space.low
        self._delta = (action_space.high - action_space.low) / bins
        # change action space to "discrete"
        self.ACTION = dict(space=spaces.MultiDiscrete([self.bins] * 3),
                           default=np.zeros(shape=3, dtype=np.float32))
        super().__init__(*args, **kwargs)
    def actions_to_control(self, actions):
        # FIX: removed leftover debug print of the discrete -> continuous
        # mapping that spammed stdout on every environment step.
        super().actions_to_control(actions=self.to_continuous(actions))
    def to_continuous(self, discrete_actions: list):
        """Maps a discrete array of bins into their corresponding continuous values"""
        return self._delta * np.asarray(discrete_actions) + self._low
    def control_to_actions(self, control: carla.VehicleControl):
        # FIX: removed leftover debug print (kept behavior identical to the
        # one-camera discrete environment).
        actions = super().control_to_actions(control)
        return self.to_discrete(actions)
    def to_discrete(self, continuous_actions: list):
        """Maps a continuous array of values into their corresponding bins (i.e. inverse of `interpolate`)"""
        return ((np.asarray(continuous_actions) - self._low) / self._delta).astype('int')
# -------------------------------------------------------------------------------------------------
# -- Benchmarks: CARLA + NoCrash
# -------------------------------------------------------------------------------------------------
# TODO: untested
class CARLABenchmark(CARLAWrapper):
    """CARLA benchmark, as described in the paper: "End-to-end Driving via Conditional Imitation Learning"
        - https://arxiv.org/pdf/1710.02410
       The agent is evaluated on:
        - Town: "Town02".
        - Performance are measured in two ways: (1) success rate, and (2) avg. distance without infractions.
        - Six weather presets (almost like in "Controllable Imitative Reinforcement Learning" paper):
            1. CloudyNoon,
            2. SoftRainSunset,
            3. CloudyNoon,
            4. MidRainyNoon,
            5. CloudySunset,
            6. HardRainSunset.
        - An episode terminates when an "infraction". An infraction occurs when the agent is not able to
          reach the goal location within the time-budget, and/or when the agent drives in the opposite road
          segment (in this case, this kind of infraction is detected by measuring "direction-similarity" with
          the next correct waypoint).
        - Time-budget: in this case, the time budget is represented by "average speed".
       Details:
        - https://github.com/carla-simulator/driving-benchmarks
    """
    # Towns used for the 'train' and 'test' presets respectively.
    TRAIN_TOWN = 'Town01'
    TEST_TOWN = 'Town02'
    TRAIN_WEATHERS = [carla.WeatherParameters.ClearNoon,
                      carla.WeatherParameters.ClearSunset,
                      carla.WeatherParameters.SoftRainNoon,
                      carla.WeatherParameters.SoftRainSunset]
    TEST_WEATHERS = [carla.WeatherParameters.CloudyNoon,
                     carla.WeatherParameters.SoftRainSunset,
                     carla.WeatherParameters.WetCloudyNoon,
                     carla.WeatherParameters.MidRainyNoon,
                     carla.WeatherParameters.CloudySunset,
                     carla.WeatherParameters.HardRainSunset]
    class Tasks(enum.Enum):
        """Kind of tasks that the benchmark supports"""
        EMPTY_TOWN = 0
        REGULAR_TRAFFIC = 1
        DENSE_TRAFFIC = 2
    # Specifications of each task for training/testing evaluation:
    # (numbers of vehicles/pedestrians spawned per town, per task)
    TASKS_SPEC = {Tasks.EMPTY_TOWN: {
        TRAIN_TOWN: dict(vehicles=0, pedestrians=0),
        TEST_TOWN: dict(vehicles=0, pedestrians=0)},
        Tasks.REGULAR_TRAFFIC: {
            TRAIN_TOWN: dict(vehicles=20, pedestrians=50),
            TEST_TOWN: dict(vehicles=15, pedestrians=50)},
        Tasks.DENSE_TRAFFIC: {
            TRAIN_TOWN: dict(vehicles=100, pedestrians=250),
            TEST_TOWN: dict(vehicles=70, pedestrians=150)}}
    def __init__(self, env: CARLABaseEnvironment, task: Tasks, preset='test', weather=None, avg_speed=10.0):
        # `avg_speed` defines the time budget: slower expected speed => longer
        # time limit for the same route (see reset()).
        assert isinstance(task, CARLABenchmark.Tasks)
        assert preset in ['test', 'train']
        super().__init__(env)
        self.env = env
        self.is_out_of_lane = False
        self.has_collided = False
        # metrics
        # `successful` accumulates one (or more — see NOTE in step()) boolean
        # per episode, consumed by success_rate().
        self.successful = []
        self.route_length = None
        self.time_limit = None
        self.avg_speed = avg_speed
        # events
        # self.env.register_event(CARLAEvent.OUT_OF_LANE, callback=self.on_out_of_lane)
        self.env.register_event(CARLAEvent.ON_COLLISION, callback=self.on_collision)
        # prepare stuff for benchmark
        if preset == 'test':
            self.task_spec = self.TASKS_SPEC[task][self.TEST_TOWN]
            self.env.set_town(self.TEST_TOWN)
        else:
            self.task_spec = self.TASKS_SPEC[task][self.TRAIN_TOWN]
            self.env.set_town(self.TRAIN_TOWN)
        if weather is None:
            weather = self.TEST_WEATHERS
        self.env.set_weather(weather)
        self.env.spawn_actors(spawn_dict=self.task_spec)
    def reset(self):
        # Recompute the per-episode time budget from the new route's length.
        # route_length [m] / avg_speed [km/h] * 3.6 yields a limit in seconds
        # (assuming avg_speed is expressed in km/h — TODO confirm).
        self.env.reset()
        self.is_out_of_lane = False
        self.has_collided = False
        self.route_length = self.env.route.distance_to_destination(self.env.destination)
        self.time_limit = self.route_length / self.avg_speed * 3.6
    def on_collision(self, actor):
        # NOTE(review): a sidewalk hit actively RESETS the flag to False, so it
        # can clear a previously recorded (real) collision before step() reads
        # it — presumably intended to ignore curb scrapes; verify.
        if 'sidewalk' in actor:
            self.has_collided = False
        else:
            self.has_collided = True
    def on_out_of_lane(self, **kwargs):
        # Currently unused: the OUT_OF_LANE event registration is commented out.
        self.is_out_of_lane = True
    def destination_reached(self, threshold=2.0) -> bool:
        """Tells whether or not the agent has reached the goal (destination) location"""
        return self.env.route.distance_to_destination(self.env.vehicle.get_location()) <= threshold
    def step(self, actions):
        # NOTE(review): class is marked "TODO: untested". The first
        # time-limit block below looks superseded by the second one and makes
        # `successful` receive TWO entries for the same episode (e.g. True
        # here plus destination_reached() below; or False twice on timeout),
        # which skews success_rate(). Confirm intent before relying on it.
        next_state, reward, terminal, info = self.env.step(actions)
        if self.env.elapsed_time() < self.time_limit:
            if terminal:
                self.successful.append(True)
        else:
            terminal = True
            self.successful.append(False)
        # benchmark's termination condition
        # terminal |= self.is_out_of_lane
        terminal |= self.has_collided
        if self.env.elapsed_time() > self.time_limit:
            terminal = True
            self.successful.append(False)
        elif terminal:
            self.successful.append(self.destination_reached())
        # reset flags
        self.is_out_of_lane = False
        self.has_collided = False
        return next_state, reward, terminal, info
    def success_rate(self) -> float:
        """Returns the success rate: num. of successful episodes"""
        # Percentage over all recorded outcomes; 0.0 before any episode ends.
        if len(self.successful) == 0:
            return 0.0
        return sum(self.successful) / len(self.successful) * 100.0
    def close(self):
        # Forward shutdown to the wrapped environment.
        self.env.close()
| [
"rl.environments.carla.sensors.SensorSpecs.rgb_camera",
"rl.environments.carla.tools.misc.compute_distance",
"rl.environments.carla.env_utils.random_spawn_point",
"pygame.event.get",
"rl.environments.carla.env_utils.magnitude",
"rl.environments.carla.env_utils.resize",
"time.strftime",
"carla.WorldSet... | [((32676, 32734), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0.0)', 'high': '(1.0)', 'shape': 'RoadOption.VOID.shape'}), '(low=0.0, high=1.0, shape=RoadOption.VOID.shape)\n', (32686, 32734), False, 'from gym import spaces\n'), ((2428, 2451), 'rl.environments.carla.env_utils.init_pygame', 'env_utils.init_pygame', ([], {}), '()\n', (2449, 2451), False, 'from rl.environments.carla import env_utils\n'), ((2506, 2555), 'rl.environments.carla.env_utils.get_client', 'env_utils.get_client', (['address', 'port', 'self.timeout'], {}), '(address, port, self.timeout)\n', (2526, 2555), False, 'from rl.environments.carla import env_utils\n'), ((2919, 3022), 'carla.WorldSettings', 'carla.WorldSettings', ([], {'no_rendering_mode': '(False)', 'synchronous_mode': '(False)', 'fixed_delta_seconds': '(1.0 / fps)'}), '(no_rendering_mode=False, synchronous_mode=False,\n fixed_delta_seconds=1.0 / fps)\n', (2938, 3022), False, 'import carla\n'), ((3579, 3601), 'carla.VehicleControl', 'carla.VehicleControl', ([], {}), '()\n', (3599, 3601), False, 'import carla\n'), ((7783, 7802), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (7800, 7802), False, 'import pygame\n'), ((8829, 8851), 'carla.VehicleControl', 'carla.VehicleControl', ([], {}), '()\n', (8849, 8851), False, 'import carla\n'), ((12441, 12456), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (12451, 12456), False, 'import time\n'), ((12530, 12548), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (12546, 12548), False, 'import pygame\n'), ((12806, 12824), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (12822, 12824), False, 'import pygame\n'), ((22596, 22614), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (22612, 22614), False, 'import pygame\n'), ((23195, 23219), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (23217, 23219), False, 'import pygame\n'), ((25350, 25384), 'rl.utils.makedir', 'rl_utils.makedir', (['traces_dir', 'name'], 
{}), '(traces_dir, name)\n', (25366, 25384), True, 'from rl import utils as rl_utils\n'), ((25702, 25824), 'rl.environments.carla.navigation.behavior_agent.BehaviorAgent', 'BehaviorAgent', ([], {'vehicle': 'self.env.vehicle', 'behavior': 'self.agent_behaviour', 'ignore_traffic_light': 'self.ignore_traffic_light'}), '(vehicle=self.env.vehicle, behavior=self.agent_behaviour,\n ignore_traffic_light=self.ignore_traffic_light)\n', (25715, 25824), False, 'from rl.environments.carla.navigation.behavior_agent import BehaviorAgent\n'), ((28548, 28604), 'rl.utils.space_to_spec', 'rl_utils.space_to_spec', ([], {'space': 'self.env.observation_space'}), '(space=self.env.observation_space)\n', (28570, 28604), True, 'from rl import utils as rl_utils\n'), ((28624, 28675), 'rl.utils.space_to_spec', 'rl_utils.space_to_spec', ([], {'space': 'self.env.action_space'}), '(space=self.env.action_space)\n', (28646, 28675), True, 'from rl import utils as rl_utils\n'), ((28696, 28745), 'rl.utils.space_to_spec', 'rl_utils.space_to_spec', ([], {'space': 'self.env.info_space'}), '(space=self.env.info_space)\n', (28718, 28745), True, 'from rl import utils as rl_utils\n'), ((30233, 30270), 'os.path.join', 'os.path.join', (['self.save_dir', 'filename'], {}), '(self.save_dir, filename)\n', (30245, 30270), False, 'import os\n'), ((30302, 30348), 'numpy.savez_compressed', 'np.savez_compressed', ([], {'file': 'trace_path'}), '(file=trace_path, **buffer)\n', (30321, 30348), True, 'import numpy as np\n'), ((33329, 33382), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0.0)', 'high': '(1.0)', 'shape': 'self.image_shape'}), '(low=0.0, high=1.0, shape=self.image_shape)\n', (33339, 33382), False, 'from gym import spaces\n'), ((34110, 34160), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.image_shape', 'dtype': 'np.float32'}), '(shape=self.image_shape, dtype=np.float32)\n', (34118, 34160), True, 'import numpy as np\n'), ((34335, 34518), 'gym.spaces.Dict', 'spaces.Dict', ([], {'road': 
"self.ROAD_FEATURES['space']", 'vehicle': "self.VEHICLE_FEATURES['space']", 'past_control': "self.CONTROL['space']", 'command': 'self.COMMAND_SPACE', 'image': 'self.image_space'}), "(road=self.ROAD_FEATURES['space'], vehicle=self.VEHICLE_FEATURES\n ['space'], past_control=self.CONTROL['space'], command=self.\n COMMAND_SPACE, image=self.image_space)\n", (34346, 34518), False, 'from gym import spaces\n'), ((38775, 38849), 'rl.environments.carla.env_utils.display_image', 'env_utils.display_image', (['self.display', 'image'], {'window_size': 'self.window_size'}), '(self.display, image, window_size=self.window_size)\n', (38798, 38849), False, 'from rl.environments.carla import env_utils\n'), ((38955, 38980), 'rl.environments.carla.tools.utils.speed', 'utils.speed', (['self.vehicle'], {}), '(self.vehicle)\n', (38966, 38980), False, 'from rl.environments.carla.tools import misc, utils\n'), ((43021, 43048), 'rl.environments.carla.env_utils.replace_nans', 'env_utils.replace_nans', (['obs'], {}), '(obs)\n', (43043, 43048), False, 'from rl.environments.carla import env_utils\n'), ((45222, 45263), 'rl.environments.carla.env_utils.magnitude', 'env_utils.magnitude', (['imu_sensor.gyroscope'], {}), '(imu_sensor.gyroscope)\n', (45241, 45263), False, 'from rl.environments.carla import env_utils\n'), ((46425, 46468), 'rl.environments.carla.tools.misc.compute_distance', 'misc.compute_distance', (['location1', 'location2'], {}), '(location1, location2)\n', (46446, 46468), False, 'from rl.environments.carla.tools import misc, utils\n'), ((50434, 50508), 'rl.environments.carla.env_utils.display_image', 'env_utils.display_image', (['self.display', 'image'], {'window_size': 'self.window_size'}), '(self.display, image, window_size=self.window_size)\n', (50457, 50508), False, 'from rl.environments.carla import env_utils\n'), ((51113, 51175), 'numpy.concatenate', 'np.concatenate', (['(left_image, front_image, right_image)'], {'axis': '(1)'}), '((left_image, front_image, right_image), 
axis=1)\n', (51127, 51175), True, 'import numpy as np\n'), ((7985, 8012), 'rl.environments.carla.env_utils.get_font', 'env_utils.get_font', ([], {'size': '(13)'}), '(size=13)\n', (8003, 8012), False, 'from rl.environments.carla import env_utils\n'), ((8040, 8074), 'rl.environments.carla.env_utils.get_display', 'env_utils.get_display', (['window_size'], {}), '(window_size)\n', (8061, 8074), False, 'from rl.environments.carla import env_utils\n'), ((9495, 9517), 'random.choice', 'random.choice', (['weather'], {}), '(weather)\n', (9508, 9517), False, 'import random\n'), ((17885, 17906), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (17904, 17906), False, 'import pygame\n'), ((18058, 18096), 'rl.environments.carla.env_utils.random_spawn_point', 'env_utils.random_spawn_point', (['self.map'], {}), '(self.map)\n', (18086, 18096), False, 'from rl.environments.carla import env_utils\n'), ((19197, 19269), 'rl.environments.carla.env_utils.random_blueprint', 'env_utils.random_blueprint', (['self.world'], {'actor_filter': 'self.vehicle_filter'}), '(self.world, actor_filter=self.vehicle_filter)\n', (19223, 19269), False, 'from rl.environments.carla import env_utils\n'), ((19312, 19369), 'rl.environments.carla.env_utils.spawn_actor', 'env_utils.spawn_actor', (['self.world', 'blueprint', 'self.origin'], {}), '(self.world, blueprint, self.origin)\n', (19333, 19369), False, 'from rl.environments.carla import env_utils\n'), ((19445, 19501), 'rl.environments.carla.tools.synchronous_mode.CARLASyncContext', 'CARLASyncContext', (['self.world', 'self.sensors'], {'fps': 'self.fps'}), '(self.world, self.sensors, fps=self.fps)\n', (19461, 19501), False, 'from rl.environments.carla.tools.synchronous_mode import CARLASyncContext\n'), ((30579, 30618), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape', 'dtype': 'np.float32'}), '(shape=shape, dtype=np.float32)\n', (30587, 30618), True, 'import numpy as np\n'), ((31682, 31724), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 
'(-1.0)', 'high': '(1.0)', 'shape': '(3,)'}), '(low=-1.0, high=1.0, shape=(3,))\n', (31692, 31724), False, 'from gym import spaces\n'), ((31734, 31769), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3)', 'dtype': 'np.float32'}), '(shape=3, dtype=np.float32)\n', (31742, 31769), True, 'import numpy as np\n'), ((31796, 31838), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1.0)', 'high': '(1.0)', 'shape': '(4,)'}), '(low=-1.0, high=1.0, shape=(4,))\n', (31806, 31838), False, 'from gym import spaces\n'), ((31848, 31883), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4)', 'dtype': 'np.float32'}), '(shape=4, dtype=np.float32)\n', (31856, 31883), True, 'import numpy as np\n'), ((32202, 32237), 'numpy.zeros', 'np.zeros', ([], {'shape': '(5)', 'dtype': 'np.float32'}), '(shape=5, dtype=np.float32)\n', (32210, 32237), True, 'import numpy as np\n'), ((32568, 32603), 'numpy.zeros', 'np.zeros', ([], {'shape': '(9)', 'dtype': 'np.float32'}), '(shape=9, dtype=np.float32)\n', (32576, 32603), True, 'import numpy as np\n'), ((32771, 32814), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0.0)', 'high': '(150.0)', 'shape': '(1,)'}), '(low=0.0, high=150.0, shape=(1,))\n', (32781, 32814), False, 'from gym import spaces\n'), ((32857, 32899), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0.0)', 'high': '(90.0)', 'shape': '(1,)'}), '(low=0.0, high=90.0, shape=(1,))\n', (32867, 32899), False, 'from gym import spaces\n'), ((32941, 32983), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1.0)', 'high': '(1.0)', 'shape': '(1,)'}), '(low=-1.0, high=1.0, shape=(1,))\n', (32951, 32983), False, 'from gym import spaces\n'), ((33040, 33084), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(1,)'}), '(low=0.0, high=np.inf, shape=(1,))\n', (33050, 33084), False, 'from gym import spaces\n'), ((34954, 34979), 'rl.environments.carla.tools.utils.speed', 'utils.speed', (['self.vehicle'], {}), '(self.vehicle)\n', (34965, 34979), False, 'from rl.environments.carla.tools import 
misc, utils\n'), ((36720, 36889), 'rl.environments.carla.sensors.SensorSpecs.rgb_camera', 'SensorSpecs.rgb_camera', ([], {'position': '"""on-top2"""', 'attachment_type': '"""Rigid"""', 'image_size_x': 'self.image_size[0]', 'image_size_y': 'self.image_size[1]', 'sensor_tick': 'self.tick_time'}), "(position='on-top2', attachment_type='Rigid',\n image_size_x=self.image_size[0], image_size_y=self.image_size[1],\n sensor_tick=self.tick_time)\n", (36742, 36889), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((37109, 37287), 'rl.environments.carla.sensors.SensorSpecs.segmentation_camera', 'SensorSpecs.segmentation_camera', ([], {'position': '"""on-top2"""', 'attachment_type': '"""Rigid"""', 'image_size_x': 'self.image_size[0]', 'image_size_y': 'self.image_size[1]', 'sensor_tick': 'self.tick_time'}), "(position='on-top2', attachment_type='Rigid',\n image_size_x=self.image_size[0], image_size_y=self.image_size[1],\n sensor_tick=self.tick_time)\n", (37140, 37287), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((37488, 37659), 'rl.environments.carla.sensors.SensorSpecs.depth_camera', 'SensorSpecs.depth_camera', ([], {'position': '"""on-top2"""', 'attachment_type': '"""Rigid"""', 'image_size_x': 'self.image_size[0]', 'image_size_y': 'self.image_size[1]', 'sensor_tick': 'self.tick_time'}), "(position='on-top2', attachment_type='Rigid',\n image_size_x=self.image_size[0], image_size_y=self.image_size[1],\n sensor_tick=self.tick_time)\n", (37512, 37659), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((40939, 40993), 'numpy.multiply', 'np.multiply', (["(1 - data['depth'] / 255.0)", "data['camera']"], {}), "(1 - data['depth'] / 255.0, data['camera'])\n", (40950, 40993), True, 'import numpy as np\n'), ((41061, 41100), 'rl.environments.carla.env_utils.cv2_grayscale', 'env_utils.cv2_grayscale', (["data['camera']"], {}), "(data['camera'])\n", (41084, 41100), False, 'from rl.environments.carla import 
env_utils\n'), ((42591, 42636), 'rl.environments.carla.env_utils.resize', 'env_utils.resize', (['image'], {'size': 'self.image_size'}), '(image, size=self.image_size)\n', (42607, 42636), False, 'from rl.environments.carla import env_utils\n'), ((45077, 45122), 'rl.environments.carla.env_utils.magnitude', 'env_utils.magnitude', (['imu_sensor.accelerometer'], {}), '(imu_sensor.accelerometer)\n', (45096, 45122), False, 'from rl.environments.carla import env_utils\n'), ((45125, 45156), 'rl.environments.carla.env_utils.sign', 'env_utils.sign', (['self.similarity'], {}), '(self.similarity)\n', (45139, 45156), False, 'from rl.environments.carla import env_utils\n'), ((51011, 51062), 'numpy.multiply', 'np.multiply', (["(1 - data['depth'] / 255.0)", 'front_image'], {}), "(1 - data['depth'] / 255.0, front_image)\n", (51022, 51062), True, 'import numpy as np\n'), ((51243, 51282), 'rl.environments.carla.env_utils.cv2_grayscale', 'env_utils.cv2_grayscale', (["data['camera']"], {}), "(data['camera'])\n", (51266, 51282), False, 'from rl.environments.carla import env_utils\n'), ((11979, 12008), 'carla.command.DestroyActor', 'carla.command.DestroyActor', (['x'], {}), '(x)\n', (12005, 12008), False, 'import carla\n'), ((12374, 12403), 'carla.command.DestroyActor', 'carla.command.DestroyActor', (['x'], {}), '(x)\n', (12400, 12403), False, 'import carla\n'), ((18167, 18194), 'random.choice', 'random.choice', (['self.origins'], {}), '(self.origins)\n', (18180, 18194), False, 'import random\n'), ((18495, 18570), 'rl.environments.carla.env_utils.random_spawn_point', 'env_utils.random_spawn_point', (['self.map'], {'different_from': 'self.origin.location'}), '(self.map, different_from=self.origin.location)\n', (18523, 18570), False, 'from rl.environments.carla import env_utils\n'), ((18660, 18692), 'random.choice', 'random.choice', (['self.destinations'], {}), '(self.destinations)\n', (18673, 18692), False, 'import random\n'), ((19555, 19577), 'carla.VehicleControl', 
'carla.VehicleControl', ([], {}), '()\n', (19575, 19577), False, 'import carla\n'), ((19617, 19652), 'carla.Vector3D', 'carla.Vector3D', ([], {'x': '(0.0)', 'y': '(0.0)', 'z': '(0.0)'}), '(x=0.0, y=0.0, z=0.0)\n', (19631, 19652), False, 'import carla\n'), ((28434, 28463), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_timesteps'}), '(shape=num_timesteps)\n', (28442, 28463), True, 'import numpy as np\n'), ((28497, 28526), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_timesteps'}), '(shape=num_timesteps)\n', (28505, 28526), True, 'import numpy as np\n'), ((29654, 29669), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (29662, 29669), True, 'import numpy as np\n'), ((30175, 30205), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (30188, 30205), False, 'import time\n'), ((37839, 37897), 'rl.environments.carla.sensors.SensorSpecs.collision_detector', 'SensorSpecs.collision_detector', ([], {'callback': 'self.on_collision'}), '(callback=self.on_collision)\n', (37869, 37897), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((37923, 37940), 'rl.environments.carla.sensors.SensorSpecs.imu', 'SensorSpecs.imu', ([], {}), '()\n', (37938, 37940), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((41686, 41711), 'rl.environments.carla.tools.utils.speed', 'utils.speed', (['self.vehicle'], {}), '(self.vehicle)\n', (41697, 41711), False, 'from rl.environments.carla.tools import misc, utils\n'), ((43198, 43223), 'rl.environments.carla.tools.utils.speed', 'utils.speed', (['self.vehicle'], {}), '(self.vehicle)\n', (43209, 43223), False, 'from rl.environments.carla.tools import misc, utils\n'), ((45281, 45306), 'rl.environments.carla.tools.utils.speed', 'utils.speed', (['self.vehicle'], {}), '(self.vehicle)\n', (45292, 45306), False, 'from rl.environments.carla.tools import misc, utils\n'), ((47125, 47162), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['([self.bins] * 3)'], {}), 
'([self.bins] * 3)\n', (47145, 47162), False, 'from gym import spaces\n'), ((47199, 47234), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3)', 'dtype': 'np.float32'}), '(shape=3, dtype=np.float32)\n', (47207, 47234), True, 'import numpy as np\n'), ((47565, 47593), 'numpy.asarray', 'np.asarray', (['discrete_actions'], {}), '(discrete_actions)\n', (47575, 47593), True, 'import numpy as np\n'), ((48648, 48706), 'rl.environments.carla.sensors.SensorSpecs.collision_detector', 'SensorSpecs.collision_detector', ([], {'callback': 'self.on_collision'}), '(callback=self.on_collision)\n', (48678, 48706), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((48732, 48749), 'rl.environments.carla.sensors.SensorSpecs.imu', 'SensorSpecs.imu', ([], {}), '()\n', (48747, 48749), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((48784, 48962), 'rl.environments.carla.sensors.SensorSpecs.segmentation_camera', 'SensorSpecs.segmentation_camera', ([], {'position': '"""on-top2"""', 'attachment_type': '"""Rigid"""', 'image_size_x': 'self.image_size[0]', 'image_size_y': 'self.image_size[1]', 'sensor_tick': 'self.tick_time'}), "(position='on-top2', attachment_type='Rigid',\n image_size_x=self.image_size[0], image_size_y=self.image_size[1],\n sensor_tick=self.tick_time)\n", (48815, 48962), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((49177, 49348), 'rl.environments.carla.sensors.SensorSpecs.depth_camera', 'SensorSpecs.depth_camera', ([], {'position': '"""on-top2"""', 'attachment_type': '"""Rigid"""', 'image_size_x': 'self.image_size[0]', 'image_size_y': 'self.image_size[1]', 'sensor_tick': 'self.tick_time'}), "(position='on-top2', attachment_type='Rigid',\n image_size_x=self.image_size[0], image_size_y=self.image_size[1],\n sensor_tick=self.tick_time)\n", (49201, 49348), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((49527, 49712), 
'rl.environments.carla.sensors.SensorSpecs.segmentation_camera', 'SensorSpecs.segmentation_camera', ([], {'position': '"""lateral-left"""', 'attachment_type': '"""Rigid"""', 'image_size_x': 'self.image_size[0]', 'image_size_y': 'self.image_size[1]', 'sensor_tick': 'self.tick_time'}), "(position='lateral-left', attachment_type=\n 'Rigid', image_size_x=self.image_size[0], image_size_y=self.image_size[\n 1], sensor_tick=self.tick_time)\n", (49558, 49712), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((49929, 50115), 'rl.environments.carla.sensors.SensorSpecs.segmentation_camera', 'SensorSpecs.segmentation_camera', ([], {'position': '"""lateral-right"""', 'attachment_type': '"""Rigid"""', 'image_size_x': 'self.image_size[0]', 'image_size_y': 'self.image_size[1]', 'sensor_tick': 'self.tick_time'}), "(position='lateral-right', attachment_type=\n 'Rigid', image_size_x=self.image_size[0], image_size_y=self.image_size[\n 1], sensor_tick=self.tick_time)\n", (49960, 50115), False, 'from rl.environments.carla.sensors import Sensor, SensorSpecs\n'), ((51852, 51889), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['([self.bins] * 3)'], {}), '([self.bins] * 3)\n', (51872, 51889), False, 'from gym import spaces\n'), ((51926, 51961), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3)', 'dtype': 'np.float32'}), '(shape=3, dtype=np.float32)\n', (51934, 51961), True, 'import numpy as np\n'), ((52375, 52403), 'numpy.asarray', 'np.asarray', (['discrete_actions'], {}), '(discrete_actions)\n', (52385, 52403), True, 'import numpy as np\n'), ((7374, 7446), 'rl.environments.carla.navigation.RoutePlanner', 'RoutePlanner', ([], {'map': 'self.map', 'sampling_resolution': 'self.sampling_resolution'}), '(map=self.map, sampling_resolution=self.sampling_resolution)\n', (7386, 7446), False, 'from rl.environments.carla.navigation import Route, RoutePlanner, RoadOption\n'), ((10192, 10211), 'time.sleep', 'time.sleep', (['timeout'], {}), '(timeout)\n', (10202, 10211), 
False, 'import time\n'), ((23109, 23130), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (23128, 23130), False, 'import pygame\n'), ((32022, 32066), 'numpy.array', 'np.array', (['[0.0, -np.inf, 0.0, -1.0, -np.inf]'], {}), '([0.0, -np.inf, 0.0, -1.0, -np.inf])\n', (32030, 32066), True, 'import numpy as np\n'), ((32118, 32163), 'numpy.array', 'np.array', (['[15.0, np.inf, np.inf, 1.0, np.inf]'], {}), '([15.0, np.inf, np.inf, 1.0, np.inf])\n', (32126, 32163), True, 'import numpy as np\n'), ((32405, 32425), 'numpy.zeros', 'np.zeros', ([], {'shape': '(9,)'}), '(shape=(9,))\n', (32413, 32425), True, 'import numpy as np\n'), ((32474, 32532), 'numpy.array', 'np.array', (['[1.0, 1.0, 15.0, 1.0, 4.0, 2.0, 10.0, 10.0, 3.0]'], {}), '([1.0, 1.0, 15.0, 1.0, 4.0, 2.0, 10.0, 10.0, 3.0])\n', (32482, 32532), True, 'import numpy as np\n'), ((42420, 42448), 'rl.environments.carla.navigation.RoadOption.VOID.to_one_hot', 'RoadOption.VOID.to_one_hot', ([], {}), '()\n', (42446, 42448), False, 'from rl.environments.carla.navigation import Route, RoutePlanner, RoadOption\n'), ((47948, 47978), 'numpy.asarray', 'np.asarray', (['continuous_actions'], {}), '(continuous_actions)\n', (47958, 47978), True, 'import numpy as np\n'), ((52852, 52882), 'numpy.asarray', 'np.asarray', (['continuous_actions'], {}), '(continuous_actions)\n', (52862, 52882), True, 'import numpy as np\n')] |
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from salina_cl.core import Model
from salina import instantiate_class
from salina_cl.agents.tools import weight_init
import numpy as np
class Baseline(Model):
    """
    Maintain one independent policy per task: a fresh policy/critic pair is
    instantiated and trained for every incoming task, and the trained policy
    is archived in ``self.policy_agents`` keyed by the task id.
    """
    def __init__(self, seed, params):
        super().__init__(seed, params)
        self.algorithm = instantiate_class(self.cfg.algorithm)
        self.policy_agent = None
        self.critic_agent = None
        self.policy_agents = {}

    def _create_agent(self, task, logger):
        # Build a brand-new policy/critic pair sized for this task.
        logger.message("Creating Policy and Critic Agents")
        in_dim = task.input_dimension()
        out_dim = task.output_dimension()
        p_cfg = self.cfg.policy_agent
        p_cfg.input_dimension = in_dim
        p_cfg.output_dimension = out_dim
        self.policy_agent = instantiate_class(p_cfg)
        c_cfg = self.cfg.critic_agent
        c_cfg.input_dimension = in_dim
        self.critic_agent = instantiate_class(c_cfg)

    def _train(self, task, logger):
        # Train from scratch on this task, then archive the resulting policy.
        self._create_agent(task, logger)
        self.critic_agent.apply(weight_init)
        env_agent = task.make()
        r, trained_policy, self.critic_agent = self.algorithm.run(
            self.policy_agent, self.critic_agent, env_agent, logger,
            self.seed, n_max_interactions=task.n_interactions())
        self.policy_agents[task.task_id()] = trained_policy
        return r

    def get_evaluation_agent(self, task_id):
        # Returns None when the task was never trained on (same as original).
        return self.policy_agents.get(task_id)

    def memory_size(self):
        # Total parameter count summed over every stored per-task policy.
        per_policy = [sum(p.numel() for p in agent.parameters())
                      for agent in self.policy_agents.values()]
        return {"n_parameters": np.sum(per_policy)}
class OneStep(Model):
    """
    A model that reuses a single policy across tasks: one algorithm trains the
    shared ``policy_agent`` and ``set_task()`` is invoked on it before every
    task after the first one.
    """
    def __init__(self, seed, params):
        super().__init__(seed, params)
        self.algorithm = instantiate_class(self.cfg.algorithm)
        self.policy_agent = None
        self.critic_agent = None
        self.batch_norm_agents = {}

    def _create_agent(self, task, logger):
        # First-task-only construction of the shared policy and a critic.
        logger.message("Creating Policy and Critic Agents")
        assert self.policy_agent is None
        in_dim = task.input_dimension()
        out_dim = task.output_dimension()
        p_cfg = self.cfg.policy_agent
        p_cfg.input_dimension = in_dim
        p_cfg.output_dimension = out_dim
        self.policy_agent = instantiate_class(p_cfg)
        c_cfg = self.cfg.critic_agent
        c_cfg.input_dimension = in_dim
        self.critic_agent = instantiate_class(c_cfg)

    def _train(self, task, logger):
        # Create the policy on the first task, otherwise signal a task switch.
        if self.policy_agent is None:
            self._create_agent(task, logger)
        else:
            self.policy_agent.set_task()
        self.critic_agent.apply(weight_init)
        env_agent = task.make()
        r, self.policy_agent, self.critic_agent = self.algorithm.run(
            self.policy_agent, self.critic_agent, env_agent, logger,
            self.seed, n_max_interactions=task.n_interactions())
        return r

    def memory_size(self):
        # Parameter count of the single shared policy.
        n_params = sum(p.numel() for p in self.policy_agent.parameters())
        return {"n_parameters": n_params}

    def get_evaluation_agent(self, task_id):
        # Point the shared policy at the requested task before evaluation.
        self.policy_agent.set_task(task_id)
        return self.policy_agent
class TwoSteps(Model):
    """
    A model that chains 2 algorithms on each task and splits the interaction
    budget between them: ``cfg.algorithm1.budget`` is the fraction handed to
    the first algorithm and the second one consumes whatever remains.
    """
    def __init__(self,seed,params):
        super().__init__(seed,params)
        self.algorithm1 = instantiate_class(self.cfg.algorithm1)
        self.algorithm2 = instantiate_class(self.cfg.algorithm2)
        self.policy_agent=None
        self.critic_agent=None
    def _create_policy_agent(self,task,logger):
        # Lazily build the shared policy on the first task.
        logger.message("Creating policy Agent")
        assert self.policy_agent is None
        input_dimension = task.input_dimension()
        output_dimension = task.output_dimension()
        policy_agent_cfg = self.cfg.policy_agent
        policy_agent_cfg.input_dimension = input_dimension
        policy_agent_cfg.output_dimension = output_dimension
        self.policy_agent = instantiate_class(policy_agent_cfg)
    def _create_critic_agent(self,task,logger):
        # A fresh critic is built for every task.
        logger.message("Creating Critic Agent")
        input_dimension = task.input_dimension()
        critic_agent_cfg = self.cfg.critic_agent
        critic_agent_cfg.input_dimension = input_dimension
        # NOTE(review): assumes the policy agent is indexable and its first
        # sub-agent exposes n_anchors — confirm against the policy_agent cfg.
        critic_agent_cfg.n_anchors = self.policy_agent[0].n_anchors
        self.critic_agent = instantiate_class(critic_agent_cfg)
    def _train(self,task,logger):
        if self.policy_agent is None:
            self._create_policy_agent(task,logger)
        else:
            logger.message("Setting new task")
            self.policy_agent.set_task()
        self._create_critic_agent(task,logger)
        env_agent = task.make()
        budget1 = task.n_interactions() * self.cfg.algorithm1.budget
        r1,self.policy_agent,self.critic_agent = self.algorithm1.run(self.policy_agent, self.critic_agent, env_agent,logger, self.seed, n_max_interactions = budget1)
        budget2 = task.n_interactions() - r1["n_interaction"]
        r2,self.policy_agent,self.critic_agent = self.algorithm2.run(self.policy_agent, self.critic_agent, env_agent,logger, self.seed, n_max_interactions = budget2)
        # BUG FIX: the original was
        #   {k1:v1+v2 for k1,v1,k2,v2 in zip(r1.items(),r2.items)}
        # which (a) never called r2.items and (b) unpacked zip's pairs of
        # (key, value) tuples into four names — a TypeError at runtime.
        # Sum the two result dicts key-wise instead (keys are assumed to
        # line up pairwise, as produced by the same algorithm interface).
        return {k1: v1 + v2 for (k1, v1), (k2, v2) in zip(r1.items(), r2.items())}
    def memory_size(self):
        # Parameter count of the single shared (task-adapted) policy.
        pytorch_total_params = sum(p.numel() for p in self.policy_agent.parameters())
        return {"n_parameters":pytorch_total_params}
    def get_evaluation_agent(self,task_id):
        self.policy_agent.set_task(task_id)
        return self.policy_agent
"salina.instantiate_class",
"numpy.sum"
] | [((536, 573), 'salina.instantiate_class', 'instantiate_class', (['self.cfg.algorithm'], {}), '(self.cfg.algorithm)\n', (553, 573), False, 'from salina import instantiate_class\n'), ((1071, 1106), 'salina.instantiate_class', 'instantiate_class', (['policy_agent_cfg'], {}), '(policy_agent_cfg)\n', (1088, 1106), False, 'from salina import instantiate_class\n'), ((1244, 1279), 'salina.instantiate_class', 'instantiate_class', (['critic_agent_cfg'], {}), '(critic_agent_cfg)\n', (1261, 1279), False, 'from salina import instantiate_class\n'), ((2301, 2338), 'salina.instantiate_class', 'instantiate_class', (['self.cfg.algorithm'], {}), '(self.cfg.algorithm)\n', (2318, 2338), False, 'from salina import instantiate_class\n'), ((2881, 2916), 'salina.instantiate_class', 'instantiate_class', (['policy_agent_cfg'], {}), '(policy_agent_cfg)\n', (2898, 2916), False, 'from salina import instantiate_class\n'), ((3054, 3089), 'salina.instantiate_class', 'instantiate_class', (['critic_agent_cfg'], {}), '(critic_agent_cfg)\n', (3071, 3089), False, 'from salina import instantiate_class\n'), ((4039, 4077), 'salina.instantiate_class', 'instantiate_class', (['self.cfg.algorithm1'], {}), '(self.cfg.algorithm1)\n', (4056, 4077), False, 'from salina import instantiate_class\n'), ((4104, 4142), 'salina.instantiate_class', 'instantiate_class', (['self.cfg.algorithm2'], {}), '(self.cfg.algorithm2)\n', (4121, 4142), False, 'from salina import instantiate_class\n'), ((4640, 4675), 'salina.instantiate_class', 'instantiate_class', (['policy_agent_cfg'], {}), '(policy_agent_cfg)\n', (4657, 4675), False, 'from salina import instantiate_class\n'), ((5026, 5061), 'salina.instantiate_class', 'instantiate_class', (['critic_agent_cfg'], {}), '(critic_agent_cfg)\n', (5043, 5061), False, 'from salina import instantiate_class\n'), ((2035, 2063), 'numpy.sum', 'np.sum', (['pytorch_total_params'], {}), '(pytorch_total_params)\n', (2041, 2063), True, 'import numpy as np\n')] |
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import sys
import typing
class Node ( object ) :
    """A graph vertex holding metadata, metrics and three typed edge lists.

    Edge lists are 'links' (undirected), 'ascendants' (inward) and
    'descendants' (outward); a node only knows its 1-level neighbourhood.
    """
    def __init__ ( self ) :
        self.id_ :str = ""
        self.label_ :str = ""
        self.description_ :str = ""
        self.level_ :int = 0            # nodes are myopic: level is set externally
        self.metrics_ :list = []
        self.links_ :list = []
        self.ascendants_ :list = []     # inward links, direct ascendency (1 level)
        self.descendants_ :list = []    # outward links, direct descendency (1 level)
        self.data_ :dict = {}           # free-form user-space payload

    def is_a_root ( self, n:int=1 ) -> bool :
        # Root-like: fewer than n inward edges.
        return len( self.ascendants_ ) < n

    def is_a_leaf( self, n:int=1 ) -> bool :
        # Leaf-like: fewer than n outward edges.
        return len( self.descendants_ ) < n

    def degree ( self , degree_type:str='descendants' )->int :
        # Myopic (1-level) degree of the requested edge list; -1 if unknown.
        edge_lists = { 'descendants' : self.descendants_ ,
                       'ascendants'  : self.ascendants_  ,
                       'links'       : self.links_       }
        if degree_type in edge_lists :
            return len( edge_lists[degree_type] )
        return -1

    def supplement ( self, n:super ) -> None :
        # Merge another node into this one: scalar metadata is overwritten,
        # list fields are concatenated, data dicts are merged (n_ wins).
        self.label_ = n.label_
        self.description_ = n.description_
        self.level_ = n.level_
        self.metrics_ = self.metrics_ + n.metrics_
        self.links_ = self.links_ + n.links_
        self.ascendants_ = self.ascendants_ + n.ascendants_
        self.descendants_ = self.descendants_ + n.descendants_
        merged = dict( self.data_ )
        merged.update( n.data_ )
        self.data_ = merged

    def assign_all ( self, identification : str ,
                     links : list ,
                     label : str = "" ,
                     description : str = "" ) -> object :
        # One-shot assignment of metadata plus the undirected edge list.
        self.set_id( identification )
        self.add_label( label )
        self.add_description( description )
        self.add_links( links , bClear=True )
        return self

    def set_level ( self,level:int ) -> None :
        self.level_ = level

    def set_metrics ( self , metrics:list ) -> None :
        # Append-only: new metrics are concatenated to the existing ones.
        self.metrics_ = self.metrics_ + list( metrics )

    def get_metrics ( self ) -> list :
        return self.metrics_

    def level ( self ) -> None :
        return self.level_

    def get_data ( self ) -> dict :
        return self.data_

    def overwrite_data ( self, data:dict ) -> None :
        self.data_ = data

    def set_id ( self, identification:str ) -> None :
        self.id_ = identification

    def add_label ( self, label : str ) -> None :
        self.label_ = label

    def add_description ( self, description : str ) -> None :
        self.description_ = description

    def identification ( self ) -> str :
        return self.id_

    def label ( self ) -> str :
        return self.label_

    def description ( self ) -> str :
        return self.description_

    def clear_links ( self , linktype:str )->None :
        # Reset the requested edge list; unknown types are silently ignored.
        attr = { 'links' : 'links_' ,
                 'ascendants' : 'ascendants_' ,
                 'descendants' : 'descendants_' }.get( linktype )
        if attr is not None :
            setattr( self , attr , [] )

    def add_link ( self, identification:str , bClear:bool = False , linktype:str = 'links' ) -> None :
        if bClear :
            self.clear_links( linktype )
        self.get_links( linktype ).append( identification )

    def add_links ( self, links:list[str], bClear:bool = False , linktype:str = 'links' ) -> None :
        if bClear :
            self.clear_links( linktype )
        self.get_links( linktype ).extend( links )

    def get_links ( self , linktype:str='links' ) -> list :
        # Fatal on an unknown edge type (same message and exit as before).
        if linktype not in ( 'links' , 'ascendants' , 'descendants' ) :
            print ( ' \n\n!!FATAL!!\t' + ', '.join([ 'links' , 'ascendants' , 'descendants' ]) \
                 + '\t ARE THE ONLY VALID EDGE TYPES (linktype)' )
            exit ( 1 )
        return { 'links' : self.links_ ,
                 'ascendants' : self.ascendants_ ,
                 'descendants' : self.descendants_ }[ linktype ]

    def show ( self ) -> None :
        # Human-readable dump: header, all three edge lists, then the data dict.
        text = "NODE [" + str(self.identification()) \
             + "," + self.label() + "] - " \
             + self.description() + "\nEDGES:"
        for kind in ( 'links' , 'ascendants' , 'descendants' ) :
            text += '\n[' + kind + '] : '
            text += ''.join( str(l) + '\t' for l in self.get_links( linktype=kind ) )
        for key , value in self.get_data().items() :
            text += '\n' + str(key) + '\t' + str(value)
        print ( text )
class ExtendedNode ( Node ) :
    """A biological-physics-flavoured neuron node (development stub).

    Extends Node with a region tag and two scalar parameters; the
    activation/potentiation/depression hooks are placeholders that
    currently do nothing.
    """
    def __init__ ( self ) :
        # BUG FIX: the base-class state (id_, label_, edge lists, data_, ...)
        # was never initialised because super().__init__() was not called;
        # any inherited method touching those attributes raised AttributeError.
        super().__init__()
        self.region_ :str = ""
        self.strength_ :float = 0
        self.reactivity_ :float = 0
    def activation_(self,stimulus:float) -> None :
        # Placeholder: no activation model implemented yet.
        return ( None )
    def pot_(self,stimulus:float) -> None :
        # Placeholder: potentiate (strengthen) — not implemented.
        return ( None )
    def dep_(self,stimulus:float) -> None :
        # Placeholder: depress (weaken) — not implemented.
        return ( None )
class NodeGraph ( Node ) :
# https://github.com/richardtjornhammar/RichTools/commit/c4b9daa78f2a311995d142b0e74fba7c3fdbed20#diff-0b990604c2ec9ebd6f320ebe92099d46e0ab8e854c6e787fac2f208409d112d3
def __init__( self ) :
self.root_id_ = ''
self.desc_ = "SUPPORTS DAGS :: NO STRUCTURE ASSERTION"
self.num_edges_ = 0
self.num_vertices_ = 0
self.graph_map_ = dict()
def keys ( self ) -> list :
return( self.graph_map_.keys() )
def values ( self ) -> list :
return( self.graph_map_.values() )
def items ( self ) -> list :
return( self.graph_map_.items() )
def list_roots ( self ) -> type(list(str())) :
roots = [] # BLOODY ROOTS
for name,node in self.items():
if node.is_a_root() :
roots.append( name )
return ( roots )
def get_node ( self, nid : str ) -> Node :
return ( self.graph_map_[nid] )
def set_root_id ( self, identification : str ) -> None :
self.root_id_ = identification
def get_root_id ( self ) -> str :
return ( self.root_id_ )
def add ( self, n : Node ) -> None :
if n.identification() in self.graph_map_ :
self.graph_map_[ n.identification() ].supplement( n )
else :
self.graph_map_[ n.identification() ] = n
if len ( self.graph_map_ ) == 1 :
self.set_root_id( n.identification() )
def get_dag ( self ) -> dict :
return ( self.graph_map_ )
def get_graph ( self ) -> dict :
return ( self.graph_map_ )
def show ( self ) -> None :
print ( self.desc_ )
for item in self.get_dag().items() :
print ( '\n' + item[0] + '::' )
item[1].show()
    def unpack ( self, seq ) :
        # Recursively flatten nested lists/tuples/sets (and dicts, via their
        # key/value pairs) into a flat stream of leaf values.
        if isinstance ( seq,(list,tuple,set)) :
            yield from ( x for y in seq for x in self.unpack(y) )
        elif isinstance ( seq , dict ):
            yield from ( x for item in seq.items() for y in item for x in self.unpack(y) )
        else :
            yield seq
    def ltup2lstr ( self, seq:tuple ) -> tuple :
        # Flatten a (possibly nested) tuple and stringify every leaf.
        # Generator; yields nothing when seq is not a tuple.
        if isinstance ( seq,(tuple) ) :
            yield from ( str(x) for y in seq for x in self.unpack(y) )
def assign_from_linkages_tiers( self , linkages:dict ) -> None :
results = sorted( [(v,k) for k,v in linkages.items()] )
self.assign_from_tuple_tiers ( results[-1][1] )
root_id_ = '.'.join(self.ltup2lstr(results[-1][1]))
self.set_root_id ( root_id_ )
graph_ = self.get_graph()
for item in results :
name_ = '.'.join( self.ltup2lstr( item[1] ) )
d_ = item[0]
node_ = graph_[name_]
level_= self.calculate_node_level( node_, stop_at = root_id_ )
node_.set_level(level_)
node_.set_metrics([d_])
node_.get_data()['distance']=d_
    def assign_from_tuple_tiers( self , nid:tuple , ascendant:str=None ) -> None :
        # Recursively materialise a nested-tuple hierarchy as graph nodes.
        # A tuple becomes a node whose id is the dot-joined flattened leaves;
        # each tuple element becomes a descendant (non-tuple leaves terminate
        # the recursion inside ltup2lstr/unpack without creating a node here).
        reformat_id = lambda id : '.'.join(list(self.ltup2lstr(id)))
        if isinstance ( nid,(tuple) ) :
            n = Node()
            cid = reformat_id(nid)
            n .set_id( cid )
            links = [ reformat_id(item) for item in nid ]
            n.add_links ( links , linktype = 'descendants' )
            if not ascendant is None :
                n.add_link(ascendant,linktype='ascendants')
                links = [*links,*[ascendant]]
            n.add_links ( links , linktype = 'links' )
            self.add( n )
            for item in nid :
                self.assign_from_tuple_tiers ( item , cid )
def complete_lineage ( self , identification : str ,
order:str = 'depth' ,
linktype:str = 'ascendants' ) -> dict :
# 'ascendants' , 'descendants'
root_id = identification
results = self.search( order=order , root_id=identification , linktype=linktype )
results['path'] = [ idx for idx in results['path'] if not idx==identification ]
return ( results )
def retrieve_leaves ( self , identification : str ,
order:str = 'depth' ,
linktype:str = 'descendants' ) -> dict :
root_id = identification
results = self.search ( order=order , root_id=identification ,
linktype=linktype, bOnlyLeafNodes=True )
results['path'] = [ idx for idx in results['path'] if not idx==identification ]
return ( results )
    def search ( self , order:str = 'breadth', root_id:str = None ,
                 linktype:str = 'links', stop_at:str = None ) -> dict :
        """Breadth- or depth-first traversal from root_id along linktype edges.

        Returns {'path': ids in visit order, 'order': order,
        'linktype': linktype}. When stop_at is given the traversal ends as
        soon as that id is expanded (it is still included in the path).
        Defaults to the graph root when root_id is None; exits the process
        on an unknown order.
        """
        path:list = list()
        visited:set = set()
        if root_id is None :
            root_id = self.get_root_id()
        S:list = [ root_id ]  # work list: FIFO queue (breadth) or LIFO stack (depth)
        if not order in set(['breadth','depth']) :
            print ( 'order MUST BE EITHER breadth XOR depth' )
            exit ( 1 )
        if order == 'breadth' :
            while ( len(S)>0 ) :
                v = S[0] ; S = S[1:]
                ncurrent:Node = self.get_node(v)
                visited = visited|set([v])
                path.append( ncurrent.identification() )
                #
                # ADDED STOP CRITERION FOR WHEN THE STOP NODE IS FOUND
                if not stop_at is None :
                    if stop_at == v :
                        S = []
                        break
                links = ncurrent.get_links(linktype)
                for w in links :
                    if not w in visited and len(w)>0:
                        S.append( w ) # QUE
        if order == 'depth' :
            while ( len(S)>0 ) :
                v = S[0] ; S = S[1:]
                if not v in visited and len(v)>0 :
                    visited = visited|set([v])
                    ncurrent:Node = self.get_node(v)
                    links = ncurrent.get_links(linktype)
                    for w in links :
                        if not w in visited and len(w)>0:
                            S = [*[w],*S] # STACK
                    path.append( ncurrent.identification() )
                    #
                    # ADDED STOP CRITERION FOR WHEN THE STOP NODE IS FOUND
                    if not stop_at is None :
                        if stop_at == v :
                            S = []
                            break
        return ( { 'path':path , 'order':order , 'linktype':linktype } )
    def locate_value ( self, criterion:tuple , root_id:str=None , bOnlyFirst:bool=True , bHelp:bool=False, R:list=None) -> list :
        # Depth-first search over descendants for nodes whose data field
        # criterion[0] satisfies the predicate criterion[1].
        # With bOnlyFirst=True the first matching id is returned directly
        # (a string); otherwise all matches are accumulated into R (a list).
        # R is an internal accumulator for the recursion — leave it as None.
        if bHelp :
            print ( "HELP: GRAPH.locate_value( criterion = ( 'distance', lambda x:x==0 ) , root_id = '1.3.2.4.0' , bOnlyFirst=True )" )
            exit ( 1 )
        if R is None :
            R = list()
        id = root_id
        if root_id is None :
            id = self.get_root_id()
        if len(id) > 0 :
            bCheck = criterion[1]( self.get_graph()[id].get_data()[criterion[0]] )
            if bCheck :
                if bOnlyFirst :
                    return ( id )
                else :
                    R.append( id )
            if not bCheck or not bOnlyFirst :
                # Recurse into children; with bOnlyFirst, bubble up the first hit.
                for child in self.get_graph()[id].get_links('descendants') :
                    result = self.locate_value( criterion,child,bOnlyFirst,bHelp,R )
                    if not result is None :
                        if bOnlyFirst :
                            return ( result )
        return ( R )
    def connectivity ( self, distm:np.array , alpha:float , n_connections:int=1 , bOld:bool=True ) -> list :
        """Cluster a square distance matrix at threshold alpha.

        Returns a list of sets of row indices: rows within distance alpha
        of each other (sharing at least n_connections members with an
        existing cluster) end up in the same set. With bOld=True the
        graphtastic.clustering implementation (the 2009 "water clustering"
        algorithm) is used; otherwise a simple in-place merge is performed.
        """
        # SAME RESULT AS THE CONNECTIVITY CODE IN THE CLUSTERING MODULE
        # ( src/impetuous/clustering.py , `connectedness` ) AND IN
        # https://github.com/richardtjornhammar/RichTools/blob/master/src/cluster.cc
        # ADDED TO RICHTOOLS HERE: https://github.com/richardtjornhammar/RichTools/commit/74b35df9c623bf03570707a24eafe828f461ed90#diff-25a6634263c1b1f6fc4697a04e2b9904ea4b042a89af59dc93ec1f5d44848a26
        # THE JIT connectivity IMPLEMENTATION MAY REPLACE THIS IN THE FUTURE.
        #
        if len ( distm.shape ) < 2 :
            print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
            exit(1)
        #
        if bOld : # WATER CLUSTERING ALGO FROM 2009
            from graphtastic.clustering import connectivity as connections
            results = connections ( distm , alpha )
            # results[0]: per-cluster info; results[1]: (cluster, member) pairs
            L = [set() for i in range(len(results[0]))]
            for c in results[1] :
                L[c[0]] = L[c[0]]|set([c[1]])
            return ( L )
        #
        def b2i ( a:list ) -> list :
            # Boolean mask -> indices of the True entries.
            return ( [ i for b,i in zip(a,range(len(a))) if b ] )
        def f2i ( a:list,alf:float ) -> list :
            # Row of distances -> indices within threshold alf.
            return ( b2i( a<=alf ) )
        L = []
        for a in distm :
            bAdd = True
            ids = set( f2i(a,alpha) )
            # Merge into the first cluster sharing enough members.
            for i in range(len(L)) :
                if len( L[i]&ids ) >= n_connections :
                    L[i] = L[i] | ids
                    bAdd = False
                    break
            if bAdd and len(ids) >= n_connections :
                L .append( ids )
        return ( L )
    def linkages_to_pclist ( self , links:dict ) -> list :
        # Turn {cluster-id: metric} linkages into (parent, child) pairs by
        # matching each smaller-metric id against a containing larger one
        # (substring containment of the dot-joined ids).
        # NOTE(review): if a child id is never contained in any later parent
        # the while-loop does not shrink bottoms_up — confirm inputs always
        # form a proper containment hierarchy.
        bottoms_up = sorted([ (v,k) for k,v in links.items()])
        PClist = []
        while ( len(bottoms_up)>1 ) :
            child = bottoms_up[0]
            for parent in bottoms_up[1:] :
                if child[1] in parent[1] :
                    parent_child = [ (parent[1], child[1]) ]
                    PClist = [ *PClist, *parent_child ]
                    bottoms_up.remove( child )
                    break
        return ( PClist )
    def linkages_to_graph_dag( self, links:dict ) -> None :
        # Populate the graph from linkages. Tuple keys are delegated to
        # assign_from_linkages_tiers; string keys go through the pclist path
        # where 'analyte ids' are recovered from the dot-separated id parts.
        keys = list(links.keys())
        if isinstance ( keys[0],(tuple) ) and isinstance ( keys[-1],(tuple) ) :
            self.assign_from_linkages_tiers ( links )
            return
        PClist = self.linkages_to_pclist ( links )
        for pc in PClist :
            self.add_ascendant_descendant ( pc[0], pc[1] )
            self.get_graph()[pc[0]].get_data()['analyte ids'] = [int(a) for a in pc[0].split('.')]
            self.get_graph()[pc[1]].get_data()['analyte ids'] = [int(a) for a in pc[1].split('.')]
        for k,v in links.items():
            self.get_graph()[k].set_metrics([v])
        root_ = self.list_roots()[0]
        self.set_root_id( root_ )
    def distance_matrix_to_pclist ( self , distm:np.array ,
                                    cluster_connections:int = 1 ,
                                    hierarchy_connections:int = 1 ,
                                    bNonRedundant:bool = True ) -> list :
        """Sweep all distinct distances and link clusters across thresholds.

        For every threshold r (in increasing order) the clusters at r are
        compared with the clusters at the previous threshold; each pair
        sharing at least hierarchy_connections members yields a
        (parent, child, r) triple. With bNonRedundant=True identical
        parent/child clusters (empty symmetric difference) are skipped.
        """
        logic = lambda p,c : len(p&c) >= hierarchy_connections and len(p^c)>0
        if not bNonRedundant :
            logic = lambda p,c : len(p&c) >= hierarchy_connections
        #
        # All distinct distance values, ascending — one clustering per value.
        R = sorted( list(set( distm.reshape(-1) ) ) )
        prev_clusters = []
        PClist = []
        for r in R :
            present_clusters = self.connectivity ( distm , r , cluster_connections )
            parent_child = [ (p,c,r) for c in prev_clusters for p in present_clusters \
                    if logic(p,c) ]
            prev_clusters = present_clusters
            PClist = [ *PClist, *parent_child ]
        return ( PClist )
    def distance_matrix_to_absolute_coordinates ( self , D:np.array , bSquared:bool = False, n_dimensions:int=2 ) -> np.array :
        """Classical MDS-style embedding of a distance matrix into coordinates.

        Double-centres the (squared) distances against the last point and
        keeps the top n_dimensions singular directions. Returns an
        (n_points, n_dimensions)-shaped array. Pass bSquared=True when D
        already holds squared distances.
        """
        # SAME AS IN THE IMPETUOUS cluster.py EXCEPT THE RETURN IS TRANSPOSED
        # AND distg.m IN THE RICHTOOLS REPO; C++ VERSION:
        # https://github.com/richardtjornhammar/RichTools/commit/be0c4dfa8f61915b0701561e39ca906a9a2e0bae
        if not bSquared :
            D = D**2.
        DIM = n_dimensions
        DIJ = D*0.
        M = len(D)
        # Gram-matrix entries relative to the last point as origin.
        for i in range(M) :
            for j in range(M) :
                DIJ[i,j] = 0.5* (D[i,-1]+D[j,-1]-D[i,j])
        D = DIJ
        U,S,Vt = np.linalg.svd ( D , full_matrices = True )
        S[DIM:] *= 0.   # keep only the leading DIM singular values
        Z = np.diag(S**0.5)[:,:DIM]
        xr = np.dot( Z.T,Vt )
        return ( xr.T )
    def calculate_adjacency_matrix( self , bSparse:bool = False ,
                                    analyte_identifier:str = None ,
                                    analyte_adjacency_level:int = None,
                                    linktypes:list[str] = [ 'ascendants' , 'descendants' ] ) -> dict :
        """Build a directed adjacency matrix over nodes or over analytes.

        Default: one row/column per node id, with amat[i,j]=1 when i has
        descendant j (and the mirrored entry for ascendants). When both
        analyte_identifier and analyte_adjacency_level are given, the matrix
        is instead built over the analytes stored in the root node's data,
        restricted to links touching that level (or all levels when the
        level is negative). With bSparse=True a dict keyed by (i,j) tuples
        replaces the dense numpy array. The result (matrix, index names,
        sparsity flag) is cached on self.adjacency_matrix_ and returned.

        NOTE(review): linktypes has a mutable default list; it is only read
        here, never mutated, so this is harmless — but callers should not
        mutate the returned default.
        """
        # For ALL analytes a simpler route exists given a distance matrix:
        # adj_matrix = distance_matrix<=level_cutoff - np.eye(len(distance_matrix))
        # Symmetry is NOT enforced here.
        graph = self.get_graph()
        if analyte_identifier is None or analyte_adjacency_level is None :
            names = list(self.keys())
            Nn = len(names)
            lookup = {n:i for n,i in zip(names,range(Nn)) }
            if bSparse :
                amat = dict()
            else :
                amat = np.zeros(Nn*Nn).reshape(Nn,Nn)
            for name in names :
                for linktype in linktypes:
                    for link in graph[name].get_links(linktype) :
                        i = lookup[name]
                        j = lookup[link]
                        if i == j :
                            continue
                        if linktype == 'ascendants':
                            amat[j,i] = 1
                        if linktype == 'descendants':
                            amat[i,j] = 1
                        if linktype == 'links':
                            amat[j,i] = 1
                            amat[i,j] = 1
        else :
            level = analyte_adjacency_level
            root_data = graph[ self.get_root_id() ].get_data()
            if analyte_identifier in root_data :
                names = root_data[ analyte_identifier ]
            else :
                print ( 'ERROR COULD NOT FIND GLOBAL IDENTIFIER INFORMATION:' , analyte_identifier )
                exit (1)
            Nn = len( names )
            nnames = list(self.keys())
            lookup = { a:i for a,i in zip(names,range(Nn)) }
            if bSparse :
                amat = dict()
            else :
                amat = np.zeros(Nn*Nn).reshape(Nn,Nn)
            for name in nnames :
                for linktype in linktypes :
                    for link in graph[name].get_links(linktype) :
                        i_names = graph[ name ].get_data()[ analyte_identifier ]
                        j_names = graph[ link ].get_data()[ analyte_identifier ]
                        # Only connect analytes when either endpoint sits at
                        # the requested level (negative level = all levels).
                        if (graph[ link ].level()==level or graph[name].level()==level) or level<0 :
                            for namei in i_names :
                                for namej in j_names :
                                    i = lookup [ namei ]
                                    j = lookup [ namej ]
                                    if i == j :
                                        continue
                                    if linktype == 'ascendants':
                                        amat[j,i] = 1
                                    if linktype == 'descendants':
                                        amat[i,j] = 1
                                    if linktype == 'links':
                                        amat[j,i] = 1
                                        amat[i,j] = 1
        self.adjacency_matrix_ = { 'adjacency matrix':amat , 'index names':names , 'sparsity':bSparse }
        return ( self.adjacency_matrix_ )
def retrieve_adjacency_matrix( self , bForceRecalculate:bool=False ) -> dict :
if self.adjacency_matrix_ is None or ( not self.adjacency_matrix_ is None and bForceRecalculate ) :
amat_d = self.calculate_adjacency_matrix()
self.adjacency_matrix_ = amat_d
else :
amat_d = self.adjacency_matrix_
return ( amat_d )
    def distance_matrix_to_graph_dag ( self , distm:np.array , n_:int=1 , bVerbose:bool=False , names:list=None ) -> None :
        """Construct the hierarchy DAG implied by a square distance matrix.

        Indices are mapped through names (when given and size-matching),
        parent/child cluster pairs come from distance_matrix_to_pclist, and
        every node stores its member 'analyte ids'. Any node with fewer than
        n_ ascendants is (re)assigned as the root. Exits the process when
        distm is not two-dimensional.
        """
        # SIMILAR TO THE ROUTINES IN hierarchical.py IN THIS IMPETUOUS REPO
        if len ( distm.shape ) < 2 :
            print ( 'PLEASE SUBMIT A SQUARE DISTANCE MATRIX' )
            exit(1)
        lookup = dict()
        m_ = len(distm)
        for I in range(m_) :
            lookup[I] = I
        if not names is None :
            if len ( names ) == m_ :
                for I,N in zip(range(len(names)),names):
                    lookup[I] = N
        pclist = self.distance_matrix_to_pclist( distm )
        for pc_ in pclist :
            # Parent/child cluster member sets, mapped to display names.
            lpc0 = [ lookup[l] for l in list(pc_[0]) ]
            lpc1 = [ lookup[l] for l in list(pc_[1]) ]
            asc = '.'.join([str(l) for l in lpc0])
            des = '.'.join([str(l) for l in lpc1])
            asc_met = pc_[2]
            self.add_ascendant_descendant(asc,des)
            # Keep the first (smallest-threshold) metric seen for a parent.
            if len( self.get_graph()[asc].get_metrics() ) < 1 :
                self.get_graph()[asc].set_metrics([asc_met])
            self.get_graph()[asc].get_data()['analyte ids'] = lpc0
            self.get_graph()[des].get_data()['analyte ids'] = lpc1
        for key in self.keys() :
            if self.get_graph()[key].is_a_root(n_):
                self.set_root_id ( key )
        if bVerbose :
            self.show()
            print ( self.get_root_id() )
            self.get_graph()[self.get_root_id()].show()
    def graph_analytes_to_approximate_distance_matrix ( self ,
                analyte_identifier:str = 'analyte ids',
                alpha:float = 1. ) -> np.array :
        """Approximate analyte distances from cluster co-membership counts.

        Counts, for every node, how often each analyte pair co-occurs in the
        node's analyte list; 1/count (centred and zero-diagonal) serves as an
        approximate distance. Returns (matrix, analyte -> index lookup).
        NOTE(review): the alpha parameter is currently unused, and the return
        is a 2-tuple despite the np.array annotation.
        """
        root_data = self.get_graph()[ self.get_root_id() ].get_data()
        if analyte_identifier in root_data:
            all_analytes = root_data[ analyte_identifier ]
        else:
            print ( 'ERROR COULD NOT FIND GLOBAL IDENTIFIER INFORMATION:' , analyte_identifier )
            exit (1)
        m_ = len( all_analytes )
        lookup = { a:i for a,i in zip(all_analytes,range(m_)) }
        CM = np.ones(m_*m_).reshape(m_,m_)
        for item in self.get_graph().items() :
            item_data = item[1].get_data()
            if analyte_identifier in item_data : # IMPROVE SPEED HERE
                for q in item_data[analyte_identifier] :
                    for p in item_data[analyte_identifier] : # STOP
                        CM[lookup[p],lookup[q]]+=1
        #
        # CONSTRUCT APPROXIMATION: invert counts, centre by the mean
        # self-distance and zero the diagonal. (Levels are not used here.)
        approximate_distm = 1./CM - np.mean(np.diag(1./CM))
        approximate_distm *= 1-np.eye(m_)
        return ( np.abs(approximate_distm) , lookup )
def assign_graph_from_adjacency_matrix ( self , adj_matrix:np.array , names:list[str] = None ) -> None :
bAssignIDs = False
if len ( adj_matrix ) == len ( set(names) ) :
bAssignIDs = True
def set_name(i:int,names:list[str],bSet:bool)->str:
if bSet:
return(names[i])
else:
return(str(i))
for i in range(len(adj_matrix)) :
n = Node()
name = set_name(i,names,bAssignIDs)
n.set_id(name)
desc = []
ascs = []
d_adjv = adj_matrix[i,:]
a_adjv = adj_matrix[:,i]
for j in range(len(d_adjv)) :
if i == j :
continue
if d_adjv[j] == 1 :
desc.append(set_name(j,names,bAssignIDs))
if a_adjv[j] == 1 :
ascs.append(set_name(j,names,bAssignIDs))
n.add_links(list(set(desc)),linktype='descendants' )
n.add_links(list(set(ascs)),linktype='ascendants' )
n.add_links(list(set(desc)|set(ascs)),linktype='links' )
self.add(n)
def add_ascendant_descendant ( self, ascendant:str, descendant:str ) -> None :
n = Node()
n.set_id(ascendant)
n.add_label("")
n.add_description("")
n.add_links([descendant],linktype='links' )
n.add_links([descendant],linktype='descendants' )
m = Node()
m.set_id(descendant)
m.add_label("")
m.add_description("")
m.add_links([ascendant],linktype='links' )
m.add_links([ascendant],linktype='ascendants' )
self.add(n)
self.add(m)
def generate_ascendants_descendants_lookup ( self ) -> (type(list(str())),type(list(str()))) :
all_names = self.keys()
descendants = [ ( idx , set( self.complete_lineage( idx,linktype='descendants')['path'] ) ) for idx in all_names ]
ancestors = [ ( idx , set( self.complete_lineage( idx,linktype='ascendants' )['path'] ) ) for idx in all_names ]
return ( ancestors , descendants )
def degrees ( self , bMyopic=True ) -> dict :
#
# DELIVER ALL NODE DEGREES OR DIRECTED NODE GRAPH DEGREES
all_names = self.keys()
graph = self.get_graph()
degree_d = dict()
for idx in all_names :
node_ = graph[ idx ]
n_desc = node_.degree('descendants')
n_asc = node_.degree('ascendants')
n_links = node_.degree('links')
degree_d[idx] = {'descendants':n_desc,'ascendants':n_asc,'links':n_links} # LOCAL MYOPIC
#
# IF DIRECTED
if not bMyopic :
all_descendants = set( self.complete_lineage( idx,linktype='descendants')['path'] )
all_ancestors = set( self.complete_lineage( idx,linktype='ascendants' )['path'] )
degree_d[idx]['all ascendants'] = [len(all_ancestors) , all_ancestors ]
degree_d[idx]['all descendants'] = [len(all_descendants) , all_descendants ]
return ( degree_d )
def ascendant_descendant_file_to_dag ( self, relationship_file:str = './PCLIST.txt' ,
i_a:int = 0 , i_d:int = 1 ,
identifier:str = None , sep:str = '\t' ) -> (type(list(str())),type(list(str()))) :
with open ( relationship_file,'r' ) as input :
for line in input :
if not identifier is None :
if not identifier in line :
continue
lsp = line.replace('\n','').split( sep )
ascendant = lsp[i_a].replace('\n','')
descendant = lsp[i_d].replace('\n','')
self.add_ascendant_descendant( ascendant , descendant )
ancestors , descendants = self.generate_ascendants_descendants_lookup()
return ( ancestors , descendants )
def calculate_node_level( self, node:Node , stop_at:str = None , order:str='depth' ) -> None :
note__ = """
SEARCHING FOR ASCENDANTS WILL YIELD A
DIRECT PATH IF A DEPTH SEARCH IS EMPLOYED.
IF THERE ARE SPLITS ONE MUST BREAK THE SEARCH.
SPLITS SHOULD NOT BE PRESENT IN ASCENDING DAG
SEARCHES. SPLIT KILLING IS USED IF depth AND
stop_at ARE SPECIFIED. THIS CORRESPONDS TO
DIRECT LINEAGE INSTEAD OF COMPLETE.
"""
level = len( self.search( root_id=node.identification(), linktype='ascendants', order=order, stop_at=stop_at )['path'] ) - 1
node.set_level( level )
    def hprint ( self, node:Node, visited:set,
                 I:int = 0, outp:str = "" , linktype:str = "descendants",
                 bCalcLevel = True ) -> (str,int) :
        # Recursive hierarchical JSON-ish printer. Emits one object per node
        # with source/id/level/description plus every data entry, nesting
        # unvisited linktype neighbours under "children". Returns the text
        # accumulated so far and the running node counter I.
        # NOTE(review): descriptions/data values are interpolated verbatim,
        # so the output is valid JSON only when they already are.
        I = I+1
        if bCalcLevel :
            self.calculate_node_level( node, stop_at = self.get_root_id() )
        head_string = "{\"source\": \"" + node.identification() + "\", \"id\": " + str(I)
        head_string = head_string + ", \"level\": " + str(node.level())
        desc_ = str(node.description())
        if len( desc_ ) == 0 :
            desc_ = "\"\""
        head_string = head_string + ", \"description\": " + desc_
        dat = node.get_data().items()
        if len( dat )>0 :
            for k,v in dat :
                sv = str(v)
                if len(sv) == 0 :
                    sv = "\"\""
                head_string = head_string + ", \"" + str(k) + "\": " + sv
        desc_h_str = ", \"children\": ["
        desc_t_str = "]"
        tail_string = "}"
        visited = visited|set( [node.identification()] )
        outp = outp + head_string
        links = node.get_links(linktype)
        for w in links :
            if not w in visited and len(w)>0 :
                outp = outp + desc_h_str
                outp,I = self.hprint ( self.get_node(w), visited, I, outp, linktype )
                outp = outp + desc_t_str
        outp = outp + tail_string
        return ( outp,I )
def rename_data_field_values ( self, lookup:dict = None , field_name:str = 'analyte ids' ) -> None :
if lookup is None :
return
for item in self.items() :
igdfnl = item[1].get_data()[field_name]
self.get_graph()[item[0]].get_data()[field_name] =\
[ n if not n in lookup else lookup[n] for n in igdfnl ]
def write_json ( self , jsonfile:str = None, bCalcLevel:bool = True ,
linktype:str = 'descendants', root_id:str = None ) -> str :
I:int = 1
if root_id is None :
root_id = self.get_root_id()
v = root_id
node:Node = self.get_node(v)
visited = set()
json_data_txt,I = self.hprint( node, visited,
linktype = linktype,
bCalcLevel = bCalcLevel )
if not jsonfile is None :
of_ = open(jsonfile,'w')
print ( json_data_txt,file=of_ )
return ( json_data_txt )
def write_gmt ( self, gmtfile:str = None ) -> str :
gmt_data_txt = "#GROUPNAME\tPARENT:DESC:LVL:MET\tANALYTE1\tANALYTE2\t...\n"
for item in self.items() :
asc = ':'.join(item[1].get_links('ascendants'))
gmt_line = item[0] + '\t' + asc + ':' + str(item[1].description()) + \
':' + str(item[1].level()) + ':' + \
' '.join([str(i) for i in item[1].get_metrics()]) + '\t' + \
'\t'.join([str(i) for i in item[1].get_data()['analyte ids']]) + '\n'
gmt_data_txt = gmt_data_txt + gmt_line
if not gmtfile is None:
of_ = open ( gmtfile , 'w' )
print ( gmt_data_txt , file=of_)
return ( gmt_data_txt )
def collect_linkages ( self ) -> dict :
#
links = dict()
for item in self.items() :
if True :
a_ = 0
if len( str(item[1].level()) )>0 :
a_ = item[1].level()
mets = item[1].get_metrics()
if len( mets ) > 0 :
a_ = mets[0]
links[item[0]] = a_
return ( links )
def write_linkages ( self , linkfile:str=None ) -> str :
#
links_ = [ "\"cluster\":"+str(k) + ", \"metric\":" + str(v) for k,v in self.collect_linkages().items() ]
linkages_txt = '['+']\n['.join(links_)+'] '
#
# DEV
if not linkfile is None:
of_ = open(linkfile,'w')
print ( linkages_txt ,file=of_)
return ( linkages_txt )
def ascendant_descendant_to_dag ( relationship_file:str = './PCLIST.txt' ,
                i_a:int = 0 , i_d:int = 1 ,
                identifier:str = None , sep:str = '\t' ) -> NodeGraph :
    """
    Build a NodeGraph DAG from an ascendant/descendant relationship file.

    `i_a` / `i_d` are the column indices of the ascendant and descendant ids,
    `sep` the column separator.
    """
    # NOTE(review): the return annotation says NodeGraph, but a 3-tuple
    # (graph, ancestors, descendants) is returned — confirm intended contract.
    RichTree = NodeGraph()
    ancestors , descendants = RichTree.ascendant_descendant_file_to_dag( relationship_file = relationship_file ,
                        i_a = i_a , i_d = i_d ,identifier = identifier , sep = sep )
    return ( RichTree , ancestors , descendants )
def write_tree( tree:NodeGraph , outfile='tree.json', bVerbose=True ):
    """
    Convenience wrapper: serialise `tree` to JSON at `outfile` via
    NodeGraph.write_json and return the JSON text.
    """
    if bVerbose:
        print ( 'YOU CAN CALL THE NodeGraph METHOD tree.write_json() FUNCTION DIRECTLY' )
    return tree.write_json( outfile )
def value_equalisation( X:np.array , method:str='average' ) -> np.array :
    """
    Rank-transform X into (0, 1]: (rank - 0.5) / number-of-unique-values,
    using scipy's rankdata with the given tie-breaking `method`.
    """
    n_unique = len(set(X))
    ranks = rankdata( X , method=method )
    return ( (ranks - 0.5) / n_unique )
| [
"numpy.abs",
"numpy.eye",
"numpy.zeros",
"numpy.ones",
"numpy.linalg.svd",
"numpy.dot",
"graphtastic.clustering.connectivity",
"numpy.diag"
] | [((18720, 18756), 'numpy.linalg.svd', 'np.linalg.svd', (['D'], {'full_matrices': '(True)'}), '(D, full_matrices=True)\n', (18733, 18756), True, 'import numpy as np\n'), ((18834, 18849), 'numpy.dot', 'np.dot', (['Z.T', 'Vt'], {}), '(Z.T, Vt)\n', (18840, 18849), True, 'import numpy as np\n'), ((14936, 14961), 'graphtastic.clustering.connectivity', 'connections', (['distm', 'alpha'], {}), '(distm, alpha)\n', (14947, 14961), True, 'from graphtastic.clustering import connectivity as connections\n'), ((18797, 18814), 'numpy.diag', 'np.diag', (['(S ** 0.5)'], {}), '(S ** 0.5)\n', (18804, 18814), True, 'import numpy as np\n'), ((25633, 25643), 'numpy.eye', 'np.eye', (['m_'], {}), '(m_)\n', (25639, 25643), True, 'import numpy as np\n'), ((25661, 25686), 'numpy.abs', 'np.abs', (['approximate_distm'], {}), '(approximate_distm)\n', (25667, 25686), True, 'import numpy as np\n'), ((25087, 25103), 'numpy.ones', 'np.ones', (['(m_ * m_)'], {}), '(m_ * m_)\n', (25094, 25103), True, 'import numpy as np\n'), ((25586, 25603), 'numpy.diag', 'np.diag', (['(1.0 / CM)'], {}), '(1.0 / CM)\n', (25593, 25603), True, 'import numpy as np\n'), ((20006, 20023), 'numpy.zeros', 'np.zeros', (['(Nn * Nn)'], {}), '(Nn * Nn)\n', (20014, 20023), True, 'import numpy as np\n'), ((21255, 21272), 'numpy.zeros', 'np.zeros', (['(Nn * Nn)'], {}), '(Nn * Nn)\n', (21263, 21272), True, 'import numpy as np\n')] |
import numpy as np
from tqdm import trange
def simulate(runs, time, bandits):
    """
    Run every bandit for `runs` independent episodes of `time` steps each.

    Each bandit must provide reset(), act(), step(action) -> reward and a
    `best_action` attribute. Returns (mean_best_action_counts, mean_rewards),
    both of shape (len(bandits), time), averaged over the runs axis.
    """
    rewards = np.zeros((len(bandits), runs, time))
    best_action_counts = np.zeros(rewards.shape)
    for i, bandit in enumerate(bandits):
        for r in trange(runs):
            bandit.reset()
            for t in range(time):
                action = bandit.act()
                reward = bandit.step(action)
                rewards[i, r, t] = reward
                # Record whether the chosen action was the bandit's optimal one.
                if action == bandit.best_action:
                    best_action_counts[i, r, t] = 1
    mean_best_action_counts = best_action_counts.mean(axis=1)
    mean_rewards = rewards.mean(axis=1)
    return mean_best_action_counts, mean_rewards | [
"numpy.zeros",
"tqdm.trange"
] | [((156, 179), 'numpy.zeros', 'np.zeros', (['rewards.shape'], {}), '(rewards.shape)\n', (164, 179), True, 'import numpy as np\n'), ((238, 250), 'tqdm.trange', 'trange', (['runs'], {}), '(runs)\n', (244, 250), False, 'from tqdm import trange\n')] |
import numpy as np
from gym import utils
from envs.mujoco_env import MujocoEnv
import math
# Map a difficulty name to the MuJoCo model XML file used to build the env.
diff_to_path = {
    'easy': 'point.xml',
    'medium': 'point_medium.xml',
    'hard': 'point_hard.xml',
    'harder': 'point_harder.xml',
    'maze': 'maze.xml',
    'maze_easy': 'maze_easy.xml'
}
class PointEnv(MujocoEnv, utils.EzPickle):
    """
    Point-mass MuJoCo environment rewarded for approaching the fixed goal at
    (25, 0). `difficulty` selects the maze XML model from diff_to_path.
    """
    def __init__(self, difficulty=None, max_state=500, clip_state=False, terminal=False):
        if difficulty is None:
            difficulty = 'easy'
        model = diff_to_path[difficulty]
        self.max_state = max_state
        self.clip_state = clip_state
        # [[x_min, y_min, z_min], [x_max, y_max, z_max]] used when clip_state is set.
        self.bounds = [[0, -9.7, 0], [25, 9.7, 0]]
        self.terminal = terminal
        MujocoEnv.__init__(self, model, 1)
        utils.EzPickle.__init__(self)
    def step(self, action):
        """Apply the clipped action; reward is negative distance to the goal."""
        action = np.clip(action, -1.0, 1.0)
        self.do_simulation(action, self.frame_skip)
        next_obs = self._get_obs()
        # Assumes 3 position dofs so obs[:3] is qpos — TODO confirm model.nq == 3.
        qpos = next_obs[:3]
        goal = [25.0, 0.0]
        if self.clip_state:
            qvel = next_obs[3:]
            # Keep the point inside the arena bounds and re-read the observation.
            qpos_clipped = np.clip(qpos, a_min=self.bounds[0], a_max=self.bounds[1])
            self.set_state(qpos_clipped, qvel)
            qpos = qpos_clipped
            next_obs = self._get_obs()
        reward = -np.linalg.norm(goal - qpos[:2])
        done = False
        # Episode terminates only within 1 unit of the goal, and only if requested.
        if reward >= -1. and self.terminal:
            done = True
        return next_obs, reward, done, {}
    def _get_obs(self):
        # Observation = concatenated generalized positions and velocities.
        return np.concatenate([
            self.sim.data.qpos.flat,
            self.sim.data.qvel.flat,
        ])
    def reset_model(self):
        # Reset to the initial state plus small uniform/Gaussian noise.
        qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
        self.set_state(qpos, qvel)
        return self._get_obs()
    def viewer_setup(self):
        # Zoom the camera out to the model's extent.
        self.viewer.cam.distance = self.model.stat.extent
if __name__ == "__main__":
    # Interactive driver: type "x y" forces per step; anything unparsable
    # falls back to a zero action.
    env = PointEnv(difficulty='maze_easy')
    ob = env.reset()
    print(env.action_space)
    done = False
    while not done:
        env.render()
        command = input()
        try:
            x, y = [float(a) for a in command.split(' ')]
        # NOTE(review): bare except silently swallows all errors (including
        # KeyboardInterrupt) — consider narrowing to ValueError.
        except:
            x, y = 0, 0
        ac = np.array([[x, y]])
        print(ac)
        env.step(ac)
        env.render() | [
"gym.utils.EzPickle.__init__",
"numpy.clip",
"numpy.array",
"numpy.linalg.norm",
"envs.mujoco_env.MujocoEnv.__init__",
"numpy.concatenate"
] | [((695, 729), 'envs.mujoco_env.MujocoEnv.__init__', 'MujocoEnv.__init__', (['self', 'model', '(1)'], {}), '(self, model, 1)\n', (713, 729), False, 'from envs.mujoco_env import MujocoEnv\n'), ((738, 767), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (761, 767), False, 'from gym import utils\n'), ((814, 840), 'numpy.clip', 'np.clip', (['action', '(-1.0)', '(1.0)'], {}), '(action, -1.0, 1.0)\n', (821, 840), True, 'import numpy as np\n'), ((1468, 1534), 'numpy.concatenate', 'np.concatenate', (['[self.sim.data.qpos.flat, self.sim.data.qvel.flat]'], {}), '([self.sim.data.qpos.flat, self.sim.data.qvel.flat])\n', (1482, 1534), True, 'import numpy as np\n'), ((2246, 2264), 'numpy.array', 'np.array', (['[[x, y]]'], {}), '([[x, y]])\n', (2254, 2264), True, 'import numpy as np\n'), ((1071, 1128), 'numpy.clip', 'np.clip', (['qpos'], {'a_min': 'self.bounds[0]', 'a_max': 'self.bounds[1]'}), '(qpos, a_min=self.bounds[0], a_max=self.bounds[1])\n', (1078, 1128), True, 'import numpy as np\n'), ((1265, 1296), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal - qpos[:2])'], {}), '(goal - qpos[:2])\n', (1279, 1296), True, 'import numpy as np\n')] |
# coding: utf-8
""" """
from __future__ import division, print_function
__author__ = "adrn <<EMAIL>>"
# Standard library
import os, sys
import time
# Third-party
import numpy as np
# Project
import streams.io as io
from streams.coordinates import _gc_to_hel, _hel_to_gc
from streams.coordinates.frame import heliocentric
from streams.potential.lm10 import LawMajewski2010
from streams.integrate import LeapfrogIntegrator
import streams.inference as si
from streams.inference.back_integrate import back_integration_likelihood
# Benchmark script: times (1) a raw leapfrog orbit integration, (2) one
# back-integration likelihood evaluation, and (3) one full StreamModel call.
nparticles = 16
potential = LawMajewski2010()
simulation = io.SgrSimulation("sgr_nfw/M2.5e+08", "SNAP113")
particles = simulation.particles(n=nparticles, expr="tub!=0")\
                      .to_frame(heliocentric)
satellite = simulation.satellite()\
                      .to_frame(heliocentric)
p_hel = particles._X.copy()
s_hel = satellite._X.copy()
p_gc = _hel_to_gc(p_hel)
s_gc = _hel_to_gc(s_hel)
# Stack satellite + particle phase-space coordinates into one array.
gc = np.vstack((s_gc,p_gc)).copy()
acc = np.zeros_like(gc[:,:3])
# --- Benchmark 1: leapfrog integration (best of 10) ---
times = []
for ii in range(10):
    a = time.time()
    integrator = LeapfrogIntegrator(potential._acceleration_at,
                                    np.array(gc[:,:3]), np.array(gc[:,3:]),
                                    args=(gc.shape[0], acc))
    t, rs, vs = integrator.run(t1=6200, t2=0, dt=-1)
    times.append(time.time()-a)
print(np.min(times), "seconds per integration")
# --- Benchmark 2: back-integration likelihood (best of 10) ---
times = []
for ii in range(10):
    a = time.time()
    back_integration_likelihood(6200, 0, -1, potential, p_gc, s_gc,
                                2.5e8, 0.01, particles.tub, 1.5,
                                np.array([-1]*nparticles))
    times.append(time.time()-a)
print(np.min(times), "seconds per likelihood call")
_config = """
name: test
data_file: data/observed_particles/2.5e8.hdf5
nparticles: {}
potential:
    class_name: LawMajewski2010
    parameters: [q1, qz, phi, v_halo]
particles:
    parameters: [d,mul,mub,vr]
satellite:
    parameters: [d,mul,mub,vr]
""".format(nparticles)
config = io.read_config(_config)
model = si.StreamModel.from_config(config)
truths = model.truths
# --- Benchmark 3: full model evaluation at the true parameters (best of 10) ---
times = []
for ii in range(10):
    a = time.time()
    model(truths)
    times.append(time.time()-a)
print(np.min(times), "seconds per model call") | [
"numpy.zeros_like",
"streams.io.read_config",
"streams.potential.lm10.LawMajewski2010",
"streams.inference.StreamModel.from_config",
"streams.coordinates._hel_to_gc",
"time.time",
"numpy.min",
"numpy.array",
"streams.io.SgrSimulation",
"numpy.vstack"
] | [((561, 578), 'streams.potential.lm10.LawMajewski2010', 'LawMajewski2010', ([], {}), '()\n', (576, 578), False, 'from streams.potential.lm10 import LawMajewski2010\n'), ((592, 639), 'streams.io.SgrSimulation', 'io.SgrSimulation', (['"""sgr_nfw/M2.5e+08"""', '"""SNAP113"""'], {}), "('sgr_nfw/M2.5e+08', 'SNAP113')\n", (608, 639), True, 'import streams.io as io\n'), ((896, 913), 'streams.coordinates._hel_to_gc', '_hel_to_gc', (['p_hel'], {}), '(p_hel)\n', (906, 913), False, 'from streams.coordinates import _gc_to_hel, _hel_to_gc\n'), ((921, 938), 'streams.coordinates._hel_to_gc', '_hel_to_gc', (['s_hel'], {}), '(s_hel)\n', (931, 938), False, 'from streams.coordinates import _gc_to_hel, _hel_to_gc\n'), ((981, 1005), 'numpy.zeros_like', 'np.zeros_like', (['gc[:, :3]'], {}), '(gc[:, :3])\n', (994, 1005), True, 'import numpy as np\n'), ((2012, 2035), 'streams.io.read_config', 'io.read_config', (['_config'], {}), '(_config)\n', (2026, 2035), True, 'import streams.io as io\n'), ((2044, 2078), 'streams.inference.StreamModel.from_config', 'si.StreamModel.from_config', (['config'], {}), '(config)\n', (2070, 2078), True, 'import streams.inference as si\n'), ((1046, 1057), 'time.time', 'time.time', ([], {}), '()\n', (1055, 1057), False, 'import time\n'), ((1352, 1365), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (1358, 1365), True, 'import numpy as np\n'), ((1435, 1446), 'time.time', 'time.time', ([], {}), '()\n', (1444, 1446), False, 'import time\n'), ((1678, 1691), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (1684, 1691), True, 'import numpy as np\n'), ((2142, 2153), 'time.time', 'time.time', ([], {}), '()\n', (2151, 2153), False, 'import time\n'), ((2211, 2224), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (2217, 2224), True, 'import numpy as np\n'), ((945, 968), 'numpy.vstack', 'np.vstack', (['(s_gc, p_gc)'], {}), '((s_gc, p_gc))\n', (954, 968), True, 'import numpy as np\n'), ((1158, 1177), 'numpy.array', 'np.array', (['gc[:, :3]'], {}), '(gc[:, 
:3])\n', (1166, 1177), True, 'import numpy as np\n'), ((1178, 1197), 'numpy.array', 'np.array', (['gc[:, 3:]'], {}), '(gc[:, 3:])\n', (1186, 1197), True, 'import numpy as np\n'), ((1612, 1639), 'numpy.array', 'np.array', (['([-1] * nparticles)'], {}), '([-1] * nparticles)\n', (1620, 1639), True, 'import numpy as np\n'), ((1330, 1341), 'time.time', 'time.time', ([], {}), '()\n', (1339, 1341), False, 'import time\n'), ((1656, 1667), 'time.time', 'time.time', ([], {}), '()\n', (1665, 1667), False, 'import time\n'), ((2189, 2200), 'time.time', 'time.time', ([], {}), '()\n', (2198, 2200), False, 'import time\n')] |
import numpy as np
from sklearn.linear_model import LinearRegression
import sys
import pandas as pd
class Indicators():
    """
    Holds all indicators for a given ticker in one place
    """
    def __init__(self, data):
        # `data` is an OHLCV frame with Open/High/Low/Close/Volume columns
        # (pandas, judging by the .rolling/.ewm/.mask usage below).
        self.data = data
    def get(self, i):
        """Dispatch an indicator series by name; raises for unknown names."""
        if i=='MA20':
            return self.MA(period=20)
        if i=='MA60':
            return self.MA(period=60)
        elif i=='STDEV20':
            return self.STDEV(period=20)
        elif i=='RSI':
            return self.RSI(period=14)
        elif i=='SLBval':
            val, inTrend = self.SqueezeLazyBear()
            return val
        elif i=='SLBtrend':
            val, inTrend = self.SqueezeLazyBear()
            return inTrend
        elif i=='ADX':
            return self.ADX()
        elif i=='VWI':
            return self.VWI()
        elif i=='BBu':
            BBu, _ = self.BollingerBands()
            return BBu
        elif i=='BBl':
            _, BBl = self.BollingerBands()
            return BBl
        elif i=='KCu':
            KCu, _ = self.KeltnerChannel()
            return KCu
        elif i=='KCl':
            _, KCl = self.KeltnerChannel()
            return KCl
        else:
            raise Exception("Unknown indicator: ",i)
    def MA(self, period):
        """
        (Rolling) simple moving average
        """
        return self.data.Close.rolling(period).mean()
    def STDEV(self, period):
        """
        (Rolling) standard deviation
        """
        return self.data.Close.rolling(period).std()
    def RSI(self, period):
        """
        taken from:
        https://stackoverflow.com/questions/57006437/calculate-rsi-indicator-from-pandas-dataframe/57037866
        """
        n = period
        # Wilder's smoothing (running moving average) seeded with y0.
        def rma(x, n, y0):
            a = (n-1) / n
            ak = a**np.arange(len(x)-1, -1, -1)
            return np.r_[np.full(n, np.nan), y0, np.cumsum(ak * x) / ak / n + y0 * a**np.arange(1, len(x)+1)]
        change = self.data.Close.diff()
        gain = change.mask(change < 0, 0.0)
        loss = -change.mask(change > 0, -0.0)
        avg_gain = rma(gain[n+1:].to_numpy(), n, np.nansum(gain.to_numpy()[:n+1])/n)
        avg_loss = rma(loss[n+1:].to_numpy(), n, np.nansum(loss.to_numpy()[:n+1])/n)
        RS = avg_gain / avg_loss
        # Rounded to whole points (0 decimals).
        RSI = np.round(100. - 100. / ( 1. + RS ), 0)
        return RSI
    def ATR(self, period=14):
        """
        Average True Range (ATR)
        https://www.investopedia.com/terms/a/atr.asp
        """
        # NOTE(review): this uses only High-Low; the textbook True Range also
        # considers the previous Close — confirm this simplification is intended.
        return (self.data.High-self.data.Low).rolling(period).mean()
    def ADX(self, period=14):
        """
        Average Directional Movement Index (ADX)
        https://www.investopedia.com/terms/a/adx.asp
        """
        pDM = self.data.High.diff().rolling(period).mean()
        mDM =-self.data.Low.diff().rolling(period).mean()
        ATR = self.ATR(period=period)
        pDI = pDM / ATR
        mDI = mDM / ATR
        DX = ( (pDI.abs()-mDI.abs()) / (pDI.abs()+mDI.abs()) )*100.
        ADX = DX.rolling(period).mean()
        return ADX
    def VWI(self, period=14):
        """
        Volume-Weighted Index
        """
        VDI = (self.data.Volume * (self.data.Close - self.data.Open)).rolling(period).mean()
        ATR = self.ATR()
        V = self.data.Volume.rolling(period).mean()
        VWI = 100. * VDI / ATR / V
        return VWI.rolling(period).mean()
    def BollingerBands(self, length = 20, mult = 2.0):
        """
        https://www.investopedia.com/terms/b/bollingerbands.asp
        BOLU=MA(TP,n)+m∗σ[TP,n]
        BOLD=MA(TP,n)−m∗σ[TP,n]
        where:
        BOLU=Upper Bollinger Band
        BOLD=Lower Bollinger Band
        MA=Moving average
        TP (typical price)=(High+Low+Close)÷3
        n=Number of days in smoothing period (typically 20)
        m=Number of standard deviations (typically 2)
        σ[TP,n]=Standard Deviation over last n periods of TP
        """
        basis = self.MA(length)
        dev = mult * self.STDEV(length) # ??? should this be 1.5 like KC?
        upperBB = basis + dev
        lowerBB = basis - dev
        return upperBB, lowerBB
    def KeltnerChannel(self, lengthKC = 20, mult = 1.5, useExp = True): # was 1.5
        """
        https://www.investopedia.com/terms/k/keltnerchannel.asp
        Keltner Channel Middle Line=EMA
        Keltner Channel Upper Band=EMA+2∗ATR
        Keltner Channel Lower Band=EMA−2∗ATR
        where:
        EMA=Exponential moving average (typically over 20 periods)
        ATR=Average True Range (typically over 10 or 20 periods)
        """
        if useExp:
            ma = self.data.Close.ewm(span=20).mean()
        else:
            ma = self.MA(lengthKC)
        rng = self.data.High - self.data.Low
        rangema = rng.rolling(lengthKC).mean()
        upperKC = ma + rangema * mult
        lowerKC = ma - rangema * mult
        return upperKC, lowerKC
    def SqueezeLazyBear(self, length = 20, multBB = 2.0, multKC = 1.5):
        """
        https://atas.net/atas-possibilities/squeeze-momentum-indicator/
        Squeeze Momentum shows periods when volatility increases or decreases,
        in other words, when the market goes from the trend into flat movement and vice versa.
        """
        # Calculate Bollinger Bands
        upperBB, lowerBB = self.BollingerBands(length = 20, mult = multBB)
        # Calculate Keltner Channel
        upperKC, lowerKC = self.KeltnerChannel(lengthKC = length, mult = multKC)
        # Are BB inside KC?
        sqzOn = (lowerBB > lowerKC) & (upperBB < upperKC)
        sqzOff = (lowerBB < lowerKC) & (upperBB > upperKC)
        noSqz = (~sqzOn) & (~sqzOff) # computed for completeness; not used below
        inTrend = sqzOff.apply(lambda x: 1 if x else 0).astype(int)
        """
        calculate slope with 0 intercept, on a rolling basis
        """
        highest = self.data.High.rolling(length).max()
        lowest = self.data.Low.rolling(length).min()
        correction = ( (highest+lowest)/2. + self.MA(length) )/2.
        X = np.arange(length).reshape(-1,1)
        y = (self.data.Close-correction).to_numpy()
        N = len(y)
        val = np.zeros(N)
        for i in range(N):
            try:
                sub_y = y[i+1-length:i+1].reshape(-1,1)
                lr = LinearRegression(fit_intercept=False)
                lr.fit(X,sub_y)
                val[i] = round(lr.coef_[0][0],3)
            except Exception as e:
                # Windows shorter than `length` (early indices) end up here.
                val[i] = np.nan
        return val, inTrend | [
"numpy.full",
"numpy.zeros",
"sklearn.linear_model.LinearRegression",
"numpy.cumsum",
"numpy.arange",
"numpy.round"
] | [((2423, 2462), 'numpy.round', 'np.round', (['(100.0 - 100.0 / (1.0 + RS))', '(0)'], {}), '(100.0 - 100.0 / (1.0 + RS), 0)\n', (2431, 2462), True, 'import numpy as np\n'), ((6445, 6456), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (6453, 6456), True, 'import numpy as np\n'), ((6323, 6340), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (6332, 6340), True, 'import numpy as np\n'), ((6582, 6619), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (6598, 6619), False, 'from sklearn.linear_model import LinearRegression\n'), ((1982, 2000), 'numpy.full', 'np.full', (['n', 'np.nan'], {}), '(n, np.nan)\n', (1989, 2000), True, 'import numpy as np\n'), ((2006, 2023), 'numpy.cumsum', 'np.cumsum', (['(ak * x)'], {}), '(ak * x)\n', (2015, 2023), True, 'import numpy as np\n')] |
import os
import torch
import numpy as np
from collections import Sequence
from plyfile import PlyData
from torch_scatter import scatter_sum, scatter_mul
import MinkowskiEngineBackend as MEB
import MinkowskiEngine as ME
# Use ScanNet default colors
# RGB palette indexed by (class index + 1); index 0 is the 'unlabeled' color.
colors = [
    (0, 0, 0), # unlabeled 0
    (174, 199, 232), # wall 1
    (152, 223, 138), # floor 2
    (31, 119, 180), # cabinet 3
    (255, 187, 120), # bed 4
    (188, 189, 34), # chair 5
    (140, 86, 75), # sofa 6
    (255, 152, 150), # table 7
    (214, 39, 40), # door 8
    (197, 176, 213), # window 9
    (148, 103, 189), # bookshelf 10
    (196, 156, 148), # picture 11
    (23, 190, 207), # counter 12
    (178, 76, 76),
    (247, 182, 210), # desk 14
    (66, 188, 102),
    (219, 219, 141), # curtain 16
    (140, 57, 197),
    (202, 185, 52),
    (51, 176, 203),
    (200, 54, 131),
    (92, 193, 61),
    (78, 71, 183),
    (172, 114, 82),
    (255, 127, 14), # refrigerator 24
    (91, 163, 138),
    (153, 98, 156),
    (140, 153, 101),
    (158, 218, 229), # shower curtain 28
    (100, 125, 154),
    (178, 127, 135),
    (120, 185, 128),
    (146, 111, 194),
    (44, 160, 44), # toilet 33
    (112, 128, 144), # sink 34
    (96, 207, 209),
    (227, 119, 194), # bathtub 36
    (213, 92, 176),
    (94, 106, 211),
    (82, 84, 163), # otherfurn 39
    (100, 85, 144)
]
# Set labels for the ScanNet dataset
# VALID_CLASS_IDS[i] is the raw dataset label id for contiguous class index i.
SCANNET_VALID_CLASS_NAMES = ['Wall', 'Floor', 'Cabinet', 'Bed', 'Chair', 'Sofa', 'Table', 'Door', 'Window', 'Bookshelf', 'Picture',
                            'Counter', 'Desk', 'Curtain', 'Refrigerator', 'Showercurtain', 'Toilet', 'Sink', 'Bathtub', 'Otherfurniture']
SCANNET_VALID_CLASS_IDS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
# Set labels for the Stanford dataset
STANFORD_VALID_CLASS_NAMES = ['Clutter', 'Beam', 'Board', 'Bookcase', 'Ceiling', 'Chair', 'Column', 'Door', 'Floor', 'Sofa',
                            'Table', 'Wall', 'Window']
STANFORD_VALID_CLASS_IDS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]
def convert_label_scannet(label):
    """
    Map raw ScanNet label ids onto contiguous class indices, in place.

    Ids outside SCANNET_VALID_CLASS_IDS become the ignore value 255; each
    valid id is replaced by its position in SCANNET_VALID_CLASS_IDS.
    Returns the (mutated) label array.
    """
    valid = set(SCANNET_VALID_CLASS_IDS)
    # First mark every id not in the valid set as 'ignore'.
    for raw_id in range(41):
        if raw_id in valid:
            continue
        if raw_id in label:
            label[label == raw_id] = 255
    # Then compact the valid ids to 0..len(valid)-1, in list order.
    for new_id, raw_id in enumerate(SCANNET_VALID_CLASS_IDS):
        if raw_id in label:
            label[label == raw_id] = new_id
    return label
def convert_label_stanford(label):
    """
    Remap Stanford labels in place: drop class 10 ('stairs') and compact the
    ids above it. Returns the (mutated) label array.
    """
    # Class 10 ('stairs') is excluded -> map it to the ignore value 255.
    label[label == 10] = 255
    # Shift the remaining ids down to fill the gap left by class 10.
    for original_id in (11, 12, 13):
        label[label == original_id] = original_id - 1
    return label
def load_file(file_name, voxel_size, coords_pcl=None, labels_pcl=None, dataset='scannet'):
    """
    Load a point cloud from <file_name>.ply, build per-point features and
    voxelize it.

    coords_pcl / labels_pcl may override the coordinates / labels stored in
    the PLY file. Returns (idx, inverse_idx, coords_vox, feats_vox,
    labels_pcl, coords_pcl, feats_pcl).
    """
    plydata = PlyData.read(file_name+'.ply')
    data = plydata.elements[0].data
    if coords_pcl is not None:
        coords_pcl = np.array(coords_pcl.data, dtype=np.float32)
    else:
        coords_pcl = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
    # Colors rescaled from [0, 255] to [0, 1].
    colors_pcl = np.array([data['red'], data['green'],
                        data['blue']], dtype=np.float32).T / 255.
    if labels_pcl is None:
        labels_pcl = np.array(data['label'], dtype=np.int32)
    if dataset == 'scannet':
        # Zero-center the colors.
        feats_pcl = colors_pcl - 0.5
    elif dataset == 'stanford':
        # Normalize feature: append x/y-centered voxel coordinates to colors.
        coords_vox = np.floor(coords_pcl / voxel_size)
        coords_vox_mean = coords_vox.mean(0)
        coords_vox_mean[-1] = 0. # only center x, y!
        coords_vox_norm = coords_vox - coords_vox_mean
        feats_pcl = np.concatenate((colors_pcl - 0.5, coords_vox_norm), 1)
    idx, inverse_idx, coords_vox, feats_vox = sparse_quantize(
        coords_pcl, feats_pcl, None, return_index=True,
        return_inverse=True, quantization_size=voxel_size)
    return idx, inverse_idx, coords_vox, feats_vox, labels_pcl, coords_pcl, feats_pcl
def load_file_eval(file_name, voxel_size, attacked_coords, dataset='scannet'):
    """
    Load a point cloud for evaluation, optionally substituting attacked
    coordinates from <attacked_coords>/<room>.txt.

    Returns (inverse_idx, coords_vox, feats_vox, labels_pcl).
    """
    plydata = PlyData.read(file_name+'.ply')
    data = plydata.elements[0].data
    if attacked_coords is not None:
        room_name = file_name.split('/')[-1]
        coords_pcl = np.loadtxt(os.path.join(attacked_coords, room_name + '.txt'))
    else:
        coords_pcl = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
        if dataset == 'stanford':
            # Center x/y and shift z so the floor is at 0.
            coords_pcl[:,:2] -= coords_pcl[:,:2].mean(axis=0)
            coords_pcl[:,2] -= coords_pcl[:,2].min(axis=0)
    colors_pcl = np.array([data['red'], data['green'],
                        data['blue']], dtype=np.float32).T / 255.
    labels_pcl = np.array(data['label'], dtype=np.int32)
    if dataset == 'scannet':
        feats_pcl = colors_pcl - 0.5
    elif dataset == 'stanford':
        # Normalize feature: append x/y-centered voxel coordinates to colors.
        coords_vox = np.floor(coords_pcl / voxel_size)
        coords_vox_mean = coords_vox.mean(0)
        coords_vox_mean[-1] = 0. # only center x, y!
        coords_vox_norm = coords_vox - coords_vox_mean
        feats_pcl = np.concatenate((colors_pcl - 0.5, coords_vox_norm), 1)
    idx, inverse_idx, coords_vox, feats_vox = sparse_quantize(
        coords_pcl, feats_pcl, None, return_index=True,
        return_inverse=True, quantization_size=voxel_size)
    return inverse_idx, coords_vox, feats_vox, labels_pcl
def generate_input_sparse_tensor(file_name, config, coords_pcl=None, coords_pcl0=None, labels_pcl=None, extend=True, dataset='scannet'):
    """
    Build the input SparseTensor for one room.

    With extend=True the point coordinates get gradients enabled and
    add_occupancy builds a differentiable (attack-aware) sparse tensor;
    otherwise a plain ME.SparseTensor is returned.
    """
    batch = [load_file(file_name, config.voxel_size, coords_pcl, labels_pcl, dataset)]
    idx, inverse_idx, coords_vox, feats_vox, labels_pcl, coords_pcl, feats_pcl = list(zip(*batch))
    coords_vox, feats_vox = ME.utils.sparse_collate(coords_vox, feats_vox, None)
    coords_pcl = torch.from_numpy(coords_pcl[0])
    if extend:
        # Enable gradients on the coordinates so the attack can optimize them.
        coords_pcl.requires_grad_(True).retain_grad()
        feats_pcl = torch.from_numpy(feats_pcl[0])
        sinput, occupy_conv, valid = add_occupancy(config, inverse_idx, coords_vox[:,1:], coords_pcl, coords_pcl0, feats_pcl, dataset)
        return idx[0], inverse_idx[0], coords_vox, coords_pcl, sinput, occupy_conv, valid
    else:
        sinput = ME.SparseTensor(feats_vox.float(), coords=coords_vox)
        return idx[0], inverse_idx[0], coords_pcl, sinput, labels_pcl[0]
def generate_input_sparse_tensor_eval(file_name, voxel_size=0.02, attacked_coords=None, dataset='scannet'):
    """
    Build collated sparse-tensor inputs for evaluation of one room.

    Returns (inverse_idx, coordinates, features, labels).
    """
    # Create a batch, this process is done in a data loader during training in parallel.
    batch = [load_file_eval(file_name, voxel_size, attacked_coords, dataset)]
    inverse_idx, coordinates_, featrues_, labels = list(zip(*batch))
    coordinates, features = ME.utils.sparse_collate(
        coordinates_, featrues_, None)
    return inverse_idx, coordinates, features.float(), labels[0]
def add_occupancy(config, inverse_idx, coords_vox_noextend, coords_pcl, coords_pcl0, feats_pcl, dataset='scannet'):
    """
    Obtain occupancy values for input voxelization and sparse convolution.

    Enumerates, for every point, the current voxel plus (when
    config.dynamics_aware) its 26 neighbors reachable within the attack step
    size and perturbation budget, computes soft point-to-voxel relations, and
    gathers them into a differentiable input SparseTensor.

    Returns (sparse_tensor, occupy_conv-or-None, valid) where `valid` lists,
    per neighbor offset, the point indices contributing to that offset.
    """
    if dataset == 'stanford':
        # Obtain features with enabled gradient
        feat_size = 6
        feats_pcl[:,3:] = coords_pcl / config.voxel_size
        feats_pcl[:,3:5] -= feats_pcl[:,3:5].mean(0)
    ### Find Valid Voxels ###
    # Find all possible voxels that may be occupied after an attack step.
    coords_vox_all = []
    coords_vox = []
    valid = []
    i = 0
    for dx in [0, -1, 1]:
        for dy in [0, -1, 1]:
            for dz in [0, -1, 1]:
                if config.dynamics_aware == False:
                    # Without dynamics-awareness only offset (0,0,0) is used.
                    if [dx, dy, dz] != [0, 0, 0]:
                        break
                if i == 0:
                    # Add existing occupied voxels
                    valid.append(torch.arange(coords_pcl.shape[0]))
                    coords_vox_all.append(torch.floor(coords_pcl / config.voxel_size))
                else:
                    # Examine neighbor voxels whether in step size
                    coords_vox_new = torch.floor((coords_pcl + torch.Tensor([dx, dy, dz]) * config.step) / config.voxel_size)
                    # Keep only voxels not already listed under a previous offset.
                    valid_new1 = torch.where(~((torch.stack(coords_vox_all) - coords_vox_new).abs().sum(-1) == 0).sum(0).bool())[0]
                    # Examine neighbor voxels whether in budget
                    idx1 = torch.where((torch.abs(coords_vox_new[valid_new1] * config.voxel_size - coords_pcl0[valid_new1]) < config.budget).sum(1).bool())[0]
                    idx2 = torch.where((torch.abs(coords_vox_new[valid_new1] * config.voxel_size + config.voxel_size - coords_pcl0[valid_new1]) < config.budget).sum(1).bool())[0]
                    idx = torch.unique(torch.cat([idx1, idx2]))
                    valid_new2 = valid_new1[idx]
                    valid.append(valid_new2)
                    coords_vox_all.append(coords_vox_new)
                coords_vox.append(coords_vox_all[i][valid[i]])
                i = i + 1
    coords_vox = torch.cat(coords_vox, dim=0)
    ### Relation Calculation ###
    inverse_idx = torch.Tensor(inverse_idx[0]).long()
    relation_input_list = []
    if config.dynamics_aware:
        relation_conv_list = []
    i = 0
    for dx in [0, -1, 1]:
        for dy in [0, -1, 1]:
            for dz in [0, -1, 1]:
                if config.dynamics_aware == False:
                    if [dx, dy, dz] != [0, 0, 0]:
                        continue
                # Distance (in voxel units) from each point to the voxel center.
                coords_vox_nei = coords_vox_noextend[inverse_idx][valid[i]] + torch.Tensor([[dx, dy, dz]]) + 0.5
                coords_pcl_valid = coords_pcl[valid[i]]
                dist = torch.abs(coords_vox_nei - coords_pcl_valid / config.voxel_size)
                # Relation for input voxelization
                relation_input = torch.prod(1/(1+torch.exp(config.lamda_input*(dist-0.5))), dim=-1)
                relation_input_list.append(relation_input)
                # Relation for sparse convolution in network
                if config.dynamics_aware:
                    relation_conv = torch.prod(1/(1+torch.exp(config.lamda_conv*(dist-0.5))), dim=-1)
                    relation_conv_list.append(relation_conv)
                i = i + 1
    relation_input_list = torch.cat(relation_input_list, dim=0)
    if config.dynamics_aware:
        # Fix: this concatenation was unguarded, raising a NameError when
        # config.dynamics_aware is False (relation_conv_list is never created).
        relation_conv_list = torch.cat(relation_conv_list, dim=0)
    ### Gathering Operation ###
    # Obtain neighbor mapping in Equations (10) and (18)
    unique_index, inverse_mapping = quantize(coords_vox)
    # Obtain the uniqued voxel coordinates
    coords_vox = coords_vox[unique_index]
    # The gathering function in Equation (10)
    occupy_input = 1 - scatter_mul(1-relation_input_list, inverse_mapping)
    if config.dynamics_aware:
        occupy_conv = 1 - scatter_mul(1-relation_conv_list, inverse_mapping)
    ### Input Voxelization ###
    # Equation (18)
    feats_pcl_list = []
    for i in range(len(valid)):
        feats_pcl_list.append(feats_pcl[valid[i]])
    feats_pcl_list = torch.cat(feats_pcl_list, dim=0)
    mid_result = relation_input_list.unsqueeze(1).repeat(1,feats_pcl.shape[-1]) * feats_pcl_list
    feats_vox_tilde = []
    for i in range(feats_pcl.shape[-1]):
        feats_vox_tilde.append(scatter_sum(mid_result[:,i], inverse_mapping))
    feats_vox_tilde = torch.stack(feats_vox_tilde, dim=1)
    relation_input_sum = scatter_sum(relation_input_list, inverse_mapping)
    if dataset == 'stanford':
        # Guard against division by ~0 for voxels with negligible total relation.
        relation_input_sum_ = torch.where(relation_input_sum < 1e-10, torch.ones(relation_input_sum.shape), relation_input_sum)
        relation_input_sum = relation_input_sum_
    feats_vox_tilde = feats_vox_tilde / relation_input_sum.unsqueeze(1)
    if dataset == 'stanford':
        # Use sigmoid to mimic floor operation in coordinate features
        feats_vox_tilde_floor = torch.floor(feats_vox_tilde[:,3:] - 0.5)
        feats_vox_tilde_rem = 1 / (1 + torch.exp(-config.lamda_floor * (feats_vox_tilde[:,3:] - feats_vox_tilde_floor)))
        feats_vox_tilde[:,3:] = feats_vox_tilde_floor + feats_vox_tilde_rem
    # Equation (19)
    feats_vox = occupy_input.unsqueeze(1).repeat(1,feats_pcl.shape[-1]) * feats_vox_tilde
    # Build input sparse tensor
    sparse_tensor = ME.SparseTensor(feats_vox, coords=torch.cat([torch.zeros(coords_vox.shape[0],1), coords_vox], dim=-1).int())
    sparse_tensor._F = feats_vox
    # Return input sparse tensor, occupancy values for sparse convolution in network, and valid index
    if config.dynamics_aware:
        return sparse_tensor, occupy_conv.unsqueeze(1), valid
    else:
        return sparse_tensor, None, valid
def get_point_output(config, soutput, inverse_idx, coords_vox_noextend, coords_pcl, valid):
    """
    Devoxelize the network output back to per-point predictions by
    accumulating relation-weighted voxel outputs over the neighbor offsets.
    Returns a (num_points, num_classes) tensor on soutput.device.
    """
    ### Output Devoxelization ###
    # Note:
    # Equation (20) is applied by not multiplying occupancy value on the final layer output of the network.
    # So in devoxelization we only need to apply equation (21).
    outputs_pcl = torch.zeros(coords_pcl.shape[0], soutput.F.shape[1]).to(soutput.device)
    i = 0
    for dx in [0, -1, 1]:
        for dy in [0, -1, 1]:
            for dz in [0, -1, 1]:
                if config.dynamics_aware == False:
                    # Without dynamics-awareness only offset (0,0,0) is used.
                    if [dx, dy, dz] != [0, 0, 0]:
                        continue
                # Distance (in voxel units) from each point to the voxel center.
                coords_vox_nei = coords_vox_noextend[:,1:][inverse_idx][valid[i]] + torch.Tensor([[dx, dy, dz]]) + 0.5
                coords_pcl_valid = coords_pcl[valid[i]]
                dist = torch.abs(coords_vox_nei - coords_pcl_valid / config.voxel_size)
                # Relation for output devoxelization
                relation_output = torch.prod(1/(1+torch.exp(config.lamda_output*(dist-0.5))), dim=-1)
                relation_output = relation_output.to(soutput.device)
                # We ignore the denominator in Equation (21) for simplicity
                # NOTE(review): bare except — presumably it skips offsets whose
                # voxel coordinates are absent from soutput; narrow if possible.
                try:
                    outputs_vox = soutput.features_at_coords(coords_vox_noextend + torch.IntTensor([[0, dx, dy, dz]]))[0][inverse_idx][valid[i]]
                except:
                    i = i + 1
                    continue
                else:
                    # Equation (21)
                    outputs = relation_output.unsqueeze(1) * outputs_vox
                    outputs_pcl[valid[i]] += outputs
                i = i + 1
    return outputs_pcl
def save_prediction(config, save_path, room_name, preds, probs=None, dataset='scannet'):
    """
    Save network predictions and/or per-class probabilities as text files.

    preds: contiguous class indices (mapped back to raw dataset ids).
    probs: per-class probability matrix, written with 6 decimals.
    Writes <save_path>/pred/<room_name>.txt when config.save_preds and
    <save_path>/prob/<room_name>.txt when config.save_probs.
    Raises ValueError for an unknown dataset when predictions are requested.
    """
    if dataset == 'scannet':
        VALID_CLASS_IDS = SCANNET_VALID_CLASS_IDS
    elif dataset == 'stanford':
        VALID_CLASS_IDS = STANFORD_VALID_CLASS_IDS
    else:
        # Fix: an unknown dataset previously raised a confusing NameError below.
        VALID_CLASS_IDS = None
    if config.save_preds:
        if VALID_CLASS_IDS is None:
            raise ValueError('Unknown dataset: ' + str(dataset))
        # Map contiguous class indices back to the raw dataset label ids.
        preds = np.array(VALID_CLASS_IDS, dtype=np.int32)[preds]
        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(os.path.join(save_path, 'pred'), exist_ok=True)
        np.savetxt(os.path.join(save_path, 'pred', room_name+'.txt'), preds, fmt='%d')
    if config.save_probs:
        # NOTE(review): printoptions do not affect np.savetxt (fmt controls the
        # format); kept only to preserve the original global side effect.
        np.set_printoptions(suppress=True)
        np.set_printoptions(precision=6)
        os.makedirs(os.path.join(save_path, 'prob'), exist_ok=True)
        np.savetxt(os.path.join(save_path, 'prob', room_name+'.txt'), probs, fmt='%.6f')
    return
def save_attacked_coords(save_path, room_name, coords_pcl):
    """
    Save attacked point-cloud coordinates (6 decimals) to
    <save_path>/coord/<room_name>.txt.
    """
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(os.path.join(save_path, 'coord'), exist_ok=True)
    np.savetxt(os.path.join(save_path, 'coord', room_name+'.txt'), coords_pcl, fmt='%.6f')
    return
def visualize(config, room_name, coords_pcl, labels_pcl, save_path, remark=None, refine=False):
    """
    Write a colored PLY for visualization under <save_path>/visual/.

    remark selects the coloring/coordinate variant: 'gt' (255 -> unlabeled
    color), 'noattack', 'attack' (also writes the attacked coordinates), or
    None (plain colored copy).
    """
    if refine:
        # NOTE(review): `coords_pcl0` is not defined anywhere in this function
        # or module scope — this branch raises NameError as written. It looks
        # like the unperturbed coordinates should be passed in; confirm and fix.
        coords_pcl = torch.Tensor(coords_pcl)
        # Re-sample each point uniformly inside its voxel, clamped to the budget.
        coords_pcl_ = config.voxel_size * torch.floor(coords_pcl/config.voxel_size) + config.voxel_size * torch.rand(coords_pcl.shape)
        coords_pcl_ = torch.where(coords_pcl_ < (coords_pcl0 - config.budget), coords_pcl0 - config.budget, coords_pcl_)
        coords_pcl_ = torch.where(coords_pcl_ > (coords_pcl0 + config.budget), coords_pcl0 + config.budget, coords_pcl_)
        # Keep the refined point only if it stays in the same voxel.
        coords_pcl = torch.where(torch.floor(coords_pcl_/config.voxel_size) != torch.floor(coords_pcl/config.voxel_size), coords_pcl, coords_pcl_)
        coords_pcl = coords_pcl.numpy()
    plydata = PlyData.read(os.path.join(config.data_path, room_name + '.ply'))
    if remark == 'gt':
        rgb = []
        for l in labels_pcl:
            if l == 255:
                # Ignore label -> 'unlabeled' color.
                rgb.append(list(colors[0]))
            else:
                rgb.append(list(colors[l+1]))
        rgb = np.array(rgb)
    elif remark == 'noattack':
        rgb = np.array([list(colors[l+1]) for l in labels_pcl])
    else:
        if remark == 'attack':
            # Overwrite the stored coordinates with the attacked ones.
            plydata.elements[0].data['x'] = coords_pcl[:, 0]
            plydata.elements[0].data['y'] = coords_pcl[:, 1]
            plydata.elements[0].data['z'] = coords_pcl[:, 2]
        rgb = np.array([list(colors[l+1]) for l in labels_pcl])
    plydata.elements[0].data['red'] = rgb[:, 0]
    plydata.elements[0].data['green'] = rgb[:, 1]
    plydata.elements[0].data['blue'] = rgb[:, 2]
    if remark is not None:
        save_path = os.path.join(save_path, 'visual', room_name)
    else:
        save_path = os.path.join(save_path, 'visual')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if remark is not None:
        plydata.write(os.path.join(save_path, room_name + '.' + remark + '.ply'))
    else:
        plydata.write(os.path.join(save_path, room_name + '.ply'))
class IOStream():
    """Append-mode log writer that mirrors every message to stdout."""

    def __init__(self, path):
        # The handle stays open for the object's lifetime; call close() when done.
        self.f = open(path, 'a')

    def cprint(self, text):
        """Print *text* to stdout and append it, newline-terminated, to the log."""
        message = text + '\n'
        print(text)
        self.f.write(message)
        # Flush immediately so logs survive a crash mid-run.
        self.f.flush()

    def close(self):
        """Close the underlying log file."""
        self.f.close()
def quantize(coords):
    r"""Return a unique index map and an inverse index map for *coords*.

    Args:
        :attr:`coords` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): an
        :math:`N \times D` matrix of :math:`N` points in :math:`D`
        dimensional space.

    Returns:
        :attr:`unique_map`: indices selecting the unique coordinates, i.e.
        :attr:`coords[unique_map]` is duplicate-free.

        :attr:`inverse_map`: indices recovering the original coordinates,
        i.e. :attr:`coords[unique_map[inverse_map]] == coords`.

    Example::

       >>> unique_map, inverse_map = quantize(coords)
       >>> unique_coords = coords[unique_map]
       >>> print(unique_coords[inverse_map] == coords)  # True, ..., True
    """
    is_numpy = isinstance(coords, np.ndarray)
    assert is_numpy or isinstance(coords, torch.Tensor), \
        "Invalid coords type"
    if is_numpy:
        assert coords.dtype == np.int32, f"Invalid coords type {coords.dtype} != np.int32"
        return MEB.quantize_np(coords.astype(np.int32))
    # Torch path: dtype validation happens inside the backend.
    return MEB.quantize_th(coords.int())
def quantize_label(coords, labels, ignore_label):
    """Quantize *coords* with per-point integer *labels*.

    Dispatches to the numpy or torch backend depending on the container of
    *coords*; *labels* must use the same container. Points that collide in
    the same cell with conflicting labels are resolved by the backend using
    *ignore_label*.
    """
    assert isinstance(coords, (np.ndarray, torch.Tensor)), \
        "Invalid coords type"
    if isinstance(coords, np.ndarray):
        assert isinstance(labels, np.ndarray)
        assert coords.dtype == np.int32, f"Invalid coords type {coords.dtype} != np.int32"
        assert labels.dtype == np.int32, f"Invalid label type {labels.dtype} != np.int32"
        return MEB.quantize_label_np(coords, labels, ignore_label)
    assert isinstance(labels, torch.Tensor)
    # Torch path: coordinate dtype is validated inside the backend.
    return MEB.quantize_label_th(coords, labels.int(), ignore_label)
def sparse_quantize(coords,
                    feats=None,
                    labels=None,
                    ignore_label=-100,
                    return_index=False,
                    return_inverse=False,
                    quantization_size=None):
    r"""Given coordinates, and features (optionally labels), the function
    generates quantized (voxelized) coordinates.

    Args:
        :attr:`coords` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
        matrix of size :math:`N \times D` where :math:`N` is the number of
        points in the :math:`D` dimensional space.

        :attr:`feats` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`, optional): a
        matrix of size :math:`N \times D_F` where :math:`N` is the number of
        points and :math:`D_F` is the dimension of the features. Must have the
        same container as `coords` (i.e. if `coords` is a torch.Tensor, `feats`
        must also be a torch.Tensor).

        :attr:`labels` (:attr:`numpy.ndarray` or :attr:`torch.IntTensor`,
        optional): integer labels associated to each coordinates. Must have the
        same container as `coords` (i.e. if `coords` is a torch.Tensor,
        `labels` must also be a torch.Tensor). For classification where a set
        of points are mapped to one label, do not feed the labels.

        :attr:`ignore_label` (:attr:`int`, optional): the int value of the
        IGNORE LABEL.
        :attr:`torch.nn.CrossEntropyLoss(ignore_index=ignore_label)`

        :attr:`return_index` (:attr:`bool`, optional): set True if you want the
        indices of the quantized coordinates. False by default.

        :attr:`return_inverse` (:attr:`bool`, optional): set True if you want
        the indices that can recover the discretized original coordinates.
        False by default. `return_index` must be True when `return_reverse` is True.

        Example::

           >>> unique_map, inverse_map = sparse_quantize(discrete_coords, return_index=True, return_inverse=True)
           >>> unique_coords = discrete_coords[unique_map]
           >>> print(unique_coords[inverse_map] == discrete_coords)  # True

        :attr:`quantization_size` (:attr:`float`, :attr:`list`, or
        :attr:`numpy.ndarray`, optional): the length of the each side of the
        hyperrectangle of the grid cell.

     Example::

        >>> # Segmentation
        >>> criterion = torch.nn.CrossEntropyLoss(ignore_index=-100)
        >>> coords, feats, labels = MinkowskiEngine.utils.sparse_quantize(
        >>>     coords, feats, labels, ignore_label=-100, quantization_size=0.1)
        >>> output = net(MinkowskiEngine.SparseTensor(feats, coords))
        >>> loss = criterion(output.F, labels.long())
        >>>
        >>> # Classification
        >>> criterion = torch.nn.CrossEntropyLoss(ignore_index=-100)
        >>> coords, feats = MinkowskiEngine.utils.sparse_quantize(coords, feats)
        >>> output = net(MinkowskiEngine.SparseTensor(feats, coords))
        >>> loss = criterion(output.F, labels.long())

    NOTE(review): the ``return_index`` branches below index ``feats``
    unconditionally; calling with ``return_index=True`` and ``feats=None``
    raises -- confirm callers always pass feats in that mode.
    """
    assert isinstance(coords, np.ndarray) or isinstance(coords, torch.Tensor), \
        'Coords must be either np.array or torch.Tensor.'
    use_label = labels is not None
    use_feat = feats is not None
    assert coords.ndim == 2, \
        "The coordinates must be a 2D matrix. The shape of the input is " + \
        str(coords.shape)
    if return_inverse:
        assert return_index, "return_reverse must be set with return_index"
    if use_feat:
        assert feats.ndim == 2
        assert coords.shape[0] == feats.shape[0]
    if use_label:
        assert coords.shape[0] == len(labels)
    dimension = coords.shape[1]
    # Quantize the coordinates
    if quantization_size is not None:
        if isinstance(quantization_size, (Sequence, np.ndarray, torch.Tensor)):
            # Per-dimension voxel sizes: one size per coordinate axis.
            assert len(
                quantization_size
            ) == dimension, "Quantization size and coordinates size mismatch."
            if isinstance(coords, np.ndarray):
                quantization_size = np.array([i for i in quantization_size])
                discrete_coords = np.floor(coords / quantization_size)
            else:
                quantization_size = torch.Tensor(
                    [i for i in quantization_size])
                discrete_coords = (coords / quantization_size).floor()
        elif np.isscalar(quantization_size):  # Assume that it is a scalar
            if quantization_size == 1:
                # Size-1 voxels: coordinates already lie on the integer grid.
                discrete_coords = coords
            else:
                # NOTE(review): np.floor is applied even when coords is a
                # torch.Tensor here -- relies on torch/numpy interop.
                discrete_coords = np.floor(coords / quantization_size)
        else:
            raise ValueError('Not supported type for quantization_size.')
    else:
        discrete_coords = coords
    discrete_coords = np.floor(discrete_coords)
    # Cast to 32-bit integers, matching the backend's expected dtype.
    if isinstance(coords, np.ndarray):
        discrete_coords = discrete_coords.astype(np.int32)
    else:
        discrete_coords = discrete_coords.int()
    # Return values accordingly
    if use_label:
        mapping, colabels = quantize_label(discrete_coords, labels,
                                           ignore_label)
        if return_index:
            return discrete_coords[mapping], feats[mapping], colabels, mapping
        else:
            if use_feat:
                return discrete_coords[mapping], feats[mapping], colabels
            else:
                return discrete_coords[mapping], colabels
    else:
        unique_map, inverse_map = quantize(discrete_coords)
        if return_index:
            if return_inverse:
                return unique_map, inverse_map, \
                    discrete_coords[unique_map], feats[unique_map]
            else:
                return unique_map, \
                    discrete_coords[unique_map], feats[unique_map]
        else:
            if use_feat:
                return discrete_coords[unique_map], feats[unique_map]
            else:
                return discrete_coords[unique_map]
def intersectionAndUnion(output, target, K, ignore_index=255):
    """Per-class intersection / union / target pixel counts for mIoU.

    'K' classes; output and target sizes are N, N*L or N*H*W, with each
    value in [0, K-1] (target may additionally contain ignore_index).
    Produces three length-K arrays: area_intersection, area_union,
    area_target, returned by the final statement of the function.
    """
    # 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.
    assert (output.ndim in [1, 2, 3])
    assert output.shape == target.shape
    # Flatten both; copy output so the caller's array is not mutated below.
    output = output.reshape(output.size).copy()
    target = target.reshape(target.size)
    # Set predictions at ignored positions to 255 so they fall outside every
    # histogram bin [0, K) and cannot count as intersection.
    output[np.where(target == ignore_index)[0]] = 255
    # Pixels where the prediction matches the target form the intersection.
    intersection = output[np.where(output == target)[0]]
    area_intersection, _ = np.histogram(intersection, bins=np.arange(K+1))
    area_output, _ = np.histogram(output, bins=np.arange(K+1))
    area_target, _ = np.histogram(target, bins=np.arange(K+1))
    # Inclusion-exclusion: union = predicted + target - intersection.
    area_union = area_output + area_target - area_intersection
return area_intersection, area_union, area_target | [
"numpy.floor",
"torch.cat",
"numpy.arange",
"torch.arange",
"os.path.join",
"torch.ones",
"numpy.set_printoptions",
"os.path.exists",
"torch.exp",
"torch.Tensor",
"torch.zeros",
"plyfile.PlyData.read",
"torch.where",
"torch.floor",
"torch.IntTensor",
"MinkowskiEngine.utils.sparse_colla... | [((3117, 3149), 'plyfile.PlyData.read', 'PlyData.read', (["(file_name + '.ply')"], {}), "(file_name + '.ply')\n", (3129, 3149), False, 'from plyfile import PlyData\n'), ((4409, 4441), 'plyfile.PlyData.read', 'PlyData.read', (["(file_name + '.ply')"], {}), "(file_name + '.ply')\n", (4421, 4441), False, 'from plyfile import PlyData\n'), ((5025, 5064), 'numpy.array', 'np.array', (["data['label']"], {'dtype': 'np.int32'}), "(data['label'], dtype=np.int32)\n", (5033, 5064), True, 'import numpy as np\n'), ((6123, 6175), 'MinkowskiEngine.utils.sparse_collate', 'ME.utils.sparse_collate', (['coords_vox', 'feats_vox', 'None'], {}), '(coords_vox, feats_vox, None)\n', (6146, 6175), True, 'import MinkowskiEngine as ME\n'), ((6193, 6224), 'torch.from_numpy', 'torch.from_numpy', (['coords_pcl[0]'], {}), '(coords_pcl[0])\n', (6209, 6224), False, 'import torch\n'), ((7150, 7204), 'MinkowskiEngine.utils.sparse_collate', 'ME.utils.sparse_collate', (['coordinates_', 'featrues_', 'None'], {}), '(coordinates_, featrues_, None)\n', (7173, 7204), True, 'import MinkowskiEngine as ME\n'), ((9425, 9453), 'torch.cat', 'torch.cat', (['coords_vox'], {'dim': '(0)'}), '(coords_vox, dim=0)\n', (9434, 9453), False, 'import torch\n'), ((10685, 10722), 'torch.cat', 'torch.cat', (['relation_input_list'], {'dim': '(0)'}), '(relation_input_list, dim=0)\n', (10694, 10722), False, 'import torch\n'), ((10748, 10784), 'torch.cat', 'torch.cat', (['relation_conv_list'], {'dim': '(0)'}), '(relation_conv_list, dim=0)\n', (10757, 10784), False, 'import torch\n'), ((11443, 11475), 'torch.cat', 'torch.cat', (['feats_pcl_list'], {'dim': '(0)'}), '(feats_pcl_list, dim=0)\n', (11452, 11475), False, 'import torch\n'), ((11739, 11774), 'torch.stack', 'torch.stack', (['feats_vox_tilde'], {'dim': '(1)'}), '(feats_vox_tilde, dim=1)\n', (11750, 11774), False, 'import torch\n'), ((11800, 11849), 'torch_scatter.scatter_sum', 'scatter_sum', (['relation_input_list', 'inverse_mapping'], 
{}), '(relation_input_list, inverse_mapping)\n', (11811, 11849), False, 'from torch_scatter import scatter_sum, scatter_mul\n'), ((25272, 25297), 'numpy.floor', 'np.floor', (['discrete_coords'], {}), '(discrete_coords)\n', (25280, 25297), True, 'import numpy as np\n'), ((3236, 3279), 'numpy.array', 'np.array', (['coords_pcl.data'], {'dtype': 'np.float32'}), '(coords_pcl.data, dtype=np.float32)\n', (3244, 3279), True, 'import numpy as np\n'), ((3543, 3582), 'numpy.array', 'np.array', (["data['label']"], {'dtype': 'np.int32'}), "(data['label'], dtype=np.int32)\n", (3551, 3582), True, 'import numpy as np\n'), ((6314, 6344), 'torch.from_numpy', 'torch.from_numpy', (['feats_pcl[0]'], {}), '(feats_pcl[0])\n', (6330, 6344), False, 'import torch\n'), ((11094, 11147), 'torch_scatter.scatter_mul', 'scatter_mul', (['(1 - relation_input_list)', 'inverse_mapping'], {}), '(1 - relation_input_list, inverse_mapping)\n', (11105, 11147), False, 'from torch_scatter import scatter_sum, scatter_mul\n'), ((12262, 12303), 'torch.floor', 'torch.floor', (['(feats_vox_tilde[:, 3:] - 0.5)'], {}), '(feats_vox_tilde[:, 3:] - 0.5)\n', (12273, 12303), False, 'import torch\n'), ((15515, 15549), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (15534, 15549), True, 'import numpy as np\n'), ((15558, 15590), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(6)'}), '(precision=6)\n', (15577, 15590), True, 'import numpy as np\n'), ((16062, 16114), 'os.path.join', 'os.path.join', (['save_path', '"""coord"""', "(room_name + '.txt')"], {}), "(save_path, 'coord', room_name + '.txt')\n", (16074, 16114), False, 'import os\n'), ((16330, 16354), 'torch.Tensor', 'torch.Tensor', (['coords_pcl'], {}), '(coords_pcl)\n', (16342, 16354), False, 'import torch\n'), ((16512, 16613), 'torch.where', 'torch.where', (['(coords_pcl_ < coords_pcl0 - config.budget)', '(coords_pcl0 - config.budget)', 'coords_pcl_'], {}), '(coords_pcl_ < coords_pcl0 - 
config.budget, coords_pcl0 - config\n .budget, coords_pcl_)\n', (16523, 16613), False, 'import torch\n'), ((16633, 16734), 'torch.where', 'torch.where', (['(coords_pcl_ > coords_pcl0 + config.budget)', '(coords_pcl0 + config.budget)', 'coords_pcl_'], {}), '(coords_pcl_ > coords_pcl0 + config.budget, coords_pcl0 + config\n .budget, coords_pcl_)\n', (16644, 16734), False, 'import torch\n'), ((16947, 16997), 'os.path.join', 'os.path.join', (['config.data_path', "(room_name + '.ply')"], {}), "(config.data_path, room_name + '.ply')\n", (16959, 16997), False, 'import os\n'), ((17215, 17228), 'numpy.array', 'np.array', (['rgb'], {}), '(rgb)\n', (17223, 17228), True, 'import numpy as np\n'), ((17808, 17852), 'os.path.join', 'os.path.join', (['save_path', '"""visual"""', 'room_name'], {}), "(save_path, 'visual', room_name)\n", (17820, 17852), False, 'import os\n'), ((17883, 17916), 'os.path.join', 'os.path.join', (['save_path', '"""visual"""'], {}), "(save_path, 'visual')\n", (17895, 17916), False, 'import os\n'), ((17929, 17954), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (17943, 17954), False, 'import os\n'), ((17964, 17986), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (17975, 17986), False, 'import os\n'), ((20292, 20343), 'MinkowskiEngineBackend.quantize_label_np', 'MEB.quantize_label_np', (['coords', 'labels', 'ignore_label'], {}), '(coords, labels, ignore_label)\n', (20313, 20343), True, 'import MinkowskiEngineBackend as MEB\n'), ((3311, 3372), 'numpy.array', 'np.array', (["[data['x'], data['y'], data['z']]"], {'dtype': 'np.float32'}), "([data['x'], data['y'], data['z']], dtype=np.float32)\n", (3319, 3372), True, 'import numpy as np\n'), ((3392, 3462), 'numpy.array', 'np.array', (["[data['red'], data['green'], data['blue']]"], {'dtype': 'np.float32'}), "([data['red'], data['green'], data['blue']], dtype=np.float32)\n", (3400, 3462), True, 'import numpy as np\n'), ((3731, 3764), 'numpy.floor', 'np.floor', 
(['(coords_pcl / voxel_size)'], {}), '(coords_pcl / voxel_size)\n', (3739, 3764), True, 'import numpy as np\n'), ((3939, 3993), 'numpy.concatenate', 'np.concatenate', (['(colors_pcl - 0.5, coords_vox_norm)', '(1)'], {}), '((colors_pcl - 0.5, coords_vox_norm), 1)\n', (3953, 3993), True, 'import numpy as np\n'), ((4589, 4638), 'os.path.join', 'os.path.join', (['attacked_coords', "(room_name + '.txt')"], {}), "(attacked_coords, room_name + '.txt')\n", (4601, 4638), False, 'import os\n'), ((4671, 4732), 'numpy.array', 'np.array', (["[data['x'], data['y'], data['z']]"], {'dtype': 'np.float32'}), "([data['x'], data['y'], data['z']], dtype=np.float32)\n", (4679, 4732), True, 'import numpy as np\n'), ((4905, 4975), 'numpy.array', 'np.array', (["[data['red'], data['green'], data['blue']]"], {'dtype': 'np.float32'}), "([data['red'], data['green'], data['blue']], dtype=np.float32)\n", (4913, 4975), True, 'import numpy as np\n'), ((5213, 5246), 'numpy.floor', 'np.floor', (['(coords_pcl / voxel_size)'], {}), '(coords_pcl / voxel_size)\n', (5221, 5246), True, 'import numpy as np\n'), ((5421, 5475), 'numpy.concatenate', 'np.concatenate', (['(colors_pcl - 0.5, coords_vox_norm)', '(1)'], {}), '((colors_pcl - 0.5, coords_vox_norm), 1)\n', (5435, 5475), True, 'import numpy as np\n'), ((9508, 9536), 'torch.Tensor', 'torch.Tensor', (['inverse_idx[0]'], {}), '(inverse_idx[0])\n', (9520, 9536), False, 'import torch\n'), ((11202, 11254), 'torch_scatter.scatter_mul', 'scatter_mul', (['(1 - relation_conv_list)', 'inverse_mapping'], {}), '(1 - relation_conv_list, inverse_mapping)\n', (11213, 11254), False, 'from torch_scatter import scatter_sum, scatter_mul\n'), ((11670, 11716), 'torch_scatter.scatter_sum', 'scatter_sum', (['mid_result[:, i]', 'inverse_mapping'], {}), '(mid_result[:, i], inverse_mapping)\n', (11681, 11716), False, 'from torch_scatter import scatter_sum, scatter_mul\n'), ((11950, 11986), 'torch.ones', 'torch.ones', (['relation_input_sum.shape'], {}), 
'(relation_input_sum.shape)\n', (11960, 11986), False, 'import torch\n'), ((13465, 13517), 'torch.zeros', 'torch.zeros', (['coords_pcl.shape[0]', 'soutput.F.shape[1]'], {}), '(coords_pcl.shape[0], soutput.F.shape[1])\n', (13476, 13517), False, 'import torch\n'), ((15223, 15264), 'numpy.array', 'np.array', (['VALID_CLASS_IDS'], {'dtype': 'np.int32'}), '(VALID_CLASS_IDS, dtype=np.int32)\n', (15231, 15264), True, 'import numpy as np\n'), ((15412, 15463), 'os.path.join', 'os.path.join', (['save_path', '"""pred"""', "(room_name + '.txt')"], {}), "(save_path, 'pred', room_name + '.txt')\n", (15424, 15463), False, 'import os\n'), ((15731, 15782), 'os.path.join', 'os.path.join', (['save_path', '"""prob"""', "(room_name + '.txt')"], {}), "(save_path, 'prob', room_name + '.txt')\n", (15743, 15782), False, 'import os\n'), ((15958, 15990), 'os.path.join', 'os.path.join', (['save_path', '"""coord"""'], {}), "(save_path, 'coord')\n", (15970, 15990), False, 'import os\n'), ((16013, 16045), 'os.path.join', 'os.path.join', (['save_path', '"""coord"""'], {}), "(save_path, 'coord')\n", (16025, 16045), False, 'import os\n'), ((18037, 18095), 'os.path.join', 'os.path.join', (['save_path', "(room_name + '.' + remark + '.ply')"], {}), "(save_path, room_name + '.' 
+ remark + '.ply')\n", (18049, 18095), False, 'import os\n'), ((18129, 18172), 'os.path.join', 'os.path.join', (['save_path', "(room_name + '.ply')"], {}), "(save_path, room_name + '.ply')\n", (18141, 18172), False, 'import os\n'), ((24886, 24916), 'numpy.isscalar', 'np.isscalar', (['quantization_size'], {}), '(quantization_size)\n', (24897, 24916), True, 'import numpy as np\n'), ((26815, 26847), 'numpy.where', 'np.where', (['(target == ignore_index)'], {}), '(target == ignore_index)\n', (26823, 26847), True, 'import numpy as np\n'), ((26884, 26910), 'numpy.where', 'np.where', (['(output == target)'], {}), '(output == target)\n', (26892, 26910), True, 'import numpy as np\n'), ((26974, 26990), 'numpy.arange', 'np.arange', (['(K + 1)'], {}), '(K + 1)\n', (26983, 26990), True, 'import numpy as np\n'), ((27037, 27053), 'numpy.arange', 'np.arange', (['(K + 1)'], {}), '(K + 1)\n', (27046, 27053), True, 'import numpy as np\n'), ((27100, 27116), 'numpy.arange', 'np.arange', (['(K + 1)'], {}), '(K + 1)\n', (27109, 27116), True, 'import numpy as np\n'), ((10089, 10153), 'torch.abs', 'torch.abs', (['(coords_vox_nei - coords_pcl_valid / config.voxel_size)'], {}), '(coords_vox_nei - coords_pcl_valid / config.voxel_size)\n', (10098, 10153), False, 'import torch\n'), ((12342, 12427), 'torch.exp', 'torch.exp', (['(-config.lamda_floor * (feats_vox_tilde[:, 3:] - feats_vox_tilde_floor))'], {}), '(-config.lamda_floor * (feats_vox_tilde[:, 3:] -\n feats_vox_tilde_floor))\n', (12351, 12427), False, 'import torch\n'), ((14013, 14077), 'torch.abs', 'torch.abs', (['(coords_vox_nei - coords_pcl_valid / config.voxel_size)'], {}), '(coords_vox_nei - coords_pcl_valid / config.voxel_size)\n', (14022, 14077), False, 'import torch\n'), ((15302, 15333), 'os.path.join', 'os.path.join', (['save_path', '"""pred"""'], {}), "(save_path, 'pred')\n", (15314, 15333), False, 'import os\n'), ((15360, 15391), 'os.path.join', 'os.path.join', (['save_path', '"""pred"""'], {}), "(save_path, 'pred')\n", (15372, 
15391), False, 'import os\n'), ((15621, 15652), 'os.path.join', 'os.path.join', (['save_path', '"""prob"""'], {}), "(save_path, 'prob')\n", (15633, 15652), False, 'import os\n'), ((15679, 15710), 'os.path.join', 'os.path.join', (['save_path', '"""prob"""'], {}), "(save_path, 'prob')\n", (15691, 15710), False, 'import os\n'), ((16397, 16440), 'torch.floor', 'torch.floor', (['(coords_pcl / config.voxel_size)'], {}), '(coords_pcl / config.voxel_size)\n', (16408, 16440), False, 'import torch\n'), ((16461, 16489), 'torch.rand', 'torch.rand', (['coords_pcl.shape'], {}), '(coords_pcl.shape)\n', (16471, 16489), False, 'import torch\n'), ((16765, 16809), 'torch.floor', 'torch.floor', (['(coords_pcl_ / config.voxel_size)'], {}), '(coords_pcl_ / config.voxel_size)\n', (16776, 16809), False, 'import torch\n'), ((16811, 16854), 'torch.floor', 'torch.floor', (['(coords_pcl / config.voxel_size)'], {}), '(coords_pcl / config.voxel_size)\n', (16822, 16854), False, 'import torch\n'), ((24569, 24609), 'numpy.array', 'np.array', (['[i for i in quantization_size]'], {}), '([i for i in quantization_size])\n', (24577, 24609), True, 'import numpy as np\n'), ((24644, 24680), 'numpy.floor', 'np.floor', (['(coords / quantization_size)'], {}), '(coords / quantization_size)\n', (24652, 24680), True, 'import numpy as np\n'), ((24735, 24779), 'torch.Tensor', 'torch.Tensor', (['[i for i in quantization_size]'], {}), '([i for i in quantization_size])\n', (24747, 24779), False, 'import torch\n'), ((25081, 25117), 'numpy.floor', 'np.floor', (['(coords / quantization_size)'], {}), '(coords / quantization_size)\n', (25089, 25117), True, 'import numpy as np\n'), ((8206, 8239), 'torch.arange', 'torch.arange', (['coords_pcl.shape[0]'], {}), '(coords_pcl.shape[0])\n', (8218, 8239), False, 'import torch\n'), ((8288, 8331), 'torch.floor', 'torch.floor', (['(coords_pcl / config.voxel_size)'], {}), '(coords_pcl / config.voxel_size)\n', (8299, 8331), False, 'import torch\n'), ((9142, 9165), 'torch.cat', 
'torch.cat', (['[idx1, idx2]'], {}), '([idx1, idx2])\n', (9151, 9165), False, 'import torch\n'), ((9975, 10003), 'torch.Tensor', 'torch.Tensor', (['[[dx, dy, dz]]'], {}), '([[dx, dy, dz]])\n', (9987, 10003), False, 'import torch\n'), ((13899, 13927), 'torch.Tensor', 'torch.Tensor', (['[[dx, dy, dz]]'], {}), '([[dx, dy, dz]])\n', (13911, 13927), False, 'import torch\n'), ((10255, 10299), 'torch.exp', 'torch.exp', (['(config.lamda_input * (dist - 0.5))'], {}), '(config.lamda_input * (dist - 0.5))\n', (10264, 10299), False, 'import torch\n'), ((12709, 12744), 'torch.zeros', 'torch.zeros', (['coords_vox.shape[0]', '(1)'], {}), '(coords_vox.shape[0], 1)\n', (12720, 12744), False, 'import torch\n'), ((14183, 14228), 'torch.exp', 'torch.exp', (['(config.lamda_output * (dist - 0.5))'], {}), '(config.lamda_output * (dist - 0.5))\n', (14192, 14228), False, 'import torch\n'), ((10521, 10564), 'torch.exp', 'torch.exp', (['(config.lamda_conv * (dist - 0.5))'], {}), '(config.lamda_conv * (dist - 0.5))\n', (10530, 10564), False, 'import torch\n'), ((8485, 8511), 'torch.Tensor', 'torch.Tensor', (['[dx, dy, dz]'], {}), '([dx, dy, dz])\n', (8497, 8511), False, 'import torch\n'), ((14501, 14535), 'torch.IntTensor', 'torch.IntTensor', (['[[0, dx, dy, dz]]'], {}), '([[0, dx, dy, dz]])\n', (14516, 14535), False, 'import torch\n'), ((8805, 8893), 'torch.abs', 'torch.abs', (['(coords_vox_new[valid_new1] * config.voxel_size - coords_pcl0[valid_new1])'], {}), '(coords_vox_new[valid_new1] * config.voxel_size - coords_pcl0[\n valid_new1])\n', (8814, 8893), False, 'import torch\n'), ((8964, 9072), 'torch.abs', 'torch.abs', (['(coords_vox_new[valid_new1] * config.voxel_size + config.voxel_size -\n coords_pcl0[valid_new1])'], {}), '(coords_vox_new[valid_new1] * config.voxel_size + config.\n voxel_size - coords_pcl0[valid_new1])\n', (8973, 9072), False, 'import torch\n'), ((8596, 8623), 'torch.stack', 'torch.stack', (['coords_vox_all'], {}), '(coords_vox_all)\n', (8607, 8623), False, 'import 
torch\n')] |
import unittest
import numpy as np
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
class TestOptionsDictionaryFeature(unittest.TestCase):
    """Feature tests for declaring and using component options in OpenMDAO."""

    def test_simple(self):
        """An option ('size') passed at instantiation configures the component."""
        import openmdao.api as om
        from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp

        prob = om.Problem()
        prob.model.add_subsystem('double', VectorDoublingComp(size=3))  # 'size' is an option

        prob.setup()
        prob.set_val('double.x', [1., 2., 3.])

        prob.run_model()
        assert_near_equal(prob.get_val('double.y'), [2., 4., 6.])

    def test_simple_fail(self):
        """Setup must fail when a required option was never set."""
        import openmdao.api as om
        from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp

        prob = om.Problem()
        prob.model.add_subsystem('double', VectorDoublingComp())  # 'size' not specified

        # The original bare try/except passed silently when no error was
        # raised; assertRaises makes the expected failure explicit.
        with self.assertRaises(RuntimeError) as cm:
            prob.setup()
        self.assertEqual(str(cm.exception),
                         "VectorDoublingComp (double): Option 'size' is required but has not been set.")

    def test_with_default(self):
        """An option with a declared default ('b') may be left unspecified."""
        import openmdao.api as om
        from openmdao.test_suite.components.options_feature_lincomb import LinearCombinationComp

        prob = om.Problem()
        prob.model.add_subsystem('linear', LinearCombinationComp(a=2.))  # 'b' not specified

        prob.setup()
        prob.set_val('linear.x', 3)

        prob.run_model()
        self.assertEqual(prob.get_val('linear.y'), 7.)

    def test_simple_array(self):
        """Options can hold array values."""
        import numpy as np
        import openmdao.api as om
        from openmdao.test_suite.components.options_feature_array import ArrayMultiplyComp

        prob = om.Problem()
        prob.model.add_subsystem('a_comp', ArrayMultiplyComp(array=np.array([1, 2, 3])))

        prob.setup()
        prob.set_val('a_comp.x', 5.)

        prob.run_model()
        assert_near_equal(prob.get_val('a_comp.y'), [5., 10., 15.])

    def test_simple_function(self):
        """Options can hold callables."""
        import openmdao.api as om
        from openmdao.test_suite.components.options_feature_function import UnitaryFunctionComp

        def my_func(x):
            return x*2

        prob = om.Problem()
        prob.model.add_subsystem('f_comp', UnitaryFunctionComp(func=my_func))

        prob.setup()
        prob.set_val('f_comp.x', 5.)

        prob.run_model()
        assert_near_equal(prob.get_val('f_comp.y'), 10.)

    def test_simple_values(self):
        """Options declared with `values=` only accept those values."""
        import numpy as np
        import openmdao.api as om

        class VectorDoublingComp(om.ExplicitComponent):
            def initialize(self):
                self.options.declare('size', values=[2, 4, 6, 8])

            def setup(self):
                size = self.options['size']
                self.add_input('x', shape=size)
                self.add_output('y', shape=size)
                self.declare_partials('y', 'x', val=2.,
                                      rows=np.arange(size),
                                      cols=np.arange(size))

            def compute(self, inputs, outputs):
                outputs['y'] = 2 * inputs['x']

        prob = om.Problem()
        prob.model.add_subsystem('double', VectorDoublingComp(size=4))

        prob.setup()
        prob.set_val('double.x', [1., 2., 3., 4.])

        prob.run_model()
        assert_near_equal(prob.get_val('double.y'), [2., 4., 6., 8.])

    def test_simple_bounds_valid(self):
        """A custom check_valid callback rejects invalid option values."""
        import numpy as np
        import openmdao.api as om

        def check_even(name, value):
            if value % 2 != 0:
                raise ValueError(f"Option '{name}' with value {value} must be an even number.")

        class VectorDoublingComp(om.ExplicitComponent):
            def initialize(self):
                self.options.declare('size', types=int, lower=2, upper=8, check_valid=check_even)

            def setup(self):
                size = self.options['size']
                self.add_input('x', shape=size)
                self.add_output('y', shape=size)
                self.declare_partials('y', 'x', val=2.,
                                      rows=np.arange(size),
                                      cols=np.arange(size))

            def compute(self, inputs, outputs):
                outputs['y'] = 2 * inputs['x']

        # The original try/except passed silently when construction did NOT
        # raise; assertRaises makes the expected rejection explicit.
        with self.assertRaises(Exception) as cm:
            VectorDoublingComp(size=5)
        self.assertEqual(str(cm.exception), "Option 'size' with value 5 must be an even number.")
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"openmdao.api.Problem",
"openmdao.test_suite.components.options_feature_function.UnitaryFunctionComp",
"numpy.array",
"numpy.arange",
"openmdao.test_suite.components.options_feature_vector.VectorDoublingComp",
"openmdao.test_suite.components.options_feature_lincomb.LinearCombinationComp... | [((4529, 4544), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4542, 4544), False, 'import unittest\n'), ((349, 361), 'openmdao.api.Problem', 'om.Problem', ([], {}), '()\n', (359, 361), True, 'import openmdao.api as om\n'), ((794, 806), 'openmdao.api.Problem', 'om.Problem', ([], {}), '()\n', (804, 806), True, 'import openmdao.api as om\n'), ((1271, 1283), 'openmdao.api.Problem', 'om.Problem', ([], {}), '()\n', (1281, 1283), True, 'import openmdao.api as om\n'), ((1720, 1732), 'openmdao.api.Problem', 'om.Problem', ([], {}), '()\n', (1730, 1732), True, 'import openmdao.api as om\n'), ((2207, 2219), 'openmdao.api.Problem', 'om.Problem', ([], {}), '()\n', (2217, 2219), True, 'import openmdao.api as om\n'), ((3155, 3167), 'openmdao.api.Problem', 'om.Problem', ([], {}), '()\n', (3165, 3167), True, 'import openmdao.api as om\n'), ((405, 431), 'openmdao.test_suite.components.options_feature_vector.VectorDoublingComp', 'VectorDoublingComp', ([], {'size': '(3)'}), '(size=3)\n', (423, 431), False, 'from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp\n'), ((850, 870), 'openmdao.test_suite.components.options_feature_vector.VectorDoublingComp', 'VectorDoublingComp', ([], {}), '()\n', (868, 870), False, 'from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp\n'), ((1327, 1355), 'openmdao.test_suite.components.options_feature_lincomb.LinearCombinationComp', 'LinearCombinationComp', ([], {'a': '(2.0)'}), '(a=2.0)\n', (1348, 1355), False, 'from openmdao.test_suite.components.options_feature_lincomb import LinearCombinationComp\n'), ((2263, 2296), 'openmdao.test_suite.components.options_feature_function.UnitaryFunctionComp', 'UnitaryFunctionComp', ([], {'func': 'my_func'}), '(func=my_func)\n', (2282, 2296), False, 'from openmdao.test_suite.components.options_feature_function import UnitaryFunctionComp\n'), ((3211, 3237), 
'openmdao.test_suite.components.options_feature_vector.VectorDoublingComp', 'VectorDoublingComp', ([], {'size': '(4)'}), '(size=4)\n', (3229, 3237), False, 'from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp\n'), ((4343, 4369), 'openmdao.test_suite.components.options_feature_vector.VectorDoublingComp', 'VectorDoublingComp', ([], {'size': '(5)'}), '(size=5)\n', (4361, 4369), False, 'from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp\n'), ((1800, 1819), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1808, 1819), True, 'import numpy as np\n'), ((2966, 2981), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (2975, 2981), True, 'import numpy as np\n'), ((3026, 3041), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (3035, 3041), True, 'import numpy as np\n'), ((4137, 4152), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (4146, 4152), True, 'import numpy as np\n'), ((4197, 4212), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (4206, 4212), True, 'import numpy as np\n')] |
from vehicles.idmmobil_merge_vehicle import IDMMOBILVehicleMerge
import numpy as np
import pickle
from importlib import reload
import tensorflow as tf
import json
class NeuralIDMVehicle(IDMMOBILVehicleMerge):
def __init__(self):
    """Create the vehicle with placeholder state; a trained model fills it in later."""
    # The base IDMMOBIL vehicle is constructed with None state here; real
    # values come from the simulation and from initialize_agent().
    super().__init__(id=None, lane_id=None, glob_x=None, speed=None, aggressiveness=None)
    self.time_lapse_since_last_param_update = 0
    self.samples_n = 1  # number of parallel samples kept in the history buffer
    self.history_len = 30 # steps
    self.state_dim = 13  # features per step; layout defined in create_state_indxs
    self.obs_history = np.zeros([self.samples_n, self.history_len, self.state_dim])
def load_model(self, config, exp_path):
    """Build a NeurIDMModel from *config* and restore its weights from *exp_path*."""
    from models.core import neural_idm
    # reload picks up code edits to neural_idm in long-lived sessions.
    reload(neural_idm)
    from models.core.neural_idm import NeurIDMModel
    self.model = NeurIDMModel(config)
    # self.model.forward_sim.attention_temp = 3
    # expect_partial() suppresses warnings about optimizer slots that are
    # present in the checkpoint but unused at inference time.
    self.model.load_weights(exp_path).expect_partial()
def initialize_agent(self, model_name, epoch_count, data_id):
    """Load scalers, dummy-value padding and trained weights for one experiment.

    Reads the pickled env/merger scalers and dummy value set produced with
    dataset ``sim_data_<data_id>``, restores the model checkpoint at epoch
    ``epoch_count`` under ``model_name``, prints the experiment config and
    builds the feature-index tables.
    """
    exp_dir = './src/models/experiments/' + model_name
    exp_path = exp_dir + '/model_epo' + epoch_count
    dataset_name = 'sim_data_' + data_id
    data_files_dir = './src/datasets/' + dataset_name + '/'

    # The three pickles share a naming scheme, so load them in one pass.
    loaded = {}
    for stem in ('env_scaler', 'm_scaler', 'dummy_value_set'):
        with open(data_files_dir + stem + '.pickle', 'rb') as handle:
            loaded[stem] = pickle.load(handle)
    self.env_scaler = loaded['env_scaler']
    self.m_scaler = loaded['m_scaler']
    self.dummy_value_set = loaded['dummy_value_set']

    with open(exp_dir + '/' + 'config.json', 'rb') as handle:
        config = json.load(handle)
    self.load_model(config, exp_path)
    print(json.dumps(config, ensure_ascii=False, indent=4))
    self.create_state_indxs()
def names_to_index(self, col_names):
    """Translate a feature name, or a list of names, into column indices.

    Args:
        col_names: A single feature name (str) or a list of names.

    Returns:
        The index for a single name, or a list of indices for a list.
    """
    # isinstance instead of `type(x) == list`: idiomatic type check that
    # also accepts list subclasses.
    if isinstance(col_names, list):
        return [self.indxs[item] for item in col_names]
    return self.indxs[col_names]
def create_state_indxs(self):
    """Build the name->column lookup and the env/merger feature index lists.

    The observation vector has a fixed 13-feature layout; self.indxs maps
    each feature name to its position, and env_s_indxs / merger_indxs pick
    out the environment and merger sub-vectors respectively.
    """
    feature_names = [
        'e_veh_action_p', 'f_veh_action_p',
        'e_veh_speed', 'f_veh_speed',
        'el_delta_v', 'el_delta_x',
        'em_delta_v', 'em_delta_x',
        'm_veh_action_p', 'm_veh_speed', 'em_delta_y',
        'delta_x_to_merge', 'm_veh_exists']
    # Position of each feature is just its rank in feature_names.
    self.indxs = {name: position for position, name in enumerate(feature_names)}
    self.env_s_indxs = self.names_to_index(
        ['e_veh_action_p', 'f_veh_action_p', 'e_veh_speed', 'f_veh_speed',
         'el_delta_v', 'el_delta_x', 'em_delta_v', 'em_delta_x'])
    self.merger_indxs = self.names_to_index(
        ['m_veh_action_p', 'm_veh_speed', 'em_delta_y', 'delta_x_to_merge'])
def update_obs_history(self, o_t):
self.obs_history[:, :-1, :] = self.obs_history[:, 1:, :]
self.obs_history[:, -1, :] = o_t[0, 0, :]
    def neur_observe(self):
        """Assemble the 13-dim observation vector for the neural model.

        Missing neighbours are replaced by placeholder values from
        ``self.dummy_value_set`` so the vector always has a fixed layout
        (the layout matches ``create_state_indxs``).

        Returns:
            A two-element list: ``[np.array of shape (1, 1, 13),
            [[[m_veh_exists as float]]]]``.
        """
        m_veh = self.neighbours['m']
        f_veh = self.neighbours['f']
        if not m_veh:
            # No merging vehicle: fall back to dataset placeholder values.
            m_veh_exists = 0
            m_veh_action = self.dummy_value_set['m_veh_action_p']
            m_veh_speed = self.dummy_value_set['m_veh_speed']
            em_delta_x = self.dummy_value_set['em_delta_x']
            em_delta_v = self.dummy_value_set['em_delta_v']
            em_delta_y = self.dummy_value_set['em_delta_y']
            delta_x_to_merge = self.dummy_value_set['delta_x_to_merge']
        else:
            m_veh_exists = 1
            m_veh_action = m_veh.act_long_p
            m_veh_speed = m_veh.speed
            em_delta_x = m_veh.glob_x-self.glob_x
            em_delta_y = abs(m_veh.glob_y-self.glob_y)
            em_delta_v = self.speed-m_veh_speed
            # Longitudinal distance the merger still has before its ramp exit.
            delta_x_to_merge = m_veh.ramp_exit_start-m_veh.glob_x
        if not f_veh:
            # No front vehicle: placeholders again (f_veh_exists is unused below).
            f_veh_exists = 0
            f_veh_action = self.dummy_value_set['f_veh_action_p']
            f_veh_speed = self.dummy_value_set['f_veh_speed']
            el_delta_x = self.dummy_value_set['el_delta_x']
            el_delta_v = self.dummy_value_set['el_delta_v']
        else:
            f_veh_exists = 1
            f_veh_action = f_veh.act_long_p
            f_veh_speed = f_veh.speed
            el_delta_x = f_veh.glob_x-self.glob_x
            el_delta_v = self.speed-f_veh_speed
        # Order must match feature_names in create_state_indxs.
        obs_t0 = [self.act_long_p, f_veh_action, self.speed, f_veh_speed]
        obs_t0.extend([el_delta_v,
                        el_delta_x])
        obs_t0.extend([em_delta_v,
                        em_delta_x,
                        m_veh_action,
                        m_veh_speed,
                        em_delta_y,
                        delta_x_to_merge])
        obs_t0.append(m_veh_exists)
        self.m_veh_exists = m_veh_exists
        return [np.array([[obs_t0]]), [[[float(m_veh_exists)]]]]
def driver_params_update(self, idm_params):
idm_params = idm_params.numpy()[0, :]
self.driver_params['desired_v'] = idm_params[0]
self.driver_params['desired_tgap'] = idm_params[1]
self.driver_params['min_jamx'] = idm_params[2]
self.driver_params['max_act'] = idm_params[3]
self.driver_params['min_act'] = idm_params[4]
def belief_update(self, proj_latent):
self.proj_latent = tf.reshape(proj_latent, [self.samples_n, 1, 128])
def scale_state(self, state, state_type):
if state_type == 'full':
state[:, :, self.env_s_indxs] = \
(state[:, :, self.env_s_indxs]-self.env_scaler.mean_)/self.env_scaler.var_**0.5
# merger context
state[:, :, self.merger_indxs] = \
(state[:, :, self.merger_indxs]-self.m_scaler.mean_)/self.m_scaler.var_**0.5
state[:,:,]
elif state_type == 'env_state':
state = \
(state[:, :, self.env_s_indxs]-self.env_scaler.mean_)/self.env_scaler.var_**0.5
elif state_type == 'merger_c':
state = \
(state[:, :, self.merger_indxs]-self.m_scaler.mean_)/self.m_scaler.var_**0.5
return np.float32(state)
def get_neur_att(self, att_context):
f_att_score, m_att_score = self.model.forward_sim.get_att(att_context)
f_att_score, m_att_score = f_att_score.numpy()[0][0][0], m_att_score.numpy()[0][0][0]
f_att_score = (1 - self.m_veh_exists) + f_att_score*self.m_veh_exists
m_att_score = m_att_score*self.m_veh_exists
return f_att_score, m_att_score
def action_clip(self, act_long):
return min(max([-6, act_long]), 6)
    def act(self, obs):
        """Compute the longitudinal action as an attention-weighted blend of
        two IDM responses (front vehicle vs. merging vehicle).

        `obs` is the pair returned by ``neur_observe``: the raw observation
        array and the merger-exists indicator. Driver parameters and latent
        projections are re-inferred only on the first call
        (time_lapse_since_last_param_update == 0) and cached afterwards.
        """
        obs_t0, m_veh_exists = obs
        # if self.time_lapse_since_last_param_update % 20 == 0:
        if self.time_lapse_since_last_param_update == 0:
            # One-time inference: encode the history, sample the belief
            # latents, and derive IDM parameters + attention projection.
            obs_history = self.scale_state(self.obs_history.copy(), 'full')
            enc_h = self.model.h_seq_encoder(obs_history)
            latent_dis_param = self.model.belief_net(enc_h , dis_type='prior')
            z_idm, z_att = self.model.belief_net.sample_z(latent_dis_param)
            proj_idm = self.model.belief_net.z_proj_idm(z_idm)
            proj_att = self.model.belief_net.z_proj_att(z_att)
            self.belief_update(proj_att)
            self.enc_h = tf.reshape(enc_h, [self.samples_n, 1, 128])
            idm_params = self.model.idm_layer(proj_idm)
            self.driver_params_update(idm_params)
            # if self.id == 'neur_4':
            #     print('self.obs_history ', self.obs_history)
        self.time_lapse_since_last_param_update += 1
        env_state = self.scale_state(obs_t0, 'env_state')
        merger_c = self.scale_state(obs_t0, 'merger_c')
        att_context = tf.concat([self.proj_latent , self.enc_h, env_state, merger_c, \
                                        m_veh_exists], axis=-1)
        f_att_score, m_att_score = self.get_neur_att(att_context)
        # NOTE(review): idm_action is called with an explicit `self` argument,
        # which suggests it is an unbound function attribute — confirm.
        ef_act = self.action_clip(self.idm_action(self, self.neighbours['f']))
        if self.neighbours['m'] and self.neighbours['m'].glob_x > self.glob_x:
            em_act = self.idm_action(self, self.neighbours['m'])
            # if self.id == 'neur_2':
            #     print('em_act ', em_act)
            if em_act < -20:
                # not a feasible action
                em_act = 0
                m_att_score = 0
            else:
                em_act = self.action_clip(em_act)
        else:
            # no merger to attend to
            em_act = 0
            m_att_score = 0
        self.att = m_att_score
        # Blend the two candidate accelerations by their attention weights.
        act_long = f_att_score*ef_act + m_att_score*em_act
        return act_long
| [
"json.load",
"numpy.float32",
"tensorflow.reshape",
"numpy.zeros",
"tensorflow.concat",
"json.dumps",
"importlib.reload",
"pickle.load",
"numpy.array",
"models.core.neural_idm.NeurIDMModel"
] | [((514, 574), 'numpy.zeros', 'np.zeros', (['[self.samples_n, self.history_len, self.state_dim]'], {}), '([self.samples_n, self.history_len, self.state_dim])\n', (522, 574), True, 'import numpy as np\n'), ((675, 693), 'importlib.reload', 'reload', (['neural_idm'], {}), '(neural_idm)\n', (681, 693), False, 'from importlib import reload\n'), ((774, 794), 'models.core.neural_idm.NeurIDMModel', 'NeurIDMModel', (['config'], {}), '(config)\n', (786, 794), False, 'from models.core.neural_idm import NeurIDMModel\n'), ((5581, 5630), 'tensorflow.reshape', 'tf.reshape', (['proj_latent', '[self.samples_n, 1, 128]'], {}), '(proj_latent, [self.samples_n, 1, 128])\n', (5591, 5630), True, 'import tensorflow as tf\n'), ((6395, 6412), 'numpy.float32', 'np.float32', (['state'], {}), '(state)\n', (6405, 6412), True, 'import numpy as np\n'), ((8020, 8109), 'tensorflow.concat', 'tf.concat', (['[self.proj_latent, self.enc_h, env_state, merger_c, m_veh_exists]'], {'axis': '(-1)'}), '([self.proj_latent, self.enc_h, env_state, merger_c, m_veh_exists],\n axis=-1)\n', (8029, 8109), True, 'import tensorflow as tf\n'), ((1300, 1319), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1311, 1319), False, 'import pickle\n'), ((1421, 1440), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1432, 1440), False, 'import pickle\n'), ((1556, 1575), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1567, 1575), False, 'import pickle\n'), ((1663, 1680), 'json.load', 'json.load', (['handle'], {}), '(handle)\n', (1672, 1680), False, 'import json\n'), ((1739, 1787), 'json.dumps', 'json.dumps', (['config'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(config, ensure_ascii=False, indent=4)\n', (1749, 1787), False, 'import json\n'), ((5078, 5098), 'numpy.array', 'np.array', (['[[obs_t0]]'], {}), '([[obs_t0]])\n', (5086, 5098), True, 'import numpy as np\n'), ((7564, 7607), 'tensorflow.reshape', 'tf.reshape', (['enc_h', '[self.samples_n, 1, 128]'], {}), '(enc_h, 
[self.samples_n, 1, 128])\n', (7574, 7607), True, 'import tensorflow as tf\n')] |
import pybullet as p
import os
import numpy as np
import Helper
def load():
    """Load the catcher-robot URDF into the simulation.

    Returns:
        The pybullet unique body id of the loaded robot.
    """
    urdf_name = 'HextechCatcher.urdf'
    urdf_path = Helper.findURDF(urdf_name)
    start_position = [-0.6, 0.6, 1.1+0.5]
    start_orientation = [1.0, 0.0, 0.0, 0.0]
    return p.loadURDF(urdf_path, start_position, start_orientation)
def generateTraj(robotId, ballPos, targetPos):
    """Generate a joint-space trajectory to catch the ball and throw it.

    Args:
        robotId: unique body index of the robot.
        ballPos: baseball position [x, y, z].
        targetPos: target position [x, y, z].

    Returns:
        A second-order list of joint positions:
        [[j_1(t1), ..., j_n(t1)], [j_1(t2), ..., j_n(t2)], ...]
    Note: inverse kinematics functions of pybullet are deliberately not used.
    """
    numJoints = p.getNumJoints(robotId)

    def _lerp(start, end, n_step):
        # Linear interpolation of every joint from `start` to `end`,
        # producing n_step waypoints (end point itself excluded, matching
        # the original per-stage loops).
        return [[start[j] + (end[j] - start[j]) * step / n_step
                 for j in range(numJoints)]
                for step in range(n_step)]

    traj = []
    # stage 1: move above the ball and catch it
    holderTopState = p.getLinkState(robotId, 3)[0]
    dx = ballPos[0] - holderTopState[0]
    dy = -(ballPos[1] - holderTopState[1])
    dz_1 = -(0.11 + 1.1 + 0.1 - holderTopState[2])
    dz_2 = 0.1
    targetJointPos1 = [dx, dy, dz_1, 0, 0, 0]
    targetJointPos2 = [dx, dy, dz_1 + dz_2, 0, 0, 0]
    traj.extend(_lerp([0] * numJoints, targetJointPos1, 240))
    traj.extend(_lerp(targetJointPos1, targetJointPos2, 120))
    # stage 2: close the holder to hold the ball
    targetJointPos3 = [dx, dy, dz_1 + dz_2, 0, 0.012, 0.012]
    traj.extend(_lerp(targetJointPos2, targetJointPos3, 120))
    # stage 3: move up
    targetJointPos4 = [dx, dy, 0, 0, 0.012, 0.012]
    traj.extend(_lerp(targetJointPos3, targetJointPos4, 240))
    # stage 4: rotate toward the target
    vec = [targetPos[0] - ballPos[0], targetPos[1] - ballPos[1]]
    rot = np.arctan2(vec[1], vec[0])
    targetJointPos5 = [dx, dy, 0, -rot, 0.02, 0.02]
    traj.extend(_lerp(targetJointPos4, targetJointPos5, 240))
    # stage 5: horizontal speed up; launch speed derived from projectile
    # fall time over the holder height
    accLength = 0.2
    horizontalLength = np.linalg.norm(vec)
    height = 0.11 + 0.45
    v = horizontalLength / ((height / 5) ** 0.5)
    dx_1 = accLength * np.cos(rot)
    dy_1 = -accLength * np.sin(rot)
    targetJointPos6 = [dx + dx_1 * 0.7, dy + dy_1 * 0.7, 0, -rot, 0.015, 0.015]
    traj.extend(_lerp(targetJointPos5, targetJointPos6,
                      int(accLength / v * 240 * 0.9)))
    # stage 6: release and follow through (at least one step each)
    targetJointPos7 = [dx + dx_1 * 0.9, dy + dy_1 * 0.9, 0, -rot, 0.0, 0.0]
    traj.extend(_lerp(targetJointPos6, targetJointPos7,
                      max(1, int(accLength / v * 240 * 0.2))))
    targetJointPos8 = [dx + dx_1, dy + dy_1, 0, -rot, 0.0, 0.0]
    traj.extend(_lerp(targetJointPos7, targetJointPos8,
                      max(1, int(accLength / v * 240 * 0.1))))
    return traj
def addDebugItems(robotId):
    """Draw debug visuals attached to the robot (a grey x-axis line on link 3)."""
    # add any debug Items you like
    p.addUserDebugLine([0, 0, 0], [1, 0, 0], lineColorRGB=[0.5, 0.5, 0.5], parentObjectUniqueId=robotId, parentLinkIndex=3)
# pass | [
"pybullet.getLinkState",
"numpy.arctan2",
"pybullet.addUserDebugLine",
"Helper.findURDF",
"numpy.sin",
"numpy.linalg.norm",
"numpy.cos",
"pybullet.getNumJoints",
"pybullet.loadURDF"
] | [((265, 291), 'Helper.findURDF', 'Helper.findURDF', (['robotName'], {}), '(robotName)\n', (280, 291), False, 'import Helper\n'), ((386, 435), 'pybullet.loadURDF', 'p.loadURDF', (['robotPath', 'robotInitPos', 'robotInitOrn'], {}), '(robotPath, robotInitPos, robotInitOrn)\n', (396, 435), True, 'import pybullet as p\n'), ((1038, 1061), 'pybullet.getNumJoints', 'p.getNumJoints', (['robotId'], {}), '(robotId)\n', (1052, 1061), True, 'import pybullet as p\n'), ((2309, 2335), 'numpy.arctan2', 'np.arctan2', (['vec[1]', 'vec[0]'], {}), '(vec[1], vec[0])\n', (2319, 2335), True, 'import numpy as np\n'), ((2649, 2668), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (2663, 2668), True, 'import numpy as np\n'), ((3781, 3904), 'pybullet.addUserDebugLine', 'p.addUserDebugLine', (['[0, 0, 0]', '[1, 0, 0]'], {'lineColorRGB': '[0.5, 0.5, 0.5]', 'parentObjectUniqueId': 'robotId', 'parentLinkIndex': '(3)'}), '([0, 0, 0], [1, 0, 0], lineColorRGB=[0.5, 0.5, 0.5],\n parentObjectUniqueId=robotId, parentLinkIndex=3)\n', (3799, 3904), True, 'import pybullet as p\n'), ((1114, 1140), 'pybullet.getLinkState', 'p.getLinkState', (['robotId', '(3)'], {}), '(robotId, 3)\n', (1128, 1140), True, 'import pybullet as p\n'), ((2793, 2804), 'numpy.cos', 'np.cos', (['rot'], {}), '(rot)\n', (2799, 2804), True, 'import numpy as np\n'), ((2827, 2838), 'numpy.sin', 'np.sin', (['rot'], {}), '(rot)\n', (2833, 2838), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
from numpy.core.function_base import geomspace
from numpy.core.numeric import NaN
import roslib
import tf2_ros
roslib.load_manifest("multipath_sim")
import rospy
from multipath_sim.msg import MultipathOffset
from gazebo_msgs.srv import GetModelState
from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import Odometry
def cb_fn(data, cb_args):
    """Callback for /multipath/offset messages.

    Looks up the true pose of model "laser_0" from Gazebo, adds the
    multipath offset, and broadcasts both poses as TF frames plus a
    combined Odometry message.

    Args:
        data: MultipathOffset message carrying the position offset.
        cb_args: [get_model_pose service proxy, TF broadcaster, odom publisher].
    """
    get_model_pose = cb_args[0]
    br = cb_args[1]
    odom_pub = cb_args[2]
    true_pose = get_model_pose("laser_0", "map")
    true_pos = np.array([true_pose.pose.position.x, true_pose.pose.position.y, true_pose.pose.position.z])
    # Multipath-affected position = true position + offset from the message.
    affected_pos = true_pos + np.array(data.offset)
    current_time = rospy.Time.now()
    # TF frame for the affected position (identity orientation).
    t = TransformStamped()
    t.header.stamp = current_time
    t.header.frame_id = "map"
    t.child_frame_id = "affected_pos"
    t.transform.translation.x = affected_pos[0]
    t.transform.translation.y = affected_pos[1]
    t.transform.translation.z = affected_pos[2]
    t.transform.rotation.x = 0
    t.transform.rotation.y = 0
    t.transform.rotation.z = 0
    t.transform.rotation.w = 1
    br.sendTransform(t)
    # TF frame for the true pose, orientation included.
    t = TransformStamped()
    t.header.stamp = current_time
    t.header.frame_id = "map"
    t.child_frame_id = "laser_0"
    t.transform.translation.x = true_pose.pose.position.x
    t.transform.translation.y = true_pose.pose.position.y
    t.transform.translation.z = true_pose.pose.position.z
    t.transform.rotation.x = true_pose.pose.orientation.x
    t.transform.rotation.y = true_pose.pose.orientation.y
    t.transform.rotation.z = true_pose.pose.orientation.z
    t.transform.rotation.w = true_pose.pose.orientation.w
    br.sendTransform(t)
    aff_odom = Odometry()
    aff_odom.header.stamp = current_time
    aff_odom.header.frame_id = "map"
    aff_odom.child_frame_id = "hb1"
    aff_odom.pose.pose.position.x = affected_pos[0]
    aff_odom.pose.pose.position.y = affected_pos[1]
    aff_odom.pose.pose.position.z = affected_pos[2]
    aff_odom.pose.pose.orientation = true_pose.pose.orientation
    aff_odom.twist.twist.linear.x = data.offset[0] # Stuffing Offset and true position in the same message
    aff_odom.twist.twist.linear.y = data.offset[1]
    aff_odom.twist.twist.linear.z = data.offset[2]
    aff_odom.twist.twist.angular.x = true_pose.pose.position.x
    aff_odom.twist.twist.angular.y = true_pose.pose.position.y
    aff_odom.twist.twist.angular.z = true_pose.pose.position.z
    # Covariances set to NaN to mark them as not provided.
    for i in range(36):
        aff_odom.pose.covariance[i] = NaN
        aff_odom.twist.covariance[i] = NaN
    odom_pub.publish(aff_odom)
    # print("true", true_pos)
    # print("offset", data.offset)
    # print("affected", affected_pos)
    # print("+++++++++++++++++++")
    pass
def main():
    """Initialize the multipath_error_vis node, wire up the service proxy,
    TF broadcaster, publisher and subscriber, then spin until shutdown."""
    rospy.init_node('multipath_error_vis')
    br = tf2_ros.TransformBroadcaster()
    # BUGFIX: queue_size was missing; rospy logs a warning and falls back to
    # a deprecated synchronous publishing mode when it is omitted.
    aff_odom_pub = rospy.Publisher("/multipath/hb1/aff_odom", Odometry, queue_size=10)
    get_model_pose = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
    offset_sub = rospy.Subscriber("/multipath/offset", MultipathOffset, callback=cb_fn,
                                  callback_args=[get_model_pose, br, aff_odom_pub])
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down...")
# Script entry point: only run the node when executed directly.
if __name__=="__main__":
    main()
| [
"tf2_ros.TransformBroadcaster",
"rospy.Subscriber",
"nav_msgs.msg.Odometry",
"rospy.Time.now",
"geometry_msgs.msg.TransformStamped",
"rospy.Publisher",
"rospy.ServiceProxy",
"numpy.array",
"rospy.init_node",
"rospy.spin",
"roslib.load_manifest"
] | [((153, 190), 'roslib.load_manifest', 'roslib.load_manifest', (['"""multipath_sim"""'], {}), "('multipath_sim')\n", (173, 190), False, 'import roslib\n'), ((586, 682), 'numpy.array', 'np.array', (['[true_pose.pose.position.x, true_pose.pose.position.y, true_pose.pose.\n position.z]'], {}), '([true_pose.pose.position.x, true_pose.pose.position.y, true_pose.\n pose.position.z])\n', (594, 682), True, 'import numpy as np\n'), ((749, 765), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (763, 765), False, 'import rospy\n'), ((774, 792), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (790, 792), False, 'from geometry_msgs.msg import TransformStamped\n'), ((1196, 1214), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (1212, 1214), False, 'from geometry_msgs.msg import TransformStamped\n'), ((1759, 1769), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (1767, 1769), False, 'from nav_msgs.msg import Odometry\n'), ((2809, 2847), 'rospy.init_node', 'rospy.init_node', (['"""multipath_error_vis"""'], {}), "('multipath_error_vis')\n", (2824, 2847), False, 'import rospy\n'), ((2857, 2887), 'tf2_ros.TransformBroadcaster', 'tf2_ros.TransformBroadcaster', ([], {}), '()\n', (2885, 2887), False, 'import tf2_ros\n'), ((2907, 2959), 'rospy.Publisher', 'rospy.Publisher', (['"""/multipath/hb1/aff_odom"""', 'Odometry'], {}), "('/multipath/hb1/aff_odom', Odometry)\n", (2922, 2959), False, 'import rospy\n'), ((2981, 3041), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/get_model_state"""', 'GetModelState'], {}), "('/gazebo/get_model_state', GetModelState)\n", (2999, 3041), False, 'import rospy\n'), ((3059, 3183), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/multipath/offset"""', 'MultipathOffset'], {'callback': 'cb_fn', 'callback_args': '[get_model_pose, br, aff_odom_pub]'}), "('/multipath/offset', MultipathOffset, callback=cb_fn,\n callback_args=[get_model_pose, br, aff_odom_pub])\n", (3075, 3183), False, 
'import rospy\n'), ((708, 729), 'numpy.array', 'np.array', (['data.offset'], {}), '(data.offset)\n', (716, 729), True, 'import numpy as np\n'), ((3203, 3215), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3213, 3215), False, 'import rospy\n')] |
import os
import sys
import warnings
from random import sample
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import SubsetRandomSampler
sys.path.append(os.path.pardir)
from cgcnn.data import CIFData
from cgcnn.data import collate_pool
from cgcnn.model import CrystalGraphConvNet, SimpleNN
from train import train
from validate import validate
from module.arguments import arguments
from module.function import *
from module.normalizer import Normalizer
# Parse command-line arguments once at module import; shared via `global`.
args = arguments()
# Track best validation metric: regression minimizes MAE (start high),
# classification maximizes the score (start low).
if args.task == 'regression':
    best_mae_error = 1e10
else:
    best_mae_error = 0.
def tc_trans2():
    """5-fold cross-validated transfer learning for thermal conductivity.

    Two pre-trained CGCNN models (bulk modulus and sound speed) are frozen;
    forward hooks capture their `conv_to_fc` activations, which are
    concatenated (256 features) and fed to a trainable SimpleNN head.
    Predictions from every fold are pooled and plotted against observations.

    BUGFIX: the original fold construction used `y = x.extend(...)`
    (`list.extend` returns None, so y was None) and misspelled the result as
    `train_samplre`, which meant folds 1-4 silently reused fold 0's training
    sampler. The folds are now built correctly.
    """
    global args, best_mae_error
    # load data
    dataset = CIFData(*args.data_options)
    collate_fn = collate_pool
    # obtain target value normalizer
    if args.task == 'classification':
        normalizer = Normalizer(torch.zeros(2))
        normalizer.load_state_dict({'mean': 0., 'std': 1.})
    else:
        if len(dataset) < 500:
            warnings.warn('Dataset has less than 500 data points. '
                          'Lower accuracy is expected. ')
            sample_data_list = [dataset[i] for i in range(len(dataset))]
        else:
            sample_data_list = [dataset[i] for i in
                                sample(range(len(dataset)), 500)]
        _, sample_target, _ = collate_pool(sample_data_list)
        normalizer = Normalizer(sample_target)
    # build the two frozen feature extractors and the trainable head
    structures, _, _ = dataset[0]
    orig_atom_fea_len = structures[0].shape[-1]
    nbr_fea_len = structures[1].shape[-1]
    model_a = CrystalGraphConvNet(orig_atom_fea_len, nbr_fea_len,
                                atom_fea_len=args.atom_fea_len,
                                n_conv=args.n_conv,
                                h_fea_len=args.h_fea_len,
                                n_h=args.n_h,
                                classification=True if args.task ==
                                'classification' else False)
    model_b = CrystalGraphConvNet(orig_atom_fea_len, nbr_fea_len,
                                atom_fea_len=args.atom_fea_len,
                                n_conv=args.n_conv,
                                h_fea_len=args.h_fea_len,
                                n_h=args.n_h,
                                classification=True if args.task ==
                                'classification' else False)
    model = SimpleNN(in_feature=256, out_feature=1)
    # pretrained model paths
    model_a_path = '../pre-trained/research-model/bulk_moduli-model_best.pth.tar'
    model_b_path = '../pre-trained/research-model/sps-model_best.pth.tar'
    # load latest model state
    ckpt_a = torch.load(model_a_path)
    ckpt_b = torch.load(model_b_path)
    model_a.load_state_dict(ckpt_a['state_dict'])
    model_b.load_state_dict(ckpt_b['state_dict'])

    def get_activation_a(name, activation_a):
        # Hook factory: stash model_a's layer output for the fusion head.
        def hook(model, input, output):
            activation_a[name] = output.detach()
        return hook

    def get_activation_b(name, activation_b):
        # Hook factory: stash model_b's layer output for the fusion head.
        def hook(model, input, output):
            activation_b[name] = output.detach()
        return hook

    if args.cuda:
        model_a.cuda()
        model_b.cuda()
        model.cuda()
    activation_a = {}
    activation_b = {}
    # hook the penultimate activations of both pre-trained models
    model_a.conv_to_fc.register_forward_hook(get_activation_a('conv_to_fc', activation_a))
    model_b.conv_to_fc.register_forward_hook(get_activation_b('conv_to_fc', activation_b))
    # define loss func and optimizer
    if args.task == 'classification':
        criterion = nn.NLLLoss()
    else:
        criterion = nn.MSELoss()
    if args.optim == 'SGD':
        optimizer = optim.SGD(model.parameters(), args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    elif args.optim == 'Adam':
        optimizer = optim.Adam(model.parameters(), args.lr,
                               weight_decay=args.weight_decay)
    else:
        raise NameError('Only SGD or Adam is allowed as --optim')
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_mae_error = checkpoint['best_mae_error']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            normalizer.load_state_dict(checkpoint['normalizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    scheduler = MultiStepLR(optimizer, milestones=args.lr_milestones,
                            gamma=0.1)
    X = torch.Tensor()
    T = torch.Tensor()
    for i in range(5):
        total_size = len(dataset)
        indices = list(range(total_size))
        batch_size = args.batch_size
        num_workers = args.workers
        pin_memory = args.cuda
        # Rotate a held-out test window through the dataset; the training
        # indices are everything outside that window.
        if i == 0:
            train_idx = indices[:61]
            test_idx = indices[-16:]
        elif i == 1:
            train_idx = indices[:45] + indices[-16:]
            test_idx = indices[45:-16]
        elif i == 2:
            train_idx = indices[:29] + indices[-32:]
            test_idx = indices[29:-32]
        elif i == 3:
            train_idx = indices[:13] + indices[-48:]
            test_idx = indices[13:-48]
        else:
            train_idx = indices[-64:]
            test_idx = indices[:-64]
        train_sampler = SubsetRandomSampler(train_idx)
        test_sampler = SubsetRandomSampler(test_idx)
        train_loader = DataLoader(dataset, batch_size=batch_size,
                                  sampler=train_sampler,
                                  num_workers=num_workers,
                                  collate_fn=collate_fn, pin_memory=pin_memory)
        test_loader = DataLoader(dataset, batch_size=batch_size,
                                 sampler=test_sampler,
                                 num_workers=num_workers,
                                 collate_fn=collate_fn, pin_memory=pin_memory)
        print(test_sampler)
        for epoch in range(args.start_epoch, args.epochs):
            # train for one epoch
            train(args, train_loader, model_a, model_b, model, activation_a, activation_b, criterion, optimizer, epoch, normalizer)
            # evaluate on validation set
            mae_error = validate(args, train_loader, model_a, model_b, model, activation_a, activation_b, criterion, normalizer)
            if mae_error != mae_error:
                # NaN check: NaN is the only value not equal to itself.
                print('Exit due to NaN')
                sys.exit(1)
            scheduler.step()
            # remember the best mae_error and save checkpoint
            if args.task == 'regression':
                is_best = mae_error < best_mae_error
                best_mae_error = min(mae_error, best_mae_error)
            else:
                is_best = mae_error > best_mae_error
                best_mae_error = max(mae_error, best_mae_error)
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_mae_error': best_mae_error,
                'optimizer': optimizer.state_dict(),
                'normalizer': normalizer.state_dict(),
                'args': vars(args)
            }, is_best, prop=args.property)
        # test best model on this fold's held-out set
        print('---------Evaluate Model on Test Set---------------')
        best_checkpoint = torch.load('../result/' + args.property + '-model_best.pth.tar')
        model.load_state_dict(best_checkpoint['state_dict'])
        x, t = validate(args, test_loader, model_a, model_b, model, activation_a, activation_b, criterion, normalizer, test=True, tc=True)
        X = torch.cat((X, x), dim=0)
        T = torch.cat((T, t), dim=0)
    # pool predictions from all folds and plot prediction vs. observation
    x, t = X.numpy(), T.numpy()
    n_max = max(np.max(x), np.max(t))
    n_min = min(np.min(x), np.min(t))
    a = np.linspace(n_min - abs(n_max), n_max + abs(n_max))
    b = a
    plt.rcParams["font.family"] = "Times New Roman"
    plt.plot(a, b, color = 'blue')
    plt.scatter(t, x, marker = ".", color = 'red', edgecolors = 'black')
    plt.xlim(n_min - abs(n_min) , n_max + abs(n_min))
    plt.ylim(n_min - abs(n_min) , n_max + abs(n_min))
    plt.title("Thermal Conductivity Prediction by CGCNN with Combined Model Transfer Learning")
    plt.xlabel("observation")
    plt.ylabel("prediction")
    plt.show()
# Script entry point: run the 5-fold transfer-learning experiment.
if __name__ == '__main__':
    tc_trans2()
| [
"matplotlib.pyplot.title",
"torch.cat",
"torch.nn.NLLLoss",
"validate.validate",
"os.path.isfile",
"module.normalizer.Normalizer",
"sys.path.append",
"torch.nn.MSELoss",
"cgcnn.data.collate_pool",
"torch.utils.data.DataLoader",
"torch.load",
"numpy.max",
"torch.Tensor",
"torch.zeros",
"m... | [((390, 421), 'sys.path.append', 'sys.path.append', (['os.path.pardir'], {}), '(os.path.pardir)\n', (405, 421), False, 'import sys\n'), ((717, 728), 'module.arguments.arguments', 'arguments', ([], {}), '()\n', (726, 728), False, 'from module.arguments import arguments\n'), ((898, 925), 'cgcnn.data.CIFData', 'CIFData', (['*args.data_options'], {}), '(*args.data_options)\n', (905, 925), False, 'from cgcnn.data import CIFData\n'), ((1777, 1995), 'cgcnn.model.CrystalGraphConvNet', 'CrystalGraphConvNet', (['orig_atom_fea_len', 'nbr_fea_len'], {'atom_fea_len': 'args.atom_fea_len', 'n_conv': 'args.n_conv', 'h_fea_len': 'args.h_fea_len', 'n_h': 'args.n_h', 'classification': "(True if args.task == 'classification' else False)"}), "(orig_atom_fea_len, nbr_fea_len, atom_fea_len=args.\n atom_fea_len, n_conv=args.n_conv, h_fea_len=args.h_fea_len, n_h=args.\n n_h, classification=True if args.task == 'classification' else False)\n", (1796, 1995), False, 'from cgcnn.model import CrystalGraphConvNet, SimpleNN\n'), ((2227, 2445), 'cgcnn.model.CrystalGraphConvNet', 'CrystalGraphConvNet', (['orig_atom_fea_len', 'nbr_fea_len'], {'atom_fea_len': 'args.atom_fea_len', 'n_conv': 'args.n_conv', 'h_fea_len': 'args.h_fea_len', 'n_h': 'args.n_h', 'classification': "(True if args.task == 'classification' else False)"}), "(orig_atom_fea_len, nbr_fea_len, atom_fea_len=args.\n atom_fea_len, n_conv=args.n_conv, h_fea_len=args.h_fea_len, n_h=args.\n n_h, classification=True if args.task == 'classification' else False)\n", (2246, 2445), False, 'from cgcnn.model import CrystalGraphConvNet, SimpleNN\n'), ((2675, 2714), 'cgcnn.model.SimpleNN', 'SimpleNN', ([], {'in_feature': '(256)', 'out_feature': '(1)'}), '(in_feature=256, out_feature=1)\n', (2683, 2714), False, 'from cgcnn.model import CrystalGraphConvNet, SimpleNN\n'), ((2940, 2964), 'torch.load', 'torch.load', (['model_a_path'], {}), '(model_a_path)\n', (2950, 2964), False, 'import torch\n'), ((2978, 3002), 'torch.load', 'torch.load', 
(['model_b_path'], {}), '(model_b_path)\n', (2988, 3002), False, 'import torch\n'), ((5104, 5168), 'torch.optim.lr_scheduler.MultiStepLR', 'MultiStepLR', (['optimizer'], {'milestones': 'args.lr_milestones', 'gamma': '(0.1)'}), '(optimizer, milestones=args.lr_milestones, gamma=0.1)\n', (5115, 5168), False, 'from torch.optim.lr_scheduler import MultiStepLR\n'), ((5205, 5219), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (5217, 5219), False, 'import torch\n'), ((5228, 5242), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (5240, 5242), False, 'import torch\n'), ((9232, 9242), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9240, 9242), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1572), 'cgcnn.data.collate_pool', 'collate_pool', (['sample_data_list'], {}), '(sample_data_list)\n', (1554, 1572), False, 'from cgcnn.data import collate_pool\n'), ((1594, 1619), 'module.normalizer.Normalizer', 'Normalizer', (['sample_target'], {}), '(sample_target)\n', (1604, 1619), False, 'from module.normalizer import Normalizer\n'), ((3877, 3889), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (3887, 3889), True, 'import torch.nn as nn\n'), ((3920, 3932), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3930, 3932), True, 'import torch.nn as nn\n'), ((4440, 4467), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (4454, 4467), False, 'import os\n'), ((6381, 6521), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'train_sampler', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn', 'pin_memory': 'pin_memory'}), '(dataset, batch_size=batch_size, sampler=train_sampler,\n num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory)\n', (6391, 6521), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6625, 6764), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'test_sampler', 'num_workers': 'num_workers', 
'collate_fn': 'collate_fn', 'pin_memory': 'pin_memory'}), '(dataset, batch_size=batch_size, sampler=test_sampler,\n num_workers=num_workers, collate_fn=collate_fn, pin_memory=pin_memory)\n', (6635, 6764), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((8238, 8302), 'torch.load', 'torch.load', (["('../result/' + args.property + '-model_best.pth.tar')"], {}), "('../result/' + args.property + '-model_best.pth.tar')\n", (8248, 8302), False, 'import torch\n'), ((8377, 8504), 'validate.validate', 'validate', (['args', 'test_loader', 'model_a', 'model_b', 'model', 'activation_a', 'activation_b', 'criterion', 'normalizer'], {'test': '(True)', 'tc': '(True)'}), '(args, test_loader, model_a, model_b, model, activation_a,\n activation_b, criterion, normalizer, test=True, tc=True)\n', (8385, 8504), False, 'from validate import validate\n'), ((8513, 8537), 'torch.cat', 'torch.cat', (['(X, x)'], {'dim': '(0)'}), '((X, x), dim=0)\n', (8522, 8537), False, 'import torch\n'), ((8550, 8574), 'torch.cat', 'torch.cat', (['(T, t)'], {'dim': '(0)'}), '((T, t), dim=0)\n', (8559, 8574), False, 'import torch\n'), ((8837, 8865), 'matplotlib.pyplot.plot', 'plt.plot', (['a', 'b'], {'color': '"""blue"""'}), "(a, b, color='blue')\n", (8845, 8865), True, 'import matplotlib.pyplot as plt\n'), ((8876, 8938), 'matplotlib.pyplot.scatter', 'plt.scatter', (['t', 'x'], {'marker': '"""."""', 'color': '"""red"""', 'edgecolors': '"""black"""'}), "(t, x, marker='.', color='red', edgecolors='black')\n", (8887, 8938), True, 'import matplotlib.pyplot as plt\n'), ((9069, 9170), 'matplotlib.pyplot.title', 'plt.title', (['"""Thermal Conductivity Prediction by CGCNN with Combined Model Transfer Learning"""'], {}), "(\n 'Thermal Conductivity Prediction by CGCNN with Combined Model Transfer Learning'\n )\n", (9078, 9170), True, 'import matplotlib.pyplot as plt\n'), ((9169, 9194), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""observation"""'], {}), "('observation')\n", (9179, 9194), True, 'import 
matplotlib.pyplot as plt\n'), ((9203, 9227), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""prediction"""'], {}), "('prediction')\n", (9213, 9227), True, 'import matplotlib.pyplot as plt\n'), ((1064, 1078), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (1075, 1078), False, 'import torch\n'), ((1193, 1282), 'warnings.warn', 'warnings.warn', (['"""Dataset has less than 500 data points. Lower accuracy is expected. """'], {}), "(\n 'Dataset has less than 500 data points. Lower accuracy is expected. ')\n", (1206, 1282), False, 'import warnings\n'), ((4562, 4585), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (4572, 4585), False, 'import torch\n'), ((5490, 5523), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['indices[:61]'], {}), '(indices[:61])\n', (5509, 5523), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((5551, 5585), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['indices[-16:]'], {}), '(indices[-16:])\n', (5570, 5585), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((5702, 5724), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['y'], {}), '(y)\n', (5721, 5724), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((5752, 5788), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['indices[45:-16]'], {}), '(indices[45:-16])\n', (5771, 5788), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((5905, 5927), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['y'], {}), '(y)\n', (5924, 5927), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((5955, 5991), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['indices[29:-32]'], {}), '(indices[29:-32])\n', (5974, 5991), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((6108, 6130), 
'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['y'], {}), '(y)\n', (6127, 6130), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((6158, 6194), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['indices[13:-48]'], {}), '(indices[13:-48])\n', (6177, 6194), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((6272, 6294), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['y'], {}), '(y)\n', (6291, 6294), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((6322, 6356), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['indices[:-64]'], {}), '(indices[:-64])\n', (6341, 6356), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((6978, 7101), 'train.train', 'train', (['args', 'train_loader', 'model_a', 'model_b', 'model', 'activation_a', 'activation_b', 'criterion', 'optimizer', 'epoch', 'normalizer'], {}), '(args, train_loader, model_a, model_b, model, activation_a,\n activation_b, criterion, optimizer, epoch, normalizer)\n', (6983, 7101), False, 'from train import train\n'), ((7164, 7272), 'validate.validate', 'validate', (['args', 'train_loader', 'model_a', 'model_b', 'model', 'activation_a', 'activation_b', 'criterion', 'normalizer'], {}), '(args, train_loader, model_a, model_b, model, activation_a,\n activation_b, criterion, normalizer)\n', (7172, 7272), False, 'from validate import validate\n'), ((8631, 8640), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (8637, 8640), True, 'import numpy as np\n'), ((8642, 8651), 'numpy.max', 'np.max', (['t'], {}), '(t)\n', (8648, 8651), True, 'import numpy as np\n'), ((8673, 8682), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (8679, 8682), True, 'import numpy as np\n'), ((8684, 8693), 'numpy.min', 'np.min', (['t'], {}), '(t)\n', (8690, 8693), True, 'import numpy as np\n'), ((7366, 7377), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7374, 7377), False, 
'import sys\n')] |
"""
IMAGE WORD2VEC
<NAME> - <EMAIL>
Vectorization Module - based on CNNs encoded in Keras.
The module is used to convert a set of images into corresponding feature vectors.
Each feature vector is of length 2048.
"""
# import libraries
import numpy as np
from keras.applications import vgg16, inception_v3, resnet50, mobilenet
import os
from keras.layers import Dense
from keras.models import Model
#from keras.applications.imagenet_utils import decode_predictions
import matplotlib.pyplot as plt
from keras.preprocessing import image
from sklearn.manifold import TSNE
#===========================#functions=============================================
#=====================================================================================
# Defines the model and the corresponding size of the image they take as input.
#inputs: #modelname : The CNN. The choices are VGG16, ResNet50, MobileNet, InceptionV3
#isTop : boolean defining whether to return base or complete model
#num_classes : The number of classes in the fully connected layer, if isTop = True.
def create_model(modelname, isComplete=False, num_classes=2048):
    """Build a Keras CNN backbone for feature extraction.

    modelname   : one of "VGG16", "ResNet50", "MobileNet", "InceptionV3".
    isComplete  : when True, append a softmax Dense head of `num_classes`.
    num_classes : width of the optional classification head.

    Returns (model, img_size) where img_size is the (height, width) the
    network expects, or None (after printing a message) for unknown names.
    """
    backbones = {
        "VGG16": (vgg16.VGG16, (224, 224)),
        "ResNet50": (resnet50.ResNet50, (224, 224)),
        "MobileNet": (mobilenet.MobileNet, (224, 224)),
        "InceptionV3": (inception_v3.InceptionV3, (299, 299)),
    }
    if modelname not in backbones:
        print ("No valid Model defined. Options are : VGG16, ResNet50, MobileNet, InceptionV3")
        return
    constructor, img_size = backbones[modelname]
    # Global average pooling yields one fixed-length vector per image.
    model = constructor(weights='imagenet', pooling='avg', include_top=False)
    if isComplete:
        head = Dense(num_classes, activation='softmax')(model.output)
        model = Model(inputs=model.input, outputs=head)
    return model, img_size
#Process the image according to the model selected.
#filename : complete name of image file.
#img_size : output of the function "create_model"
#modelname: same as the one used for "create_model"
def process_image(filename, img_size, modelname):
    """Load one image file and convert it into a model-ready batch.

    filename  : full path of the image file.
    img_size  : (height, width) expected by the network (from create_model).
    modelname : same name used with create_model; selects the matching
                preprocessing routine.

    Returns a (1, height, width, 3) preprocessed array, or None (after
    printing a message) for unknown model names.
    """
    # PIL stores images as (width, height, channel); img_to_array produces
    # the numpy (height, width, channel) layout the networks expect.
    pil_image = image.load_img(filename, target_size=img_size)
    array = image.img_to_array(pil_image)
    # The network wants (batchsize, height, width, channels), so prepend a
    # batch axis of size 1.
    batch = np.expand_dims(array, axis=0)
    preprocessors = {
        "VGG16": vgg16.preprocess_input,
        "ResNet50": resnet50.preprocess_input,
        "MobileNet": mobilenet.preprocess_input,
        "InceptionV3": inception_v3.preprocess_input,
    }
    if modelname not in preprocessors:
        print("No valid Model defined. Options are : VGG16, ResNet50, MobileNet, InceptionV3")
        return
    return preprocessors[modelname](batch.copy())
# Plot a TSNE model based on the first "n" elements of input labels and tokens
def plot_vec(tokens, labels, n):
    """Project the first `n` vectors to 2-D with t-SNE and scatter-plot
    them, annotating each point with its corresponding label."""
    tsne_model = TSNE(perplexity=33, n_components=2, init='pca', n_iter=2500, random_state=23)
    coords = tsne_model.fit_transform(tokens[0:n])
    plt.figure(figsize=(16, 16))
    for idx, (px, py) in enumerate(coords):
        plt.scatter(px, py)
        plt.annotate(labels[idx],
                     xy=(px, py),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.show()
# ======================== end functions =======================================================
#===============================================================================================
# --- Script entry point -----------------------------------------------------
# Walk every sub-folder of `inputfolder`, run each image through the CNN,
# and save per-folder feature matrices ("ImgVectors.npy") plus matching
# file names ("Names.npy") under `outputfolder`.
modelname = "ResNet50"
model, img_size = create_model(modelname, isComplete = False)
inputfolder = 'outputtest'
outputfolder = 'ImgVecstest'
assert os.path.isdir(inputfolder), "Specified input folder does not exist"
if os.path.isdir(outputfolder) == False:
    print("Specified output folder does not exist. Creating it in root directory...")
    os.mkdir(outputfolder)
subfolders = os.listdir(inputfolder)
# NOTE(review): this drops the last entry of os.listdir()'s arbitrary,
# platform-dependent ordering — presumably to skip one non-data entry;
# confirm it does not silently skip a real image folder.
subfolders = subfolders[0:-1]
allpred = []
allnames = []
for idx, subfolder in enumerate(subfolders):
    files = os.listdir(inputfolder + "/" + subfolder)
    # Resume support: skip folders whose vectors were already saved. Note
    # that skipped folders are then also absent from allpred/allnames.
    if os.path.isdir(outputfolder + '/' + subfolder):
        print("Processed subfolder ", str(idx+1), " of ", str(len(subfolders)) )
        continue
    predictions = []
    names = []
    for filename in files:
        processed_image = process_image(inputfolder+ "/" + subfolder+ "/" + filename,img_size, modelname )
        # x is the pooled feature vector for this single-image batch.
        x = model.predict(processed_image)
        x = x[0]
        predictions.append(x.tolist())
        names.append(subfolder + "/" + filename)
        allpred.append(x.tolist())
        allnames.append(filename)
    if os.path.isdir(outputfolder+ '/' + subfolder) == False:
        os.mkdir(outputfolder + '/' + subfolder)
    np.save(outputfolder + '/' + subfolder + '/' + "ImgVectors", predictions)
    np.save(outputfolder + '/' + subfolder + '/' + "Names", names)
    print("Processed subfolder ", str(idx+1), " of ", str(len(subfolders)))
# Visualise the first 500 collected vectors with t-SNE.
plot_vec(allpred,allnames,500)
#plot_vec(np.load("output/000000008211.jpg/ImgVectors.npy"), np.load("output/000000008211.jpg/Names.npy")) | [
"os.mkdir",
"numpy.save",
"matplotlib.pyplot.show",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.annotate",
"os.path.isdir",
"matplotlib.pyplot.scatter",
"numpy.expand_dims",
"keras.models.Model",
"keras.applications.mobilenet.MobileNet",
"keras.preprocessing.image.img_to_array",
"keras.preproc... | [((4621, 4647), 'os.path.isdir', 'os.path.isdir', (['inputfolder'], {}), '(inputfolder)\n', (4634, 4647), False, 'import os\n'), ((4857, 4880), 'os.listdir', 'os.listdir', (['inputfolder'], {}), '(inputfolder)\n', (4867, 4880), False, 'import os\n'), ((2412, 2458), 'keras.preprocessing.image.load_img', 'image.load_img', (['filename'], {'target_size': 'img_size'}), '(filename, target_size=img_size)\n', (2426, 2458), False, 'from keras.preprocessing import image\n'), ((2630, 2660), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (2648, 2660), False, 'from keras.preprocessing import image\n'), ((2964, 2998), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (2978, 2998), True, 'import numpy as np\n'), ((3701, 3778), 'sklearn.manifold.TSNE', 'TSNE', ([], {'perplexity': '(33)', 'n_components': '(2)', 'init': '"""pca"""', 'n_iter': '(2500)', 'random_state': '(23)'}), "(perplexity=33, n_components=2, init='pca', n_iter=2500, random_state=23)\n", (3705, 3778), False, 'from sklearn.manifold import TSNE\n'), ((3945, 3973), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (3955, 3973), True, 'import matplotlib.pyplot as plt\n'), ((4260, 4270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4268, 4270), True, 'import matplotlib.pyplot as plt\n'), ((4692, 4719), 'os.path.isdir', 'os.path.isdir', (['outputfolder'], {}), '(outputfolder)\n', (4705, 4719), False, 'import os\n'), ((4820, 4842), 'os.mkdir', 'os.mkdir', (['outputfolder'], {}), '(outputfolder)\n', (4828, 4842), False, 'import os\n'), ((4996, 5037), 'os.listdir', 'os.listdir', (["(inputfolder + '/' + subfolder)"], {}), "(inputfolder + '/' + subfolder)\n", (5006, 5037), False, 'import os\n'), ((5047, 5092), 'os.path.isdir', 'os.path.isdir', (["(outputfolder + '/' + subfolder)"], {}), "(outputfolder + '/' + subfolder)\n", (5060, 5092), 
False, 'import os\n'), ((5755, 5828), 'numpy.save', 'np.save', (["(outputfolder + '/' + subfolder + '/' + 'ImgVectors')", 'predictions'], {}), "(outputfolder + '/' + subfolder + '/' + 'ImgVectors', predictions)\n", (5762, 5828), True, 'import numpy as np\n'), ((5834, 5896), 'numpy.save', 'np.save', (["(outputfolder + '/' + subfolder + '/' + 'Names')", 'names'], {}), "(outputfolder + '/' + subfolder + '/' + 'Names', names)\n", (5841, 5896), True, 'import numpy as np\n'), ((1206, 1271), 'keras.applications.vgg16.VGG16', 'vgg16.VGG16', ([], {'weights': '"""imagenet"""', 'pooling': '"""avg"""', 'include_top': '(False)'}), "(weights='imagenet', pooling='avg', include_top=False)\n", (1217, 1271), False, 'from keras.applications import vgg16, inception_v3, resnet50, mobilenet\n'), ((2025, 2061), 'keras.models.Model', 'Model', ([], {'inputs': 'model.input', 'outputs': 'x'}), '(inputs=model.input, outputs=x)\n', (2030, 2061), False, 'from keras.models import Model\n'), ((4010, 4033), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[i]', 'y[i]'], {}), '(x[i], y[i])\n', (4021, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4042, 4155), 'matplotlib.pyplot.annotate', 'plt.annotate', (['labels[i]'], {'xy': '(x[i], y[i])', 'xytext': '(5, 2)', 'textcoords': '"""offset points"""', 'ha': '"""right"""', 'va': '"""bottom"""'}), "(labels[i], xy=(x[i], y[i]), xytext=(5, 2), textcoords=\n 'offset points', ha='right', va='bottom')\n", (4054, 4155), True, 'import matplotlib.pyplot as plt\n'), ((5645, 5690), 'os.path.isdir', 'os.path.isdir', (["(outputfolder + '/' + subfolder)"], {}), "(outputfolder + '/' + subfolder)\n", (5658, 5690), False, 'import os\n'), ((5709, 5749), 'os.mkdir', 'os.mkdir', (["(outputfolder + '/' + subfolder)"], {}), "(outputfolder + '/' + subfolder)\n", (5717, 5749), False, 'import os\n'), ((1355, 1426), 'keras.applications.resnet50.ResNet50', 'resnet50.ResNet50', ([], {'weights': '"""imagenet"""', 'pooling': '"""avg"""', 'include_top': '(False)'}), 
"(weights='imagenet', pooling='avg', include_top=False)\n", (1372, 1426), False, 'from keras.applications import vgg16, inception_v3, resnet50, mobilenet\n'), ((1965, 2005), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (1970, 2005), False, 'from keras.layers import Dense\n'), ((1511, 1584), 'keras.applications.mobilenet.MobileNet', 'mobilenet.MobileNet', ([], {'weights': '"""imagenet"""', 'pooling': '"""avg"""', 'include_top': '(False)'}), "(weights='imagenet', pooling='avg', include_top=False)\n", (1530, 1584), False, 'from keras.applications import vgg16, inception_v3, resnet50, mobilenet\n'), ((1673, 1751), 'keras.applications.inception_v3.InceptionV3', 'inception_v3.InceptionV3', ([], {'weights': '"""imagenet"""', 'pooling': '"""avg"""', 'include_top': '(False)'}), "(weights='imagenet', pooling='avg', include_top=False)\n", (1697, 1751), False, 'from keras.applications import vgg16, inception_v3, resnet50, mobilenet\n')] |
'''
made by @finnkso (github)
2020.04.09
tensorflow-gpu==1.15.0 : tf.compat.v1
if tensorflow-gpu==1.8.0, please replace tf.compat.v1 with tf
'''
import argparse
import os
import tkinter as tk
from tkinter import filedialog
import cv2
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from net import generator
from tools.utils import preprocessing, check_folder
from tools.adjust_brightness import adjust_brightness_from_src_to_dst
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def parse_args(argv=None):
    """Parse command-line options for the AnimeGAN video converter.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] as before; passing an explicit
            list makes the function testable without touching the process
            arguments.

    Returns:
        argparse.Namespace with attributes video, checkpoint_dir, output,
        and output_format.
    """
    desc = "Tensorflow implementation of AnimeGAN"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--video', type=str, default='video/input/'+ 'お花見.mp4',
                        help='video file or number for webcam')
    parser.add_argument('--checkpoint_dir', type=str, default='../checkpoint/generator_Hayao_weight',
                        help='Directory name to save the checkpoints')
    parser.add_argument('--output', type=str, default='video/output',
                        help='output path')
    parser.add_argument('--output_format', type=str, default='MP4V',
                        help='codec used in VideoWriter when saving video to file')
    return parser.parse_args(argv)
def getfileloc(initialdir='/', method='open', title='Please select a file', filetypes=(("video files", ".mkv .avi .mp4"), ("all files","*.*"))):
    """Show a Tk file-selection dialog and return the chosen path.

    method='open' shows an open-file dialog; method='save' shows a save
    dialog pre-filled with 'out.avi'. Any other method value leaves the
    result unbound and raises NameError at the return, as before.
    """
    root = tk.Tk()
    if method == 'save':
        fileloc = filedialog.asksaveasfilename(parent=root, initialdir=initialdir, initialfile='out.avi', title=title, filetypes=filetypes)
    elif method == 'open':
        fileloc = filedialog.askopenfilename(parent=root, initialdir=initialdir, title=title, filetypes=filetypes)
    root.withdraw()
    return fileloc
def convert_image(img, img_size):
    """Convert a BGR video frame into the generator's input batch.

    Steps: BGR -> RGB, resize/normalise via preprocessing(), then prepend
    a batch axis so the result has shape (1, height, width, 3).
    """
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = preprocessing(img, img_size)
    # np.expand_dims already returns an ndarray, so the former
    # np.asarray() wrapper was a no-op and has been dropped.
    return np.expand_dims(img, axis=0)
def inverse_image(img):
    """Undo the generator's [-1, 1] normalisation, yielding uint8 pixels.

    Singleton axes (e.g. the leading batch dimension) are squeezed out
    before rescaling into the [0, 255] range.
    """
    rescaled = (img.squeeze() + 1.) / 2 * 255
    return rescaled.astype(np.uint8)
def cvt2anime_video(video, output, checkpoint_dir, output_format='MP4V', img_size=(256,256)):
    '''
    Convert a video into its AnimeGAN-stylised version, frame by frame.

    video          : path of the input video file.
    output         : directory the stylised video is written to (keeps the
                     input's basename).
    checkpoint_dir : directory holding the trained generator checkpoint.
    output_format  : 4-letter code that specify codec to use for specific video type.
                     e.g. for mp4 support use "H264", "MP4V", or "X264"
    img_size       : target size frames are resized to before inference.

    Returns the output file path, or None when no checkpoint is found or
    the first frame cannot be read.
    '''
    # tf.reset_default_graph()
    # check_folder(result_dir)
    gpu_stat = bool(len(tf.config.experimental.list_physical_devices('GPU')))
    if gpu_stat:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # allow_growth only takes effect when a GPU is actually present.
    gpu_options = tf.compat.v1.GPUOptions(allow_growth=gpu_stat)
    # Single-image placeholder; spatial dimensions stay dynamic.
    test_real = tf.compat.v1.placeholder(tf.float32, [1, None, None, 3], name='test')
    with tf.compat.v1.variable_scope("generator", reuse=False):
        test_generated = generator.G_net(test_real).fake
    # load video
    vid = cv2.VideoCapture(video)
    vid_name = os.path.basename(video)
    total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(vid.get(cv2.CAP_PROP_FPS))
    # codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    codec = cv2.VideoWriter_fourcc(*output_format)
    tfconfig = tf.compat.v1.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    with tf.compat.v1.Session(config=tfconfig) as sess:
        # tf.global_variables_initializer().run()
        # Restore the latest generator checkpoint from checkpoint_dir.
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir) # checkpoint file information
        saver = tf.compat.v1.train.Saver()
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
            print(" [*] Success to read {}".format(ckpt_name))
        else:
            print(" [*] Failed to find a checkpoint")
            return
        # determine output width and height
        # The first frame is consumed only to measure the post-resize shape;
        # the capture is rewound to frame 0 before the conversion loop below.
        ret, img = vid.read()
        if img is None:
            print('Error! Failed to determine frame size: frame empty.')
            return
        img = preprocessing(img, img_size)
        height, width = img.shape[:2]
        out = cv2.VideoWriter(os.path.join(output, vid_name), codec, fps, (width, height))
        pbar = tqdm(total=total)
        vid.set(cv2.CAP_PROP_POS_FRAMES, 0)
        while ret:
            ret, frame = vid.read()
            if frame is None:
                # End of stream (or decode error): vid.read() also set
                # ret=False, so the loop terminates after this iteration.
                print('Warning: got empty frame.')
                continue
            img = convert_image(frame, img_size)
            fake_img = sess.run(test_generated, feed_dict={test_real: img})
            fake_img = inverse_image(fake_img)
            # Match the stylised frame's brightness to the source frame,
            # then convert back to the BGR order VideoWriter expects.
            fake_img = cv2.cvtColor(adjust_brightness_from_src_to_dst(fake_img, frame), cv2.COLOR_BGR2RGB)
            out.write(fake_img)
            pbar.update(1)
        pbar.close()
        vid.release()
        # cv2.destroyAllWindows()
    return os.path.join(output, vid_name)
if __name__ == '__main__':
    arg = parse_args()
    # NOTE(review): --video and --output both carry non-empty defaults, so
    # these falsy checks never fire via argparse alone and the Tk file
    # dialogs are effectively dead code — confirm whether they are wanted.
    if not arg.video:
        arg.video = getfileloc(initialdir='input/')
    else:
        # Resolve the (relative) path against the project root, i.e. the
        # directory two levels above this file.
        arg.video = os.path.join(os.path.dirname(os.path.dirname(__file__)), arg.video)
    if not arg.output:
        arg.output = getfileloc(initialdir='output/', method='save')
    else:
        arg.output = os.path.join(os.path.dirname(os.path.dirname(__file__)), arg.output)
    check_folder(arg.output)
    info = cvt2anime_video(arg.video, arg.output, arg.checkpoint_dir, output_format=arg.output_format)
print(f'output video: {info}') | [
"tkinter.filedialog.asksaveasfilename",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"tensorflow.compat.v1.GPUOptions",
"os.path.join",
"tensorflow.compat.v1.variable_scope",
"cv2.cvtColor",
"tensorflow.compat.v1.placeholder",
"os.path.dirname",
"tkinter.filedialog.askopenfilename",
"too... | [((585, 626), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (608, 626), False, 'import argparse\n'), ((1400, 1407), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1405, 1407), True, 'import tkinter as tk\n'), ((1799, 1835), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1811, 1835), False, 'import cv2\n'), ((1846, 1874), 'tools.utils.preprocessing', 'preprocessing', (['img', 'img_size'], {}), '(img, img_size)\n', (1859, 1874), False, 'from tools.utils import preprocessing, check_folder\n'), ((1885, 1912), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1899, 1912), True, 'import numpy as np\n'), ((1923, 1938), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1933, 1938), True, 'import numpy as np\n'), ((2534, 2580), 'tensorflow.compat.v1.GPUOptions', 'tf.compat.v1.GPUOptions', ([], {'allow_growth': 'gpu_stat'}), '(allow_growth=gpu_stat)\n', (2557, 2580), True, 'import tensorflow as tf\n'), ((2598, 2667), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[1, None, None, 3]'], {'name': '"""test"""'}), "(tf.float32, [1, None, None, 3], name='test')\n", (2622, 2667), True, 'import tensorflow as tf\n'), ((2818, 2841), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (2834, 2841), False, 'import cv2\n'), ((2857, 2880), 'os.path.basename', 'os.path.basename', (['video'], {}), '(video)\n', (2873, 2880), False, 'import os\n'), ((3042, 3080), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['*output_format'], {}), '(*output_format)\n', (3064, 3080), False, 'import cv2\n'), ((3097, 3173), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {'allow_soft_placement': '(True)', 'gpu_options': 'gpu_options'}), '(allow_soft_placement=True, gpu_options=gpu_options)\n', (3121, 3173), True, 'import tensorflow as tf\n'), ((5275, 5299), 
'tools.utils.check_folder', 'check_folder', (['arg.output'], {}), '(arg.output)\n', (5287, 5299), False, 'from tools.utils import preprocessing, check_folder\n'), ((1451, 1551), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'parent': 'root', 'initialdir': 'initialdir', 'title': 'title', 'filetypes': 'filetypes'}), '(parent=root, initialdir=initialdir, title=title,\n filetypes=filetypes)\n', (1477, 1551), False, 'from tkinter import filedialog\n'), ((2678, 2731), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""generator"""'], {'reuse': '(False)'}), "('generator', reuse=False)\n", (2705, 2731), True, 'import tensorflow as tf\n'), ((3183, 3220), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'tfconfig'}), '(config=tfconfig)\n', (3203, 3220), True, 'import tensorflow as tf\n'), ((3316, 3361), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (3345, 3361), True, 'import tensorflow as tf\n'), ((3409, 3435), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {}), '()\n', (3433, 3435), True, 'import tensorflow as tf\n'), ((3995, 4023), 'tools.utils.preprocessing', 'preprocessing', (['img', 'img_size'], {}), '(img, img_size)\n', (4008, 4023), False, 'from tools.utils import preprocessing, check_folder\n'), ((4169, 4186), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total'}), '(total=total)\n', (4173, 4186), False, 'from tqdm import tqdm\n'), ((4824, 4854), 'os.path.join', 'os.path.join', (['output', 'vid_name'], {}), '(output, vid_name)\n', (4836, 4854), False, 'import os\n'), ((1593, 1718), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {'parent': 'root', 'initialdir': 'initialdir', 'initialfile': '"""out.avi"""', 'title': 'title', 'filetypes': 'filetypes'}), "(parent=root, initialdir=initialdir,\n initialfile='out.avi', title=title, filetypes=filetypes)\n", (1621, 1718), False, 
'from tkinter import filedialog\n'), ((2396, 2447), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (2440, 2447), True, 'import tensorflow as tf\n'), ((2758, 2784), 'net.generator.G_net', 'generator.G_net', (['test_real'], {}), '(test_real)\n', (2773, 2784), False, 'from net import generator\n'), ((3508, 3552), 'os.path.basename', 'os.path.basename', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (3524, 3552), False, 'import os\n'), ((4092, 4122), 'os.path.join', 'os.path.join', (['output', 'vid_name'], {}), '(output, vid_name)\n', (4104, 4122), False, 'import os\n'), ((3599, 3638), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'ckpt_name'], {}), '(checkpoint_dir, ckpt_name)\n', (3611, 3638), False, 'import os\n'), ((4601, 4651), 'tools.adjust_brightness.adjust_brightness_from_src_to_dst', 'adjust_brightness_from_src_to_dst', (['fake_img', 'frame'], {}), '(fake_img, frame)\n', (4634, 4651), False, 'from tools.adjust_brightness import adjust_brightness_from_src_to_dst\n'), ((5040, 5065), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5055, 5065), False, 'import os\n'), ((5231, 5256), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5246, 5256), False, 'import os\n')] |
# Sawyer inverse-kinematics demo: choose a target joint configuration,
# compute its forward kinematics, then recover joint angles from the
# resulting end-effector pose and compare the two poses.
import numpy as np
import sys, os
# Make the local pykin checkout importable (repo root is assumed to be two
# levels above the current working directory — TODO confirm).
pykin_path = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(pykin_path)
from pykin.kinematics import transform as tf
from pykin.robots.single_arm import SingleArm
from pykin.utils import plot_utils as plt
from pykin.utils.transform_utils import compute_pose_error
file_path = '../../asset/urdf/sawyer/sawyer.urdf'
robot = SingleArm(file_path, tf.Transform(rot=[0.0, 0.0, 0.0], pos=[0, 0, 0]))
robot.setup_link_name("sawyer_base", "sawyer_right_hand")
# Target configuration: 8 values against a 7-dof IK seed below — presumably
# the first entry drives a non-arm joint (e.g. the head); confirm against
# the URDF's joint ordering.
target_thetas = [0, np.pi/3, np.pi/2, np.pi/2, 0, np.pi/3, 0, 0]
init_thetas = np.random.randn(7)
fk = robot.forward_kin(target_thetas)
_, ax = plt.init_3d_figure("Target Pose")
plt.plot_robot(robot, ax, fk)
target_pose = robot.get_eef_pose(fk)
# Levenberg-Marquardt IK from a random 7-dof seed.
ik_result = robot.inverse_kin(init_thetas, target_pose, method="LM", maxIter=1000)
# Prepend a zero for the joint the IK solver does not return.
theta = np.concatenate((np.zeros(1), ik_result))
result_fk = robot.forward_kin(theta)
_, ax = plt.init_3d_figure("IK Result")
plt.plot_robot(robot, ax,result_fk)
err = compute_pose_error(
    fk[robot.eef_name].h_mat,
    result_fk[robot.eef_name].h_mat)
print(err)
print(result_fk[robot.eef_name].pose)
plt.show_figure()
| [
"sys.path.append",
"pykin.utils.transform_utils.compute_pose_error",
"pykin.utils.plot_utils.init_3d_figure",
"numpy.random.randn",
"os.getcwd",
"pykin.kinematics.transform.Transform",
"numpy.zeros",
"pykin.utils.plot_utils.show_figure",
"pykin.utils.plot_utils.plot_robot"
] | [((94, 121), 'sys.path.append', 'sys.path.append', (['pykin_path'], {}), '(pykin_path)\n', (109, 121), False, 'import sys, os\n'), ((600, 618), 'numpy.random.randn', 'np.random.randn', (['(7)'], {}), '(7)\n', (615, 618), True, 'import numpy as np\n'), ((666, 699), 'pykin.utils.plot_utils.init_3d_figure', 'plt.init_3d_figure', (['"""Target Pose"""'], {}), "('Target Pose')\n", (684, 699), True, 'from pykin.utils import plot_utils as plt\n'), ((700, 729), 'pykin.utils.plot_utils.plot_robot', 'plt.plot_robot', (['robot', 'ax', 'fk'], {}), '(robot, ax, fk)\n', (714, 729), True, 'from pykin.utils import plot_utils as plt\n'), ((947, 978), 'pykin.utils.plot_utils.init_3d_figure', 'plt.init_3d_figure', (['"""IK Result"""'], {}), "('IK Result')\n", (965, 978), True, 'from pykin.utils import plot_utils as plt\n'), ((979, 1015), 'pykin.utils.plot_utils.plot_robot', 'plt.plot_robot', (['robot', 'ax', 'result_fk'], {}), '(robot, ax, result_fk)\n', (993, 1015), True, 'from pykin.utils import plot_utils as plt\n'), ((1022, 1099), 'pykin.utils.transform_utils.compute_pose_error', 'compute_pose_error', (['fk[robot.eef_name].h_mat', 'result_fk[robot.eef_name].h_mat'], {}), '(fk[robot.eef_name].h_mat, result_fk[robot.eef_name].h_mat)\n', (1040, 1099), False, 'from pykin.utils.transform_utils import compute_pose_error\n'), ((1160, 1177), 'pykin.utils.plot_utils.show_figure', 'plt.show_figure', ([], {}), '()\n', (1175, 1177), True, 'from pykin.utils import plot_utils as plt\n'), ((396, 444), 'pykin.kinematics.transform.Transform', 'tf.Transform', ([], {'rot': '[0.0, 0.0, 0.0]', 'pos': '[0, 0, 0]'}), '(rot=[0.0, 0.0, 0.0], pos=[0, 0, 0])\n', (408, 444), True, 'from pykin.kinematics import transform as tf\n'), ((80, 91), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (89, 91), False, 'import sys, os\n'), ((876, 887), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (884, 887), True, 'import numpy as np\n')] |
import numpy as np
from hypothesis import given, settings
import hypothesis.strategies as st
import unittest
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestTile(serial.SerializedTestCase):
    """Tests for the Caffe2 ``Tile`` / ``TileGradient`` operators.

    Each test compares operator output against a ``np.tile`` reference,
    checks device parity, and (where cheap enough) gradients. Fixed here:
    ``np.int`` — deprecated in NumPy 1.20 and removed in 1.24 — is replaced
    by the builtin ``int``, which is the documented equivalent.
    """

    @given(M=st.integers(min_value=1, max_value=10),
           K=st.integers(min_value=1, max_value=10),
           N=st.integers(min_value=1, max_value=10),
           tiles=st.integers(min_value=1, max_value=3),
           axis=st.integers(min_value=0, max_value=2),
           **hu.gcs)
    @settings(deadline=10000)
    def test_tile(self, M, K, N, tiles, axis, gc, dc):
        """Tile with `tiles`/`axis` supplied as operator arguments."""
        X = np.random.rand(M, K, N).astype(np.float32)
        op = core.CreateOperator(
            'Tile', ['X'], 'out',
            tiles=tiles,
            axis=axis,
        )

        def tile_ref(X, tiles, axis):
            # `int` replaces the removed `np.int` alias (NumPy >= 1.24).
            dims = np.asarray([1, 1, 1], dtype=int)
            dims[axis] = tiles
            tiled_data = np.tile(X, dims)
            return (tiled_data,)

        # Check against numpy reference
        self.assertReferenceChecks(gc, op, [X, tiles, axis],
                                   tile_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X], [0])
        # Gradient check wrt X
        self.assertGradientChecks(gc, op, [X], 0, [0])

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(M=st.integers(min_value=1, max_value=200),
           N=st.integers(min_value=1, max_value=200),
           tiles=st.integers(min_value=50, max_value=100),
           **hu.gcs)
    def test_tile_grad(self, M, N, tiles, gc, dc):
        """Large-tile gradient path (requires GPU support)."""
        X = np.random.rand(M, N).astype(np.float32)
        axis = 1
        op = core.CreateOperator(
            'Tile', ['X'], 'out',
            tiles=tiles,
            axis=axis,
        )

        def tile_ref(X, tiles, axis):
            dims = np.asarray([1, 1], dtype=int)  # np.int removed in NumPy 1.24
            dims[axis] = tiles
            tiled_data = np.tile(X, dims)
            return (tiled_data,)

        # Check against numpy reference
        self.assertReferenceChecks(gc, op, [X, tiles, axis],
                                   tile_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X], [0])
        # Gradient check wrt X
        grad_op = core.CreateOperator(
            'TileGradient', ['dOut'], 'dX',
            tiles=tiles,
            axis=axis,
        )
        dX = np.random.rand(M, N * tiles).astype(np.float32)
        self.assertDeviceChecks(dc, grad_op, [dX], [0])

    @given(M=st.integers(min_value=1, max_value=10),
           K=st.integers(min_value=1, max_value=10),
           N=st.integers(min_value=1, max_value=10),
           tiles=st.integers(min_value=1, max_value=3),
           axis=st.integers(min_value=0, max_value=2),
           **hu.gcs)
    @settings(deadline=10000)
    def test_tilewinput(self, M, K, N, tiles, axis, gc, dc):
        """Tile with `tiles`/`axis` supplied as input blobs."""
        X = np.random.rand(M, K, N).astype(np.float32)
        tiles_arg = np.array([tiles], dtype=np.int32)
        axis_arg = np.array([axis], dtype=np.int32)
        op = core.CreateOperator(
            'Tile', ['X', 'tiles', 'axis'], 'out',
        )

        def tile_ref(X, tiles, axis):
            dims = np.asarray([1, 1, 1], dtype=int)  # np.int removed in NumPy 1.24
            dims[axis] = tiles
            tiled_data = np.tile(X, dims)
            return (tiled_data,)

        # Check against numpy reference
        self.assertReferenceChecks(gc, op, [X, tiles_arg, axis_arg],
                                   tile_ref)
        # Check over multiple devices
        self.assertDeviceChecks(dc, op, [X, tiles_arg, axis_arg], [0])
        # Gradient check wrt X
        self.assertGradientChecks(gc, op, [X, tiles_arg, axis_arg], 0, [0])


if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"unittest.skipIf",
"numpy.asarray",
"hypothesis.settings",
"caffe2.python.core.CreateOperator",
"numpy.array",
"numpy.tile",
"numpy.random.rand",
"hypothesis.strategies.integers"
] | [((638, 662), 'hypothesis.settings', 'settings', ([], {'deadline': '(10000)'}), '(deadline=10000)\n', (646, 662), False, 'from hypothesis import given, settings\n'), ((1451, 1515), 'unittest.skipIf', 'unittest.skipIf', (['(not workspace.has_gpu_support)', '"""No gpu support"""'], {}), "(not workspace.has_gpu_support, 'No gpu support')\n", (1466, 1515), False, 'import unittest\n'), ((3012, 3036), 'hypothesis.settings', 'settings', ([], {'deadline': '(10000)'}), '(deadline=10000)\n', (3020, 3036), False, 'from hypothesis import given, settings\n'), ((3987, 4002), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4000, 4002), False, 'import unittest\n'), ((791, 856), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Tile"""', "['X']", '"""out"""'], {'tiles': 'tiles', 'axis': 'axis'}), "('Tile', ['X'], 'out', tiles=tiles, axis=axis)\n", (810, 856), False, 'from caffe2.python import core, workspace\n'), ((1847, 1912), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Tile"""', "['X']", '"""out"""'], {'tiles': 'tiles', 'axis': 'axis'}), "('Tile', ['X'], 'out', tiles=tiles, axis=axis)\n", (1866, 1912), False, 'from caffe2.python import core, workspace\n'), ((2461, 2536), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""TileGradient"""', "['dOut']", '"""dX"""'], {'tiles': 'tiles', 'axis': 'axis'}), "('TileGradient', ['dOut'], 'dX', tiles=tiles, axis=axis)\n", (2480, 2536), False, 'from caffe2.python import core, workspace\n'), ((3178, 3211), 'numpy.array', 'np.array', (['[tiles]'], {'dtype': 'np.int32'}), '([tiles], dtype=np.int32)\n', (3186, 3211), True, 'import numpy as np\n'), ((3232, 3264), 'numpy.array', 'np.array', (['[axis]'], {'dtype': 'np.int32'}), '([axis], dtype=np.int32)\n', (3240, 3264), True, 'import numpy as np\n'), ((3281, 3339), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Tile"""', "['X', 'tiles', 'axis']", '"""out"""'], {}), "('Tile', ['X', 'tiles', 'axis'], 'out')\n", (3300, 
3339), False, 'from caffe2.python import core, workspace\n'), ((969, 1004), 'numpy.asarray', 'np.asarray', (['[1, 1, 1]'], {'dtype': 'np.int'}), '([1, 1, 1], dtype=np.int)\n', (979, 1004), True, 'import numpy as np\n'), ((1063, 1079), 'numpy.tile', 'np.tile', (['X', 'dims'], {}), '(X, dims)\n', (1070, 1079), True, 'import numpy as np\n'), ((349, 387), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (360, 387), True, 'import hypothesis.strategies as st\n'), ((403, 441), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (414, 441), True, 'import hypothesis.strategies as st\n'), ((457, 495), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (468, 495), True, 'import hypothesis.strategies as st\n'), ((515, 552), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(3)'}), '(min_value=1, max_value=3)\n', (526, 552), True, 'import hypothesis.strategies as st\n'), ((571, 608), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(0)', 'max_value': '(2)'}), '(min_value=0, max_value=2)\n', (582, 608), True, 'import hypothesis.strategies as st\n'), ((2025, 2057), 'numpy.asarray', 'np.asarray', (['[1, 1]'], {'dtype': 'np.int'}), '([1, 1], dtype=np.int)\n', (2035, 2057), True, 'import numpy as np\n'), ((2116, 2132), 'numpy.tile', 'np.tile', (['X', 'dims'], {}), '(X, dims)\n', (2123, 2132), True, 'import numpy as np\n'), ((1530, 1569), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(200)'}), '(min_value=1, max_value=200)\n', (1541, 1569), True, 'import hypothesis.strategies as st\n'), ((1585, 1624), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(200)'}), '(min_value=1, max_value=200)\n', (1596, 1624), 
True, 'import hypothesis.strategies as st\n'), ((1644, 1684), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(50)', 'max_value': '(100)'}), '(min_value=50, max_value=100)\n', (1655, 1684), True, 'import hypothesis.strategies as st\n'), ((3426, 3461), 'numpy.asarray', 'np.asarray', (['[1, 1, 1]'], {'dtype': 'np.int'}), '([1, 1, 1], dtype=np.int)\n', (3436, 3461), True, 'import numpy as np\n'), ((3520, 3536), 'numpy.tile', 'np.tile', (['X', 'dims'], {}), '(X, dims)\n', (3527, 3536), True, 'import numpy as np\n'), ((2723, 2761), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (2734, 2761), True, 'import hypothesis.strategies as st\n'), ((2777, 2815), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (2788, 2815), True, 'import hypothesis.strategies as st\n'), ((2831, 2869), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (2842, 2869), True, 'import hypothesis.strategies as st\n'), ((2889, 2926), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(3)'}), '(min_value=1, max_value=3)\n', (2900, 2926), True, 'import hypothesis.strategies as st\n'), ((2945, 2982), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(0)', 'max_value': '(2)'}), '(min_value=0, max_value=2)\n', (2956, 2982), True, 'import hypothesis.strategies as st\n'), ((732, 755), 'numpy.random.rand', 'np.random.rand', (['M', 'K', 'N'], {}), '(M, K, N)\n', (746, 755), True, 'import numpy as np\n'), ((1773, 1793), 'numpy.random.rand', 'np.random.rand', (['M', 'N'], {}), '(M, N)\n', (1787, 1793), True, 'import numpy as np\n'), ((2602, 2630), 'numpy.random.rand', 'np.random.rand', (['M', '(N * tiles)'], {}), '(M, N * tiles)\n', (2616, 2630), True, 'import numpy as np\n'), ((3112, 3135), 
'numpy.random.rand', 'np.random.rand', (['M', 'K', 'N'], {}), '(M, K, N)\n', (3126, 3135), True, 'import numpy as np\n')] |
import joblib
from ..utils.matrices import Hdf5
import numpy as np
import faiss
from numpy import linalg as LA
from sklearn.neighbors import NearestNeighbors
# Limit FAISS to a single OpenMP thread.
CPU = 1
faiss.omp_set_num_threads(CPU)
# Default number of neighbours used by the similarity classes below.
N_NEIGHBORS = 5
class FaissSimilarity(object):
    """Cosine-similarity nearest-neighbour search backed by FAISS.

    Rows are L2-normalized before being added to an inner-product index,
    so the scores returned by FAISS equal cosine similarities.
    """

    def __init__(self, n_neighbors=N_NEIGHBORS):
        # Number of neighbours per query; capped at the index size in fit().
        self.n_neighbors = n_neighbors
        self.index = None

    def fit(self, file_name):
        """Index the HDF5 matrix at *file_name*; return self-similarities.

        The returned matrix holds, per row, the similarity to its
        n_neighbors nearest rows, with the trivial self-match removed.
        """
        matrix = Hdf5(file_name).values()
        row_norms = LA.norm(matrix, axis=1)
        unit_rows = matrix / row_norms[:, None]
        flat_index = faiss.IndexFlatIP(matrix.shape[1])
        flat_index.add(unit_rows)
        self.index = flat_index
        self.n_neighbors = min(self.n_neighbors, flat_index.ntotal)
        # Request one extra neighbour: the closest hit is the row itself.
        sims, neighbor_ids = flat_index.search(unit_rows, self.n_neighbors + 1)
        sims = sims[:, 1:]
        neighbor_ids = neighbor_ids[:, 1:]
        return sims

    def kneighbors(self, file_name):
        """Query the fitted index with the rows of another HDF5 matrix.

        Returns the (similarities, neighbour-ids) pair from FAISS.
        """
        queries = Hdf5(file_name).values()
        query_norms = LA.norm(queries, axis=1)
        return self.index.search(queries / query_norms[:, None], self.n_neighbors)

    def save(self, path):
        """Persist the FAISS index to *path*."""
        faiss.write_index(self.index, path)

    def load(self, path):
        """Restore a FAISS index previously written by save()."""
        self.index = faiss.read_index(path)
class NearestSimilarity(object):
    """Empirical p-values of k-NN cosine distances against a background set.

    fit() records, for every background point, the summed distance to its
    N_NEIGHBORS nearest background neighbours.  pvalue() scores new points
    by the fraction of background sums that are at least as large.
    """

    def __init__(self):
        # One extra neighbour is requested so the self-match found while
        # fitting can be discarded.
        self.n_neighbors = N_NEIGHBORS + 1
        self.metric = "cosine"

    def fit(self, X):
        """Fit on background matrix X and cache per-point distance sums."""
        self.n_neighbors = min(self.n_neighbors, X.shape[0])
        self.nn = NearestNeighbors(n_neighbors=self.n_neighbors, metric=self.metric)
        self.nn.fit(X)
        D, _ = self.nn.kneighbors(X, return_distance=True)
        # Column 0 is the self-match (distance 0): drop it.
        D = D[:, 1:]
        self.background = np.sum(D, axis=1)
        # NOTE: a debug `print(len(self.background))` was removed here.

    def pvalue(self, X):
        """Return one empirical p-value per row of X.

        Bug fix: the original returned ``len(pvalues)`` (just the number of
        query points) instead of the p-values themselves.
        """
        D, _ = self.nn.kneighbors(X, return_distance=True)
        # Query points are not in the index (no self-match); drop the last
        # column so the same number of neighbours is summed as in fit().
        D = D[:, :-1]
        dists = np.sum(D, axis=1)
        n = len(self.background)
        return [np.sum(self.background >= d) / n for d in dists]

    def save(self, file_name):
        """Serialize this object to *file_name*."""
        joblib.dump(self, file_name)

    def load(self, file_name):
        """Deserialize and return an object stored with save()."""
        return joblib.load(file_name)
| [
"faiss.write_index",
"faiss.omp_set_num_threads",
"numpy.sum",
"faiss.read_index",
"joblib.dump",
"faiss.IndexFlatIP",
"numpy.linalg.norm",
"sklearn.neighbors.NearestNeighbors",
"joblib.load"
] | [((167, 197), 'faiss.omp_set_num_threads', 'faiss.omp_set_num_threads', (['CPU'], {}), '(CPU)\n', (192, 197), False, 'import faiss\n'), ((491, 513), 'faiss.IndexFlatIP', 'faiss.IndexFlatIP', (['dim'], {}), '(dim)\n', (508, 513), False, 'import faiss\n'), ((531, 549), 'numpy.linalg.norm', 'LA.norm', (['V'], {'axis': '(1)'}), '(V, axis=1)\n', (538, 549), True, 'from numpy import linalg as LA\n'), ((926, 944), 'numpy.linalg.norm', 'LA.norm', (['V'], {'axis': '(1)'}), '(V, axis=1)\n', (933, 944), True, 'from numpy import linalg as LA\n'), ((1072, 1107), 'faiss.write_index', 'faiss.write_index', (['self.index', 'path'], {}), '(self.index, path)\n', (1089, 1107), False, 'import faiss\n'), ((1156, 1178), 'faiss.read_index', 'faiss.read_index', (['path'], {}), '(path)\n', (1172, 1178), False, 'import faiss\n'), ((1414, 1480), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'self.n_neighbors', 'metric': 'self.metric'}), '(n_neighbors=self.n_neighbors, metric=self.metric)\n', (1430, 1480), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1610, 1627), 'numpy.sum', 'np.sum', (['D'], {'axis': '(1)'}), '(D, axis=1)\n', (1616, 1627), True, 'import numpy as np\n'), ((1787, 1804), 'numpy.sum', 'np.sum', (['D'], {'axis': '(1)'}), '(D, axis=1)\n', (1793, 1804), True, 'import numpy as np\n'), ((2027, 2055), 'joblib.dump', 'joblib.dump', (['self', 'file_name'], {}), '(self, file_name)\n', (2038, 2055), False, 'import joblib\n'), ((2103, 2125), 'joblib.load', 'joblib.load', (['file_name'], {}), '(file_name)\n', (2114, 2125), False, 'import joblib\n'), ((1899, 1927), 'numpy.sum', 'np.sum', (['(self.background >= d)'], {}), '(self.background >= d)\n', (1905, 1927), True, 'import numpy as np\n')] |
from itertools import permutations
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from mlgm.sampler import MetaSampler
class MnistMetaSampler(MetaSampler):
    """Meta-learning sampler over MNIST for N-way classification tasks.

    Each task is a permutation of digits; an example's label is the index
    of its digit within the task (0..num_classes_per_batch-1), not the
    digit value itself.  The meta-train/meta-test split is by digit: the
    original MNIST train and test splits are merged before partitioning.
    """

    def __init__(
            self,
            batch_size,
            meta_batch_size,
            train_digits,
            test_digits,
            num_classes_per_batch,
            one_hot_labels=False,
            same_input_and_label=False,
    ):
        # Bug fix: the original asserts used list comprehensions such as
        # `[0 <= d <= 9 for d in digits]`, which are truthy for any
        # non-empty list, so out-of-range digits were never rejected.
        assert train_digits is None or (
            isinstance(train_digits, list)
            and all(0 <= digit <= 9 for digit in train_digits))
        assert test_digits is None or (
            isinstance(test_digits, list)
            and all(0 <= digit <= 9 for digit in test_digits))
        # Robustness: None was accepted by the asserts but crashed in
        # `set(...)`; treat it as an empty digit list.
        self._train_digits = list(set(train_digits)) if train_digits else []
        self._test_digits = list(set(test_digits)) if test_digits else []
        self._one_hot_labels = one_hot_labels
        self._same_input_and_label = same_input_and_label
        (train_inputs, train_labels), (test_inputs,
                                       test_labels) = mnist.load_data()
        inputs = np.concatenate((train_inputs, test_inputs))
        labels = np.concatenate((train_labels, test_labels))
        self._train_inputs_per_label = {}
        self._test_inputs_per_label = {}
        self._train_size = 0
        self._test_size = 0
        for digit in self._train_digits:
            ids = np.where(digit == labels)[0]
            self._train_size += len(ids)
            random.shuffle(ids)
            self._train_inputs_per_label.update({digit: ids})
        for digit in self._test_digits:
            ids = np.where(digit == labels)[0]
            self._test_size += len(ids)
            random.shuffle(ids)
            self._test_inputs_per_label.update({digit: ids})
        # Scale pixel values to [0, 1].
        inputs = inputs / 255.0
        super().__init__(batch_size, meta_batch_size, inputs, num_classes_per_batch)

    def _gen_dataset(self, test=False):
        """Build a tf.data.Dataset of (inputs, labels) meta-batches.

        One element of the dataset corresponds to one meta-batch of
        `meta_batch_size` tasks, each with `batch_size` samples per class.
        """
        digits = self._test_digits if test else self._train_digits
        inputs_per_label = self._test_inputs_per_label if test else self._train_inputs_per_label
        # One task = one permutation of `digits`; repeat the permutation
        # list until the meta-batch is full.
        tasks = []
        while True:
            tasks_remaining = self._meta_batch_size - len(tasks)
            if tasks_remaining <= 0:
                break
            tasks_to_add = list(permutations(digits, self._num_classes_per_batch))
            n_tasks_to_add = min(len(tasks_to_add), tasks_remaining)
            tasks.extend(tasks_to_add[:n_tasks_to_add])
        num_inputs_per_meta_batch = (self._batch_size *
                self._num_classes_per_batch * self._meta_batch_size)
        ids = np.empty((0, num_inputs_per_meta_batch), dtype=np.int32)
        lbls = np.empty((0, num_inputs_per_meta_batch), dtype=np.int32)
        data_size = self._test_size if test else self._train_size
        data_size = data_size // num_inputs_per_meta_batch
        data_size = min(data_size, 1000)
        for _ in range(data_size):
            all_ids = np.array([], dtype=np.int32)
            all_labels = np.array([], dtype=np.int32)
            for task in tasks:
                task_ids = np.array([], dtype=np.int32)
                task_labels = np.array([], dtype=np.int32)
                # Fix: the inner loop previously reused `i`, shadowing the
                # outer meta-batch counter.
                for class_idx, label in enumerate(task):
                    label_ids = np.random.choice(inputs_per_label[label], self._batch_size)
                    class_labels = np.empty(self._batch_size, dtype=np.int32)
                    class_labels.fill(class_idx)
                    task_labels = np.append(task_labels, class_labels)
                    task_ids = np.append(task_ids, label_ids)
                all_labels = np.append(all_labels, task_labels)
                all_ids = np.append(all_ids, task_ids)
            ids = np.append(ids, [all_ids], axis=0)
            lbls = np.append(lbls, [all_labels], axis=0)
        all_ids_sym = tf.convert_to_tensor(ids)
        inputs_sym = tf.convert_to_tensor(self._inputs, dtype=tf.float32)
        all_inputs = tf.gather(inputs_sym, all_ids_sym)
        all_labels = tf.convert_to_tensor(
            lbls, dtype=tf.dtypes.int32)
        if self._one_hot_labels:
            all_labels = tf.one_hot(all_labels, depth=10)
        dataset_sym = tf.data.Dataset.from_tensor_slices((all_inputs, all_labels))
        return dataset_sym

    def build_inputs_and_labels(self, handle):
        """Split a meta-batch into (input_a, label_a, input_b, label_b) halves.

        When `same_input_and_label` is set the labels are the inputs
        themselves (autoencoder-style), reshaped with a trailing channel.
        """
        slice_size = (self._batch_size // 2) * self._num_classes_per_batch
        input_batches, label_batches = self._gen_metadata(handle)
        input_a = tf.slice(input_batches, [0, 0, 0, 0],
                [-1, slice_size, -1, -1])
        input_b = tf.slice(input_batches, [0, slice_size, 0, 0],
                [-1, -1, -1, -1])
        if self._same_input_and_label:
            label_a = tf.reshape(input_a, input_a.get_shape().concatenate(1))
            label_b = tf.reshape(input_b, input_b.get_shape().concatenate(1))
        else:
            label_a = tf.slice(label_batches, [0, 0, 0],
                    [-1, slice_size, -1])
            label_b = tf.slice(label_batches, [0, slice_size, 0],
                    [-1, -1, -1])
        return input_a, label_a, input_b, label_b
| [
"tensorflow.one_hot",
"tensorflow.gather",
"numpy.empty",
"tensorflow.convert_to_tensor",
"random.shuffle",
"itertools.permutations",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.datasets.mnist.load_data",
"numpy.append",
"numpy.where",
"numpy.array",
"numpy.random.choice",
... | [((1073, 1090), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1088, 1090), False, 'from tensorflow.keras.datasets import mnist\n'), ((1109, 1152), 'numpy.concatenate', 'np.concatenate', (['(train_inputs, test_inputs)'], {}), '((train_inputs, test_inputs))\n', (1123, 1152), True, 'import numpy as np\n'), ((1170, 1213), 'numpy.concatenate', 'np.concatenate', (['(train_labels, test_labels)'], {}), '((train_labels, test_labels))\n', (1184, 1213), True, 'import numpy as np\n'), ((2665, 2721), 'numpy.empty', 'np.empty', (['(0, num_inputs_per_meta_batch)'], {'dtype': 'np.int32'}), '((0, num_inputs_per_meta_batch), dtype=np.int32)\n', (2673, 2721), True, 'import numpy as np\n'), ((2737, 2793), 'numpy.empty', 'np.empty', (['(0, num_inputs_per_meta_batch)'], {'dtype': 'np.int32'}), '((0, num_inputs_per_meta_batch), dtype=np.int32)\n', (2745, 2793), True, 'import numpy as np\n'), ((3898, 3923), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['ids'], {}), '(ids)\n', (3918, 3923), True, 'import tensorflow as tf\n'), ((3945, 3997), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self._inputs'], {'dtype': 'tf.float32'}), '(self._inputs, dtype=tf.float32)\n', (3965, 3997), True, 'import tensorflow as tf\n'), ((4019, 4053), 'tensorflow.gather', 'tf.gather', (['inputs_sym', 'all_ids_sym'], {}), '(inputs_sym, all_ids_sym)\n', (4028, 4053), True, 'import tensorflow as tf\n'), ((4075, 4124), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['lbls'], {'dtype': 'tf.dtypes.int32'}), '(lbls, dtype=tf.dtypes.int32)\n', (4095, 4124), True, 'import tensorflow as tf\n'), ((4251, 4311), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(all_inputs, all_labels)'], {}), '((all_inputs, all_labels))\n', (4285, 4311), True, 'import tensorflow as tf\n'), ((4547, 4610), 'tensorflow.slice', 'tf.slice', (['input_batches', '[0, 0, 0, 0]', '[-1, slice_size, -1, -1]'], {}), '(input_batches, [0, 0, 
0, 0], [-1, slice_size, -1, -1])\n', (4555, 4610), True, 'import tensorflow as tf\n'), ((4656, 4720), 'tensorflow.slice', 'tf.slice', (['input_batches', '[0, slice_size, 0, 0]', '[-1, -1, -1, -1]'], {}), '(input_batches, [0, slice_size, 0, 0], [-1, -1, -1, -1])\n', (4664, 4720), True, 'import tensorflow as tf\n'), ((1497, 1516), 'random.shuffle', 'random.shuffle', (['ids'], {}), '(ids)\n', (1511, 1516), False, 'import random\n'), ((1719, 1738), 'random.shuffle', 'random.shuffle', (['ids'], {}), '(ids)\n', (1733, 1738), False, 'import random\n'), ((3043, 3071), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (3051, 3071), True, 'import numpy as np\n'), ((3097, 3125), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (3105, 3125), True, 'import numpy as np\n'), ((3784, 3817), 'numpy.append', 'np.append', (['ids', '[all_ids]'], {'axis': '(0)'}), '(ids, [all_ids], axis=0)\n', (3793, 3817), True, 'import numpy as np\n'), ((3837, 3874), 'numpy.append', 'np.append', (['lbls', '[all_labels]'], {'axis': '(0)'}), '(lbls, [all_labels], axis=0)\n', (3846, 3874), True, 'import numpy as np\n'), ((4196, 4228), 'tensorflow.one_hot', 'tf.one_hot', (['all_labels'], {'depth': '(10)'}), '(all_labels, depth=10)\n', (4206, 4228), True, 'import tensorflow as tf\n'), ((4979, 5035), 'tensorflow.slice', 'tf.slice', (['label_batches', '[0, 0, 0]', '[-1, slice_size, -1]'], {}), '(label_batches, [0, 0, 0], [-1, slice_size, -1])\n', (4987, 5035), True, 'import tensorflow as tf\n'), ((5089, 5146), 'tensorflow.slice', 'tf.slice', (['label_batches', '[0, slice_size, 0]', '[-1, -1, -1]'], {}), '(label_batches, [0, slice_size, 0], [-1, -1, -1])\n', (5097, 5146), True, 'import tensorflow as tf\n'), ((1415, 1440), 'numpy.where', 'np.where', (['(digit == labels)'], {}), '(digit == labels)\n', (1423, 1440), True, 'import numpy as np\n'), ((1638, 1663), 'numpy.where', 'np.where', (['(digit == labels)'], {}), '(digit == labels)\n', 
(1646, 1663), True, 'import numpy as np\n'), ((2331, 2380), 'itertools.permutations', 'permutations', (['digits', 'self._num_classes_per_batch'], {}), '(digits, self._num_classes_per_batch)\n', (2343, 2380), False, 'from itertools import permutations\n'), ((3184, 3212), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (3192, 3212), True, 'import numpy as np\n'), ((3243, 3271), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (3251, 3271), True, 'import numpy as np\n'), ((3676, 3710), 'numpy.append', 'np.append', (['all_labels', 'task_labels'], {}), '(all_labels, task_labels)\n', (3685, 3710), True, 'import numpy as np\n'), ((3737, 3765), 'numpy.append', 'np.append', (['all_ids', 'task_ids'], {}), '(all_ids, task_ids)\n', (3746, 3765), True, 'import numpy as np\n'), ((3353, 3412), 'numpy.random.choice', 'np.random.choice', (['inputs_per_label[label]', 'self._batch_size'], {}), '(inputs_per_label[label], self._batch_size)\n', (3369, 3412), True, 'import numpy as np\n'), ((3442, 3484), 'numpy.empty', 'np.empty', (['self._batch_size'], {'dtype': 'np.int32'}), '(self._batch_size, dtype=np.int32)\n', (3450, 3484), True, 'import numpy as np\n'), ((3554, 3584), 'numpy.append', 'np.append', (['task_labels', 'labels'], {}), '(task_labels, labels)\n', (3563, 3584), True, 'import numpy as np\n'), ((3616, 3646), 'numpy.append', 'np.append', (['task_ids', 'label_ids'], {}), '(task_ids, label_ids)\n', (3625, 3646), True, 'import numpy as np\n')] |
# *************************************************************************** #
# HyperNet #
# --------------------------------------------------------------------------- #
# Machine Learning-Based library for modeling #
# multi-component non-equilibrium thermochemical processes #
# #
# *************************************************************************** #
# -------------------------------- EXEC FILE -------------------------------- #
# Description:
# >> Fit reactions rates following the Arrhenius law
# --------------------------------------------------------------------------- #
NAME = 'fitRates'
import os
import sys
import argparse
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
from hypernet.src.general import utils
from hypernet.apps.fitRates.dataGenerator import DataGenerator
from hypernet.src.thermophysicalModels.specie.specie import Specie
import hypernet.database as db
kinetic_db = os.path.dirname(db.__file__) + '/air/kinetics/'
# Parse arguments
###############################################################################
def dir_path(path):
    """argparse type-checker: *path* must contain an `inputs` folder.

    Returns the path unchanged when valid, otherwise raises
    argparse.ArgumentTypeError so argparse reports a clean CLI error.
    """
    if not os.path.exists(path + '/inputs'):
        message = "'inputs' folder not found in '{}'".format(
            os.path.normpath(path) + '/')
        raise argparse.ArgumentTypeError(message)
    return path
def get_opts():
    """Parse and return the command-line options of the fitRates app."""
    parser = argparse.ArgumentParser(
        prog=NAME,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Fit reactions rates following the Arrhenius law.",
        epilog=utils.app_epilog(name=NAME),
    )
    # Working directory must contain an `inputs` package (see dir_path).
    parser.add_argument(
        '-d', '--dir', type=dir_path, default="./",
        help='path to the working directory (with `inputs` folder)',
    )
    parser.add_argument(
        '-p', '--plot', type=int, default=1, choices=[0, 1],
        help='plot fitted rates.',
    )
    parser.add_argument(
        '-v', '--verbose', type=int, default=1, choices=[0, 1],
        help='verbose mode',
    )
    return parser.parse_args()
# Arrhenius Law
###############################################################################
def log_arrhenius_law(T, a_log, b, c):
    """Logarithm of the modified Arrhenius law.

    Computes ln k(T) = ln(a) + b*ln(T) - c/T, with the pre-exponential
    factor already given in log form (a_log = ln(a)) for a stable fit.
    """
    return a_log + b * np.log(T) - c / T
def modified_arrhenius_inv(T_inv, a, b, c):
    """Modified Arrhenius law k(T) = a * T**b * exp(-c/T).

    Parameterized by the reciprocal temperature T_inv = 1/T, which is the
    natural abscissa for Arrhenius plots.
    """
    temperature = 1.0 / T_inv
    return a * np.power(temperature, b) * np.exp(-c * T_inv)
# Plot rates
###############################################################################
def plot_rates(
        fig_name,
        x_true,
        x_pred,
        y_true,
        y_pred,
        var_name,
        title,
        labels=(None, None),
        scales=('log', 'log')):
    """Plot true (markers) vs. fitted (lines) rates and save the figure.

    Args:
        fig_name: output file path for the figure.
        x_true, y_true: abscissa and (n_points, n_vars) true rate values.
        x_pred, y_pred: abscissa and (n_points, n_vars) fitted rate values.
        var_name: per-column names shown in the legend.
        title: figure title.
        labels: (x_label, y_label) axis labels; None leaves an axis unlabeled.
        scales: (x_scale, y_scale) axis scales, e.g. 'log' or 'linear'.

    Note: the default arguments were changed from lists to tuples to avoid
    mutable defaults; values and behavior are unchanged.
    """
    fig = plt.figure()
    plt.title(title)
    x_label, y_label = labels
    x_scale, y_scale = scales
    # X axis
    if x_label is not None:
        plt.xlabel(x_label)
    if x_scale is not None:
        plt.xscale(x_scale)
    # Y axis
    if y_label is not None:
        plt.ylabel(y_label)
    if y_scale == 'log':
        plt.yscale(y_scale)
        # Ignore exact zeros when computing log-scale limits.
        plt.ylim([np.amin(y_true[y_true!=0.])*1.e-1, np.amax(y_true)*1.e+1])
    else:
        delta = np.amax(y_true)*0.1
        plt.ylim([np.amin(y_true)-delta, np.amax(y_true)+delta])
    # Solution
    # >> Define styles
    true_style = dict(
        marker='x', lw=0.5, linestyle='none', fillstyle='none', markersize=5
    )
    pred_style = dict(lw=1)
    # >> Define parameters
    colors = []
    n = y_true.shape[1]
    for d in range(n):
        # >> Plot true values
        plt.plot(
            x_true,
            y_true[:,d],
            **true_style
        )
        # Reuse the color matplotlib assigned to the true series for the
        # matching predicted series.
        colors.append(plt.gca().lines[-1].get_color())
        # >> Plot predicted values
        plt.plot(
            x_pred,
            y_pred[:,d],
            **pred_style,
            label=var_name[d],
            c=colors[-1]
        )
    # >> Plot legends
    # Bug fix: `fontsize` was only assigned when n < 9, so figures with nine
    # or more series crashed with a NameError at the legend calls below.
    fontsize = 'x-small'
    legend = plt.legend(
        [plt.plot([], [], c=colors[i])[0] for i in range(len(var_name))],
        var_name,
        fontsize=fontsize,
        loc=3,
        ncol=int(np.ceil(n/8))
    )
    plt.legend(
        [
            plt.plot([], [], c="k", **true_style)[0],
            plt.plot([], [], c="k", **pred_style)[0]
        ],
        ["True", "Pred"],
        fontsize=fontsize,
        loc=1
    )
    plt.gca().add_artist(legend)
    # >> Save figure
    fig.savefig(fig_name)
    plt.close()
# Main
###############################################################################
@utils.app_decorator(name=NAME)
def main():
    """Entry point: fit Arrhenius coefficients to reaction-rate data.

    Workflow: parse CLI options, import the user's `inputs` package from the
    working directory, build the species set, generate (T, K) training data,
    fit log-Arrhenius parameters per reaction, write the coefficients as a
    CSV in the kinetics database, and optionally plot true vs. fitted rates.
    """
    # Inizialization ==========================================================
    # Parse arguments ---------------------------------------------------------
    opts = get_opts()
    # Input module ------------------------------------------------------------
    # The working directory supplies `inputs.general` and
    # `inputs.postprocessing` as run configuration modules.
    sys.path.append(opts.dir)
    from inputs import general as inp_gen
    from inputs import postprocessing as inp_post
    # Initialize species ------------------------------------------------------
    utils.print_main(
        "Initializing species ...", start='', verbose=opts.verbose
    )
    species = {
        sp: Specie(sp, **sp_info) if sp_info != None else Specie(sp) \
            for sp, sp_info in inp_gen.species.items()
    }
    # Generate Data ===========================================================
    utils.print_main('Generating data ...', verbose=opts.verbose)
    dataGen = DataGenerator(
        species,
        inp_gen.T,
        inp_gen.reacReader,
        inp_gen.reacWriter,
        verbose=opts.verbose
    )
    # T: temperatures, K: rate coefficients (one column per reaction).
    T, K = dataGen.training_data()
    # Fit Data ================================================================
    utils.print_main('Fitting rates ...', verbose=opts.verbose)
    for j in range(K.shape[1]):
        # Zero rates cannot be fit in log space; drop them from the sample.
        K_log_j = np.log( K[:,j][ K[:,j] != 0. ] )
        T_j = T[:,0][ K[:,j] != 0. ]
        if T_j.size == 0:
            # No usable data for this reaction: store null coefficients.
            param_j = np.zeros((3,))
        else:
            param_j, _ = curve_fit(log_arrhenius_law, T_j, K_log_j, \
                p0=[1,1,1.e4], method='trf')
            # The fit works with ln(A); store the pre-exponential factor A.
            param_j[0] = np.exp(param_j[0])
        if j == 0:
            param = np.array(param_j)
        else:
            param = np.vstack((param, np.array(param_j)))
    # With a single reaction `param` is 1-D; make it (1, 3) for DataFrame.
    if len(param.shape) == 1:
        param = np.expand_dims(param, 0)
    # Write coefficient =======================================================
    utils.print_main('Writing coefficients ...', verbose=opts.verbose)
    paramDB = pd.DataFrame(param, columns=['A', 'beta', 'Ta'])
    reactions = pd.concat([dataGen.reacDB, paramDB], axis=1)
    path = kinetic_db + inp_gen.reacReader['path']+'/reactions.csv'
    utils.print_submain(
        'Saving coefficients at `{}`.'.format(os.path.normpath(path)),
        verbose=opts.verbose
    )
    reactions.to_csv(path, float_format='{:e}'.format, index=False)
    # Plot fitted rates
    ###########################################################################
    if opts.plot:
        utils.print_main('Plotting rates ...', verbose=opts.verbose)
        # Rates are plotted against 1/T (Arrhenius plot convention).
        x_true = np.reciprocal(np.array(inp_gen.T))
        y_true = K
        n = 1000
        x_pred = np.reciprocal(
            np.linspace(1000.0e0, max(inp_gen.T), n, dtype=np.float64)
        )
        y_pred = np.zeros((n, dataGen.n_reac), dtype=np.float64)
        for i, param_i in enumerate(param):
            y_pred[:,i] = modified_arrhenius_inv(x_pred, *param_i)
        path = opts.dir + '/plots/'
        if not os.path.exists(path):
            os.makedirs(path)
        # Dissociation
        # Reaction columns are assumed ordered as [dissociation | exchange |
        # inelastic] — TODO confirm against DataGenerator.
        start = 0
        end = dataGen.n_diss
        if y_pred.shape[1] >= end:
            utils.print_submain(
                'Plotting dissociation rates ...', verbose=opts.verbose
            )
            y_true_diss = y_true[:,start:end]
            y_pred_diss = y_pred[:,start:end]
            fig_name = path + 'dissociation.pdf'
            plot_rates(
                fig_name,
                x_true,
                x_pred,
                y_true_diss,
                y_pred_diss,
                inp_post.var_names['diss'],
                'Dissociation Rates',
                labels=inp_post.labels,
                scales=inp_post.scales
            )
        if dataGen.n_excit > 0:
            # Exchange
            start = dataGen.n_diss
            end = dataGen.n_diss + dataGen.n_excit
            if y_pred.shape[1] > end:
                utils.print_submain(
                    'Plotting exchange rates ...', verbose=opts.verbose
                )
                y_true_exch = y_true[:,start:end]
                y_pred_exch = y_pred[:,start:end]
                fig_name = path + 'exchange.pdf'
                plot_rates(
                    fig_name,
                    x_true,
                    x_pred,
                    y_true_exch,
                    y_pred_exch,
                    inp_post.var_names['excit'],
                    'Exchange Rates',
                    labels=inp_post.labels,
                    scales=inp_post.scales
                )
            # Inelastic
            start = dataGen.n_diss + dataGen.n_excit
            end = dataGen.n_diss + dataGen.n_excit * 2
            if y_pred.shape[1] >= end:
                utils.print_submain(
                    'Plotting inelastic rates ...', verbose=opts.verbose
                )
                y_true_inel = y_true[:,start:end]
                y_pred_inel = y_pred[:,start:end]
                fig_name = path + 'inelastic.pdf'
                plot_rates(
                    fig_name,
                    x_true,
                    x_pred,
                    y_true_inel,
                    y_pred_inel,
                    inp_post.var_names['excit'],
                    'Inelastic Rates',
                    labels=inp_post.labels,
                    scales=inp_post.scales
                )
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"numpy.amin",
"matplotlib.pyplot.figure",
"numpy.exp",
"hypernet.src.thermophysicalModels.specie.specie.Specie",
"matplotlib.pyplot.gca",
"hypernet.src.general.utils.app_decorator",
"sys.path.append",
"pandas.DataFrame",
"matplotlib.pyplot.c... | [((4807, 4837), 'hypernet.src.general.utils.app_decorator', 'utils.app_decorator', ([], {'name': 'NAME'}), '(name=NAME)\n', (4826, 4837), False, 'from hypernet.src.general import utils\n'), ((1176, 1204), 'os.path.dirname', 'os.path.dirname', (['db.__file__'], {}), '(db.__file__)\n', (1191, 1204), False, 'import os\n'), ((1351, 1383), 'os.path.exists', 'os.path.exists', (["(path + '/inputs')"], {}), "(path + '/inputs')\n", (1365, 1383), False, 'import os\n'), ((2905, 2917), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2915, 2917), True, 'from matplotlib import pyplot as plt\n'), ((2922, 2938), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2931, 2938), True, 'from matplotlib import pyplot as plt\n'), ((4705, 4716), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4714, 4716), True, 'from matplotlib import pyplot as plt\n'), ((5118, 5143), 'sys.path.append', 'sys.path.append', (['opts.dir'], {}), '(opts.dir)\n', (5133, 5143), False, 'import sys\n'), ((5321, 5397), 'hypernet.src.general.utils.print_main', 'utils.print_main', (['"""Initializing species ..."""'], {'start': '""""""', 'verbose': 'opts.verbose'}), "('Initializing species ...', start='', verbose=opts.verbose)\n", (5337, 5397), False, 'from hypernet.src.general import utils\n'), ((5645, 5706), 'hypernet.src.general.utils.print_main', 'utils.print_main', (['"""Generating data ..."""'], {'verbose': 'opts.verbose'}), "('Generating data ...', verbose=opts.verbose)\n", (5661, 5706), False, 'from hypernet.src.general import utils\n'), ((5721, 5820), 'hypernet.apps.fitRates.dataGenerator.DataGenerator', 'DataGenerator', (['species', 'inp_gen.T', 'inp_gen.reacReader', 'inp_gen.reacWriter'], {'verbose': 'opts.verbose'}), '(species, inp_gen.T, inp_gen.reacReader, inp_gen.reacWriter,\n verbose=opts.verbose)\n', (5734, 5820), False, 'from hypernet.apps.fitRates.dataGenerator import DataGenerator\n'), ((5983, 6042), 
'hypernet.src.general.utils.print_main', 'utils.print_main', (['"""Fitting rates ..."""'], {'verbose': 'opts.verbose'}), "('Fitting rates ...', verbose=opts.verbose)\n", (5999, 6042), False, 'from hypernet.src.general import utils\n'), ((6685, 6751), 'hypernet.src.general.utils.print_main', 'utils.print_main', (['"""Writing coefficients ..."""'], {'verbose': 'opts.verbose'}), "('Writing coefficients ...', verbose=opts.verbose)\n", (6701, 6751), False, 'from hypernet.src.general import utils\n'), ((6766, 6814), 'pandas.DataFrame', 'pd.DataFrame', (['param'], {'columns': "['A', 'beta', 'Ta']"}), "(param, columns=['A', 'beta', 'Ta'])\n", (6778, 6814), True, 'import pandas as pd\n'), ((6831, 6875), 'pandas.concat', 'pd.concat', (['[dataGen.reacDB, paramDB]'], {'axis': '(1)'}), '([dataGen.reacDB, paramDB], axis=1)\n', (6840, 6875), True, 'import pandas as pd\n'), ((2598, 2616), 'numpy.exp', 'np.exp', (['(-c * T_inv)'], {}), '(-c * T_inv)\n', (2604, 2616), True, 'import numpy as np\n'), ((3050, 3069), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (3060, 3069), True, 'from matplotlib import pyplot as plt\n'), ((3106, 3125), 'matplotlib.pyplot.xscale', 'plt.xscale', (['x_scale'], {}), '(x_scale)\n', (3116, 3125), True, 'from matplotlib import pyplot as plt\n'), ((3176, 3195), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (3186, 3195), True, 'from matplotlib import pyplot as plt\n'), ((3229, 3248), 'matplotlib.pyplot.yscale', 'plt.yscale', (['y_scale'], {}), '(y_scale)\n', (3239, 3248), True, 'from matplotlib import pyplot as plt\n'), ((3738, 3782), 'matplotlib.pyplot.plot', 'plt.plot', (['x_true', 'y_true[:, d]'], {}), '(x_true, y_true[:, d], **true_style)\n', (3746, 3782), True, 'from matplotlib import pyplot as plt\n'), ((3926, 4003), 'matplotlib.pyplot.plot', 'plt.plot', (['x_pred', 'y_pred[:, d]'], {'label': 'var_name[d]', 'c': 'colors[-1]'}), '(x_pred, y_pred[:, d], **pred_style, label=var_name[d], 
c=colors[-1])\n', (3934, 4003), True, 'from matplotlib import pyplot as plt\n'), ((6093, 6124), 'numpy.log', 'np.log', (['K[:, j][K[:, j] != 0.0]'], {}), '(K[:, j][K[:, j] != 0.0])\n', (6099, 6124), True, 'import numpy as np\n'), ((6575, 6599), 'numpy.expand_dims', 'np.expand_dims', (['param', '(0)'], {}), '(param, 0)\n', (6589, 6599), True, 'import numpy as np\n'), ((7274, 7334), 'hypernet.src.general.utils.print_main', 'utils.print_main', (['"""Plotting rates ..."""'], {'verbose': 'opts.verbose'}), "('Plotting rates ...', verbose=opts.verbose)\n", (7290, 7334), False, 'from hypernet.src.general import utils\n'), ((7554, 7601), 'numpy.zeros', 'np.zeros', (['(n, dataGen.n_reac)'], {'dtype': 'np.float64'}), '((n, dataGen.n_reac), dtype=np.float64)\n', (7562, 7601), True, 'import numpy as np\n'), ((1801, 1828), 'hypernet.src.general.utils.app_epilog', 'utils.app_epilog', ([], {'name': 'NAME'}), '(name=NAME)\n', (1817, 1828), False, 'from hypernet.src.general import utils\n'), ((2576, 2598), 'numpy.power', 'np.power', (['(1 / T_inv)', 'b'], {}), '(1 / T_inv, b)\n', (2584, 2598), True, 'import numpy as np\n'), ((3352, 3367), 'numpy.amax', 'np.amax', (['y_true'], {}), '(y_true)\n', (3359, 3367), True, 'import numpy as np\n'), ((5440, 5461), 'hypernet.src.thermophysicalModels.specie.specie.Specie', 'Specie', (['sp'], {}), '(sp, **sp_info)\n', (5446, 5461), False, 'from hypernet.src.thermophysicalModels.specie.specie import Specie\n'), ((5486, 5496), 'hypernet.src.thermophysicalModels.specie.specie.Specie', 'Specie', (['sp'], {}), '(sp)\n', (5492, 5496), False, 'from hypernet.src.thermophysicalModels.specie.specie import Specie\n'), ((5530, 5553), 'inputs.general.species.items', 'inp_gen.species.items', ([], {}), '()\n', (5551, 5553), True, 'from inputs import general as inp_gen\n'), ((6212, 6226), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (6220, 6226), True, 'import numpy as np\n'), ((6266, 6342), 'scipy.optimize.curve_fit', 'curve_fit', 
(['log_arrhenius_law', 'T_j', 'K_log_j'], {'p0': '[1, 1, 10000.0]', 'method': '"""trf"""'}), "(log_arrhenius_law, T_j, K_log_j, p0=[1, 1, 10000.0], method='trf')\n", (6275, 6342), False, 'from scipy.optimize import curve_fit\n'), ((6381, 6399), 'numpy.exp', 'np.exp', (['param_j[0]'], {}), '(param_j[0])\n', (6387, 6399), True, 'import numpy as np\n'), ((6439, 6456), 'numpy.array', 'np.array', (['param_j'], {}), '(param_j)\n', (6447, 6456), True, 'import numpy as np\n'), ((7015, 7037), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (7031, 7037), False, 'import os\n'), ((7366, 7385), 'numpy.array', 'np.array', (['inp_gen.T'], {}), '(inp_gen.T)\n', (7374, 7385), True, 'import numpy as np\n'), ((7765, 7785), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7779, 7785), False, 'import os\n'), ((7799, 7816), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (7810, 7816), False, 'import os\n'), ((7935, 8011), 'hypernet.src.general.utils.print_submain', 'utils.print_submain', (['"""Plotting dissociation rates ..."""'], {'verbose': 'opts.verbose'}), "('Plotting dissociation rates ...', verbose=opts.verbose)\n", (7954, 8011), False, 'from hypernet.src.general import utils\n'), ((2482, 2491), 'numpy.log', 'np.log', (['T'], {}), '(T)\n', (2488, 2491), True, 'import numpy as np\n'), ((4624, 4633), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4631, 4633), True, 'from matplotlib import pyplot as plt\n'), ((8710, 8782), 'hypernet.src.general.utils.print_submain', 'utils.print_submain', (['"""Plotting exchange rates ..."""'], {'verbose': 'opts.verbose'}), "('Plotting exchange rates ...', verbose=opts.verbose)\n", (8729, 8782), False, 'from hypernet.src.general import utils\n'), ((9530, 9603), 'hypernet.src.general.utils.print_submain', 'utils.print_submain', (['"""Plotting inelastic rates ..."""'], {'verbose': 'opts.verbose'}), "('Plotting inelastic rates ...', verbose=opts.verbose)\n", (9549, 9603), False, 'from 
hypernet.src.general import utils\n'), ((1527, 1549), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (1543, 1549), False, 'import os\n'), ((3267, 3297), 'numpy.amin', 'np.amin', (['y_true[y_true != 0.0]'], {}), '(y_true[y_true != 0.0])\n', (3274, 3297), True, 'import numpy as np\n'), ((3302, 3317), 'numpy.amax', 'np.amax', (['y_true'], {}), '(y_true)\n', (3309, 3317), True, 'import numpy as np\n'), ((3390, 3405), 'numpy.amin', 'np.amin', (['y_true'], {}), '(y_true)\n', (3397, 3405), True, 'import numpy as np\n'), ((3413, 3428), 'numpy.amax', 'np.amax', (['y_true'], {}), '(y_true)\n', (3420, 3428), True, 'import numpy as np\n'), ((4181, 4210), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'c': 'colors[i]'}), '([], [], c=colors[i])\n', (4189, 4210), True, 'from matplotlib import pyplot as plt\n'), ((4339, 4353), 'numpy.ceil', 'np.ceil', (['(n / 8)'], {}), '(n / 8)\n', (4346, 4353), True, 'import numpy as np\n'), ((4413, 4450), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'c': '"""k"""'}), "([], [], c='k', **true_style)\n", (4421, 4450), True, 'from matplotlib import pyplot as plt\n'), ((4471, 4508), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'c': '"""k"""'}), "([], [], c='k', **pred_style)\n", (4479, 4508), True, 'from matplotlib import pyplot as plt\n'), ((6509, 6526), 'numpy.array', 'np.array', (['param_j'], {}), '(param_j)\n', (6517, 6526), True, 'import numpy as np\n'), ((3850, 3859), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3857, 3859), True, 'from matplotlib import pyplot as plt\n')] |
import time
import cv2
import numpy as np
from chainer import serializers, Variable, cuda
import chainer.functions as F
import argparse
from model import YOLOv2, YOLOv2Predictor
from lib.utils import nms
from lib.utils import Box
fcn = False
class Predictor:
    """YOLOv2 object detector (chainer backend).

    Loads hard-coded pretrained weights and class labels at construction;
    calling the instance with a BGR image (as returned by cv2.imread)
    returns the NMS-filtered list of detections.
    """
    def __init__(self, gpu=0):
        # gpu: CUDA device id; a negative value keeps the model on CPU.
        # hyper parameters
        weight_file = "./weight/fcn-un4-100"
        self.n_classes_fcn = 7
        self.n_classes_yolo = 2
        self.n_boxes = 5  # anchor boxes per grid cell
        self.detection_thresh = 0.2  # min objectness*class score to keep a box
        self.iou_thresh = 0.1  # IoU threshold used by NMS
        self.label_file = "./label.txt"
        with open(self.label_file, "r") as f:
            self.labels = f.read().strip().split("\n")
        # load model
        yolov2 = YOLOv2(n_classes_fcn=self.n_classes_fcn, n_classes_yolo=self.n_classes_yolo, n_boxes=self.n_boxes)
        model = YOLOv2Predictor(yolov2, FCN=fcn)
        serializers.load_npz(weight_file, model)
        if gpu >= 0:
            cuda.get_device(gpu).use()
            model.to_gpu()
        self.model = model
        self.gpu = gpu
    def __call__(self, img):
        """Run detection on a BGR uint8 image; return NMS-filtered results.

        Each result is a dict with keys "label", "probs", "conf",
        "objectness" and "box" (a Box in original-image pixel coordinates).
        """
        orig_input_height, orig_input_width, _ = img.shape
        #img = cv2.resize(orig_img, (640, 640))
        input_height, input_width, _ = img.shape
        # Convert BGR -> RGB, scale to [0, 1], reorder to CHW.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = np.asarray(img, dtype=np.float32) / 255.0
        img = img.transpose(2, 0, 1)
        # forward
        x_data = img[np.newaxis, :, :, :]
        x = Variable(x_data)
        if self.gpu >= 0:
            x.to_gpu()
        pred = self.model.predict(x)
        x, y, w, h, conf, prob = pred
        # parse results
        # Reshape per-anchor outputs to (n_boxes, grid_h, grid_w); class
        # probabilities to (n_classes, n_boxes, grid_h, grid_w).
        _, _, _, grid_h, grid_w = x.shape
        x = F.reshape(x, (self.n_boxes, grid_h, grid_w)).data
        y = F.reshape(y, (self.n_boxes, grid_h, grid_w)).data
        w = F.reshape(w, (self.n_boxes, grid_h, grid_w)).data
        h = F.reshape(h, (self.n_boxes, grid_h, grid_w)).data
        conf = F.reshape(conf, (self.n_boxes, grid_h, grid_w)).data
        prob = F.transpose(F.reshape(prob, (self.n_boxes, self.n_classes_yolo, grid_h, grid_w)), (1, 0, 2, 3)).data
        # Keep cells whose best class score (objectness * class prob)
        # exceeds the detection threshold.
        detected_indices = (conf * prob).max(axis=0) > self.detection_thresh
        if self.gpu >= 0:
            x = cuda.to_cpu(x)
            y = cuda.to_cpu(y)
            w = cuda.to_cpu(w)
            h = cuda.to_cpu(h)
            conf = cuda.to_cpu(conf)
            prob = cuda.to_cpu(prob)
            detected_indices = cuda.to_cpu(detected_indices)
        results = []
        for i in range(detected_indices.sum()):
            # NOTE(review): x/y/w/h appear to be normalized [0, 1] box
            # coordinates scaled here to original-image pixels — confirm
            # against YOLOv2Predictor.predict.
            results.append({
                "label": self.labels[prob.transpose(1, 2, 3, 0)[detected_indices][i].argmax()],
                "probs": prob.transpose(1, 2, 3, 0)[detected_indices][i],
                "conf" : conf[detected_indices][i],
                "objectness": conf[detected_indices][i] * prob.transpose(1, 2, 3, 0)[detected_indices][i].max(),
                "box"  : Box(
                            x[detected_indices][i]*orig_input_width,
                            y[detected_indices][i]*orig_input_height,
                            w[detected_indices][i]*orig_input_width,
                            h[detected_indices][i]*orig_input_height).crop_region(orig_input_height, orig_input_width)
            })
        # nms
        nms_results = nms(results, self.iou_thresh)
        return nms_results
if __name__ == "__main__":
    # command-line arguments
    arg_parser = argparse.ArgumentParser(description="predict image")
    arg_parser.add_argument('--path', help="input image path")
    arg_parser.add_argument('--gpu', default=0, type=int)
    cli_args = arg_parser.parse_args()
    # load the input image and run detection
    image = cv2.imread(cli_args.path)
    detector = Predictor(gpu=cli_args.gpu)
    detections = detector(image)
    # draw every surviving detection onto the image
    print(len(detections))
    for detection in detections:
        left, top = detection["box"].int_left_top()
        cv2.rectangle(
            image,
            detection["box"].int_left_top(),
            detection["box"].int_right_bottom(),
            (255, 0, 255),
            3
        )
        text = '%s(%2d%%)' % (detection["label"],
                              detection["probs"].max()*detection["conf"]*100)
        cv2.putText(image, text, (left, top-6), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        print(text)
    cv2.imwrite('out.png', image)
| [
"chainer.Variable",
"lib.utils.Box",
"cv2.putText",
"argparse.ArgumentParser",
"model.YOLOv2",
"chainer.serializers.load_npz",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.asarray",
"chainer.cuda.get_device",
"chainer.cuda.to_cpu",
"cv2.imread",
"model.YOLOv2Predictor",
"chainer.functions.reshape... | [((3398, 3450), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""predict image"""'}), "(description='predict image')\n", (3421, 3450), False, 'import argparse\n'), ((3655, 3677), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (3665, 3677), False, 'import cv2\n'), ((4271, 4303), 'cv2.imwrite', 'cv2.imwrite', (['"""out.png"""', 'orig_img'], {}), "('out.png', orig_img)\n", (4282, 4303), False, 'import cv2\n'), ((697, 799), 'model.YOLOv2', 'YOLOv2', ([], {'n_classes_fcn': 'self.n_classes_fcn', 'n_classes_yolo': 'self.n_classes_yolo', 'n_boxes': 'self.n_boxes'}), '(n_classes_fcn=self.n_classes_fcn, n_classes_yolo=self.n_classes_yolo,\n n_boxes=self.n_boxes)\n', (703, 799), False, 'from model import YOLOv2, YOLOv2Predictor\n'), ((812, 844), 'model.YOLOv2Predictor', 'YOLOv2Predictor', (['yolov2'], {'FCN': 'fcn'}), '(yolov2, FCN=fcn)\n', (827, 844), False, 'from model import YOLOv2, YOLOv2Predictor\n'), ((853, 893), 'chainer.serializers.load_npz', 'serializers.load_npz', (['weight_file', 'model'], {}), '(weight_file, model)\n', (873, 893), False, 'from chainer import serializers, Variable, cuda\n'), ((1231, 1267), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1243, 1267), False, 'import cv2\n'), ((1434, 1450), 'chainer.Variable', 'Variable', (['x_data'], {}), '(x_data)\n', (1442, 1450), False, 'from chainer import serializers, Variable, cuda\n'), ((3279, 3308), 'lib.utils.nms', 'nms', (['results', 'self.iou_thresh'], {}), '(results, self.iou_thresh)\n', (3282, 3308), False, 'from lib.utils import nms\n'), ((4153, 4252), 'cv2.putText', 'cv2.putText', (['orig_img', 'text', '(left, top - 6)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.6)', '(255, 255, 255)', '(2)'], {}), '(orig_img, text, (left, top - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.6,\n (255, 255, 255), 2)\n', (4164, 4252), False, 'import cv2\n'), ((1282, 1315), 'numpy.asarray', 'np.asarray', (['img'], 
{'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (1292, 1315), True, 'import numpy as np\n'), ((1654, 1698), 'chainer.functions.reshape', 'F.reshape', (['x', '(self.n_boxes, grid_h, grid_w)'], {}), '(x, (self.n_boxes, grid_h, grid_w))\n', (1663, 1698), True, 'import chainer.functions as F\n'), ((1716, 1760), 'chainer.functions.reshape', 'F.reshape', (['y', '(self.n_boxes, grid_h, grid_w)'], {}), '(y, (self.n_boxes, grid_h, grid_w))\n', (1725, 1760), True, 'import chainer.functions as F\n'), ((1778, 1822), 'chainer.functions.reshape', 'F.reshape', (['w', '(self.n_boxes, grid_h, grid_w)'], {}), '(w, (self.n_boxes, grid_h, grid_w))\n', (1787, 1822), True, 'import chainer.functions as F\n'), ((1840, 1884), 'chainer.functions.reshape', 'F.reshape', (['h', '(self.n_boxes, grid_h, grid_w)'], {}), '(h, (self.n_boxes, grid_h, grid_w))\n', (1849, 1884), True, 'import chainer.functions as F\n'), ((1905, 1952), 'chainer.functions.reshape', 'F.reshape', (['conf', '(self.n_boxes, grid_h, grid_w)'], {}), '(conf, (self.n_boxes, grid_h, grid_w))\n', (1914, 1952), True, 'import chainer.functions as F\n'), ((2193, 2207), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['x'], {}), '(x)\n', (2204, 2207), False, 'from chainer import serializers, Variable, cuda\n'), ((2224, 2238), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['y'], {}), '(y)\n', (2235, 2238), False, 'from chainer import serializers, Variable, cuda\n'), ((2255, 2269), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['w'], {}), '(w)\n', (2266, 2269), False, 'from chainer import serializers, Variable, cuda\n'), ((2286, 2300), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['h'], {}), '(h)\n', (2297, 2300), False, 'from chainer import serializers, Variable, cuda\n'), ((2320, 2337), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['conf'], {}), '(conf)\n', (2331, 2337), False, 'from chainer import serializers, Variable, cuda\n'), ((2357, 2374), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['prob'], {}), '(prob)\n', (2368, 2374), False, 'from chainer import 
serializers, Variable, cuda\n'), ((2406, 2435), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['detected_indices'], {}), '(detected_indices)\n', (2417, 2435), False, 'from chainer import serializers, Variable, cuda\n'), ((1985, 2053), 'chainer.functions.reshape', 'F.reshape', (['prob', '(self.n_boxes, self.n_classes_yolo, grid_h, grid_w)'], {}), '(prob, (self.n_boxes, self.n_classes_yolo, grid_h, grid_w))\n', (1994, 2053), True, 'import chainer.functions as F\n'), ((927, 947), 'chainer.cuda.get_device', 'cuda.get_device', (['gpu'], {}), '(gpu)\n', (942, 947), False, 'from chainer import serializers, Variable, cuda\n'), ((2895, 3081), 'lib.utils.Box', 'Box', (['(x[detected_indices][i] * orig_input_width)', '(y[detected_indices][i] * orig_input_height)', '(w[detected_indices][i] * orig_input_width)', '(h[detected_indices][i] * orig_input_height)'], {}), '(x[detected_indices][i] * orig_input_width, y[detected_indices][i] *\n orig_input_height, w[detected_indices][i] * orig_input_width, h[\n detected_indices][i] * orig_input_height)\n', (2898, 3081), False, 'from lib.utils import Box\n')] |
__author__ = "<NAME>"
import unittest
import numpy as np
import numpy.testing as npt
import skbio
from mohawk._format import (locus_generator,
extract_subsequences,
encode_sequence,
join_contigs,
sample_from_contig_set)
class FormattingTests(unittest.TestCase):
    """Unit tests for the sequence formatting helpers in mohawk._format."""

    def test_contig_locus_generator(self):
        subseq_length = 10
        lo, hi = 0, 20
        n_indices = 10

        def fake_randfunc(start, stop, n):
            # deterministic stand-in; n is ignored on purpose
            return np.arange(start, stop)

        expected = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        observed = locus_generator(lo, hi, subseq_length, n_indices,
                                   fake_randfunc)
        npt.assert_equal(observed, expected)

    def test_extract_subsequences(self):
        # two back-to-back copies of the pattern 000 111 222 333
        full_seq = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 2)
        start_positions = np.array([0, 4, 10])
        expected = np.array([[0, 0, 0, 1, 1],
                             [1, 1, 2, 2, 2],
                             [3, 3, 0, 0, 0]])
        observed = extract_subsequences(full_seq, start_positions, 5)
        npt.assert_equal(observed, expected)

        # reading "off the end" of the sequence must fail
        with self.assertRaises(ValueError):
            extract_subsequences(full_seq, np.array([0, 5, 18]), 10)

    def test_encode_sequence(self):
        # note that skbio's DNA constructor protects us from lowercase
        expected = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])
        observed = encode_sequence(skbio.DNA('AAATTTGGGCCC'))
        npt.assert_equal(observed, expected)

    def test_join_contigs(self):
        contigs = [skbio.DNA('AATTGG'), skbio.DNA('CCTTAA'), skbio.DNA('ATAT')]
        #            0123456789012345
        joined, breaks = join_contigs(contigs)
        self.assertEqual(joined, skbio.DNA('AATTGGCCTTAAATAT'))
        npt.assert_equal(breaks, np.array([0, 6, 12, 16]))

    def remap(self, seq_as_array):
        """Translate an integer-encoded sequence back to a DNA string."""
        alphabet = {0: 'A', 1: 'T', 2: 'G', 3: 'C'}
        return ''.join(alphabet[c] for c in seq_as_array)

    def test_sample_from_contig_set_one_short(self):
        np.random.seed(1234)
        # An integration test; the second contig is shorter than `length`
        # 0123456789012345678901234567890123456789
        contigs = [skbio.DNA('ATGCAATTGGCCAAATTTGGGCCCAAAATTTTGGGGCCCC'),
                   skbio.DNA('CGTACCGGTT')]
        samples = sample_from_contig_set(contigs, 100, 15, np.random.randint)
        self.assertEqual(100, len(samples))

    def test_sample_from_contig_set(self):
        def fixed_positions(start, stop, n):
            if start == 0:
                return np.tile([0, 5, 10, 15, 9, 12], 100)[:n]
            return np.tile([40, 41, 42, 43], 100)[:n]

        np.random.seed(1234)
        # An integration test
        # 0123456789012345678901234567890123456789
        contigs = [skbio.DNA('ATGCAATTGGCCAAATTTGGGCCCAAAATTTTGGGGCCCC'),
                   skbio.DNA('CGTACCGGTT')]
        whole = skbio.DNA.concat(contigs)
        samples = sample_from_contig_set(contigs, 100, 3, fixed_positions)
        start_indices = []
        for read in samples:
            as_dna = self.remap(read)
            self.assertIn(as_dna, whole)
            start_indices.append(whole.index(as_dna))
        # we expect both the first and second sequence to be fully
        # represented by our starting indices except in a rare stochastic
        # scenario (as on average, 20 reads will come from the second contig)
        self.assertTrue(set(start_indices) == {0, 5, 10, 15, 9, 12, 40, 41, 42, 43})
        # we could potentially verify multinomial is working as expected but
        # that may be getting a bit pedantic.
# Allow this test module to be executed directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.random.seed",
"skbio.DNA.concat",
"skbio.DNA",
"mohawk._format.encode_sequence",
"numpy.array",
"mohawk._format.locus_generator",
"numpy.testing.assert_equal",
"mohawk._format.join_contigs",
"numpy.arange",
"mohawk._format.extract_subsequences",
"mohawk._format.sample_f... | [((4211, 4226), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4224, 4226), False, 'import unittest\n'), ((641, 681), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (649, 681), True, 'import numpy as np\n'), ((696, 758), 'mohawk._format.locus_generator', 'locus_generator', (['start', 'stop', 'length', 'n_indices', 'mock_function'], {}), '(start, stop, length, n_indices, mock_function)\n', (711, 758), False, 'from mohawk._format import locus_generator, extract_subsequences, encode_sequence, join_contigs, sample_from_contig_set\n'), ((797, 823), 'numpy.testing.assert_equal', 'npt.assert_equal', (['obs', 'exp'], {}), '(obs, exp)\n', (813, 823), True, 'import numpy.testing as npt\n'), ((949, 1035), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3,\n 3, 3])\n', (957, 1035), True, 'import numpy as np\n'), ((1081, 1101), 'numpy.array', 'np.array', (['[0, 4, 10]'], {}), '([0, 4, 10])\n', (1089, 1101), True, 'import numpy as np\n'), ((1135, 1196), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 1], [1, 1, 2, 2, 2], [3, 3, 0, 0, 0]]'], {}), '([[0, 0, 0, 1, 1], [1, 1, 2, 2, 2], [3, 3, 0, 0, 0]])\n', (1143, 1196), True, 'import numpy as np\n'), ((1259, 1308), 'mohawk._format.extract_subsequences', 'extract_subsequences', (['sequence', 'positions', 'length'], {}), '(sequence, positions, length)\n', (1279, 1308), False, 'from mohawk._format import locus_generator, extract_subsequences, encode_sequence, join_contigs, sample_from_contig_set\n'), ((1317, 1343), 'numpy.testing.assert_equal', 'npt.assert_equal', (['obs', 'exp'], {}), '(obs, exp)\n', (1333, 1343), True, 'import numpy.testing as npt\n'), ((1441, 1461), 'numpy.array', 'np.array', (['[0, 5, 18]'], {}), '([0, 5, 18])\n', (1449, 1461), True, 'import numpy as np\n'), ((1624, 1649), 'skbio.DNA', 
'skbio.DNA', (['"""AAATTTGGGCCC"""'], {}), "('AAATTTGGGCCC')\n", (1633, 1649), False, 'import skbio\n'), ((1664, 1710), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3])\n', (1672, 1710), True, 'import numpy as np\n'), ((1725, 1750), 'mohawk._format.encode_sequence', 'encode_sequence', (['sequence'], {}), '(sequence)\n', (1740, 1750), False, 'from mohawk._format import locus_generator, extract_subsequences, encode_sequence, join_contigs, sample_from_contig_set\n'), ((1759, 1785), 'numpy.testing.assert_equal', 'npt.assert_equal', (['obs', 'exp'], {}), '(obs, exp)\n', (1775, 1785), True, 'import numpy.testing as npt\n'), ((2060, 2089), 'skbio.DNA', 'skbio.DNA', (['"""AATTGGCCTTAAATAT"""'], {}), "('AATTGGCCTTAAATAT')\n", (2069, 2089), False, 'import skbio\n'), ((2111, 2135), 'numpy.array', 'np.array', (['[0, 6, 12, 16]'], {}), '([0, 6, 12, 16])\n', (2119, 2135), True, 'import numpy as np\n'), ((2166, 2189), 'mohawk._format.join_contigs', 'join_contigs', (['sequences'], {}), '(sequences)\n', (2178, 2189), False, 'from mohawk._format import locus_generator, extract_subsequences, encode_sequence, join_contigs, sample_from_contig_set\n'), ((2241, 2281), 'numpy.testing.assert_equal', 'npt.assert_equal', (['obs_breaks', 'exp_breaks'], {}), '(obs_breaks, exp_breaks)\n', (2257, 2281), True, 'import numpy.testing as npt\n'), ((2516, 2536), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (2530, 2536), True, 'import numpy as np\n'), ((2818, 2876), 'mohawk._format.sample_from_contig_set', 'sample_from_contig_set', (['sequences', 'depth', 'length', 'randfunc'], {}), '(sequences, depth, length, randfunc)\n', (2840, 2876), False, 'from mohawk._format import locus_generator, extract_subsequences, encode_sequence, join_contigs, sample_from_contig_set\n'), ((3173, 3193), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (3187, 3193), True, 'import numpy as np\n'), ((3437, 3464), 
'skbio.DNA.concat', 'skbio.DNA.concat', (['sequences'], {}), '(sequences)\n', (3453, 3464), False, 'import skbio\n'), ((3520, 3574), 'mohawk._format.sample_from_contig_set', 'sample_from_contig_set', (['sequences', 'depth', 'length', 'mock'], {}), '(sequences, depth, length, mock)\n', (3542, 3574), False, 'from mohawk._format import locus_generator, extract_subsequences, encode_sequence, join_contigs, sample_from_contig_set\n'), ((603, 625), 'numpy.arange', 'np.arange', (['start', 'stop'], {}), '(start, stop)\n', (612, 625), True, 'import numpy as np\n'), ((1518, 1567), 'mohawk._format.extract_subsequences', 'extract_subsequences', (['sequence', 'positions', 'length'], {}), '(sequence, positions, length)\n', (1538, 1567), False, 'from mohawk._format import locus_generator, extract_subsequences, encode_sequence, join_contigs, sample_from_contig_set\n'), ((1913, 1932), 'skbio.DNA', 'skbio.DNA', (['"""AATTGG"""'], {}), "('AATTGG')\n", (1922, 1932), False, 'import skbio\n'), ((1934, 1953), 'skbio.DNA', 'skbio.DNA', (['"""CCTTAA"""'], {}), "('CCTTAA')\n", (1943, 1953), False, 'import skbio\n'), ((1976, 1993), 'skbio.DNA', 'skbio.DNA', (['"""ATAT"""'], {}), "('ATAT')\n", (1985, 1993), False, 'import skbio\n'), ((2661, 2714), 'skbio.DNA', 'skbio.DNA', (['"""ATGCAATTGGCCAAATTTGGGCCCAAAATTTTGGGGCCCC"""'], {}), "('ATGCAATTGGCCAAATTTGGGCCCAAAATTTTGGGGCCCC')\n", (2670, 2714), False, 'import skbio\n'), ((2737, 2760), 'skbio.DNA', 'skbio.DNA', (['"""CGTACCGGTT"""'], {}), "('CGTACCGGTT')\n", (2746, 2760), False, 'import skbio\n'), ((3318, 3371), 'skbio.DNA', 'skbio.DNA', (['"""ATGCAATTGGCCAAATTTGGGCCCAAAATTTTGGGGCCCC"""'], {}), "('ATGCAATTGGCCAAATTTGGGCCCAAAATTTTGGGGCCCC')\n", (3327, 3371), False, 'import skbio\n'), ((3394, 3417), 'skbio.DNA', 'skbio.DNA', (['"""CGTACCGGTT"""'], {}), "('CGTACCGGTT')\n", (3403, 3417), False, 'import skbio\n'), ((3048, 3083), 'numpy.tile', 'np.tile', (['[0, 5, 10, 15, 9, 12]', '(100)'], {}), '([0, 5, 10, 15, 9, 12], 100)\n', (3055, 3083), True, 
'import numpy as np\n'), ((3129, 3159), 'numpy.tile', 'np.tile', (['[40, 41, 42, 43]', '(100)'], {}), '([40, 41, 42, 43], 100)\n', (3136, 3159), True, 'import numpy as np\n')] |
import numpy as np
import sumu
def test_pairwise_causal_estimation():
    """Pairwise causal effects estimated by Beeps should be close to the
    ground-truth effects of a random linear Gaussian Bayesian network."""
    np.random.seed(0)
    n_nodes = 6
    true_net = sumu.GaussianBNet.random(n_nodes)
    samples = true_net.sample(20000)
    dags, scores = sumu.Gadget(data=samples, mcmc={"iters": 80000}).sample()
    pairwise = sumu.Beeps(dags=dags, data=samples).sample_pairwise()
    # closed-form total effects of the linear Gaussian model: (I - B)^-1
    truth = np.linalg.inv(np.eye(n_nodes) - true_net.B)
    mse = ((truth - pairwise.mean(axis=0)) ** 2).mean()
    print(mse)
    assert mse < 0.1
def main():
    """Run the pairwise causal estimation smoke test."""
    test_pairwise_causal_estimation()
if __name__ == "__main__":
    main()
| [
"sumu.Gadget",
"numpy.random.seed",
"sumu.Beeps",
"numpy.eye",
"sumu.GaussianBNet.random"
] | [((76, 93), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (90, 93), True, 'import numpy as np\n'), ((113, 140), 'sumu.GaussianBNet.random', 'sumu.GaussianBNet.random', (['n'], {}), '(n)\n', (137, 140), False, 'import sumu\n'), ((188, 233), 'sumu.Gadget', 'sumu.Gadget', ([], {'data': 'data', 'mcmc': "{'iters': 80000}"}), "(data=data, mcmc={'iters': 80000})\n", (199, 233), False, 'import sumu\n'), ((264, 296), 'sumu.Beeps', 'sumu.Beeps', ([], {'dags': 'dags', 'data': 'data'}), '(dags=dags, data=data)\n', (274, 296), False, 'import sumu\n'), ((350, 359), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (356, 359), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
.. Authors
<NAME> <<EMAIL>>
A set of routines for related to Voigt distributions.
"""
import numpy as np
from scipy.interpolate import interp1d
from scipy.special import wofz
def voigt(x, intensity=None, location=None, sigma=None, gamma=None):
    """
    Evaluate a Voigt profile at ``x``.

    The Voigt function is the real part of w(z) = exp(-z^2) erfc(-iz),
    the complex probability function, also known as the Faddeeva
    function. Scipy implements this function under the name wofz().

    Parameters
    ----------
    x : array_like
        Point(s) at which to evaluate the profile.
    intensity : float, optional
        Integrated area of the profile. Defaults to 1.0, i.e. a
        normalized probability density.
    location : float, optional
        Center of the profile. Defaults to 0.0.
    sigma : float
        Standard deviation of the Gaussian component (required).
    gamma : float, optional
        Half-width at half-maximum of the Lorentzian component.
        Defaults to 0.0, which reduces the profile to a pure Gaussian.

    Returns
    -------
    ndarray or float
        Profile value(s), same shape as ``x``.

    Raises
    ------
    ValueError
        If ``sigma`` is not provided.
    """
    # Previously all-None defaults caused an opaque TypeError when a
    # parameter was omitted; now the optional ones have sensible defaults
    # and a missing sigma raises a clear error.
    if sigma is None:
        raise ValueError('sigma is required for the voigt profile.')
    if intensity is None:
        intensity = 1.0
    if location is None:
        location = 0.0
    if gamma is None:
        gamma = 0.0
    z = (x - location + 1j*gamma)/np.sqrt(2)/sigma
    y = wofz(z).real/np.sqrt(2*np.pi)/sigma * intensity
    return y
def voigt_cdf_tab(gamma, sigma, gridsize=None, cutoff=None):
    """Tabulate the cumulative distribution function of a Voigt profile.

    Returns (x, cdf) arrays suitable for interpolation; x holds the right
    edges of the integration cells and cdf the cumulative integral up to
    each edge.

    :param gamma: Lorentzian half-width at half-maximum.
    :param sigma: Gaussian standard deviation.
    :param gridsize: number of integration cells (default 1000).
    :param cutoff: tail probability density level used to choose the
        domain extent (default 1e-5).
    :raises Exception: if the resulting grid lacks resolution around the
        CDF median or the domain does not reach cdf >= 0.99.
    """
    # This is a numerical method to calculate the cumulative distribution function
    # for a voigt profile. This works reasonably well, but is limited both by
    # the sampling resolution, and the chosen bounds.
    #
    # In this case the CDF is calculated with a variable grid density to help
    # mitigate those effects.
    #
    # There are a couple of possibilities to speed this up:
    #  1. It may be possible to optimize the grid spacing by using
    #     a fuction that increases faster away from zero.
    #  2. The CDF is symetric so only calculation up to x=0 is needed.
    #  3. For some applicaiton I might be able to use a psudo-voigt
    #     calculation that may be faster than the wofz implementation
    #     (at the expense of accuracy.)
    if gridsize is None: gridsize = 1000
    if cutoff is None: cutoff = 1e-5
    # The current scheme works well with a minimum of 100 points.
    # It is possible to go as low as 50 points, but accuracy is not great.
    gridsize_min = 100
    fraction = 0.5
    # Half-widths at the given fraction of the maximum for each component.
    gauss_hwfm = np.sqrt(2.0*np.log(1.0/fraction))*sigma
    lorentz_hwfm = gamma*np.sqrt(1.0/fraction - 1.0)
    # This is always larger than the voigt hwfm (half width at percentile).
    hwfm_max = np.sqrt(gauss_hwfm**2 + lorentz_hwfm**2)
    min_spacing = hwfm_max/5.0
    value = gridsize_min/2*min_spacing
    # Determine a cutoff value.
    lorentz_cutoff = gamma*np.sqrt(1.0/cutoff - 1.0)
    gauss_cutoff = np.sqrt(-1 * sigma**2 * 2 * np.log(cutoff*sigma*np.sqrt(2*np.pi)))
    value_cutoff = max(lorentz_cutoff, gauss_cutoff)
    # Exponential stretching factor so the outermost cell reaches the cutoff.
    base = np.exp(1/10 * np.log(value_cutoff/value))
    bounds = np.linspace(-value, value, gridsize+1)
    bounds = bounds*base**np.abs(bounds/value*10)
    # Evaluate the density at cell midpoints (midpoint rule).
    cdf_x = (bounds[:-1]+bounds[1:])/2
    # We must use a properly normalized voigt here (intensity=1.0)
    cdf_y = voigt(
        cdf_x
        ,intensity=1.0
        ,location=0.0
        ,sigma=sigma
        ,gamma=gamma)
    cdf_ydx = (cdf_y*(bounds[1:]-bounds[:-1]))
    cdf = np.cumsum(cdf_ydx)
    # These checks are only useful if the user changes the number
    # of calculated points.
    if (np.sum((cdf > 0.25) & (cdf < 0.75)) < 3):
        raise Exception('Voight CDF calculation does not have enough resolution.')
    if (np.max(cdf) < 0.99):
        raise Exception('Voight CDF calculation domain too small.')
    return bounds[1:], cdf
def voigt_cdf_interp(gamma, sigma, gridsize=None):
    """Return a callable quadratic interpolant of the Voigt CDF."""
    grid, cumulative = voigt_cdf_tab(gamma, sigma, gridsize)
    return interp1d(grid, cumulative, kind='quadratic')
def voigt_invcdf_interp(gamma, sigma, gridsize=None):
    """Return a callable quadratic interpolant of the inverse Voigt CDF."""
    grid, cumulative = voigt_cdf_tab(gamma, sigma, gridsize)
    return interp1d(cumulative, grid, kind='quadratic')
def voigt_cdf_numeric(x, gamma, sigma, gridsize=None):
    """Evaluate the tabulated Voigt CDF at x by linear interpolation."""
    grid, cumulative = voigt_cdf_tab(gamma, sigma, gridsize)
    return np.interp(x, grid, cumulative)
def voigt_invcdf_numeric(x, gamma, sigma, gridsize=None):
    """Evaluate the inverse Voigt CDF at x by linear interpolation.

    Probabilities outside the tabulated range map to -inf/+inf.
    """
    grid, cumulative = voigt_cdf_tab(gamma, sigma, gridsize)
    return np.interp(x, cumulative, grid, left=-np.inf, right=np.inf)
def voigt_random(gamma, sigma, size, **kwargs):
    """
    Draw random samples from a Voigt distribution.

    Sampling uses inverse-transform on a tabulated CDF, so the tails of
    the distribution will be clipped; the clipping level can be adjusted
    with the cutoff keyword (default 1e-5).
    """
    grid, cumulative = voigt_cdf_tab(gamma, sigma, **kwargs)
    lo = np.min(cumulative)
    hi = np.max(cumulative)
    uniform_draws = np.random.uniform(lo, hi, size)
    return np.interp(uniform_draws, cumulative, grid)
| [
"numpy.sum",
"numpy.log",
"numpy.abs",
"numpy.cumsum",
"numpy.max",
"numpy.min",
"numpy.linspace",
"scipy.special.wofz",
"numpy.interp",
"scipy.interpolate.interp1d",
"numpy.sqrt"
] | [((1997, 2041), 'numpy.sqrt', 'np.sqrt', (['(gauss_hwfm ** 2 + lorentz_hwfm ** 2)'], {}), '(gauss_hwfm ** 2 + lorentz_hwfm ** 2)\n', (2004, 2041), True, 'import numpy as np\n'), ((2401, 2441), 'numpy.linspace', 'np.linspace', (['(-value)', 'value', '(gridsize + 1)'], {}), '(-value, value, gridsize + 1)\n', (2412, 2441), True, 'import numpy as np\n'), ((2785, 2803), 'numpy.cumsum', 'np.cumsum', (['cdf_ydx'], {}), '(cdf_ydx)\n', (2794, 2803), True, 'import numpy as np\n'), ((3278, 3312), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'cdf'], {'kind': '"""quadratic"""'}), "(x, cdf, kind='quadratic')\n", (3286, 3312), False, 'from scipy.interpolate import interp1d\n'), ((3451, 3485), 'scipy.interpolate.interp1d', 'interp1d', (['cdf', 'x'], {'kind': '"""quadratic"""'}), "(cdf, x, kind='quadratic')\n", (3459, 3485), False, 'from scipy.interpolate import interp1d\n'), ((3624, 3648), 'numpy.interp', 'np.interp', (['x', 'cdf_x', 'cdf'], {}), '(x, cdf_x, cdf)\n', (3633, 3648), True, 'import numpy as np\n'), ((3785, 3837), 'numpy.interp', 'np.interp', (['x', 'cdf', 'cdf_x'], {'left': '(-np.inf)', 'right': 'np.inf'}), '(x, cdf, cdf_x, left=-np.inf, right=np.inf)\n', (3794, 3837), True, 'import numpy as np\n'), ((4255, 4286), 'numpy.interp', 'np.interp', (['random_y', 'cdf', 'cdf_x'], {}), '(random_y, cdf, cdf_x)\n', (4264, 4286), True, 'import numpy as np\n'), ((1878, 1907), 'numpy.sqrt', 'np.sqrt', (['(1.0 / fraction - 1.0)'], {}), '(1.0 / fraction - 1.0)\n', (1885, 1907), True, 'import numpy as np\n'), ((2169, 2196), 'numpy.sqrt', 'np.sqrt', (['(1.0 / cutoff - 1.0)'], {}), '(1.0 / cutoff - 1.0)\n', (2176, 2196), True, 'import numpy as np\n'), ((2907, 2942), 'numpy.sum', 'np.sum', (['((cdf > 0.25) & (cdf < 0.75))'], {}), '((cdf > 0.25) & (cdf < 0.75))\n', (2913, 2942), True, 'import numpy as np\n'), ((3040, 3051), 'numpy.max', 'np.max', (['cdf'], {}), '(cdf)\n', (3046, 3051), True, 'import numpy as np\n'), ((4208, 4219), 'numpy.min', 'np.min', (['cdf'], {}), '(cdf)\n', 
(4214, 4219), True, 'import numpy as np\n'), ((4221, 4232), 'numpy.max', 'np.max', (['cdf'], {}), '(cdf)\n', (4227, 4232), True, 'import numpy as np\n'), ((599, 609), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (606, 609), True, 'import numpy as np\n'), ((2359, 2387), 'numpy.log', 'np.log', (['(value_cutoff / value)'], {}), '(value_cutoff / value)\n', (2365, 2387), True, 'import numpy as np\n'), ((2466, 2493), 'numpy.abs', 'np.abs', (['(bounds / value * 10)'], {}), '(bounds / value * 10)\n', (2472, 2493), True, 'import numpy as np\n'), ((637, 655), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (644, 655), True, 'import numpy as np\n'), ((1825, 1847), 'numpy.log', 'np.log', (['(1.0 / fraction)'], {}), '(1.0 / fraction)\n', (1831, 1847), True, 'import numpy as np\n'), ((624, 631), 'scipy.special.wofz', 'wofz', (['z'], {}), '(z)\n', (628, 631), False, 'from scipy.special import wofz\n'), ((2262, 2280), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2269, 2280), True, 'import numpy as np\n')] |
"""
Code for self-training with weak supervision.
Author: <NAME> (<EMAIL>)
"""
import math
import numpy as np
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda
class RAN:
"""
Rule Attention Network
* Input: text embedding x, array of rule predictions
* Output: aggregate label
"""
def __init__(self, config, num_rules, logger=None, name='ran'):
self.name = name
self.logger = logger
self.manual_seed = config.seed
tf.random.set_seed(self.manual_seed)
self.datapath = config.datapath
self.model_dir = config.logdir
self.sup_batch_size = config.train_batch_size
self.unsup_batch_size = config.unsup_batch_size
self.sup_epochs = config.num_epochs
self.unsup_epochs = config.num_unsup_epochs
self.num_labels = config.num_labels
self.num_rules = num_rules
# max_rule_seq_length: used for efficiency (Note: no rules are discarded.)
self.max_rule_seq_length = min(self.num_rules, config.max_rule_seq_length)
# Using Student as an extra rule
self.num_rules += 1
self.max_rule_seq_length += 1
self.student_rule_id = self.num_rules
self.hard_student_rule = config.hard_student_rule
self.preprocess = None
self.trained = False
self.xdim = -1
self.ignore_student = False
self.gpus = 1
def init(self, rule_pred):
# Initialize RAN as majority voting (all sources have equal weights)
self.majority_model = MajorityVoter(num_labels=self.num_labels)
return
def postprocess_rule_preds(self, rule_pred, student_pred=None):
"""
Converts rule predictions to appropriate format
:param rule_pred: a 2D array of rule preds: num_examples x num_rules
:return:
rule_one_hot: a 2D mask matrix: 1 if rule applies otherwise 0
rule_pred: a 3D rule prediction matrix (N x num_rules x num_classes): converting class indices to one-hot vectors
# if a rule predicts -1, then pred = [0,...,0]
student_pred: the soft predictions of a student network
"""
max_rule_seq_length = self.max_rule_seq_length - 1 # -1: Using student as extra rule
N = rule_pred.shape[0]
rule_mask = (rule_pred != -1).astype(int)
fired_rule_ids = [(np.nonzero(x)[0] + 1).tolist() for x in rule_mask]
non_zero_rule_pred = []
for i, fired_rules in enumerate(fired_rule_ids):
preds_i = [rule_pred[i, j-1] for j in fired_rules]
preds_i = preds_i + [self.num_labels] * (max_rule_seq_length - len(preds_i))
if len(preds_i) > max_rule_seq_length:
self.logger.info("WARNING: Num firing rules = {} > max_rule_seq_length = {}".format(len(preds_i), max_rule_seq_length))
preds_i = preds_i[:max_rule_seq_length]
non_zero_rule_pred.append(preds_i)
one_hot_rule_pred = tf.one_hot(non_zero_rule_pred, self.num_labels + 1).numpy()
one_hot_rule_pred = one_hot_rule_pred[:, :, :-1]
fired_rule_ids = [x + [0] * (max_rule_seq_length - len(x)) for x in fired_rule_ids]
fired_rule_ids = np.array(fired_rule_ids)
if student_pred is not None:
mask_one = np.ones((N, 1))
if student_pred.ndim > 2:
student_pred = np.squeeze(student_pred, axis=None)
if self.hard_student_rule:
# Convert Student's soft probabilities to hard labels
student_pred = to_one_hot(np.argmax(student_pred, axis=1), self.num_labels)
student_pred = student_pred[..., np.newaxis, :] # Add axis=1
one_hot_rule_pred = np.concatenate([student_pred, one_hot_rule_pred], axis=1)
rule_mask = np.concatenate([mask_one, rule_mask], axis=1)
if not self.ignore_student:
student_rule_id = np.ones((N, 1)) * self.student_rule_id
else:
student_rule_id = np.zeros((N, 1))
fired_rule_ids = np.concatenate([student_rule_id, fired_rule_ids], axis=1)
return rule_mask, fired_rule_ids, one_hot_rule_pred
def train(self, x_train, rule_pred_train, y_train, x_dev=None, rule_pred_dev=None, y_dev=None,
student_pred_train=None, student_pred_dev=None,
x_unsup=None, rule_pred_unsup=None, student_pred_unsup=None):
assert x_unsup is not None, "For SSL RAN you need to also provide unlabeled data... "
if x_train is not None:
x_train = np.array(x_train)
y_train = np.array(y_train)
rule_one_hot_train, fired_rule_ids_train, rule_pred_train = self.postprocess_rule_preds(rule_pred_train, student_pred_train)
self.logger.info("X Train Shape " + str(x_train.shape) + ' ' + str(rule_pred_train.shape) + ' ' + str(y_train.shape))
else:
rule_one_hot_train, fired_rule_ids_train, rule_pred_train = None, None, None
if x_dev is not None:
x_dev = np.array(x_dev)
y_dev = np.array(y_dev)
rule_one_hot_dev, fired_rule_ids_dev, rule_pred_dev = self.postprocess_rule_preds(rule_pred_dev, student_pred_dev)
self.logger.info("X Dev Shape " + str(x_dev.shape) + ' ' + str(rule_pred_dev.shape) + ' ' + str(y_dev.shape))
else:
rule_one_hot_dev, fired_rule_ids_dev, rule_pred_dev = None, None, None
x_unsup = np.array(x_unsup)
rule_one_hot_unsup, fired_rule_ids_unsup, rule_pred_unsup = self.postprocess_rule_preds(rule_pred_unsup, student_pred_unsup)
self.logger.info("X Unsup Shape " + str(x_unsup.shape) + ' ' + str(rule_pred_unsup.shape))
if not self.trained or (x_train is not None and self.xdim != x_train.shape[1]):
if self.trained and self.xdim != x_train.shape[1]:
self.logger.info("WARNING: Changing dimensionality of x from {} to {}".format(self.xdim, x_train.shape[1]))
self.xdim = x_train.shape[1] if x_train is not None else x_unsup.shape[1]
self.model = construct_rule_network(self.xdim,
num_rules=self.num_rules,
num_labels=self.num_labels,
max_rule_seq_length=self.max_rule_seq_length,
seed=self.manual_seed)
self.logger.info("\n\n\t\t*** Training RAN ***")
loss_fn = MinEntropyLoss(batch_size=self.unsup_batch_size * self.gpus) # SSLLoss()
self.model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=loss_fn,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")])
self.model.fit(
x=[x_unsup, fired_rule_ids_unsup, rule_pred_unsup],
y=np.array([-1] * x_unsup.shape[0]),
batch_size=self.unsup_batch_size * self.gpus,
shuffle=True,
epochs=self.sup_epochs,
callbacks=[
create_learning_rate_scheduler(max_learn_rate=1e-2, end_learn_rate=1e-5, warmup_epoch_count=20,
total_epoch_count=self.sup_epochs),
K.callbacks.EarlyStopping(patience=20, restore_best_weights=True)
],
validation_data=([x_dev, fired_rule_ids_dev, rule_pred_dev], y_dev))
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
self.model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=loss_fn,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")])
self.model.fit(
x=[x_train, fired_rule_ids_train, rule_pred_train],
y=y_train,
batch_size=self.sup_batch_size * self.gpus,
shuffle=True,
epochs=self.sup_epochs,
callbacks=[
create_learning_rate_scheduler(max_learn_rate=1e-2, end_learn_rate=1e-5, warmup_epoch_count=20,
total_epoch_count=self.sup_epochs),
K.callbacks.EarlyStopping(patience=20, restore_best_weights=True)
],
validation_data=([x_dev, fired_rule_ids_dev, rule_pred_dev], y_dev))
self.trained = True
dev_loss = self.model.evaluate([x_dev, fired_rule_ids_dev, rule_pred_dev], y_dev)
res = {}
res['dev_loss'] = dev_loss
return res
def predict(self, rule_pred, student_features, student_pred=None):
    """Route prediction to the trained RAN, or to the majority voter as fallback.

    :param rule_pred: weak-rule predictions for each instance
    :param student_features: feature representation consumed by the RAN
    :param student_pred: optional student predictions forwarded to the RAN
    :return: prediction dict produced by the selected backend
    """
    if self.trained:
        return self.predict_ran(student_features, rule_pred, student_pred=student_pred)
    return self.predict_majority(rule_pred)
def predict_majority(self, rule_pred):
    """Aggregate weak-rule votes with the fallback majority-vote model.

    :param rule_pred: weak-rule predictions for each instance
    :return: dict with 'preds' and 'proba' from the majority voter;
        'att_scores' and 'rule_mask' are None (no attention model involved)
    """
    voter = self.majority_model
    agg_labels = voter.predict(rule_pred)
    agg_proba = voter.predict_proba(rule_pred)
    result = {
        'preds': agg_labels,
        'proba': agg_proba,
        "att_scores": None,
        "rule_mask": None,
    }
    return result
def predict_ran(self, x, rule_pred, student_pred=None, batch_size=128, prefix=""):
    """Predict labels with the Rule Attention Network.

    :param x: student feature matrix (converted to an ndarray)
    :param rule_pred: weak-rule predictions per instance (-1 = rule abstains)
    :param student_pred: optional student predictions; when given, no instance
        starts out flagged as "random"
    :param batch_size: Keras prediction batch size
    :param prefix: unused here; kept for interface compatibility
    :return: dict with hard 'preds' (-1 for low-confidence instances),
        soft 'proba', per-rule 'att_scores', and the 'rule_mask'
    """
    x = np.array(x)
    if student_pred is None:
        # Instances where every rule abstained get a random/abstain prediction.
        random_pred = (rule_pred != -1).sum(axis=1) == 0
    else:
        random_pred = np.array([False] * rule_pred.shape[0])
    rule_mask, fired_rule_ids, rule_pred_one_hot = self.postprocess_rule_preds(rule_pred, student_pred)
    self.logger.info("RAN - Predicting labels for {} texts".format(x.shape[0]))
    y_pred = self.model.predict(
        [x, fired_rule_ids, rule_pred_one_hot],
        batch_size=batch_size
    )
    # BUG FIX: the original called .format(x.shape[0]) on a message with no
    # "{}" placeholder, so the argument was silently discarded.
    self.logger.info("DONE, Getting attention scores...")
    # Re-expose the intermediate "attention" layer as a model output to read
    # the per-rule attention scores.
    desiredOutputs = [self.model.get_layer("attention").output]
    newModel = tf.keras.Model(self.model.inputs, desiredOutputs)
    att_scores = newModel.predict(
        [x, fired_rule_ids, rule_pred_one_hot],
        batch_size=batch_size)
    preds = np.argmax(y_pred, axis=-1).flatten()
    # Reject predictions whose max probability is not above chance level
    # (1/num_labels); dist_to_random widens that margin (currently 0).
    dist_to_random = 0.0
    confidence_thres = 1 / self.num_labels + dist_to_random
    max_proba = np.max(y_pred, axis=-1).flatten()
    ignore_pred = max_proba < confidence_thres
    random_pred[ignore_pred] = True
    soft_proba = y_pred
    preds[random_pred] = -1
    return {
        'preds': preds,
        'proba': soft_proba,
        "att_scores": att_scores,
        "rule_mask": rule_mask,
    }
def load(self, savefile):
    """Restore the rule attention network weights from *savefile*."""
    msg = "loading rule attention network from {}".format(savefile)
    self.logger.info(msg)
    self.model.load_weights(savefile)
def save(self, savefile):
    """Persist the rule attention network weights to *savefile*."""
    msg = "Saving rule attention network at {}".format(savefile)
    self.logger.info(msg)
    self.model.save_weights(savefile)
def construct_rule_network(student_emb_dim, num_rules, num_labels, dense_dropout=0.3, max_rule_seq_length=10, seed=42):
    """Build the Rule Attention Network (RAN) as a Keras functional model.

    Inputs: student embeddings, the ids of the rules that fired per instance,
    and one-hot rule predictions.  Output: label probabilities obtained as an
    attention-weighted combination of rule predictions plus a uniform
    "random rule" contribution, L1-normalized.

    :param student_emb_dim: dimensionality of the student embedding input
    :param num_rules: number of weak rules (embedding table has num_rules+1
        rows; index 0 is reserved for masking, see mask_zero=True)
    :param num_labels: number of output classes
    :param dense_dropout: dropout rate around the dense projection
    :param max_rule_seq_length: max number of fired rules per instance
    :param seed: seed for the rule-embedding initializer
    :return: compiled-ready tf.keras.Model
    """
    # Rule Attention Network
    # encoder = TFBertModel.from_pretrained(model_type)
    student_embeddings = Input(shape=(student_emb_dim,), name="student_embeddings")
    rule_ids = Input(shape=(max_rule_seq_length,), dtype=tf.int32, name="rule_ids")
    rule_preds_onehot = Input(shape=(max_rule_seq_length, num_labels,), name="rule_preds")
    # x_hidden: batch_size x 128
    x_hidden = Dropout(dense_dropout)(student_embeddings)
    x_hidden = Dense(units=128, activation="relu", name="dense")(x_hidden)
    x_hidden = Dropout(dense_dropout)(x_hidden)
    # rule_embeddings_hidden: batch_size x 128 x max_rule_seq_length
    rule_embeddings = Embedding(num_rules+1, 128,
                                # embeddings_initializer='uniform',
                                embeddings_initializer=tf.keras.initializers.GlorotUniform(seed=seed),
                                embeddings_regularizer=None, activity_regularizer=None,
                                embeddings_constraint=None, mask_zero=True, input_length=max_rule_seq_length,
                                name="rule_embed")(rule_ids)
    # Rule bias parameters
    rule_biases = Embedding(num_rules+1, 1,
                            embeddings_initializer='uniform', embeddings_regularizer=None, activity_regularizer=None,
                            embeddings_constraint=None, mask_zero=True, input_length=max_rule_seq_length,
                            name="rule_bias")(rule_ids)
    # Compute attention scores: dot product of the student representation
    # with each rule embedding, plus a per-rule bias.
    att_scores = tf.keras.layers.Dot(axes=[1, 2])([x_hidden, rule_embeddings])
    att_scores = tf.keras.layers.Add()([att_scores, tf.keras.backend.squeeze(rule_biases, axis=-1)])
    # Sigmoid (not softmax): each rule gets an independent [0, 1] weight.
    att_sigmoid_proba = Lambda(lambda x: tf.keras.activations.sigmoid(x), name='attention')(att_scores)
    # Weighted sum of the one-hot rule predictions by attention weight.
    outputs = tf.keras.layers.Dot(axes=[1, 1], name='raw_outputs')([att_sigmoid_proba, rule_preds_onehot])
    # Add the uniform "random rule" mass for un-attended rules.
    outputs = Lambda(lambda x: normalize_with_random_rule(x[0], x[1], x[2]), name='outputs_with_uniform')((outputs, att_sigmoid_proba, rule_preds_onehot))
    # Normalize Outputs
    outputs = Lambda(lambda x: l1_normalize(x, num_labels), name='normalized_outputs')(outputs)
    # Build Model
    model = tf.keras.Model(inputs=[student_embeddings, rule_ids, rule_preds_onehot], outputs=outputs)
    print(model.summary())
    return model
def MinEntropyLoss(batch_size):
    """Return an entropy-minimization loss for unsupervised training.

    The labels (`y_true`) are ignored; the loss is the mean Shannon entropy
    of the predicted distribution, averaged with the given global batch size.

    :param batch_size: global batch size used by tf.nn.compute_average_loss
    :return: a callable loss(y_true, y_prob)
    """
    def loss(y_true, y_prob):
        # Per-example entropy term; NOTE(review): y_prob == 0 would yield
        # log(0) = -inf — presumably upstream normalization keeps it positive.
        entropy = -y_prob * tf.math.log(y_prob)
        return tf.nn.compute_average_loss(entropy, global_batch_size=batch_size)
    return loss
class MajorityVoter:
    """
    Predicts probabilities using the majority vote of the weak sources
    Code adapted from the Snorkel source:
    https://github.com/snorkel-team/snorkel/blob/b3b0669f716a7b3ed6cd573b57f3f8e12bcd495a/snorkel/labeling/model/baselines.py
    """

    def __init__(self, num_labels):
        # num_labels: number of classes a rule can vote for
        self.num_labels = num_labels

    def predict(self, rule_pred):
        """Return hard labels from majority voting; -1 on ties.

        :param rule_pred: (n, m) int array of rule votes, -1 = abstain
        :return: (n,) int array of labels, -1 where the vote is tied
        """
        Y_probs = self.predict_proba(rule_pred)
        Y_p = self.probs_to_preds(Y_probs)
        return Y_p

    def predict_proba(self, rule_pred):
        """Return (n, num_labels) probabilities, uniform over the tied winners.

        Rows where every rule abstains get a uniform distribution.
        """
        n, m = rule_pred.shape
        pred = np.zeros((n, self.num_labels))
        for i in range(n):
            counts = np.zeros(self.num_labels)
            for j in range(m):
                if rule_pred[i, j] != -1:
                    counts[rule_pred[i, j]] += 1
            # Indicator over the argmax set; all-abstain rows are all ones.
            pred[i, :] = np.where(counts == max(counts), 1, 0)
        pred /= pred.sum(axis=1).reshape(-1, 1)
        return pred

    def probs_to_preds(self, probs):
        """Collapse probabilities to hard labels; -1 when the max is not unique."""
        num_datapoints, num_classes = probs.shape
        Y_pred = np.empty(num_datapoints)
        diffs = np.abs(probs - probs.max(axis=1).reshape(-1, 1))
        for i in range(num_datapoints):
            # Tolerance handles float noise from the normalization above.
            max_idxs = np.where(diffs[i, :] < 1e-5)[0]
            if len(max_idxs) == 1:
                Y_pred[i] = max_idxs[0]
            else:
                Y_pred[i] = -1
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # it was always an alias for the builtin int.
        return Y_pred.astype(int)
def to_one_hot(x, num_classes):
    """One-hot encode *x* (a scalar label or a sequence of labels).

    :param x: int label or sequence of int labels
    :param num_classes: number of classes (one-hot width)
    :return: (len(x), num_classes) float array of one-hot rows
    """
    indices = np.ravel(np.array([x]))
    identity = np.eye(num_classes)
    return identity[indices]
def create_learning_rate_scheduler(max_learn_rate=5e-5,
                                   end_learn_rate=1e-7,
                                   warmup_epoch_count=10,
                                   total_epoch_count=90):
    """Return a Keras callback with linear warmup then exponential decay.

    :param max_learn_rate: peak learning rate reached at the end of warmup
    :param end_learn_rate: learning rate at total_epoch_count
    :param warmup_epoch_count: epochs of linear ramp-up
    :param total_epoch_count: total scheduled epochs
    :return: tf.keras.callbacks.LearningRateScheduler
    """
    def lr_scheduler(epoch):
        if epoch < warmup_epoch_count:
            # Linear ramp from max_learn_rate/warmup_epoch_count up to the peak.
            return float((max_learn_rate / warmup_epoch_count) * (epoch + 1))
        # Exponential interpolation from max_learn_rate down to end_learn_rate.
        decay = math.log(end_learn_rate / max_learn_rate) * (epoch - warmup_epoch_count + 1) / (
                total_epoch_count - warmup_epoch_count + 1)
        return float(max_learn_rate * math.exp(decay))
    return tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1)
def l1_normalize(x, num_labels):
    """L1-normalize *x* along its last axis (no gradient through the norm).

    A small epsilon is added first so the sum is never exactly zero.
    """
    shifted = x + 1e-05  # avoid stability issues
    norm = tf.keras.backend.stop_gradient(tf.keras.backend.sum(shifted, axis=-1))
    # Broadcast the scalar norm back to num_labels columns for the division.
    norm = tf.keras.backend.repeat_elements(tf.keras.backend.expand_dims(norm), num_labels, axis=-1)
    return shifted / norm
def normalize_with_random_rule(output, att_sigmoid_proba, rule_preds_onehot):
    """Add a uniform "random rule" contribution to the attention-weighted output.

    For each instance, the attention mass NOT assigned to real (non-masked)
    rules is redistributed uniformly over the labels.
    """
    num_labels = rule_preds_onehot.shape[-1]
    # sum_prob > 0 marks slots holding a real rule prediction (masked slots are all-zero one-hots)
    sum_prob = tf.keras.backend.stop_gradient(tf.keras.backend.sum(rule_preds_onehot, axis=-1))
    rule_mask = tf.keras.backend.cast(sum_prob > 0, 'float32')
    # Number of real rules that fired per instance
    num_rules = tf.keras.backend.cast(tf.keras.backend.sum(sum_prob, axis=-1), 'float32')
    masked_att_proba = att_sigmoid_proba * rule_mask
    sum_masked_att_proba = tf.keras.backend.sum(masked_att_proba, axis=-1)
    # Leftover attention mass: num_rules minus the attention actually assigned
    uniform_rule_att_proba = num_rules - sum_masked_att_proba
    # Uniform distribution over labels, one row per instance in the batch
    uniform_vec = tf.ones((tf.shape(uniform_rule_att_proba)[0], num_labels)) / num_labels
    uniform_pred = tf.math.multiply(
        tf.keras.backend.repeat_elements(tf.keras.backend.expand_dims(uniform_rule_att_proba), num_labels, axis=-1),
        uniform_vec)
    output_with_uniform_rule = output+uniform_pred
    return output_with_uniform_rule | [
"tensorflow.random.set_seed",
"tensorflow.nn.compute_average_loss",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"numpy.empty",
"numpy.ones",
"tensorflow.keras.initializers.GlorotUniform",
"tensorflow.keras.activations.sigmoid",
"tensorflow.keras.callbacks.EarlyS... | [((11507, 11565), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(student_emb_dim,)', 'name': '"""student_embeddings"""'}), "(shape=(student_emb_dim,), name='student_embeddings')\n", (11512, 11565), False, 'from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda\n'), ((11582, 11650), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(max_rule_seq_length,)', 'dtype': 'tf.int32', 'name': '"""rule_ids"""'}), "(shape=(max_rule_seq_length,), dtype=tf.int32, name='rule_ids')\n", (11587, 11650), False, 'from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda\n'), ((11676, 11741), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(max_rule_seq_length, num_labels)', 'name': '"""rule_preds"""'}), "(shape=(max_rule_seq_length, num_labels), name='rule_preds')\n", (11681, 11741), False, 'from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda\n'), ((13620, 13713), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[student_embeddings, rule_ids, rule_preds_onehot]', 'outputs': 'outputs'}), '(inputs=[student_embeddings, rule_ids, rule_preds_onehot],\n outputs=outputs)\n', (13634, 13713), True, 'import tensorflow as tf\n'), ((16208, 16273), 'tensorflow.keras.callbacks.LearningRateScheduler', 'tf.keras.callbacks.LearningRateScheduler', (['lr_scheduler'], {'verbose': '(1)'}), '(lr_scheduler, verbose=1)\n', (16248, 16273), True, 'import tensorflow as tf\n'), ((16850, 16896), 'tensorflow.keras.backend.cast', 'tf.keras.backend.cast', (['(sum_prob > 0)', '"""float32"""'], {}), "(sum_prob > 0, 'float32')\n", (16871, 16896), True, 'import tensorflow as tf\n'), ((17070, 17117), 'tensorflow.keras.backend.sum', 'tf.keras.backend.sum', (['masked_att_proba'], {'axis': '(-1)'}), '(masked_att_proba, axis=-1)\n', (17090, 17117), True, 'import tensorflow as tf\n'), ((581, 617), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['self.manual_seed'], {}), 
'(self.manual_seed)\n', (599, 617), True, 'import tensorflow as tf\n'), ((3383, 3407), 'numpy.array', 'np.array', (['fired_rule_ids'], {}), '(fired_rule_ids)\n', (3391, 3407), True, 'import numpy as np\n'), ((5681, 5698), 'numpy.array', 'np.array', (['x_unsup'], {}), '(x_unsup)\n', (5689, 5698), True, 'import numpy as np\n'), ((7725, 7789), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (7770, 7789), True, 'import tensorflow as tf\n'), ((9533, 9544), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (9541, 9544), True, 'import numpy as np\n'), ((10226, 10275), 'tensorflow.keras.Model', 'tf.keras.Model', (['self.model.inputs', 'desiredOutputs'], {}), '(self.model.inputs, desiredOutputs)\n', (10240, 10275), True, 'import tensorflow as tf\n'), ((11795, 11817), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dense_dropout'], {}), '(dense_dropout)\n', (11802, 11817), False, 'from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda\n'), ((11854, 11903), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(128)', 'activation': '"""relu"""', 'name': '"""dense"""'}), "(units=128, activation='relu', name='dense')\n", (11859, 11903), False, 'from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda\n'), ((11930, 11952), 'tensorflow.keras.layers.Dropout', 'Dropout', (['dense_dropout'], {}), '(dense_dropout)\n', (11937, 11952), False, 'from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda\n'), ((12568, 12794), 'tensorflow.keras.layers.Embedding', 'Embedding', (['(num_rules + 1)', '(1)'], {'embeddings_initializer': '"""uniform"""', 'embeddings_regularizer': 'None', 'activity_regularizer': 'None', 'embeddings_constraint': 'None', 'mask_zero': '(True)', 'input_length': 'max_rule_seq_length', 'name': '"""rule_bias"""'}), "(num_rules + 1, 1, embeddings_initializer='uniform',\n 
embeddings_regularizer=None, activity_regularizer=None,\n embeddings_constraint=None, mask_zero=True, input_length=\n max_rule_seq_length, name='rule_bias')\n", (12577, 12794), False, 'from tensorflow.keras.layers import Embedding, Input, Dropout, Dense, Lambda\n'), ((12929, 12961), 'tensorflow.keras.layers.Dot', 'tf.keras.layers.Dot', ([], {'axes': '[1, 2]'}), '(axes=[1, 2])\n', (12948, 12961), True, 'import tensorflow as tf\n'), ((13009, 13030), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (13028, 13030), True, 'import tensorflow as tf\n'), ((13213, 13265), 'tensorflow.keras.layers.Dot', 'tf.keras.layers.Dot', ([], {'axes': '[1, 1]', 'name': '"""raw_outputs"""'}), "(axes=[1, 1], name='raw_outputs')\n", (13232, 13265), True, 'import tensorflow as tf\n'), ((13898, 13972), 'tensorflow.nn.compute_average_loss', 'tf.nn.compute_average_loss', (['per_example_loss'], {'global_batch_size': 'batch_size'}), '(per_example_loss, global_batch_size=batch_size)\n', (13924, 13972), True, 'import tensorflow as tf\n'), ((14592, 14622), 'numpy.zeros', 'np.zeros', (['(n, self.num_labels)'], {}), '((n, self.num_labels))\n', (14600, 14622), True, 'import numpy as np\n'), ((15067, 15091), 'numpy.empty', 'np.empty', (['num_datapoints'], {}), '(num_datapoints)\n', (15075, 15091), True, 'import numpy as np\n'), ((15513, 15532), 'numpy.eye', 'np.eye', (['num_classes'], {}), '(num_classes)\n', (15519, 15532), True, 'import numpy as np\n'), ((16441, 16473), 'tensorflow.keras.backend.sum', 'tf.keras.backend.sum', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (16461, 16473), True, 'import tensorflow as tf\n'), ((16523, 16560), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['l1_norm'], {}), '(l1_norm)\n', (16551, 16560), True, 'import tensorflow as tf\n'), ((16783, 16831), 'tensorflow.keras.backend.sum', 'tf.keras.backend.sum', (['rule_preds_onehot'], {'axis': '(-1)'}), '(rule_preds_onehot, axis=-1)\n', (16803, 16831), True, 'import tensorflow 
as tf\n'), ((16936, 16975), 'tensorflow.keras.backend.sum', 'tf.keras.backend.sum', (['sum_prob'], {'axis': '(-1)'}), '(sum_prob, axis=-1)\n', (16956, 16975), True, 'import tensorflow as tf\n'), ((3472, 3487), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (3479, 3487), True, 'import numpy as np\n'), ((3907, 3964), 'numpy.concatenate', 'np.concatenate', (['[student_pred, one_hot_rule_pred]'], {'axis': '(1)'}), '([student_pred, one_hot_rule_pred], axis=1)\n', (3921, 3964), True, 'import numpy as np\n'), ((3990, 4035), 'numpy.concatenate', 'np.concatenate', (['[mask_one, rule_mask]'], {'axis': '(1)'}), '([mask_one, rule_mask], axis=1)\n', (4004, 4035), True, 'import numpy as np\n'), ((4252, 4309), 'numpy.concatenate', 'np.concatenate', (['[student_rule_id, fired_rule_ids]'], {'axis': '(1)'}), '([student_rule_id, fired_rule_ids], axis=1)\n', (4266, 4309), True, 'import numpy as np\n'), ((4770, 4787), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (4778, 4787), True, 'import numpy as np\n'), ((4811, 4828), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (4819, 4828), True, 'import numpy as np\n'), ((5257, 5272), 'numpy.array', 'np.array', (['x_dev'], {}), '(x_dev)\n', (5265, 5272), True, 'import numpy as np\n'), ((5294, 5309), 'numpy.array', 'np.array', (['y_dev'], {}), '(y_dev)\n', (5302, 5309), True, 'import numpy as np\n'), ((9683, 9721), 'numpy.array', 'np.array', (['([False] * rule_pred.shape[0])'], {}), '([False] * rule_pred.shape[0])\n', (9691, 9721), True, 'import numpy as np\n'), ((13044, 13090), 'tensorflow.keras.backend.squeeze', 'tf.keras.backend.squeeze', (['rule_biases'], {'axis': '(-1)'}), '(rule_biases, axis=-1)\n', (13068, 13090), True, 'import tensorflow as tf\n'), ((13862, 13881), 'tensorflow.math.log', 'tf.math.log', (['y_prob'], {}), '(y_prob)\n', (13873, 13881), True, 'import tensorflow as tf\n'), ((14673, 14698), 'numpy.zeros', 'np.zeros', (['self.num_labels'], {}), '(self.num_labels)\n', (14681, 14698), True, 
'import numpy as np\n'), ((15475, 15488), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (15483, 15488), True, 'import numpy as np\n'), ((17352, 17404), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['uniform_rule_att_proba'], {}), '(uniform_rule_att_proba)\n', (17380, 17404), True, 'import tensorflow as tf\n'), ((3146, 3197), 'tensorflow.one_hot', 'tf.one_hot', (['non_zero_rule_pred', '(self.num_labels + 1)'], {}), '(non_zero_rule_pred, self.num_labels + 1)\n', (3156, 3197), True, 'import tensorflow as tf\n'), ((3559, 3594), 'numpy.squeeze', 'np.squeeze', (['student_pred'], {'axis': 'None'}), '(student_pred, axis=None)\n', (3569, 3594), True, 'import numpy as np\n'), ((4205, 4221), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (4213, 4221), True, 'import numpy as np\n'), ((6872, 6898), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (6896, 6898), True, 'import tensorflow as tf\n'), ((7143, 7176), 'numpy.array', 'np.array', (['([-1] * x_unsup.shape[0])'], {}), '([-1] * x_unsup.shape[0])\n', (7151, 7176), True, 'import numpy as np\n'), ((7828, 7854), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (7852, 7854), True, 'import tensorflow as tf\n'), ((10424, 10450), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (10433, 10450), True, 'import numpy as np\n'), ((10577, 10600), 'numpy.max', 'np.max', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (10583, 10600), True, 'import numpy as np\n'), ((12211, 12257), 'tensorflow.keras.initializers.GlorotUniform', 'tf.keras.initializers.GlorotUniform', ([], {'seed': 'seed'}), '(seed=seed)\n', (12246, 12257), True, 'import tensorflow as tf\n'), ((13135, 13166), 'tensorflow.keras.activations.sigmoid', 'tf.keras.activations.sigmoid', (['x'], {}), '(x)\n', (13163, 13166), True, 'import tensorflow as tf\n'), ((15225, 15254), 'numpy.where', 'np.where', (['(diffs[i, :] < 1e-05)'], 
{}), '(diffs[i, :] < 1e-05)\n', (15233, 15254), True, 'import numpy as np\n'), ((3749, 3780), 'numpy.argmax', 'np.argmax', (['student_pred'], {'axis': '(1)'}), '(student_pred, axis=1)\n', (3758, 3780), True, 'import numpy as np\n'), ((4112, 4127), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (4119, 4127), True, 'import numpy as np\n'), ((6979, 7033), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""acc"""'}), "(name='acc')\n", (7021, 7033), True, 'import tensorflow as tf\n'), ((7540, 7605), 'tensorflow.keras.callbacks.EarlyStopping', 'K.callbacks.EarlyStopping', ([], {'patience': '(20)', 'restore_best_weights': '(True)'}), '(patience=20, restore_best_weights=True)\n', (7565, 7605), True, 'import tensorflow.keras as K\n'), ((7939, 7993), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""acc"""'}), "(name='acc')\n", (7981, 7993), True, 'import tensorflow as tf\n'), ((8472, 8537), 'tensorflow.keras.callbacks.EarlyStopping', 'K.callbacks.EarlyStopping', ([], {'patience': '(20)', 'restore_best_weights': '(True)'}), '(patience=20, restore_best_weights=True)\n', (8497, 8537), True, 'import tensorflow.keras as K\n'), ((17209, 17241), 'tensorflow.shape', 'tf.shape', (['uniform_rule_att_proba'], {}), '(uniform_rule_att_proba)\n', (17217, 17241), True, 'import tensorflow as tf\n'), ((2527, 2540), 'numpy.nonzero', 'np.nonzero', (['x'], {}), '(x)\n', (2537, 2540), True, 'import numpy as np\n'), ((15997, 16038), 'math.log', 'math.log', (['(end_learn_rate / max_learn_rate)'], {}), '(end_learn_rate / max_learn_rate)\n', (16005, 16038), False, 'import math\n')] |
import os
import sys
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn import metrics
sys.path.append(os.path.abspath(os.path.join("./libs/residual2vec")))
from residual2vec.residual2vec import _find_blocks_by_sbm
sys.path.append(os.path.abspath(os.path.join("./libs/graph_embeddings")))
from graph_embeddings import utils
def log_trans_prob(A, window_length, restart_prob=0):
    """Exact log transition probabilities of a random walk with restart.

    :param A: adjacency matrix (sparse)
    :param window_length: number of walk steps to aggregate
    :param restart_prob: restart probability; steps are weighted by
        (1 - restart_prob)**t, normalized to sum to one
    :return: sparse matrix whose stored data are log transition probabilities
    """
    step_weights = np.power((1 - restart_prob), np.arange(window_length))
    step_weights = step_weights / np.sum(step_weights)
    trans = utils.to_trans_mat(A)
    trans = utils.calc_rwr(trans, None, window_length, w=step_weights)
    trans.data = utils.safe_log(trans.data)
    return trans
def approx_log_trans_prob(
    A, window_length, beta=0.66, restart_prob=0, approx_order=2, directed=False
):
    """Approximate log transition probabilities, factored as matrix products.

    Walk steps up to `approx_order` are computed exactly; longer steps are
    approximated through a stochastic block model (SBM) fitted with
    `_find_blocks_by_sbm`.  The return value is a list of lists of matrix
    factors (consumed downstream via utils.mat_prod_matrix_seq) rather than
    a single dense matrix, so the full N x N log matrix is never materialized.

    :param A: adjacency matrix (sparse)
    :param window_length: number of walk steps to aggregate
    :param beta: exponent controlling the number of SBM blocks, K ~ N**beta
    :param restart_prob: restart probability of the walk
    :param approx_order: number of exact steps before switching to the SBM
    :param directed: whether to fit a directed SBM
    :return: list of factor sequences [[...], [...], ...]
    """
    # Short windows need no approximation: fall back to the exact computation.
    if window_length <= approx_order:
        logP = log_trans_prob(A, window_length=window_length, restart_prob=restart_prob)
        return [[logP]]
    # Number of SBM blocks grows sublinearly with the number of nodes.
    K = np.ceil(np.power(A.shape[0], beta)).astype(int)
    cids = _find_blocks_by_sbm(A, K, directed=directed)
    num_nodes = A.shape[0]
    num_coms = np.max(cids) + 1
    # U: node-to-community membership indicator (num_nodes x num_coms)
    U = sparse.csr_matrix(
        (np.ones(num_nodes), (np.arange(num_nodes), cids)), shape=(num_nodes, num_coms)
    )
    # Compute the parameter for the SBM
    din = np.array(A.sum(axis=0)).reshape(-1)
    Din = np.array(din @ U).reshape(-1)
    # theta_in: node's share of its community's total in-degree
    theta_in = din / Din[cids]
    adj_com = U.T @ A @ U
    trans_com = utils.to_trans_mat(adj_com)
    #
    # Precompute the transition matrix for short steps
    #
    P = utils.to_trans_mat(A)
    Pt = [P]
    for t in range(1, approx_order):
        Pt += [P @ Pt[t - 1]]
    #
    # Calculate the trans prob for short steps
    #
    weights = np.power((1 - restart_prob), np.arange(window_length))
    weights = weights / np.sum(weights)
    Pshort = weights[0] * P.copy()
    for t in range(1, approx_order):
        Pshort += weights[t] * Pt[t]
    #
    # Approximate the long steps by SBM
    #
    # Compute the trans prob from node to a community
    n2c = utils.calc_rwr(
        trans_com, None, window_length - approx_order, w=weights[approx_order:]
    ).toarray()
    n2c = np.array((Pt[approx_order - 1] @ U) @ n2c)
    # Make a mask to prevent exp(0) = 1. 0 in n2 means 0
    mask = n2c.copy()
    mask[mask > 0] = 1
    # Take logs only over positive entries to avoid log(0) = -inf.
    log_theta_in = theta_in.copy()
    log_theta_in[theta_in > 0] = np.log(theta_in[theta_in > 0])
    ThetaOut = sparse.csr_matrix(U.T) @ sparse.diags(log_theta_in)
    logn2c = n2c.copy()
    logn2c[n2c > 0] = np.log(logn2c[n2c > 0])
    # Long-step log prob factored as two low-rank products (node->com, com->node).
    logP_long = [
        [logn2c, sparse.csr_matrix(U.T)],
        [mask, ThetaOut],
    ]
    #
    # Merge
    #
    # Correct the sparse short-step entries so short + long combine consistently.
    r, c, v = sparse.find(Pshort)
    v = (
        np.log(v + n2c[(r, cids[c])] * theta_in[c])
        - logn2c[(r, cids[c])]
        - np.log(theta_in[c])
    )
    # utils.safe_log(
    # n2c[(r, cids[c])] * theta_in[c]
    # )
    logP_short = sparse.csr_matrix(
        (np.array(v).reshape(-1), (r, c)), shape=(num_nodes, num_nodes)
    )
    logP = [[logP_short]] + logP_long
    return logP
# Parameters: taken from the Snakemake rule when run under Snakemake
# (Snakemake injects a `snakemake` module/global), otherwise hard-coded
# defaults for interactive runs.
if "snakemake" in sys.modules:
    edge_file = snakemake.input["edge_file"]
    output_file = snakemake.output["output_file"]
    approx_order = snakemake.params["approx_order"]
    sample_max = 20000
else:
    edge_file = "../../../data/link-prediction/data/polblogs.csv"
    output_file = "../../../data/log-trans-approximation/yypred-polblogs.csv"
    approx_order = 2
    sample_max = 20000
# Build the adjacency matrix from the (src, trg) edge list.
edge_table = pd.read_csv(edge_file, header=None, names=["src", "trg"]).astype(int)
N = np.max(edge_table.values) + 1
net = sparse.csr_matrix(
    (np.ones(edge_table.shape[0]), (edge_table["src"], edge_table["trg"])), shape=(N, N)
)
#
# Load networks
#
net = net + 0.01 * net.T  # to prevent the dangling nodes
#
# Calculate the log probability
#
# For several window lengths, compare the exact log transition probabilities
# (y) against the SBM approximation (ypred).
dflist = []
for window_length in [3, 5, 10, 20]:
    logPapx = approx_log_trans_prob(
        net, window_length, approx_order=approx_order, directed=True
    )
    # Densify the factored approximation by multiplying with the identity.
    logPapx = utils.mat_prod_matrix_seq(logPapx, np.eye(net.shape[0]))
    logP = log_trans_prob(net, window_length)
    #
    # Formatting
    #
    y, ypred = logP.toarray().reshape(-1), logPapx.reshape(-1)
    # Keep only entries where the exact value is nonzero.
    s = ~np.isclose(y, 0)
    y, ypred = y[s], ypred[s]
    #
    # Save results
    #
    df = pd.DataFrame({"y": y, "ypred": ypred})
    df["window_length"] = window_length
    # Subsample large comparisons to keep the output file manageable.
    if df.shape[0] > sample_max:
        df = df.sample(sample_max)
    dflist += [df]
df = pd.concat(dflist, ignore_index=True)
df.to_csv(output_file)
# %%
| [
"numpy.sum",
"pandas.read_csv",
"numpy.ones",
"numpy.isclose",
"numpy.arange",
"graph_embeddings.utils.to_trans_mat",
"os.path.join",
"pandas.DataFrame",
"numpy.power",
"residual2vec.residual2vec._find_blocks_by_sbm",
"numpy.max",
"pandas.concat",
"graph_embeddings.utils.calc_rwr",
"scipy.... | [((4431, 4467), 'pandas.concat', 'pd.concat', (['dflist'], {'ignore_index': '(True)'}), '(dflist, ignore_index=True)\n', (4440, 4467), True, 'import pandas as pd\n'), ((526, 547), 'graph_embeddings.utils.to_trans_mat', 'utils.to_trans_mat', (['A'], {}), '(A)\n', (544, 547), False, 'from graph_embeddings import utils\n'), ((556, 605), 'graph_embeddings.utils.calc_rwr', 'utils.calc_rwr', (['P', 'None', 'window_length'], {'w': 'weights'}), '(P, None, window_length, w=weights)\n', (570, 605), False, 'from graph_embeddings import utils\n'), ((619, 641), 'graph_embeddings.utils.safe_log', 'utils.safe_log', (['P.data'], {}), '(P.data)\n', (633, 641), False, 'from graph_embeddings import utils\n'), ((987, 1031), 'residual2vec.residual2vec._find_blocks_by_sbm', '_find_blocks_by_sbm', (['A', 'K'], {'directed': 'directed'}), '(A, K, directed=directed)\n', (1006, 1031), False, 'from residual2vec.residual2vec import _find_blocks_by_sbm\n'), ((1413, 1440), 'graph_embeddings.utils.to_trans_mat', 'utils.to_trans_mat', (['adj_com'], {}), '(adj_com)\n', (1431, 1440), False, 'from graph_embeddings import utils\n'), ((1517, 1538), 'graph_embeddings.utils.to_trans_mat', 'utils.to_trans_mat', (['A'], {}), '(A)\n', (1535, 1538), False, 'from graph_embeddings import utils\n'), ((2137, 2177), 'numpy.array', 'np.array', (['(Pt[approx_order - 1] @ U @ n2c)'], {}), '(Pt[approx_order - 1] @ U @ n2c)\n', (2145, 2177), True, 'import numpy as np\n'), ((2352, 2382), 'numpy.log', 'np.log', (['theta_in[theta_in > 0]'], {}), '(theta_in[theta_in > 0])\n', (2358, 2382), True, 'import numpy as np\n'), ((2497, 2520), 'numpy.log', 'np.log', (['logn2c[n2c > 0]'], {}), '(logn2c[n2c > 0])\n', (2503, 2520), True, 'import numpy as np\n'), ((2653, 2672), 'scipy.sparse.find', 'sparse.find', (['Pshort'], {}), '(Pshort)\n', (2664, 2672), False, 'from scipy import sparse\n'), ((3526, 3551), 'numpy.max', 'np.max', (['edge_table.values'], {}), '(edge_table.values)\n', (3532, 3551), True, 'import numpy as 
np\n'), ((4258, 4296), 'pandas.DataFrame', 'pd.DataFrame', (["{'y': y, 'ypred': ypred}"], {}), "({'y': y, 'ypred': ypred})\n", (4270, 4296), True, 'import pandas as pd\n'), ((147, 182), 'os.path.join', 'os.path.join', (['"""./libs/residual2vec"""'], {}), "('./libs/residual2vec')\n", (159, 182), False, 'import os\n'), ((276, 315), 'os.path.join', 'os.path.join', (['"""./libs/graph_embeddings"""'], {}), "('./libs/graph_embeddings')\n", (288, 315), False, 'import os\n'), ((452, 476), 'numpy.arange', 'np.arange', (['window_length'], {}), '(window_length)\n', (461, 476), True, 'import numpy as np\n'), ((502, 517), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (508, 517), True, 'import numpy as np\n'), ((1075, 1087), 'numpy.max', 'np.max', (['cids'], {}), '(cids)\n', (1081, 1087), True, 'import numpy as np\n'), ((1722, 1746), 'numpy.arange', 'np.arange', (['window_length'], {}), '(window_length)\n', (1731, 1746), True, 'import numpy as np\n'), ((1772, 1787), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (1778, 1787), True, 'import numpy as np\n'), ((2398, 2420), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['U.T'], {}), '(U.T)\n', (2415, 2420), False, 'from scipy import sparse\n'), ((2423, 2449), 'scipy.sparse.diags', 'sparse.diags', (['log_theta_in'], {}), '(log_theta_in)\n', (2435, 2449), False, 'from scipy import sparse\n'), ((2776, 2795), 'numpy.log', 'np.log', (['theta_in[c]'], {}), '(theta_in[c])\n', (2782, 2795), True, 'import numpy as np\n'), ((3452, 3509), 'pandas.read_csv', 'pd.read_csv', (['edge_file'], {'header': 'None', 'names': "['src', 'trg']"}), "(edge_file, header=None, names=['src', 'trg'])\n", (3463, 3509), True, 'import pandas as pd\n'), ((3586, 3614), 'numpy.ones', 'np.ones', (['edge_table.shape[0]'], {}), '(edge_table.shape[0])\n', (3593, 3614), True, 'import numpy as np\n'), ((3999, 4019), 'numpy.eye', 'np.eye', (['net.shape[0]'], {}), '(net.shape[0])\n', (4005, 4019), True, 'import numpy as np\n'), ((4170, 4186), 
'numpy.isclose', 'np.isclose', (['y', '(0)'], {}), '(y, 0)\n', (4180, 4186), True, 'import numpy as np\n'), ((1128, 1146), 'numpy.ones', 'np.ones', (['num_nodes'], {}), '(num_nodes)\n', (1135, 1146), True, 'import numpy as np\n'), ((1310, 1327), 'numpy.array', 'np.array', (['(din @ U)'], {}), '(din @ U)\n', (1318, 1327), True, 'import numpy as np\n'), ((2015, 2107), 'graph_embeddings.utils.calc_rwr', 'utils.calc_rwr', (['trans_com', 'None', '(window_length - approx_order)'], {'w': 'weights[approx_order:]'}), '(trans_com, None, window_length - approx_order, w=weights[\n approx_order:])\n', (2029, 2107), False, 'from graph_embeddings import utils\n'), ((2557, 2579), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['U.T'], {}), '(U.T)\n', (2574, 2579), False, 'from scipy import sparse\n'), ((2691, 2732), 'numpy.log', 'np.log', (['(v + n2c[r, cids[c]] * theta_in[c])'], {}), '(v + n2c[r, cids[c]] * theta_in[c])\n', (2697, 2732), True, 'import numpy as np\n'), ((936, 962), 'numpy.power', 'np.power', (['A.shape[0]', 'beta'], {}), '(A.shape[0], beta)\n', (944, 962), True, 'import numpy as np\n'), ((1149, 1169), 'numpy.arange', 'np.arange', (['num_nodes'], {}), '(num_nodes)\n', (1158, 1169), True, 'import numpy as np\n'), ((2918, 2929), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2926, 2929), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""@package Methods.Geometry.Arc1.get_center
Compute the coordinate of the center of an Arc1 method
@date Created on Fri Dec 05 13:37:19 2014
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
from numpy import abs as np_abs, angle as np_angle, arcsin, exp, pi
def get_center(self):
    """Return the center of the arc

    Parameters
    ----------
    self : Arc1
        An Arc1 object

    Returns
    -------
    Zc: complex
        Complex coordinates of the center of the Arc1
    """
    self.check()
    # The center lies on the perpendicular bisector of [begin, end]
    Z1, Z2, R = self.begin, self.end, self.radius
    if abs(abs(Z2 - Z1) - abs(2 * R)) < 1e-6:
        # The chord equals the diameter: the center is simply the midpoint
        center = (Z2 + Z1) / 2.0
    else:
        # Opening angle of the arc (Begin-Center-End)
        alpha = 2 * arcsin(abs(Z2 - Z1) / (2 * R))
        if R > 0:
            center = Z2 + R * exp(1j * (np_angle(Z2 - Z1) % (2 * pi))) * exp(
                1j * (pi / 2 + np_abs(alpha) / 2)
            )
        else:
            center = Z1 - R * exp(1j * (np_angle(Z1 - Z2) % (2 * pi))) * exp(
                1j * (pi / 2 + np_abs(alpha) / 2)
            )
    # Snap to the origin when numerically indistinguishable from it
    if np_abs(center) < 1e-6:
        center = 0
    return center
| [
"numpy.angle",
"numpy.abs"
] | [((1298, 1308), 'numpy.abs', 'np_abs', (['Zc'], {}), '(Zc)\n', (1304, 1308), True, 'from numpy import abs as np_abs, angle as np_angle, arcsin, exp, pi\n'), ((984, 1001), 'numpy.angle', 'np_angle', (['(z2 - z1)'], {}), '(z2 - z1)\n', (992, 1001), True, 'from numpy import abs as np_abs, angle as np_angle, arcsin, exp, pi\n'), ((1053, 1066), 'numpy.abs', 'np_abs', (['alpha'], {}), '(alpha)\n', (1059, 1066), True, 'from numpy import abs as np_abs, angle as np_angle, arcsin, exp, pi\n'), ((1136, 1153), 'numpy.angle', 'np_angle', (['(z1 - z2)'], {}), '(z1 - z2)\n', (1144, 1153), True, 'from numpy import abs as np_abs, angle as np_angle, arcsin, exp, pi\n'), ((1205, 1218), 'numpy.abs', 'np_abs', (['alpha'], {}), '(alpha)\n', (1211, 1218), True, 'from numpy import abs as np_abs, angle as np_angle, arcsin, exp, pi\n')] |
"""Migdal effect
"""
import numericalunits as nu
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy.integrate import dblquad
import wimprates as wr
# Module export machinery provided by wimprates (decorator + __all__ list)
export, __all__ = wr.exporter()

# Differential transition probabilities for Xe vs energy (eV)
df_migdal = pd.read_csv(wr.data_file('migdal/migdal_transition_ps.csv'))

# Relevant (n, l) electronic states
migdal_states = df_migdal.columns.values.tolist()
migdal_states.remove('E')

# Binding energies of the relevant Xenon electronic states
# From table II of 1707.07258
# Values are in eV; converted via nu.eV at the point of use (rate_migdal).
binding_es_for_migdal = dict(zip(
    migdal_states,
    np.array([3.5e4,
              5.4e3, 4.9e3,
              1.1e3, 9.3e2, 6.6e2,
              2e2, 1.4e2, 6.1e1,
              2.1e1, 9.8])))
def vmin_migdal(w, erec, mw):
    """Return minimum WIMP velocity to make a Migdal signal with energy w,
    given elastic recoil energy erec and WIMP mass mw.
    """
    # Elastic-scattering kinematic term plus the electronic (Migdal) term.
    v_elastic = (wr.mn() * erec / (2 * wr.mu_nucleus(mw) ** 2)) ** 0.5
    v_electron = w / (2 * wr.mn() * erec) ** 0.5
    return np.maximum(0, v_elastic + v_electron)
@export
@wr.vectorize_first
def rate_migdal(w, mw, sigma_nucleon, interaction='SI', m_med=float('inf'),
                include_approx_nr=False,
                t=None, halo_model=None, **kwargs):
    """Differential rate per unit detector mass and deposited ER energy of
    Migdal effect WIMP-nucleus scattering
    :param w: ER energy deposited in detector through Migdal effect
    :param mw: Mass of WIMP
    :param sigma_nucleon: WIMP/nucleon cross-section
    :param interaction: string describing DM-nucleus interaction.
        See sigma_erec for options
    :param m_med: Mediator mass. If not given, assumed very heavy.
    :param include_approx_nr: If True, instead return differential rate
        per *detected* energy, including the contribution of
        the simultaneous NR signal approximately, assuming q_{NR} = 0.15.
        This is how https://arxiv.org/abs/1707.07258
        presented the Migdal spectra.
    :param t: A J2000.0 timestamp.
        If not given, conservative velocity distribution is used.
    :param halo_model: class (default to standard halo model)
        containing velocity distribution
    :param progress_bar: if True, show a progress bar during evaluation
        (if w is an array)
    Further kwargs are passed to scipy.integrate.quad numeric integrator
    (e.g. error tolerance).
    """
    halo_model = wr.StandardHaloModel() if halo_model is None else halo_model
    # Convert to a 0/1 flag so it can be used directly as a multiplier below
    include_approx_nr = 1 if include_approx_nr else 0
    result = 0
    # Sum the rate over the contributing electronic states
    for state, binding_e in binding_es_for_migdal.items():
        binding_e *= nu.eV
        # Only consider n=3 and n=4
        # n=5 is the valence band so unreliable in in liquid
        # n=1,2 contribute very little
        if state[0] not in ['3', '4']:
            continue
        # Lookup for differential probability (units of ev^-1);
        # zero outside the tabulated energy range (fill_value=0)
        p = interp1d(df_migdal['E'].values * nu.eV,
                     df_migdal[state].values / nu.eV,
                     bounds_error=False,
                     fill_value=0)
        def diff_rate(v, erec):
            # Observed energy = energy of emitted electron
            # + binding energy of state
            eelec = w - binding_e - include_approx_nr * erec * 0.15
            if eelec < 0:
                return 0
            return (
                # Usual elastic differential rate,
                # common constants follow at end
                wr.sigma_erec(erec, v, mw, sigma_nucleon, interaction,
                              m_med=m_med)
                * v * halo_model.velocity_dist(v, t)
                # Migdal effect |Z|^2
                # TODO: ?? what is explicit (eV/c)**2 doing here?
                * (nu.me * (2 * erec / wr.mn())**0.5 / (nu.eV / nu.c0))**2
                / (2 * np.pi)
                * p(eelec))
        # Note dblquad expects the function to be f(y, x), not f(x, y)...
        # Outer variable: erec in [0, e_max]; inner variable: v in
        # [vmin_migdal(...), v_max], i.e. integrate over recoil energy
        # and WIMP speed.
        r = dblquad(
            diff_rate,
            0,
            wr.e_max(mw, wr.v_max(t, halo_model.v_esc)),
            lambda erec: vmin_migdal(w - include_approx_nr * erec * 0.15,
                                     erec, mw),
            lambda _: wr.v_max(t, halo_model.v_esc),
            **kwargs)[0]
        result += r
    # Common prefactor: local DM number density over WIMP mass, per nucleus
    return halo_model.rho_dm / mw * (1 / wr.mn()) * result
| [
"wimprates.data_file",
"numpy.maximum",
"wimprates.v_max",
"wimprates.exporter",
"wimprates.mu_nucleus",
"scipy.interpolate.interp1d",
"numpy.array",
"wimprates.StandardHaloModel",
"wimprates.mn",
"wimprates.sigma_erec"
] | [((206, 219), 'wimprates.exporter', 'wr.exporter', ([], {}), '()\n', (217, 219), True, 'import wimprates as wr\n'), ((307, 354), 'wimprates.data_file', 'wr.data_file', (['"""migdal/migdal_transition_ps.csv"""'], {}), "('migdal/migdal_transition_ps.csv')\n", (319, 354), True, 'import wimprates as wr\n'), ((1039, 1055), 'numpy.maximum', 'np.maximum', (['(0)', 'y'], {}), '(0, y)\n', (1049, 1055), True, 'import numpy as np\n'), ((616, 708), 'numpy.array', 'np.array', (['[35000.0, 5400.0, 4900.0, 1100.0, 930.0, 660.0, 200.0, 140.0, 61.0, 21.0, 9.8]'], {}), '([35000.0, 5400.0, 4900.0, 1100.0, 930.0, 660.0, 200.0, 140.0, 61.0,\n 21.0, 9.8])\n', (624, 708), True, 'import numpy as np\n'), ((2402, 2424), 'wimprates.StandardHaloModel', 'wr.StandardHaloModel', ([], {}), '()\n', (2422, 2424), True, 'import wimprates as wr\n'), ((2891, 3001), 'scipy.interpolate.interp1d', 'interp1d', (["(df_migdal['E'].values * nu.eV)", '(df_migdal[state].values / nu.eV)'], {'bounds_error': '(False)', 'fill_value': '(0)'}), "(df_migdal['E'].values * nu.eV, df_migdal[state].values / nu.eV,\n bounds_error=False, fill_value=0)\n", (2899, 3001), False, 'from scipy.interpolate import interp1d\n'), ((937, 944), 'wimprates.mn', 'wr.mn', ([], {}), '()\n', (942, 944), True, 'import wimprates as wr\n'), ((4309, 4316), 'wimprates.mn', 'wr.mn', ([], {}), '()\n', (4314, 4316), True, 'import wimprates as wr\n'), ((959, 976), 'wimprates.mu_nucleus', 'wr.mu_nucleus', (['mw'], {}), '(mw)\n', (972, 976), True, 'import wimprates as wr\n'), ((1007, 1014), 'wimprates.mn', 'wr.mn', ([], {}), '()\n', (1012, 1014), True, 'import wimprates as wr\n'), ((4014, 4043), 'wimprates.v_max', 'wr.v_max', (['t', 'halo_model.v_esc'], {}), '(t, halo_model.v_esc)\n', (4022, 4043), True, 'import wimprates as wr\n'), ((4190, 4219), 'wimprates.v_max', 'wr.v_max', (['t', 'halo_model.v_esc'], {}), '(t, halo_model.v_esc)\n', (4198, 4219), True, 'import wimprates as wr\n'), ((3466, 3533), 'wimprates.sigma_erec', 'wr.sigma_erec', 
(['erec', 'v', 'mw', 'sigma_nucleon', 'interaction'], {'m_med': 'm_med'}), '(erec, v, mw, sigma_nucleon, interaction, m_med=m_med)\n', (3479, 3533), True, 'import wimprates as wr\n'), ((3761, 3768), 'wimprates.mn', 'wr.mn', ([], {}), '()\n', (3766, 3768), True, 'import wimprates as wr\n')] |
from qutip import *
import numpy as np
import scipy
import itertools
import random
import matplotlib.pyplot as plt
import pickle
from time import time
# Pauli matrices as qutip operators, indexed 0 -> X, 1 -> Y, 2 -> Z
s = {0: sigmax(), 1: sigmay(), 2: sigmaz()}
#General qubit state, input as list of Bloch vector components, i.e. r = [rx, ry, rz]
def rho(r):
    """Density matrix of a qubit state from a Bloch vector.

    :param r: Bloch-vector components [rx, ry, rz]; renormalised to unit
        length if it is measurably off the unit sphere.
    :return: qutip Qobj, the 2x2 density matrix (I + r . sigma) / 2.
    :raises ValueError: if r is the zero vector (no direction to normalise).
    """
    r = np.asarray(r, dtype=float)
    norm = np.linalg.norm(r)
    if norm == 0:
        raise ValueError("Bloch vector must be non-zero")
    # Tolerant comparison instead of exact float equality (norm != 1
    # almost always holds for floats); also compute the norm only once.
    if not np.isclose(norm, 1.0):
        r = r / norm
    return (qeye(2) + sum(r[i] * s[i] for i in range(3))) / 2
#Parametrize 2-qubit density matrices, 16 parameters
def qubit2density(p):#p is list of 16 numbers
d = np.zeros([4, 4], dtype=complex)
p = list(p)
#Set diagonal elements
for i in range(4):
d[i][i] = p.pop(0)
#set real elements
for i in range(3):
for j in range(i+1, 4):
elem = p.pop(0)
#d[i][j] = elem
d[j][i] = elem
#set complex elements
for i in range(3):
for j in range(i+1, 4):
elem = p.pop(0)
#d[i][j] += elem*(-1j)
d[j][i] += elem*(1j)
d = d.T.conj() @ d
return d
def density_to_vector(density):
    """Flatten a 4x4 density matrix into a 16-tuple of real parameters.

    Order: the 4 (real) diagonal entries, then the real parts of the
    strictly upper triangle (pairs (0,1), (0,2), ..., (2,3)), then the
    imaginary parts of the same entries.
    """
    d = np.array(density, dtype=complex)
    upper = [(i, j) for i in range(3) for j in range(i + 1, 4)]
    components = [np.real(d[k][k]) for k in range(4)]
    components += [np.real(d[i][j]) for i, j in upper]
    components += [np.imag(d[i][j]) for i, j in upper]
    return tuple(components)
#Convert simulated statistics to 2-qubit statistics
def convert_statistics(statistics, q1, q2, M, M2):
conv_stat = []
for i in range(len(statistics)):
s = M[statistics[i]]
c_s = s[q1] + s[q2]
conv_stat.append(M2.index(c_s))
return conv_stat
def maximum_likelihood(p, m_2_obs, frequencies):
    """Negative mean log-likelihood of the data under parameters p.

    :param p: 16 real parameters, passed to qubit2density.
    :param m_2_obs: indexable collection of measurement effects.
    :param frequencies: list of observed outcome indices (one per shot).
    :return: -(1/N) * sum_i n_i * log(p_i), suitable for a minimiser.
    """
    from collections import Counter
    density_matrix = qubit2density(p)
    # Count all outcomes once up front instead of calling
    # frequencies.count(i) inside the loop (was O(outcomes * shots)).
    counts = Counter(frequencies)
    max_sum = 0
    for i in range(len(m_2_obs)):
        # NOTE(review): '*' is elementwise for plain numpy arrays, so this
        # is tr(E o rho), not tr(E rho); behaviour kept as-is -- confirm
        # the intended operand types (qutip Qobj '*' is matrix product).
        prob = np.real(np.trace(m_2_obs[i] * density_matrix))
        if prob != 0:
            max_sum += counts[i] * np.log(prob)
    return np.real(-max_sum / len(frequencies))
#Function for fitting
def func(x, a, b, c):
return a - b / np.log(c * x)
# Log-likelihood function
def log_l(rho, observables, counts):
return sum([counts[obs] * np.log(np.real((observables[obs] @ rho).trace())) for obs in counts]) / sum(counts.values())
# R(rho) operator
def R(rho, observables, counts):
R = np.zeros((rho.shape[0], rho.shape[0]), dtype=complex)
for obs in counts:
R += (counts[obs] / (observables[obs] @ rho).trace()) * observables[obs]
R /= sum(counts.values())
return R
# Returns rho^{(k+1)} given rho (not diluted)
def RrR(rho, observables, marginals):
rhok = R(rho, observables, marginals) @ rho @ R(rho, observables, marginals)
return rhok / rhok.trace()
# Returns rho^{(k+1)} given rho and epsilon
def IRrR(rho, observables, marginals, epsilon):
M = (np.eye(len(rho)) + epsilon * R(rho, observables, marginals)) / (1 + epsilon)
rhok = M @ rho @ M
return rhok / rhok.trace()
def marginalize(marginals, qlist):
    """Sum outcome counts over all qubits not listed in qlist.

    Keys of `marginals` are outcome strings; the reduced key keeps the
    characters at the positions given by qlist, in that order.
    """
    reduced = {}
    for key, count in marginals.items():
        sub_key = ''.join(key[q] for q in qlist)
        reduced[sub_key] = reduced.get(sub_key, 0) + count
    return reduced
# Maximises the log-likelihood
def infer_state(marginals, qlist, observables, tol=1e-15, maxiter=1000, epsilon_range=1e-8, n_epsilons=1000):
"""
Returns the state that maximises the log-likelihood given the observations.
input:
marginals (dict): dictionary with the marginal counts for all the groups of k qubits under consideration.
qlist (tuple): qubits for which the maximisation is carried out.
observables (dict): dictionary with the effect (numpy array) corresponding to each outcome.
tol (float): tolerance for the convergence of the algorithm.
maxiter (int): maximum number of iterations.
epsilon_range (float): range in which random values of epsilon are sampled in the second and third phases.
n_epsilons (int): number of values of epsilon in the range (0, epsilon_range] to be maximised over in phase 3.
The format for the keys in 'marginals' and 'observables' is a chain of outcomes for the given POVM with
the same order as qlist. For instance, '031' corresponds to qlist[0] with outcome '1', qlist[1] yielding '3',
and qlist[2], '0'.
output:
A density matrix (numpy array).
"""
# Number of qubits
k = len(qlist)
marginals = marginalize(marginals, qlist)
# Phase 1: iterative algorithm without (not diluted)
rhok = np.eye(2**k) / 2**k
for iteration_one in range(maxiter):
rho = rhok
rhok = RrR(rho, observables, marginals)
if log_l(rhok, observables, marginals) < log_l(rho, observables, marginals):
# Stop if likelihood decreases (and do not accept last step)
rhok = rho
break
elif np.isclose(log_l(rhok, observables, marginals), log_l(rho, observables, marginals), atol=tol, rtol=0) and np.isclose(rhok, rho, atol=tol, rtol=0).all():
# Stop if increase in likelihood and rhok-rho are small enough
break
# Phase 2: iterate diluted algorithm with random epsilon
for iteration_two in range(maxiter):
rho = rhok
epsilon = np.random.rand() * epsilon_range
rhok = IRrR(rho, observables, marginals, epsilon)
if log_l(rhok, observables, marginals) < log_l(rho, observables, marginals):
# If likelihood decreases, do not accept the change but continue
rhok = rho
elif np.isclose(log_l(rhok, observables, marginals), log_l(rho, observables, marginals), atol=tol, rtol=0) and np.isclose(rhok, rho, atol=tol, rtol=0).all():
# Stop if increase in likelihood and rhok-rho are small enough
break
# Phase 3: iterate dilute algorithm for largest value of epsilon
epsilons = np.linspace(0, epsilon_range, n_epsilons+1)[1:]
for iteration_three in range(maxiter):
# Find largest increase in log-likelihood
delta_logl = {epsilon: log_l(IRrR(rhok, observables, marginals, epsilon), observables, marginals) - log_l(rhok, observables, marginals) for epsilon in epsilons}
max_epsilon = max(delta_logl, key=delta_logl.get)
if delta_logl[max_epsilon] > tol:
rhok = IRrR(rhok, observables, marginals, epsilon)
else:
break
# Verify result
delta_logl = {epsilon: log_l(IRrR(rhok, observables, marginals, epsilon), observables, marginals) - log_l(rhok, observables, marginals) for epsilon in epsilons}
if not (max(delta_logl.values()) < tol and np.isclose(log_l(rhok, observables, marginals), log_l(rho, observables, marginals), atol=tol, rtol=0) and np.isclose(rhok, rho, atol=tol, rtol=0).all()):
print('Convergence not achieved:')
print('Delta log-likelihood:', np.abs(log_l(rhok, observables, marginals) - log_l(rho, observables, marginals)))
print('Largest difference in operators:', np.amax(np.abs(rho - rhok)))
print('Iterations:')
print('Phase 1:', iteration_one+1)
print('Phase 2:', iteration_two+1)
print('Phase 3:', iteration_three+1)
return rhok
def calculate_expectation(POVM, input_state, outcomes, i, n):
    # Born probability of the i-th n-qubit outcome: tr((E_1 x ... x E_n) rho),
    # where each single-qubit effect is selected by a digit of outcomes[i]
    # and combined with a qutip tensor product.
    effects = [POVM[int(outcomes[i][j])] for j in range(n)]
    return np.real((tensor(effects) * input_state).tr())
def main(n, k, qlist, input_state, POVM, expectations, state_name, meas_name, seed=False):
    """Simulate k shots of an n-qubit product POVM on `input_state` and
    reconstruct the reduced state on `qlist` by maximum likelihood.

    :param n: total number of qubits.
    :param k: number of simulated measurement shots.
    :param qlist: tuple of qubit indices whose reduced state is inferred.
    :param input_state: qutip density matrix of the full n-qubit state.
    :param POVM: list of single-qubit effects (qutip operators).
    :param expectations: Born probabilities of every n-qubit outcome,
        ordered as itertools.product over the single-qubit outcome labels.
    :param state_name: label, kept for interface compatibility (unused).
    :param meas_name: label, kept for interface compatibility (unused).
    :param seed: optional seed for reproducible sampling.
    :return: (fidelity, reconstructed state, true reduced state).
    """
    if seed:
        random.seed(seed)
        # BUGFIX: sampling below uses np.random.choice, so numpy's RNG must
        # also be seeded for `seed` to actually make runs reproducible.
        np.random.seed(seed)
    povm_string = "".join(str(i) for i in range(len(POVM)))
    # All n-qubit outcome labels with their (initially zero) counts
    M = {}
    outcome_labels = []
    for item in itertools.product(povm_string, repeat=n):
        label = "".join(item)
        M[label] = 0
        outcome_labels.append(label)
    # Simulate the outcome statistics of k shots
    sim = np.random.choice(outcome_labels, size=k, p=expectations)
    for label in sim:
        M[label] = M.get(label, 0) + 1
    k_wise = len(qlist)
    # Effects of the k-qubit product POVM, keyed by outcome label
    m_2_obs = {}
    for item in itertools.product(povm_string, repeat=k_wise):
        label = "".join(item)
        effects = [POVM[int(label[j])] for j in range(k_wise)]
        m_2_obs[label] = np.array(tensor(effects))
    # True reduced state, used as the fidelity benchmark
    if k_wise < n:
        partial_W = Qobj(np.array(input_state.ptrace(list(qlist))))
    else:
        partial_W = input_state
    sol_den = infer_state(M, qlist, m_2_obs, tol=1e-15, maxiter=1000, epsilon_range=1e-8, n_epsilons=1000)
    sol_den = Qobj(sol_den)
    fid = fidelity(partial_W, sol_den)
    return fid, sol_den, partial_W
def w_state(n):
    """Density matrix of the n-qubit W state: an equal superposition of
    the n computational-basis kets that have a single |0> among |1>s
    (matching the original convention of this module).
    """
    components = []
    for excited in range(n):
        factors = [basis(2, 0) if q == excited else basis(2, 1) for q in range(n)]
        components.append(tensor(factors))
    ket = sum(components).unit()
    return ket * ket.dag()
if __name__ == "__main__":
E1 = basis(2, 0) * basis(2, 0).dag() / 2
#E1 = np.array(E1)
e2 = basis(2, 0) / np.sqrt(3) + np.sqrt(2/3) * basis(2, 1)
E2 = e2 * e2.dag() / 2
#E2 = np.array(E2)
e3 = basis(2, 0) / np.sqrt(3) + np.sqrt(2/3) * np.exp(1j*2*np.pi/3) * basis(2, 1)
E3 = e3 * e3.dag() / 2
#E3 = np.array(E3)
e4 = basis(2, 0) / np.sqrt(3) + np.sqrt(2/3) * np.exp(1j*4*np.pi/3) * basis(2, 1)
E4 = e4 * e4.dag() / 2
#E4 = np.array(E4)
E = [E1, E2, E3, E4]
init_state = w_state(8)
#s0= time()
#print("Initializing")
povm_string = ""
for i in range(len(E)):
povm_string += str(i)
outcomes = []
for i, item in enumerate(itertools.product(povm_string, repeat=8)):
outcomes.append("".join(item))
expectations = np.array([calculate_expectation(E, init_state, outcomes, i, 8) for i in range(len(outcomes))])
#s1 = time()
#print(s1 - s0)
fids2 = []
fids3 = []
fids4 = []
for i in range(10):
fids4.append(main(8, 8192, (1, 4, 5, 7), init_state, E, expectations, "W", "sic", seed=False))
fids3.append(main(8, 8192, (1, 4, 7), init_state, E, expectations, "W", "sic", seed=False))
fids2.append(main(8, 8192, (4, 7), init_state, E, expectations, "W", "sic", seed=False))
#print(np.std(fids2))
#print(np.average(fids2))
#print(np.std(fids3))
#print(np.average(fids3))
#print(np.std(fids4))
#print(np.average(fids4))
| [
"numpy.trace",
"numpy.abs",
"numpy.log",
"numpy.random.rand",
"numpy.zeros",
"numpy.imag",
"numpy.isclose",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace",
"numpy.random.choice",
"random.seed",
"numpy.eye",
"numpy.real",
"itertools.product",
"numpy.exp",
"numpy.sqrt"
] | [((578, 609), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {'dtype': 'complex'}), '([4, 4], dtype=complex)\n', (586, 609), True, 'import numpy as np\n'), ((1040, 1072), 'numpy.array', 'np.array', (['density'], {'dtype': 'complex'}), '(density, dtype=complex)\n', (1048, 1072), True, 'import numpy as np\n'), ((2246, 2299), 'numpy.zeros', 'np.zeros', (['(rho.shape[0], rho.shape[0])'], {'dtype': 'complex'}), '((rho.shape[0], rho.shape[0]), dtype=complex)\n', (2254, 2299), True, 'import numpy as np\n'), ((7932, 7982), 'numpy.random.choice', 'np.random.choice', (['outcomes'], {'size': 'k', 'p': 'expectations'}), '(outcomes, size=k, p=expectations)\n', (7948, 7982), True, 'import numpy as np\n'), ((352, 369), 'numpy.linalg.norm', 'np.linalg.norm', (['r'], {}), '(r)\n', (366, 369), True, 'import numpy as np\n'), ((4545, 4559), 'numpy.eye', 'np.eye', (['(2 ** k)'], {}), '(2 ** k)\n', (4551, 4559), True, 'import numpy as np\n'), ((5895, 5940), 'numpy.linspace', 'np.linspace', (['(0)', 'epsilon_range', '(n_epsilons + 1)'], {}), '(0, epsilon_range, n_epsilons + 1)\n', (5906, 5940), True, 'import numpy as np\n'), ((7512, 7529), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7523, 7529), False, 'import random\n'), ((7713, 7753), 'itertools.product', 'itertools.product', (['povm_string'], {'repeat': 'n'}), '(povm_string, repeat=n)\n', (7730, 7753), False, 'import itertools\n'), ((8407, 8452), 'itertools.product', 'itertools.product', (['povm_string'], {'repeat': 'k_wise'}), '(povm_string, repeat=k_wise)\n', (8424, 8452), False, 'import itertools\n'), ((10110, 10150), 'itertools.product', 'itertools.product', (['povm_string'], {'repeat': '(8)'}), '(povm_string, repeat=8)\n', (10127, 10150), False, 'import itertools\n'), ((382, 393), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (390, 393), True, 'import numpy as np\n'), ((394, 411), 'numpy.linalg.norm', 'np.linalg.norm', (['r'], {}), '(r)\n', (408, 411), True, 'import numpy as np\n'), ((1158, 1174), 'numpy.real', 
'np.real', (['d[i][i]'], {}), '(d[i][i])\n', (1165, 1174), True, 'import numpy as np\n'), ((1783, 1820), 'numpy.trace', 'np.trace', (['(m_2_obs[i] * density_matrix)'], {}), '(m_2_obs[i] * density_matrix)\n', (1791, 1820), True, 'import numpy as np\n'), ((1984, 1997), 'numpy.log', 'np.log', (['(c * x)'], {}), '(c * x)\n', (1990, 1997), True, 'import numpy as np\n'), ((5271, 5287), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5285, 5287), True, 'import numpy as np\n'), ((9576, 9586), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (9583, 9586), True, 'import numpy as np\n'), ((9589, 9603), 'numpy.sqrt', 'np.sqrt', (['(2 / 3)'], {}), '(2 / 3)\n', (9596, 9603), True, 'import numpy as np\n'), ((9680, 9690), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (9687, 9690), True, 'import numpy as np\n'), ((9807, 9817), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (9814, 9817), True, 'import numpy as np\n'), ((1254, 1270), 'numpy.real', 'np.real', (['d[i][j]'], {}), '(d[i][j])\n', (1261, 1270), True, 'import numpy as np\n'), ((1353, 1369), 'numpy.imag', 'np.imag', (['d[i][j]'], {}), '(d[i][j])\n', (1360, 1369), True, 'import numpy as np\n'), ((1870, 1879), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (1876, 1879), True, 'import numpy as np\n'), ((7013, 7031), 'numpy.abs', 'np.abs', (['(rho - rhok)'], {}), '(rho - rhok)\n', (7019, 7031), True, 'import numpy as np\n'), ((9693, 9707), 'numpy.sqrt', 'np.sqrt', (['(2 / 3)'], {}), '(2 / 3)\n', (9700, 9707), True, 'import numpy as np\n'), ((9708, 9736), 'numpy.exp', 'np.exp', (['(1.0j * 2 * np.pi / 3)'], {}), '(1.0j * 2 * np.pi / 3)\n', (9714, 9736), True, 'import numpy as np\n'), ((9820, 9834), 'numpy.sqrt', 'np.sqrt', (['(2 / 3)'], {}), '(2 / 3)\n', (9827, 9834), True, 'import numpy as np\n'), ((9835, 9863), 'numpy.exp', 'np.exp', (['(1.0j * 4 * np.pi / 3)'], {}), '(1.0j * 4 * np.pi / 3)\n', (9841, 9863), True, 'import numpy as np\n'), ((6743, 6782), 'numpy.isclose', 'np.isclose', (['rhok', 'rho'], {'atol': 
'tol', 'rtol': '(0)'}), '(rhok, rho, atol=tol, rtol=0)\n', (6753, 6782), True, 'import numpy as np\n'), ((4991, 5030), 'numpy.isclose', 'np.isclose', (['rhok', 'rho'], {'atol': 'tol', 'rtol': '(0)'}), '(rhok, rho, atol=tol, rtol=0)\n', (5001, 5030), True, 'import numpy as np\n'), ((5666, 5705), 'numpy.isclose', 'np.isclose', (['rhok', 'rho'], {'atol': 'tol', 'rtol': '(0)'}), '(rhok, rho, atol=tol, rtol=0)\n', (5676, 5705), True, 'import numpy as np\n')] |
# Built-in libraries
import json
import math
import random
from functools import partial
from typing import Any, Callable, Dict, List
import numpy as np
# Plotting settings
import plotly.graph_objects as go
import seaborn as sns
from plotly.offline import plot as plotoffline
from .data_input import check_valid_batch_dict
def get_rgba_from_triplet(incolour: list, alpha=1, as_string=False):
    """
    Convert a colour triplet of floats in [0, 1] to either a list of three
    integer RGB values (default) or a Plotly 'rgba(r,g,b,a)' string when
    `as_string` is True; `alpha` supplies the alpha channel of the string
    form. A 4th entry in `incolour`, if present, is ignored.

    E.g. [0.9677975592919913, 0.44127456009157356, 0.5358103155058701]
         -> 'rgba(246,112,136,1.0)'
    """
    assert (
        3 <= len(incolour) <= 4
    ), "`incolour` must be a list of 3 or 4 values; ignores 4th entry"
    # Clamp to [0, 255] so out-of-range inputs cannot produce invalid
    # colour components (previously only the lower bound was clamped).
    colours = [
        min(255, max(0, int(math.floor(c * 255)))) for c in list(incolour)[0:3]
    ]
    if as_string:
        return f"rgba({colours[0]},{colours[1]},{colours[2]},{float(alpha)})"
    else:
        return colours
def plot_to_HTML(filename: str, fig: dict):
    """Write a Plotly figure to a standalone HTML file (plotly.js and
    MathJax loaded from CDN; file is not auto-opened) and return the
    path of the written file.

    :param filename: path of the HTML file to write.
    :param fig: Plotly figure (or figure dict) to render.
    """
    config = dict(
        scrollZoom=True,
        displayModeBar=True,
        editable=False,
        displaylogo=False,
        showLink=False,
        # BUGFIX: was misspelled 'resonsive', which Plotly silently ignored.
        responsive=True,
    )
    return plotoffline(
        figure_or_data=fig,
        config=config,
        filename=filename,
        include_plotlyjs="cdn",
        include_mathjax="cdn",
        auto_open=False,
    )
def plot_all_batches_per_tag(
    df_dict: dict,
    tag: str,
    tag_y2: str = None,
    time_column: str = None,
    extra_info="",
    batches_to_highlight={},
    x_axis_label: str = "Time [sequence order]",
    # NOTE(review): highlight_width is currently unused -- the styling of
    # highlighted batches comes entirely from the JSON keys of
    # `batches_to_highlight`.
    highlight_width: int = 5,
    html_image_height: int = 900,
    html_aspect_ratio_w_over_h: float = 16 / 9,
    y1_limits: tuple = (None, None),
    y2_limits: tuple = (None, None),
) -> go.Figure:
    """Plots a particular `tag` over all batches in the given dataframe `df`.
    Parameters
    ----------
    df_dict : dict
        Standard data format for batches.
    tag : str
        Which tag to plot? [on the y1 (left) axis]
    tag_y2 : str, optional
        Which tag to plot? [on the y2 (right) axis]
        Tag will be plotted with different scaling on the secondary axis, to allow time-series
        comparisons to be easier.
    time_column : str, optional
        Which tag on the x-axis. If not specified, creates sequential integers, starting from 0
        if left as the default, `None`.
    extra_info : str, optional
        Used in the plot title to add any extra details, by default ""
    batches_to_highlight : dict, optional
        keys: an string which can be json.loads(...) and turns into a Plotly line specifier.
        For example:
        batches_to_highlight = grouper= {'{"width": 2, "color": "rgba(255,0,0,0.5)"}': redlist}
        will plot batch identifiers (must be valid keys in `df_dict`) in the "redlist" list
        with that colour and linewidth.
    x_axis_label : str, optional
        String label for the x-axis, by default "Time [sequence order]"
    highlight_width: int, optional
        The width of the highlighted lines; default = 5.
    html_image_height : int, optional
        HTML image output height, by default 900
    html_aspect_ratio_w_over_h : float, optional
        HTML image aspect ratio: 16/9 (therefore the default width will be 1600 px)
    y1_limits: tuple, optional
        Axis limits enforced on the y1 (left) axis. Default is (None, None) which means the data
        themselves are used to determine the limits. Specify BOTH limits. Plotly requires
        (at the moment https://github.com/plotly/plotly.js/issues/400) that you specify both.
        Order: (low limit, high limit)
    y2_limits: tuple, optional
        Axis limits enforced on the y2 (right) axis. Default is (None, None) which means the data
        themselves are used to determine the limits. Specify BOTH limits. Plotly requires
        (at the moment https://github.com/plotly/plotly.js/issues/400) that you specify both.
    Returns
    -------
    go.Figure
        Standard Plotly fig object (dictionary-like).
    """
    default_line_width = 2
    unique_items = list(df_dict.keys())
    n_colours = len(unique_items)
    # NOTE(review): reseeds the *global* random module so the colour
    # shuffle is repeatable across calls; this clobbers global RNG state.
    random.seed(13)
    colours = list(sns.husl_palette(n_colours))
    random.shuffle(colours)
    colours = [get_rgba_from_triplet(c, as_string=True) for c in colours]
    # One Plotly line spec per batch id; highlighted batches are then
    # overridden with the spec encoded in the JSON key.
    line_styles = {
        k: dict(width=default_line_width, color=v)
        for k, v in zip(unique_items, colours)
    }
    for key, val in batches_to_highlight.items():
        line_styles.update(
            {item: json.loads(key) for item in val if item in df_dict.keys()}
        )
    highlight_list = []
    for key, val in batches_to_highlight.items():
        highlight_list.extend(val)
    highlight_list = list(set(highlight_list))
    fig = go.Figure()
    for batch_id, batch_df in df_dict.items():
        assert (
            tag in batch_df.columns
        ), f"Tag '{tag}' not found in the batch with id {batch_id}."
        if tag_y2:
            # NOTE(review): message references `tag` but the check is on
            # `tag_y2` -- the assertion text is misleading when it fires.
            assert (
                tag_y2 in batch_df.columns
            ), f"Tag '{tag}' not found in the batch with id {batch_id}."
        if time_column in batch_df.columns:
            time_data = batch_df[time_column]
        else:
            # Fall back to sequence order 0, 1, 2, ...
            time_data = list(range(batch_df.shape[0]))
        if batch_id in highlight_list:
            continue  # come to this later
        else:
            fig.add_trace(
                go.Scatter(
                    x=time_data,
                    y=batch_df[tag],
                    name=batch_id,
                    line=line_styles[batch_id],
                    mode="lines",
                    opacity=0.8,
                    yaxis="y1",
                )
            )
            if tag_y2:
                fig.add_trace(
                    go.Scatter(
                        x=time_data,
                        y=batch_df[tag_y2],
                        name=batch_id,
                        line=line_styles[batch_id],
                        mode="lines",
                        opacity=0.8,
                        yaxis="y2",
                    )
                )
    # Add the highlighted batches last: therefore, sadly, we have to do another run-through.
    # Plotly does not yet support z-orders.
    for batch_id, batch_df in df_dict.items():
        if time_column in batch_df.columns:
            time_data = batch_df[time_column]
        else:
            time_data = list(range(batch_df.shape[0]))
        if batch_id in highlight_list:
            fig.add_trace(
                go.Scatter(
                    x=time_data,
                    y=batch_df[tag],
                    line=line_styles[batch_id],
                    name=batch_id,
                    mode="lines",
                    opacity=0.8,
                    yaxis="y1",
                )
            )
            if tag_y2:
                fig.add_trace(
                    go.Scatter(
                        x=time_data,
                        y=batch_df[tag_y2],
                        line=line_styles[batch_id],
                        name=batch_id,
                        mode="lines",
                        opacity=0.8,
                        yaxis="y2",
                    )
                )
    # Axis configuration: ranges are only fixed when the caller supplied
    # explicit limits; otherwise Plotly autoranges from the data.
    yaxis1_dict = dict(
        title=tag, gridwidth=2, matches="y1", showticklabels=True, side="left"
    )
    if (y1_limits[0] is not None) or (y1_limits[1] is not None):
        yaxis1_dict["autorange"] = False
        yaxis1_dict["range"] = y1_limits
    yaxis2_dict: Dict[str, Any] = dict(
        title=tag_y2, gridwidth=2, matches="y2", showticklabels=True, side="right"
    )
    if (y2_limits[0] is not None) or (y2_limits[1] is not None):
        yaxis2_dict["autorange"] = False
        yaxis2_dict["range"] = y2_limits
    fig.update_layout(
        title=f"Plot of: '{tag}'"
        + (f" on left axis; with '{str(tag_y2)}' on right axis." if tag_y2 else ".")
        + (f" [{str(extra_info)}]" if extra_info else ""),
        hovermode="closest",
        showlegend=True,
        legend=dict(
            orientation="h",
            traceorder="normal",
            font=dict(family="sans-serif", size=12, color="#000"),
            bordercolor="#DDDDDD",
            borderwidth=1,
        ),
        autosize=False,
        xaxis=dict(title=x_axis_label, gridwidth=1),
        yaxis=yaxis1_dict,
        width=html_aspect_ratio_w_over_h * html_image_height,
        height=html_image_height,
    )
    if tag_y2:
        fig.update_layout(yaxis2=yaxis2_dict)
    return fig
def colours_per_batch_id(
    batch_ids: list,
    batches_to_highlight: dict,
    default_line_width: float,
    use_default_colour: bool = False,
    colour_map: Callable = partial(sns.color_palette, "hls"),
) -> Dict[Any, Dict]:
    """
    Returns a colour to use for each trace in the plot. A dictionary: keys are batch ids, and
    the value is a colour and line width setting for Plotly.

    :param batch_ids: batch identifiers, one entry per trace.
    :param batches_to_highlight: maps a JSON-encoded Plotly line spec to a
        list of batch ids that should use that spec verbatim.
    :param default_line_width: line width for non-highlighted batches.
    :param use_default_colour: if True, then the default colour is used for
        every non-highlighted batch (grey: 0.5, 0.5, 0.5).
    :param colour_map: callable returning `n` colour triplets.
    """
    # Use a private, fixed-seed RNG so the colour assignment is repeatable
    # without clobbering the global `random` module's state (same Mersenne
    # Twister sequence as the previous random.seed(13)).
    rng = random.Random(13)
    n_colours = len(batch_ids)
    colours = (
        list(colour_map(n_colours))
        if not (use_default_colour)
        else [(0.5, 0.5, 0.5)] * n_colours
    )
    rng.shuffle(colours)
    colours = [get_rgba_from_triplet(c, as_string=True) for c in colours]
    colour_assignment = {
        key: dict(width=default_line_width, color=val)
        for key, val in zip(list(batch_ids), colours)
    }
    # Highlighted batches get the Plotly line spec encoded in the JSON key
    for key, val in batches_to_highlight.items():
        colour_assignment.update(
            {item: json.loads(key) for item in val if item in batch_ids}
        )
    return colour_assignment
# flake8: noqa: C901
def plot_multitags(
df_dict: dict,
batch_list: list = None,
tag_list: list = None,
time_column: str = None,
batches_to_highlight: dict = {},
settings: dict = None,
fig=None,
) -> go.Figure:
"""
Plots all the tags for a batch; or a subset of tags, if specified in `tag_list`.
Parameters
----------
df_dict : dict
Standard data format for batches.
batch_list : list [default: None, will plot all batches in df_dict]
Which batches to plot; if provided, must be a list of valid keys into df_dict.
tag_list : list [default: None, will plot all tags in the dataframes]
Which tags to plot; tags will also be plotted in this order, or in the order of the
first dataframe if not specified.
time_column : str, optional
Which tag on the x-axis. If not specified, creates sequential integers, starting from 0
if left as the default, `None`.
batches_to_highlight : dict, optional
keys: an string which can be json.loads(...) and turns into a Plotly line specifier.
For example:
batches_to_highlight = grouper= {'{"width": 2, "color": "rgba(255,0,0,0.5)"}': redlist}
will plot batch identifiers (must be valid keys in `df_dict`) in the "redlist" list
with that colour and linewidth.
settings : dict
Default settings are = {
"nrows": 1 [int],
Number of rows in the plot.
"ncols": None
None = use as many columns as required to plot the data; else, supply an integer.
"x_axis_label": "Time, grouped per tag" <-- still TODO: make this show up.
What label is added to the x-axis?
"title": ""
Overall plot title
"show_legend": True,
Add a legend item for each tag
"html_image_height": 900,
in pixels
"html_aspect_ratio_w_over_h": 16/9,
sets the image width, as a ratio of the height
}
fig : go.Figure
If supplied, uses the existing Plotly figure to draw in.
"""
font_size = 12
margin_dict = dict(l=10, r=10, b=5, t=80) # Defaults: l=80, r=80, t=100, b=80
hovertemplate = "Time: %{x}\ny: %{y}"
# This will be clumsy, until we have Python 3.9. TODO: use pydantic instead
# This will be clumsy, until we have Python 3.9. TODO: use pydantic instead
default_settings: Dict[str, Any] = dict(
# Pydantic: int
nrows=1,
# Pydantic: int
ncols=0,
# Pydantic: str
x_axis_label="Time, grouped per tag",
# Pydantic: str
title="",
# Pydantic: bool
show_legend=True,
# Pydantic: >0
html_image_height=900,
# Pydantic: >0
html_aspect_ratio_w_over_h=16 / 9,
# Pydantic: >0
default_line_width=2,
# Pydantic: callable
colour_map=sns.husl_palette,
# Pydantic: bool
animate=False,
# Pydantic: list
animate_batches_to_highlight=[],
# Pydantic: bool
animate_show_slider=True,
# Pydantic: bool
animate_show_pause=True,
# Pydantic: str
animate_slider_prefix="Index: ",
# Pydantic: bool
# fraction of figure height. Default should be OK, but depends if the
# legend is show and length of batch names
animate_slider_vertical_offset=-0.3,
# Pydantic: > 0
animate_line_width=4, # the animated lines are drawn on top of the historical lines
# Pydantic: optional or int
animate_n_frames=None, # takes max frames required to give every time step 1 frame.
# Pydantic: int >= 0
animate_framerate_milliseconds=0,
)
if settings:
default_settings.update(settings)
settings = default_settings
if len(settings["animate_batches_to_highlight"]) == 0:
settings["animate"] = False
if settings["animate"]:
# override for animations, because we want to see everything in frame zero
settings["default_line_width"] = 0.5
# Override these settings for animations, because we want to see everything in frame zero
animation_colour_assignment = colours_per_batch_id(
batch_ids=list(df_dict.keys()),
batches_to_highlight=batches_to_highlight or dict(),
default_line_width=settings["animate_line_width"],
use_default_colour=False,
colour_map=settings["colour_map"],
)
else:
# Adjust the other animate settings in such a way that the regular functionality works
settings["animate_show_slider"] = False
settings["animate_show_pause"] = False
settings["animate_line_width"] = 0
settings["animate_n_frames"] = 0
settings["animate_batches_to_highlight"] = []
if fig is None:
fig = go.Figure()
batch1 = df_dict[list(df_dict.keys())[0]]
if tag_list is None:
tag_list = list(batch1.columns)
tag_list = list(tag_list) # Force it; sometimes we get non-list inputs
if batch_list is None:
batch_list = list(df_dict.keys())
batch_list = list(batch_list)
if settings["animate"]:
for batch_id in settings["animate_batches_to_highlight"]:
batch_list.remove(batch_id)
# Afterwards, add them back, at the end.
batch_list.extend(settings["animate_batches_to_highlight"])
if time_column in tag_list:
tag_list.remove(time_column)
# Check that the tag_list is present in all batches.
assert check_valid_batch_dict(
{k: v[tag_list] for k, v in df_dict.items() if k in batch_list}, no_nan=False
)
if settings["ncols"] == 0:
settings["ncols"] = int(np.ceil(len(tag_list) / int(settings["nrows"])))
specs = [[{"type": "scatter"}] * int(settings["ncols"])] * int(settings["nrows"])
fig.set_subplots(
rows=settings["nrows"],
cols=settings["ncols"],
shared_xaxes="all",
shared_yaxes=False,
start_cell="top-left",
vertical_spacing=0.2 / settings["nrows"],
horizontal_spacing=0.2 / settings["ncols"],
subplot_titles=tag_list,
specs=specs,
)
colour_assignment = colours_per_batch_id(
batch_ids=list(df_dict.keys()),
batches_to_highlight=batches_to_highlight,
default_line_width=settings["default_line_width"],
# if animating, yes, use grey for all lines; unless `batches_to_highlight` was specified
use_default_colour=settings["animate"]
if settings["animate"] and (len(batches_to_highlight) == 0)
else False,
colour_map=settings["colour_map"],
)
# Initial plot (what is visible before animation starts)
longest_time_length: int = 0
for batch_id in batch_list:
batch_df = df_dict[batch_id]
# Time axis values
if time_column in batch_df.columns:
time_data = batch_df[time_column]
else:
time_data = list(range(batch_df.shape[0]))
longest_time_length = max(longest_time_length, len(time_data))
row = col = 1
for tag in tag_list:
showlegend = settings["show_legend"] if tag == tag_list[0] else False
# This feels right, but leads to the animated batched taking the places of the
# first few non-animated batches in the legend.
# Ugh, even without this, they still overwrite them. Sadly.
# if batch_id in settings["animate_batches_to_highlight"]:
# showlegend = False # overridden. If required, we will add it during the animation
trace = go.Scatter(
x=time_data,
y=batch_df[tag],
name=batch_id,
mode="lines",
hovertemplate=hovertemplate,
line=colour_assignment[batch_id],
legendgroup=batch_id,
# Only add batch_id to legend the first time it is plotted (the first subplot)
showlegend=showlegend,
xaxis=fig.get_subplot(row, col)[1]["anchor"],
yaxis=fig.get_subplot(row, col)[0]["anchor"],
)
fig.add_trace(trace)
col += 1
if col > settings["ncols"]:
row += 1
col = 1
# Create the slider; will be ignore later if not required
# https://plotly.com/python/reference/layout/sliders/
slider_baseline_dict = {
"active": 0,
"yanchor": "top",
"xanchor": "left",
"font": {"size": font_size},
"currentvalue": {
"font": {"size": font_size},
"prefix": settings["animate_slider_prefix"],
"visible": True,
"xanchor": "left",
},
"transition": {
"duration": settings["animate_framerate_milliseconds"],
"easing": "linear",
},
"pad": {"b": 0, "t": 0},
"lenmode": "fraction",
"len": 0.9,
"x": 0.05,
"y": settings["animate_slider_vertical_offset"],
"name": "Slider",
"steps": [],
}
# Create other animation settings. Again, these will be ignored if not needed
frames: List = []
slider_steps = []
frame_settings = dict(
frame={"duration": settings["animate_framerate_milliseconds"], "redraw": True},
mode="immediate",
transition={"duration": 0},
)
settings["animate_n_frames"] = (
settings["animate_n_frames"]
if settings["animate_n_frames"] >= 0
else longest_time_length
)
for index in np.linspace(0, longest_time_length, settings["animate_n_frames"]):
# TO OPTIMIZE: add hover template only on the last iteration
# TO OPTIMIZE: can you add only the incremental new piece of animation?
index = int(np.floor(index))
frame_name = f"{index}" # this is the link with the slider and the animation in the play button
one_frame = generate_one_frame(
df_dict,
tag_list,
fig,
up_to_index=index + 1,
time_column=time_column,
batch_ids_to_animate=settings["animate_batches_to_highlight"],
animation_colour_assignment=animation_colour_assignment,
show_legend=settings["show_legend"],
hovertemplate=hovertemplate,
max_columns=settings["ncols"],
)
frames.append(go.Frame(data=one_frame, name=frame_name))
slider_dict = dict(
args=[
[frame_name],
frame_settings,
],
label=frame_name,
method="animate",
)
slider_steps.append(slider_dict)
# Buttons: for animations
button_play = dict(
label="Play",
method="animate",
args=[
None,
dict(
frame=dict(duration=0, redraw=False),
transition=dict(duration=30, easing="quadratic-in-out"),
fromcurrent=True,
mode="immediate",
),
],
)
button_pause = dict(
label="Pause",
method="animate",
args=[
# https://plotly.com/python/animations/
# Note the None is in a list!
[[None]], # TODO: does not work at the moment.
dict(
frame=dict(duration=0, redraw=False),
transition=dict(duration=0),
mode="immediate",
),
],
)
# OK, pull things together to render the fig
slider_baseline_dict["steps"] = slider_steps
button_list: List[Any] = []
if settings["animate"]:
fig.update(frames=frames)
button_list.append(button_play)
if settings["animate_show_pause"]:
button_list.append(button_pause)
fig.update_layout(
title=settings["title"],
margin=margin_dict,
hovermode="closest",
showlegend=settings["show_legend"],
legend=dict(
orientation="h",
traceorder="normal",
font=dict(family="sans-serif", size=12, color="#000"),
bordercolor="#DDDDDD",
borderwidth=1,
),
autosize=False,
xaxis=dict(
gridwidth=1,
mirror=True, # ticks are mirror at the top of the frame also
showspikes=True,
visible=True,
),
yaxis=dict(
gridwidth=2,
type="linear",
autorange=True,
showspikes=True,
visible=True,
showline=True, # show a separating line
side="left", # show on the RHS
),
width=settings["html_aspect_ratio_w_over_h"] * settings["html_image_height"],
height=settings["html_image_height"],
sliders=[slider_baseline_dict] if settings["animate_show_slider"] else [],
updatemenus=[
dict(
type="buttons",
showactive=False,
y=0,
x=1.05,
xanchor="left",
yanchor="bottom",
buttons=button_list,
)
],
)
return fig
def generate_one_frame(
    df_dict: dict,
    tag_list: list,
    fig,
    up_to_index,
    time_column,
    batch_ids_to_animate: list,
    animation_colour_assignment,
    show_legend=False,
    hovertemplate: str = "",
    max_columns=0,
) -> List[Dict]:
    """
    Build the list of traces that make up one animation frame.

    One subplot exists per tag in `tag_list` (in subplot order); for each
    subplot a partial line — the first `up_to_index` samples — is emitted for
    every batch in `batch_ids_to_animate`. Returns the traces as a flat list.
    """
    traces = []
    row, col = 1, 1
    for tag in tag_list:
        for batch_id in batch_ids_to_animate:
            batch_df = df_dict[batch_id]
            # Same time-axis fallback as the caller: use the explicit time
            # column when present, otherwise a plain sample counter.
            if time_column in batch_df.columns:
                time_data = batch_df[time_column]
            else:
                time_data = list(range(batch_df.shape[0]))
            traces.append(
                go.Scatter(
                    x=time_data[0:up_to_index],
                    y=batch_df[tag][0:up_to_index],
                    name=batch_id,
                    mode="lines",
                    hovertemplate=hovertemplate,
                    line=animation_colour_assignment[batch_id],
                    legendgroup=batch_id,
                    # Only the first subplot contributes legend entries.
                    showlegend=show_legend if tag == tag_list[0] else False,
                    xaxis=fig.get_subplot(row, col)[1]["anchor"],
                    yaxis=fig.get_subplot(row, col)[0]["anchor"],
                )
            )
        # Advance to the next subplot once per tag — deliberately outside the
        # `batch_ids_to_animate` loop.
        col += 1
        if col > max_columns:
            row += 1
            col = 1
    return traces
| [
"functools.partial",
"plotly.graph_objects.Scatter",
"plotly.graph_objects.Frame",
"json.loads",
"plotly.graph_objects.Figure",
"random.shuffle",
"seaborn.husl_palette",
"numpy.floor",
"math.floor",
"plotly.offline.plot",
"random.seed",
"numpy.linspace"
] | [((1240, 1373), 'plotly.offline.plot', 'plotoffline', ([], {'figure_or_data': 'fig', 'config': 'config', 'filename': 'filename', 'include_plotlyjs': '"""cdn"""', 'include_mathjax': '"""cdn"""', 'auto_open': '(False)'}), "(figure_or_data=fig, config=config, filename=filename,\n include_plotlyjs='cdn', include_mathjax='cdn', auto_open=False)\n", (1251, 1373), True, 'from plotly.offline import plot as plotoffline\n'), ((4240, 4255), 'random.seed', 'random.seed', (['(13)'], {}), '(13)\n', (4251, 4255), False, 'import random\n'), ((4308, 4331), 'random.shuffle', 'random.shuffle', (['colours'], {}), '(colours)\n', (4322, 4331), False, 'import random\n'), ((4865, 4876), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (4874, 4876), True, 'import plotly.graph_objects as go\n'), ((8811, 8844), 'functools.partial', 'partial', (['sns.color_palette', '"""hls"""'], {}), "(sns.color_palette, 'hls')\n", (8818, 8844), False, 'from functools import partial\n'), ((9149, 9164), 'random.seed', 'random.seed', (['(13)'], {}), '(13)\n', (9160, 9164), False, 'import random\n'), ((9337, 9360), 'random.shuffle', 'random.shuffle', (['colours'], {}), '(colours)\n', (9351, 9360), False, 'import random\n'), ((19515, 19580), 'numpy.linspace', 'np.linspace', (['(0)', 'longest_time_length', "settings['animate_n_frames']"], {}), "(0, longest_time_length, settings['animate_n_frames'])\n", (19526, 19580), True, 'import numpy as np\n'), ((4275, 4302), 'seaborn.husl_palette', 'sns.husl_palette', (['n_colours'], {}), '(n_colours)\n', (4291, 4302), True, 'import seaborn as sns\n'), ((14746, 14757), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (14755, 14757), True, 'import plotly.graph_objects as go\n'), ((19752, 19767), 'numpy.floor', 'np.floor', (['index'], {}), '(index)\n', (19760, 19767), True, 'import numpy as np\n'), ((20356, 20397), 'plotly.graph_objects.Frame', 'go.Frame', ([], {'data': 'one_frame', 'name': 'frame_name'}), '(data=one_frame, name=frame_name)\n', 
(20364, 20397), True, 'import plotly.graph_objects as go\n'), ((824, 843), 'math.floor', 'math.floor', (['(c * 255)'], {}), '(c * 255)\n', (834, 843), False, 'import math\n'), ((4627, 4642), 'json.loads', 'json.loads', (['key'], {}), '(key)\n', (4637, 4642), False, 'import json\n'), ((5502, 5629), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'time_data', 'y': 'batch_df[tag]', 'name': 'batch_id', 'line': 'line_styles[batch_id]', 'mode': '"""lines"""', 'opacity': '(0.8)', 'yaxis': '"""y1"""'}), "(x=time_data, y=batch_df[tag], name=batch_id, line=line_styles[\n batch_id], mode='lines', opacity=0.8, yaxis='y1')\n", (5512, 5629), True, 'import plotly.graph_objects as go\n'), ((6634, 6761), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'time_data', 'y': 'batch_df[tag]', 'line': 'line_styles[batch_id]', 'name': 'batch_id', 'mode': '"""lines"""', 'opacity': '(0.8)', 'yaxis': '"""y1"""'}), "(x=time_data, y=batch_df[tag], line=line_styles[batch_id], name=\n batch_id, mode='lines', opacity=0.8, yaxis='y1')\n", (6644, 6761), True, 'import plotly.graph_objects as go\n'), ((9679, 9694), 'json.loads', 'json.loads', (['key'], {}), '(key)\n', (9689, 9694), False, 'import json\n'), ((5872, 6002), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'time_data', 'y': 'batch_df[tag_y2]', 'name': 'batch_id', 'line': 'line_styles[batch_id]', 'mode': '"""lines"""', 'opacity': '(0.8)', 'yaxis': '"""y2"""'}), "(x=time_data, y=batch_df[tag_y2], name=batch_id, line=line_styles\n [batch_id], mode='lines', opacity=0.8, yaxis='y2')\n", (5882, 6002), True, 'import plotly.graph_objects as go\n'), ((7004, 7133), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'time_data', 'y': 'batch_df[tag_y2]', 'line': 'line_styles[batch_id]', 'name': 'batch_id', 'mode': '"""lines"""', 'opacity': '(0.8)', 'yaxis': '"""y2"""'}), "(x=time_data, y=batch_df[tag_y2], line=line_styles[batch_id],\n name=batch_id, mode='lines', opacity=0.8, yaxis='y2')\n", (7014, 7133), True, 'import 
plotly.graph_objects as go\n')] |
# emmm.py: EM algorithm on discrete random points.
import argparse
from math import exp, inf, log, pi
import matplotlib.pyplot as plt
import numpy as np
from kmeans import randCent
# Module-level EM state: initialised in main() and shared (and mutated)
# by the estep()/mstep()/likelihood() helpers below.
X = None      # data matrix, n samples x d features (last CSV column dropped)
mju = None    # component means; indexable as mju[j] and mju[j, dim]
sigma = None  # list of k (d, d) covariance matrices
pre = None    # list of k mixture weights (priors)
gamma = None  # (n, k) responsibility matrix, rewritten by estep()
n = 0         # number of samples
k = 0         # number of mixture components (from the -k flag)
def multiNormal(x, mean, stdvar):
    """Evaluate the multivariate normal density at `x`.

    `mean` is the distribution mean vector and `stdvar` the covariance
    matrix (both numpy arrays of matching dimension).
    """
    diff = x - mean
    # Quadratic form in the exponent: -0.5 * diff^T * Sigma^-1 * diff.
    exponent = -0.5 * (diff @ (np.linalg.inv(stdvar) @ diff))
    # Normalisation constant: (2*pi)^(d/2) * sqrt(|Sigma|).
    norm_const = (2 * pi) ** (len(x) / 2) * np.linalg.det(stdvar) ** 0.5
    return exp(exponent) / norm_const
def likelihood():
    """Return the log-likelihood of X under the current mixture parameters."""
    # For each sample, the mixture density is the prior-weighted sum of the
    # per-component densities; the log-likelihood sums the logs over samples.
    return sum(
        log(sum(pre[j] * multiNormal(X[i], mju[j], sigma[j]) for j in range(k)))
        for i in range(n)
    )
def estep():
    """E-step: recompute the responsibility matrix `gamma` in place."""
    for i in range(n):
        # Unnormalised posterior weight of each component for sample i.
        weights = [pre[j] * multiNormal(X[i], mju[j], sigma[j]) for j in range(k)]
        total = sum(weights)
        # Normalise so the responsibilities for sample i sum to one.
        for j in range(k):
            gamma[i, j] = weights[j] / total
def mstep():
    """M-step: update means, covariances and priors from `gamma`."""
    dim = X.shape[1]
    for j in range(k):
        resp = gamma[:, j]
        Nk = np.sum(resp)                 # effective sample count of component j
        mju[j] = np.dot(resp, X) / Nk     # responsibility-weighted mean
        centred = X - mju[j]              # rows are sample deviations
        # Weighted sum of outer products gives the unnormalised covariance.
        cov = np.zeros((dim, dim))
        for i in range(n):
            cov += resp[i] * np.outer(centred[i], centred[i])
        sigma[j] = cov / Nk
        pre[j] = Nk / n
def main():
    """Load the CSV data, run EM clustering and plot the resulting clusters."""
    global X, n, k, pre, mju, sigma, gamma
    parser = argparse.ArgumentParser(
        description='EM clustering program.')
    parser.add_argument('-k', type=int, default=3,
                        help='The number of clusters. Default is 3.')
    args = parser.parse_args()
    data = np.loadtxt('rawdata.csv', delimiter=',')
    X = data[:, 0:-1]  # the last CSV column is excluded from the features
    n = X.shape[0]
    k = args.k
    pre = [1 / k] * k                     # uniform initial mixture weights
    mju = randCent(X, k)                 # random initial centres (kmeans helper)
    sigma = [np.eye(X.shape[1]) for _ in range(k)]  # identity covariances
    gamma = np.zeros((n, k))
    step, like = EM()
    print('Total steps:', step)
    print('Likelihood:', like)
    print('Centers: \n', mju)
    print('Constituents: \n', gamma)
    # Scatter plot: one colour per cluster, centres marked with a star.
    colors=['black', 'blue', 'green', 'lime', 'maroon', 'olive', 'orange', 'purple', 'red', 'teal', 'yellow']
    np.random.shuffle(colors)
    major = np.argmax(gamma, axis=1)  # hard cluster assignment per sample
    for i in range(k):
        plt.scatter(mju[i, 0], mju[i, 1], c=colors[i], marker='*')
        members = X[np.where(major == i)]
        plt.scatter(members[:, 0], members[:, 1], c=colors[i])
    plt.show()
def EM():
    """Alternate E- and M-steps until the log-likelihood converges.

    Returns a (step_count, final_likelihood) tuple. Convergence is declared
    once the likelihood changes by at most 1e-6 between iterations.
    """
    previous = inf  # sentinel so the first iteration never converges
    steps = 0
    while True:
        estep()
        mstep()
        current = likelihood()
        steps += 1
        print(f'Likelihood after {steps} steps = {current}')
        if abs(current - previous) <= 1e-6:
            return steps, current
        previous = current
if __name__ == '__main__':
main()
| [
"math.exp",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.sum",
"numpy.argmax",
"matplotlib.pyplot.scatter",
"kmeans.randCent",
"numpy.zeros",
"numpy.where",
"numpy.linalg.inv",
"numpy.loadtxt",
"numpy.linalg.det",
"numpy.dot",
"numpy.eye",
"math.log",
"numpy.random.shuff... | [((679, 690), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (687, 690), True, 'import numpy as np\n'), ((1445, 1485), 'numpy.loadtxt', 'np.loadtxt', (['"""rawdata.csv"""'], {'delimiter': '""","""'}), "('rawdata.csv', delimiter=',')\n", (1455, 1485), True, 'import numpy as np\n'), ((1499, 1560), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""EM clustering program."""'}), "(description='EM clustering program.')\n", (1522, 1560), False, 'import argparse\n'), ((1863, 1877), 'kmeans.randCent', 'randCent', (['X', 'k'], {}), '(X, k)\n', (1871, 1877), False, 'from kmeans import randCent\n'), ((1941, 1957), 'numpy.zeros', 'np.zeros', (['(n, k)'], {}), '((n, k))\n', (1949, 1957), True, 'import numpy as np\n'), ((2238, 2263), 'numpy.random.shuffle', 'np.random.shuffle', (['colors'], {}), '(colors)\n', (2255, 2263), True, 'import numpy as np\n'), ((2276, 2300), 'numpy.argmax', 'np.argmax', (['gamma'], {'axis': '(1)'}), '(gamma, axis=1)\n', (2285, 2300), True, 'import numpy as np\n'), ((2505, 2515), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2513, 2515), True, 'import matplotlib.pyplot as plt\n'), ((385, 394), 'math.exp', 'exp', (['frac'], {}), '(frac)\n', (388, 394), False, 'from math import exp, inf, log, pi\n'), ((630, 636), 'math.log', 'log', (['w'], {}), '(w)\n', (633, 636), False, 'from math import exp, inf, log, pi\n'), ((994, 1013), 'numpy.sum', 'np.sum', (['gamma[:, i]'], {}), '(gamma[:, i])\n', (1000, 1013), True, 'import numpy as np\n'), ((1178, 1198), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (1186, 1198), True, 'import numpy as np\n'), ((1891, 1909), 'numpy.eye', 'np.eye', (['X.shape[1]'], {}), '(X.shape[1])\n', (1897, 1909), True, 'import numpy as np\n'), ((2331, 2389), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mju[i, 0]', 'mju[i, 1]'], {'c': 'colors[i]', 'marker': '"""*"""'}), "(mju[i, 0], mju[i, 1], c=colors[i], marker='*')\n", (2342, 2389), True, 'import 
matplotlib.pyplot as plt\n'), ((2442, 2500), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cluPoints[:, 0]', 'cluPoints[:, 1]'], {'c': 'colors[i]'}), '(cluPoints[:, 0], cluPoints[:, 1], c=colors[i])\n', (2453, 2500), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1053), 'numpy.dot', 'np.dot', (['gamma[:, i]', 'X'], {}), '(gamma[:, i], X)\n', (1037, 1053), True, 'import numpy as np\n'), ((2412, 2432), 'numpy.where', 'np.where', (['(major == i)'], {}), '(major == i)\n', (2420, 2432), True, 'import numpy as np\n'), ((345, 366), 'numpy.linalg.inv', 'np.linalg.inv', (['stdvar'], {}), '(stdvar)\n', (358, 366), True, 'import numpy as np\n'), ((425, 446), 'numpy.linalg.det', 'np.linalg.det', (['stdvar'], {}), '(stdvar)\n', (438, 446), True, 'import numpy as np\n'), ((1305, 1325), 'numpy.dot', 'np.dot', (['jcol', 'jcol.T'], {}), '(jcol, jcol.T)\n', (1311, 1325), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.