code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import options
import twoDtest
import numpy as np
opt = options.test_options()
opt.istest = 0

# First use the validation set to pick the best model checkpoint.
text_file = open(opt.dataset + "_progress.txt", "w")
# One AUC history per selection criterion; index i of each history aligns
# with epoch ran[i], which np.argmax relies on below.
aucs = [[], [], [], []]
ran = range(0, 400, 10)
for i in ran:
    opt.epochs = i
    roc_auc = twoDtest.main(opt)
    print(roc_auc)
    for crit in range(4):
        # roc_auc[crit] is a sequence; '+=' extends the per-criterion history.
        aucs[crit] += roc_auc[crit]
    text_file.write("%s %s %s %s %s\n" % (str(i), str(roc_auc[0]), str(roc_auc[1]), str(roc_auc[2]), str(roc_auc[3])))
text_file.close()
print('Validation Done')

# For each criterion, reload the best-epoch model and report val/test AUC.
for crit in range(4):
    best = np.argmax(np.array(aucs[crit]))
    opt.epochs = ran[best]
    opt.istest = 0
    # BUG FIX: twoDtest.main returns a non-string (it is indexed like a
    # sequence above); wrap in str() so '+' does not raise TypeError.
    print("AUC for criterion %d (val): " % (crit + 1) + str(twoDtest.main(opt)))
    opt.istest = 1
    print("AUC for criterion %d (test): " % (crit + 1) + str(twoDtest.main(opt)))
| [
"options.test_options",
"numpy.array",
"twoDtest.main"
] | [((57, 79), 'options.test_options', 'options.test_options', ([], {}), '()\n', (77, 79), False, 'import options\n'), ((301, 319), 'twoDtest.main', 'twoDtest.main', (['opt'], {}), '(opt)\n', (314, 319), False, 'import twoDtest\n'), ((633, 647), 'numpy.array', 'np.array', (['auc1'], {}), '(auc1)\n', (641, 647), True, 'import numpy as np\n'), ((864, 878), 'numpy.array', 'np.array', (['auc2'], {}), '(auc2)\n', (872, 878), True, 'import numpy as np\n'), ((1095, 1109), 'numpy.array', 'np.array', (['auc3'], {}), '(auc3)\n', (1103, 1109), True, 'import numpy as np\n'), ((1326, 1340), 'numpy.array', 'np.array', (['auc4'], {}), '(auc4)\n', (1334, 1340), True, 'import numpy as np\n'), ((720, 738), 'twoDtest.main', 'twoDtest.main', (['opt'], {}), '(opt)\n', (733, 738), False, 'import twoDtest\n'), ((792, 810), 'twoDtest.main', 'twoDtest.main', (['opt'], {}), '(opt)\n', (805, 810), False, 'import twoDtest\n'), ((951, 969), 'twoDtest.main', 'twoDtest.main', (['opt'], {}), '(opt)\n', (964, 969), False, 'import twoDtest\n'), ((1023, 1041), 'twoDtest.main', 'twoDtest.main', (['opt'], {}), '(opt)\n', (1036, 1041), False, 'import twoDtest\n'), ((1182, 1200), 'twoDtest.main', 'twoDtest.main', (['opt'], {}), '(opt)\n', (1195, 1200), False, 'import twoDtest\n'), ((1254, 1272), 'twoDtest.main', 'twoDtest.main', (['opt'], {}), '(opt)\n', (1267, 1272), False, 'import twoDtest\n'), ((1413, 1431), 'twoDtest.main', 'twoDtest.main', (['opt'], {}), '(opt)\n', (1426, 1431), False, 'import twoDtest\n'), ((1485, 1503), 'twoDtest.main', 'twoDtest.main', (['opt'], {}), '(opt)\n', (1498, 1503), False, 'import twoDtest\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import torch
from torch import nn
import numpy as np
from .line_ocr_engine import BaseEngineLineOCR
# scores_probs should be N,C,T, blank is last class
def greedy_decode_ctc(scores_probs, chars):
    """Greedy CTC decoding.

    scores_probs: tensor of shape (N, C, T); the blank symbol is the last class.
    chars: sequence mapping class index -> character.
    Returns a list of N decoded strings.
    """
    # Shift class ids by one so that 0 can serve as the "removed" marker.
    ids = torch.argmax(scores_probs, 1) + 1
    repeated = ids[:, :-1] == ids[:, 1:]
    ids = ids[:, 1:]
    ids[repeated] = 0                      # collapse runs of repeated symbols
    ids[ids == scores_probs.shape[1]] = 0  # drop blanks (last class)
    ids = ids.cpu().numpy() - 1
    decoded = []
    for row in ids:
        kept = row[np.nonzero(row >= 0)]
        decoded.append(''.join(chars[c] for c in kept))
    return decoded
class PytorchEngineLineOCR(BaseEngineLineOCR):
    """Pytorch back-end for the line OCR engine: loads a checkpointed network
    from PYTORCH_NETS and decodes line images with greedy CTC."""

    def __init__(self, json_def, gpu_id=0, batch_size=8):
        # BUG FIX: forward the caller's gpu_id and batch_size instead of the
        # previously hard-coded 0 and 8.
        super(PytorchEngineLineOCR, self).__init__(json_def, gpu_id=gpu_id, batch_size=batch_size)

        self.net_subsampling = 4
        self.characters = list(self.characters) + ['|']  # extra class appended as CTC blank
        # Honor the requested GPU (was hard-coded to cuda:0); fall back to CPU
        # when CUDA is unavailable.
        self.device = torch.device("cuda:{}".format(gpu_id) if torch.cuda.is_available() else "cpu")

        net = PYTORCH_NETS[self.net_name]
        self.model = net[0](num_classes=len(self.characters), in_height=self.line_px_height, **net[1])
        self.model.load_state_dict(torch.load(self.checkpoint, map_location=self.device))
        self.model = self.model.to(self.device)
        self.model = self.model.eval()

    def run_ocr(self, batch_data):
        """Run OCR on a uint8 image batch (numpy array).

        Returns (decoded strings, logits as a (N, T, C) numpy array).
        """
        with torch.no_grad():
            batch_data = torch.from_numpy(batch_data).to(self.device).float() / 255.0
            logits = self.model(batch_data)
            decoded = greedy_decode_ctc(logits, self.characters)
            logits = logits.permute(0, 2, 1).cpu().numpy()
        return decoded, logits
def create_vgg_block_2d(in_channels, out_channels, stride=(2, 2), layer_count=2, norm='bn'):
    """Build a VGG-style 2D block: `layer_count` 3x3 convs (with optional
    BatchNorm) followed by one MaxPool with the given stride.

    norm: 'bn' for Conv+BatchNorm+LeakyReLU, 'none' for Conv+LeakyReLU.
    Raises ValueError for an unknown norm.
    """
    layers = []
    for _ in range(layer_count):
        if norm == 'bn':
            layers += [
                nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
                torch.nn.BatchNorm2d(out_channels),
                torch.nn.LeakyReLU(),
            ]
        elif norm == 'none':
            layers += [
                nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
                torch.nn.LeakyReLU(),
            ]
        else:
            # BUG FIX: `raise "..."` raises TypeError in Python 3; raise a real
            # exception carrying the diagnostic instead of printing it.
            raise ValueError(f'Normalization "{norm}" is not implemented')
        in_channels = out_channels
    layers += [nn.MaxPool2d(kernel_size=stride, stride=stride)]
    return nn.Sequential(*layers)
def create_vgg_block_1d(in_channels, out_channels, stride=(2, 2), layer_count=2, norm='bn'):
    """Build a VGG-style 1D block: `layer_count` 3-wide convs with optional
    BatchNorm and LeakyReLU. NOTE: `stride` is accepted for signature parity
    with create_vgg_block_2d but is unused (no pooling in the 1D block).

    Raises ValueError for an unknown norm.
    """
    layers = []
    for _ in range(layer_count):
        if norm == 'bn':
            layers += [
                nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
                torch.nn.BatchNorm1d(out_channels),
                torch.nn.LeakyReLU(),
            ]
        elif norm == 'none':
            layers += [
                nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
                torch.nn.LeakyReLU(),
            ]
        else:
            # BUG FIX: `raise "..."` raises TypeError in Python 3; raise a real
            # exception carrying the diagnostic instead of printing it.
            raise ValueError(f'Normalization "{norm}" is not implemented')
        in_channels = out_channels
    return nn.Sequential(*layers)
class NET_VGG(nn.Module):
    """OCR network: VGG-style 2D conv front-end, a 1D conv block, a 2-layer
    bidirectional LSTM and a per-timestep classifier.

    forward() takes NHWC images and returns logits of shape (N, num_classes, T),
    which matches what greedy_decode_ctc expects.
    """
    def __init__(self, num_classes, in_height=32, base_channels=16, conv_blocks=4, subsampling=4, in_channels=3, layers_2d=None):
        super(NET_VGG, self).__init__()
        if layers_2d is None:
            layers_2d = 16
        if type(layers_2d) is int:
            # Seed the front-end with the first `layers_2d` feature layers of a
            # pretrained VGG16 (downloads weights on first use).
            import torchvision
            vgg = torchvision.models.vgg16(pretrained=True)
            layers_2d = list(vgg.features[:layers_2d])
        start_level = 0
        self.blocks_2d = []
        # Running horizontal/vertical subsampling factors accumulated so far.
        actual_subsampling_h = 1
        actual_subsampling_v = 1
        for layer in layers_2d:
            if type(layer) == torch.nn.modules.pooling.MaxPool2d:
                # Once the horizontal budget (`subsampling`) is spent, keep
                # pooling only vertically to preserve time resolution.
                if actual_subsampling_h < subsampling:
                    stride = (2, 2)
                else:
                    stride = (2, 1)
                self.blocks_2d += [nn.MaxPool2d(kernel_size=stride, stride=stride)]
                actual_subsampling_h *= stride[1]
                actual_subsampling_v *= stride[0]
                start_level += 1
            else:
                self.blocks_2d.append(layer)
                if type(layer) == torch.nn.modules.conv.Conv2d:
                    # Channel count produced by the last pretrained conv.
                    in_channels = layer.bias.shape[0]
        out_channels = in_channels
        # Append freshly-initialized conv blocks up to `conv_blocks` levels.
        for i in range(start_level, conv_blocks):
            out_channels = base_channels*(2**i)
            if actual_subsampling_h < subsampling:
                stride=(2, 2)
            else:
                stride=(2, 1)
            actual_subsampling_h *= stride[1]
            actual_subsampling_v *= stride[0]
            self.blocks_2d += [
                create_vgg_block_2d(in_channels, out_channels, stride=stride, norm='none'),
                torch.nn.BatchNorm2d(out_channels),
                ]
            in_channels = out_channels
        self.blocks_2d = nn.Sequential(*self.blocks_2d)
        self.block_1d = create_vgg_block_1d(in_channels , out_channels)
        # Despite the attribute name, this is an LSTM (bidirectional halves give
        # out_channels total features per timestep).
        self.gru = torch.nn.LSTM(out_channels, out_channels // 2, num_layers=2, bidirectional=True)
        self.output_layer = nn.Conv1d(out_channels, num_classes, kernel_size=3, stride=1, padding=1)
    def forward(self, x):
        # NHWC -> NCHW for the convolutions.
        x = x.permute(0, 3, 1, 2)
        out = self.blocks_2d(x)
        # Average over the (already reduced) height axis -> (N, C, T).
        out = torch.mean(out, 2)
        out = self.block_1d(out)
        # LSTM expects (T, N, C); restore (N, C, T) afterwards.
        out, _ = self.gru(out.permute(2, 0, 1))
        out = out.permute(1, 2, 0)
        out = self.output_layer(out)
        return out
class VGG_conv_module(nn.Module):
    """VGG-style 2D convolutional front-end (used by NET_VGG_LSTM).

    Optionally seeds its first layers from a pretrained VGG16 and limits
    horizontal subsampling to `subsampling` while height keeps halving.
    Exposes the resulting channel count as `self.out_channels`.
    """
    def __init__(self, base_channels=16, conv_blocks=4, subsampling=4, in_channels=3, layers_2d=None):
        super(VGG_conv_module, self).__init__()
        if layers_2d is None:
            layers_2d = 16
        if type(layers_2d) is int:
            # Seed with the first `layers_2d` feature layers of a pretrained
            # VGG16 (downloads weights on first use).
            import torchvision
            vgg = torchvision.models.vgg16(pretrained=True)
            layers_2d = list(vgg.features[:layers_2d])
        start_level = 0
        self.blocks_2d = []
        # Running horizontal/vertical subsampling factors accumulated so far.
        actual_subsampling_h = 1
        actual_subsampling_v = 1
        for layer in layers_2d:
            if type(layer) == torch.nn.modules.pooling.MaxPool2d:
                # Once the horizontal budget (`subsampling`) is spent, keep
                # pooling only vertically to preserve time resolution.
                if actual_subsampling_h < subsampling:
                    stride = (2, 2)
                else:
                    stride = (2, 1)
                self.blocks_2d += [nn.MaxPool2d(kernel_size=stride, stride=stride)]
                actual_subsampling_h *= stride[1]
                actual_subsampling_v *= stride[0]
                start_level += 1
            else:
                self.blocks_2d.append(layer)
                if type(layer) == torch.nn.modules.conv.Conv2d:
                    # Channel count produced by the last pretrained conv.
                    in_channels = layer.bias.shape[0]
        print('Pretrained layers')
        print(self.blocks_2d)
        out_channels = in_channels
        # Append freshly-initialized conv blocks up to `conv_blocks` levels.
        for i in range(start_level, conv_blocks):
            out_channels = base_channels*(2**i)
            if actual_subsampling_h < subsampling:
                stride = (2, 2)
            else:
                stride = (2, 1)
            actual_subsampling_h *= stride[1]
            actual_subsampling_v *= stride[0]
            self.blocks_2d += [
                create_vgg_block_2d(in_channels, out_channels, stride=stride, norm='none'),
                torch.nn.BatchNorm2d(out_channels),
                ]
            in_channels = out_channels
        self.blocks_2d = nn.Sequential(*self.blocks_2d)
        self.out_channels = out_channels
    def forward(self, x):
        # .contiguous() guards against non-contiguous inputs after permutes.
        return self.blocks_2d(x.contiguous())
class MultiscaleRecurrentBlock(nn.Module):
    """Parallel bidirectional LSTMs over progressively halved time scales.

    Each scale's output is upsampled back to full length, the scales are
    summed, and one final bidirectional LSTM fuses the result. Input and
    output are (N, channels, T); T must be divisible by 2**(scales-1).
    """

    def __init__(self, channels, layers_per_scale=2, scales=4):
        super(MultiscaleRecurrentBlock, self).__init__()
        self.layers = nn.ModuleList([torch.nn.LSTM(channels, channels // 2, num_layers=layers_per_scale, bidirectional=True)
                                     for _ in range(scales)])

        self.final_layer = torch.nn.LSTM(channels, channels // 2, num_layers=1, bidirectional=True)

    def forward(self, x):
        scale_outputs = []
        scaled = x
        for depth, lstm in enumerate(self.layers):
            if depth > 0:
                # Halve the temporal resolution for each additional scale.
                scaled = torch.nn.functional.max_pool1d(scaled, kernel_size=2, stride=2)
            out, _ = lstm(scaled.permute(2, 0, 1))
            out = out.permute(1, 2, 0)
            if depth > 0:
                # Upsample back to the original length before fusing.
                out = torch.nn.functional.interpolate(out, scale_factor=2**depth, mode='nearest')
            scale_outputs.append(out)

        fused = scale_outputs[0]
        for extra in scale_outputs[1:]:
            fused = fused + extra

        final, _ = self.final_layer(fused.permute(2, 0, 1))
        return final.permute(1, 2, 0)
class NET_VGG_LSTM(nn.Module):
    """OCR network: VGG conv front-end, multiscale recurrent block and a
    per-timestep classifier.

    forward() takes NHWC images and returns logits of shape
    (N, num_classes, T).
    """

    def __init__(self, num_classes, in_height=32, in_channels=3, dropout_rate=0.0, base_channels=16, conv_blocks=4,
                 subsampling=4, layers_2d=None):
        super(NET_VGG_LSTM, self).__init__()
        self.output_subsampling = subsampling

        # Convolutional feature extractor (optionally seeded from VGG16).
        self.blocks_2d = VGG_conv_module(base_channels=base_channels, conv_blocks=conv_blocks, subsampling=subsampling,
                                         in_channels=in_channels, layers_2d=layers_2d)
        rnn_channels = self.blocks_2d.out_channels
        self.recurrent_block = MultiscaleRecurrentBlock(rnn_channels, layers_per_scale=2, scales=3)
        self.output_layer = nn.Conv1d(rnn_channels, num_classes, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        # NHWC -> NCHW for the 2D convolutions.
        features = self.blocks_2d(x.permute(0, 3, 1, 2))
        # Collapse the height axis by taking its maximum -> (N, C, T).
        features, _ = torch.max(features, 2)
        features = self.recurrent_block(features)
        return self.output_layer(features)
# Registry mapping a checkpoint's net_name -> (network class, constructor kwargs);
# consumed by PytorchEngineLineOCR.__init__.
PYTORCH_NETS = {
    "VGG_B32_L16_S4_CB4": (NET_VGG, {'in_channels': 3, 'base_channels': 32, 'conv_blocks': 4, 'subsampling': 4, 'layers_2d': 16}),
    "VGG_LSTM_B64_L17_S4_CB4": (NET_VGG_LSTM, {'in_channels': 3, 'base_channels': 64, 'conv_blocks': 4, 'subsampling': 4, 'layers_2d': 17})
}
| [
"torch.argmax",
"torch.no_grad",
"torch.load",
"torch.nn.Conv1d",
"torchvision.models.vgg16",
"torch.nn.LSTM",
"torch.mean",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.BatchNorm2d",
"torch.max",
"torch.cuda.is_available",
"torch.nn.functional.max_pool1d",
"torch.nn.MaxPool2d",
... | [((2478, 2500), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2491, 2500), False, 'from torch import nn\n'), ((3247, 3269), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3260, 3269), False, 'from torch import nn\n'), ((272, 301), 'torch.argmax', 'torch.argmax', (['scores_probs', '(1)'], {}), '(scores_probs, 1)\n', (284, 301), False, 'import torch\n'), ((2418, 2465), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'stride', 'stride': 'stride'}), '(kernel_size=stride, stride=stride)\n', (2430, 2465), False, 'from torch import nn\n'), ((5085, 5115), 'torch.nn.Sequential', 'nn.Sequential', (['*self.blocks_2d'], {}), '(*self.blocks_2d)\n', (5098, 5115), False, 'from torch import nn\n'), ((5207, 5292), 'torch.nn.LSTM', 'torch.nn.LSTM', (['out_channels', '(out_channels // 2)'], {'num_layers': '(2)', 'bidirectional': '(True)'}), '(out_channels, out_channels // 2, num_layers=2, bidirectional=True\n )\n', (5220, 5292), False, 'import torch\n'), ((5316, 5388), 'torch.nn.Conv1d', 'nn.Conv1d', (['out_channels', 'num_classes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(out_channels, num_classes, kernel_size=3, stride=1, padding=1)\n', (5325, 5388), False, 'from torch import nn\n'), ((5496, 5514), 'torch.mean', 'torch.mean', (['out', '(2)'], {}), '(out, 2)\n', (5506, 5514), False, 'import torch\n'), ((7561, 7591), 'torch.nn.Sequential', 'nn.Sequential', (['*self.blocks_2d'], {}), '(*self.blocks_2d)\n', (7574, 7591), False, 'from torch import nn\n'), ((8073, 8145), 'torch.nn.LSTM', 'torch.nn.LSTM', (['channels', '(channels // 2)'], {'num_layers': '(1)', 'bidirectional': '(True)'}), '(channels, channels // 2, num_layers=1, bidirectional=True)\n', (8086, 8145), False, 'import torch\n'), ((9538, 9610), 'torch.nn.Conv1d', 'nn.Conv1d', (['rnn_channels', 'num_classes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(rnn_channels, num_classes, kernel_size=3, stride=1, 
padding=1)\n', (9547, 9610), False, 'from torch import nn\n'), ((9721, 9738), 'torch.max', 'torch.max', (['out', '(2)'], {}), '(out, 2)\n', (9730, 9738), False, 'import torch\n'), ((525, 546), 'numpy.nonzero', 'np.nonzero', (['(line >= 0)'], {}), '(line >= 0)\n', (535, 546), True, 'import numpy as np\n'), ((1172, 1225), 'torch.load', 'torch.load', (['self.checkpoint'], {'map_location': 'self.device'}), '(self.checkpoint, map_location=self.device)\n', (1182, 1225), False, 'import torch\n'), ((1363, 1378), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1376, 1378), False, 'import torch\n'), ((3610, 3651), 'torchvision.models.vgg16', 'torchvision.models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3634, 3651), False, 'import torchvision\n'), ((6016, 6057), 'torchvision.models.vgg16', 'torchvision.models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (6040, 6057), False, 'import torchvision\n'), ((954, 979), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (977, 979), False, 'import torch\n'), ((1874, 1946), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n', (1883, 1946), False, 'from torch import nn\n'), ((1964, 1998), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1984, 1998), False, 'import torch\n'), ((2016, 2036), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', ([], {}), '()\n', (2034, 2036), False, 'import torch\n'), ((2709, 2781), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n', (2718, 2781), False, 'from torch import nn\n'), ((2799, 2833), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['out_channels'], {}), '(out_channels)\n', (2819, 2833), False, 'import torch\n'), 
((2851, 2871), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', ([], {}), '()\n', (2869, 2871), False, 'import torch\n'), ((4966, 5000), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (4986, 5000), False, 'import torch\n'), ((7442, 7476), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (7462, 7476), False, 'import torch\n'), ((7910, 8001), 'torch.nn.LSTM', 'torch.nn.LSTM', (['channels', '(channels // 2)'], {'num_layers': 'layers_per_scale', 'bidirectional': '(True)'}), '(channels, channels // 2, num_layers=layers_per_scale,\n bidirectional=True)\n', (7923, 8001), False, 'import torch\n'), ((8353, 8421), 'torch.nn.functional.max_pool1d', 'torch.nn.functional.max_pool1d', (['scaled_data'], {'kernel_size': '(2)', 'stride': '(2)'}), '(scaled_data, kernel_size=2, stride=2)\n', (8383, 8421), False, 'import torch\n'), ((8568, 8645), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['out'], {'scale_factor': '(2 ** depth)', 'mode': '"""nearest"""'}), "(out, scale_factor=2 ** depth, mode='nearest')\n", (8599, 8645), False, 'import torch\n'), ((2121, 2193), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n', (2130, 2193), False, 'from torch import nn\n'), ((2211, 2231), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', ([], {}), '()\n', (2229, 2231), False, 'import torch\n'), ((2956, 3028), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n', (2965, 3028), False, 'from torch import nn\n'), ((3046, 3066), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', ([], {}), '()\n', (3064, 3066), False, 'import torch\n'), ((4108, 4155), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'stride', 'stride': 
'stride'}), '(kernel_size=stride, stride=stride)\n', (4120, 4155), False, 'from torch import nn\n'), ((6514, 6561), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': 'stride', 'stride': 'stride'}), '(kernel_size=stride, stride=stride)\n', (6526, 6561), False, 'from torch import nn\n'), ((1405, 1433), 'torch.from_numpy', 'torch.from_numpy', (['batch_data'], {}), '(batch_data)\n', (1421, 1433), False, 'import torch\n')] |
# ===== [IMPORT: Importing standard libraries] =====
import os.path
import pafy
import shutil
import json
import random
import datetime
# ===== [IMPORT: Importing external libraries] =====
from cv2 import cv2
import tensorflow as tf
import numpy as np
import pandas as pd
import moviepy.editor
#_______________________________________________________________________________________________________________
class VideoEmotionAnalyzer:
    def __init__(self) -> None:
        """Set up path/filename constants and reset the current-project state."""
        # Preview thumbnail dimensions in pixels.
        self.SETTINGS_PROJECT_PREVIEW_IMG_WIDTH = 60
        self.SETTINGS_PROJECT_PREVIEW_IMG_HEIGHT = 60
        # On-disk folder layout.
        self.FOLDER_ROOT = './' # '/'
        self.FOLDER_STATIC = self.FOLDER_ROOT + 'static/'
        self.FOLDER_MODEL = self.FOLDER_STATIC + 'model/'
        self.FOLDER_FRAMES = 'frames/'
        self.FOLDER_PROJECTS = 'projects/'
        self.FOLDER_PREDICTED_FRAMES = 'predicted_frames/'
        self.PATH_PROJECTS = self.FOLDER_STATIC + self.FOLDER_PROJECTS
        # Well-known per-project file names.
        self.FILE_NAME_IMG_PREVIEW = 'preview.jpg'
        self.FILE_NAME_PROJECT_INFO = 'project_info.json'
        self.FILE_NAME_DATA = 'data.csv'
        self.FILE_NAME_EMOJIS_DATA = 'emojisData.json'
        self.FILE_NAME_AUDIO = 'extracted_audio.mp3'
        self.FILE_PREFIX_ANALYZED = 'analyzed_'
        self.FILE_PREFIX_FRAME = 'frame_'
        self.AVAILABLE_VDIDEO_EXTENSIONS = ['.mp4'] #, '.avi']
        # State of the currently selected project (populated by select_project).
        self.CURRENT_PROJECT_name = None
        self.CURRENT_PROJECT_path = None
        self.CURRENT_PROJECT_original_video_file = None
        self.CURRENT_PROJECT_analyzed_video_file = None
        self.CURRENT_PROJECT_data_file = None
        self.CURRENT_PROJECT_emojis_data_file = None
        self.CURRENT_PROJECT_meta_data = {}
    # ========== [Build the filesystem path for a project] ==========
def get_path_by_project_name(self, project_name):
return self.PATH_PROJECTS + project_name + '/'
# ========== [Проверка на существование соответствующей директории для проекта] ==========
def is_project_has_own_folder(self, project_name):
path = self.get_path_by_project_name(project_name)
if os.path.exists(path):
return True, path
return False, path
# ========== [Проверка на существование директории frames для проекта] ==========
def is_project_has_frames_folder(self, project_name):
path = self.get_path_by_project_name(project_name) + self.FOLDER_FRAMES
if os.path.exists(path):
return True, path
return False, path
    # ========== [Check whether the project's predicted_frames folder exists] ==========
def is_project_has_predicted_frames_folder(self, project_name):
path = self.get_path_by_project_name(project_name) + self.FOLDER_PREDICTED_FRAMES
if os.path.exists(path):
return True, path
return False, path
    # ========== [Create a subfolder inside the project directory] ==========
def create_folder_for_project(self, project_name, folder_name):
folder_path = self.get_path_by_project_name(project_name) + folder_name
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return folder_path
# ========== [Получение данных проекта (также своего рода проверка на существование данных в проекте)] ==========
def get_project_data_info(self, project_name) -> object:
if not self.is_project_has_own_folder(project_name)[0]: # если как минимум нету папки проекта
return {} # False # то данных проекта не существует
project_info = {
'original_video_filename': None,
'analyzed_video_filename': None,
'prediction_data': None,
'emojis_data': None,
# 'original_video_duration': None, # TODO
# 'analyzed_video_duration': None # TODO
}
for current_filename in os.listdir(self.get_path_by_project_name(project_name)): # пройдемся по файлам
file_name, file_extension = os.path.splitext(current_filename) # извлечем имя и расширение файла
if file_extension in self.AVAILABLE_VDIDEO_EXTENSIONS: # если такой формат файла можно рассмотреть
if file_name.startswith('analyzed'): # если название файла начинается со слова analyzed
project_info.update({ 'analyzed_video_filename': current_filename })
if file_name == project_name: # если название файла совпадает с названием проекта
project_info.update({ 'original_video_filename': current_filename })
elif current_filename == self.FILE_NAME_DATA:
project_info.update({'prediction_data': current_filename})
# with open(self.get_path_by_project_name(project_name) + current_filename, "r") as file:
# project_info.update({'prediction_data': json.load(file)})
elif current_filename == self.FILE_NAME_EMOJIS_DATA:
project_info.update({'emojis_data': current_filename})
return project_info
# ========== [Загрузка метаданных проекта] ==========
def get_project_meta_data_info(self, project_path):
print(project_path)
result = {}
try:
with open(project_path + '/' + self.FILE_NAME_PROJECT_INFO) as json_file: # self.CURRENT_PROJECT_path + '/' + self.FILE_NAME_PROJECT_INFO
result = json.load(json_file)
except:
pass
return result
# ========== [Создать файл с информацией о проекте] ==========
    def make_project_info(self, video_file_path, project_path):
        """Compute the video's duration and the creation timestamp and write
        them (merged into CURRENT_PROJECT_meta_data) to project_info.json.

        Always returns True.
        """
        video = moviepy.editor.VideoFileClip(video_file_path)
        seconds_duration = video.duration
        # timedelta's str() gives "H:MM:SS[.ffffff]".
        normalized_duration = str(datetime.timedelta(seconds=seconds_duration))
        corrected_duration = normalized_duration.split(':')
        # Normalize each H/M/S field: zero-pad single digits, round away
        # fractional seconds.
        for i, time_value in enumerate(corrected_duration):
            print("\ttime_value:", time_value)
            if len(time_value) == 1 and int(time_value) < 10:
                corrected_duration[i] = '0' + time_value
            elif '.' in time_value:
                corrected_duration[i] = str(round(float(time_value)))
        corrected_duration = ':'.join(corrected_duration)
        print("[DURATION 2]:corrected_duration", corrected_duration)
        now = datetime.datetime.now()
        current_date_and_time = now.strftime("%d.%m.%Y %H:%M")
        self.CURRENT_PROJECT_meta_data.update({
            "created_date": current_date_and_time,
            "video_duration": corrected_duration
        })
        with open(project_path + '/' + self.FILE_NAME_PROJECT_INFO, "w", encoding="utf-8") as file:
            json.dump(self.CURRENT_PROJECT_meta_data, file, indent = 4, ensure_ascii = False)
        print("Made project info")
        return True
# ========== [Выбор текущего проекта] ==========
def select_project(self, project_name):
project_info = self.get_project_data_info(project_name)
if not project_info:
return False
self.CURRENT_PROJECT_name = project_name
self.CURRENT_PROJECT_path = self.get_path_by_project_name(project_name)
self.CURRENT_PROJECT_original_video_file = project_info['original_video_filename']
self.CURRENT_PROJECT_analyzed_video_file = project_info['analyzed_video_filename']
self.CURRENT_PROJECT_data_file = project_info['prediction_data']
self.CURRENT_PROJECT_emojis_data_file = project_info['emojis_data']
self.CURRENT_PROJECT_meta_data = self.get_project_meta_data_info(self.CURRENT_PROJECT_path)
return True
# ========== [Создать дефолтное Изображение "превью проекта"] ==========
def make_default_project_preview_by_video(self, video_file_path, project_path):
# Картинка превью проекта
capture = cv2.VideoCapture(video_file_path)
frames_amount = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
chosen_frame = random.randint(24, frames_amount)
capture.set(cv2.CAP_PROP_POS_FRAMES, chosen_frame) # возьмем рандомный chosen_frame кадр
ret, frame = capture.read()
if ret:
# img = cv2.imread(frame)
frame = cv2.resize(frame, (self.SETTINGS_PROJECT_PREVIEW_IMG_WIDTH, self.SETTINGS_PROJECT_PREVIEW_IMG_HEIGHT))
cv2.imwrite(project_path + '/' + self.FILE_NAME_IMG_PREVIEW, frame) # создадим изображение кадра
print("Preview for the project was successfully created")
capture.release()
cv2.destroyAllWindows()
# ========== [Создание проекта] ==========
def create_project(self, project_name):
project_path = self.get_path_by_project_name(project_name)
if not self.is_project_has_own_folder(project_name)[0]: # если у проекта нету папки
os.makedirs( project_path ) # создадим папку
return project_path
# ========== [Создание проекта по скачиванию видео из YouTube] ==========
    def create_project_from_youtube(self, url):
        """Create a project from a YouTube URL: download the best-quality mp4
        into a project folder named after the video title, generate a preview
        image and the metadata file.

        Returns the project name (the video title) — also when the project
        already had a video and nothing was downloaded.
        """
        vPafy = pafy.new(url)
        video = vPafy.getbest(preftype="mp4")
        project_name = video.title
        project_info = self.get_project_data_info(project_name) # look up any existing project data
        if not project_info or project_info['original_video_filename'] is None: # no info yet, or no video downloaded for this project
            pjct_path = self.create_project(project_name) # create the project folder
            # print("DOWNLOAD INTO:", pjct_path)
            video_file_path = pjct_path + video.title + "." + video.extension
            # print("video_file_path:", video_file_path)
            video.download(filepath = video_file_path) # download the video
            # return video_file_path
            self.make_default_project_preview_by_video(video_file_path, pjct_path) # project preview image
            self.make_project_info(video_file_path, pjct_path)
        return project_name # return the project name
    # ========== [Create a project from a locally uploaded video file] ==========
def create_project_local_storage(self, file):
project_name, file_extension = os.path.splitext(file.filename)
if file_extension in self.AVAILABLE_VDIDEO_EXTENSIONS: # если такой формат файла можно рассмотреть
project_info = self.get_project_data_info(project_name) # извлекаем информацию о проекте
if not project_info or project_info['original_video_filename'] is None: # если информации нет, либо видео для проекта не загружено
pjct_path = self.create_project(project_name) # создаем проект
video_file_path = pjct_path + file.filename
file.save(video_file_path) # сохраняем видеофайл
self.make_default_project_preview_by_video(video_file_path, pjct_path) # Картинка превью проекта
self.make_project_info(video_file_path, pjct_path)
return project_name # возвращаем название проекта
# ========== [Обзор имеющихся проектов (папок)] ==========
def browse_all_projects(self):
projects_list = []
projects_paths = []
if os.path.exists(self.PATH_PROJECTS):
projects_list = os.listdir(self.PATH_PROJECTS) #TODO what if there is no video file
for project in projects_list:
projects_paths.append(
os.path.join(self.PATH_PROJECTS, project)
)
print("path_concatenation has been done:", projects_paths)
return projects_list, projects_paths
    # ========== [Extract the audio track of a video] ==========
    def extract_audio_of_video(self, video_file_path = None):
        """Extract the audio track of the current project's video (or of
        *video_file_path* when given) to the project's extracted_audio.mp3.

        Returns (True, path_to_audio_file).
        """
        video_file_path = video_file_path or self.CURRENT_PROJECT_path + '/' + self.CURRENT_PROJECT_original_video_file
        new_audio_file_path = self.get_path_by_project_name(self.CURRENT_PROJECT_name) + self.FILE_NAME_AUDIO
        video_file_audio = moviepy.editor.AudioFileClip(video_file_path)
        video_file_audio.write_audiofile(new_audio_file_path)
        return True, new_audio_file_path
# ========== [Разбиение видео на кадры] ==========
    def cut_video_into_frames(self, video_file_path = None):
        """Split the current project's video (or *video_file_path*) into
        sequential JPEG images frames/frame_<i>.jpg."""
        frames_folder_exists, frames_folder_path = self.is_project_has_frames_folder(self.CURRENT_PROJECT_name)
        video_file_path = video_file_path or self.CURRENT_PROJECT_path + '/' + self.CURRENT_PROJECT_original_video_file
        capture = cv2.VideoCapture(video_file_path)
        total_frames_amount = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        try:
            if frames_folder_exists:
                # shutil.rmtree(frames_folder_path) # previously: clear frames left from an earlier run
                # os.makedirs(frames_folder_path)
                pass
            else:
                os.makedirs(frames_folder_path) # create the folder if it did not exist yet
        except OSError:
            print ('Error: Creating directory of data')
        print("\tCutting video into frames...")
        print("\tTotal amount of frames:", total_frames_amount)
        created_frame_counter = 0
        while(True):
            # Read the video file frame by frame
            print("\t\tcurrent frame is:", created_frame_counter, "/", total_frames_amount, end='\r')
            ret, frame = capture.read()
            # if another frame is available, write it out as an image:
            if ret:
                frame_file_name = frames_folder_path + "frame_" + str(created_frame_counter) + '.jpg'
                cv2.imwrite(frame_file_name, frame) # write the frame image
                created_frame_counter += 1 # count frames actually written
            # otherwise
            else:
                break # stop the loop
        print("\tFrames were successfully created:", created_frame_counter, "/", total_frames_amount)
        capture.release()
        cv2.destroyAllWindows()
# ========== [Сборка видео из кадров] ==========
def build_video_from_frames(self, video_file_path):
print("\tBuilding analyzed video from frames...")
_, predicted_frames_folder_path = self.is_project_has_predicted_frames_folder(self.CURRENT_PROJECT_name)
video_file_path = video_file_path or self.CURRENT_PROJECT_path + '/' + self.CURRENT_PROJECT_original_video_file
capture = cv2.VideoCapture(video_file_path)
video_file_fps = capture.get(cv2.CAP_PROP_FPS)
predicted_images = []
print("predicted_frames_folder_path:", predicted_frames_folder_path)
for frame_number in range( len(os.listdir( predicted_frames_folder_path )) ):
frame_file_name = predicted_frames_folder_path + "frame_" + str(frame_number) + '.jpg'
current_frame_image = cv2.imread(frame_file_name)
height, width, layers = current_frame_image.shape
size = (width, height)
predicted_images.append(current_frame_image)
print("\t\treading frame №", frame_number, "|frame_file_name:", frame_file_name, end='\r')
original_video_file_name, original_video_extension = os.path.splitext(self.CURRENT_PROJECT_original_video_file)
new_video_file_name = self.FILE_PREFIX_ANALYZED + original_video_file_name + '.avi' # с '.mp4' как-то не заладилось, поэтому пришлось использовать .avi
new_video_file_path = self.CURRENT_PROJECT_path + '/' + new_video_file_name
fourcc = cv2.VideoWriter_fourcc(*'MPEG') # выбираем 4-byte code codec # ранее: 'avc1' 'mp4v', 'MPEG', 'H264', 'X264'
predicted_video = cv2.VideoWriter(
# "analyzed_output.mp4",
new_video_file_path,
fourcc,
video_file_fps,
size
)
for i in range(len(predicted_images)):
print("\t\tmerging frame №", frame_number, end='\r')
predicted_video.write(predicted_images[i])
predicted_video.release()
cv2.destroyAllWindows()
print("\n\tVideo has been successfully created:", new_video_file_name)
return True, new_video_file_path, video_file_fps
    # ========== [Merge the rebuilt video with its audio track] ==========
    def merge_video_with_audio(self, video_file_path, audio_file_path, fps):
        """Mux *audio_file_path* into *video_file_path*, writing a new
        analyzed_*.mp4 next to the input video; the temporary input video is
        deleted afterwards.

        Returns (True, path_to_merged_video).
        """
        print("\n\tMerging audio with video:\n\t\tVideo file:\n\t\t", video_file_path, "\n\t\tAudio file:\n\t\t", audio_file_path)
        target_video = moviepy.editor.VideoFileClip(video_file_path)
        target_audio = moviepy.editor.AudioFileClip(audio_file_path)
        merged_video_file_folder_path = os.path.dirname(video_file_path)
        merged_video_file_name, _ = os.path.splitext(os.path.basename(video_file_path))
        new_merged_video_file_path = merged_video_file_folder_path + '/' + self.FILE_PREFIX_ANALYZED + merged_video_file_name + '.mp4'
        merged_video_file = target_video.set_audio(target_audio)
        merged_video_file.write_videofile(new_merged_video_file_path, fps = fps)
        os.remove(video_file_path) # remove the old temporary .avi file
        return True, new_merged_video_file_path
# ========== [Распознавание изображения] ==========
    def predict(self, frames_folder_path = None):
        """Run emotion recognition over every extracted frame of the project.

        Loads the Keras model and a Haar face detector, classifies each frame,
        annotates every detected face in place (the frame files on disk are
        overwritten), and persists per-frame emotion data as JSON plus a CSV of
        raw model predictions.

        :param frames_folder_path: folder with frames named 'frame_<n>.jpg';
            defaults to the current project's frames folder.
        :return: tuple (all_preds, pred_stats, emojisFrameData) where all_preds
            is the list of raw model outputs per frame, pred_stats counts
            Positive/Negative/Neutral face detections, and emojisFrameData maps
            frame index -> list of emotion labels (one per detected face).
        """
        previewDone = False
        frames_folder_path = frames_folder_path or self.CURRENT_PROJECT_path + self.FOLDER_FRAMES
        categories = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"]
        print("Predicting result..")
        model = tf.keras.models.load_model(self.FOLDER_MODEL + 'model.h5')
        # NOTE(review): Keras input shapes are (batch, height, width, channels);
        # the w/h naming below assumes a square model input -- confirm if non-square.
        _,w,h,_ = model.input.shape # here
        def feature_extrator_fun(img):
            # Resize to the model input and convert BGR (OpenCV) -> RGB.
            resized_image = cv2.resize(img, (w,h))
            resized_image = cv2.cvtColor(resized_image,cv2.COLOR_BGR2RGB)
            x=resized_image.astype(np.float32)
            # Subtract the per-channel means (Caffe/VGG-style preprocessing).
            x[..., 0] -= 103.939
            x[..., 1] -= 116.779
            x[..., 2] -= 123.68
            x = np.expand_dims(x, axis=0)
            features = model.predict(x)
            return features[0]
        pred="none"
        pred_stats={"Positive": 0, "Negative": 0, "Neutral": 0}
        # NOTE(review): json_preds is built but never filled or read below.
        json_preds = {
            "angry": [],
            "disgust": [],
            "fear": [],
            "happy": [],
            "neutral": [],
            "sad": [],
            "surprise": []
        } #[]
        pos=0
        neg=0
        neu=0
        all_preds = []
        facec = cv2.CascadeClassifier(self.FOLDER_MODEL + 'haarcascade_frontalface_default.xml')
        font = cv2.FONT_HERSHEY_SIMPLEX
        emojisFrameData = {} # emojisFrameData = []
        total_frames_amount = len(os.listdir(frames_folder_path))
        for count in range(total_frames_amount):
            frame_filename = frames_folder_path + '/frame_' + str(count) + '.jpg' # path of the extracted frame image
            frame = cv2.imread(frame_filename) # load the frame image with OpenCV
            feats=feature_extrator_fun(frame)
            pred = categories[np.argmax(feats)]
            all_preds.append(feats)
            gray_fr = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            face_locations = facec.detectMultiScale(gray_fr, 1.3, 5, minSize=(w, h)) # find all faces in the current frame
            face=0
            # emojisFrameData.append( [] ) # append an array that would hold data for every face found in the current frame
            for top, right, bottom, left in face_locations:
                # Draw a box around the face
                face=face+1
                frame_second = count#/24%60 -- NOTE(review): named "second" but holds the frame index
                if frame_second in emojisFrameData:
                    emojisFrameData[frame_second].append(pred.lower())
                else:
                    emojisFrameData[frame_second] = [pred.lower()]
                # Box colour in BGR: red = negative, blue = neutral, green = positive.
                rgb_value = (255,255,255)
                if pred=="Negative" or pred=="Angry" or pred=="Disgust" or pred=="Fear" or pred=="Sad":
                    rgb_value=(0,0,255)
                    neg+=1
                elif pred=="Neutral":
                    rgb_value=(255,0,0)
                    neu+=1
                elif pred=="Positive" or pred=="Surprise" or pred=="Happy" :
                    rgb_value=(0,255,0)
                    pos+=1
                # Project preview: save a thumbnail of the first detected face once.
                if not previewDone:
                    sub_face = frame[right:(right+left), top:(top+w)] # [right+150:right+left + 150, top+150:top+w + 150]
                    previewFrame = cv2.resize(sub_face, (50, 50))
                    cv2.imwrite(self.CURRENT_PROJECT_path + self.FILE_NAME_IMG_PREVIEW, previewFrame)
                    print("Preview has been updated")
                    previewDone = True
                cv2.putText(frame, pred, (top,right), font, 1, rgb_value, 2)
                cv2.rectangle(frame, (top,right), (top+bottom,right+left), rgb_value, 2)
            # Overwrite the frame on disk with the annotated version.
            if os.path.isfile(frame_filename):
                cv2.imwrite(frame_filename, frame)
        pred_stats["Positive"]=pos
        pred_stats["Negative"]=neg
        pred_stats["Neutral"]=neu
        with open(self.CURRENT_PROJECT_path + '/' + self.FILE_NAME_EMOJIS_DATA, "w", encoding="utf-8") as file:
            json.dump(emojisFrameData, file, indent = 4, ensure_ascii = False)
        predictionsDataFrame = pd.DataFrame(all_preds, columns=[category.lower() for category in categories])
        predictionsDataFrame.to_csv(self.CURRENT_PROJECT_path + self.FILE_NAME_DATA, sep=',', index=False, header=True)
        return all_preds, pred_stats, emojisFrameData
"numpy.argmax",
"cv2.cv2.rectangle",
"cv2.cv2.cvtColor",
"cv2.cv2.CascadeClassifier",
"cv2.cv2.destroyAllWindows",
"cv2.cv2.putText",
"random.randint",
"pafy.new",
"cv2.cv2.VideoWriter",
"cv2.cv2.resize",
"datetime.timedelta",
"datetime.datetime.now",
"json.dump",
"tensorflow.keras.models.... | [((6445, 6468), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6466, 6468), False, 'import datetime\n'), ((7965, 7998), 'cv2.cv2.VideoCapture', 'cv2.VideoCapture', (['video_file_path'], {}), '(video_file_path)\n', (7981, 7998), False, 'from cv2 import cv2\n'), ((8089, 8122), 'random.randint', 'random.randint', (['(24)', 'frames_amount'], {}), '(24, frames_amount)\n', (8103, 8122), False, 'import random\n'), ((8665, 8688), 'cv2.cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8686, 8688), False, 'from cv2 import cv2\n'), ((9182, 9195), 'pafy.new', 'pafy.new', (['url'], {}), '(url)\n', (9190, 9195), False, 'import pafy\n'), ((12614, 12647), 'cv2.cv2.VideoCapture', 'cv2.VideoCapture', (['video_file_path'], {}), '(video_file_path)\n', (12630, 12647), False, 'from cv2 import cv2\n'), ((14091, 14114), 'cv2.cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (14112, 14114), False, 'from cv2 import cv2\n'), ((14536, 14569), 'cv2.cv2.VideoCapture', 'cv2.VideoCapture', (['video_file_path'], {}), '(video_file_path)\n', (14552, 14569), False, 'from cv2 import cv2\n'), ((15642, 15673), 'cv2.cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MPEG'"], {}), "(*'MPEG')\n", (15664, 15673), False, 'from cv2 import cv2\n'), ((15776, 15842), 'cv2.cv2.VideoWriter', 'cv2.VideoWriter', (['new_video_file_path', 'fourcc', 'video_file_fps', 'size'], {}), '(new_video_file_path, fourcc, video_file_fps, size)\n', (15791, 15842), False, 'from cv2 import cv2\n'), ((16165, 16188), 'cv2.cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (16186, 16188), False, 'from cv2 import cv2\n'), ((17678, 17736), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (["(self.FOLDER_MODEL + 'model.h5')"], {}), "(self.FOLDER_MODEL + 'model.h5')\n", (17704, 17736), True, 'import tensorflow as tf\n'), ((18602, 18687), 'cv2.cv2.CascadeClassifier', 'cv2.CascadeClassifier', (["(self.FOLDER_MODEL + 
'haarcascade_frontalface_default.xml')"], {}), "(self.FOLDER_MODEL + 'haarcascade_frontalface_default.xml'\n )\n", (18623, 18687), False, 'from cv2 import cv2\n'), ((5830, 5874), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'seconds_duration'}), '(seconds=seconds_duration)\n', (5848, 5874), False, 'import datetime\n'), ((6805, 6882), 'json.dump', 'json.dump', (['self.CURRENT_PROJECT_meta_data', 'file'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(self.CURRENT_PROJECT_meta_data, file, indent=4, ensure_ascii=False)\n', (6814, 6882), False, 'import json\n'), ((8340, 8447), 'cv2.cv2.resize', 'cv2.resize', (['frame', '(self.SETTINGS_PROJECT_PREVIEW_IMG_WIDTH, self.\n SETTINGS_PROJECT_PREVIEW_IMG_HEIGHT)'], {}), '(frame, (self.SETTINGS_PROJECT_PREVIEW_IMG_WIDTH, self.\n SETTINGS_PROJECT_PREVIEW_IMG_HEIGHT))\n', (8350, 8447), False, 'from cv2 import cv2\n'), ((8455, 8522), 'cv2.cv2.imwrite', 'cv2.imwrite', (["(project_path + '/' + self.FILE_NAME_IMG_PREVIEW)", 'frame'], {}), "(project_path + '/' + self.FILE_NAME_IMG_PREVIEW, frame)\n", (8466, 8522), False, 'from cv2 import cv2\n'), ((14954, 14981), 'cv2.cv2.imread', 'cv2.imread', (['frame_file_name'], {}), '(frame_file_name)\n', (14964, 14981), False, 'from cv2 import cv2\n'), ((17848, 17871), 'cv2.cv2.resize', 'cv2.resize', (['img', '(w, h)'], {}), '(img, (w, h))\n', (17858, 17871), False, 'from cv2 import cv2\n'), ((17899, 17945), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['resized_image', 'cv2.COLOR_BGR2RGB'], {}), '(resized_image, cv2.COLOR_BGR2RGB)\n', (17911, 17945), False, 'from cv2 import cv2\n'), ((18106, 18131), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (18120, 18131), True, 'import numpy as np\n'), ((19025, 19051), 'cv2.cv2.imread', 'cv2.imread', (['frame_filename'], {}), '(frame_filename)\n', (19035, 19051), False, 'from cv2 import cv2\n'), ((19256, 19295), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, 
cv2.COLOR_BGR2GRAY)\n', (19268, 19295), False, 'from cv2 import cv2\n'), ((21467, 21529), 'json.dump', 'json.dump', (['emojisFrameData', 'file'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(emojisFrameData, file, indent=4, ensure_ascii=False)\n', (21476, 21529), False, 'import json\n'), ((5482, 5502), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (5491, 5502), False, 'import json\n'), ((13705, 13740), 'cv2.cv2.imwrite', 'cv2.imwrite', (['frame_file_name', 'frame'], {}), '(frame_file_name, frame)\n', (13716, 13740), False, 'from cv2 import cv2\n'), ((19179, 19195), 'numpy.argmax', 'np.argmax', (['feats'], {}), '(feats)\n', (19188, 19195), True, 'import numpy as np\n'), ((20976, 21037), 'cv2.cv2.putText', 'cv2.putText', (['frame', 'pred', '(top, right)', 'font', '(1)', 'rgb_value', '(2)'], {}), '(frame, pred, (top, right), font, 1, rgb_value, 2)\n', (20987, 21037), False, 'from cv2 import cv2\n'), ((21053, 21131), 'cv2.cv2.rectangle', 'cv2.rectangle', (['frame', '(top, right)', '(top + bottom, right + left)', 'rgb_value', '(2)'], {}), '(frame, (top, right), (top + bottom, right + left), rgb_value, 2)\n', (21066, 21131), False, 'from cv2 import cv2\n'), ((21190, 21224), 'cv2.cv2.imwrite', 'cv2.imwrite', (['frame_filename', 'frame'], {}), '(frame_filename, frame)\n', (21201, 21224), False, 'from cv2 import cv2\n'), ((20733, 20763), 'cv2.cv2.resize', 'cv2.resize', (['sub_face', '(50, 50)'], {}), '(sub_face, (50, 50))\n', (20743, 20763), False, 'from cv2 import cv2\n'), ((20784, 20869), 'cv2.cv2.imwrite', 'cv2.imwrite', (['(self.CURRENT_PROJECT_path + self.FILE_NAME_IMG_PREVIEW)', 'previewFrame'], {}), '(self.CURRENT_PROJECT_path + self.FILE_NAME_IMG_PREVIEW,\n previewFrame)\n', (20795, 20869), False, 'from cv2 import cv2\n')] |
import argparse
import torch
import os
import platform
import sys
import math
import cv2
import numpy as np
from api.setup import SingleImageAlphaPose, get_args
from detector.apis import get_detector
from tqdm import tqdm
def key_sort(element):
    """Sort key for frame filenames shaped like '<index>.<ext>': the numeric stem."""
    stem, _sep, _rest = element.partition('.')
    return int(stem)
def evaluate(image_files_path, outputpath, save_img, save_keypts, save_json):
    """Run AlphaPose single-image keypoint estimation on every frame in a folder.

    Frames are processed in numeric filename order.  For frames where no pose is
    found, a zero (17, 2) keypoint array is stored and the frame index is
    recorded; the collected indices are saved to 'faulty_image_idx.npz'.

    :param image_files_path: folder of input frames named '<index>.<ext>'
    :param outputpath: output folder (a 'vis' subfolder is created)
    :param save_img: if True, write a visualised pose image per frame
    :param save_keypts: if True, write an .npz of int32 keypoints per frame
    :param save_json: if True, append AlphaPose JSON results per frame
    """
    if not os.path.exists(outputpath + '/vis'):
        os.makedirs(outputpath + '/vis')
    # Create Network.
    args, cfg = get_args()
    demo = SingleImageAlphaPose(args, cfg)
    image_files = [os.path.join(image_files_path, x) for x in sorted(os.listdir(image_files_path), key=key_sort)]
    # image_files = ['../tf-pose-estimation/uplara_tops_data/421.jpg']
    # import ipdb; ipdb.set_trace()
    faulty_images = []
    count = 0
    for im_name in tqdm(image_files):
        # Give path of the image.
        image = cv2.cvtColor(cv2.imread(im_name), cv2.COLOR_BGR2RGB)
        # Estimate key-points and scores.
        pose = demo.process(im_name, image)
        if pose is None:
            key_points = np.zeros((17, 2))
            faulty_images.append(count)
        else:
            # Access key-points and store them.
            key_points = pose['result'][0]['keypoints'].detach().cpu().numpy()
            score = pose['result'][0]['kp_score'].detach().cpu().numpy()
            # Zero out keypoints whose confidence is at or below 0.4.
            score_binary = score > 0.4
            key_points = key_points * score_binary
        key_points = key_points.astype(np.int32)
        if save_keypts: np.savez(os.path.join(outputpath, 'vis', os.path.basename(im_name).split('.')[0]+'.npz'), key_points = key_points)
        if save_img:
            img = cv2.cvtColor(cv2.imread(im_name), cv2.COLOR_BGR2RGB)
            img = demo.vis(img, pose) # visualize the pose result
            cv2.imwrite(os.path.join(outputpath, 'vis', os.path.basename(im_name)), cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            # if you want to vis the img:
            # import matplotlib.pyplot as plt
            # plt.imshow(img)
            # plt.show()
        # write the result to json:
        if save_json:
            result = [pose]
            demo.writeJson(result, outputpath, form=args.format, for_eval=args.eval)
        count += 1
    np.savez(os.path.join(outputpath, 'faulty_image_idx.npz'), data=np.array(faulty_images))
if __name__ == "__main__":
    # Input frames and output location for the pose-estimation run.
    image_files_path = '../tf-pose-estimation/uplara_tops_data'
    outputpath = 'examples/res/'
    # Toggle which artifacts are persisted per frame.
    save_img = False
    save_keypts = True
    save_json = False
    evaluate(image_files_path, outputpath, save_img, save_keypts, save_json)
| [
"tqdm.tqdm",
"os.makedirs",
"os.path.basename",
"cv2.cvtColor",
"api.setup.SingleImageAlphaPose",
"os.path.exists",
"numpy.zeros",
"api.setup.get_args",
"cv2.imread",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((475, 485), 'api.setup.get_args', 'get_args', ([], {}), '()\n', (483, 485), False, 'from api.setup import SingleImageAlphaPose, get_args\n'), ((494, 525), 'api.setup.SingleImageAlphaPose', 'SingleImageAlphaPose', (['args', 'cfg'], {}), '(args, cfg)\n', (514, 525), False, 'from api.setup import SingleImageAlphaPose, get_args\n'), ((787, 804), 'tqdm.tqdm', 'tqdm', (['image_files'], {}), '(image_files)\n', (791, 804), False, 'from tqdm import tqdm\n'), ((370, 405), 'os.path.exists', 'os.path.exists', (["(outputpath + '/vis')"], {}), "(outputpath + '/vis')\n", (384, 405), False, 'import os\n'), ((409, 441), 'os.makedirs', 'os.makedirs', (["(outputpath + '/vis')"], {}), "(outputpath + '/vis')\n", (420, 441), False, 'import os\n'), ((542, 575), 'os.path.join', 'os.path.join', (['image_files_path', 'x'], {}), '(image_files_path, x)\n', (554, 575), False, 'import os\n'), ((2009, 2057), 'os.path.join', 'os.path.join', (['outputpath', '"""faulty_image_idx.npz"""'], {}), "(outputpath, 'faulty_image_idx.npz')\n", (2021, 2057), False, 'import os\n'), ((857, 876), 'cv2.imread', 'cv2.imread', (['im_name'], {}), '(im_name)\n', (867, 876), False, 'import cv2\n'), ((1008, 1025), 'numpy.zeros', 'np.zeros', (['(17, 2)'], {}), '((17, 2))\n', (1016, 1025), True, 'import numpy as np\n'), ((2064, 2087), 'numpy.array', 'np.array', (['faulty_images'], {}), '(faulty_images)\n', (2072, 2087), True, 'import numpy as np\n'), ((592, 620), 'os.listdir', 'os.listdir', (['image_files_path'], {}), '(image_files_path)\n', (602, 620), False, 'import os\n'), ((1526, 1545), 'cv2.imread', 'cv2.imread', (['im_name'], {}), '(im_name)\n', (1536, 1545), False, 'import cv2\n'), ((1699, 1735), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1711, 1735), False, 'import cv2\n'), ((1671, 1696), 'os.path.basename', 'os.path.basename', (['im_name'], {}), '(im_name)\n', (1687, 1696), False, 'import os\n'), ((1413, 1438), 'os.path.basename', 'os.path.basename', 
(['im_name'], {}), '(im_name)\n', (1429, 1438), False, 'import os\n')] |
import numpy as np
from numpy.linalg import inv
#random y of type int32
# Random integer targets y in [1, 5), shape (20, 1).
y = np.random.randint(1, 5, (20, 1), dtype='int32')
# Random 20x20 design matrix with standard-normal entries
# (mean and standard deviation can be changed here).
x = np.random.normal(0, 1.0, (20, 20))
xtrans = x.transpose()
# Ordinary least squares via the normal equation:
#   theta = (x^T x)^{-1} x^T y
# (the previously computed, unused inverse of x itself was removed)
a = np.matmul(xtrans, x)
b = np.matmul(xtrans, y)
j = inv(a)
theta = np.matmul(j, b)
print(theta)
| [
"numpy.matmul",
"numpy.random.randint",
"numpy.linalg.inv",
"numpy.random.normal"
] | [((76, 123), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)', '(20, 1)'], {'dtype': '"""int32"""'}), "(1, 5, (20, 1), dtype='int32')\n", (93, 123), True, 'import numpy as np\n'), ((137, 171), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1.0)', '(20, 20)'], {}), '(0, 1.0, (20, 20))\n', (153, 171), True, 'import numpy as np\n'), ((231, 237), 'numpy.linalg.inv', 'inv', (['x'], {}), '(x)\n', (234, 237), False, 'from numpy.linalg import inv\n'), ((292, 312), 'numpy.matmul', 'np.matmul', (['xtrans', 'x'], {}), '(xtrans, x)\n', (301, 312), True, 'import numpy as np\n'), ((314, 334), 'numpy.matmul', 'np.matmul', (['xtrans', 'y'], {}), '(xtrans, y)\n', (323, 334), True, 'import numpy as np\n'), ((336, 342), 'numpy.linalg.inv', 'inv', (['a'], {}), '(a)\n', (339, 342), False, 'from numpy.linalg import inv\n'), ((351, 366), 'numpy.matmul', 'np.matmul', (['j', 'b'], {}), '(j, b)\n', (360, 366), True, 'import numpy as np\n')] |
import gw_stripping
import math
import fire
import sys
import numpy as np
import pandas
def er(msg):
    """Abort with a uniform fatal-error message."""
    raise Exception("Fatal error:" + str(msg))
def write_res(res, out):
    """Serialise the result rows as bare CSV (no header, no index) to a stream."""
    frame = pandas.DataFrame(data=res)
    out.write(frame.to_csv(header=None, index=False))
def run(m1, m2, r1, a0, eta, rel, max_time):
    """Integrate the final evolution of a close NS binary (stripping model).

    Stage 1: GW-driven inspiral until the low-mass NS fills its Roche lobe.
    Stage 2: stable mass transfer, advanced with a damped Newton scheme.

    :param m1: mass of the neutron star (M_solar)
    :param m2: mass of the low-mass NS (M_solar), m2 < m1
    :param r1: radius of the NS (km)
    :param a0: initial separation (km)
    :param eta: (K0*L**2)**(1/3) auxiliary EOS parameter (MeV)
    :param rel: 'on'/'off' relativistic correction for the Roche lobe
    :param max_time: stop stage 2 once its physical duration exceeds this (s)
    :return: (res1, res2) -- res1 rows are [t (s), separation (km), q, m2, L_GW];
        res2 rows are [t (s), L_nu].
    """
    M = m1+m2 # total mass
    q = m2/M # mass ratio (not according to Bisicalo!)
    x1 = 2.953286590373 # 2*G*M_solar/km*c**2
    t0 = math.sqrt(2*a0/(x1*M))
    t0 = t0*a0/(299792.458) # the characteristic time scale in s
    E0 = x1*M**2/(2*a0)*1.7871746729e+54 # the characteristic energy in erg
    L_gw = (2/5)*(x1/a0)**4
    L_gw = (L_gw/a0)*5.357734049e+59 # the characteristic GW luminosity in erg/s
    alpha_G = (2*x1*M/a0)**(5/2)/5 # parameter
    eps = 1e-8
    f, f_q, f_a = gw_stripping.roche(q, rel, M, a0)
    r2, m2_r2, r2_m2 = gw_stripping.mass_to_radius(m2, eta)
    if r2 > a0*f:
        er('error: start R2 > R_roche, increase start distance a0!')
    # NOTE: from here on x1/x2 are reused for the dimensionless separation a/a0.
    if (rel == 'off'):
        # start (dimensionless) distance of mass transfer
        a1 = r2/(a0*f)
        t1 = (1-a1**4)/(8*q*(1-q)*alpha_G)
    else:
        # Fixed-point iteration for the contact separation with the
        # relativistic Roche-lobe correction switched on.
        x1 = 10
        x2 = 1
        while abs((x2-x1)/x1) > eps:
            x1 = x2
            a = x1*a0
            f, f_q, f_a = gw_stripping.roche(q, rel, M, a)
            x2 = 1-a*f/r2
            x2 = x2*r2/(a0*(f+f_a))+x1
        a1 = x2 # start (dimensionless) distance of mass transfer (rel = 'on')
        t1 = (1-a1**4)/(8*q*(1-q)*alpha_G)
    # Stage 1: pure GW inspiral, tabulated on N1 equal time steps up to t1.
    N1 = 1000
    t = 0
    x2 = 1
    LG = M**5*q**2*(1-q)**2/x2**5
    LG = LG*L_gw
    tau1 = t1/N1
    res_init = [-t1*t0, a0, q, m2, LG]
    t_array = np.linspace(0, t1, N1)
    x2_array = np.empty(N1)
    for i, x in enumerate(t_array): x2_array[i] = ((1-(8*q*(1-q)*alpha_G*x))**(1/4)) * a0
    x1 = x2_array[-1] / a0
    time_array = np.empty(N1)
    for i, x in enumerate(t_array): time_array[i] = (x-t1)*t0
    q_array = np.full(N1, q)
    m2_array = np.full(N1, m2)
    LG_array = np.empty(N1)
    for i, x in enumerate(x2_array): LG_array[i] = (M**5*q**2*(1-q)**2/(x/a0)**5) * L_gw
    res1 = list(zip(time_array, x2_array, q_array, m2_array, LG_array))
    time2_array = np.empty(N1 + 1)
    for i, x in enumerate(t_array): time2_array[i+1] = (x/2 - t1) * t0
    # NOTE(review): L_nu is allocated but never filled before this zip, so the
    # initial res2 entries hold uninitialised values -- confirm this is intended.
    L_nu = np.empty(N1)
    res2 = list(zip(time2_array, L_nu[1:-1]))
    ###########################################################################
    # Stage 2: R2=R_roche, stable mass transfer
    t = t1
    tau2 = tau1
    chi = 0.55 # parameter of the scheme
    a = x1*a0
    f, f_q, f_a = gw_stripping.roche(q, rel, M, a)
    r2 = a0*x1*f
    m2, m2_r2, r2_m2 = gw_stripping.radius_to_mass(r2, eta)
    q1 = q
    stab_corr = 1+f_a/f
    stab = f_q*q1/f-2*(1-2*q1)*stab_corr/(1-q1)
    corr = 0.005
    while r2_m2 > (stab+corr): # stability testing
        delta = 10
        q2 = q1*(1-1e-6)
        x2 = x1*(1+1e-6)
        # solving the system of nonlinear equations f1 and f2 (damped Newton)
        while delta > eps:
            a = x2*a0
            f, f_q, f_a = gw_stripping.roche(q2, rel, M, a)
            r2 = a0*x2*f
            m2, m2_r2, r2_m2 = gw_stripping.radius_to_mass(r2, eta)
            q_2 = 1-q2
            f1 = 1/(q2*q_2*math.sqrt(x2))
            f1 = f1-1/(q1*(1-q1)*math.sqrt(x1))
            f1 = f1-alpha_G*tau2*(chi/x2**(9/2)+(1-chi)/x1**(9/2))
            f2 = q2-m2/M
            # Jacobian entries of (f1, f2) w.r.t. (x2, q2).
            f1_x2 = -1/(2*q2*q_2*x2**(3/2))
            f1_x2 = f1_x2+9*alpha_G*tau2*chi/(2*x2**(11/2))
            f1_q2 = (2*q2-1)/(math.sqrt(x2)*(q2*q_2)**2)
            f2_x2 = -m2_r2*m2/(M*x2)
            f2_x2 = f2_x2-m2_r2*m2*f_a/(M*x2*f)
            f2_q2 = 1-m2_r2*m2*f_q/(M*f)
            delta_x = f2*f1_q2-f1*f2_q2
            delta_x = delta_x/(f1_x2*f2_q2-f2_x2*f1_q2)
            delta_q = (-f2-f2_x2*delta_x)/f2_q2
            # Damp the Newton step when the relative update is large.
            if abs(delta_x/x2) > 0.1 or abs(delta_q/q2) > 0.1:
                k = min(abs(delta_x/x2), abs(delta_q/q2), 0.1)
            else:
                k = 1
            x2 = x2+delta_x*k
            q2 = q2+delta_q*k
            delta = math.sqrt(delta_x**2+delta_q**2)
            delta = delta/math.sqrt(x2**2+q2**2)
        t = t+tau2
        # Calculations of luminosities
        LG = M**5*q2**2*(1-q2)**2/x2**5
        LG = LG*L_gw
        L_nu = (1-(q1+q2)/2)*(q1-q2)/tau2
        L_nu = L_nu*(E0*a0/(t0*r1))
        line = ((t-t1)*t0, x2*a0, q2, q2*M, LG)
        res1.append(list(line))
        line = ((t-tau2/2-t1)*t0, L_nu)
        res2.append(list(line))
        # Adaptive step control: double when changes are tiny, halve when large.
        if (abs((q2-q1)/q1) < 1.e-4 and abs((x2-x1)/x1) < 1.e-4):
            tau2 = tau2*2
        if (abs((q2-q1)/q1) > 5.e-3 or abs((x2-x1)/x1) > 5.e-3):
            tau2 = tau2/2
        x1 = x2
        q1 = q2
        a = x2*a0
        f, f_q, f_a = gw_stripping.roche(q2, rel, M, a)
        r2 = a0*x2*f
        m2, m2_r2, r2_m2 = gw_stripping.radius_to_mass(r2, eta)
        stab_corr = 1+f_a/f
        stab = f_q*q2/f-2*(1-2*q2)*stab_corr/(1-q2)
        if ((t-t1)*t0 > max_time):
            print(
                'Calculations were stopped because time of stable mass transfer > ', max_time, 'sec !', file=sys.stderr)
            break
    return res1, res2
out1_path_default = "stripping_dist_mass.dat"
out2_path_default = "stripping_rad.dat"


def main(m1=1.4, m2=0.3, r1=10, a0=100, eta=110, rel="off",
         max_time=1, out1_path=out1_path_default, out2_path=out2_path_default):
    """
    Main function
    Final evolution of close neutron star binary, stripping model
    input variables:
        m1(float): mass of neutron star (NS) in M_solar
        m2(float): mass of low-mass NS in M_solar
        r1(float): radius of NS in km
        a0(float): initial distance between NSs in km
        out1_path: stripping dist mass output file
        out2_path: stripping rad output file
    additional parameters:
        eta(float): (K0*L**2)**(1/3), auxiliary parameter in MeV (see Sotani et al.)
        rel(str): 'on' or 'off', relativistic correction for the Roche lobe
        max_time(float): stopping time in s
    output files:
        'stripping.dat' - [time in s; distance between NSs in km; q=m2/M; m2 in M_solar; GW luminosity in erg/s]
        'stripping_rad.dat' - [time in s; nutrino luminosity in erq/s]
    note:
        see file description_rus.pdf for details
    """
    # Argument validation: er() raises on the first violated constraint.
    if rel not in ("on", "off"):
        er("wrong rel")
    if m1 < m2:
        er('m1 must be more than m2!')
    if eta > 200:
        er('error: eta must be less than 200!')
    if eta < 60:
        er("error: eta must be more than 60!")
    if not (m2 > 0.1 and m2 < 0.8):
        er('error: m2 must be in (0.1, 0.8)!')
    # The former `m2 < 0` and `m2 > 1` branches were unreachable: both cases are
    # already rejected by the (0.1, 0.8) range check above, so they were removed.
    if m1 > 2.3:
        print('Warning, m1 have a mass of BH!')
    # Run the integration and write both result tables.
    with open(out1_path, 'w') as out1:
        with open(out2_path, 'w') as out2:
            res1, res2 = run(m1=m1, m2=m2, r1=r1, a0=a0, eta=eta, rel=rel, max_time=max_time)
            write_res(res1, out1)
            write_res(res2, out2)
if __name__ == "__main__":
    # Example of an equivalent direct call (bypassing the Fire CLI):
    # m1=1.4
    # m2=0.3
    # r1=10
    # a0=100
    # eta=110
    # rel="on"
    # max_time=1
    # run(m1=m1, m2=m2, r1=r1, a0=a0, eta=eta, rel=rel, max_time=max_time)
    # Expose main() as a command-line interface via python-fire.
    fire.Fire(main)
"pandas.DataFrame",
"numpy.full",
"gw_stripping.roche",
"fire.Fire",
"math.sqrt",
"numpy.empty",
"numpy.linspace",
"gw_stripping.mass_to_radius",
"gw_stripping.radius_to_mass"
] | [((180, 206), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'data': 'res'}), '(data=res)\n', (196, 206), False, 'import pandas\n'), ((528, 556), 'math.sqrt', 'math.sqrt', (['(2 * a0 / (x1 * M))'], {}), '(2 * a0 / (x1 * M))\n', (537, 556), False, 'import math\n'), ((909, 942), 'gw_stripping.roche', 'gw_stripping.roche', (['q', 'rel', 'M', 'a0'], {}), '(q, rel, M, a0)\n', (927, 942), False, 'import gw_stripping\n'), ((966, 1002), 'gw_stripping.mass_to_radius', 'gw_stripping.mass_to_radius', (['m2', 'eta'], {}), '(m2, eta)\n', (993, 1002), False, 'import gw_stripping\n'), ((1765, 1787), 'numpy.linspace', 'np.linspace', (['(0)', 't1', 'N1'], {}), '(0, t1, N1)\n', (1776, 1787), True, 'import numpy as np\n'), ((1804, 1816), 'numpy.empty', 'np.empty', (['N1'], {}), '(N1)\n', (1812, 1816), True, 'import numpy as np\n'), ((1952, 1964), 'numpy.empty', 'np.empty', (['N1'], {}), '(N1)\n', (1960, 1964), True, 'import numpy as np\n'), ((2046, 2060), 'numpy.full', 'np.full', (['N1', 'q'], {}), '(N1, q)\n', (2053, 2060), True, 'import numpy as np\n'), ((2076, 2091), 'numpy.full', 'np.full', (['N1', 'm2'], {}), '(N1, m2)\n', (2083, 2091), True, 'import numpy as np\n'), ((2108, 2120), 'numpy.empty', 'np.empty', (['N1'], {}), '(N1)\n', (2116, 2120), True, 'import numpy as np\n'), ((2303, 2319), 'numpy.empty', 'np.empty', (['(N1 + 1)'], {}), '(N1 + 1)\n', (2311, 2319), True, 'import numpy as np\n'), ((2403, 2415), 'numpy.empty', 'np.empty', (['N1'], {}), '(N1)\n', (2411, 2415), True, 'import numpy as np\n'), ((2721, 2753), 'gw_stripping.roche', 'gw_stripping.roche', (['q', 'rel', 'M', 'a'], {}), '(q, rel, M, a)\n', (2739, 2753), False, 'import gw_stripping\n'), ((2794, 2830), 'gw_stripping.radius_to_mass', 'gw_stripping.radius_to_mass', (['r2', 'eta'], {}), '(r2, eta)\n', (2821, 2830), False, 'import gw_stripping\n'), ((7444, 7459), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (7453, 7459), False, 'import fire\n'), ((4910, 4943), 'gw_stripping.roche', 
'gw_stripping.roche', (['q2', 'rel', 'M', 'a'], {}), '(q2, rel, M, a)\n', (4928, 4943), False, 'import gw_stripping\n'), ((4992, 5028), 'gw_stripping.radius_to_mass', 'gw_stripping.radius_to_mass', (['r2', 'eta'], {}), '(r2, eta)\n', (5019, 5028), False, 'import gw_stripping\n'), ((1385, 1417), 'gw_stripping.roche', 'gw_stripping.roche', (['q', 'rel', 'M', 'a'], {}), '(q, rel, M, a)\n', (1403, 1417), False, 'import gw_stripping\n'), ((3204, 3237), 'gw_stripping.roche', 'gw_stripping.roche', (['q2', 'rel', 'M', 'a'], {}), '(q2, rel, M, a)\n', (3222, 3237), False, 'import gw_stripping\n'), ((3294, 3330), 'gw_stripping.radius_to_mass', 'gw_stripping.radius_to_mass', (['r2', 'eta'], {}), '(r2, eta)\n', (3321, 3330), False, 'import gw_stripping\n'), ((4217, 4255), 'math.sqrt', 'math.sqrt', (['(delta_x ** 2 + delta_q ** 2)'], {}), '(delta_x ** 2 + delta_q ** 2)\n', (4226, 4255), False, 'import math\n'), ((4276, 4304), 'math.sqrt', 'math.sqrt', (['(x2 ** 2 + q2 ** 2)'], {}), '(x2 ** 2 + q2 ** 2)\n', (4285, 4304), False, 'import math\n'), ((3382, 3395), 'math.sqrt', 'math.sqrt', (['x2'], {}), '(x2)\n', (3391, 3395), False, 'import math\n'), ((3672, 3685), 'math.sqrt', 'math.sqrt', (['x2'], {}), '(x2)\n', (3681, 3685), False, 'import math\n'), ((3430, 3443), 'math.sqrt', 'math.sqrt', (['x1'], {}), '(x1)\n', (3439, 3443), False, 'import math\n')] |
#%%
import pandas as pd
import numpy as np
from datetime import datetime
import os
import pickle
import matplotlib.pyplot as plt
import copy
exec(open('../../env_vars.py').read())  # defines the dir_* environment variables read below
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
dir_code_methods = os.environ['dir_code_methods']
#%%
###############################################################################
# Dictionaries for latent variable models
###############################################################################
# Load the pickled Self-Report and Random-EMA observation dictionaries; both
# are keyed by participant id and then by study day.
filename = os.path.join(os.path.realpath(dir_picklejar), 'observed_dict_selfreport')
infile = open(filename,'rb')
dict_selfreport = pickle.load(infile)
infile.close()
filename = os.path.join(os.path.realpath(dir_picklejar), 'observed_dict_random_ema')
infile = open(filename,'rb')
dict_random_ema = pickle.load(infile)
infile.close()
#%%
###############################################################################
# Create a data frame with records of start & end of day timestamps
# for each participant-day
###############################################################################
# output of this script is the data frame data_day_limits
exec(open(os.path.join(os.path.realpath(dir_code_methods), 'tie_together', 'setup-day-limits.py')).read())
data_reference = data_day_limits.loc[:,['participant_id','study_day']].groupby('participant_id').count().reset_index()
data_reference = data_reference.rename(columns = {'study_day':'max_study_day'})
# SANITY CHECK
#data_reference['max_study_day'].value_counts() # this is equal to 14
#%%
###############################################################################
# Knit together various data streams
###############################################################################
all_participant_id = data_hq_episodes['id'].drop_duplicates()
all_participant_id.index = np.array(range(0,len(all_participant_id.index)))
all_dict = {}
# %%
# For each participant-day (days 1..14), merge Self-Report and Random-EMA
# records into one chronologically sorted frame, dropping duplicate timestamps.
for i in range(0, len(all_participant_id)):
    current_participant = all_participant_id[i]
    current_dict = {}
    for j in range(1, 15):
        this_study_day = j
        # Let's work with selfreport first ########################################
        current_dict_selfreport = dict_selfreport[current_participant][j]
        if len(current_dict_selfreport['hours_since_start_day'])==0:
            tmp_selfreport = pd.DataFrame({})
        else:
            tmp_selfreport = pd.DataFrame({'assessment_type':'selfreport',
                                          'hours_since_start_day': current_dict_selfreport['hours_since_start_day'],
                                          'smoke': 'Yes',
                                          'when_smoke': current_dict_selfreport['message']
                                          })
        # Now let's work with Random EMA ##########################################
        current_dict_random_ema = dict_random_ema[current_participant][j]
        if len(current_dict_random_ema['hours_since_start_day'])==0:
            tmp_random_ema = pd.DataFrame({})
        else:
            tmp_random_ema = pd.DataFrame({'assessment_type':'random_ema',
                                          'hours_since_start_day': current_dict_random_ema['hours_since_start_day'],
                                          'smoke': current_dict_random_ema['smoke'],
                                          'when_smoke': current_dict_random_ema['when_smoke']
                                          })
        # Now, let's concatenate ##################################################
        frames = [tmp_selfreport, tmp_random_ema]
        result = pd.concat(frames)
        if len(result.index) > 0:
            # important step to sort according to hours_since_start_day
            result.sort_values(by=['hours_since_start_day'], inplace=True)
            result['hours_since_start_day_shifted'] = result['hours_since_start_day'].shift(periods=+1)
            result['hours_since_start_day_shifted'] = np.where(pd.isna(result['hours_since_start_day_shifted']), 0, result['hours_since_start_day_shifted'])
            result['time_between'] = result['hours_since_start_day'] - result['hours_since_start_day_shifted']
            # Drop rows whose timestamp equals the previous row's (duplicates).
            which_not_duplicate = (result['time_between']!=0)
            which_idx = np.where(which_not_duplicate)
            result = result.iloc[which_idx]
        # Combine information into a dictionary ###################################
        new_dict = {this_study_day: result}
        current_dict.update(new_dict)
    # Update participant ########################################################
    all_dict.update({current_participant:current_dict})
# %%
# Rename columns and add a zero-based within-day assessment order.
clean_data = copy.deepcopy(all_dict)
for participant in clean_data.keys():
    for days in clean_data[participant].keys():
        current_data = clean_data[participant][days]
        if len(current_data.index)>0:
            current_data['assessment_order'] = np.arange(len(current_data.index)) # begins with zero (not 1)
            current_data = current_data.rename(columns = {'assessment_type':'assessment_type',
                                                          'assessment_order':'assessment_order',
                                                          'hours_since_start_day':'assessment_begin',
                                                          'hours_since_start_day_shifted':'assessment_begin_shifted',
                                                          'smoke':'smoke',
                                                          'when_smoke':'windowtag'})
            clean_data[participant][days] = current_data
# %%
# Now, let's convert each PERSON-DAY of clean_data into a dictionary
for participant in clean_data.keys():
    for days in clean_data[participant].keys():
        current_data = clean_data[participant][days]
        if len(current_data.index)>0:
            current_dict = {'participant_id':participant,
                            'study_day': days,
                            'assessment_order': np.array(current_data['assessment_order']),
                            'assessment_type': np.array(current_data['assessment_type']),
                            'assessment_begin_shifted': np.array(current_data['assessment_begin_shifted']),
                            'assessment_begin': np.array(current_data['assessment_begin']),
                            'smoke': np.array(current_data['smoke']),
                            'windowtag': np.array(current_data['windowtag'])}
            clean_data[participant][days] = current_dict
        else:
            # Empty day: keep the same keys with empty arrays for consistency.
            current_dict = {'participant_id':participant,
                            'study_day': days,
                            'assessment_order': np.array([]),
                            'assessment_type': np.array([]),
                            'assessment_begin_shifted': np.array([]),
                            'assessment_begin': np.array([]),
                            'smoke': np.array([]),
                            'windowtag': np.array([])}
            clean_data[participant][days] = current_dict
#%%
# Persist the final dictionary for downstream latent-variable models.
filename = os.path.join(os.path.realpath(dir_picklejar), 'observed_dict_all_ema')
outfile = open(filename, 'wb')
pickle.dump(clean_data, outfile)
outfile.close()
"pandas.DataFrame",
"copy.deepcopy",
"pickle.dump",
"os.path.realpath",
"pickle.load",
"numpy.where",
"numpy.array",
"pandas.isna",
"pandas.concat"
] | [((649, 668), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (660, 668), False, 'import pickle\n'), ((817, 836), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (828, 836), False, 'import pickle\n'), ((4501, 4524), 'copy.deepcopy', 'copy.deepcopy', (['all_dict'], {}), '(all_dict)\n', (4514, 4524), False, 'import copy\n'), ((7043, 7075), 'pickle.dump', 'pickle.dump', (['clean_data', 'outfile'], {}), '(clean_data, outfile)\n', (7054, 7075), False, 'import pickle\n'), ((541, 572), 'os.path.realpath', 'os.path.realpath', (['dir_picklejar'], {}), '(dir_picklejar)\n', (557, 572), False, 'import os\n'), ((709, 740), 'os.path.realpath', 'os.path.realpath', (['dir_picklejar'], {}), '(dir_picklejar)\n', (725, 740), False, 'import os\n'), ((6954, 6985), 'os.path.realpath', 'os.path.realpath', (['dir_picklejar'], {}), '(dir_picklejar)\n', (6970, 6985), False, 'import os\n'), ((3501, 3518), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (3510, 3518), True, 'import pandas as pd\n'), ((2327, 2343), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (2339, 2343), True, 'import pandas as pd\n'), ((2377, 2573), 'pandas.DataFrame', 'pd.DataFrame', (["{'assessment_type': 'selfreport', 'hours_since_start_day':\n current_dict_selfreport['hours_since_start_day'], 'smoke': 'Yes',\n 'when_smoke': current_dict_selfreport['message']}"], {}), "({'assessment_type': 'selfreport', 'hours_since_start_day':\n current_dict_selfreport['hours_since_start_day'], 'smoke': 'Yes',\n 'when_smoke': current_dict_selfreport['message']})\n", (2389, 2573), True, 'import pandas as pd\n'), ((2944, 2960), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (2956, 2960), True, 'import pandas as pd\n'), ((2994, 3225), 'pandas.DataFrame', 'pd.DataFrame', (["{'assessment_type': 'random_ema', 'hours_since_start_day':\n current_dict_random_ema['hours_since_start_day'], 'smoke':\n current_dict_random_ema['smoke'], 'when_smoke': current_dict_random_ema\n 
['when_smoke']}"], {}), "({'assessment_type': 'random_ema', 'hours_since_start_day':\n current_dict_random_ema['hours_since_start_day'], 'smoke':\n current_dict_random_ema['smoke'], 'when_smoke': current_dict_random_ema\n ['when_smoke']})\n", (3006, 3225), True, 'import pandas as pd\n'), ((4115, 4144), 'numpy.where', 'np.where', (['which_not_duplicate'], {}), '(which_not_duplicate)\n', (4123, 4144), True, 'import numpy as np\n'), ((3841, 3889), 'pandas.isna', 'pd.isna', (["result['hours_since_start_day_shifted']"], {}), "(result['hours_since_start_day_shifted'])\n", (3848, 3889), True, 'import pandas as pd\n'), ((5849, 5891), 'numpy.array', 'np.array', (["current_data['assessment_order']"], {}), "(current_data['assessment_order'])\n", (5857, 5891), True, 'import numpy as np\n'), ((5940, 5981), 'numpy.array', 'np.array', (["current_data['assessment_type']"], {}), "(current_data['assessment_type'])\n", (5948, 5981), True, 'import numpy as np\n'), ((6039, 6089), 'numpy.array', 'np.array', (["current_data['assessment_begin_shifted']"], {}), "(current_data['assessment_begin_shifted'])\n", (6047, 6089), True, 'import numpy as np\n'), ((6139, 6181), 'numpy.array', 'np.array', (["current_data['assessment_begin']"], {}), "(current_data['assessment_begin'])\n", (6147, 6181), True, 'import numpy as np\n'), ((6220, 6251), 'numpy.array', 'np.array', (["current_data['smoke']"], {}), "(current_data['smoke'])\n", (6228, 6251), True, 'import numpy as np\n'), ((6294, 6329), 'numpy.array', 'np.array', (["current_data['windowtag']"], {}), "(current_data['windowtag'])\n", (6302, 6329), True, 'import numpy as np\n'), ((6555, 6567), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6563, 6567), True, 'import numpy as np\n'), ((6616, 6628), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6624, 6628), True, 'import numpy as np\n'), ((6686, 6698), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6694, 6698), True, 'import numpy as np\n'), ((6748, 6760), 'numpy.array', 'np.array', 
(['[]'], {}), '([])\n', (6756, 6760), True, 'import numpy as np\n'), ((6799, 6811), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6807, 6811), True, 'import numpy as np\n'), ((6854, 6866), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6862, 6866), True, 'import numpy as np\n'), ((1194, 1228), 'os.path.realpath', 'os.path.realpath', (['dir_code_methods'], {}), '(dir_code_methods)\n', (1210, 1228), False, 'import os\n')] |
import cv2
import numpy as np
from .TFLiteFaceDetection import UltraLightFaceDetecion
from .TFLiteFaceAlignment import DenseFaceReconstruction, DepthFacialLandmarks
from .CtypesMeshRender import TrianglesMeshRender
def rotationMatrixToEulerAngles(R):
'''
Ref: https://stackoverflow.com/a/15029416
'''
sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
if sy < 1e-6:
x = np.arctan2(-R[1, 2], R[1, 1])
y = np.arctan2(-R[2, 0], sy)
z = 0
else:
x = np.arctan2(R[2, 1], R[2, 2])
y = np.arctan2(-R[2, 0], sy)
z = np.arctan2(R[1, 0], R[0, 0])
return np.degrees([x, y, z])
def build_projection_matrix(rear_size, factor=np.sqrt(2)):
rear_depth = 0
front_size = front_depth = factor * rear_size
projections = np.array([
[-rear_size, -rear_size, rear_depth],
[-rear_size, rear_size, rear_depth],
[rear_size, rear_size, rear_depth],
[rear_size, -rear_size, rear_depth],
[-front_size, -front_size, front_depth],
[-front_size, front_size, front_depth],
[front_size, front_size, front_depth],
[front_size, -front_size, front_depth],
], dtype=np.float32)
return projections
def draw_projection(frame, R, landmarks, color, thickness=2):
# build projection matrix
radius = np.max(np.max(landmarks, 0) - np.min(landmarks, 0)) // 2
projections = build_projection_matrix(radius)
# refine rotate matrix
rotate_matrix = R[:, :2]
rotate_matrix[:, 1] *= -1
# 3D -> 2D
center = np.mean(landmarks[:27], axis=0)
points = projections @ rotate_matrix + center
points = points.astype(np.int32)
# draw poly
cv2.polylines(frame, np.take(points, [
[0, 1], [1, 2], [2, 3], [3, 0],
[0, 4], [1, 5], [2, 6], [3, 7],
[4, 5], [5, 6], [6, 7], [7, 4]
], axis=0), False, color, thickness, cv2.LINE_AA)
def draw_poly(frame, landmarks, color=(128, 255, 255), thickness=1):
cv2.polylines(frame, [
landmarks[:17],
landmarks[17:22],
landmarks[22:27],
landmarks[27:31],
landmarks[31:36]
], False, color, thickness=thickness)
cv2.polylines(frame, [
landmarks[36:42],
landmarks[42:48],
landmarks[48:60],
landmarks[60:]
], True, color, thickness=thickness)
def sparse(frame, results, color):
landmarks = np.round(results[0]).astype(np.int)
for p in landmarks:
cv2.circle(frame, tuple(p), 2, color, 0, cv2.LINE_AA)
draw_poly(frame, landmarks, color=color)
def dense(frame, results, color):
landmarks = np.round(results[0]).astype(np.int)
for p in landmarks[::6, :2]:
cv2.circle(frame, tuple(p), 1, color, 0, cv2.LINE_AA)
def mesh(frame, results, color):
landmarks = results[0].astype(np.float32)
color.render(landmarks.copy(), frame)
def pose(frame, results, color):
landmarks, params = results
# rotate matrix
R = params[:3, :3].copy()
# decompose matrix to ruler angle
euler = rotationMatrixToEulerAngles(R)
#print(f"Pitch: {euler[0]}; Yaw: {euler[1]}; Roll: {euler[2]};")
draw_projection(frame, R, landmarks, color)
return euler
| [
"numpy.arctan2",
"cv2.polylines",
"numpy.degrees",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.take",
"numpy.min",
"numpy.round",
"numpy.sqrt"
] | [((325, 361), 'numpy.sqrt', 'np.sqrt', (['(R[0, 0] ** 2 + R[1, 0] ** 2)'], {}), '(R[0, 0] ** 2 + R[1, 0] ** 2)\n', (332, 361), True, 'import numpy as np\n'), ((615, 636), 'numpy.degrees', 'np.degrees', (['[x, y, z]'], {}), '([x, y, z])\n', (625, 636), True, 'import numpy as np\n'), ((685, 695), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (692, 695), True, 'import numpy as np\n'), ((786, 1139), 'numpy.array', 'np.array', (['[[-rear_size, -rear_size, rear_depth], [-rear_size, rear_size, rear_depth],\n [rear_size, rear_size, rear_depth], [rear_size, -rear_size, rear_depth],\n [-front_size, -front_size, front_depth], [-front_size, front_size,\n front_depth], [front_size, front_size, front_depth], [front_size, -\n front_size, front_depth]]'], {'dtype': 'np.float32'}), '([[-rear_size, -rear_size, rear_depth], [-rear_size, rear_size,\n rear_depth], [rear_size, rear_size, rear_depth], [rear_size, -rear_size,\n rear_depth], [-front_size, -front_size, front_depth], [-front_size,\n front_size, front_depth], [front_size, front_size, front_depth], [\n front_size, -front_size, front_depth]], dtype=np.float32)\n', (794, 1139), True, 'import numpy as np\n'), ((1548, 1579), 'numpy.mean', 'np.mean', (['landmarks[:27]'], {'axis': '(0)'}), '(landmarks[:27], axis=0)\n', (1555, 1579), True, 'import numpy as np\n'), ((1975, 2124), 'cv2.polylines', 'cv2.polylines', (['frame', '[landmarks[:17], landmarks[17:22], landmarks[22:27], landmarks[27:31],\n landmarks[31:36]]', '(False)', 'color'], {'thickness': 'thickness'}), '(frame, [landmarks[:17], landmarks[17:22], landmarks[22:27],\n landmarks[27:31], landmarks[31:36]], False, color, thickness=thickness)\n', (1988, 2124), False, 'import cv2\n'), ((2171, 2301), 'cv2.polylines', 'cv2.polylines', (['frame', '[landmarks[36:42], landmarks[42:48], landmarks[48:60], landmarks[60:]]', '(True)', 'color'], {'thickness': 'thickness'}), '(frame, [landmarks[36:42], landmarks[42:48], landmarks[48:60],\n landmarks[60:]], True, color, 
thickness=thickness)\n', (2184, 2301), False, 'import cv2\n'), ((393, 422), 'numpy.arctan2', 'np.arctan2', (['(-R[1, 2])', 'R[1, 1]'], {}), '(-R[1, 2], R[1, 1])\n', (403, 422), True, 'import numpy as np\n'), ((435, 459), 'numpy.arctan2', 'np.arctan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (445, 459), True, 'import numpy as np\n'), ((496, 524), 'numpy.arctan2', 'np.arctan2', (['R[2, 1]', 'R[2, 2]'], {}), '(R[2, 1], R[2, 2])\n', (506, 524), True, 'import numpy as np\n'), ((537, 561), 'numpy.arctan2', 'np.arctan2', (['(-R[2, 0])', 'sy'], {}), '(-R[2, 0], sy)\n', (547, 561), True, 'import numpy as np\n'), ((574, 602), 'numpy.arctan2', 'np.arctan2', (['R[1, 0]', 'R[0, 0]'], {}), '(R[1, 0], R[0, 0])\n', (584, 602), True, 'import numpy as np\n'), ((1709, 1834), 'numpy.take', 'np.take', (['points', '[[0, 1], [1, 2], [2, 3], [3, 0], [0, 4], [1, 5], [2, 6], [3, 7], [4, 5], [5,\n 6], [6, 7], [7, 4]]'], {'axis': '(0)'}), '(points, [[0, 1], [1, 2], [2, 3], [3, 0], [0, 4], [1, 5], [2, 6], [3,\n 7], [4, 5], [5, 6], [6, 7], [7, 4]], axis=0)\n', (1716, 1834), True, 'import numpy as np\n'), ((2389, 2409), 'numpy.round', 'np.round', (['results[0]'], {}), '(results[0])\n', (2397, 2409), True, 'import numpy as np\n'), ((2608, 2628), 'numpy.round', 'np.round', (['results[0]'], {}), '(results[0])\n', (2616, 2628), True, 'import numpy as np\n'), ((1332, 1352), 'numpy.max', 'np.max', (['landmarks', '(0)'], {}), '(landmarks, 0)\n', (1338, 1352), True, 'import numpy as np\n'), ((1355, 1375), 'numpy.min', 'np.min', (['landmarks', '(0)'], {}), '(landmarks, 0)\n', (1361, 1375), True, 'import numpy as np\n')] |
import numpy as np
from word2vec_np.utils.activations import sigmoid
def cost_ns(U):
# Get the the batch size.
m = U.shape[0]
# The first column of U corresponds to true labels, the rest are for negative samples
cost = np.sum(-np.log(sigmoid(U[:, 0])) - np.sum(np.log(sigmoid(-U[:, 1:])), axis=1), axis=0)
return cost / m
def cost_sm(U, YT):
# Get the the batch size.
m = U.shape[1]
cost = np.sum(np.log(np.sum(np.exp(U), axis=0)) - np.sum(np.multiply(YT, U), axis=0))
return cost / m
| [
"numpy.multiply",
"numpy.exp",
"word2vec_np.utils.activations.sigmoid"
] | [((491, 509), 'numpy.multiply', 'np.multiply', (['YT', 'U'], {}), '(YT, U)\n', (502, 509), True, 'import numpy as np\n'), ((260, 276), 'word2vec_np.utils.activations.sigmoid', 'sigmoid', (['U[:, 0]'], {}), '(U[:, 0])\n', (267, 276), False, 'from word2vec_np.utils.activations import sigmoid\n'), ((294, 312), 'word2vec_np.utils.activations.sigmoid', 'sigmoid', (['(-U[:, 1:])'], {}), '(-U[:, 1:])\n', (301, 312), False, 'from word2vec_np.utils.activations import sigmoid\n'), ((462, 471), 'numpy.exp', 'np.exp', (['U'], {}), '(U)\n', (468, 471), True, 'import numpy as np\n')] |
# Author: <NAME>
# Email: <EMAIL>
"""A script to train a deepSpeech model on LibriSpeech data using multiple GPUs
with synchronous updates (data parallel training).
References:
1. Hannun, Awni, et al. "Deep speech: Scaling up end-to-end
speech recognition." arXiv preprint arXiv:1412.5567 (2014).
2. Amodei, Dario, et al. "Deep speech 2: End-to-end
speech recognition in english and mandarin."
arXiv preprint arXiv:1512.02595 (2015).
Accuracy:
deepSpeech_multi_gpu_train.py achieves 15% CER on LibriSpeech data
after 30k steps (~100 epochs of data) as judged by deepSpeech_test.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Loss
--------------------------------------------------------------------
3 TitanX Pascal | 0.25-1.45 | < 20 at 20K steps (32 hours)
Usage:
Please see the tutorial and website for how to download the LibriSpeech
data set and train the model.
http://github.com/fordspeech/deepSpeech
"""
from datetime import datetime
import os.path
import re
import time
import argparse
import json
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib
import deepSpeech
import helper_routines
def parse_args():
" Parses command line arguments."
num_gpus = len([x for x in device_lib.list_local_devices()
if x.device_type == "GPU"])
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir', type=str,
default='../models/librispeech/train',
help='Directory to write event logs and checkpoints')
parser.add_argument('--data_dir', type=str,
default='../data/librispeech/processed/',
help='Path to the audio data directory')
parser.add_argument('--max_steps', type=int, default=20000,
help='Number of batches to run')
parser.add_argument('--num_gpus', type=int, default=num_gpus,
help='How many GPUs to use')
parser.add_argument('--log_device_placement', type=bool, default=False,
help='Whether to log device placement')
parser.add_argument('--batch_size', type=int, default=32,
help='Number of inputs to process in a batch per GPU')
parser.add_argument('--temporal_stride', type=int, default=2,
help='Stride along time')
feature_parser = parser.add_mutually_exclusive_group(required=False)
feature_parser.add_argument('--shuffle', dest='shuffle',
action='store_true')
feature_parser.add_argument('--no-shuffle', dest='shuffle',
action='store_false')
parser.set_defaults(shuffle=True)
feature_parser = parser.add_mutually_exclusive_group(required=False)
feature_parser.add_argument('--use_fp16', dest='use_fp16',
action='store_true')
feature_parser.add_argument('--use_fp32', dest='use_fp16',
action='store_false')
parser.set_defaults(use_fp16=False)
parser.add_argument('--keep_prob', type=float, default=0.5,
help='Keep probability for dropout')
parser.add_argument('--num_hidden', type=int, default=1024,
help='Number of hidden nodes')
parser.add_argument('--num_rnn_layers', type=int, default=2,
help='Number of recurrent layers')
parser.add_argument('--checkpoint', type=str, default=None,
help='Continue training from checkpoint file')
parser.add_argument('--rnn_type', type=str, default='uni-dir',
help='uni-dir or bi-dir')
parser.add_argument('--initial_lr', type=float, default=0.00001,
help='Initial learning rate for training')
parser.add_argument('--num_filters', type=int, default=64,
help='Number of convolutional filters')
parser.add_argument('--moving_avg_decay', type=float, default=0.9999,
help='Decay to use for the moving average of weights')
parser.add_argument('--num_epochs_per_decay', type=int, default=5,
help='Epochs after which learning rate decays')
parser.add_argument('--lr_decay_factor', type=float, default=0.9,
help='Learning rate decay factor')
args = parser.parse_args()
# Read architecture hyper-parameters from checkpoint file
# if one is provided.
if args.checkpoint is not None:
param_file = args.checkpoint + '/deepSpeech_parameters.json'
with open(param_file, 'r') as file:
params = json.load(file)
# Read network architecture parameters from previously saved
# parameter file.
args.num_hidden = params['num_hidden']
args.num_rnn_layers = params['num_rnn_layers']
args.rnn_type = params['rnn_type']
args.num_filters = params['num_filters']
args.use_fp16 = params['use_fp16']
args.temporal_stride = params['temporal_stride']
args.initial_lr = params['initial_lr']
args.num_gpus = params['num_gpus']
return args
def tower_loss(scope, feats, labels, seq_lens):
"""Calculate the total loss on a single tower running the deepSpeech model.
This function builds the graph for computing the loss per tower(GPU).
ARGS:
scope: unique prefix string identifying the
deepSpeech tower, e.g. 'tower_0'
feats: Tensor of shape BxFxT representing the
audio features (mfccs or spectrogram).
labels: sparse tensor holding labels of each utterance.
seq_lens: tensor of shape [batch_size] holding
the sequence length per input utterance.
Returns:
Tensor of shape [batch_size] containing
the total loss for a batch of data
"""
# Build inference Graph.
logits = deepSpeech.inference(feats, seq_lens, ARGS)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
strided_seq_lens = tf.div(seq_lens, ARGS.temporal_stride)
_ = deepSpeech.loss(logits, labels, strided_seq_lens)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
#loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
#loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss;
# do the same for the averaged version of the losses.
for loss in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a
# multi-GPU training session. This helps the clarity
# of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % helper_routines.TOWER_NAME, '',
loss.op.name)
# Name each loss as '(raw)' and name the moving average
# version of the loss as the original loss name.
tf.summary.scalar(loss_name + '(raw)', loss)
#tf.summary.scalar(loss_name, loss_averages.average(loss))
# Without this loss_averages_op would never run
#with tf.control_dependencies([loss_averages_op]):
#total_loss = tf.identity(total_loss)
return total_loss
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the
gradient has been averaged across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for each_grad, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(each_grad, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0,values=grads)
grad = tf.reduce_mean(grad, 0)
# The variables are redundant because they are shared
# across towers. So we will just return the first tower's pointer to
# the Variable.
weights = grad_and_vars[0][1]
grad_and_var = (grad, weights)
average_grads.append(grad_and_var)
return average_grads
def set_learning_rate():
""" Set up learning rate schedule """
# Create a variable to count the number of train() calls.
# This equals the number of batches processed * ARGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (deepSpeech.NUM_PER_EPOCH_FOR_TRAIN /
ARGS.batch_size)
decay_steps = int(num_batches_per_epoch * ARGS.num_epochs_per_decay)
# Decay the learning rate exponentially based on the number of steps.
learning_rate = tf.train.exponential_decay(
ARGS.initial_lr,
global_step,
decay_steps,
ARGS.lr_decay_factor,
staircase=True)
return learning_rate, global_step
def fetch_data():
""" Fetch features, labels and sequence_lengths from a common queue."""
tot_batch_size = ARGS.batch_size * ARGS.num_gpus
feats, labels, seq_lens = deepSpeech.inputs(eval_data='train',
data_dir=ARGS.data_dir,
batch_size=tot_batch_size,
use_fp16=ARGS.use_fp16,
shuffle=ARGS.shuffle)
# Split features and labels and sequence lengths for each tower
split_feats = tf.split(feats, ARGS.num_gpus, 0)
split_labels = tf.sparse_split(sp_input = labels, num_split = ARGS.num_gpus, axis= 0)
split_seq_lens = tf.split(seq_lens, ARGS.num_gpus, 0)
return split_feats, split_labels, split_seq_lens
def get_loss_grads(data, optimizer):
""" Set up loss and gradient ops.
Add summaries to trainable variables """
# Calculate the gradients for each model tower.
[feats, labels, seq_lens] = data
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
for i in range(ARGS.num_gpus):
with tf.device('/gpu:%d' % i):
name_scope = '%s_%d' % (helper_routines.TOWER_NAME, i)
with tf.name_scope(name_scope) as scope:
# Calculate the loss for one tower of the deepSpeech model.
# This function constructs the entire deepSpeech model
# but shares the variables across all towers.
loss = tower_loss(scope, feats[i], labels[i], seq_lens[i])
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of
# data on this tower.
grads_and_vars = optimizer.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads_and_vars)
return loss, tower_grads, summaries
def run_train_loop(sess, operations, saver):
""" Train the model for required number of steps."""
(train_op, loss_op, summary_op) = operations
summary_writer = tf.summary.FileWriter(ARGS.train_dir, sess.graph)
# Evaluate the ops for max_steps
for step in range(ARGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss_op])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
# Print progress periodically.
if step % 10 == 0:
examples_per_sec = (ARGS.batch_size * ARGS.num_gpus) / duration
format_str = ('%s: step %d, '
'loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
examples_per_sec, duration / ARGS.num_gpus))
# Run the summary ops periodically.
if step % 50 == 0:
summary_writer.add_summary(sess.run(summary_op), step)
# Save the model checkpoint periodically.
if step % 100 == 0 or (step + 1) == ARGS.max_steps:
checkpoint_path = os.path.join(ARGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def initialize_from_checkpoint(sess, saver):
""" Initialize variables on the graph"""
# Initialise variables from a checkpoint file, if provided.
ckpt = tf.train.get_checkpoint_state(ARGS.checkpoint)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/train/model.ckpt-0,
# extract global_step from it.
checkpoint_path = ckpt.model_checkpoint_path
global_step = checkpoint_path.split('/')[-1].split('-')[-1]
return global_step
else:
print('No checkpoint file found')
return
def add_summaries(summaries, learning_rate, grads):
""" Add summary ops"""
# Track quantities for Tensorboard display
summaries.append(tf.summary.scalar('learning_rate', learning_rate))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.summary.histogram(var.op.name +
'/gradients', grad))
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
return summary_op
def train():
"""Train deepSpeech for a number of steps.
This function build a set of ops required to build the model and optimize
weights.
"""
with tf.Graph().as_default(), tf.device('/cpu'):
# Learning rate set up
learning_rate, global_step = set_learning_rate()
# Create an optimizer that performs gradient descent.
optimizer = tf.train.AdamOptimizer(learning_rate)
# Fetch a batch worth of data for each tower
data = fetch_data()
# Construct loss and gradient ops
loss_op, tower_grads, summaries = get_loss_grads(data, optimizer)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Apply the gradients to adjust the shared variables.
apply_gradient_op = optimizer.apply_gradients(grads,
global_step=global_step)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
ARGS.moving_avg_decay, global_step)
variables_averages_op = variable_averages.apply(
tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Build summary op
summary_op = add_summaries(summaries, learning_rate, grads)
# Create a saver.
saver = tf.train.Saver(tf.all_variables(), max_to_keep=100)
# Start running operations on the Graph. allow_soft_placement
# must be set to True to build towers on GPU, as some of the
# ops do not have GPU implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=ARGS.log_device_placement))
# Initialize vars.
if ARGS.checkpoint is not None:
global_step = initialize_from_checkpoint(sess, saver)
else:
sess.run(tf.initialize_all_variables())
# Start the queue runners.
tf.train.start_queue_runners(sess)
# Run training loop
run_train_loop(sess, (train_op, loss_op, summary_op), saver)
def main():
"""
Creates checkpoint directory to save training progress and records
training parameters in a json file before initiating the training session.
"""
if ARGS.train_dir != ARGS.checkpoint:
if tf.gfile.Exists(ARGS.train_dir):
tf.gfile.DeleteRecursively(ARGS.train_dir)
tf.gfile.MakeDirs(ARGS.train_dir)
# Dump command line arguments to a parameter file,
# in-case the network training resumes at a later time.
with open(os.path.join(ARGS.train_dir,
'deepSpeech_parameters.json'), 'w') as outfile:
json.dump(vars(ARGS), outfile, sort_keys=True, indent=4)
train()
if __name__ == '__main__':
ARGS = parse_args()
main()
| [
"tensorflow.gfile.Exists",
"argparse.ArgumentParser",
"deepSpeech.loss",
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"tensorflow.constant_initializer",
"tensorflow.get_variable_scope",
"numpy.isnan",
"tensorflow.ConfigProto",
"tensorflow.sparse_split",
"tensorflow.summary.merg... | [((1384, 1409), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1407, 1409), False, 'import argparse\n'), ((5997, 6040), 'deepSpeech.inference', 'deepSpeech.inference', (['feats', 'seq_lens', 'ARGS'], {}), '(feats, seq_lens, ARGS)\n', (6017, 6040), False, 'import deepSpeech\n'), ((6205, 6243), 'tensorflow.div', 'tf.div', (['seq_lens', 'ARGS.temporal_stride'], {}), '(seq_lens, ARGS.temporal_stride)\n', (6211, 6243), True, 'import tensorflow as tf\n'), ((6252, 6301), 'deepSpeech.loss', 'deepSpeech.loss', (['logits', 'labels', 'strided_seq_lens'], {}), '(logits, labels, strided_seq_lens)\n', (6267, 6301), False, 'import deepSpeech\n'), ((6377, 6411), 'tensorflow.get_collection', 'tf.get_collection', (['"""losses"""', 'scope'], {}), "('losses', scope)\n", (6394, 6411), True, 'import tensorflow as tf\n'), ((6484, 6519), 'tensorflow.add_n', 'tf.add_n', (['losses'], {'name': '"""total_loss"""'}), "(losses, name='total_loss')\n", (6492, 6519), True, 'import tensorflow as tf\n'), ((9737, 9849), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['ARGS.initial_lr', 'global_step', 'decay_steps', 'ARGS.lr_decay_factor'], {'staircase': '(True)'}), '(ARGS.initial_lr, global_step, decay_steps, ARGS.\n lr_decay_factor, staircase=True)\n', (9763, 9849), True, 'import tensorflow as tf\n'), ((10105, 10243), 'deepSpeech.inputs', 'deepSpeech.inputs', ([], {'eval_data': '"""train"""', 'data_dir': 'ARGS.data_dir', 'batch_size': 'tot_batch_size', 'use_fp16': 'ARGS.use_fp16', 'shuffle': 'ARGS.shuffle'}), "(eval_data='train', data_dir=ARGS.data_dir, batch_size=\n tot_batch_size, use_fp16=ARGS.use_fp16, shuffle=ARGS.shuffle)\n", (10122, 10243), False, 'import deepSpeech\n'), ((10518, 10551), 'tensorflow.split', 'tf.split', (['feats', 'ARGS.num_gpus', '(0)'], {}), '(feats, ARGS.num_gpus, 0)\n', (10526, 10551), True, 'import tensorflow as tf\n'), ((10571, 10636), 'tensorflow.sparse_split', 'tf.sparse_split', ([], 
{'sp_input': 'labels', 'num_split': 'ARGS.num_gpus', 'axis': '(0)'}), '(sp_input=labels, num_split=ARGS.num_gpus, axis=0)\n', (10586, 10636), True, 'import tensorflow as tf\n'), ((10663, 10699), 'tensorflow.split', 'tf.split', (['seq_lens', 'ARGS.num_gpus', '(0)'], {}), '(seq_lens, ARGS.num_gpus, 0)\n', (10671, 10699), True, 'import tensorflow as tf\n'), ((12335, 12384), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['ARGS.train_dir', 'sess.graph'], {}), '(ARGS.train_dir, sess.graph)\n', (12356, 12384), True, 'import tensorflow as tf\n'), ((13655, 13701), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['ARGS.checkpoint'], {}), '(ARGS.checkpoint)\n', (13684, 13701), True, 'import tensorflow as tf\n'), ((14699, 14723), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (14721, 14723), True, 'import tensorflow as tf\n'), ((14873, 14900), 'tensorflow.summary.merge', 'tf.summary.merge', (['summaries'], {}), '(summaries)\n', (14889, 14900), True, 'import tensorflow as tf\n'), ((7098, 7165), 're.sub', 're.sub', (["('%s_[0-9]*/' % helper_routines.TOWER_NAME)", '""""""', 'loss.op.name'], {}), "('%s_[0-9]*/' % helper_routines.TOWER_NAME, '', loss.op.name)\n", (7104, 7165), False, 'import re\n'), ((7322, 7366), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["(loss_name + '(raw)')", 'loss'], {}), "(loss_name + '(raw)', loss)\n", (7339, 7366), True, 'import tensorflow as tf\n'), ((8707, 8738), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': 'grads'}), '(axis=0, values=grads)\n', (8716, 8738), True, 'import tensorflow as tf\n'), ((8753, 8776), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', '(0)'], {}), '(grad, 0)\n', (8767, 8776), True, 'import tensorflow as tf\n'), ((12483, 12494), 'time.time', 'time.time', ([], {}), '()\n', (12492, 12494), False, 'import time\n'), ((14354, 14403), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'learning_rate'], {}), 
"('learning_rate', learning_rate)\n", (14371, 14403), True, 'import tensorflow as tf\n'), ((15119, 15136), 'tensorflow.device', 'tf.device', (['"""/cpu"""'], {}), "('/cpu')\n", (15128, 15136), True, 'import tensorflow as tf\n'), ((15310, 15347), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (15332, 15347), True, 'import tensorflow as tf\n'), ((16019, 16088), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['ARGS.moving_avg_decay', 'global_step'], {}), '(ARGS.moving_avg_decay, global_step)\n', (16052, 16088), True, 'import tensorflow as tf\n'), ((16272, 16322), 'tensorflow.group', 'tf.group', (['apply_gradient_op', 'variables_averages_op'], {}), '(apply_gradient_op, variables_averages_op)\n', (16280, 16322), True, 'import tensorflow as tf\n'), ((17094, 17128), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', (['sess'], {}), '(sess)\n', (17122, 17128), True, 'import tensorflow as tf\n'), ((17460, 17491), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['ARGS.train_dir'], {}), '(ARGS.train_dir)\n', (17475, 17491), True, 'import tensorflow as tf\n'), ((17556, 17589), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['ARGS.train_dir'], {}), '(ARGS.train_dir)\n', (17573, 17589), True, 'import tensorflow as tf\n'), ((4707, 4722), 'json.load', 'json.load', (['file'], {}), '(file)\n', (4716, 4722), False, 'import json\n'), ((8500, 8528), 'tensorflow.expand_dims', 'tf.expand_dims', (['each_grad', '(0)'], {}), '(each_grad, 0)\n', (8514, 8528), True, 'import tensorflow as tf\n'), ((9367, 9393), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (9390, 9393), True, 'import tensorflow as tf\n'), ((11014, 11037), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (11035, 11037), True, 'import tensorflow as tf\n'), ((12568, 12579), 'time.time', 'time.time', ([], {}), '()\n', (12577, 12579), False, 'import 
time\n'), ((12612, 12632), 'numpy.isnan', 'np.isnan', (['loss_value'], {}), '(loss_value)\n', (12620, 12632), True, 'import numpy as np\n'), ((14750, 14788), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['var.op.name', 'var'], {}), '(var.op.name, var)\n', (14770, 14788), True, 'import tensorflow as tf\n'), ((16171, 16195), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (16193, 16195), True, 'import tensorflow as tf\n'), ((16477, 16495), 'tensorflow.all_variables', 'tf.all_variables', ([], {}), '()\n', (16493, 16495), True, 'import tensorflow as tf\n'), ((17505, 17547), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['ARGS.train_dir'], {}), '(ARGS.train_dir)\n', (17531, 17547), True, 'import tensorflow as tf\n'), ((1291, 1322), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (1320, 1322), False, 'from tensorflow.python.client import device_lib\n'), ((11096, 11120), 'tensorflow.device', 'tf.device', (["('/gpu:%d' % i)"], {}), "('/gpu:%d' % i)\n", (11105, 11120), True, 'import tensorflow as tf\n'), ((14544, 14598), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["(var.op.name + '/gradients')", 'grad'], {}), "(var.op.name + '/gradients', grad)\n", (14564, 14598), True, 'import tensorflow as tf\n'), ((15094, 15104), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (15102, 15104), True, 'import tensorflow as tf\n'), ((16734, 16828), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': 'ARGS.log_device_placement'}), '(allow_soft_placement=True, log_device_placement=ARGS.\n log_device_placement)\n', (16748, 16828), True, 'import tensorflow as tf\n'), ((17019, 17048), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (17046, 17048), True, 'import tensorflow as tf\n'), ((11214, 11239), 'tensorflow.name_scope', 'tf.name_scope', (['name_scope'], {}), 
'(name_scope)\n', (11227, 11239), True, 'import tensorflow as tf\n'), ((11769, 11817), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES', 'scope'], {}), '(tf.GraphKeys.SUMMARIES, scope)\n', (11786, 11817), True, 'import tensorflow as tf\n'), ((12990, 13004), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13002, 13004), False, 'from datetime import datetime\n'), ((11629, 11652), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (11650, 11652), True, 'import tensorflow as tf\n')] |
import os
import sys
import numpy as np
from binder import GroupByLength
from normalize import RecordNormalizedData, Normalize
from verify import VerifyMatching
from utils import ArrayToString, IntArrayToString, ReverseComplement, LeastGreatestMultiple, ReadMatrixFromFile
from plot import PlotSettings, Plot
from startup import DisplayLogoLicense, CheckDependencies
class RNAPostProcessor():
"""
Contains all the information required for post processing the output of the sRNA profiler.
"""
def __init__(self):
# Basic details
self.poolfname = None
self.genefname = None
self.tolerance = 0
self.file_skip = 0
self.is_circular = 0
self.poolsize = 0
self.max_nuc_length = 0
self.nSequencesByLengths = None
self.scaled_forward = None
self.scaled_reverse = None
self.lengths = None
return None
def load_output(self):
# Set the filenames for the matching data and load data
# Matching data.
self.forwMatchDataFile = ("./../data/output/forward_matchings_%s_%s_tol%d.txt" % (self.poolfname[:-4], self.genefname[:-4], self.tolerance))
self.nForw = ReadMatrixFromFile(self.forwMatchDataFile, dataType = 'f')
self.gene_length = self.nForw.shape[1] - 1
self.revMatchDataFile = ("./../data/output/reverse_matchings_%s_%s_tol%d.txt" % (self.poolfname[:-4], self.genefname[:-4], self.tolerance))
self.nRev = ReadMatrixFromFile(self.revMatchDataFile, dataType = 'f')
# Normalized matching data.
self.normalized_forward = None
self.norm_ForwDataFile = ("./../data/output/norm_forward_matchings_%s_%s_tol%d.txt" % (self.poolfname[:-4], self.genefname[:-4], self.tolerance))
self.normalized_reverse = None
self.norm_RevDataFile = ("./../data/output/norm_reverse_matchings_%s_%s_tol%d.txt" % (self.poolfname[:-4], self.genefname[:-4], self.tolerance))
return None
def Usage():
# Print the usage.
print("\033[2m./analyze.py <input file>\033[0m")
print("\033[2mwhere the input file should be in data/input/ and the same format as for vbind.sh.\033[0m")
return None
def IdentifyInstance(rnap, input_file, line_number):
# Extract the parameters from an input file that specify an instance for post-processing.
with open(input_file, "r") as fp:
lno = 1
for line in fp:
if (line[0] != "#"):
if (lno == line_number):
line = list(map(lambda ln: ln.strip("\n").strip(" "), line.split(" ")))
rnap.genefname = line[0]
rnap.poolfname = line[1]
rnap.file_skip = int(line[2])
rnap.tolerance = int(line[3])
rnap.is_circular = int(line[4])
# ncores = int(line[5])
break
lno = lno + 1
rnap.load_output()
return rnap
def ScaleMatchings(plobj, forward, reverse):
# If there is a matching at position i, for length l nucleotide,
# then we want to set: y[i + j] = max(y[i + j], y[i]), for all 0 < j < l.
# print("plobj.gene_length = {}\nforward: shape = {}\n{}\nreverse: shape = {}\n{}".format(plobj.gene_length, forward.shape, forward, reverse.shape, reverse))
plobj.scaled_forward = np.zeros_like(forward)
plobj.scaled_reverse = np.zeros_like(reverse)
for l in range(len(plobj.lengths)):
for i in range(plobj.gene_length):
if (np.abs(forward[l, i]) > 0):
for j in range(sum(plobj.lengths[l])):
plobj.scaled_forward[l, (i + j) % plobj.gene_length] = max(forward[l, i], plobj.scaled_forward[l, (i + j) % plobj.gene_length])
if (np.abs(reverse[l, i]) > 0):
for j in range(sum(plobj.lengths[l])):
plobj.scaled_reverse[l, (i + j) % plobj.gene_length] = min(reverse[l, i], plobj.scaled_reverse[l, (i + j) % plobj.gene_length])
return None
def Load(plobj):
# Load the data required for plotting from a file
plobj.forwardMatchData = ReadMatrixFromFile(plobj.forwMatchDataFile)
plobj.reverseMatchData = ReadMatrixFromFile(plobj.revMatchDataFile)
return None
def GatherMatchingData(rnap, forward, reverse):
# Compute the number of matchings per each length set.
rnap.gathered_forward = np.zeros((len(rnap.lengths), forward.shape[1] - 1), dtype = np.int)
rnap.gathered_reverse = np.zeros((len(rnap.lengths), reverse.shape[1] - 1), dtype = np.int)
for s in range(len(rnap.lengths)):
for d in range(2):
if (d == 0):
nuclens = np.where(np.in1d(forward[:, 0], rnap.lengths[s]))[0]
rnap.gathered_forward[s, :] = np.sum(forward[nuclens, 1:], axis = 0)
else:
nuclens = np.where(np.in1d(reverse[:, 0], rnap.lengths[s]))[0]
rnap.gathered_reverse[s, :] = (-1) * np.sum(reverse[nuclens, 1:], axis = 0)
return None
def SummarizeForwardMatching(dset):
# List all the forward matching sequences
# If the forward matching array element, F[i][j] = x, then we need the gene-substring gene[i:(i + lengths[i])] and x.
with open("./../data/input/%s" % (dset.genefname), "r") as gf:
gene_seq = gf.readline().strip(" ").strip("\n")
forward_matches_log = "./../data/output/explicit_forward_%s_%s_%d.txt" % (dset.genefname, dset.poolfname, dset.tolerance)
topology = ["linear", "circular"][dset.is_circular]
with open(forward_matches_log, "w") as fl:
fl.write("Forward matchings\n\n")
fl.write("Gene: %s\n" % (dset.genefname))
fl.write("Pool: %s\n" % (dset.poolfname))
fl.write("Topology: %s\n" % (topology))
fl.write("Mismatches: %d\n" % (dset.tolerance))
fl.write("*************************\n\n")
for l in range(dset.nForw.shape[0]):
nuc_len = int(dset.nForw[l, 0])
gene_indices, = np.nonzero(dset.nForw[l, 1:])
match_freq = dset.nForw[l, 1 + gene_indices].astype(np.int)
if (gene_indices.shape[0] > 0):
fl.write("Length: %d\n" % (nuc_len))
fl.write("{:^12} | {:^8}\n".format("Sequence", "Frequency"))
fl.write("-------------------------\n")
for s in range(gene_indices.shape[0]):
gene_subseq = [gene_seq[(gene_indices[s] + g) % len(gene_seq)] for g in range(nuc_len)]
fl.write("{:^12} | {:^8}\n".format("".join(gene_subseq), match_freq[s]))
fl.write("*************************\n\n")
return None
def SummarizeReverseMatching(dset):
# List all the reverse matching sequences
# If the reverse matching array element, F[i][j] = x, then we need the gene-substring gene[i:(i + lengths[i])] and x.
with open("./../data/input/%s" % (dset.genefname), "r") as gf:
gene_seq = gf.readline().strip(" ").strip("\n")
reverse_matches_log = "./../data/output/explicit_reverse_%s_%s_%d.txt" % (dset.genefname, dset.poolfname, dset.tolerance)
topology = ["linear", "circular"][dset.is_circular]
with open(reverse_matches_log, "w") as fl:
fl.write("Reverse matchings\n\n")
fl.write("Gene: %s\n" % (dset.genefname))
fl.write("Pool: %s\n" % (dset.poolfname))
fl.write("Topology: %s\n" % (topology))
fl.write("Mismatches: %d\n" % (dset.tolerance))
fl.write("*************************\n\n")
for l in range(dset.nRev.shape[0]):
nuc_len = int(dset.nRev[l, 0])
gene_indices, = np.nonzero(dset.nRev[l, 1:])
match_freq = dset.nRev[l, 1 + gene_indices].astype(np.int)
if (gene_indices.shape[0] > 0):
fl.write("Length: %d\n" % (nuc_len))
fl.write("{:^12} | {:^8}\n".format("Sequence", "Frequency"))
fl.write("-------------------------\n")
for s in range(gene_indices.shape[0]):
gene_subseq = ReverseComplement([gene_seq[(gene_indices[s] + g) % len(gene_seq)] for g in range(nuc_len)])
fl.write("{:^12} | {:^8}\n".format("".join(gene_subseq), match_freq[s]))
fl.write("*************************\n\n")
return None
def SummarizeMatching(dset):
# List the forward and reverse matching output.
SummarizeForwardMatching(dset)
SummarizeReverseMatching(dset)
return None
def ParseNucLengths():
# Parse the string input specifying the nucleotide lengths.
# The nucleotide lengths is a list of lists.
# Each list in the string is separated by a semicolon ";" and each element of a list is separated by a comma ",".
is_help = 1
while (is_help == 1):
lengths_encoding = input(">>Lengths (enter \"Help\" for guidance): ").strip("\n").strip(" ")
if (lengths_encoding.lower() == "help"):
is_help = 1
print("\033[2mFormat for specifying lengths:\033[0m")
print("\033[2mWe can specify multiple sets of lengths as a string.\033[0m")
print("\033[2mEach set should be demarcated by a \";\" and the elements in every list should be separated by \",\".\033[0m")
print("\033[2mFor eg., the set of lengths 2,3,4 and 3,4,5 and 5 and 6 should be specified by the string: 2,3,4;3,4,5;5;6.\033[0m")
else:
is_help = 0
lengths_string = list(map(lambda ln: ln.strip("\n").strip(" ").split(","), lengths_encoding.strip("\n").strip(" ").split(";")))
lengths = [list(map(int, ln)) for ln in lengths_string]
return lengths
if __name__ == '__main__':
# Display the logo and license information
DisplayLogoLicense()
# Check if all the required packages exist
CheckDependencies()
rnap = RNAPostProcessor()
# Read the parameters to identify the instance for post-processing
if (len(sys.argv) < 2):
Usage()
exit(0)
else:
input_file = ("./../data/input/%s" % sys.argv[1].strip("\n"))
completed = 0
user_choice = 6
while (completed == 0):
if (user_choice == 1):
# inputs = [("example_gene.txt", "example_pool.txt", 1)]
instance = int(input(">>Problem instance from the input file %s: " % (os.path.basename(input_file))).strip("\n").strip(" "))
IdentifyInstance(rnap, input_file, instance)
rnap.pool_lengths = GroupByLength(rnap)
elif (user_choice == 2):
rnap.lengths = ParseNucLengths()
# print("lengths: {}".format(rnap.lengths))
GatherMatchingData(rnap, rnap.nForw, rnap.nRev)
is_scaled = int(input(">>Plot normalized data? [1]Yes, [0]No: ").strip("\n").strip(" "))
if (is_scaled == 1):
Normalize(rnap, rnap.gathered_forward, rnap.gathered_reverse)
# GatherMatchingData(rnap, rnap.gathered_forward, rnap.gathered_reverse)
(rnap.gathered_forward, rnap.gathered_reverse) = (rnap.normalized_forward, rnap.normalized_reverse)
ScaleMatchings(rnap, rnap.gathered_forward, rnap.gathered_reverse)
# print("Reverse\n{}".format(rnap.gathered_reverse))
# Load plot settings
plot_settings = PlotSettings()
settings_fname = input(">>Settings file name (leave blank for default): ").strip("\n").strip(" ")
if (len(settings_fname) == 0):
settings_fname = "./../data/input/default_plot_settings.txt"
plot_settings.load(settings_fname)
Plot(rnap, plot_settings)
elif (user_choice == 3):
rnap.lengths = ParseNucLengths()
RecordNormalizedData(rnap)
elif (user_choice == 4):
topology = ["linear", "circular"][rnap.is_circular]
print("Gene: %s" % (rnap.genefname))
print("Pool: %s" % (rnap.poolfname))
print("Topology: %s" % (topology))
print("Mismatches: %d" % (rnap.tolerance))
SummarizeMatching(rnap)
elif (user_choice == 5):
rnap.lengths = ParseNucLengths()
VerifyMatching(rnap)
elif (user_choice == 6):
print("**** MENU ****")
print("0 -- Quit")
print("1 -- Load new data for matching.")
print("2 -- Plot the latest dataset.")
print("3 -- Normalize matching data.")
print("4 -- Save the matching summary to a file.")
print("5 -- Verify matching results.")
print("6 -- Show menu")
print("**** MENU ****")
else:
pass
print("\033[2m---Enter 6 to show the menu---\033[0m")
user_input = input(">>Menu Option: ").strip("\n").strip(" ")
if user_input.isnumeric():
user_choice = int(user_input)
else:
user_choice = -1
if (user_choice == 0):
completed = 1
print("\033[2mxxxxxxxx\033[0m")
| [
"numpy.zeros_like",
"numpy.abs",
"numpy.sum",
"verify.VerifyMatching",
"os.path.basename",
"plot.PlotSettings",
"startup.CheckDependencies",
"startup.DisplayLogoLicense",
"normalize.RecordNormalizedData",
"binder.GroupByLength",
"numpy.nonzero",
"plot.Plot",
"normalize.Normalize",
"utils.R... | [((3066, 3088), 'numpy.zeros_like', 'np.zeros_like', (['forward'], {}), '(forward)\n', (3079, 3088), True, 'import numpy as np\n'), ((3114, 3136), 'numpy.zeros_like', 'np.zeros_like', (['reverse'], {}), '(reverse)\n', (3127, 3136), True, 'import numpy as np\n'), ((3756, 3799), 'utils.ReadMatrixFromFile', 'ReadMatrixFromFile', (['plobj.forwMatchDataFile'], {}), '(plobj.forwMatchDataFile)\n', (3774, 3799), False, 'from utils import ArrayToString, IntArrayToString, ReverseComplement, LeastGreatestMultiple, ReadMatrixFromFile\n'), ((3827, 3869), 'utils.ReadMatrixFromFile', 'ReadMatrixFromFile', (['plobj.revMatchDataFile'], {}), '(plobj.revMatchDataFile)\n', (3845, 3869), False, 'from utils import ArrayToString, IntArrayToString, ReverseComplement, LeastGreatestMultiple, ReadMatrixFromFile\n'), ((8853, 8873), 'startup.DisplayLogoLicense', 'DisplayLogoLicense', ([], {}), '()\n', (8871, 8873), False, 'from startup import DisplayLogoLicense, CheckDependencies\n'), ((8921, 8940), 'startup.CheckDependencies', 'CheckDependencies', ([], {}), '()\n', (8938, 8940), False, 'from startup import DisplayLogoLicense, CheckDependencies\n'), ((1117, 1173), 'utils.ReadMatrixFromFile', 'ReadMatrixFromFile', (['self.forwMatchDataFile'], {'dataType': '"""f"""'}), "(self.forwMatchDataFile, dataType='f')\n", (1135, 1173), False, 'from utils import ArrayToString, IntArrayToString, ReverseComplement, LeastGreatestMultiple, ReadMatrixFromFile\n'), ((1382, 1437), 'utils.ReadMatrixFromFile', 'ReadMatrixFromFile', (['self.revMatchDataFile'], {'dataType': '"""f"""'}), "(self.revMatchDataFile, dataType='f')\n", (1400, 1437), False, 'from utils import ArrayToString, IntArrayToString, ReverseComplement, LeastGreatestMultiple, ReadMatrixFromFile\n'), ((5481, 5510), 'numpy.nonzero', 'np.nonzero', (['dset.nForw[l, 1:]'], {}), '(dset.nForw[l, 1:])\n', (5491, 5510), True, 'import numpy as np\n'), ((6948, 6976), 'numpy.nonzero', 'np.nonzero', (['dset.nRev[l, 1:]'], {}), '(dset.nRev[l, 1:])\n', 
(6958, 6976), True, 'import numpy as np\n'), ((9512, 9531), 'binder.GroupByLength', 'GroupByLength', (['rnap'], {}), '(rnap)\n', (9525, 9531), False, 'from binder import GroupByLength\n'), ((3221, 3242), 'numpy.abs', 'np.abs', (['forward[l, i]'], {}), '(forward[l, i])\n', (3227, 3242), True, 'import numpy as np\n'), ((3435, 3456), 'numpy.abs', 'np.abs', (['reverse[l, i]'], {}), '(reverse[l, i])\n', (3441, 3456), True, 'import numpy as np\n'), ((4361, 4397), 'numpy.sum', 'np.sum', (['forward[nuclens, 1:]'], {'axis': '(0)'}), '(forward[nuclens, 1:], axis=0)\n', (4367, 4397), True, 'import numpy as np\n'), ((10240, 10254), 'plot.PlotSettings', 'PlotSettings', ([], {}), '()\n', (10252, 10254), False, 'from plot import PlotSettings, Plot\n'), ((10501, 10526), 'plot.Plot', 'Plot', (['rnap', 'plot_settings'], {}), '(rnap, plot_settings)\n', (10505, 10526), False, 'from plot import PlotSettings, Plot\n'), ((4520, 4556), 'numpy.sum', 'np.sum', (['reverse[nuclens, 1:]'], {'axis': '(0)'}), '(reverse[nuclens, 1:], axis=0)\n', (4526, 4556), True, 'import numpy as np\n'), ((9822, 9883), 'normalize.Normalize', 'Normalize', (['rnap', 'rnap.gathered_forward', 'rnap.gathered_reverse'], {}), '(rnap, rnap.gathered_forward, rnap.gathered_reverse)\n', (9831, 9883), False, 'from normalize import RecordNormalizedData, Normalize\n'), ((10598, 10624), 'normalize.RecordNormalizedData', 'RecordNormalizedData', (['rnap'], {}), '(rnap)\n', (10618, 10624), False, 'from normalize import RecordNormalizedData, Normalize\n'), ((4282, 4321), 'numpy.in1d', 'np.in1d', (['forward[:, 0]', 'rnap.lengths[s]'], {}), '(forward[:, 0], rnap.lengths[s])\n', (4289, 4321), True, 'import numpy as np\n'), ((4434, 4473), 'numpy.in1d', 'np.in1d', (['reverse[:, 0]', 'rnap.lengths[s]'], {}), '(reverse[:, 0], rnap.lengths[s])\n', (4441, 4473), True, 'import numpy as np\n'), ((10978, 10998), 'verify.VerifyMatching', 'VerifyMatching', (['rnap'], {}), '(rnap)\n', (10992, 10998), False, 'from verify import 
VerifyMatching\n'), ((9384, 9412), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (9400, 9412), False, 'import os\n')] |
# ===============================================================================
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import (
HasTraits,
Float,
Int,
List,
Str,
Any,
Event,
Property,
on_trait_change,
Range,
)
from traitsui.api import (
View,
Item,
HGroup,
spring,
EnumEditor,
ButtonEditor,
Group,
TextEditor,
)
# ============= standard library imports ========================
from numpy import array, hstack, Inf, savetxt
import csv
import os
from threading import Thread
import struct
# ============= local library imports ==========================
from pychron.core.helpers.filetools import unique_path
from pychron.core.helpers.isotope_utils import sort_isotopes
from pychron.paths import paths
from pychron.spectrometer.jobs.magnet_sweep import MagnetSweep
from pychron.core.stats.peak_detection import (
find_peaks,
calculate_peak_center,
PeakCenterError,
)
from pychron.core.ui.gui import invoke_in_main_thread
import six
from six.moves import zip
DELTA_TOOLTIP = """The minimum difference between a peak and
the following points, before a peak may be considered a peak"""
class CalibrationPeak(HasTraits):
isotope = Str("Ar40")
dac = Float
isotopes = List
ruler = Any
class MassCalibratorSweep(MagnetSweep):
db = Any
start_dac = Float(4)
stop_dac = Float(8.0)
step_dac = Float(0.1)
period = 10
calibration_peaks = List
selected = Any
# peak detection tuning parameters
min_peak_height = Float(1)
min_peak_separation = Range(0.0001, 1000)
# if the next point is less than delta from the current point than this is not a peak
# essentially how much does the peak stand out from the background
delta = Float(1)
fperiod = Int(50)
fwindow = Float(1)
fstep_dac = Float(0.1)
fexecute_button = Event
fexecute_label = Property(depends_on="_alive")
fine_scan_enabled = Property(depends_on="calibration_peaks:isotope")
_fine_scanning = False
def setup_graph(self):
g = self.graph
g.new_plot()
g.set_x_title("DAC")
g.new_series()
mi = min(self.start_dac, self.stop_dac)
ma = max(self.start_dac, self.stop_dac)
g.set_x_limits(min_=mi, max_=ma, pad="0.1")
def _fine_scan(self):
operiod = self.period
self.period = self.fperiod
self._fine_scanning = True
i = 1
self.graph.new_plot(padding_top=10, xtitle="Relative DAC")
w = self.fwindow / 2.0
self.graph.set_x_limits(min_=-w, max_=w, plotid=1)
self._redraw()
for cp in self.calibration_peaks:
if not cp.isotope:
continue
if self.isAlive():
self.selected = cp
self.info(
"Fine scan calibration peak {}. {} dac={}".format(
i, cp.isotope, cp.dac
)
)
self._fine_scan_peak(cp)
i += 1
self.period = operiod
self._fine_scanning = False
if self.isAlive():
if self.confirmation_dialog("Save to Database"):
self._save_to_db()
if self.confirmation_dialog("Apply Calibration"):
self._apply_calibration()
def _pack(self, d):
data = "".join([struct.pack(">ff", x, y) for x, y in d])
return data
def _save_to_db(self):
db = self.db
spectrometer = "Obama"
hist = db.add_mass_calibration_history(spectrometer)
# add coarse scan
d = self._get_coarse_data()
data = self._pack(d)
db.add_mass_calibration_scan(hist, blob=data)
# add fine scans
plot = self.graph.plots[1]
cps = [cp for cp in self.calibration_peaks if cp.isotope]
for cp, ki in zip(cps, sorted(plot.plots.keys())):
p = plot.plots[ki][0]
xs = p.index.get_data()
ys = p.value.get_data()
d = array((xs, ys)).T
data = self._pack(d)
db.add_mass_calibration_scan(
hist,
cp.isotope,
blob=data,
center=cp.dac,
)
db.commit()
def _apply_calibration(self):
"""
save calibration peaks as mag field table
"""
p = os.path.join(paths.spectrometer_dir, "mftable.csv")
with open(p, "w") as wfile:
writer = csv.writer(wfile, delimiter=",")
for cp in self.calibration_peaks:
if cp.isotope:
writer.writerow([cp.isotope, cp.dac])
def _fine_scan_peak(self, cp):
line, _ = self.graph.new_series(plotid=1)
c = cp.dac
w = self.fwindow / 2.0
steps = self._calc_step_values(c - w, c + w, self.fstep_dac)
self._scan_dac(steps)
# get last scan
xs = line.index.get_data()
ys = line.value.get_data()
try:
center = calculate_peak_center(xs, ys)
# if not isinstance(center, str):
[lx, cx, hx], [ly, cy, hy], mx, my = center
self.graph.add_vertical_rule(cx, plotid=1)
self.info(
"new peak center. {} nominal={} dx={}".format(cp.isotope, cp.dac, cx)
)
cp.dac += cx
self._redraw()
except PeakCenterError as e:
self.warning(e)
# else:
# self.warning(center)
def _update_graph_data(self, *args, **kw):
"""
add and scale scans
"""
if self._fine_scanning:
self._update_fine_graph_data(*args, **kw)
else:
super(MassCalibratorSweep, self)._update_graph_data(*args, **kw)
def _update_fine_graph_data(self, plot, di, intensities, **kw):
# print di, intensities
# convert dac to a relative dac
di -= self.selected.dac
ks = sorted(plot.plots.keys())
cur = plot.plots[ks[-1]][0]
if hasattr(cur, "odata"):
oys = getattr(cur, "odata")
oys = hstack((oys, intensities[:1]))
else:
oys = array(intensities)
setattr(cur, "odata", oys)
xs = cur.index.get_data()
xs = hstack((xs, di))
cur.index.set_data(xs)
_R = -Inf
# get the max range and normalize all series
for p in six.itervalues(plot.plots):
p = p[0]
high, low = max(p.odata), min(p.odata)
tR = high - low
if tR > _R:
_R = tR
miR = low
for p in six.itervalues(plot.plots):
p = p[0]
oys = p.odata
high, low = max(p.odata), min(p.odata)
r = high - low
if r:
oys = (oys - low) * _R / r + miR
p.value.set_data(oys)
def _fine_graph_hook(self, *args, **kw):
plot = self.graph.plots[1]
self._update_graph_data(plot, *args, **kw)
def _graph_hook(self, *args, **kw):
if self._fine_scanning:
self._fine_graph_hook(*args, **kw)
else:
super(MassCalibratorSweep, self)._graph_hook(*args, **kw)
def _dump_scan(self):
root = os.path.join(paths.data_dir, "mass_calibration_scans")
if not os.path.isdir(root):
os.mkdir(root)
p, _ = unique_path(root, "scan")
d = self._get_coarse_data()
savetxt(p, d)
def _get_coarse_data(self):
"""
return coarse scan as (dac,intensity) pairs
"""
data = self.graph.plots[0].data
xs = data.get_data("x0")
ys = data.get_data("y0")
return array((xs, ys)).T
def _find_peaks(self):
if self.graph.plots:
# clear peaks
self.graph.remove_rulers()
data = self.graph.plots[0].data
xs = data.get_data("x0")
ys = data.get_data("y0")
if len(xs) and len(ys):
lookahead = max(1, int(self.min_peak_separation / self.fstep_dac))
mxp, mip = find_peaks(ys, xs, lookahead=lookahead, delta=self.delta)
pks = []
isos = list(self.spectrometer.molecular_weights.keys())
isos = sort_isotopes(isos)
for dac, v in mxp:
if v > self.min_peak_height:
l = self.graph.add_vertical_rule(dac)
pks.append(CalibrationPeak(dac=dac, isotopes=isos, ruler=l))
self.calibration_peaks = pks
self._redraw()
def _set_x_limits(self):
if self.graph:
mi = min(self.start_dac, self.stop_dac)
ma = max(self.start_dac, self.stop_dac)
self.graph.set_x_limits(min_=mi, max_=ma, pad="0.1")
def _redraw(self):
invoke_in_main_thread(self.graph.redraw)
def _execute(self):
self.spectrometer.magnet.settling_time = 0.001
sm = self.start_dac
em = self.stop_dac
stm = self.step_dac
self.verbose = True
if abs(sm - em) > stm:
# do initial scan
self._do_sweep(sm, em, stm, map_mass=False)
self._alive = False
# write data to file for testing
self._dump_scan()
# find peaks
self._find_peaks()
self._post_execute()
self.verbose = False
def _end(self):
self._fine_scanning = False
# ===================================================================================================================
# handlers
# ===================================================================================================================
@on_trait_change("min_peak_height, min_peak_separation, delta")
def _handle_peak_detection_change(self):
self._find_peaks()
def _fexecute_button_fired(self):
if self.isAlive():
self.stop()
self._end()
else:
self._alive = True
t = Thread(name="fine scan", target=self._fine_scan)
t.start()
def _selected_changed(self):
for p in self.calibration_peaks:
ruler = p.ruler
ruler.line_width = 1
ruler.color = (1.0, 0, 0)
if self.selected:
self.selected.ruler.line_width = 5
self.selected.ruler.color = (0, 1.0, 0)
self.graph.redraw()
def _start_dac_changed(self):
self._set_x_limits()
def _stop_dac_changed(self):
self._set_x_limits()
def traits_view(self):
coarse_grp = Group(
Item("reference_detector", editor=EnumEditor(name="detectors")),
Item("start_dac", label="Start"),
Item("stop_dac", label="Stop"),
Item("step_dac", label="Step"),
Item("period", label="Scan Period (ms)"),
HGroup(
spring,
Item(
"execute_button",
editor=ButtonEditor(label_value="execute_label"),
show_label=False,
),
),
label="Coarse",
)
peak_detection_grp = Group(
Item("min_peak_height", label="Min. Height (fA)"),
Item(
"min_peak_separation",
label="Min. Separation (V)",
editor=TextEditor(evaluate=float),
),
Item("delta", tooltip=DELTA_TOOLTIP),
label="Peak Detection",
)
fine_grp = Group(
Item("fwindow", label="Window (V)", tooltip="+/- volts centered at peak_i"),
Item(
"fperiod",
label="Scan Period (ms)",
tooltip="fine scan integration time",
),
HGroup(
spring,
Item(
"fexecute_button",
editor=ButtonEditor(label_value="fexecute_label"),
show_label=False,
),
),
label="Fine",
enabled_when="fine_scan_enabled",
)
v = View(Group(coarse_grp, peak_detection_grp, fine_grp, layout="tabbed"))
return v
def _get_fine_scan_enabled(self):
return len([cp for cp in self.calibration_peaks if cp.isotope]) > 2
def _get_fexecute_label(self):
return "Stop" if self.isAlive() else "Start"
# ============= EOF =============================================
| [
"os.mkdir",
"traitsui.api.ButtonEditor",
"traits.api.Str",
"traits.api.Range",
"os.path.join",
"pychron.core.stats.peak_detection.calculate_peak_center",
"traitsui.api.EnumEditor",
"numpy.savetxt",
"struct.pack",
"pychron.core.stats.peak_detection.find_peaks",
"threading.Thread",
"csv.writer",... | [((1956, 1967), 'traits.api.Str', 'Str', (['"""Ar40"""'], {}), "('Ar40')\n", (1959, 1967), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2092, 2100), 'traits.api.Float', 'Float', (['(4)'], {}), '(4)\n', (2097, 2100), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2116, 2126), 'traits.api.Float', 'Float', (['(8.0)'], {}), '(8.0)\n', (2121, 2126), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2142, 2152), 'traits.api.Float', 'Float', (['(0.1)'], {}), '(0.1)\n', (2147, 2152), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2281, 2289), 'traits.api.Float', 'Float', (['(1)'], {}), '(1)\n', (2286, 2289), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2316, 2335), 'traits.api.Range', 'Range', (['(0.0001)', '(1000)'], {}), '(0.0001, 1000)\n', (2321, 2335), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2509, 2517), 'traits.api.Float', 'Float', (['(1)'], {}), '(1)\n', (2514, 2517), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2533, 2540), 'traits.api.Int', 'Int', (['(50)'], {}), '(50)\n', (2536, 2540), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2555, 2563), 'traits.api.Float', 'Float', (['(1)'], {}), '(1)\n', (2560, 2563), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2580, 2590), 'traits.api.Float', 'Float', (['(0.1)'], {}), '(0.1)\n', (2585, 2590), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, 
Property, on_trait_change, Range\n'), ((2640, 2669), 'traits.api.Property', 'Property', ([], {'depends_on': '"""_alive"""'}), "(depends_on='_alive')\n", (2648, 2669), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((2694, 2742), 'traits.api.Property', 'Property', ([], {'depends_on': '"""calibration_peaks:isotope"""'}), "(depends_on='calibration_peaks:isotope')\n", (2702, 2742), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((10567, 10629), 'traits.api.on_trait_change', 'on_trait_change', (['"""min_peak_height, min_peak_separation, delta"""'], {}), "('min_peak_height, min_peak_separation, delta')\n", (10582, 10629), False, 'from traits.api import HasTraits, Float, Int, List, Str, Any, Event, Property, on_trait_change, Range\n'), ((5143, 5194), 'os.path.join', 'os.path.join', (['paths.spectrometer_dir', '"""mftable.csv"""'], {}), "(paths.spectrometer_dir, 'mftable.csv')\n", (5155, 5194), False, 'import os\n'), ((7060, 7076), 'numpy.hstack', 'hstack', (['(xs, di)'], {}), '((xs, di))\n', (7066, 7076), False, 'from numpy import array, hstack, Inf, savetxt\n'), ((7197, 7223), 'six.itervalues', 'six.itervalues', (['plot.plots'], {}), '(plot.plots)\n', (7211, 7223), False, 'import six\n'), ((7417, 7443), 'six.itervalues', 'six.itervalues', (['plot.plots'], {}), '(plot.plots)\n', (7431, 7443), False, 'import six\n'), ((8050, 8104), 'os.path.join', 'os.path.join', (['paths.data_dir', '"""mass_calibration_scans"""'], {}), "(paths.data_dir, 'mass_calibration_scans')\n", (8062, 8104), False, 'import os\n'), ((8184, 8209), 'pychron.core.helpers.filetools.unique_path', 'unique_path', (['root', '"""scan"""'], {}), "(root, 'scan')\n", (8195, 8209), False, 'from pychron.core.helpers.filetools import unique_path\n'), ((8255, 8268), 'numpy.savetxt', 'savetxt', (['p', 'd'], {}), '(p, d)\n', (8262, 8268), False, 'from numpy import array, hstack, Inf, 
savetxt\n'), ((9665, 9705), 'pychron.core.ui.gui.invoke_in_main_thread', 'invoke_in_main_thread', (['self.graph.redraw'], {}), '(self.graph.redraw)\n', (9686, 9705), False, 'from pychron.core.ui.gui import invoke_in_main_thread\n'), ((5252, 5284), 'csv.writer', 'csv.writer', (['wfile'], {'delimiter': '""","""'}), "(wfile, delimiter=',')\n", (5262, 5284), False, 'import csv\n'), ((5787, 5816), 'pychron.core.stats.peak_detection.calculate_peak_center', 'calculate_peak_center', (['xs', 'ys'], {}), '(xs, ys)\n', (5808, 5816), False, 'from pychron.core.stats.peak_detection import find_peaks, calculate_peak_center, PeakCenterError\n'), ((6894, 6924), 'numpy.hstack', 'hstack', (['(oys, intensities[:1])'], {}), '((oys, intensities[:1]))\n', (6900, 6924), False, 'from numpy import array, hstack, Inf, savetxt\n'), ((6957, 6975), 'numpy.array', 'array', (['intensities'], {}), '(intensities)\n', (6962, 6975), False, 'from numpy import array, hstack, Inf, savetxt\n'), ((8120, 8139), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (8133, 8139), False, 'import os\n'), ((8153, 8167), 'os.mkdir', 'os.mkdir', (['root'], {}), '(root)\n', (8161, 8167), False, 'import os\n'), ((8499, 8514), 'numpy.array', 'array', (['(xs, ys)'], {}), '((xs, ys))\n', (8504, 8514), False, 'from numpy import array, hstack, Inf, savetxt\n'), ((10877, 10925), 'threading.Thread', 'Thread', ([], {'name': '"""fine scan"""', 'target': 'self._fine_scan'}), "(name='fine scan', target=self._fine_scan)\n", (10883, 10925), False, 'from threading import Thread\n'), ((11549, 11581), 'traitsui.api.Item', 'Item', (['"""start_dac"""'], {'label': '"""Start"""'}), "('start_dac', label='Start')\n", (11553, 11581), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((11595, 11625), 'traitsui.api.Item', 'Item', (['"""stop_dac"""'], {'label': '"""Stop"""'}), "('stop_dac', label='Stop')\n", (11599, 11625), False, 'from traitsui.api import View, Item, 
HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((11639, 11669), 'traitsui.api.Item', 'Item', (['"""step_dac"""'], {'label': '"""Step"""'}), "('step_dac', label='Step')\n", (11643, 11669), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((11683, 11723), 'traitsui.api.Item', 'Item', (['"""period"""'], {'label': '"""Scan Period (ms)"""'}), "('period', label='Scan Period (ms)')\n", (11687, 11723), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((12058, 12107), 'traitsui.api.Item', 'Item', (['"""min_peak_height"""'], {'label': '"""Min. Height (fA)"""'}), "('min_peak_height', label='Min. Height (fA)')\n", (12062, 12107), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((12289, 12325), 'traitsui.api.Item', 'Item', (['"""delta"""'], {'tooltip': 'DELTA_TOOLTIP'}), "('delta', tooltip=DELTA_TOOLTIP)\n", (12293, 12325), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((12412, 12487), 'traitsui.api.Item', 'Item', (['"""fwindow"""'], {'label': '"""Window (V)"""', 'tooltip': '"""+/- volts centered at peak_i"""'}), "('fwindow', label='Window (V)', tooltip='+/- volts centered at peak_i')\n", (12416, 12487), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((12501, 12580), 'traitsui.api.Item', 'Item', (['"""fperiod"""'], {'label': '"""Scan Period (ms)"""', 'tooltip': '"""fine scan integration time"""'}), "('fperiod', label='Scan Period (ms)', tooltip='fine scan integration time')\n", (12505, 12580), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((12992, 13056), 'traitsui.api.Group', 'Group', (['coarse_grp', 'peak_detection_grp', 'fine_grp'], {'layout': '"""tabbed"""'}), "(coarse_grp, 
peak_detection_grp, fine_grp, layout='tabbed')\n", (12997, 13056), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((4130, 4154), 'struct.pack', 'struct.pack', (['""">ff"""', 'x', 'y'], {}), "('>ff', x, y)\n", (4141, 4154), False, 'import struct\n'), ((4787, 4802), 'numpy.array', 'array', (['(xs, ys)'], {}), '((xs, ys))\n', (4792, 4802), False, 'from numpy import array, hstack, Inf, savetxt\n'), ((8905, 8962), 'pychron.core.stats.peak_detection.find_peaks', 'find_peaks', (['ys', 'xs'], {'lookahead': 'lookahead', 'delta': 'self.delta'}), '(ys, xs, lookahead=lookahead, delta=self.delta)\n', (8915, 8962), False, 'from pychron.core.stats.peak_detection import find_peaks, calculate_peak_center, PeakCenterError\n'), ((9084, 9103), 'pychron.core.helpers.isotope_utils.sort_isotopes', 'sort_isotopes', (['isos'], {}), '(isos)\n', (9097, 9103), False, 'from pychron.core.helpers.isotope_utils import sort_isotopes\n'), ((11506, 11534), 'traitsui.api.EnumEditor', 'EnumEditor', ([], {'name': '"""detectors"""'}), "(name='detectors')\n", (11516, 11534), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((12234, 12260), 'traitsui.api.TextEditor', 'TextEditor', ([], {'evaluate': 'float'}), '(evaluate=float)\n', (12244, 12260), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((11856, 11897), 'traitsui.api.ButtonEditor', 'ButtonEditor', ([], {'label_value': '"""execute_label"""'}), "(label_value='execute_label')\n", (11868, 11897), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n'), ((12777, 12819), 'traitsui.api.ButtonEditor', 'ButtonEditor', ([], {'label_value': '"""fexecute_label"""'}), "(label_value='fexecute_label')\n", (12789, 12819), False, 'from traitsui.api import View, Item, HGroup, spring, EnumEditor, ButtonEditor, Group, TextEditor\n')] 
|
import numpy as np
from scipy import linalg
from pressio4py import logger
from pressio4py import rom as rom
from pressio4py import solvers as solvers
np.set_printoptions(linewidth=140)
#----------------------------
class MyMasker:
  """Sample-mesh masker for pressio4py's masked Galerkin workflow.

  Extracts a fixed set of rows (the sample-mesh indices supplied at
  construction) from full-mesh vectors or matrices.
  """
  def __init__(self, indices):
    # row indices that define the sample mesh
    self.rows_ = indices
    self.sampleMeshSize_ = len(indices)

  def createApplyMaskResult(self, operand):
    """Allocate a zeroed container with the masked (sample-mesh) shape."""
    if operand.ndim == 1:
      return np.zeros(self.sampleMeshSize_)
    return np.zeros((self.sampleMeshSize_, operand.shape[1]))

  def applyMask(self, operand, time, result):
    """Copy the sample-mesh rows of operand into result (time is unused)."""
    result[:] = operand[self.rows_]
#----------------------------
class MyTestApp:
  """Discrete-time FOM adapter used by the masked-Galerkin test.

  Implements the pressio4py discrete-time API with a trivial model:
  R = ynp1 - yn - dt*f, where f is the constant forcing [10, 20, ..., 100].
  """
  def __init__(self, N):
    self.N_ = N
    self.callCount_ = 0

  def createDiscreteTimeResidual(self):
    # residual lives on the full mesh
    return np.zeros(self.N_)

  def createApplyDiscreteTimeJacobianResult(self, B):
    # J*B has one column per column of the operand B
    return np.zeros((self.N_, B.shape[1]))

  def discreteTimeResidual(self, step, time, dt, R, ynp1, yn):
    self.callCount_ += 1
    # constant forcing term: 10, 20, ..., 100
    forcing = np.arange(10., 110., 10.)
    R[:] = ynp1 - yn - dt*forcing
    print("ynp1")
    print(ynp1)
    print("yn")
    print(yn)
    print("R")
    print(R)

  def applyDiscreteTimeJacobian(self, step, time, dt, B, A, ynp1, yn):
    # sanity-check the shape handed in by pressio, then fill A with
    # constant columns (1, 2, 3)
    assert A.shape[0] == self.N_
    assert A.shape[1] == 3
    A[:, 0] = 1.
    A[:, 1] = 2.
    A[:, 2] = 3.
#----------------------------
class MyLinSolver:
  """Fake linear solver: the returned correction is always all-ones.

  On the first invocation it also verifies that the nonlinear solver
  passed in the expected masked-Galerkin Hessian and gradient.
  """
  def __init__(self):
    self.callCount_ = 0

  # called from the nonlinear solver; x is the correction to apply
  # to the nonlinear state
  def solve(self, A, b, x):
    self.callCount_ += 1
    x[:] = 1.
    print("\n")
    print(A)
    print(b)
    if self.callCount_ == 1:
      expectedGrad = -26. * np.ones(3)
      assert np.allclose(b, expectedGrad, atol=1e-12)
      expectedHess = np.tile(np.array([4., 8., 12.]), (3, 1))
      assert np.allclose(A, expectedHess, atol=1e-12)
#----------------------------
def test():
  '''
  Check that masked Galerkin with the discrete-time API works correctly.

  Builds a tiny full-order model (MyTestApp), a ones-only POD basis,
  a sample-mesh masker, and runs one Newton-Raphson-driven Galerkin
  step; the expected Hessian/gradient are asserted inside MyLinSolver.
  '''
  # route pressio logging to the terminal only
  logger.initialize(logger.logto.terminal, "null")
  logger.setVerbosity([logger.loglevel.info])

  # full-mesh size, reduced basis size, and time stepping parameters
  N = 10
  romSize = 3
  Nsteps = 1
  dt = 0.1

  appObj = MyTestApp(N)
  yRef = np.zeros(N)
  yRom = np.zeros(romSize)

  # create a dummy phi = all 1s
  # and make phi column-major so decoder only views it
  # and does not make a copy of it
  phi = np.ones((N,romSize), order='F')
  decoder = rom.Decoder(phi)

  # pick sample mesh indices
  sampleMeshIndices = [2,5,6,9]

  # create phi on the "sample mesh"
  phiSM = np.take(phi, sampleMeshIndices, axis=0)

  # create projector (pass the phiSM)
  projector = rom.galerkin.ArbitraryProjector(phiSM)

  # create masker
  masker = MyMasker(sampleMeshIndices)

  # masked Galerkin problem using the discrete-time (two-states) API
  problem = rom.galerkin.masked.ProblemDiscreteTimeTwoStates(appObj, decoder, yRom, yRef, masker, projector)

  # linear and non linear solver; a single Newton iteration is enough
  # for the in-solver assertions to fire
  lsO = MyLinSolver()
  nlsO = solvers.createNewtonRaphson(problem, yRom, lsO)
  nlsO.setUpdatingCriterion(solvers.update.standard)
  nlsO.setMaxIterations(1)
  nlsO.setStoppingCriterion(solvers.stop.afterMaxIters)

  # solve
  rom.galerkin.advanceNSteps(problem, yRom, 0., dt, Nsteps, nlsO)
| [
"pressio4py.logger.initialize",
"pressio4py.rom.galerkin.masked.ProblemDiscreteTimeTwoStates",
"numpy.set_printoptions",
"pressio4py.solvers.createNewtonRaphson",
"pressio4py.rom.galerkin.ArbitraryProjector",
"numpy.allclose",
"numpy.zeros",
"numpy.ones",
"pressio4py.logger.setVerbosity",
"pressio... | [((153, 187), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(140)'}), '(linewidth=140)\n', (172, 187), True, 'import numpy as np\n'), ((2410, 2458), 'pressio4py.logger.initialize', 'logger.initialize', (['logger.logto.terminal', '"""null"""'], {}), "(logger.logto.terminal, 'null')\n", (2427, 2458), False, 'from pressio4py import logger\n'), ((2461, 2504), 'pressio4py.logger.setVerbosity', 'logger.setVerbosity', (['[logger.loglevel.info]'], {}), '([logger.loglevel.info])\n', (2480, 2504), False, 'from pressio4py import logger\n'), ((2595, 2606), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2603, 2606), True, 'import numpy as np\n'), ((2616, 2633), 'numpy.zeros', 'np.zeros', (['romSize'], {}), '(romSize)\n', (2624, 2633), True, 'import numpy as np\n'), ((2765, 2797), 'numpy.ones', 'np.ones', (['(N, romSize)'], {'order': '"""F"""'}), "((N, romSize), order='F')\n", (2772, 2797), True, 'import numpy as np\n'), ((2809, 2825), 'pressio4py.rom.Decoder', 'rom.Decoder', (['phi'], {}), '(phi)\n', (2820, 2825), True, 'from pressio4py import rom as rom\n'), ((2934, 2973), 'numpy.take', 'np.take', (['phi', 'sampleMeshIndices'], {'axis': '(0)'}), '(phi, sampleMeshIndices, axis=0)\n', (2941, 2973), True, 'import numpy as np\n'), ((3026, 3064), 'pressio4py.rom.galerkin.ArbitraryProjector', 'rom.galerkin.ArbitraryProjector', (['phiSM'], {}), '(phiSM)\n', (3057, 3064), True, 'from pressio4py import rom as rom\n'), ((3135, 3235), 'pressio4py.rom.galerkin.masked.ProblemDiscreteTimeTwoStates', 'rom.galerkin.masked.ProblemDiscreteTimeTwoStates', (['appObj', 'decoder', 'yRom', 'yRef', 'masker', 'projector'], {}), '(appObj, decoder, yRom,\n yRef, masker, projector)\n', (3183, 3235), True, 'from pressio4py import rom as rom\n'), ((3297, 3344), 'pressio4py.solvers.createNewtonRaphson', 'solvers.createNewtonRaphson', (['problem', 'yRom', 'lsO'], {}), '(problem, yRom, lsO)\n', (3324, 3344), True, 'from pressio4py import solvers as solvers\n'), ((3494, 
3558), 'pressio4py.rom.galerkin.advanceNSteps', 'rom.galerkin.advanceNSteps', (['problem', 'yRom', '(0.0)', 'dt', 'Nsteps', 'nlsO'], {}), '(problem, yRom, 0.0, dt, Nsteps, nlsO)\n', (3520, 3558), True, 'from pressio4py import rom as rom\n'), ((585, 621), 'numpy.take', 'np.take', (['operand', 'self.rows_'], {'axis': '(0)'}), '(operand, self.rows_, axis=0)\n', (592, 621), True, 'import numpy as np\n'), ((787, 804), 'numpy.zeros', 'np.zeros', (['self.N_'], {}), '(self.N_)\n', (795, 804), True, 'import numpy as np\n'), ((871, 902), 'numpy.zeros', 'np.zeros', (['(self.N_, B.shape[1])'], {}), '((self.N_, B.shape[1]))\n', (879, 902), True, 'import numpy as np\n'), ((1358, 1386), 'numpy.arange', 'np.arange', (['(10.0)', '(110.0)', '(10.0)'], {}), '(10.0, 110.0, 10.0)\n', (1367, 1386), True, 'import numpy as np\n'), ((417, 447), 'numpy.zeros', 'np.zeros', (['self.sampleMeshSize_'], {}), '(self.sampleMeshSize_)\n', (425, 447), True, 'import numpy as np\n'), ((471, 521), 'numpy.zeros', 'np.zeros', (['(self.sampleMeshSize_, operand.shape[1])'], {}), '((self.sampleMeshSize_, operand.shape[1]))\n', (479, 521), True, 'import numpy as np\n'), ((2087, 2120), 'numpy.allclose', 'np.allclose', (['b', 'bGold'], {'atol': '(1e-12)'}), '(b, bGold, atol=1e-12)\n', (2098, 2120), True, 'import numpy as np\n'), ((2137, 2201), 'numpy.array', 'np.array', (['([4.0, 8.0, 12.0], [4.0, 8.0, 12.0], [4.0, 8.0, 12.0])'], {}), '(([4.0, 8.0, 12.0], [4.0, 8.0, 12.0], [4.0, 8.0, 12.0]))\n', (2145, 2201), True, 'import numpy as np\n'), ((2249, 2282), 'numpy.allclose', 'np.allclose', (['A', 'hGold'], {'atol': '(1e-12)'}), '(A, hGold, atol=1e-12)\n', (2260, 2282), True, 'import numpy as np\n'), ((2055, 2065), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2062, 2065), True, 'import numpy as np\n')] |
import time
import os
import sys
import numpy as np
import xarray as xr
from glob import glob
from functools import partial
from multiprocessing import Pool, cpu_count
def ncks_subset(i, fl, gridtype, wait=True):
    """Subset file fl[i] to the western-US lat/lon window using ncks.

    Blocks until enough free RAM is available (roughly three times the
    input file size) before launching the subprocess, so that many pool
    workers do not exhaust memory simultaneously.

    Parameters
    ----------
    i : int
        Index of the file to process within fl.
    fl : sequence of str
        Paths to the input '.nc' files.
    gridtype : str
        'isobaric' (also subsets the level dimension) or 'surface'.
    wait : bool, optional
        If False, return immediately without running ncks
        (original behaviour preserved).

    Returns
    -------
    int or None
        Return code of the ncks call, or None when wait is False.

    Raises
    ------
    ValueError
        If gridtype is neither 'isobaric' nor 'surface'.
    """
    from time import sleep
    from subprocess import call

    f = fl[i]
    # approximate GB needed: file size in GB with a 1.5x safety factor
    mem_need = round(os.path.getsize(f)/10e8, 3) * 1.5

    # f[:-3] strips the trailing '.nc'; output gets a '.WE.nc' suffix
    if gridtype == 'isobaric':
        cmd = ('ncks --no_tmp_fl -O -d longitude,928,1040 -d latitude,160,241 '
               '-d level,14,36 {0}.nc {0}.WE.nc'.format(f[:-3]))
    elif gridtype == 'surface':
        cmd = ('ncks --no_tmp_fl -O -d longitude,928,1040 -d latitude,160,241 '
               '{0}.nc {0}.WE.nc'.format(f[:-3]))
    else:
        # previously an unknown gridtype fell through to a confusing
        # NameError on cmd; fail fast with a clear message instead
        raise ValueError("gridtype must be 'isobaric' or 'surface', got %r"
                         % gridtype)

    if not wait:
        return None

    # imported lazily so the function is usable where psutil is absent
    # and wait/ncks are never reached
    from psutil import virtual_memory
    while True:
        mem_avail = round(virtual_memory().available/10e8, 3)
        if mem_avail > mem_need*2:
            print('Processing {}/{} REQ:{}GB AVAIL:{}GB [{}]'.format(
                i+1, len(fl), mem_need, mem_avail, f))
            return call(cmd, shell=True)
        print('Waiting - RAM Full {}/{} {}GB {}GB [{}]'.format(
            i+1, len(fl), mem_need, mem_avail, f))
        sleep(15)
if __name__ == '__main__':

    # grid type comes from the command line: 'isobaric' or 'surface'
    gridtype = sys.argv[1]

    # NOTE(review): both paths are currently identical; presumably they
    # should point at separate isobaric/surface scratch areas -- confirm.
    isodir = '/scratch/general/lustre/u1070830/era5_temp/'
    sfcdir = '/scratch/general/lustre/u1070830/era5_temp/'
    model_dir = isodir if gridtype == 'isobaric' else sfcdir

    # process the per-year/variable directories in sorted order
    dirlist = np.array(glob(model_dir + '*'))
    dirlist = dirlist[np.argsort(dirlist)]

    for d in dirlist:
        print('pwd: %s'%d)

        # ERA5 file naming: '.pl.' = pressure-level, '.sfc.' = surface
        gridtype_spec = '.pl.' if gridtype == 'isobaric' else '.sfc.'
        flist = sorted(glob(d + '/*%s*.nc'%gridtype_spec))
        # skip files whose subset output already exists alongside them
        flist = [f for f in flist if '.WE.nc' not in f]

        ncks_subset_mp = partial(ncks_subset, fl=flist, gridtype=gridtype, wait=True)

        # print(sorted([f.split('_')[3].split('.')[0] for f in flist]))

        if len(flist) > 0:
            print(len(flist), d)

            # Add a failsafe that loads flist[0] and displays the lat/lon bounds
            # Then yes/no prompt for user to continue with this year and var set
            # ncks_subset_mp = partial(ncks_subset, fl=flist, gridtype=gridtype)
            # process the first file serially so its output can be checked
            ncks_subset_mp(0)

            # open the freshly produced subset and verify the longitude bounds
            fi = glob(d + '/*.WE.nc')[0]
            sample_post = xr.open_dataset(fi)

            lon = sample_post.longitude-360
            xlon, nlon = lon.max().values, lon.min().values
            sample_post.close()

            if xlon == -100.0 and nlon == -128.0:
                # bounds look right: fan the remaining files out to a pool,
                # leaving one core free for the OS
                time.sleep(5)
                p = Pool(cpu_count()-1)
                returns = p.map(ncks_subset_mp, range(len(flist)), chunksize=1)
                p.close()
                p.join()
            else:
                print('LAT LON SUBSET INCORRECT')
                # NOTE(review): bare `raise` with no active exception produces
                # "RuntimeError: No active exception to re-raise" -- it does
                # halt the run, but an explicit exception would be clearer.
                raise

        # BE CAREFUL HERE!
        # originals are removed only when every input produced a subset file
        # NOTE(review): nfiles is computed but never used -- confirm intent.
        nfiles = 300 if gridtype == 'isobaric' else 60

        flist_check = glob(d + '/*.WE.nc')
        print('check: ', len(flist), len(flist_check))

        if len(flist_check) == len(flist):
            [os.remove(f) for f in flist]
| [
"functools.partial",
"psutil.virtual_memory",
"os.remove",
"os.path.getsize",
"xarray.open_dataset",
"time.sleep",
"numpy.argsort",
"subprocess.call",
"glob.glob",
"multiprocessing.cpu_count"
] | [((1424, 1445), 'glob.glob', 'glob', (["(model_dir + '*')"], {}), "(model_dir + '*')\n", (1428, 1445), False, 'from glob import glob\n'), ((1469, 1488), 'numpy.argsort', 'np.argsort', (['dirlist'], {}), '(dirlist)\n', (1479, 1488), True, 'import numpy as np\n'), ((1772, 1832), 'functools.partial', 'partial', (['ncks_subset'], {'fl': 'flist', 'gridtype': 'gridtype', 'wait': '(True)'}), '(ncks_subset, fl=flist, gridtype=gridtype, wait=True)\n', (1779, 1832), False, 'from functools import partial\n'), ((987, 1008), 'subprocess.call', 'call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (991, 1008), False, 'from subprocess import call\n'), ((1154, 1163), 'time.sleep', 'sleep', (['(15)'], {}), '(15)\n', (1159, 1163), False, 'from time import sleep\n'), ((1646, 1682), 'glob.glob', 'glob', (["(d + '/*%s*.nc' % gridtype_spec)"], {}), "(d + '/*%s*.nc' % gridtype_spec)\n", (1650, 1682), False, 'from glob import glob\n'), ((2315, 2334), 'xarray.open_dataset', 'xr.open_dataset', (['fi'], {}), '(fi)\n', (2330, 2334), True, 'import xarray as xr\n'), ((2960, 2980), 'glob.glob', 'glob', (["(d + '/*.WE.nc')"], {}), "(d + '/*.WE.nc')\n", (2964, 2980), False, 'from glob import glob\n'), ((351, 369), 'os.path.getsize', 'os.path.getsize', (['f'], {}), '(f)\n', (366, 369), False, 'import os\n'), ((2265, 2285), 'glob.glob', 'glob', (["(d + '/*.WE.nc')"], {}), "(d + '/*.WE.nc')\n", (2269, 2285), False, 'from glob import glob\n'), ((2550, 2563), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2560, 2563), False, 'import time\n'), ((736, 752), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (750, 752), False, 'from psutil import virtual_memory\n'), ((3130, 3142), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (3139, 3142), False, 'import os\n'), ((2606, 2617), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2615, 2617), False, 'from multiprocessing import Pool, cpu_count\n')] |
#!/usr/bin/env python
"""
Module test_miri_filters - unit tests for the MiriFilter classes.
:History:
11 Jul 2014: Created to replace the test_filters.py module, which was
based on the old Filters class.
21 Jul 2014: Detector names changed to MIRIMAGE, MIRIFUSHORT and MIRIFULONG.
08 Sep 2015: Made compatible with Python 3
12 Jul 2017: Replaced "clobber" parameter with "overwrite".
17 Oct 2018: 'N/A' used as a metadata wildcard instead of 'ANY'.
"""
import os
import unittest
import warnings
import numpy as np
from miri.datamodels.tests.util import assert_recarray_equal, \
assert_products_equal
from miri.datamodels.miri_filters import MiriFilter, \
MiriBandPassFilter, MiriQuantumEfficiency
class TestMiriFilter(unittest.TestCase):
    """Unit tests for the generic MiriFilter data product."""

    def setUp(self):
        # Create a MiriFilter object containing test data: a flat 50%
        # transmission curve sampled every 0.5 microns from 0.5 to 10.0.
        self.transmissions = [
            (0.5, 0.5),
            (1.0, 0.5),
            (1.5, 0.5),
            (2.0, 0.5),
            (2.5, 0.5),
            (3.0, 0.5),
            (3.5, 0.5),
            (4.0, 0.5),
            (4.5, 0.5),
            (5.0, 0.5),
            (5.5, 0.5),
            (6.0, 0.5),
            (6.5, 0.5),
            (7.0, 0.5),
            (7.5, 0.5),
            (8.0, 0.5),
            (8.5, 0.5),
            (9.0, 0.5),
            (9.5, 0.5),
            (10.0, 0.5)]
        self.filt = MiriFilter(filter_table=self.transmissions,
                               filter_name='N/A', filter_type='N/A')
        # Add some typical metadata
        self.filt.set_instrument_metadata(detector='MIRIMAGE', modelnam='FM',
                                          filt='N/A', channel='', band='',
                                          ccc_pos='OPEN', deck_temperature=14.0,
                                          detector_temperature=6.7)
        # Name of temporary file for testing FITS I/O.
        self.tempfile = 'test_miri_filter.fits'

    def tearDown(self):
        # Clean temporary files.
        if os.path.isfile(self.tempfile):
            try:
                os.remove(self.tempfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.tempfile + \
                    "\n " + str(e)
                warnings.warn(strg)
        # Clean python variables
        del self.filt, self.transmissions

    def test_creation(self):
        # TBD
        pass

    def test_description(self):
        # Test that the querying and description functions work.
        # For the test to pass these need to run without error
        # and generate non-null strings.
        descr = str(self.filt)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.filt)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.transmission)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.wavelength)
        self.assertIsNotNone(descr)
        del descr

    def test_apply(self):
        # Test that a constant transmission is correctly applied
        flux = np.linspace(120.0, 150.0, len(self.transmissions))
        # The result should be the same as the efficiency array at those
        # corresponding wavelength values times the same constant used
        # at setUp.
        result = self.filt.apply_filter(flux)
        self.assertTrue(np.allclose(0.5*flux, result))
        # Same test but a wavelength array is provided and interpolation is
        # required
        wave = np.linspace(1.0, 9.0, 50)
        flux = np.linspace(120.0, 150.0, 50)
        # The result should be the same as the efficiency array at those
        # corresponding wavelength values times the same constant used
        # at setUp.
        result = self.filt.apply_filter(flux, wave)
        self.assertTrue(np.allclose(0.5*flux, result))

    def test_fitsio(self):
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data products can be written to a FITS
            # file and read back again without changing the data.
            self.filt.save(self.tempfile, overwrite=True)
            with MiriFilter(self.tempfile) as readback:
                assert_products_equal( self, self.filt, readback,
                                       arrays=[], tables='filter_table' )
            del readback
class TestMiriBandPassFilter(unittest.TestCase):
    """Unit tests for the MiriBandPassFilter data product (F560W example)."""

    def setUp(self):
        # Create a MiriBandPassFilter object containing test data:
        # a realistic transmission curve sampled at 0.01-micron steps.
        self.transmissions = [
            (5.80, 0.8698470),
            (5.81, 0.8759494),
            (5.82, 0.8944225),
            (5.83, 0.8899569),
            (5.84, 0.8760563),
            (5.85, 0.8726164),
            (5.86, 0.8782486),
            (5.87, 0.8753881),
            (5.88, 0.8844002),
            (5.89, 0.8682995),
            (5.90, 0.8495247),
            (5.91, 0.8289118),
            (5.92, 0.8211463),
            (5.93, 0.8199366),
            (5.94, 0.8202344),
            (5.95, 0.7952070),
            (5.96, 0.7884885),
            (5.97, 0.7938501),
            (5.98, 0.7938051),
            (5.99, 0.8033671),
            (6.00, 0.7985086)
            ]
        self.filt = MiriBandPassFilter(filter_table=self.transmissions,
                                       filter_name='F560W', wavecent=5.6, fwhm=1.2)
        # Add some typical metadata
        self.filt.set_instrument_metadata(detector='MIRIMAGE', modelnam='FM',
                                          filt='F560W', channel='', band='',
                                          ccc_pos='OPEN', deck_temperature=14.0,
                                          detector_temperature=6.7)
        # Name of temporary file for testing FITS I/O.
        self.tempfile = 'test_miri_bandpass_filter.fits'

    def tearDown(self):
        # Clean temporary files.
        if os.path.isfile(self.tempfile):
            try:
                os.remove(self.tempfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.tempfile + \
                    "\n " + str(e)
                warnings.warn(strg)
        # Clean python variables
        del self.filt, self.transmissions

    def test_creation(self):
        # TBD
        pass

    def test_description(self):
        # Test that the querying and description functions work.
        # For the test to pass these need to run without error
        # and generate non-null strings.
        descr = str(self.filt)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.filt)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.transmission)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.wavelength)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.meta.instrument.filter_wavecent)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.meta.instrument.filter_fwhm)
        self.assertIsNotNone(descr)
        del descr

    def test_apply(self):
        # Test that a non-constant transmission is applied without problem.
        flux = np.linspace(120.0, 150.0, len(self.transmissions))
        # The result should at least be the same length.
        result = self.filt.apply_filter(flux)
        self.assertEqual(len(flux), len(result))
        # Same test but a wavelength array is provided and interpolation is
        # required
        wave = np.linspace(5.85, 5.95, 50)
        flux = np.linspace(120.0, 150.0, 50)
        # The result should at least be the same length.
        result = self.filt.apply_filter(flux, wave)
        self.assertEqual(len(flux), len(result))

    def test_fitsio(self):
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data products can be written to a FITS
            # file and read back again without changing the data.
            self.filt.save(self.tempfile, overwrite=True)
            with MiriBandPassFilter(self.tempfile) as readback:
                assert_products_equal( self, self.filt, readback,
                                       arrays=[], tables='filter_table' )
            del readback
class TestMiriQuantumEfficiency(unittest.TestCase):
    """Unit tests for the MiriQuantumEfficiency data product."""

    def setUp(self):
        # Create a MiriQuantumEfficiency object containing test data:
        # a realistic QE curve sampled at 0.01-micron steps.
        self.efficiency = [
            (5.80, 0.8698470),
            (5.81, 0.8759494),
            (5.82, 0.8944225),
            (5.83, 0.8899569),
            (5.84, 0.8760563),
            (5.85, 0.8726164),
            (5.86, 0.8782486),
            (5.87, 0.8753881),
            (5.88, 0.8844002),
            (5.89, 0.8682995),
            (5.90, 0.8495247),
            (5.91, 0.8289118),
            (5.92, 0.8211463),
            (5.93, 0.8199366),
            (5.94, 0.8202344),
            (5.95, 0.7952070),
            (5.96, 0.7884885),
            (5.97, 0.7938501),
            (5.98, 0.7938051),
            (5.99, 0.8033671),
            (6.00, 0.7985086)
            ]
        self.filt = MiriQuantumEfficiency(qe_table=self.efficiency,
                                          detector='MIRIMAGE', temperature=6.7)
        # Add some typical metadata
        self.filt.set_instrument_metadata(detector='MIRIMAGE', modelnam='FM',
                                          filt='N/A', channel='', band='',
                                          ccc_pos='OPEN', deck_temperature=14.0,
                                          detector_temperature=6.7)
        # Name of temporary file for testing FITS I/O.
        self.tempfile = 'test_miri_quantum_efficiency.fits'

    def tearDown(self):
        # Clean temporary files.
        if os.path.isfile(self.tempfile):
            try:
                os.remove(self.tempfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.tempfile + \
                    "\n " + str(e)
                warnings.warn(strg)
        # Clean python variables
        del self.filt, self.efficiency

    def test_creation(self):
        # TBD
        pass

    def test_description(self):
        # Test that the querying and description functions work.
        # For the test to pass these need to run without error
        # and generate non-null strings.
        descr = str(self.filt)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.filt)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.efficiency)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.wavelength)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.meta.instrument.detector_temperature)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.filt.meta.instrument.filter_fwhm)
        self.assertIsNotNone(descr)
        del descr

    def test_fitsio(self):
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data products can be written to a FITS
            # file and read back again without changing the data.
            self.filt.save(self.tempfile, overwrite=True)
            # BUG FIX: the file was previously read back as a
            # MiriBandPassFilter (copy-paste from the class above), which
            # defeated the purpose of this round-trip test. Read it back
            # with the class under test instead.
            with MiriQuantumEfficiency(self.tempfile) as readback:
                assert_products_equal( self, self.filt, readback,
                                       arrays=[], tables='filter_table' )
            del readback
# If being run as a main program, run the tests.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"os.remove",
"warnings.simplefilter",
"miri.datamodels.miri_filters.MiriQuantumEfficiency",
"miri.datamodels.miri_filters.MiriBandPassFilter",
"numpy.allclose",
"miri.datamodels.miri_filters.MiriFilter",
"os.path.isfile",
"warnings.catch_warnings",
"numpy.linspace",
"warnings.wa... | [((12507, 12522), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12520, 12522), False, 'import unittest\n'), ((1746, 1832), 'miri.datamodels.miri_filters.MiriFilter', 'MiriFilter', ([], {'filter_table': 'self.transmissions', 'filter_name': '"""N/A"""', 'filter_type': '"""N/A"""'}), "(filter_table=self.transmissions, filter_name='N/A', filter_type=\n 'N/A')\n", (1756, 1832), False, 'from miri.datamodels.miri_filters import MiriFilter, MiriBandPassFilter, MiriQuantumEfficiency\n'), ((2340, 2369), 'os.path.isfile', 'os.path.isfile', (['self.tempfile'], {}), '(self.tempfile)\n', (2354, 2369), False, 'import os\n'), ((3879, 3904), 'numpy.linspace', 'np.linspace', (['(1.0)', '(9.0)', '(50)'], {}), '(1.0, 9.0, 50)\n', (3890, 3904), True, 'import numpy as np\n'), ((3920, 3949), 'numpy.linspace', 'np.linspace', (['(120.0)', '(150.0)', '(50)'], {}), '(120.0, 150.0, 50)\n', (3931, 3949), True, 'import numpy as np\n'), ((5834, 5934), 'miri.datamodels.miri_filters.MiriBandPassFilter', 'MiriBandPassFilter', ([], {'filter_table': 'self.transmissions', 'filter_name': '"""F560W"""', 'wavecent': '(5.6)', 'fwhm': '(1.2)'}), "(filter_table=self.transmissions, filter_name='F560W',\n wavecent=5.6, fwhm=1.2)\n", (5852, 5934), False, 'from miri.datamodels.miri_filters import MiriFilter, MiriBandPassFilter, MiriQuantumEfficiency\n'), ((6454, 6483), 'os.path.isfile', 'os.path.isfile', (['self.tempfile'], {}), '(self.tempfile)\n', (6468, 6483), False, 'import os\n'), ((8136, 8163), 'numpy.linspace', 'np.linspace', (['(5.85)', '(5.95)', '(50)'], {}), '(5.85, 5.95, 50)\n', (8147, 8163), True, 'import numpy as np\n'), ((8179, 8208), 'numpy.linspace', 'np.linspace', (['(120.0)', '(150.0)', '(50)'], {}), '(120.0, 150.0, 50)\n', (8190, 8208), True, 'import numpy as np\n'), ((9991, 10080), 'miri.datamodels.miri_filters.MiriQuantumEfficiency', 'MiriQuantumEfficiency', ([], {'qe_table': 'self.efficiency', 'detector': '"""MIRIMAGE"""', 'temperature': '(6.7)'}), 
"(qe_table=self.efficiency, detector='MIRIMAGE',\n temperature=6.7)\n", (10012, 10080), False, 'from miri.datamodels.miri_filters import MiriFilter, MiriBandPassFilter, MiriQuantumEfficiency\n'), ((10601, 10630), 'os.path.isfile', 'os.path.isfile', (['self.tempfile'], {}), '(self.tempfile)\n', (10615, 10630), False, 'import os\n'), ((3729, 3760), 'numpy.allclose', 'np.allclose', (['(0.5 * flux)', 'result'], {}), '(0.5 * flux, result)\n', (3740, 3760), True, 'import numpy as np\n'), ((4190, 4221), 'numpy.allclose', 'np.allclose', (['(0.5 * flux)', 'result'], {}), '(0.5 * flux, result)\n', (4201, 4221), True, 'import numpy as np\n'), ((4299, 4324), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (4322, 4324), False, 'import warnings\n'), ((4338, 4369), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4359, 4369), False, 'import warnings\n'), ((8445, 8470), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (8468, 8470), False, 'import warnings\n'), ((8484, 8515), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (8505, 8515), False, 'import warnings\n'), ((11921, 11946), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (11944, 11946), False, 'import warnings\n'), ((11960, 11991), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (11981, 11991), False, 'import warnings\n'), ((2404, 2428), 'os.remove', 'os.remove', (['self.tempfile'], {}), '(self.tempfile)\n', (2413, 2428), False, 'import os\n'), ((4588, 4613), 'miri.datamodels.miri_filters.MiriFilter', 'MiriFilter', (['self.tempfile'], {}), '(self.tempfile)\n', (4598, 4613), False, 'from miri.datamodels.miri_filters import MiriFilter, MiriBandPassFilter, MiriQuantumEfficiency\n'), ((4643, 4730), 'miri.datamodels.tests.util.assert_products_equal', 'assert_products_equal', (['self', 'self.filt', 'readback'], {'arrays': '[]', 
'tables': '"""filter_table"""'}), "(self, self.filt, readback, arrays=[], tables=\n 'filter_table')\n", (4664, 4730), False, 'from miri.datamodels.tests.util import assert_recarray_equal, assert_products_equal\n'), ((6518, 6542), 'os.remove', 'os.remove', (['self.tempfile'], {}), '(self.tempfile)\n', (6527, 6542), False, 'import os\n'), ((8734, 8767), 'miri.datamodels.miri_filters.MiriBandPassFilter', 'MiriBandPassFilter', (['self.tempfile'], {}), '(self.tempfile)\n', (8752, 8767), False, 'from miri.datamodels.miri_filters import MiriFilter, MiriBandPassFilter, MiriQuantumEfficiency\n'), ((8797, 8884), 'miri.datamodels.tests.util.assert_products_equal', 'assert_products_equal', (['self', 'self.filt', 'readback'], {'arrays': '[]', 'tables': '"""filter_table"""'}), "(self, self.filt, readback, arrays=[], tables=\n 'filter_table')\n", (8818, 8884), False, 'from miri.datamodels.tests.util import assert_recarray_equal, assert_products_equal\n'), ((10665, 10689), 'os.remove', 'os.remove', (['self.tempfile'], {}), '(self.tempfile)\n', (10674, 10689), False, 'import os\n'), ((12210, 12243), 'miri.datamodels.miri_filters.MiriBandPassFilter', 'MiriBandPassFilter', (['self.tempfile'], {}), '(self.tempfile)\n', (12228, 12243), False, 'from miri.datamodels.miri_filters import MiriFilter, MiriBandPassFilter, MiriQuantumEfficiency\n'), ((12273, 12360), 'miri.datamodels.tests.util.assert_products_equal', 'assert_products_equal', (['self', 'self.filt', 'readback'], {'arrays': '[]', 'tables': '"""filter_table"""'}), "(self, self.filt, readback, arrays=[], tables=\n 'filter_table')\n", (12294, 12360), False, 'from miri.datamodels.tests.util import assert_recarray_equal, assert_products_equal\n'), ((2600, 2619), 'warnings.warn', 'warnings.warn', (['strg'], {}), '(strg)\n', (2613, 2619), False, 'import warnings\n'), ((6714, 6733), 'warnings.warn', 'warnings.warn', (['strg'], {}), '(strg)\n', (6727, 6733), False, 'import warnings\n'), ((10861, 10880), 'warnings.warn', 'warnings.warn', 
(['strg'], {}), '(strg)\n', (10874, 10880), False, 'import warnings\n')] |
import pyrealsense2 as rs
import numpy as np
import cv2
import os
def image_file_counter(path):
    """Return the next image index: total files under path (recursive), plus one."""
    total = sum(len(filenames) for _, _, filenames in os.walk(path))
    return total + 1
def spatial_filtering(depth_frame, magnitude=2, alpha=0.5, delta=20, holes_fill=0):
    """Apply librealsense's edge-preserving spatial filter to a depth frame.

    The keyword arguments are forwarded to the corresponding
    rs.spatial_filter options (filter_magnitude, filter_smooth_alpha,
    filter_smooth_delta, holes_fill) -- see the librealsense
    post-processing documentation for their exact semantics.
    A holes_fill of 0 leaves hole filling disabled.

    Returns the filtered depth frame.
    """
    spatial = rs.spatial_filter()
    spatial.set_option(rs.option.filter_magnitude, magnitude)
    spatial.set_option(rs.option.filter_smooth_alpha, alpha)
    spatial.set_option(rs.option.filter_smooth_delta, delta)
    spatial.set_option(rs.option.holes_fill, holes_fill)
    depth_frame = spatial.process(depth_frame)
    return depth_frame
def hole_filling(depth_frame):
    """Fill holes in a depth frame with librealsense's hole-filling filter."""
    # local renamed so it no longer shadows this function's own name
    filt = rs.hole_filling_filter()
    return filt.process(depth_frame)
# define global variables
# ========================
# file names and paths where captured frames are written
rgb_img_path = 'captured_images/rgb_image/'
depth_img_path = 'captured_images/depth_image/'
colored_depth_img_path = 'captured_images/coloured_depth_image/'
# print the camera intrinsics only once, on the first processed frame
intrinsics = True
# set True if the camera is mounted vertically (rotates all images 90 deg)
rotate_camera = False
if __name__ == "__main__":
# ========================
# 1. Configure all streams
# ========================
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# ======================
# 2. Start the streaming
# ======================
print("Starting up the Intel Realsense D435...")
print("")
profile = pipeline.start(config)
# =================================
# 3. The depth sensor's depth scale
# =================================
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: ", depth_scale)
print("")
# ==========================================
# 4. Create an align object.
# Align the depth image to the rgb image.
# ==========================================
align_to = rs.stream.color
align = rs.align(align_to)
try:
# ===========================================
# 5. Skip the first 30 frames.
# This gives the Auto-Exposure time to adjust
# ===========================================
for x in range(30):
frames = pipeline.wait_for_frames()
# Align the depth frame to color frame
aligned_frames = align.process(frames)
print("Intel Realsense D435 started successfully.")
print("")
while True:
# ======================================
# 6. Wait for a coherent pair of frames:
# ======================================
frames = pipeline.wait_for_frames()
# =======================================
# 7. Align the depth frame to color frame
# =======================================
aligned_frames = align.process(frames)
# ================================================
# 8. Fetch the depth and colour frames from stream
# ================================================
depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
if not depth_frame or not color_frame:
continue
# print the camera intrinsics just once. it is always the same
if intrinsics:
print("Intel Realsense D435 Camera Intrinsics: ")
print("========================================")
print(depth_frame.profile.as_video_stream_profile().intrinsics)
print(color_frame.profile.as_video_stream_profile().intrinsics)
print("")
intrinsics = False
# =====================================
# 9. Apply filtering to the depth image
# =====================================
# Apply a spatial filter without hole_filling (i.e. holes_fill=0)
depth_frame = spatial_filtering(depth_frame, magnitude=2, alpha=0.5, delta=50, holes_fill=0)
# Apply hole filling filter
depth_frame = hole_filling(depth_frame)
# ===========================
# 10. colourise the depth map
# ===========================
depth_color_frame = rs.colorizer().colorize(depth_frame)
# ==================================
# 11. Convert images to numpy arrays
# ==================================
depth_image = np.array(depth_frame.get_data())
depth_color_image = np.asanyarray(depth_color_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
# ======================================================================
# 12. Only rotate the images if the realsense camera is placed vertical.
# Otherwise set the variable "rotate_camera = False"
# ======================================================================
if rotate_camera:
depth_image = np.rot90(depth_image)
depth_color_image = np.rot90(depth_color_image)
color_image = np.rot90(color_image)
# Stack rgb and depth map images horizontally for visualisation only
images = np.hstack((color_image, depth_color_image))
# Show horizontally stacked rgb and depth map images
cv2.namedWindow('RGB and Depth Map Images')
cv2.imshow('RGB and Depth Map Images', images)
c = cv2.waitKey(1)
# =============================================
# If the 's' key is pressed, we save the images
# =============================================
if c == ord('s'):
img_counter = image_file_counter(rgb_img_path)
'''create a stream folders'''
if not os.path.exists(rgb_img_path):
os.makedirs(rgb_img_path)
if not os.path.exists(depth_img_path):
os.makedirs(depth_img_path)
if not os.path.exists(colored_depth_img_path):
os.makedirs(colored_depth_img_path)
filename = str(img_counter) + '.png'
filename_raw = str(img_counter) + '.raw'
# save the rgb colour image
cv2.imwrite(os.path.join(rgb_img_path, filename), color_image)
# Save the depth image in raw binary format uint16.
f = open(os.path.join(depth_img_path, filename_raw), mode='wb')
depth_image.tofile(f)
cv2.imwrite(os.path.join(colored_depth_img_path, filename), depth_color_image)
print('images have been successfully saved')
elif c == 27: # esc to exit
break
finally:
# Stop streaming
pipeline.stop() | [
"os.makedirs",
"pyrealsense2.pipeline",
"cv2.waitKey",
"pyrealsense2.spatial_filter",
"os.walk",
"os.path.exists",
"pyrealsense2.align",
"pyrealsense2.colorizer",
"pyrealsense2.config",
"numpy.hstack",
"numpy.rot90",
"pyrealsense2.hole_filling_filter",
"cv2.imshow",
"os.path.join",
"cv2.... | [((139, 152), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (146, 152), False, 'import os\n'), ((307, 326), 'pyrealsense2.spatial_filter', 'rs.spatial_filter', ([], {}), '()\n', (324, 326), True, 'import pyrealsense2 as rs\n'), ((690, 714), 'pyrealsense2.hole_filling_filter', 'rs.hole_filling_filter', ([], {}), '()\n', (712, 714), True, 'import pyrealsense2 as rs\n'), ((1205, 1218), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (1216, 1218), True, 'import pyrealsense2 as rs\n'), ((1232, 1243), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (1241, 1243), True, 'import pyrealsense2 as rs\n'), ((2091, 2109), 'pyrealsense2.align', 'rs.align', (['align_to'], {}), '(align_to)\n', (2099, 2109), True, 'import pyrealsense2 as rs\n'), ((5457, 5500), 'numpy.hstack', 'np.hstack', (['(color_image, depth_color_image)'], {}), '((color_image, depth_color_image))\n', (5466, 5500), True, 'import numpy as np\n'), ((5579, 5622), 'cv2.namedWindow', 'cv2.namedWindow', (['"""RGB and Depth Map Images"""'], {}), "('RGB and Depth Map Images')\n", (5594, 5622), False, 'import cv2\n'), ((5635, 5681), 'cv2.imshow', 'cv2.imshow', (['"""RGB and Depth Map Images"""', 'images'], {}), "('RGB and Depth Map Images', images)\n", (5645, 5681), False, 'import cv2\n'), ((5698, 5712), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5709, 5712), False, 'import cv2\n'), ((5216, 5237), 'numpy.rot90', 'np.rot90', (['depth_image'], {}), '(depth_image)\n', (5224, 5237), True, 'import numpy as np\n'), ((5274, 5301), 'numpy.rot90', 'np.rot90', (['depth_color_image'], {}), '(depth_color_image)\n', (5282, 5301), True, 'import numpy as np\n'), ((5332, 5353), 'numpy.rot90', 'np.rot90', (['color_image'], {}), '(color_image)\n', (5340, 5353), True, 'import numpy as np\n'), ((4451, 4465), 'pyrealsense2.colorizer', 'rs.colorizer', ([], {}), '()\n', (4463, 4465), True, 'import pyrealsense2 as rs\n'), ((6057, 6085), 'os.path.exists', 'os.path.exists', (['rgb_img_path'], {}), 
'(rgb_img_path)\n', (6071, 6085), False, 'import os\n'), ((6107, 6132), 'os.makedirs', 'os.makedirs', (['rgb_img_path'], {}), '(rgb_img_path)\n', (6118, 6132), False, 'import os\n'), ((6156, 6186), 'os.path.exists', 'os.path.exists', (['depth_img_path'], {}), '(depth_img_path)\n', (6170, 6186), False, 'import os\n'), ((6208, 6235), 'os.makedirs', 'os.makedirs', (['depth_img_path'], {}), '(depth_img_path)\n', (6219, 6235), False, 'import os\n'), ((6259, 6297), 'os.path.exists', 'os.path.exists', (['colored_depth_img_path'], {}), '(colored_depth_img_path)\n', (6273, 6297), False, 'import os\n'), ((6319, 6354), 'os.makedirs', 'os.makedirs', (['colored_depth_img_path'], {}), '(colored_depth_img_path)\n', (6330, 6354), False, 'import os\n'), ((6538, 6574), 'os.path.join', 'os.path.join', (['rgb_img_path', 'filename'], {}), '(rgb_img_path, filename)\n', (6550, 6574), False, 'import os\n'), ((6682, 6724), 'os.path.join', 'os.path.join', (['depth_img_path', 'filename_raw'], {}), '(depth_img_path, filename_raw)\n', (6694, 6724), False, 'import os\n'), ((6803, 6849), 'os.path.join', 'os.path.join', (['colored_depth_img_path', 'filename'], {}), '(colored_depth_img_path, filename)\n', (6815, 6849), False, 'import os\n')] |
#!/usr/bin/env python
"""Executable for generating depth images from a stereo pair."""
import argparse
import cv2
import logging as log
import numpy as np
from scipy import signal
from tqdm import tqdm
from clubs_dataset_tools.stereo_matching import (rectify_images, stereo_match,
StereoMatchingParams)
from clubs_dataset_tools.filesystem_tools import (
read_images, find_files_with_extension_in_folder, find_all_folders,
find_ir_image_folders, compare_image_names, create_stereo_depth_folder,
create_rectified_images_folder)
from clubs_dataset_tools.common import (CalibrationParams)
def compute_stereo_depth(scene_folder,
                         sensor_folder,
                         stereo_params,
                         calib_params,
                         save_rectified=False):
    """Rectify an image and apply a SGBM algorithm to compute the depth image.

    For every timestamp-matched left/right IR image pair found under the
    sensor folders, the pair is rectified, stereo-matched, remapped back to
    the unrectified left-camera frame and written out as a 16-bit PNG.

    Args:
        scene_folder (str): Path to the scene folder.
        sensor_folder (list(str)): List containing folder names for left
            and right IR image, as well as the sensor root folder.
        stereo_params (StereoMatchingParams): Parameters for stereo matching.
        calib_params (CalibrationParams): Calibration parameters from the
            camera.
        save_rectified (bool, optional): If set to True, rectified images are
            saved. Defaults to False.
    """
    # Collect the left/right IR image file names for this sensor.
    images_left = find_files_with_extension_in_folder(scene_folder +
                                                      sensor_folder[0])
    images_right = find_files_with_extension_in_folder(scene_folder +
                                                       sensor_folder[1])
    # Left and right images are matched by file name (timestamps); an empty
    # result means the two folders are inconsistent and nothing is processed.
    timestamps = compare_image_names(images_left, images_right)
    if len(timestamps) != 0:
        image_paths_left = [
            scene_folder + sensor_folder[0] + '/' + image_left
            for image_left in images_left
        ]
        image_paths_right = [
            scene_folder + sensor_folder[1] + '/' + image_right
            for image_right in images_right
        ]
        ir_left = read_images(image_paths_left)
        ir_right = read_images(image_paths_right)
        # Output folders are created under the sensor root folder.
        stereo_depth_folder = create_stereo_depth_folder(scene_folder +
                                                        sensor_folder[2])
        if save_rectified:
            rectified_images_folder = create_rectified_images_folder(
                scene_folder + sensor_folder[2])
        stereo_bar = tqdm(total=len(ir_left), desc="Stereo Matching Progress")
        for i in range(len(ir_left)):
            log.debug("Rectifying " + str(i) + ". image pair")
            # Undistort and rectify the stereo pair using the IR intrinsics
            # and the left-to-right extrinsics.
            (rectified_l, rectified_r, disparity_to_depth_map,
             rotation_matrix_left, new_calibration_left) = rectify_images(
                ir_left[i], calib_params.ir1_intrinsics,
                calib_params.ir1_distortion_coeffs, ir_right[i],
                calib_params.ir2_intrinsics,
                calib_params.ir2_distortion_coeffs, calib_params.extrinsics_r,
                calib_params.extrinsics_t)
            if save_rectified:
                cv2.imwrite(
                    rectified_images_folder + '/' + timestamps[i] +
                    '_rect_l.png', rectified_l)
                cv2.imwrite(
                    rectified_images_folder + '/' + timestamps[i] +
                    '_rect_r.png', rectified_r)
            log.debug("Stereo matching " + str(i) + '. image pair')
            # Scale factor mapping metric depth to the stored integer units
            # (derived from the camera's depth scale — see CalibrationParams).
            depth_scale = 1000 / calib_params.depth_scale
            depth_uint, depth_float, disparity_float = stereo_match(
                rectified_l, rectified_r, calib_params.extrinsics_t[0],
                new_calibration_left[0, 0], stereo_params, sensor_folder[2][1:],
                depth_scale)
            zero_distortion = np.array([0, 0, 0, 0, 0])
            # Map the depth image back into the original (unrectified) left
            # camera frame by inverting the rectification rotation.
            map_l1, map_l2 = cv2.initUndistortRectifyMap(
                new_calibration_left[:3, :3], zero_distortion,
                np.linalg.inv(rotation_matrix_left),
                new_calibration_left[:3, :3], depth_float.shape[::-1],
                cv2.CV_16SC2)
            depth_float = cv2.remap(depth_float, map_l1, map_l2,
                                      cv2.INTER_LINEAR)
            if stereo_params.use_median_filter:
                # Optional median filter to suppress speckle noise.
                depth_float = signal.medfilt2d(depth_float,
                                              stereo_params.median_filter_size)
            # Re-quantise the filtered float depth to uint16 for storage.
            depth_uint = depth_float * depth_scale
            depth_uint = depth_uint.astype(np.uint16)
            cv2.imwrite(
                stereo_depth_folder + '/' + timestamps[i] + '_stereo_depth.png',
                depth_uint)
            stereo_bar.update()
        stereo_bar.close()
    else:
        log.error("\nImage names are not consistent for left and right image.")
if __name__ == '__main__':
    # Command-line entry point: compute stereo depth either for every scene
    # under --dataset_folder or for a single --scene_folder.
    parser = argparse.ArgumentParser(
        description=(
            "Perform stereo matching for the infrared images and save it as a "
            "depth image. There are two different ways this function can be "
            "called. First one is by passing in the dataset root folder "
            "(flag --dataset_folder) which will create a new folder for each "
            "object/box scene and each sensor (d415 and d435), containing the "
            "depth image obtained through stereo matching. A second way is to "
            "pass object/box scene root folder (flag --scene_folder) which "
            "will do the same for that specific scene."))
    parser.add_argument(
        '--dataset_folder', type=str, help="Path to the dataset root folder.")
    parser.add_argument(
        '--scene_folder', type=str, help="Path to the scene root folder.")
    parser.add_argument(
        '--d415_calib_file',
        type=str,
        default='config/realsense_d415_stereo_depth.yaml',
        help=("Path to RealSense D415 calibration yaml file. Defaults to "
              "config/realsense_d415_stereo_depth.yaml"))
    parser.add_argument(
        '--d435_calib_file',
        type=str,
        default='config/realsense_d435_stereo_depth.yaml',
        help=("Path to RealSense D435 calibration yaml file. Defaults to "
              "config/realsense_d435_stereo_depth.yaml"))
    parser.add_argument(
        '--stereo_params_file',
        type=str,
        default='config/default_stereo_params.yaml',
        help=("Path to stereo parameters yaml file. Defaults to "
              "config/default_stereo_params.yaml"))
    parser.add_argument(
        '--use_only_boxes',
        action='store_true',
        help=("If this flag is set, depth from stereo will only be computed "
              "for the box scenes."))
    parser.add_argument(
        '--save_rectified',
        action='store_true',
        help=("If this flag is set, rectified stereo images will be saved."))
    parser.add_argument(
        '--log',
        type=str,
        default='CRITICAL',
        help=("Logging verbosity (DEBUG, INFO, WARNING, ERROR, CRITICAL)."
              "Defaults to CRITICAL."))
    args = parser.parse_args()
    numeric_level = getattr(log, args.log.upper(), None)
    log.basicConfig(level=numeric_level)
    log.debug("Setting log verbosity to " + args.log)
    used_scenes = []
    stereo_params = StereoMatchingParams()
    stereo_params.read_from_yaml(args.stereo_params_file)
    calib_params = CalibrationParams()
    if args.dataset_folder is not None:
        log.debug("Received dataset_folder.")
        object_scenes, box_scenes = find_all_folders(args.dataset_folder)
        # argparse's store_true always yields a bool, so test it directly
        # (the original compared with "is True").
        if args.use_only_boxes:
            log.debug("Processing only box scenes.")
            used_scenes = box_scenes
        else:
            log.debug("Processing both box and object scenes.")
            used_scenes = object_scenes + box_scenes
        # Two units of progress per scene: one per sensor (D415, D435).
        progress_bar = tqdm(total=len(used_scenes) * 2, desc="Overall Progress")
        for scene in used_scenes:  # iterate directly instead of by index
            log.debug("Processing " + str(scene))
            d415_folder, d435_folder = find_ir_image_folders(scene)
            calib_params.read_from_yaml(args.d415_calib_file)
            if d415_folder != []:
                compute_stereo_depth(scene, d415_folder, stereo_params,
                                     calib_params, args.save_rectified)
            progress_bar.update()
            calib_params.read_from_yaml(args.d435_calib_file)
            if d435_folder != []:
                compute_stereo_depth(scene, d435_folder, stereo_params,
                                     calib_params, args.save_rectified)
            progress_bar.update()
        progress_bar.close()
    elif args.scene_folder is not None:
        log.debug("Processing single scene " + str(args.scene_folder))
        d415_folder, d435_folder = find_ir_image_folders(args.scene_folder)
        calib_params.read_from_yaml(args.d415_calib_file)
        if d415_folder != []:
            compute_stereo_depth(args.scene_folder, d415_folder, stereo_params,
                                 calib_params, args.save_rectified)
        calib_params.read_from_yaml(args.d435_calib_file)
        if d435_folder != []:
            compute_stereo_depth(args.scene_folder, d435_folder, stereo_params,
                                 calib_params, args.save_rectified)
    else:
        # Neither folder flag given: show usage instead of silently exiting.
        parser.print_help()
| [
"argparse.ArgumentParser",
"clubs_dataset_tools.common.CalibrationParams",
"clubs_dataset_tools.filesystem_tools.create_stereo_depth_folder",
"clubs_dataset_tools.filesystem_tools.find_all_folders",
"cv2.remap",
"clubs_dataset_tools.stereo_matching.rectify_images",
"logging.error",
"clubs_dataset_tool... | [((1455, 1523), 'clubs_dataset_tools.filesystem_tools.find_files_with_extension_in_folder', 'find_files_with_extension_in_folder', (['(scene_folder + sensor_folder[0])'], {}), '(scene_folder + sensor_folder[0])\n', (1490, 1523), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((1597, 1665), 'clubs_dataset_tools.filesystem_tools.find_files_with_extension_in_folder', 'find_files_with_extension_in_folder', (['(scene_folder + sensor_folder[1])'], {}), '(scene_folder + sensor_folder[1])\n', (1632, 1665), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((1739, 1785), 'clubs_dataset_tools.filesystem_tools.compare_image_names', 'compare_image_names', (['images_left', 'images_right'], {}), '(images_left, images_right)\n', (1758, 1785), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((4896, 5429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform stereo matching for the infrared images and save it as a depth image. There are two different ways this function can be called. First one is by passing in the dataset root folder (flag --dataset_folder) which will create a new folder for each object/box scene and each sensor (d415 and d435), containing the depth image obtained through stereo matching. 
A second way is to pass object/box scene root folder (flag --scene_folder) which will do the same for that specific scene."""'}), "(description=\n 'Perform stereo matching for the infrared images and save it as a depth image. There are two different ways this function can be called. First one is by passing in the dataset root folder (flag --dataset_folder) which will create a new folder for each object/box scene and each sensor (d415 and d435), containing the depth image obtained through stereo matching. A second way is to pass object/box scene root folder (flag --scene_folder) which will do the same for that specific scene.'\n )\n", (4919, 5429), False, 'import argparse\n'), ((7181, 7217), 'logging.basicConfig', 'log.basicConfig', ([], {'level': 'numeric_level'}), '(level=numeric_level)\n', (7196, 7217), True, 'import logging as log\n'), ((7222, 7271), 'logging.debug', 'log.debug', (["('Setting log verbosity to ' + args.log)"], {}), "('Setting log verbosity to ' + args.log)\n", (7231, 7271), True, 'import logging as log\n'), ((7315, 7337), 'clubs_dataset_tools.stereo_matching.StereoMatchingParams', 'StereoMatchingParams', ([], {}), '()\n', (7335, 7337), False, 'from clubs_dataset_tools.stereo_matching import rectify_images, stereo_match, StereoMatchingParams\n'), ((7415, 7434), 'clubs_dataset_tools.common.CalibrationParams', 'CalibrationParams', ([], {}), '()\n', (7432, 7434), False, 'from clubs_dataset_tools.common import CalibrationParams\n'), ((2127, 2156), 'clubs_dataset_tools.filesystem_tools.read_images', 'read_images', (['image_paths_left'], {}), '(image_paths_left)\n', (2138, 2156), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((2176, 2206), 'clubs_dataset_tools.filesystem_tools.read_images', 'read_images', (['image_paths_right'], {}), '(image_paths_right)\n', (2187, 
2206), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((2238, 2297), 'clubs_dataset_tools.filesystem_tools.create_stereo_depth_folder', 'create_stereo_depth_folder', (['(scene_folder + sensor_folder[2])'], {}), '(scene_folder + sensor_folder[2])\n', (2264, 2297), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((4782, 4856), 'logging.error', 'log.error', (['"""\nImage names are not consistent for left and right image."""'], {}), '("""\nImage names are not consistent for left and right image.""")\n', (4791, 4856), True, 'import logging as log\n'), ((7484, 7521), 'logging.debug', 'log.debug', (['"""Received dataset_folder."""'], {}), "('Received dataset_folder.')\n", (7493, 7521), True, 'import logging as log\n'), ((7558, 7595), 'clubs_dataset_tools.filesystem_tools.find_all_folders', 'find_all_folders', (['args.dataset_folder'], {}), '(args.dataset_folder)\n', (7574, 7595), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((2421, 2484), 'clubs_dataset_tools.filesystem_tools.create_rectified_images_folder', 'create_rectified_images_folder', (['(scene_folder + sensor_folder[2])'], {}), '(scene_folder + sensor_folder[2])\n', (2451, 2484), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((2805, 3041), 
'clubs_dataset_tools.stereo_matching.rectify_images', 'rectify_images', (['ir_left[i]', 'calib_params.ir1_intrinsics', 'calib_params.ir1_distortion_coeffs', 'ir_right[i]', 'calib_params.ir2_intrinsics', 'calib_params.ir2_distortion_coeffs', 'calib_params.extrinsics_r', 'calib_params.extrinsics_t'], {}), '(ir_left[i], calib_params.ir1_intrinsics, calib_params.\n ir1_distortion_coeffs, ir_right[i], calib_params.ir2_intrinsics,\n calib_params.ir2_distortion_coeffs, calib_params.extrinsics_r,\n calib_params.extrinsics_t)\n', (2819, 3041), False, 'from clubs_dataset_tools.stereo_matching import rectify_images, stereo_match, StereoMatchingParams\n'), ((3619, 3773), 'clubs_dataset_tools.stereo_matching.stereo_match', 'stereo_match', (['rectified_l', 'rectified_r', 'calib_params.extrinsics_t[0]', 'new_calibration_left[0, 0]', 'stereo_params', 'sensor_folder[2][1:]', 'depth_scale'], {}), '(rectified_l, rectified_r, calib_params.extrinsics_t[0],\n new_calibration_left[0, 0], stereo_params, sensor_folder[2][1:],\n depth_scale)\n', (3631, 3773), False, 'from clubs_dataset_tools.stereo_matching import rectify_images, stereo_match, StereoMatchingParams\n'), ((3846, 3871), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (3854, 3871), True, 'import numpy as np\n'), ((4173, 4229), 'cv2.remap', 'cv2.remap', (['depth_float', 'map_l1', 'map_l2', 'cv2.INTER_LINEAR'], {}), '(depth_float, map_l1, map_l2, cv2.INTER_LINEAR)\n', (4182, 4229), False, 'import cv2\n'), ((4583, 4675), 'cv2.imwrite', 'cv2.imwrite', (["(stereo_depth_folder + '/' + timestamps[i] + '_stereo_depth.png')", 'depth_uint'], {}), "(stereo_depth_folder + '/' + timestamps[i] + '_stereo_depth.png',\n depth_uint)\n", (4594, 4675), False, 'import cv2\n'), ((7649, 7689), 'logging.debug', 'log.debug', (['"""Processing only box scenes."""'], {}), "('Processing only box scenes.')\n", (7658, 7689), True, 'import logging as log\n'), ((7753, 7804), 'logging.debug', 'log.debug', (['"""Processing both box 
and object scenes."""'], {}), "('Processing both box and object scenes.')\n", (7762, 7804), True, 'import logging as log\n'), ((8106, 8134), 'clubs_dataset_tools.filesystem_tools.find_ir_image_folders', 'find_ir_image_folders', (['scene'], {}), '(scene)\n', (8127, 8134), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((8860, 8900), 'clubs_dataset_tools.filesystem_tools.find_ir_image_folders', 'find_ir_image_folders', (['args.scene_folder'], {}), '(args.scene_folder)\n', (8881, 8900), False, 'from clubs_dataset_tools.filesystem_tools import read_images, find_files_with_extension_in_folder, find_all_folders, find_ir_image_folders, compare_image_names, create_stereo_depth_folder, create_rectified_images_folder\n'), ((3163, 3254), 'cv2.imwrite', 'cv2.imwrite', (["(rectified_images_folder + '/' + timestamps[i] + '_rect_l.png')", 'rectified_l'], {}), "(rectified_images_folder + '/' + timestamps[i] + '_rect_l.png',\n rectified_l)\n", (3174, 3254), False, 'import cv2\n'), ((3308, 3399), 'cv2.imwrite', 'cv2.imwrite', (["(rectified_images_folder + '/' + timestamps[i] + '_rect_r.png')", 'rectified_r'], {}), "(rectified_images_folder + '/' + timestamps[i] + '_rect_r.png',\n rectified_r)\n", (3319, 3399), False, 'import cv2\n'), ((4009, 4044), 'numpy.linalg.inv', 'np.linalg.inv', (['rotation_matrix_left'], {}), '(rotation_matrix_left)\n', (4022, 4044), True, 'import numpy as np\n'), ((4346, 4409), 'scipy.signal.medfilt2d', 'signal.medfilt2d', (['depth_float', 'stereo_params.median_filter_size'], {}), '(depth_float, stereo_params.median_filter_size)\n', (4362, 4409), False, 'from scipy import signal\n')] |
from sklearn.externals import joblib
from collections import defaultdict, Counter
import gzip
import sys
import string
import numpy as np
def CreateXid(dict_in, limit):
    """Build word<->id lookup tables for tokens whose count exceeds `limit`.

    Ids 0-3 are reserved for the special tokens PAD, <BOS>, <EOS> and unk;
    remaining ids are assigned in the iteration order of `dict_in`.

    Returns:
        (word2id, id2word): mutually inverse dictionaries.
    """
    specials = ["PAD", "<BOS>", "<EOS>", "unk"]
    word2id = {token: idx for idx, token in enumerate(specials)}
    id2word = {idx: token for idx, token in enumerate(specials)}
    next_id = len(specials)
    for token, count in dict_in.items():
        if count > limit:
            word2id[token] = next_id
            id2word[next_id] = token
            next_id += 1
    return word2id, id2word
def load_word2vec(file_path):
    """Load GloVe/word2vec-style text embeddings into {word: np.ndarray}.

    Each line has the form '<word> <v1> <v2> ...'.

    Fixes over the original: the file is opened with an explicit UTF-8
    encoding, tokens are split on arbitrary whitespace (robust to the
    trailing newline and repeated spaces), and blank lines are skipped
    instead of producing a garbage '' entry.

    Args:
        file_path (str): Path to the embeddings text file.

    Returns:
        dict: word -> 1-D numpy array of floats.
    """
    word2vec = {}
    with open(file_path, encoding="utf-8") as lines:
        for line in lines:
            parts = line.split()
            if not parts:
                continue  # skip blank lines
            word = parts[0]
            word2vec[word] = np.array([float(num) for num in parts[1:]])
    return word2vec
def create_id2vec(id2word, word2vec):
    """Build an id -> embedding matrix from the id2word mapping.

    Row 0 (PAD) is kept all-zero; words missing from `word2vec` fall back
    to the 'unk' embedding. The matrix is stored as float16 to save memory.

    Args:
        id2word (dict): id -> word mapping (ids are the row indices).
        word2vec (dict): word -> 1-D embedding vector; must contain 'unk'.

    Returns:
        np.ndarray: (max_id + 1, dim) float16 embedding matrix.
    """
    unk_vec = word2vec["unk"]
    dim_of_vector = len(unk_vec)
    # max() iterates the dict's keys directly — no need to build a list.
    num_of_tokens = max(id2word)
    id2vec = np.zeros((num_of_tokens + 1, dim_of_vector), dtype=np.float16)
    for idx, word in id2word.items():
        if word != 'PAD':  # keep the padding row at zero
            # Single dict lookup with fallback, instead of 'in' + indexing.
            id2vec[idx, :] = word2vec.get(word, unk_vec)
    return id2vec
def collect_tokens(filename):
    """Read a gzipped text file and return its tokens as a flat list.

    Underscore-joined words are split into their parts (kept verbatim);
    every other word has ASCII punctuation stripped.

    Args:
        filename (str): Path to a gzip-compressed text file.

    Returns:
        list(str): Tokens in file order.
    """
    # PERF: build the punctuation-stripping table once, not once per word.
    strip_punct = str.maketrans('', '', string.punctuation)
    token_list = []
    with gzip.open(filename, 'rt') as file:
        for line in file:
            for word in line.split():
                if "_" in word:
                    token_list += word.split("_")
                else:
                    token_list.append(word.translate(strip_punct))
    return token_list
if __name__ == '__main__':
    # Usage: <script> CORPUS_GZ GLOVE_TXT OUTPUT_PATH COUNT_LIMIT
    PATH = sys.argv[1]
    glove_path = sys.argv[2]
    save_location_path = sys.argv[3]
    # SECURITY FIX: the limit was previously obtained via eval() on a
    # command-line argument, which executes arbitrary code. Parse it as a
    # plain number instead (float accepts both '5' and '0.5'; the limit is
    # only ever used in a numeric comparison).
    limit = float(sys.argv[4])
    # Tokenise the corpus and count token frequencies.
    token_list = []
    token_list += collect_tokens(PATH)
    token_count = Counter(token_list)
    # Build the vocabulary (ids) and the id -> embedding matrix.
    word2id_, id2word_ = CreateXid(token_count, limit=limit)
    word2vec_ = load_word2vec(glove_path)
    id2vec = create_id2vec(id2word_, word2vec_)
    print(id2vec.shape)
    dict_data = {"id2vec": id2vec, "word2id": word2id_, "id2word": id2word_}
    joblib.dump(dict_data, save_location_path)
| [
"sklearn.externals.joblib.dump",
"gzip.open",
"numpy.zeros",
"numpy.array",
"collections.Counter"
] | [((1119, 1181), 'numpy.zeros', 'np.zeros', (['(num_of_tokens + 1, dim_of_vector)'], {'dtype': 'np.float16'}), '((num_of_tokens + 1, dim_of_vector), dtype=np.float16)\n', (1127, 1181), True, 'import numpy as np\n'), ((1992, 2011), 'collections.Counter', 'Counter', (['token_list'], {}), '(token_list)\n', (1999, 2011), False, 'from collections import defaultdict, Counter\n'), ((2274, 2316), 'sklearn.externals.joblib.dump', 'joblib.dump', (['dict_data', 'save_location_path'], {}), '(dict_data, save_location_path)\n', (2285, 2316), False, 'from sklearn.externals import joblib\n'), ((1409, 1434), 'gzip.open', 'gzip.open', (['filename', '"""rt"""'], {}), "(filename, 'rt')\n", (1418, 1434), False, 'import gzip\n'), ((912, 928), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (920, 928), True, 'import numpy as np\n')] |
from __future__ import with_statement
from katcorelib import standard_script_options, verify_and_connect, start_session, user_logger
import time
import numpy as np
import katpoint
# Known pointing targets as (east, north, up) offsets from a common reference
# point — the same ENU frame consumed by enu_to_azel() below. Units are not
# stated here; presumably metres — TODO confirm against the site survey.
# The commented-out entries appear to be earlier values kept for reference.
targets = {'ant1' : (25.119, -8.944, 0.083),
           'ant2' : (90.315, 26.648, -0.067),
           'ant3' : (3.989, 26.925, -0.006),
           'ant4' : (-21.600, 25.500, 0.000),
           # 'ant1' : (18.4, -8.7, 0),
           # 'ant2' : (86.2, 25.5, 0),
           # 'ant3' : (3.2, 27.3, 0),
           # 'ant4' : (-21.6, 25.5, 0),
           'ant5' : (-37.5, -1.3, 0),
           'ant6' : (-61.5, -78.0, 0),
           'ant7' : (-87.8, 76.3, 0),
           'asc' : (57, -27, 0),
           '12m' : (45, -43, 0),
           # 'asc' : (46, -27, 0),
           # '12m' : (33, -43, 0),
           'minister' : (40., -40., 0),
           'origin' : (63.7, -32.9, 0)}
def track(ants, targets, duration=10):
    """Point each antenna at its matching target and track for `duration` s.

    Args:
        ants: Sequence of antenna proxy objects (katcorelib).
        targets: Sequence of target description strings, one per antenna.
        duration (float): Seconds to stay on target once all antennas lock.

    Returns:
        bool: True if every antenna locked and the track completed,
        False if any antenna failed to lock within the timeout.
    """
    # Send each target to its antenna and start slewing.
    for target, ant_x in zip(targets, ants):
        ant_x.req.target(target)
        ant_x.req.mode("POINT")
        user_logger.info("Slewing %s to target : %s" % (ant_x.name, target,))
    # Wait (up to 300 s each) for every antenna to lock onto its target.
    locks = 0
    for ant_x in ants:
        if ant_x.wait("lock", True, 300):
            locks += 1
    if len(ants) == locks:
        user_logger.info("Tracking Target : %s for %s seconds" % (target, str(duration)))
        time.sleep(duration)
        user_logger.info("Target tracked : %s " % (target,))
        return True
    else:
        # BUG FIX: warning message previously read "Targe" instead of "Target".
        user_logger.warning("Unable to track Target : %s " % (target,))
        return False
def enu_to_azel(e, n, u):
    """Convert a vector in ENU coordinates to (az, el) spherical coordinates.

    The local east-north-up (ENU) vector may be in any unit, as only its
    direction matters for the resulting horizontal spherical coordinates.

    Parameters
    ----------
    e, n, u : float or array
        East, North, Up coordinates (any unit)

    Returns
    -------
    az_rad, el_rad : float or array
        Azimuth and elevation angle, in radians
    """
    azimuth = np.arctan2(e, n)
    horizontal_dist = np.sqrt(e * e + n * n)
    elevation = np.arctan2(u, horizontal_dist)
    return azimuth, elevation
# Parse command-line options that allow the defaults to be overridden
parser = standard_script_options(usage="%prog [options] <target>",
                            description="Point dishes at the given target and record data.")
# Generic options
parser.add_option('-m', '--max-duration', dest='max_duration', type="float", default=60.0,
                  help='Duration to run experiment, in seconds (default=%default)')
# 'description' is the observation description recorded with the data; it may
# be replaced below once the target name is known.
parser.set_defaults(description='Point to enu')
(opts, args) = parser.parse_args()
# Resolve the single positional argument into an (east, north, up) offset:
# either a named entry in `targets` or a literal coordinate triple such as
# '(50,50,0)'.
if not args:  # idiomatic emptiness test instead of len(...) == 0
    raise ValueError("Please specify one target argument (via name or coords, e.g. 'ant1' or '(50,50,0)')")
elif len(args) > 1:
    raise ValueError("Please specify only one target argument (if using coords, don't include spaces, e.g. use '(50,50,0)')")
target = args[0]
if target in targets:
    target_enu = targets[target]
else:
    try:
        # Accept literal coordinates, stripping any surrounding brackets.
        target_enu = tuple(float(coord) for coord in target.strip('\n\t ()[]').split(','))
        # Commas would break downstream descriptions, so replace them.
        target = target.replace(',', '/')
    except ValueError:
        raise ValueError("Unknown target '%s', should be one of %s" % (target, targets.keys()))
if len(target_enu) != 3:
    raise ValueError("Please provide 3 coordinates (east, north, up)")
# Various non-optional options...
# BUG FIX: the original used an identity comparison with a differently-cased
# literal ("opts.description is 'point to enu'"), so the default description
# was never replaced. Compare by equality against the actual default set above.
if opts.description == 'Point to enu':
    opts.description = "Data recorded while pointing at '%s'" % target
# Connect to the array, compute a per-antenna (az, el) pointing from the
# requested ENU offset, and track all antennas on their targets.
with verify_and_connect(opts) as kat:
    kat.ants.req.sensor_sampling("lock","event")
    with start_session(kat, **vars(opts)) as session:
        session.standard_setup(**vars(opts))
        session.capture_start()
        session.label('track')
        session.ants.req.drive_strategy('shortest-slew')
        session.ants.req.sensor_sampling("lock","event")
        target_list = []
        for ant in session.ants:
            antenna = katpoint.Antenna(ant.sensor.observer.get_value())
            # Direction from this antenna to the target point, in ENU.
            enu = np.asarray(target_enu) - np.asarray(antenna.position_enu)
            if np.all(enu == 0):
                # Target coincides with the antenna: point straight up.
                enu = np.array([0, 0, 1])
            az, el = enu_to_azel(*enu)
            az, el = katpoint.rad2deg(az), katpoint.rad2deg(el)
            # Go to nearest point on horizon if target is below elevation limit
            el = max(el, 3.0)
            # katpoint-style target description: "<name>, azel, <az>, <el>".
            target_description = "%s, azel, %f, %f" % (target, az, el)
            target_list.append(target_description)
        track(session.ants,target_list,duration=opts.max_duration)
| [
"katpoint.rad2deg",
"numpy.arctan2",
"numpy.asarray",
"katcorelib.user_logger.info",
"time.sleep",
"katcorelib.standard_script_options",
"numpy.array",
"numpy.sqrt",
"katcorelib.verify_and_connect",
"numpy.all",
"katcorelib.user_logger.warning"
] | [((2321, 2448), 'katcorelib.standard_script_options', 'standard_script_options', ([], {'usage': '"""%prog [options] <target>"""', 'description': '"""Point dishes at the given target and record data."""'}), "(usage='%prog [options] <target>', description=\n 'Point dishes at the given target and record data.')\n", (2344, 2448), False, 'from katcorelib import standard_script_options, verify_and_connect, start_session, user_logger\n'), ((3614, 3638), 'katcorelib.verify_and_connect', 'verify_and_connect', (['opts'], {}), '(opts)\n', (3632, 3638), False, 'from katcorelib import standard_script_options, verify_and_connect, start_session, user_logger\n'), ((1039, 1107), 'katcorelib.user_logger.info', 'user_logger.info', (["('Slewing %s to target : %s' % (ant_x.name, target))"], {}), "('Slewing %s to target : %s' % (ant_x.name, target))\n", (1055, 1107), False, 'from katcorelib import standard_script_options, verify_and_connect, start_session, user_logger\n'), ((1362, 1382), 'time.sleep', 'time.sleep', (['duration'], {}), '(duration)\n', (1372, 1382), False, 'import time\n'), ((1391, 1443), 'katcorelib.user_logger.info', 'user_logger.info', (["('Target tracked : %s ' % (target,))"], {}), "('Target tracked : %s ' % (target,))\n", (1407, 1443), False, 'from katcorelib import standard_script_options, verify_and_connect, start_session, user_logger\n'), ((1480, 1542), 'katcorelib.user_logger.warning', 'user_logger.warning', (["('Unable to track Targe : %s ' % (target,))"], {}), "('Unable to track Targe : %s ' % (target,))\n", (1499, 1542), False, 'from katcorelib import standard_script_options, verify_and_connect, start_session, user_logger\n'), ((2185, 2201), 'numpy.arctan2', 'np.arctan2', (['e', 'n'], {}), '(e, n)\n', (2195, 2201), True, 'import numpy as np\n'), ((2217, 2239), 'numpy.sqrt', 'np.sqrt', (['(e * e + n * n)'], {}), '(e * e + n * n)\n', (2224, 2239), True, 'import numpy as np\n'), ((4193, 4209), 'numpy.all', 'np.all', (['(enu == 0)'], {}), '(enu == 0)\n', 
(4199, 4209), True, 'import numpy as np\n'), ((4120, 4142), 'numpy.asarray', 'np.asarray', (['target_enu'], {}), '(target_enu)\n', (4130, 4142), True, 'import numpy as np\n'), ((4145, 4177), 'numpy.asarray', 'np.asarray', (['antenna.position_enu'], {}), '(antenna.position_enu)\n', (4155, 4177), True, 'import numpy as np\n'), ((4233, 4252), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (4241, 4252), True, 'import numpy as np\n'), ((4313, 4333), 'katpoint.rad2deg', 'katpoint.rad2deg', (['az'], {}), '(az)\n', (4329, 4333), False, 'import katpoint\n'), ((4335, 4355), 'katpoint.rad2deg', 'katpoint.rad2deg', (['el'], {}), '(el)\n', (4351, 4355), False, 'import katpoint\n')] |
import numpy as np
from mapel.roommates.models._utils import convert
from mapel.main._utils import *
def generate_roommates_ic_votes(num_agents: int = None):
    """Impartial Culture: each agent draws a uniformly random ranking."""
    votes = []
    for _ in range(num_agents):
        votes.append(list(np.random.permutation(num_agents)))
    return convert(votes)
def generate_roommates_group_ic_votes(num_agents: int = None, params: dict = None):
    """Impartial Culture with two groups.

    Agents are split into two groups (sizes controlled by
    params['proportion'], default 0.5). Each agent ranks its own group
    first, uniformly at random, followed by the other group in random order.

    Args:
        num_agents: Total number of agents.
        params: Optional dict; may contain 'proportion' in [0, 1].

    Returns:
        The converted votes (see mapel's `convert`).
    """
    # BUG FIX: calling with the default params=None crashed on
    # "'proportion' not in params" (membership test on None).
    if params is None:
        params = {}
    # setdefault preserves the original caller-visible mutation of params.
    params.setdefault('proportion', 0.5)

    size_1 = int(params['proportion'] * num_agents)
    size_2 = int(num_agents - size_1)

    votes_1 = [list(np.random.permutation(size_1)) +
               list(np.random.permutation([j for j in range(size_1, num_agents)]))
               for _ in range(size_1)]

    votes_2 = [list(np.random.permutation([j for j in range(size_1, num_agents)])) +
               list(np.random.permutation(size_1))
               for _ in range(size_2)]

    votes = votes_1 + votes_2

    return convert(votes)
def generate_roommates_id_votes(num_agents: int = None):
    """One of four extreme points for Compass: identical preferences."""
    identity_ranking = list(range(num_agents))
    votes = [identity_ranking[:] for _ in range(num_agents)]  # fresh copy per agent
    return convert(votes)
def generate_roommates_asymmetric_votes(num_agents: int = None):
""" One of four extreme points for Compass """
votes = [list(range(num_agents)) for _ in range(num_agents)]
votes = [rotate(vote, shift) for shift, vote in enumerate(votes)]
return convert(votes)
def generate_roommates_symmetric_votes(num_agents: int = None):
""" One of four extreme points for Compass """
num_rounds = num_agents - 1
def next(agents):
first = agents[0]
last = agents[-1]
middle = agents[1:-1]
new_agents = [first, last]
new_agents.extend(middle)
return new_agents
agents = [i for i in range(num_agents)]
rounds = []
for _ in range(num_rounds):
pairs = []
for i in range(num_agents // 2):
agent_1 = agents[i]
agent_2 = agents[num_agents - 1 - i]
pairs.append([agent_1, agent_2])
rounds.append(pairs)
agents = next(agents)
votes = np.zeros([num_agents, num_agents - 1], dtype=int)
for pos, partition in enumerate(rounds):
for x, y in partition:
votes[x][pos] = y
votes[y][pos] = x
return votes
def generate_roommates_chaos_votes(num_agents: int = None):
""" One of four extreme points for Compass """
num_rooms = num_agents // 2
matrix = np.zeros([num_agents, num_agents - 1], dtype=int)
matrix[0] = [i for i in range(num_agents - 1)]
for i in range(1, num_agents):
for j in range(num_rooms):
matrix[i][2 * j] = (i + j - 1) % (num_agents - 1)
if j < num_rooms - 1:
matrix[i][2 * j + 1] = (num_rooms + i + j - 1) % (num_agents - 1)
votes = np.zeros([num_agents, num_agents - 1], dtype=int)
for k1 in range(num_agents):
for k2 in range(num_agents - 1):
for i in range(num_agents):
if k1 != i and matrix[i][matrix[k1][k2]] == matrix[k1][k2]:
votes[k1][k2] = i
return votes
# # # # # # # # # # # # # # # #
# LAST CLEANUP ON: 16.03.2022 #
# # # # # # # # # # # # # # # #
| [
"mapel.roommates.models._utils.convert",
"numpy.zeros",
"numpy.random.permutation"
] | [((286, 300), 'mapel.roommates.models._utils.convert', 'convert', (['votes'], {}), '(votes)\n', (293, 300), False, 'from mapel.roommates.models._utils import convert\n'), ((990, 1004), 'mapel.roommates.models._utils.convert', 'convert', (['votes'], {}), '(votes)\n', (997, 1004), False, 'from mapel.roommates.models._utils import convert\n'), ((1193, 1207), 'mapel.roommates.models._utils.convert', 'convert', (['votes'], {}), '(votes)\n', (1200, 1207), False, 'from mapel.roommates.models._utils import convert\n'), ((1474, 1488), 'mapel.roommates.models._utils.convert', 'convert', (['votes'], {}), '(votes)\n', (1481, 1488), False, 'from mapel.roommates.models._utils import convert\n'), ((2191, 2240), 'numpy.zeros', 'np.zeros', (['[num_agents, num_agents - 1]'], {'dtype': 'int'}), '([num_agents, num_agents - 1], dtype=int)\n', (2199, 2240), True, 'import numpy as np\n'), ((2555, 2604), 'numpy.zeros', 'np.zeros', (['[num_agents, num_agents - 1]'], {'dtype': 'int'}), '([num_agents, num_agents - 1], dtype=int)\n', (2563, 2604), True, 'import numpy as np\n'), ((2919, 2968), 'numpy.zeros', 'np.zeros', (['[num_agents, num_agents - 1]'], {'dtype': 'int'}), '([num_agents, num_agents - 1], dtype=int)\n', (2927, 2968), True, 'import numpy as np\n'), ((211, 244), 'numpy.random.permutation', 'np.random.permutation', (['num_agents'], {}), '(num_agents)\n', (232, 244), True, 'import numpy as np\n'), ((616, 645), 'numpy.random.permutation', 'np.random.permutation', (['size_1'], {}), '(size_1)\n', (637, 645), True, 'import numpy as np\n'), ((877, 906), 'numpy.random.permutation', 'np.random.permutation', (['size_1'], {}), '(size_1)\n', (898, 906), True, 'import numpy as np\n')] |
import gym
import numpy as np
import torch
from torch import Tensor
import torch.nn as nn
import torch.optim as optim
import time
from spinup.algos.sac_pytorch.core import TanhGaussianPolicy, Mlp, soft_update_model1_with_model2, ReplayBuffer
from spinup.utils.logx import EpochLogger
from spinup.utils.run_utils import setup_logger_kwargs
def sac_pytorch(env_fn, hidden_sizes=[256, 256], seed=0,
steps_per_epoch=5000, epochs=100, replay_size=int(1e6), gamma=0.99,
polyak=0.995, lr=3e-4, alpha=0.2, batch_size=256, start_steps=10000,
max_ep_len=1000, save_freq=1, dont_save=True, regularization_weight=1e-3,
logger_kwargs=dict(),):
"""
Largely following OpenAI documentation
But slightly different from tensorflow implementation
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
hidden_sizes: number of entries is number of hidden layers
each entry in this list indicate the size of that hidden layer.
applies to all networks
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch. Note the epoch here is just logging epoch
so every this many steps a logging to stdouot and also output file will happen
note: not to be confused with training epoch which is a term used often in literature for all kinds of
different things
epochs (int): Number of epochs to run and train agent. Usage of this term can be different in different
algorithms, use caution. Here every epoch you get new logs
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
lr (float): Learning rate (used for both policy and value learning).
alpha (float): Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.)
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration. However during testing the action always come from policy
max_ep_len (int): Maximum length of trajectory / episode / rollout. Environment will get reseted if
timestep in an episode excedding this number
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
logger_kwargs (dict): Keyword args for EpochLogger.
"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("running on device:" ,device)
"""set up logger"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
env, test_env = env_fn(), env_fn()
## seed torch and numpy
torch.manual_seed(seed)
np.random.seed(seed)
## seed environment along with env action space so that everything about env is seeded
env.seed(seed)
env.action_space.np_random.seed(seed)
test_env.seed(seed)
test_env.action_space.np_random.seed(seed)
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# if environment has a smaller max episode length, then use the environment's max episode length
max_ep_len = env._max_episode_steps if max_ep_len > env._max_episode_steps else max_ep_len
# Action limit for clamping: critically, assumes all dimensions share the same bound!
# we need .item() to convert it from numpy float to python float
act_limit = env.action_space.high[0].item()
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
def test_agent(n=5):
"""
This will test the agent's performance by running n episodes
During the runs, the agent only take deterministic action, so the
actions are not drawn from a distribution, but just use the mean
:param n: number of episodes to run the agent
"""
ep_return_list = np.zeros(n)
for j in range(n):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
while not (d or (ep_len == max_ep_len)):
# Take deterministic actions at test time
a = policy_net.get_env_action(o, deterministic=True)
o, r, d, _ = test_env.step(a)
ep_ret += r
ep_len += 1
ep_return_list[j] = ep_ret
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
total_steps = steps_per_epoch * epochs
"""init all networks"""
# see line 1
policy_net = TanhGaussianPolicy(obs_dim, act_dim, hidden_sizes,action_limit=act_limit).to(device)
value_net = Mlp(obs_dim,1,hidden_sizes).to(device)
target_value_net = Mlp(obs_dim,1,hidden_sizes).to(device)
q1_net = Mlp(obs_dim+act_dim,1,hidden_sizes).to(device)
q2_net = Mlp(obs_dim+act_dim,1,hidden_sizes).to(device)
# see line 2: copy parameters from value_net to target_value_net
target_value_net.load_state_dict(value_net.state_dict())
# set up optimizers
policy_optimizer = optim.Adam(policy_net.parameters(),lr=lr)
value_optimizer = optim.Adam(value_net.parameters(),lr=lr)
q1_optimizer = optim.Adam(q1_net.parameters(),lr=lr)
q2_optimizer = optim.Adam(q2_net.parameters(),lr=lr)
# mean squared error loss for v and q networks
mse_criterion = nn.MSELoss()
# Main loop: collect experience in env and update/log each epoch
# NOTE: t here is the current number of total timesteps used
# it is not the number of timesteps passed in the current episode
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy.
"""
if t > start_steps:
a = policy_net.get_env_action(o, deterministic=False)
else:
a = env.action_space.sample()
# Step the env, get next observation, reward and done signal
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len == max_ep_len else d
# Store experience (observation, action, reward, next observation, done) to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
if d or (ep_len == max_ep_len):
"""
Perform all SAC updates at the end of the trajectory.
This is a slight difference from the SAC specified in the
original paper.
Quoted from the original SAC paper: 'In practice, we take a single environment step
followed by one or several gradient step' after a single environment step,
the number of gradient steps is 1 for SAC. (see paper for reference)
"""
for j in range(ep_len):
# get data from replay buffer
batch = replay_buffer.sample_batch(batch_size)
obs_tensor = Tensor(batch['obs1']).to(device)
obs_next_tensor = Tensor(batch['obs2']).to(device)
acts_tensor = Tensor(batch['acts']).to(device)
# unsqueeze is to make sure rewards and done tensors are of the shape nx1, instead of n
# to prevent problems later
rews_tensor = Tensor(batch['rews']).unsqueeze(1).to(device)
done_tensor = Tensor(batch['done']).unsqueeze(1).to(device)
"""
now we do a SAC update, following the OpenAI spinup doc
check the openai sac document psudocode part for reference
line nubmers indicate lines in psudocode part
we will first compute each of the losses
and then update all the networks in the end
"""
# see line 12: get a_tilda, which is newly sampled action (not action from replay buffer)
a_tilda, mean_a_tilda, log_std_a_tilda, log_prob_a_tilda, _, _ = policy_net.forward(obs_tensor)
"""get q loss"""
# see line 12: first equation
v_from_target_v_net = target_value_net(obs_next_tensor)
y_q = rews_tensor + gamma*(1-done_tensor)*v_from_target_v_net
# see line 13: compute loss for the 2 q networks, note that we want to detach the y_q value
# since we only want to update q networks here, and don't want other gradients
q1_prediction = q1_net(torch.cat([obs_tensor,acts_tensor], 1))
q1_loss = mse_criterion(q1_prediction, y_q.detach())
q2_prediction = q2_net(torch.cat([obs_tensor, acts_tensor], 1))
q2_loss = mse_criterion(q2_prediction, y_q.detach())
"""get v loss"""
# see line 12: second equation
q1_a_tilda = q1_net(torch.cat([obs_tensor,a_tilda],1))
q2_a_tilda = q2_net(torch.cat([obs_tensor,a_tilda],1))
min_q1_q2_a_tilda = torch.min(torch.cat([q1_a_tilda,q2_a_tilda],1),1)[0].reshape(-1,1)
y_v = min_q1_q2_a_tilda - alpha*log_prob_a_tilda
# see line 14: compute loss for value network
v_prediction = value_net(obs_tensor)
v_loss = mse_criterion(v_prediction, y_v.detach())
"""policy loss"""
# line 15: note that here we are doing gradient ascent, so we add a minus sign in the front
policy_loss = - (q1_a_tilda - alpha*log_prob_a_tilda).mean()
"""
add policy regularization loss, this is not in openai's minimal version, but
they are in the original sac code, see https://github.com/vitchyr/rlkit for reference
this part is not necessary but might improve performance
"""
policy_mean_reg_weight = regularization_weight
policy_std_reg_weight = regularization_weight
mean_reg_loss = policy_mean_reg_weight * (mean_a_tilda ** 2).mean()
std_reg_loss = policy_std_reg_weight * (log_std_a_tilda ** 2).mean()
policy_loss = policy_loss + mean_reg_loss + std_reg_loss
"""update networks"""
q1_optimizer.zero_grad()
q1_loss.backward()
q1_optimizer.step()
q2_optimizer.zero_grad()
q2_loss.backward()
q2_optimizer.step()
value_optimizer.zero_grad()
v_loss.backward()
value_optimizer.step()
policy_optimizer.zero_grad()
policy_loss.backward()
policy_optimizer.step()
# see line 16: update target value network with value network
soft_update_model1_with_model2(target_value_net, value_net, polyak)
# store diagnostic info to logger
logger.store(LossPi=policy_loss.cpu().item(), LossQ1=q1_loss.cpu().item(), LossQ2=q2_loss.cpu().item(),
LossV=v_loss.cpu().item(),
Q1Vals=q1_prediction.detach().cpu().numpy(),
Q2Vals=q2_prediction.detach().cpu().numpy(),
VVals=v_prediction.detach().cpu().numpy(),
LogPi=log_prob_a_tilda.detach().cpu().numpy())
## store episode return and length to logger
logger.store(EpRet=ep_ret, EpLen=ep_len)
## reset environment
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# End of epoch wrap-up
if (t+1) % steps_per_epoch == 0:
epoch = t // steps_per_epoch
"""
Save pytorch model, very different from tensorflow version
We need to save the environment, the state_dict of each network
and also the state_dict of each optimizer
"""
if not dont_save:
sac_state_dict = {'env':env,'policy_net':policy_net.state_dict(),
'value_net':value_net.state_dict(), 'target_value_net':target_value_net.state_dict(),
'q1_net':q1_net.state_dict(), 'q2_net':q2_net.state_dict(),
'policy_opt':policy_optimizer, 'value_opt':value_optimizer,
'q1_opt':q1_optimizer, 'q2_opt':q2_optimizer}
if (epoch % save_freq == 0) or (epoch == epochs-1):
logger.save_state(sac_state_dict, None)
# Test the performance of the deterministic version of the agent.
test_agent()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('LogPi', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ1', average_only=True)
logger.log_tabular('LossQ2', average_only=True)
logger.log_tabular('LossV', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--exp_name', type=str, default='sac')
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--steps_per_epoch', type=int, default=5000)
args = parser.parse_args()
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
sac_pytorch(lambda: gym.make(args.env), hidden_sizes=[args.hid] * args.l,
gamma=args.gamma, seed=args.seed, epochs=args.epochs,
steps_per_epoch=args.steps_per_epoch,
logger_kwargs=logger_kwargs) | [
"spinup.utils.run_utils.setup_logger_kwargs",
"torch.nn.MSELoss",
"numpy.random.seed",
"spinup.algos.sac_pytorch.core.ReplayBuffer",
"argparse.ArgumentParser",
"gym.make",
"spinup.algos.sac_pytorch.core.soft_update_model1_with_model2",
"torch.manual_seed",
"numpy.zeros",
"spinup.algos.sac_pytorch.... | [((3295, 3323), 'spinup.utils.logx.EpochLogger', 'EpochLogger', ([], {}), '(**logger_kwargs)\n', (3306, 3323), False, 'from spinup.utils.logx import EpochLogger\n'), ((3430, 3453), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3447, 3453), False, 'import torch\n'), ((3458, 3478), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3472, 3478), True, 'import numpy as np\n'), ((4239, 4303), 'spinup.algos.sac_pytorch.core.ReplayBuffer', 'ReplayBuffer', ([], {'obs_dim': 'obs_dim', 'act_dim': 'act_dim', 'size': 'replay_size'}), '(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)\n', (4251, 4303), False, 'from spinup.algos.sac_pytorch.core import TanhGaussianPolicy, Mlp, soft_update_model1_with_model2, ReplayBuffer\n'), ((5159, 5170), 'time.time', 'time.time', ([], {}), '()\n', (5168, 5170), False, 'import time\n'), ((6126, 6138), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6136, 6138), True, 'import torch.nn as nn\n'), ((14816, 14841), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14839, 14841), False, 'import argparse\n'), ((15505, 15550), 'spinup.utils.run_utils.setup_logger_kwargs', 'setup_logger_kwargs', (['args.exp_name', 'args.seed'], {}), '(args.exp_name, args.seed)\n', (15524, 15550), False, 'from spinup.utils.run_utils import setup_logger_kwargs\n'), ((4649, 4660), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (4657, 4660), True, 'import numpy as np\n'), ((3179, 3204), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3202, 3204), False, 'import torch\n'), ((5335, 5409), 'spinup.algos.sac_pytorch.core.TanhGaussianPolicy', 'TanhGaussianPolicy', (['obs_dim', 'act_dim', 'hidden_sizes'], {'action_limit': 'act_limit'}), '(obs_dim, act_dim, hidden_sizes, action_limit=act_limit)\n', (5353, 5409), False, 'from spinup.algos.sac_pytorch.core import TanhGaussianPolicy, Mlp, soft_update_model1_with_model2, ReplayBuffer\n'), ((5436, 5465), 
'spinup.algos.sac_pytorch.core.Mlp', 'Mlp', (['obs_dim', '(1)', 'hidden_sizes'], {}), '(obs_dim, 1, hidden_sizes)\n', (5439, 5465), False, 'from spinup.algos.sac_pytorch.core import TanhGaussianPolicy, Mlp, soft_update_model1_with_model2, ReplayBuffer\n'), ((5498, 5527), 'spinup.algos.sac_pytorch.core.Mlp', 'Mlp', (['obs_dim', '(1)', 'hidden_sizes'], {}), '(obs_dim, 1, hidden_sizes)\n', (5501, 5527), False, 'from spinup.algos.sac_pytorch.core import TanhGaussianPolicy, Mlp, soft_update_model1_with_model2, ReplayBuffer\n'), ((5550, 5589), 'spinup.algos.sac_pytorch.core.Mlp', 'Mlp', (['(obs_dim + act_dim)', '(1)', 'hidden_sizes'], {}), '(obs_dim + act_dim, 1, hidden_sizes)\n', (5553, 5589), False, 'from spinup.algos.sac_pytorch.core import TanhGaussianPolicy, Mlp, soft_update_model1_with_model2, ReplayBuffer\n'), ((5610, 5649), 'spinup.algos.sac_pytorch.core.Mlp', 'Mlp', (['(obs_dim + act_dim)', '(1)', 'hidden_sizes'], {}), '(obs_dim + act_dim, 1, hidden_sizes)\n', (5613, 5649), False, 'from spinup.algos.sac_pytorch.core import TanhGaussianPolicy, Mlp, soft_update_model1_with_model2, ReplayBuffer\n'), ((15576, 15594), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (15584, 15594), False, 'import gym\n'), ((11893, 11960), 'spinup.algos.sac_pytorch.core.soft_update_model1_with_model2', 'soft_update_model1_with_model2', (['target_value_net', 'value_net', 'polyak'], {}), '(target_value_net, value_net, polyak)\n', (11923, 11960), False, 'from spinup.algos.sac_pytorch.core import TanhGaussianPolicy, Mlp, soft_update_model1_with_model2, ReplayBuffer\n'), ((9563, 9602), 'torch.cat', 'torch.cat', (['[obs_tensor, acts_tensor]', '(1)'], {}), '([obs_tensor, acts_tensor], 1)\n', (9572, 9602), False, 'import torch\n'), ((9711, 9750), 'torch.cat', 'torch.cat', (['[obs_tensor, acts_tensor]', '(1)'], {}), '([obs_tensor, acts_tensor], 1)\n', (9720, 9750), False, 'import torch\n'), ((9938, 9973), 'torch.cat', 'torch.cat', (['[obs_tensor, a_tilda]', '(1)'], {}), 
'([obs_tensor, a_tilda], 1)\n', (9947, 9973), False, 'import torch\n'), ((10009, 10044), 'torch.cat', 'torch.cat', (['[obs_tensor, a_tilda]', '(1)'], {}), '([obs_tensor, a_tilda], 1)\n', (10018, 10044), False, 'import torch\n'), ((14696, 14707), 'time.time', 'time.time', ([], {}), '()\n', (14705, 14707), False, 'import time\n'), ((8039, 8060), 'torch.Tensor', 'Tensor', (["batch['obs1']"], {}), "(batch['obs1'])\n", (8045, 8060), False, 'from torch import Tensor\n'), ((8107, 8128), 'torch.Tensor', 'Tensor', (["batch['obs2']"], {}), "(batch['obs2'])\n", (8113, 8128), False, 'from torch import Tensor\n'), ((8171, 8192), 'torch.Tensor', 'Tensor', (["batch['acts']"], {}), "(batch['acts'])\n", (8177, 8192), False, 'from torch import Tensor\n'), ((8383, 8404), 'torch.Tensor', 'Tensor', (["batch['rews']"], {}), "(batch['rews'])\n", (8389, 8404), False, 'from torch import Tensor\n'), ((8460, 8481), 'torch.Tensor', 'Tensor', (["batch['done']"], {}), "(batch['done'])\n", (8466, 8481), False, 'from torch import Tensor\n'), ((10090, 10128), 'torch.cat', 'torch.cat', (['[q1_a_tilda, q2_a_tilda]', '(1)'], {}), '([q1_a_tilda, q2_a_tilda], 1)\n', (10099, 10128), False, 'import torch\n')] |
import pandas as pd
import numpy as np
import itertools
import os
from scipy import interpolate
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.mplot3d import Axes3D
''' Get directory '''
dir_config = '/home/hector/ros/ual_ws/src/upat_follower/config/'
dir_data = '/home/hector/ros/ual_ws/src/upat_follower/data/'
experiment_name = 'icuas2020/random_segments'
case_name = 'error_pol_1/sim_large_vmax_'
dir_experiment = dir_data + 'log/' + experiment_name + '/' + case_name
dir_save_data = dir_data + 'img/' + experiment_name + '/'
''' Create folder to save data '''
if not os.path.exists(dir_save_data):
os.makedirs(dir_save_data)
''' Get csv files '''
print(dir_experiment)
try:
pol_1_normal_dist_trajectory_m0_v1 = pd.read_csv(
dir_experiment + '1/' + 'normal_dist_trajectory_m0.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('V1 normal_dist_trajectory_m0.csv not found!')
try:
pol_1_normal_dist_trajectory_m0_v2 = pd.read_csv(
dir_experiment + '2/' + 'normal_dist_trajectory_m0.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('V2 normal_dist_trajectory_m0.csv not found!')
try:
pol_1_normal_dist_trajectory_m0_v3 = pd.read_csv(
dir_experiment + '3/' + 'normal_dist_trajectory_m0.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('V3 normal_dist_trajectory_m0.csv not found!')
try:
pol_1_normal_dist_trajectory_m0_v4 = pd.read_csv(
dir_experiment + '4/' + 'normal_dist_trajectory_m0.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('V4 normal_dist_trajectory_m0.csv not found!')
case_name = 'error_pol_2/sim_large_vmax_'
dir_experiment = dir_data + 'log/' + experiment_name + '/' + case_name
try:
pol_2_normal_dist_trajectory_m0_v1 = pd.read_csv(
dir_experiment + '1/' + 'normal_dist_trajectory_m0.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('V1 normal_dist_trajectory_m0.csv not found!')
try:
pol_2_normal_dist_trajectory_m0_v2 = pd.read_csv(
dir_experiment + '2/' + 'normal_dist_trajectory_m0.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('V2 normal_dist_trajectory_m0.csv not found!')
try:
pol_2_normal_dist_trajectory_m0_v3 = pd.read_csv(
dir_experiment + '3/' + 'normal_dist_trajectory_m0.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('V3 normal_dist_trajectory_m0.csv not found!')
try:
pol_2_normal_dist_trajectory_m0_v4 = pd.read_csv(
dir_experiment + '4/' + 'normal_dist_trajectory_m0.csv', names=['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ', 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])
except FileNotFoundError:
print('V4 normal_dist_trajectory_m0.csv not found!')
def calcErrors(_normal_dist_trajectory_m0_v1, _normal_dist_trajectory_m0_v2, _normal_dist_trajectory_m0_v3, _normal_dist_trajectory_m0_v4):
maxs = [np.max(_normal_dist_trajectory_m0_v1.Linear),
np.max(_normal_dist_trajectory_m0_v2.Linear),
np.max(_normal_dist_trajectory_m0_v3.Linear),
np.max(_normal_dist_trajectory_m0_v4.Linear)]
mins = [np.min(_normal_dist_trajectory_m0_v1.Linear),
np.min(_normal_dist_trajectory_m0_v2.Linear),
np.min(_normal_dist_trajectory_m0_v3.Linear),
np.min(_normal_dist_trajectory_m0_v4.Linear)]
means = [np.mean(_normal_dist_trajectory_m0_v1.Linear), np.mean(_normal_dist_trajectory_m0_v2.Linear),
np.mean(_normal_dist_trajectory_m0_v3.Linear), np.mean(_normal_dist_trajectory_m0_v4.Linear)]
stds = [np.std(_normal_dist_trajectory_m0_v1.Linear), np.std(_normal_dist_trajectory_m0_v2.Linear),
np.std(_normal_dist_trajectory_m0_v3.Linear), np.std(_normal_dist_trajectory_m0_v4.Linear)]
vars = [np.var(_normal_dist_trajectory_m0_v1.Linear), np.var(_normal_dist_trajectory_m0_v2.Linear),
np.var(_normal_dist_trajectory_m0_v3.Linear), np.var(_normal_dist_trajectory_m0_v4.Linear)]
return maxs, mins, means, stds, vars
def calcErrorsTime(_normal_dist_trajectory_m0_v1, _normal_dist_trajectory_m0_v2, _normal_dist_trajectory_m0_v3, _normal_dist_trajectory_m0_v4):
delta_t_v1 = _normal_dist_trajectory_m0_v1.desTime - \
_normal_dist_trajectory_m0_v1.curTime
delta_t_v2 = _normal_dist_trajectory_m0_v2.desTime - \
_normal_dist_trajectory_m0_v2.curTime
delta_t_v3 = _normal_dist_trajectory_m0_v3.desTime - \
_normal_dist_trajectory_m0_v3.curTime
delta_t_v4 = _normal_dist_trajectory_m0_v4.desTime - \
_normal_dist_trajectory_m0_v4.curTime
delta_t_v1 = np.abs(delta_t_v1)
delta_t_v2 = np.abs(delta_t_v2)
delta_t_v3 = np.abs(delta_t_v3)
delta_t_v4 = np.abs(delta_t_v4)
maxs = [np.max(delta_t_v1),
np.max(delta_t_v2),
np.max(delta_t_v3),
np.max(delta_t_v4)]
mins = [np.min(delta_t_v1),
np.min(delta_t_v2),
np.min(delta_t_v3),
np.min(delta_t_v4)]
means = [np.mean(delta_t_v1), np.mean(delta_t_v2),
np.mean(delta_t_v3), np.mean(delta_t_v4)]
stds = [np.std(delta_t_v1), np.std(delta_t_v2),
np.std(delta_t_v3), np.std(delta_t_v4)]
vars = [np.var(delta_t_v1), np.var(delta_t_v2),
np.var(delta_t_v3), np.var(delta_t_v4)]
return maxs, mins, means, stds, vars
def plot2DErrors():
plt.figure(num='Novalid segments errors', figsize=(6, 6))
plt.subplots_adjust(hspace=0.3)
plt.subplot(211)
x = [1, 2, 3, 4]
maxs, mins, means, stds, vars = calcErrors(pol_1_normal_dist_trajectory_m0_v1, pol_1_normal_dist_trajectory_m0_v2,
pol_1_normal_dist_trajectory_m0_v3, pol_1_normal_dist_trajectory_m0_v4)
plt.errorbar(x, means, stds, alpha=0.9, color='red',
ls='none', lw=2, marker='o', ms=5, capsize=5, ecolor='red', elinewidth=2)
means = np.around(means, decimals=3)
stds = np.around(stds, decimals=3)
maxs = np.around(maxs, decimals=3)
mins = np.around(mins, decimals=3)
vars = np.around(vars, decimals=3)
print("[1] SPACE -> ", mins, means, maxs, stds, vars)
maxs, mins, means, stds, vars = calcErrors(pol_2_normal_dist_trajectory_m0_v1, pol_2_normal_dist_trajectory_m0_v2,
pol_2_normal_dist_trajectory_m0_v3, pol_2_normal_dist_trajectory_m0_v4)
plt.errorbar(x, means, stds, alpha=0.9, color='blue',
ls='none', lw=1, marker='o', ms=5, capsize=5, ecolor='blue', elinewidth=1)
means = np.around(means, decimals=3)
stds = np.around(stds, decimals=3)
maxs = np.around(maxs, decimals=3)
mins = np.around(mins, decimals=3)
vars = np.around(vars, decimals=3)
print("[2] SPACE -> ", mins, means, maxs, stds, vars)
plt.xlabel('Max velocity (m/s)')
plt.ylabel('Normal distance error (m)')
plt.xticks(np.arange(1, 5, step=1))
plt.legend(['Method 1', 'Method 2'])
# ------------------------------------------------------------------------------------------------------------- #
plt.subplot(212)
x = [1, 2, 3, 4]
maxs, mins, means, stds, vars = calcErrorsTime(
pol_1_normal_dist_trajectory_m0_v1, pol_1_normal_dist_trajectory_m0_v2, pol_1_normal_dist_trajectory_m0_v3, pol_1_normal_dist_trajectory_m0_v4)
plt.errorbar(x, means, stds, alpha=0.9, color='red',
ls='none', lw=2, marker='o', ms=5, capsize=5, ecolor='red', elinewidth=2)
means = np.around(means, decimals=3)
stds = np.around(stds, decimals=3)
maxs = np.around(maxs, decimals=3)
mins = np.around(mins, decimals=3)
vars = np.around(vars, decimals=3)
print("[1] TIME -> ", mins, means, maxs, stds, vars)
maxs, mins, means, stds, vars = calcErrorsTime(
pol_2_normal_dist_trajectory_m0_v1, pol_2_normal_dist_trajectory_m0_v2, pol_2_normal_dist_trajectory_m0_v3, pol_2_normal_dist_trajectory_m0_v4)
plt.errorbar(x, means, stds, alpha=1, color='blue',
ls='none', lw=1, marker='o', ms=5, capsize=5, ecolor='blue', elinewidth=1)
# plt.ylim(bottom=-300)
means = np.around(means, decimals=3)
stds = np.around(stds, decimals=3)
maxs = np.around(maxs, decimals=3)
mins = np.around(mins, decimals=3)
vars = np.around(vars, decimals=3)
print("[2] TIME -> ", mins, means, maxs, stds, vars)
plt.xlabel('Max velocity (m/s)')
plt.ylabel('Time error (s)')
plt.xticks(np.arange(1, 5, step=1))
plt.legend(['Method 1', 'Method 2'])
plt.savefig(dir_save_data + 'random_segments_' +
'errors_traj.eps', format='eps', dpi=1200,bbox_inches='tight')
plt.show(block=True)
plt.show()
plot2DErrors()
# print("Space ", np.around(np.min(pol_1_normal_dist_trajectory_m0_v1.Linear), 3), np.around(np.mean(pol_1_normal_dist_trajectory_m0_v1.Linear), 3), np.around(np.max(
# pol_1_normal_dist_trajectory_m0_v1.Linear), 3), np.around(np.std(pol_1_normal_dist_trajectory_m0_v1.Linear), 3), np.around(np.var(pol_1_normal_dist_trajectory_m0_v1.Linear), 3))
# delta_t = pol_1_normal_dist_trajectory_m0_v1.desTime - \
# pol_1_normal_dist_trajectory_m0_v1.curTime
# delta_t = np.abs(delta_t)
# print("Time ", np.around(np.min(delta_t), 3), np.around(np.mean(delta_t), 3), np.around(
# np.max(delta_t), 3), np.around(np.std(delta_t), 3), np.around(np.var(delta_t), 3))
plt.show(block=True)
print('-----------------------------------------------------------------------------')
# https://towardsdatascience.com/using-standard-deviation-in-python-77872c32ba9b
| [
"numpy.abs",
"pandas.read_csv",
"numpy.around",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.std",
"os.path.exists",
"numpy.max",
"numpy.var",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.min",
"matplotlib.pyplot.subplots_... | [((10248, 10268), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (10256, 10268), True, 'from matplotlib import pyplot as plt\n'), ((616, 645), 'os.path.exists', 'os.path.exists', (['dir_save_data'], {}), '(dir_save_data)\n', (630, 645), False, 'import os\n'), ((651, 677), 'os.makedirs', 'os.makedirs', (['dir_save_data'], {}), '(dir_save_data)\n', (662, 677), False, 'import os\n'), ((768, 985), 'pandas.read_csv', 'pd.read_csv', (["(dir_experiment + '1/' + 'normal_dist_trajectory_m0.csv')"], {'names': "['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz']"}), "(dir_experiment + '1/' + 'normal_dist_trajectory_m0.csv', names=\n ['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])\n", (779, 985), True, 'import pandas as pd\n'), ((1115, 1332), 'pandas.read_csv', 'pd.read_csv', (["(dir_experiment + '2/' + 'normal_dist_trajectory_m0.csv')"], {'names': "['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz']"}), "(dir_experiment + '2/' + 'normal_dist_trajectory_m0.csv', names=\n ['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])\n", (1126, 1332), True, 'import pandas as pd\n'), ((1462, 1679), 'pandas.read_csv', 'pd.read_csv', (["(dir_experiment + '3/' + 'normal_dist_trajectory_m0.csv')"], {'names': "['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz']"}), "(dir_experiment + '3/' + 'normal_dist_trajectory_m0.csv', names=\n ['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])\n", (1473, 1679), True, 'import pandas as 
pd\n'), ((1809, 2026), 'pandas.read_csv', 'pd.read_csv', (["(dir_experiment + '4/' + 'normal_dist_trajectory_m0.csv')"], {'names': "['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz']"}), "(dir_experiment + '4/' + 'normal_dist_trajectory_m0.csv', names=\n ['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])\n", (1820, 2026), True, 'import pandas as pd\n'), ((2271, 2488), 'pandas.read_csv', 'pd.read_csv', (["(dir_experiment + '1/' + 'normal_dist_trajectory_m0.csv')"], {'names': "['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz']"}), "(dir_experiment + '1/' + 'normal_dist_trajectory_m0.csv', names=\n ['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])\n", (2282, 2488), True, 'import pandas as pd\n'), ((2618, 2835), 'pandas.read_csv', 'pd.read_csv', (["(dir_experiment + '2/' + 'normal_dist_trajectory_m0.csv')"], {'names': "['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz']"}), "(dir_experiment + '2/' + 'normal_dist_trajectory_m0.csv', names=\n ['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])\n", (2629, 2835), True, 'import pandas as pd\n'), ((2965, 3182), 'pandas.read_csv', 'pd.read_csv', (["(dir_experiment + '3/' + 'normal_dist_trajectory_m0.csv')"], {'names': "['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz']"}), "(dir_experiment + '3/' + 'normal_dist_trajectory_m0.csv', names=\n ['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 
'desVelx', 'desVely', 'desVelz'])\n", (2976, 3182), True, 'import pandas as pd\n'), ((3312, 3529), 'pandas.read_csv', 'pd.read_csv', (["(dir_experiment + '4/' + 'normal_dist_trajectory_m0.csv')"], {'names': "['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz']"}), "(dir_experiment + '4/' + 'normal_dist_trajectory_m0.csv', names=\n ['curTime', 'desTime', 'Spline', 'Linear', 'PosX', 'PosY', 'PosZ',\n 'curVelx', 'curVely', 'curVelz', 'desVelx', 'desVely', 'desVelz'])\n", (3323, 3529), True, 'import pandas as pd\n'), ((5476, 5494), 'numpy.abs', 'np.abs', (['delta_t_v1'], {}), '(delta_t_v1)\n', (5482, 5494), True, 'import numpy as np\n'), ((5512, 5530), 'numpy.abs', 'np.abs', (['delta_t_v2'], {}), '(delta_t_v2)\n', (5518, 5530), True, 'import numpy as np\n'), ((5548, 5566), 'numpy.abs', 'np.abs', (['delta_t_v3'], {}), '(delta_t_v3)\n', (5554, 5566), True, 'import numpy as np\n'), ((5584, 5602), 'numpy.abs', 'np.abs', (['delta_t_v4'], {}), '(delta_t_v4)\n', (5590, 5602), True, 'import numpy as np\n'), ((6246, 6303), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '"""Novalid segments errors"""', 'figsize': '(6, 6)'}), "(num='Novalid segments errors', figsize=(6, 6))\n", (6256, 6303), True, 'from matplotlib import pyplot as plt\n'), ((6308, 6339), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)'}), '(hspace=0.3)\n', (6327, 6339), True, 'from matplotlib import pyplot as plt\n'), ((6344, 6360), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (6355, 6360), True, 'from matplotlib import pyplot as plt\n'), ((6624, 6754), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'means', 'stds'], {'alpha': '(0.9)', 'color': '"""red"""', 'ls': '"""none"""', 'lw': '(2)', 'marker': '"""o"""', 'ms': '(5)', 'capsize': '(5)', 'ecolor': '"""red"""', 'elinewidth': '(2)'}), "(x, means, stds, alpha=0.9, color='red', ls='none', lw=2,\n marker='o', 
ms=5, capsize=5, ecolor='red', elinewidth=2)\n", (6636, 6754), True, 'from matplotlib import pyplot as plt\n'), ((6781, 6809), 'numpy.around', 'np.around', (['means'], {'decimals': '(3)'}), '(means, decimals=3)\n', (6790, 6809), True, 'import numpy as np\n'), ((6821, 6848), 'numpy.around', 'np.around', (['stds'], {'decimals': '(3)'}), '(stds, decimals=3)\n', (6830, 6848), True, 'import numpy as np\n'), ((6860, 6887), 'numpy.around', 'np.around', (['maxs'], {'decimals': '(3)'}), '(maxs, decimals=3)\n', (6869, 6887), True, 'import numpy as np\n'), ((6899, 6926), 'numpy.around', 'np.around', (['mins'], {'decimals': '(3)'}), '(mins, decimals=3)\n', (6908, 6926), True, 'import numpy as np\n'), ((6938, 6965), 'numpy.around', 'np.around', (['vars'], {'decimals': '(3)'}), '(vars, decimals=3)\n', (6947, 6965), True, 'import numpy as np\n'), ((7266, 7398), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'means', 'stds'], {'alpha': '(0.9)', 'color': '"""blue"""', 'ls': '"""none"""', 'lw': '(1)', 'marker': '"""o"""', 'ms': '(5)', 'capsize': '(5)', 'ecolor': '"""blue"""', 'elinewidth': '(1)'}), "(x, means, stds, alpha=0.9, color='blue', ls='none', lw=1,\n marker='o', ms=5, capsize=5, ecolor='blue', elinewidth=1)\n", (7278, 7398), True, 'from matplotlib import pyplot as plt\n'), ((7425, 7453), 'numpy.around', 'np.around', (['means'], {'decimals': '(3)'}), '(means, decimals=3)\n', (7434, 7453), True, 'import numpy as np\n'), ((7465, 7492), 'numpy.around', 'np.around', (['stds'], {'decimals': '(3)'}), '(stds, decimals=3)\n', (7474, 7492), True, 'import numpy as np\n'), ((7504, 7531), 'numpy.around', 'np.around', (['maxs'], {'decimals': '(3)'}), '(maxs, decimals=3)\n', (7513, 7531), True, 'import numpy as np\n'), ((7543, 7570), 'numpy.around', 'np.around', (['mins'], {'decimals': '(3)'}), '(mins, decimals=3)\n', (7552, 7570), True, 'import numpy as np\n'), ((7582, 7609), 'numpy.around', 'np.around', (['vars'], {'decimals': '(3)'}), '(vars, decimals=3)\n', (7591, 7609), True, 
'import numpy as np\n'), ((7673, 7705), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Max velocity (m/s)"""'], {}), "('Max velocity (m/s)')\n", (7683, 7705), True, 'from matplotlib import pyplot as plt\n'), ((7710, 7749), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normal distance error (m)"""'], {}), "('Normal distance error (m)')\n", (7720, 7749), True, 'from matplotlib import pyplot as plt\n'), ((7794, 7830), 'matplotlib.pyplot.legend', 'plt.legend', (["['Method 1', 'Method 2']"], {}), "(['Method 1', 'Method 2'])\n", (7804, 7830), True, 'from matplotlib import pyplot as plt\n'), ((7953, 7969), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (7964, 7969), True, 'from matplotlib import pyplot as plt\n'), ((8201, 8331), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'means', 'stds'], {'alpha': '(0.9)', 'color': '"""red"""', 'ls': '"""none"""', 'lw': '(2)', 'marker': '"""o"""', 'ms': '(5)', 'capsize': '(5)', 'ecolor': '"""red"""', 'elinewidth': '(2)'}), "(x, means, stds, alpha=0.9, color='red', ls='none', lw=2,\n marker='o', ms=5, capsize=5, ecolor='red', elinewidth=2)\n", (8213, 8331), True, 'from matplotlib import pyplot as plt\n'), ((8358, 8386), 'numpy.around', 'np.around', (['means'], {'decimals': '(3)'}), '(means, decimals=3)\n', (8367, 8386), True, 'import numpy as np\n'), ((8398, 8425), 'numpy.around', 'np.around', (['stds'], {'decimals': '(3)'}), '(stds, decimals=3)\n', (8407, 8425), True, 'import numpy as np\n'), ((8437, 8464), 'numpy.around', 'np.around', (['maxs'], {'decimals': '(3)'}), '(maxs, decimals=3)\n', (8446, 8464), True, 'import numpy as np\n'), ((8476, 8503), 'numpy.around', 'np.around', (['mins'], {'decimals': '(3)'}), '(mins, decimals=3)\n', (8485, 8503), True, 'import numpy as np\n'), ((8515, 8542), 'numpy.around', 'np.around', (['vars'], {'decimals': '(3)'}), '(vars, decimals=3)\n', (8524, 8542), True, 'import numpy as np\n'), ((8811, 8942), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'means', 
'stds'], {'alpha': '(1)', 'color': '"""blue"""', 'ls': '"""none"""', 'lw': '(1)', 'marker': '"""o"""', 'ms': '(5)', 'capsize': '(5)', 'ecolor': '"""blue"""', 'elinewidth': '(1)'}), "(x, means, stds, alpha=1, color='blue', ls='none', lw=1, marker\n ='o', ms=5, capsize=5, ecolor='blue', elinewidth=1)\n", (8823, 8942), True, 'from matplotlib import pyplot as plt\n'), ((8996, 9024), 'numpy.around', 'np.around', (['means'], {'decimals': '(3)'}), '(means, decimals=3)\n', (9005, 9024), True, 'import numpy as np\n'), ((9036, 9063), 'numpy.around', 'np.around', (['stds'], {'decimals': '(3)'}), '(stds, decimals=3)\n', (9045, 9063), True, 'import numpy as np\n'), ((9075, 9102), 'numpy.around', 'np.around', (['maxs'], {'decimals': '(3)'}), '(maxs, decimals=3)\n', (9084, 9102), True, 'import numpy as np\n'), ((9114, 9141), 'numpy.around', 'np.around', (['mins'], {'decimals': '(3)'}), '(mins, decimals=3)\n', (9123, 9141), True, 'import numpy as np\n'), ((9153, 9180), 'numpy.around', 'np.around', (['vars'], {'decimals': '(3)'}), '(vars, decimals=3)\n', (9162, 9180), True, 'import numpy as np\n'), ((9244, 9276), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Max velocity (m/s)"""'], {}), "('Max velocity (m/s)')\n", (9254, 9276), True, 'from matplotlib import pyplot as plt\n'), ((9281, 9309), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time error (s)"""'], {}), "('Time error (s)')\n", (9291, 9309), True, 'from matplotlib import pyplot as plt\n'), ((9354, 9390), 'matplotlib.pyplot.legend', 'plt.legend', (["['Method 1', 'Method 2']"], {}), "(['Method 1', 'Method 2'])\n", (9364, 9390), True, 'from matplotlib import pyplot as plt\n'), ((9395, 9512), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(dir_save_data + 'random_segments_' + 'errors_traj.eps')"], {'format': '"""eps"""', 'dpi': '(1200)', 'bbox_inches': '"""tight"""'}), "(dir_save_data + 'random_segments_' + 'errors_traj.eps', format=\n 'eps', dpi=1200, bbox_inches='tight')\n", (9406, 9512), True, 'from matplotlib import 
pyplot as plt\n'), ((9527, 9547), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (9535, 9547), True, 'from matplotlib import pyplot as plt\n'), ((9553, 9563), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9561, 9563), True, 'from matplotlib import pyplot as plt\n'), ((3767, 3811), 'numpy.max', 'np.max', (['_normal_dist_trajectory_m0_v1.Linear'], {}), '(_normal_dist_trajectory_m0_v1.Linear)\n', (3773, 3811), True, 'import numpy as np\n'), ((3825, 3869), 'numpy.max', 'np.max', (['_normal_dist_trajectory_m0_v2.Linear'], {}), '(_normal_dist_trajectory_m0_v2.Linear)\n', (3831, 3869), True, 'import numpy as np\n'), ((3883, 3927), 'numpy.max', 'np.max', (['_normal_dist_trajectory_m0_v3.Linear'], {}), '(_normal_dist_trajectory_m0_v3.Linear)\n', (3889, 3927), True, 'import numpy as np\n'), ((3941, 3985), 'numpy.max', 'np.max', (['_normal_dist_trajectory_m0_v4.Linear'], {}), '(_normal_dist_trajectory_m0_v4.Linear)\n', (3947, 3985), True, 'import numpy as np\n'), ((3999, 4043), 'numpy.min', 'np.min', (['_normal_dist_trajectory_m0_v1.Linear'], {}), '(_normal_dist_trajectory_m0_v1.Linear)\n', (4005, 4043), True, 'import numpy as np\n'), ((4057, 4101), 'numpy.min', 'np.min', (['_normal_dist_trajectory_m0_v2.Linear'], {}), '(_normal_dist_trajectory_m0_v2.Linear)\n', (4063, 4101), True, 'import numpy as np\n'), ((4115, 4159), 'numpy.min', 'np.min', (['_normal_dist_trajectory_m0_v3.Linear'], {}), '(_normal_dist_trajectory_m0_v3.Linear)\n', (4121, 4159), True, 'import numpy as np\n'), ((4173, 4217), 'numpy.min', 'np.min', (['_normal_dist_trajectory_m0_v4.Linear'], {}), '(_normal_dist_trajectory_m0_v4.Linear)\n', (4179, 4217), True, 'import numpy as np\n'), ((4233, 4278), 'numpy.mean', 'np.mean', (['_normal_dist_trajectory_m0_v1.Linear'], {}), '(_normal_dist_trajectory_m0_v1.Linear)\n', (4240, 4278), True, 'import numpy as np\n'), ((4280, 4325), 'numpy.mean', 'np.mean', (['_normal_dist_trajectory_m0_v2.Linear'], {}), 
'(_normal_dist_trajectory_m0_v2.Linear)\n', (4287, 4325), True, 'import numpy as np\n'), ((4340, 4385), 'numpy.mean', 'np.mean', (['_normal_dist_trajectory_m0_v3.Linear'], {}), '(_normal_dist_trajectory_m0_v3.Linear)\n', (4347, 4385), True, 'import numpy as np\n'), ((4387, 4432), 'numpy.mean', 'np.mean', (['_normal_dist_trajectory_m0_v4.Linear'], {}), '(_normal_dist_trajectory_m0_v4.Linear)\n', (4394, 4432), True, 'import numpy as np\n'), ((4446, 4490), 'numpy.std', 'np.std', (['_normal_dist_trajectory_m0_v1.Linear'], {}), '(_normal_dist_trajectory_m0_v1.Linear)\n', (4452, 4490), True, 'import numpy as np\n'), ((4492, 4536), 'numpy.std', 'np.std', (['_normal_dist_trajectory_m0_v2.Linear'], {}), '(_normal_dist_trajectory_m0_v2.Linear)\n', (4498, 4536), True, 'import numpy as np\n'), ((4550, 4594), 'numpy.std', 'np.std', (['_normal_dist_trajectory_m0_v3.Linear'], {}), '(_normal_dist_trajectory_m0_v3.Linear)\n', (4556, 4594), True, 'import numpy as np\n'), ((4596, 4640), 'numpy.std', 'np.std', (['_normal_dist_trajectory_m0_v4.Linear'], {}), '(_normal_dist_trajectory_m0_v4.Linear)\n', (4602, 4640), True, 'import numpy as np\n'), ((4654, 4698), 'numpy.var', 'np.var', (['_normal_dist_trajectory_m0_v1.Linear'], {}), '(_normal_dist_trajectory_m0_v1.Linear)\n', (4660, 4698), True, 'import numpy as np\n'), ((4700, 4744), 'numpy.var', 'np.var', (['_normal_dist_trajectory_m0_v2.Linear'], {}), '(_normal_dist_trajectory_m0_v2.Linear)\n', (4706, 4744), True, 'import numpy as np\n'), ((4758, 4802), 'numpy.var', 'np.var', (['_normal_dist_trajectory_m0_v3.Linear'], {}), '(_normal_dist_trajectory_m0_v3.Linear)\n', (4764, 4802), True, 'import numpy as np\n'), ((4804, 4848), 'numpy.var', 'np.var', (['_normal_dist_trajectory_m0_v4.Linear'], {}), '(_normal_dist_trajectory_m0_v4.Linear)\n', (4810, 4848), True, 'import numpy as np\n'), ((5616, 5634), 'numpy.max', 'np.max', (['delta_t_v1'], {}), '(delta_t_v1)\n', (5622, 5634), True, 'import numpy as np\n'), ((5648, 5666), 'numpy.max', 
'np.max', (['delta_t_v2'], {}), '(delta_t_v2)\n', (5654, 5666), True, 'import numpy as np\n'), ((5680, 5698), 'numpy.max', 'np.max', (['delta_t_v3'], {}), '(delta_t_v3)\n', (5686, 5698), True, 'import numpy as np\n'), ((5712, 5730), 'numpy.max', 'np.max', (['delta_t_v4'], {}), '(delta_t_v4)\n', (5718, 5730), True, 'import numpy as np\n'), ((5744, 5762), 'numpy.min', 'np.min', (['delta_t_v1'], {}), '(delta_t_v1)\n', (5750, 5762), True, 'import numpy as np\n'), ((5776, 5794), 'numpy.min', 'np.min', (['delta_t_v2'], {}), '(delta_t_v2)\n', (5782, 5794), True, 'import numpy as np\n'), ((5808, 5826), 'numpy.min', 'np.min', (['delta_t_v3'], {}), '(delta_t_v3)\n', (5814, 5826), True, 'import numpy as np\n'), ((5840, 5858), 'numpy.min', 'np.min', (['delta_t_v4'], {}), '(delta_t_v4)\n', (5846, 5858), True, 'import numpy as np\n'), ((5873, 5892), 'numpy.mean', 'np.mean', (['delta_t_v1'], {}), '(delta_t_v1)\n', (5880, 5892), True, 'import numpy as np\n'), ((5894, 5913), 'numpy.mean', 'np.mean', (['delta_t_v2'], {}), '(delta_t_v2)\n', (5901, 5913), True, 'import numpy as np\n'), ((5928, 5947), 'numpy.mean', 'np.mean', (['delta_t_v3'], {}), '(delta_t_v3)\n', (5935, 5947), True, 'import numpy as np\n'), ((5949, 5968), 'numpy.mean', 'np.mean', (['delta_t_v4'], {}), '(delta_t_v4)\n', (5956, 5968), True, 'import numpy as np\n'), ((5982, 6000), 'numpy.std', 'np.std', (['delta_t_v1'], {}), '(delta_t_v1)\n', (5988, 6000), True, 'import numpy as np\n'), ((6002, 6020), 'numpy.std', 'np.std', (['delta_t_v2'], {}), '(delta_t_v2)\n', (6008, 6020), True, 'import numpy as np\n'), ((6034, 6052), 'numpy.std', 'np.std', (['delta_t_v3'], {}), '(delta_t_v3)\n', (6040, 6052), True, 'import numpy as np\n'), ((6054, 6072), 'numpy.std', 'np.std', (['delta_t_v4'], {}), '(delta_t_v4)\n', (6060, 6072), True, 'import numpy as np\n'), ((6086, 6104), 'numpy.var', 'np.var', (['delta_t_v1'], {}), '(delta_t_v1)\n', (6092, 6104), True, 'import numpy as np\n'), ((6106, 6124), 'numpy.var', 'np.var', 
(['delta_t_v2'], {}), '(delta_t_v2)\n', (6112, 6124), True, 'import numpy as np\n'), ((6138, 6156), 'numpy.var', 'np.var', (['delta_t_v3'], {}), '(delta_t_v3)\n', (6144, 6156), True, 'import numpy as np\n'), ((6158, 6176), 'numpy.var', 'np.var', (['delta_t_v4'], {}), '(delta_t_v4)\n', (6164, 6176), True, 'import numpy as np\n'), ((7765, 7788), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {'step': '(1)'}), '(1, 5, step=1)\n', (7774, 7788), True, 'import numpy as np\n'), ((9325, 9348), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {'step': '(1)'}), '(1, 5, step=1)\n', (9334, 9348), True, 'import numpy as np\n')] |
"""
A framework for calibration assessment of binary classification models
written in Python.
References
----------
[1] <NAME>, <NAME>., <NAME>, and <NAME>.
Applied logistic regression. Vol. 398. John Wiley & Sons, 2013.
[2] Pigeon, <NAME>., and <NAME>.
An improved goodness of fit statistic for probability prediction models.
Biometrical Journal: Journal of Mathematical Methods in Biosciences 41.1 (1999): 71-82.
[3] <NAME>. (1986). Probabilistic prediction in patient management and clinical trials.
Statistics in medicine, 5(5), 421-433.
[4] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020).
A tutorial on calibration measurements and calibration models for clinical prediction models.
Journal of the American Medical Informatics Association, 27(4), 621-633.
[5] <NAME>. (2021). rms: Regression modeling strategies (R package version
6.2-0) [Computer software]. The Comprehensive R Archive Network.
Available from https://CRAN.R-project.org/package=rms
[6] <NAME>., <NAME>., & <NAME>. (2014). A new calibration test
and a reappraisal of the calibration belt for the assessment of prediction models
based on dichotomous outcomes. Statistics in medicine, 33(14), 2390-2407.
[7] <NAME>. (2021). calibrattion-belt: Assessment of calibration in binomial prediction models [Computer software].
Available from https://github.com/fabiankueppers/calibration-framework
[8] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2017).
givitiR: The giviti calibration test and belt (R package version 1.3) [Computer
software]. The Comprehensive R Archive Network.
Available from https://CRAN.R-project.org/package=givitiR
[9] <NAME>. (1926). The choice of a class interval.
Journal of the american statistical association, 21(153), 65-66.
[10] "Hosmer-Lemeshow test", https://en.wikipedia.org/wiki/Hosmer-Lemeshow_test
[11] <NAME>., and <NAME>. "A cautionary note about assessing
the fit of logistic regression models." (1999): 847-853.
"""
from enum import Flag
from math import log2, ceil, sqrt
from typing import Union
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import chi2, norm
from scipy import integrate
from statsmodels.nonparametric.smoothers_lowess import lowess
from IPython.display import display
from .calbelt import CalibrationBelt
from .metrics import brier, auroc
from ._result_types import *
# Minimum expected frequency per contingency-table cell; below this value the
# chi-square approximation of the goodness-of-fit statistics becomes doubtful.
CHI_SQUARE_VIOLATION_LIMIT = 1
# Relative deviation (fraction of group size) between observed and expected
# counts above which contingency-table rows are highlighted in red.
HIGHLIGHT_DEVIATION_LIMIT = 0.1
# Output precision for floats in displayed DataFrames.
pd.options.display.precision = 3
class DEVEL(Flag):
    """Evaluation context of a model.

    INTERNAL: evaluation on the data the model was developed on.
    EXTERNAL: evaluation on an independent test set (outsample).
    """
    INTERNAL = False
    EXTERNAL = True
# _BaseCalibrationEvaluator <--(inherits from)-- CalibrationEvaluator
class _BaseCalibrationEvaluator:
# CONSTRUCTOR
def __init__(self, y_true:np.ndarray, y_pred:np.ndarray, outsample:bool, n_groups:Union[int,str]=10) -> None:
"""This is the main class for the PyCalEva framework bundeling statistical tests,
metrics and plot for calibration measurement of binary classification models.
Parameters
----------
y_true : array_like
Expected class labels given in test set. (Ground truth y)
y_pred : array_like
Observed probabilities predicted by a classification model.
outsample : bool
Set to 'False' for internal evaluation or set to 'True'
for external evaluation.
n_groups: int or str (optional, default=10)
Number of groups to use for grouping probabilities.
Set to 'auto' to use sturges function for estimation of optimal group size [9].
Raises
------
ValueError: If the given data (y_true,y_pred) or the given number of groups is invalid
Examples
--------
>>> from pycaleva import CalibrationEvaluator
>>> ce = CalibrationEvaluator(y_test, pred_prob, outsample=True, n_groups='auto')
References
----------
.. [9] <NAME>. (1926). The choice of a class interval.
Journal of the american statistical association, 21(153), 65-66.
"""
# Check parameters
self.__check_parameters(np.array(y_true), np.array(y_pred), outsample)
self.__y = np.array(y_true) # True class labels
self.__p = np.array(y_pred) # Predicted class probabilities
self.__n = len(y_true) # Sample size
self.__ngroups = None # Group size
# Set if external testset or internal trainingsset is used
if outsample:
self.__devel = DEVEL.EXTERNAL
else:
self.__devel = DEVEL.INTERNAL
# Define calibration metrics
self.__auroc = auroc(self.__y, self.__p) # Area under the receiver operating curve
self.__brier = brier(self.__y, self.__p, True) # Brier score scaled to [0.0 - 1.0]
self.__ace = None # Adative calibration error
self.__mce = None # Maximum calibration error
self.__awlc = None # Area within lowess curve
# Group data according to predicted probabilities --> will also set contengency table for groups
self.__data = None
self.__ct = None
self.group_data(n_groups) # --> This method will update all groupbased metrics as well
    # PROPERTIES
    #---------------------------------------------------------------------------------------------
    @property
    def contingency_table(self):
        """Contingency table of grouped observed and expected class membership probabilities.

        Returns
        -------
        contingency_table : DataFrame
        """
        return self.__ct
    @property
    def auroc(self):
        """Area under the receiver operating characteristic curve.

        Returns
        -------
        auroc : float
        """
        return self.__auroc
    @property
    def brier(self):
        """Scaled brier score (range [0.0, 1.0]) for the current y_true and y_pred.

        Returns
        -------
        brier_score : float
        """
        return self.__brier
    @property
    def ace(self):
        """Adaptive calibration error based on the current grouping.

        Returns
        -------
        adaptive calibration error : float
        """
        return self.__ace
    @property
    def mce(self):
        """Maximum calibration error based on the current grouping.

        Returns
        -------
        maximum calibration error : float
        """
        return self.__mce
    @property
    def awlc(self):
        """Area between the nonparametric (lowess) calibration curve and the
        calibration-plot bisector, i.e. theoretically perfect calibration.

        Returns
        -------
        Area within lowess curve : float
        """
        return self.__awlc
    @property
    def outsample(self):
        """Evaluation context. Note: returns the DEVEL flag, not a plain bool
        (DEVEL.EXTERNAL for external validation, DEVEL.INTERNAL otherwise).

        Returns
        -------
        Outsample status : DEVEL
        """
        return self.__devel
# PRIVATE METHODS
# --------------------------------------------------------------------------------------------
# Check if parameters are valid
def __check_parameters(self, y, p, outsample) -> bool:
if (len(y) != len(p)):
raise ValueError("Observations y_true and Predictions y_pred differ in size!")
if not ( ((y==0) | (y==1)).all() ):
raise ValueError("Invalid class labels! y_train must be dichotomous containing only values 0 or 1")
if ( (p < 0.0 ).any() or (p > 1.0).any() ):
raise ValueError("Predicted probabilities y_pred must be in range [0.0 1.0]!")
if (abs( p.sum() - y.sum() ) < 1e-04 ) and outsample == True:
warnings.warn("Please set parameter outsample to 'false' if the evaluated model was fit on this dataset!", "UserWarning")
if ( y.sum() <= 1 ) or ( y.sum() >= (len(y) - 1) ):
raise ValueError("The number of events/non events in observations can not be less than 1.")
return True
def __calc_ace(self):
return np.abs((self.__ct.mean_predicted - self.__ct.mean_observed)).sum() / self.__ngroups
def __calc_mce(self):
return np.abs((self.__ct.mean_predicted - self.__ct.mean_observed)).max()
def __init_contingency_table(self) -> pd.DataFrame:
"""Initialize the contingency table using data
Returns:
contingency_table : DataFrame:
"""
data = self.__data
total = data['class'].groupby(data.dcl).count() # Total observations per group
mean_predicted = data['prob'].groupby(data.dcl).mean() # Mean predicted probability per group
mean_observed = data['class'].groupby(data.dcl).mean() # Mean observed probability per group
observed = data['class'].groupby(data.dcl).sum() # Number of observed class 1 events
predicted = data['prob'].groupby(data.dcl).sum() # Number of predicted class 1 events
c_table = pd.DataFrame({"total":total, "mean_predicted":mean_predicted, "mean_observed":mean_observed, \
"observed_0":total-observed, "predicted_0":total-predicted,
"observed_1":observed, "predicted_1":predicted})
c_table.index.rename('Interval', inplace=True) #Rename index column
return c_table
def __highlight_high_diff(self,row:pd.Series):
"""Highlight contingency table cells with high difference in observed and expected values
"""
props = [f'color: black']*len(row)
if ( abs(row.predicted_1 - row.observed_1) > (HIGHLIGHT_DEVIATION_LIMIT * row.total) ):
props[-1] = f'color: red'
return props
def __warn_expected_low(self):
"""Print warning message if expected frequencies are low.
"""
if (self.__ct.predicted_1 < CHI_SQUARE_VIOLATION_LIMIT).any():
print(f'Warning! Some expected frequencies are smaller then {CHI_SQUARE_VIOLATION_LIMIT}. ' +
'Possible violoation of chi²-distribution.')
def __show_contingency_table(self, phi=None):
"""Display the contingency table using IPython.
"""
ct_out = self.__ct.drop(['observed_0', 'predicted_0'], axis=1).copy()
# Add phi correction factor if values are given
if not phi is None:
ct_out.insert(3, "phi", phi)
ct_out.reset_index(inplace=True)
display(ct_out.style.apply(self.__highlight_high_diff, axis = 1))
    def __update_groupbased_metrics(self):
        """Recompute all metrics of this instance that depend on the grouping.

        Must be called whenever the grouping changes, i.e. after group_data()
        or merge_groups() have rebuilt self.__ct.
        """
        self.__ace = self.__calc_ace() # Update Adaptive Calibration Error
        self.__mce = self.__calc_mce() # Update Maximum Calibration Error
        self.__nonparametric_fit() # Calculate nonparametric fit and update Area Within Lowess Curve
def __nonparametric_fit(self, update_awlc=True):
# Nonparametric curve based on y and p using lowess
x_nonparametric = np.arange(0,1,0.005)
y_nonparametric = lowess(self.__y, self.__p, it=0, xvals=x_nonparametric)
if update_awlc:
diff = np.abs(x_nonparametric - y_nonparametric)
self.__awlc = integrate.trapezoid(diff, y_nonparametric) # Area within loss curve
return (x_nonparametric, y_nonparametric)
def __metrics_to_string(self):
"""Returns all metrics as formatted table.
Returns
-------
all_metrics: str
"""
metrics = {"AUROC":self.__auroc, r"$Brier_{scaled}$ ":self.__brier, "ACE":self.__ace, "MCE":self.__mce, "AWLC":self.__awlc }
lines = ['{:<10s}{:>8d}'.format("n",self.__n)]
for k, v in metrics.items():
lines.append('{:<10s}{:>8.3f}'.format(k,v))
textstr = '\n'.join(lines)
return textstr
# PUBLIC METHODS
# --------------------------------------------------------------------------------------------
# UTILITY: Return all metrics
    def metrics(self):
        """Get all available calibration metrics as combined result tuple.

        Returns
        -------
        auroc : float
            Area under the receiver operating characteristic.
        brier : float
            The scaled brier score.
        ace : float
            Adaptive calibration error.
        mce : float
            Maximum calibration error.
        awlc : float
            Area within the lowess curve.

        Examples
        --------
        >>> from pycaleva import CalibrationEvaluator
        >>> ce = CalibrationEvaluator(y_test, pred_prob, outsample=True, n_groups='auto')
        >>> ce.metrics()
        metrics_result(auroc=0.9739811912225705, brier=0.2677083794415594, ace=0.0361775962446639, mce=0.1837227304691177, awlc=0.041443052220213474)
        """
        return metrics_result(self.__auroc, self.__brier, self.__ace, self.__mce, self.__awlc)
# UTILITY: Group data
def group_data(self, n_groups:Union[int,str]) -> None:
r"""Group class labels and predicted probabilities into equal sized groupes of size n.
Parameters
----------
n_groups: int or str
Number of groups to use for grouping probabilities.
Set to 'auto' to use sturges function for estimation of optimal group size [9].
Notes
-----
Sturges function for estimation of optimal group size:
.. math::
k=\left\lceil\log _{2} n\right\rceil+1
Hosmer and Lemeshow recommend setting number of groups to 10 and with equally sized groups [1].
Raises
------
ValueError: If the given number of groups is invalid.
References
----------
.. [1] <NAME>, <NAME>., <NAME>, and <NAME>.
Applied logistic regression. Vol. 398. <NAME>, 2013.
.. [9] <NAME>. (1926). The choice of a class interval.
Journal of the american statistical association, 21(153), 65-66.
"""
# Check group size parameter and set accordingly
if isinstance(n_groups, int) and 2 <= n_groups < self.__n:
self.__ngroups = n_groups # Group size
elif isinstance(n_groups, str) and n_groups == 'auto':
self.__ngroups = ceil(log2(self.__n)) + 1
else:
raise ValueError(f"'{n_groups}' is an invalid value of parameter n_groups!")
df = pd.DataFrame(data={'class':self.__y, 'prob':self.__p})
# Sort Values according to their probability
df = df.sort_values('prob')
# Group data using deciles of risks
try:
df['dcl'] = pd.qcut(df['prob'], self.__ngroups)
except ValueError:
raise Exception("Could not create groups. Maybe try with a lower number of groups or set n_groups to 'auto'.")
except BaseException as err:
print(f"Unexpected {err=}, {type(err)=}")
raise
self.__data = df
self.__ct = self.__init_contingency_table()
self.__update_groupbased_metrics()
# UTILITY: Merge Groups
    def merge_groups(self, min_count:int=CHI_SQUARE_VIOLATION_LIMIT) -> None:
        """Merge the lowest-probability groups of the contingency table until the
        merged group has observed and expected class-1 counts >= min_count.

        Parameters
        ----------
        min_count : int (optional, default=1)

        Notes
        -----
        Hosmer and Lemeshow mention the possibility to merge groups at low samplesize
        to have higher expected and observed class event counts [1]. This should
        guarantee that the requirements for chi-square goodness-of-fit tests are
        fullfilled. Be aware that the power of tests will be lower after merge!

        References
        ----------
        .. [1] Hosmer Jr, D. W., Lemeshow, S., and Sturdivant, R. X.
            Applied logistic regression. Vol. 398. John Wiley & Sons, 2013.

        Todo:
            * Warn at low number of groups ( ~ at g<6 )
            * Merge from both sides
        """
        i = 0
        # NOTE(review): iloc[0:0] sums zero rows, so the first loop iteration
        # always runs with all-zero counts — verify this is intended.
        merged_rows = self.__ct.iloc[0:i].sum(axis=0, numeric_only=True)
        # Merge groups as long expected and observed count is below min_count
        while (i < self.__ngroups and (merged_rows["observed_1"] < min_count or merged_rows["predicted_1"] < min_count) ):
            # NOTE(review): the sum is taken BEFORE i is incremented; at loop exit
            # merged_rows holds rows [0, i-1) while rows [0, i) are dropped below —
            # possible off-by-one, confirm against the intended merge semantics.
            merged_rows = self.__ct.iloc[0:i].sum(axis=0, numeric_only=True)
            i += 1
        # Reset index of contingency table and add merged row: the new interval
        # spans from the left edge of the first group to the right edge of group i-1.
        idx = pd.Interval(self.__ct.index[0].left, self.__ct.index[i-1].right)
        self.__ct.loc[idx] = merged_rows
        self.__ct = self.__ct[i:]
        self.__ct.sort_index(axis=0, inplace=True)
        # Update number of groups
        self.__ngroups = len(self.__ct)
        # Update bins in data (re-cut probabilities into the merged intervals)
        self.__data['dcl'] = pd.cut(self.__data['prob'], self.__ct.index)
        # Update metrics
        self.__update_groupbased_metrics()
# STATISTICAL TEST: Hosmer Lemeshow Test
def hosmerlemeshow(self, verbose = True) -> hltest_result:
r""" Perform the Hosmer-Lemeshow goodness of fit test on the data of class instance.
The Hosmer-Lemeshow test checks the null hypothesis that the number of
given observed events match the number of expected events using given
probabilistic class predictions and dividing those into deciles of risks.
Parameters
----------
verbose : bool (optional, default=True)
Whether or not to show test results and contingency table the teststatistic
relies on.
Returns
-------
C : float
The Hosmer-Lemeshow test statistic.
p-value : float
The p-value of the test.
dof : int
Degrees of freedom
See Also
--------
CalibrationEvaluator.pigeonheyse
CalibrationEvaluator.z_test
scipy.stats.chisquare
Notes
-----
A low value for C and high p-value (>0.05) indicate a well calibrated model.
The power of this test is highly dependent on the sample size. Also the
teststatistic lacks fit to chi-squared distribution in some situations [3].
In order to decide on model fit it is recommended to check it's discrematory
power as well using metrics like AUROC, precision, recall. Furthermore a
calibration plot (or reliability plot) can help to identify regions of the
model underestimate or overestimate the true class membership probabilities.
Hosmer and Lemeshow estimated the degrees of freedom for the teststatistic
performing extensive simulations. According to their results the degrees of
freedom are k-2 where k is the number of subroups the data is divided into.
In the case of external evaluation the degrees of freedom is the same as k [1].
Teststatistc:
.. math::
E_{k 1}=\sum_{i=1}^{n_{k}} \hat{p}_{i 1}
.. math::
O_{k 1}=\sum_{i=1}^{n_{k}} y_{i 1}
.. math::
\hat{C}=\sum_{k=1}^{G} \frac{\left(O_{k 1}-E_{k 1}\right)^{2}}{E_{k 1}} + \frac{\left(O_{k 0}-E_{k 0}\right)^{2}}{E_{k 0}}
References
----------
.. [1] <NAME>, <NAME>., <NAME>, and <NAME>.
Applied logistic regression. Vol. 398. John Wiley & Sons, 2013.
.. [10] "Hosmer-Lemeshow test", https://en.wikipedia.org/wiki/Hosmer-Lemeshow_test
.. [11] Pigeon, <NAME>., and <NAME>. "A cautionary note about assessing
the fit of logistic regression models." (1999): 847-853.
Examples
--------
>>> from pycaleva import CalibrationEvaluator
>>> ce = CalibrationEvaluator(y_test, pred_prob, outsample=True, n_groups='auto')
>>> ce.hosmerlemeshow()
hltest_result(statistic=4.982635477424991, pvalue=0.8358193332183672, dof=9)
"""
# Calculate Hosmer Lemeshow Teststatistic based on contengency table
C_ = ( (self.__ct.observed_1 - self.__ct.predicted_1)**2 / \
(self.__ct.total*self.__ct.mean_predicted*(1-self.__ct.mean_predicted)) ).sum()
# DoF Internal = Number Subgroups - Parameters of Logistic Regression [1]
# DoF External = Number Subgroups [1]
if self.__devel == DEVEL.INTERNAL:
dof = self.__ngroups-2
else:
dof = self.__ngroups
# Calculate pvalue
pval = 1 - chi2.cdf(C_, dof)
# Show the contingency table
if verbose:
self.__show_contingency_table()
# Warn user if expected frequencies are < 5
self.__warn_expected_low()
if (pval < 0.001):
print(f'C({dof}): {C_:.2f} p-value: < 0.001')
else:
print(f'C({dof}): {C_:.2f} p-value: {pval:.3f}')
return hltest_result(C_, pval, dof)
# STATISTICAL TEST: Pigeon Heyse Test
def pigeonheyse(self, verbose = True) -> phtest_result:
r"""Perform the Pigeon-Heyse goodness of fit test.
The Pigeon-Heyse test checks the null hypothesis that number of given observed
events match the number of expected events over divided subgroups.
Unlike the Hosmer-Lemeshow test this test allows the use of different
grouping strategies and is more robust against variance within subgroups.
Parameters
----------
verbose : bool (optional, default=True)
Whether or not to show test results and contingency table the teststatistic
relies on.
Returns
-------
J : float
The Pigeon-Heyse test statistic J².
p : float
The p-value of the test.
dof : int
Degrees of freedom
See Also
--------
CalibrationEvaluator.hosmerlemeshow
CalibrationEvaluator.z_test
scipy.stats.chisquare
Notes
-----
This is an implemenation of the test proposed by <NAME> Heyse [2].
A low value for J² and high p-value (>0.05) indicate a well calibrated model.
Other then the Hosmer-Lemeshow test an adjustment factor is added to
the calculation of the teststatistic, making the use of different
grouping strategies possible as well.
The power of this test is highly dependent on the sample size.
In order to decide on model fit it is recommended to check it's discrematory
power as well using metrics like AUROC, precision, recall. Furthermore a
calibration plot (or reliability plot) can help to identify regions of the
model underestimate or overestimate the true class membership probabilities.
Teststatistc:
.. math::
\phi_{k}=\frac{\sum_{i=1}^{n_{k}} \hat{p}_{i 1}\left(1-\hat{p}_{i 1}\right)}{n_{k} \bar{p}_{k 1}\left(1-\bar{p}_{k 1}\right)}
.. math::
{J}^{2}=\sum_{k=1}^{G} \frac{\left(O_{k 1}-E_{k 1}\right)^{2}}{\phi_{k} E_{k 1}} + \frac{\left(O_{k 0}-E_{k 0}\right)^{2}}{\phi_{k} E_{k 0}}
References
----------
.. [1] <NAME>, <NAME>., <NAME>, and <NAME>.
Applied logistic regression. Vol. 398. John Wiley & Sons, 2013.
.. [2] Pigeon, <NAME>., and <NAME>. "An improved goodness of
fit statistic for probability prediction models."
Biometrical Journal: Journal of Mathematical Methods in Biosciences
41.1 (1999): 71-82.
.. [11] Pigeon, <NAME>., and <NAME>. "A cautionary note about assessing
the fit of logistic regression models." (1999): 847-853.
Examples
--------
>>> from pycaleva import CalibrationEvaluator
>>> ce = CalibrationEvaluator(y_test, pred_prob, outsample=True, n_groups='auto')
>>> ce.pigeonheyse()
phtest_result(statistic=5.269600396341568, pvalue=0.8102017228852412, dof=9)
"""
# Factor phi to adjust X² statistic
phi = ( self.__data['prob'].groupby(self.__data.dcl).apply(lambda x: (x *(1-x)).sum()) ) / \
( self.__ct.total * self.__ct.mean_predicted * (1 - self.__ct.mean_predicted) )
# Teststatistic
J_square = ( (self.__ct.observed_1 - self.__ct.predicted_1)**2 / \
(phi*self.__ct.total*self.__ct.mean_predicted*(1-self.__ct.mean_predicted)) ).sum()
# DoF Internal = Number Subgroups - 1 [2]
# DoF External = Number Subgroups [1]
if self.__devel == DEVEL.INTERNAL:
dof = self.__ngroups - 1
else:
dof = self.__ngroups
pval = 1 - chi2.cdf(J_square, dof) # Calculate pvalue
if verbose:
# Show the contingency table
self.__show_contingency_table(phi)
# Warn user if expected frequencies are < 5
self.__warn_expected_low()
if (pval < 0.001):
print(f'J²({dof}): {J_square:.2f} p-value: < 0.001')
else:
print(f'J²({dof}): {J_square:.2f} p-value: {pval:.3f}')
return phtest_result(J_square, pval, dof)
# STATISTICAL TEST: Spiegelhalter z-test
def z_test(self) -> ztest_result:
r"""Perform the Spieglhalter's z-test for calibration.
Returns
-------
statistic : float
The Spiegelhalter z-test statistic.
p : float
The p-value of the test.
See Also
--------
CalibrationEvaluator.hosmerlemeshow
CalibrationEvaluator.pigeonheyse
Notes
-----
This calibration test is performed in the manner of a z-test.
The nullypothesis is that the estimated probabilities are equal to the true class probabilities.
The test statistic under the nullypothesis can be approximated by a normal distribution.
A low value for Z and high p-value (>0.05) indicate a well calibrated model.
Other than Hosmer Lemeshow Test or Pigeon Heyse Test, this test is not based on grouping strategies.
Teststatistc:
.. math::
Z=\frac{\sum_{i=1}^{n}\left(y_{i}-\hat{p}_{i}\right)\left(1-2 \hat{p}_{i}\right)}{\sqrt{\sum_{i=1}^{n}\left(1-2 \hat{p}_{i}\right)^{2} \hat{p}_{i}\left(1-\hat{p}_{i}\right)}}
References
----------
.. [1] <NAME>. (1986). Probabilistic prediction in patient management and clinical trials.
Statistics in medicine, 5(5), 421-433.
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020).
A tutorial on calibration measurements and calibration models for clinical prediction models.
Journal of the American Medical Informatics Association, 27(4), 621-633.
Examples
--------
>>> from pycaleva import CalibrationEvaluator
>>> ce = CalibrationEvaluator(y_test, pred_prob, outsample=True, n_groups='auto')
>>> ce.z_test()
ztest_result(statistic=-0.21590257919669287, pvalue=0.829063686607032)
"""
num = ( (self.__y - self.__p) * ( 1 - 2 * self.__p ) ).sum()
denom = sqrt( ((1 - 2 * self.__p)**2 * self.__p * ( 1 - self.__p)).sum() )
z = num / denom
pval = 2 * norm.cdf(-abs(z))
return ztest_result(z, pval)
# STATISTICAL TEST / PLOT : Calibration Belt
def calbelt(self, plot:bool=False, subset = None, confLevels=[0.8, 0.95], alpha=0.95) -> calbelt_result:
"""Calculate the calibration belt and draw plot if desired.
Parameters
----------
plot: boolean, optional
Decide if plot for calibration belt should be shown.
Much faster calculation if set to 'false'!
subset: array_like
An optional boolean vector specifying the subset of observations to be considered.
Defaults to None.
confLevels: list
A numeric vector containing the confidence levels of the calibration belt.
Defaults to [0.8,0.95].
alpha: float
The level of significance to use.
Returns
-------
T : float
The Calibration plot test statistic T.
p : float
The p-value of the test.
fig : matplotlib.figure
The calibration belt plot. Only returned if plot='True'
See Also
--------
pycaleva.calbelt.CalibrationBelt
CalibrationEvaluator.calplot
Notes
-----
This is an implemenation of the test proposed by Nattino et al. [6].
The implementation was built upon the python port of the R-Package givitiR [8] and the python implementation calibration-belt [7].
The calibration belt estimates the true underlying calibration curve given predicted probabilities and true class labels.
Instead of directly drawing the calibration curve a belt is drawn using confidence levels.
A low value for the teststatistic and a high p-value (>0.05) indicate a well calibrated model.
Other than Hosmer Lemeshow Test or Pigeon Heyse Test, this test is not based on grouping strategies.
References
----------
.. [6] <NAME>., <NAME>., & <NAME>. (2014). A new calibration test
and a reappraisal of the calibration belt for the assessment of prediction models
based on dichotomous outcomes. Statistics in medicine, 33(14), 2390-2407.
.. [7] <NAME>. (2021). calibrattion-belt: Assessment of calibration in binomial prediction models [Computer software].
Available from https://github.com/fabiankueppers/calibration-framework
.. [8] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2017).
givitiR: The giviti calibration test and belt (R package version 1.3) [Computer
software]. The Comprehensive R Archive Network.
Available from https://CRAN.R-project.org/package=givitiR
Examples
--------
>>> from pycaleva import CalibrationEvaluator
>>> ce = CalibrationEvaluator(y_test, pred_prob, outsample=True, n_groups='auto')
>>> ce.calbelt(plot=False)
calbelt_result(statistic=1.6111330037643796, pvalue=0.4468347221346196, fig=None)
"""
cb = CalibrationBelt(self.__y, self.__p, self.__devel, subset=subset, confLevels=confLevels, alpha=alpha)
if plot:
return cb.plot()
else:
return cb.stats()
def calibration_plot(self):
"""Generate the calibration plot for the given predicted probabilities and true class labels of current class instance.
Returns
-------
plot : matplotlib.figure
Notes
-----
This calibration plot is showing the predicted class probability against the actual probability according to the true class labels
as a red triangle for each of the groups. An additional calibration curve is draw, estimated using the LOWESS algorithm.
A model is well calibrated, if the red triangles and the calibration curve are both close to the plots bisector.
In the left corner of the plot all available metrics are listed as well. This implementation was made following the example of the R package
rms [5].
See Also
--------
CalibrationEvaluator.calbelt
References
----------
.. [5] Jr, <NAME>. (2021). rms: Regression modeling strategies (R package version
6.2-0) [Computer software]. The Comprehensive R Archive Network.
Available from https://CRAN.R-project.org/package=rms
Examples
--------
>>> from pycaleva import CalibrationEvaluator
>>> ce = CalibrationEvaluator(y_test, pred_prob, outsample=True, n_groups='auto')
>>> ce.calibration_plot()
"""
fig, ax1 = plt.subplots(figsize=(10,6))
# Draw a calibration plot using matplotlib only
y_grouped = self.__ct["mean_observed"]
p_grouped = self.__ct["mean_predicted"]
# Get nonparametric curve based on y and p using lowess
x_nonparametric,y_nonparametric = self.__nonparametric_fit(update_awlc=False)
# Add calibration line for model
plt.scatter(p_grouped,y_grouped, marker="^", facecolors='none', edgecolors='r', label='Grouped observations')
# Add histogram on second axis
h, e = np.histogram(self.__p, bins=50)
h = h.astype('float')
h /= h.max() # Get relative frequencies
ax2 = ax1.twinx()
ax2.set_ylim(-0.1,5) # Scale down histogram
ax2.axis('off') # Hide labels and ticks
ax2.stem(e[:-1],h, linefmt="grey", markerfmt=" ", basefmt=" ")
# Add line for nonparametric fit using lowess
ax1.plot(x_nonparametric, y_nonparametric, label="Nonparametric")
# Add line for perfect calibration
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
ax1.set_xlabel('Predicted Probability')
ax1.set_ylabel('Actual Probability')
props = dict(boxstyle='round', facecolor='white', alpha=0.4)
ax1.text(0.00, 0.75, self.__metrics_to_string(), fontsize=10, family='monospace', bbox=props)
ax1.legend(loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))
return fig | [
"pandas.DataFrame",
"numpy.abs",
"pandas.Interval",
"matplotlib.pyplot.scatter",
"statsmodels.nonparametric.smoothers_lowess.lowess",
"pandas.cut",
"numpy.histogram",
"numpy.array",
"numpy.arange",
"pandas.qcut",
"scipy.integrate.trapezoid",
"warnings.warn",
"math.log2",
"matplotlib.pyplot... | [((4462, 4478), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (4470, 4478), True, 'import numpy as np\n'), ((4518, 4534), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (4526, 4534), True, 'import numpy as np\n'), ((9454, 9678), 'pandas.DataFrame', 'pd.DataFrame', (["{'total': total, 'mean_predicted': mean_predicted, 'mean_observed':\n mean_observed, 'observed_0': total - observed, 'predicted_0': total -\n predicted, 'observed_1': observed, 'predicted_1': predicted}"], {}), "({'total': total, 'mean_predicted': mean_predicted,\n 'mean_observed': mean_observed, 'observed_0': total - observed,\n 'predicted_0': total - predicted, 'observed_1': observed, 'predicted_1':\n predicted})\n", (9466, 9678), True, 'import pandas as pd\n'), ((11571, 11593), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.005)'], {}), '(0, 1, 0.005)\n', (11580, 11593), True, 'import numpy as np\n'), ((11618, 11673), 'statsmodels.nonparametric.smoothers_lowess.lowess', 'lowess', (['self.__y', 'self.__p'], {'it': '(0)', 'xvals': 'x_nonparametric'}), '(self.__y, self.__p, it=0, xvals=x_nonparametric)\n', (11624, 11673), False, 'from statsmodels.nonparametric.smoothers_lowess import lowess\n'), ((15104, 15160), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'class': self.__y, 'prob': self.__p}"}), "(data={'class': self.__y, 'prob': self.__p})\n", (15116, 15160), True, 'import pandas as pd\n'), ((17171, 17237), 'pandas.Interval', 'pd.Interval', (['self.__ct.index[0].left', 'self.__ct.index[i - 1].right'], {}), '(self.__ct.index[0].left, self.__ct.index[i - 1].right)\n', (17182, 17237), True, 'import pandas as pd\n'), ((17498, 17542), 'pandas.cut', 'pd.cut', (["self.__data['prob']", 'self.__ct.index'], {}), "(self.__data['prob'], self.__ct.index)\n", (17504, 17542), True, 'import pandas as pd\n'), ((33112, 33141), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (33124, 33141), True, 'import 
matplotlib.pyplot as plt\n'), ((33494, 33609), 'matplotlib.pyplot.scatter', 'plt.scatter', (['p_grouped', 'y_grouped'], {'marker': '"""^"""', 'facecolors': '"""none"""', 'edgecolors': '"""r"""', 'label': '"""Grouped observations"""'}), "(p_grouped, y_grouped, marker='^', facecolors='none', edgecolors\n ='r', label='Grouped observations')\n", (33505, 33609), True, 'import matplotlib.pyplot as plt\n'), ((33659, 33690), 'numpy.histogram', 'np.histogram', (['self.__p'], {'bins': '(50)'}), '(self.__p, bins=50)\n', (33671, 33690), True, 'import numpy as np\n'), ((4395, 4411), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (4403, 4411), True, 'import numpy as np\n'), ((4413, 4429), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (4421, 4429), True, 'import numpy as np\n'), ((8179, 8310), 'warnings.warn', 'warnings.warn', (['"""Please set parameter outsample to \'false\' if the evaluated model was fit on this dataset!"""', '"""UserWarning"""'], {}), '(\n "Please set parameter outsample to \'false\' if the evaluated model was fit on this dataset!"\n , \'UserWarning\')\n', (8192, 8310), False, 'import warnings\n'), ((11718, 11759), 'numpy.abs', 'np.abs', (['(x_nonparametric - y_nonparametric)'], {}), '(x_nonparametric - y_nonparametric)\n', (11724, 11759), True, 'import numpy as np\n'), ((11786, 11828), 'scipy.integrate.trapezoid', 'integrate.trapezoid', (['diff', 'y_nonparametric'], {}), '(diff, y_nonparametric)\n', (11805, 11828), False, 'from scipy import integrate\n'), ((15347, 15382), 'pandas.qcut', 'pd.qcut', (["df['prob']", 'self.__ngroups'], {}), "(df['prob'], self.__ngroups)\n", (15354, 15382), True, 'import pandas as pd\n'), ((21494, 21511), 'scipy.stats.chi2.cdf', 'chi2.cdf', (['C_', 'dof'], {}), '(C_, dof)\n', (21502, 21511), False, 'from scipy.stats import chi2, norm\n'), ((25773, 25796), 'scipy.stats.chi2.cdf', 'chi2.cdf', (['J_square', 'dof'], {}), '(J_square, dof)\n', (25781, 25796), False, 'from scipy.stats import chi2, norm\n'), 
((8655, 8713), 'numpy.abs', 'np.abs', (['(self.__ct.mean_predicted - self.__ct.mean_observed)'], {}), '(self.__ct.mean_predicted - self.__ct.mean_observed)\n', (8661, 8713), True, 'import numpy as np\n'), ((8529, 8587), 'numpy.abs', 'np.abs', (['(self.__ct.mean_predicted - self.__ct.mean_observed)'], {}), '(self.__ct.mean_predicted - self.__ct.mean_observed)\n', (8535, 8587), True, 'import numpy as np\n'), ((14967, 14981), 'math.log2', 'log2', (['self.__n'], {}), '(self.__n)\n', (14971, 14981), False, 'from math import log2, ceil, sqrt\n')] |
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
ATOL = 0.2
RTOL = 0.2
output_path = Path('output')
plot_path = Path('plot')
tools = {
'amici': 'AMICI',
'pypesto': 'pyPESTO',
}
vectors = {
'default': 'PEtab nominal',
'midpoint': 'Midpoint of scaled bounds',
}
data = {(vector_id, tool_id): {} for vector_id in vectors for tool_id in tools}
for full_model_path in output_path.glob('*'):
model_name = full_model_path.stem
for tool_id, tool_name in tools.items():
for vector_id, vector_name in vectors.items():
data_tsv = output_path / model_name / tool_id / 'result' / (vector_id + '.tsv')
try:
df = pd.read_csv(str(data_tsv), sep='\t')
result = int((np.array(df.abs_err < ATOL) | np.array(df.rel_err < RTOL)).all())
except FileNotFoundError:
result = -1
data[(vector_id, tool_id)][model_name] = result
sorted_data = {}
for vector_tool in sorted(data):
sorted_data[vector_tool] = {model_name: data[vector_tool][model_name] for model_name in sorted(data[vector_tool])}
df = pd.DataFrame(data=sorted_data)
fig, ax = plt.subplots(figsize=(10, 15))
sns.heatmap(df, ax=ax)
plt.tight_layout()
plt.savefig(str(plot_path / 'result.png'))
| [
"pandas.DataFrame",
"matplotlib.pyplot.tight_layout",
"seaborn.heatmap",
"pathlib.Path",
"numpy.array",
"matplotlib.pyplot.subplots"
] | [((158, 172), 'pathlib.Path', 'Path', (['"""output"""'], {}), "('output')\n", (162, 172), False, 'from pathlib import Path\n'), ((185, 197), 'pathlib.Path', 'Path', (['"""plot"""'], {}), "('plot')\n", (189, 197), False, 'from pathlib import Path\n'), ((1181, 1211), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'sorted_data'}), '(data=sorted_data)\n', (1193, 1211), True, 'import pandas as pd\n'), ((1222, 1252), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 15)'}), '(figsize=(10, 15))\n', (1234, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1275), 'seaborn.heatmap', 'sns.heatmap', (['df'], {'ax': 'ax'}), '(df, ax=ax)\n', (1264, 1275), True, 'import seaborn as sns\n'), ((1276, 1294), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1292, 1294), True, 'import matplotlib.pyplot as plt\n'), ((813, 840), 'numpy.array', 'np.array', (['(df.abs_err < ATOL)'], {}), '(df.abs_err < ATOL)\n', (821, 840), True, 'import numpy as np\n'), ((843, 870), 'numpy.array', 'np.array', (['(df.rel_err < RTOL)'], {}), '(df.rel_err < RTOL)\n', (851, 870), True, 'import numpy as np\n')] |
from __future__ import print_function
import ctypes
import numpy as np
import cv2
import tensorrt as trt
import pycuda.driver as cuda
try:
ctypes.cdll.LoadLibrary('../plugins/libyolo_layer.so')
except OSError as e:
raise SystemExit('ERROR: failed to load ./plugins/libyolo_layer.so. '
'Did you forget to do a "make" in the "./plugins/" '
'subdirectory?') from e
def _preprocess_pose(model_name, img, input_shape):
"""Preprocess an image before TRT POSE inferencing.
# Args
img: int8 numpy array of shape (img_h, img_w, 3)
input_shape: a tuple of (H, W)
# Returns
preprocessed img: float32 numpy array of shape (3, H, W)
"""
if 'resnet' in model_name:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
#print(img.shape)
img = img.transpose((2, 0, 1)).astype(np.float32)
img /= 255.0
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
img[0] = np.subtract(img[0], mean[0])/std[0]
img[1] = np.subtract(img[1], mean[1])/std[1]
img[2] = np.subtract(img[2], mean[2])/std[2]
#print(img.shape)
#print(type(img[1][20][15]))
#print(img[1][20][15])
img = np.expand_dims(img, axis=0)
return img
class HostDeviceMem(object):
"""Simple helper data class that's a little nicer to use than a 2-tuple."""
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
def allocate_buffers(engine):
"""Allocates all host/device in/out buffers required for an engine."""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * \
engine.max_batch_size
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
# each grid has 3 anchors, each anchor generates a detection
# output of 7 float32 values
#print(size)
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def do_inference_v2(context, bindings, inputs, outputs, stream):
"""do_inference_v2 (for TensorRT 7.0+)
This function is generalized for multiple inputs/outputs for full
dimension networks.
Inputs and outputs are expected to be lists of HostDeviceMem objects.
"""
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
class TrtPOSE(object):
"""TrtYOLO class encapsulates things needed to run TRT YOLO."""
def _load_engine(self):
TRTbin = self.model
with open(TRTbin, 'rb') as f, trt.Runtime(self.trt_logger) as runtime:
return runtime.deserialize_cuda_engine(f.read())
def __init__(self, model_path, input_shape, cuda_ctx=None):
"""Initialize TensorRT plugins, engine and conetxt."""
self.model = model_path
self.input_shape = input_shape
self.cuda_ctx = cuda_ctx
if self.cuda_ctx:
self.cuda_ctx.push()
self.inference_fn = do_inference_v2
self.trt_logger = trt.Logger(trt.Logger.INFO)
self.engine = self._load_engine()
try:
self.context = self.engine.create_execution_context()
self.inputs, self.outputs, self.bindings, self.stream = \
allocate_buffers(self.engine)
except Exception as e:
raise RuntimeError('fail to allocate CUDA resources') from e
finally:
if self.cuda_ctx:
self.cuda_ctx.pop()
def __del__(self):
"""Free CUDA memories."""
del self.outputs
del self.inputs
del self.stream
def estimation(self, model_name, model_input):
#img_resized = _preprocess_pose(model_name, img, (self.input_shape[0], self.input_shape[1]))
# Set host input to the image. The do_inference() function
# will copy the input to the GPU before executing.
self.inputs[0].host = np.ascontiguousarray(model_input)
#print(np.ascontiguousarray(model_input).shape)
if self.cuda_ctx:
self.cuda_ctx.push()
trt_outputs = self.inference_fn(
context=self.context,
bindings=self.bindings,
inputs=self.inputs,
outputs=self.outputs,
stream=self.stream)
if self.cuda_ctx:
self.cuda_ctx.pop()
if "384" in model_name:
output = np.array(trt_outputs).reshape(2, 17, 96, 72)
else:
output = np.array(trt_outputs).reshape(2, 17, 64, 48)
return output
| [
"pycuda.driver.Stream",
"tensorrt.Logger",
"pycuda.driver.memcpy_dtoh_async",
"numpy.subtract",
"pycuda.driver.pagelocked_empty",
"cv2.cvtColor",
"pycuda.driver.memcpy_htod_async",
"ctypes.cdll.LoadLibrary",
"numpy.expand_dims",
"pycuda.driver.mem_alloc",
"numpy.array",
"tensorrt.Runtime",
"... | [((147, 201), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['"""../plugins/libyolo_layer.so"""'], {}), "('../plugins/libyolo_layer.so')\n", (170, 201), False, 'import ctypes\n'), ((911, 942), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (919, 942), True, 'import numpy as np\n'), ((953, 984), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (961, 984), True, 'import numpy as np\n'), ((1224, 1251), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1238, 1251), True, 'import numpy as np\n'), ((1813, 1826), 'pycuda.driver.Stream', 'cuda.Stream', ([], {}), '()\n', (1824, 1826), True, 'import pycuda.driver as cuda\n'), ((765, 801), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (777, 801), False, 'import cv2\n'), ((998, 1026), 'numpy.subtract', 'np.subtract', (['img[0]', 'mean[0]'], {}), '(img[0], mean[0])\n', (1009, 1026), True, 'import numpy as np\n'), ((1047, 1075), 'numpy.subtract', 'np.subtract', (['img[1]', 'mean[1]'], {}), '(img[1], mean[1])\n', (1058, 1075), True, 'import numpy as np\n'), ((1096, 1124), 'numpy.subtract', 'np.subtract', (['img[2]', 'mean[2]'], {}), '(img[2], mean[2])\n', (1107, 1124), True, 'import numpy as np\n'), ((2080, 2114), 'pycuda.driver.pagelocked_empty', 'cuda.pagelocked_empty', (['size', 'dtype'], {}), '(size, dtype)\n', (2101, 2114), True, 'import pycuda.driver as cuda\n'), ((2136, 2167), 'pycuda.driver.mem_alloc', 'cuda.mem_alloc', (['host_mem.nbytes'], {}), '(host_mem.nbytes)\n', (2150, 2167), True, 'import pycuda.driver as cuda\n'), ((3006, 3058), 'pycuda.driver.memcpy_htod_async', 'cuda.memcpy_htod_async', (['inp.device', 'inp.host', 'stream'], {}), '(inp.device, inp.host, stream)\n', (3028, 3058), True, 'import pycuda.driver as cuda\n'), ((3227, 3279), 'pycuda.driver.memcpy_dtoh_async', 'cuda.memcpy_dtoh_async', (['out.host', 'out.device', 
'stream'], {}), '(out.host, out.device, stream)\n', (3249, 3279), True, 'import pycuda.driver as cuda\n'), ((4083, 4110), 'tensorrt.Logger', 'trt.Logger', (['trt.Logger.INFO'], {}), '(trt.Logger.INFO)\n', (4093, 4110), True, 'import tensorrt as trt\n'), ((4978, 5011), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['model_input'], {}), '(model_input)\n', (4998, 5011), True, 'import numpy as np\n'), ((3619, 3647), 'tensorrt.Runtime', 'trt.Runtime', (['self.trt_logger'], {}), '(self.trt_logger)\n', (3630, 3647), True, 'import tensorrt as trt\n'), ((5451, 5472), 'numpy.array', 'np.array', (['trt_outputs'], {}), '(trt_outputs)\n', (5459, 5472), True, 'import numpy as np\n'), ((5531, 5552), 'numpy.array', 'np.array', (['trt_outputs'], {}), '(trt_outputs)\n', (5539, 5552), True, 'import numpy as np\n')] |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import TestCase
import numpy as np
from qf_lib_tests.unit_tests.portfolio_construction.utils import assets_df
from qf_lib.portfolio_construction.portfolio_models.max_diversification_portfolio import \
MaxDiversificationPortfolio
class TestMaxDiversificationPortfolio(TestCase):
@classmethod
def setUpClass(cls):
cls.assets_df = assets_df
def test_get_weights(self):
portfolio = MaxDiversificationPortfolio(self.assets_df.cov(), self.assets_df.std())
actual_weights = portfolio.get_weights()
expected_weights_vals = np.zeros(20)
expected_weights_vals[1] = 0.0393
expected_weights_vals[2] = 0.0569
expected_weights_vals[3] = 0.0249
expected_weights_vals[5] = 0.1076
expected_weights_vals[6] = 0.0864
expected_weights_vals[7] = 0.0830
expected_weights_vals[9] = 0.0528
expected_weights_vals[10] = 0.1137
expected_weights_vals[11] = 0.0664
expected_weights_vals[12] = 0.0730
expected_weights_vals[14] = 0.0672
expected_weights_vals[16] = 0.0584
expected_weights_vals[17] = 0.0575
expected_weights_vals[18] = 0.0567
expected_weights_vals[19] = 0.0562
self.assertTrue(np.allclose(expected_weights_vals, actual_weights.values, rtol=0, atol=1e-04))
def test_get_weights_with_upper_limits(self):
portfolio = MaxDiversificationPortfolio(self.assets_df.cov(), self.assets_df.std(), upper_constraint=0.1)
actual_weights = portfolio.get_weights()
expected_weights_vals = np.zeros(20)
expected_weights_vals[1] = 0.0404
expected_weights_vals[2] = 0.0583
expected_weights_vals[3] = 0.0264
expected_weights_vals[5] = 0.0999
expected_weights_vals[6] = 0.0876
expected_weights_vals[7] = 0.0845
expected_weights_vals[9] = 0.0533
expected_weights_vals[10] = 0.0999
expected_weights_vals[11] = 0.0682
expected_weights_vals[12] = 0.0755
expected_weights_vals[14] = 0.0682
expected_weights_vals[16] = 0.0581
expected_weights_vals[17] = 0.0600
expected_weights_vals[18] = 0.0604
expected_weights_vals[19] = 0.0592
self.assertTrue(np.allclose(expected_weights_vals, actual_weights.values, rtol=0, atol=1e-04))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.allclose",
"numpy.zeros"
] | [((3053, 3068), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3066, 3068), False, 'import unittest\n'), ((1263, 1275), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (1271, 1275), True, 'import numpy as np\n'), ((2265, 2277), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (2273, 2277), True, 'import numpy as np\n'), ((1939, 2017), 'numpy.allclose', 'np.allclose', (['expected_weights_vals', 'actual_weights.values'], {'rtol': '(0)', 'atol': '(0.0001)'}), '(expected_weights_vals, actual_weights.values, rtol=0, atol=0.0001)\n', (1950, 2017), True, 'import numpy as np\n'), ((2941, 3019), 'numpy.allclose', 'np.allclose', (['expected_weights_vals', 'actual_weights.values'], {'rtol': '(0)', 'atol': '(0.0001)'}), '(expected_weights_vals, actual_weights.values, rtol=0, atol=0.0001)\n', (2952, 3019), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Mnist_testing_least_variance_direction_dirichlet.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1xHmmcFk_REH865XvBafXANHO7i7enMJl
"""
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from matplotlib import pyplot as plt
from numpy import linalg as LA
import copy
import torch.optim as optim
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5), (0.5))])
trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
classes = ('zero','one','two','three','four','five','six','seven','eight','nine')
foreground_classes = {'zero','one','two'}
fg_used = '012'
fg1, fg2, fg3 = 0,1,2
all_classes = {'zero','one','two','three','four','five','six','seven','eight','nine'}
background_classes = all_classes - foreground_classes
background_classes
gamma = 5*1e-3
train = trainset.data
label = trainset.targets
train = np.reshape(train, (60000,784))
train.shape
u, s, vh = LA.svd(train, full_matrices= False)
dir = vh[600:610,:] #vh[774:784,:] #vh[0:9,:]
u1 = dir[7,:]
u2 = dir[8,:]
u3 = dir[9,:]
cnt=0
# Shift every training image of a foreground class along its assigned singular
# direction, scaled by gamma and the image's own norm.
for i in range(60000):
    if(label[i] == fg1):
        train[i] = train[i] + gamma * LA.norm(train[i]) * u1
        cnt+=1
    if(label[i] == fg2):
        train[i] = train[i] + gamma * LA.norm(train[i]) * u2
        cnt+=1
    if(label[i] == fg3):
        train[i] = train[i] + gamma * LA.norm(train[i]) * u3
        cnt+=1
    if(i%10000 == 9999):
        print("partly over")   # progress marker every 10k images
print(cnt)   # number of perturbed (foreground-class) training images
train = np.reshape(train, (60000,28, 28))
trainset.data = train   # write the perturbed images back into the torchvision dataset
test = testset.data
label = testset.targets
test = np.reshape(test, (10000,784))   # flatten for the same per-image perturbation
test.shape
cnt=0
# Apply the identical class-conditional perturbation to the test split.
for i in range(10000):
    if(label[i] == fg1):
        test[i] = test[i] + gamma * LA.norm(test[i]) * u1
        cnt+=1
    if(label[i] == fg2):
        test[i] = test[i] + gamma * LA.norm(test[i]) * u2
        cnt+=1
    if(label[i] == fg3):
        test[i] = test[i] + gamma * LA.norm(test[i]) * u3
        cnt+=1
    if(i%1000 == 999):
        print("partly over")   # progress marker every 1k images
print(cnt)   # number of perturbed (foreground-class) test images
test = np.reshape(test, (10000,28, 28))
test.shape
testset.data = test
fg = [fg1,fg2,fg3]
bg = list(set([0,1,2,3,4,5,6,7,8,9])-set(fg))
fg,bg
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
dataiter = iter(trainloader)
# Partition all 60000 (= 6000 batches x 10) training images into
# foreground and background pools used to assemble the mosaics.
background_data=[]
background_label=[]
foreground_data=[]
foreground_label=[]
batch_size=10
for i in range(6000):
    images, labels = dataiter.next()   # NOTE: old-style iterator API; next(dataiter) on modern PyTorch
    for j in range(batch_size):
        if(classes[labels[j]] in background_classes):
            img = images[j].tolist()
            background_data.append(img)
            background_label.append(labels[j])
        else:
            img = images[j].tolist()
            foreground_data.append(img)
            foreground_label.append(labels[j])
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
def create_mosaic_img(bg_idx, fg_idx, fg):
    """
    Assemble one 9-tile mosaic from the module-level image pools.

    bg_idx : indices into background_data used as the 8 background tiles
    fg_idx : index into foreground_data of the single foreground tile
    fg     : position (0-8) at which the foreground tile is placed

    Returns (tiles, label): tiles is the 9 images stacked into one tensor,
    label is the foreground class remapped to {0, 1, 2}.
    """
    tiles = []
    bg_iter = iter(bg_idx)
    for pos in range(9):
        if pos == fg:
            tiles.append(foreground_data[fg_idx])
        else:
            tiles.append(background_data[next(bg_iter)])
    # foreground classes fg1..fg3 are stored as labels 0..2
    label = foreground_label[fg_idx] - fg1
    return torch.stack(tiles), label
desired_num = 10000
mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9
mosaic_label=[] # label of mosaic image = foreground class present in that mosaic
list_set_labels = []
# Build 10000 training mosaics; seeding with i makes the construction reproducible.
for i in range(desired_num):
    set_idx = set()
    np.random.seed(i)
    bg_idx = np.random.randint(0,35000,8)     # 8 background tiles
    set_idx = set(background_label[bg_idx].tolist())
    fg_idx = np.random.randint(0,15000)       # 1 foreground tile
    set_idx.add(foreground_label[fg_idx].item())
    fg = np.random.randint(0,9)               # slot of the foreground tile
    fore_idx.append(fg)
    image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
    mosaic_list_of_images.append(image_list)
    mosaic_label.append(label)
    list_set_labels.append(set_idx)
test_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx_test =[] # list of indexes at which foreground image is present in a mosaic image
test_label=[] # label of mosaic image = foreground class present in that mosaic
# Build 10000 held-out mosaics; seeds are offset by 30000 so the test
# mosaics never reuse the training mosaics' random draws.
for i in range(10000):
    np.random.seed(i+30000)
    bg_idx = np.random.randint(0,35000,8)     # 8 background tiles
    fg_idx = np.random.randint(0,15000)       # 1 foreground tile
    fg = np.random.randint(0,9)               # slot of the foreground tile
    fore_idx_test.append(fg)
    image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
    test_images.append(image_list)
    test_label.append(label)
def create_avg_image_from_mosaic_dataset(mosaic_dataset, labels, foreground_index, dataset_number):
    """
    Collapse each 9-tile mosaic into a single 28x28 weighted-average image.

    The foreground tile (at foreground_index[i]) gets weight dataset_number/9;
    each of the 8 background tiles gets weight (9-dataset_number)/(8*9), so the
    weights always sum to 1.

    Returns (averaged_images, labels, foreground_index) -- the last two are
    passed through unchanged.
    """
    averaged = []
    for idx, mosaic in enumerate(mosaic_dataset):
        fg_pos = foreground_index[idx]
        acc = torch.zeros([28, 28], dtype=torch.float64)
        for tile_pos in range(9):
            if tile_pos == fg_pos:
                acc = acc + mosaic[tile_pos] * dataset_number / 9
            else:
                acc = acc + mosaic[tile_pos] * (9 - dataset_number) / (8 * 9)
        averaged.append(acc)
    return averaged, labels, foreground_index
def create_avg_image_from_mosaic_dataset_fraction(mosaic_dataset, labels, foreground_index, dataset_number, fraction):
    """
    Collapse each 9-tile mosaic into one 28x28 weighted-average image, with the
    foreground tile weighted fraction/9 and each background tile weighted
    (9-fraction)/(8*9).

    mosaic_dataset   : list of 9-tile mosaics (each indexable [0..8], 28x28)
    labels           : mosaic labels, returned unchanged
    foreground_index : position (0-8) of the foreground tile in each mosaic
    dataset_number   : identifier used for seeding and progress messages
    fraction         : weight numerator given to the foreground tile

    Returns (averaged_images, labels, foreground_index).
    """
    avg_image_dataset = []
    cnt = 0
    counter = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
    for i in range(len(mosaic_dataset)):
        img = torch.zeros([28, 28], dtype=torch.float64)
        # Vestige of an abandoned random-preference experiment (the randint was
        # commented out); kept so the global numpy RNG state evolves as before.
        np.random.seed(dataset_number * 10000 + i)
        give_pref = foreground_index[i]
        for j in range(9):
            if j == give_pref:
                img = img + mosaic_dataset[i][j] * fraction / 9
            else:
                img = img + mosaic_dataset[i][j] * (9 - fraction) / (8 * 9)
        # give_pref is always the true foreground position, so every mosaic is
        # "correctly averaged" (the original's else-branch was unreachable).
        cnt += 1
        counter[give_pref] += 1
        avg_image_dataset.append(img)
    print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt))
    print("the averaging are done as ", counter)
    return avg_image_dataset, labels, foreground_index
# Averaged TRAINING sets: dataset "k" weights the foreground tile k/9.
# The fractional variants (1.01, 1.02, 1.1, 1.2, 1.5) probe weights just above 1.
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 1)
# NOTE: fg_index_2/3 here are clobbered below, and fg_index_4 is assigned three
# times in a row; only the values from the integer datasets further down survive.
avg_image_dataset_1_01 , labels_1_01, fg_index_2 = create_avg_image_from_mosaic_dataset_fraction(mosaic_list_of_images, mosaic_label, fore_idx, 2,1.01)
avg_image_dataset_1_02, labels_1_02, fg_index_3 = create_avg_image_from_mosaic_dataset_fraction(mosaic_list_of_images, mosaic_label, fore_idx , 3, 1.02)
avg_image_dataset_1_1 , labels_1_1, fg_index_4 = create_avg_image_from_mosaic_dataset_fraction(mosaic_list_of_images, mosaic_label, fore_idx , 4, 1.1)
avg_image_dataset_1_2 , labels_1_2, fg_index_4 = create_avg_image_from_mosaic_dataset_fraction(mosaic_list_of_images, mosaic_label, fore_idx , 5, 1.2)
avg_image_dataset_1_5 , labels_1_5, fg_index_4 = create_avg_image_from_mosaic_dataset_fraction(mosaic_list_of_images, mosaic_label, fore_idx , 6, 1.5)
# Averaged TEST sets for the same weightings.
avg_test_1 , labels_test_1, fg_index_test_1 = create_avg_image_from_mosaic_dataset(test_images, test_label, fore_idx_test , 1)
avg_test_1_01 , labels_test_1_01, fg_index_test_2 = create_avg_image_from_mosaic_dataset_fraction(test_images, test_label, fore_idx_test , 2,1.01)
avg_test_1_02, labels_test_1_02, fg_index_test_3 = create_avg_image_from_mosaic_dataset_fraction(test_images, test_label, fore_idx_test , 3,1.02)
avg_test_1_1 , labels_test_1_1, fg_index_test_4 = create_avg_image_from_mosaic_dataset_fraction(test_images, test_label, fore_idx_test , 4,1.1)
avg_test_1_2 , labels_test_1_2, fg_index_test_5 = create_avg_image_from_mosaic_dataset_fraction(test_images, test_label, fore_idx_test , 5,1.2)
avg_test_1_5 , labels_test_1_5, fg_index_test_6 = create_avg_image_from_mosaic_dataset_fraction(test_images, test_label, fore_idx_test , 6,1.5)
# Integer foreground weights 2/9 .. 9/9.
avg_image_dataset_2 , labels_2, fg_index_2 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 2)
avg_image_dataset_3 , labels_3, fg_index_3 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 3)
avg_image_dataset_4 , labels_4, fg_index_4 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 4)
avg_image_dataset_5 , labels_5, fg_index_5 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 5)
avg_image_dataset_6 , labels_6, fg_index_6 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 6)
avg_image_dataset_7 , labels_7, fg_index_7 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 7)
avg_image_dataset_8 , labels_8, fg_index_8 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 8)
avg_image_dataset_9 , labels_9, fg_index_9 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 9)
avg_test_2 , labels_test_2, fg_index_test_2 = create_avg_image_from_mosaic_dataset(test_images, test_label, fore_idx_test , 2)
avg_test_3 , labels_test_3, fg_index_test_3 = create_avg_image_from_mosaic_dataset(test_images, test_label, fore_idx_test , 3)
avg_test_4 , labels_test_4, fg_index_test_4 = create_avg_image_from_mosaic_dataset(test_images, test_label, fore_idx_test , 4)
avg_test_5 , labels_test_5, fg_index_test_5 = create_avg_image_from_mosaic_dataset(test_images, test_label, fore_idx_test , 5)
avg_test_6 , labels_test_6, fg_index_test_6 = create_avg_image_from_mosaic_dataset(test_images, test_label, fore_idx_test , 6)
avg_test_7 , labels_test_7, fg_index_test_7 = create_avg_image_from_mosaic_dataset(test_images, test_label, fore_idx_test , 7)
avg_test_8 , labels_test_8, fg_index_test_8 = create_avg_image_from_mosaic_dataset(test_images, test_label, fore_idx_test , 8)
avg_test_9 , labels_test_9, fg_index_test_9 = create_avg_image_from_mosaic_dataset(test_images, test_label, fore_idx_test , 9)
class MosaicDataset(Dataset):
    """Wraps a list of (averaged) mosaic images and their labels as a torch Dataset."""

    def __init__(self, mosaic_list_of_images, mosaic_label):
        """
        Args:
            mosaic_list_of_images: sequence of mosaic images.
            mosaic_label: sequence of labels, parallel to the images.
        """
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label

    def __len__(self):
        """Number of (image, label) pairs."""
        return len(self.label)

    def __getitem__(self, idx):
        """Return the (image, label) pair at position idx."""
        return self.mosaic[idx], self.label[idx]
batch = 256
epochs = 65   # NOTE: unused in practice -- train_all() shadows this with epochs = 70
# One (Dataset, DataLoader) pair per averaged training set; training loaders shuffle.
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
traindata_1_01 = MosaicDataset(avg_image_dataset_1_01, labels_1_01 )
trainloader_1_01 = DataLoader( traindata_1_01 , batch_size= batch ,shuffle=True)
traindata_1_02 = MosaicDataset(avg_image_dataset_1_02, labels_1_02 )
trainloader_1_02 = DataLoader( traindata_1_02 , batch_size= batch ,shuffle=True)
traindata_1_1 = MosaicDataset(avg_image_dataset_1_1, labels_1_1 )
trainloader_1_1 = DataLoader( traindata_1_1 , batch_size= batch ,shuffle=True)
traindata_1_2 = MosaicDataset(avg_image_dataset_1_2, labels_1_2 )
trainloader_1_2 = DataLoader( traindata_1_2 , batch_size= batch ,shuffle=True)
traindata_1_5 = MosaicDataset(avg_image_dataset_1_5, labels_1_5 )
trainloader_1_5 = DataLoader( traindata_1_5 , batch_size= batch ,shuffle=True)
# Test loaders keep a fixed order (shuffle=False).
testdata_1 = MosaicDataset(avg_test_1, labels_test_1 )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
testdata_1_01 = MosaicDataset(avg_test_1_01, labels_test_1_01 )
testloader_1_01 = DataLoader( testdata_1_01 , batch_size= batch ,shuffle=False)
testdata_1_02 = MosaicDataset(avg_test_1_02, labels_test_1_02 )
testloader_1_02 = DataLoader( testdata_1_02 , batch_size= batch ,shuffle=False)
testdata_1_1 = MosaicDataset(avg_test_1_1, labels_test_1_1 )
testloader_1_1 = DataLoader( testdata_1_1 , batch_size= batch ,shuffle=False)
testdata_1_2 = MosaicDataset(avg_test_1_2, labels_test_1_2 )
testloader_1_2 = DataLoader( testdata_1_2 , batch_size= batch ,shuffle=False)
testdata_1_5 = MosaicDataset(avg_test_1_5, labels_test_1_5 )
testloader_1_5 = DataLoader( testdata_1_5 , batch_size= batch ,shuffle=False)
traindata_2 = MosaicDataset(avg_image_dataset_2, labels_2 )
trainloader_2 = DataLoader( traindata_2 , batch_size= batch ,shuffle=True)
traindata_3 = MosaicDataset(avg_image_dataset_3, labels_3 )
trainloader_3 = DataLoader( traindata_3 , batch_size= batch ,shuffle=True)
traindata_4 = MosaicDataset(avg_image_dataset_4, labels_4 )
trainloader_4 = DataLoader( traindata_4 , batch_size= batch ,shuffle=True)
traindata_5 = MosaicDataset(avg_image_dataset_5, labels_5 )
trainloader_5 = DataLoader( traindata_5 , batch_size= batch ,shuffle=True)
traindata_6 = MosaicDataset(avg_image_dataset_6, labels_6 )
trainloader_6 = DataLoader( traindata_6 , batch_size= batch ,shuffle=True)
traindata_7 = MosaicDataset(avg_image_dataset_7, labels_7 )
trainloader_7 = DataLoader( traindata_7 , batch_size= batch ,shuffle=True)
traindata_8 = MosaicDataset(avg_image_dataset_8, labels_8 )
trainloader_8 = DataLoader( traindata_8 , batch_size= batch ,shuffle=True)
traindata_9 = MosaicDataset(avg_image_dataset_9, labels_9 )
trainloader_9 = DataLoader( traindata_9 , batch_size= batch ,shuffle=True)
testdata_2 = MosaicDataset(avg_test_2, labels_test_2 )
testloader_2 = DataLoader( testdata_2 , batch_size= batch ,shuffle=False)
testdata_3 = MosaicDataset(avg_test_3, labels_test_3 )
testloader_3 = DataLoader( testdata_3 , batch_size= batch ,shuffle=False)
testdata_4 = MosaicDataset(avg_test_4, labels_test_4 )
testloader_4 = DataLoader( testdata_4 , batch_size= batch ,shuffle=False)
testdata_5 = MosaicDataset(avg_test_5, labels_test_5 )
testloader_5 = DataLoader( testdata_5 , batch_size= batch ,shuffle=False)
testdata_6 = MosaicDataset(avg_test_6, labels_test_6 )
testloader_6 = DataLoader( testdata_6 , batch_size= batch ,shuffle=False)
testdata_7 = MosaicDataset(avg_test_7, labels_test_7 )
testloader_7 = DataLoader( testdata_7 , batch_size= batch ,shuffle=False)
testdata_8 = MosaicDataset(avg_test_8, labels_test_8 )
testloader_8 = DataLoader( testdata_8 , batch_size= batch ,shuffle=False)
testdata_9 = MosaicDataset(avg_test_9, labels_test_9 )
testloader_9 = DataLoader( testdata_9 , batch_size= batch ,shuffle=False)
class Conv_module(nn.Module):
    """Conv2d -> BatchNorm2d (batch statistics only) -> ReLU building block."""

    def __init__(self, inp_ch, f, s, k, pad):
        """
        inp_ch : input channel count
        f      : output channel count (number of filters)
        s      : stride
        k      : kernel size
        pad    : zero-padding
        """
        super(Conv_module, self).__init__()
        # keep the hyper-parameters around for introspection
        self.inp_ch, self.f, self.s, self.k, self.pad = inp_ch, f, s, k, pad
        self.conv = nn.Conv2d(inp_ch, f, k, stride=s, padding=pad)
        # track_running_stats=False: always normalise with the current batch stats
        self.bn = nn.BatchNorm2d(f, track_running_stats=False)
        self.act = nn.ReLU()

    def forward(self, x):
        """Apply convolution, batch-norm and ReLU in sequence."""
        return self.act(self.bn(self.conv(x)))
class inception_module(nn.Module):
    """Two parallel Conv_module branches (1x1 and 3x3) concatenated on channels."""

    def __init__(self, inp_ch, f0, f1):
        """
        inp_ch : input channels
        f0     : filters in the 1x1 branch
        f1     : filters in the 3x3 branch (padding 1 preserves spatial size)
        """
        super(inception_module, self).__init__()
        self.inp_ch = inp_ch
        self.f0 = f0
        self.f1 = f1
        self.conv1 = Conv_module(inp_ch, f0, 1, 1, pad=0)
        self.conv3 = Conv_module(inp_ch, f1, 1, 3, pad=1)

    def forward(self, x):
        """Run both branches and concatenate their outputs along the channel dim."""
        branches = (self.conv1.forward(x), self.conv3.forward(x))
        return torch.cat(branches, dim=1)
class downsample_module(nn.Module):
    """Spatial downsampling: a stride-2 conv branch and a stride-2 max-pool branch,
    concatenated on channels. forward() also exposes the conv-branch output."""

    def __init__(self, inp_ch, f):
        """
        inp_ch : input channels
        f      : filters in the strided-conv branch
        """
        super(downsample_module, self).__init__()
        self.inp_ch = inp_ch
        self.f = f
        self.conv = Conv_module(inp_ch, f, 2, 3, pad=0)
        self.pool = nn.MaxPool2d(3, stride=2, padding=0)

    def forward(self, x):
        """Return (concatenated downsampled features, conv-branch output)."""
        conv_branch = self.conv(x)
        pool_branch = self.pool(x)
        return torch.cat((conv_branch, pool_branch), dim=1), conv_branch
class inception_net(nn.Module):
    """Small Inception-style CNN: a stem conv, stacked inception blocks with two
    downsampling stages, global average pooling and a 3-way linear classifier."""

    def __init__(self):
        super(inception_net,self).__init__()
        self.conv1 = Conv_module(1,96,1,3,0)            # 1 -> 96 channels
        self.incept1 = inception_module(96,32,32)       # -> 32+32 = 64
        self.incept2 = inception_module(64,32,48)       # -> 32+48 = 80
        self.downsample1 = downsample_module(80,80)     # -> 80+80 = 160, spatial halved
        self.incept3 = inception_module(160,112,48)     # -> 160
        self.incept4 = inception_module(160,96,64)      # -> 160
        self.incept5 = inception_module(160,80,80)      # -> 160
        self.incept6 = inception_module(160,48,96)      # -> 144
        self.downsample2 = downsample_module(144,96)    # -> 96+144 = 240, spatial halved
        self.incept7 = inception_module(240,176,60)     # -> 236
        self.incept8 = inception_module(236,176,60)     # -> 236
        self.pool = nn.AvgPool2d(5)                     # averages the remaining 5x5 map
        self.linear = nn.Linear(236,3)                  # 3 foreground classes

    def forward(self,x):
        x = self.conv1.forward(x)
        x = self.incept1.forward(x)
        x = self.incept2.forward(x)
        x,act4 = self.downsample1.forward(x)   # act4 (conv-branch activations) unused
        x = self.incept3.forward(x)
        x = self.incept4.forward(x)
        x = self.incept5.forward(x)
        x = self.incept6.forward(x)
        x,act9 = self.downsample2.forward(x)   # act9 unused
        x = self.incept7.forward(x)
        x = self.incept8.forward(x)
        x = self.pool(x)
        x = x.view(-1,1*1*236)   # flatten the globally pooled features
        x = self.linear(x)
        return x
def calculate_loss(dataloader, model, criter):
    """
    Mean per-batch loss of `model` over `dataloader`, without gradient tracking.

    dataloader : yields (inputs, labels) batches
    model      : network to evaluate; switched to eval mode
    criter     : loss criterion, e.g. nn.CrossEntropyLoss()

    Returns the summed batch losses divided by the number of batches.
    Raises ZeroDivisionError if the dataloader is empty.
    """
    model.eval()
    r_loss = 0
    n_batches = 0
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs, labels = inputs.to("cuda"), labels.to("cuda")
            outputs = model(inputs)
            loss = criter(outputs, labels)
            r_loss += loss.item()
            n_batches += 1
    # BUG FIX: the original returned r_loss/i, dividing by the *last batch index*
    # (count - 1) instead of the batch count -- off by one, and a crash when the
    # loader yields exactly one batch (i == 0).
    return r_loss / n_batches
def test_all(number, testloader, inc):
    """
    Print the classification accuracy of model `inc` on `testloader`.

    number     : dataset identifier, used only in the printed message
    testloader : yields (images, labels) batches
    inc        : trained network producing class logits
    """
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in testloader:
            images, labels = images.to("cuda"), labels.to("cuda")
            outputs = inc(images)
            # predicted class = argmax over the logits
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # Removed the original's `out`/`pred` lists: they were appended to on every
    # batch but never read (dead accumulation of the whole test set in memory).
    print('Accuracy of the network on the 10000 test dataset %d: %d %%' % (number , 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list):
    """
    Train a fresh inception_net on `trainloader` and report accuracies.

    trainloader     : averaged-mosaic training set
    ds_number       : identifier used only in printed progress messages
    testloader_list : loaders evaluated via test_all() after training

    Returns the list of per-epoch training losses, including the epoch-0
    loss computed before any weight update.
    """
    print("--"*40)
    print("training on data set ", ds_number)
    inc = inception_net().double()
    inc = inc.to("cuda")
    criterion_inception = nn.CrossEntropyLoss()
    optimizer_inception = optim.SGD(inc.parameters(), lr=0.01, momentum=0.9)
    acti = []
    loss_curi = []
    epochs = 70   # shadows the module-level `epochs = 65`
    # loss before training (epoch 0)
    running_loss = calculate_loss(trainloader,inc,criterion_inception)
    loss_curi.append(running_loss)
    print('epoch: [%d ] loss: %.3f' %(0,running_loss))
    for epoch in range(epochs): # loop over the dataset multiple times
        ep_lossi = []
        running_loss = 0.0
        inc.train()
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs, labels = data
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            # zero the parameter gradients
            optimizer_inception.zero_grad()
            # forward + backward + optimize
            outputs = inc(inputs)
            loss = criterion_inception(outputs, labels)
            running_loss += loss.item()
            loss.backward()
            optimizer_inception.step()
        # recompute the epoch loss over the whole loader in eval mode
        running_loss = calculate_loss(trainloader,inc,criterion_inception)
        print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
        loss_curi.append(running_loss) #loss per epoch
        if running_loss<=0.001:   # early stop once the training loss is negligible
            break
    print('Finished Training')
    # final accuracy on the training set itself
    correct = 0
    total = 0
    with torch.no_grad():
        for data in trainloader:
            images, labels = data
            images, labels = images.to("cuda"), labels.to("cuda")
            outputs = inc(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 train images: %d %%' % ( 100 * correct / total))
    # evaluate the trained model on every held-out loader
    for i, j in enumerate(testloader_list):
        test_all(i+1, j,inc)
    print("--"*40)
    return loss_curi
train_loss_all=[]
# Every model (one per training weighting) is evaluated on all 14 averaged test sets.
testloader_list= [ testloader_1,testloader_1_01,testloader_1_02,testloader_1_1, testloader_1_2, testloader_1_5,
testloader_2,testloader_3,testloader_4,testloader_5,testloader_6,testloader_7,testloader_8,testloader_9]
train_loss_all.append(train_all(trainloader_1, 1, testloader_list))
train_loss_all.append(train_all(trainloader_1_01, 101, testloader_list))
train_loss_all.append(train_all(trainloader_1_02, 102, testloader_list))
train_loss_all.append(train_all(trainloader_1_1, 11, testloader_list))
train_loss_all.append(train_all(trainloader_1_2, 12, testloader_list))
train_loss_all.append(train_all(trainloader_1_5, 15, testloader_list))
train_loss_all.append(train_all(trainloader_2, 2, testloader_list))
train_loss_all.append(train_all(trainloader_3, 3, testloader_list))
train_loss_all.append(train_all(trainloader_4, 4, testloader_list))
train_loss_all.append(train_all(trainloader_5, 5, testloader_list))
train_loss_all.append(train_all(trainloader_6, 6, testloader_list))
train_loss_all.append(train_all(trainloader_7, 7, testloader_list))
train_loss_all.append(train_all(trainloader_8, 8, testloader_list))
train_loss_all.append(train_all(trainloader_9, 9, testloader_list))
curve_lbl = ["1","1.01","1.02","1.1","1.2","1.5","2","3","4","5","6","7","8","9"]
# One training-loss curve per dataset, labelled by its foreground weighting.
for i,j in enumerate(train_loss_all):
    plt.plot(j,label ="dataset "+curve_lbl[i])
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig("mnist_direction_2.pdf")
plt.savefig("mnist_direction_2.png")
| [
"numpy.random.seed",
"torch.cat",
"numpy.linalg.svd",
"numpy.random.randint",
"numpy.linalg.norm",
"torchvision.transforms.Normalize",
"torch.no_grad",
"torch.utils.data.DataLoader",
"numpy.reshape",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.AvgPool2d",
"matplotlib.pyplot.legend",
"torch... | [((674, 707), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (697, 707), False, 'import warnings\n'), ((823, 916), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=True, download=True,\n transform=transform)\n", (849, 916), False, 'import torchvision\n'), ((925, 1019), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=False, download=True,\n transform=transform)\n", (951, 1019), False, 'import torchvision\n'), ((1413, 1444), 'numpy.reshape', 'np.reshape', (['train', '(60000, 784)'], {}), '(train, (60000, 784))\n', (1423, 1444), True, 'import numpy as np\n'), ((1468, 1502), 'numpy.linalg.svd', 'LA.svd', (['train'], {'full_matrices': '(False)'}), '(train, full_matrices=False)\n', (1474, 1502), True, 'from numpy import linalg as LA\n'), ((2064, 2098), 'numpy.reshape', 'np.reshape', (['train', '(60000, 28, 28)'], {}), '(train, (60000, 28, 28))\n', (2074, 2098), True, 'import numpy as np\n'), ((2171, 2201), 'numpy.reshape', 'np.reshape', (['test', '(10000, 784)'], {}), '(test, (10000, 784))\n', (2181, 2201), True, 'import numpy as np\n'), ((2669, 2702), 'numpy.reshape', 'np.reshape', (['test', '(10000, 28, 28)'], {}), '(test, (10000, 28, 28))\n', (2679, 2702), True, 'import numpy as np\n'), ((2821, 2887), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(10)', 'shuffle': '(True)'}), '(trainset, batch_size=10, shuffle=True)\n', (2848, 2887), False, 'import torch\n'), ((2901, 2967), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(10)', 'shuffle': '(False)'}), '(testset, batch_size=10, shuffle=False)\n', (2928, 2967), False, 'import torch\n'), ((3481, 3510), 
'torch.tensor', 'torch.tensor', (['foreground_data'], {}), '(foreground_data)\n', (3493, 3510), False, 'import torch\n'), ((3530, 3560), 'torch.tensor', 'torch.tensor', (['foreground_label'], {}), '(foreground_label)\n', (3542, 3560), False, 'import torch\n'), ((3579, 3608), 'torch.tensor', 'torch.tensor', (['background_data'], {}), '(background_data)\n', (3591, 3608), False, 'import torch\n'), ((3628, 3658), 'torch.tensor', 'torch.tensor', (['background_label'], {}), '(background_label)\n', (3640, 3658), False, 'import torch\n'), ((12942, 12997), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_1'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_1, batch_size=batch, shuffle=True)\n', (12952, 12997), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13090, 13148), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_1_01'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_1_01, batch_size=batch, shuffle=True)\n', (13100, 13148), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13242, 13300), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_1_02'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_1_02, batch_size=batch, shuffle=True)\n', (13252, 13300), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13389, 13446), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_1_1'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_1_1, batch_size=batch, shuffle=True)\n', (13399, 13446), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13535, 13592), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_1_2'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_1_2, batch_size=batch, shuffle=True)\n', (13545, 13592), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13681, 13738), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_1_5'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_1_5, 
batch_size=batch, shuffle=True)\n', (13691, 13738), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13813, 13868), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_1'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_1, batch_size=batch, shuffle=False)\n', (13823, 13868), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((13955, 14013), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_1_01'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_1_01, batch_size=batch, shuffle=False)\n', (13965, 14013), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14100, 14158), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_1_02'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_1_02, batch_size=batch, shuffle=False)\n', (14110, 14158), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14241, 14298), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_1_1'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_1_1, batch_size=batch, shuffle=False)\n', (14251, 14298), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14381, 14438), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_1_2'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_1_2, batch_size=batch, shuffle=False)\n', (14391, 14438), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14521, 14578), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_1_5'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_1_5, batch_size=batch, shuffle=False)\n', (14531, 14578), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14799, 14854), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_2'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_2, batch_size=batch, shuffle=True)\n', (14809, 14854), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14935, 14990), 'torch.utils.data.DataLoader', 'DataLoader', 
(['traindata_3'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_3, batch_size=batch, shuffle=True)\n', (14945, 14990), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((15071, 15126), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_4'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_4, batch_size=batch, shuffle=True)\n', (15081, 15126), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((15207, 15262), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_5'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_5, batch_size=batch, shuffle=True)\n', (15217, 15262), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((15343, 15398), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_6'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_6, batch_size=batch, shuffle=True)\n', (15353, 15398), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((15479, 15534), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_7'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_7, batch_size=batch, shuffle=True)\n', (15489, 15534), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((15615, 15670), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_8'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_8, batch_size=batch, shuffle=True)\n', (15625, 15670), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((15751, 15806), 'torch.utils.data.DataLoader', 'DataLoader', (['traindata_9'], {'batch_size': 'batch', 'shuffle': '(True)'}), '(traindata_9, batch_size=batch, shuffle=True)\n', (15761, 15806), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16015, 16070), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_2'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_2, batch_size=batch, shuffle=False)\n', (16025, 16070), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16145, 
16200), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_3'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_3, batch_size=batch, shuffle=False)\n', (16155, 16200), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16275, 16330), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_4'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_4, batch_size=batch, shuffle=False)\n', (16285, 16330), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16405, 16460), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_5'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_5, batch_size=batch, shuffle=False)\n', (16415, 16460), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16535, 16590), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_6'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_6, batch_size=batch, shuffle=False)\n', (16545, 16590), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16665, 16720), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_7'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_7, batch_size=batch, shuffle=False)\n', (16675, 16720), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16795, 16850), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_8'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_8, batch_size=batch, shuffle=False)\n', (16805, 16850), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16925, 16980), 'torch.utils.data.DataLoader', 'DataLoader', (['testdata_9'], {'batch_size': 'batch', 'shuffle': '(False)'}), '(testdata_9, batch_size=batch, shuffle=False)\n', (16935, 16980), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((24968, 24988), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (24978, 24988), True, 'from matplotlib import pyplot as plt\n'), ((24989, 25016), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Training_loss"""'], {}), "('Training_loss')\n", (24999, 25016), True, 'from matplotlib import pyplot as plt\n'), ((25018, 25072), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)'}), "(loc='center left', bbox_to_anchor=(1, 0.5))\n", (25028, 25072), True, 'from matplotlib import pyplot as plt\n'), ((25073, 25109), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""mnist_direction_2.pdf"""'], {}), "('mnist_direction_2.pdf')\n", (25084, 25109), True, 'from matplotlib import pyplot as plt\n'), ((25110, 25146), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""mnist_direction_2.png"""'], {}), "('mnist_direction_2.png')\n", (25121, 25146), True, 'from matplotlib import pyplot as plt\n'), ((4344, 4367), 'torch.stack', 'torch.stack', (['image_list'], {}), '(image_list)\n', (4355, 4367), False, 'import torch\n'), ((4822, 4839), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (4836, 4839), True, 'import numpy as np\n'), ((4851, 4881), 'numpy.random.randint', 'np.random.randint', (['(0)', '(35000)', '(8)'], {}), '(0, 35000, 8)\n', (4868, 4881), True, 'import numpy as np\n'), ((4942, 4969), 'numpy.random.randint', 'np.random.randint', (['(0)', '(15000)'], {}), '(0, 15000)\n', (4959, 4969), True, 'import numpy as np\n'), ((5023, 5046), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (5040, 5046), True, 'import numpy as np\n'), ((5571, 5596), 'numpy.random.seed', 'np.random.seed', (['(i + 30000)'], {}), '(i + 30000)\n', (5585, 5596), True, 'import numpy as np\n'), ((5606, 5636), 'numpy.random.randint', 'np.random.randint', (['(0)', '(35000)', '(8)'], {}), '(0, 35000, 8)\n', (5623, 5636), True, 'import numpy as np\n'), ((5646, 5673), 'numpy.random.randint', 'np.random.randint', (['(0)', '(15000)'], {}), '(0, 15000)\n', (5663, 5673), True, 'import numpy as np\n'), ((5680, 5703), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (5697, 
5703), True, 'import numpy as np\n'), ((7350, 7387), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (7358, 7387), True, 'import numpy as np\n'), ((21499, 21520), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (21518, 21520), True, 'import torch.nn as nn\n'), ((24919, 24963), 'matplotlib.pyplot.plot', 'plt.plot', (['j'], {'label': "('dataset ' + curve_lbl[i])"}), "(j, label='dataset ' + curve_lbl[i])\n", (24927, 24963), True, 'from matplotlib import pyplot as plt\n'), ((746, 767), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (765, 767), False, 'from torchvision import transforms, utils\n'), ((774, 804), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (794, 804), False, 'from torchvision import transforms, utils\n'), ((6435, 6477), 'torch.zeros', 'torch.zeros', (['[28, 28]'], {'dtype': 'torch.float64'}), '([28, 28], dtype=torch.float64)\n', (6446, 6477), False, 'import torch\n'), ((7429, 7471), 'torch.zeros', 'torch.zeros', (['[28, 28]'], {'dtype': 'torch.float64'}), '([28, 28], dtype=torch.float64)\n', (7440, 7471), False, 'import torch\n'), ((7476, 7518), 'numpy.random.seed', 'np.random.seed', (['(dataset_number * 10000 + i)'], {}), '(dataset_number * 10000 + i)\n', (7490, 7518), True, 'import numpy as np\n'), ((17248, 17309), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.inp_ch', 'self.f', 'k'], {'stride': 's', 'padding': 'self.pad'}), '(self.inp_ch, self.f, k, stride=s, padding=self.pad)\n', (17257, 17309), True, 'import torch.nn as nn\n'), ((17324, 17373), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.f'], {'track_running_stats': '(False)'}), '(self.f, track_running_stats=False)\n', (17338, 17373), True, 'import torch.nn as nn\n'), ((17392, 17401), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17399, 17401), True, 'import torch.nn as nn\n'), ((18114, 18140), 'torch.cat', 'torch.cat', (['(x1, x3)'], 
{'dim': '(1)'}), '((x1, x3), dim=1)\n', (18123, 18140), False, 'import torch\n'), ((18419, 18455), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3)'], {'stride': '(2)', 'padding': '(0)'}), '(3, stride=2, padding=0)\n', (18431, 18455), True, 'import torch.nn as nn\n'), ((18593, 18619), 'torch.cat', 'torch.cat', (['(x1, x2)'], {'dim': '(1)'}), '((x1, x2), dim=1)\n', (18602, 18619), False, 'import torch\n'), ((19382, 19397), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(5)'], {}), '(5)\n', (19394, 19397), True, 'import torch.nn as nn\n'), ((19447, 19464), 'torch.nn.Linear', 'nn.Linear', (['(236)', '(3)'], {}), '(236, 3)\n', (19456, 19464), True, 'import torch.nn as nn\n'), ((20376, 20391), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20389, 20391), False, 'import torch\n'), ((20743, 20758), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20756, 20758), False, 'import torch\n'), ((23005, 23020), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (23018, 23020), False, 'import torch\n'), ((20996, 21022), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (21005, 21022), False, 'import torch\n'), ((23216, 23242), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (23225, 23242), False, 'import torch\n'), ((1751, 1768), 'numpy.linalg.norm', 'LA.norm', (['train[i]'], {}), '(train[i])\n', (1758, 1768), True, 'from numpy import linalg as LA\n'), ((1865, 1882), 'numpy.linalg.norm', 'LA.norm', (['train[i]'], {}), '(train[i])\n', (1872, 1882), True, 'from numpy import linalg as LA\n'), ((1957, 1974), 'numpy.linalg.norm', 'LA.norm', (['train[i]'], {}), '(train[i])\n', (1964, 1974), True, 'from numpy import linalg as LA\n'), ((2366, 2382), 'numpy.linalg.norm', 'LA.norm', (['test[i]'], {}), '(test[i])\n', (2373, 2382), True, 'from numpy import linalg as LA\n'), ((2477, 2493), 'numpy.linalg.norm', 'LA.norm', (['test[i]'], {}), '(test[i])\n', (2484, 2493), True, 'from numpy import linalg as LA\n'), ((2566, 2582), 
'numpy.linalg.norm', 'LA.norm', (['test[i]'], {}), '(test[i])\n', (2573, 2582), True, 'from numpy import linalg as LA\n')] |
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from prediction.LR_Regression import *
import numpy as np
# Command-line interface (option names, defaults and help text preserved).
arg_parser = argparse.ArgumentParser(description='LR Regression')
arg_parser.add_argument('--input', type=str, help='xlsx input', required=True)
arg_parser.add_argument('--export_plot', type=int, help='to export plot', default=0, required=False)
arg_parser.add_argument('--prior', type=int, default=0, help='0: no prior, 1: conditional, 2: weighted, 3: prior only', required=False)
arg_parser.add_argument('--weight', type=float, default=0.5, help='when prior==1, the weight of prior', required=False)
args = arg_parser.parse_args()
print(args)

# Load the normalized feature matrix (op_value = 2) plus labels and prices.
df_feature, df_label, df_price = getFeature(args.input, op_price=False, op_value=2)
all_feature = np.array(df_feature)            # shape: (n_samples, n_features)
all_label = np.array(df_label, dtype=int)     # shape: (n_samples, 1)
all_price = np.array(df_price)                # shape: (n_samples, 1)

# Ten randomized 80/20 cross-validation splits, seeded for reproducibility.
l_result = []
for seed in range(10):
    id_train, id_test = split_data(len(df_feature), seed=seed, ratio=0.8)
    train_feature, test_feature = all_feature[id_train], all_feature[id_test]
    train_label, test_label = all_label[id_train], all_label[id_test]
    train_price, test_price = all_price[id_train], all_price[id_test]
    # 1. fit the price-regression model G(x) -> s*
    reg_model = PriceRegression(train_feature, train_price)
    # 2. train and test the win-prediction classifier on this split
    l_result.append(LR_WinPrediction(
        reg_model,
        train_feature, train_label, train_price,
        test_feature, test_label, test_price,
        weight=args.weight, op_prior=args.prior,
        op_diff=0.1, n_bins=12, op_plot=args.export_plot))

print("###############################################################")
print('##lr_classification\top_prior\tweight\taccuracy')
print(f"\t{args.prior}\t{args.weight}\t{np.mean(l_result)}\t{np.std(l_result)}")
| [
"numpy.std",
"numpy.mean",
"numpy.array",
"argparse.ArgumentParser"
] | [((660, 712), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""LR Regression"""'}), "(description='LR Regression')\n", (683, 712), False, 'import argparse\n'), ((1320, 1340), 'numpy.array', 'np.array', (['df_feature'], {}), '(df_feature)\n', (1328, 1340), True, 'import numpy as np\n'), ((1366, 1395), 'numpy.array', 'np.array', (['df_label'], {'dtype': 'int'}), '(df_label, dtype=int)\n', (1374, 1395), True, 'import numpy as np\n'), ((1420, 1438), 'numpy.array', 'np.array', (['df_price'], {}), '(df_price)\n', (1428, 1438), True, 'import numpy as np\n'), ((2532, 2548), 'numpy.std', 'np.std', (['l_result'], {}), '(l_result)\n', (2538, 2548), True, 'import numpy as np\n'), ((2500, 2517), 'numpy.mean', 'np.mean', (['l_result'], {}), '(l_result)\n', (2507, 2517), True, 'import numpy as np\n')] |
import os
import subprocess
import warnings
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import spearmanr
from warnings import filterwarnings
from lpocv.lpocv import LeavePairOut
from options_parser import arguments
from datasets import brca_data
# OptionParser
options, args = arguments()
# Breast cancer data
dfc = brca_data()
# Random forest hyper-parameters, read from a two-column TSV ("parameter",
# "value") named by --params.
##########################
dfparam = pd.read_csv(options.params, sep="\t")
param_name = dfparam["parameter"].tolist()
value = dfparam["value"].tolist()
# Bind each parameter as a module-level name (n_estimators, max_depth,
# random_state, ...) used by the functions below.  The original used
# exec("%s=%d" % ...), which executes file-derived text (injection-prone)
# and silently truncates floats; assigning through globals() keeps the same
# names and the same int truncation without exec.
for _param, _value in zip(param_name, value):
    globals()[_param] = int(_value)
# AUC random forest regressor
def auc_random_forest(df):
    '''
    Estimate model accuracy with leave-pair-out cross-validation.

    Parameters:
    ##########
    df: pandas dataframe, shape=(rows, columns)
        rows: features (genes)
        columns: samples + growth rate metrics
        must contain a column METRIC (e.g. "GR_AOC", "GR_max", "GR50", etc.) and its coressponding "sigma_METRIC"
    Returns:
    ########
    auc: float
        Estimate of model accuracy (fraction of correctly ordered pairs).
    dfout: pandas dataframe
        Measured and predicted values in each cross-validation fold.
    '''
    X = df.drop(columns=[options.metric, "sigma_%s"%options.metric]).values
    y = df[options.metric].values
    cell_line = dfc.index.tolist()
    yerr = df["sigma_%s"%options.metric].values
    auc = 0
    itr = 0
    records = []
    for train, test in LeavePairOut().split(X, y, 2.0*yerr, num_pairs=options.complexity):
        random_forest = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth, random_state=random_state)
        random_forest.fit(X[train], y[train])
        ypred = random_forest.predict(X[test])
        itr += 1
        # BUG FIX: the original logged cell_line[0]/cell_line[1] (always the
        # first two cell lines) instead of the pair actually held out.
        records.append(["cv_iteration%s"%itr, cell_line[test[0]], y[test[0]], ypred[0]])
        records.append(["cv_iteration%s"%itr, cell_line[test[1]], y[test[1]], ypred[1]])
        # Count the pair as correct when the predicted ordering matches the
        # measured ordering.
        if (ypred[0]-ypred[1])*(y[test[0]]-y[test[1]]) > 0:
            auc += 1
    # Build the output frame once; repeated pd.concat inside the loop is
    # quadratic in the number of folds.
    dfout = pd.DataFrame(records, columns=["CV-fold", "cell_line", "measured", "predicted"])
    print("Evaluated %s pairs using leave-pair-out cross-validation."%itr)
    auc = float(auc/itr) if itr > 0 else np.nan
    return auc, dfout
def feature_importance(df):
    '''
    Rank input features by random-forest importance.

    Parameters:
    ###########
    df: pandas dataframe, shape=(rows,columns)
        rows: features (genes)
        columns: samples + growth rate metrics
        must contain a column METRIC (e.g. "GR_AOC", "GR_max", "GR50", etc.) and its coressponding "sigma_METRIC"
    Returns:
    ########
    dfimp: pandas dataframe
        One row per feature (sorted by importance), with columns: feature
        importance, plus Spearman rho/p-value between each feature and the
        growth-rate metric across all cells.
    '''
    filterwarnings("ignore")
    drop_cols = [options.metric, "sigma_%s" % options.metric]
    X = df.drop(columns=drop_cols).values
    y = df[options.metric].values
    forest = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth,
                                   random_state=random_state)
    forest.fit(X, y)
    importances = forest.feature_importances_
    order = np.argsort(importances)[::-1]
    labels = df.drop(columns=drop_cols).columns
    dfimp = pd.DataFrame(list(zip(labels[order], importances[order])),
                         columns=['features', options.drug])
    dfimp.index = dfimp.features.tolist()
    dfimp["spearman_rho"] = [None] * dfimp.shape[0]
    dfimp["spearman_pval"] = [None] * dfimp.shape[0]
    # Correlate each feature with the metric across all cell lines.
    for gene in dfimp.index:
        rho, pval = spearmanr(df[gene], df[options.metric])
        dfimp.loc[gene, "spearman_rho"] = rho
        dfimp.loc[gene, "spearman_pval"] = pval
    return dfimp
#####################################################
# Dispatch on the requested prediction mode.
if options.prediction_type=="predict_genes":
    print ("Predicting drivers for %s (%s cell lines)."%(options.drug, dfc.shape[0]))
    dfimp = feature_importance(dfc)
    dfimp.to_csv("%s/%s_imp.csv"%(options.output, options.drug), index=False)
elif options.prediction_type=="estimate_accuracy":
    msg = "Estimating accuracy of Random Forest model for %s (%s cell lines) from %s genes"
    print (msg%(options.drug, dfc.shape[0], dfc.shape[1]))
    auc, dfout = auc_random_forest(dfc)
    dfout.to_csv("%s/%s_rfr.csv"%(options.output, options.drug), index=False)
    # FIX: removed the redundant outFile.close() that followed this block —
    # the with-statement already closes the file.
    with open("%s/%s_rfr.txt"%(options.output, options.drug),"w") as outFile:
        outFile.write(str(auc))
else:
    print("Invalid prediction type")
| [
"pandas.DataFrame",
"datasets.brca_data",
"warnings.filterwarnings",
"pandas.read_csv",
"scipy.stats.spearmanr",
"sklearn.ensemble.RandomForestRegressor",
"numpy.argsort",
"lpocv.lpocv.LeavePairOut",
"options_parser.arguments",
"pandas.concat"
] | [((342, 353), 'options_parser.arguments', 'arguments', ([], {}), '()\n', (351, 353), False, 'from options_parser import arguments\n'), ((382, 393), 'datasets.brca_data', 'brca_data', ([], {}), '()\n', (391, 393), False, 'from datasets import brca_data\n'), ((459, 496), 'pandas.read_csv', 'pd.read_csv', (['options.params'], {'sep': '"""\t"""'}), "(options.params, sep='\\t')\n", (470, 496), True, 'import pandas as pd\n'), ((1391, 1405), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1403, 1405), True, 'import pandas as pd\n'), ((3058, 3082), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (3072, 3082), False, 'from warnings import filterwarnings\n'), ((3213, 3313), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': 'n_estimators', 'max_depth': 'max_depth', 'random_state': 'random_state'}), '(n_estimators=n_estimators, max_depth=max_depth,\n random_state=random_state)\n', (3234, 3313), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1521, 1621), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': 'n_estimators', 'max_depth': 'max_depth', 'random_state': 'random_state'}), '(n_estimators=n_estimators, max_depth=max_depth,\n random_state=random_state)\n', (1542, 1621), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1738, 1883), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[['cv_iteration%s' % itr, cell_line[0], y[test[0]], ypred[0]]]", 'columns': "['CV-fold', 'cell_line', 'measured', 'predicted']"}), "(data=[['cv_iteration%s' % itr, cell_line[0], y[test[0]], ypred\n [0]]], columns=['CV-fold', 'cell_line', 'measured', 'predicted'])\n", (1750, 1883), True, 'import pandas as pd\n'), ((1914, 2059), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[['cv_iteration%s' % itr, cell_line[1], y[test[1]], ypred[1]]]", 'columns': "['CV-fold', 'cell_line', 'measured', 'predicted']"}), "(data=[['cv_iteration%s' % itr, 
cell_line[1], y[test[1]], ypred\n [1]]], columns=['CV-fold', 'cell_line', 'measured', 'predicted'])\n", (1926, 2059), True, 'import pandas as pd\n'), ((2095, 2137), 'pandas.concat', 'pd.concat', (['[dfout, df0]'], {'ignore_index': '(True)'}), '([dfout, df0], ignore_index=True)\n', (2104, 2137), True, 'import pandas as pd\n'), ((2154, 2196), 'pandas.concat', 'pd.concat', (['[dfout, df1]'], {'ignore_index': '(True)'}), '([dfout, df1], ignore_index=True)\n', (2163, 2196), True, 'import pandas as pd\n'), ((3466, 3489), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (3476, 3489), True, 'import numpy as np\n'), ((3925, 3967), 'scipy.stats.spearmanr', 'spearmanr', (['df[feature]', 'df[options.metric]'], {}), '(df[feature], df[options.metric])\n', (3934, 3967), False, 'from scipy.stats import spearmanr\n'), ((1429, 1443), 'lpocv.lpocv.LeavePairOut', 'LeavePairOut', ([], {}), '()\n', (1441, 1443), False, 'from lpocv.lpocv import LeavePairOut\n')] |
from . import transformations
from . import scorecard
from .. import models
import numpy as np
from matplotlib import pyplot, use
from skimage.graph import route_through_array, shortest_path
import math
import simplification
from simplification.cutil import simplify_coords_idx
# from pathfinding.core.diagonal_movement import DiagonalMovement
# from pathfinding.core.grid import Grid
# from pathfinding.finder.a_star import AStarFinder
# from pathfinding.finder.dijkstra import DijkstraFinder
# from pathfinding.finder.best_first import BestFirst
# from pathfinding.finder.ida_star import IDAStarFinder
# from pathfinding.finder.bi_a_star import BiAStarFinder
from . import astar
from . import dffinding
from typing import List, Tuple, Union
import copy
import pyastar
import datetime
from numba import int32, float32
from numba.typed import List
class Level:
def __init__(self, name, transform : Union[tuple, None] = None, data: list = [], model : Union[None, models.Level] = None, gamemode: str = ""):
self.raw_data = data
if transform:
self.transform = transformations.GridTransform(transform[0], transform[1], transform[2], transform[3])
self._data = None
self._elevation = None
self._costs = None
self._df = None
self._feature = None
self.costs_canvas = None
self.costs_preview = None
self.name = name
self.grid = None
if model:
self.project_id = model.project_id
else:
self.project_id = 0
# self.model = model
self.dffinder = None
self.mdf = None
self.gamemode = gamemode
# self.loaded_resources = False
def array_loaded(self, array: Union[np.ndarray, None]) -> bool:
return not isinstance(array, type(None))
@property
def model(self) -> models.Level:
# print(self.name, self.project_id)
# levels = models.Level.objects.filter(name = self.name)
# print(levels)
return models.Level.objects.filter(name = self.name, project_id = self.project_id).first()
def load_resources(self, data_type: int = 0):
path = self.model.relative_path
if data_type == 0:
with open(f'{path}/data.npy', 'rb') as f:
self._data = np.load(f)
print("Succesfully imported costs with shape: ", self._costs.shape)
elif data_type == 2:
with open(f'{path}/costs.npy', 'rb') as f:
self._costs = np.load(f)
self._costs = self._costs.astype(np.float32)
elif data_type == 1:
with open(f'{path}/elevation.npy', 'rb') as f:
self._elevation = np.load(f)
elif data_type == 3:
if self.model.has_distance_field:
with open(f'{path}/df.npy', 'rb') as f:
self._df = np.load(f)
elif data_type == 4:
try:
with open(f'{path}/feature.npy', 'rb') as f:
self._feature = np.load(f)
except:
self._feature = np.zeros(self._elevation.shape, dtype=np.int32)
@property
def data(self):
if self.array_loaded(self._data):
return self._data
else:
self.load_resources(0)
return self._data
@data.setter
def data(self, new_data_array: np.ndarray):
self._data = new_data_array
@property
def elevation(self):
if self.array_loaded(self._elevation):
return self._elevation
else:
self.load_resources(1)
return self._elevation
@elevation.setter
def elevation(self, new_elevation_array: np.ndarray):
self._elevation = new_elevation_array
@property
def costs(self):
if self.array_loaded(self._costs):
return self._costs
else:
self.load_resources(2)
return self._costs
@costs.setter
def costs(self, new_costs_array: np.ndarray):
self._costs = new_costs_array
@property
def df(self):
if self.array_loaded(self._df):
return self._df
else:
self.load_resources(3)
return self._df
@df.setter
def df(self, new_df_array: np.ndarray):
self._df = new_df_array
@property
def feature(self):
if self.array_loaded(self._feature):
return self._feature
else:
self.load_resources(4)
return self._feature
@feature.setter
def feature(self, new_feature_array: np.ndarray):
self._feature = new_feature_array
def create_dffinder(self):
# self.mdf = np.power(self.df, 0.2)
# self.mdf = np.max(self.mdf[1]) - self.mdf
# self.mdf = np.power(self.mdf, 4.0)
# self.mdf = self.mdf.astype(np.float32)
elevation = self.elevation.astype(np.float32)
print(self.model)
self.dffinder = dffinding.DFFinder(self.costs, elevation, self.feature, self.get_recorded_path(), self.model.distance_field_threshold)
def generate_distance_fields(self):
if type(self.df) == type(None) :
self.df = np.zeros(self.elevation.shape, dtype=np.float32)
if self.df.shape != self.elevation.shape:
self.df = np.zeros(self.elevation.shape, dtype=np.float32)
# scorecard.bruteforce_generate_distancefields(self.elevation, self.df, tuple((*self.transform.min_point,)), tuple((*self.transform.max_point,)))
scorecard.approximate_distance_fields(self.elevation, self.df, 0.5)
self.model.has_distance_field = True
self.model.save()
    def classify_costs(self, elevation_based : bool = False, elevation_alpha_power : float = 0.05, elevation_alpha_beta : float = 1.5, elevation_alpha_beta_power : float = 7.0, just_paths = False, use_df = False):
        """Score the level's cost grid, then stamp recorded paths into it.

        Runs the scorecard over the whole level (skipped when just_paths is
        True), then for every recorded path point overwrites elevation in a
        small radius and raises the surrounding costs based on a remapped
        elevation (or distance-field, when use_df) alpha, and finally
        re-scores with the just-paths flag set.
        """
        if not just_paths:
            scorecard.Scorecard.score(self.model, self.transform, self.data, self.elevation, self.df, self.costs, elevation_based, elevation_alpha_power, elevation_alpha_beta, elevation_alpha_beta_power, use_df=use_df)
        if isinstance(self.model.recorded_paths, list):
            for idx, path in enumerate(self.model.recorded_paths):
                level = path['layer']
                # When use_df, alpha comes from the distance field instead of
                # the raw elevation.
                array_to_use = self.elevation
                if use_df:
                    array_to_use = self.df
                min_elevation = np.min(array_to_use[level])
                max_elevation = np.max(array_to_use[level])
                x = path['x']
                y = path['y']
                p = 1
                if idx > 0:
                    # Radius derives from the distance to the previous path
                    # point (math.pow(..., 1) is a no-op left from tuning).
                    p = math.floor(math.pow(scorecard.distance(x,y,1, self.model.recorded_paths[idx-1]['x'], self.model.recorded_paths[idx-1]['y'], 1), 1))
                    if p > 10:
                        p = 2
                # print("Using radius:", p)
                for ox in range(-p, p):
                    for oy in range(-p, p):
                        ix = x + ox
                        iy = y + oy
                        # NOTE(review): this write happens BEFORE the bounds
                        # check below, so points near the grid edge may raise
                        # IndexError — confirm whether that is intended.
                        self.elevation[level][ix][iy] = path['elevation']
                        if ix < self.elevation[level].shape[0] and iy < self.elevation[level].shape[1]:
                            if array_to_use[level][x][y] > 0:
                                elevation_alpha = scorecard.remap(array_to_use[level][ix][iy], min_elevation, max_elevation, 0.0, 1.0)
                                elevation_alpha = math.pow(math.pow(elevation_alpha, elevation_alpha_power)*elevation_alpha_beta, elevation_alpha_beta_power)*10
                                elevation_value = scorecard.remap(elevation_alpha, 0.0, 1.0, min_elevation, max_elevation)
                                self.costs[level][ix][iy] = 1 + max(elevation_value*0.25, 1.0)
        scorecard.Scorecard.score(self.model, self.transform, self.data, self.elevation, self.df, self.costs, elevation_based, elevation_alpha_power, elevation_alpha_beta, elevation_alpha_beta_power, True, use_df=use_df)
def get_best_navmesh_level(self, position : Tuple[int, int, float]):
d = math.inf
l = 1
for level in range(self.elevation.shape[0]):
if position[0] >= 0 and position[0] < self.elevation.shape[1] and position[1] >= 0 and position[1] < self.elevation.shape[2]:
c = abs(self.elevation[level][position[0]][position[1]] - position[2])
if c < d:
d = c
l = level
if l > 9:
l = min(9, l)
return l
# Call this after initialising Level
def pre_process_data(self, layers : int = 10):
_width = self.transform.width+1
_height = self.transform.height + 1
print("Creating arrays with size: ", _width, _height)
self.data = np.zeros((layers, _width, _height), dtype=np.float32)
self.elevation = np.zeros((layers, _width, _height), dtype=np.float32)
self.costs = np.zeros((layers, _width, _height), dtype=np.float32)
self.costs_canvas = np.zeros((layers, _width, _height), dtype=np.float32)
self.df = np.zeros((layers, _width, _height), dtype=np.float32)
self.feature = np.zeros((layers, _width, _height), dtype=np.int32)
def sensecheck(self):
try:
print(self.transform.width)
print(self.transform.height)
if self.data.shape[1] < self.transform.width+1:
print("Sensecheck failed, recreating array.")
self.pre_process_data()
except:
print("No array found, reinitialising.")
self.pre_process_data()
    def process_data(self):
        """Copy raw_data into the data/elevation arrays, score the level and
        save a preview image of the cost grid.

        NOTE(review): indices are transposed on write
        (data[y][x] <- raw_data[x][y]) and the preview filename is hard-coded
        to wake_island — confirm both are intended.
        """
        print("Processing data")
        _width = len(self.raw_data)
        _height = len(self.raw_data[0])
        for x in range(_width):
            for y in range(_height):
                # raw_data cells look like (data_value, elevation_value) pairs.
                self.data[y][x] = self.raw_data[x][y][0]
                self.elevation[y][x] = self.raw_data[x][y][1]
        scorecard.Scorecard.score(self.model, self.transform, self.data, self.elevation, self.costs)
        fig = pyplot.figure(frameon=False)
        img = pyplot.imshow(self.costs)
        pyplot.axis("off")
        pyplot.savefig("./wake_island_cstf.png", bbox_inches='tight')
        self.post_process_data()
def set_elevation_at(self, value : float, index_x : int, index_y : int, level : int = 0):
try:
self.elevation[level][index_y-1][index_x-1] = value
except Exception as e:
print(index_x-1, index_y-1)
raise(e)
def set_data_at(self, value : int, index_x : int, index_y : int, level : int = 0):
try:
self.data[level][index_y-1][index_x-1] = value
except Exception as e:
print(level, index_x-1, index_y-1)
raise(e)
def set_df_at(self, value : float, index_x : int, index_y : int, level : int = 0):
try:
self.df[level][index_y-1][index_x-1] = value
except Exception as e:
print(level, index_x-1, index_y-1)
raise(e)
def set_feature_at(self, value : int, index_x : int, index_y : int, level : int = 0):
try:
self.feature[level][index_y-1][index_x-1] = value
except Exception as e:
print(level, index_x-1, index_y-1)
raise(e)
def post_process_data(self):
# scorecard.Scorecard.score(self.data, self.costs)
if not self.grid:
# self.grid = Grid(matrix = self.costs)
self.transform.width = self.costs.shape[0]
self.transform.height = self.costs.shape[1]
    def get_valid_point_in_radius(self, arr : np.ndarray, x: int, y : int, radius: float = 10.0, level = 0) -> list:
        """Return a traversable grid point near (x, y).

        NOTE(review): the unconditional ``return [x, y]`` on the first line
        short-circuits the method, so all the snapping logic below is
        currently dead code — callers always get the input point back.
        Presumably deliberately disabled; confirm before removing.
        """
        return [x, y]
        # --- unreachable from here down (kept as-is, see note above) ---
        if type(self.mdf)!=type(None) and self.dffinder:
            pos = (int32(x), int32(y), int32(level))
            pos = self.dffinder.ensure_point_valid(pos)
            # max_tries = 900
            # tries = 0
            # while (not self.dffinder._is_within_threshold(pos)) and tries < max_tries:
            #     for i in range(-5, 5):
            #         for j in range(-5, 5):
            #             pos = (int32(pos[0]+i), int32(pos[1]+j), pos[2])
            #     tries += 1
            return (pos[0], pos[1])
        if arr[0][x][y] == 1.0:
            return (x,y)
        # Spiral outwards over the 8 neighbours at growing radius, keeping
        # the candidate with the smallest positive (arr - elevation) score.
        offsets = [(-1, 0), (1, 0), (-1, -1), (1, -1), (-1, 1), (1, 1), (0, -1), (0, 1)]
        found = False
        # final
        final_pos = [x,y]
        min_s = math.inf
        for g in range(1, int(radius)):
            for offset in offsets:
                i = y+(offset[1]*g)
                j = x+(offset[0]*g)
                score = arr[0][j][i]-self.elevation[0][j][i]
                if arr[0][j][i] != np.inf and score < min_s and score > 0.0:
                    found = True
                    final_pos = [j, i]
                    min_s = score
            # if found:
            #     break
        return final_pos
# def find_path(self, start: tuple, end : tuple) -> list:
#
# self.grid.cleanup()
# start = self.grid.node(start[0], start[1])
# end = self.grid.node(end[0], end[1])
# finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
# path, runs = finder.find_path(start, end, self.grid)
# path = [(p[1], p[0]) for p in path]
# return path
    def find_path_safe(self, start: tuple, end: tuple, level : int = 0, target_level : int = 0, only_land = False) -> list:
        """Find a grid path from start to end, preferring the DF finder.

        Returns (path, used_dffinder): path is a list of grid points
        (simplified when produced by the DF finder) and used_dffinder tells
        the caller which backend produced the coordinates.
        """
        path = []
        used_dffinder = False
        # Lazily build the distance-field finder on first use.
        if not self.dffinder:
            self.create_dffinder()
        if self.dffinder:
            path = self.dffinder.find((int32(start[0]), int32(start[1]), int32(level)), (int32(end[0]), int32(end[1]), int32(target_level)), only_land)
            used_dffinder = True
            if len(path) > 0:
                # Thin out the raw path with simplify_coords_idx
                # (epsilon=100) to reduce waypoint count.
                p = np.array(path)
                p_simplified_coords_idx = simplify_coords_idx(p, 100)
                p_simplified = p[p_simplified_coords_idx]
                path = list(p_simplified)
        # Fallback: plain A* on the requested level's cost grid when the DF
        # finder could not be built.
        if not self.dffinder:
            path = pyastar.astar_path(self.costs[level], start, end, allow_diagonal=True)
            used_dffinder = False
        # print("Finding at: ", (int32(start[0]), int32(start[1]), int32(level)), (int32(end[0]), int32(end[1]), int32(target_level)), used_dffinder)
        return path, used_dffinder
def get_cost_to(self, start : Tuple[int, int, int], end : Tuple[int, int, int]):
start_v = self.get_valid_point_in_radius(self.costs, start[0], start[1], 5)
end_v = self.get_valid_point_in_radius(self.costs, end[0], end[1], 5)
start = (start_v[0], start_v[1], start[2])
end = (end_v[0], end_v[1], end[2])
if self.dffinder:
return self.dffinder.get_direction_cost(start, end, 1)
return 0.0
    def astar(self, start : tuple, end : tuple, safe=True , all : bool = False, elevation : Union[float, None] = None, target_elevation : Union[float, None] = 0.0, recurse_depth : int = 0,
                only_land : bool = False, use_base_level: bool = False, return_raw_path: bool = False, use_single_level = False, single_level = 0) -> list:
        """Plan a path from *start* to *end* and return world-space waypoints.

        Picks start/target navmesh layers (from elevations, the base layer,
        or a forced single layer), runs find_path_safe, then converts grid
        points to a list of {"x", "y", "z"} dicts via the level transform.
        Returns [] when either endpoint is outside the map, or the raw grid
        path when return_raw_path is True.
        """
        # Choose layers: elevation-based lookup unless the caller forces the
        # base layer or a specific single layer.
        if not use_base_level:
            best_level = self.get_best_navmesh_level((start[0], start[1], elevation))
            target_best_level = self.get_best_navmesh_level((end[0], end[1], target_elevation))
        else:
            best_level = 0
            target_best_level = 0
        if use_single_level:
            best_level = single_level
            target_best_level = single_level
        # Bounds check both endpoints before planning.
        if (start[0] > 0 and start[0] < self.costs.shape[1] and start[1] > 0 and start[1] < self.costs.shape[2]
            and end[0] > 0 and end[0] < self.costs.shape[1] and end[1] > 0 and end[1] < self.costs.shape[2]):
            path, udffinder = self.find_path_safe(
                self.get_valid_point_in_radius(self.costs, start[0], start[1], 5),
                self.get_valid_point_in_radius(self.costs, end[0], end[1], 5), best_level, target_best_level, only_land)
        else:
            print("Outside of map", start, end)
            return []
        if return_raw_path:
            return list(path)
        world_paths = []
        if type(path) != type(None):
            for idx, p in enumerate(path):
                # Grid points are (row, col[, layer]); transform expects
                # (col, row).
                wxy = self.transform.transform_to_world((int32(p[1]), int32(p[0])))
                if self.dffinder and udffinder:
                    wxy = self.transform.transform_to_world((int32(p[1]), int32(p[0])))
                    y = float(self.elevation[p[2]][p[0]][p[1]])
                    # Feature flag 1 forces the base-layer elevation —
                    # presumably bridges/overpasses; confirm.
                    if self.feature[p[2]][p[0]][p[1]] == 1:
                        y = float(self.elevation[0][p[0]][p[1]])
                    world_paths.append({
                        "x": wxy[0],
                        "y": y,
                        "z": wxy[1]
                    })
                else:
                    world_paths.append({
                        "x": wxy[0],
                        "y": float(self.elevation[best_level][p[0]][p[1]]+1),
                        "z": wxy[1]
                    })
        if self.dffinder:
            # DF finder emits the path end-to-start; flip to travel order.
            world_paths.reverse()
        if not self.dffinder:
            # Fallback planner only: on a large elevation jump, re-plan the
            # remainder once (bounded by recurse_depth).
            for idx, wp in enumerate(world_paths):
                if idx > 1 and idx+4 < len(path)-1:
                    if abs(world_paths[idx+4]['y']-world_paths[idx-1]['y']) > 5.0 and recurse_depth < 1:
                        print("finding depth path")
                        world_paths[idx:] = self.astar(path[idx+4], end, elevation=elevation, recurse_depth = recurse_depth+1)
                        break
        else:
            None
        return world_paths#[0:128]# [0: min((len(world_paths)-1), 20)]
def export(self):
file_name = "./exports/<context>"+datetime.datetime.now().strftime("%d-%m-%Y")
np.save(file_name.replace("<context>", "costs"), self.costs)
np.save(file_name.replace("<context>", "costs_canvas"), self.costs_canvas)
np.save(file_name.replace("<context>", "data"), self.data)
np.save(file_name.replace("<context>", "elevation"), self.elevation)
def get_recorded_path(self): # -> List[Tuple[int, int, int]]:
path = List()
if len(self.model.recorded_paths) == 0:
path.append((-1, -1, -1))
for p in self.model.recorded_paths:
path.append((
p['x'], p['y'], p['layer']
))
return path
def import_data(self, data_name: str, data_type: str):
with open(f"./exports/{data_name}", "rb") as f:
if data_type == "costs":
self.costs = np.load(f)
#scorecard.flip_scorecard(np.load(f), self.costs)
self.costs = self.costs.astype(np.float32)
self.costs_preview = np.copy(self.costs)
elif data_type == "costs_canvas":
#self.costs_canvas = np.load(f)
scorecard.flip_scorecard(np.load(f), self.costs_canvas)
elif data_type == "data":
#self.data = np.load(f)
scorecard.flip_scorecard(np.load(f), self.data)
elif data_type == "elevation":
try:
# self.elevation = np.load(f)
scorecard.flip_scorecard(np.load(f), self.elevation)
except Exception as e:
print("Failed to load elevation!! ", e)
#scorecard.flip_scorecard(np.load(f), self.elevation)
def modify(self, grid_position : tuple, recording_mode: float, elevation: float, radius : float):
recording_mode = float(recording_mode)
value = 1.0
if recording_mode == 1.0:
#value = 0.0
value = np.inf
if type(self.costs_canvas) == type(None):
self.costs_canvas = np.zeros((self.costs.shape[0], self.costs.shape[1]))
for x in range(-1,2):
for y in range(-1, 2):
self.costs_preview[grid_position[0]+x][grid_position[1]+y] = 25
#print("Modifing at ", grid_position, recording_mode, value)
for x in range(-16, 16):
for y in range(-16, 16):
if math.sqrt(x**2+y**2) < radius:
self.costs_canvas[grid_position[0]+x][grid_position[1]+y] = value
self.costs[grid_position[0]+x][grid_position[1]+y] = value
self.elevation[grid_position[0]+x][grid_position[1]+y] = elevation
#print(self.costs[grid_position[0]][grid_position[1]])
| [
"pyastar.astar_path",
"numpy.load",
"numpy.copy",
"math.sqrt",
"math.pow",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"matplotlib.pyplot.axis",
"datetime.datetime.now",
"numba.int32",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.max",
"numpy.array",
"simplification.cutil.simplify_coor... | [((8912, 8965), 'numpy.zeros', 'np.zeros', (['(layers, _width, _height)'], {'dtype': 'np.float32'}), '((layers, _width, _height), dtype=np.float32)\n', (8920, 8965), True, 'import numpy as np\n'), ((8991, 9044), 'numpy.zeros', 'np.zeros', (['(layers, _width, _height)'], {'dtype': 'np.float32'}), '((layers, _width, _height), dtype=np.float32)\n', (8999, 9044), True, 'import numpy as np\n'), ((9066, 9119), 'numpy.zeros', 'np.zeros', (['(layers, _width, _height)'], {'dtype': 'np.float32'}), '((layers, _width, _height), dtype=np.float32)\n', (9074, 9119), True, 'import numpy as np\n'), ((9148, 9201), 'numpy.zeros', 'np.zeros', (['(layers, _width, _height)'], {'dtype': 'np.float32'}), '((layers, _width, _height), dtype=np.float32)\n', (9156, 9201), True, 'import numpy as np\n'), ((9221, 9274), 'numpy.zeros', 'np.zeros', (['(layers, _width, _height)'], {'dtype': 'np.float32'}), '((layers, _width, _height), dtype=np.float32)\n', (9229, 9274), True, 'import numpy as np\n'), ((9298, 9349), 'numpy.zeros', 'np.zeros', (['(layers, _width, _height)'], {'dtype': 'np.int32'}), '((layers, _width, _height), dtype=np.int32)\n', (9306, 9349), True, 'import numpy as np\n'), ((10189, 10217), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'frameon': '(False)'}), '(frameon=False)\n', (10202, 10217), False, 'from matplotlib import pyplot, use\n'), ((10232, 10257), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['self.costs'], {}), '(self.costs)\n', (10245, 10257), False, 'from matplotlib import pyplot, use\n'), ((10266, 10284), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""off"""'], {}), "('off')\n", (10277, 10284), False, 'from matplotlib import pyplot, use\n'), ((10302, 10363), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['"""./wake_island_cstf.png"""'], {'bbox_inches': '"""tight"""'}), "('./wake_island_cstf.png', bbox_inches='tight')\n", (10316, 10363), False, 'from matplotlib import pyplot, use\n'), ((20403, 20409), 'numba.typed.List', 
'List', ([], {}), '()\n', (20407, 20409), False, 'from numba.typed import List\n'), ((5230, 5278), 'numpy.zeros', 'np.zeros', (['self.elevation.shape'], {'dtype': 'np.float32'}), '(self.elevation.shape, dtype=np.float32)\n', (5238, 5278), True, 'import numpy as np\n'), ((5351, 5399), 'numpy.zeros', 'np.zeros', (['self.elevation.shape'], {'dtype': 'np.float32'}), '(self.elevation.shape, dtype=np.float32)\n', (5359, 5399), True, 'import numpy as np\n'), ((14292, 14362), 'pyastar.astar_path', 'pyastar.astar_path', (['self.costs[level]', 'start', 'end'], {'allow_diagonal': '(True)'}), '(self.costs[level], start, end, allow_diagonal=True)\n', (14310, 14362), False, 'import pyastar\n'), ((22023, 22075), 'numpy.zeros', 'np.zeros', (['(self.costs.shape[0], self.costs.shape[1])'], {}), '((self.costs.shape[0], self.costs.shape[1]))\n', (22031, 22075), True, 'import numpy as np\n'), ((2325, 2335), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (2332, 2335), True, 'import numpy as np\n'), ((6480, 6507), 'numpy.min', 'np.min', (['array_to_use[level]'], {}), '(array_to_use[level])\n', (6486, 6507), True, 'import numpy as np\n'), ((6540, 6567), 'numpy.max', 'np.max', (['array_to_use[level]'], {}), '(array_to_use[level])\n', (6546, 6567), True, 'import numpy as np\n'), ((11962, 11970), 'numba.int32', 'int32', (['x'], {}), '(x)\n', (11967, 11970), False, 'from numba import int32, float32\n'), ((11972, 11980), 'numba.int32', 'int32', (['y'], {}), '(y)\n', (11977, 11980), False, 'from numba import int32, float32\n'), ((11982, 11994), 'numba.int32', 'int32', (['level'], {}), '(level)\n', (11987, 11994), False, 'from numba import int32, float32\n'), ((14045, 14059), 'numpy.array', 'np.array', (['path'], {}), '(path)\n', (14053, 14059), True, 'import numpy as np\n'), ((14102, 14129), 'simplification.cutil.simplify_coords_idx', 'simplify_coords_idx', (['p', '(100)'], {}), '(p, 100)\n', (14121, 14129), False, 'from simplification.cutil import simplify_coords_idx\n'), ((20827, 20837), 
'numpy.load', 'np.load', (['f'], {}), '(f)\n', (20834, 20837), True, 'import numpy as np\n'), ((21000, 21019), 'numpy.copy', 'np.copy', (['self.costs'], {}), '(self.costs)\n', (21007, 21019), True, 'import numpy as np\n'), ((2530, 2540), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (2537, 2540), True, 'import numpy as np\n'), ((13849, 13864), 'numba.int32', 'int32', (['start[0]'], {}), '(start[0])\n', (13854, 13864), False, 'from numba import int32, float32\n'), ((13866, 13881), 'numba.int32', 'int32', (['start[1]'], {}), '(start[1])\n', (13871, 13881), False, 'from numba import int32, float32\n'), ((13883, 13895), 'numba.int32', 'int32', (['level'], {}), '(level)\n', (13888, 13895), False, 'from numba import int32, float32\n'), ((13899, 13912), 'numba.int32', 'int32', (['end[0]'], {}), '(end[0])\n', (13904, 13912), False, 'from numba import int32, float32\n'), ((13914, 13927), 'numba.int32', 'int32', (['end[1]'], {}), '(end[1])\n', (13919, 13927), False, 'from numba import int32, float32\n'), ((13929, 13948), 'numba.int32', 'int32', (['target_level'], {}), '(target_level)\n', (13934, 13948), False, 'from numba import int32, float32\n'), ((19976, 19999), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19997, 19999), False, 'import datetime\n'), ((22379, 22405), 'math.sqrt', 'math.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (22388, 22405), False, 'import math\n'), ((2724, 2734), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (2731, 2734), True, 'import numpy as np\n'), ((17441, 17452), 'numba.int32', 'int32', (['p[1]'], {}), '(p[1])\n', (17446, 17452), False, 'from numba import int32, float32\n'), ((17454, 17465), 'numba.int32', 'int32', (['p[0]'], {}), '(p[0])\n', (17459, 17465), False, 'from numba import int32, float32\n'), ((21156, 21166), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (21163, 21166), True, 'import numpy as np\n'), ((17620, 17631), 'numba.int32', 'int32', (['p[1]'], {}), '(p[1])\n', (17625, 17631), False, 
'from numba import int32, float32\n'), ((17633, 17644), 'numba.int32', 'int32', (['p[0]'], {}), '(p[0])\n', (17638, 17644), False, 'from numba import int32, float32\n'), ((21306, 21316), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (21313, 21316), True, 'import numpy as np\n'), ((2897, 2907), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (2904, 2907), True, 'import numpy as np\n'), ((3051, 3061), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (3058, 3061), True, 'import numpy as np\n'), ((3114, 3161), 'numpy.zeros', 'np.zeros', (['self._elevation.shape'], {'dtype': 'np.int32'}), '(self._elevation.shape, dtype=np.int32)\n', (3122, 3161), True, 'import numpy as np\n'), ((21488, 21498), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (21495, 21498), True, 'import numpy as np\n'), ((7546, 7594), 'math.pow', 'math.pow', (['elevation_alpha', 'elevation_alpha_power'], {}), '(elevation_alpha, elevation_alpha_power)\n', (7554, 7594), False, 'import math\n')] |
#!/usr/bin/env python
"""Command line interface for FrankaPanda with Kinect2.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import atexit
import os.path
import sys
from builtins import input
import numpy as np
import matplotlib.pyplot as plt
import readline
from mpl_toolkits.mplot3d import Axes3D # NOQA: For 3D plotting
import _init_paths # NOQA
from robovat.math import Pose
from robovat.robots import franka_panda
from robovat.perception import point_cloud_utils as pc_utils
from robovat.perception.camera import Kinect2
from robovat.simulation import Simulator
from robovat.simulation.camera import BulletCamera
from robovat.utils.yaml_config import YamlConfig
from robovat.utils.logging import logger
# Help text printed by the 'help'/'h' command. The original wrapped the
# string in braces, accidentally creating a one-element *set* (which
# prints as {'...'}); a plain string prints cleanly. A newline is also
# restored after 'Help' so both rules sit on their own lines.
HELP = (
    '############################################################\n'
    'Help\n'
    '############################################################\n'
)
# NOTE(review): CLICK_Z appears unused in this file; presumably the
# intended default gripper height (meters) for click targets — confirm.
CLICK_Z = 0.2
def parse_args(argv=None):
    """Parse command line arguments.

    Args:
        argv: Optional list of argument strings. When None (the
            default), arguments are read from sys.argv as before, so
            existing callers are unaffected.

    Returns:
        args: The parsed arguments.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--mode',
        dest='mode',
        default='sim',
        help='Mode: sim, real.')

    parser.add_argument(
        '--env_config',
        dest='env_config',
        type=str,
        help='The configuration file for the environment.',
        default='configs/envs/arm_env.yaml')

    parser.add_argument(
        '--debug',
        dest='debug',
        type=int,
        help='Use the debugging mode if it is True.',
        default=1)

    parser.add_argument(
        '--assets',
        type=str,
        dest='assets_dir',
        default='./assets',
        help='The assets directory.')

    args = parser.parse_args(argv)

    return args
class EndEffectorClickController(object):
    """Controller of the end effector by mouse clicking."""

    def __init__(self, cli, ax, eef_z=0.2):
        """Initialize.

        Args:
            cli: The command line interface.
            ax: An instance of the Matplotlib Axes.
            eef_z: Z position of the end effector.
        """
        self.cli = cli
        self.image = None
        self.depth = None
        self.eef_z = eef_z
        self.ax = ax
        plt.ion()
        plt.show()
        self.show_image()

    def __call__(self, event):
        """Move the end effector above the clicked pixel.

        Args:
            event: A clicking event.
        """
        pixel = [event.xdata, event.ydata]
        z = self.depth[int(pixel[1]), int(pixel[0])]
        position = self.cli.camera.deproject_pixel(pixel, z)
        # Fix the gripper height BEFORE building the target pose. The
        # original mutated position only after constructing the Pose, so
        # the commanded height depended on whether Pose copied the
        # position; ordering it first makes eef_z take effect always.
        position.z = self.eef_z
        pose = Pose([position, [np.pi, 0, 0]])
        # Wait until both the arm and the gripper are idle.
        while not (self.cli.robot.is_limb_ready() and
                   self.cli.robot.is_gripper_ready()):
            if self.cli.mode == 'sim':
                self.cli.simulator.step()
        print('Clicked pixel: %r. Moving end effector to: % s'
              % (pixel + [z], position))
        self.cli.robot.move_to_gripper_pose(pose)
        # Block until the motion completes, stepping the simulator if any.
        while not (self.cli.robot.is_limb_ready() and
                   self.cli.robot.is_gripper_ready()):
            if self.cli.mode == 'sim':
                self.cli.simulator.step()
        self.show_image()
        plt.scatter(pixel[0], pixel[1], c='r')
        return pixel

    def show_image(self):
        """Refresh the cached RGB/depth frames and display the RGB image."""
        self.image = self.cli.camera.frames()['rgb']
        self.depth = self.cli.camera.frames()['depth']
        plt.imshow(self.image)
        plt.title('Image')
        plt.draw()
        plt.pause(1e-3)
class FrankaPandaCLI(object):
    """Command line interface for FrankaPanda with Kinect2.
    """
    def __init__(self,
                 mode,
                 config,
                 debug,
                 assets_dir):
        """Initialize.
        Args:
            mode: 'sim' or 'real'.
            config: The configuration file for the environment.
            debug: True if it is debugging mode, False otherwise.
            assets_dir: The assets directory.
        """
        self.mode = mode
        self.config = YamlConfig(config).as_easydict()
        self.debug = debug
        self.assets_dir = assets_dir
        # Command line client input history (persisted across sessions).
        readline.parse_and_bind('tab: complete')
        history_file = os.path.join('.python_history')
        try:
            readline.read_history_file(history_file)
        except IOError:
            # No history yet; it will be created on exit.
            pass
        atexit.register(readline.write_history_file, history_file)
        # Set up the scene: simulated ground/table + BulletCamera in
        # 'sim' mode, or a physical Kinect2 in 'real' mode.
        if self.mode == 'sim':
            print('Setting up the environment in simulation...')
            self.simulator = Simulator(use_visualizer=self.debug,
                                       assets_dir=self.assets_dir)
            self.simulator.reset()
            self.simulator.start()
            self.ground = self.simulator.add_body(
                self.config.SIM.GROUND.PATH,
                self.config.SIM.GROUND.POSE,
                is_static=True,
                name='ground')
            # Table height is randomized within the configured range.
            self.table_pose = Pose(self.config.SIM.TABLE.POSE)
            self.table_pose.position.z += np.random.uniform(
                *self.config.SIM.TABLE.HEIGHT_RANGE)
            self.simulator.add_body(
                self.config.SIM.TABLE.PATH,
                self.table_pose,
                is_static=True,
                name='table')
            # Camera.
            self.camera = BulletCamera(
                simulator=self.simulator,
                distance=1.0)
        elif self.mode == 'real':
            print('Setting up the environment in the real world...')
            self.table_pose = Pose(self.config.SIM.TABLE.POSE)
            self.table_pose.position.z += np.random.uniform(
                *self.config.SIM.TABLE.HEIGHT_RANGE)
            # NOTE(review): 'skip_regirobovation' looks like a typo for
            # 'skip_registration', but it must match the Kinect2
            # constructor signature — confirm against the driver.
            self.camera = Kinect2(
                packet_pipeline_mode=0,
                device_num=0,
                skip_regirobovation=False,
                use_inpaint=True)
            self.simulator = None
        else:
            # Unknown mode string.
            raise ValueError
        # Set up the robot.
        self.robot = franka_panda.factory(
            simulator=self.simulator,
            config=self.config.SIM.ARM.CONFIG)
        # Calibrate and start the camera.
        self.camera.set_calibration(
            intrinsics=self.config.KINECT2.DEPTH.INTRINSICS,
            translation=self.config.KINECT2.DEPTH.TRANSLATION,
            rotation=self.config.KINECT2.DEPTH.ROTATION)
        self.camera.start()
        if self.simulator:
            # Visualize the camera frame in the simulator for debugging.
            camera_pose = [
                self.config.KINECT2.DEPTH.TRANSLATION,
                self.config.KINECT2.DEPTH.ROTATION]
            camera_pose = Pose(camera_pose).inverse()
            self.simulator.plot_pose(camera_pose, axis_length=0.05)
    def start(self):
        """Start the command line client.

        Runs a read-eval loop: commands are accepted only while the arm
        and gripper are idle; in 'sim' mode the simulator is stepped on
        every iteration so the simulation keeps advancing.
        """
        while (1):
            if self.robot.is_limb_ready() and self.robot.is_gripper_ready():
                sys.stdout.flush()
                command = input('Enter a command: ')
                if command == 'quit' or command == 'q':
                    print('Closing the FrankaPanda client...')
                    break
                else:
                    self.run_command(command)
            if self.mode == 'sim':
                self.simulator.step()
    def run_command(self, command): # NOQA
        """Run the input command.
        Args:
            command: An input string command.
        """
        # Strip commas/brackets so "e [0.1, 0.2, 0.3]" parses like
        # "e 0.1 0.2 0.3"; the first word selects the command.
        command = command.replace(',', '').replace('[', '').replace(']', '')
        words = command.split(' ')
        command_type = words[0]
        # Print the help information.
        if command_type == 'help' or command_type == 'h':
            print(HELP)
        # Reset the robot joint positions.
        elif command_type == 'reset' or command_type == 'r':
            self.robot.reset(self.config.ARM.OFFSTAGE_POSITIONS)
        # Visualize the camera image.
        elif command_type == 'visualize' or command_type == 'v':
            results = self.camera.frames()
            image = results['rgb']
            depth = results['depth']
            plt.figure(figsize=(20, 10))
            plt.subplot(121)
            plt.imshow(image)
            plt.title('RGB Image')
            plt.subplot(122)
            plt.imshow(depth)
            plt.title('Depth Image')
            plt.show()
        # Visualize the table: project known table-frame corner/edge
        # points into the image to sanity-check camera calibration.
        elif command_type == 'table' or command_type == 't':
            results = self.camera.frames()
            image = results['rgb']
            depth = results['depth']
            table_points = [
                [0, 0, 0],
                [0, -0.61, 0],
                [0, 0.61, 0],
                [-0.38, 0, 0],
                [0.38, 0, 0],
                [-0.38, -0.61, 0],
                [-0.38, 0.61, 0],
                [0.38, -0.61, 0],
                [0.38, 0.61, 0],
            ]
            table_offset = self.table_pose.position
            table_points = np.array(table_points) + table_offset
            table_pixels = self.camera.project_point(table_points)
            plt.figure(figsize=(20, 10))
            plt.subplot(121)
            plt.imshow(image)
            plt.scatter(table_pixels[:, 0], table_pixels[:, 1], c='r')
            plt.title('RGB Image')
            plt.subplot(122)
            plt.imshow(depth)
            plt.scatter(table_pixels[:, 0], table_pixels[:, 1], c='r')
            plt.title('Depth Image')
            plt.show()
        # Visualize the layout: draw the configured tile regions as
        # green squares over the camera image.
        elif command_type == 'layout' or command_type == 'l':
            results = self.camera.frames()
            image = results['rgb']
            layout_name = words[1]
            layout_config = self.config.LAYOUT[layout_name]
            tile_config = layout_config.REGION
            size = layout_config.SIZE
            offset = layout_config.OFFSET
            plt.figure(figsize=(10, 10))
            plt.subplot(111)
            plt.imshow(image)
            for i, center in enumerate(tile_config.CENTERS):
                position = np.array(offset) + np.array(center) * size
                x = position[0]
                y = position[1]
                z = self.table_pose.position.z
                # Four tile corners at table height, then projected to
                # pixels and drawn as the four square edges.
                corners = [
                    [x - 0.5 * size, y - 0.5 * size, z],
                    [x + 0.5 * size, y - 0.5 * size, z],
                    [x - 0.5 * size, y + 0.5 * size, z],
                    [x + 0.5 * size, y + 0.5 * size, z]]
                pixels = self.camera.project_point(corners)
                color = 'green'
                plt.plot([pixels[0, 0], pixels[1, 0]],
                         [pixels[0, 1], pixels[1, 1]],
                         color=color, linewidth=2)
                plt.plot([pixels[0, 0], pixels[2, 0]],
                         [pixels[0, 1], pixels[2, 1]],
                         color=color, linewidth=2)
                plt.plot([pixels[1, 0], pixels[3, 0]],
                         [pixels[1, 1], pixels[3, 1]],
                         color=color, linewidth=2)
                plt.plot([pixels[2, 0], pixels[3, 0]],
                         [pixels[2, 1], pixels[3, 1]],
                         color=color, linewidth=2)
            plt.show()
        # Visualize the point cloud (optionally clustered into
        # `num_clusters` segments via DBSCAN then k-means-style grouping).
        elif command_type == 'pointcloud' or command_type == 'pc':
            if len(words) == 1:
                num_clusters = 0
            else:
                num_clusters = int(words[1])
            images = self.camera.frames()
            image = images['rgb']
            depth = images['depth']
            point_cloud = self.camera.deproject_depth_image(depth)
            fig = plt.figure(figsize=(20, 5))
            ax1 = fig.add_subplot(141)
            ax1.imshow(depth)
            # Crop the cloud to the configured axis-aligned workspace box.
            if (self.config.OBS.CROP_MIN is not None and
                    self.config.OBS.CROP_MAX is not None):
                crop_max = np.array(self.config.OBS.CROP_MAX)[np.newaxis, :]
                crop_min = np.array(self.config.OBS.CROP_MIN)[np.newaxis, :]
                crop_mask = np.logical_and(
                    np.all(point_cloud >= crop_min, axis=-1),
                    np.all(point_cloud <= crop_max, axis=-1))
                point_cloud = point_cloud[crop_mask]
            ax2 = fig.add_subplot(142, projection='3d')
            downsampled_point_cloud = pc_utils.downsample(
                point_cloud, num_samples=4096)
            pc_utils.show(downsampled_point_cloud, ax2, axis_range=1.0)
            if num_clusters > 0:
                # DBSCAN first to drop noise (label -1), then re-cluster
                # the remaining points into the requested segment count.
                point_cloud = pc_utils.remove_table(point_cloud)
                segmask = pc_utils.cluster(
                    point_cloud, num_clusters=num_clusters, method='dbscan')
                point_cloud = point_cloud[segmask != -1]
                segmask = pc_utils.cluster(
                    point_cloud, num_clusters=num_clusters)
                point_cloud = pc_utils.group_by_labels(
                    point_cloud, segmask, num_clusters, 256)
                ax3 = fig.add_subplot(143, projection='3d')
                pc_utils.show(point_cloud, ax3, axis_range=1.0)
                ax4 = fig.add_subplot(144)
                pc_utils.show2d(point_cloud, self.camera, ax4, image=image)
            plt.show()
        # Visualize the cropped/downsampled cloud re-projected onto a
        # black image (shows which pixels survive the workspace crop).
        elif command_type == 'rgbd':
            images = self.camera.frames()
            image = images['rgb']
            depth = images['depth']
            point_cloud = self.camera.deproject_depth_image(depth)
            fig = plt.figure(figsize=(5, 5))
            ax = fig.add_subplot(111)
            if (self.config.OBS.CROP_MIN is not None and
                    self.config.OBS.CROP_MAX is not None):
                crop_max = np.array(self.config.OBS.CROP_MAX)[np.newaxis, :]
                crop_min = np.array(self.config.OBS.CROP_MIN)[np.newaxis, :]
                crop_mask = np.logical_and(
                    np.all(point_cloud >= crop_min, axis=-1),
                    np.all(point_cloud <= crop_max, axis=-1))
                point_cloud = point_cloud[crop_mask]
            point_cloud = pc_utils.remove_table(point_cloud)
            point_cloud = pc_utils.downsample(point_cloud, num_samples=4096)
            pixels = self.camera.project_point(point_cloud)
            pixels = np.array(pixels, dtype=np.int32)
            background = np.zeros_like(image)
            background[pixels[:, 1], pixels[:, 0]] = (
                image[pixels[:, 1], pixels[:, 0]])
            plt.imshow(background)
            plt.show()
        # Move the gripper to the clicked pixel position.
        elif command_type == 'click' or command_type == 'c':
            fig, ax = plt.subplots(figsize=(20, 10))
            onclick = EndEffectorClickController(self, ax)
            results = self.camera.frames()
            plt.imshow(results['rgb'])
            plt.title('Image')
            fig.canvas.mpl_connect('button_press_event', onclick)
            plt.show()
        # Move joints to the target positions.
        elif command_type == 'joints' or command_type == 'j':
            joint_positions = [float(ch) for ch in words[1:]]
            print('Moving to joint positions: %s ...' % joint_positions)
            self.robot.move_to_joint_positions(joint_positions)
        # Move the end effector to the target pose: 6/7 numbers are
        # position+orientation, 3 numbers keep the current orientation.
        elif command_type == 'end_effector' or command_type == 'e':
            pose = [float(ch) for ch in words[1:]]
            if len(pose) == 6 or len(pose) == 7:
                pose = Pose(pose[:3], pose[3:])
            elif len(pose) == 3:
                end_effector_pose = self.robot.end_effector
                pose = Pose([pose, end_effector_pose.orientation])
            else:
                # NOTE(review): after this message the code still falls
                # through and calls move_to_gripper_pose with the raw
                # float list — likely should return early; confirm.
                print('The format of the input pose is wrong.')
            print('Moving to end effector pose: %s ...' % pose)
            self.robot.move_to_gripper_pose(pose)
        # Same as 'end_effector' but moves along a straight line.
        elif command_type == 'end_effector_line' or command_type == 'el':
            pose = [float(ch) for ch in words[1:]]
            if len(pose) == 6 or len(pose) == 7:
                pose = Pose(pose[:3], pose[3:])
            elif len(pose) == 3:
                end_effector_pose = self.robot.end_effector
                pose = Pose(pose, end_effector_pose.orientation)
            else:
                print('The format of the input pose is wrong.')
            print('Moving to end effector pose: %s ...' % pose)
            self.robot.move_to_gripper_pose(pose, straight_line=True)
        # Open the gripper.
        elif command_type == 'open' or command_type == 'o':
            joint_positions = self.robot.grip(0)
        # Close the gripper.
        elif command_type == 'grasp' or command_type == 'g':
            joint_positions = self.robot.grip(1)
        # Print the current robot status.
        elif command_type == 'print' or command_type == 'p':
            joint_positions = self.robot.joint_positions
            # NOTE(review): 'right_j0'..'right_j6' are Sawyer-style joint
            # names; confirm the Franka Panda driver exposes these keys.
            joint_positions = [
                joint_positions['right_j0'],
                joint_positions['right_j1'],
                joint_positions['right_j2'],
                joint_positions['right_j3'],
                joint_positions['right_j4'],
                joint_positions['right_j5'],
                joint_positions['right_j6'],
            ]
            print('Joint positions: %s' % (joint_positions))
            end_effector_pose = self.robot.end_effector
            print('End Effector position: %s, %s' %
                  (end_effector_pose.position, end_effector_pose.euler))
        else:
            print('Unrecognized command: %s' % command)
def main():
    """Entry point: parse the flags, build the CLI client, and run it."""
    args = parse_args()
    logger.info('Creating the FrankaPanda command line client...')
    client = FrankaPandaCLI(args.mode,
                           args.env_config,
                           args.debug,
                           args.assets_dir)
    logger.info('Running the Franka_Panda command line client...')
    client.start()


if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.title",
"atexit.register",
"robovat.perception.camera.Kinect2",
"robovat.perception.point_cloud_utils.remove_table",
"argparse.ArgumentParser",
"matplotlib.pyplot.figure",
"robovat.robots.franka_panda.factory",
"robovat.simulation.camera.BulletCamera",
"sys.stdout.flush",
"robov... | [((1095, 1120), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1118, 1120), False, 'import argparse\n'), ((17911, 17973), 'robovat.utils.logging.logger.info', 'logger.info', (['"""Creating the FrankaPanda command line client..."""'], {}), "('Creating the FrankaPanda command line client...')\n", (17922, 17973), False, 'from robovat.utils.logging import logger\n'), ((18123, 18185), 'robovat.utils.logging.logger.info', 'logger.info', (['"""Running the Franka_Panda command line client..."""'], {}), "('Running the Franka_Panda command line client...')\n", (18134, 18185), False, 'from robovat.utils.logging import logger\n'), ((2285, 2294), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2292, 2294), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2313), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2311, 2313), True, 'import matplotlib.pyplot as plt\n'), ((2626, 2657), 'robovat.math.Pose', 'Pose', (['[position, [np.pi, 0, 0]]'], {}), '([position, [np.pi, 0, 0]])\n', (2630, 2657), False, 'from robovat.math import Pose\n'), ((3262, 3300), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pixel[0]', 'pixel[1]'], {'c': '"""r"""'}), "(pixel[0], pixel[1], c='r')\n", (3273, 3300), True, 'import matplotlib.pyplot as plt\n'), ((3511, 3533), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.image'], {}), '(self.image)\n', (3521, 3533), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3560), 'matplotlib.pyplot.title', 'plt.title', (['"""Image"""'], {}), "('Image')\n", (3551, 3560), True, 'import matplotlib.pyplot as plt\n'), ((3569, 3579), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3577, 3579), True, 'import matplotlib.pyplot as plt\n'), ((3588, 3604), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (3597, 3604), True, 'import matplotlib.pyplot as plt\n'), ((4290, 4330), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""tab: complete"""'], {}), "('tab: complete')\n", (4313, 
4330), False, 'import readline\n'), ((4503, 4561), 'atexit.register', 'atexit.register', (['readline.write_history_file', 'history_file'], {}), '(readline.write_history_file, history_file)\n', (4518, 4561), False, 'import atexit\n'), ((6199, 6285), 'robovat.robots.franka_panda.factory', 'franka_panda.factory', ([], {'simulator': 'self.simulator', 'config': 'self.config.SIM.ARM.CONFIG'}), '(simulator=self.simulator, config=self.config.SIM.ARM.\n CONFIG)\n', (6219, 6285), False, 'from robovat.robots import franka_panda\n'), ((4412, 4452), 'readline.read_history_file', 'readline.read_history_file', (['history_file'], {}), '(history_file)\n', (4438, 4452), False, 'import readline\n'), ((4716, 4780), 'robovat.simulation.Simulator', 'Simulator', ([], {'use_visualizer': 'self.debug', 'assets_dir': 'self.assets_dir'}), '(use_visualizer=self.debug, assets_dir=self.assets_dir)\n', (4725, 4780), False, 'from robovat.simulation import Simulator\n'), ((5126, 5158), 'robovat.math.Pose', 'Pose', (['self.config.SIM.TABLE.POSE'], {}), '(self.config.SIM.TABLE.POSE)\n', (5130, 5158), False, 'from robovat.math import Pose\n'), ((5201, 5255), 'numpy.random.uniform', 'np.random.uniform', (['*self.config.SIM.TABLE.HEIGHT_RANGE'], {}), '(*self.config.SIM.TABLE.HEIGHT_RANGE)\n', (5218, 5255), True, 'import numpy as np\n'), ((5498, 5550), 'robovat.simulation.camera.BulletCamera', 'BulletCamera', ([], {'simulator': 'self.simulator', 'distance': '(1.0)'}), '(simulator=self.simulator, distance=1.0)\n', (5510, 5550), False, 'from robovat.simulation.camera import BulletCamera\n'), ((4139, 4157), 'robovat.utils.yaml_config.YamlConfig', 'YamlConfig', (['config'], {}), '(config)\n', (4149, 4157), False, 'from robovat.utils.yaml_config import YamlConfig\n'), ((5726, 5758), 'robovat.math.Pose', 'Pose', (['self.config.SIM.TABLE.POSE'], {}), '(self.config.SIM.TABLE.POSE)\n', (5730, 5758), False, 'from robovat.math import Pose\n'), ((5801, 5855), 'numpy.random.uniform', 'np.random.uniform', 
(['*self.config.SIM.TABLE.HEIGHT_RANGE'], {}), '(*self.config.SIM.TABLE.HEIGHT_RANGE)\n', (5818, 5855), True, 'import numpy as np\n'), ((5899, 5993), 'robovat.perception.camera.Kinect2', 'Kinect2', ([], {'packet_pipeline_mode': '(0)', 'device_num': '(0)', 'skip_regirobovation': '(False)', 'use_inpaint': '(True)'}), '(packet_pipeline_mode=0, device_num=0, skip_regirobovation=False,\n use_inpaint=True)\n', (5906, 5993), False, 'from robovat.perception.camera import Kinect2\n'), ((7069, 7087), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7085, 7087), False, 'import sys\n'), ((7114, 7140), 'builtins.input', 'input', (['"""Enter a command: """'], {}), "('Enter a command: ')\n", (7119, 7140), False, 'from builtins import input\n'), ((6785, 6802), 'robovat.math.Pose', 'Pose', (['camera_pose'], {}), '(camera_pose)\n', (6789, 6802), False, 'from robovat.math import Pose\n'), ((8248, 8276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (8258, 8276), True, 'import matplotlib.pyplot as plt\n'), ((8289, 8305), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (8300, 8305), True, 'import matplotlib.pyplot as plt\n'), ((8318, 8335), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (8328, 8335), True, 'import matplotlib.pyplot as plt\n'), ((8348, 8370), 'matplotlib.pyplot.title', 'plt.title', (['"""RGB Image"""'], {}), "('RGB Image')\n", (8357, 8370), True, 'import matplotlib.pyplot as plt\n'), ((8383, 8399), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (8394, 8399), True, 'import matplotlib.pyplot as plt\n'), ((8412, 8429), 'matplotlib.pyplot.imshow', 'plt.imshow', (['depth'], {}), '(depth)\n', (8422, 8429), True, 'import matplotlib.pyplot as plt\n'), ((8442, 8466), 'matplotlib.pyplot.title', 'plt.title', (['"""Depth Image"""'], {}), "('Depth Image')\n", (8451, 8466), True, 'import matplotlib.pyplot as plt\n'), ((8480, 8490), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8488, 8490), True, 'import matplotlib.pyplot as plt\n'), ((9225, 9253), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (9235, 9253), True, 'import matplotlib.pyplot as plt\n'), ((9266, 9282), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (9277, 9282), True, 'import matplotlib.pyplot as plt\n'), ((9295, 9312), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (9305, 9312), True, 'import matplotlib.pyplot as plt\n'), ((9325, 9383), 'matplotlib.pyplot.scatter', 'plt.scatter', (['table_pixels[:, 0]', 'table_pixels[:, 1]'], {'c': '"""r"""'}), "(table_pixels[:, 0], table_pixels[:, 1], c='r')\n", (9336, 9383), True, 'import matplotlib.pyplot as plt\n'), ((9396, 9418), 'matplotlib.pyplot.title', 'plt.title', (['"""RGB Image"""'], {}), "('RGB Image')\n", (9405, 9418), True, 'import matplotlib.pyplot as plt\n'), ((9431, 9447), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (9442, 9447), True, 'import matplotlib.pyplot as plt\n'), ((9460, 9477), 'matplotlib.pyplot.imshow', 'plt.imshow', (['depth'], {}), '(depth)\n', (9470, 9477), True, 'import matplotlib.pyplot as plt\n'), ((9490, 9548), 'matplotlib.pyplot.scatter', 'plt.scatter', (['table_pixels[:, 0]', 'table_pixels[:, 1]'], {'c': '"""r"""'}), "(table_pixels[:, 0], table_pixels[:, 1], c='r')\n", (9501, 9548), True, 'import matplotlib.pyplot as plt\n'), ((9561, 9585), 'matplotlib.pyplot.title', 'plt.title', (['"""Depth Image"""'], {}), "('Depth Image')\n", (9570, 9585), True, 'import matplotlib.pyplot as plt\n'), ((9599, 9609), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9607, 9609), True, 'import matplotlib.pyplot as plt\n'), ((9107, 9129), 'numpy.array', 'np.array', (['table_points'], {}), '(table_points)\n', (9115, 9129), True, 'import numpy as np\n'), ((10020, 10048), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 
10)'}), '(figsize=(10, 10))\n', (10030, 10048), True, 'import matplotlib.pyplot as plt\n'), ((10061, 10077), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (10072, 10077), True, 'import matplotlib.pyplot as plt\n'), ((10090, 10107), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (10100, 10107), True, 'import matplotlib.pyplot as plt\n'), ((11358, 11368), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11366, 11368), True, 'import matplotlib.pyplot as plt\n'), ((10717, 10816), 'matplotlib.pyplot.plot', 'plt.plot', (['[pixels[0, 0], pixels[1, 0]]', '[pixels[0, 1], pixels[1, 1]]'], {'color': 'color', 'linewidth': '(2)'}), '([pixels[0, 0], pixels[1, 0]], [pixels[0, 1], pixels[1, 1]], color=\n color, linewidth=2)\n', (10725, 10816), True, 'import matplotlib.pyplot as plt\n'), ((10878, 10977), 'matplotlib.pyplot.plot', 'plt.plot', (['[pixels[0, 0], pixels[2, 0]]', '[pixels[0, 1], pixels[2, 1]]'], {'color': 'color', 'linewidth': '(2)'}), '([pixels[0, 0], pixels[2, 0]], [pixels[0, 1], pixels[2, 1]], color=\n color, linewidth=2)\n', (10886, 10977), True, 'import matplotlib.pyplot as plt\n'), ((11039, 11138), 'matplotlib.pyplot.plot', 'plt.plot', (['[pixels[1, 0], pixels[3, 0]]', '[pixels[1, 1], pixels[3, 1]]'], {'color': 'color', 'linewidth': '(2)'}), '([pixels[1, 0], pixels[3, 0]], [pixels[1, 1], pixels[3, 1]], color=\n color, linewidth=2)\n', (11047, 11138), True, 'import matplotlib.pyplot as plt\n'), ((11200, 11299), 'matplotlib.pyplot.plot', 'plt.plot', (['[pixels[2, 0], pixels[3, 0]]', '[pixels[2, 1], pixels[3, 1]]'], {'color': 'color', 'linewidth': '(2)'}), '([pixels[2, 0], pixels[3, 0]], [pixels[2, 1], pixels[3, 1]], color=\n color, linewidth=2)\n', (11208, 11299), True, 'import matplotlib.pyplot as plt\n'), ((11802, 11829), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (11812, 11829), True, 'import matplotlib.pyplot as plt\n'), ((12487, 12537), 
'robovat.perception.point_cloud_utils.downsample', 'pc_utils.downsample', (['point_cloud'], {'num_samples': '(4096)'}), '(point_cloud, num_samples=4096)\n', (12506, 12537), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((12571, 12630), 'robovat.perception.point_cloud_utils.show', 'pc_utils.show', (['downsampled_point_cloud', 'ax2'], {'axis_range': '(1.0)'}), '(downsampled_point_cloud, ax2, axis_range=1.0)\n', (12584, 12630), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((13389, 13399), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13397, 13399), True, 'import matplotlib.pyplot as plt\n'), ((10197, 10213), 'numpy.array', 'np.array', (['offset'], {}), '(offset)\n', (10205, 10213), True, 'import numpy as np\n'), ((12695, 12729), 'robovat.perception.point_cloud_utils.remove_table', 'pc_utils.remove_table', (['point_cloud'], {}), '(point_cloud)\n', (12716, 12729), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((12757, 12830), 'robovat.perception.point_cloud_utils.cluster', 'pc_utils.cluster', (['point_cloud'], {'num_clusters': 'num_clusters', 'method': '"""dbscan"""'}), "(point_cloud, num_clusters=num_clusters, method='dbscan')\n", (12773, 12830), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((12936, 12992), 'robovat.perception.point_cloud_utils.cluster', 'pc_utils.cluster', (['point_cloud'], {'num_clusters': 'num_clusters'}), '(point_cloud, num_clusters=num_clusters)\n', (12952, 12992), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((13044, 13109), 'robovat.perception.point_cloud_utils.group_by_labels', 'pc_utils.group_by_labels', (['point_cloud', 'segmask', 'num_clusters', '(256)'], {}), '(point_cloud, segmask, num_clusters, 256)\n', (13068, 13109), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((13208, 13255), 'robovat.perception.point_cloud_utils.show', 'pc_utils.show', (['point_cloud', 'ax3'], 
{'axis_range': '(1.0)'}), '(point_cloud, ax3, axis_range=1.0)\n', (13221, 13255), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((13316, 13375), 'robovat.perception.point_cloud_utils.show2d', 'pc_utils.show2d', (['point_cloud', 'self.camera', 'ax4'], {'image': 'image'}), '(point_cloud, self.camera, ax4, image=image)\n', (13331, 13375), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((13674, 13700), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (13684, 13700), True, 'import matplotlib.pyplot as plt\n'), ((14258, 14292), 'robovat.perception.point_cloud_utils.remove_table', 'pc_utils.remove_table', (['point_cloud'], {}), '(point_cloud)\n', (14279, 14292), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((14319, 14369), 'robovat.perception.point_cloud_utils.downsample', 'pc_utils.downsample', (['point_cloud'], {'num_samples': '(4096)'}), '(point_cloud, num_samples=4096)\n', (14338, 14369), True, 'from robovat.perception import point_cloud_utils as pc_utils\n'), ((14451, 14483), 'numpy.array', 'np.array', (['pixels'], {'dtype': 'np.int32'}), '(pixels, dtype=np.int32)\n', (14459, 14483), True, 'import numpy as np\n'), ((14510, 14530), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (14523, 14530), True, 'import numpy as np\n'), ((14650, 14672), 'matplotlib.pyplot.imshow', 'plt.imshow', (['background'], {}), '(background)\n', (14660, 14672), True, 'import matplotlib.pyplot as plt\n'), ((14686, 14696), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14694, 14696), True, 'import matplotlib.pyplot as plt\n'), ((10216, 10232), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (10224, 10232), True, 'import numpy as np\n'), ((12044, 12078), 'numpy.array', 'np.array', (['self.config.OBS.CROP_MAX'], {}), '(self.config.OBS.CROP_MAX)\n', (12052, 12078), True, 'import numpy as np\n'), ((12121, 12155), 'numpy.array', 
'np.array', (['self.config.OBS.CROP_MIN'], {}), '(self.config.OBS.CROP_MIN)\n', (12129, 12155), True, 'import numpy as np\n'), ((12235, 12275), 'numpy.all', 'np.all', (['(point_cloud >= crop_min)'], {'axis': '(-1)'}), '(point_cloud >= crop_min, axis=-1)\n', (12241, 12275), True, 'import numpy as np\n'), ((12297, 12337), 'numpy.all', 'np.all', (['(point_cloud <= crop_max)'], {'axis': '(-1)'}), '(point_cloud <= crop_max, axis=-1)\n', (12303, 12337), True, 'import numpy as np\n'), ((14840, 14870), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (14852, 14870), True, 'import matplotlib.pyplot as plt\n'), ((14986, 15012), 'matplotlib.pyplot.imshow', 'plt.imshow', (["results['rgb']"], {}), "(results['rgb'])\n", (14996, 15012), True, 'import matplotlib.pyplot as plt\n'), ((15025, 15043), 'matplotlib.pyplot.title', 'plt.title', (['"""Image"""'], {}), "('Image')\n", (15034, 15043), True, 'import matplotlib.pyplot as plt\n'), ((15122, 15132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15130, 15132), True, 'import matplotlib.pyplot as plt\n'), ((13883, 13917), 'numpy.array', 'np.array', (['self.config.OBS.CROP_MAX'], {}), '(self.config.OBS.CROP_MAX)\n', (13891, 13917), True, 'import numpy as np\n'), ((13960, 13994), 'numpy.array', 'np.array', (['self.config.OBS.CROP_MIN'], {}), '(self.config.OBS.CROP_MIN)\n', (13968, 13994), True, 'import numpy as np\n'), ((14074, 14114), 'numpy.all', 'np.all', (['(point_cloud >= crop_min)'], {'axis': '(-1)'}), '(point_cloud >= crop_min, axis=-1)\n', (14080, 14114), True, 'import numpy as np\n'), ((14136, 14176), 'numpy.all', 'np.all', (['(point_cloud <= crop_max)'], {'axis': '(-1)'}), '(point_cloud <= crop_max, axis=-1)\n', (14142, 14176), True, 'import numpy as np\n'), ((15686, 15710), 'robovat.math.Pose', 'Pose', (['pose[:3]', 'pose[3:]'], {}), '(pose[:3], pose[3:])\n', (15690, 15710), False, 'from robovat.math import Pose\n'), ((15827, 15870), 'robovat.math.Pose', 'Pose', 
(['[pose, end_effector_pose.orientation]'], {}), '([pose, end_effector_pose.orientation])\n', (15831, 15870), False, 'from robovat.math import Pose\n'), ((16318, 16342), 'robovat.math.Pose', 'Pose', (['pose[:3]', 'pose[3:]'], {}), '(pose[:3], pose[3:])\n', (16322, 16342), False, 'from robovat.math import Pose\n'), ((16459, 16500), 'robovat.math.Pose', 'Pose', (['pose', 'end_effector_pose.orientation'], {}), '(pose, end_effector_pose.orientation)\n', (16463, 16500), False, 'from robovat.math import Pose\n')] |
import collections
import utils
import observation
import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
import utils
from tensorflow.keras.layers import Input, Dense, Lambda, Add, Conv2D, Flatten, LSTM, Reshape, ConvLSTM2D, BatchNormalization, Conv3D
from tensorflow_probability.python.distributions import kullback_leibler
from itertools import repeat
tfd = tfp.distributions
TICKS_PER_OBSERVATION = 15
TICKS_PER_SECOND = 30
MAX_MOVE_SPEED = 550
MAX_MOVE_IN_OBS = (MAX_MOVE_SPEED / TICKS_PER_SECOND) * TICKS_PER_OBSERVATION
N_MOVE_ENUMS = 9
MOVE_ENUMS = np.arange(N_MOVE_ENUMS, dtype=np.float32) - int(N_MOVE_ENUMS / 2)
MOVE_ENUMS *= MAX_MOVE_IN_OBS / (N_MOVE_ENUMS - 1) * 2
OBSERVATIONS_PER_SECOND = TICKS_PER_SECOND / TICKS_PER_OBSERVATION
MAX_UNITS = 5 + 5 + 16 + 16 + 1 + 1
ACTION_OUTPUT_COUNTS = {'enum': 5, 'x': 9, 'y': 9, 'target_unit': MAX_UNITS, 'ability': 4, 'item': 6}
OUTPUT_KEYS = ACTION_OUTPUT_COUNTS.keys()
INPUT_KEYS = ['env', 'allied_heroes', 'enemy_heroes', 'allied_nonheroes', 'enemy_nonheroes',
'allied_towers', 'enemy_towers']
AgentOutput = collections.namedtuple('AgentOutput', 'enum enum_logits x x_logits y y_logits target_unit target_unit_logits ability \
ability_logits item item_logits baseline')
class Dota(tf.Module):
"""Agent with ResNet, but without LSTM and additional inputs.
Four blocks instead of three in ImpalaAtariDeep.
"""
def __init__(self, enum_parametric_action_distribution, x_parametric_action_distribution,
y_parametric_action_distribution, target_unit_parametric_action_distribution,
ability_parametric_action_distribution, item_parametric_action_distribution):
super(Dota, self).__init__(name='dota')
# Parameters and layers for unroll.
self._enum_parametric_action_distribution = enum_parametric_action_distribution
self._x_parametric_action_distribution = x_parametric_action_distribution
self._y_parametric_action_distribution = y_parametric_action_distribution
self._target_unit_parametric_action_distribution = target_unit_parametric_action_distribution
self._ability_parametric_action_distribution = ability_parametric_action_distribution
self._item_parametric_action_distribution = item_parametric_action_distribution
# Parameters and layers for _torso.
self.affine_env = tf.keras.layers.Dense(128, activation='relu')
self.affine_unit_basic_stats = tf.keras.layers.Dense(128, activation='relu')
self.affine_unit_ah = tf.keras.layers.Dense(128, activation='relu')
self.affine_unit_eh = tf.keras.layers.Dense(128, activation='relu')
self.affine_unit_anh = tf.keras.layers.Dense(128, activation='relu')
self.affine_unit_enh = tf.keras.layers.Dense(128, activation='relu')
self.affine_unit_ath = tf.keras.layers.Dense(128, activation='relu')
self.affine_unit_eth = tf.keras.layers.Dense(128, activation='relu')
self.affine_pre_rnn = tf.keras.layers.Dense(MAX_UNITS, activation='relu')
self._core = tf.keras.layers.LSTMCell(128)
self.affine_unit_attention = tf.keras.layers.Dense(128, name='target_unit_policy_logits',
kernel_initializer='lecun_normal')
# Layers for _head.
self.affine_head_enum = tf.keras.layers.Dense(self._enum_parametric_action_distribution.param_size,
name='enum_policy_logits',
kernel_initializer='lecun_normal')
self.affine_move_x = tf.keras.layers.Dense(self._x_parametric_action_distribution.param_size, name='x_policy_logits',
kernel_initializer='lecun_normal')
self.affine_move_y = tf.keras.layers.Dense(self._y_parametric_action_distribution.param_size, name='y_policy_logits',
kernel_initializer='lecun_normal')
self.affine_head_ability = tf.keras.layers.Dense(self._ability_parametric_action_distribution.param_size,
name='ability_policy_logits',
kernel_initializer='lecun_normal')
self.affine_head_item = tf.keras.layers.Dense(self._item_parametric_action_distribution.param_size,
name='item_policy_logits',
kernel_initializer='lecun_normal')
self._baseline = tf.keras.layers.Dense(1, name='baseline', kernel_initializer='lecun_normal')
def initial_state(self, batch_size):
return self._core.get_initial_state(batch_size=batch_size, dtype=tf.float32)
def _torso(self, unused_prev_action, env_output):
#_, _, env, allied_heroes, enemy_heroes, allied_nonheroes, enemy_nonheroes, allied_towers, enemy_towers, _, _ = env_output
env = env_output[2]
allied_heroes = env_output[3]
enemy_heroes = env_output[4]
allied_nonheroes = env_output[5]
enemy_nonheroes = env_output[6]
allied_towers = env_output[7]
enemy_towers = env_output[8]
enum_mask = env_output[9]
x_mask = env_output[10]
y_mask = env_output[11]
target_unit_mask = env_output[12]
ability_mask = env_output[13]
item_mask = env_output[14]
env = self.affine_env(env)
ah_basic = self.affine_unit_basic_stats(allied_heroes)
ah_embedding = self.affine_unit_ah(ah_basic)
ah_embedding_max = tf.math.reduce_max(ah_embedding, 1)
eh_basic = self.affine_unit_basic_stats(enemy_heroes)
eh_embedding = self.affine_unit_eh(eh_basic)
eh_embedding_max = tf.math.reduce_max(eh_embedding, 1)
anh_basic = self.affine_unit_basic_stats(allied_nonheroes)
anh_embedding = self.affine_unit_anh(anh_basic)
anh_embedding_max = tf.math.reduce_max(anh_embedding, 1)
enh_basic = self.affine_unit_basic_stats(enemy_nonheroes)
enh_embedding = self.affine_unit_enh(enh_basic)
enh_embedding_max = tf.math.reduce_max(enh_embedding, 1)
ath_basic = self.affine_unit_basic_stats(allied_towers)
ath_embedding = self.affine_unit_ath(ath_basic)
ath_embedding_max = tf.math.reduce_max(ath_embedding, 1)
eth_basic = self.affine_unit_basic_stats(enemy_towers)
eth_embedding = self.affine_unit_eth(eth_basic)
eth_embedding_max = tf.math.reduce_max(eth_embedding, 1)
unit_embedding = tf.concat([ah_embedding, eh_embedding, anh_embedding, enh_embedding, ath_embedding,
eth_embedding], axis=1)
unit_embedding = tf.transpose(unit_embedding, perm=[0, 2, 1])
x = tf.concat((env, ah_embedding_max, eh_embedding_max, anh_embedding_max, enh_embedding_max,
ath_embedding_max, eth_embedding_max), axis=1)
x = self.affine_pre_rnn(x)
return unit_embedding, x, enum_mask, x_mask, y_mask, target_unit_mask, ability_mask, item_mask
def _head(self, torso_output):
unit_embedding, x, enum_mask, x_mask, y_mask, target_unit_mask, ability_mask, item_mask = torso_output
batch_size = unit_embedding.shape[0]
unit_attention = self.affine_unit_attention(x)
unit_attention = tf.expand_dims(unit_attention, 1)
action_scores_enum = self.affine_head_enum(x)
action_scores_x = self.affine_move_x(x)
action_scores_y = self.affine_move_y(x)
action_target_unit = tf.linalg.matmul(unit_attention, unit_embedding)
action_target_unit = tf.squeeze(action_target_unit, 1)
action_ability = self.affine_head_ability(x)
action_item = self.affine_head_item(x)
baseline = tf.squeeze(self._baseline(x), axis=-1)
enum_action_list = []
x_action_list = []
y_action_list = []
target_unit_action_list = []
ability_action_list = []
item_action_list = []
enum_logits_list = []
x_logits_list = []
y_logits_list = []
target_unit_logits_list = []
ability_logits_list = []
item_logits_list = []
for e_l, x_l, y_l, t_l, a_l, i_l, e_m, x_m, y_m, t_m, a_m, i_m in zip(tf.unstack(action_scores_enum),
tf.unstack(action_scores_x), tf.unstack(action_scores_y),
tf.unstack(action_target_unit), tf.unstack(action_ability), tf.unstack(action_item),
tf.unstack(enum_mask), tf.unstack(x_mask),
tf.unstack(y_mask), tf.unstack(target_unit_mask),
tf.unstack(ability_mask), tf.unstack(item_mask)):
heads_logits = {'enum': tf.expand_dims(tf.expand_dims(e_l, 0), 0),
'x': tf.expand_dims(tf.expand_dims(x_l, 0), 0),
'y': tf.expand_dims(tf.expand_dims(y_l, 0), 0),
'target_unit': tf.expand_dims(tf.expand_dims(t_l, 0), 0),
'ability': tf.expand_dims(tf.expand_dims(a_l, 0), 0),
'item': tf.expand_dims(tf.expand_dims(i_l, 0), 0)
}
action_masks = {'enum': tf.expand_dims(tf.expand_dims(e_m, 0), 0),
'x': tf.expand_dims(tf.expand_dims(x_m, 0), 0),
'y': tf.expand_dims(tf.expand_dims(y_m, 0), 0),
'target_unit': tf.expand_dims(tf.expand_dims(t_m, 0), 0),
'ability': tf.expand_dims(tf.expand_dims(a_m, 0), 0),
'item': tf.expand_dims(tf.expand_dims(i_m, 0), 0)
}
action_dict = {'enum': -1, 'x': -1, 'y': -1, 'target_unit': -1, 'ability': -1, 'item': -1}
masked_heads_logits = {'enum': heads_logits['enum'], 'x': heads_logits['x'],
'y': heads_logits['y'], 'target_unit': heads_logits['target_unit'],
'ability': heads_logits['ability'], 'item': heads_logits['item']}
masked_heads_logits['enum'] = tf.convert_to_tensor([[list(repeat(-1.0, heads_logits['enum'].shape[2]))]],
dtype=tf.float32)
masked_heads_logits['x'] = tf.convert_to_tensor([[list(repeat(-1.0, heads_logits['x'].shape[2]))]],
dtype=tf.float32)
masked_heads_logits['y'] = tf.convert_to_tensor([[list(repeat(-1.0, heads_logits['y'].shape[2]))]],
dtype=tf.float32)
masked_heads_logits['target_unit'] = tf.convert_to_tensor([[list(repeat(-1.0, heads_logits['target_unit'].shape[2]))]],
dtype=tf.float32)
masked_heads_logits['ability'] = tf.convert_to_tensor([[list(repeat(-1.0, heads_logits['ability'].shape[2]))]],
dtype=tf.float32)
masked_heads_logits['item'] = tf.convert_to_tensor([[list(repeat(-1.0, heads_logits['item'].shape[2]))]],
dtype=tf.float32)
#tf.print("masked_heads_logits b: ", masked_heads_logits)
action_dict, masked_heads_logits = utils.select_actions(action_dict, heads_logits,
action_masks, masked_heads_logits)
#tf.print("masked_heads_logits a: ", masked_heads_logits)
#tf.print("")
#print("masked_heads_logits: ", masked_heads_logits)
enum_action_list.append(action_dict['enum'])
x_action_list.append(action_dict['x'])
y_action_list.append(action_dict['y'])
target_unit_action_list.append(action_dict['target_unit'])
ability_action_list.append(action_dict['ability'])
item_action_list.append(action_dict['item'])
enum_logits_list.append(masked_heads_logits['enum'][0][0])
x_logits_list.append(masked_heads_logits['x'][0][0])
y_logits_list.append(masked_heads_logits['y'][0][0])
target_unit_logits_list.append(masked_heads_logits['target_unit'][0][0])
ability_logits_list.append(masked_heads_logits['ability'][0][0])
item_logits_list.append(masked_heads_logits['item'][0][0])
enum_action_list = tf.stack(enum_action_list)
x_action_list = tf.stack(x_action_list)
y_action_list = tf.stack(y_action_list)
target_unit_action_list = tf.stack(target_unit_action_list)
ability_action_list = tf.stack(ability_action_list)
item_action_list = tf.stack(item_action_list)
enum_logits_list = tf.stack(enum_logits_list)
x_logits_list = tf.stack(x_logits_list)
y_logits_list = tf.stack(y_logits_list)
target_unit_logits_list = tf.stack(target_unit_logits_list)
ability_logits_list = tf.stack(ability_logits_list)
item_logits_list = tf.stack(item_logits_list)
return AgentOutput(enum_action_list, enum_logits_list, x_action_list, x_logits_list, y_action_list,
y_logits_list, target_unit_action_list, target_unit_logits_list, ability_action_list,
ability_logits_list, item_action_list, item_logits_list, baseline)
# Not clear why, but if "@tf.function" declarator is placed directly onto
# __call__, training fails with "uninitialized variable *baseline".
# when running on multiple learning tpu cores.
@tf.function
def get_action(self, *args, **kwargs):
return self.__call__(*args, **kwargs)
def __call__(self, prev_actions, env_outputs, core_state, unroll=False, is_training=False):
if not unroll:
# Add time dimension.
prev_actions, env_outputs = tf.nest.map_structure(lambda t: tf.expand_dims(t, 0), (prev_actions, env_outputs))
outputs, core_state = self._unroll(prev_actions, env_outputs, core_state)
if not unroll:
# Remove time dimension.
outputs = tf.nest.map_structure(lambda t: tf.squeeze(t, 0), outputs)
return outputs, core_state
def _unroll(self, prev_actions, env_outputs, core_state):
unused_reward, done, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = env_outputs
torso_outputs = utils.batch_apply(self._torso, (prev_actions, env_outputs))
unit_embedding, x, enum_mask, x_mask, y_mask, target_unit_mask, ability_mask, item_mask = torso_outputs
initial_core_state = self._core.get_initial_state(batch_size=tf.shape(prev_actions[0])[1], dtype=tf.float32)
core_output_list = []
for input_, d in zip(tf.unstack(x), tf.unstack(done)):
# If the episode ended, the core state should be reset before the next.
core_state = tf.nest.map_structure(
lambda x, y, d=d: tf.where(
tf.reshape(d, [d.shape[0]] + [1] * (x.shape.rank - 1)), x, y),
initial_core_state, core_state)
core_output, core_state = self._core(input_, core_state)
core_output_list.append(core_output)
core_outputs = tf.stack(core_output_list)
return utils.batch_apply(self._head, ((unit_embedding, core_outputs, enum_mask, x_mask, y_mask, target_unit_mask,
ability_mask, item_mask),)), core_state | [
"itertools.repeat",
"utils.batch_apply",
"tensorflow.linalg.matmul",
"tensorflow.keras.layers.Dense",
"tensorflow.math.reduce_max",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.keras.layers.LSTMCell",
"tensorflow.transpose",
"tensorflow.stack",
"utils.select_actions",
"tensorflow.sha... | [((1109, 1330), 'collections.namedtuple', 'collections.namedtuple', (['"""AgentOutput"""', '"""enum enum_logits x x_logits y y_logits target_unit target_unit_logits ability ability_logits item item_logits baseline"""'], {}), "('AgentOutput',\n 'enum enum_logits x x_logits y y_logits target_unit target_unit_logits ability ability_logits item item_logits baseline'\n )\n", (1131, 1330), False, 'import collections\n'), ((585, 626), 'numpy.arange', 'np.arange', (['N_MOVE_ENUMS'], {'dtype': 'np.float32'}), '(N_MOVE_ENUMS, dtype=np.float32)\n', (594, 626), True, 'import numpy as np\n'), ((2410, 2455), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2431, 2455), True, 'import tensorflow as tf\n'), ((2492, 2537), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2513, 2537), True, 'import tensorflow as tf\n'), ((2565, 2610), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2586, 2610), True, 'import tensorflow as tf\n'), ((2637, 2682), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2658, 2682), True, 'import tensorflow as tf\n'), ((2710, 2755), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2731, 2755), True, 'import tensorflow as tf\n'), ((2783, 2828), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2804, 2828), True, 'import tensorflow as tf\n'), ((2856, 2901), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2877, 2901), True, 'import tensorflow as tf\n'), ((2929, 2974), 
'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2950, 2974), True, 'import tensorflow as tf\n'), ((3002, 3053), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['MAX_UNITS'], {'activation': '"""relu"""'}), "(MAX_UNITS, activation='relu')\n", (3023, 3053), True, 'import tensorflow as tf\n'), ((3072, 3101), 'tensorflow.keras.layers.LSTMCell', 'tf.keras.layers.LSTMCell', (['(128)'], {}), '(128)\n', (3096, 3101), True, 'import tensorflow as tf\n'), ((3136, 3235), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'name': '"""target_unit_policy_logits"""', 'kernel_initializer': '"""lecun_normal"""'}), "(128, name='target_unit_policy_logits',\n kernel_initializer='lecun_normal')\n", (3157, 3235), True, 'import tensorflow as tf\n'), ((3340, 3481), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self._enum_parametric_action_distribution.param_size'], {'name': '"""enum_policy_logits"""', 'kernel_initializer': '"""lecun_normal"""'}), "(self._enum_parametric_action_distribution.param_size,\n name='enum_policy_logits', kernel_initializer='lecun_normal')\n", (3361, 3481), True, 'import tensorflow as tf\n'), ((3604, 3739), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self._x_parametric_action_distribution.param_size'], {'name': '"""x_policy_logits"""', 'kernel_initializer': '"""lecun_normal"""'}), "(self._x_parametric_action_distribution.param_size,\n name='x_policy_logits', kernel_initializer='lecun_normal')\n", (3625, 3739), True, 'import tensorflow as tf\n'), ((3808, 3943), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self._y_parametric_action_distribution.param_size'], {'name': '"""y_policy_logits"""', 'kernel_initializer': '"""lecun_normal"""'}), "(self._y_parametric_action_distribution.param_size,\n name='y_policy_logits', kernel_initializer='lecun_normal')\n", (3829, 3943), True, 'import tensorflow as tf\n'), 
((4018, 4171), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self._ability_parametric_action_distribution.param_size'], {'name': '"""ability_policy_logits"""', 'kernel_initializer': '"""lecun_normal"""'}), "(self._ability_parametric_action_distribution.\n param_size, name='ability_policy_logits', kernel_initializer='lecun_normal'\n )\n", (4039, 4171), True, 'import tensorflow as tf\n'), ((4297, 4438), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self._item_parametric_action_distribution.param_size'], {'name': '"""item_policy_logits"""', 'kernel_initializer': '"""lecun_normal"""'}), "(self._item_parametric_action_distribution.param_size,\n name='item_policy_logits', kernel_initializer='lecun_normal')\n", (4318, 4438), True, 'import tensorflow as tf\n'), ((4564, 4640), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'name': '"""baseline"""', 'kernel_initializer': '"""lecun_normal"""'}), "(1, name='baseline', kernel_initializer='lecun_normal')\n", (4585, 4640), True, 'import tensorflow as tf\n'), ((5531, 5566), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['ah_embedding', '(1)'], {}), '(ah_embedding, 1)\n', (5549, 5566), True, 'import tensorflow as tf\n'), ((5702, 5737), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['eh_embedding', '(1)'], {}), '(eh_embedding, 1)\n', (5720, 5737), True, 'import tensorflow as tf\n'), ((5882, 5918), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['anh_embedding', '(1)'], {}), '(anh_embedding, 1)\n', (5900, 5918), True, 'import tensorflow as tf\n'), ((6062, 6098), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['enh_embedding', '(1)'], {}), '(enh_embedding, 1)\n', (6080, 6098), True, 'import tensorflow as tf\n'), ((6240, 6276), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['ath_embedding', '(1)'], {}), '(ath_embedding, 1)\n', (6258, 6276), True, 'import tensorflow as tf\n'), ((6417, 6453), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', 
(['eth_embedding', '(1)'], {}), '(eth_embedding, 1)\n', (6435, 6453), True, 'import tensorflow as tf\n'), ((6476, 6587), 'tensorflow.concat', 'tf.concat', (['[ah_embedding, eh_embedding, anh_embedding, enh_embedding, ath_embedding,\n eth_embedding]'], {'axis': '(1)'}), '([ah_embedding, eh_embedding, anh_embedding, enh_embedding,\n ath_embedding, eth_embedding], axis=1)\n', (6485, 6587), True, 'import tensorflow as tf\n'), ((6662, 6706), 'tensorflow.transpose', 'tf.transpose', (['unit_embedding'], {'perm': '[0, 2, 1]'}), '(unit_embedding, perm=[0, 2, 1])\n', (6674, 6706), True, 'import tensorflow as tf\n'), ((6716, 6856), 'tensorflow.concat', 'tf.concat', (['(env, ah_embedding_max, eh_embedding_max, anh_embedding_max,\n enh_embedding_max, ath_embedding_max, eth_embedding_max)'], {'axis': '(1)'}), '((env, ah_embedding_max, eh_embedding_max, anh_embedding_max,\n enh_embedding_max, ath_embedding_max, eth_embedding_max), axis=1)\n', (6725, 6856), True, 'import tensorflow as tf\n'), ((7259, 7292), 'tensorflow.expand_dims', 'tf.expand_dims', (['unit_attention', '(1)'], {}), '(unit_attention, 1)\n', (7273, 7292), True, 'import tensorflow as tf\n'), ((7457, 7505), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['unit_attention', 'unit_embedding'], {}), '(unit_attention, unit_embedding)\n', (7473, 7505), True, 'import tensorflow as tf\n'), ((7534, 7567), 'tensorflow.squeeze', 'tf.squeeze', (['action_target_unit', '(1)'], {}), '(action_target_unit, 1)\n', (7544, 7567), True, 'import tensorflow as tf\n'), ((12231, 12257), 'tensorflow.stack', 'tf.stack', (['enum_action_list'], {}), '(enum_action_list)\n', (12239, 12257), True, 'import tensorflow as tf\n'), ((12278, 12301), 'tensorflow.stack', 'tf.stack', (['x_action_list'], {}), '(x_action_list)\n', (12286, 12301), True, 'import tensorflow as tf\n'), ((12322, 12345), 'tensorflow.stack', 'tf.stack', (['y_action_list'], {}), '(y_action_list)\n', (12330, 12345), True, 'import tensorflow as tf\n'), ((12376, 12409), 
'tensorflow.stack', 'tf.stack', (['target_unit_action_list'], {}), '(target_unit_action_list)\n', (12384, 12409), True, 'import tensorflow as tf\n'), ((12436, 12465), 'tensorflow.stack', 'tf.stack', (['ability_action_list'], {}), '(ability_action_list)\n', (12444, 12465), True, 'import tensorflow as tf\n'), ((12489, 12515), 'tensorflow.stack', 'tf.stack', (['item_action_list'], {}), '(item_action_list)\n', (12497, 12515), True, 'import tensorflow as tf\n'), ((12540, 12566), 'tensorflow.stack', 'tf.stack', (['enum_logits_list'], {}), '(enum_logits_list)\n', (12548, 12566), True, 'import tensorflow as tf\n'), ((12587, 12610), 'tensorflow.stack', 'tf.stack', (['x_logits_list'], {}), '(x_logits_list)\n', (12595, 12610), True, 'import tensorflow as tf\n'), ((12631, 12654), 'tensorflow.stack', 'tf.stack', (['y_logits_list'], {}), '(y_logits_list)\n', (12639, 12654), True, 'import tensorflow as tf\n'), ((12685, 12718), 'tensorflow.stack', 'tf.stack', (['target_unit_logits_list'], {}), '(target_unit_logits_list)\n', (12693, 12718), True, 'import tensorflow as tf\n'), ((12745, 12774), 'tensorflow.stack', 'tf.stack', (['ability_logits_list'], {}), '(ability_logits_list)\n', (12753, 12774), True, 'import tensorflow as tf\n'), ((12798, 12824), 'tensorflow.stack', 'tf.stack', (['item_logits_list'], {}), '(item_logits_list)\n', (12806, 12824), True, 'import tensorflow as tf\n'), ((14087, 14146), 'utils.batch_apply', 'utils.batch_apply', (['self._torso', '(prev_actions, env_outputs)'], {}), '(self._torso, (prev_actions, env_outputs))\n', (14104, 14146), False, 'import utils\n'), ((14861, 14887), 'tensorflow.stack', 'tf.stack', (['core_output_list'], {}), '(core_output_list)\n', (14869, 14887), True, 'import tensorflow as tf\n'), ((8113, 8143), 'tensorflow.unstack', 'tf.unstack', (['action_scores_enum'], {}), '(action_scores_enum)\n', (8123, 8143), True, 'import tensorflow as tf\n'), ((8187, 8214), 'tensorflow.unstack', 'tf.unstack', (['action_scores_x'], {}), 
'(action_scores_x)\n', (8197, 8214), True, 'import tensorflow as tf\n'), ((8216, 8243), 'tensorflow.unstack', 'tf.unstack', (['action_scores_y'], {}), '(action_scores_y)\n', (8226, 8243), True, 'import tensorflow as tf\n'), ((8287, 8317), 'tensorflow.unstack', 'tf.unstack', (['action_target_unit'], {}), '(action_target_unit)\n', (8297, 8317), True, 'import tensorflow as tf\n'), ((8319, 8345), 'tensorflow.unstack', 'tf.unstack', (['action_ability'], {}), '(action_ability)\n', (8329, 8345), True, 'import tensorflow as tf\n'), ((8347, 8370), 'tensorflow.unstack', 'tf.unstack', (['action_item'], {}), '(action_item)\n', (8357, 8370), True, 'import tensorflow as tf\n'), ((8414, 8435), 'tensorflow.unstack', 'tf.unstack', (['enum_mask'], {}), '(enum_mask)\n', (8424, 8435), True, 'import tensorflow as tf\n'), ((8437, 8455), 'tensorflow.unstack', 'tf.unstack', (['x_mask'], {}), '(x_mask)\n', (8447, 8455), True, 'import tensorflow as tf\n'), ((8498, 8516), 'tensorflow.unstack', 'tf.unstack', (['y_mask'], {}), '(y_mask)\n', (8508, 8516), True, 'import tensorflow as tf\n'), ((8518, 8546), 'tensorflow.unstack', 'tf.unstack', (['target_unit_mask'], {}), '(target_unit_mask)\n', (8528, 8546), True, 'import tensorflow as tf\n'), ((8590, 8614), 'tensorflow.unstack', 'tf.unstack', (['ability_mask'], {}), '(ability_mask)\n', (8600, 8614), True, 'import tensorflow as tf\n'), ((8616, 8637), 'tensorflow.unstack', 'tf.unstack', (['item_mask'], {}), '(item_mask)\n', (8626, 8637), True, 'import tensorflow as tf\n'), ((11205, 11291), 'utils.select_actions', 'utils.select_actions', (['action_dict', 'heads_logits', 'action_masks', 'masked_heads_logits'], {}), '(action_dict, heads_logits, action_masks,\n masked_heads_logits)\n', (11225, 11291), False, 'import utils\n'), ((14421, 14434), 'tensorflow.unstack', 'tf.unstack', (['x'], {}), '(x)\n', (14431, 14434), True, 'import tensorflow as tf\n'), ((14436, 14452), 'tensorflow.unstack', 'tf.unstack', (['done'], {}), '(done)\n', (14446, 14452), True, 
'import tensorflow as tf\n'), ((14900, 15038), 'utils.batch_apply', 'utils.batch_apply', (['self._head', '((unit_embedding, core_outputs, enum_mask, x_mask, y_mask, target_unit_mask,\n ability_mask, item_mask),)'], {}), '(self._head, ((unit_embedding, core_outputs, enum_mask,\n x_mask, y_mask, target_unit_mask, ability_mask, item_mask),))\n', (14917, 15038), False, 'import utils\n'), ((8685, 8707), 'tensorflow.expand_dims', 'tf.expand_dims', (['e_l', '(0)'], {}), '(e_l, 0)\n', (8699, 8707), True, 'import tensorflow as tf\n'), ((8755, 8777), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_l', '(0)'], {}), '(x_l, 0)\n', (8769, 8777), True, 'import tensorflow as tf\n'), ((8825, 8847), 'tensorflow.expand_dims', 'tf.expand_dims', (['y_l', '(0)'], {}), '(y_l, 0)\n', (8839, 8847), True, 'import tensorflow as tf\n'), ((8905, 8927), 'tensorflow.expand_dims', 'tf.expand_dims', (['t_l', '(0)'], {}), '(t_l, 0)\n', (8919, 8927), True, 'import tensorflow as tf\n'), ((8981, 9003), 'tensorflow.expand_dims', 'tf.expand_dims', (['a_l', '(0)'], {}), '(a_l, 0)\n', (8995, 9003), True, 'import tensorflow as tf\n'), ((9054, 9076), 'tensorflow.expand_dims', 'tf.expand_dims', (['i_l', '(0)'], {}), '(i_l, 0)\n', (9068, 9076), True, 'import tensorflow as tf\n'), ((9149, 9171), 'tensorflow.expand_dims', 'tf.expand_dims', (['e_m', '(0)'], {}), '(e_m, 0)\n', (9163, 9171), True, 'import tensorflow as tf\n'), ((9219, 9241), 'tensorflow.expand_dims', 'tf.expand_dims', (['x_m', '(0)'], {}), '(x_m, 0)\n', (9233, 9241), True, 'import tensorflow as tf\n'), ((9289, 9311), 'tensorflow.expand_dims', 'tf.expand_dims', (['y_m', '(0)'], {}), '(y_m, 0)\n', (9303, 9311), True, 'import tensorflow as tf\n'), ((9369, 9391), 'tensorflow.expand_dims', 'tf.expand_dims', (['t_m', '(0)'], {}), '(t_m, 0)\n', (9383, 9391), True, 'import tensorflow as tf\n'), ((9445, 9467), 'tensorflow.expand_dims', 'tf.expand_dims', (['a_m', '(0)'], {}), '(a_m, 0)\n', (9459, 9467), True, 'import tensorflow as tf\n'), ((9518, 9540), 
'tensorflow.expand_dims', 'tf.expand_dims', (['i_m', '(0)'], {}), '(i_m, 0)\n', (9532, 9540), True, 'import tensorflow as tf\n'), ((13634, 13654), 'tensorflow.expand_dims', 'tf.expand_dims', (['t', '(0)'], {}), '(t, 0)\n', (13648, 13654), True, 'import tensorflow as tf\n'), ((13863, 13879), 'tensorflow.squeeze', 'tf.squeeze', (['t', '(0)'], {}), '(t, 0)\n', (13873, 13879), True, 'import tensorflow as tf\n'), ((14322, 14347), 'tensorflow.shape', 'tf.shape', (['prev_actions[0]'], {}), '(prev_actions[0])\n', (14330, 14347), True, 'import tensorflow as tf\n'), ((14629, 14683), 'tensorflow.reshape', 'tf.reshape', (['d', '([d.shape[0]] + [1] * (x.shape.rank - 1))'], {}), '(d, [d.shape[0]] + [1] * (x.shape.rank - 1))\n', (14639, 14683), True, 'import tensorflow as tf\n'), ((10007, 10050), 'itertools.repeat', 'repeat', (['(-1.0)', "heads_logits['enum'].shape[2]"], {}), "(-1.0, heads_logits['enum'].shape[2])\n", (10013, 10050), False, 'from itertools import repeat\n'), ((10194, 10234), 'itertools.repeat', 'repeat', (['(-1.0)', "heads_logits['x'].shape[2]"], {}), "(-1.0, heads_logits['x'].shape[2])\n", (10200, 10234), False, 'from itertools import repeat\n'), ((10375, 10415), 'itertools.repeat', 'repeat', (['(-1.0)', "heads_logits['y'].shape[2]"], {}), "(-1.0, heads_logits['y'].shape[2])\n", (10381, 10415), False, 'from itertools import repeat\n'), ((10566, 10616), 'itertools.repeat', 'repeat', (['(-1.0)', "heads_logits['target_unit'].shape[2]"], {}), "(-1.0, heads_logits['target_unit'].shape[2])\n", (10572, 10616), False, 'from itertools import repeat\n'), ((10773, 10819), 'itertools.repeat', 'repeat', (['(-1.0)', "heads_logits['ability'].shape[2]"], {}), "(-1.0, heads_logits['ability'].shape[2])\n", (10779, 10819), False, 'from itertools import repeat\n'), ((10969, 11012), 'itertools.repeat', 'repeat', (['(-1.0)', "heads_logits['item'].shape[2]"], {}), "(-1.0, heads_logits['item'].shape[2])\n", (10975, 11012), False, 'from itertools import repeat\n')] |
"""Tests that need to be run under `fil-profile python`.
To run:
$ fil-profile python -m pytest tests/test-scripts/fil-interpreter.py
"""
import sys
import os
from ctypes import c_void_p
import re
from pathlib import Path
from subprocess import check_output, check_call
import multiprocessing
import pytest
import numpy as np
import numpy.core.numeric
from pampy import _ as ANY, match
from IPython.core.displaypub import CapturingDisplayPublisher
from IPython.core.interactiveshell import InteractiveShell
import threadpoolctl
from filprofiler._tracer import (
preload,
start_tracing,
stop_tracing,
disable_thread_pools,
)
from filprofiler._testing import get_allocations, big, as_mb
from filprofiler._ipython import run_with_profile
from filprofiler.api import profile
from pymalloc import pymalloc
import fil_api
def test_no_profiling():
"""Neither memory tracking nor Python profiling happen by default."""
address = pymalloc(365)
# No information about size available, since it's not tracked:
assert preload.pymemprofile_get_allocation_size(c_void_p(address)) == 0
assert sys.getprofile() is None
def test_temporary_profiling(tmpdir):
"""Profiling can be run temporarily."""
# get_allocations() expects actual output in a subdirectory.
def f():
arr = np.ones((1024, 1024, 4), dtype=np.uint64) # 32MB
del arr
return 1234
result = profile(f, tmpdir / "output")
assert result == 1234
# Allocations were tracked:
path = ((__file__, "f", 49), (numpy.core.numeric.__file__, "ones", ANY))
allocations = get_allocations(tmpdir)
assert match(allocations, {path: big}, as_mb) == pytest.approx(32, 0.1)
# Profiling stopped:
test_no_profiling()
def run_in_ipython_shell(code_cells):
"""Run a list of strings in IPython.
Returns parsed allocations.
"""
InteractiveShell.clear_instance()
shell = InteractiveShell.instance(display_pub_class=CapturingDisplayPublisher)
for code in code_cells:
shell.run_cell(code)
InteractiveShell.clear_instance()
html = shell.display_pub.outputs[-1]["data"]["text/html"]
assert "<iframe" in html
[svg_path] = re.findall('src="([^"]*)"', html)
assert svg_path.endswith("peak-memory.svg")
resultdir = Path(svg_path).parent.parent
return get_allocations(resultdir)
def test_ipython_profiling(tmpdir):
"""Profiling can be run via IPython magic."""
cwd = os.getcwd()
os.chdir(tmpdir)
allocations = run_in_ipython_shell(
[
"%load_ext filprofiler",
"""\
%%filprofile
import numpy as np
arr = np.ones((1024, 1024, 4), dtype=np.uint64) # 32MB
""",
]
)
# Allocations were tracked:
path = (
(re.compile("<ipython-input-1-.*"), "__magic_run_with_fil", 3),
(numpy.core.numeric.__file__, "ones", ANY),
)
assert match(allocations, {path: big}, as_mb) == pytest.approx(32, 0.1)
# Profiling stopped:
test_no_profiling()
def test_ipython_exception_while_profiling(tmpdir):
"""
Profiling can be run via IPython magic, still profiles and shuts down
correctly on an exception.
This will log a RuntimeError. That is expected.
"""
cwd = os.getcwd()
os.chdir(tmpdir)
allocations = run_in_ipython_shell(
[
"%load_ext filprofiler",
"""\
%%filprofile
import numpy as np
arr = np.ones((1024, 1024, 2), dtype=np.uint64) # 16MB
raise RuntimeError("The test will log this, it's OK.")
arr = np.ones((1024, 1024, 8), dtype=np.uint64) # 64MB
""",
]
)
# Allocations were tracked:
path = (
(re.compile("<ipython-input-1-.*"), "__magic_run_with_fil", 3),
(numpy.core.numeric.__file__, "ones", ANY),
)
assert match(allocations, {path: big}, as_mb) == pytest.approx(16, 0.1)
# Profiling stopped:
test_no_profiling()
def test_ipython_non_standard_indent(tmpdir):
"""
Profiling can be run via IPython magic, still profiles and shuts down
correctly on an exception.
This will log a RuntimeError. That is expected.
"""
cwd = os.getcwd()
os.chdir(tmpdir)
allocations = run_in_ipython_shell(
[
"%load_ext filprofiler",
"""\
%%filprofile
import numpy as np
def f(): # indented with 5 spaces what
arr = np.ones((1024, 1024, 2), dtype=np.uint64) # 16MB
f()
""",
]
)
# Allocations were tracked:
path = (
(re.compile("<ipython-input-1-.*"), "__magic_run_with_fil", 5),
(re.compile("<ipython-input-1-.*"), "f", 4),
(numpy.core.numeric.__file__, "ones", ANY),
)
assert match(allocations, {path: big}, as_mb) == pytest.approx(16, 0.1)
# Profiling stopped:
test_no_profiling()
@pytest.mark.parametrize(
"profile_func",
[
lambda f, tempdir: run_with_profile(f),
profile,
],
)
def test_profiling_disables_threadpools(tmpdir, profile_func):
"""
Memory profiling disables thread pools, then restores them when done.
"""
cwd = os.getcwd()
os.chdir(tmpdir)
import numexpr
import blosc
numexpr.set_num_threads(3)
blosc.set_nthreads(3)
with threadpoolctl.threadpool_limits(3, "blas"):
def check():
assert numexpr.set_num_threads(2) == 1
assert blosc.set_nthreads(2) == 1
for d in threadpoolctl.threadpool_info():
assert d["num_threads"] == 1, d
profile_func(check, tmpdir)
# Resets when done:
assert numexpr.set_num_threads(2) == 3
assert blosc.set_nthreads(2) == 3
for d in threadpoolctl.threadpool_info():
if d["user_api"] == "blas":
assert d["num_threads"] == 3, d
def test_profiling_without_blosc_and_numexpr(tmpdir):
"""
The support for numexpr and blosc is optional; disabling them should work
even when they're not present.
"""
import sys
sys.modules["blosc"] = None
sys.modules["numexpr"] = None
try:
with disable_thread_pools():
pass
finally:
del sys.modules["blosc"]
del sys.modules["numexpr"]
def test_subprocess(tmpdir):
"""
Running a subprocess doesn't blow up.
"""
start_tracing(tmpdir)
try:
output = check_output(["printf", "hello"])
finally:
stop_tracing(tmpdir)
assert output == b"hello"
def test_subprocess_2(tmpdir):
"""
Test a process that, on macOS, would fail (see
https://github.com/pythonspeed/filprofiler/issues/230). Brew processes are
compiled or linked differently somehow.
"""
start_tracing(tmpdir)
try:
check_call(["gfortran", "--version"])
finally:
stop_tracing(tmpdir)
@pytest.mark.parametrize("mode", ["spawn", "forkserver", "fork"])
def test_multiprocessing(tmpdir, mode):
"""
Running a subprocess via multiprocessing in the various different modes
doesn't blow up.
"""
# Non-tracing:
with multiprocessing.get_context(mode).Pool(processes=1) as pool:
assert pool.apply((3).__add__, (4,)) == 7
# Tracing:
start_tracing(tmpdir)
try:
with multiprocessing.get_context(mode).Pool(processes=1) as pool:
assert pool.apply((3).__add__, (4,)) == 7
finally:
stop_tracing(tmpdir)
@pytest.mark.parametrize("mode", ["spawn", "forkserver", "fork"])
def test_multiprocessing_good_error_message_fil_api(tmpdir, mode):
"""
Using Fil API from a subprocess gives a reasonable error message.
"""
start_tracing(tmpdir)
try:
with multiprocessing.get_context(mode).Pool(processes=1) as pool:
with pytest.raises(RuntimeError) as e:
pool.apply(fil_api.run_with_fil)
finally:
stop_tracing(tmpdir)
| [
"IPython.core.interactiveshell.InteractiveShell.clear_instance",
"numexpr.set_num_threads",
"numpy.ones",
"multiprocessing.get_context",
"pathlib.Path",
"pytest.mark.parametrize",
"filprofiler._tracer.disable_thread_pools",
"os.chdir",
"subprocess.check_call",
"blosc.set_nthreads",
"threadpoolct... | [((6789, 6853), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['spawn', 'forkserver', 'fork']"], {}), "('mode', ['spawn', 'forkserver', 'fork'])\n", (6812, 6853), False, 'import pytest\n'), ((7370, 7434), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['spawn', 'forkserver', 'fork']"], {}), "('mode', ['spawn', 'forkserver', 'fork'])\n", (7393, 7434), False, 'import pytest\n'), ((952, 965), 'pymalloc.pymalloc', 'pymalloc', (['(365)'], {}), '(365)\n', (960, 965), False, 'from pymalloc import pymalloc\n'), ((1421, 1450), 'filprofiler.api.profile', 'profile', (['f', "(tmpdir / 'output')"], {}), "(f, tmpdir / 'output')\n", (1428, 1450), False, 'from filprofiler.api import profile\n'), ((1605, 1628), 'filprofiler._testing.get_allocations', 'get_allocations', (['tmpdir'], {}), '(tmpdir)\n', (1620, 1628), False, 'from filprofiler._testing import get_allocations, big, as_mb\n'), ((1881, 1914), 'IPython.core.interactiveshell.InteractiveShell.clear_instance', 'InteractiveShell.clear_instance', ([], {}), '()\n', (1912, 1914), False, 'from IPython.core.interactiveshell import InteractiveShell\n'), ((1928, 1998), 'IPython.core.interactiveshell.InteractiveShell.instance', 'InteractiveShell.instance', ([], {'display_pub_class': 'CapturingDisplayPublisher'}), '(display_pub_class=CapturingDisplayPublisher)\n', (1953, 1998), False, 'from IPython.core.interactiveshell import InteractiveShell\n'), ((2060, 2093), 'IPython.core.interactiveshell.InteractiveShell.clear_instance', 'InteractiveShell.clear_instance', ([], {}), '()\n', (2091, 2093), False, 'from IPython.core.interactiveshell import InteractiveShell\n'), ((2202, 2235), 're.findall', 're.findall', (['"""src="([^"]*)\\""""', 'html'], {}), '(\'src="([^"]*)"\', html)\n', (2212, 2235), False, 'import re\n'), ((2341, 2367), 'filprofiler._testing.get_allocations', 'get_allocations', (['resultdir'], {}), '(resultdir)\n', (2356, 2367), False, 'from filprofiler._testing 
import get_allocations, big, as_mb\n'), ((2466, 2477), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2475, 2477), False, 'import os\n'), ((2482, 2498), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (2490, 2498), False, 'import os\n'), ((3252, 3263), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3261, 3263), False, 'import os\n'), ((3268, 3284), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (3276, 3284), False, 'import os\n'), ((4143, 4154), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4152, 4154), False, 'import os\n'), ((4159, 4175), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (4167, 4175), False, 'import os\n'), ((5084, 5095), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5093, 5095), False, 'import os\n'), ((5100, 5116), 'os.chdir', 'os.chdir', (['tmpdir'], {}), '(tmpdir)\n', (5108, 5116), False, 'import os\n'), ((5159, 5185), 'numexpr.set_num_threads', 'numexpr.set_num_threads', (['(3)'], {}), '(3)\n', (5182, 5185), False, 'import numexpr\n'), ((5190, 5211), 'blosc.set_nthreads', 'blosc.set_nthreads', (['(3)'], {}), '(3)\n', (5208, 5211), False, 'import blosc\n'), ((6285, 6306), 'filprofiler._tracer.start_tracing', 'start_tracing', (['tmpdir'], {}), '(tmpdir)\n', (6298, 6306), False, 'from filprofiler._tracer import preload, start_tracing, stop_tracing, disable_thread_pools\n'), ((6667, 6688), 'filprofiler._tracer.start_tracing', 'start_tracing', (['tmpdir'], {}), '(tmpdir)\n', (6680, 6688), False, 'from filprofiler._tracer import preload, start_tracing, stop_tracing, disable_thread_pools\n'), ((7166, 7187), 'filprofiler._tracer.start_tracing', 'start_tracing', (['tmpdir'], {}), '(tmpdir)\n', (7179, 7187), False, 'from filprofiler._tracer import preload, start_tracing, stop_tracing, disable_thread_pools\n'), ((7592, 7613), 'filprofiler._tracer.start_tracing', 'start_tracing', (['tmpdir'], {}), '(tmpdir)\n', (7605, 7613), False, 'from filprofiler._tracer import preload, start_tracing, stop_tracing, disable_thread_pools\n'), ((1120, 
1136), 'sys.getprofile', 'sys.getprofile', ([], {}), '()\n', (1134, 1136), False, 'import sys\n'), ((1321, 1362), 'numpy.ones', 'np.ones', (['(1024, 1024, 4)'], {'dtype': 'np.uint64'}), '((1024, 1024, 4), dtype=np.uint64)\n', (1328, 1362), True, 'import numpy as np\n'), ((1640, 1678), 'pampy.match', 'match', (['allocations', '{path: big}', 'as_mb'], {}), '(allocations, {path: big}, as_mb)\n', (1645, 1678), False, 'from pampy import _ as ANY, match\n'), ((1682, 1704), 'pytest.approx', 'pytest.approx', (['(32)', '(0.1)'], {}), '(32, 0.1)\n', (1695, 1704), False, 'import pytest\n'), ((2899, 2937), 'pampy.match', 'match', (['allocations', '{path: big}', 'as_mb'], {}), '(allocations, {path: big}, as_mb)\n', (2904, 2937), False, 'from pampy import _ as ANY, match\n'), ((2941, 2963), 'pytest.approx', 'pytest.approx', (['(32)', '(0.1)'], {}), '(32, 0.1)\n', (2954, 2963), False, 'import pytest\n'), ((3796, 3834), 'pampy.match', 'match', (['allocations', '{path: big}', 'as_mb'], {}), '(allocations, {path: big}, as_mb)\n', (3801, 3834), False, 'from pampy import _ as ANY, match\n'), ((3838, 3860), 'pytest.approx', 'pytest.approx', (['(16)', '(0.1)'], {}), '(16, 0.1)\n', (3851, 3860), False, 'import pytest\n'), ((4678, 4716), 'pampy.match', 'match', (['allocations', '{path: big}', 'as_mb'], {}), '(allocations, {path: big}, as_mb)\n', (4683, 4716), False, 'from pampy import _ as ANY, match\n'), ((4720, 4742), 'pytest.approx', 'pytest.approx', (['(16)', '(0.1)'], {}), '(16, 0.1)\n', (4733, 4742), False, 'import pytest\n'), ((5221, 5263), 'threadpoolctl.threadpool_limits', 'threadpoolctl.threadpool_limits', (['(3)', '"""blas"""'], {}), "(3, 'blas')\n", (5252, 5263), False, 'import threadpoolctl\n'), ((5660, 5691), 'threadpoolctl.threadpool_info', 'threadpoolctl.threadpool_info', ([], {}), '()\n', (5689, 5691), False, 'import threadpoolctl\n'), ((6333, 6366), 'subprocess.check_output', 'check_output', (["['printf', 'hello']"], {}), "(['printf', 'hello'])\n", (6345, 6366), False, 
'from subprocess import check_output, check_call\n'), ((6388, 6408), 'filprofiler._tracer.stop_tracing', 'stop_tracing', (['tmpdir'], {}), '(tmpdir)\n', (6400, 6408), False, 'from filprofiler._tracer import preload, start_tracing, stop_tracing, disable_thread_pools\n'), ((6706, 6743), 'subprocess.check_call', 'check_call', (["['gfortran', '--version']"], {}), "(['gfortran', '--version'])\n", (6716, 6743), False, 'from subprocess import check_output, check_call\n'), ((6765, 6785), 'filprofiler._tracer.stop_tracing', 'stop_tracing', (['tmpdir'], {}), '(tmpdir)\n', (6777, 6785), False, 'from filprofiler._tracer import preload, start_tracing, stop_tracing, disable_thread_pools\n'), ((7346, 7366), 'filprofiler._tracer.stop_tracing', 'stop_tracing', (['tmpdir'], {}), '(tmpdir)\n', (7358, 7366), False, 'from filprofiler._tracer import preload, start_tracing, stop_tracing, disable_thread_pools\n'), ((7818, 7838), 'filprofiler._tracer.stop_tracing', 'stop_tracing', (['tmpdir'], {}), '(tmpdir)\n', (7830, 7838), False, 'from filprofiler._tracer import preload, start_tracing, stop_tracing, disable_thread_pools\n'), ((1085, 1102), 'ctypes.c_void_p', 'c_void_p', (['address'], {}), '(address)\n', (1093, 1102), False, 'from ctypes import c_void_p\n'), ((2300, 2314), 'pathlib.Path', 'Path', (['svg_path'], {}), '(svg_path)\n', (2304, 2314), False, 'from pathlib import Path\n'), ((2767, 2800), 're.compile', 're.compile', (['"""<ipython-input-1-.*"""'], {}), "('<ipython-input-1-.*')\n", (2777, 2800), False, 'import re\n'), ((3664, 3697), 're.compile', 're.compile', (['"""<ipython-input-1-.*"""'], {}), "('<ipython-input-1-.*')\n", (3674, 3697), False, 'import re\n'), ((4493, 4526), 're.compile', 're.compile', (['"""<ipython-input-1-.*"""'], {}), "('<ipython-input-1-.*')\n", (4503, 4526), False, 'import re\n'), ((4565, 4598), 're.compile', 're.compile', (['"""<ipython-input-1-.*"""'], {}), "('<ipython-input-1-.*')\n", (4575, 4598), False, 'import re\n'), ((5406, 5437), 
'threadpoolctl.threadpool_info', 'threadpoolctl.threadpool_info', ([], {}), '()\n', (5435, 5437), False, 'import threadpoolctl\n'), ((5568, 5594), 'numexpr.set_num_threads', 'numexpr.set_num_threads', (['(2)'], {}), '(2)\n', (5591, 5594), False, 'import numexpr\n'), ((5615, 5636), 'blosc.set_nthreads', 'blosc.set_nthreads', (['(2)'], {}), '(2)\n', (5633, 5636), False, 'import blosc\n'), ((4874, 4893), 'filprofiler._ipython.run_with_profile', 'run_with_profile', (['f'], {}), '(f)\n', (4890, 4893), False, 'from filprofiler._ipython import run_with_profile\n'), ((6070, 6092), 'filprofiler._tracer.disable_thread_pools', 'disable_thread_pools', ([], {}), '()\n', (6090, 6092), False, 'from filprofiler._tracer import preload, start_tracing, stop_tracing, disable_thread_pools\n'), ((5306, 5332), 'numexpr.set_num_threads', 'numexpr.set_num_threads', (['(2)'], {}), '(2)\n', (5329, 5332), False, 'import numexpr\n'), ((5357, 5378), 'blosc.set_nthreads', 'blosc.set_nthreads', (['(2)'], {}), '(2)\n', (5375, 5378), False, 'import blosc\n'), ((7035, 7068), 'multiprocessing.get_context', 'multiprocessing.get_context', (['mode'], {}), '(mode)\n', (7062, 7068), False, 'import multiprocessing\n'), ((7714, 7741), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (7727, 7741), False, 'import pytest\n'), ((7210, 7243), 'multiprocessing.get_context', 'multiprocessing.get_context', (['mode'], {}), '(mode)\n', (7237, 7243), False, 'import multiprocessing\n'), ((7636, 7669), 'multiprocessing.get_context', 'multiprocessing.get_context', (['mode'], {}), '(mode)\n', (7663, 7669), False, 'import multiprocessing\n')] |
from confseq.betting import betting_ci
import numpy as np
import time
import os, sys
import matplotlib.pyplot as plt
dirname = os.path.dirname(__file__)
sys.path.append(os.path.join(dirname, "../.."))
from other_bounds import (
ptl_l2_ci,
)
# Benchmark: wall-clock cost of the PTL-l2 confidence interval vs. the
# betting confidence interval as a function of sample size.
sample_sizes = np.arange(5, 250, step=10)


def _timed(ci_fn, x, label, sample_size):
    """Run ``ci_fn(x, alpha=0.05)`` once and return the elapsed seconds.

    Also prints a progress line identifying the method and sample size,
    matching the original script's output format.
    """
    start = time.time()
    ci_fn(x, alpha=0.05)
    elapsed = time.time() - start
    print(
        label
        + ": sample size of "
        + str(sample_size)
        + " took "
        + str(elapsed)
        + " seconds"
    )
    return elapsed


ptl_compute_times = []
betting_compute_times = []
for sample_size in sample_sizes:
    # Fresh Beta(1, 1) (i.e. uniform) sample of the current size for both
    # methods, so each pair of timings sees identical data.
    x = np.random.beta(1, 1, sample_size)
    ptl_compute_times.append(_timed(ptl_l2_ci, x, "PTL", sample_size))
    betting_compute_times.append(_timed(betting_ci, x, "Betting", sample_size))

# Serif fonts so the figure matches the paper's typesetting.
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "dejavuserif"
plt.rcParams["font.size"] = 13
plt.plot(sample_sizes, ptl_compute_times, label=r"PTL-$\ell_2$", color="royalblue", linestyle="-")
plt.plot(sample_sizes, betting_compute_times, label=r"Betting", color="tomato", linestyle="-.")
plt.xlabel(r"Sample size $n$")
plt.ylabel(r"Computation time (seconds)")
plt.legend(loc="best")
plt.savefig('figures/compute_times.pdf') | [
"matplotlib.pyplot.plot",
"other_bounds.ptl_l2_ci",
"numpy.random.beta",
"os.path.dirname",
"matplotlib.pyplot.legend",
"confseq.betting.betting_ci",
"time.time",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.savefig"
] | [((128, 153), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (143, 153), False, 'import os, sys\n'), ((263, 289), 'numpy.arange', 'np.arange', (['(5)', '(250)'], {'step': '(10)'}), '(5, 250, step=10)\n', (272, 289), True, 'import numpy as np\n'), ((1207, 1310), 'matplotlib.pyplot.plot', 'plt.plot', (['sample_sizes', 'ptl_compute_times'], {'label': '"""PTL-$\\\\ell_2$"""', 'color': '"""royalblue"""', 'linestyle': '"""-"""'}), "(sample_sizes, ptl_compute_times, label='PTL-$\\\\ell_2$', color=\n 'royalblue', linestyle='-')\n", (1215, 1310), True, 'import matplotlib.pyplot as plt\n'), ((1306, 1405), 'matplotlib.pyplot.plot', 'plt.plot', (['sample_sizes', 'betting_compute_times'], {'label': '"""Betting"""', 'color': '"""tomato"""', 'linestyle': '"""-."""'}), "(sample_sizes, betting_compute_times, label='Betting', color=\n 'tomato', linestyle='-.')\n", (1314, 1405), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1431), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample size $n$"""'], {}), "('Sample size $n$')\n", (1412, 1431), True, 'import matplotlib.pyplot as plt\n'), ((1433, 1473), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Computation time (seconds)"""'], {}), "('Computation time (seconds)')\n", (1443, 1473), True, 'import matplotlib.pyplot as plt\n'), ((1475, 1497), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1485, 1497), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1538), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/compute_times.pdf"""'], {}), "('figures/compute_times.pdf')\n", (1509, 1538), True, 'import matplotlib.pyplot as plt\n'), ((170, 200), 'os.path.join', 'os.path.join', (['dirname', '"""../.."""'], {}), "(dirname, '../..')\n", (182, 200), False, 'import os, sys\n'), ((466, 499), 'numpy.random.beta', 'np.random.beta', (['(1)', '(1)', 'sample_size'], {}), '(1, 1, sample_size)\n', (480, 499), True, 'import numpy as np\n'), ((512, 523), 'time.time', 
'time.time', ([], {}), '()\n', (521, 523), False, 'import time\n'), ((528, 552), 'other_bounds.ptl_l2_ci', 'ptl_l2_ci', (['x'], {'alpha': '(0.05)'}), '(x, alpha=0.05)\n', (537, 552), False, 'from other_bounds import ptl_l2_ci\n'), ((563, 574), 'time.time', 'time.time', ([], {}), '()\n', (572, 574), False, 'import time\n'), ((802, 813), 'time.time', 'time.time', ([], {}), '()\n', (811, 813), False, 'import time\n'), ((818, 843), 'confseq.betting.betting_ci', 'betting_ci', (['x'], {'alpha': '(0.05)'}), '(x, alpha=0.05)\n', (828, 843), False, 'from confseq.betting import betting_ci\n'), ((854, 865), 'time.time', 'time.time', ([], {}), '()\n', (863, 865), False, 'import time\n')] |
import math
import random
import softwareproperties.ppa
import WilliamsDivergenceMaker as wdm
import BioPythonMaker as bpm
import GeometryMaker as dfm
import HtmlReportMaker as hrm
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Build an HTML report comparing pairwise Williams divergences of protein
# geometry measures computed from local PDB files.
rep = hrm.HtmlReportMaker("WCCC","williams_new.html",cols=7)
############### REAL data #######################
rep.addLineComment('Real pdb data')
# Load structures from Data/ with the classic 'pdbXXXX.ent' naming scheme.
strucs = bpm.loadPdbStructures([],'Data/',extension='ent',prefix='pdb')
geo_mak = dfm.GeometryMaker(strucs,log=0)
# Geometry descriptors: bond lengths and a dihedral; presumably atom-pair
# notation with +1/-1 meaning the next/previous residue — TODO confirm.
geos = ['N:CA','CA:C','C:O','C:N+1','C-1:N','N:N+1','N:CA:C:N+1']
data = geo_mak.calculateGeometry(geos)
# Divergence maker with resampling-based p-values (1000 iterations).
cm = wdm.WilliamsDivergenceMaker(data,geos,bins=10,log=1,norm=True,p_resample=True,pval_iters=1000)
rep.addLineComment('Scatters')
rep.changeColNumber(6)
print('Creating scatters')
# Iterate over unordered pairs of geometry measures (j starts at i+1, so
# the geoA != geoB guard below is always true; kept as-is).
for i in range(0,len(geos)):
    geoA = geos[i]
    for j in range(i+1,len(geos)):
        geoB = geos[j]
        if geoA != geoB:
            print('Scatter',geoA,geoB)
            # NOTE(review): resampleData appears to return a bootstrap
            # resample and a shuffled (independence) copy — confirm.
            samp_df, shuffled_df = cm.resampleData(data[[geoA,geoB]])
            # stat1 (resampled) is only used by the commented-out plot below;
            # stat2/stat3 feed the plot titles.
            stat1, hist2A1, diffAB1, hist2B1 = cm.compareToConvolved(samp_df, geoA, geoB)
            stat2, hist2A2, diffAB2, hist2B2 = cm.compareToConvolved(shuffled_df, geoA, geoB)
            stat3, hist2A3, diffAB3, hist2B3 = cm.compareToConvolved(data, geoA, geoB)
            div = cm.getCorrelation([geoA, geoB])
            # Unpack divergence results: statistic, p-value, observed 2-D
            # histogram (A), difference (D), convolved/independent model (B).
            stat, p_value, A, D, B,pphist = div.stat,div.p_value,div.histAB,div.diffAB,div.convAB,div.p_hist
            hist = div.p_hist
            # Shared color-scale maximum so A and B surfaces are comparable.
            maxV = max(np.max(A),np.max(B))
            rep.addLineComment(geoA + ' ' + geoB + ' stat=' + str(round(stat,3)) + ' p_value=' + str(round(p_value,3)))
            rep.addPlot2d(data, 'scatter', geo_x=geoA, geo_y=geoB, hue=geoA,title='observed ' + str(round(stat3,3)))
            rep.addPlot2d(shuffled_df, 'scatter', geo_x=geoA, geo_y=geoB, hue=geoA,title='shuffled ' + str(round(stat2,3)))
            #rep.addPlot2d(samp_df, 'scatter', geo_x=geoA, geo_y=geoB, hue=geoA,title='resampled ' + str(round(stat1,3)))
            # Only plot the null/resampled divergence histograms when the
            # maker actually produced shuffled divergences.
            if len(hist['divergence_shuffled']) > 0:
                rep.addPlot1d(hist,'histogram',geo_x='divergence_shuffled',title='',overlay=True,alpha=0.5)
                rep.addPlot1d(hist, 'histogram', geo_x='divergence_resampled', title='',alpha=0.5,palette='mediumseagreen')
            # Surfaces: observed, signed difference (diverging colormap
            # centered at 0), and the convolved independence model.
            rep.addSurface(A,'Original Data',cmin=0,cmax=maxV,palette='Blues')
            rep.addSurface(D, 'Difference Data, '+'stat='+str(round(stat,3)), cmin=-1*maxV, cmax=maxV, palette='RdBu')
            rep.addSurface(B, 'Convolved Data', cmin=0, cmax=maxV, palette='Reds')
rep.printReport()
| [
"WilliamsDivergenceMaker.WilliamsDivergenceMaker",
"HtmlReportMaker.HtmlReportMaker",
"numpy.max",
"BioPythonMaker.loadPdbStructures",
"GeometryMaker.GeometryMaker"
] | [((284, 340), 'HtmlReportMaker.HtmlReportMaker', 'hrm.HtmlReportMaker', (['"""WCCC"""', '"""williams_new.html"""'], {'cols': '(7)'}), "('WCCC', 'williams_new.html', cols=7)\n", (303, 340), True, 'import HtmlReportMaker as hrm\n'), ((435, 500), 'BioPythonMaker.loadPdbStructures', 'bpm.loadPdbStructures', (['[]', '"""Data/"""'], {'extension': '"""ent"""', 'prefix': '"""pdb"""'}), "([], 'Data/', extension='ent', prefix='pdb')\n", (456, 500), True, 'import BioPythonMaker as bpm\n'), ((508, 540), 'GeometryMaker.GeometryMaker', 'dfm.GeometryMaker', (['strucs'], {'log': '(0)'}), '(strucs, log=0)\n', (525, 540), True, 'import GeometryMaker as dfm\n'), ((650, 754), 'WilliamsDivergenceMaker.WilliamsDivergenceMaker', 'wdm.WilliamsDivergenceMaker', (['data', 'geos'], {'bins': '(10)', 'log': '(1)', 'norm': '(True)', 'p_resample': '(True)', 'pval_iters': '(1000)'}), '(data, geos, bins=10, log=1, norm=True,\n p_resample=True, pval_iters=1000)\n', (677, 754), True, 'import WilliamsDivergenceMaker as wdm\n'), ((1554, 1563), 'numpy.max', 'np.max', (['A'], {}), '(A)\n', (1560, 1563), True, 'import numpy as np\n'), ((1564, 1573), 'numpy.max', 'np.max', (['B'], {}), '(B)\n', (1570, 1573), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import zeros, linspace
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('SIR1.pdf')
print("Setup Complete")

# Time unit: 1 h
# Contact rates for three scenarios, paired element-wise with the
# simulation horizons in `days` below.
betas = (10./(40*8*24),2./(40*8*24),0.001/(40*8*24))
print(betas)
days = (30,60,365)
print(days)
# Recovery rate: mean infectious period of 15 days, expressed per hour.
gamma = 3./(15*24)
# 6 min
dt = 0.1


def _simulate_sir(beta, gamma, n_days, dt, s0=50, i0=1, r0=0):
    """Integrate the SIR model with forward Euler.

    Args:
        beta: transmission rate (per susceptible per infected per hour).
        gamma: recovery rate (per hour).
        n_days: simulation horizon in days.
        dt: time step in hours.
        s0, i0, r0: initial susceptible / infected / recovered counts.

    Returns:
        Tuple ``(t, S, I, R)`` of arrays with ``int(n_days * 24 / dt) + 1``
        entries each; ``t`` is in hours.
    """
    n_steps = int(n_days * 24 / dt)
    t = linspace(0, n_steps * dt, n_steps + 1)
    S = zeros(n_steps + 1)
    I = zeros(n_steps + 1)
    R = zeros(n_steps + 1)
    S[0], I[0], R[0] = s0, i0, r0
    for n in range(n_steps):
        # Named increments keep S + I + R conserved by construction.
        infections = dt * beta * S[n] * I[n]
        recoveries = dt * gamma * I[n]
        S[n + 1] = S[n] - infections
        I[n + 1] = I[n] + infections - recoveries
        R[n + 1] = R[n] + recoveries
    return t, S, I, R


for beta, D in zip(betas, days):
    print(beta, D)
    t, S, I, R = _simulate_sir(beta, gamma, D, dt)
    plt.plot(t, S, label="Susceptibles")
    plt.plot(t, I, label="Infected")
    plt.plot(t, R, label="Recovered")
    plt.legend()
    plt.xlabel('hours')
    plt.ylabel('students')
    plt.show()
    pp.savefig()
    plt.clf()
pp.close()
| [
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((171, 191), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['"""SIR1.pdf"""'], {}), "('SIR1.pdf')\n", (179, 191), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((501, 531), 'numpy.linspace', 'linspace', (['(0)', '(N_t * dt)', '(N_t + 1)'], {}), '(0, N_t * dt, N_t + 1)\n', (509, 531), False, 'from numpy import zeros, linspace\n'), ((536, 550), 'numpy.zeros', 'zeros', (['(N_t + 1)'], {}), '(N_t + 1)\n', (541, 550), False, 'from numpy import zeros, linspace\n'), ((557, 571), 'numpy.zeros', 'zeros', (['(N_t + 1)'], {}), '(N_t + 1)\n', (562, 571), False, 'from numpy import zeros, linspace\n'), ((578, 592), 'numpy.zeros', 'zeros', (['(N_t + 1)'], {}), '(N_t + 1)\n', (583, 592), False, 'from numpy import zeros, linspace\n'), ((862, 898), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'S'], {'label': '"""Susceptibles"""'}), "(t, S, label='Susceptibles')\n", (870, 898), True, 'import matplotlib.pyplot as plt\n'), ((903, 935), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'I'], {'label': '"""Infected"""'}), "(t, I, label='Infected')\n", (911, 935), True, 'import matplotlib.pyplot as plt\n'), ((940, 973), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'R'], {'label': '"""Recovered"""'}), "(t, R, label='Recovered')\n", (948, 973), True, 'import matplotlib.pyplot as plt\n'), ((977, 989), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (987, 989), True, 'import matplotlib.pyplot as plt\n'), ((994, 1013), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""hours"""'], {}), "('hours')\n", (1004, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1018, 1040), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""students"""'], {}), "('students')\n", (1028, 1040), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1055), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1053, 1055), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1086), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1084, 1086), True, 'import matplotlib.pyplot 
as plt\n')] |
# encdoing: utf-8
"""
@File: main
@Author: <NAME>
@Time: 2021/4/28 21:23
@Description: main function to run DIP model on real-world data sets
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from utils import compute_metrics
from tabulate import tabulate
from datasets.dataset import DATASET
from models.DIP import DIPModel
def run1(model, ds, seed):
    """Evaluate a DIPModel on one dataset and report its metrics.

    The dataset is split into train/test with the dataset's own test size,
    the model produces anomaly scores for the test split, and exactly as
    many points as there are true outliers are flagged (i.e. the anomaly
    rate is assumed known when forming hard predictions).

    :param model: A DIPModel object
    :param ds: A DATASET object
    :param seed: random seed
    :return: the metrics dict/tuple produced by ``compute_metrics``
    """
    rng = np.random.RandomState(seed)
    X_train, X_test, _, y_test = train_test_split(
        ds.X, ds.y, test_size=ds.test_size, random_state=rng
    )
    scores = model.fit_predict(X_train, X_test)
    # Flag the top-scoring points, matching the true outlier count.
    num_outliers = int(np.sum(y_test))
    predictions = np.zeros(X_test.shape[0])
    predictions[scores.argsort()[-num_outliers:][::-1]] = 1
    return compute_metrics(y_true=y_test, y_pred=predictions, y_prob=scores, verbose=True)
if __name__ == "__main__":
    # --- Single DIP on the musk dataset -----------------------------------
    print("Evaluate the single DIP on the musk dataset")
    musk = DATASET(name="musk", scalertype='MinMaxScaler', test_size=0.4)
    single_model = DIPModel(ModelName='DIP', n_neigh_list=[100], pathType='nearest',
                            distance="manhattan")
    run1(single_model, musk, seed=1)
    print('------' * 11)

    # --- Ensemble DIP on the arrhythmia dataset ----------------------------
    print("Evaluate Ensemble DIP on the arrhythmia dataset")
    arrhythmia = DATASET(name="arrhythmia", scalertype='MinMaxScaler', test_size=0.4)
    # Neighborhood sizes as fractions (10/20/30%) of the training split.
    train_count = int(arrhythmia.numSample * 0.4)
    fractions = np.array([0.1, 0.2, 0.3])
    neigh_lists = (train_count * fractions).astype(int)
    ensemble_model = DIPModel(ModelName='DIP', n_neigh_list=neigh_lists, pathType='nearest',
                              distance="manhattan")
    run1(ensemble_model, arrhythmia, seed=1)
| [
"datasets.dataset.DATASET",
"numpy.sum",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"numpy.random.RandomState",
"models.DIP.DIPModel",
"numpy.array",
"utils.compute_metrics"
] | [((628, 655), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (649, 655), True, 'import numpy as np\n'), ((705, 784), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ds.X', 'ds.y'], {'test_size': 'ds.test_size', 'random_state': 'random_state'}), '(ds.X, ds.y, test_size=ds.test_size, random_state=random_state)\n', (721, 784), False, 'from sklearn.model_selection import train_test_split\n'), ((966, 991), 'numpy.zeros', 'np.zeros', (['X_test.shape[0]'], {}), '(X_test.shape[0])\n', (974, 991), True, 'import numpy as np\n'), ((1068, 1156), 'utils.compute_metrics', 'compute_metrics', ([], {'y_true': 'y_test', 'y_pred': 'y_test_pred', 'y_prob': 'y_test_prob', 'verbose': '(True)'}), '(y_true=y_test, y_pred=y_test_pred, y_prob=y_test_prob,\n verbose=True)\n', (1083, 1156), False, 'from utils import compute_metrics\n'), ((1313, 1375), 'datasets.dataset.DATASET', 'DATASET', ([], {'name': '"""musk"""', 'scalertype': '"""MinMaxScaler"""', 'test_size': '(0.4)'}), "(name='musk', scalertype='MinMaxScaler', test_size=0.4)\n", (1320, 1375), False, 'from datasets.dataset import DATASET\n'), ((1388, 1480), 'models.DIP.DIPModel', 'DIPModel', ([], {'ModelName': '"""DIP"""', 'n_neigh_list': '[100]', 'pathType': '"""nearest"""', 'distance': '"""manhattan"""'}), "(ModelName='DIP', n_neigh_list=[100], pathType='nearest', distance=\n 'manhattan')\n", (1396, 1480), False, 'from models.DIP import DIPModel\n'), ((1672, 1740), 'datasets.dataset.DATASET', 'DATASET', ([], {'name': '"""arrhythmia"""', 'scalertype': '"""MinMaxScaler"""', 'test_size': '(0.4)'}), "(name='arrhythmia', scalertype='MinMaxScaler', test_size=0.4)\n", (1679, 1740), False, 'from datasets.dataset import DATASET\n'), ((1790, 1815), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3]'], {}), '([0.1, 0.2, 0.3])\n', (1798, 1815), True, 'import numpy as np\n'), ((1908, 2007), 'models.DIP.DIPModel', 'DIPModel', ([], {'ModelName': '"""DIP"""', 'n_neigh_list': 'n_neigh_lists', 
'pathType': '"""nearest"""', 'distance': '"""manhattan"""'}), "(ModelName='DIP', n_neigh_list=n_neigh_lists, pathType='nearest',\n distance='manhattan')\n", (1916, 2007), False, 'from models.DIP import DIPModel\n'), ((932, 946), 'numpy.sum', 'np.sum', (['y_test'], {}), '(y_test)\n', (938, 946), True, 'import numpy as np\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest # noqa: F401
import torch
import theseus as th
from theseus.constants import EPS
from theseus.core.tests.common import check_copy_var
from theseus.utils import numeric_jacobian
from .common import (
check_adjoint,
check_compose,
check_exp_map,
check_inverse,
check_log_map,
check_projection_for_compose,
check_projection_for_exp_map,
check_projection_for_inverse,
check_projection_for_rotate_and_transform,
)
def create_random_se2(batch_size, rng):
    """Sample a batch of random SE2 elements via the exponential map.

    Angles are drawn uniformly in [-pi, pi) and translations from a
    standard normal, both consuming `rng` so the sample is reproducible.
    """
    theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi
    # Use the same generator for the translation part; previously only
    # theta consumed `rng`, so samples were not fully reproducible.
    u = torch.randn(batch_size, 2, generator=rng)
    tangent_vector = torch.cat([u, theta], dim=1)
    return th.SE2.exp_map(tangent_vector.double())
def test_exp_map():
    """Check the SE2 exponential map over several batch sizes."""
    for batch_size in [1, 20, 100]:
        angles = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size))
        translations = torch.randn(batch_size, 2)
        tangent_vector = torch.cat([translations, angles.unsqueeze(1)], dim=1)
        check_exp_map(tangent_vector.double(), th.SE2)
def test_log_map():
    """Check the SE2 log map and its exp-map projection over several batch sizes."""
    for batch_size in [1, 20, 100]:
        angles = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size))
        translations = torch.randn(batch_size, 2)
        tangent_vector = torch.cat([translations, angles.unsqueeze(1)], dim=1)
        check_log_map(tangent_vector, th.SE2)
        check_projection_for_exp_map(tangent_vector, th.SE2)
def test_compose():
    """Check SE2 composition on random pairs for several batch sizes."""
    generator = torch.Generator()
    generator.manual_seed(0)
    for batch_size in [1, 20, 100]:
        group_a = th.SE2.rand(batch_size, generator=generator, dtype=torch.float64)
        group_b = th.SE2.rand(batch_size, generator=generator, dtype=torch.float64)
        check_compose(group_a, group_b)
def test_inverse():
    """Check SE2 inversion on random elements for several batch sizes."""
    generator = torch.Generator()
    generator.manual_seed(0)
    for batch_size in [1, 20, 100]:
        group = th.SE2.rand(batch_size, generator=generator, dtype=torch.float64)
        check_inverse(group)
def test_adjoint():
    """Check the SE2 adjoint action on random tangent vectors."""
    generator = torch.Generator()
    generator.manual_seed(0)
    for batch_size in [1, 20, 100]:
        group = th.SE2.rand(batch_size, generator=generator, dtype=torch.float64)
        tangent_vector = torch.randn(batch_size, 3).double()
        check_adjoint(group, tangent_vector)
def test_copy():
    """Check that copying an SE2 variable yields an equivalent object."""
    rng = torch.Generator()
    # Seed the generator for reproducibility, consistent with the other
    # tests in this file (it was previously left unseeded).
    rng.manual_seed(0)
    se2 = th.SE2.rand(1, generator=rng, dtype=torch.float64)
    check_copy_var(se2)
def test_transform_from_and_to():
    """Check SE2.transform_to / transform_from results and jacobians.

    Covers broadcasting between group and point batch sizes (either may be 1,
    or they must match); transform_to is validated against an explicit
    homogeneous-coordinates matrix product, transform_from by round-tripping,
    and both jacobians against numeric differentiation.
    """
    rng = torch.Generator()
    rng.manual_seed(0)
    for _ in range(10):  # repeat a few times
        for batch_size_se2 in [1, 20, 100]:
            for batch_size_pnt in [1, 20, 100]:
                # Skip incompatible batch-size combinations (no broadcasting
                # is possible unless one of the sizes is 1 or they match).
                if (
                    batch_size_se2 != 1
                    and batch_size_pnt != 1
                    and batch_size_pnt != batch_size_se2
                ):
                    continue
                se2 = th.SE2.rand(batch_size_se2, generator=rng, dtype=torch.float64)
                point_tensor = torch.randn(batch_size_pnt, 2).double()
                # Homogeneous coordinates: append a column of ones.
                point_tensor_ext = torch.cat(
                    (point_tensor, torch.ones(batch_size_pnt, 1).double()), dim=1
                )
                jacobians_to = []
                point_to = se2.transform_to(point_tensor, jacobians=jacobians_to)
                # Reference: g^-1 applied as a 3x3 matrix to the homogeneous
                # point, keeping the first two coordinates.
                expected_to = (
                    se2.inverse().to_matrix() @ point_tensor_ext.unsqueeze(2)
                )[:, :2]
                jacobians_from = []
                point_from = se2.transform_from(point_to, jacobians_from)
                # Check the operation result
                assert torch.allclose(expected_to.squeeze(2), point_to.data, atol=EPS)
                assert torch.allclose(point_tensor, point_from.data, atol=EPS)
                # Check the jacobians
                expected_jac = numeric_jacobian(
                    lambda groups: groups[0].transform_to(groups[1]),
                    [se2, th.Point2(point_tensor)],
                    function_dim=2,
                )
                assert jacobians_to[0].shape == expected_jac[0].shape
                assert jacobians_to[1].shape == expected_jac[1].shape
                assert torch.allclose(jacobians_to[0], expected_jac[0])
                assert torch.allclose(jacobians_to[1], expected_jac[1])
                expected_jac = numeric_jacobian(
                    lambda groups: groups[0].transform_from(groups[1]),
                    [se2, point_to],
                    function_dim=2,
                )
                assert jacobians_from[0].shape == expected_jac[0].shape
                assert jacobians_from[1].shape == expected_jac[1].shape
                assert torch.allclose(jacobians_from[0], expected_jac[0])
                assert torch.allclose(jacobians_from[1], expected_jac[1])
def test_xy_jacobian():
    """Check the analytic jacobian of SE2.xy() against a numeric jacobian."""
    rng = torch.Generator()
    rng.manual_seed(0)
    for batch_size in [1, 20, 100]:
        se2 = th.SE2.rand(batch_size, generator=rng, dtype=torch.float64)
        jacobian = []
        se2.xy(jacobians=jacobian)
        expected_jac = numeric_jacobian(
            lambda groups: th.Point2(groups[0].xy()), [se2], function_dim=2
        )
        # The result of allclose was previously discarded, so a jacobian
        # mismatch could never fail the test; assert it explicitly.
        assert torch.allclose(jacobian[0], expected_jac[0])
def test_theta_jacobian():
    """Check the analytic jacobian of SE2.theta() against a numeric jacobian."""
    rng = torch.Generator()
    rng.manual_seed(0)
    for batch_size in [1, 20, 100]:
        se2 = th.SE2.rand(batch_size, generator=rng, dtype=torch.float64)
        jacobian = []
        se2.theta(jacobians=jacobian)
        expected_jac = numeric_jacobian(
            lambda groups: th.Vector(data=groups[0].theta()), [se2], function_dim=1
        )
        # The result of allclose was previously discarded, so a jacobian
        # mismatch could never fail the test; assert it explicitly.
        assert torch.allclose(jacobian[0], expected_jac[0])
def test_projection():
    """Check tangent-space projections for transform_to/from, compose, inverse."""
    generator = torch.Generator()
    generator.manual_seed(0)
    for _ in range(10):  # repeat a few times
        for batch_size in [1, 20, 100]:
            # Test SE2.transform_to
            check_projection_for_rotate_and_transform(
                th.SE2, th.Point2, th.SE2.transform_to, batch_size, generator
            )
            # Test SE2.transform_from
            check_projection_for_rotate_and_transform(
                th.SE2, th.Point2, th.SE2.transform_from, batch_size, generator
            )
            # Test SE2.compose
            check_projection_for_compose(th.SE2, batch_size, generator)
            # Test SE2.inverse
            check_projection_for_inverse(th.SE2, batch_size, generator)
| [
"torch.ones",
"torch.cat",
"theseus.SE2.rand",
"torch.randn",
"theseus.Point2",
"theseus.core.tests.common.check_copy_var",
"numpy.linspace",
"torch.rand",
"torch.allclose",
"torch.Generator"
] | [((784, 810), 'torch.randn', 'torch.randn', (['batch_size', '(2)'], {}), '(batch_size, 2)\n', (795, 810), False, 'import torch\n'), ((832, 860), 'torch.cat', 'torch.cat', (['[u, theta]'], {'dim': '(1)'}), '([u, theta], dim=1)\n', (841, 860), False, 'import torch\n'), ((1580, 1597), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (1595, 1597), False, 'import torch\n'), ((1877, 1894), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (1892, 1894), False, 'import torch\n'), ((2087, 2104), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (2102, 2104), False, 'import torch\n'), ((2357, 2374), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (2372, 2374), False, 'import torch\n'), ((2385, 2435), 'theseus.SE2.rand', 'th.SE2.rand', (['(1)'], {'generator': 'rng', 'dtype': 'torch.float64'}), '(1, generator=rng, dtype=torch.float64)\n', (2396, 2435), True, 'import theseus as th\n'), ((2440, 2459), 'theseus.core.tests.common.check_copy_var', 'check_copy_var', (['se2'], {}), '(se2)\n', (2454, 2459), False, 'from theseus.core.tests.common import check_copy_var\n'), ((2506, 2523), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (2521, 2523), False, 'import torch\n'), ((4863, 4880), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (4878, 4880), False, 'import torch\n'), ((5290, 5307), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (5305, 5307), False, 'import torch\n'), ((5724, 5741), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (5739, 5741), False, 'import torch\n'), ((1055, 1081), 'torch.randn', 'torch.randn', (['batch_size', '(2)'], {}), '(batch_size, 2)\n', (1066, 1081), False, 'import torch\n'), ((1347, 1373), 'torch.randn', 'torch.randn', (['batch_size', '(2)'], {}), '(batch_size, 2)\n', (1358, 1373), False, 'import torch\n'), ((1673, 1732), 'theseus.SE2.rand', 'th.SE2.rand', (['batch_size'], {'generator': 'rng', 'dtype': 'torch.float64'}), '(batch_size, generator=rng, dtype=torch.float64)\n', (1684, 1732), 
True, 'import theseus as th\n'), ((1749, 1808), 'theseus.SE2.rand', 'th.SE2.rand', (['batch_size'], {'generator': 'rng', 'dtype': 'torch.float64'}), '(batch_size, generator=rng, dtype=torch.float64)\n', (1760, 1808), True, 'import theseus as th\n'), ((1968, 2027), 'theseus.SE2.rand', 'th.SE2.rand', (['batch_size'], {'generator': 'rng', 'dtype': 'torch.float64'}), '(batch_size, generator=rng, dtype=torch.float64)\n', (1979, 2027), True, 'import theseus as th\n'), ((2178, 2237), 'theseus.SE2.rand', 'th.SE2.rand', (['batch_size'], {'generator': 'rng', 'dtype': 'torch.float64'}), '(batch_size, generator=rng, dtype=torch.float64)\n', (2189, 2237), True, 'import theseus as th\n'), ((4954, 5013), 'theseus.SE2.rand', 'th.SE2.rand', (['batch_size'], {'generator': 'rng', 'dtype': 'torch.float64'}), '(batch_size, generator=rng, dtype=torch.float64)\n', (4965, 5013), True, 'import theseus as th\n'), ((5206, 5250), 'torch.allclose', 'torch.allclose', (['jacobian[0]', 'expected_jac[0]'], {}), '(jacobian[0], expected_jac[0])\n', (5220, 5250), False, 'import torch\n'), ((5381, 5440), 'theseus.SE2.rand', 'th.SE2.rand', (['batch_size'], {'generator': 'rng', 'dtype': 'torch.float64'}), '(batch_size, generator=rng, dtype=torch.float64)\n', (5392, 5440), True, 'import theseus as th\n'), ((5644, 5688), 'torch.allclose', 'torch.allclose', (['jacobian[0]', 'expected_jac[0]'], {}), '(jacobian[0], expected_jac[0])\n', (5658, 5688), False, 'import torch\n'), ((1003, 1041), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', 'batch_size'], {}), '(-np.pi, np.pi, batch_size)\n', (1014, 1041), True, 'import numpy as np\n'), ((1295, 1333), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', 'batch_size'], {}), '(-np.pi, np.pi, batch_size)\n', (1306, 1333), True, 'import numpy as np\n'), ((715, 755), 'torch.rand', 'torch.rand', (['batch_size', '(1)'], {'generator': 'rng'}), '(batch_size, 1, generator=rng)\n', (725, 755), False, 'import torch\n'), ((2256, 2282), 'torch.randn', 
'torch.randn', (['batch_size', '(3)'], {}), '(batch_size, 3)\n', (2267, 2282), False, 'import torch\n'), ((2918, 2981), 'theseus.SE2.rand', 'th.SE2.rand', (['batch_size_se2'], {'generator': 'rng', 'dtype': 'torch.float64'}), '(batch_size_se2, generator=rng, dtype=torch.float64)\n', (2929, 2981), True, 'import theseus as th\n'), ((3717, 3772), 'torch.allclose', 'torch.allclose', (['point_tensor', 'point_from.data'], {'atol': 'EPS'}), '(point_tensor, point_from.data, atol=EPS)\n', (3731, 3772), False, 'import torch\n'), ((4201, 4249), 'torch.allclose', 'torch.allclose', (['jacobians_to[0]', 'expected_jac[0]'], {}), '(jacobians_to[0], expected_jac[0])\n', (4215, 4249), False, 'import torch\n'), ((4273, 4321), 'torch.allclose', 'torch.allclose', (['jacobians_to[1]', 'expected_jac[1]'], {}), '(jacobians_to[1], expected_jac[1])\n', (4287, 4321), False, 'import torch\n'), ((4702, 4752), 'torch.allclose', 'torch.allclose', (['jacobians_from[0]', 'expected_jac[0]'], {}), '(jacobians_from[0], expected_jac[0])\n', (4716, 4752), False, 'import torch\n'), ((4776, 4826), 'torch.allclose', 'torch.allclose', (['jacobians_from[1]', 'expected_jac[1]'], {}), '(jacobians_from[1], expected_jac[1])\n', (4790, 4826), False, 'import torch\n'), ((3013, 3043), 'torch.randn', 'torch.randn', (['batch_size_pnt', '(2)'], {}), '(batch_size_pnt, 2)\n', (3024, 3043), False, 'import torch\n'), ((3957, 3980), 'theseus.Point2', 'th.Point2', (['point_tensor'], {}), '(point_tensor)\n', (3966, 3980), True, 'import theseus as th\n'), ((3134, 3163), 'torch.ones', 'torch.ones', (['batch_size_pnt', '(1)'], {}), '(batch_size_pnt, 1)\n', (3144, 3163), False, 'import torch\n')] |
from __future__ import print_function
import numpy as np
from warnings import warn
from joblib import Parallel, delayed
from . import utils
import copy,argparse,os,math,random,time
from scipy import io,linalg
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from scipy.linalg import blas
import warnings
import pandas as pd
from numpy import dot,multiply
from math import sqrt
import warnings
import numbers
import time
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_random_state, check_array
from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from sklearn.utils.extmath import safe_min
from sklearn.utils.validation import check_is_fitted, check_non_negative
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition.cdnmf_fast import _update_cdnmf_fast
EPSILON = np.finfo(np.float32).eps
INTEGER_TYPES = (numbers.Integral, np.integer)
class netNMFGD:
    '''
    Performs netNMF-sc with gradient descent using Tensorflow
    '''
    def __init__(self, distance="KL", d=None, N=None, alpha=10, n_inits=1, tol=1e-2, max_iter=20000, n_jobs=1, weight=0.1, parallel_backend='multiprocessing', normalize=True, sparsity=0.75, lr=0.0001):
        """
        distance: 'KL' or 'frobenius' loss between X and WH
        d: number of dimensions
        N: Network (weighted adjacency matrix)
        alpha: regularization parameter
        n_inits: number of runs to make with different random inits (in order to avoid being stuck in local minima)
        n_jobs: number of parallel jobs to run, when n_inits > 1
        tol: stopping criteria
        max_iter: stopping criteria
        weight: weight assigned to zero entries of X in the mask M
        sparsity: fraction used when thresholding the network
        lr: initial learning rate for the Adam optimizer
        """
        self.X = None
        self.M = None
        self.d = d
        self.N = N
        self.alpha = alpha
        self.n_inits = n_inits
        self.tol = tol
        self.max_iter = max_iter
        self.n_jobs = n_jobs
        self.parallel_backend = parallel_backend
        self.normalize = normalize
        self.sparsity = sparsity
        self.weight = weight
        self.distance = distance
        self.lr = lr
    def _init(self, X):
        # Random non-negative initialization of both factors, as
        # Fortran-ordered float32 arrays for TensorFlow.
        temp_H = np.random.randn(self.d, X.shape[1]).astype(np.float32)
        temp_W = np.random.randn(X.shape[0], self.d).astype(np.float32)
        temp_H = np.array(temp_H, order='F')
        temp_W = np.array(temp_W, order='F')
        return abs(temp_H), abs(temp_W)
    def _fit(self, X):
        """Run one gradient-descent factorization of X.

        Returns a dict with the learnt factors ('W', 'H'), the final
        objective value ('obj') and a convergence flag ('conv').
        """
        import tensorflow as tf
        temp_H, temp_W = self._init(X)
        conv = False
        mask = tf.constant(self.M.astype(np.float32))
        eps = tf.constant(np.float32(1e-8))
        A = tf.constant(X.astype(np.float32)) + eps
        H = tf.Variable(temp_H.astype(np.float32))
        W = tf.Variable(temp_W.astype(np.float32))
        print(np.max(mask), np.min(mask), np.sum(mask))
        WH = tf.matmul(W, H)
        if self.weight < 1:
            # Down-weight the zero entries of X (candidate dropouts).
            WH = tf.multiply(mask, WH)
        WH += eps
        L_s = tf.constant(self.L.astype(np.float32))
        alpha_s = tf.constant(np.float32(self.alpha))
        if self.distance == 'frobenius':
            cost0 = tf.reduce_sum(tf.pow(A - WH, 2))
            costL = alpha_s * tf.linalg.trace(tf.matmul(tf.transpose(W), tf.matmul(L_s, W)))
        elif self.distance == 'KL':
            # tf.math.divide / tf.linalg.trace replace the removed tf.div /
            # tf.trace so both branches use the same (TF2-compatible) API.
            cost0 = tf.reduce_sum(tf.multiply(A, tf.math.log(tf.math.divide(A, WH))) - A + WH)
            costL = alpha_s * tf.linalg.trace(tf.matmul(tf.transpose(W), tf.matmul(L_s, W)))
        else:
            raise ValueError('Select frobenius or KL for distance')
        if self.alpha > 0:
            # Objective = reconstruction loss + alpha * tr(W.T L W).
            cost = cost0 + costL
        else:
            cost = cost0
        lr = self.lr
        decay = 0.95
        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.compat.v1.train.exponential_decay(lr, global_step, self.max_iter, decay, staircase=True)
        optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate, epsilon=.1)
        train_step = optimizer.minimize(cost, global_step=global_step)
        init = tf.compat.v1.global_variables_initializer()
        # Clipping operation. This ensures that the learnt W and H stay
        # non-negative after every gradient step.
        clip_W = tf.compat.v1.assign(W, tf.maximum(tf.zeros_like(W), W))
        clip_H = tf.compat.v1.assign(H, tf.maximum(tf.zeros_like(H), H))
        clip = tf.group(clip_W, clip_H)
        c = np.inf
        with tf.compat.v1.Session() as sess:
            sess.run(init)
            for i in range(self.max_iter):
                sess.run(train_step)
                sess.run(clip)
                if i % 300 == 0:
                    # Track the decrease of the objective every 300 steps.
                    c2 = sess.run(cost)
                    e = c - c2
                    c = c2
                    if i % 10000 == 0:
                        print(i, c, e)
                    if e < self.tol:
                        conv = True
                        break
            learnt_W = sess.run(W)
            learnt_H = sess.run(H)
        # reset_default_graph lives under compat.v1 in TF2.
        tf.compat.v1.reset_default_graph()
        return {
            'conv': conv,
            'obj': c,
            'H': learnt_H,
            'W': learnt_W
        }
    def load_10X(self, direc=None, genome='mm10'):
        # Load a 10X Genomics expression matrix (hdf5 file or mtx directory).
        if direc.endswith('hdf5') or direc.endswith('h5'):
            X, genenames = utils.import_10X_hdf5(direc, genome)
        else:
            X, genenames = utils.import_10X_mtx(direc)
        self.X = X
        self.genes = genenames
    def load_network(self, net=None, genenames=None, sparsity=.75):
        # Load and threshold a gene-gene network from file.
        if net:
            if net.endswith('.txt'):
                network, netgenes = utils.import_network_from_gene_pairs(net, genenames)
            else:
                network, netgenes = utils.import_network(net, genenames, sparsity)
            # These statements previously ran even when net was None,
            # raising a NameError; they only make sense once a network
            # has actually been loaded.
            network = utils.network_threshold(network, sparsity)
            self.N = network
            self.netgenes = netgenes
    def fit_transform(self, X=None):
        """Factor self.X (or X, if given) into non-negative W, H minimizing
        the chosen graph-regularized loss; returns the best (W, H) over
        n_inits random restarts."""
        if isinstance(X, np.ndarray):
            self.X = X
        # If data and network were imported from files, reorder the network
        # to match the gene order of X. getattr guards against the
        # attributes never having been set (X passed directly).
        if isinstance(getattr(self, 'genes', None), np.ndarray) and isinstance(getattr(self, 'netgenes', None), np.ndarray):
            assert type(self.X) == np.ndarray
            assert type(self.N) == np.ndarray
            network = utils.reorder(self.genes, self.netgenes, self.N, self.sparsity)
            self.N = network
            self.netgenes = self.genes
        if self.normalize:
            print('library size normalizing...')
            self.X = utils.normalize(self.X)
        #self.X = utils.log_transform(self.X)
        # M down-weights the zero entries of X (candidate dropouts).
        M = np.ones_like(self.X)
        M[self.X == 0] = self.weight
        self.M = M
        if self.d is None:
            # Use self.X so this also works when X was loaded from file
            # (previously this read the possibly-None argument X).
            self.d = min(self.X.shape)
            print('rank set to:', self.d)
        if self.N is not None:
            if np.max(abs(self.N)) > 0:
                self.N = self.N / np.max(abs(self.N))
            N = self.N
            # Degree matrix, used to build the graph Laplacian L = D - N.
            D = np.sum(abs(self.N), axis=0) * np.eye(self.N.shape[0])
            print(np.count_nonzero(N), 'edges')
            self.D = D
            self.N = N
            self.L = self.D - self.N
            assert utils.check_symmetric(self.L)
        else:
            # Without a network the Laplacian is zero (no regularization).
            self.N = np.eye(self.X.shape[0])
            self.D = np.eye(self.X.shape[0])
            self.L = self.D - self.N
        results = Parallel(n_jobs=self.n_jobs, backend=self.parallel_backend)(delayed(self._fit)(self.X) for x in range(self.n_inits))
        best_results = {"obj": np.inf, "H": None, "W": None}
        for r in results:
            if r['obj'] < best_results['obj']:
                best_results = r
        if 'conv' not in best_results:
            # 'obj' (not the nonexistent 'e' key, which raised KeyError)
            # holds the final error value.
            warn("Did not converge after {} iterations. Error is {}. Try increasing `max_iter`.".format(self.max_iter, best_results['obj']))
        return (best_results['W'], best_results['H'])
# NMF code is adapted from the non-negative matrix factorization implementation in the scikit-learn library
def norm(x):
    """Dot product-based Euclidean norm implementation
    See: http://fseoane.net/blog/2011/computing-the-vector-norm/
    Parameters
    ----------
    x : array-like
        Vector for which to compute the norm
    """
    # sqrt of the squared L2 norm, computed via a dot product.
    squared = squared_norm(x)
    return sqrt(squared)
def trace_dot(X, Y):
    """Trace of np.dot(X, Y.T).
    Parameters
    ----------
    X : array-like
        First matrix
    Y : array-like
        Second matrix
    """
    # tr(X @ Y.T) equals the elementwise sum of X * Y, i.e. the dot
    # product of the two flattened matrices.
    flat_x = X.ravel()
    flat_y = Y.ravel()
    return np.dot(flat_x, flat_y)
def _check_init(A, shape, whom):
    """Validate a user-supplied initial factor matrix.

    Checks that A has the expected shape, is non-negative, and is not
    identically zero; raises ValueError otherwise.
    """
    A = check_array(A)
    actual_shape = np.shape(A)
    if actual_shape != shape:
        raise ValueError('Array with wrong shape passed to %s. Expected %s, '
                         'but got %s ' % (whom, shape, actual_shape))
    check_non_negative(A, whom)
    if np.max(A) == 0:
        raise ValueError('Array passed to %s is full of zeros.' % whom)
def _beta_divergence(lam,N,D,X, W, H, beta, square_root=False):
    """Beta-divergence between X and dot(W, H), plus the netNMF graph
    penalty lam * tr(W.T L W) with L = D - N (added in the KL branch only).

    beta may be a string ('frobenius', 'KL', 'itakura-saito') or a float.
    With square_root=True, sqrt(2 * divergence) is returned instead.
    """
    # Graph Laplacian of the gene network.
    L = D - N
    beta = _beta_loss_to_float(beta)
    # The method can be called with scalars
    if not sp.issparse(X):
        X = np.atleast_2d(X)
    W = np.atleast_2d(W)
    H = np.atleast_2d(H)
    # Frobenius norm
    if beta == 2:
        # Avoid the creation of the dense np.dot(W, H) if X is sparse.
        if sp.issparse(X):
            norm_X = np.dot(X.data, X.data)
            norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
            cross_prod = trace_dot((X * H.T), W)
            res = (norm_X + norm_WH - 2. * cross_prod) / 2.
        else:
            res = squared_norm(X - np.dot(W, H)) / 2.
        # NOTE(review): this branch returns without adding the graph
        # penalty; only the KL branch below includes the lam term.
        if square_root:
            return np.sqrt(res * 2)
        else:
            return res
    if sp.issparse(X):
        # compute np.dot(W, H) only where X is nonzero
        WH_data = _special_sparse_dot(W, H, X).data
        X_data = X.data
    else:
        WH = np.dot(W, H)
        WH_data = WH.ravel()
        X_data = X.ravel()
    # do not affect the zeros: here 0 ** (-1) = 0 and not infinity
    indices = X_data > EPSILON
    WH_data = WH_data[indices]
    X_data = X_data[indices]
    # used to avoid division by zero
    WH_data[WH_data == 0] = EPSILON
    # generalized KL divergence
    if beta == 1:
        # fast and memory efficient computation of np.sum(np.dot(W, H))
        sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
        # computes np.sum(X * log(X / WH)) only where X is nonzero
        div = X_data / WH_data
        res = np.dot(X_data, np.log(div))
        # add full np.sum(np.dot(W, H)) - np.sum(X)
        res += sum_WH - X_data.sum()
        res += lam * np.trace(np.dot(np.dot(W.T,L),W)) ### netNMF
    # Itakura-Saito divergence
    elif beta == 0:
        div = X_data / WH_data
        res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))
    # beta-divergence, beta not in (0, 1, 2)
    else:
        if sp.issparse(X):
            # slow loop, but memory efficient computation of :
            # np.sum(np.dot(W, H) ** beta)
            sum_WH_beta = 0
            for i in range(X.shape[1]):
                sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
        else:
            sum_WH_beta = np.sum(WH ** beta)
        sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
        res = (X_data ** beta).sum() - beta * sum_X_WH
        res += sum_WH_beta * (beta - 1)
        res /= beta * (beta - 1)
    if square_root:
        res = np.sqrt(2 * res)
        # Debug print left in by the authors; the value is also returned.
        print(res)
        return res
    else:
        print(res)
        return res
def _special_sparse_dot(W, H, X):
"""Computes np.dot(W, H), only where X is non zero."""
if sp.issparse(X):
ii, jj = X.nonzero()
dot_vals = np.multiply(W[ii, :], H.T[jj, :]).sum(axis=1)
WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
return WH.tocsr()
else:
return np.dot(W, H)
def _compute_regularization(alpha, l1_ratio, regularization):
"""Compute L1 and L2 regularization coefficients for W and H"""
alpha_H = 0.
alpha_W = 0.
if regularization in ('both', 'components'):
alpha_H = float(alpha)
if regularization in ('both', 'transformation'):
alpha_W = float(alpha)
l1_reg_W = alpha_W * l1_ratio
l1_reg_H = alpha_H * l1_ratio
l2_reg_W = alpha_W * (1. - l1_ratio)
l2_reg_H = alpha_H * (1. - l1_ratio)
return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
def _check_string_param(solver, regularization, beta_loss, init):
    """Resolve beta_loss to its numeric value (solver, regularization and
    init are accepted for interface compatibility but not validated here)."""
    return _beta_loss_to_float(beta_loss)
def _beta_loss_to_float(beta_loss):
"""Convert string beta_loss to float"""
allowed_beta_loss = {'frobenius': 2,
'KL': 1,
'itakura-saito': 0}
if isinstance(beta_loss, str) and beta_loss in allowed_beta_loss:
beta_loss = allowed_beta_loss[beta_loss]
if not isinstance(beta_loss, numbers.Number):
raise ValueError('Invalid beta_loss parameter: got %r instead '
'of one of %r, or a float.' %
(beta_loss, allowed_beta_loss.keys()))
return beta_loss
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
                    random_state=None):
    """Compute initial factors W, H for NMF.

    init selects the scheme: 'random' (scaled absolute Gaussians),
    'nndsvd' (Nonnegative Double Singular Value Decomposition),
    'nndsvda' (NNDSVD with zeros filled by the data mean), or
    'nndsvdar' (NNDSVD with zeros filled by small random values).
    When init is None, 'nndsvd' is used if n_components fits within
    min(n_samples, n_features), otherwise 'random'.
    """
    check_non_negative(X, "NMF initialization")
    n_samples, n_features = X.shape
    if (init is not None and init != 'random'
            and n_components > min(n_samples, n_features)):
        raise ValueError("init = '{}' can only be used when "
                         "n_components <= min(n_samples, n_features)"
                         .format(init))
    if init is None:
        if n_components <= min(n_samples, n_features):
            init = 'nndsvd'
        else:
            init = 'random'
    # Random initialization
    if init == 'random':
        # Scale so that W @ H has roughly the same mean as X.
        avg = np.sqrt(X.mean() / n_components)
        rng = check_random_state(random_state)
        H = avg * rng.randn(n_components, n_features)
        W = avg * rng.randn(n_samples, n_components)
        # we do not write np.abs(H, out=H) to stay compatible with
        # numpy 1.5 and earlier where the 'out' keyword is not
        # supported as a kwarg on ufuncs
        np.abs(H, H)
        np.abs(W, W)
        return W, H
    # NNDSVD initialization
    U, S, V = randomized_svd(X, n_components, random_state=random_state)
    W, H = np.zeros(U.shape), np.zeros(V.shape)
    # The leading singular triplet is non-negative
    # so it can be used as is for initialization.
    W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
    H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
    for j in range(1, n_components):
        x, y = U[:, j], V[j, :]
        # extract positive and negative parts of column vectors
        x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
        x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
        # and their norms
        x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
        x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
        m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
        # choose update
        # Keep whichever sign pattern carries more of the singular
        # vector's mass.
        if m_p > m_n:
            u = x_p / x_p_nrm
            v = y_p / y_p_nrm
            sigma = m_p
        else:
            u = x_n / x_n_nrm
            v = y_n / y_n_nrm
            sigma = m_n
        lbd = np.sqrt(S[j] * sigma)
        W[:, j] = lbd * u
        H[j, :] = lbd * v
    # Zero out tiny entries for numerical cleanliness.
    W[W < eps] = 0
    H[H < eps] = 0
    if init == "nndsvd":
        pass
    elif init == "nndsvda":
        avg = X.mean()
        W[W == 0] = avg
        H[H == 0] = avg
    elif init == "nndsvdar":
        rng = check_random_state(random_state)
        avg = X.mean()
        W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
        H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
    else:
        raise ValueError(
            'Invalid init parameter: got %r instead of one of %r' %
            (init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
    return W, H
def _multiplicative_update_w(lam,N,D,X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
                             H_sum=None, HHt=None, XHt=None, update_H=True):
    """update W in Multiplicative Update NMF.

    Returns the multiplicative factor delta_W (the caller applies
    W *= delta_W), plus the cached H_sum, HHt, XHt for reuse when
    update_H is False. For beta_loss == 1 (KL) the numerator gains
    lam * N @ W and the denominator lam * D @ W — the netNMF terms from
    the Laplacian penalty lam * tr(W.T (D - N) W).
    """
    if beta_loss == 2:
        # Numerator
        if XHt is None:
            XHt = safe_sparse_dot(X, H.T)
        if update_H:
            # avoid a copy of XHt, which will be re-computed (update_H=True)
            numerator = XHt
        else:
            # preserve the XHt, which is not re-computed (update_H=False)
            numerator = XHt.copy()
        # Denominator
        if HHt is None:
            HHt = np.dot(H, H.T)
        denominator = np.dot(W, HHt)
    else:
        # Numerator
        # if X is sparse, compute WH only where X is non zero
        WH_safe_X = _special_sparse_dot(W, H, X)
        if sp.issparse(X):
            WH_safe_X_data = WH_safe_X.data
            X_data = X.data
        else:
            WH_safe_X_data = WH_safe_X
            X_data = X
            # copy used in the Denominator
            WH = WH_safe_X.copy()
            if beta_loss - 1. < 0:
                WH[WH == 0] = EPSILON
        # to avoid taking a negative power of zero
        if beta_loss - 2. < 0:
            WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
        if beta_loss == 1:
            # In-place: WH_safe_X_data becomes X / WH (this also updates
            # WH_safe_X, whose .data it views when X is sparse).
            np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
            # Use the matrix WH_safe_X (not its raw .data array, which is
            # 1-D for sparse X and gave a wrongly-shaped product); the
            # unused C = np.dot(W, W.T) temporary was also removed.
            numerator = safe_sparse_dot(WH_safe_X, H.T) + lam * np.dot(N,W)
        elif beta_loss == 0:
            # speeds up computation time
            # refer to /numpy/numpy/issues/9363
            WH_safe_X_data **= -1
            WH_safe_X_data **= 2
            # element-wise multiplication
            WH_safe_X_data *= X_data
            numerator = safe_sparse_dot(WH_safe_X, H.T)
        else:
            WH_safe_X_data **= beta_loss - 2
            # element-wise multiplication
            WH_safe_X_data *= X_data
            numerator = safe_sparse_dot(WH_safe_X, H.T)
        # Denominator
        if beta_loss == 1:
            if H_sum is None:
                H_sum = np.sum(H, axis=1)  # shape(n_components, )
            denominator = H_sum[np.newaxis, :] + lam * np.dot(D,W)
        else:
            # computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T)
            if sp.issparse(X):
                # memory efficient computation
                # (compute row by row, avoiding the dense matrix WH)
                WHHt = np.empty(W.shape)
                for i in range(X.shape[0]):
                    WHi = np.dot(W[i, :], H)
                    if beta_loss - 1 < 0:
                        WHi[WHi == 0] = EPSILON
                    WHi **= beta_loss - 1
                    WHHt[i, :] = np.dot(WHi, H.T)
            else:
                WH **= beta_loss - 1
                WHHt = np.dot(WH, H.T)
            denominator = WHHt
    # Add L1 and L2 regularization
    if l1_reg_W > 0:
        denominator += l1_reg_W
    if l2_reg_W > 0:
        denominator = denominator + l2_reg_W * W
    denominator[denominator == 0] = EPSILON
    numerator /= denominator
    delta_W = numerator
    # gamma is in ]0, 1]
    if gamma != 1:
        delta_W **= gamma
    return delta_W, H_sum, HHt, XHt
def _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma):
    """update H in Multiplicative Update NMF.

    Returns the multiplicative factor delta_H (the caller applies
    H *= delta_H). Unlike the W update, the netNMF graph terms do not
    appear here: the Laplacian penalty involves only W.
    """
    if beta_loss == 2:
        numerator = safe_sparse_dot(W.T, X)
        denominator = np.dot(np.dot(W.T, W), H)
    else:
        # Numerator
        WH_safe_X = _special_sparse_dot(W, H, X)
        if sp.issparse(X):
            WH_safe_X_data = WH_safe_X.data
            X_data = X.data
        else:
            WH_safe_X_data = WH_safe_X
            X_data = X
            # copy used in the Denominator
            WH = WH_safe_X.copy()
            if beta_loss - 1. < 0:
                WH[WH == 0] = EPSILON
        # to avoid division by zero
        if beta_loss - 2. < 0:
            WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
        if beta_loss == 1:
            # In-place: WH_safe_X_data becomes X / WH.
            np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
        elif beta_loss == 0:
            # speeds up computation time
            # refer to /numpy/numpy/issues/9363
            WH_safe_X_data **= -1
            WH_safe_X_data **= 2
            # element-wise multiplication
            WH_safe_X_data *= X_data
        else:
            WH_safe_X_data **= beta_loss - 2
            # element-wise multiplication
            WH_safe_X_data *= X_data
        # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)
        numerator = safe_sparse_dot(W.T, WH_safe_X)
        # Denominator
        if beta_loss == 1:
            W_sum = np.sum(W, axis=0)  # shape(n_components, )
            W_sum[W_sum == 0] = 1.
            denominator = W_sum[:, np.newaxis]
        # beta_loss not in (1, 2)
        else:
            # computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1)
            if sp.issparse(X):
                # memory efficient computation
                # (compute column by column, avoiding the dense matrix WH)
                WtWH = np.empty(H.shape)
                for i in range(X.shape[1]):
                    WHi = np.dot(W, H[:, i])
                    if beta_loss - 1 < 0:
                        WHi[WHi == 0] = EPSILON
                    WHi **= beta_loss - 1
                    WtWH[:, i] = np.dot(W.T, WHi)
            else:
                WH **= beta_loss - 1
                WtWH = np.dot(W.T, WH)
            denominator = WtWH
    # Add L1 and L2 regularization
    if l1_reg_H > 0:
        denominator += l1_reg_H
    if l2_reg_H > 0:
        denominator = denominator + l2_reg_H * H
    denominator[denominator == 0] = EPSILON
    numerator /= denominator
    delta_H = numerator
    # gamma is in ]0, 1]
    if gamma != 1:
        delta_H **= gamma
    return delta_H
def _fit_multiplicative_update(lam,N,D,X, W, H, beta_loss='frobenius',
                               max_iter=200, tol=1e-4,
                               l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0,
                               update_H=True, verbose=0):
    """Iterate multiplicative updates of W (and optionally H) until the
    graph-regularized beta-divergence stops improving.

    lam, N, D parameterize the netNMF Laplacian penalty (see
    _multiplicative_update_w). Returns (W, H, n_iter). W and H are
    modified in place.
    """
    start_time = time.time()
    beta_loss = _beta_loss_to_float(beta_loss)
    # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
    if beta_loss < 1:
        gamma = 1. / (2. - beta_loss)
    elif beta_loss > 2:
        gamma = 1. / (beta_loss - 1.)
    else:
        gamma = 1.
    # used for the convergence criterion
    error_at_init = _beta_divergence(lam,N,D,X, W, H, beta_loss, square_root=True)
    previous_error = error_at_init
    H_sum, HHt, XHt = None, None, None
    for n_iter in range(1, max_iter + 1):
        # update W
        # H_sum, HHt and XHt are saved and reused if not update_H
        delta_W, H_sum, HHt, XHt = _multiplicative_update_w(lam,N,D,
            X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
            H_sum, HHt, XHt, update_H)
        W *= delta_W
        # necessary for stability with beta_loss < 1
        if beta_loss < 1:
            W[W < np.finfo(np.float64).eps] = 0.
        # update H
        if update_H:
            delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H,
                                               l2_reg_H, gamma)
            H *= delta_H
            # These values will be recomputed since H changed
            H_sum, HHt, XHt = None, None, None
            # necessary for stability with beta_loss < 1
            if beta_loss <= 1:
                H[H < np.finfo(np.float64).eps] = 0.
        # test convergence criterion every 10 iterations
        if tol > 0 and n_iter % 10 == 0:
            error = _beta_divergence(lam,N,D,X, W, H, beta_loss, square_root=True)
            if verbose:
                iter_time = time.time()
                print("Epoch %02d reached after %.3f seconds, error: %f" %
                      (n_iter, iter_time - start_time, error))
            # Stop when the relative improvement falls below tol.
            if (previous_error - error) / error_at_init < tol:
                break
            previous_error = error
    # do not print if we have already printed in the convergence test
    if verbose and (tol == 0 or n_iter % 10 != 0):
        end_time = time.time()
        print("Epoch %02d reached after %.3f seconds." %
              (n_iter, end_time - start_time))
    return W, H, n_iter
def non_negative_factorization(lam,N,D,X, W=None, H=None, n_components=None,
                               init='warn', update_H=True, solver='mu',
                               beta_loss='KL', tol=1e-4,
                               max_iter=400, alpha=0., l1_ratio=0.,
                               regularization=None, random_state=None,
                               verbose=0, shuffle=False):
    """Graph-regularized non-negative matrix factorization X ~ W @ H.

    Validates inputs, initializes (or checks user-provided) W and H, then
    runs the multiplicative-update solver with the netNMF Laplacian
    penalty parameterized by lam, N, D. Returns (W, H, n_iter).
    """
    X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
    check_non_negative(X, "NMF (input X)")
    beta_loss = _check_string_param(solver, regularization, beta_loss, init)
    # beta_loss <= 0 divides by X entries, so zeros would make it diverge.
    if safe_min(X) == 0 and beta_loss <= 0:
        raise ValueError("When beta_loss <= 0 and X contains zeros, "
                         "the solver may diverge. Please add small values to "
                         "X, or use a positive beta_loss.")
    n_samples, n_features = X.shape
    if n_components is None:
        n_components = n_features
    if not isinstance(n_components, INTEGER_TYPES) or n_components <= 0:
        raise ValueError("Number of components must be a positive integer;"
                         " got (n_components=%r)" % n_components)
    if not isinstance(max_iter, INTEGER_TYPES) or max_iter < 0:
        raise ValueError("Maximum number of iterations must be a positive "
                         "integer; got (max_iter=%r)" % max_iter)
    if not isinstance(tol, numbers.Number) or tol < 0:
        raise ValueError("Tolerance for stopping criteria must be "
                         "positive; got (tol=%r)" % tol)
    if init == "warn":
        if n_components < n_features:
            warnings.warn("The default value of init will change from "
                          "random to None in 0.23 to make it consistent "
                          "with decomposition.NMF.", FutureWarning)
        init = "random"
    # check W and H, or initialize them
    if init == 'custom' and update_H:
        _check_init(H, (n_components, n_features), "NMF (input H)")
        _check_init(W, (n_samples, n_components), "NMF (input W)")
    elif not update_H:
        # Transform-only mode: H is fixed, W starts at the mean scale.
        _check_init(H, (n_components, n_features), "NMF (input H)")
        avg = np.sqrt(X.mean() / n_components)
        W = np.full((n_samples, n_components), avg)
    else:
        W, H = _initialize_nmf(X, n_components, init=init,
                               random_state=random_state)
    l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = _compute_regularization(
        alpha, l1_ratio, regularization)
    W, H, n_iter = _fit_multiplicative_update(lam,N,D,X, W, H, beta_loss, max_iter,
                                              tol, l1_reg_W, l1_reg_H,
                                              l2_reg_W, l2_reg_H, update_H,
                                              verbose)
    if n_iter == max_iter and tol > 0:
        warnings.warn("Maximum number of iteration %d reached. Increase it to"
                      " improve convergence." % max_iter, ConvergenceWarning)
    return W, H, n_iter
class netNMFMU(BaseEstimator, TransformerMixin):
    def __init__(self, n_components=None, init=None, solver='mu',
                 beta_loss='KL', tol=1e-4, max_iter=400,
                 random_state=None, alpha=0., l1_ratio=0., verbose=0,
                 shuffle=False):
        """Store the estimator hyperparameters.

        n_components: rank of the factorization (None -> n_features)
        init: W/H initialization scheme passed to _initialize_nmf
        solver: only 'mu' (multiplicative updates) is implemented
        beta_loss: 'KL', 'frobenius', 'itakura-saito', or a float beta
        tol, max_iter: stopping criteria of the solver
        alpha, l1_ratio: L1/L2 regularization strengths for W and H
        """
        self.n_components = n_components
        self.init = init
        self.solver = solver
        self.beta_loss = beta_loss
        self.tol = tol
        self.max_iter = max_iter
        self.random_state = random_state
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        self.verbose = verbose
        self.shuffle = shuffle
def fit_transform(self, lam,N,X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
y : Ignored
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Returns
-------
W : array, shape (n_samples, n_components)
Transformed data.
"""
D = np.sum(abs(N),axis=0) * np.eye(N.shape[0])
N = np.dot(np.linalg.inv(D),N)
print(N.shape)
D = np.eye(D.shape[0])
print(D.shape)
X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
W, H, n_iter_ = non_negative_factorization(lam=lam,N=N,D=D,
X=X, W=W, H=H, n_components=self.n_components, init=self.init,
update_H=True, solver=self.solver, beta_loss=self.beta_loss,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
self.reconstruction_err_ = _beta_divergence(lam,N,D,X, W, H, self.beta_loss,
square_root=True)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, lam,N,D,X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be decomposed
y : Ignored
Returns
-------
self
"""
self.fit_transform(lam,N,D,X, **params)
return self
def transform(self, lam,N,D,X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix to be transformed by the model
Returns
-------
W : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
lam=lam,N=N,D=D,X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter,
alpha=self.alpha, l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
return W
def inverse_transform(self, W):
"""Transform data back to its original space.
Parameters
----------
W : {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed data matrix
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
| [
"sklearn.utils.check_random_state",
"numpy.abs",
"numpy.sum",
"numpy.maximum",
"scipy.sparse.issparse",
"tensorflow.reset_default_graph",
"numpy.empty",
"tensorflow.zeros_like",
"tensorflow.compat.v1.assign",
"tensorflow.matmul",
"tensorflow.compat.v1.train.exponential_decay",
"tensorflow.Vari... | [((871, 891), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (879, 891), True, 'import numpy as np\n'), ((8400, 8414), 'sklearn.utils.check_array', 'check_array', (['A'], {}), '(A)\n', (8411, 8414), False, 'from sklearn.utils import check_random_state, check_array\n'), ((8595, 8622), 'sklearn.utils.validation.check_non_negative', 'check_non_negative', (['A', 'whom'], {}), '(A, whom)\n', (8613, 8622), False, 'from sklearn.utils.validation import check_is_fitted, check_non_negative\n'), ((8945, 8961), 'numpy.atleast_2d', 'np.atleast_2d', (['W'], {}), '(W)\n', (8958, 8961), True, 'import numpy as np\n'), ((8970, 8986), 'numpy.atleast_2d', 'np.atleast_2d', (['H'], {}), '(H)\n', (8983, 8986), True, 'import numpy as np\n'), ((9514, 9528), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (9525, 9528), True, 'import scipy.sparse as sp\n'), ((11428, 11442), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (11439, 11442), True, 'import scipy.sparse as sp\n'), ((13023, 13066), 'sklearn.utils.validation.check_non_negative', 'check_non_negative', (['X', '"""NMF initialization"""'], {}), "(X, 'NMF initialization')\n", (13041, 13066), False, 'from sklearn.utils.validation import check_is_fitted, check_non_negative\n'), ((14060, 14118), 'sklearn.utils.extmath.randomized_svd', 'randomized_svd', (['X', 'n_components'], {'random_state': 'random_state'}), '(X, n_components, random_state=random_state)\n', (14074, 14118), False, 'from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, squared_norm\n'), ((21936, 21947), 'time.time', 'time.time', ([], {}), '()\n', (21945, 21947), False, 'import time\n'), ((24525, 24582), 'sklearn.utils.check_array', 'check_array', (['X'], {'accept_sparse': "('csr', 'csc')", 'dtype': 'float'}), "(X, accept_sparse=('csr', 'csc'), dtype=float)\n", (24536, 24582), False, 'from sklearn.utils import check_random_state, check_array\n'), ((24587, 24625), 
'sklearn.utils.validation.check_non_negative', 'check_non_negative', (['X', '"""NMF (input X)"""'], {}), "(X, 'NMF (input X)')\n", (24605, 24625), False, 'from sklearn.utils.validation import check_is_fitted, check_non_negative\n'), ((2302, 2329), 'numpy.array', 'np.array', (['temp_H'], {'order': '"""F"""'}), "(temp_H, order='F')\n", (2310, 2329), True, 'import numpy as np\n'), ((2346, 2373), 'numpy.array', 'np.array', (['temp_W'], {'order': '"""F"""'}), "(temp_W, order='F')\n", (2354, 2373), True, 'import numpy as np\n'), ((2849, 2864), 'tensorflow.matmul', 'tf.matmul', (['W', 'H'], {}), '(W, H)\n', (2858, 2864), True, 'import tensorflow as tf\n'), ((3702, 3733), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (3713, 3733), True, 'import tensorflow as tf\n'), ((3766, 3815), 'tensorflow.compat.v1.assign', 'tf.compat.v1.assign', (['global_step', '(global_step + 1)'], {}), '(global_step, global_step + 1)\n', (3785, 3815), True, 'import tensorflow as tf\n'), ((3840, 3935), 'tensorflow.compat.v1.train.exponential_decay', 'tf.compat.v1.train.exponential_decay', (['lr', 'global_step', 'self.max_iter', 'decay'], {'staircase': '(True)'}), '(lr, global_step, self.max_iter, decay,\n staircase=True)\n', (3876, 3935), True, 'import tensorflow as tf\n'), ((3953, 4027), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate', 'epsilon': '(0.1)'}), '(learning_rate=learning_rate, epsilon=0.1)\n', (3985, 4027), True, 'import tensorflow as tf\n'), ((4113, 4156), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (4154, 4156), True, 'import tensorflow as tf\n'), ((4395, 4419), 'tensorflow.group', 'tf.group', (['clip_W', 'clip_H'], {}), '(clip_W, clip_H)\n', (4403, 4419), True, 'import tensorflow as tf\n'), ((5001, 5025), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5023, 
5025), True, 'import tensorflow as tf\n'), ((6530, 6550), 'numpy.ones_like', 'np.ones_like', (['self.X'], {}), '(self.X)\n', (6542, 6550), True, 'import numpy as np\n'), ((8125, 8140), 'sklearn.utils.extmath.squared_norm', 'squared_norm', (['x'], {}), '(x)\n', (8137, 8140), False, 'from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, squared_norm\n'), ((8422, 8433), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (8430, 8433), True, 'import numpy as np\n'), ((8630, 8639), 'numpy.max', 'np.max', (['A'], {}), '(A)\n', (8636, 8639), True, 'import numpy as np\n'), ((8892, 8906), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (8903, 8906), True, 'import scipy.sparse as sp\n'), ((8920, 8936), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (8933, 8936), True, 'import numpy as np\n'), ((9109, 9123), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (9120, 9123), True, 'import scipy.sparse as sp\n'), ((9684, 9696), 'numpy.dot', 'np.dot', (['W', 'H'], {}), '(W, H)\n', (9690, 9696), True, 'import numpy as np\n'), ((11223, 11239), 'numpy.sqrt', 'np.sqrt', (['(2 * res)'], {}), '(2 * res)\n', (11230, 11239), True, 'import numpy as np\n'), ((11551, 11601), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['(dot_vals, (ii, jj))'], {'shape': 'X.shape'}), '((dot_vals, (ii, jj)), shape=X.shape)\n', (11564, 11601), True, 'import scipy.sparse as sp\n'), ((11653, 11665), 'numpy.dot', 'np.dot', (['W', 'H'], {}), '(W, H)\n', (11659, 11665), True, 'import numpy as np\n'), ((13644, 13676), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (13662, 13676), False, 'from sklearn.utils import check_random_state, check_array\n'), ((13963, 13975), 'numpy.abs', 'np.abs', (['H', 'H'], {}), '(H, H)\n', (13969, 13975), True, 'import numpy as np\n'), ((13984, 13996), 'numpy.abs', 'np.abs', (['W', 'W'], {}), '(W, W)\n', (13990, 13996), True, 'import numpy as np\n'), ((14130, 14147), 'numpy.zeros', 
'np.zeros', (['U.shape'], {}), '(U.shape)\n', (14138, 14147), True, 'import numpy as np\n'), ((14149, 14166), 'numpy.zeros', 'np.zeros', (['V.shape'], {}), '(V.shape)\n', (14157, 14166), True, 'import numpy as np\n'), ((14283, 14296), 'numpy.sqrt', 'np.sqrt', (['S[0]'], {}), '(S[0])\n', (14290, 14296), True, 'import numpy as np\n'), ((14299, 14314), 'numpy.abs', 'np.abs', (['U[:, 0]'], {}), '(U[:, 0])\n', (14305, 14314), True, 'import numpy as np\n'), ((14329, 14342), 'numpy.sqrt', 'np.sqrt', (['S[0]'], {}), '(S[0])\n', (14336, 14342), True, 'import numpy as np\n'), ((14345, 14360), 'numpy.abs', 'np.abs', (['V[0, :]'], {}), '(V[0, :])\n', (14351, 14360), True, 'import numpy as np\n'), ((15044, 15065), 'numpy.sqrt', 'np.sqrt', (['(S[j] * sigma)'], {}), '(S[j] * sigma)\n', (15051, 15065), True, 'import numpy as np\n'), ((16385, 16399), 'numpy.dot', 'np.dot', (['W', 'HHt'], {}), '(W, HHt)\n', (16391, 16399), True, 'import numpy as np\n'), ((16553, 16567), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (16564, 16567), True, 'import scipy.sparse as sp\n'), ((19174, 19197), 'sklearn.utils.extmath.safe_sparse_dot', 'safe_sparse_dot', (['W.T', 'X'], {}), '(W.T, X)\n', (19189, 19197), False, 'from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, squared_norm\n'), ((19337, 19351), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (19348, 19351), True, 'import scipy.sparse as sp\n'), ((20366, 20397), 'sklearn.utils.extmath.safe_sparse_dot', 'safe_sparse_dot', (['W.T', 'WH_safe_X'], {}), '(W.T, WH_safe_X)\n', (20381, 20397), False, 'from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, squared_norm\n'), ((23970, 23981), 'time.time', 'time.time', ([], {}), '()\n', (23979, 23981), False, 'import time\n'), ((26952, 27085), 'warnings.warn', 'warnings.warn', (["('Maximum number of iteration %d reached. Increase it to improve convergence.'\n % max_iter)", 'ConvergenceWarning'], {}), "(\n 'Maximum number of iteration %d reached. 
Increase it to improve convergence.'\n % max_iter, ConvergenceWarning)\n", (26965, 27085), False, 'import warnings\n'), ((28660, 28678), 'numpy.eye', 'np.eye', (['D.shape[0]'], {}), '(D.shape[0])\n', (28666, 28678), True, 'import numpy as np\n'), ((28714, 28771), 'sklearn.utils.check_array', 'check_array', (['X'], {'accept_sparse': "('csr', 'csc')", 'dtype': 'float'}), "(X, accept_sparse=('csr', 'csc'), dtype=float)\n", (28725, 28771), False, 'from sklearn.utils import check_random_state, check_array\n'), ((30284, 30322), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""n_components_"""'], {}), "(self, 'n_components_')\n", (30299, 30322), False, 'from sklearn.utils.validation import check_is_fitted, check_non_negative\n'), ((31242, 31280), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""n_components_"""'], {}), "(self, 'n_components_')\n", (31257, 31280), False, 'from sklearn.utils.validation import check_is_fitted, check_non_negative\n'), ((31296, 31323), 'numpy.dot', 'np.dot', (['W', 'self.components_'], {}), '(W, self.components_)\n', (31302, 31323), True, 'import numpy as np\n'), ((2609, 2626), 'numpy.float32', 'np.float32', (['(1e-08)'], {}), '(1e-08)\n', (2619, 2626), True, 'import numpy as np\n'), ((2796, 2808), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (2802, 2808), True, 'import numpy as np\n'), ((2809, 2821), 'numpy.min', 'np.min', (['mask'], {}), '(mask)\n', (2815, 2821), True, 'import numpy as np\n'), ((2822, 2834), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (2828, 2834), True, 'import numpy as np\n'), ((2911, 2932), 'tensorflow.multiply', 'tf.multiply', (['mask', 'WH'], {}), '(mask, WH)\n', (2922, 2932), True, 'import tensorflow as tf\n'), ((3033, 3055), 'numpy.float32', 'np.float32', (['self.alpha'], {}), '(self.alpha)\n', (3043, 3055), True, 'import numpy as np\n'), ((4453, 4475), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (4473, 4475), True, 
'import tensorflow as tf\n'), ((7140, 7158), 'numpy.eye', 'np.eye', (['X.shape[0]'], {}), '(X.shape[0])\n', (7146, 7158), True, 'import numpy as np\n'), ((7180, 7198), 'numpy.eye', 'np.eye', (['X.shape[0]'], {}), '(X.shape[0])\n', (7186, 7198), True, 'import numpy as np\n'), ((7263, 7322), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'backend': 'self.parallel_backend'}), '(n_jobs=self.n_jobs, backend=self.parallel_backend)\n', (7271, 7322), False, 'from joblib import Parallel, delayed\n'), ((9146, 9168), 'numpy.dot', 'np.dot', (['X.data', 'X.data'], {}), '(X.data, X.data)\n', (9152, 9168), True, 'import numpy as np\n'), ((9452, 9468), 'numpy.sqrt', 'np.sqrt', (['(res * 2)'], {}), '(res * 2)\n', (9459, 9468), True, 'import numpy as np\n'), ((10133, 10150), 'numpy.sum', 'np.sum', (['W'], {'axis': '(0)'}), '(W, axis=0)\n', (10139, 10150), True, 'import numpy as np\n'), ((10152, 10169), 'numpy.sum', 'np.sum', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (10158, 10169), True, 'import numpy as np\n'), ((10298, 10309), 'numpy.log', 'np.log', (['div'], {}), '(div)\n', (10304, 10309), True, 'import numpy as np\n'), ((10686, 10700), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (10697, 10700), True, 'import scipy.sparse as sp\n'), ((11022, 11059), 'numpy.dot', 'np.dot', (['X_data', '(WH_data ** (beta - 1))'], {}), '(X_data, WH_data ** (beta - 1))\n', (11028, 11059), True, 'import numpy as np\n'), ((14515, 14531), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (14525, 14531), True, 'import numpy as np\n'), ((14533, 14549), 'numpy.maximum', 'np.maximum', (['y', '(0)'], {}), '(y, 0)\n', (14543, 14549), True, 'import numpy as np\n'), ((16010, 16033), 'sklearn.utils.extmath.safe_sparse_dot', 'safe_sparse_dot', (['X', 'H.T'], {}), '(X, H.T)\n', (16025, 16033), False, 'from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, squared_norm\n'), ((16348, 16362), 'numpy.dot', 'np.dot', (['H', 'H.T'], {}), '(H, H.T)\n', (16354, 
16362), True, 'import numpy as np\n'), ((17048, 17101), 'numpy.divide', 'np.divide', (['X_data', 'WH_safe_X_data'], {'out': 'WH_safe_X_data'}), '(X_data, WH_safe_X_data, out=WH_safe_X_data)\n', (17057, 17101), True, 'import numpy as np\n'), ((17118, 17132), 'numpy.dot', 'np.dot', (['W', 'W.T'], {}), '(W, W.T)\n', (17124, 17132), True, 'import numpy as np\n'), ((18070, 18084), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (18081, 18084), True, 'import scipy.sparse as sp\n'), ((19227, 19241), 'numpy.dot', 'np.dot', (['W.T', 'W'], {}), '(W.T, W)\n', (19233, 19241), True, 'import numpy as np\n'), ((19817, 19870), 'numpy.divide', 'np.divide', (['X_data', 'WH_safe_X_data'], {'out': 'WH_safe_X_data'}), '(X_data, WH_safe_X_data, out=WH_safe_X_data)\n', (19826, 19870), True, 'import numpy as np\n'), ((20468, 20485), 'numpy.sum', 'np.sum', (['W'], {'axis': '(0)'}), '(W, axis=0)\n', (20474, 20485), True, 'import numpy as np\n'), ((20730, 20744), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (20741, 20744), True, 'import scipy.sparse as sp\n'), ((24711, 24722), 'sklearn.utils.extmath.safe_min', 'safe_min', (['X'], {}), '(X)\n', (24719, 24722), False, 'from sklearn.utils.extmath import safe_min\n'), ((25733, 25886), 'warnings.warn', 'warnings.warn', (['"""The default value of init will change from random to None in 0.23 to make it consistent with decomposition.NMF."""', 'FutureWarning'], {}), "(\n 'The default value of init will change from random to None in 0.23 to make it consistent with decomposition.NMF.'\n , FutureWarning)\n", (25746, 25886), False, 'import warnings\n'), ((26324, 26363), 'numpy.full', 'np.full', (['(n_samples, n_components)', 'avg'], {}), '((n_samples, n_components), avg)\n', (26331, 26363), True, 'import numpy as np\n'), ((28567, 28585), 'numpy.eye', 'np.eye', (['N.shape[0]'], {}), '(N.shape[0])\n', (28573, 28585), True, 'import numpy as np\n'), ((28605, 28621), 'numpy.linalg.inv', 'np.linalg.inv', (['D'], {}), '(D)\n', 
(28618, 28621), True, 'import numpy as np\n'), ((2159, 2194), 'numpy.random.randn', 'np.random.randn', (['self.d', 'X.shape[1]'], {}), '(self.d, X.shape[1])\n', (2174, 2194), True, 'import numpy as np\n'), ((2230, 2265), 'numpy.random.randn', 'np.random.randn', (['X.shape[0]', 'self.d'], {}), '(X.shape[0], self.d)\n', (2245, 2265), True, 'import numpy as np\n'), ((3142, 3159), 'tensorflow.pow', 'tf.pow', (['(A - WH)', '(2)'], {}), '(A - WH, 2)\n', (3148, 3159), True, 'import tensorflow as tf\n'), ((4286, 4302), 'tensorflow.zeros_like', 'tf.zeros_like', (['W'], {}), '(W)\n', (4299, 4302), True, 'import tensorflow as tf\n'), ((4358, 4374), 'tensorflow.zeros_like', 'tf.zeros_like', (['H'], {}), '(H)\n', (4371, 4374), True, 'import tensorflow as tf\n'), ((6902, 6925), 'numpy.eye', 'np.eye', (['self.N.shape[0]'], {}), '(self.N.shape[0])\n', (6908, 6925), True, 'import numpy as np\n'), ((6944, 6963), 'numpy.count_nonzero', 'np.count_nonzero', (['N'], {}), '(N)\n', (6960, 6963), True, 'import numpy as np\n'), ((10983, 11001), 'numpy.sum', 'np.sum', (['(WH ** beta)'], {}), '(WH ** beta)\n', (10989, 11001), True, 'import numpy as np\n'), ((11492, 11525), 'numpy.multiply', 'np.multiply', (['W[ii, :]', 'H.T[jj, :]'], {}), '(W[ii, :], H.T[jj, :])\n', (11503, 11525), True, 'import numpy as np\n'), ((14576, 14592), 'numpy.minimum', 'np.minimum', (['x', '(0)'], {}), '(x, 0)\n', (14586, 14592), True, 'import numpy as np\n'), ((14602, 14618), 'numpy.minimum', 'np.minimum', (['y', '(0)'], {}), '(y, 0)\n', (14612, 14618), True, 'import numpy as np\n'), ((15338, 15370), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (15356, 15370), False, 'from sklearn.utils import check_random_state, check_array\n'), ((17156, 17192), 'sklearn.utils.extmath.safe_sparse_dot', 'safe_sparse_dot', (['WH_safe_X_data', 'H.T'], {}), '(WH_safe_X_data, H.T)\n', (17171, 17192), False, 'from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, 
squared_norm\n'), ((17525, 17556), 'sklearn.utils.extmath.safe_sparse_dot', 'safe_sparse_dot', (['WH_safe_X', 'H.T'], {}), '(WH_safe_X, H.T)\n', (17540, 17556), False, 'from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, squared_norm\n'), ((17719, 17750), 'sklearn.utils.extmath.safe_sparse_dot', 'safe_sparse_dot', (['WH_safe_X', 'H.T'], {}), '(WH_safe_X, H.T)\n', (17734, 17750), False, 'from sklearn.utils.extmath import randomized_svd, safe_sparse_dot, squared_norm\n'), ((17857, 17874), 'numpy.sum', 'np.sum', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (17863, 17874), True, 'import numpy as np\n'), ((18225, 18242), 'numpy.empty', 'np.empty', (['W.shape'], {}), '(W.shape)\n', (18233, 18242), True, 'import numpy as np\n'), ((18592, 18607), 'numpy.dot', 'np.dot', (['WH', 'H.T'], {}), '(WH, H.T)\n', (18598, 18607), True, 'import numpy as np\n'), ((20891, 20908), 'numpy.empty', 'np.empty', (['H.shape'], {}), '(H.shape)\n', (20899, 20908), True, 'import numpy as np\n'), ((21258, 21273), 'numpy.dot', 'np.dot', (['W.T', 'WH'], {}), '(W.T, WH)\n', (21264, 21273), True, 'import numpy as np\n'), ((23558, 23569), 'time.time', 'time.time', ([], {}), '()\n', (23567, 23569), False, 'import time\n'), ((7323, 7341), 'joblib.delayed', 'delayed', (['self._fit'], {}), '(self._fit)\n', (7330, 7341), False, 'from joblib import Parallel, delayed\n'), ((8577, 8588), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (8585, 8588), True, 'import numpy as np\n'), ((9208, 9222), 'numpy.dot', 'np.dot', (['W.T', 'W'], {}), '(W.T, W)\n', (9214, 9222), True, 'import numpy as np\n'), ((10437, 10451), 'numpy.dot', 'np.dot', (['W.T', 'L'], {}), '(W.T, L)\n', (10443, 10451), True, 'import numpy as np\n'), ((10563, 10574), 'numpy.sum', 'np.sum', (['div'], {}), '(div)\n', (10569, 10574), True, 'import numpy as np\n'), ((10577, 10596), 'numpy.product', 'np.product', (['X.shape'], {}), '(X.shape)\n', (10587, 10596), True, 'import numpy as np\n'), ((10606, 10617), 'numpy.log', 'np.log', 
(['div'], {}), '(div)\n', (10612, 10617), True, 'import numpy as np\n'), ((17203, 17215), 'numpy.dot', 'np.dot', (['N', 'W'], {}), '(N, W)\n', (17209, 17215), True, 'import numpy as np\n'), ((17955, 17967), 'numpy.dot', 'np.dot', (['D', 'W'], {}), '(D, W)\n', (17961, 17967), True, 'import numpy as np\n'), ((18313, 18331), 'numpy.dot', 'np.dot', (['W[i, :]', 'H'], {}), '(W[i, :], H)\n', (18319, 18331), True, 'import numpy as np\n'), ((18497, 18513), 'numpy.dot', 'np.dot', (['WHi', 'H.T'], {}), '(WHi, H.T)\n', (18503, 18513), True, 'import numpy as np\n'), ((20979, 20997), 'numpy.dot', 'np.dot', (['W', 'H[:, i]'], {}), '(W, H[:, i])\n', (20985, 20997), True, 'import numpy as np\n'), ((21163, 21179), 'numpy.dot', 'np.dot', (['W.T', 'WHi'], {}), '(W.T, WHi)\n', (21169, 21179), True, 'import numpy as np\n'), ((3217, 3232), 'tensorflow.transpose', 'tf.transpose', (['W'], {}), '(W)\n', (3229, 3232), True, 'import tensorflow as tf\n'), ((3233, 3250), 'tensorflow.matmul', 'tf.matmul', (['L_s', 'W'], {}), '(L_s, W)\n', (3242, 3250), True, 'import tensorflow as tf\n'), ((9389, 9401), 'numpy.dot', 'np.dot', (['W', 'H'], {}), '(W, H)\n', (9395, 9401), True, 'import numpy as np\n'), ((22833, 22853), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (22841, 22853), True, 'import numpy as np\n'), ((3419, 3434), 'tensorflow.transpose', 'tf.transpose', (['W'], {}), '(W)\n', (3431, 3434), True, 'import tensorflow as tf\n'), ((3435, 3452), 'tensorflow.matmul', 'tf.matmul', (['L_s', 'W'], {}), '(L_s, W)\n', (3444, 3452), True, 'import tensorflow as tf\n'), ((10914, 10932), 'numpy.dot', 'np.dot', (['W', 'H[:, i]'], {}), '(W, H[:, i])\n', (10920, 10932), True, 'import numpy as np\n'), ((23292, 23312), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (23300, 23312), True, 'import numpy as np\n'), ((3349, 3362), 'tensorflow.div', 'tf.div', (['A', 'WH'], {}), '(A, WH)\n', (3355, 3362), True, 'import tensorflow as tf\n')] |
import random
import json
import pickle
import numpy as np
#Start Import for Speech to talk
import os
import time
import playsound
import speech_recognition as speech_rec
from gtts import gTTS
#from AppKit import NSSound
#End Import for Speech to talkhj
import nltk
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import load_model
lemmatizer = WordNetLemmatizer()
intents = json.loads(open('intents.json').read())
words = pickle.load(open('talkbot_words.pkl', 'rb'))
labels = pickle.load(open('talkbot_labels.pkl', 'rb'))
model = load_model('talkbotnlp_model.h5')
#print(labels)
def clean_up_sentence(sentence):
sentence_words = nltk.word_tokenize(sentence)
sentence_words = [lemmatizer.lemmatize(word) for word in sentence_words]
return sentence_words
def bagofwords(sentence):
sentence_words = clean_up_sentence(sentence)
bag = [0] * len(words)
for w in sentence_words:
for i, word in enumerate(words):
if word == w:
bag[i] = 1
return np.array(bag)
def predict_class(sentence):
bow = bagofwords(sentence)
res = model.predict(np.array([bow]))[0]
ERROR_THRESHOLD = 0.1
results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({'intent': labels[r[0]], 'probability': str(r[1])})
return return_list
def get_response(intents_list, intents_json):
tag = intents_list[0]['intent']
#print(tag)
list_of_intents = intents_json['intents']
for i in list_of_intents:
#print(i['tag'])
if i['tag'] == tag:
result = random.choice(i['responses'])
#print(result)
break
return result
def talk_to_bot(speak_words):
save_speak_words = gTTS(text=speak_words, lang="en")
chat_file = "talk_bot.mp3"
save_speak_words.save(chat_file)
playsound.playsound(chat_file)
os.remove("talk_bot.mp3")
def listen_microphone_audio():
rec = speech_rec.Recognizer()
with speech_rec.Microphone() as talk_source:
listen_audio = rec.listen(talk_source)
talk = ""
try:
talk = rec.recognize_google(listen_audio)
print("User: ", talk)
except Exception as e:
print("Exception: " + str(e))
return talk
talk_to_bot("Hi! How can I help you?")
#print("Chat with me now")
print("Bot: Hi! How can I help you?")
while True:
speech_message = listen_microphone_audio()
chat_intents = predict_class(speech_message)
intent_response = get_response(chat_intents, intents)
print("Bot: ", intent_response)
talk_to_bot(intent_response)
#print(res)
| [
"playsound.playsound",
"os.remove",
"tensorflow.keras.models.load_model",
"nltk.stem.WordNetLemmatizer",
"gtts.gTTS",
"random.choice",
"speech_recognition.Microphone",
"numpy.array",
"nltk.word_tokenize",
"speech_recognition.Recognizer"
] | [((370, 389), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (387, 389), False, 'from nltk.stem import WordNetLemmatizer\n'), ((557, 590), 'tensorflow.keras.models.load_model', 'load_model', (['"""talkbotnlp_model.h5"""'], {}), "('talkbotnlp_model.h5')\n", (567, 590), False, 'from tensorflow.keras.models import load_model\n'), ((661, 689), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (679, 689), False, 'import nltk\n'), ((1030, 1043), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (1038, 1043), True, 'import numpy as np\n'), ((1842, 1875), 'gtts.gTTS', 'gTTS', ([], {'text': 'speak_words', 'lang': '"""en"""'}), "(text=speak_words, lang='en')\n", (1846, 1875), False, 'from gtts import gTTS\n'), ((1948, 1978), 'playsound.playsound', 'playsound.playsound', (['chat_file'], {}), '(chat_file)\n', (1967, 1978), False, 'import playsound\n'), ((1983, 2008), 'os.remove', 'os.remove', (['"""talk_bot.mp3"""'], {}), "('talk_bot.mp3')\n", (1992, 2008), False, 'import os\n'), ((2051, 2074), 'speech_recognition.Recognizer', 'speech_rec.Recognizer', ([], {}), '()\n', (2072, 2074), True, 'import speech_recognition as speech_rec\n'), ((2084, 2107), 'speech_recognition.Microphone', 'speech_rec.Microphone', ([], {}), '()\n', (2105, 2107), True, 'import speech_recognition as speech_rec\n'), ((1129, 1144), 'numpy.array', 'np.array', (['[bow]'], {}), '([bow])\n', (1137, 1144), True, 'import numpy as np\n'), ((1695, 1724), 'random.choice', 'random.choice', (["i['responses']"], {}), "(i['responses'])\n", (1708, 1724), False, 'import random\n')] |
import numpy as np
import pylab
from qiskit import Aer
from qiskit.opflow import X, Z, I
from qiskit.utils import QuantumInstance, algorithm_globals
from qiskit.algorithms import VQE, NumPyMinimumEigensolver
from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SLSQP
from qiskit.circuit.library import TwoLocal
# Create qubit operator of H2 molecule
H2_op = (-1.052373245772859 * I ^ I) + \
(0.39793742484318045 * I ^ Z) + \
(-0.39793742484318045 * Z ^ I) + \
(-0.01128010425623538 * Z ^ Z) + \
(0.18093119978423156 * X ^ X)
# Show callback usage over set of optimizers
optimizers = [COBYLA(maxiter=80), L_BFGS_B(maxiter=60), SLSQP(maxiter=60)]
converge_cnts = np.empty([len(optimizers)], dtype=object)
converge_vals = np.empty([len(optimizers)], dtype=object)
for i, optimizer in enumerate(optimizers):
print('\rOptimizer: {} '.format(type(optimizer).__name__), end='')
algorithm_globals.random_seed = 50
ansatz = TwoLocal(rotation_blocks='ry', entanglement_blocks='cz')
counts = []
values = []
def store_intermediate_result(eval_count, parameters, mean, std):
counts.append(eval_count)
values.append(mean)
vqe = VQE(ansatz,
optimizer,
callback=store_intermediate_result,
quantum_instance= QuantumInstance(
backend=Aer.get_backend('statevector_simulator')
))
result = vqe.compute_minimum_eigenvalue(operator=H2_op)
converge_cnts[i] = np.asarray(counts)
converge_vals[i] = np.asarray(values)
print('\rOptimization complete ')
# plot energy value at each objective funciton call each optimizer makes
pylab.rcParams['figure.figsize'] = (12,8)
for i, optimizer in enumerate(optimizers):
pylab.plot(converge_cnts[i], converge_vals[i], label=type(optimizer).__name__)
pylab.xlabel('Eval count')
pylab.ylabel('Energy')
pylab.title('Energy convergence for variaous optimizers')
pylab.legend(loc='upper right')
pylab.show()
# Run classical algorithm to obtain reference solution
npme = NumPyMinimumEigensolver()
result = npme.compute_minimum_eigenvalue(operator=H2_op)
ref_value = result.eigenvalue.real
print(f'Reference value: {ref_value:.5f}')
# Plot difference from reference solution to optimizations
pylab.rcParams['figure.figsize'] = (12,8)
for i, optimizer in enumerate(optimizers):
pylab.plot(converge_cnts[i], abs(ref_value - converge_vals[i]), label=type(optimizer).__name__)
pylab.xlabel('Eval count')
pylab.ylabel('Energy difference from solution reference value')
pylab.title('Energy convergence for various optimizers')
pylab.yscale('log')
pylab.legend(loc='upper right')
pylab.show()
### Using Gradient framework
from qiskit.opflow.gradients import Gradient
algorithm_globals.random_seed = 50
ansatz = TwoLocal(rotation_blocks='ry', entanglement_blocks='cz')
optimizer = SLSQP(maxiter=60)
counts = []
values = []
def store_intermediate_result(eval_count, parameters, mean, std):
counts.append(eval_count)
values.append(mean)
vqe = VQE(ansatz,
optimizer,
callback=store_intermediate_result,
gradient=Gradient(grad_method='fin_diff'),
quantum_instance=QuantumInstance(
backend=Aer.get_backend('statevector_simulator')
))
result = vqe.compute_minimum_eigenvalue(operator=H2_op)
print(f'Value using Gradient: {result.eigenvalue.real:.5f}')
pylab.rcParams['figure.figsize'] = (12,8)
pylab.plot(counts, values, label=type(optimizer).__name__)
pylab.xlabel('Eval count')
pylab.ylabel('Energy')
pylab.title('Energy convergence using Gradient')
pylab.legend(loc='upper right')
pylab.show() | [
"pylab.show",
"pylab.title",
"qiskit.algorithms.NumPyMinimumEigensolver",
"qiskit.opflow.gradients.Gradient",
"numpy.asarray",
"pylab.ylabel",
"qiskit.algorithms.optimizers.COBYLA",
"qiskit.algorithms.optimizers.SLSQP",
"pylab.xlabel",
"qiskit.circuit.library.TwoLocal",
"pylab.yscale",
"pylab.... | [((1846, 1872), 'pylab.xlabel', 'pylab.xlabel', (['"""Eval count"""'], {}), "('Eval count')\n", (1858, 1872), False, 'import pylab\n'), ((1873, 1895), 'pylab.ylabel', 'pylab.ylabel', (['"""Energy"""'], {}), "('Energy')\n", (1885, 1895), False, 'import pylab\n'), ((1896, 1953), 'pylab.title', 'pylab.title', (['"""Energy convergence for variaous optimizers"""'], {}), "('Energy convergence for variaous optimizers')\n", (1907, 1953), False, 'import pylab\n'), ((1954, 1985), 'pylab.legend', 'pylab.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1966, 1985), False, 'import pylab\n'), ((1986, 1998), 'pylab.show', 'pylab.show', ([], {}), '()\n', (1996, 1998), False, 'import pylab\n'), ((2062, 2087), 'qiskit.algorithms.NumPyMinimumEigensolver', 'NumPyMinimumEigensolver', ([], {}), '()\n', (2085, 2087), False, 'from qiskit.algorithms import VQE, NumPyMinimumEigensolver\n'), ((2468, 2494), 'pylab.xlabel', 'pylab.xlabel', (['"""Eval count"""'], {}), "('Eval count')\n", (2480, 2494), False, 'import pylab\n'), ((2495, 2558), 'pylab.ylabel', 'pylab.ylabel', (['"""Energy difference from solution reference value"""'], {}), "('Energy difference from solution reference value')\n", (2507, 2558), False, 'import pylab\n'), ((2559, 2615), 'pylab.title', 'pylab.title', (['"""Energy convergence for various optimizers"""'], {}), "('Energy convergence for various optimizers')\n", (2570, 2615), False, 'import pylab\n'), ((2616, 2635), 'pylab.yscale', 'pylab.yscale', (['"""log"""'], {}), "('log')\n", (2628, 2635), False, 'import pylab\n'), ((2636, 2667), 'pylab.legend', 'pylab.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2648, 2667), False, 'import pylab\n'), ((2668, 2680), 'pylab.show', 'pylab.show', ([], {}), '()\n', (2678, 2680), False, 'import pylab\n'), ((2804, 2860), 'qiskit.circuit.library.TwoLocal', 'TwoLocal', ([], {'rotation_blocks': '"""ry"""', 'entanglement_blocks': '"""cz"""'}), "(rotation_blocks='ry', 
entanglement_blocks='cz')\n", (2812, 2860), False, 'from qiskit.circuit.library import TwoLocal\n'), ((2874, 2891), 'qiskit.algorithms.optimizers.SLSQP', 'SLSQP', ([], {'maxiter': '(60)'}), '(maxiter=60)\n', (2879, 2891), False, 'from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SLSQP\n'), ((3519, 3545), 'pylab.xlabel', 'pylab.xlabel', (['"""Eval count"""'], {}), "('Eval count')\n", (3531, 3545), False, 'import pylab\n'), ((3546, 3568), 'pylab.ylabel', 'pylab.ylabel', (['"""Energy"""'], {}), "('Energy')\n", (3558, 3568), False, 'import pylab\n'), ((3569, 3617), 'pylab.title', 'pylab.title', (['"""Energy convergence using Gradient"""'], {}), "('Energy convergence using Gradient')\n", (3580, 3617), False, 'import pylab\n'), ((3618, 3649), 'pylab.legend', 'pylab.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3630, 3649), False, 'import pylab\n'), ((3650, 3662), 'pylab.show', 'pylab.show', ([], {}), '()\n', (3660, 3662), False, 'import pylab\n'), ((626, 644), 'qiskit.algorithms.optimizers.COBYLA', 'COBYLA', ([], {'maxiter': '(80)'}), '(maxiter=80)\n', (632, 644), False, 'from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SLSQP\n'), ((646, 666), 'qiskit.algorithms.optimizers.L_BFGS_B', 'L_BFGS_B', ([], {'maxiter': '(60)'}), '(maxiter=60)\n', (654, 666), False, 'from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SLSQP\n'), ((668, 685), 'qiskit.algorithms.optimizers.SLSQP', 'SLSQP', ([], {'maxiter': '(60)'}), '(maxiter=60)\n', (673, 685), False, 'from qiskit.algorithms.optimizers import COBYLA, L_BFGS_B, SLSQP\n'), ((971, 1027), 'qiskit.circuit.library.TwoLocal', 'TwoLocal', ([], {'rotation_blocks': '"""ry"""', 'entanglement_blocks': '"""cz"""'}), "(rotation_blocks='ry', entanglement_blocks='cz')\n", (979, 1027), False, 'from qiskit.circuit.library import TwoLocal\n'), ((1507, 1525), 'numpy.asarray', 'np.asarray', (['counts'], {}), '(counts)\n', (1517, 1525), True, 'import numpy as np\n'), ((1549, 1567), 'numpy.asarray', 
'np.asarray', (['values'], {}), '(values)\n', (1559, 1567), True, 'import numpy as np\n'), ((3142, 3174), 'qiskit.opflow.gradients.Gradient', 'Gradient', ([], {'grad_method': '"""fin_diff"""'}), "(grad_method='fin_diff')\n", (3150, 3174), False, 'from qiskit.opflow.gradients import Gradient\n'), ((3246, 3286), 'qiskit.Aer.get_backend', 'Aer.get_backend', (['"""statevector_simulator"""'], {}), "('statevector_simulator')\n", (3261, 3286), False, 'from qiskit import Aer\n'), ((1366, 1406), 'qiskit.Aer.get_backend', 'Aer.get_backend', (['"""statevector_simulator"""'], {}), "('statevector_simulator')\n", (1381, 1406), False, 'from qiskit import Aer\n')] |
# -*- coding: utf-8 -*-
"""
ENG 1600
the path-finding snake
"""
import random
import time
import pygame
import sys
import numpy as np
from pygame.locals import *
class PFSnake(object):
    """A snake game whose moves are chosen automatically by BFS path finding.

    The snake tries to reach the food along the shortest safe path; when no
    safe path exists it chases its own tail to survive until one opens up.
    Rendering uses pygame.  The board is a grid of board_width x board_height
    cells; every snake segment and the food are ``{'x': .., 'y': ..}`` dicts,
    with the head at ``snake_body[0]``.
    """
    # class attributes
    # window size in pixels; at cell_size == 20 this yields a 5x5 board
    window_height = 100 #400
    window_width = 100 #400
    cell_size = 20
    board_height = int(window_height/cell_size) #5 10 20
    board_width = int(window_width/cell_size) # 5 10 20
    # define the colors
    white = (255, 255, 255)
    black = (0, 0, 0)
    red = (255, 0, 0)
    gray = (40, 40, 40)
    green = (0, 255, 0)
    blue = (0, 0, 255)
    # define the directions
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3
    #board element
    # FOOD is distance 0; UNDEF means "not yet reached" and is larger than
    # any real BFS distance on the board; SNAKE marks blocked cells.
    FOOD = 0
    UNDEF = (board_height + 1) * (board_width + 1)
    SNAKE = 2 * UNDEF
    def __init__(self):
        """Initialize pygame, the game counters and the scratch board."""
        pygame.init()
        self.speed = 5
        self.speed_clock = pygame.time.Clock()
        self.score = 0
        self.ate = 0
        self.maxScore = PFSnake.board_height * PFSnake.board_width * 10
        self.alive = True
        #temporary vars; for path finding
        self.tmp_board = np.zeros((PFSnake.board_height, PFSnake.board_width))
        self.initialize()
    def initialize(self):
        """Place a 3-segment snake in the middle of the board and spawn food."""
        # start from the center
        init_x = int(PFSnake.board_width/2)
        init_y = int(PFSnake.board_height/2)
        # head first; the body extends to the left of the head
        self.snake_body = [{'x':init_x, 'y':init_y},
                           {'x':init_x-1, 'y':init_y},
                           {'x':init_x-2, 'y':init_y}]
        #self.direction = PFSnake.RIGHT
        self.food = self.generate_food() # random food location
    def restart(self):
        """Reset score/state and start a fresh game after a short pause."""
        time.sleep(1)
        self.score = 0
        self.ate = 0
        self.alive = True
        self.initialize()
    def main(self):
        """Run games forever, restarting whenever the snake gets stuck."""
        while True:
            self.run()
            self.restart()
    def run(self):
        """Open the window and play one game until no legal move remains."""
        self.screen = pygame.display.set_mode((PFSnake.window_width, PFSnake.window_height))
        self.screen.fill(PFSnake.white)
        pygame.display.set_caption("ENG 1600: SmartSnake")
        # while not stopPlay:
        while self._check_alive():
            print('PFPlay')
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
            self.PFPlay()
            print('Played one step')
            self.draw_game()
            pygame.display.update()
            self.speed_clock.tick(self.speed)
        print('Got Score: ', self.score)
    def PFPlay(self):
        """Choose and apply one move using the path-finding strategy.

        If BFS from the food reaches the head, follow the shortest safe
        path; otherwise chase the tail; as a last resort take any legal
        move.  NOTE(review): this always returns False, even after a
        successful move -- callers currently ignore the return value.
        """
        self.reset_board(self.tmp_board, self.snake_body)
        move = -1
        if self.update_board(self.tmp_board,self.snake_body):
            #find path between food and head
            move = self.find_path()
        else:
            move = self.follow_tail()
        if move == -1:
            move = self.just_one_possible_move()
        if move != -1:
            self.move_snake(move)
        else:
            return False
        print('move == {:d}'.format(move))
        return False
    # def play_one_step(self):
    #     print('Snake game: do action A')
    #     for event in pygame.event.get():
    #         if event.type == QUIT:
    #             pygame.quit()
    #             sys.exit()
    #
    #     self.move_snake()
    #     self.alive = self.check_alive()
    #
    #     self.check_food()
    #     self.score = self.ate*10 #(len(self.snake_body) - 3) * 10
    #     self.draw_game()
    #
    #     pygame.display.update()
    #     self.speed_clock.tick(self.speed)
    #     print(self.get_game_board(), '\n')
    def is_valid_move(self, direction):
        """Return True when `direction` is not a 180-degree reversal.

        NOTE(review): reads ``self.direction``, which is never assigned (the
        assignment in initialize() is commented out); calling this would
        raise AttributeError.  The method appears to be unused.
        """
        assert direction in [0,1,2,3]
        valid = direction == PFSnake.UP and self.direction != PFSnake.DOWN or \
                direction == PFSnake.DOWN and self.direction != PFSnake.UP or \
                direction == PFSnake.LEFT and self.direction != PFSnake.RIGHT or \
                direction == PFSnake.RIGHT and self.direction != PFSnake.LEFT
        return valid
    def is_cell_free(self, cell):
        """Return True when `cell` is not occupied by the snake."""
        return not (cell in self.snake_body)
    def generate_food(self):
        """Return a uniformly random board cell not covered by the snake."""
        Loc = {'x': random.randint(0, PFSnake.board_width - 1), 'y': random.randint(0, PFSnake.board_height - 1)}
        while Loc in self.snake_body:
            Loc = {'x': random.randint(0, PFSnake.board_width - 1), 'y': random.randint(0, PFSnake.board_height - 1)}
        return Loc
    def is_move_possible(self, cell, direction):
        """Return True if stepping from `cell` in `direction` stays on the
        board and does not land on the snake's body (head excluded)."""
        flag = False
        nextCell = self.move_cell(cell,direction)
        if direction == PFSnake.LEFT:
            flag = True if cell['x'] > 0 else False
        elif direction == PFSnake.RIGHT:
            flag = True if cell['x'] < PFSnake.board_width-1 else False
        elif direction == PFSnake.UP:
            flag = True if cell['y'] > 0 else False
        elif direction == PFSnake.DOWN:
            flag = True if cell['y'] < PFSnake.board_height-1 else False
        if nextCell in self.snake_body[1:]:
            flag = False
        return flag
    def move_virtual_snake(self, direction, snake):
        """Advance a simulated snake one step in `direction`.

        NOTE(review): the new head is computed from `snake` but inserted
        into ``self.snake_body`` instead of `snake` -- this looks like a
        bug.  The only caller, virtual_move(), is itself unused (its call in
        find_path is commented out), so the bug is currently dormant.
        """
        print(direction)
        if direction == -1:
            direction = random.randint(0,3)
        if direction == PFSnake.UP:
            newHead = {'x': snake[0]['x'], 'y': snake[0]['y'] - 1}
        elif direction == PFSnake.DOWN:
            newHead = {'x': snake[0]['x'], 'y': snake[0]['y'] + 1}
        elif direction == PFSnake.LEFT:
            newHead = {'x': snake[0]['x'] - 1, 'y': snake[0]['y']}
        elif direction == PFSnake.RIGHT:
            newHead = {'x': snake[0]['x'] + 1, 'y': snake[0]['y']}
        self.snake_body.insert(0, newHead)
    def move_snake(self, direction):
        """Move the real snake one step; return True when food was eaten."""
        # # if end, do nothing
        # if not self.check_alive():
        #     return
        if direction == PFSnake.UP:
            newHead = {'x': self.snake_body[0]['x'], 'y': self.snake_body[0]['y'] - 1}
        elif direction == PFSnake.DOWN:
            newHead = {'x': self.snake_body[0]['x'], 'y': self.snake_body[0]['y'] + 1}
        elif direction == PFSnake.LEFT:
            newHead = {'x': self.snake_body[0]['x'] - 1, 'y': self.snake_body[0]['y']}
        elif direction == PFSnake.RIGHT:
            newHead = {'x': self.snake_body[0]['x'] + 1, 'y': self.snake_body[0]['y']}
        self.snake_body.insert(0, newHead)
        return self._check_food()
    def move_cell(self, cell, direction):
        """Return the neighbour of `cell` in `direction` (no bounds check)."""
        if direction == PFSnake.UP:
            newCell = {'x': cell['x'], 'y': cell['y'] - 1}
        elif direction == PFSnake.DOWN:
            newCell = {'x': cell['x'], 'y': cell['y'] + 1}
        elif direction == PFSnake.LEFT:
            newCell = {'x': cell['x'] - 1, 'y': cell['y']}
        elif direction == PFSnake.RIGHT:
            newCell = {'x': cell['x'] + 1, 'y': cell['y']}
        return newCell
    def _check_alive(self):
        """Return True if the head still has at least one legal move."""
        alive = False
        # if there is a empty cell near s_head, return true
        head = self.snake_body[0]
        for direction in [0,1,2,3]:
            if self.is_move_possible(head, direction):
                alive = True
                break
        # if self.snake_body[0]['x'] == -1 or \
        #     self.snake_body[0]['x'] == PFSnake.board_width or \
        #     self.snake_body[0]['y'] == -1 or \
        #     self.snake_body[0]['y'] == PFSnake.board_height:
        #     alive = False
        #
        # for node in self.snake_body[1:]:
        #     if node['x'] == self.snake_body[0]['x'] and \
        #         node['y'] == self.snake_body[0]['y']:
        #         alive = False
        #         break
        return alive
    def _check_food(self):
        """Grow and respawn food when the head is on it; else pop the tail.

        Returns True when food was eaten (implicitly None when already
        stuck, because of the early return).
        """
        ate = False
        # if end, do nothing
        if not self._check_alive():
            return
        if self.snake_body[0]['x'] == self.food['x'] and self.snake_body[0]['y'] == self.food['y']:
            self.food = self.generate_food()
            self.ate += 1
            ate = True
        else:
            # no food eaten: drop the tail so the length stays constant
            self.snake_body.pop(-1)
        return ate
    def draw_game(self):
        """Render the grid, the snake (green head, blue body) and red food."""
        self.screen.fill(PFSnake.black)
        #draw grid
        for x in range(0, PFSnake.window_width, PFSnake.cell_size):
            pygame.draw.line(self.screen, PFSnake.gray, (x, 0), (x, PFSnake.window_height))
        for y in range(0, PFSnake.window_height, PFSnake.cell_size):
            pygame.draw.line(self.screen, PFSnake.gray, (0, y), (PFSnake.window_width, y))
        #draw snake
        headx = self.snake_body[0]['x'] * PFSnake.cell_size
        heady = self.snake_body[0]['y'] * PFSnake.cell_size
        pygame.draw.rect(self.screen, PFSnake.green, pygame.Rect(headx, heady, PFSnake.cell_size, PFSnake.cell_size))
        for node in self.snake_body[1:]:
            x = node['x'] * PFSnake.cell_size
            y = node['y'] * PFSnake.cell_size
            pygame.draw.rect(self.screen, PFSnake.blue, pygame.Rect(x, y, PFSnake.cell_size, PFSnake.cell_size))
        #draw food
        x = self.food['x'] * PFSnake.cell_size
        y = self.food['y'] * PFSnake.cell_size
        pygame.draw.rect(self.screen, PFSnake.red, pygame.Rect(x, y, PFSnake.cell_size, PFSnake.cell_size))
        #draw score
        # font = pygame.font.SysFont('arial', 20)
        # scoreSurf = font.render('Score: %s' % self.score, True, PFSnake.white)
        # scoreRect = scoreSurf.get_rect()
        # scoreRect.topleft = (int(PFSnake.window_width / 2) - 20, 10)
        # self.screen.blit(scoreSurf, scoreRect)
    # reset board after update_board
    def reset_board(self, board, snake):
        """Clear `board` to UNDEF, then mark the food and snake cells."""
        #board = self.tmp_board
        for row in range(PFSnake.board_height):
            for col in range(PFSnake.board_width):
                board[row][col] = PFSnake.UNDEF
        board[self.food['y']][self.food['x']] = PFSnake.FOOD
        for node in snake:
            board[node['y']][node['x']] = PFSnake.SNAKE
    # compute the distance to food for every non-snake cell
    def update_board(self, board, snake):
        """Flood-fill from the food, writing shortest distances into `board`.

        Returns True when the fill reaches the snake's head, i.e. a path
        between food and head exists.
        """
        found = False
        queue = []
        queue.append(self.food)
        visited = np.zeros((PFSnake.board_height, PFSnake.board_width))
        while len(queue)!=0:
            cell = queue.pop(0)
            if visited[cell['y']][cell['x']]==1:
                continue
            visited[cell['y']][cell['x']]=1
            for direction in range(4):
                if self.is_move_possible(cell,direction):
                    newCell = self.move_cell(cell,direction)
                    if newCell==snake[0]:
                        # found head
                        found = True
                    if board[newCell['y']][newCell['x']] < PFSnake.SNAKE:
                        # relax the neighbour's distance if we found a
                        # shorter route to it
                        if board[newCell['y']][newCell['x']] > \
                                board[cell['y']][cell['x']] + 1:
                            board[newCell['y']][newCell['x']] = \
                                board[cell['y']][cell['x']] + 1
                        if visited[newCell['y']][newCell['x']] == 0:
                            queue.append(newCell)
        # print('board updated')
        # print(board)
        return found
    def get_shortest_safe_move(self, board, snake):
        """Return the direction whose next cell has the smallest distance
        value (i.e. the shortest path toward the food), or -1 if no legal
        move exists.  Re-runs reset/update on `board` first."""
        move = -1
        min = PFSnake.SNAKE
        self.reset_board(board, snake)
        self.update_board(board, snake)
        for direction in range(4):
            if self.is_move_possible(snake[0], direction):
                nextHead = self.move_cell(snake[0], direction)
                if board[nextHead['y']][nextHead['x']] < min:
                    min = board[nextHead['y']][nextHead['x']]
                    move = direction
        return move
    def get_longest_safe_move(self, board):
        """Return the direction whose next cell has the largest finite
        distance value (used when stalling), or -1 if none.

        NOTE(review): this re-runs reset_board/update_board on
        self.tmp_board, which clobbers the board prepared by follow_tail()
        before `board` is read -- confirm this is intended.
        """
        move = -1
        max = -1
        self.reset_board(self.tmp_board, self.snake_body)
        self.update_board(self.tmp_board,self.snake_body)
        for direction in range(4):
            if self.is_move_possible(self.snake_body[0], direction):
                nextHead = self.move_cell(self.snake_body[0], direction)
                if board[nextHead['y']][nextHead['x']] > max and \
                        board[nextHead['y']][nextHead['x']] < PFSnake.UNDEF:
                    max = board[nextHead['y']][nextHead['x']]
                    move = direction
        return move
    def can_find_tail(self):
        """Return True when the head can still reach the tail.

        Runs update_board with the tail marked as the zero-distance target
        and the food blocked; additionally rejects the case where the tail
        is directly adjacent to the head (for snakes longer than 3).
        """
        self.reset_board(self.tmp_board, self.snake_body)
        temp = self.tmp_board
        tail = self.snake_body[-1]
        temp[tail['y']][tail['x']] = PFSnake.FOOD
        temp[self.food['y']][self.food['x']] = PFSnake.SNAKE
        result = self.update_board(self.tmp_board,self.snake_body)
        for direction in range(4):
            if self.is_move_possible(self.snake_body[0], direction):
                newHead = self.move_cell(self.snake_body[0], direction)
                if newHead == self.snake_body[-1] and len(self.snake_body) >3:
                    # cannot follow tail if tail is next to head
                    result = False
        return result
    def follow_tail(self):
        """Stall by heading away from the tail: mark the tail as the target,
        block the food, then take the longest safe move."""
        self.reset_board(self.tmp_board, self.snake_body)
        temp = self.tmp_board
        tail = self.snake_body[-1]
        temp[tail['y']][tail['x']] = PFSnake.FOOD
        temp[self.food['y']][self.food['x']] = PFSnake.SNAKE
        self.update_board(self.tmp_board,self.snake_body)
        temp[tail['y']][tail['x']] = PFSnake.SNAKE
        return self.get_longest_safe_move(self.tmp_board)
    def just_one_possible_move(self):
        """Last-resort fallback: return the legal move with the smallest
        board value, or -1 when the snake is completely stuck."""
        move = -1
        self.reset_board(self.tmp_board, self.snake_body)
        self.update_board(self.tmp_board,self.snake_body)
        min = PFSnake.SNAKE
        for direction in range(4):
            if self.is_move_possible(self.snake_body[0], direction):
                nextHead = self.move_cell(self.snake_body[0], direction)
                if self.tmp_board[nextHead['y']][nextHead['x']] < min:
                    min = self.tmp_board[nextHead['y']][nextHead['x']]
                    move = direction
        return move
    # let a virtual snake try to find path
    def virtual_move(self):
        """Simulate the shortest-path strategy on a copy of the snake.

        NOTE(review): currently unused (its call in find_path is commented
        out).  It depends on the buggy move_virtual_snake above, so if
        enabled it would mutate the real snake; see the note there.
        """
        temp = self.tmp_board.copy()
        snake_backup = self.snake_body.copy()
        virtual_snake = self.snake_body.copy()
        self.reset_board(temp, virtual_snake)
        food_ate = False
        while not food_ate:
            print('in virtual move')
            # TODO: impl api for virtual snake
            self.update_board(temp, virtual_snake)
            move = self.get_shortest_safe_move(temp, virtual_snake)
            food_ate = self.move_virtual_snake(move, virtual_snake)
        self.snake_body = snake_backup
        self.reset_board(self.tmp_board, self.snake_body)
        return
    def find_path(self):
        """Shortest path to food while the tail stays reachable; else stall."""
        #self.virtual_move()
        if self.can_find_tail():
            return self.get_shortest_safe_move(self.tmp_board, self.snake_body)
        else:
            return self.follow_tail()
if __name__ == '__main__':
    # Entry point: build the game and run it until the process is killed.
    snake_game = PFSnake()
    snake_game.main()
| [
"pygame.quit",
"pygame.draw.line",
"random.randint",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.Rect",
"numpy.zeros",
"pygame.init",
"time.sleep",
"pygame.display.update",
"pygame.display.set_caption",
"pygame.time.Clock"
] | [((822, 835), 'pygame.init', 'pygame.init', ([], {}), '()\n', (833, 835), False, 'import pygame\n'), ((886, 905), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (903, 905), False, 'import pygame\n'), ((1124, 1177), 'numpy.zeros', 'np.zeros', (['(PFSnake.board_height, PFSnake.board_width)'], {}), '((PFSnake.board_height, PFSnake.board_width))\n', (1132, 1177), True, 'import numpy as np\n'), ((1690, 1703), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1700, 1703), False, 'import time\n'), ((1951, 2021), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(PFSnake.window_width, PFSnake.window_height)'], {}), '((PFSnake.window_width, PFSnake.window_height))\n', (1974, 2021), False, 'import pygame\n'), ((2070, 2120), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""ENG 1600: SmartSnake"""'], {}), "('ENG 1600: SmartSnake')\n", (2096, 2120), False, 'import pygame\n'), ((10289, 10342), 'numpy.zeros', 'np.zeros', (['(PFSnake.board_height, PFSnake.board_width)'], {}), '((PFSnake.board_height, PFSnake.board_width))\n', (10297, 10342), True, 'import numpy as np\n'), ((2247, 2265), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2263, 2265), False, 'import pygame\n'), ((2451, 2474), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2472, 2474), False, 'import pygame\n'), ((4237, 4279), 'random.randint', 'random.randint', (['(0)', '(PFSnake.board_width - 1)'], {}), '(0, PFSnake.board_width - 1)\n', (4251, 4279), False, 'import random\n'), ((4286, 4329), 'random.randint', 'random.randint', (['(0)', '(PFSnake.board_height - 1)'], {}), '(0, PFSnake.board_height - 1)\n', (4300, 4329), False, 'import random\n'), ((5313, 5333), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (5327, 5333), False, 'import random\n'), ((8358, 8437), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', 'PFSnake.gray', '(x, 0)', '(x, PFSnake.window_height)'], {}), '(self.screen, PFSnake.gray, (x, 0), (x, 
PFSnake.window_height))\n', (8374, 8437), False, 'import pygame\n'), ((8519, 8597), 'pygame.draw.line', 'pygame.draw.line', (['self.screen', 'PFSnake.gray', '(0, y)', '(PFSnake.window_width, y)'], {}), '(self.screen, PFSnake.gray, (0, y), (PFSnake.window_width, y))\n', (8535, 8597), False, 'import pygame\n'), ((8800, 8863), 'pygame.Rect', 'pygame.Rect', (['headx', 'heady', 'PFSnake.cell_size', 'PFSnake.cell_size'], {}), '(headx, heady, PFSnake.cell_size, PFSnake.cell_size)\n', (8811, 8863), False, 'import pygame\n'), ((9284, 9339), 'pygame.Rect', 'pygame.Rect', (['x', 'y', 'PFSnake.cell_size', 'PFSnake.cell_size'], {}), '(x, y, PFSnake.cell_size, PFSnake.cell_size)\n', (9295, 9339), False, 'import pygame\n'), ((4393, 4435), 'random.randint', 'random.randint', (['(0)', '(PFSnake.board_width - 1)'], {}), '(0, PFSnake.board_width - 1)\n', (4407, 4435), False, 'import random\n'), ((4442, 4485), 'random.randint', 'random.randint', (['(0)', '(PFSnake.board_height - 1)'], {}), '(0, PFSnake.board_height - 1)\n', (4456, 4485), False, 'import random\n'), ((9054, 9109), 'pygame.Rect', 'pygame.Rect', (['x', 'y', 'PFSnake.cell_size', 'PFSnake.cell_size'], {}), '(x, y, PFSnake.cell_size, PFSnake.cell_size)\n', (9065, 9109), False, 'import pygame\n'), ((2333, 2346), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2344, 2346), False, 'import pygame\n')] |
#<NAME>
import cv2
import numpy as np
def read_keypoints(ith):
    """Read SIFT keypoints previously written by detect_and_match.

    Parameters
    ----------
    ith : A number or character that is represents the file name of the input txt file

    Returns
    -------
    keypoints : list of cv2.KeyPoint parsed from ../data/sift_<ith>.txt
    """
    filename = "../data/sift_"+str(ith)+".txt"
    # "with" guarantees the file is closed even if reading raises.
    with open(filename, "r") as file:
        lines = file.readlines()
    keypoints = [] # keypoints
    for line in lines:
        # The keypoint section ends at the "Descriptors" marker.
        if(line=="Descriptors\n"):
            break
        # Renamed from "list": do not shadow the builtin.
        fields = line.split(',')
        kp = cv2.KeyPoint(x=float(fields[3]), y=float(fields[4]), _size=float(fields[6]), _angle=float(fields[0]),_response=float(fields[5]), _octave=int(fields[2]), _class_id=int(fields[1]))
        keypoints.append(kp)
    return keypoints
def read_tentative_correspondences(ith,jth):
    """Read tentative correspondences created by detect_and_match.py.

    Parameters
    ----------
    ith : A number or character that is represents the file name of the input txt file
    jth : A number or character that is represents the file name of the input txt file

    Returns
    -------
    dmatches : list of cv2.DMatch parsed from the correspondences file
    """
    filename = "../data/tentative_correspondences_"+str(ith)+"-"+str(jth)+".txt"
    # "with" guarantees the file is closed even if reading raises.
    with open(filename, "r") as file:
        lines = file.readlines()
    dmatches = [] #dmatches
    for line in lines:
        # Renamed from "list": do not shadow the builtin.
        fields = line.split(',')
        dm = cv2.DMatch(_queryIdx=int(fields[2]),_trainIdx=int(fields[3]),_imgIdx=int(fields[1]),_distance=float(fields[0]))
        dmatches.append(dm)
    return dmatches
def read_images(ith, jth):
    """Load two goldengate photos as grayscale images.

    Parameters
    ----------
    ith : A number or character that is represents the file name of the input png file
    jth : A number or character that is represents the file name of the input png file

    Returns
    -------
    img1 : The first image that is read by function
    img2 : The second image that is read by function
    """
    path_first = '../data/goldengate/goldengate-0' + str(ith) + '.png'
    path_second = '../data/goldengate/goldengate-0' + str(jth) + '.png'
    img1 = cv2.imread(path_first, cv2.IMREAD_GRAYSCALE)   # read as grayscale
    img2 = cv2.imread(path_second, cv2.IMREAD_GRAYSCALE)
    return img1, img2
def write_homography(H,ith,jth):
    """Write a homography matrix to a txt file.

    Parameters
    ----------
    H : The Homography Matrix
    ith : A number or character that is represents the file name of the output txt file
    jth : A number or character that is represents the file name of the output txt file
    """
    filename = "../data/h_"+str(ith)+"-"+str(jth)+".txt"
    # "with" guarantees the file is closed even if the write raises.
    with open(filename, "w") as f:
        f.write(str(H)) #just writes homograpy all
def draw_inliers(ith, jth, img):
    """Save the inlier-match visualisation image.

    Parameters
    ----------
    ith : A number or character that is represents the file name of the output png file
    jth : A number or character that is represents the file name of the output png file
    img : The image that has inliers on it
    """
    out_path = "../data/inliers_" + str(ith) + "-" + str(jth) + ".png"
    cv2.imwrite(out_path, img)  # save inlier image
def write_inliers(ith,jth,inliers_mask,tentative):
    """Save the inlier matches in a txt file (same CSV layout as the
    tentative-correspondences files: distance,imgIdx,queryIdx,trainIdx).

    Parameters
    ----------
    ith : A number or character that is represents the file name of the output txt file
    jth : A number or character that is represents the file name of the output txt file
    inliers_mask : The inlier mask which is given by findHomography
    tentative : The DMatches of the two images
    """
    filename = "../data/inliers_"+str(ith)+"-"+str(jth)+".txt"
    # "with" closes the file on any exit path; zip replaces the manual
    # counter -- the mask and the match list are index-aligned.
    with open(filename, "w") as f:
        for flag, match in zip(inliers_mask, tentative):
            if(flag==1):
                f.write(str(match.distance)+","+str(match.imgIdx)+","+str(match.queryIdx)+","+str(match.trainIdx)+"\n")
def create_save_homography(ith, jth):
    """Estimate a homography between two images, save it and its inliers.

    Parameters
    ----------
    ith : A number or character that is represents the file name of the input png file
    jth : A number or character that is represents the file name of the input png file

    Returns
    -------
    H : the homography matrix from cv2.findHomography, or None when there
        are not enough tentative correspondences
    """
    img1, img2 = read_images(ith, jth)
    keypoints1 = read_keypoints(ith)
    keypoints2 = read_keypoints(jth)
    tentative = read_tentative_correspondences(ith, jth)

    MIN_MATCHES = 15  # below this many matches we do not compute a homography
    if len(tentative) <= MIN_MATCHES:
        print( "Not enough tentative correspondences to match images") #if min match count greater than len of tentatives
        return None

    # Convert keypoints into point arrays for findHomography:
    # queryIdx indexes img1 (source), trainIdx indexes img2 (destination).
    src_pts = np.float32([keypoints1[m.queryIdx].pt for m in tentative])
    dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in tentative])

    # Establish a homography (5 is the RANSAC reprojection threshold).
    H, M = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5)
    inliers_mask = M.ravel().tolist()  # mask matrix -> flat python list
    write_inliers(ith, jth, inliers_mask, tentative)

    # Draw only the inlier matches in green (flags=2 skips single keypoints).
    img3 = cv2.drawMatches(img1, keypoints1, img2, keypoints2, tentative, None,
                           matchColor=(0, 255, 0), singlePointColor=None,
                           matchesMask=inliers_mask, flags=2)
    draw_inliers(ith, jth, img3)   # saves inliers photo
    write_homography(H, ith, jth)  # writes H matrix to the txt
    return H
def main():
    """Compute and save a homography for each consecutive goldengate pair."""
    for pair_index in range(5):  # all images
        create_save_homography(pair_index, pair_index + 1)


main()
"cv2.drawMatches",
"cv2.imwrite",
"numpy.float32",
"cv2.imread",
"cv2.findHomography"
] | [((2356, 2399), 'cv2.imread', 'cv2.imread', (['filename1', 'cv2.IMREAD_GRAYSCALE'], {}), '(filename1, cv2.IMREAD_GRAYSCALE)\n', (2366, 2399), False, 'import cv2\n'), ((2497, 2540), 'cv2.imread', 'cv2.imread', (['filename2', 'cv2.IMREAD_GRAYSCALE'], {}), '(filename2, cv2.IMREAD_GRAYSCALE)\n', (2507, 2540), False, 'import cv2\n'), ((3491, 3517), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'img'], {}), '(filename, img)\n', (3502, 3517), False, 'import cv2\n'), ((5168, 5236), 'numpy.float32', 'np.float32', (['[keypoints1[dmatch.queryIdx].pt for dmatch in tentative]'], {}), '([keypoints1[dmatch.queryIdx].pt for dmatch in tentative])\n', (5178, 5236), True, 'import numpy as np\n'), ((5298, 5366), 'numpy.float32', 'np.float32', (['[keypoints2[dmatch.trainIdx].pt for dmatch in tentative]'], {}), '([keypoints2[dmatch.trainIdx].pt for dmatch in tentative])\n', (5308, 5366), True, 'import numpy as np\n'), ((5434, 5502), 'cv2.findHomography', 'cv2.findHomography', (['source_points', 'destination_points', 'cv2.RANSAC', '(5)'], {}), '(source_points, destination_points, cv2.RANSAC, 5)\n', (5452, 5502), False, 'import cv2\n'), ((5854, 6012), 'cv2.drawMatches', 'cv2.drawMatches', (['img1', 'keypoints1', 'img2', 'keypoints2', 'tentative', 'None'], {'matchColor': '(0, 255, 0)', 'singlePointColor': 'None', 'matchesMask': 'inliers_mask', 'flags': '(2)'}), '(img1, keypoints1, img2, keypoints2, tentative, None,\n matchColor=(0, 255, 0), singlePointColor=None, matchesMask=inliers_mask,\n flags=2)\n', (5869, 6012), False, 'import cv2\n')] |
"""
Implementation of a dataset that add phenotype data.
"""
from typing import Optional, List
from dataclasses import dataclass
import torch
import pandas as pd
import numpy as np
from .core import GeneticDataset, GeneticDatasetBackend
_STANDARDIZE_MAX_SAMPLE = 10_000
@dataclass
class StdVariableScaler(object):
    """Standardizes a variable using a precomputed mean and std.

    ``name`` identifies the variable; ``mean`` is subtracted and ``std``
    divides during scaling.
    """
    name: str
    mean: float
    std: float

    def scale(self, x):
        """Map a raw value to its z-score."""
        centered = x - self.mean
        return centered / self.std

    def inverse(self, z):
        """Map a z-score back onto the raw scale."""
        return self.mean + self.std * z
class PhenotypeGeneticDataset(GeneticDataset):
    """Dataset implementation for genotype-phenotype models.

    Uses an arbitrary backend for the genetic data and a pandas DataFrame to
    hold the corresponding phenotypes.
    """
    def __init__(
        self,
        backend: GeneticDatasetBackend,
        phenotypes: pd.DataFrame,
        phenotypes_sample_id_column: str = "sample_id",
        exogenous_columns: Optional[List[str]] = None,
        endogenous_columns: Optional[List[str]] = None,
        standardize_columns: Optional[List[str]] = None,
        standardize_genotypes: bool = True
    ):
        """Overlap samples, optionally standardize phenotypes and genotypes,
        and build the exogenous/endogenous tensors.

        Raises
        ------
        ValueError
            If a requested phenotype column is missing from ``phenotypes``.
        """
        super().__init__(backend)

        # Check that we can find all the phenotype data.
        # Bug fix: the original used "&=" (set intersection), which reduced
        # the expected set to (almost always) empty and silently disabled
        # the missing-column check.  Union ("|=") requires every requested
        # column to be present.
        expected_cols = {phenotypes_sample_id_column}
        if exogenous_columns is not None:
            expected_cols |= set(exogenous_columns)
        if endogenous_columns is not None:
            expected_cols |= set(endogenous_columns)

        missing_cols = expected_cols - set(phenotypes.columns)
        if missing_cols:
            raise ValueError(f"Missing expected column(s): '{missing_cols}'.")

        # Overlap genetic and phenotype samples.
        self.overlap_samples(phenotypes[phenotypes_sample_id_column])

        # Reorder and select samples in the phenotypes dataset with respect to
        # their order in the genetic dataset.  The explicit copy avoids
        # chained-assignment warnings and accidental mutation of the caller's
        # DataFrame when columns are standardized below.
        phenotypes = phenotypes.iloc[self.idx["phen"], :].copy()

        # Standardize phenotypes if requested; the scalers are kept so the
        # transformation can be inverted later.
        if standardize_columns is not None:
            self.phenotype_scalers: Optional[List] = []
            for col in standardize_columns:
                scaler = StdVariableScaler(
                    col,
                    phenotypes[col].mean(skipna=True),
                    phenotypes[col].std(skipna=True)
                )
                phenotypes[col] = scaler.scale(phenotypes[col])
                self.phenotype_scalers.append(scaler)
        else:
            self.phenotype_scalers = None

        # Prepare tensors from the phenotypes df.
        self.exogenous_columns = exogenous_columns
        if exogenous_columns:
            self.exog: Optional[torch.Tensor] = torch.tensor(
                phenotypes.loc[:, exogenous_columns].values
            )
        else:
            self.exog = None

        self.endogenous_columns = endogenous_columns
        if endogenous_columns:
            self.endog: Optional[torch.Tensor] = torch.tensor(
                phenotypes.loc[:, endogenous_columns].values
            )
        else:
            self.endog = None

        # Standardize genotypes if requested.
        if standardize_genotypes:
            self.genotype_scaling_mean_std = self.compute_genotype_scaling()
        else:
            self.genotype_scaling_mean_std = None

    def compute_genotype_scaling(self):
        """Estimate per-variant genotype mean/std on a random subsample.

        Returns a ``(mean, std)`` pair of tensors used to standardize
        genotypes on the fly in ``__getitem__``.
        """
        # Estimate the mean and standard deviation of the genotypes to allow
        # standardization on the fly.
        sample_size = min(len(self), _STANDARDIZE_MAX_SAMPLE)
        sample = np.random.choice(
            np.arange(len(self)), size=sample_size, replace=False
        )

        m = np.empty((sample_size, self.backend.get_n_variants()), dtype=float)
        for i, idx in enumerate(sample):
            m[i, :] = self.backend[idx].numpy()

        # nan-aware statistics so missing genotypes do not bias the estimates.
        return (
            torch.tensor(np.nanmean(m, axis=0)),
            torch.tensor(np.nanstd(m, axis=0))
        )

    def standardized_genotypes_to_dosage(self, genotypes):
        """Invert the genotype standardization (in place) back to dosages.

        Raises
        ------
        RuntimeError
            If genotype standardization was not enabled.
        """
        if self.genotype_scaling_mean_std is None:
            raise RuntimeError("Genotypes were not standardized.")

        # Note: mutates (and returns) the input tensor.
        genotypes *= self.genotype_scaling_mean_std[1]
        genotypes += self.genotype_scaling_mean_std[0]
        return genotypes

    def __getitem__(self, idx):
        """Retrieve data at index.

        The return type is a tuple of length 1 to 4 depending on the requested
        endogenous and exogenous variables. The order is always:

        - (genotypes_raw, genotypes_std, exogenous, endogenous)
        """
        # Get the genotypes from the backend.
        geno = self.backend[self.idx["geno"][idx]]
        geno_std = None

        # Apply the standardization if requested.
        if self.genotype_scaling_mean_std is not None:
            geno_std = geno - self.genotype_scaling_mean_std[0]
            geno_std /= self.genotype_scaling_mean_std[1]
            # Impute NA to mean (0).
            geno_std = torch.nan_to_num(geno_std, nan=0)

        out = [geno]
        if geno_std is not None:
            out.append(geno_std)

        if self.exog is not None:
            out.append(self.exog[idx, :])

        if self.endog is not None:
            out.append(self.endog[idx, :])

        return tuple(out)

    def __len__(self):
        """Number of overlapping samples."""
        return len(self.idx["geno"])

    def overlap_samples(self, phenotype_samples):
        """Finds overlapping samples between genetic and phenotype dataset.

        Sets indexers:

            self.idx["geno"] is the indices in the genetic backend.
            self.idx["phen"] is the indices in the phenotype DF.

        Raises
        ------
        ValueError
            If the id types differ between the two sources or if nothing
            overlaps.
        """
        genetic_samples = self.backend.get_samples()
        overlap = set(genetic_samples) & set(phenotype_samples)

        genetic_samples_type = type(genetic_samples[0])
        phenotype_samples_type = type(phenotype_samples[0])

        if genetic_samples_type is not phenotype_samples_type:
            raise ValueError(
                f"Genetic file sample type: '{genetic_samples_type}' is "
                f"different from phenotype samples ('{phenotype_samples_type}'"
                ")."
            )

        if len(overlap) == 0:
            raise ValueError("No overlap between the genetic and phenotype "
                             "samples.")

        indexer = make_indexer(genetic_samples, phenotype_samples)
        self.idx = {
            "geno": indexer.left_idx.values,
            "phen": indexer.right_idx.values
        }
def make_indexer(left_samples, right_samples):
    """Build a positional index map between two sample lists.

    Returns a DataFrame with columns left_id/left_idx/right_id/right_idx
    containing only the overlapping ids, sorted by left_idx.

    Raises RuntimeError when the two lists share no samples.
    """
    left = pd.DataFrame({
        "left_id": left_samples,
        "left_idx": np.arange(len(left_samples)),
    })
    right = pd.DataFrame({
        "right_id": right_samples,
        "right_idx": np.arange(len(right_samples)),
    })

    merged = pd.merge(
        left, right,
        left_on="left_id", right_on="right_id",
        how="inner",
    )

    if merged.shape[0] == 0:
        raise RuntimeError("Can't index non-overlapping datasets.")

    # Sort wrt left to minimize shuffling around on this dataset.
    return merged.sort_values("left_idx")
| [
"pandas.DataFrame",
"pandas.merge",
"numpy.nanstd",
"torch.nan_to_num",
"numpy.arange",
"torch.tensor",
"numpy.nanmean"
] | [((6512, 6551), 'pandas.DataFrame', 'pd.DataFrame', (["{'left_id': left_samples}"], {}), "({'left_id': left_samples})\n", (6524, 6551), True, 'import pandas as pd\n'), ((6578, 6605), 'numpy.arange', 'np.arange', (['left_df.shape[0]'], {}), '(left_df.shape[0])\n', (6587, 6605), True, 'import numpy as np\n'), ((6622, 6663), 'pandas.DataFrame', 'pd.DataFrame', (["{'right_id': right_samples}"], {}), "({'right_id': right_samples})\n", (6634, 6663), True, 'import pandas as pd\n'), ((6692, 6720), 'numpy.arange', 'np.arange', (['right_df.shape[0]'], {}), '(right_df.shape[0])\n', (6701, 6720), True, 'import numpy as np\n'), ((6731, 6816), 'pandas.merge', 'pd.merge', (['left_df', 'right_df'], {'left_on': '"""left_id"""', 'right_on': '"""right_id"""', 'how': '"""inner"""'}), "(left_df, right_df, left_on='left_id', right_on='right_id', how='inner'\n )\n", (6739, 6816), True, 'import pandas as pd\n'), ((2668, 2725), 'torch.tensor', 'torch.tensor', (['phenotypes.loc[:, exogenous_columns].values'], {}), '(phenotypes.loc[:, exogenous_columns].values)\n', (2680, 2725), False, 'import torch\n'), ((2933, 2991), 'torch.tensor', 'torch.tensor', (['phenotypes.loc[:, endogenous_columns].values'], {}), '(phenotypes.loc[:, endogenous_columns].values)\n', (2945, 2991), False, 'import torch\n'), ((4930, 4963), 'torch.nan_to_num', 'torch.nan_to_num', (['geno_std'], {'nan': '(0)'}), '(geno_std, nan=0)\n', (4946, 4963), False, 'import torch\n'), ((3830, 3851), 'numpy.nanmean', 'np.nanmean', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (3840, 3851), True, 'import numpy as np\n'), ((3879, 3899), 'numpy.nanstd', 'np.nanstd', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (3888, 3899), True, 'import numpy as np\n')] |
import cv2.cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
# Load the demo image and show the original.
img = cv.imread('../Resources/Photos/cats.jpg')
cv.imshow('Cats', img)
# Single-channel canvas used to draw the circular mask.
blank = np.zeros(img.shape[:2], dtype='uint8')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
# Filled white circle (radius 100) centred on the image -> binary mask.
circle = cv.circle(blank, (img.shape[1]//2, img.shape[0]//2), 100, 255, -1)
mask = cv.bitwise_and(gray, gray, mask=circle)
cv.imshow('Mask', mask)
# Grayscale histogram restricted to the masked region (256 bins).
# NOTE(review): gray_hist is computed but never plotted below -- confirm
# whether the masked histogram was meant to be displayed instead.
gray_hist = cv.calcHist([gray], [0], mask, [256], [0, 255])
plt.figure()
plt.title('Color Histogram')
plt.xlabel('Bins')
plt.ylabel('# of pixels')
colors = ('b', 'g', 'r')
for i, col in enumerate(colors):
    # Per-channel histogram of the whole image.
    # NOTE(review): uses mask=None and 255 bins, unlike the 256-bin masked
    # call above -- confirm the mask/bin-count mismatch is intended.
    hist = cv.calcHist([img], [i], None, [255], [0, 255])
    plt.plot(hist, color=col)
    plt.xlim([0, 255])
plt.show()
cv.waitKey(0)
cv.destroyAllWindows()
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"cv2.cv2.destroyAllWindows",
"cv2.cv2.circle",
"cv2.cv2.bitwise_and",
"matplotlib.pyplot.show",
"cv2.cv2.waitKey",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.figure",
"cv2.cv2.calcHist",
"cv2.cv... | [((79, 120), 'cv2.cv2.imread', 'cv.imread', (['"""../Resources/Photos/cats.jpg"""'], {}), "('../Resources/Photos/cats.jpg')\n", (88, 120), True, 'import cv2.cv2 as cv\n'), ((121, 143), 'cv2.cv2.imshow', 'cv.imshow', (['"""Cats"""', 'img'], {}), "('Cats', img)\n", (130, 143), True, 'import cv2.cv2 as cv\n'), ((153, 191), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': '"""uint8"""'}), "(img.shape[:2], dtype='uint8')\n", (161, 191), True, 'import numpy as np\n'), ((200, 235), 'cv2.cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (211, 235), True, 'import cv2.cv2 as cv\n'), ((236, 259), 'cv2.cv2.imshow', 'cv.imshow', (['"""Gray"""', 'gray'], {}), "('Gray', gray)\n", (245, 259), True, 'import cv2.cv2 as cv\n'), ((270, 340), 'cv2.cv2.circle', 'cv.circle', (['blank', '(img.shape[1] // 2, img.shape[0] // 2)', '(100)', '(255)', '(-1)'], {}), '(blank, (img.shape[1] // 2, img.shape[0] // 2), 100, 255, -1)\n', (279, 340), True, 'import cv2.cv2 as cv\n'), ((345, 384), 'cv2.cv2.bitwise_and', 'cv.bitwise_and', (['gray', 'gray'], {'mask': 'circle'}), '(gray, gray, mask=circle)\n', (359, 384), True, 'import cv2.cv2 as cv\n'), ((385, 408), 'cv2.cv2.imshow', 'cv.imshow', (['"""Mask"""', 'mask'], {}), "('Mask', mask)\n", (394, 408), True, 'import cv2.cv2 as cv\n'), ((423, 470), 'cv2.cv2.calcHist', 'cv.calcHist', (['[gray]', '[0]', 'mask', '[256]', '[0, 255]'], {}), '([gray], [0], mask, [256], [0, 255])\n', (434, 470), True, 'import cv2.cv2 as cv\n'), ((472, 484), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (482, 484), True, 'import matplotlib.pyplot as plt\n'), ((485, 513), 'matplotlib.pyplot.title', 'plt.title', (['"""Color Histogram"""'], {}), "('Color Histogram')\n", (494, 513), True, 'import matplotlib.pyplot as plt\n'), ((514, 532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bins"""'], {}), "('Bins')\n", (524, 532), True, 'import matplotlib.pyplot as plt\n'), ((533, 558), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# of pixels"""'], {}), "('# of pixels')\n", (543, 558), True, 'import matplotlib.pyplot as plt\n'), ((730, 740), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (738, 740), True, 'import matplotlib.pyplot as plt\n'), ((742, 755), 'cv2.cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (752, 755), True, 'import cv2.cv2 as cv\n'), ((756, 778), 'cv2.cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (776, 778), True, 'import cv2.cv2 as cv\n'), ((629, 675), 'cv2.cv2.calcHist', 'cv.calcHist', (['[img]', '[i]', 'None', '[255]', '[0, 255]'], {}), '([img], [i], None, [255], [0, 255])\n', (640, 675), True, 'import cv2.cv2 as cv\n'), ((680, 705), 'matplotlib.pyplot.plot', 'plt.plot', (['hist'], {'color': 'col'}), '(hist, color=col)\n', (688, 705), True, 'import matplotlib.pyplot as plt\n'), ((710, 728), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 255]'], {}), '([0, 255])\n', (718, 728), True, 'import matplotlib.pyplot as plt\n')] |
from lib import cache
from lib import math
from api import helpers
import numpy as np
import scipy.sparse
class Task4:
    """Dimensionality reduction (SVD / PCA / LDA) over per-location visual
    descriptors, plus location-to-location similarity in the reduced space."""

    def __init__(self, init_params, data):
        self.config = init_params['config']
        self.app_mode = init_params['app_mode']
        self.project_directory = init_params['project_directory']
        # Cache connection parameters come from the [CACHE] config section.
        self.cache = cache.Cache(self.config.get("CACHE", "host"),
                                 int(self.config.get("CACHE", "port")))
        self.math = math.Math()
        self.data = data

    def reduce(self, model, algo, k):
        """Reduce every location's feature matrix for ``model`` to ``k``
        latent dimensions using ``algo`` ('svd', 'pca' or 'lda').

        Side effects: writes the first ``k`` components to
        <project>/task4.csv and stores the per-location reduced matrices in
        ``self.reduced_data``.
        """
        self.model = model
        # LDA works on the raw sparse matrices, so reload them from disk.
        if algo == "lda":
            self.data = self.reduce_lda(model, k)
        reduction_function = {
            'svd': self.math.svd,
            'pca': self.math.pca,
            'lda': self.math.lda,
        }
        reduction_data = []
        image_count = {}
        for location_id in self.data:
            # Remember how many rows each location contributed so the stacked
            # result can be split back per location afterwards.
            image_count[location_id] = len(self.data[location_id][self.model])
            reduction_data.extend(self.data[location_id][self.model])
        reduction_data = np.squeeze(reduction_data)
        total_reduced_data, components, ev = reduction_function[algo](reduction_data, k)
        components = np.asarray(components)
        np.savetxt(self.project_directory + "/task4.csv",
                   components[:, :k], delimiter=",")
        self.reduced_data = {}
        prev_index = 0
        for key, value in image_count.items():
            self.reduced_data[key] = total_reduced_data[prev_index:prev_index + value]
            prev_index = prev_index + value

    def reduce_lda(self, model, k):
        """Load the preprocessed sparse visual matrices for ``model``, keyed
        by location id.  ``k`` is unused here; it is kept for signature
        symmetry with :meth:`reduce`."""
        files = helpers.load_directory(
            self.project_directory
            + self.config.get("PREPROCESSED-DATA", "visual_directory"))
        data = {}
        for filepath in files:
            filename = helpers.get_file_name(filepath)
            file_model = filename.split()[1]
            location_name = filename.split()[0]
            # Map the human-readable location name back to its id.
            location_id = self.cache.hgetall('location_map')[
                location_name.encode('utf-8')].decode('utf-8')
            if model != file_model:
                continue
            file = (scipy.sparse.load_npz(filepath)).todense()
            if location_id not in data:
                data[location_id] = {}
            # FIX: the previous guarded ``data[location_id][file_model] = []``
            # was a no-op (immediately overwritten); assign the matrix directly.
            data[location_id][file_model] = file
        return data

    def similarity(self, location_id, limit):
        """Return the ``limit`` locations most similar to ``location_id``
        (smaller aggregate distance = more similar)."""
        self.location_id = location_id
        self.limit = limit
        query_matrix = self.reduced_data[self.location_id]
        similar_vectors = []
        for k, v in self.reduced_data.items():
            s = {'query_id': self.location_id,
                 'compare_id': k,
                 'value': self.min_pair_similarity(query_matrix, v)}
            similar_vectors = helpers.sort(similar_vectors, s, self.limit, 'value', 1)
        return similar_vectors

    def min_pair_similarity(self, m1, m2):
        """Average, over rows of ``m1``, of the Manhattan distance to the
        closest row of ``m2`` (a one-directional modified Hausdorff
        distance)."""
        matrix_distance = 0.0
        for r1 in m1:
            minimum = float("inf")
            for r2 in m2:
                dist = self.math.manhattan_distance(r1, r2)
                if dist < minimum:
                    minimum = dist
            matrix_distance = matrix_distance + minimum
        return matrix_distance / len(m1)
| [
"api.helpers.get_file_name",
"api.helpers.sort",
"numpy.asarray",
"numpy.savetxt",
"lib.math.Math",
"numpy.squeeze"
] | [((418, 429), 'lib.math.Math', 'math.Math', ([], {}), '()\n', (427, 429), False, 'from lib import math\n'), ((956, 982), 'numpy.squeeze', 'np.squeeze', (['reduction_data'], {}), '(reduction_data)\n', (966, 982), True, 'import numpy as np\n'), ((1082, 1104), 'numpy.asarray', 'np.asarray', (['components'], {}), '(components)\n', (1092, 1104), True, 'import numpy as np\n'), ((1107, 1194), 'numpy.savetxt', 'np.savetxt', (["(self.project_directory + '/task4.csv')", 'components[:, :k]'], {'delimiter': '""","""'}), "(self.project_directory + '/task4.csv', components[:, :k],\n delimiter=',')\n", (1117, 1194), True, 'import numpy as np\n'), ((1587, 1618), 'api.helpers.get_file_name', 'helpers.get_file_name', (['filepath'], {}), '(filepath)\n', (1608, 1618), False, 'from api import helpers\n'), ((2453, 2509), 'api.helpers.sort', 'helpers.sort', (['similar_vectors', 's', 'self.limit', '"""value"""', '(1)'], {}), "(similar_vectors, s, self.limit, 'value', 1)\n", (2465, 2509), False, 'from api import helpers\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import pandas as pd
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_almost_equal, assert_allclose)
from sm2.tools.sm_exceptions import MissingDataError
from sm2.datasets import sunspots, macrodata
from sm2.tsa.stattools import levinson_durbin
from sm2.tsa.autocov import (
acovf, acf,
yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg)
from sm2.tsa import autocov
from sm2.tsa.tests.results import savedrvs
from sm2.tsa.tests.results.datamlw_tls import mlccf, mlpacf, mlywar, mlacf
# Simulated AR(2) draws shipped with the test suite, scaled down by 1000.
xo = savedrvs.rvsdata.xar2
x100 = xo[-100:] / 1000.  # last 100 observations
x1000 = xo / 1000.  # full series
# -----------------------------------------------------------------
# TODO: This section is duplicated in test_stattools
# Reference correlogram values computed externally and shipped as a CSV
# next to this module.
cur_dir = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(cur_dir, "results", "results_corrgram.csv")
results_corrgram = pd.read_csv(path, delimiter=',')
@pytest.mark.not_vetted
class CheckCorrGram(object):
    """
    Set up for ACF, PACF tests.

    Subclasses read ``x`` (the real GDP series) and ``results`` (the
    reference correlogram values loaded at module import).
    """
    # Shared fixtures for every correlogram test class below.
    data = macrodata.load_pandas()
    x = data.data['realgdp']
    results = results_corrgram
# -----------------------------------------------------------------
# FIX: pass ``scope`` by keyword -- positional arguments to
# ``pytest.fixture`` (other than the function itself) were deprecated in
# pytest 5.2 and removed in 6.0.
@pytest.fixture(scope='module')
def acovf_data():
    """Module-scoped fixture: 250 reproducible N(0, 1) draws (GH#4937)."""
    rnd = np.random.RandomState(12345)
    return rnd.randn(250)
@pytest.mark.not_vetted
class TestACF(CheckCorrGram):
    """Autocorrelation function against the corrgram reference values."""

    @classmethod
    def setup_class(cls):
        cls.res1 = acf(cls.x, nlags=40, qstat=True, alpha=.05, fft=False)
        cls.acf = cls.results['acvar']
        cls.qstat = cls.results['Q1']
        cls.confint_res = cls.results[['acvar_lb', 'acvar_ub']].values

    def test_acf(self):
        # lags 1..40 of the estimated ACF
        assert_almost_equal(self.res1[0][1:41], self.acf, 8)

    def test_confint(self):
        centered = self.res1[1] - self.res1[1].mean(1)[:, None]
        assert_almost_equal(centered[1:41], self.confint_res, 8)

    def test_qstat(self):
        # only 3 decimal places: the reference values were rounded by Stata
        assert_almost_equal(self.res1[2][:40], self.qstat, 3)

    # FIXME: a p-value check is still missing here; it should not need its
    # own reference values if the Q statistic above is correct.
@pytest.mark.not_vetted
class TestACFMissing(CheckCorrGram):
    """ACF behaviour when the series contains a NaN (``missing`` options)."""

    @classmethod
    def setup_class(cls):
        # Prepend a single NaN to the GDP series.
        cls.x = np.concatenate((np.array([np.nan]), cls.x))
        cls.acf = cls.results['acvar']  # reference for 'drop'/'conservative'
        cls.qstat = cls.results['Q1']
        common = dict(nlags=40, qstat=True, alpha=.05, fft=False)
        cls.res_drop = acf(cls.x, missing='drop', **common)
        cls.res_conservative = acf(cls.x, missing='conservative', **common)
        # With missing='none' every lag (1..40) comes back as NaN.
        cls.acf_none = np.full(40, np.nan)
        cls.qstat_none = np.full(40, np.nan)
        cls.res_none = acf(cls.x, missing='none', **common)

    def test_raise(self):
        with pytest.raises(MissingDataError):
            acf(self.x, nlags=40, qstat=True, alpha=0.5,
                missing='raise', fft=False)

    def test_acf_none(self):
        assert_almost_equal(self.res_none[0][1:41], self.acf_none, 8)

    def test_acf_drop(self):
        assert_almost_equal(self.res_drop[0][1:41], self.acf, 8)

    def test_acf_conservative(self):
        assert_almost_equal(self.res_conservative[0][1:41], self.acf, 8)

    def test_qstat_none(self):
        # TODO: why is res1/qstat 1 short
        assert_almost_equal(self.res_none[2], self.qstat_none, 3)

    # TODO: a test_qstat_drop is still missing; the correct q_stat depends
    # on whether nobs=len(x) or nobs<len(x) is used when x contains NaNs.
@pytest.mark.not_vetted
class TestACF_FFT(CheckCorrGram):
    """ACF computed via FFT against the corrgram reference values."""

    @classmethod
    def setup_class(cls):
        cls.qstat = cls.results['Q1']
        cls.acf = cls.results['acvarfft']
        cls.res1 = acf(cls.x, nlags=40, qstat=True, fft=True)

    def test_acf(self):
        assert_almost_equal(self.res1[0][1:], self.acf, 8)

    def test_qstat(self):
        # TODO: why is res1/qstat 1 short
        assert_almost_equal(self.res1[2], self.qstat, 3)
@pytest.mark.not_vetted
def test_acf():
    # upstream this lives in tsa.tests.test_tsa_tools
    for series, expected in [(x100, mlacf.acf100), (x1000, mlacf.acf1000)]:
        estimated = acf(series, unbiased=False, fft=False)[0][:21]
        # TODO: why only dec=8?
        assert_array_almost_equal(expected.ravel(), estimated, 8)
@pytest.mark.not_vetted
def test_ccf():
    # upstream this lives in tsa.tests.test_tsa_tools
    for series, expected in [(x100, mlccf.ccf100), (x1000, mlccf.ccf1000)]:
        estimated = autocov.ccf(series[4:], series[:-4], unbiased=False)[:21]
        # MATLAB reference is stored in reverse lag order.
        assert_array_almost_equal(expected.ravel()[:21][::-1], estimated, 8)
@pytest.mark.not_vetted
def test_pacf_yw():
    # upstream this lives in tsa.tests.test_tsa_tools
    cases = [(x100, mlpacf.pacf100, 1), (x1000, mlpacf.pacf1000, 2)]
    for series, expected, decimal in cases:
        pacfyw = pacf_yw(series, 20, method='mle')
        assert_array_almost_equal(expected.ravel(), pacfyw, decimal)
@pytest.mark.smoke
def test_yule_walker_inter():
    # GH#1869 -- smoke test only; upstream in tsa.tests.test_tsa_tools
    series = np.array([1, -1, 2, 2, 0, -2, 1, 0, -3, 0, 0])
    yule_walker(series, 3)
@pytest.mark.not_vetted
def test_yule_walker():
    # upstream this is in test_regression
    # TODO: Document where R_params came from
    expected_rho = [1.2831003105694765, -0.45240924374091945,
                    -0.20770298557575195, 0.047943648089542337]
    data = sunspots.load()
    rho, sigma = yule_walker(data.endog, order=4, method="mle")
    # TODO: assert something about sigma?
    assert_almost_equal(rho, expected_rho, 4)
@pytest.mark.not_vetted
def test_ywcoef():
    # upstream this lives in tsa.tests.test_tsa_tools
    cases = [(x100, mlywar.arcoef100, 10), (x1000, mlywar.arcoef1000, 20)]
    for series, expected, order in cases:
        coefs = yule_walker(series, order, method='mle')[0]
        # sign convention differs from the MATLAB reference
        assert_array_almost_equal(expected[1:], -coefs, 8)
@pytest.mark.not_vetted
def test_acovf2d():
    dta = sunspots.load_pandas().data
    # FIX: ``pd.DatetimeIndex(start=..., end=..., freq=...)`` was removed
    # from pandas; ``pd.date_range`` is the supported constructor.
    dta.index = pd.date_range(start='1700', end='2009', freq='A')[:309]
    del dta["YEAR"]
    res = acovf(dta, fft=False)
    # A single-column DataFrame must match its underlying ndarray.
    assert_equal(res, acovf(dta.values, fft=False))
    # More than one column is rejected.
    X = np.random.random((10, 2))
    with pytest.raises(ValueError):
        acovf(X, fft=False)
@pytest.mark.not_vetted
def test_acovf_fft_vs_convolution():
    # FFT-based and direct autocovariance must agree for every option combo.
    np.random.seed(1)
    series = np.random.normal(size=100)
    # TODO: parametrize?
    for demean in (True, False):
        for unbiased in (True, False):
            via_fft = acovf(series, demean=demean, unbiased=unbiased,
                            fft=True)
            direct = acovf(series, demean=demean, unbiased=unbiased,
                           fft=False)
            assert_almost_equal(via_fft, direct, decimal=7)
@pytest.mark.not_vetted
def test_acf_fft_dataframe():
    # GH#322: acf on a single-column DataFrame should give a 1-d result
    frame = sunspots.load_pandas().data[['SUNACTIVITY']]
    assert acf(frame, fft=True)[0].ndim == 1
@pytest.mark.parametrize("missing", ['conservative', 'drop', 'raise', 'none'])
@pytest.mark.parametrize("fft", [False, True])
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("unbiased", [True, False])
def test_acovf_nlags(acovf_data, unbiased, demean, fft, missing):
    # GH#4937: nlag should simply truncate the full autocovariance
    kwargs = dict(unbiased=unbiased, demean=demean, fft=fft, missing=missing)
    full = acovf(acovf_data, **kwargs)
    limited = acovf(acovf_data, nlag=10, **kwargs)
    assert_allclose(full[:11], limited)
@pytest.mark.parametrize("missing", ['conservative', 'drop'])
@pytest.mark.parametrize("fft", [False, True])
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("unbiased", [True, False])
def test_acovf_nlags_missing(acovf_data, unbiased, demean, fft, missing):
    # GH#4937: the truncation property must also hold with NaNs present
    data = acovf_data.copy()
    data[1:3] = np.nan
    kwargs = dict(unbiased=unbiased, demean=demean, fft=fft, missing=missing)
    assert_allclose(acovf(data, **kwargs)[:11],
                    acovf(data, nlag=10, **kwargs))
def test_acovf_error(acovf_data):
    # GH#4937: an nlag beyond the sample size must be rejected
    with pytest.raises(ValueError):
        acovf(acovf_data, nlag=250, fft=False)
def test_acovf_warns(acovf_data):
    # GH#4937: omitting fft= triggers the deprecation warning
    with pytest.warns(FutureWarning):
        acovf(acovf_data)
def test_acf_warns(acovf_data):
    # GH#4937: omitting fft= triggers the deprecation warning
    with pytest.warns(FutureWarning):
        acf(acovf_data, nlags=40)
def test_pandasacovf():
    # Passing a Series vs an ndarray to acovf must not change the result.
    # TODO: GH reference?
    # TODO: Same test for other functions?
    ser = pd.Series(list(range(1, 11)))
    from_series = acovf(ser, fft=False)
    from_array = acovf(ser.values, fft=False)
    assert_allclose(from_series, from_array, rtol=1e-12)
def test_pacf2acf_ar():
    # GH#5016: an AR(1) pacf maps to a geometric acf
    pacf = np.zeros(10)
    pacf[:2] = [1, 0.9]
    ar, acf = levinson_durbin_pacf(pacf)
    assert_allclose(acf, 0.9 ** np.arange(10.))
    assert_allclose(ar, pacf[1:], atol=1e-8)
    # Truncated version: same values, shorter output.
    ar, acf = levinson_durbin_pacf(pacf, nlags=5)
    assert_allclose(acf, 0.9 ** np.arange(6.))
    assert_allclose(ar, pacf[1:6], atol=1e-8)
def test_pacf2acf_levinson_durbin():
    # GH#5016: pacf -> acf -> (levinson_durbin) must round-trip
    pacf = -0.9 ** np.arange(11.)
    pacf[0] = 1
    ar, acf = levinson_durbin_pacf(pacf)
    _, ar_ld, pacf_ld, _, _ = levinson_durbin(acf, 10, isacov=True)
    assert_allclose(ar, ar_ld, atol=1e-8)
    assert_allclose(pacf, pacf_ld, atol=1e-8)
    # Reference coefficients from R, FitAR::PacfToAR
    r_reference = [-4.1609, -9.2549, -14.4826, -17.6505, -17.5012,
                   -14.2969, -9.5020, -4.9184, -1.7911, -0.3486]
    assert_allclose(ar, r_reference, atol=1e-4)
def test_pacf2acf_errors():
    # GH#5016: invalid pacf inputs must be rejected
    pacf = -0.9 ** np.arange(11.)
    pacf[0] = 1
    bad_inputs = [
        lambda: levinson_durbin_pacf(pacf, nlags=20),    # nlags too large
        lambda: levinson_durbin_pacf(pacf[:1]),          # too short
        lambda: levinson_durbin_pacf(np.zeros(10)),      # pacf[0] != 1
        lambda: levinson_durbin_pacf(np.zeros((10, 2))),  # not 1-d
    ]
    for call in bad_inputs:
        with pytest.raises(ValueError):
            call()
def test_pacf_burg():
    # GH#5016: Burg pacf agrees with Yule-Walker on an MA(1) process
    rng = np.random.RandomState(12345)
    noise = rng.randn(10001)
    y = noise[1:] + 0.5 * noise[:-1]
    pacf, sigma2 = pacf_burg(y, 10)
    assert_allclose(pacf, pacf_yw(y, 10), atol=5e-4)
    # Internal consistency between the pacf and the innovation variances.
    centered = y - y.mean()
    s2y = centered.dot(centered) / 10000
    pacf[0] = 0
    assert_allclose(sigma2, s2y * np.cumprod(1 - pacf ** 2), atol=1e-3)
def test_pacf_burg_error():
    # GH#5016: 2-d input and order > nobs must be rejected
    with pytest.raises(ValueError):
        pacf_burg(np.empty((20, 2)), 10)
    with pytest.raises(ValueError):
        pacf_burg(np.empty(100), 101)
def test_burg():
    # GH#5016; upstream this is in test_regression
    rng = np.random.RandomState(12345)
    noise = rng.randn(10001)
    y = noise[1:] + 0.5 * noise[:-1]
    # Reference values from R's ar.burg for orders 1..5
    expected = [
        [0.3909931],
        [0.4602607, -0.1771582],
        [0.47473245, -0.21475602, 0.08168813],
        [0.4787017, -0.2251910, 0.1047554, -0.0485900],
        [0.47975462, -0.22746106, 0.10963527, -0.05896347, 0.02167001],
    ]
    for order, reference in enumerate(expected, start=1):
        ar, _ = burg(y, order)
        assert_allclose(ar, reference, atol=1e-6)
        # Skipping demeaning must change the estimates on shifted data.
        ar_nodemean, _ = burg(1 + y, order, False)
        assert np.all(ar != ar_nodemean)
def test_burg_errors():
    # GH#5016; upstream this is in test_regression
    with pytest.raises(ValueError):
        burg(np.ones((100, 2)))              # 2-d input
    with pytest.raises(ValueError):
        burg(np.random.randn(100), 0)        # order < 1
    with pytest.raises(ValueError):
        burg(np.random.randn(100), 'apple')  # non-integer order
| [
"numpy.random.seed",
"pandas.read_csv",
"numpy.empty",
"sm2.tsa.tests.results.datamlw_tls.mlccf.ccf1000.ravel",
"sm2.datasets.sunspots.load_pandas",
"numpy.ones",
"pandas.DatetimeIndex",
"numpy.arange",
"numpy.random.normal",
"sm2.datasets.macrodata.load_pandas",
"pytest.mark.parametrize",
"os... | [((901, 957), 'os.path.join', 'os.path.join', (['cur_dir', '"""results"""', '"""results_corrgram.csv"""'], {}), "(cur_dir, 'results', 'results_corrgram.csv')\n", (913, 957), False, 'import os\n'), ((977, 1009), 'pandas.read_csv', 'pd.read_csv', (['path'], {'delimiter': '""","""'}), "(path, delimiter=',')\n", (988, 1009), True, 'import pandas as pd\n'), ((1280, 1304), 'pytest.fixture', 'pytest.fixture', (['"""module"""'], {}), "('module')\n", (1294, 1304), False, 'import pytest\n'), ((7911, 7988), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""missing"""', "['conservative', 'drop', 'raise', 'none']"], {}), "('missing', ['conservative', 'drop', 'raise', 'none'])\n", (7934, 7988), False, 'import pytest\n'), ((7990, 8035), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fft"""', '[False, True]'], {}), "('fft', [False, True])\n", (8013, 8035), False, 'import pytest\n'), ((8037, 8085), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""demean"""', '[True, False]'], {}), "('demean', [True, False])\n", (8060, 8085), False, 'import pytest\n'), ((8087, 8137), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""unbiased"""', '[True, False]'], {}), "('unbiased', [True, False])\n", (8110, 8137), False, 'import pytest\n'), ((8488, 8548), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""missing"""', "['conservative', 'drop']"], {}), "('missing', ['conservative', 'drop'])\n", (8511, 8548), False, 'import pytest\n'), ((8550, 8595), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fft"""', '[False, True]'], {}), "('fft', [False, True])\n", (8573, 8595), False, 'import pytest\n'), ((8597, 8645), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""demean"""', '[True, False]'], {}), "('demean', [True, False])\n", (8620, 8645), False, 'import pytest\n'), ((8647, 8697), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""unbiased"""', '[True, False]'], {}), "('unbiased', [True, False])\n", (8670, 
8697), False, 'import pytest\n'), ((867, 892), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (882, 892), False, 'import os\n'), ((1124, 1147), 'sm2.datasets.macrodata.load_pandas', 'macrodata.load_pandas', ([], {}), '()\n', (1145, 1147), False, 'from sm2.datasets import sunspots, macrodata\n'), ((1347, 1375), 'numpy.random.RandomState', 'np.random.RandomState', (['(12345)'], {}), '(12345)\n', (1368, 1375), True, 'import numpy as np\n'), ((5709, 5740), 'sm2.tsa.autocov.pacf_yw', 'pacf_yw', (['x100', '(20)'], {'method': '"""mle"""'}), "(x100, 20, method='mle')\n", (5716, 5740), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((5819, 5851), 'sm2.tsa.autocov.pacf_yw', 'pacf_yw', (['x1000', '(20)'], {'method': '"""mle"""'}), "(x1000, 20, method='mle')\n", (5826, 5851), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((6046, 6092), 'numpy.array', 'np.array', (['[1, -1, 2, 2, 0, -2, 1, 0, -3, 0, 0]'], {}), '([1, -1, 2, 2, 0, -2, 1, 0, -3, 0, 0])\n', (6054, 6092), True, 'import numpy as np\n'), ((6097, 6114), 'sm2.tsa.autocov.yule_walker', 'yule_walker', (['x', '(3)'], {}), '(x, 3)\n', (6108, 6114), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((6384, 6399), 'sm2.datasets.sunspots.load', 'sunspots.load', ([], {}), '()\n', (6397, 6399), False, 'from sm2.datasets import sunspots, macrodata\n'), ((6417, 6463), 'sm2.tsa.autocov.yule_walker', 'yule_walker', (['data.endog'], {'order': '(4)', 'method': '"""mle"""'}), "(data.endog, order=4, method='mle')\n", (6428, 6463), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((6511, 6548), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['rho', 'R_params', '(4)'], {}), '(rho, R_params, 4)\n', (6530, 6548), False, 'from numpy.testing 
import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((7136, 7157), 'sm2.tsa.autocov.acovf', 'acovf', (['dta'], {'fft': '(False)'}), '(dta, fft=False)\n', (7141, 7157), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((7219, 7244), 'numpy.random.random', 'np.random.random', (['(10, 2)'], {}), '((10, 2))\n', (7235, 7244), True, 'import numpy as np\n'), ((7376, 7393), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (7390, 7393), True, 'import numpy as np\n'), ((7402, 7428), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (7418, 7428), True, 'import numpy as np\n'), ((8229, 8306), 'sm2.tsa.autocov.acovf', 'acovf', (['acovf_data'], {'unbiased': 'unbiased', 'demean': 'demean', 'fft': 'fft', 'missing': 'missing'}), '(acovf_data, unbiased=unbiased, demean=demean, fft=fft, missing=missing)\n', (8234, 8306), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((8338, 8429), 'sm2.tsa.autocov.acovf', 'acovf', (['acovf_data'], {'unbiased': 'unbiased', 'demean': 'demean', 'fft': 'fft', 'missing': 'missing', 'nlag': '(10)'}), '(acovf_data, unbiased=unbiased, demean=demean, fft=fft, missing=\n missing, nlag=10)\n', (8343, 8429), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((8449, 8484), 'numpy.testing.assert_allclose', 'assert_allclose', (['full[:11]', 'limited'], {}), '(full[:11], limited)\n', (8464, 8484), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((8861, 8938), 'sm2.tsa.autocov.acovf', 'acovf', (['acovf_data'], {'unbiased': 'unbiased', 'demean': 'demean', 'fft': 'fft', 'missing': 'missing'}), '(acovf_data, unbiased=unbiased, demean=demean, fft=fft, missing=missing)\n', (8866, 8938), False, 'from sm2.tsa.autocov import acovf, 
acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((8970, 9061), 'sm2.tsa.autocov.acovf', 'acovf', (['acovf_data'], {'unbiased': 'unbiased', 'demean': 'demean', 'fft': 'fft', 'missing': 'missing', 'nlag': '(10)'}), '(acovf_data, unbiased=unbiased, demean=demean, fft=fft, missing=\n missing, nlag=10)\n', (8975, 9061), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((9081, 9116), 'numpy.testing.assert_allclose', 'assert_allclose', (['full[:11]', 'limited'], {}), '(full[:11], limited)\n', (9096, 9116), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((9869, 9881), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (9877, 9881), True, 'import numpy as np\n'), ((9931, 9957), 'sm2.tsa.autocov.levinson_durbin_pacf', 'levinson_durbin_pacf', (['pacf'], {}), '(pacf)\n', (9951, 9957), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((10010, 10051), 'numpy.testing.assert_allclose', 'assert_allclose', (['ar', 'pacf[1:]'], {'atol': '(1e-08)'}), '(ar, pacf[1:], atol=1e-08)\n', (10025, 10051), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((10066, 10101), 'sm2.tsa.autocov.levinson_durbin_pacf', 'levinson_durbin_pacf', (['pacf'], {'nlags': '(5)'}), '(pacf, nlags=5)\n', (10086, 10101), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((10153, 10195), 'numpy.testing.assert_allclose', 'assert_allclose', (['ar', 'pacf[1:6]'], {'atol': '(1e-08)'}), '(ar, pacf[1:6], atol=1e-08)\n', (10168, 10195), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((10312, 10338), 'sm2.tsa.autocov.levinson_durbin_pacf', 'levinson_durbin_pacf', (['pacf'], {}), '(pacf)\n', (10332, 10338), 
False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((10369, 10406), 'sm2.tsa.stattools.levinson_durbin', 'levinson_durbin', (['acf', '(10)'], {'isacov': '(True)'}), '(acf, 10, isacov=True)\n', (10384, 10406), False, 'from sm2.tsa.stattools import levinson_durbin\n'), ((10411, 10449), 'numpy.testing.assert_allclose', 'assert_allclose', (['ar', 'ar_ld'], {'atol': '(1e-08)'}), '(ar, ar_ld, atol=1e-08)\n', (10426, 10449), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((10453, 10495), 'numpy.testing.assert_allclose', 'assert_allclose', (['pacf', 'pacf_ld'], {'atol': '(1e-08)'}), '(pacf, pacf_ld, atol=1e-08)\n', (10468, 10495), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((10658, 10701), 'numpy.testing.assert_allclose', 'assert_allclose', (['ar', 'ar_from_r'], {'atol': '(0.0001)'}), '(ar, ar_from_r, atol=0.0001)\n', (10673, 10701), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((11161, 11189), 'numpy.random.RandomState', 'np.random.RandomState', (['(12345)'], {}), '(12345)\n', (11182, 11189), True, 'import numpy as np\n'), ((11263, 11279), 'sm2.tsa.autocov.pacf_burg', 'pacf_burg', (['y', '(10)'], {}), '(y, 10)\n', (11272, 11279), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((11294, 11308), 'sm2.tsa.autocov.pacf_yw', 'pacf_yw', (['y', '(10)'], {}), '(y, 10)\n', (11301, 11308), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((11313, 11356), 'numpy.testing.assert_allclose', 'assert_allclose', (['pacf', 'yw_pacf'], {'atol': '(0.0005)'}), '(pacf, yw_pacf, atol=0.0005)\n', (11328, 11356), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, 
assert_almost_equal, assert_allclose\n'), ((11535, 11585), 'numpy.testing.assert_allclose', 'assert_allclose', (['sigma2', 'sigma2_direct'], {'atol': '(0.001)'}), '(sigma2, sigma2_direct, atol=0.001)\n', (11550, 11585), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((11865, 11893), 'numpy.random.RandomState', 'np.random.RandomState', (['(12345)'], {}), '(12345)\n', (11886, 11893), True, 'import numpy as np\n'), ((1647, 1702), 'sm2.tsa.autocov.acf', 'acf', (['cls.x'], {'nlags': '(40)', 'qstat': '(True)', 'alpha': '(0.05)', 'fft': '(False)'}), '(cls.x, nlags=40, qstat=True, alpha=0.05, fft=False)\n', (1650, 1702), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((1829, 1881), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res1[0][1:41]', 'self.acf', '(8)'], {}), '(self.res1[0][1:41], self.acf, 8)\n', (1848, 1881), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((1983, 2039), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['centered[1:41]', 'self.confint_res', '(8)'], {}), '(centered[1:41], self.confint_res, 8)\n', (2002, 2039), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((2075, 2128), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res1[2][:40]', 'self.qstat', '(3)'], {}), '(self.res1[2][:40], self.qstat, 3)\n', (2094, 2128), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((2653, 2724), 'sm2.tsa.autocov.acf', 'acf', (['cls.x'], {'nlags': '(40)', 'qstat': '(True)', 'alpha': '(0.05)', 'missing': '"""drop"""', 'fft': '(False)'}), "(cls.x, nlags=40, qstat=True, alpha=0.05, missing='drop', fft=False)\n", (2656, 2724), False, 'from sm2.tsa.autocov import acovf, acf, 
yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((2782, 2861), 'sm2.tsa.autocov.acf', 'acf', (['cls.x'], {'nlags': '(40)', 'qstat': '(True)', 'alpha': '(0.05)', 'missing': '"""conservative"""', 'fft': '(False)'}), "(cls.x, nlags=40, qstat=True, alpha=0.05, missing='conservative', fft=False)\n", (2785, 2861), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((3072, 3143), 'sm2.tsa.autocov.acf', 'acf', (['cls.x'], {'nlags': '(40)', 'qstat': '(True)', 'alpha': '(0.05)', 'missing': '"""none"""', 'fft': '(False)'}), "(cls.x, nlags=40, qstat=True, alpha=0.05, missing='none', fft=False)\n", (3075, 3143), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((3382, 3443), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res_none[0][1:41]', 'self.acf_none', '(8)'], {}), '(self.res_none[0][1:41], self.acf_none, 8)\n', (3401, 3443), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((3538, 3594), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res_drop[0][1:41]', 'self.acf', '(8)'], {}), '(self.res_drop[0][1:41], self.acf, 8)\n', (3557, 3594), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((3697, 3761), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res_conservative[0][1:41]', 'self.acf', '(8)'], {}), '(self.res_conservative[0][1:41], self.acf, 8)\n', (3716, 3761), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((3900, 3957), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res_none[2]', 'self.qstat_none', '(3)'], {}), '(self.res_none[2], self.qstat_none, 3)\n', (3919, 3957), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, 
assert_almost_equal, assert_allclose\n'), ((4560, 4602), 'sm2.tsa.autocov.acf', 'acf', (['cls.x'], {'nlags': '(40)', 'qstat': '(True)', 'fft': '(True)'}), '(cls.x, nlags=40, qstat=True, fft=True)\n', (4563, 4602), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((4636, 4686), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res1[0][1:]', 'self.acf', '(8)'], {}), '(self.res1[0][1:], self.acf, 8)\n', (4655, 4686), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((4764, 4812), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['self.res1[2]', 'self.qstat', '(3)'], {}), '(self.res1[2], self.qstat, 3)\n', (4783, 4812), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((4993, 5013), 'sm2.tsa.tests.results.datamlw_tls.mlacf.acf100.ravel', 'mlacf.acf100.ravel', ([], {}), '()\n', (5011, 5013), False, 'from sm2.tsa.tests.results.datamlw_tls import mlccf, mlpacf, mlywar, mlacf\n'), ((5141, 5162), 'sm2.tsa.tests.results.datamlw_tls.mlacf.acf1000.ravel', 'mlacf.acf1000.ravel', ([], {}), '()\n', (5160, 5162), False, 'from sm2.tsa.tests.results.datamlw_tls import mlccf, mlpacf, mlywar, mlacf\n'), ((5330, 5378), 'sm2.tsa.autocov.ccf', 'autocov.ccf', (['x100[4:]', 'x100[:-4]'], {'unbiased': '(False)'}), '(x100[4:], x100[:-4], unbiased=False)\n', (5341, 5378), False, 'from sm2.tsa import autocov\n'), ((5469, 5519), 'sm2.tsa.autocov.ccf', 'autocov.ccf', (['x1000[4:]', 'x1000[:-4]'], {'unbiased': '(False)'}), '(x1000[4:], x1000[:-4], unbiased=False)\n', (5480, 5519), False, 'from sm2.tsa import autocov\n'), ((5771, 5793), 'sm2.tsa.tests.results.datamlw_tls.mlpacf.pacf100.ravel', 'mlpacf.pacf100.ravel', ([], {}), '()\n', (5791, 5793), False, 'from sm2.tsa.tests.results.datamlw_tls import mlccf, mlpacf, mlywar, mlacf\n'), ((5882, 5905), 
'sm2.tsa.tests.results.datamlw_tls.mlpacf.pacf1000.ravel', 'mlpacf.pacf1000.ravel', ([], {}), '()\n', (5903, 5905), False, 'from sm2.tsa.tests.results.datamlw_tls import mlccf, mlpacf, mlywar, mlacf\n'), ((7003, 7025), 'sm2.datasets.sunspots.load_pandas', 'sunspots.load_pandas', ([], {}), '()\n', (7023, 7025), False, 'from sm2.datasets import sunspots, macrodata\n'), ((7047, 7099), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', ([], {'start': '"""1700"""', 'end': '"""2009"""', 'freq': '"""A"""'}), "(start='1700', end='2009', freq='A')\n", (7063, 7099), True, 'import pandas as pd\n'), ((7180, 7208), 'sm2.tsa.autocov.acovf', 'acovf', (['dta.values'], {'fft': '(False)'}), '(dta.values, fft=False)\n', (7185, 7208), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((7254, 7279), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7267, 7279), False, 'import pytest\n'), ((7289, 7308), 'sm2.tsa.autocov.acovf', 'acovf', (['X'], {'fft': '(False)'}), '(X, fft=False)\n', (7294, 7308), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((7857, 7876), 'sm2.tsa.autocov.acf', 'acf', (['data'], {'fft': '(True)'}), '(data, fft=True)\n', (7860, 7876), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((9176, 9201), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9189, 9201), False, 'import pytest\n'), ((9211, 9249), 'sm2.tsa.autocov.acovf', 'acovf', (['acovf_data'], {'nlag': '(250)', 'fft': '(False)'}), '(acovf_data, nlag=250, fft=False)\n', (9216, 9249), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((9309, 9336), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (9321, 9336), False, 'import pytest\n'), ((9346, 9363), 'sm2.tsa.autocov.acovf', 'acovf', 
(['acovf_data'], {}), '(acovf_data)\n', (9351, 9363), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((9421, 9448), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (9433, 9448), False, 'import pytest\n'), ((9458, 9483), 'sm2.tsa.autocov.acf', 'acf', (['acovf_data'], {'nlags': '(40)'}), '(acovf_data, nlags=40)\n', (9461, 9483), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((9713, 9734), 'sm2.tsa.autocov.acovf', 'acovf', (['ser'], {'fft': '(False)'}), '(ser, fft=False)\n', (9718, 9734), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((9756, 9784), 'sm2.tsa.autocov.acovf', 'acovf', (['ser.values'], {'fft': '(False)'}), '(ser.values, fft=False)\n', (9761, 9784), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((10803, 10828), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10816, 10828), False, 'import pytest\n'), ((10838, 10874), 'sm2.tsa.autocov.levinson_durbin_pacf', 'levinson_durbin_pacf', (['pacf'], {'nlags': '(20)'}), '(pacf, nlags=20)\n', (10858, 10874), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((10884, 10909), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10897, 10909), False, 'import pytest\n'), ((10919, 10949), 'sm2.tsa.autocov.levinson_durbin_pacf', 'levinson_durbin_pacf', (['pacf[:1]'], {}), '(pacf[:1])\n', (10939, 10949), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((10959, 10984), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10972, 10984), False, 'import pytest\n'), ((11038, 11063), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), 
'(ValueError)\n', (11051, 11063), False, 'import pytest\n'), ((11505, 11530), 'numpy.cumprod', 'np.cumprod', (['(1 - pacf ** 2)'], {}), '(1 - pacf ** 2)\n', (11515, 11530), True, 'import numpy as np\n'), ((11638, 11663), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11651, 11663), False, 'import pytest\n'), ((11715, 11740), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (11728, 11740), False, 'import pytest\n'), ((12261, 12271), 'sm2.tsa.autocov.burg', 'burg', (['y', 'i'], {}), '(y, i)\n', (12265, 12271), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((12280, 12328), 'numpy.testing.assert_allclose', 'assert_allclose', (['ar', 'expected[i - 1]'], {'atol': '(1e-06)'}), '(ar, expected[i - 1], atol=1e-06)\n', (12295, 12328), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((12353, 12374), 'sm2.tsa.autocov.burg', 'burg', (['(1 + y)', 'i', '(False)'], {}), '(1 + y, i, False)\n', (12357, 12374), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((12390, 12415), 'numpy.all', 'np.all', (['(ar != as_nodemean)'], {}), '(ar != as_nodemean)\n', (12396, 12415), True, 'import numpy as np\n'), ((12507, 12532), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12520, 12532), False, 'import pytest\n'), ((12575, 12600), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12588, 12600), False, 'import pytest\n'), ((12649, 12674), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12662, 12674), False, 'import pytest\n'), ((2954, 2966), 'numpy.empty', 'np.empty', (['(40)'], {}), '(40)\n', (2962, 2966), True, 'import numpy as np\n'), ((3027, 3039), 'numpy.empty', 'np.empty', (['(40)'], {}), '(40)\n', (3035, 3039), True, 'import numpy as np\n'), ((3210, 3241), 
'pytest.raises', 'pytest.raises', (['MissingDataError'], {}), '(MissingDataError)\n', (3223, 3241), False, 'import pytest\n'), ((3255, 3327), 'sm2.tsa.autocov.acf', 'acf', (['self.x'], {'nlags': '(40)', 'qstat': '(True)', 'alpha': '(0.5)', 'missing': '"""raise"""', 'fft': '(False)'}), "(self.x, nlags=40, qstat=True, alpha=0.5, missing='raise', fft=False)\n", (3258, 3327), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((4918, 4954), 'sm2.tsa.autocov.acf', 'acf', (['x100'], {'unbiased': '(False)', 'fft': '(False)'}), '(x100, unbiased=False, fft=False)\n', (4921, 4954), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((5065, 5102), 'sm2.tsa.autocov.acf', 'acf', (['x1000'], {'unbiased': '(False)', 'fft': '(False)'}), '(x1000, unbiased=False, fft=False)\n', (5068, 5102), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((7544, 7596), 'sm2.tsa.autocov.acovf', 'acovf', (['q'], {'demean': 'demean', 'unbiased': 'unbiased', 'fft': '(True)'}), '(q, demean=demean, unbiased=unbiased, fft=True)\n', (7549, 7596), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((7614, 7667), 'sm2.tsa.autocov.acovf', 'acovf', (['q'], {'demean': 'demean', 'unbiased': 'unbiased', 'fft': '(False)'}), '(q, demean=demean, unbiased=unbiased, fft=False)\n', (7619, 7667), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((7680, 7718), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['F1', 'F2'], {'decimal': '(7)'}), '(F1, F2, decimal=7)\n', (7699, 7718), False, 'from numpy.testing import assert_array_almost_equal, assert_equal, assert_almost_equal, assert_allclose\n'), ((7799, 7821), 'sm2.datasets.sunspots.load_pandas', 'sunspots.load_pandas', ([], {}), '()\n', (7819, 
7821), False, 'from sm2.datasets import sunspots, macrodata\n'), ((9990, 10005), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (9999, 10005), True, 'import numpy as np\n'), ((10134, 10148), 'numpy.arange', 'np.arange', (['(6.0)'], {}), '(6.0)\n', (10143, 10148), True, 'import numpy as np\n'), ((10267, 10282), 'numpy.arange', 'np.arange', (['(11.0)'], {}), '(11.0)\n', (10276, 10282), True, 'import numpy as np\n'), ((10763, 10778), 'numpy.arange', 'np.arange', (['(11.0)'], {}), '(11.0)\n', (10772, 10778), True, 'import numpy as np\n'), ((11015, 11027), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (11023, 11027), True, 'import numpy as np\n'), ((11094, 11111), 'numpy.zeros', 'np.zeros', (['(10, 2)'], {}), '((10, 2))\n', (11102, 11111), True, 'import numpy as np\n'), ((11683, 11700), 'numpy.empty', 'np.empty', (['(20, 2)'], {}), '((20, 2))\n', (11691, 11700), True, 'import numpy as np\n'), ((11760, 11773), 'numpy.empty', 'np.empty', (['(100)'], {}), '(100)\n', (11768, 11773), True, 'import numpy as np\n'), ((12547, 12564), 'numpy.ones', 'np.ones', (['(100, 2)'], {}), '((100, 2))\n', (12554, 12564), True, 'import numpy as np\n'), ((12615, 12635), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (12630, 12635), True, 'import numpy as np\n'), ((12689, 12709), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (12704, 12709), True, 'import numpy as np\n'), ((2500, 2518), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (2508, 2518), True, 'import numpy as np\n'), ((5414, 5434), 'sm2.tsa.tests.results.datamlw_tls.mlccf.ccf100.ravel', 'mlccf.ccf100.ravel', ([], {}), '()\n', (5432, 5434), False, 'from sm2.tsa.tests.results.datamlw_tls import mlccf, mlpacf, mlywar, mlacf\n'), ((5555, 5576), 'sm2.tsa.tests.results.datamlw_tls.mlccf.ccf1000.ravel', 'mlccf.ccf1000.ravel', ([], {}), '()\n', (5574, 5576), False, 'from sm2.tsa.tests.results.datamlw_tls import mlccf, mlpacf, mlywar, mlacf\n'), ((6776, 6811), 
'sm2.tsa.autocov.yule_walker', 'yule_walker', (['x100', '(10)'], {'method': '"""mle"""'}), "(x100, 10, method='mle')\n", (6787, 6811), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n'), ((6903, 6939), 'sm2.tsa.autocov.yule_walker', 'yule_walker', (['x1000', '(20)'], {'method': '"""mle"""'}), "(x1000, 20, method='mle')\n", (6914, 6939), False, 'from sm2.tsa.autocov import acovf, acf, yule_walker, levinson_durbin_pacf, pacf_yw, pacf_burg, burg\n')] |
import numpy as np
import inspect
import os
import pytest
import MiraTitanHMFemulator
class TestClass:
z_arr = np.linspace(0, 2.02, 4)
m_arr = np.logspace(13, 16, 31)
def test_init(self):
self.HMFemu = MiraTitanHMFemulator.Emulator()
def test_translate_params(self):
HMFemu = MiraTitanHMFemulator.Emulator()
fiducial_cosmo_no_underscore = {'Ommh2': .3*.7**2,
'Ombh2': .022,
'Omnuh2': .006,
'ns': .96,
'h': .7,
'w0': -1,
'wa': 0,
'sigma8': .8}
# Check that translate_params returns
assert HMFemu._Emulator__translate_params(fiducial_cosmo_no_underscore) is True
# Check that translate_params copied the parameters
for var_w, var_wo in zip(['w_0', 'w_a', 'n_s', 'sigma_8'], ['w0', 'wa', 'ns', 'sigma8']):
assert np.isclose(fiducial_cosmo_no_underscore[var_wo], fiducial_cosmo_no_underscore[var_w])
# Check that inconsistent definitions are caught
variables_with_underscores = {'n_s': 1,
'w_0': -1.1,
'w_a': .1,
'sigma_8': .88}
for name in variables_with_underscores.keys():
_cosmo = fiducial_cosmo_no_underscore.copy()
_cosmo[name] = variables_with_underscores[name]
assert HMFemu._Emulator__translate_params(_cosmo) is False
def test_validate_params(self):
HMFemu = MiraTitanHMFemulator.Emulator()
fiducial_cosmo = {'Ommh2': .3*.7**2,
'Ombh2': .022,
'Omnuh2': .006,
'n_s': .96,
'h': .7,
'w_0': -1,
'w_a': 0,
'sigma_8': .8,
}
assert HMFemu.validate_params(fiducial_cosmo) is True
fiducial_cosmo_no_underscore = {'Ommh2': .3*.7**2,
'Ombh2': .022,
'Omnuh2': .006,
'ns': .96,
'h': .7,
'w0': -1,
'wa': 0,
'sigma8': .8,
}
assert HMFemu.validate_params(fiducial_cosmo_no_underscore) is True
# Missing keys, parameter limits
fiducial_cosmo = {'Ommh2': .3*.7**2,
'Ombh2': .022,
'Omnuh2': .006,
'n_s': .96,
'h': .7,
'w_0': -1,
'w_a': 0,
'sigma_8': .8,
}
for k in fiducial_cosmo.keys():
_cosmo = fiducial_cosmo.copy()
_cosmo.pop(k)
assert HMFemu.validate_params(_cosmo) is False
_cosmo = fiducial_cosmo.copy()
_cosmo[k]+= 2
assert HMFemu.validate_params(_cosmo) is False
_cosmo = fiducial_cosmo.copy()
_cosmo[k]-= 2
assert HMFemu.validate_params(_cosmo) is False
def test_missingkey(self):
HMFemu = MiraTitanHMFemulator.Emulator()
fiducial_cosmo = {'Ommh2': .3*.7**2,
'Ombh2': .022,
'Omnuh2': .006,
'n_s': .96,
'h': .7,
'w_0': -1,
'w_a': 0,
'sigma_8': .8,
}
for k in fiducial_cosmo.keys():
_cosmo = fiducial_cosmo.copy()
_cosmo.pop(k)
with pytest.raises(KeyError):
HMFemu.predict(_cosmo, self.z_arr, self.m_arr)
with pytest.raises(TypeError):
HMFemu.predict()
def test_cosmo_limits(self):
HMFemu = MiraTitanHMFemulator.Emulator()
fiducial_cosmo = {'Ommh2': .3*.7**2,
'Ombh2': .022,
'Omnuh2': .006,
'n_s': .96,
'h': .7,
'w_0': -1,
'w_a': 0,
'sigma_8': .8,
}
for k in fiducial_cosmo.keys():
_cosmo = fiducial_cosmo.copy()
_cosmo[k]+= 2
with pytest.raises(ValueError):
HMFemu.predict(_cosmo, self.z_arr, self.m_arr)
for k in fiducial_cosmo.keys():
_cosmo = fiducial_cosmo.copy()
_cosmo[k]-= 2
with pytest.raises(ValueError):
HMFemu.predict(_cosmo, self.z_arr, self.m_arr)
def test_fiducial(self):
np.random.seed(1328)
data_path = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
HMFemu = MiraTitanHMFemulator.Emulator()
fiducial_cosmo = {'Ommh2': .3*.7**2,
'Ombh2': .022,
'Omnuh2': .006,
'n_s': .96,
'h': .7,
'w_0': -1,
'w_a': 0,
'sigma_8': .8,
}
res = HMFemu.predict(fiducial_cosmo, self.z_arr, self.m_arr)
# Mass function
_fname = os.path.join(data_path, 'fid.npy')
# np.save(_fname, res[0])
ref = np.load(_fname)
assert np.all(np.isclose(res[0], ref))
# Error on mass function
_fname = os.path.join(data_path, 'fid_err.npy')
# np.save(_fname, res[1])
ref = np.load(_fname)
assert np.all(np.isclose(res[1], ref))
def test_center(self):
np.random.seed(1328)
data_path = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
HMFemu = MiraTitanHMFemulator.Emulator()
mid_cosmo = {}
for k in ['Ommh2', 'Ombh2', 'Omnuh2', 'n_s', 'h', 'sigma_8', 'w_0']:
mid_cosmo[k] = .5 * np.sum(HMFemu.param_limits[k])
mid_cosmo['w_a'] = .5 * (-1.73 + 1.28)
res = HMFemu.predict(mid_cosmo, self.z_arr, self.m_arr)
# Mass function
_fname = os.path.join(data_path, 'mid.npy')
# np.save(_fname, res[0])
ref = np.load(_fname)
assert np.all(np.isclose(res[0], ref))
# Error on mass function
_fname = os.path.join(data_path, 'mid_err.npy')
# np.save(_fname, res[1])
ref = np.load(_fname)
assert np.all(np.isclose(res[1], ref))
| [
"numpy.load",
"inspect.stack",
"numpy.random.seed",
"numpy.sum",
"numpy.logspace",
"numpy.isclose",
"pytest.raises",
"numpy.linspace",
"MiraTitanHMFemulator.Emulator",
"os.path.join"
] | [((117, 140), 'numpy.linspace', 'np.linspace', (['(0)', '(2.02)', '(4)'], {}), '(0, 2.02, 4)\n', (128, 140), True, 'import numpy as np\n'), ((153, 176), 'numpy.logspace', 'np.logspace', (['(13)', '(16)', '(31)'], {}), '(13, 16, 31)\n', (164, 176), True, 'import numpy as np\n'), ((226, 257), 'MiraTitanHMFemulator.Emulator', 'MiraTitanHMFemulator.Emulator', ([], {}), '()\n', (255, 257), False, 'import MiraTitanHMFemulator\n'), ((314, 345), 'MiraTitanHMFemulator.Emulator', 'MiraTitanHMFemulator.Emulator', ([], {}), '()\n', (343, 345), False, 'import MiraTitanHMFemulator\n'), ((1726, 1757), 'MiraTitanHMFemulator.Emulator', 'MiraTitanHMFemulator.Emulator', ([], {}), '()\n', (1755, 1757), False, 'import MiraTitanHMFemulator\n'), ((3566, 3597), 'MiraTitanHMFemulator.Emulator', 'MiraTitanHMFemulator.Emulator', ([], {}), '()\n', (3595, 3597), False, 'import MiraTitanHMFemulator\n'), ((4277, 4308), 'MiraTitanHMFemulator.Emulator', 'MiraTitanHMFemulator.Emulator', ([], {}), '()\n', (4306, 4308), False, 'import MiraTitanHMFemulator\n'), ((5126, 5146), 'numpy.random.seed', 'np.random.seed', (['(1328)'], {}), '(1328)\n', (5140, 5146), True, 'import numpy as np\n'), ((5241, 5272), 'MiraTitanHMFemulator.Emulator', 'MiraTitanHMFemulator.Emulator', ([], {}), '()\n', (5270, 5272), False, 'import MiraTitanHMFemulator\n'), ((5729, 5763), 'os.path.join', 'os.path.join', (['data_path', '"""fid.npy"""'], {}), "(data_path, 'fid.npy')\n", (5741, 5763), False, 'import os\n'), ((5812, 5827), 'numpy.load', 'np.load', (['_fname'], {}), '(_fname)\n', (5819, 5827), True, 'import numpy as np\n'), ((5926, 5964), 'os.path.join', 'os.path.join', (['data_path', '"""fid_err.npy"""'], {}), "(data_path, 'fid_err.npy')\n", (5938, 5964), False, 'import os\n'), ((6013, 6028), 'numpy.load', 'np.load', (['_fname'], {}), '(_fname)\n', (6020, 6028), True, 'import numpy as np\n'), ((6113, 6133), 'numpy.random.seed', 'np.random.seed', (['(1328)'], {}), '(1328)\n', (6127, 6133), True, 'import numpy as np\n'), 
((6228, 6259), 'MiraTitanHMFemulator.Emulator', 'MiraTitanHMFemulator.Emulator', ([], {}), '()\n', (6257, 6259), False, 'import MiraTitanHMFemulator\n'), ((6578, 6612), 'os.path.join', 'os.path.join', (['data_path', '"""mid.npy"""'], {}), "(data_path, 'mid.npy')\n", (6590, 6612), False, 'import os\n'), ((6661, 6676), 'numpy.load', 'np.load', (['_fname'], {}), '(_fname)\n', (6668, 6676), True, 'import numpy as np\n'), ((6775, 6813), 'os.path.join', 'os.path.join', (['data_path', '"""mid_err.npy"""'], {}), "(data_path, 'mid_err.npy')\n", (6787, 6813), False, 'import os\n'), ((6862, 6877), 'numpy.load', 'np.load', (['_fname'], {}), '(_fname)\n', (6869, 6877), True, 'import numpy as np\n'), ((1082, 1171), 'numpy.isclose', 'np.isclose', (['fiducial_cosmo_no_underscore[var_wo]', 'fiducial_cosmo_no_underscore[var_w]'], {}), '(fiducial_cosmo_no_underscore[var_wo],\n fiducial_cosmo_no_underscore[var_w])\n', (1092, 1171), True, 'import numpy as np\n'), ((4170, 4194), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4183, 4194), False, 'import pytest\n'), ((5850, 5873), 'numpy.isclose', 'np.isclose', (['res[0]', 'ref'], {}), '(res[0], ref)\n', (5860, 5873), True, 'import numpy as np\n'), ((6051, 6074), 'numpy.isclose', 'np.isclose', (['res[1]', 'ref'], {}), '(res[1], ref)\n', (6061, 6074), True, 'import numpy as np\n'), ((6699, 6722), 'numpy.isclose', 'np.isclose', (['res[0]', 'ref'], {}), '(res[0], ref)\n', (6709, 6722), True, 'import numpy as np\n'), ((6900, 6923), 'numpy.isclose', 'np.isclose', (['res[1]', 'ref'], {}), '(res[1], ref)\n', (6910, 6923), True, 'import numpy as np\n'), ((4068, 4091), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (4081, 4091), False, 'import pytest\n'), ((4780, 4805), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4793, 4805), False, 'import pytest\n'), ((4997, 5022), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5010, 5022), False, 'import 
pytest\n'), ((6393, 6423), 'numpy.sum', 'np.sum', (['HMFemu.param_limits[k]'], {}), '(HMFemu.param_limits[k])\n', (6399, 6423), True, 'import numpy as np\n'), ((5199, 5214), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (5212, 5214), False, 'import inspect\n'), ((6186, 6201), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (6199, 6201), False, 'import inspect\n')] |
'''
This file does the heavy-lifting of the OCR.
I refered to multiple tutorials / StackOverflow answers online to learn this.
kNN Regression in OpenCV: https://docs.opencv.org/3.0-beta/modules/ml/doc/k_nearest_neighbors.html
OCR with kNN Regressions: https://stackoverflow.com/a/9620295/4982987
NOTE: The "OCR with kNN Regressions" tutorial was in OpenCV 2 and I had to
reimplement it myself for Python3.
'''
####################################
# Modules
####################################
import sys
for p in sys.path:
if p.startswith('/System/Library/Frameworks/Python.framework/Versions/2.7/Extras'): sys.path.remove(p)
import cv2
import numpy as np
from isolation import *
####################################
# Training
####################################
# Load data
features = np.loadtxt('ocr_features.data', np.float32)
labels = np.loadtxt('ocr_labels.data', np.float32)
# Train kNN model
model = cv2.ml.KNearest_create()
model.train(features, cv2.ml.ROW_SAMPLE, labels)
####################################
# Global Variables
####################################
MINIMUMCONTOURAREA = 1000
MAXIMUMCONTOURAREA = 7500
MINIMUMCHARACTERHEIGHT = 28
MINIMUMCHARACTERWIDTH = 15
####################################
# Regression
####################################
def ocr(image):
# Preprocessing
im_blur = cv2.GaussianBlur(image, (31, 31), 0)
im_trsh = cv2.adaptiveThreshold(im_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
# Detect contours in im_trsholded image
_, contours, _ = cv2.findContours(im_trsh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Initialize result dictionary
characters = {}
# Iterate through all detected contours
for contour in contours:
# Only for large enough contours
if MINIMUMCONTOURAREA < cv2.contourArea(contour) < MAXIMUMCONTOURAREA:
# Find bounding rectangle for the character
x, y, width, height = cv2.boundingRect(contour)
# Only for large enough characters
if height > MINIMUMCHARACTERHEIGHT:
# Zoom into region of interest
roi = perspectiveTransform(im_trsh, numpy.array([[x, y], [x+width, y], [x+width, y+height], [x, y+height]]))
# Resize and reshape region of interest to 10x10 grid
roi = cv2.resize(roi, (10,10))
roi = roi.reshape((1, 100))
roi = np.float32(roi)
# Implement kNN model
_, result, _, _ = model.findNearest(roi, k=1)
# Get character from ASCII
character = chr(int((result[0][0])))
# Add character to result
characters[x] = character
# Omit overlapping contours
previousKey = -100
largeCharacters = {}
for key in sorted(characters.keys()):
if key - previousKey < MINIMUMCHARACTERWIDTH: continue
previousKey = key
largeCharacters[key] = characters[key]
word = ""
for key in sorted(largeCharacters.keys()):
word += str(largeCharacters[key])
return word
| [
"cv2.resize",
"cv2.GaussianBlur",
"cv2.contourArea",
"sys.path.remove",
"cv2.ml.KNearest_create",
"numpy.float32",
"cv2.adaptiveThreshold",
"numpy.loadtxt",
"cv2.boundingRect",
"cv2.findContours"
] | [((802, 845), 'numpy.loadtxt', 'np.loadtxt', (['"""ocr_features.data"""', 'np.float32'], {}), "('ocr_features.data', np.float32)\n", (812, 845), True, 'import numpy as np\n'), ((855, 896), 'numpy.loadtxt', 'np.loadtxt', (['"""ocr_labels.data"""', 'np.float32'], {}), "('ocr_labels.data', np.float32)\n", (865, 896), True, 'import numpy as np\n'), ((924, 948), 'cv2.ml.KNearest_create', 'cv2.ml.KNearest_create', ([], {}), '()\n', (946, 948), False, 'import cv2\n'), ((1348, 1384), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(31, 31)', '(0)'], {}), '(image, (31, 31), 0)\n', (1364, 1384), False, 'import cv2\n'), ((1399, 1493), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['im_blur', '(255)', 'cv2.ADAPTIVE_THRESH_MEAN_C', 'cv2.THRESH_BINARY', '(11)', '(2)'], {}), '(im_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY, 11, 2)\n', (1420, 1493), False, 'import cv2\n'), ((1555, 1620), 'cv2.findContours', 'cv2.findContours', (['im_trsh', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(im_trsh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (1571, 1620), False, 'import cv2\n'), ((617, 635), 'sys.path.remove', 'sys.path.remove', (['p'], {}), '(p)\n', (632, 635), False, 'import sys\n'), ((1824, 1848), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (1839, 1848), False, 'import cv2\n'), ((1961, 1986), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (1977, 1986), False, 'import cv2\n'), ((2346, 2371), 'cv2.resize', 'cv2.resize', (['roi', '(10, 10)'], {}), '(roi, (10, 10))\n', (2356, 2371), False, 'import cv2\n'), ((2437, 2452), 'numpy.float32', 'np.float32', (['roi'], {}), '(roi)\n', (2447, 2452), True, 'import numpy as np\n')] |
"""This module defines a dataclass which holds metadata about a WSI.
With this class, metadata is in a normalized consistent format
which is quite useful when working with many different WSI formats.
The raw metadata is also preserved and accessible via a dictionary. The
format of this dictionary may vary between WSI formats.
"""
import warnings
from numbers import Number
from pathlib import Path
from typing import List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
Resolution = Union[Number, Tuple[Number, Number], np.ndarray]
class WSIMeta:
"""Whole slide image metadata class.
Args:
slide_dimensions (int, int):
Tuple containing the width and height of the WSI. These
are for the baseline (full resolution) image if the WSI
is a pyramid or multi-resolution.
level_dimensions (list):
A list of dimensions for each level of the pyramid or
for each resolution in the WSI.
objective_power (float, optional):
The power of the objective lens used to create the
image.
level_count: (int, optional):
The number of levels or resolutions in the WSI. If not
given this is assigned len(level_dimensions). Defaults
to None.
level_downsamples (:obj:`list` of :obj:`float`):
List of scale values which describe how many times
smaller the current level is compared with the baseline.
vendor (str, optional):
Scanner vendor/manufacturer description.
mpp (float, float, optional):
Microns per pixel.
file_path (Path, optional):
Path to the corresponding WSI file.
raw (dict, optional):
Dictionary of unprocessed metadata extracted from the
WSI format. For JPEG-2000 images this contains an xml
object under the key "xml".
Attributes:
slide_dimensions (tuple(int)):
Tuple containing the width and height of the WSI. These are
for the baseline (full resolution) image if the WSI is a
pyramid or multi-resolution. Required.
axes (str):
Axes ordering of the image. This is most relevant for
OME-TIFF images where the axes ordering can vary. For most
images this with be "YXS" i.e. the image is store in the
axis order of Y coordinates first, then X coordinates, and
colour channels last.
level_dimensions (list):
A list of dimensions for each level of the pyramid or for
each resolution in the WSI. Defaults to [slide_dimension].
objective_power (float):
The magnification power of the objective lens used to scan
the image. Not always present or accurate. Defaults to None.
level_count: (int):
The number of levels or resolutions in the WSI. If not given
this is assigned len(level_dimensions). Defaults to
len(level_dimensions).
level_downsamples (:obj:`list` of :obj:`float`):
List of scale values which describe how many times smaller
the current level is compared with the baseline. Defaults to
(1,).
vendor (str):
Scanner vendor/manufacturer description.
mpp (float, float, optional):
Microns per pixel. Derived from objective power and sensor
size. Not always present or accurate. Defaults to None.
file_path (Path):
Path to the corresponding WSI file. Defaults to None.
raw (dict):
Dictionary of unprocessed metadata extracted from the WSI
format. For JP2 images this contains an xml object under the
key "xml". Defaults to empty dictionary.
"""
_valid_axes_characters = "YXSTZ"
def __init__(
self,
slide_dimensions: Tuple[int, int],
axes: str,
level_dimensions: Optional[Sequence[Tuple[int, int]]] = None,
objective_power: Optional[float] = None,
level_count: Optional[int] = None,
level_downsamples: Optional[Sequence[float]] = (1,),
vendor: Optional[str] = None,
mpp: Optional[Sequence[float]] = None,
file_path: Optional[Path] = None,
raw: Optional[Mapping[str, str]] = None,
):
self.axes = axes
self.objective_power = float(objective_power) if objective_power else None
self.slide_dimensions = tuple([int(x) for x in slide_dimensions])
self.level_dimensions = (
tuple([(int(w), int(h)) for w, h in level_dimensions])
if level_dimensions is not None
else [self.slide_dimensions]
)
self.level_downsamples = (
[float(x) for x in level_downsamples]
if level_downsamples is not None
else None
)
self.level_count = (
int(level_count) if level_count is not None else len(self.level_dimensions)
)
self.vendor = str(vendor)
self.mpp = np.array([float(x) for x in mpp]) if mpp is not None else None
self.file_path = Path(file_path) if file_path is not None else None
self.raw = raw if raw is not None else None
self.validate()
def validate(self):
"""Validate passed values and cast to Python types.
Metadata values are often given as strings and must be
parsed/cast to the appropriate python type e.g. "3.14" to 3.14
etc.
Returns:
bool:
True is validation passed, False otherwise.
"""
passed = True
# Fatal conditions: Should return False if not True
if len(set(self.axes) - set(self._valid_axes_characters)) > 0:
warnings.warn(
"Axes contains invalid characters. "
f"Valid characters are '{self._valid_axes_characters}'."
)
passed = False
if self.level_count < 1:
warnings.warn("Level count is not a positive integer")
passed = False
if self.level_dimensions is None:
warnings.warn("level_dimensions is None")
passed = False
elif len(self.level_dimensions) != self.level_count:
warnings.warn("Length of level dimensions != level count")
passed = False
if self.level_downsamples is None:
warnings.warn("Level downsamples is None")
passed = False
elif len(self.level_downsamples) != self.level_count:
warnings.warn("Length of level downsamples != level count")
passed = False
# Non-fatal conditions: Raise warning only, do not fail validation
if self.raw is None:
warnings.warn("Raw data is None")
if all(x is None for x in [self.objective_power, self.mpp]):
warnings.warn("Unknown scale (no objective_power or mpp)")
return passed # noqa
def level_downsample(
self,
level: Union[int, float],
) -> float:
"""Get the downsample factor for a level.
For non-integer values of `level`, the downsample factor is
linearly interpolated between from the downsample factors of the
level below and the level above.
Args:
level (int or float):
Level to get downsample factor for.
Returns:
float:
Downsample factor for the given level.
"""
level_downsamples = self.level_downsamples
if isinstance(level, int) or int(level) == level:
# Return the downsample for the level
return level_downsamples[int(level)]
# Linearly interpolate between levels
floor = int(np.floor(level))
ceil = int(np.ceil(level))
floor_downsample = level_downsamples[floor]
ceil_downsample = level_downsamples[ceil]
return np.interp(level, [floor, ceil], [floor_downsample, ceil_downsample])
def relative_level_scales(
self, resolution: Resolution, units: str
) -> List[np.ndarray]:
"""Calculate scale of each level in the WSI relative to given resolution.
Find the relative scale of each image pyramid / resolution level
of the WSI relative to the given resolution and units.
Values > 1 indicate that the level has a larger scale than the
target and < 1 indicates that it is smaller.
Args:
resolution (float or tuple(float)):
Scale to calculate relative to units.
units (str):
Units of the scale. Allowed values are: `"mpp"`,
`"power"`, `"level"`, `"baseline"`. Baseline refers to
the largest resolution in the WSI (level 0).
Raises:
ValueError:
Missing MPP metadata.
ValueError:
Missing objective power metadata.
ValueError:
Invalid units.
Returns:
list:
Scale for each level relative to the given scale and
units.
Examples:
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> print(wsi.info.relative_level_scales(0.5, "mpp"))
[array([0.91282519, 0.91012514]), array([1.82565039, 1.82025028]) ...
>>> from tiatoolbox.wsicore.wsireader import WSIReader
>>> wsi = WSIReader.open(input_img="./CMU-1.ndpi")
>>> print(wsi.info.relative_level_scales(0.5, "baseline"))
[0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0]
"""
if units not in ("mpp", "power", "level", "baseline"):
raise ValueError("Invalid units")
level_downsamples = self.level_downsamples
def np_pair(x: Union[Number, np.array]) -> np.ndarray:
"""Ensure input x is a numpy array of length 2."""
# If one number is given, the same value is used for x and y
if isinstance(x, Number):
return np.array([x] * 2)
return np.array(x)
if units == "level":
if resolution >= len(level_downsamples):
raise ValueError(
f"Target scale level {resolution} "
f"> number of levels {len(level_downsamples)} in WSI"
)
base_scale, resolution = 1, self.level_downsample(resolution)
resolution = np_pair(resolution)
if units == "mpp":
if self.mpp is None:
raise ValueError("MPP is None. Cannot determine scale in terms of MPP.")
base_scale = self.mpp
if units == "power":
if self.objective_power is None:
raise ValueError(
"Objective power is None. "
"Cannot determine scale in terms of objective power."
)
base_scale, resolution = 1 / self.objective_power, 1 / resolution
if units == "baseline":
base_scale, resolution = 1, 1 / resolution
return [
(base_scale * downsample) / resolution for downsample in level_downsamples
]
def as_dict(self):
"""Convert WSIMeta to dictionary of Python types.
Returns:
dict:
Whole slide image meta data as dictionary.
"""
if self.mpp is None:
mpp = (self.mpp, self.mpp)
else:
mpp = tuple(self.mpp)
return {
"objective_power": self.objective_power,
"slide_dimensions": self.slide_dimensions,
"level_count": self.level_count,
"level_dimensions": self.level_dimensions,
"level_downsamples": self.level_downsamples,
"vendor": self.vendor,
"mpp": mpp,
"file_path": self.file_path,
}
| [
"numpy.ceil",
"numpy.floor",
"pathlib.Path",
"numpy.array",
"numpy.interp",
"warnings.warn"
] | [((8117, 8185), 'numpy.interp', 'np.interp', (['level', '[floor, ceil]', '[floor_downsample, ceil_downsample]'], {}), '(level, [floor, ceil], [floor_downsample, ceil_downsample])\n', (8126, 8185), True, 'import numpy as np\n'), ((5303, 5318), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (5307, 5318), False, 'from pathlib import Path\n'), ((5940, 6055), 'warnings.warn', 'warnings.warn', (['f"""Axes contains invalid characters. Valid characters are \'{self._valid_axes_characters}\'."""'], {}), '(\n f"Axes contains invalid characters. Valid characters are \'{self._valid_axes_characters}\'."\n )\n', (5953, 6055), False, 'import warnings\n'), ((6168, 6222), 'warnings.warn', 'warnings.warn', (['"""Level count is not a positive integer"""'], {}), "('Level count is not a positive integer')\n", (6181, 6222), False, 'import warnings\n'), ((6305, 6346), 'warnings.warn', 'warnings.warn', (['"""level_dimensions is None"""'], {}), "('level_dimensions is None')\n", (6318, 6346), False, 'import warnings\n'), ((6589, 6631), 'warnings.warn', 'warnings.warn', (['"""Level downsamples is None"""'], {}), "('Level downsamples is None')\n", (6602, 6631), False, 'import warnings\n'), ((6938, 6971), 'warnings.warn', 'warnings.warn', (['"""Raw data is None"""'], {}), "('Raw data is None')\n", (6951, 6971), False, 'import warnings\n'), ((7054, 7112), 'warnings.warn', 'warnings.warn', (['"""Unknown scale (no objective_power or mpp)"""'], {}), "('Unknown scale (no objective_power or mpp)')\n", (7067, 7112), False, 'import warnings\n'), ((7948, 7963), 'numpy.floor', 'np.floor', (['level'], {}), '(level)\n', (7956, 7963), True, 'import numpy as np\n'), ((7984, 7998), 'numpy.ceil', 'np.ceil', (['level'], {}), '(level)\n', (7991, 7998), True, 'import numpy as np\n'), ((10347, 10358), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (10355, 10358), True, 'import numpy as np\n'), ((6447, 6505), 'warnings.warn', 'warnings.warn', (['"""Length of level dimensions != level 
count"""'], {}), "('Length of level dimensions != level count')\n", (6460, 6505), False, 'import warnings\n'), ((6733, 6792), 'warnings.warn', 'warnings.warn', (['"""Length of level downsamples != level count"""'], {}), "('Length of level downsamples != level count')\n", (6746, 6792), False, 'import warnings\n'), ((10310, 10327), 'numpy.array', 'np.array', (['([x] * 2)'], {}), '([x] * 2)\n', (10318, 10327), True, 'import numpy as np\n')] |
import os
from PIL import Image
import sys
import numpy as np
import cv2
import fnmatch
from skimage.io import imread
import numpy.matlib as matlib
import json
import zlib
import base64
# Dataset layout: `ann/` holds per-image JSON annotations (bitmap/origin
# objects, Supervisely-style by the look of them — TODO confirm), `img/`
# the slide images, and `maskout/` receives the extracted binary masks.
mainDir = '/home/samik/NISSL/Aarti/ds/'
annDir = mainDir + 'ann/'
imgdir = mainDir + 'img/'
outDir = mainDir + 'maskout/'
def imread_fast(img_path):
    """Decode a JPEG2000 image with kdu_expand and return it as an array.

    The image is expanded to a temporary TIFF under temp/, read back with
    skimage, and the temporary file is removed afterwards.

    Args:
        img_path: path to the source image (extension is stripped to build
            the temp file name).

    Returns:
        The decoded image as a numpy array.
    """
    import subprocess  # local import: only this helper shells out
    base = os.path.basename(img_path)[:-4]
    tmp_path = "temp/" + base + ".tif"
    # Run the decoder without a shell so any character in the file name
    # (spaces, '&', quotes, ...) is passed through literally. The original
    # code only escaped '&' by hand, which broke on other shell metachars.
    subprocess.run(["kdu_expand", "-i", img_path, "-o", tmp_path,
                    "-num_threads", "16"])
    img = imread(tmp_path)
    os.remove(tmp_path)
    return img
mainFolders = os.listdir(imgdir)
# outFolders = os.listdir(outDir)
for fname in mainFolders:
    # Load the slide and convert OpenCV's BGR channel order to RGB.
    img = cv2.imread(imgdir + fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # np.bool was removed in NumPy 1.24; the builtin bool dtype is the
    # supported spelling.
    masknew = np.zeros((img.shape[0], img.shape[1]), dtype=bool)
    print(masknew.shape)
    ann = annDir + fname + '.json'
    print(ann)
    with open(ann) as json_file:
        data = json.load(json_file)
    count = 0
    # makedirs with exist_ok replaces the shelled-out "mkdir" and does not
    # complain when the directory already exists.
    os.makedirs(outDir + fname[:-4] + '_mask', exist_ok=True)
    for p in data['objects']:
        # Each object's bitmap is a zlib-compressed, base64-encoded PNG
        # placed at `origin` (x, y) within the full image.
        z = zlib.decompress(base64.b64decode(p['bitmap']['data']))
        r = p['bitmap']['origin']
        # np.fromstring is deprecated for binary input; frombuffer is the
        # zero-copy replacement with identical semantics here.
        n = np.frombuffer(z, np.uint8)
        # The alpha channel of the decoded PNG carries the binary mask.
        mask = cv2.imdecode(n, cv2.IMREAD_UNCHANGED)[:, :, 3].astype(bool)
        # Paste the object mask into a full-size canvas at its origin.
        maskOut = np.zeros((img.shape[0], img.shape[1]), dtype=bool)
        maskOut[r[1]: r[1] + mask.shape[0], r[0]:r[0] + mask.shape[1]] = mask
        print(outDir + fname[:-4] + '_mask/' + str(count) + '.tif')
        cv2.imwrite(outDir + fname[:-4] + '_mask/' + str(r[0]) + '_' + str(r[1]) + '.tif', np.uint8(maskOut) * 255)
        count = count + 1
# input_folder = '/home/samik/ProcessDet/wdata/train/masks2m/'
# # input_folder = '/home/samik/ProcessDet/wdata/Process/annotV/'
# input_folder2 = '/home/samik/ProcessDet/wdata/test/images/'
# files = os.listdir(input_folder)
# files2 = os.listdir(input_folder2)
# for f1 in files:
# # if f1.replace('png', 'tif') not in files:
# os.system("cp " + os.path.join('/home/samik/ProcessDet/wdata/TrainingData2/rawD/', f1) + " /home/samik/ProcessDet/wdata/train/images/")
# # os.system("cp " + os.path.join('/home/samik/ProcessDet/wdata/TrainingData/dmV/', f1) + " /home/samik/ProcessDet/wdata/test/dmIP/")
# # img = np.zeros((512,512,3), dtype = np.uint8)
# # imgRaw = cv2.imread(os.path.join('/nfs/data/main/M32/PMD1605_Annotations/dataMBA/cropped_annotation/Process/rawD', f1), cv2.IMREAD_UNCHANGED)
# # # print(f1)
# # if fnmatch.fnmatch(f1, '*R.tif'):
# # img[:,:,0] = imgRaw
# # print(f1)
# # if fnmatch.fnmatch(f1, '*G.tif'):
# # img[:,:,1] = imgRaw
# # print(f1)
# # rgbimg = Image.fromarray(img)
# # # rgbimg.paste(img)
# # rgbimg.save("/home/samik/ProcessDet/wdata/train/images/" + f1)
| [
"json.load",
"numpy.uint8",
"os.path.basename",
"cv2.cvtColor",
"cv2.imdecode",
"numpy.zeros",
"os.system",
"base64.b64decode",
"cv2.imread",
"numpy.fromstring",
"os.listdir",
"skimage.io.imread"
] | [((713, 731), 'os.listdir', 'os.listdir', (['imgdir'], {}), '(imgdir)\n', (723, 731), False, 'import os\n'), ((394, 422), 'os.path.basename', 'os.path.basename', (['img_path_C'], {}), '(img_path_C)\n', (410, 422), False, 'import os\n'), ((460, 486), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (476, 486), False, 'import os\n'), ((524, 616), 'os.system', 'os.system', (["('kdu_expand -i ' + img_path_C + ' -o temp/' + base_C + '.tif -num_threads 16')"], {}), "('kdu_expand -i ' + img_path_C + ' -o temp/' + base_C +\n '.tif -num_threads 16')\n", (533, 616), False, 'import os\n'), ((615, 646), 'skimage.io.imread', 'imread', (["('temp/' + base + '.tif')"], {}), "('temp/' + base + '.tif')\n", (621, 646), False, 'from skimage.io import imread\n'), ((647, 686), 'os.system', 'os.system', (["('rm temp/' + base_C + '.tif')"], {}), "('rm temp/' + base_C + '.tif')\n", (656, 686), False, 'import os\n'), ((803, 829), 'cv2.imread', 'cv2.imread', (['(imgdir + files)'], {}), '(imgdir + files)\n', (813, 829), False, 'import cv2\n'), ((840, 876), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (852, 876), False, 'import cv2\n'), ((891, 944), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1])'], {'dtype': 'np.bool'}), '((img.shape[0], img.shape[1]), dtype=np.bool)\n', (899, 944), True, 'import numpy as np\n'), ((1070, 1090), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1079, 1090), False, 'import json\n'), ((1117, 1168), 'os.system', 'os.system', (["('mkdir ' + outDir + files[:-4] + '_mask')"], {}), "('mkdir ' + outDir + files[:-4] + '_mask')\n", (1126, 1168), False, 'import os\n'), ((1410, 1436), 'numpy.fromstring', 'np.fromstring', (['z', 'np.uint8'], {}), '(z, np.uint8)\n', (1423, 1436), True, 'import numpy as np\n'), ((1537, 1590), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1])'], {'dtype': 'np.bool'}), '((img.shape[0], img.shape[1]), dtype=np.bool)\n', (1545, 
1590), True, 'import numpy as np\n'), ((1235, 1272), 'base64.b64decode', 'base64.b64decode', (["p['bitmap']['data']"], {}), "(p['bitmap']['data'])\n", (1251, 1272), False, 'import base64\n'), ((1840, 1857), 'numpy.uint8', 'np.uint8', (['maskOut'], {}), '(maskOut)\n', (1848, 1857), True, 'import numpy as np\n'), ((1455, 1492), 'cv2.imdecode', 'cv2.imdecode', (['n', 'cv2.IMREAD_UNCHANGED'], {}), '(n, cv2.IMREAD_UNCHANGED)\n', (1467, 1492), False, 'import cv2\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval for sdk"""
import argparse
import os
import math
import cv2
import numpy as np
def parser_args():
    """Build and parse the command-line arguments for SDK evaluation.

    Returns:
        argparse.Namespace with `label_dir`, `infer_dir` and `scale`.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--label_dir", type=str, default="../data/DIV2K/label/",
        help="path of label images directory")
    arg_parser.add_argument(
        "--infer_dir", type=str, default="output",
        help="path of infer images directory")
    arg_parser.add_argument("--scale", type=int, default=2)
    return arg_parser.parse_args()
def calc_psnr(sr, hr, scale, rgb_range):
    """Calculate PSNR between a super-resolved image and its ground truth.

    Args:
        sr: super-resolved images, shape (N, 3, H, W).
        hr: ground-truth images, same shape as `sr`.
        scale: upscaling factor; a border of `scale` pixels is shaved off
            before comparison (6 extra pixels when scale == 1, though that
            shave is then unused — behavior kept as-is).
        rgb_range: dynamic range of pixel values (e.g. 255.0).

    Returns:
        PSNR in dB; float("inf") when the images are identical.
    """
    hr = np.float32(hr)
    sr = np.float32(sr)
    diff = (sr - hr) / rgb_range
    # Standard BT.601-style luma weights: PSNR is computed on the Y channel.
    gray_coeffs = np.array([65.738, 129.057, 25.064]).reshape((1, 3, 1, 1)) / 256
    diff = np.multiply(diff, gray_coeffs).sum(1)
    if hr.size == 1:
        return 0
    if scale != 1:
        shave = scale
    else:
        border_add = 6
        shave = scale + border_add
    if scale == 1:
        valid = diff
    else:
        valid = diff[..., shave:-shave, shave:-shave]
    mse = np.mean(pow(valid, 2))
    if mse == 0:
        # Identical images: log10(0) would raise ValueError; report an
        # infinite PSNR instead, which is the conventional value.
        return float("inf")
    return -10 * math.log10(mse)
if __name__ == '__main__':
    args = parser_args()
    # Pair each inference image with its label by stripping the '_infer'
    # suffix from the file name.
    infer_names = sorted(os.listdir(args.infer_dir))
    total_num = len(infer_names)
    mean_psnr = 0.0
    for name in infer_names:
        infer_path = os.path.join(args.infer_dir, name)
        label_path = os.path.join(args.label_dir, name.replace('_infer', ''))
        print(name)
        # HWC -> NCHW with a leading batch axis, as calc_psnr expects.
        infer_img = cv2.imread(infer_path)[np.newaxis].transpose((0, 3, 1, 2))
        label_img = cv2.imread(label_path)[np.newaxis].transpose((0, 3, 1, 2))
        psnr = calc_psnr(infer_img, label_img, args.scale, 255.0)
        mean_psnr += psnr / total_num
        print("current psnr: ", psnr)
    print('Mean psnr of %s images is %.4f' % (total_num, mean_psnr))
| [
"numpy.multiply",
"argparse.ArgumentParser",
"numpy.float32",
"numpy.expand_dims",
"cv2.imread",
"math.log10",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((788, 813), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (811, 813), False, 'import argparse\n'), ((1228, 1242), 'numpy.float32', 'np.float32', (['hr'], {}), '(hr)\n', (1238, 1242), True, 'import numpy as np\n'), ((1252, 1266), 'numpy.float32', 'np.float32', (['sr'], {}), '(sr)\n', (1262, 1266), True, 'import numpy as np\n'), ((1824, 1850), 'os.listdir', 'os.listdir', (['args.infer_dir'], {}), '(args.infer_dir)\n', (1834, 1850), False, 'import os\n'), ((1732, 1747), 'math.log10', 'math.log10', (['mse'], {}), '(mse)\n', (1742, 1747), False, 'import math\n'), ((1992, 2029), 'os.path.join', 'os.path.join', (['args.infer_dir', 'infer_p'], {}), '(args.infer_dir, infer_p)\n', (2004, 2029), False, 'import os\n'), ((2154, 2176), 'cv2.imread', 'cv2.imread', (['infer_path'], {}), '(infer_path)\n', (2164, 2176), False, 'import cv2\n'), ((2197, 2219), 'cv2.imread', 'cv2.imread', (['label_path'], {}), '(label_path)\n', (2207, 2219), False, 'import cv2\n'), ((1393, 1423), 'numpy.multiply', 'np.multiply', (['diff', 'gray_coeffs'], {}), '(diff, gray_coeffs)\n', (1404, 1423), True, 'import numpy as np\n'), ((1318, 1353), 'numpy.array', 'np.array', (['[65.738, 129.057, 25.064]'], {}), '([65.738, 129.057, 25.064])\n', (1326, 1353), True, 'import numpy as np\n'), ((2240, 2268), 'numpy.expand_dims', 'np.expand_dims', (['infer_img', '(0)'], {}), '(infer_img, 0)\n', (2254, 2268), True, 'import numpy as np\n'), ((2313, 2341), 'numpy.expand_dims', 'np.expand_dims', (['label_img', '(0)'], {}), '(label_img, 0)\n', (2327, 2341), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from tensorflow.saved_model import signature_constants
def cnn_model_fn(features, labels, mode):
    """Model function for a two-conv-layer MNIST CNN (tf.estimator v1 API).

    Args:
        features: dict with key "x" holding flattened 28x28 grayscale images.
        labels: integer class labels (used only in TRAIN/EVAL modes).
        mode: a tf.estimator.ModeKeys value selecting which graph to build.

    Returns:
        tf.estimator.EstimatorSpec configured for the requested mode.
    """
    # Input layer: reshape the flat feature vector to NHWC [batch, 28, 28, 1].
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Convolutional layer #1: 32 5x5 filters, ReLU, "same" padding keeps 28x28.
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling layer #1: 2x2 max pool halves spatial dims to 14x14.
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional layer #2 (64 filters) and pooling layer #2 (down to 7x7).
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense layer on the flattened 7*7*64 feature map; dropout only in TRAIN.
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits layer: one unit per digit class.
    logits = tf.layers.dense(inputs=dropout, units=10)
    # Histogram of predicted classes, written by the eval SummarySaverHook.
    cls_summ = tf.summary.histogram("class_dist", tf.argmax(input=logits, axis=1))
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    # Export the prediction dict under the default serving signature.
    export_outputs = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            tf.estimator.export.PredictOutput(outputs=predictions)
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        print("build Predict graph")
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions,
                                          export_outputs=export_outputs)
    # Calculate loss (for both TRAIN and EVAL modes).
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    global_step = tf.train.get_or_create_global_step()
    # NOTE(review): `is_replace` is never used below — looks like leftover
    # experiment code; confirm before removing (it only adds an unused op).
    is_replace = tf.equal(global_step % 3, 0)
    if mode == tf.estimator.ModeKeys.TRAIN:
        print("build train graph")
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=global_step)
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op)
    # Evaluation metrics (EVAL mode only).
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])
    }
    summary_hook = tf.train.SummarySaverHook(save_steps=1, summary_op=[cls_summ], output_dir="model_dir/eval")
    print("build eval graph")
    # NOTE(review): the eval loss reported here is NOT the model loss — it is
    # a random constant (100 or 200 with 30/70 odds). Presumably test/debug
    # code for exporter/metric plumbing; confirm before relying on eval loss.
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=tf.cond(tf.random_uniform(shape=[], maxval=1) > 0.7,
                lambda: tf.constant(100, dtype=tf.float32),
                lambda: tf.constant(200, dtype=tf.float32)),
        eval_metric_ops=eval_metric_ops, export_outputs=None, evaluation_hooks=[summary_hook])
def input_fn():
    """Load MNIST and build numpy-backed train/eval input functions.

    Returns:
        (train_input_fn, eval_input_fn) suitable for tf.estimator.
    """
    (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()
    # Scale pixels into [0, 1]; labels cast to int32 (not strictly required).
    x_train = x_train / np.float32(255)
    y_train = y_train.astype(np.int32)
    x_eval = x_eval / np.float32(255)
    y_eval = y_eval.astype(np.int32)
    train_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": x_train},
        y=y_train,
        batch_size=64,
        num_epochs=500,
        shuffle=True)
    eval_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": x_eval},
        y=y_eval,
        num_epochs=1,
        shuffle=False)
    return train_fn, eval_fn
def serving_input_fn():
    """Build a ServingInputReceiver that decodes raw float32 image bytes.

    Incoming tf.Example protos arrive under the "data" key; the "x" feature
    is a serialized byte string that is decoded back to float32.
    """
    serialized = tf.placeholder(dtype=tf.string, shape=[None])
    receiver_tensor = {"data": serialized}
    parsed = tf.parse_example(
        serialized, features={"x": tf.FixedLenFeature([], dtype=tf.string)})
    parsed['x'] = tf.io.decode_raw(parsed['x'], out_type=tf.float32)
    return tf.estimator.export.ServingInputReceiver(
        features=parsed, receiver_tensors=receiver_tensor)
if __name__ == '__main__':
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
    # Checkpoint every 100 steps so the BestExporter has models to compare.
    run_config = tf.estimator.RunConfig(
        model_dir="model_dir", save_checkpoints_steps=100)
    classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, config=run_config)
    train_input_fn, eval_input_fn = input_fn()
    best_exporter = tf.estimator.BestExporter(serving_input_receiver_fn=serving_input_fn)
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=eval_input_fn, steps=None,
        start_delay_secs=20,
        exporters=best_exporter, throttle_secs=5)
    # tf.estimator.train_and_evaluate(estimator=classifier, train_spec=train_spec, eval_spec=eval_spec)
| [
"tensorflow.estimator.export.ServingInputReceiver",
"tensorflow.reshape",
"tensorflow.estimator.Estimator",
"tensorflow.estimator.TrainSpec",
"tensorflow.layers.max_pooling2d",
"tensorflow.nn.softmax",
"tensorflow.metrics.accuracy",
"tensorflow.train.get_or_create_global_step",
"tensorflow.io.decode... | [((212, 254), 'tensorflow.reshape', 'tf.reshape', (["features['x']", '[-1, 28, 28, 1]'], {}), "(features['x'], [-1, 28, 28, 1])\n", (222, 254), True, 'import tensorflow as tf\n'), ((296, 407), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'input_layer', 'filters': '(32)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=input_layer, filters=32, kernel_size=[5, 5],\n padding='same', activation=tf.nn.relu)\n", (312, 407), True, 'import tensorflow as tf\n'), ((481, 547), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv1', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv1, pool_size=[2, 2], strides=2)\n', (504, 547), True, 'import tensorflow as tf\n'), ((611, 717), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'pool1', 'filters': '(64)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(inputs=pool1, filters=64, kernel_size=[5, 5], padding=\n 'same', activation=tf.nn.relu)\n", (627, 717), True, 'import tensorflow as tf\n'), ((766, 832), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv2', 'pool_size': '[2, 2]', 'strides': '(2)'}), '(inputs=conv2, pool_size=[2, 2], strides=2)\n', (789, 832), True, 'import tensorflow as tf\n'), ((869, 904), 'tensorflow.reshape', 'tf.reshape', (['pool2', '[-1, 7 * 7 * 64]'], {}), '(pool2, [-1, 7 * 7 * 64])\n', (879, 904), True, 'import tensorflow as tf\n'), ((917, 986), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'pool2_flat', 'units': '(1024)', 'activation': 'tf.nn.relu'}), '(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n', (932, 986), True, 'import tensorflow as tf\n'), ((1001, 1093), 'tensorflow.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'dense', 'rate': '(0.4)', 'training': '(mode == tf.estimator.ModeKeys.TRAIN)'}), '(inputs=dense, rate=0.4, training=mode == tf.estimator.\n 
ModeKeys.TRAIN)\n', (1018, 1093), True, 'import tensorflow as tf\n'), ((1131, 1172), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'dropout', 'units': '(10)'}), '(inputs=dropout, units=10)\n', (1146, 1172), True, 'import tensorflow as tf\n'), ((2030, 2098), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (2068, 2098), True, 'import tensorflow as tf\n'), ((2167, 2203), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (2201, 2203), True, 'import tensorflow as tf\n'), ((2221, 2249), 'tensorflow.equal', 'tf.equal', (['(global_step % 3)', '(0)'], {}), '(global_step % 3, 0)\n', (2229, 2249), True, 'import tensorflow as tf\n'), ((2870, 2966), 'tensorflow.train.SummarySaverHook', 'tf.train.SummarySaverHook', ([], {'save_steps': '(1)', 'summary_op': '[cls_summ]', 'output_dir': '"""model_dir/eval"""'}), "(save_steps=1, summary_op=[cls_summ], output_dir=\n 'model_dir/eval')\n", (2895, 2966), True, 'import tensorflow as tf\n'), ((3423, 3458), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (3456, 3458), True, 'import tensorflow as tf\n'), ((3701, 3821), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': train_data}", 'y': 'train_labels', 'batch_size': '(64)', 'num_epochs': '(500)', 'shuffle': '(True)'}), "(x={'x': train_data}, y=train_labels,\n batch_size=64, num_epochs=500, shuffle=True)\n", (3735, 3821), True, 'import tensorflow as tf\n'), ((3880, 3982), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': eval_data}", 'y': 'eval_labels', 'num_epochs': '(1)', 'shuffle': '(False)'}), "(x={'x': eval_data}, y=eval_labels,\n num_epochs=1, shuffle=False)\n", (3914, 3982), True, 'import tensorflow as tf\n'), ((4099, 4144), 
'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string', 'shape': '[None]'}), '(dtype=tf.string, shape=[None])\n', (4113, 4144), True, 'import tensorflow as tf\n'), ((4304, 4355), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["feature['x']"], {'out_type': 'tf.float32'}), "(feature['x'], out_type=tf.float32)\n", (4320, 4355), True, 'import tensorflow as tf\n'), ((4390, 4487), 'tensorflow.estimator.export.ServingInputReceiver', 'tf.estimator.export.ServingInputReceiver', ([], {'features': 'feature', 'receiver_tensors': 'receiver_tensor'}), '(features=feature, receiver_tensors\n =receiver_tensor)\n', (4430, 4487), True, 'import tensorflow as tf\n'), ((4516, 4577), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.INFO'], {}), '(tf.compat.v1.logging.INFO)\n', (4550, 4577), True, 'import tensorflow as tf\n'), ((4602, 4675), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'model_dir': '"""model_dir"""', 'save_checkpoints_steps': '(100)'}), "(model_dir='model_dir', save_checkpoints_steps=100)\n", (4624, 4675), True, 'import tensorflow as tf\n'), ((4746, 4816), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'cnn_model_fn', 'config': 'estimator_config'}), '(model_fn=cnn_model_fn, config=estimator_config)\n', (4768, 4816), True, 'import tensorflow as tf\n'), ((4890, 4959), 'tensorflow.estimator.BestExporter', 'tf.estimator.BestExporter', ([], {'serving_input_receiver_fn': 'serving_input_fn'}), '(serving_input_receiver_fn=serving_input_fn)\n', (4915, 4959), True, 'import tensorflow as tf\n'), ((4978, 5025), 'tensorflow.estimator.TrainSpec', 'tf.estimator.TrainSpec', ([], {'input_fn': 'train_input_fn'}), '(input_fn=train_input_fn)\n', (5000, 5025), True, 'import tensorflow as tf\n'), ((5042, 5162), 'tensorflow.estimator.EvalSpec', 'tf.estimator.EvalSpec', ([], {'input_fn': 'eval_input_fn', 'steps': 'None', 'start_delay_secs': '(20)', 'exporters': 
'exporter', 'throttle_secs': '(5)'}), '(input_fn=eval_input_fn, steps=None, start_delay_secs=\n 20, exporters=exporter, throttle_secs=5)\n', (5063, 5162), True, 'import tensorflow as tf\n'), ((1224, 1255), 'tensorflow.argmax', 'tf.argmax', ([], {'input': 'logits', 'axis': '(1)'}), '(input=logits, axis=1)\n', (1233, 1255), True, 'import tensorflow as tf\n'), ((1356, 1387), 'tensorflow.argmax', 'tf.argmax', ([], {'input': 'logits', 'axis': '(1)'}), '(input=logits, axis=1)\n', (1365, 1387), True, 'import tensorflow as tf\n'), ((1519, 1563), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""softmax_tensor"""'}), "(logits, name='softmax_tensor')\n", (1532, 1563), True, 'import tensorflow as tf\n'), ((1669, 1723), 'tensorflow.estimator.export.PredictOutput', 'tf.estimator.export.PredictOutput', ([], {'outputs': 'predictions'}), '(outputs=predictions)\n', (1702, 1723), True, 'import tensorflow as tf\n'), ((1829, 1926), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions', 'export_outputs': 'export_outputs'}), '(mode=mode, predictions=predictions,\n export_outputs=export_outputs)\n', (1855, 1926), True, 'import tensorflow as tf\n'), ((2350, 2404), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2383, 2404), True, 'import tensorflow as tf\n'), ((2519, 2586), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'train_op': 'train_op'}), '(mode=mode, loss=loss, train_op=train_op)\n', (2545, 2586), True, 'import tensorflow as tf\n'), ((2761, 2831), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'labels', 'predictions': "predictions['classes']"}), "(labels=labels, predictions=predictions['classes'])\n", (2780, 2831), True, 'import tensorflow as tf\n'), ((3490, 3505), 'numpy.float32', 'np.float32', (['(255)'], {}), '(255)\n', 
(3500, 3505), True, 'import numpy as np\n'), ((3600, 3615), 'numpy.float32', 'np.float32', (['(255)'], {}), '(255)\n', (3610, 3615), True, 'import numpy as np\n'), ((4252, 4291), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]'], {'dtype': 'tf.string'}), '([], dtype=tf.string)\n', (4270, 4291), True, 'import tensorflow as tf\n'), ((3073, 3110), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[]', 'maxval': '(1)'}), '(shape=[], maxval=1)\n', (3090, 3110), True, 'import tensorflow as tf\n'), ((3147, 3181), 'tensorflow.constant', 'tf.constant', (['(100)'], {'dtype': 'tf.float32'}), '(100, dtype=tf.float32)\n', (3158, 3181), True, 'import tensorflow as tf\n'), ((3212, 3246), 'tensorflow.constant', 'tf.constant', (['(200)'], {'dtype': 'tf.float32'}), '(200, dtype=tf.float32)\n', (3223, 3246), True, 'import tensorflow as tf\n')] |
'''
This benchmark script was inspired by https://github.com/nmerrill67/GPU_GSPCA/blob/master/demo.py
'''
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn.decomposition import PCA
import pudding
def benchmark_random():
    """Benchmark Pudding's PCA against scikit-learn's on random data.

    Times both implementations on randomly generated matrices of growing
    sample counts and writes a comparison plot to
    pca_benchmark_num_samples.jpg.
    """
    sample_counts = [500, 1000, 5000, 10000, 30000]
    sklearn_times = []
    pudding_times = []
    n_feature = 500
    n_components = 500
    for count in sample_counts:
        data = np.random.rand(count, n_feature)
        start = time()
        # Pudding's PCA performs fit, transform and inverse transform in a
        # single call, so scikit-learn is timed over the same three steps
        # for a fair comparison.
        sk_pca = PCA(n_components=n_components)
        reduced = sk_pca.fit_transform(data)
        sk_pca.inverse_transform(reduced)
        sklearn_times.append(time() - start)
        start = time()
        pd_pca = pudding.dimension_reduction.PCA(n_components=n_components)
        pd_pca.fit(data)
        pudding_times.append(time() - start)
    # The first measurement is skipped when plotting (warm-up run).
    plt.figure()
    plt.plot(sample_counts[1:], pudding_times[1:], label='Pudding')
    plt.plot(sample_counts[1:], sklearn_times[1:], label='Scikit-learn')
    plt.legend()
    plt.xlabel('Number of data points')
    plt.ylabel('Times (seconds)')
    plt.title('Execution time for PCA on %d dimensional data' % n_feature)
    plt.savefig('pca_benchmark_num_samples.jpg')
# Script entry point: run the random-data PCA benchmark.
if __name__ == '__main__':
    benchmark_random()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"pudding.dimension_reduction.PCA",
"matplotlib.pyplot.legend",
"time.time",
"matplotlib.pyplot.figure",
"sklearn.decomposition.PCA",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((1332, 1344), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1342, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1349, 1420), 'matplotlib.pyplot.plot', 'plt.plot', (['n_samples[1:]', 'times_pudding_num_samples[1:]'], {'label': '"""Pudding"""'}), "(n_samples[1:], times_pudding_num_samples[1:], label='Pudding')\n", (1357, 1420), True, 'import matplotlib.pyplot as plt\n'), ((1425, 1501), 'matplotlib.pyplot.plot', 'plt.plot', (['n_samples[1:]', 'times_sklearn_num_samples[1:]'], {'label': '"""Scikit-learn"""'}), "(n_samples[1:], times_sklearn_num_samples[1:], label='Scikit-learn')\n", (1433, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1506, 1518), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1516, 1518), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1558), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of data points"""'], {}), "('Number of data points')\n", (1533, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1563, 1592), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Times (seconds)"""'], {}), "('Times (seconds)')\n", (1573, 1592), True, 'import matplotlib.pyplot as plt\n'), ((1597, 1667), 'matplotlib.pyplot.title', 'plt.title', (["('Execution time for PCA on %d dimensional data' % n_feature)"], {}), "('Execution time for PCA on %d dimensional data' % n_feature)\n", (1606, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1716), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pca_benchmark_num_samples.jpg"""'], {}), "('pca_benchmark_num_samples.jpg')\n", (1683, 1716), True, 'import matplotlib.pyplot as plt\n'), ((660, 695), 'numpy.random.rand', 'np.random.rand', (['n_sample', 'n_feature'], {}), '(n_sample, n_feature)\n', (674, 695), True, 'import numpy as np\n'), ((723, 729), 'time.time', 'time', ([], {}), '()\n', (727, 729), False, 'from time import time\n'), ((926, 956), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', 
(929, 956), False, 'from sklearn.decomposition import PCA\n'), ((1141, 1147), 'time.time', 'time', ([], {}), '()\n', (1145, 1147), False, 'from time import time\n'), ((1170, 1228), 'pudding.dimension_reduction.PCA', 'pudding.dimension_reduction.PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (1201, 1228), False, 'import pudding\n'), ((1088, 1094), 'time.time', 'time', ([], {}), '()\n', (1092, 1094), False, 'from time import time\n'), ((1297, 1303), 'time.time', 'time', ([], {}), '()\n', (1301, 1303), False, 'from time import time\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from edfplus import Edfplus
from bcg import load_bcg
from scipy.signal import lfilter
from scipy.signal import resample_poly
import pdb
def load_ecg(filepath, filter=True, resp=False):
    """Load an ECG (or respiration) trace from an EDF+ file.

    Args:
        filepath: path to the EDF+ recording.
        filter: when True, apply the ECG notch filter (ECG channel only).
        resp: when True, return the "Resp chest" channel upsampled 5x
            instead of the ECG lead.

    Returns:
        1-D signal array.
    """
    edf = Edfplus(filepath)
    if resp:
        resp_signal = edf.signals["Resp chest"]
        return resample_poly(resp_signal, 5, 1)
    ecg = edf.signals['ECG LL-RA']
    return notch_filter(ecg) if filter else ecg
def notch_filter(signal):
    """Apply the precomputed ECG notch filter.

    IIR coefficients are loaded from CSV files shipped under filter/.
    """
    numerator = np.loadtxt("filter/ecg_notch_b.csv", delimiter=',')
    denominator = np.loadtxt("filter/ecg_notch_a.csv", delimiter=',')
    return lfilter(numerator, denominator, signal)
def lowpass_filter(signal, fs=100):
    """Apply a 0.4 Hz low-pass IIR filter for the given sampling rate.

    Args:
        signal: 1-D input signal.
        fs: sampling frequency; only 100 and 500 Hz coefficient sets exist.

    Raises:
        Exception: if `fs` is neither 100 nor 500.
    """
    coeff_paths = {
        100: ("filter/lowpass0.4_b_100.csv", "filter/lowpass0.4_a_100.csv"),
        500: ("filter/lowpass0.4_b_500.csv", "filter/lowpass0.4_a_500.csv"),
    }
    if fs not in coeff_paths:
        raise Exception("unexpected fs {}".format(fs))
    b_path, a_path = coeff_paths[fs]
    b = np.loadtxt(b_path, delimiter=',')
    a = np.loadtxt(a_path, delimiter=',')
    return lfilter(b, a, signal)
if __name__ == '__main__':
    # Library module: the loaders/filters are meant to be imported;
    # nothing runs when executed directly.
    pass
| [
"scipy.signal.lfilter",
"scipy.signal.resample_poly",
"numpy.loadtxt",
"edfplus.Edfplus"
] | [((424, 441), 'edfplus.Edfplus', 'Edfplus', (['filepath'], {}), '(filepath)\n', (431, 441), False, 'from edfplus import Edfplus\n'), ((697, 748), 'numpy.loadtxt', 'np.loadtxt', (['"""filter/ecg_notch_b.csv"""'], {'delimiter': '""","""'}), "('filter/ecg_notch_b.csv', delimiter=',')\n", (707, 748), True, 'import numpy as np\n'), ((757, 808), 'numpy.loadtxt', 'np.loadtxt', (['"""filter/ecg_notch_a.csv"""'], {'delimiter': '""","""'}), "('filter/ecg_notch_a.csv', delimiter=',')\n", (767, 808), True, 'import numpy as np\n'), ((829, 850), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'signal'], {}), '(b, a, signal)\n', (836, 850), False, 'from scipy.signal import lfilter\n'), ((1312, 1333), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'signal'], {}), '(b, a, signal)\n', (1319, 1333), False, 'from scipy.signal import lfilter\n'), ((515, 542), 'scipy.signal.resample_poly', 'resample_poly', (['signal', '(5)', '(1)'], {}), '(signal, 5, 1)\n', (528, 542), False, 'from scipy.signal import resample_poly\n'), ((943, 999), 'numpy.loadtxt', 'np.loadtxt', (['"""filter/lowpass0.4_b_100.csv"""'], {'delimiter': '""","""'}), "('filter/lowpass0.4_b_100.csv', delimiter=',')\n", (953, 999), True, 'import numpy as np\n'), ((1012, 1068), 'numpy.loadtxt', 'np.loadtxt', (['"""filter/lowpass0.4_a_100.csv"""'], {'delimiter': '""","""'}), "('filter/lowpass0.4_a_100.csv', delimiter=',')\n", (1022, 1068), True, 'import numpy as np\n'), ((1101, 1157), 'numpy.loadtxt', 'np.loadtxt', (['"""filter/lowpass0.4_b_500.csv"""'], {'delimiter': '""","""'}), "('filter/lowpass0.4_b_500.csv', delimiter=',')\n", (1111, 1157), True, 'import numpy as np\n'), ((1170, 1226), 'numpy.loadtxt', 'np.loadtxt', (['"""filter/lowpass0.4_a_500.csv"""'], {'delimiter': '""","""'}), "('filter/lowpass0.4_a_500.csv', delimiter=',')\n", (1180, 1226), True, 'import numpy as np\n')] |
from typing import Union
import chess
import numpy as np
class ChessPositionSerializer:
    def __init__(
        self,
        is_pieces: bool = True,
        is_enpassant: bool = True,
        is_castling: bool = True,
        is_turn: bool = True,
        dtype=np.int8,  # TODO: add type
    ) -> None:
        """Configure which 8x8 feature planes the serializer emits.

        Flags toggle plane groups: piece placement (12 planes), en passant
        square (1), castling rights (4) and side to move (2). `dtype` is
        the NumPy dtype of every produced array.
        """
        self.is_pieces = is_pieces
        self.is_enpassant = is_enpassant
        self.is_castling = is_castling
        self.is_turn = is_turn
        self.dtype = dtype
def _piece_type_to_index(self, piece_type: str) -> int:
PIECE_TYPE_TO_INT = dict(
zip(
["K", "Q", "R", "B", "N", "P", "k", "q", "r", "b", "n", "p"],
range(0, 12),
)
)
return PIECE_TYPE_TO_INT[piece_type]
def _serialize_board_pieces(self, board: chess.Board) -> np.ndarray:
if not self.is_pieces:
return np.empty(shape=(0, 8, 8), dtype=self.dtype)
pieces = np.zeros(shape=(12, 8 * 8), dtype=self.dtype)
for sq_index, piece_type in board.piece_map().items():
index = self._piece_type_to_index(piece_type.symbol())
pieces[index, sq_index] = 1
return pieces.reshape((12, 8, 8))
def _serialize_board_enpassant(self, board: chess.Board) -> np.ndarray:
if not self.is_enpassant:
return np.empty(shape=(0, 8, 8), dtype=self.dtype)
enpassant = np.zeros(shape=(1, 8 * 8), dtype=self.dtype)
if board.ep_square:
sq_index = board.ep_square
enpassant[0, sq_index] = 1
return enpassant.reshape((1, 8, 8))
def _serialize_board_castling(self, board: chess.Board) -> np.ndarray:
if not self.is_castling:
return np.empty(shape=(0, 8, 8), dtype=self.dtype)
castling = np.zeros(shape=(4, 8, 8), dtype=self.dtype)
castling[0, :, :] = (
1 if board.has_kingside_castling_rights(color=chess.WHITE) else 0
)
castling[1, :, :] = (
1 if board.has_queenside_castling_rights(color=chess.WHITE) else 0
)
castling[2, :, :] = (
1 if board.has_kingside_castling_rights(color=chess.BLACK) else 0
)
castling[3, :, :] = (
1 if board.has_queenside_castling_rights(color=chess.BLACK) else 0
)
return castling
def _serialize_board_turn(self, board: chess.Board) -> np.ndarray:
if not self.is_turn:
return np.empty(shape=(0, 8, 8), dtype=self.dtype)
turn = np.zeros(shape=(2, 8, 8), dtype=self.dtype)
if board.turn == chess.WHITE:
turn[0, :, :] = 1
elif board.turn == chess.BLACK:
turn[1, :, :] = 1
return turn
def _serialize_fen_pieces(self, fen: str) -> np.ndarray:
if not self.is_pieces:
return np.empty(shape=(0, 8, 8), dtype=self.dtype)
pieces_str = fen.split(" ")[0]
pieces = np.zeros(shape=(12, 8 * 8), dtype=self.dtype)
rank_index = 8
file_index = 1
for char in pieces_str:
if char.isdigit():
digit = int(char)
file_index += digit
elif char.isalpha():
index = self._piece_type_to_index(char)
sq_index = chess.square(file_index-1, rank_index-1)
pieces[index, sq_index] = 1
file_index += 1
elif char == "/":
rank_index -= 1
file_index = 1
return pieces.reshape((12, 8, 8))
def _serialize_fen_enpassant(self, fen: str) -> np.ndarray:
if not self.is_enpassant:
return np.empty(shape=(0, 8, 8), dtype=self.dtype)
enpassant_str = fen.split(" ")[3]
enpassant = np.zeros(shape=(1, 8 * 8), dtype=self.dtype)
if enpassant_str != "-":
sq_index = chess.parse_square(enpassant_str)
enpassant[0, sq_index] = 1
return enpassant.reshape((1, 8, 8))
def _serialize_fen_castling(self, fen: str) -> np.ndarray:
if not self.is_castling:
return np.empty(shape=(0, 8, 8), dtype=self.dtype)
castling_str = fen.split(" ")[2]
castling = np.zeros(shape=(4, 8, 8), dtype=self.dtype)
castling[0, :, :] = (
1 if "K" in castling_str else 0
)
castling[1, :, :] = (
1 if "Q" in castling_str else 0
)
castling[2, :, :] = (
1 if "k" in castling_str else 0
)
castling[3, :, :] = (
1 if "q" in castling_str else 0
)
return castling
def _serialize_fen_turn(self, fen: str) -> np.ndarray:
if not self.is_turn:
return np.empty(shape=(0, 8, 8), dtype=self.dtype)
turn_str = fen.split(" ")[1]
turn = np.zeros(shape=(2, 8, 8), dtype=self.dtype)
if turn_str == "w":
turn[0, :, :] = 1
elif turn_str == "b":
turn[1, :, :] = 1
return turn
def _serialize_board(self, board: chess.Board) -> np.ndarray:
pieces = self._serialize_board_pieces(board)
enpassant = self._serialize_board_enpassant(board)
castling = self._serialize_board_castling(board)
turn = self._serialize_board_turn(board)
array = np.concatenate([pieces, enpassant, castling, turn], axis=0)
return array
def _serialize_fen(self, fen: str) -> np.ndarray:
pieces = self._serialize_fen_pieces(fen)
enpassant = self._serialize_fen_enpassant(fen)
castling = self._serialize_fen_castling(fen)
turn = self._serialize_fen_turn(fen)
array = np.concatenate([pieces, enpassant, castling, turn], axis=0)
return array
def serialize(
self,
position: Union[chess.Board, str],
) -> np.ndarray:
if isinstance(position, chess.Board):
array = self._serialize_board(position)
elif isinstance(position, str):
array = self._serialize_fen(position)
else:
raise TypeError("position must be either str or chess.Board")
return array
def parse_eval(y: str) -> float:
    """Convert an engine evaluation string to a float score.

    Forced-mate scores are clamped: ``"#-N"`` (mate for black) becomes
    ``-100.0`` and ``"#N"`` (mate for white) becomes ``100.0``; any other
    string is parsed as a plain float.  Previously the mate branches
    returned ``int`` despite the ``float`` annotation.
    """
    if y[:2] == "#-":
        return -100.0
    if y[0] == "#":
        return 100.0
    return float(y)
if __name__ == "__main__":
    # Script entry point: build a feature tensor X and an evaluation
    # vector y from the cached dataset, then persist both to disk.
    # Local imports: only needed when run as a script.
    from dataset import ChessValueDataset
    import numpy as np
    # Load the cached dataset; fen_to_value presumably maps FEN strings
    # to engine-evaluation strings (keys are fed to serialize(), values
    # to parse_eval()) — confirm against the dataset module.
    cvd = ChessValueDataset.from_file("cvd.json")
    s = ChessPositionSerializer()
    # One plane stack per position, stacked along a new leading axis.
    X = np.stack([s.serialize(fen) for fen in cvd.fen_to_value.keys()])
    # NOTE: the loop variable y shadows the outer y array name below.
    y = np.stack([parse_eval(y) for y in cvd.fen_to_value.values()])
    # Save both arrays in a single compressed-format archive for training.
    np.savez("dataset.npz", X=X, y=y)
| [
"numpy.empty",
"numpy.zeros",
"dataset.ChessValueDataset.from_file",
"chess.parse_square",
"numpy.savez",
"chess.square",
"numpy.concatenate"
] | [((6380, 6419), 'dataset.ChessValueDataset.from_file', 'ChessValueDataset.from_file', (['"""cvd.json"""'], {}), "('cvd.json')\n", (6407, 6419), False, 'from dataset import ChessValueDataset\n'), ((6599, 6632), 'numpy.savez', 'np.savez', (['"""dataset.npz"""'], {'X': 'X', 'y': 'y'}), "('dataset.npz', X=X, y=y)\n", (6607, 6632), True, 'import numpy as np\n'), ((957, 1002), 'numpy.zeros', 'np.zeros', ([], {'shape': '(12, 8 * 8)', 'dtype': 'self.dtype'}), '(shape=(12, 8 * 8), dtype=self.dtype)\n', (965, 1002), True, 'import numpy as np\n'), ((1411, 1455), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 8 * 8)', 'dtype': 'self.dtype'}), '(shape=(1, 8 * 8), dtype=self.dtype)\n', (1419, 1455), True, 'import numpy as np\n'), ((1799, 1842), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(4, 8, 8), dtype=self.dtype)\n', (1807, 1842), True, 'import numpy as np\n'), ((2522, 2565), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(2, 8, 8), dtype=self.dtype)\n', (2530, 2565), True, 'import numpy as np\n'), ((2938, 2983), 'numpy.zeros', 'np.zeros', ([], {'shape': '(12, 8 * 8)', 'dtype': 'self.dtype'}), '(shape=(12, 8 * 8), dtype=self.dtype)\n', (2946, 2983), True, 'import numpy as np\n'), ((3757, 3801), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 8 * 8)', 'dtype': 'self.dtype'}), '(shape=(1, 8 * 8), dtype=self.dtype)\n', (3765, 3801), True, 'import numpy as np\n'), ((4197, 4240), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(4, 8, 8), dtype=self.dtype)\n', (4205, 4240), True, 'import numpy as np\n'), ((4807, 4850), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(2, 8, 8), dtype=self.dtype)\n', (4815, 4850), True, 'import numpy as np\n'), ((5291, 5350), 'numpy.concatenate', 'np.concatenate', (['[pieces, enpassant, castling, turn]'], {'axis': '(0)'}), '([pieces, enpassant, castling, turn], axis=0)\n', (5305, 
5350), True, 'import numpy as np\n'), ((5645, 5704), 'numpy.concatenate', 'np.concatenate', (['[pieces, enpassant, castling, turn]'], {'axis': '(0)'}), '([pieces, enpassant, castling, turn], axis=0)\n', (5659, 5704), True, 'import numpy as np\n'), ((895, 938), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(0, 8, 8), dtype=self.dtype)\n', (903, 938), True, 'import numpy as np\n'), ((1346, 1389), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(0, 8, 8), dtype=self.dtype)\n', (1354, 1389), True, 'import numpy as np\n'), ((1735, 1778), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(0, 8, 8), dtype=self.dtype)\n', (1743, 1778), True, 'import numpy as np\n'), ((2462, 2505), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(0, 8, 8), dtype=self.dtype)\n', (2470, 2505), True, 'import numpy as np\n'), ((2837, 2880), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(0, 8, 8), dtype=self.dtype)\n', (2845, 2880), True, 'import numpy as np\n'), ((3650, 3693), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(0, 8, 8), dtype=self.dtype)\n', (3658, 3693), True, 'import numpy as np\n'), ((3858, 3891), 'chess.parse_square', 'chess.parse_square', (['enpassant_str'], {}), '(enpassant_str)\n', (3876, 3891), False, 'import chess\n'), ((4092, 4135), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(0, 8, 8), dtype=self.dtype)\n', (4100, 4135), True, 'import numpy as np\n'), ((4710, 4753), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 8, 8)', 'dtype': 'self.dtype'}), '(shape=(0, 8, 8), dtype=self.dtype)\n', (4718, 4753), True, 'import numpy as np\n'), ((3279, 3323), 'chess.square', 'chess.square', (['(file_index - 1)', '(rank_index - 1)'], {}), '(file_index - 1, rank_index - 1)\n', (3291, 3323), False, 'import 
chess\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test circuits and reference outputs for measure instruction.
"""
import numpy as np
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.compiler import assemble
from qiskit.providers.aer.backends import QasmSimulator
from qiskit.providers.aer.utils.qobj_utils import unitary_instr
from qiskit.providers.aer.utils.qobj_utils import append_instr
from qiskit.providers.aer.utils.qobj_utils import measure_instr
# ==========================================================================
# Multi-qubit measure
# ==========================================================================
def _dummy_qobj():
    """Return a dummy qobj to insert experiments into"""
    qreg = QuantumRegister(1)
    placeholder = QuantumCircuit(qreg)
    placeholder.barrier(qreg)
    qobj = assemble(placeholder, QasmSimulator(), shots=1)
    # Drop the placeholder experiment; callers append their own.
    qobj.experiments = []
    return qobj
def unitary_gate_circuits_real_deterministic(final_measure=True):
    """Unitary gate test circuits with deterministic count output."""
    final_qobj = _dummy_qobj()
    qr = QuantumRegister(2)
    if final_measure:
        cr = ClassicalRegister(2)
        regs = (qr, cr)
    else:
        regs = (qr, )
    x_mat = np.array([[0, 1], [1, 0]])
    cx_mat = np.array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])
    # One entry per circuit: (X-gate target qubit or None, CX qubit order).
    # In order: CX01 |00>, CX10 |00>, CX01.(X^I) |10>, CX10.(I^X) |01>,
    # CX01.(I^X) |11>, CX10.(X^I) |11>
    cases = [
        (None, [0, 1]),
        (None, [1, 0]),
        (1, [0, 1]),
        (0, [1, 0]),
        (0, [0, 1]),
        (1, [1, 0]),
    ]
    for x_target, cx_qubits in cases:
        circuit = QuantumCircuit(*regs)
        circuit.barrier(qr)
        qobj = assemble(circuit, QasmSimulator(), shots=1)
        if x_target is not None:
            append_instr(qobj, 0, unitary_instr(x_mat, [x_target]))
        append_instr(qobj, 0, unitary_instr(cx_mat, cx_qubits))
        if final_measure:
            append_instr(qobj, 0, measure_instr([0], [0]))
            append_instr(qobj, 0, measure_instr([1], [1]))
        final_qobj.experiments.append(qobj.experiments[0])
    return final_qobj
def unitary_gate_counts_real_deterministic(shots, hex_counts=True):
    """Unitary gate circuits reference counts.

    Returns one expected-counts dict per circuit produced by
    ``unitary_gate_circuits_real_deterministic`` (six in total), keyed by
    hexadecimal outcome strings when ``hex_counts`` is True, otherwise by
    plain bitstrings.
    """
    # Outcomes in circuit order: CX01 |00>, CX10 |00>, CX01.(X^I) |10>,
    # CX10.(I^X) |01>, CX01.(I^X) |11>, CX10.(X^I) |11>
    if hex_counts:
        keys = ['0x0', '0x0', '0x2', '0x1', '0x3', '0x3']
    else:
        # Bug fix: the non-hex branch previously omitted the final
        # CX10.(X^I) |11> entry and returned only five targets.
        keys = ['00', '00', '10', '01', '11', '11']
    return [{key: shots} for key in keys]
def unitary_gate_statevector_real_deterministic():
    """Unitary gate test circuits with deterministic counts."""
    # Expected final statevectors, one per circuit, in order:
    # CX01 |00>, CX10 |00>, CX01.(X^I) |10>, CX10.(I^X) |01>,
    # CX01.(I^X) |11>, CX10.(X^I) |11>
    states = (
        [1, 0, 0, 0],
        [1, 0, 0, 0],
        [0, 0, 1, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [0, 0, 0, 1],
    )
    return [np.array(state) for state in states]
def unitary_gate_unitary_real_deterministic():
    """Unitary gate circuits reference unitaries."""
    identity = np.array([[1, 0, 0, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [0, 0, 0, 1]])
    # Each reference unitary is a row permutation of the identity.
    # In order: CX01 |00>, CX10 |00>, CX01.(X^I) |10>, CX10.(I^X) |01>,
    # CX01.(I^X) |11>, CX10.(X^I) |11>
    row_orders = [
        (0, 3, 2, 1),
        (0, 1, 3, 2),
        (2, 1, 0, 3),
        (1, 0, 2, 3),
        (1, 2, 3, 0),
        (2, 3, 1, 0),
    ]
    return [identity[list(order)] for order in row_orders]
def unitary_gate_circuits_complex_deterministic(final_measure=True):
    """Unitary gate test circuits with deterministic count output."""
    final_qobj = _dummy_qobj()
    qr = QuantumRegister(2)
    if final_measure:
        cr = ClassicalRegister(2)
        regs = (qr, cr)
    else:
        regs = (qr, )
    y_mat = np.array([[0, -1j], [1j, 0]], dtype=complex)
    cx_mat = np.array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]],
                      dtype=complex)
    # One entry per circuit: (Y-gate target qubit or None, CX qubit order).
    # In order: CX01 |00>, CX10 |00>, CX01.(Y^I) |10>, CX10.(I^Y) |01>,
    # CX01.(I^Y) |11>, CX10.(Y^I) |11>
    cases = [
        (None, [0, 1]),
        (None, [1, 0]),
        (1, [0, 1]),
        (0, [1, 0]),
        (0, [0, 1]),
        (1, [1, 0]),
    ]
    for y_target, cx_qubits in cases:
        circuit = QuantumCircuit(*regs)
        circuit.barrier(qr)
        qobj = assemble(circuit, QasmSimulator(), shots=1)
        if y_target is not None:
            append_instr(qobj, 0, unitary_instr(y_mat, [y_target]))
        append_instr(qobj, 0, unitary_instr(cx_mat, cx_qubits))
        if final_measure:
            append_instr(qobj, 0, measure_instr([0], [0]))
            append_instr(qobj, 0, measure_instr([1], [1]))
        final_qobj.experiments.append(qobj.experiments[0])
    return final_qobj
def unitary_gate_counts_complex_deterministic(shots, hex_counts=True):
    """Unitary gate circuits reference counts.

    Returns one expected-counts dict per circuit produced by
    ``unitary_gate_circuits_complex_deterministic`` (six in total), keyed
    by hexadecimal outcome strings when ``hex_counts`` is True, otherwise
    by plain bitstrings.
    """
    # Outcomes in circuit order: CX01 |00>, CX10 |00>, CX01.(Y^I) |10>,
    # CX10.(I^Y) |01>, CX01.(I^Y) |11>, CX10.(Y^I) |11>
    if hex_counts:
        keys = ['0x0', '0x0', '0x2', '0x1', '0x3', '0x3']
    else:
        # Bug fix: the non-hex branch previously omitted the final
        # CX10.(Y^I) |11> entry and returned only five targets.
        keys = ['00', '00', '10', '01', '11', '11']
    return [{key: shots} for key in keys]
def unitary_gate_statevector_complex_deterministic():
    """Unitary gate test circuits with deterministic counts."""
    # Expected final statevectors, one per circuit, in order:
    # CX01 |00>, CX10 |00>, CX01.(Y^I) |10>, CX10.(I^Y) |01>,
    # CX01.(I^Y) |11>, CX10.(Y^I) |11>
    states = (
        [1, 0, 0, 0],
        [1, 0, 0, 0],
        [0, 0, 1j, 0],
        [0, 1j, 0, 0],
        [0, 0, 0, 1j],
        [0, 0, 0, 1j],
    )
    return [np.array(state) for state in states]
def unitary_gate_unitary_complex_deterministic():
    """Unitary gate circuits reference unitaries."""
    # CX01, |00> state
    cx01 = np.array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])
    # CX10, |00> state
    cx10 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
    # CX01.(Y^I), |10> state
    cx01_yi = np.array([[0, 0, -1j, 0], [0, 1j, 0, 0],
                        [1j, 0, 0, 0], [0, 0, 0, -1j]])
    # CX10.(I^Y), |01> state
    cx10_iy = np.array([[0, -1j, 0, 0], [1j, 0, 0, 0],
                        [0, 0, 1j, 0], [0, 0, 0, -1j]])
    # CX01.(I^Y), |11> state
    cx01_iy = np.array([[0, -1j, 0, 0], [0, 0, 1j, 0],
                        [0, 0, 0, -1j], [1j, 0, 0, 0]])
    # CX10.(Y^I), |11> state
    cx10_yi = np.array([[0, 0, -1j, 0], [0, 0, 0, -1j],
                        [0, 1j, 0, 0], [1j, 0, 0, 0]])
    return [cx01, cx10, cx01_yi, cx10_iy, cx01_iy, cx10_yi]
| [
"qiskit.providers.aer.backends.QasmSimulator",
"qiskit.QuantumCircuit",
"qiskit.providers.aer.utils.qobj_utils.unitary_instr",
"numpy.array",
"qiskit.ClassicalRegister",
"qiskit.providers.aer.utils.qobj_utils.measure_instr",
"qiskit.QuantumRegister"
] | [((1194, 1212), 'qiskit.QuantumRegister', 'QuantumRegister', (['(1)'], {}), '(1)\n', (1209, 1212), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1227, 1245), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['qr'], {}), '(qr)\n', (1241, 1245), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1570, 1588), 'qiskit.QuantumRegister', 'QuantumRegister', (['(2)'], {}), '(2)\n', (1585, 1588), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1713, 1739), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (1721, 1739), True, 'import numpy as np\n'), ((1753, 1819), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])\n', (1761, 1819), True, 'import numpy as np\n'), ((1858, 1879), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (1872, 1879), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((2241, 2262), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (2255, 2262), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((2630, 2651), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (2644, 2651), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((3072, 3093), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (3086, 3093), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((3514, 3535), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (3528, 3535), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((3956, 3977), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (3970, 3977), False, 'from qiskit import QuantumRegister, ClassicalRegister, 
QuantumCircuit\n'), ((7644, 7662), 'qiskit.QuantumRegister', 'QuantumRegister', (['(2)'], {}), '(2)\n', (7659, 7662), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((7787, 7835), 'numpy.array', 'np.array', (['[[0, -1.0j], [1.0j, 0]]'], {'dtype': 'complex'}), '([[0, -1.0j], [1.0j, 0]], dtype=complex)\n', (7795, 7835), True, 'import numpy as np\n'), ((7845, 7931), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]]'], {'dtype': 'complex'}), '([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]], dtype=\n complex)\n', (7853, 7931), True, 'import numpy as np\n'), ((7987, 8008), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (8001, 8008), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((8370, 8391), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (8384, 8391), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((8759, 8780), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (8773, 8780), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((9201, 9222), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (9215, 9222), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((9643, 9664), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (9657, 9664), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((10085, 10106), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (10099, 10106), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1299, 1314), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (1312, 1314), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((1624, 1644), 'qiskit.ClassicalRegister', 'ClassicalRegister', 
(['(2)'], {}), '(2)\n', (1641, 1644), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1933, 1948), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (1946, 1948), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((1985, 2014), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[0, 1]'], {}), '(cx_mat, [0, 1])\n', (1998, 2014), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((2316, 2331), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (2329, 2331), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((2368, 2397), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[1, 0]'], {}), '(cx_mat, [1, 0])\n', (2381, 2397), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((2705, 2720), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (2718, 2720), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((2757, 2782), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['x_mat', '[1]'], {}), '(x_mat, [1])\n', (2770, 2782), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((2810, 2839), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[0, 1]'], {}), '(cx_mat, [0, 1])\n', (2823, 2839), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((3147, 3162), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (3160, 3162), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((3199, 3224), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['x_mat', '[0]'], {}), '(x_mat, [0])\n', (3212, 3224), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((3252, 3281), 
'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[1, 0]'], {}), '(cx_mat, [1, 0])\n', (3265, 3281), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((3589, 3604), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (3602, 3604), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((3641, 3666), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['x_mat', '[0]'], {}), '(x_mat, [0])\n', (3654, 3666), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((3694, 3723), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[0, 1]'], {}), '(cx_mat, [0, 1])\n', (3707, 3723), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((4031, 4046), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (4044, 4046), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((4083, 4108), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['x_mat', '[1]'], {}), '(x_mat, [1])\n', (4096, 4108), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((4136, 4165), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[1, 0]'], {}), '(cx_mat, [1, 0])\n', (4149, 4165), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((5721, 5743), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (5729, 5743), True, 'import numpy as np\n'), ((5787, 5809), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (5795, 5809), True, 'import numpy as np\n'), ((5859, 5881), 'numpy.array', 'np.array', (['[0, 0, 1, 0]'], {}), '([0, 0, 1, 0])\n', (5867, 5881), True, 'import numpy as np\n'), ((5931, 5953), 'numpy.array', 'np.array', (['[0, 1, 0, 0]'], {}), '([0, 1, 0, 0])\n', (5939, 5953), True, 'import numpy as np\n'), 
((6003, 6025), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (6011, 6025), True, 'import numpy as np\n'), ((6075, 6097), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (6083, 6097), True, 'import numpy as np\n'), ((6279, 6345), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])\n', (6287, 6345), True, 'import numpy as np\n'), ((6476, 6542), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])\n', (6484, 6542), True, 'import numpy as np\n'), ((6679, 6745), 'numpy.array', 'np.array', (['[[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]'], {}), '([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]])\n', (6687, 6745), True, 'import numpy as np\n'), ((6882, 6948), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (6890, 6948), True, 'import numpy as np\n'), ((7085, 7151), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0]]'], {}), '([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0]])\n', (7093, 7151), True, 'import numpy as np\n'), ((7288, 7354), 'numpy.array', 'np.array', (['[[0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]]'], {}), '([[0, 0, 1, 0], [0, 0, 0, 1], [0, 1, 0, 0], [1, 0, 0, 0]])\n', (7296, 7354), True, 'import numpy as np\n'), ((7698, 7718), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(2)'], {}), '(2)\n', (7715, 7718), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((8062, 8077), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (8075, 8077), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((8114, 8143), 
'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[0, 1]'], {}), '(cx_mat, [0, 1])\n', (8127, 8143), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((8445, 8460), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (8458, 8460), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((8497, 8526), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[1, 0]'], {}), '(cx_mat, [1, 0])\n', (8510, 8526), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((8834, 8849), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (8847, 8849), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((8886, 8911), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['y_mat', '[1]'], {}), '(y_mat, [1])\n', (8899, 8911), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((8939, 8968), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[0, 1]'], {}), '(cx_mat, [0, 1])\n', (8952, 8968), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((9276, 9291), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (9289, 9291), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((9328, 9353), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['y_mat', '[0]'], {}), '(y_mat, [0])\n', (9341, 9353), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((9381, 9410), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[1, 0]'], {}), '(cx_mat, [1, 0])\n', (9394, 9410), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((9718, 9733), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (9731, 9733), False, 
'from qiskit.providers.aer.backends import QasmSimulator\n'), ((9770, 9795), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['y_mat', '[0]'], {}), '(y_mat, [0])\n', (9783, 9795), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((9823, 9852), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[0, 1]'], {}), '(cx_mat, [0, 1])\n', (9836, 9852), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((10160, 10175), 'qiskit.providers.aer.backends.QasmSimulator', 'QasmSimulator', ([], {}), '()\n', (10173, 10175), False, 'from qiskit.providers.aer.backends import QasmSimulator\n'), ((10212, 10237), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['y_mat', '[1]'], {}), '(y_mat, [1])\n', (10225, 10237), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((10265, 10294), 'qiskit.providers.aer.utils.qobj_utils.unitary_instr', 'unitary_instr', (['cx_mat', '[1, 0]'], {}), '(cx_mat, [1, 0])\n', (10278, 10294), False, 'from qiskit.providers.aer.utils.qobj_utils import unitary_instr\n'), ((11856, 11878), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (11864, 11878), True, 'import numpy as np\n'), ((11922, 11944), 'numpy.array', 'np.array', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (11930, 11944), True, 'import numpy as np\n'), ((11994, 12019), 'numpy.array', 'np.array', (['[0, 0, 1.0j, 0]'], {}), '([0, 0, 1.0j, 0])\n', (12002, 12019), True, 'import numpy as np\n'), ((12067, 12092), 'numpy.array', 'np.array', (['[0, 1.0j, 0, 0]'], {}), '([0, 1.0j, 0, 0])\n', (12075, 12092), True, 'import numpy as np\n'), ((12140, 12165), 'numpy.array', 'np.array', (['[0, 0, 0, 1.0j]'], {}), '([0, 0, 0, 1.0j])\n', (12148, 12165), True, 'import numpy as np\n'), ((12213, 12238), 'numpy.array', 'np.array', (['[0, 0, 0, 1.0j]'], {}), '([0, 0, 0, 1.0j])\n', (12221, 12238), True, 'import numpy as np\n'), ((12421, 
12487), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])\n', (12429, 12487), True, 'import numpy as np\n'), ((12618, 12684), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])\n', (12626, 12684), True, 'import numpy as np\n'), ((12821, 12906), 'numpy.array', 'np.array', (['[[0, 0, -1.0j, 0], [0, 1.0j, 0, 0], [1.0j, 0, 0, 0], [0, 0, 0, -1.0j]]'], {}), '([[0, 0, -1.0j, 0], [0, 1.0j, 0, 0], [1.0j, 0, 0, 0], [0, 0, 0, -1.0j]]\n )\n', (12829, 12906), True, 'import numpy as np\n'), ((13030, 13115), 'numpy.array', 'np.array', (['[[0, -1.0j, 0, 0], [1.0j, 0, 0, 0], [0, 0, 1.0j, 0], [0, 0, 0, -1.0j]]'], {}), '([[0, -1.0j, 0, 0], [1.0j, 0, 0, 0], [0, 0, 1.0j, 0], [0, 0, 0, -1.0j]]\n )\n', (13038, 13115), True, 'import numpy as np\n'), ((13239, 13324), 'numpy.array', 'np.array', (['[[0, -1.0j, 0, 0], [0, 0, 1.0j, 0], [0, 0, 0, -1.0j], [1.0j, 0, 0, 0]]'], {}), '([[0, -1.0j, 0, 0], [0, 0, 1.0j, 0], [0, 0, 0, -1.0j], [1.0j, 0, 0, 0]]\n )\n', (13247, 13324), True, 'import numpy as np\n'), ((13448, 13533), 'numpy.array', 'np.array', (['[[0, 0, -1.0j, 0], [0, 0, 0, -1.0j], [0, 1.0j, 0, 0], [1.0j, 0, 0, 0]]'], {}), '([[0, 0, -1.0j, 0], [0, 0, 0, -1.0j], [0, 1.0j, 0, 0], [1.0j, 0, 0, 0]]\n )\n', (13456, 13533), True, 'import numpy as np\n'), ((2068, 2091), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (2081, 2091), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((2123, 2146), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (2136, 2146), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((2451, 2474), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), 
'([0], [0])\n', (2464, 2474), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((2506, 2529), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (2519, 2529), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((2893, 2916), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (2906, 2916), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((2948, 2971), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (2961, 2971), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((3335, 3358), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (3348, 3358), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((3390, 3413), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (3403, 3413), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((3777, 3800), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (3790, 3800), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((3832, 3855), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (3845, 3855), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((4219, 4242), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (4232, 4242), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((4274, 4297), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (4287, 4297), False, 'from 
qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((8197, 8220), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (8210, 8220), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((8252, 8275), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (8265, 8275), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((8580, 8603), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (8593, 8603), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((8635, 8658), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (8648, 8658), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((9022, 9045), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (9035, 9045), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((9077, 9100), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (9090, 9100), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((9464, 9487), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (9477, 9487), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((9519, 9542), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (9532, 9542), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((9906, 9929), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (9919, 9929), False, 'from qiskit.providers.aer.utils.qobj_utils import 
measure_instr\n'), ((9961, 9984), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (9974, 9984), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((10348, 10371), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[0]', '[0]'], {}), '([0], [0])\n', (10361, 10371), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n'), ((10403, 10426), 'qiskit.providers.aer.utils.qobj_utils.measure_instr', 'measure_instr', (['[1]', '[1]'], {}), '([1], [1])\n', (10416, 10426), False, 'from qiskit.providers.aer.utils.qobj_utils import measure_instr\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 9:56am 2019
Plotting phase curves
Updated on Mon Jun 3 - Added name_par_list for NICERsoft segments
"""
from __future__ import division, print_function
from astropy.io import fits
import numpy as np
import Lv0_dirs,Lv0_fits2dict,Lv1_data_bin,Lv2_mkdir
from scipy import stats
from pint.eventstats import sf_z2m,z2m,sig2sigma
from PyAstronomy.pyasl import foldAt
import pathlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import os
Lv0_dirs.global_par() #obtaining the global parameters (module-level side effect; presumably sets shared directory paths used by the Lv* helpers -- confirm in Lv0_dirs)
def pulse_profile(f_pulse,times,counts,shift,no_phase_bins):
    """
    Calculate the pulse profile for the observation. The phase axis goes from 0 to 2
    (two identical cycles, for readability of the plotted profile).

    Thoughts on 1/14/2020: I wonder if the count rate is calculated from times[-1]-times[0]?
    If so, this is WRONG! I should be using the total from the GTIs!

    f_pulse - the frequency of the pulse
    times - the array of time values
    counts - the array of counts values
    shift - how much to shift the pulse by in the phase axis.
    It only affects how it is presented.
    no_phase_bins - number of phase bins desired

    Returns (phases, phase_bins, summed_profile) where phases is the sorted list of
    folded phases (duplicated over 0-2), phase_bins are the bin edges, and
    summed_profile is the mean of the counts in each phase bin.
    """
    period = 1/f_pulse
    # Fold at 'period' with epoch T0 = shift*period. This is exactly what
    # PyAstronomy's foldAt(times, period, T0=shift*period) computes
    # (phase = (t-T0)/P, minus its floor), but done with numpy only.
    raw_phase = (times - shift*period)/period
    phases = raw_phase - np.floor(raw_phase)

    index_sort = np.argsort(phases)
    # Duplicate the cycle so the profile spans phase 0 to 2
    phases = list(phases[index_sort]) + list(phases[index_sort]+1)
    counts = list(counts[index_sort])*2

    phase_bins = np.linspace(0,2,no_phase_bins*2+1)
    summed_profile, bin_edges, binnumber = stats.binned_statistic(phases,counts,statistic='mean',bins=phase_bins)

    return phases, phase_bins, summed_profile
def phase_exposure(start_time, stop_time, period, nbin=16, gtis=None):
    """Calculate the exposure on each phase of a pulse profile.
    THIS FUNCTION IS FROM STINGRAY.
    https://stingray.readthedocs.io/en/latest/_modules/stingray/pulse/pulsar.html
    (as of 6/29/2020)
    Parameters
    ----------
    start_time, stop_time : float
        Starting and stopping time (or phase if ``period==1``)
    period : float
        The pulse period (if 1, equivalent to phases)
    Other parameters
    ----------------
    nbin : int, optional, default 16
        The number of bins in the profile
    gtis : [[gti00, gti01], [gti10, gti11], ...], optional, default None
        Good Time Intervals
    Returns
    -------
    expo : array of floats
        The normalized exposure of each bin in the pulse profile (1 is the
        highest exposure, 0 the lowest)
    """
    if gtis is None:
        # No GTIs supplied: treat the whole [start, stop] span as one GTI
        gtis = np.array([[start_time, stop_time]])
    # Use precise floating points -------------
    # longdouble keeps the phase arithmetic below from losing precision over
    # long baselines (many periods)
    start_time = np.longdouble(start_time)
    stop_time = np.longdouble(stop_time)
    period = np.longdouble(period)
    gtis = np.array(gtis, dtype=np.longdouble)
    # -----------------------------------------
    expo = np.zeros(nbin)
    # phs[i] = (lower, upper) phase edge of profile bin i
    phs = np.linspace(0, 1, nbin + 1)
    phs = np.array(list(zip(phs[0:-1], phs[1:])))
    # Discard gtis outside [start, stop]
    good = np.logical_and(gtis[:, 0] < stop_time, gtis[:, 1] > start_time)
    gtis = gtis[good]
    for g in gtis:
        g0 = g[0]
        g1 = g[1]
        if g0 < start_time:
            # If the start of the fold is inside a gti, start from there
            g0 = start_time
        if g1 > stop_time:
            # If the end of the fold is inside a gti, end there
            g1 = stop_time
        length = g1 - g0
        # How many periods inside this length?
        nraw = length / period
        # How many integer periods?
        nper = nraw.astype(int)
        # First raw exposure: the number of periods
        # (every whole period covers each of the nbin bins equally)
        expo += nper / nbin
        # FRACTIONAL PART =================
        # What remains is additional exposure for part of the profile.
        start_phase = np.fmod(g0 / period, 1)
        end_phase = nraw - nper + start_phase
        limits = [[start_phase, end_phase]]
        # start_phase is always < 1. end_phase not always. In this case...
        # ...the leftover fraction wraps past phase 1, so split it into two
        # phase intervals
        if end_phase > 1:
            limits = [[0, end_phase - 1], [start_phase, 1]]
        for l in limits:
            l0 = l[0]
            l1 = l[1]
            # Discards bins untouched by these limits
            goodbins = np.logical_and(phs[:, 0] <= l1, phs[:, 1] >= l0)
            idxs = np.arange(len(phs), dtype=int)[goodbins]
            for i in idxs:
                # Overlap of bin i with the interval [l0, l1]
                start = np.max([phs[i, 0], l0])
                stop = np.min([phs[i, 1], l1])
                w = stop - start
                expo[i] += w
    # Normalize so the best-covered bin has exposure 1
    return expo / np.max(expo)
def pulse_folding(t,T,T0,f,fdot,fdotdot,no_phase_bins,mission="NICER"):
    """
    Calculating the pulse profile by also incorporating \dot{f} corrections!
    Goes from 0 to 2 (two identical cycles of the profile).

    t - array of time values (mission elapsed time, in seconds)
    T - sum of all the GTIs (exposure, in seconds)
    T0 - reference epoch in MJD
    f - pulse/folding frequency
    fdot - frequency derivative
    fdotdot - second derivative of frequency
    no_phase_bins - number of phase bins desired (recommended 20!)
    mission - "NICER" (default), "XMM", or "SWIFT" for now. A default is
    provided so existing callers that omit the argument keep working.

    Returns (phase_bins, profile, error): the pulse profile in counts/s/phase bin
    vs phase, with the number of counts divided by the exposure time (calculated
    through total sum of the GTIs), and the Poisson (sqrt(N)) error likewise
    divided by the exposure.

    Raises ValueError for an unrecognized mission (previously this silently
    fell through and crashed with a NameError on t_MJDs).

    The MJDREF/TIMEZERO values are hard-coded here since it'd be inconvenient
    to call the eventfile for them.
    """
    if mission == "NICER":
        MJDREFI = 56658.0
        MJDREFF = 0.000777592592592593
        TIMEZERO = -1
        t_MJDs = MJDREFI + MJDREFF + (TIMEZERO+t)/86400
    elif mission == "SWIFT":
        MJDREFI = 51910.0
        MJDREFF = 7.428703700000000E-04
        TIMEZERO = 0
        t_MJDs = MJDREFI + MJDREFF + (TIMEZERO+t)/86400
    elif mission == "XMM":
        MJDREF = 50814.0
        t_MJDs = MJDREF + t/86400.0
    else:
        raise ValueError("mission should be one of 'NICER', 'SWIFT', or 'XMM'!")

    tau = (t_MJDs-T0)*86400.0 #elapsed seconds since the reference epoch
    phase = (f*tau + fdot/2 *tau**2 + fdotdot/6*tau**3)%1 #Taylor expansion of the phase model

    counts = np.ones(len(phase)) #each event contributes one count
    phase_bins = np.linspace(0,1,no_phase_bins+1)
    summed_profile,bin_edges,binnumber = stats.binned_statistic(phase,counts,statistic='sum',bins=phase_bins)

    #duplicate the cycle so the returned profile spans phase 0 to 2
    phase_bins_total = np.array(list(phase_bins[:-1]) + list(phase_bins+1))
    summed_profile_total = np.array(list(summed_profile)*2)
    error = np.sqrt(summed_profile_total) #Poisson uncertainty on the raw counts

    return phase_bins_total, summed_profile_total/T, error/T
def get_Z2(phases,m):
    """
    Calculate the Z^2 significances (in Gaussian sigma) given the pulse phases
    and harmonic number m.

    phases - array of pulse phase values
    m - number of harmonics
    """
    # Z^2_m statistic -> survival-function probability -> equivalent sigma
    return sig2sigma(sf_z2m(z2m(phases, m=m)))
def get_chi2(profile,error):
    """
    Calculating the chi^2 value of the folded profile against its own mean
    (i.e., against a constant-flux model).

    profile - flux/counts per sec per phase bin
    error - corresponding errors
    """
    deviation = profile - np.mean(profile)
    return np.sum(deviation**2 / error**2)
def whole(eventfile,par_list,tbin_size,pulse_pars,shift,no_phase_bins,mode):
    """
    Plot the entire raw pulse profile without any cuts to the data.

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl, PI_FAST, TIME, PI)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s!
    pulse_pars - parameters corresponding to the pulse: [f, fdot, fdotdot]
    shift - how much to shift the pulse by in the phase axis.
    It only affects how the pulse profile is 'displaced'.
    no_phase_bins - number of phase bins desired
    mode - whether we want to show or save the plot ('show'/'save'/'overlap')

    Returns (phase_bins[:-1], summed_profile).
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    if type(pulse_pars) != list and type(pulse_pars) != np.ndarray:
        raise TypeError("pulse_pars should either be a list or an array!")
    if 'TIME' not in par_list:
        raise ValueError("You should have 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")
    if mode != 'show' and mode != 'save' and mode != 'overlap':
        raise ValueError("Mode should either be 'show' or 'save' or 'overlap'!")

    parent_folder = str(pathlib.Path(eventfile).parent)

    data_dict = Lv0_fits2dict.fits2dict(eventfile,1,par_list)
    gtis = Lv0_fits2dict.fits2dict(eventfile,2,['START','STOP'])
    T = sum([ (gtis['STOP'][i]-gtis['START'][i]) for i in range(len(gtis['START'])) ]) #total exposure from the GTIs
    times = data_dict['TIME']

    if pulse_pars[1] == 0 and pulse_pars[2] == 0: #i.e., if both \dot{f} and \ddot{f} are zero; that is, if we have no frequency derivatives
        counts = np.ones(len(times))
        shifted_t = times-times[0]
        t_bins = np.linspace(0,np.ceil(shifted_t[-1]),int(np.ceil(shifted_t[-1])*1/tbin_size+1))
        summed_data, bin_edges, binnumber = stats.binned_statistic(shifted_t,counts,statistic='sum',bins=t_bins) #binning the time values in the data
        phases, phase_bins, summed_profile = pulse_profile(pulse_pars[0],t_bins[:-1],summed_data,shift,no_phase_bins)
    else:
        # BUGFIX: pulse_folding takes a mission argument and returns 3 values
        # (bins, profile, error); the original call omitted the mission and
        # unpacked only 2 values, so this branch always crashed.
        # NOTE(review): times[0] is in mission elapsed seconds while pulse_folding
        # expects T0 in MJD -- confirm the intended reference epoch.
        phase_bins, summed_profile, profile_error = pulse_folding(times,T,times[0],pulse_pars[0],pulse_pars[1],pulse_pars[2],no_phase_bins,"NICER")

    event_header = fits.open(eventfile)[1].header
    obj_name = event_header['OBJECT']
    obsid = event_header['OBS_ID']

    if mode != 'overlap':
        plt.figure()
        plt.title('Pulse profile for ' + obj_name + ', ObsID ' + str(obsid),fontsize=12)
    plt.step(phase_bins[:-1],summed_profile*(times[-1]-times[0])/T) #scaled by elapsed-time/exposure ratio
    plt.xlabel('Phase', fontsize=12)
    plt.ylabel('Count/' + str(tbin_size) + 's',fontsize=12)

    if mode == 'show':
        plt.show()
    elif mode == 'save':
        filename = 'pp_' + obsid + '_bin' + str(tbin_size) + 's.pdf'
        plt.savefig(parent_folder+'/'+filename,dpi=900)
        plt.close()

    return phase_bins[:-1],summed_profile
def partial_t(eventfile,par_list,tbin_size,pulse_pars,shift,no_phase_bins,t1,t2,mode):
    """
    Plot the pulse profile for a desired time interval.

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl, PI_FAST, TIME, PI)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s!
    pulse_pars - parameters corresponding to the pulse: [f, fdot, fdotdot]
    shift - how much to shift the pulse by in the phase axis.
    It only affects how it is presented.
    no_phase_bins - number of phase bins desired
    t1 - lower time boundary
    t2 - upper time boundary
    mode - whether we want to show or save the plot ('show'/'save'/'overlap')

    Returns (phase_bins[:-1], summed_profile).
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    if type(pulse_pars) != list and type(pulse_pars) != np.ndarray:
        raise TypeError("pulse_pars should either be a list or an array!")
    if 'TIME' not in par_list:
        raise ValueError("You should have 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")
    if t2<t1:
        raise ValueError("t2 should be greater than t1!")
    if mode != 'show' and mode != 'save' and mode != 'overlap':
        raise ValueError("Mode should either be 'show' or 'save' or 'overlap'!")

    parent_folder = str(pathlib.Path(eventfile).parent)

    data_dict = Lv0_fits2dict.fits2dict(eventfile,1,par_list)
    gtis = Lv0_fits2dict.fits2dict(eventfile,2,['START','STOP'])
    T = sum([ (gtis['STOP'][i]-gtis['START'][i]) for i in range(len(gtis['START'])) ]) #total exposure from the GTIs

    # Bin once up front: both branches need the truncated light curve
    # (the original duplicated this call in each branch).
    truncated_t, truncated_counts = Lv1_data_bin.binning_t(eventfile,par_list,tbin_size,t1,t2)
    if pulse_pars[1] == 0 and pulse_pars[2] == 0: #no frequency derivatives
        phases, phase_bins, summed_profile = pulse_profile(pulse_pars[0],truncated_t[:-1],truncated_counts,shift,no_phase_bins)
    else:
        # BUGFIX: supply the mission argument and unpack all 3 return values
        # (the original call always crashed with TypeError/ValueError).
        phase_bins, summed_profile, profile_error = pulse_folding(truncated_t,T,0,pulse_pars[0],pulse_pars[1],pulse_pars[2],no_phase_bins,"NICER")

    event_header = fits.open(eventfile)[1].header
    obj_name = event_header['OBJECT']
    obsid = event_header['OBS_ID']

    if mode != 'overlap':
        plt.figure()
        plt.title('Pulse profile for ' + obj_name + ', ObsID ' + str(obsid) + '\n Time interval: ' + str(t1) + 's - ' + str(t2) + 's',fontsize=12)
    # BUGFIX: the original referenced an undefined `times` here (NameError);
    # use the truncated light curve's span for the scaling instead.
    plt.step(phase_bins[:-1],summed_profile*(truncated_t[-1]-truncated_t[0])/T)
    plt.xlabel('Phase', fontsize=12)
    plt.ylabel('Count/' + str(tbin_size) + 's',fontsize=12)

    if mode == 'show':
        plt.show()
    elif mode == 'save':
        filename = 'pp_' + obsid + '_bin' + str(tbin_size) + 's_' + str(t1) + 's-' + str(t2) + 's.pdf'
        plt.savefig(parent_folder+'/'+filename,dpi=900)
        plt.close()

    return phase_bins[:-1],summed_profile
def partial_E(eventfile,par_list,tbin_size,Ebin_size,pulse_pars,shift,no_phase_bins,E1,E2,mode):
    """
    Plot the pulse profile for a desired energy range.

    [Though I don't think this will be used much. Count/s vs energy is pointless,
    since we're not folding in response matrix information here to get the flux.
    So we're just doing a count/s vs time with an energy cut to the data.]
    INTERJECTION: This caveat is for the spectrum, NOT the pulse profile!

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl, PI_FAST, TIME, PI)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s
    Ebin_size - the size of the energy bins (in keV!)
    >> e.g., Ebin_size = 0.1 means bin by 0.1keV
    >> e.g., Ebin_size = 0.01 means bin by 0.01keV!
    pulse_pars - parameters corresponding to the pulse: [f, fdot, fdotdot]
    shift - how much to shift the pulse by in the phase axis.
    It only affects how it is presented.
    no_phase_bins - number of phase bins desired
    E1 - lower energy boundary
    E2 - upper energy boundary
    mode - whether we want to show or save the plot ('show'/'save'/'overlap')

    Returns (phase_bins[:-1], summed_profile).
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    if type(pulse_pars) != list and type(pulse_pars) != np.ndarray:
        raise TypeError("pulse_pars should either be a list or an array!")
    if 'TIME' not in par_list:
        raise ValueError("You should have 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")
    if E2<E1:
        raise ValueError("E2 should be greater than E1!")
    if mode != 'show' and mode != 'save' and mode != 'overlap':
        raise ValueError("Mode should either be 'show' or 'save' or 'overlap'!")

    parent_folder = str(pathlib.Path(eventfile).parent)

    data_dict = Lv0_fits2dict.fits2dict(eventfile,1,par_list)
    gtis = Lv0_fits2dict.fits2dict(eventfile,2,['START','STOP'])
    T = sum([ (gtis['STOP'][i]-gtis['START'][i]) for i in range(len(gtis['START'])) ]) #total exposure from the GTIs

    # BUGFIX: bin before branching -- the original only called binning_E inside
    # the fdot==0 branch, so the folding branch hit an undefined truncated_t.
    truncated_t, truncated_t_counts, truncated_E, truncated_E_counts = Lv1_data_bin.binning_E(eventfile,par_list,tbin_size,Ebin_size,E1,E2)
    if pulse_pars[1] == 0 and pulse_pars[2] == 0: #no frequency derivatives
        phases, phase_bins, summed_profile = pulse_profile(pulse_pars[0],truncated_t[:-1],truncated_t_counts,shift,no_phase_bins)
    else:
        # BUGFIX: supply the mission argument and unpack all 3 return values
        phase_bins, summed_profile, profile_error = pulse_folding(truncated_t,T,0,pulse_pars[0],pulse_pars[1],pulse_pars[2],no_phase_bins,"NICER")

    event_header = fits.open(eventfile)[1].header
    obj_name = event_header['OBJECT']
    obsid = event_header['OBS_ID']

    if mode != 'overlap':
        plt.figure()
    # BUGFIX: the original referenced an undefined `times` here (NameError);
    # use the truncated light curve's span for the scaling instead.
    plt.step(phase_bins[:-1],summed_profile*(truncated_t[-1]-truncated_t[0])/T)
    plt.xlabel('Phase', fontsize=12)
    plt.ylabel('Count/' + str(tbin_size) + 's',fontsize=12)
    if mode != 'overlap':
        plt.title('Pulse profile for ' + obj_name + ', ObsID ' + str(obsid)+ '\n Energy range: ' + str(E1) + 'keV - ' + str(E2) + 'keV',fontsize=12)

    if mode == 'show':
        plt.show()
    elif mode == 'save':
        filename = 'pp_' + obsid + '_bin' + str(tbin_size) + 's_' + str(E1) + 'keV-' + str(E2) + 'keV.pdf'
        plt.savefig(parent_folder+'/'+filename,dpi=900)
        plt.close()

    return phase_bins[:-1],summed_profile
def partial_tE(eventfile,par_list,tbin_size,Ebin_size,pulse_pars,shift,no_phase_bins,t1,t2,E1,E2,mode):
    """
    Plot the pulse profile for a desired time interval and desired energy range.

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl, PI_FAST, TIME, PI)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s
    Ebin_size - the size of the energy bins (in keV!)
    >> e.g., Ebin_size = 0.1 means bin by 0.1keV
    >> e.g., Ebin_size = 0.01 means bin by 0.01keV!
    pulse_pars - parameters corresponding to the pulse: [f, fdot, fdotdot]
    shift - how much to shift the pulse by in the phase axis.
    It only affects how it is presented.
    no_phase_bins - number of phase bins desired
    t1 - lower time boundary
    t2 - upper time boundary
    E1 - lower energy boundary
    E2 - upper energy boundary
    mode - whether we want to show or save the plot ('show'/'save')

    Returns (phase_bins[:-1], summed_profile).
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    if type(pulse_pars) != list and type(pulse_pars) != np.ndarray:
        raise TypeError("pulse_pars should either be a list or an array!")
    if 'TIME' not in par_list:
        raise ValueError("You should have 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")
    if E2<E1:
        raise ValueError("E2 should be greater than E1!")
    if t2<t1:
        raise ValueError("t2 should be greater than t1!")
    if mode != 'show' and mode != 'save':
        raise ValueError("Mode should either be 'show' or 'save'!")

    parent_folder = str(pathlib.Path(eventfile).parent)

    data_dict = Lv0_fits2dict.fits2dict(eventfile,1,par_list)
    gtis = Lv0_fits2dict.fits2dict(eventfile,2,['START','STOP'])
    T = sum([ (gtis['STOP'][i]-gtis['START'][i]) for i in range(len(gtis['START'])) ]) #total exposure from the GTIs

    # BUGFIX: bin before branching -- the original only called binning_tE inside
    # the fdot==0 branch, so the folding branch hit an undefined truncated_t.
    truncated_t, truncated_t_counts, truncated_E, truncated_E_counts = Lv1_data_bin.binning_tE(eventfile,par_list,tbin_size,Ebin_size,t1,t2,E1,E2)
    if pulse_pars[1] == 0 and pulse_pars[2] == 0: #no frequency derivatives
        phases, phase_bins, summed_profile = pulse_profile(pulse_pars[0],truncated_t[:-1],truncated_t_counts,shift,no_phase_bins)
    else:
        # BUGFIX: supply the mission argument and unpack all 3 return values
        phase_bins, summed_profile, profile_error = pulse_folding(truncated_t,T,0,pulse_pars[0],pulse_pars[1],pulse_pars[2],no_phase_bins,"NICER")

    event_header = fits.open(eventfile)[1].header
    obj_name = event_header['OBJECT']
    obsid = event_header['OBS_ID']

    if mode != 'overlap':
        plt.figure()
        plt.title('Pulse profile for ' + obj_name + ', ObsID ' + str(obsid)+ '\n Time interval: ' + str(t1) + 's - ' + str(t2) + 's'+ '\n Energy range: ' + str(E1) + 'keV - ' + str(E2) + 'keV',fontsize=12)
    # BUGFIX: the original referenced an undefined `times` here (NameError);
    # use the truncated light curve's span for the scaling instead.
    plt.step(phase_bins[:-1],summed_profile*(truncated_t[-1]-truncated_t[0])/T)
    plt.xlabel('Phase', fontsize=12)
    plt.ylabel('Count/' + str(tbin_size) + 's',fontsize=12)

    if mode == 'show':
        plt.show()
    elif mode == 'save':
        filename = 'pp_' + obsid + '_bin' + str(tbin_size) + 's_' + str(t1) + 's-' + str(t2) + 's_' + str(E1) + 'keV-' + str(E2) + 'keV.pdf'
        plt.savefig(parent_folder+'/'+filename,dpi=900)
        plt.close()

    return phase_bins[:-1],summed_profile
################################################################################
### SUBPLOTS
def partial_subplots_E(eventfile,par_list,tbin_size,Ebin_size,f_pulse,shift,no_phase_bins,subplot_Es,E1,E2,mode):
    """
    Plot stacked pulse-profile subplots, one per energy sub-band.

    [Though I don't think this will be used much. Count/s vs energy is pointless,
    since we're not folding in response matrix information here to get the flux.
    So we're just doing a count/s vs time with an energy cut to the data.]
    INTERJECTION: This caveat is for the spectrum, NOT the pulse profile!

    eventfile - path to the event file. Will extract ObsID from this for the NICER files.
    par_list - A list of parameters we'd like to extract from the FITS file
    (e.g., from eventcl, PI_FAST, TIME, PI)
    tbin_size - the size of the time bins (in seconds!)
    >> e.g., tbin_size = 2 means bin by 2s
    >> e.g., tbin_size = 0.05 means bin by 0.05s
    Ebin_size - the size of the energy bins (in keV!)
    >> e.g., Ebin_size = 0.1 means bin by 0.1keV
    >> e.g., Ebin_size = 0.01 means bin by 0.01keV!
    f_pulse - the frequency of the pulse
    shift - how much to shift the pulse by in the phase axis.
    It only affects how it is presented.
    no_phase_bins - number of phase bins desired
    subplot_Es - list of tuples defining energy boundaries for pulse profiles
    E1 - lower energy boundary
    E2 - upper energy boundary
    mode - whether we want to show or save the plot ('show'/'save'/'overlap')

    Returns (phase_bins[:-1], summed_profile) of the LAST sub-band processed.
    """
    if type(eventfile) != str:
        raise TypeError("eventfile should be a string!")
    if 'TIME' not in par_list:
        raise ValueError("You should have 'TIME' in the parameter list!")
    if type(par_list) != list and type(par_list) != np.ndarray:
        raise TypeError("par_list should either be a list or an array!")
    if E2<E1:
        raise ValueError("E2 should be greater than E1!")
    if mode != 'show' and mode != 'save' and mode != 'overlap':
        raise ValueError("Mode should either be 'show' or 'save' or 'overlap'!")

    parent_folder = str(pathlib.Path(eventfile).parent)

    #should find a way to generalize calling p10,p20,etc in the future..!
    fig,(p10,p20,p30,p40,p50,p60) = plt.subplots(6,1) #hard-coded to 6 stacked panels
    gs = gridspec.GridSpec(6,1)
    for i in range(len(subplot_Es)): #for each tuple of energy boundaries
        truncated_t, truncated_t_counts, truncated_E, truncated_E_counts = Lv1_data_bin.binning_E(eventfile,par_list,tbin_size,Ebin_size,subplot_Es[i][0],subplot_Es[i][1])
        # BUGFIX: this function receives f_pulse, not pulse_pars; the original
        # referenced the undefined name pulse_pars[0] (NameError).
        phases, phase_bins, summed_profile = pulse_profile(f_pulse,truncated_t[:-1],truncated_t_counts,shift,no_phase_bins)
        plt.subplot(gs[i]).plot(phase_bins[:-1],summed_profile,'-')

    event_header = fits.open(eventfile)[1].header
    obj_name = event_header['OBJECT']
    obsid = event_header['OBS_ID']

    fig.suptitle(str(obsid),fontsize=12)
    if mode != 'overlap':
        plt.figure()
    plt.xlabel('Phase', fontsize=12)
    plt.ylabel('Count/' + str(tbin_size) + 's',fontsize=12)
    if mode != 'overlap':
        plt.title('Pulse profile for ' + obj_name + ', ObsID ' + str(obsid)+ '\n Energy range: ' + str(E1) + 'keV - ' + str(E2) + 'keV',fontsize=12)

    if mode == 'show':
        plt.show()
    elif mode == 'save':
        filename = 'pp_subplots_' + obsid + '_bin' + str(tbin_size) + 's_' + str(E1) + 'keV-' + str(E2) + 'keV.pdf'
        plt.savefig(parent_folder+'/'+filename,dpi=900)
        plt.close()

    return phase_bins[:-1],summed_profile
if __name__ == "__main__":
    # Example driver: point at a barycentre-corrected NICER event file and
    # uncomment one of the calls below to produce the corresponding pulse profile.
    eventfile = '/Volumes/Samsung_T5/NICERsoft_outputs/1034070101_pipe/ni1034070101_nicersoft_bary.evt'
    #whole(eventfile,['TIME','PI','PI_FAST'],0.01,[0.20801275,0,0],0.4,21,'show')
    #partial_t(eventfile,['TIME','PI','PI_FAST'],1,[0.2081,0,0],0.4,21,0,400,'show')
    #partial_E(eventfile,['TIME','PI','PI_FAST'],1,0.05,[0.2081,0,0],0.4,21,0.3,12,'show')
    #partial_tE(eventfile,['TIME','PI','PI_FAST'],1,0.05,[0.2081,0,0],0.4,21,0,400,0.3,12,'show')
| [
"Lv1_data_bin.binning_tE",
"Lv1_data_bin.binning_E",
"numpy.argsort",
"matplotlib.pyplot.step",
"matplotlib.pyplot.figure",
"numpy.mean",
"pathlib.Path",
"pint.eventstats.z2m",
"PyAstronomy.pyasl.foldAt",
"matplotlib.pyplot.close",
"pint.eventstats.sf_z2m",
"numpy.max",
"numpy.linspace",
"... | [((534, 555), 'Lv0_dirs.global_par', 'Lv0_dirs.global_par', ([], {}), '()\n', (553, 555), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((1209, 1249), 'PyAstronomy.pyasl.foldAt', 'foldAt', (['times', 'period'], {'T0': '(shift * period)'}), '(times, period, T0=shift * period)\n', (1215, 1249), False, 'from PyAstronomy.pyasl import foldAt\n'), ((1264, 1282), 'numpy.argsort', 'np.argsort', (['phases'], {}), '(phases)\n', (1274, 1282), True, 'import numpy as np\n'), ((1408, 1448), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(no_phase_bins * 2 + 1)'], {}), '(0, 2, no_phase_bins * 2 + 1)\n', (1419, 1448), True, 'import numpy as np\n'), ((1486, 1559), 'scipy.stats.binned_statistic', 'stats.binned_statistic', (['phases', 'counts'], {'statistic': '"""mean"""', 'bins': 'phase_bins'}), "(phases, counts, statistic='mean', bins=phase_bins)\n", (1508, 1559), False, 'from scipy import stats\n'), ((2619, 2644), 'numpy.longdouble', 'np.longdouble', (['start_time'], {}), '(start_time)\n', (2632, 2644), True, 'import numpy as np\n'), ((2661, 2685), 'numpy.longdouble', 'np.longdouble', (['stop_time'], {}), '(stop_time)\n', (2674, 2685), True, 'import numpy as np\n'), ((2699, 2720), 'numpy.longdouble', 'np.longdouble', (['period'], {}), '(period)\n', (2712, 2720), True, 'import numpy as np\n'), ((2732, 2767), 'numpy.array', 'np.array', (['gtis'], {'dtype': 'np.longdouble'}), '(gtis, dtype=np.longdouble)\n', (2740, 2767), True, 'import numpy as np\n'), ((2828, 2842), 'numpy.zeros', 'np.zeros', (['nbin'], {}), '(nbin)\n', (2836, 2842), True, 'import numpy as np\n'), ((2853, 2880), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(nbin + 1)'], {}), '(0, 1, nbin + 1)\n', (2864, 2880), True, 'import numpy as np\n'), ((2984, 3047), 'numpy.logical_and', 'np.logical_and', (['(gtis[:, 0] < stop_time)', '(gtis[:, 1] > start_time)'], {}), '(gtis[:, 0] < stop_time, gtis[:, 1] > start_time)\n', (2998, 3047), True, 'import numpy as np\n'), ((6010, 6046), 
'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(no_phase_bins + 1)'], {}), '(0, 1, no_phase_bins + 1)\n', (6021, 6046), True, 'import numpy as np\n'), ((6085, 6156), 'scipy.stats.binned_statistic', 'stats.binned_statistic', (['phase', 'counts'], {'statistic': '"""sum"""', 'bins': 'phase_bins'}), "(phase, counts, statistic='sum', bins=phase_bins)\n", (6107, 6156), False, 'from scipy import stats\n'), ((6303, 6332), 'numpy.sqrt', 'np.sqrt', (['summed_profile_total'], {}), '(summed_profile_total)\n', (6310, 6332), True, 'import numpy as np\n'), ((6652, 6668), 'pint.eventstats.z2m', 'z2m', (['phases'], {'m': 'm'}), '(phases, m=m)\n', (6655, 6668), False, 'from pint.eventstats import sf_z2m, z2m, sig2sigma\n'), ((6680, 6694), 'pint.eventstats.sf_z2m', 'sf_z2m', (['z_vals'], {}), '(z_vals)\n', (6686, 6694), False, 'from pint.eventstats import sf_z2m, z2m, sig2sigma\n'), ((6715, 6731), 'pint.eventstats.sig2sigma', 'sig2sigma', (['probs'], {}), '(probs)\n', (6724, 6731), False, 'from pint.eventstats import sf_z2m, z2m, sig2sigma\n'), ((6992, 7008), 'numpy.mean', 'np.mean', (['profile'], {}), '(profile)\n', (6999, 7008), True, 'import numpy as np\n'), ((8595, 8642), 'Lv0_fits2dict.fits2dict', 'Lv0_fits2dict.fits2dict', (['eventfile', '(1)', 'par_list'], {}), '(eventfile, 1, par_list)\n', (8618, 8642), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((8652, 8708), 'Lv0_fits2dict.fits2dict', 'Lv0_fits2dict.fits2dict', (['eventfile', '(2)', "['START', 'STOP']"], {}), "(eventfile, 2, ['START', 'STOP'])\n", (8675, 8708), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((9872, 9942), 'matplotlib.pyplot.step', 'plt.step', (['phase_bins[:-1]', '(summed_profile * (times[-1] - times[0]) / T)'], {}), '(phase_bins[:-1], summed_profile * (times[-1] - times[0]) / T)\n', (9880, 9942), True, 'import matplotlib.pyplot as plt\n'), ((9941, 9973), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Phase"""'], {'fontsize': '(12)'}), "('Phase', 
fontsize=12)\n", (9951, 9973), True, 'import matplotlib.pyplot as plt\n'), ((11934, 11981), 'Lv0_fits2dict.fits2dict', 'Lv0_fits2dict.fits2dict', (['eventfile', '(1)', 'par_list'], {}), '(eventfile, 1, par_list)\n', (11957, 11981), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((11991, 12047), 'Lv0_fits2dict.fits2dict', 'Lv0_fits2dict.fits2dict', (['eventfile', '(2)', "['START', 'STOP']"], {}), "(eventfile, 2, ['START', 'STOP'])\n", (12014, 12047), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((13036, 13106), 'matplotlib.pyplot.step', 'plt.step', (['phase_bins[:-1]', '(summed_profile * (times[-1] - times[0]) / T)'], {}), '(phase_bins[:-1], summed_profile * (times[-1] - times[0]) / T)\n', (13044, 13106), True, 'import matplotlib.pyplot as plt\n'), ((13104, 13136), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Phase"""'], {'fontsize': '(12)'}), "('Phase', fontsize=12)\n", (13114, 13136), True, 'import matplotlib.pyplot as plt\n'), ((15558, 15605), 'Lv0_fits2dict.fits2dict', 'Lv0_fits2dict.fits2dict', (['eventfile', '(1)', 'par_list'], {}), '(eventfile, 1, par_list)\n', (15581, 15605), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((15615, 15671), 'Lv0_fits2dict.fits2dict', 'Lv0_fits2dict.fits2dict', (['eventfile', '(2)', "['START', 'STOP']"], {}), "(eventfile, 2, ['START', 'STOP'])\n", (15638, 15671), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((16513, 16583), 'matplotlib.pyplot.step', 'plt.step', (['phase_bins[:-1]', '(summed_profile * (times[-1] - times[0]) / T)'], {}), '(phase_bins[:-1], summed_profile * (times[-1] - times[0]) / T)\n', (16521, 16583), True, 'import matplotlib.pyplot as plt\n'), ((16581, 16613), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Phase"""'], {'fontsize': '(12)'}), "('Phase', fontsize=12)\n", (16591, 16613), True, 'import matplotlib.pyplot as plt\n'), ((19083, 19130), 'Lv0_fits2dict.fits2dict', 'Lv0_fits2dict.fits2dict', (['eventfile', 
'(1)', 'par_list'], {}), '(eventfile, 1, par_list)\n', (19106, 19130), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((19140, 19196), 'Lv0_fits2dict.fits2dict', 'Lv0_fits2dict.fits2dict', (['eventfile', '(2)', "['START', 'STOP']"], {}), "(eventfile, 2, ['START', 'STOP'])\n", (19163, 19196), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((20199, 20269), 'matplotlib.pyplot.step', 'plt.step', (['phase_bins[:-1]', '(summed_profile * (times[-1] - times[0]) / T)'], {}), '(phase_bins[:-1], summed_profile * (times[-1] - times[0]) / T)\n', (20207, 20269), True, 'import matplotlib.pyplot as plt\n'), ((20267, 20299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Phase"""'], {'fontsize': '(12)'}), "('Phase', fontsize=12)\n", (20277, 20299), True, 'import matplotlib.pyplot as plt\n'), ((22844, 22862), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)', '(1)'], {}), '(6, 1)\n', (22856, 22862), True, 'import matplotlib.pyplot as plt\n'), ((22871, 22894), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(6)', '(1)'], {}), '(6, 1)\n', (22888, 22894), True, 'import matplotlib.gridspec as gridspec\n'), ((23557, 23589), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Phase"""'], {'fontsize': '(12)'}), "('Phase', fontsize=12)\n", (23567, 23589), True, 'import matplotlib.pyplot as plt\n'), ((2517, 2552), 'numpy.array', 'np.array', (['[[start_time, stop_time]]'], {}), '([[start_time, stop_time]])\n', (2525, 2552), True, 'import numpy as np\n'), ((3763, 3786), 'numpy.fmod', 'np.fmod', (['(g0 / period)', '(1)'], {}), '(g0 / period, 1)\n', (3770, 3786), True, 'import numpy as np\n'), ((4498, 4510), 'numpy.max', 'np.max', (['expo'], {}), '(expo)\n', (4504, 4510), True, 'import numpy as np\n'), ((9179, 9250), 'scipy.stats.binned_statistic', 'stats.binned_statistic', (['shifted_t', 'counts'], {'statistic': '"""sum"""', 'bins': 't_bins'}), "(shifted_t, counts, statistic='sum', bins=t_bins)\n", (9201, 9250), False, 'from scipy import 
stats\n'), ((9697, 9709), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9707, 9709), True, 'import matplotlib.pyplot as plt\n'), ((10065, 10075), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10073, 10075), True, 'import matplotlib.pyplot as plt\n'), ((12223, 12285), 'Lv1_data_bin.binning_t', 'Lv1_data_bin.binning_t', (['eventfile', 'par_list', 'tbin_size', 't1', 't2'], {}), '(eventfile, par_list, tbin_size, t1, t2)\n', (12245, 12285), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((12460, 12522), 'Lv1_data_bin.binning_t', 'Lv1_data_bin.binning_t', (['eventfile', 'par_list', 'tbin_size', 't1', 't2'], {}), '(eventfile, par_list, tbin_size, t1, t2)\n', (12482, 12522), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((12802, 12814), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12812, 12814), True, 'import matplotlib.pyplot as plt\n'), ((13229, 13239), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13237, 13239), True, 'import matplotlib.pyplot as plt\n'), ((15882, 15955), 'Lv1_data_bin.binning_E', 'Lv1_data_bin.binning_E', (['eventfile', 'par_list', 'tbin_size', 'Ebin_size', 'E1', 'E2'], {}), '(eventfile, par_list, tbin_size, Ebin_size, E1, E2)\n', (15904, 15955), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((16374, 16386), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16384, 16386), True, 'import matplotlib.pyplot as plt\n'), ((16882, 16892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16890, 16892), True, 'import matplotlib.pyplot as plt\n'), ((19407, 19493), 'Lv1_data_bin.binning_tE', 'Lv1_data_bin.binning_tE', (['eventfile', 'par_list', 'tbin_size', 'Ebin_size', 't1', 't2', 'E1', 'E2'], {}), '(eventfile, par_list, tbin_size, Ebin_size, t1, t2,\n E1, E2)\n', (19430, 19493), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((19906, 19918), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', 
(19916, 19918), True, 'import matplotlib.pyplot as plt\n'), ((20392, 20402), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20400, 20402), True, 'import matplotlib.pyplot as plt\n'), ((23044, 23149), 'Lv1_data_bin.binning_E', 'Lv1_data_bin.binning_E', (['eventfile', 'par_list', 'tbin_size', 'Ebin_size', 'subplot_Es[i][0]', 'subplot_Es[i][1]'], {}), '(eventfile, par_list, tbin_size, Ebin_size,\n subplot_Es[i][0], subplot_Es[i][1])\n', (23066, 23149), False, 'import Lv0_dirs, Lv0_fits2dict, Lv1_data_bin, Lv2_mkdir\n'), ((23540, 23552), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23550, 23552), True, 'import matplotlib.pyplot as plt\n'), ((23858, 23868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23866, 23868), True, 'import matplotlib.pyplot as plt\n'), ((4186, 4234), 'numpy.logical_and', 'np.logical_and', (['(phs[:, 0] <= l1)', '(phs[:, 1] >= l0)'], {}), '(phs[:, 0] <= l1, phs[:, 1] >= l0)\n', (4200, 4234), True, 'import numpy as np\n'), ((8546, 8569), 'pathlib.Path', 'pathlib.Path', (['eventfile'], {}), '(eventfile)\n', (8558, 8569), False, 'import pathlib\n'), ((9069, 9091), 'numpy.ceil', 'np.ceil', (['shifted_t[-1]'], {}), '(shifted_t[-1])\n', (9076, 9091), True, 'import numpy as np\n'), ((9558, 9578), 'astropy.io.fits.open', 'fits.open', (['eventfile'], {}), '(eventfile)\n', (9567, 9578), False, 'from astropy.io import fits\n'), ((10178, 10230), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(parent_folder + '/' + filename)"], {'dpi': '(900)'}), "(parent_folder + '/' + filename, dpi=900)\n", (10189, 10230), True, 'import matplotlib.pyplot as plt\n'), ((10234, 10245), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10243, 10245), True, 'import matplotlib.pyplot as plt\n'), ((11885, 11908), 'pathlib.Path', 'pathlib.Path', (['eventfile'], {}), '(eventfile)\n', (11897, 11908), False, 'import pathlib\n'), ((12663, 12683), 'astropy.io.fits.open', 'fits.open', (['eventfile'], {}), '(eventfile)\n', (12672, 12683), 
False, 'from astropy.io import fits\n'), ((13376, 13428), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(parent_folder + '/' + filename)"], {'dpi': '(900)'}), "(parent_folder + '/' + filename, dpi=900)\n", (13387, 13428), True, 'import matplotlib.pyplot as plt\n'), ((13432, 13443), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13441, 13443), True, 'import matplotlib.pyplot as plt\n'), ((15509, 15532), 'pathlib.Path', 'pathlib.Path', (['eventfile'], {}), '(eventfile)\n', (15521, 15532), False, 'import pathlib\n'), ((16235, 16255), 'astropy.io.fits.open', 'fits.open', (['eventfile'], {}), '(eventfile)\n', (16244, 16255), False, 'from astropy.io import fits\n'), ((17033, 17085), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(parent_folder + '/' + filename)"], {'dpi': '(900)'}), "(parent_folder + '/' + filename, dpi=900)\n", (17044, 17085), True, 'import matplotlib.pyplot as plt\n'), ((17089, 17100), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17098, 17100), True, 'import matplotlib.pyplot as plt\n'), ((19034, 19057), 'pathlib.Path', 'pathlib.Path', (['eventfile'], {}), '(eventfile)\n', (19046, 19057), False, 'import pathlib\n'), ((19767, 19787), 'astropy.io.fits.open', 'fits.open', (['eventfile'], {}), '(eventfile)\n', (19776, 19787), False, 'from astropy.io import fits\n'), ((20578, 20630), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(parent_folder + '/' + filename)"], {'dpi': '(900)'}), "(parent_folder + '/' + filename, dpi=900)\n", (20589, 20630), True, 'import matplotlib.pyplot as plt\n'), ((20634, 20645), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20643, 20645), True, 'import matplotlib.pyplot as plt\n'), ((22701, 22724), 'pathlib.Path', 'pathlib.Path', (['eventfile'], {}), '(eventfile)\n', (22713, 22724), False, 'import pathlib\n'), ((23359, 23379), 'astropy.io.fits.open', 'fits.open', (['eventfile'], {}), '(eventfile)\n', (23368, 23379), False, 'from astropy.io import fits\n'), ((24018, 24070), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["(parent_folder + '/' + filename)"], {'dpi': '(900)'}), "(parent_folder + '/' + filename, dpi=900)\n", (24029, 24070), True, 'import matplotlib.pyplot as plt\n'), ((24074, 24085), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24083, 24085), True, 'import matplotlib.pyplot as plt\n'), ((4346, 4369), 'numpy.max', 'np.max', (['[phs[i, 0], l0]'], {}), '([phs[i, 0], l0])\n', (4352, 4369), True, 'import numpy as np\n'), ((4393, 4416), 'numpy.min', 'np.min', (['[phs[i, 1], l1]'], {}), '([phs[i, 1], l1])\n', (4399, 4416), True, 'import numpy as np\n'), ((23279, 23297), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {}), '(gs[i])\n', (23290, 23297), True, 'import matplotlib.pyplot as plt\n'), ((9096, 9118), 'numpy.ceil', 'np.ceil', (['shifted_t[-1]'], {}), '(shifted_t[-1])\n', (9103, 9118), True, 'import numpy as np\n')] |
#Author: <NAME>
import numpy as np
from PlayGame.simulate import simulate_game
import matplotlib.pyplot as pt
def simulate(opponent: int):
    """Run 100 simulated games against the baseline AI and plot the results.

    Args:
        opponent: 1 selects the plain tree-search AI (random board sizes
            between 2 and 4 per game); any other value selects the
            tree-search + NN AI on a fixed 5x5 board.

    Side effects:
        Prints a banner, shows a matplotlib figure with the per-game node
        counts (blue) and scores (red), and prints a closing message.
    """
    # Fix: the original printed the "TreeAI" banner unconditionally, before
    # checking `opponent`, so the TreeNN branch printed both banners.
    if opponent == 1:
        print("simulating 100 events of TreeAI vs BaselineAI")
    else:
        print("Simulating 100 events of TreeNN_AI vs BaselineAI")
    nodes, scores = [], []
    for _ in range(100):
        if opponent == 1:
            # Random board dimensions in [2, 4] for each game.
            size = np.random.randint(2, 5), np.random.randint(2, 5)
        else:
            size = 5, 5
        res = simulate_game(opponent, size)
        nodes.append(res[0])
        scores.append(res[1])
    # Plotting was duplicated in both branches; also dropped the original
    # argument-less `pt.plot()` call, which draws nothing.
    pt.plot(nodes, 'b-')
    pt.plot(scores, 'r-')
    pt.legend(["Nodes", "Scores"])
    pt.show()
    print("********** End of Simulation. Thank you for playing **********")
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.random.randint",
"PlayGame.simulate.simulate_game"
] | [((509, 531), 'matplotlib.pyplot.plot', 'pt.plot', (['hist[0]', '"""b-"""'], {}), "(hist[0], 'b-')\n", (516, 531), True, 'import matplotlib.pyplot as pt\n'), ((540, 562), 'matplotlib.pyplot.plot', 'pt.plot', (['hist[1]', '"""r-"""'], {}), "(hist[1], 'r-')\n", (547, 562), True, 'import matplotlib.pyplot as pt\n'), ((571, 580), 'matplotlib.pyplot.plot', 'pt.plot', ([], {}), '()\n', (578, 580), True, 'import matplotlib.pyplot as pt\n'), ((589, 619), 'matplotlib.pyplot.legend', 'pt.legend', (["['Nodes', 'Scores']"], {}), "(['Nodes', 'Scores'])\n", (598, 619), True, 'import matplotlib.pyplot as pt\n'), ((627, 636), 'matplotlib.pyplot.show', 'pt.show', ([], {}), '()\n', (634, 636), True, 'import matplotlib.pyplot as pt\n'), ((912, 934), 'matplotlib.pyplot.plot', 'pt.plot', (['hist[0]', '"""b-"""'], {}), "(hist[0], 'b-')\n", (919, 934), True, 'import matplotlib.pyplot as pt\n'), ((943, 965), 'matplotlib.pyplot.plot', 'pt.plot', (['hist[1]', '"""r-"""'], {}), "(hist[1], 'r-')\n", (950, 965), True, 'import matplotlib.pyplot as pt\n'), ((974, 983), 'matplotlib.pyplot.plot', 'pt.plot', ([], {}), '()\n', (981, 983), True, 'import matplotlib.pyplot as pt\n'), ((992, 1022), 'matplotlib.pyplot.legend', 'pt.legend', (["['Nodes', 'Scores']"], {}), "(['Nodes', 'Scores'])\n", (1001, 1022), True, 'import matplotlib.pyplot as pt\n'), ((1030, 1039), 'matplotlib.pyplot.show', 'pt.show', ([], {}), '()\n', (1037, 1039), True, 'import matplotlib.pyplot as pt\n'), ((353, 382), 'PlayGame.simulate.simulate_game', 'simulate_game', (['opponent', 'size'], {}), '(opponent, size)\n', (366, 382), False, 'from PlayGame.simulate import simulate_game\n'), ((805, 834), 'PlayGame.simulate.simulate_game', 'simulate_game', (['opponent', 'size'], {}), '(opponent, size)\n', (818, 834), False, 'from PlayGame.simulate import simulate_game\n'), ((289, 312), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (306, 312), True, 'import numpy as np\n'), ((312, 335), 
'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (329, 335), True, 'import numpy as np\n')] |
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
import gin
import numpy as np
import tensorflow as tf
from tf_agents.trajectories.time_step import StepType
from alf.algorithms.algorithm import Algorithm, AlgorithmStep, LossInfo
from alf.utils import dist_utils
from alf.utils.averager import ScalarWindowAverager
from alf.utils.common import namedtuple, run_if, should_record_summaries
from alf.utils.dist_utils import calc_default_target_entropy
# Per-step loss container: `entropy_loss` holds the negative policy entropy.
EntropyTargetLossInfo = namedtuple("EntropyTargetLossInfo", ["entropy_loss"])
# Info emitted by EntropyTargetAlgorithm.train_step() and consumed by
# calc_loss(); carries the step type so terminal steps can be masked out.
EntropyTargetInfo = namedtuple("EntropyTargetInfo", ["step_type", "loss"])
@gin.configurable
class EntropyTargetAlgorithm(Algorithm):
    """Algorithm for adjusting entropy regularization.

    It tries to adjust the entropy regularization (i.e. alpha) so that the
    the entropy is not smaller than `target_entropy`.

    The algorithm has two stages:
    1. init stage. During this stage, the alpha is not changed. It transitions
       to adjust_stage once entropy drops below `target_entropy`.
    2. adjust stage. During this stage, log_alpha is adjusted using this formula:
       ((below + 0.5 * above) * decreasing - (above + 0.5 * below) * increasing) * update_rate
       Note that log_alpha will always be decreased if entropy is increasing
       even when the entropy is below the target entropy. This is to prevent
       overshooting log_alpha to a too big value. Same reason for always
       increasing log_alpha even when the entropy is above the target entropy.
       `update_rate` is initialized to `fast_update_rate` and is reduced by a
       factor of 0.9 whenever the entropy crosses `target_entropy`. `update_rate`
       is reset to `fast_update_rate` if entropy drops too much below
       `target_entropy` (i.e., fast_stage_thresh in the code, which is the half
       of `target_entropy` if it is positive, and twice of `target_entropy` if
       it is negative.
    """

    def __init__(self,
                 action_spec,
                 initial_alpha=0.01,
                 target_entropy=None,
                 slow_update_rate=0.01,
                 fast_update_rate=np.log(2),
                 min_alpha=1e-4,
                 debug_summaries=False):
        """Create an EntropyTargetAlgorithm

        Args:
            action_spec (nested BoundedTensorSpec): representing the actions.
            initial_alpha (float): initial value for alpha.
            target_entropy (float): the lower bound of the entropy. If not
                provided, a default value proportional to the action dimension
                is used.
            slow_update_rate (float): minimal update rate for log_alpha
            fast_update_rate (float): maximum update rate for log_alpha
            min_alpha (float): the minimal value of alpha. If <=0, exp(-100) is
                used.
            debug_summaries (bool): True if debug summaries should be created.
        """
        super().__init__(
            debug_summaries=debug_summaries, name="EntropyTargetAlgorithm")

        # log(alpha) is stored (not alpha itself) so additive updates scale
        # alpha multiplicatively; non-trainable because it is adjusted by
        # the hand-crafted rule in calc_loss(), not by gradients.
        self._log_alpha = tf.Variable(
            name='log_alpha',
            initial_value=np.log(initial_alpha),
            dtype=tf.float32,
            trainable=False)
        # stage == -1: init stage; stage >= 0: adjust stage (see class doc).
        self._stage = tf.Variable(
            name='stage', initial_value=-1, dtype=tf.int32, trainable=False)
        # Smoothed entropy over a window of 2 to reduce update jitter.
        self._avg_entropy = ScalarWindowAverager(2)
        self._update_rate = tf.Variable(
            name='update_rate',
            initial_value=fast_update_rate,
            dtype=tf.float32,
            trainable=False)
        self._action_spec = action_spec

        # Floor for log_alpha; exp(-100) is effectively zero.
        self._min_log_alpha = -100.
        if min_alpha >= 0.:
            self._min_log_alpha = np.log(min_alpha)

        if target_entropy is None:
            # Sum the per-spec default target entropies over the flattened
            # (possibly nested) action spec.
            flat_action_spec = tf.nest.flatten(self._action_spec)
            target_entropy = np.sum(
                list(map(calc_default_target_entropy, flat_action_spec)))

        # Threshold below which update_rate snaps back to fast_update_rate;
        # half the target when positive, double when negative (see class doc).
        if target_entropy > 0:
            self._fast_stage_thresh = 0.5 * target_entropy
        else:
            self._fast_stage_thresh = 2.0 * target_entropy
        self._target_entropy = target_entropy
        self._slow_update_rate = slow_update_rate
        self._fast_update_rate = fast_update_rate
        logging.info("target_entropy=%s" % target_entropy)

    def train_step(self, distribution, step_type):
        """Train step.

        Args:
            distribution (nested Distribution): action distribution from the
                policy.
            step_type: step type of the time steps (used later by calc_loss
                to mask out LAST steps).
        Returns:
            AlgorithmStep. `info` field is LossInfo, other fields are empty.
        """
        # entropy_for_gradient may differ from entropy when an approximation
        # is needed for differentiability (see dist_utils).
        entropy, entropy_for_gradient = dist_utils.entropy_with_fallback(
            distribution, self._action_spec)
        return AlgorithmStep(
            outputs=(),
            state=(),
            info=EntropyTargetInfo(
                step_type=step_type,
                loss=LossInfo(
                    loss=-entropy_for_gradient,
                    extra=EntropyTargetLossInfo(entropy_loss=-entropy))))

    def calc_loss(self, training_info: EntropyTargetInfo):
        # Scale the entropy loss by the current alpha and, as a side effect,
        # run the two-stage alpha adjustment described in the class docstring.
        loss_info = training_info.loss
        # Mask out LAST steps: their entropy should not influence the stats.
        mask = tf.cast(training_info.step_type != StepType.LAST, tf.float32)
        entropy = -loss_info.extra.entropy_loss * mask
        num = tf.reduce_sum(mask)
        # Masked mean and standard deviation of the per-step entropy.
        entropy2 = tf.reduce_sum(tf.square(entropy)) / num
        entropy = tf.reduce_sum(entropy) / num
        # max(0, .) guards against tiny negative variance from rounding.
        entropy_std = tf.sqrt(tf.maximum(0.0, entropy2 - entropy * entropy))
        prev_avg_entropy = self._avg_entropy.get()
        avg_entropy = self._avg_entropy.average(entropy)

        def _init():
            # Init stage: move to the adjust stage (stage -1 -> 0) the first
            # time the smoothed entropy falls below the target.
            crossing = avg_entropy < self._target_entropy
            self._stage.assign_add(tf.cast(crossing, tf.int32))

        def _adjust():
            # Adjust stage: stage acts as a flag for "entropy above target";
            # each crossing of the target shrinks update_rate by 0.9.
            previous_above = tf.cast(self._stage, tf.bool)
            above = avg_entropy > self._target_entropy
            self._stage.assign(tf.cast(above, tf.int32))
            crossing = above != previous_above
            update_rate = self._update_rate
            update_rate = tf.where(crossing, 0.9 * update_rate, update_rate)
            update_rate = tf.maximum(update_rate, self._slow_update_rate)
            # Entropy collapsed far below target: reset to the fast rate.
            update_rate = tf.where(entropy < self._fast_stage_thresh,
                                   np.float32(self._fast_update_rate),
                                   update_rate)
            self._update_rate.assign(update_rate)
            above = tf.cast(above, tf.float32)
            below = 1 - above
            increasing = tf.cast(avg_entropy > prev_avg_entropy, tf.float32)
            decreasing = 1 - increasing
            # Full step in the direction suggested by position vs. target,
            # half step against the current trend to damp overshoot.
            log_alpha = self._log_alpha + (
                (below + 0.5 * above) * decreasing -
                (above + 0.5 * below) * increasing) * update_rate
            log_alpha = tf.maximum(log_alpha, np.float32(self._min_log_alpha))
            self._log_alpha.assign(log_alpha)

        run_if(self._stage == -1, _init)
        run_if(self._stage >= 0, _adjust)
        alpha = tf.exp(self._log_alpha)

        def _summarize():
            with self.name_scope:
                tf.summary.scalar("alpha", alpha)
                tf.summary.scalar("entropy_std", entropy_std)
                tf.summary.scalar("avg_entropy", avg_entropy)
                tf.summary.scalar("stage", self._stage)
                tf.summary.scalar("update_rate", self._update_rate)

        if self._debug_summaries:
            run_if(should_record_summaries(), _summarize)

        return loss_info._replace(loss=loss_info.loss * alpha)
| [
"alf.utils.common.should_record_summaries",
"alf.utils.averager.ScalarWindowAverager",
"alf.utils.dist_utils.entropy_with_fallback",
"numpy.log",
"tensorflow.reduce_sum",
"tensorflow.summary.scalar",
"alf.utils.common.namedtuple",
"tensorflow.maximum",
"numpy.float32",
"absl.logging.info",
"tens... | [((1059, 1112), 'alf.utils.common.namedtuple', 'namedtuple', (['"""EntropyTargetLossInfo"""', "['entropy_loss']"], {}), "('EntropyTargetLossInfo', ['entropy_loss'])\n", (1069, 1112), False, 'from alf.utils.common import namedtuple, run_if, should_record_summaries\n'), ((1133, 1187), 'alf.utils.common.namedtuple', 'namedtuple', (['"""EntropyTargetInfo"""', "['step_type', 'loss']"], {}), "('EntropyTargetInfo', ['step_type', 'loss'])\n", (1143, 1187), False, 'from alf.utils.common import namedtuple, run_if, should_record_summaries\n'), ((2717, 2726), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2723, 2726), True, 'import numpy as np\n'), ((3996, 4072), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""stage"""', 'initial_value': '(-1)', 'dtype': 'tf.int32', 'trainable': '(False)'}), "(name='stage', initial_value=-1, dtype=tf.int32, trainable=False)\n", (4007, 4072), True, 'import tensorflow as tf\n'), ((4114, 4137), 'alf.utils.averager.ScalarWindowAverager', 'ScalarWindowAverager', (['(2)'], {}), '(2)\n', (4134, 4137), False, 'from alf.utils.averager import ScalarWindowAverager\n'), ((4166, 4269), 'tensorflow.Variable', 'tf.Variable', ([], {'name': '"""update_rate"""', 'initial_value': 'fast_update_rate', 'dtype': 'tf.float32', 'trainable': '(False)'}), "(name='update_rate', initial_value=fast_update_rate, dtype=tf.\n float32, trainable=False)\n", (4177, 4269), True, 'import tensorflow as tf\n'), ((5000, 5050), 'absl.logging.info', 'logging.info', (["('target_entropy=%s' % target_entropy)"], {}), "('target_entropy=%s' % target_entropy)\n", (5012, 5050), False, 'from absl import logging\n'), ((5388, 5453), 'alf.utils.dist_utils.entropy_with_fallback', 'dist_utils.entropy_with_fallback', (['distribution', 'self._action_spec'], {}), '(distribution, self._action_spec)\n', (5420, 5453), False, 'from alf.utils import dist_utils\n'), ((5883, 5944), 'tensorflow.cast', 'tf.cast', (['(training_info.step_type != StepType.LAST)', 'tf.float32'], {}), 
'(training_info.step_type != StepType.LAST, tf.float32)\n', (5890, 5944), True, 'import tensorflow as tf\n'), ((6014, 6033), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {}), '(mask)\n', (6027, 6033), True, 'import tensorflow as tf\n'), ((7636, 7668), 'alf.utils.common.run_if', 'run_if', (['(self._stage == -1)', '_init'], {}), '(self._stage == -1, _init)\n', (7642, 7668), False, 'from alf.utils.common import namedtuple, run_if, should_record_summaries\n'), ((7677, 7710), 'alf.utils.common.run_if', 'run_if', (['(self._stage >= 0)', '_adjust'], {}), '(self._stage >= 0, _adjust)\n', (7683, 7710), False, 'from alf.utils.common import namedtuple, run_if, should_record_summaries\n'), ((7727, 7750), 'tensorflow.exp', 'tf.exp', (['self._log_alpha'], {}), '(self._log_alpha)\n', (7733, 7750), True, 'import tensorflow as tf\n'), ((4452, 4469), 'numpy.log', 'np.log', (['min_alpha'], {}), '(min_alpha)\n', (4458, 4469), True, 'import numpy as np\n'), ((4537, 4571), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['self._action_spec'], {}), '(self._action_spec)\n', (4552, 4571), True, 'import tensorflow as tf\n'), ((6111, 6133), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['entropy'], {}), '(entropy)\n', (6124, 6133), True, 'import tensorflow as tf\n'), ((6170, 6215), 'tensorflow.maximum', 'tf.maximum', (['(0.0)', '(entropy2 - entropy * entropy)'], {}), '(0.0, entropy2 - entropy * entropy)\n', (6180, 6215), True, 'import tensorflow as tf\n'), ((6522, 6551), 'tensorflow.cast', 'tf.cast', (['self._stage', 'tf.bool'], {}), '(self._stage, tf.bool)\n', (6529, 6551), True, 'import tensorflow as tf\n'), ((6781, 6831), 'tensorflow.where', 'tf.where', (['crossing', '(0.9 * update_rate)', 'update_rate'], {}), '(crossing, 0.9 * update_rate, update_rate)\n', (6789, 6831), True, 'import tensorflow as tf\n'), ((6858, 6905), 'tensorflow.maximum', 'tf.maximum', (['update_rate', 'self._slow_update_rate'], {}), '(update_rate, self._slow_update_rate)\n', (6868, 6905), True, 'import tensorflow 
as tf\n'), ((7165, 7191), 'tensorflow.cast', 'tf.cast', (['above', 'tf.float32'], {}), '(above, tf.float32)\n', (7172, 7191), True, 'import tensorflow as tf\n'), ((7247, 7298), 'tensorflow.cast', 'tf.cast', (['(avg_entropy > prev_avg_entropy)', 'tf.float32'], {}), '(avg_entropy > prev_avg_entropy, tf.float32)\n', (7254, 7298), True, 'import tensorflow as tf\n'), ((3892, 3913), 'numpy.log', 'np.log', (['initial_alpha'], {}), '(initial_alpha)\n', (3898, 3913), True, 'import numpy as np\n'), ((6067, 6085), 'tensorflow.square', 'tf.square', (['entropy'], {}), '(entropy)\n', (6076, 6085), True, 'import tensorflow as tf\n'), ((6440, 6467), 'tensorflow.cast', 'tf.cast', (['crossing', 'tf.int32'], {}), '(crossing, tf.int32)\n', (6447, 6467), True, 'import tensorflow as tf\n'), ((6638, 6662), 'tensorflow.cast', 'tf.cast', (['above', 'tf.int32'], {}), '(above, tf.int32)\n', (6645, 6662), True, 'import tensorflow as tf\n'), ((7011, 7045), 'numpy.float32', 'np.float32', (['self._fast_update_rate'], {}), '(self._fast_update_rate)\n', (7021, 7045), True, 'import numpy as np\n'), ((7548, 7579), 'numpy.float32', 'np.float32', (['self._min_log_alpha'], {}), '(self._min_log_alpha)\n', (7558, 7579), True, 'import numpy as np\n'), ((7828, 7861), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""alpha"""', 'alpha'], {}), "('alpha', alpha)\n", (7845, 7861), True, 'import tensorflow as tf\n'), ((7878, 7923), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""entropy_std"""', 'entropy_std'], {}), "('entropy_std', entropy_std)\n", (7895, 7923), True, 'import tensorflow as tf\n'), ((7940, 7985), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""avg_entropy"""', 'avg_entropy'], {}), "('avg_entropy', avg_entropy)\n", (7957, 7985), True, 'import tensorflow as tf\n'), ((8002, 8041), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""stage"""', 'self._stage'], {}), "('stage', self._stage)\n", (8019, 8041), True, 'import tensorflow as tf\n'), ((8058, 8109), 
'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""update_rate"""', 'self._update_rate'], {}), "('update_rate', self._update_rate)\n", (8075, 8109), True, 'import tensorflow as tf\n'), ((8164, 8189), 'alf.utils.common.should_record_summaries', 'should_record_summaries', ([], {}), '()\n', (8187, 8189), False, 'from alf.utils.common import namedtuple, run_if, should_record_summaries\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# DPP kernel fitter
# <NAME>
# Dec. 2019
from __future__ import print_function
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import pandas as pd
import argparse
import functools
import random
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training,datasets,iterators
from chainer.training import extensions,triggers
from chainer.dataset import dataset_mixin, convert, concat_examples
from chainerui.utils import save_args
import os,shutil,glob
from datetime import datetime as dt
from consts import optim,dtypes
## (non)-symmetric DPP
class DPP(chainer.Chain):
    """(Non-)symmetric DPP kernel L over a ground set of `dim` items.

    L = V diag(exp(h)) V^T  (symmetric, rank `rankV`)
        + B C^T - C B^T     (skew-symmetric, rank <= 2*rankB)

    where h is produced by a small fully-connected network (empty
    `n_hidden_channels` disables the diagonal modulation). Either part can
    be switched off by passing rank 0.
    """
    def __init__(self, dim, rankV, rankB, n_hidden_channels):
        super().__init__()
        self.n_hidden_channels = n_hidden_channels
        self.dim = dim # cardinality of the universe set
        self.rankV = rankV
        self.rankB = rankB
        with self.init_scope():
            # FC layers l0, l1, ... generating the "diagonal" modulation.
            for i in range(len(n_hidden_channels)):
                setattr(self, 'l' + str(i), L.Linear(None,n_hidden_channels[i]))
            if rankV>0:
                # Low-rank factor of the symmetric part.
                self.V = chainer.Parameter(np.random.uniform(low=-2.0, high=2.0, size=(dim, rankV)))
            if rankB>0:
                # Low-rank factors of the skew-symmetric part.
                self.B = chainer.Parameter(np.random.uniform(low=-2.0, high=2.0, size=(dim, rankB)))
                self.C = chainer.Parameter(np.random.uniform(low=-2.0, high=2.0, size=(dim, rankB)))

    def __call__(self, x):
        # Returns the (dim, dim) kernel matrix L. NOTE(review): `x` feeds
        # the diagonal network; callers in this file pass a fixed 0 -- the
        # expected shape of a real input should be confirmed.
        h=x
        L = self.xp.zeros((self.dim,self.dim))
        ## "diagonal" is generated by a FC-NN
        for i in range(len(self.n_hidden_channels)):
            h=getattr(self, 'l' + str(i))(h)
            if i < len(self.n_hidden_channels)-1:
                h=F.relu(h)  # ReLU on all but the last layer
        ## DPP
        if self.rankV>0: # symmetric part
            if len(self.n_hidden_channels)>0:
                # Scale rows of V by exp(h) before the Gram product.
                L += F.matmul(self.V * F.exp(h), self.V, transb=True)
            else:
                L += F.matmul(self.V, self.V, transb=True)
        if self.rankB>0: # non-symmetric part
            AS = F.matmul(self.B,self.C,transb=True)
            L += AS - AS.T  # skew-symmetric: (AS - AS^T)^T = -(AS - AS^T)
        return L

    # (quick&dirty) Cholesky parametrisation
    def make_upper_triangular(self):
        # Zero the strictly upper-triangular entries of V, B and C in place
        # (operates on the raw arrays, bypassing the computational graph).
        rows = np.arange(self.dim).reshape((-1,1))
        if self.rankV>0:
            cols = np.arange(self.rankV).reshape((1,-1))
            self.V.array[np.where(rows<cols)] = 0
            # print(self.V)
        if self.rankB>0:
            cols = np.arange(self.rankB).reshape((1,-1))
            idx = np.where(rows<cols)
            self.B.array[idx] = 0
            self.C.array[idx] = 0
## dataset preparation
class Dataset(dataset_mixin.DatasetMixin):
    """Dataset of observed item subsets.

    Each line of the csv file at *path* is a comma-separated list of integer
    item IDs (one observed subset); a line containing only a newline encodes
    the empty subset.
    """
    def __init__(self, path):
        """Load all subsets from *path* and record the largest item ID seen."""
        self.path = path
        self.dat = []
        self.maxid = 0  # largest item ID observed; used to size the kernel
        with open(path) as infh:
            for line in infh:
                if len(line) > 1:  # a newline-only line has length 1
                    # Fix: `np.int` was removed in NumPy 1.24; use builtin int.
                    l = np.array(line.strip().split(','), dtype=int)
                    self.maxid = max(self.maxid, max(l))
                else:
                    l = []  # empty subset
                self.dat.append(l)
        print("Data loaded: {}, Max ID: {}".format(len(self.dat), self.maxid))

    def __len__(self):
        return len(self.dat)

    def get_example(self, i):
        """Return the i-th observed subset (array of item IDs, or [])."""
        return self.dat[i]

    def compute_entropy(self):
        """Empirical Shannon entropy (nats) of the observed subset distribution.

        Subsets are keyed by their sorted byte representation, so order within
        a line does not matter. Returns 0.0 for an empty dataset.
        """
        if not self.dat:
            return 0.0
        # Count duplicates with a dict: O(n) instead of the original O(n^2)
        # list.index() scan. Fix: `ndarray.tostring()` was removed in
        # NumPy 2.0; `tobytes()` is the byte-identical replacement.
        counts = {}
        for y in self.dat:
            key = np.sort(y).tobytes()
            counts[key] = counts.get(key, 0) + 1
        total = len(self.dat)
        ent = 0.0
        for c in counts.values():
            p = c / total
            ent -= p * np.log(p)
        return ent
# evaluator
class Evaluator(extensions.Evaluator):
    """Chainer evaluator reporting the DPP negative log-likelihood on the
    validation iterator under the key "myval/loss"."""
    name = "myval"
    def __init__(self, *args, **kwargs):
        # 'params' is accepted for interface symmetry with Updater; unused here.
        params = kwargs.pop('params')
        super(Evaluator, self).__init__(*args, **kwargs)
        self.count = 0  # number of completed evaluations
    def evaluate(self):
        model = self.get_target('main')
        L = model(0) # fixed input for now
        if self.eval_hook:
            self.eval_hook(self)
        loss = 0
        n = 0
        test_iter=self.get_iterator('main')
        # Accumulate -log det(L_S) over one full pass of the validation set.
        while True:
            batch = test_iter.next()
            if model.rankV==0:
                # Purely skew-symmetric kernel: det(L_S) vanishes for
                # odd-sized S, so only even non-empty subsets contribute.
                for b in batch:
                    if len(b) % 2 ==0:
                        if len(b)>0:
                            loss -= F.log(F.det(L[b,:][:,b]))
                            n += 1
            else:
                for b in batch:
                    if len(b)>0:
                        loss -= F.log(F.det(L[b,:][:,b]))
                n += len(batch)
            if test_iter.is_new_epoch:
                test_iter.reset()
                break
        loss /= max(n,1)  # max guards against division by zero
#        filename = "result_{}.csv".format(self.count)
        # Normalization term of the DPP likelihood: log det(I + L).
        loss += F.log(F.det(model.xp.eye(L.shape[0])+L))
        self.count += 1
        return {"myval/loss":loss}
## updater
class Updater(chainer.training.StandardUpdater):
    """Chainer updater minimizing the DPP negative log-likelihood:
    mean over the batch of -log det(L_S) plus log det(I + L)."""
    def __init__(self, *args, **kwargs):
        self.model = kwargs.pop('models')
        params = kwargs.pop('params')
        super(Updater, self).__init__(*args, **kwargs)
        self.args = params['args']  # command-line args (upper_triangular flag)

    def update_core(self):
        opt = self.get_optimizer('main')
        L = self.model(0) # fixed input for now
        # print(L.shape)
        batch = self.get_iterator('main').next()
        loss = 0
        if self.model.rankV==0:
            n=0
            for b in batch:
                if len(b) % 2 ==0: ## non-symmetric kernels have support on even selections
                    if len(b)>0:
                        loss -= F.log(F.det(L[b,:][:,b]))
                        n += 1
            loss /= max(n,1)  # max guards against division by zero
        else:
            for b in batch:
                if len(b)>0:
                    loss -= F.log(F.det(L[b,:][:,b]))
            loss /= len(batch)

        # Normalization term of the DPP likelihood: log det(I + L).
        nm = F.det(self.model.xp.eye(L.shape[0])+L)
        loss += F.log(nm)
        self.model.cleargrads()
        loss.backward()
        opt.update(loss=loss)
        if self.args.upper_triangular:
            # Re-impose the triangular parametrisation after each step.
            self.model.make_upper_triangular()
        chainer.report({'loss': loss}, self.model)
########################################################
def main():
    """Fit a (possibly non-symmetric) DPP kernel to selection data.

    Parses command-line options, builds the model/optimizer/trainer, runs
    training and finally prints a quick comparison between the learned DPP
    marginals and the empirical data distribution.
    """
    # command line argument parsing
    parser = argparse.ArgumentParser(description='Fitting non-symmetric DPP kernels to data')
    parser.add_argument('--train', '-t', help='Path to csv file')
    parser.add_argument('--val', help='Path to validation csv file')
    parser.add_argument('--outdir', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--epoch', '-e', type=int, default=200,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--models', '-m', default=None, help='load pretrained models')
    parser.add_argument('--early_stopping', '-es', type=int, default=0, help='')
    parser.add_argument('--rankV', '-rv', type=int, default=2, help='rank of V')
    parser.add_argument('--rankB', '-rb', type=int, default=0, help='rank of B')
    parser.add_argument('--dim', '-d', default=None, help='dimension of the kernel (number of items)')
    parser.add_argument('--n_hidden_channels', '-chs', type=int, nargs="*", default=[],
                        help='number of channels of hidden layers for diagonal entry')
    parser.add_argument('--batchsize', '-b', type=int, default=20,
                        help='Number of samples in each mini-batch')
    parser.add_argument('--predict', '-p', action='store_true', help='prediction with a specified model')
    parser.add_argument('--optimizer', '-op',choices=optim.keys(),default='Adam',
                        help='optimizer')
    parser.add_argument('--learning_rate', '-lr', type=float, default=1e-2,
                        help='learning rate')
    parser.add_argument('--lr_decay_strategy', '-lrs', choices=['exp','linear','none'], default='linear',
                        help='strategy for learning rate decay')
    parser.add_argument('--weight_decay_l1', '-wd1', type=float, default=0,
                        help='L1 weight decay for regularization')
    parser.add_argument('--weight_decay_l2', '-wd2', type=float, default=0,
                        help='L2 weight decay for regularization')
    parser.add_argument('--dtype', '-dt', choices=dtypes.keys(), default='fp32',
                        help='floating point precision')
    parser.add_argument('--vis_freq', '-vf', type=int, default=200,
                        help='output frequency in iteration')
    parser.add_argument('--upper_triangular', '-up', action='store_true',help="Upper triangular kernel parametrisation")
    args = parser.parse_args()

    # timestamped output directory so repeated runs do not overwrite each other
    dtime = dt.now().strftime('%m%d_%H%M')
    args.outdir = os.path.join(args.outdir, '{}'.format(dtime))

    # Enable autotuner of cuDNN
    chainer.config.autotune = True
    chainer.config.dtype = dtypes[args.dtype]
    chainer.print_runtime_info()
    print(args)
    save_args(args, args.outdir)

    # data
    train = Dataset(args.train)
    if not args.dim:
        args.dim = train.maxid+1
    if args.val:
        test = Dataset(args.val)
    else:
        test = Dataset(args.train)
    train_iter = iterators.SerialIterator(train, args.batchsize, shuffle=True)
    test_iter = iterators.SerialIterator(test, args.batchsize, repeat=False, shuffle=False)

    # initialise kernel components
    model = DPP(args.dim, args.rankV, args.rankB, args.n_hidden_channels)

    # Set up an optimizer
    optimizer = optim[args.optimizer](args.learning_rate)
    optimizer.setup(model)
    if args.weight_decay_l2>0:
        if args.optimizer in ['Adam','AdaBound','Eve']:
            # BUG FIX: the parsed option is weight_decay_l2; the previous code
            # referenced the non-existent args.weight_decay and raised
            # AttributeError whenever L2 decay was requested with Adam-like optimizers.
            optimizer.weight_decay_rate = args.weight_decay_l2
        else:
            optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay_l2))
    if args.weight_decay_l1>0:
        optimizer.add_hook(chainer.optimizer_hooks.Lasso(args.weight_decay_l1))
    if args.models:
        chainer.serializers.load_npz(args.models,model)
        print('model loaded: {}'.format(args.models))
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    updater = Updater(
        models=model,
        iterator=train_iter,
        optimizer={'main': optimizer},
        device=args.gpu,
        params={'args': args}
    )
    log_interval = 1, 'epoch'
    if args.early_stopping:
        stop_trigger = triggers.EarlyStoppingTrigger(
            monitor='myval/loss',
            check_trigger=(args.early_stopping, 'epoch'),
            max_trigger=(args.epoch, 'epoch'))
    else:
        stop_trigger = (args.epoch, 'epoch')
    trainer = training.Trainer(updater, stop_trigger, out=args.outdir)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    # trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    # learning-rate decay: SGD-like optimizers expose 'lr', Adam-like ones 'alpha'
    if args.optimizer in ['SGD','Momentum','AdaGrad','RMSprop']:
        trainer.extend(extensions.ExponentialShift('lr', 0.33), trigger=(args.epoch/5, 'epoch'))
    elif args.optimizer in ['Adam','AdaBound','Eve']:
        if args.lr_decay_strategy == 'exp':
            trainer.extend(extensions.ExponentialShift('alpha', 0.33), trigger=(args.epoch/5, 'epoch'))
        if args.lr_decay_strategy == 'linear':
            decay_end_iter = len(train) * args.epoch
            trainer.extend(extensions.LinearShift('alpha', (args.learning_rate,0), (decay_end_iter//2,decay_end_iter)))
    if extensions.PlotReport.available():
        trainer.extend(extensions.PlotReport(['main/loss','myval/loss'],
                                  'epoch', file_name='loss.png'))
    trainer.extend(extensions.PrintReport([
            'epoch', 'main/loss','myval/loss',
            'elapsed_time', 'lr'
         ]),trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(Evaluator(test_iter, model, params={'args': args}, device=args.gpu),trigger=(args.vis_freq, 'iteration'))
    if not args.predict:
        trainer.run()

    ########### (quick&dirty) results
    print("\n\nEntropy of the traing set {}".format(train.compute_entropy()))
    # histogram of DPP: probability of {pivot} and all pairs {pivot, i}
    pivot = 0
    L=model(0).array
    if args.gpu>-1:
        L=L.get()
    np.savetxt(os.path.join(args.outdir,"L.csv"),L)
    if args.rankV>0:
        np.savetxt(os.path.join(args.outdir,"V.csv"),model.V.array)
    if args.rankB>0:
        np.savetxt(os.path.join(args.outdir,"B.csv"),model.B.array)
        np.savetxt(os.path.join(args.outdir,"C.csv"),model.C.array)
    nm = np.linalg.det(np.eye(L.shape[0])+L)
    p = np.zeros(args.dim)
    p[0] = L[pivot,pivot]/nm
    for i in range(pivot+1,args.dim):
        p[i] = np.linalg.det(L[[pivot,i],:][:,[pivot,i]])/nm
    # histogram of data (the redundant duplicate initialisation of p_dat was removed)
    p_dat = np.zeros(args.dim)
    for b in train:
        if pivot in b:
            if len(b)==1:
                p_dat[0] += 1
            elif len(b)==2 and b[1]>pivot:
                p_dat[b[1]] += 1
    p_dat /= len(train)
    np.set_printoptions(precision=5,suppress=True)
    print("\n Probability of DPP")
    print(p)
    print("\n Probability of data")
    print(p_dat)
# Entry point when the module is executed as a script.
if __name__ == '__main__':
    main()
| [
"chainer.training.extensions.observe_lr",
"argparse.ArgumentParser",
"chainer.training.extensions.LinearShift",
"chainer.links.get",
"numpy.arange",
"chainer.iterators.SerialIterator",
"os.path.join",
"chainer.training.extensions.LogReport",
"chainer.links.Linear",
"numpy.set_printoptions",
"cha... | [((151, 165), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (158, 165), True, 'import matplotlib as mpl\n'), ((6436, 6521), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fitting non-symmetric DPP kernels to data"""'}), "(description='Fitting non-symmetric DPP kernels to data'\n )\n", (6459, 6521), False, 'import argparse\n'), ((9244, 9272), 'chainer.print_runtime_info', 'chainer.print_runtime_info', ([], {}), '()\n', (9270, 9272), False, 'import chainer\n'), ((9293, 9321), 'chainerui.utils.save_args', 'save_args', (['args', 'args.outdir'], {}), '(args, args.outdir)\n', (9302, 9321), False, 'from chainerui.utils import save_args\n'), ((9532, 9593), 'chainer.iterators.SerialIterator', 'iterators.SerialIterator', (['train', 'args.batchsize'], {'shuffle': '(True)'}), '(train, args.batchsize, shuffle=True)\n', (9556, 9593), False, 'from chainer import training, datasets, iterators\n'), ((9610, 9685), 'chainer.iterators.SerialIterator', 'iterators.SerialIterator', (['test', 'args.batchsize'], {'repeat': '(False)', 'shuffle': '(False)'}), '(test, args.batchsize, repeat=False, shuffle=False)\n', (9634, 9685), False, 'from chainer import training, datasets, iterators\n'), ((11005, 11061), 'chainer.training.Trainer', 'training.Trainer', (['updater', 'stop_trigger'], {'out': 'args.outdir'}), '(updater, stop_trigger, out=args.outdir)\n', (11021, 11061), False, 'from chainer import training, datasets, iterators\n'), ((11839, 11872), 'chainer.training.extensions.PlotReport.available', 'extensions.PlotReport.available', ([], {}), '()\n', (11870, 11872), False, 'from chainer.training import extensions, triggers\n'), ((12976, 12994), 'numpy.zeros', 'np.zeros', (['args.dim'], {}), '(args.dim)\n', (12984, 12994), True, 'import numpy as np\n'), ((13003, 13021), 'numpy.zeros', 'np.zeros', (['args.dim'], {}), '(args.dim)\n', (13011, 13021), True, 'import numpy as np\n'), ((13186, 13204), 'numpy.zeros', 'np.zeros', 
(['args.dim'], {}), '(args.dim)\n', (13194, 13204), True, 'import numpy as np\n'), ((13408, 13455), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(5)', 'suppress': '(True)'}), '(precision=5, suppress=True)\n', (13427, 13455), True, 'import numpy as np\n'), ((6084, 6093), 'chainer.functions.log', 'F.log', (['nm'], {}), '(nm)\n', (6089, 6093), True, 'import chainer.functions as F\n'), ((6274, 6316), 'chainer.report', 'chainer.report', (["{'loss': loss}", 'self.model'], {}), "({'loss': loss}, self.model)\n", (6288, 6316), False, 'import chainer\n'), ((10293, 10341), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.models', 'model'], {}), '(args.models, model)\n', (10321, 10341), False, 'import chainer\n'), ((10766, 10903), 'chainer.training.triggers.EarlyStoppingTrigger', 'triggers.EarlyStoppingTrigger', ([], {'monitor': '"""myval/loss"""', 'check_trigger': "(args.early_stopping, 'epoch')", 'max_trigger': "(args.epoch, 'epoch')"}), "(monitor='myval/loss', check_trigger=(args.\n early_stopping, 'epoch'), max_trigger=(args.epoch, 'epoch'))\n", (10795, 10903), False, 'from chainer.training import extensions, triggers\n'), ((11081, 11123), 'chainer.training.extensions.LogReport', 'extensions.LogReport', ([], {'trigger': 'log_interval'}), '(trigger=log_interval)\n', (11101, 11123), False, 'from chainer.training import extensions, triggers\n'), ((11200, 11223), 'chainer.training.extensions.observe_lr', 'extensions.observe_lr', ([], {}), '()\n', (11221, 11223), False, 'from chainer.training import extensions, triggers\n'), ((12033, 12119), 'chainer.training.extensions.PrintReport', 'extensions.PrintReport', (["['epoch', 'main/loss', 'myval/loss', 'elapsed_time', 'lr']"], {}), "(['epoch', 'main/loss', 'myval/loss', 'elapsed_time',\n 'lr'])\n", (12055, 12119), False, 'from chainer.training import extensions, triggers\n'), ((12190, 12232), 'chainer.training.extensions.ProgressBar', 'extensions.ProgressBar', ([], {'update_interval': 
'(10)'}), '(update_interval=10)\n', (12212, 12232), False, 'from chainer.training import extensions, triggers\n'), ((12612, 12619), 'chainer.links.get', 'L.get', ([], {}), '()\n', (12617, 12619), True, 'import chainer.links as L\n'), ((12635, 12669), 'os.path.join', 'os.path.join', (['args.outdir', '"""L.csv"""'], {}), "(args.outdir, 'L.csv')\n", (12647, 12669), False, 'import os, shutil, glob\n'), ((2060, 2097), 'chainer.functions.matmul', 'F.matmul', (['self.B', 'self.C'], {'transb': '(True)'}), '(self.B, self.C, transb=True)\n', (2068, 2097), True, 'import chainer.functions as F\n'), ((2533, 2554), 'numpy.where', 'np.where', (['(rows < cols)'], {}), '(rows < cols)\n', (2541, 2554), True, 'import numpy as np\n'), ((7948, 7960), 'consts.optim.keys', 'optim.keys', ([], {}), '()\n', (7958, 7960), False, 'from consts import optim, dtypes\n'), ((8648, 8661), 'consts.dtypes.keys', 'dtypes.keys', ([], {}), '()\n', (8659, 8661), False, 'from consts import optim, dtypes\n'), ((9032, 9040), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (9038, 9040), True, 'from datetime import datetime as dt\n'), ((10211, 10262), 'chainer.optimizer_hooks.Lasso', 'chainer.optimizer_hooks.Lasso', (['args.weight_decay_l1'], {}), '(args.weight_decay_l1)\n', (10240, 10262), False, 'import chainer\n'), ((11335, 11374), 'chainer.training.extensions.ExponentialShift', 'extensions.ExponentialShift', (['"""lr"""', '(0.33)'], {}), "('lr', 0.33)\n", (11362, 11374), False, 'from chainer.training import extensions, triggers\n'), ((11897, 11983), 'chainer.training.extensions.PlotReport', 'extensions.PlotReport', (["['main/loss', 'myval/loss']", '"""epoch"""'], {'file_name': '"""loss.png"""'}), "(['main/loss', 'myval/loss'], 'epoch', file_name=\n 'loss.png')\n", (11918, 11983), False, 'from chainer.training import extensions, triggers\n'), ((12712, 12746), 'os.path.join', 'os.path.join', (['args.outdir', '"""V.csv"""'], {}), "(args.outdir, 'V.csv')\n", (12724, 12746), False, 'import os, shutil, 
glob\n'), ((12801, 12835), 'os.path.join', 'os.path.join', (['args.outdir', '"""B.csv"""'], {}), "(args.outdir, 'B.csv')\n", (12813, 12835), False, 'import os, shutil, glob\n'), ((12869, 12903), 'os.path.join', 'os.path.join', (['args.outdir', '"""C.csv"""'], {}), "(args.outdir, 'C.csv')\n", (12881, 12903), False, 'import os, shutil, glob\n'), ((12942, 12960), 'numpy.eye', 'np.eye', (['L.shape[0]'], {}), '(L.shape[0])\n', (12948, 12960), True, 'import numpy as np\n'), ((13104, 13150), 'numpy.linalg.det', 'np.linalg.det', (['L[[pivot, i], :][:, [pivot, i]]'], {}), '(L[[pivot, i], :][:, [pivot, i]])\n', (13117, 13150), True, 'import numpy as np\n'), ((1737, 1746), 'chainer.functions.relu', 'F.relu', (['h'], {}), '(h)\n', (1743, 1746), True, 'import chainer.functions as F\n'), ((1959, 1996), 'chainer.functions.matmul', 'F.matmul', (['self.V', 'self.V'], {'transb': '(True)'}), '(self.V, self.V, transb=True)\n', (1967, 1996), True, 'import chainer.functions as F\n'), ((2238, 2257), 'numpy.arange', 'np.arange', (['self.dim'], {}), '(self.dim)\n', (2247, 2257), True, 'import numpy as np\n'), ((2381, 2402), 'numpy.where', 'np.where', (['(rows < cols)'], {}), '(rows < cols)\n', (2389, 2402), True, 'import numpy as np\n'), ((3794, 3803), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (3800, 3803), True, 'import numpy as np\n'), ((10100, 10151), 'chainer.optimizer.WeightDecay', 'chainer.optimizer.WeightDecay', (['args.weight_decay_l2'], {}), '(args.weight_decay_l2)\n', (10129, 10151), False, 'import chainer\n'), ((10434, 10467), 'chainer.cuda.get_device', 'chainer.cuda.get_device', (['args.gpu'], {}), '(args.gpu)\n', (10457, 10467), False, 'import chainer\n'), ((1050, 1086), 'chainer.links.Linear', 'L.Linear', (['None', 'n_hidden_channels[i]'], {}), '(None, n_hidden_channels[i])\n', (1058, 1086), True, 'import chainer.links as L\n'), ((1154, 1210), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-2.0)', 'high': '(2.0)', 'size': '(dim, rankV)'}), '(low=-2.0, 
high=2.0, size=(dim, rankV))\n', (1171, 1210), True, 'import numpy as np\n'), ((1279, 1335), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-2.0)', 'high': '(2.0)', 'size': '(dim, rankB)'}), '(low=-2.0, high=2.0, size=(dim, rankB))\n', (1296, 1335), True, 'import numpy as np\n'), ((1380, 1436), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-2.0)', 'high': '(2.0)', 'size': '(dim, rankB)'}), '(low=-2.0, high=2.0, size=(dim, rankB))\n', (1397, 1436), True, 'import numpy as np\n'), ((2318, 2339), 'numpy.arange', 'np.arange', (['self.rankV'], {}), '(self.rankV)\n', (2327, 2339), True, 'import numpy as np\n'), ((2477, 2498), 'numpy.arange', 'np.arange', (['self.rankB'], {}), '(self.rankB)\n', (2486, 2498), True, 'import numpy as np\n'), ((3448, 3458), 'numpy.sort', 'np.sort', (['y'], {}), '(y)\n', (3455, 3458), True, 'import numpy as np\n'), ((11534, 11576), 'chainer.training.extensions.ExponentialShift', 'extensions.ExponentialShift', (['"""alpha"""', '(0.33)'], {}), "('alpha', 0.33)\n", (11561, 11576), False, 'from chainer.training import extensions, triggers\n'), ((11738, 11837), 'chainer.training.extensions.LinearShift', 'extensions.LinearShift', (['"""alpha"""', '(args.learning_rate, 0)', '(decay_end_iter // 2, decay_end_iter)'], {}), "('alpha', (args.learning_rate, 0), (decay_end_iter //\n 2, decay_end_iter))\n", (11760, 11837), False, 'from chainer.training import extensions, triggers\n'), ((1889, 1897), 'chainer.functions.exp', 'F.exp', (['h'], {}), '(h)\n', (1894, 1897), True, 'import chainer.functions as F\n'), ((5965, 5985), 'chainer.functions.det', 'F.det', (['L[b, :][:, b]'], {}), '(L[b, :][:, b])\n', (5970, 5985), True, 'import chainer.functions as F\n'), ((4707, 4727), 'chainer.functions.det', 'F.det', (['L[b, :][:, b]'], {}), '(L[b, :][:, b])\n', (4712, 4727), True, 'import chainer.functions as F\n'), ((5784, 5804), 'chainer.functions.det', 'F.det', (['L[b, :][:, b]'], {}), '(L[b, :][:, b])\n', (5789, 5804), True, 'import 
chainer.functions as F\n'), ((4535, 4555), 'chainer.functions.det', 'F.det', (['L[b, :][:, b]'], {}), '(L[b, :][:, b])\n', (4540, 4555), True, 'import chainer.functions as F\n')] |
import pandas as pd
import os
from glob import glob
from pandas.core.frame import DataFrame
from tqdm import tqdm
import numpy as np
from numpy.random import randint
from typing import Union, Tuple
from sklearn.ensemble import RandomForestRegressor
from sklearn.base import RegressorMixin
from collections import OrderedDict
from copy import deepcopy
from covid_xprize.nixtamalai.helpers import add_geo_id
from covid_xprize.nixtamalai.analyze_predictor import IP_COLS
from microtc.utils import load_model
# Directory containing this module; used to resolve the prescription pickle files.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Number of prescription plans returned per region.
NUM_PRESCRIPTIONS = 10
# Faster than is_pareto_efficient_simple, but less readable.
def is_pareto_efficient(costs, return_mask = True):
    """Identify the Pareto-efficient (non-dominated) points of a cost matrix.

    Adapted from https://stackoverflow.com/questions/32791911
    ("fast calculation of Pareto front in Python").  Lower costs are better.

    :param costs: an (n_points, n_costs) array
    :param return_mask: when True, return a boolean mask over all points;
        otherwise return the integer indices of the efficient points
    :return: boolean array of shape (n_points,) if ``return_mask`` is True,
        else an integer index array of the efficient points
    """
    total = costs.shape[0]
    survivors = np.arange(total)   # indices (into the original array) still in play
    candidate = 0                  # position of the next pivot inside `costs`
    while candidate < len(costs):
        # keep every point that beats the pivot in at least one objective
        keep = np.any(costs < costs[candidate], axis=1)
        keep[candidate] = True     # the pivot itself is never dropped here
        survivors = survivors[keep]
        costs = costs[keep]
        candidate = np.sum(keep[:candidate]) + 1
    if not return_mask:
        return survivors
    mask = np.zeros(total, dtype=bool)
    mask[survivors] = True
    return mask
def prescription_cases(output: Union[str, None] = "presc-cases.csv") -> pd.DataFrame:
    """Load the per-NPI prescription files and build a log case-count table.

    Each CSV in the prescriptions directory is keyed by the NPI string encoded
    in its file name.  Case counts are normalised against the baseline plan
    '332423242324', summed over time, zeros replaced by one and the result
    log-transformed (NaNs set to zero).

    :param output: path of a CSV file to write the table to (falsy to skip writing)
    :return: DataFrame of log-normalised case counts indexed by NPI string
    """
    pattern = os.path.join(ROOT_DIR, "..", "..", "prescriptions/*2021-01-28.csv")
    files = sorted(glob(pattern))
    tables = {}
    for fname in tqdm(files):
        key = os.path.basename(fname).split("-")[0]
        tables[key] = pd.read_csv(fname, parse_dates=["Date"], index_col=["Date"])
    baseline = tables['332423242324']
    totals = {key: (frame - baseline).sum() for key, frame in tables.items()}
    result = pd.DataFrame(totals).T
    result.replace(0, 1, inplace=True)
    result = np.log(result)
    result.fillna(0, inplace=True)
    if output:
        result.to_csv(output)
    return result
def training_set(df: pd.DataFrame, region: str) -> Tuple[np.ndarray, np.ndarray]:
    """Build an (X, y) regression set for one region.

    The DataFrame index holds NPI strings (one digit per NPI); each string is
    expanded into a row of integer features, and the column *region* supplies
    the regression targets.

    :param df: table whose index entries are digit strings
    :param region: column name to use as the target variable
    :return: feature matrix X and target vector y
    """
    column = df.loc[:, region]
    features = np.array([list(map(int, code)) for code in column.index])
    targets = column.values
    return features, targets
class SHC(object):
    """Stochastic hill climbing over discrete NPI vectors.

    A regressor is fitted to (NPI vector -> objective) pairs and then used as
    a surrogate: starting from random points, the climber repeatedly moves to
    an improving neighbour (one NPI changed) until 32768 distinct vectors have
    been evaluated; the evaluations are cached in ``visited_points``.
    """

    def __init__(self, regressor: "RegressorMixin") -> None:
        self._regressor = regressor
        # maximum allowed value for each of the 12 NPIs
        self._max_value = [int(x) for x in "332423242324"]
        self._scnd_best = None  # runner-up improving move, used as a restart point

    @property
    def max_value(self):
        return self._max_value

    @property
    def regressor(self):
        return self._regressor

    @property
    def visited_points(self):
        return self._visited

    def fit(self, X: np.ndarray, y: np.ndarray) -> "SHC":
        """Fit the surrogate regressor and explore the search space.

        :param X: matrix of NPI vectors
        :param y: objective values for the rows of X
        :return: self
        """
        self.regressor.fit(X, y)
        hy = self.regressor.predict(X)
        # seed the cache of evaluated points with the training data
        self._visited = {self.id(x): v for x, v in zip(X, hy)}
        ele, fit = self.random()
        for _ in tqdm(range(len(self.visited_points), 32768)):
            next = self.next(ele, fit)
            if next is None:
                # local optimum: restart from the stored runner-up, else randomly
                if self._scnd_best is not None:
                    next = self._scnd_best
                    self._scnd_best = None
                else:
                    next = self.random()
            ele, fit = next
            self.visited_points[self.id(ele)] = fit
        return self

    def next(self, ele: np.ndarray, fit: float) -> Union[None, Tuple[np.ndarray, float]]:
        """Return a random improving neighbour of *ele*, or None if none exists."""
        elements = self.neighbors(ele)
        fits = self.fitness(elements)
        index = np.where(fits < fit)[0]
        if len(index) == 0:
            return None
        np.random.shuffle(index)
        if len(index) >= 2:
            # remember a second improving move for a later restart
            self._scnd_best = elements[index[1]], fits[index[1]]
        return elements[index[0]], fits[index[0]]

    @staticmethod
    def id(element: np.ndarray) -> str:
        """Encode an NPI vector as a string key, e.g. [3, 3, 2] -> "332"."""
        return "".join(map(str, element))

    def fitness(self, element: np.ndarray) -> float:
        """Predict the objective for a single vector or a batch of vectors."""
        return self.regressor.predict(np.atleast_2d(element))

    def random(self):
        """Draw a uniformly random, not-yet-visited vector and evaluate it."""
        for _ in range(100):
            ind = [randint(x + 1) for x in self.max_value]
            key = self.id(ind)
            if key not in self.visited_points:
                fit = self.fitness(ind)[0]
                self.visited_points[key] = fit
                return ind, fit
        raise Exception("Could not find any more points")

    def neighbors(self, element: np.ndarray) -> np.ndarray:
        """List all unvisited vectors that differ from *element* in exactly one NPI."""
        n = len(element)
        res = []
        # BUG FIX: the original used set(self.id(element)), i.e. the set of the
        # key's *characters*, so the full key never matched; use a one-element
        # set containing the key itself.
        visited = {self.id(element)}
        for k in range(n):
            new = deepcopy(element)
            lst = list(range(self.max_value[k] + 1))
            np.random.shuffle(lst)
            # pick a random value different from the current one
            value = lst[0] if lst[0] != element[k] else lst[1]
            new[k] = value
            key = self.id(new)
            if key in visited or key in self.visited_points:
                continue
            res.append(new)
            visited.add(key)
        return res
class MSHC(SHC):
    """Multi-objective stochastic hill climbing over (cases, stringency cost).

    Starting from an initial Pareto front ``npis_pf`` (key -> [cases, cost]),
    neighbours of each front member are explored and the front is updated to
    keep only non-dominated points.
    """
    def __init__(self, weights: np.ndarray,
                 npis_pf: OrderedDict,
                 hist: set,
                 **kwargs) -> None:
        super(MSHC, self).__init__(**kwargs)
        self._weights = weights  # per-NPI stringency weights
        self._npis_pf = npis_pf  # current Pareto front: key -> [cases, cost]
        self._visited = hist  # keys that must not be revisited
    @property
    def weights(self):
        return self._weights
    def fitness(self, element: np.ndarray) -> np.ndarray:
        """Return one (cases, weighted stringency cost) row per input vector."""
        _ = np.atleast_2d(element)
        cases = self.regressor.predict(_)
        cost = (self.weights * _).sum(axis=1)
        return np.vstack([cases, cost]).T
    def fit(self, X, y):
        """Fit the surrogate regressor, then refine every point of the front."""
        self.regressor.fit(X, y)
        points = list(self._npis_pf.keys())
        for point in points:
            # iter() may delete dominated points, so re-check membership
            if point not in self._npis_pf:
                continue
            self.iter(point)
        return self
    def iter(self, point):
        """Hill-climb from *point*, folding non-dominated neighbours into the front."""
        fit = self._npis_pf[point]
        for _ in range(100):
            point = list(map(int, point))
            neighbors = self.neighbors(np.array(point))
            if len(neighbors) == 0:
                return
            fits = self.fitness(neighbors)
            # row 0 of the stacked matrix is the current point, rows 1.. are
            # the neighbours -- hence the i-1 / index-1 offsets below
            index = is_pareto_efficient(np.vstack([fit, fits]),
                                        return_mask=False)
            if index.shape[0] == 1 and index[0] == 0:
                return
            elif index.shape[0] > 1:
                # NOTE(review): this assumes index[0] is the current point (0);
                # if the current point is itself dominated, a non-dominated
                # neighbour is discarded here -- confirm this is intended.
                index = index[1:]
            np.random.shuffle(index)
            for i in index:
                key = "".join(map(str, neighbors[i-1]))
                self._npis_pf[key] = fits[i-1].tolist()
            # prune the front down to its non-dominated members
            _ = is_pareto_efficient(np.array(list(self._npis_pf.values())))
            keys = list(self._npis_pf.keys())
            for k, flag in zip(keys, _):
                if not flag:
                    del self._npis_pf[k]
            point = neighbors[index[0] - 1]
            point = "".join(map(str, point))
            fit = fits[index[0] - 1]
def run(index):
    """Fit a hill-climbing surrogate for one country and persist its visited points.

    :param index: position of the country within the sorted column list of
        the prescription table; also used to name the output pickle
    """
    from microtc.utils import save_model
    table = prescription_cases()
    countries = sorted(table.columns)
    country = countries[index]
    X, y = training_set(table, country)
    climber = SHC(RandomForestRegressor())
    try:
        climber.fit(X, y)
    except ValueError:
        print(country, "*******")
        return
    target = os.path.join(ROOT_DIR, "prescriptions", str(index) + ".pickle.gz")
    save_model(climber.visited_points, target)
def _policy(args):
    """Worker: compute up to NUM_PRESCRIPTIONS Pareto-efficient NPI plans for one country.

    :param args: tuple (weights DataFrame, country GeoID, country index,
        feature matrix X, target vector y)
    :return: [country, list of NPI key strings] ordered by predicted cases
    """
    weights, country, country_id, X, y = args
    # stringency weights for this country's NPIs
    w = weights.loc[weights.GeoID == country, IP_COLS].values
    prescriptions_path = os.path.join(ROOT_DIR,
                                      "2021-01-28-prescriptions/%s.pickle.gz" % country_id)
    presc = load_model(prescriptions_path)
    # objective pair per NPI key: [predicted cases, weighted stringency cost]
    cost = {k: [v, (np.array([int(i) for i in k]) * w).sum()] for k, v in presc.items()}
    npis = list(cost.keys())
    npis.sort(key=lambda x: cost[x][0])
    _ = np.array([cost[k] for k in npis])
    index = is_pareto_efficient(_, return_mask=False)
    _ = OrderedDict()
    for x in index:
        key = npis[x]
        _[key] = cost[key]
    # refine the initial front with multi-objective hill climbing
    mshc = MSHC(w, _, set(npis),
                regressor=RandomForestRegressor()).fit(X, y)
    ss = list(mshc._npis_pf.items())
    ss.sort(key=lambda x: x[1][0])
    if len(ss) > 10:
        # BUG FIX: np.int was removed in NumPy >= 1.24; use the builtin int.
        # Subsample 10 evenly spaced interior points of the front.
        ind2 = np.linspace(1, len(ss) - 2, 10).round().astype(int)
        npis = [ss[i][0] for i in ind2]
    else:
        npis = [x[0] for x in ss]
    return [country, npis]
def policy(weights):
    """Compute prescription policies in parallel for every region in *weights*.

    :param weights: DataFrame of per-region NPI weights; a GeoID column is
        added in place via ``add_geo_id``
    :return: dict mapping GeoID to its list of NPI plans
    """
    from multiprocessing import Pool, cpu_count
    add_geo_id(weights)
    known_regions = set(weights.GeoID)
    table = prescription_cases()
    regions = table.columns.sort_values()
    datasets = [training_set(table, region) for region in regions]
    # the region index (used to locate the pickle file) is taken over ALL
    # regions, before filtering to those present in the weights table
    tasks = []
    for region_id, (region, (X, y)) in enumerate(zip(regions, datasets)):
        if region in known_regions:
            tasks.append((weights, region, region_id, X, y))
    output = dict()
    with Pool(cpu_count()) as pool:
        for region, plans in tqdm(pool.imap_unordered(_policy, tasks), total=len(tasks)):
            output[region] = plans
    return output
def prescribe(start_date_str: str,
              end_date_str: str,
              path_to_hist_file: str,
              weigths: np.ndarray) -> pd.DataFrame:
    """Generate a prescription table for every region over a date range.

    For each GeoID in *weigths* the precomputed policies are expanded to one
    row per (region, date, prescription index).  Regions with fewer than
    NUM_PRESCRIPTIONS candidate plans repeat their top-ranked plan to pad the
    list.

    :param start_date_str: first prescription date, 'YYYY-MM-DD'
    :param end_date_str: last prescription date, 'YYYY-MM-DD'
    :param path_to_hist_file: unused here; kept for interface compatibility
    :param weigths: [sic] per-region NPI weights with CountryName/RegionName columns
    :return: long-format DataFrame with one column per NPI
    """
    # Generate prescriptions
    presc = policy(weigths)
    start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')
    end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')
    prescription_dict = {
        'CountryName': [],
        'RegionName': [],
        'Date': [],
        'PrescriptionIndex': []
    }
    for ip in IP_COLS:
        prescription_dict[ip] = []
    for geoid, df in weigths.groupby("GeoID"):
        country_name = df.iloc[0].CountryName
        region_name = df.iloc[0].RegionName
        data = presc[geoid]
        if len(data) < NUM_PRESCRIPTIONS:
            # pad with the top-ranked plan so every region has the same count
            data += [data[0] for _ in range(len(data), NUM_PRESCRIPTIONS)]
        for prescription_idx, prescriptor in enumerate(data):
            for date in pd.date_range(start_date, end_date):
                date_str = date.strftime("%Y-%m-%d")
                prescription_dict['CountryName'].append(country_name)
                prescription_dict['RegionName'].append(region_name)
                prescription_dict['Date'].append(date_str)
                prescription_dict['PrescriptionIndex'].append(prescription_idx)
                for npi, value in zip(IP_COLS, prescriptor):
                    prescription_dict[npi].append(int(value))
    # BUG FIX: np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling
    df = (pd.DataFrame(prescription_dict).replace("", np.nan))
    return df
# NOTE: the trailing `and False` deliberately disables this batch driver;
# remove it to refit the SHC surrogates for every country in parallel.
if __name__ == "__main__" and False:
    from multiprocessing import Pool, cpu_count
    df = prescription_cases()
    cols = list(df.columns)
    cols.sort()
    with Pool(processes=cpu_count()) as pool:
        # consume the iterator; the workers write their results to disk
        [_ for _ in tqdm(pool.imap_unordered(run, range(len(cols))),
                         total=len(cols))]
| [
"numpy.sum",
"pandas.read_csv",
"numpy.random.randint",
"numpy.arange",
"os.path.join",
"numpy.atleast_2d",
"pandas.DataFrame",
"multiprocessing.cpu_count",
"os.path.abspath",
"numpy.random.shuffle",
"tqdm.tqdm",
"copy.deepcopy",
"pandas.date_range",
"os.path.basename",
"microtc.utils.lo... | [((532, 557), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (547, 557), False, 'import os\n'), ((1182, 1207), 'numpy.arange', 'np.arange', (['costs.shape[0]'], {}), '(costs.shape[0])\n', (1191, 1207), True, 'import numpy as np\n'), ((2555, 2576), 'numpy.log', 'np.log', (['presc_norm_df'], {}), '(presc_norm_df)\n', (2561, 2576), True, 'import numpy as np\n'), ((8279, 8355), 'os.path.join', 'os.path.join', (['ROOT_DIR', "('2021-01-28-prescriptions/%s.pickle.gz' % country_id)"], {}), "(ROOT_DIR, '2021-01-28-prescriptions/%s.pickle.gz' % country_id)\n", (8291, 8355), False, 'import os\n'), ((8406, 8436), 'microtc.utils.load_model', 'load_model', (['prescriptions_path'], {}), '(prescriptions_path)\n', (8416, 8436), False, 'from microtc.utils import load_model\n'), ((8603, 8636), 'numpy.array', 'np.array', (['[cost[k] for k in npis]'], {}), '([cost[k] for k in npis])\n', (8611, 8636), True, 'import numpy as np\n'), ((8699, 8712), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8710, 8712), False, 'from collections import OrderedDict\n'), ((9220, 9239), 'covid_xprize.nixtamalai.helpers.add_geo_id', 'add_geo_id', (['weights'], {}), '(weights)\n', (9230, 9239), False, 'from covid_xprize.nixtamalai.helpers import add_geo_id\n'), ((10026, 10075), 'pandas.to_datetime', 'pd.to_datetime', (['start_date_str'], {'format': '"""%Y-%m-%d"""'}), "(start_date_str, format='%Y-%m-%d')\n", (10040, 10075), True, 'import pandas as pd\n'), ((10091, 10138), 'pandas.to_datetime', 'pd.to_datetime', (['end_date_str'], {'format': '"""%Y-%m-%d"""'}), "(end_date_str, format='%Y-%m-%d')\n", (10105, 10138), True, 'import pandas as pd\n'), ((1390, 1437), 'numpy.any', 'np.any', (['(costs < costs[next_point_index])'], {'axis': '(1)'}), '(costs < costs[next_point_index], axis=1)\n', (1396, 1437), True, 'import numpy as np\n'), ((1756, 1786), 'numpy.zeros', 'np.zeros', (['n_points'], {'dtype': 'bool'}), '(n_points, dtype=bool)\n', (1764, 
1786), True, 'import numpy as np\n'), ((2016, 2083), 'os.path.join', 'os.path.join', (['ROOT_DIR', '""".."""', '""".."""', '"""prescriptions/*2021-01-28.csv"""'], {}), "(ROOT_DIR, '..', '..', 'prescriptions/*2021-01-28.csv')\n", (2028, 2083), False, 'import os\n'), ((2183, 2243), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'parse_dates': "['Date']", 'index_col': "['Date']"}), "(fname, parse_dates=['Date'], index_col=['Date'])\n", (2194, 2243), True, 'import pandas as pd\n'), ((2462, 2486), 'pandas.DataFrame', 'pd.DataFrame', (['presc_norm'], {}), '(presc_norm)\n', (2474, 2486), True, 'import pandas as pd\n'), ((4282, 4306), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (4299, 4306), True, 'import numpy as np\n'), ((6048, 6070), 'numpy.atleast_2d', 'np.atleast_2d', (['element'], {}), '(element)\n', (6061, 6070), True, 'import numpy as np\n'), ((7763, 7786), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (7784, 7786), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1655, 1705), 'numpy.sum', 'np.sum', (['nondominated_point_mask[:next_point_index]'], {}), '(nondominated_point_mask[:next_point_index])\n', (1661, 1705), True, 'import numpy as np\n'), ((2278, 2289), 'tqdm.tqdm', 'tqdm', (['FILES'], {}), '(FILES)\n', (2282, 2289), False, 'from tqdm import tqdm\n'), ((4198, 4218), 'numpy.where', 'np.where', (['(fits < fit)'], {}), '(fits < fit)\n', (4206, 4218), True, 'import numpy as np\n'), ((4643, 4665), 'numpy.atleast_2d', 'np.atleast_2d', (['element'], {}), '(element)\n', (4656, 4665), True, 'import numpy as np\n'), ((5224, 5241), 'copy.deepcopy', 'deepcopy', (['element'], {}), '(element)\n', (5232, 5241), False, 'from copy import deepcopy\n'), ((5307, 5329), 'numpy.random.shuffle', 'np.random.shuffle', (['lst'], {}), '(lst)\n', (5324, 5329), True, 'import numpy as np\n'), ((6174, 6198), 'numpy.vstack', 'np.vstack', (['[cases, cost]'], {}), '([cases, cost])\n', (6183, 6198), True, 
'import numpy as np\n'), ((9651, 9662), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (9660, 9662), False, 'from multiprocessing import Pool, cpu_count\n'), ((10704, 10739), 'pandas.date_range', 'pd.date_range', (['start_date', 'end_date'], {}), '(start_date, end_date)\n', (10717, 10739), True, 'import pandas as pd\n'), ((11204, 11235), 'pandas.DataFrame', 'pd.DataFrame', (['prescription_dict'], {}), '(prescription_dict)\n', (11216, 11235), True, 'import pandas as pd\n'), ((4738, 4752), 'numpy.random.randint', 'randint', (['(x + 1)'], {}), '(x + 1)\n', (4745, 4752), False, 'from numpy.random import randint\n'), ((6623, 6638), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (6631, 6638), True, 'import numpy as np\n'), ((6782, 6804), 'numpy.vstack', 'np.vstack', (['[fit, fits]'], {}), '([fit, fits])\n', (6791, 6804), True, 'import numpy as np\n'), ((11455, 11466), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (11464, 11466), False, 'from multiprocessing import Pool, cpu_count\n'), ((2123, 2146), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (2139, 2146), False, 'import os\n'), ((7029, 7053), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (7046, 7053), True, 'import numpy as np\n'), ((8837, 8860), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (8858, 8860), False, 'from sklearn.ensemble import RandomForestRegressor\n')] |
"""A parser for reading VLBI data from vgosDb files
Description:
------------
Reads data from files in the vgosDb files as defined in [1]. The data is organized in multiple smaller database
files based on netCDF.
References:
-----------
..[1] vgosDb format
ftp://gemini.gsfc.nasa.gov/pub/misc/jmg/VLBI_Structure_2013Jun11.pdf
    (TODO: this FTP link is no longer reachable; replace it with a current URL
    for the vgosDb format specification)
"""
# Standard library imports
from datetime import datetime, timedelta
# External library imports
import numpy as np
from scipy import interpolate
# Midgard imports
from midgard.dev import plugins
from midgard.math.constant import constant
from midgard.math.unit import Unit
from midgard.parsers._parser import Parser
from midgard import parsers
# Where imports
from where.lib import log
@plugins.register
class VgosDbParser(Parser):
    """A parser for reading VLBI data from a VGOS database (vgosDb).

    The vgosDb wrapper file references a set of netCDF files. This parser
    walks the wrapper, reads each referenced netCDF file into the nested
    ``self.raw`` dictionary, and finally flattens the relevant quantities
    into ``self.data`` as per-observation arrays of length num_obs.
    """
    # Dictionary structure for each field:
    # filestub: name of netcdf file
    # variable: name of variable inside netcdf file
    # factor: used to convert from unit given in file to desired unit
    # nan_value: value used to indicate missing data
    # unit: desired unit after conversion
    _STATION_FIELDS = {
        "temperature": {"filestub": "Met", "variable": "TempC", "factor": 1, "nan_value": -999, "unit": ("Celcius",)},
        "pressure": {"filestub": "Met", "variable": "AtmPres", "factor": 1, "nan_value": -999, "unit": ("hPa",)},
        "cable_delay": {
            "filestub": "Cal-Cable",
            "variable": "Cal-Cable",
            # factor c converts the stored delay to meters (unit below is "meter",
            # so the file value is presumably in seconds -- see header comment)
            "factor": constant.c,
            "nan_value": np.nan,
            "unit": ("meter",),
        },
    }
    def __init__(self, file_path, encoding=None):
        super().__init__(file_path, encoding)
        # Nested dict mirroring the vgosDb directory/netCDF layout; filled by _parse_file
        self.raw = {}
    def read_data(self):
        """Parse the vgosDb wrapper file.

        self.data will be populated with information from the netcdf files
        referenced by the wrapper.
        """
        with open(self.file_path, mode="rt") as fid:
            self._parse_file(fid)
        self._organize_data()
    def _parse_file(self, fid):
        """Scan the wrapper for top-level ``Begin <Block>`` sections and parse each."""
        for line in fid:
            # Skip empty lines and lines starting with "!"
            if not line or line.startswith("!"):
                continue
            line = line.split()
            if line and "begin" in line[0].lower():
                self._parse_block(fid, line[1], name=" ".join(line[2:]))
    def _parse_block(self, fid, block, name="", directory=""):
        """Parse one Begin/End section of the wrapper file, recursing into nested blocks.

        Args:
            fid:        Open wrapper file handle, consumed line by line.
            block:      Name of the current Begin/End block.
            name:       Optional name following the block keyword.
            directory:  Current Default_Dir, used to locate referenced netCDF files.
        """
        # print("Parsing {} {}".format(block, name))
        for line in fid:
            if not line or line.startswith("!"):
                continue
            line = line.split()
            if line[0].lower().startswith("end") and line[1] == block:
                # print("Finished {} {}".format(block, name))
                return
            elif line[0].lower().startswith("begin"):
                # recursive call for nested Begin/End blocks
                self._parse_block(fid, line[1], name=" ".join(line[2:]), directory=directory)
            elif line[0].lower().startswith("default_dir"):
                directory = line[1]
            elif line[0].endswith(".nc"):
                # Reference to a netCDF file: read it into the nested self.raw structure
                file_path = self.file_path.parents[0] / directory / line[0]
                if directory:
                    data = self.raw.setdefault(directory, {})
                else:
                    data = self.raw
                # Underscore-separated stem parts refine the nesting; parts starting
                # with "b" add a sub-level keyed by the remainder (e.g. "bX" -> "X")
                nc_name = file_path.stem.split("_")
                nc_stub = nc_name.pop(0)
                data = data.setdefault(nc_stub, {})
                for part in nc_name:
                    if part.startswith("b"):
                        data = data.setdefault(part[1:], {})
                # print("Parse {}".format(file_path))
                netcdf_data = parsers.parse_file("vlbi_netcdf", file_path=file_path).as_dict()
                if "TimeUTC" in file_path.stem:
                    # Convert raw YMDHM/Second arrays to ISO strings and seconds since first epoch
                    self._parse_time(netcdf_data)
                data.update(netcdf_data)
            else:
                # Plain "key value ..." entry belonging to the current block
                data = self.raw.setdefault(block, {})
                if name:
                    data = data.setdefault(name, {})
                data[line[0]] = " ".join(line[1:])
    def _organize_data(self):
        """Copy content from self.raw to self.data and convert all data to arrays of num_obs length.

        Missing quantities are replaced by zeros/NaN arrays and logged, so the
        resulting self.data always has a consistent set of keys.
        """
        meta = self.data.setdefault("meta", {})
        meta["session_code"] = self.raw["Session"].get("Session")
        units = meta.setdefault("units", {})
        # Epoch info
        self.data["time"] = self.raw["Observables"]["TimeUTC"]["time"]
        num_obs = len(self.data["time"])
        # Baseline array holds both stations per observation: column 0 and 1
        self.data["station_1"] = self.raw["Observables"]["Baseline"]["Baseline"].reshape(num_obs, -1)[:, 0]
        self.data["station_2"] = self.raw["Observables"]["Baseline"]["Baseline"].reshape(num_obs, -1)[:, 1]
        self.data["source"] = self.raw["Observables"]["Source"]["Source"]
        # Obs info
        try:
            self.data["observed_delay_ferr"] = self.raw["Observables"]["GroupDelay"]["X"]["GroupDelaySig"] * constant.c
        except KeyError:
            self.data["observed_delay_ferr"] = np.zeros(num_obs)
            log.error("Missing group delay formal error information")
        units["observed_delay_ferr"] = ("meter",)
        try:
            self.data["data_quality"] = self.raw["ObsEdit"]["Edit"]["DelayFlag"]
        except KeyError:
            self.data["data_quality"] = np.full(num_obs, np.nan)
            log.warn("Missing data quality information")
        try:
            self.data["observed_delay"] = self.raw["ObsEdit"]["GroupDelayFull"]["X"]["GroupDelayFull"] * constant.c
        except KeyError:
            self.data["observed_delay"] = np.full(num_obs, np.nan)
            log.error("Missing full group delay information")
        units["observed_delay"] = ("meter",)
        try:
            self.data["iono_delay"] = (
                self.raw["ObsDerived"]["Cal-SlantPathIonoGroup"]["X"]["Cal-SlantPathIonoGroup"].reshape(num_obs, -1)[
                    :, 0
                ]
                * constant.c
            )
        except KeyError:
            # Fall back to differential TEC observables if the ionosphere delay is absent
            try:
                self.data["dtec"] = self.raw["Observables"]["DiffTec"]["diffTec"]
                units["dtec"] = ("TECU",)
                self.data["ref_freq"] = self.raw["Observables"]["RefFreq"]["X"]["RefFreq"] * Unit.MHz2Hz
                units["ref_freq"] = ("Hz",)
            except KeyError:
                log.warn("Missing ionosphere delay information")
                self.data["iono_delay"] = np.full(num_obs, np.nan)
                units["iono_delay"] = ("meter",)
        try:
            self.data["iono_delay_ferr"] = (
                self.raw["ObsDerived"]["Cal-SlantPathIonoGroup"]["X"]["Cal-SlantPathIonoGroupSigma"].reshape(
                    num_obs, -1
                )[:, 0]
                * constant.c
            )
        except KeyError:
            try:
                self.data["dtec_ferr"] = self.raw["Observables"]["DiffTec"]["diffTecStdDev"]  # Unit: TECU
                units["dtec_ferr"] = ("TECU",)
            except KeyError:
                # Only warn when iono delays exist but their errors are missing
                if not np.isnan(self.data["iono_delay"]).all():
                    log.warn("Missing ionosphere delay formal error information")
                self.data["iono_delay_ferr"] = np.full(num_obs, np.nan)
                units["iono_delay_ferr"] = ("meter",)
        try:
            self.data["iono_quality"] = self.raw["ObsDerived"]["Cal-SlantPathIonoGroup"]["X"][
                "Cal-SlantPathIonoGroupDataFlag"
            ]
        except KeyError:
            log.warn("Missing ionosphere quality information")
            self.data["iono_quality"] = np.full(num_obs, np.nan)
        # Station dependent info: interpolate per-station time series (met data,
        # cable delay) onto the observation epochs of each baseline end
        for field, params in self._STATION_FIELDS.items():
            self.data[field + "_1"] = np.zeros(len(self.data["time"]))
            self.data[field + "_2"] = np.zeros(len(self.data["time"]))
            for station in self.raw["Head"]["StationList"]:
                sta_idx_1 = self.data["station_1"] == station
                sta_idx_2 = self.data["station_2"] == station
                sta_key = station.replace(" ", "_")
                sta_time = self.raw[sta_key]["TimeUTC"]["sec_since_ref"]
                try:
                    sta_data = self.raw[sta_key][params["filestub"]][params["variable"]]
                    # Replace the file's missing-data sentinel with NaN
                    missing_idx = np.isclose(sta_data, params["nan_value"])
                    sta_data[missing_idx] = np.nan
                    if missing_idx.any():
                        log.warn(f"Missing {field} data for {station}")
                except KeyError:
                    sta_data = np.full(len(sta_time), np.nan)
                    log.warn(f"Missing all {field} data for {station}")
                if len(sta_data) == 1:
                    # Use constant function if there is only one data point
                    func = lambda _: sta_data[0]
                else:
                    # Linear interpolation, clamped to the first/last value outside the range
                    func = interpolate.interp1d(
                        sta_time,
                        sta_data,
                        bounds_error=False,
                        fill_value=(sta_data[0], sta_data[-1]),
                        assume_sorted=True,
                    )
                epochs_1 = self.raw["Observables"]["TimeUTC"]["sec_since_ref"][sta_idx_1]
                epochs_2 = self.raw["Observables"]["TimeUTC"]["sec_since_ref"][sta_idx_2]
                self.data[field + "_1"][sta_idx_1] = func(epochs_1) * params["factor"]
                self.data[field + "_2"][sta_idx_2] = func(epochs_2) * params["factor"]
            units[field + "_1"] = params["unit"]
            units[field + "_2"] = params["unit"]
    def _parse_time(self, time_dict):
        """Convert YMDHM/Second fields in-place to ISO strings and seconds since the first epoch.

        Pops "YMDHM" and "Second" from time_dict and adds "time" (ISO-formatted
        strings) and "sec_since_ref" (float seconds relative to the first epoch,
        which is stored in self.raw["dt_0"]).
        """
        part1 = time_dict.pop("YMDHM")
        part2 = time_dict.pop("Second")
        # For a few older sessions the time is given as 24:00 current day instead of 00:00 next day. Datetime does not support this
        idx_wrong_date_format = part1[:, 3] == 24
        part1[idx_wrong_date_format, 3] = 0
        epochs_dt = np.array([datetime(*t) + timedelta(seconds=dt) for t, dt in zip(part1,part2)])
        # Shift the 24:00 epochs forward to 00:00 of the next day
        epochs_dt[idx_wrong_date_format]+=timedelta(days=1)
        time_dict["time"] = [d.strftime("%Y-%m-%dT%H:%M:%S.%f") for d in epochs_dt]
        self.raw["dt_0"] = epochs_dt[0]
        time_dict["sec_since_ref"] = np.array([(dt - self.raw["dt_0"]).total_seconds() for dt in epochs_dt])
| [
"numpy.full",
"numpy.zeros",
"numpy.isnan",
"where.lib.log.error",
"datetime.datetime",
"midgard.parsers.parse_file",
"where.lib.log.warn",
"numpy.isclose",
"datetime.timedelta",
"scipy.interpolate.interp1d"
] | [((10038, 10055), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (10047, 10055), False, 'from datetime import datetime, timedelta\n'), ((4989, 5006), 'numpy.zeros', 'np.zeros', (['num_obs'], {}), '(num_obs)\n', (4997, 5006), True, 'import numpy as np\n'), ((5019, 5076), 'where.lib.log.error', 'log.error', (['"""Missing group delay formal error information"""'], {}), "('Missing group delay formal error information')\n", (5028, 5076), False, 'from where.lib import log\n'), ((5287, 5311), 'numpy.full', 'np.full', (['num_obs', 'np.nan'], {}), '(num_obs, np.nan)\n', (5294, 5311), True, 'import numpy as np\n'), ((5324, 5368), 'where.lib.log.warn', 'log.warn', (['"""Missing data quality information"""'], {}), "('Missing data quality information')\n", (5332, 5368), False, 'from where.lib import log\n'), ((5566, 5590), 'numpy.full', 'np.full', (['num_obs', 'np.nan'], {}), '(num_obs, np.nan)\n', (5573, 5590), True, 'import numpy as np\n'), ((5603, 5652), 'where.lib.log.error', 'log.error', (['"""Missing full group delay information"""'], {}), "('Missing full group delay information')\n", (5612, 5652), False, 'from where.lib import log\n'), ((6404, 6428), 'numpy.full', 'np.full', (['num_obs', 'np.nan'], {}), '(num_obs, np.nan)\n', (6411, 6428), True, 'import numpy as np\n'), ((7154, 7178), 'numpy.full', 'np.full', (['num_obs', 'np.nan'], {}), '(num_obs, np.nan)\n', (7161, 7178), True, 'import numpy as np\n'), ((7435, 7485), 'where.lib.log.warn', 'log.warn', (['"""Missing ionosphere quality information"""'], {}), "('Missing ionosphere quality information')\n", (7443, 7485), False, 'from where.lib import log\n'), ((7526, 7550), 'numpy.full', 'np.full', (['num_obs', 'np.nan'], {}), '(num_obs, np.nan)\n', (7533, 7550), True, 'import numpy as np\n'), ((8239, 8280), 'numpy.isclose', 'np.isclose', (['sta_data', "params['nan_value']"], {}), "(sta_data, params['nan_value'])\n", (8249, 8280), True, 'import numpy as np\n'), ((8827, 8952), 
'scipy.interpolate.interp1d', 'interpolate.interp1d', (['sta_time', 'sta_data'], {'bounds_error': '(False)', 'fill_value': '(sta_data[0], sta_data[-1])', 'assume_sorted': '(True)'}), '(sta_time, sta_data, bounds_error=False, fill_value=(\n sta_data[0], sta_data[-1]), assume_sorted=True)\n', (8847, 8952), False, 'from scipy import interpolate\n'), ((9927, 9939), 'datetime.datetime', 'datetime', (['*t'], {}), '(*t)\n', (9935, 9939), False, 'from datetime import datetime, timedelta\n'), ((9942, 9963), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'dt'}), '(seconds=dt)\n', (9951, 9963), False, 'from datetime import datetime, timedelta\n'), ((6316, 6364), 'where.lib.log.warn', 'log.warn', (['"""Missing ionosphere delay information"""'], {}), "('Missing ionosphere delay information')\n", (6324, 6364), False, 'from where.lib import log\n'), ((8398, 8445), 'where.lib.log.warn', 'log.warn', (['f"""Missing {field} data for {station}"""'], {}), "(f'Missing {field} data for {station}')\n", (8406, 8445), False, 'from where.lib import log\n'), ((8561, 8612), 'where.lib.log.warn', 'log.warn', (['f"""Missing all {field} data for {station}"""'], {}), "(f'Missing all {field} data for {station}')\n", (8569, 8612), False, 'from where.lib import log\n'), ((7048, 7109), 'where.lib.log.warn', 'log.warn', (['"""Missing ionosphere delay formal error information"""'], {}), "('Missing ionosphere delay formal error information')\n", (7056, 7109), False, 'from where.lib import log\n'), ((3629, 3683), 'midgard.parsers.parse_file', 'parsers.parse_file', (['"""vlbi_netcdf"""'], {'file_path': 'file_path'}), "('vlbi_netcdf', file_path=file_path)\n", (3647, 3683), False, 'from midgard import parsers\n'), ((6987, 7020), 'numpy.isnan', 'np.isnan', (["self.data['iono_delay']"], {}), "(self.data['iono_delay'])\n", (6995, 7020), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import dgl
from collections import defaultdict as ddict
from ordered_set import OrderedSet
class TrainDataset(Dataset):
    """
    Training Dataset class.

    Parameters
    ----------
    triples: The triples used for training the model; each entry is a dict
        with 'triple' (sub, rel, -1) and 'label' (ids of all valid objects)
    num_ent: Number of entities in the knowledge graph
    lbl_smooth: Label smoothing

    Returns
    -------
    A training Dataset class instance used by DataLoader
    """
    def __init__(self, triples, num_ent, lbl_smooth):
        self.triples = triples
        self.num_ent = num_ent
        self.lbl_smooth = lbl_smooth
        self.entities = np.arange(self.num_ent, dtype=np.int32)

    def __len__(self):
        return len(self.triples)

    def __getitem__(self, idx):
        """Return (triple LongTensor, smoothed multi-hot label FloatTensor)."""
        ele = self.triples[idx]
        triple, label = torch.LongTensor(ele['triple']), np.int32(ele['label'])
        trp_label = self.get_label(label)
        # label smoothing: shrink the hard targets and add a uniform 1/num_ent term
        if self.lbl_smooth != 0.0:
            trp_label = (1.0 - self.lbl_smooth) * trp_label + (1.0 / self.num_ent)
        return triple, trp_label

    @staticmethod
    def collate_fn(data):
        """Stack a list of (triple, label) pairs into batched tensors."""
        triple = torch.stack([t for t, _ in data], dim=0)
        trp_label = torch.stack([lbl for _, lbl in data], dim=0)
        return triple, trp_label

    def get_label(self, label):
        """Multi-hot encoding over all entities.

        For edges that exist in the graph the entry is 1.0, otherwise 0.0.
        """
        y = np.zeros([self.num_ent], dtype=np.float32)
        # Vectorized multi-hot assignment instead of a per-element Python loop
        y[np.asarray(label, dtype=np.int64)] = 1.0
        return torch.FloatTensor(y)
class TestDataset(Dataset):
    """
    Evaluation Dataset class.

    Wraps evaluation triples so that a DataLoader can batch them during
    model evaluation.

    Parameters
    ----------
    triples: The triples used for evaluating the model
    num_ent: Number of entities in the knowledge graph

    Returns
    -------
    An evaluation Dataset class instance used by DataLoader for model evaluation
    """
    def __init__(self, triples, num_ent):
        self.triples = triples
        self.num_ent = num_ent

    def __len__(self):
        return len(self.triples)

    def __getitem__(self, idx):
        entry = self.triples[idx]
        triple = torch.LongTensor(entry['triple'])
        label = self.get_label(np.int32(entry['label']))
        return triple, label

    @staticmethod
    def collate_fn(data):
        # Unzip the (triple, label) pairs and stack each side into one batch tensor
        triples, labels = zip(*data)
        return torch.stack(triples, dim=0), torch.stack(labels, dim=0)

    def get_label(self, label):
        # Multi-hot encoding: 1.0 where the edge exists in the graph, 0.0 elsewhere
        y = np.zeros([self.num_ent], dtype=np.float32)
        for ent_id in label:
            y[ent_id] = 1.0
        return torch.FloatTensor(y)
class Data(object):
    """Loads a knowledge-graph dataset from disk and builds the dgl graph and dataloaders."""
    def __init__(self, dataset, lbl_smooth, num_workers, batch_size):
        """
        Reading in raw triples and converts it into a standard format.

        Parameters
        ----------
        dataset:           The name of the dataset
        lbl_smooth:        Label smoothing
        num_workers:       Number of workers of dataloaders
        batch_size:        Batch size of dataloaders

        Returns
        -------
        self.ent2id:            Entity to unique identifier mapping
        self.rel2id:            Relation to unique identifier mapping
        self.id2ent:            Inverse mapping of self.ent2id
        self.id2rel:            Inverse mapping of self.rel2id
        self.num_ent:           Number of entities in the knowledge graph
        self.num_rel:           Number of relations in the knowledge graph
        self.g:                 The dgl graph constucted from the edges in the traing set and all the entities in the knowledge graph
        self.data['train']:     Stores the triples corresponding to training dataset
        self.data['valid']:     Stores the triples corresponding to validation dataset
        self.data['test']:      Stores the triples corresponding to test dataset
        self.data_iter:         The dataloader for different data splits
        """
        self.dataset = dataset
        self.lbl_smooth = lbl_smooth
        self.num_workers = num_workers
        self.batch_size = batch_size
        #read in raw data and get mappings (tab-separated "sub rel obj" lines, lower-cased)
        ent_set, rel_set = OrderedSet(), OrderedSet()
        for split in ['train', 'test', 'valid']:
            for line in open('./{}/{}.txt'.format(self.dataset, split)):
                sub, rel, obj = map(str.lower, line.strip().split('\t'))
                ent_set.add(sub)
                rel_set.add(rel)
                ent_set.add(obj)
        self.ent2id = {ent: idx for idx, ent in enumerate(ent_set)}
        self.rel2id = {rel: idx for idx, rel in enumerate(rel_set)}
        #every relation also gets a reversed counterpart with id shifted by num_rel
        self.rel2id.update({rel+'_reverse': idx+len(self.rel2id) for idx, rel in enumerate(rel_set)})
        self.id2ent = {idx: ent for ent, idx in self.ent2id.items()}
        self.id2rel = {idx: rel for rel, idx in self.rel2id.items()}
        self.num_ent = len(self.ent2id)
        self.num_rel = len(self.rel2id) // 2
        #read in ids of subjects, relations, and objects for train/test/valid
        self.data = ddict(list) #stores the triples
        sr2o = ddict(set) #The key of sr2o is (subject, relation), and the items are all the successors following (subject, relation)
        src=[]
        dst=[]
        rels = []
        inver_src = []
        inver_dst = []
        inver_rels = []
        for split in ['train', 'test', 'valid']:
            for line in open('./{}/{}.txt'.format(self.dataset, split)):
                sub, rel, obj = map(str.lower, line.strip().split('\t'))
                sub_id, rel_id, obj_id = self.ent2id[sub], self.rel2id[rel], self.ent2id[obj]
                self.data[split].append((sub_id, rel_id, obj_id))
                if split == 'train':
                    sr2o[(sub_id, rel_id)].add(obj_id)
                    sr2o[(obj_id, rel_id+self.num_rel)].add(sub_id) #append the reversed edges
                    src.append(sub_id)
                    dst.append(obj_id)
                    rels.append(rel_id)
                    inver_src.append(obj_id)
                    inver_dst.append(sub_id)
                    inver_rels.append(rel_id+self.num_rel)
        #construct dgl graph: original edges first, then all reversed edges
        src = src + inver_src
        dst = dst + inver_dst
        rels = rels + inver_rels
        self.g = dgl.graph((src, dst), num_nodes=self.num_ent)
        self.g.edata['etype'] = torch.Tensor(rels).long()
        #identify in and out edges (first half = original, second half = reversed)
        in_edges_mask = [True] * (self.g.num_edges()//2) + [False] * (self.g.num_edges()//2)
        out_edges_mask = [False] * (self.g.num_edges()//2) + [True] * (self.g.num_edges()//2)
        self.g.edata['in_edges_mask'] = torch.Tensor(in_edges_mask)
        self.g.edata['out_edges_mask'] = torch.Tensor(out_edges_mask)
        #Prepare train/valid/test data
        self.data = dict(self.data)
        self.sr2o = {k: list(v) for k, v in sr2o.items()} #store only the train data
        #extend sr2o with test/valid edges before snapshotting sr2o_all
        for split in ['test', 'valid']:
            for sub, rel, obj in self.data[split]:
                sr2o[(sub, rel)].add(obj)
                sr2o[(obj, rel+self.num_rel)].add(sub)
        self.sr2o_all = {k: list(v) for k, v in sr2o.items()} #store all the data
        self.triples = ddict(list)
        #training uses a dummy object (-1); the label holds all valid objects
        for (sub, rel), obj in self.sr2o.items():
            self.triples['train'].append({'triple':(sub, rel, -1), 'label': self.sr2o[(sub, rel)]})
        #evaluation triples: tail prediction uses (sub, rel), head prediction the reversed relation
        for split in ['test', 'valid']:
            for sub, rel, obj in self.data[split]:
                rel_inv = rel + self.num_rel
                self.triples['{}_{}'.format(split, 'tail')].append({'triple': (sub, rel, obj), 'label': self.sr2o_all[(sub, rel)]})
                self.triples['{}_{}'.format(split, 'head')].append({'triple': (obj, rel_inv, sub), 'label': self.sr2o_all[(obj, rel_inv)]})
        self.triples = dict(self.triples)
        def get_train_data_loader(split, batch_size, shuffle=True):
            #DataLoader over TrainDataset with label smoothing applied
            return DataLoader(
                TrainDataset(self.triples[split], self.num_ent, self.lbl_smooth),
                batch_size      = batch_size,
                shuffle         = shuffle,
                num_workers     = max(0, self.num_workers),
                collate_fn      = TrainDataset.collate_fn
            )
        def get_test_data_loader(split, batch_size, shuffle=True):
            #DataLoader over TestDataset (no label smoothing during evaluation)
            return DataLoader(
                TestDataset(self.triples[split], self.num_ent),
                batch_size      = batch_size,
                shuffle         = shuffle,
                num_workers     = max(0, self.num_workers),
                collate_fn      = TestDataset.collate_fn
            )
        #train/valid/test dataloaders
        self.data_iter = {
            'train':        get_train_data_loader('train', self.batch_size),
            'valid_head':   get_test_data_loader('valid_head', self.batch_size),
            'valid_tail':   get_test_data_loader('valid_tail', self.batch_size),
            'test_head':    get_test_data_loader('test_head', self.batch_size),
            'test_tail':    get_test_data_loader('test_tail', self.batch_size),
        }
| [
"dgl.graph",
"torch.stack",
"torch.LongTensor",
"numpy.zeros",
"torch.FloatTensor",
"collections.defaultdict",
"ordered_set.OrderedSet",
"torch.Tensor",
"numpy.arange",
"numpy.int32"
] | [((675, 714), 'numpy.arange', 'np.arange', (['self.num_ent'], {'dtype': 'np.int32'}), '(self.num_ent, dtype=np.int32)\n', (684, 714), True, 'import numpy as np\n'), ((1342, 1369), 'torch.stack', 'torch.stack', (['triples'], {'dim': '(0)'}), '(triples, dim=0)\n', (1353, 1369), False, 'import torch\n'), ((1390, 1416), 'torch.stack', 'torch.stack', (['labels'], {'dim': '(0)'}), '(labels, dim=0)\n', (1401, 1416), False, 'import torch\n'), ((1580, 1622), 'numpy.zeros', 'np.zeros', (['[self.num_ent]'], {'dtype': 'np.float32'}), '([self.num_ent], dtype=np.float32)\n', (1588, 1622), True, 'import numpy as np\n'), ((1688, 1708), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (1705, 1708), False, 'import torch\n'), ((2611, 2638), 'torch.stack', 'torch.stack', (['triples'], {'dim': '(0)'}), '(triples, dim=0)\n', (2622, 2638), False, 'import torch\n'), ((2655, 2681), 'torch.stack', 'torch.stack', (['labels'], {'dim': '(0)'}), '(labels, dim=0)\n', (2666, 2681), False, 'import torch\n'), ((2841, 2883), 'numpy.zeros', 'np.zeros', (['[self.num_ent]'], {'dtype': 'np.float32'}), '([self.num_ent], dtype=np.float32)\n', (2849, 2883), True, 'import numpy as np\n'), ((2949, 2969), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (2966, 2969), False, 'import torch\n'), ((5404, 5415), 'collections.defaultdict', 'ddict', (['list'], {}), '(list)\n', (5409, 5415), True, 'from collections import defaultdict as ddict\n'), ((5451, 5461), 'collections.defaultdict', 'ddict', (['set'], {}), '(set)\n', (5456, 5461), True, 'from collections import defaultdict as ddict\n'), ((6640, 6685), 'dgl.graph', 'dgl.graph', (['(src, dst)'], {'num_nodes': 'self.num_ent'}), '((src, dst), num_nodes=self.num_ent)\n', (6649, 6685), False, 'import dgl\n'), ((7007, 7034), 'torch.Tensor', 'torch.Tensor', (['in_edges_mask'], {}), '(in_edges_mask)\n', (7019, 7034), False, 'import torch\n'), ((7076, 7104), 'torch.Tensor', 'torch.Tensor', (['out_edges_mask'], {}), '(out_edges_mask)\n', 
(7088, 7104), False, 'import torch\n'), ((7562, 7573), 'collections.defaultdict', 'ddict', (['list'], {}), '(list)\n', (7567, 7573), True, 'from collections import defaultdict as ddict\n'), ((861, 892), 'torch.LongTensor', 'torch.LongTensor', (["ele['triple']"], {}), "(ele['triple'])\n", (877, 892), False, 'import torch\n'), ((894, 916), 'numpy.int32', 'np.int32', (["ele['label']"], {}), "(ele['label'])\n", (902, 916), True, 'import numpy as np\n'), ((2281, 2312), 'torch.LongTensor', 'torch.LongTensor', (["ele['triple']"], {}), "(ele['triple'])\n", (2297, 2312), False, 'import torch\n'), ((2314, 2336), 'numpy.int32', 'np.int32', (["ele['label']"], {}), "(ele['label'])\n", (2322, 2336), True, 'import numpy as np\n'), ((4519, 4531), 'ordered_set.OrderedSet', 'OrderedSet', ([], {}), '()\n', (4529, 4531), False, 'from ordered_set import OrderedSet\n'), ((4533, 4545), 'ordered_set.OrderedSet', 'OrderedSet', ([], {}), '()\n', (4543, 4545), False, 'from ordered_set import OrderedSet\n'), ((6718, 6736), 'torch.Tensor', 'torch.Tensor', (['rels'], {}), '(rels)\n', (6730, 6736), False, 'import torch\n')] |
# Copied from semi-supervised.ipynb notebook
import argparse
import json
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import tqdm
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold, GroupKFold
from EVE.VAE_model import VAE_model
from utils import data_utils
def save_vae_aux(vae_aux, checkpoint_path, encoder_parameters, decoder_parameters, training_parameters):
    """Save a VAE + auxiliary-head checkpoint together with its configuration.

    Stores the model's state_dict plus the encoder/decoder/training parameter
    dicts in a single torch checkpoint file so the model can be rebuilt and
    reloaded from one place.

    Parameters
    ----------
    vae_aux: torch.nn.Module whose state_dict() is saved
    checkpoint_path: destination file path; missing parent directories are created
    encoder_parameters, decoder_parameters, training_parameters: config dicts
    """
    # Create intermediate dirs above this. Guard against a bare filename:
    # os.path.dirname() returns "" then, and os.makedirs("") would raise.
    parent_dir = os.path.dirname(checkpoint_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    # Could also just save the vae and linear model separately
    torch.save({
        'model_state_dict': vae_aux.state_dict(),
        'encoder_parameters': encoder_parameters,
        'decoder_parameters': decoder_parameters,
        'training_parameters': training_parameters,
    }, checkpoint_path)
def main(model_checkpoint,
msa_location,
natural_labels_path,
model_params,
training_logs_location,
plot_save_dir=None,
# Cross-validation options
lm_training_frequency=2,
lm_elbo_together=True, # TODO On steps optimizing the linear model, also optimise the VAE loss
num_training_steps=200,
prev_num_steps=400000, # Previous number of steps the model was trained for
training_mode='mixed',
load_checkpoint=True,
lm_loss_weight=10,
):
####################
protein_name = "ADRB2"
MSA_LOCATION = msa_location
NATURAL_LABELS_PATH = natural_labels_path
PLOT_SAVE_DIR = plot_save_dir
training_parameters = model_params['training_parameters']
# Change training parameters in-place so that we can save the correct parameters in the checkpoint
training_parameters['learning_rate'] = training_parameters['learning_rate'] * 1
if not load_checkpoint:
prev_num_steps = 0
use_mean_embeddings = False
# print("Using mean embeddings")
shrink_init_variance = False
loss_fn = "mse"
os.makedirs(training_logs_location, exist_ok=True)
if PLOT_SAVE_DIR:
assert os.path.exists(PLOT_SAVE_DIR), PLOT_SAVE_DIR
assert os.path.exists(MSA_LOCATION), MSA_LOCATION
##################
start = time.time()
msa_data = data_utils.MSA_processing(
MSA_location=MSA_LOCATION,
theta=0.2,
use_weights=False,
# weights_location=args.MSA_weights_location + os.sep + protein_name + '_theta_' + str(theta) + '.npy' # Weights are saved here during training
)
print(f"Time taken: {(time.time()-start)//60}m:{(time.time()-start)%60:.3f}s", )
#####################################
msa_df = pd.DataFrame.from_dict(msa_data.seq_name_to_sequence, orient='index', columns=['sequence'])
msa_df['Uniref'] = msa_df.index
msa_df.reset_index(drop=True)
msa_df['Uniref'] = msa_df['Uniref'].apply(lambda s: s.replace(">", "")) # Strip the > character
#################################
# MSA labels
assert os.path.isfile(NATURAL_LABELS_PATH)
natural_labels_df = pd.read_csv(NATURAL_LABELS_PATH)
print(len(natural_labels_df), "rows")
# display(natural_labels_df.head())
msa_merged_df = pd.merge(left=msa_df, right=natural_labels_df, how='left', on='Uniref')
print(len(msa_merged_df), 'rows')
############################
if load_checkpoint:
MODEL_CHECKPOINT = model_checkpoint
assert os.path.isfile(MODEL_CHECKPOINT), f"{MODEL_CHECKPOINT} is not a file."
assert os.stat(MODEL_CHECKPOINT).st_size != 0, "File is empty, this will cause errors."
# Load model checkpoint
checkpoint = torch.load(MODEL_CHECKPOINT, map_location=torch.device("cpu"))
def load_model(model_name=protein_name):
# First, init model with random weights
vae_model = VAE_model(
model_name=model_name,
data=msa_data,
encoder_parameters=model_params["encoder_parameters"],
decoder_parameters=model_params["decoder_parameters"],
random_seed=42,
)
if load_checkpoint:
vae_model.load_state_dict(checkpoint['model_state_dict'])
# vae_model.eval() # Turn off dropout etc.
return vae_model
###########################
# Add auxiliary linear regression
class VAEAux(torch.nn.Module):
def __init__(self, vae_model, linear_out_features):
super().__init__()
self.vae_model = vae_model
linear_in = model_params['encoder_parameters']['z_dim']
self.linear_out_features = linear_out_features
self.lm = torch.nn.Linear(linear_in, linear_out_features)
def forward(self, x):
mu, log_var = self.vae_model.encoder(x)
z = self.vae_model.sample_latent(mu, log_var)
return mu, log_var, z, self.lm(z)
def encode_and_predict(self, x):
mu, log_var = self.vae_model.encoder(x)
z = self.vae_model.sample_latent(mu, log_var)
# lm_pred_interval = torch.clamp(lm_pred, -2, 0)
return self.lm(z)
def predict_lm(self, z):
return self.lm(z)
########################
# TODO use GroupKFold? To get equal splits of labeled data?
kf = KFold(n_splits=5, random_state=42, shuffle=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device:", device)
cols = ['GNAS']
# cols = ['GNAS', 'GNAL', 'GNAI1', 'GNAI3', 'GNAO1', 'GNAZ', 'GNAQ', 'GNA14', 'GNA15', 'GNA12', 'GNA13']
targets = torch.as_tensor(msa_merged_df[cols].values).to(device).view(-1, len(cols)) # Many NaNs, only 128 labels
def evaluate_sampled(x, y, num_samples=20):
assert y.isnan().sum() == 0
y_pred_all = torch.cat([vae_aux.forward(x)[3].detach() for _ in
range(num_samples)]) # vae_aux.forward returns (mu, log_var, z, lm_pred)
y_test_all = torch.cat([y for _ in range(num_samples)])
mse_all = torch.nn.MSELoss()(y_test_all, y_pred_all)
print("Total test mse:", mse_all)
# print("Total test R^2:", r2_score(y_test_all, y_pred_all))
# TODO can also get mean, std of mse/R^2 across samples if stacked
bce_linear_all = torch.nn.BCELoss()(torch.sigmoid(y_pred_all), y_test_all / 2 + 1)
print("Total test linear bce:", bce_linear_all)
return y_pred_all, y_test_all, mse_all
for fold_idx, (train_index, val_index) in enumerate(kf.split(msa_data.one_hot_encoding)):
print(f"CV fold {fold_idx}")
torch.manual_seed(42)
vae_model = load_model(model_name=protein_name + "lood_linear_regression_joint")
vae_aux = VAEAux(vae_model, linear_out_features=len(cols))
# All parameters on for joint training
vae_aux.to(device)
vae_aux.train()
if training_mode == "frozen":
vae_aux.eval()
vae_aux.lm.train()
print("tmp weights init:", vae_aux.lm.weight, vae_aux.lm.bias)
# Init assumes Var(output) = 1. Here output variance is bigger, so let's scale down by a factor
if shrink_init_variance:
with torch.no_grad():
vae_aux.lm.weight.data = vae_aux.lm.weight.clone() / 10.
# Optimize over all parameters (VAE + prediction model)
optimizer = torch.optim.Adam(vae_aux.parameters(), lr=training_parameters['learning_rate'],
weight_decay=training_parameters['l2_regularization'])
if training_mode == "frozen":
print("Frozen encoder/decoder: only optimizing over lm parameters")
optimizer = torch.optim.Adam(vae_aux.lm.parameters(), lr=training_parameters['learning_rate'],
weight_decay=training_parameters['l2_regularization'])
x_train = torch.as_tensor(msa_data.one_hot_encoding[train_index], dtype=torch.float, device=device)
weights_train = msa_data.weights[train_index]
y_train = torch.as_tensor(targets[train_index], dtype=torch.float, device=device)
train_label_mask = ~targets[train_index].isnan().any(dim=-1)
x_train_labeled = torch.as_tensor(msa_data.one_hot_encoding[train_index][train_label_mask], dtype=torch.float,
device=device)
y_train_labeled = torch.as_tensor(targets[train_index][train_label_mask], dtype=torch.float, device=device)
# TODO also use the test set to get validation ELBO curve
x_test = torch.as_tensor(msa_data.one_hot_encoding[val_index], dtype=torch.float, device=device)
weights_test = msa_data.weights[val_index]
y_test = torch.as_tensor(targets[val_index], dtype=torch.float, device=device)
test_label_mask = ~targets[val_index].isnan().any(dim=-1)
x_test_labeled = torch.as_tensor(msa_data.one_hot_encoding[val_index][test_label_mask], dtype=torch.float,
device=device)
y_test_labeled = torch.as_tensor(targets[val_index][test_label_mask], dtype=torch.float, device=device)
batch_order = np.arange(x_train.shape[0])
seq_sample_probs = weights_train / np.sum(weights_train)
Neff_training = np.sum(weights_train)
training_metrics = {"mse": [], "neg_ELBO": [], "BCE": [], "bce_linear": []} # "r2": [],
validation_metrics = {"mse": [], 'bce_linear': []}
# Init the bias for better convergence?
print("y_train_labeled shape:", y_train_labeled.size())
assert y_train_labeled.size()[1] == len(cols), y_train_labeled.size()
vae_aux.lm.bias.data = torch.mean(y_train_labeled, dim=0)
mean = torch.mean(y_train_labeled, dim=0, keepdim=True)
print("mean(y_train): ", mean.detach().cpu())
print("mean shape before: ", mean.size())
baseline_pred = mean.expand(y_test_labeled.size()) # Broadcast mean up to N predictions, match shape for MSE loss
print("baseline shape:", baseline_pred.size())
print("Baseline mse (by predicting only mean): per-component:", cols, torch.nn.MSELoss(reduction='none')(baseline_pred, y_test_labeled).mean(dim=0), "reduced:", torch.nn.MSELoss()(baseline_pred, y_test_labeled))
# print("Baseline BCE/log-loss (by predicting only mean):", torch.nn.BCELoss()(baseline_pred / 2 + 1, y_test_labeled / 2 + 1))
# TODO aggregate mean/std of CV scores
# TODO should probably trim out the logging and keep outside functions
def mixed_batch_loss():
    """Compute one semi-supervised loss on a single mixed batch.

    The batch is 75% labeled sequences (used for the linear-model loss) and
    25% weight-sampled unlabeled sequences; the whole batch contributes to
    the VAE ELBO. Returns the combined scalar loss (ELBO + weighted
    supervised term + L2 penalty on the linear head).

    NOTE(review): closes over x_train/x_train_labeled/y_train_labeled,
    vae_aux, training_parameters, loss_fn, lm_loss_weight, training_metrics
    and training_step from the enclosing training function.
    """
    # L2 penalty on the linear head is currently disabled.
    lm_l2_regularization = 0
    # Train together in same batch.
    prop_batch_labeled = 0.75
    num_labeled = int(training_parameters['batch_size'] * prop_batch_labeled)
    # Sample labeled examples uniformly at random (with replacement).
    sample_index_labeled = np.random.randint(0, x_train_labeled.shape[0], size=num_labeled).tolist()
    batch_labeled = x_train_labeled[sample_index_labeled]
    batch_labeled_y = y_train_labeled[sample_index_labeled]
    # Sample unlabeled examples according to MSA sequence weights.
    sample_index_unlabeled = np.random.choice(batch_order, training_parameters['batch_size'] - num_labeled,
                                              p=seq_sample_probs).tolist()
    batch_unlabeled = x_train[sample_index_unlabeled]
    # Labeled examples come first so lm_pred[:num_labeled] lines up with batch_labeled_y below.
    batch = torch.cat((batch_labeled, batch_unlabeled), dim=0)
    assert batch.size()[0] == training_parameters['batch_size']
    mu, log_var = vae_aux.vae_model.encoder(batch)
    z = vae_aux.vae_model.sample_latent(mu, log_var)
    if use_mean_embeddings:
        lm_pred = vae_aux.lm(mu)
    else:
        lm_pred = vae_aux.lm(z)
    recon_x_log = vae_aux.vae_model.decoder(z)
    neg_ELBO, BCE, KLD_latent, KLD_decoder_params_normalized = vae_aux.vae_model.loss_function(
        recon_x_log, batch, mu, log_var,
        training_parameters['kl_latent_scale'],
        training_parameters['kl_global_params_scale'],
        training_parameters['annealing_warm_up'],
        prev_num_steps + training_step,
        Neff_training)
    lm_l2_norm = torch.norm(vae_aux.lm.weight, p=2)
    if loss_fn == "mse":
        # Supervised loss only on the labeled prefix of the batch.
        mse = torch.nn.MSELoss()(lm_pred[:num_labeled], batch_labeled_y)
        loss = neg_ELBO + lm_loss_weight * mse + lm_l2_regularization * lm_l2_norm
        training_metrics["mse"].append(mse.item())
        print(training_step, "Training mse:", mse.item())
    elif loss_fn == "sigmoid":
        lm_pred = torch.sigmoid(lm_pred[:num_labeled])
        y_sig = batch_labeled_y / 2 + 1  # y = [-2, 0] -> [0,1]
        bce = torch.nn.BCELoss()(lm_pred, y_sig)
        loss = neg_ELBO + lm_loss_weight * bce + lm_l2_regularization * lm_l2_norm
        training_metrics["bce_linear"].append(bce.item())
        print(training_step, "Training bce:", bce.item())
    else:
        raise ValueError(f"loss_fn must be one of [mse,sigmoid]. {loss_fn} given")
    training_metrics["neg_ELBO"].append(neg_ELBO.item())
    training_metrics["BCE"].append(BCE.item())
    return loss
def alternating_loss(training_step):
    """Alternate between a supervised step (every `lm_training_frequency`
    steps, on all labeled data) and an unsupervised ELBO step on a
    weight-sampled batch. Returns the scalar loss for this step.

    NOTE(review): indentation below is reconstructed from a flattened dump —
    the placement of the mse logging relative to `if lm_elbo_together:`
    should be confirmed against the original source.
    """
    # Linear model + joint training
    if training_step % lm_training_frequency == 0:
        x, y = x_train_labeled, y_train_labeled
        mu, log_var = vae_aux.vae_model.encoder(x_train_labeled)
        z = vae_aux.vae_model.sample_latent(mu, log_var)
        if use_mean_embeddings:
            lm_pred = vae_aux.lm(mu)
        else:
            lm_pred = vae_aux.lm(z)
        mse = torch.nn.MSELoss()(lm_pred, y)  # Can also do sigmoid loss for soft classification from -2 to 0?
        loss = 10*mse
        # Random thought: Can you optimize encoder and decoder separately? e.g. KL div one step, recon_x next step? In this case we might want to just optimize encoder + linear model.
        if lm_elbo_together:
            recon_x_log = vae_aux.vae_model.decoder(z)
            neg_ELBO, BCE, KLD_latent, KLD_decoder_params_normalized = vae_aux.vae_model.loss_function(recon_x_log, x, mu, log_var, training_parameters['kl_latent_scale'], training_parameters['kl_global_params_scale'], training_parameters['annealing_warm_up'], prev_num_steps + training_step, Neff_training)
            loss = neg_ELBO + 10 * mse  # Can weight these appropriately: Since lr * 100 worked before, can we just do loss*100?
            training_metrics["neg_ELBO"].append(neg_ELBO.item())
            training_metrics["BCE"].append(BCE.item())
        training_metrics["mse"].append(mse.item())
        print(training_step, "Training mse:", mse.item())
    else:
        # Sample a batch according to sequence weight.
        batch_sample_index = np.random.choice(batch_order, training_parameters['batch_size'],
                                              p=seq_sample_probs).tolist()
        x = x_train[batch_sample_index]
        # Unsupervised training: plain ELBO on the sampled batch.
        mu, log_var = vae_aux.vae_model.encoder(x)
        z = vae_aux.vae_model.sample_latent(mu, log_var)
        recon_x_log = vae_aux.vae_model.decoder(z)
        neg_ELBO, BCE, KLD_latent, KLD_decoder_params_normalized = vae_aux.vae_model.loss_function(
            recon_x_log, x, mu, log_var,
            training_parameters['kl_latent_scale'],
            training_parameters['kl_global_params_scale'],
            training_parameters['annealing_warm_up'],
            prev_num_steps + training_step,
            Neff_training)
        loss = neg_ELBO
        training_metrics["neg_ELBO"].append(neg_ELBO.item())
        training_metrics["BCE"].append(BCE.item())
    return loss
def frozen_vae_loss():
    """Supervised loss with the VAE frozen: encode the labeled data under
    torch.no_grad() (so only the linear head receives gradients) and return
    the weighted MSE or BCE loss.
    """
    x, y = x_train_labeled, y_train_labeled
    with torch.no_grad():
        mu, log_var = vae_aux.vae_model.encoder(x)
        z = vae_aux.vae_model.sample_latent(mu, log_var)
    if use_mean_embeddings:
        lm_pred = vae_aux.lm(mu)
    else:
        lm_pred = vae_aux.lm(z)
    if loss_fn == "mse":
        mse = torch.nn.MSELoss()(lm_pred, y)  # Can also do sigmoid loss for soft classification from -2 to 0?
        loss = lm_loss_weight * mse
        training_metrics["mse"].append(mse.item())
        print(training_step, "Training mse:", mse.item())
    elif loss_fn == "sigmoid":
        lm_pred = torch.sigmoid(lm_pred)
        y_sig = y / 2 + 1  # y = [-2,0] -> [0,1]
        bce = torch.nn.BCELoss()(lm_pred, y_sig)
        loss = lm_loss_weight * bce
        training_metrics["bce_linear"].append(bce.item())
        print(training_step, "Training bce:", bce.item())
    else:
        raise ValueError(f"loss_fn must be one of [mse,sigmoid]. {loss_fn} given")
    return loss
for training_step in tqdm.tqdm(range(1, num_training_steps+1), desc="Training linear reg model"):
optimizer.zero_grad()
if training_mode == "mixed":
loss = mixed_batch_loss()
elif training_mode == "alternating":
loss = alternating_loss(training_step)
elif training_mode == "frozen":
loss = frozen_vae_loss()
else:
raise KeyError(f"Training mode must be 'mixed', 'alternating' or 'frozen'. {training_mode} given")
loss.backward()
optimizer.step()
if training_step % 10 == 0:
# y_pred, mse = evaluate(x_test, y_test)
_, _, mse = evaluate_sampled(x_test_labeled, y_test_labeled, num_samples=5)
validation_metrics['mse'].append(mse.item())
print("weights after training:\n", vae_aux.lm.weight, "bias:", vae_aux.lm.bias)
if PLOT_SAVE_DIR:
for metric in training_metrics:
plt.plot(training_metrics[metric])
plt.title(f"Training {metric}: Fold {fold_idx}")
plt.savefig(os.path.join(PLOT_SAVE_DIR, f"training_{metric}_fold_{fold_idx}"))
plt.clf() # Clear figure, never knew this before
for metric in validation_metrics:
plt.plot(validation_metrics[metric])
plt.title(f"Validation {metric}: Fold {fold_idx}")
plt.savefig(os.path.join(PLOT_SAVE_DIR, f"test_{metric}_fold_{fold_idx}"))
plt.clf() # Clear figure, never knew this before
# Aggregate predictions
num_samples = 20
print("Final test mse:")
y_pred_all, y_test_all, mse = evaluate_sampled(x_test_labeled, y_test_labeled, num_samples=num_samples)
# Also write results out to CSV
csv_path = os.path.join(training_logs_location, f"test_fold_{fold_idx}.csv")
assert len(y_pred_all.cpu().numpy()) > 0, y_pred_all.cpu().numpy()
# using .squeeze() to remove the extra dimensions of size 1 e.g. [N, 1] -> [N], because pandas requires 1D arrays
if len(cols) == 1:
df = pd.DataFrame.from_dict({'pred': y_pred_all.cpu().numpy().squeeze(), 'test': y_test_all.cpu().numpy().squeeze()}, orient='columns')
df.to_csv(csv_path)
# For multiple columns, can save an e.g. GNAS_pred, GNAS_test column pair for each column
checkpoint_out = os.path.join(training_logs_location, f"fold_{fold_idx}")
save_vae_aux(vae_aux,
checkpoint_path=checkpoint_out,
encoder_parameters=model_params["encoder_parameters"],
decoder_parameters=model_params["decoder_parameters"],
training_parameters=training_parameters)
if __name__ == "__main__":
    # CLI entry point: resolve the protein's MSA from the mapping file, collect
    # optional fine-tuning overrides, and launch main().
    parser = argparse.ArgumentParser(description='VAE')
    parser.add_argument('--MSA_data_folder', type=str, help='Folder where MSAs are stored')
    parser.add_argument('--MSA_list', type=str, help='List of proteins and corresponding MSA file name')
    parser.add_argument('--protein_index', type=int, help='Row index of protein in input mapping file')
    parser.add_argument('--MSA_weights_location', type=str, help='Location where weights for each sequence in the MSA will be stored')
    parser.add_argument('--VAE_checkpoint_location', type=str, help='Location of pretrained VAE model chekpoint')
    parser.add_argument('--model_name_suffix', default='Jan1', type=str, help='model checkpoint name will be the protein name followed by this suffix')
    parser.add_argument('--model_parameters_location', type=str, help='Location of VAE model parameters')
    parser.add_argument('--training_logs_location', type=str, help='Where results (txt, csv) should be written')
    parser.add_argument('--labels_path', type=str, help='Labels for linear regression in this case')
    # Cross-validation/training options. Only flags the user actually passes
    # are forwarded to main() (see fine_tuning_kwargs below).
    parser.add_argument('--num_training_steps', type=int, help="Number of steps of fine-tuning")
    parser.add_argument('--z_dim', type=int, help='Specify a different latent dim than in the params file')
    parser.add_argument('--lm_elbo_together', type=int)  # 0 or 1
    parser.add_argument('--lm_training_frequency', type=int)
    parser.add_argument('--training_mode')  # 'mixed' or 'alternating'
    parser.add_argument('--lm_loss_weight', type=float)
    args = parser.parse_args()
    print("tmp: args=", args)
    mapping_file = pd.read_csv(args.MSA_list)
    protein_name = mapping_file['protein_name'][args.protein_index]
    msa_location = args.MSA_data_folder + os.sep + mapping_file['msa_location'][args.protein_index]
    print("Protein name: " + str(protein_name))
    print("MSA file: " + str(msa_location))
    # Forward only explicitly-provided overrides so main()'s defaults apply otherwise.
    fine_tuning_kwargs = {}
    if args.num_training_steps is not None:
        fine_tuning_kwargs['num_training_steps'] = args.num_training_steps
    if args.lm_elbo_together is not None:
        fine_tuning_kwargs['lm_elbo_together'] = args.lm_elbo_together
    if args.lm_training_frequency is not None:
        fine_tuning_kwargs['lm_training_frequency'] = args.lm_training_frequency
    if args.training_mode is not None:
        fine_tuning_kwargs['training_mode'] = args.training_mode
    if args.lm_loss_weight is not None:
        fine_tuning_kwargs['lm_loss_weight'] = args.lm_loss_weight
    model_params = json.load(open(args.model_parameters_location))
    # Overwrite the latent dimension in both encoder and decoder if requested.
    if args.z_dim:
        model_params["encoder_parameters"]["z_dim"] = args.z_dim
        model_params["decoder_parameters"]["z_dim"] = args.z_dim
    main(model_checkpoint=args.VAE_checkpoint_location,
         msa_location=msa_location,
         natural_labels_path=args.labels_path,
         model_params=model_params,
         training_logs_location=args.training_logs_location,
         plot_save_dir=args.training_logs_location,
         load_checkpoint=False,  # TODO tmp, should rather read this in from command line
         **fine_tuning_kwargs)
| [
"matplotlib.pyplot.title",
"numpy.sum",
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"torch.cat",
"os.path.isfile",
"numpy.random.randint",
"numpy.arange",
"torch.device",
"EVE.VAE_model.VAE_model",
"torch.no_grad",
"os.path.join",
"utils.data_utils.MSA_processing... | [((2068, 2118), 'os.makedirs', 'os.makedirs', (['training_logs_location'], {'exist_ok': '(True)'}), '(training_logs_location, exist_ok=True)\n', (2079, 2118), False, 'import os\n'), ((2214, 2242), 'os.path.exists', 'os.path.exists', (['MSA_LOCATION'], {}), '(MSA_LOCATION)\n', (2228, 2242), False, 'import os\n'), ((2294, 2305), 'time.time', 'time.time', ([], {}), '()\n', (2303, 2305), False, 'import time\n'), ((2321, 2408), 'utils.data_utils.MSA_processing', 'data_utils.MSA_processing', ([], {'MSA_location': 'MSA_LOCATION', 'theta': '(0.2)', 'use_weights': '(False)'}), '(MSA_location=MSA_LOCATION, theta=0.2, use_weights\n =False)\n', (2346, 2408), False, 'from utils import data_utils\n'), ((2762, 2857), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['msa_data.seq_name_to_sequence'], {'orient': '"""index"""', 'columns': "['sequence']"}), "(msa_data.seq_name_to_sequence, orient='index',\n columns=['sequence'])\n", (2784, 2857), True, 'import pandas as pd\n'), ((3093, 3128), 'os.path.isfile', 'os.path.isfile', (['NATURAL_LABELS_PATH'], {}), '(NATURAL_LABELS_PATH)\n', (3107, 3128), False, 'import os\n'), ((3153, 3185), 'pandas.read_csv', 'pd.read_csv', (['NATURAL_LABELS_PATH'], {}), '(NATURAL_LABELS_PATH)\n', (3164, 3185), True, 'import pandas as pd\n'), ((3289, 3360), 'pandas.merge', 'pd.merge', ([], {'left': 'msa_df', 'right': 'natural_labels_df', 'how': '"""left"""', 'on': '"""Uniref"""'}), "(left=msa_df, right=natural_labels_df, how='left', on='Uniref')\n", (3297, 3360), True, 'import pandas as pd\n'), ((5373, 5421), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'random_state': '(42)', 'shuffle': '(True)'}), '(n_splits=5, random_state=42, shuffle=True)\n', (5378, 5421), False, 'from sklearn.model_selection import KFold, GroupKFold\n'), ((22260, 22302), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""VAE"""'}), "(description='VAE')\n", (22283, 22302), False, 'import 
argparse\n'), ((24168, 24194), 'pandas.read_csv', 'pd.read_csv', (['args.MSA_list'], {}), '(args.MSA_list)\n', (24179, 24194), True, 'import pandas as pd\n'), ((522, 554), 'os.path.dirname', 'os.path.dirname', (['checkpoint_path'], {}), '(checkpoint_path)\n', (537, 554), False, 'import os\n'), ((2157, 2186), 'os.path.exists', 'os.path.exists', (['PLOT_SAVE_DIR'], {}), '(PLOT_SAVE_DIR)\n', (2171, 2186), False, 'import os\n'), ((3517, 3549), 'os.path.isfile', 'os.path.isfile', (['MODEL_CHECKPOINT'], {}), '(MODEL_CHECKPOINT)\n', (3531, 3549), False, 'import os\n'), ((3915, 4098), 'EVE.VAE_model.VAE_model', 'VAE_model', ([], {'model_name': 'model_name', 'data': 'msa_data', 'encoder_parameters': "model_params['encoder_parameters']", 'decoder_parameters': "model_params['decoder_parameters']", 'random_seed': '(42)'}), "(model_name=model_name, data=msa_data, encoder_parameters=\n model_params['encoder_parameters'], decoder_parameters=model_params[\n 'decoder_parameters'], random_seed=42)\n", (3924, 4098), False, 'from EVE.VAE_model import VAE_model\n'), ((6686, 6707), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (6703, 6707), False, 'import torch\n'), ((7972, 8065), 'torch.as_tensor', 'torch.as_tensor', (['msa_data.one_hot_encoding[train_index]'], {'dtype': 'torch.float', 'device': 'device'}), '(msa_data.one_hot_encoding[train_index], dtype=torch.float,\n device=device)\n', (7987, 8065), False, 'import torch\n'), ((8134, 8205), 'torch.as_tensor', 'torch.as_tensor', (['targets[train_index]'], {'dtype': 'torch.float', 'device': 'device'}), '(targets[train_index], dtype=torch.float, device=device)\n', (8149, 8205), False, 'import torch\n'), ((8302, 8413), 'torch.as_tensor', 'torch.as_tensor', (['msa_data.one_hot_encoding[train_index][train_label_mask]'], {'dtype': 'torch.float', 'device': 'device'}), '(msa_data.one_hot_encoding[train_index][train_label_mask],\n dtype=torch.float, device=device)\n', (8317, 8413), False, 'import torch\n'), ((8478, 8571), 
'torch.as_tensor', 'torch.as_tensor', (['targets[train_index][train_label_mask]'], {'dtype': 'torch.float', 'device': 'device'}), '(targets[train_index][train_label_mask], dtype=torch.float,\n device=device)\n', (8493, 8571), False, 'import torch\n'), ((8652, 8743), 'torch.as_tensor', 'torch.as_tensor', (['msa_data.one_hot_encoding[val_index]'], {'dtype': 'torch.float', 'device': 'device'}), '(msa_data.one_hot_encoding[val_index], dtype=torch.float,\n device=device)\n', (8667, 8743), False, 'import torch\n'), ((8808, 8877), 'torch.as_tensor', 'torch.as_tensor', (['targets[val_index]'], {'dtype': 'torch.float', 'device': 'device'}), '(targets[val_index], dtype=torch.float, device=device)\n', (8823, 8877), False, 'import torch\n'), ((8970, 9078), 'torch.as_tensor', 'torch.as_tensor', (['msa_data.one_hot_encoding[val_index][test_label_mask]'], {'dtype': 'torch.float', 'device': 'device'}), '(msa_data.one_hot_encoding[val_index][test_label_mask],\n dtype=torch.float, device=device)\n', (8985, 9078), False, 'import torch\n'), ((9141, 9231), 'torch.as_tensor', 'torch.as_tensor', (['targets[val_index][test_label_mask]'], {'dtype': 'torch.float', 'device': 'device'}), '(targets[val_index][test_label_mask], dtype=torch.float,\n device=device)\n', (9156, 9231), False, 'import torch\n'), ((9251, 9278), 'numpy.arange', 'np.arange', (['x_train.shape[0]'], {}), '(x_train.shape[0])\n', (9260, 9278), True, 'import numpy as np\n'), ((9368, 9389), 'numpy.sum', 'np.sum', (['weights_train'], {}), '(weights_train)\n', (9374, 9389), True, 'import numpy as np\n'), ((9769, 9803), 'torch.mean', 'torch.mean', (['y_train_labeled'], {'dim': '(0)'}), '(y_train_labeled, dim=0)\n', (9779, 9803), False, 'import torch\n'), ((9819, 9867), 'torch.mean', 'torch.mean', (['y_train_labeled'], {'dim': '(0)', 'keepdim': '(True)'}), '(y_train_labeled, dim=0, keepdim=True)\n', (9829, 9867), False, 'import torch\n'), ((21270, 21335), 'os.path.join', 'os.path.join', (['training_logs_location', 
'f"""test_fold_{fold_idx}.csv"""'], {}), "(training_logs_location, f'test_fold_{fold_idx}.csv')\n", (21282, 21335), False, 'import os\n'), ((21864, 21920), 'os.path.join', 'os.path.join', (['training_logs_location', 'f"""fold_{fold_idx}"""'], {}), "(training_logs_location, f'fold_{fold_idx}')\n", (21876, 21920), False, 'import os\n'), ((4720, 4767), 'torch.nn.Linear', 'torch.nn.Linear', (['linear_in', 'linear_out_features'], {}), '(linear_in, linear_out_features)\n', (4735, 4767), False, 'import torch\n'), ((5459, 5484), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5482, 5484), False, 'import torch\n'), ((6121, 6139), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (6137, 6139), False, 'import torch\n'), ((6376, 6394), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (6392, 6394), False, 'import torch\n'), ((6395, 6420), 'torch.sigmoid', 'torch.sigmoid', (['y_pred_all'], {}), '(y_pred_all)\n', (6408, 6420), False, 'import torch\n'), ((9322, 9343), 'numpy.sum', 'np.sum', (['weights_train'], {}), '(weights_train)\n', (9328, 9343), True, 'import numpy as np\n'), ((11456, 11506), 'torch.cat', 'torch.cat', (['(batch_labeled, batch_unlabeled)'], {'dim': '(0)'}), '((batch_labeled, batch_unlabeled), dim=0)\n', (11465, 11506), False, 'import torch\n'), ((13273, 13307), 'torch.norm', 'torch.norm', (['vae_aux.lm.weight'], {'p': '(2)'}), '(vae_aux.lm.weight, p=2)\n', (13283, 13307), False, 'import torch\n'), ((3603, 3628), 'os.stat', 'os.stat', (['MODEL_CHECKPOINT'], {}), '(MODEL_CHECKPOINT)\n', (3610, 3628), False, 'import os\n'), ((3780, 3799), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3792, 3799), False, 'import torch\n'), ((7285, 7300), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7298, 7300), False, 'import torch\n'), ((10319, 10337), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (10335, 10337), False, 'import torch\n'), ((18270, 18285), 'torch.no_grad', 'torch.no_grad', ([], {}), 
'()\n', (18283, 18285), False, 'import torch\n'), ((20423, 20457), 'matplotlib.pyplot.plot', 'plt.plot', (['training_metrics[metric]'], {}), '(training_metrics[metric])\n', (20431, 20457), True, 'import matplotlib.pyplot as plt\n'), ((20474, 20522), 'matplotlib.pyplot.title', 'plt.title', (['f"""Training {metric}: Fold {fold_idx}"""'], {}), "(f'Training {metric}: Fold {fold_idx}')\n", (20483, 20522), True, 'import matplotlib.pyplot as plt\n'), ((20634, 20643), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (20641, 20643), True, 'import matplotlib.pyplot as plt\n'), ((20746, 20782), 'matplotlib.pyplot.plot', 'plt.plot', (['validation_metrics[metric]'], {}), '(validation_metrics[metric])\n', (20754, 20782), True, 'import matplotlib.pyplot as plt\n'), ((20799, 20849), 'matplotlib.pyplot.title', 'plt.title', (['f"""Validation {metric}: Fold {fold_idx}"""'], {}), "(f'Validation {metric}: Fold {fold_idx}')\n", (20808, 20849), True, 'import matplotlib.pyplot as plt\n'), ((20957, 20966), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (20964, 20966), True, 'import matplotlib.pyplot as plt\n'), ((5671, 5714), 'torch.as_tensor', 'torch.as_tensor', (['msa_merged_df[cols].values'], {}), '(msa_merged_df[cols].values)\n', (5686, 5714), False, 'import torch\n'), ((10934, 10998), 'numpy.random.randint', 'np.random.randint', (['(0)', 'x_train_labeled.shape[0]'], {'size': 'num_labeled'}), '(0, x_train_labeled.shape[0], size=num_labeled)\n', (10951, 10998), True, 'import numpy as np\n'), ((11211, 11313), 'numpy.random.choice', 'np.random.choice', (['batch_order', "(training_parameters['batch_size'] - num_labeled)"], {'p': 'seq_sample_probs'}), "(batch_order, training_parameters['batch_size'] -\n num_labeled, p=seq_sample_probs)\n", (11227, 11313), True, 'import numpy as np\n'), ((13364, 13382), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (13380, 13382), False, 'import torch\n'), ((13704, 13740), 'torch.sigmoid', 'torch.sigmoid', (['lm_pred[:num_labeled]'], 
{}), '(lm_pred[:num_labeled])\n', (13717, 13740), False, 'import torch\n'), ((14865, 14883), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (14881, 14883), False, 'import torch\n'), ((18601, 18619), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (18617, 18619), False, 'import torch\n'), ((18932, 18954), 'torch.sigmoid', 'torch.sigmoid', (['lm_pred'], {}), '(lm_pred)\n', (18945, 18954), False, 'import torch\n'), ((20551, 20616), 'os.path.join', 'os.path.join', (['PLOT_SAVE_DIR', 'f"""training_{metric}_fold_{fold_idx}"""'], {}), "(PLOT_SAVE_DIR, f'training_{metric}_fold_{fold_idx}')\n", (20563, 20616), False, 'import os\n'), ((20878, 20939), 'os.path.join', 'os.path.join', (['PLOT_SAVE_DIR', 'f"""test_{metric}_fold_{fold_idx}"""'], {}), "(PLOT_SAVE_DIR, f'test_{metric}_fold_{fold_idx}')\n", (20890, 20939), False, 'import os\n'), ((2646, 2657), 'time.time', 'time.time', ([], {}), '()\n', (2655, 2657), False, 'import time\n'), ((2673, 2684), 'time.time', 'time.time', ([], {}), '()\n', (2682, 2684), False, 'import time\n'), ((10228, 10262), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (10244, 10262), False, 'import torch\n'), ((13836, 13854), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (13852, 13854), False, 'import torch\n'), ((16221, 16310), 'numpy.random.choice', 'np.random.choice', (['batch_order', "training_parameters['batch_size']"], {'p': 'seq_sample_probs'}), "(batch_order, training_parameters['batch_size'], p=\n seq_sample_probs)\n", (16237, 16310), True, 'import numpy as np\n'), ((19034, 19052), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (19050, 19052), False, 'import torch\n')] |
from agent import Agent
import numpy as np
import random
import time
import os
import subprocess
# from bands import sound_params
# Discrete actions the RL agent can apply to the active sound parameter.
action_set = [
    "INCREASE_PARAM",
    "DECREASE_PARAM",
    "SAME_PARAM",
]
# Output directories for recorded fNIRS samples and the matching frequency log.
DATA_LOCATION = os.getcwd() + "/raw_data/"
SOUND_LOCATION = os.getcwd() + "/raw_sound/"
# print('outputting data to' + DATA_LOCATION)
# TODO: abstract into seperate file
# Synth configuration keyed by band index: the SuperCollider synth type, the
# parameter the agent manipulates ("param"), its starting value ("default"),
# the perturbation step size ("dev"), and extra synth arguments ("other").
sound_params = {
    0 : {
        "type": "dust",
        "dev": 0.1,
        "param": "parts",
        "default": 3,
        "other": {}
    },
    1 : {
        "type": "noise",
        "dev": 0.1,
        "param": "osc",
        "default": 0.2,
        "other": {},
    },
    2 : {
        "type": "pulse",
        "dev": 0.1,
        "param": "freq",
        "default": 240,
        "other": {}
    },
    3 : {
        "type": "sine",
        "dev": 0.1,
        "param": "freq",
        "default": 240,
        "other": {}
    },
    4 : {
        # Binaural beat: "diff" is the frequency difference between the ears.
        "type": "bin",
        "dev": 0.1,
        "param": "diff",
        "default": 7.43,
        "other": {}
    }
}
# Reference table of EEG band frequencies (Hz) and Schumann resonances:
# ~delta = 1; // 0.5 - 2. deep sleep, unsconsciousness
# ~theta = 5.5; // 4 - 7. Meditative, drowsy, sleeping. Memory, spatial learning
# ~mu = 9; // 9 - 11. associated with voluntary movement
# ~alpha = 10; // 7.5 - 12.5. Relaxed states of mind
# ~beta1 = 14; // 12.5 - 16. Normal waking consciousness
# ~beta2 = 18; // 16.5 - 20.
# ~beta3 = 24; // 20.5 - 28
# ~gamma = 35; // 32 - 100. Visual awareness, transcendental mental states
# // extra bonus vibrations:
# ~schumann1 = 7.83;
# ~schumann2 = 14.3;
# ~schumann3 = 20.8;
# ~schumann4 = 27.3;
# ~schumann5 = 33.8;
# Candidate stimulation frequencies actually used by the experiment
# (indexed by the module-level `freq_i`); commented entries are disabled.
freqs = [
    1,
    3.3,
    5.5,
    7.83,
    9,
    14.3,
    # 10,
    14,  # beta1
    # 18, # beta2
    24,
    #
    33.8,
    # 40,
    # 50,
    # 70,
    # 100
]
from supercollider import Server, Synth, Buffer, AudioBus, ADD_TO_TAIL, Group
import time
import random
from matplotlib import pyplot as plt
import socketio
# Experiment hyperparameters.
n_episodes = 3              # RL episodes per band
episode_length = 2          # steps per episode before it is marked done
observation_time_delay = 7  # seconds to wait after an action before observing
observeration_window = 15   # [sic] number of recent fNIRS samples per observation
moving_average_window = 5   # smoothing window applied to the observation
# TODO: include in a MeatSpace variable and instantiate in MindSpace
fnirs_buffer = [0,0,0,0,0]  # incoming fNIRS samples, appended by socket handlers
freq_buffer = []            # frequency that was playing when each sample arrived
fnirs_total = []
freq_i = 2
server = Server(port=57110)  # SuperCollider audio server (default scsynth port)
# sio = None
# WARNING: hack, but a small one
sio = socketio.Client()
sio.connect('http://localhost:3002')
# server = Server()
def send_ping():
    """Record the send time in the module-level `start_timer` and emit a
    latency-probe ping to the socket server (answered by pong_from_server).
    """
    global start_timer
    start_timer = time.time()
    sio.emit('ping_from_client')
@sio.event
def connect():
    # Fired once the socket.io client connects; kick off the ping/pong loop.
    print('connected to server')
    send_ping()
@sio.event
def io_message(sid):
    # Each incoming message carries one fNIRS sample under 'data';
    # accumulate it in the shared buffer read by MindSpace.get_observation().
    fnirs_buffer.append(sid['data'])
@sio.event
def pong_from_server(data):
    # Measure the ping round-trip latency, then schedule the next ping after 1 s.
    global start_timer
    latency = time.time() - start_timer
    print('latency is {0:.2f} ms'.format(latency * 1000))
    sio.sleep(1)
    send_ping()
# TODO: abstract into seperate utility file
def moving_average(x, w):
    """Return the rolling mean of `x` over windows of size `w`.

    Uses a 'valid' convolution, so the result has len(x) - w + 1 entries.
    """
    kernel = np.ones(w)
    return np.convolve(x, kernel, mode='valid') / w
class BandSpace(object):
    """Experiment orchestrator: wires up socket.io streaming of fNIRS data,
    drives one MindSpace RL run per frequency band, and persists the
    collected data and plots when a band finishes.

    Fix: `np.array(...).astype(np.float)` used the `np.float` alias that was
    removed in NumPy 1.24 (it was just builtin `float`); replaced with `float`.
    """

    def __init__(self, experiment_name='test', actions=3, experiment_type='bin', is_connected = False):
        self.band_history_scores = []  # one list of episode scores per band
        self.mind_env = MindSpace(is_connected = is_connected)
        self.bands = []
        self.experiment_name = experiment_name
        self.experiment_type = experiment_type
        # NOTE(review): the original also bound locals `score`, `fnirs_buffer`
        # and `freq_buffer` here; those shadowed (and therefore never cleared)
        # the module-level buffers, so the no-op bindings were removed.

    def host(self):
        """Launch the node streamer and host a session on port 3001, relaying
        incoming 'hz' messages back out as 'update' events.
        """
        subprocess.Popen(["node", "../streamer/networker.js"])
        global sio
        # TODO: abstract into seperate file
        sio = socketio.Client()
        while True:
            try:
                sio.connect('http://localhost:3001')
                break
            except Exception as e:
                print('Retrying connection...')
                time.sleep(2)

        def send_ping():
            global start_timer
            start_timer = time.time()
            sio.emit('ping_from_client')

        @sio.event
        def connect():
            print('connected to server')
            send_ping()

        @sio.event
        def update(hz):
            sio.emit('update', hz)

        @sio.event
        def hz_message(sid):
            send_ping()
            print('passing along')
            print()
            print(sid['hz'])

        @sio.event
        def pong_from_server(data):
            global start_timer
            latency = time.time() - start_timer
            print('latency is {0:.2f} ms'.format(latency * 1000))
            sio.sleep(1)
            send_ping()

    def join(self, key):
        """Join an existing session identified by `key`: launch the node
        streamer with the key and mirror incoming 'hz' messages onto the
        local binaural sound's 'diff' parameter.
        """
        subprocess.Popen(["node", "../streamer/networker.js", key])
        # TODO: abstract into seperate file
        # NOTE(review): local client here, unlike host() which rebinds the global.
        sio = socketio.Client()
        while True:
            try:
                sio.connect('http://localhost:3002')
                break
            except Exception as e:
                print('Retrying connection...')
                time.sleep(2)

        def send_ping():
            global start_timer
            start_timer = time.time()
            sio.emit('ping_from_client')

        @sio.event
        def connect():
            print('connected to server')
            send_ping()

        @sio.event
        def hz_message(sid):
            print('Setting sound ' + str(sid['hz']) + 'hz')
            self.mind_env.sound_space.set_sound('diff', sid['hz'])

        @sio.event
        def pong_from_server(data):
            global start_timer
            latency = time.time() - start_timer
            print('latency is {0:.2f} ms'.format(latency * 1000))
            sio.sleep(1)
            send_ping()

    def connect(self):
        """Connect to the local data bridge (port 3002) and log each incoming
        fNIRS sample together with the frequency playing at that moment.
        """
        # TODO: abstract into seperate file
        sio = socketio.Client()
        while True:
            try:
                sio.connect('http://localhost:3002')
                print('connected')
                break
            except Exception as e:
                print('Retrying connection...')
                time.sleep(2)

        def send_ping():
            global start_timer
            start_timer = time.time()
            sio.emit('ping_from_client')

        @sio.event
        def connect():
            print('connected to server')
            send_ping()

        @sio.event
        def io_message(sid):
            # Record the sample and the frequency active when it arrived.
            fnirs_buffer.append(sid['data'])
            freq_buffer.append(freqs[freq_i])

        @sio.event
        def pong_from_server(data):
            global start_timer
            latency = time.time() - start_timer
            print('latency is {0:.2f} ms'.format(latency * 1000))
            sio.sleep(1)
            send_ping()

    def binaural(self):
        """Play the binaural synth (index 4 in sound_params) and perturb it
        forever, once every 10 seconds. Blocks indefinitely.
        """
        self.mind_env.sound_space.add_sound(4)
        while 1:
            self.mind_env.sound_space.perturb_sound()
            time.sleep(10)

    def compose(self):
        """Placeholder composer; only the non-'bin' branch loops (forever)."""
        if self.experiment_type == 'bin':
            print('hooo')
        else:
            while 1:
                time.sleep(2)
                print("composing...")
            # get all params and add sounds in sequence based on normalized weights

    def clear_band_space(self):
        """Tear down all synths, dump the collected fNIRS/frequency series to
        CSV, and write the reward and fNIRS/frequency plots to disk.
        """
        self.mind_env.clear_band_connections()
        print('Scores')
        print(self.band_history_scores)
        print(fnirs_buffer)
        print('freqs')
        print(freq_buffer)
        if len(fnirs_buffer) > 5:
            # Persist raw samples and the frequency active for each sample.
            # (np.float was removed in NumPy 1.24; builtin float is equivalent.)
            np.savetxt(DATA_LOCATION + self.experiment_name + '.csv', np.array(fnirs_buffer).astype(float), delimiter=',')
            np.savetxt(SOUND_LOCATION + self.experiment_name + '_sound.csv', np.array(freq_buffer).astype(float), delimiter=',')
        if len(self.band_history_scores) > 0:
            # 1st plot: rewards concatenated across all bands.
            plt.plot(np.hstack(self.band_history_scores))
            plt.savefig('plot_rewards.png')
            plt.figure()
        # 2nd plot: fNIRS signal (left axis) vs. frequency (right axis).
        # The first 5 buffer entries are the zero-padding seeds and are skipped.
        fig, ax1 = plt.subplots()
        ax2 = ax1.twinx()
        ax1.plot(np.array(fnirs_buffer[5:], dtype=float), 'g-')
        ax2.plot(np.array(freq_buffer, dtype=float), 'b-')
        ax1.set_xlabel('sample')
        ax1.set_ylabel('fnirs', color='g')
        ax2.set_ylabel('frequency', color='b')
        plt.savefig('plot_fnirs.png')

    def new_band(self, band_idx):
        """Train a fresh policy-gradient Agent on band `band_idx` for up to
        `n_episodes` episodes, stopping early once the environment reports
        a steady state. Scores are appended to band_history_scores.
        """
        print('New Band with index ' + str(band_idx))
        self.mind_env.learn_new_sound(band_idx)
        # Observation length after the moving average shrinks the raw window.
        input_dims = observeration_window - (moving_average_window - 1)
        print('input_dims')
        print(input_dims)
        print(observeration_window)
        print(moving_average_window)
        agent = Agent(lr = 0.1, input_dims=[ input_dims ], gamma= 0.99, n_actions=3, l1_size = 128, l2_size = 128)
        score_history = []
        score = 0
        for i in range(n_episodes):
            print('episode: ', i, "score %.f" % score)
            done = False
            score = 0
            observeration = self.mind_env.reset()
            while not done:
                print('OBSERVATION')
                print(observeration)
                action = agent.choose_action(observeration)
                observeration_, reward, done = self.mind_env.step(action)
                agent.store_rewards(reward)
                observeration = observeration_
                score += reward
            score_history.append(score)
            agent.learn()
            print('learning...')
            print("self.env.is_steady")
            print(self.mind_env.is_steady)
            if self.mind_env.is_steady:
                break
        print('Proceeding to next band')
        print(score_history)
        self.band_history_scores.append(score_history)
        self.mind_env.reset_steady_state()
        print(self.mind_env.is_steady)
class MindSpace(object):
    """RL environment over the listener: each step applies one action from
    `action_set` to the current sound, waits for the physiological signal to
    respond, then reads a smoothed window of fNIRS samples as the observation
    and rewards a decrease in the mean signal.

    Fixes: removed a dead `parsed_obs` loop in get_observation() that built
    and discarded a list every call, and replaced the `np.float` alias
    (removed in NumPy 1.24; it was just builtin `float`) with `float`.
    """

    def __init__(self, episode_length = episode_length, observation_time_delay = observation_time_delay, steady_count_max = 2, is_connected = False):
        # Sound backend that actions are applied to.
        self.sound_space = SoundSpace(is_connected)
        self.possibleActions = action_set
        # history[0] is a scalar 0 so the first reward has a baseline.
        self.history = [0]
        self.episode_count = 0
        self.episode_length = episode_length
        # Seconds to wait after an action before sampling the fNIRS buffer.
        self.observation_time_delay = observation_time_delay
        # After `steady_count_max` consecutive SAME_PARAM actions the episode
        # is considered converged ("steady") and terminates.
        self.is_steady = False
        self.steady_count = 0
        self.steady_count_max = steady_count_max

    def learn_new_sound(self, sound_idx):
        """Add the synth with index `sound_idx` (a sound_params key) to the mix."""
        self.sound_space.add_sound(sound_idx)

    def clear_band_connections(self):
        """Silence every running synth."""
        self.sound_space.clear_all_synths()

    def step(self, action_idx):
        """Apply one action, wait, observe, and score.

        Returns (observation, reward, done); done is set once the episode
        length is exceeded or SAME_PARAM was held steady_count_max times.
        """
        self.sound_space.perform_action(action_set[action_idx])
        if action_set[action_idx] == "SAME_PARAM":
            self.steady_count += 1
            print("Steady Count: ", self.steady_count)
            print(self.is_steady)
            print(self.steady_count)
            print(self.steady_count_max)
            if self.steady_count == self.steady_count_max:
                self.is_steady = True
                print('is Steady')
        # Give the physiological signal time to react before observing.
        time.sleep(self.observation_time_delay)
        obs = self.get_observation()
        reward = self.calculate_reward(obs)
        self.episode_count += 1
        print('Reward')
        print(reward)
        done = False
        if self.episode_count > self.episode_length or self.is_steady:
            done = True
        return obs, reward, done

    def reset_steady_state(self):
        """Clear steadiness tracking between bands."""
        self.is_steady = False
        self.steady_count = 0

    def reset(self):
        """Start a new episode and return the initial observation."""
        self.episode_count = 0
        return self.get_observation()

    def get_observation(self, window = observeration_window):
        """Return the moving average (width `moving_average_window`) of the
        last `window` fNIRS samples as a float array of length
        window - moving_average_window + 1.
        """
        obs = fnirs_buffer[-window:]
        print('PRINTING')
        print(np.array(obs).astype(float))
        print(len(np.array(obs).astype(float)))
        print("moving_average_window")
        print(moving_average_window)
        smoothed = moving_average(np.array(obs).astype(float), moving_average_window)
        print(np.array(smoothed).astype(float))
        print(len(np.array(smoothed).astype(float)))
        return np.array(smoothed).astype(float)

    def calculate_reward(self, seq):
        """Reward 5x the drop in mean fNIRS relative to the previous
        observation; punish any increase with a flat -10. Records `seq`
        in self.history as the new baseline.
        """
        x = np.mean(seq)
        y = np.mean(self.history[-1])
        self.history.append(seq)
        if x <= y:
            print('------------------')
            print(x)
            print(y)
            print(x - y)
            print("reward: ", (5.0 * (y - x)))
            return (5.0 * (y - x))  # play with reward heuristics
        else:
            return -10
class SoundSpace(object):
    """Owns the SuperCollider synths and exposes parameter perturbations.

    Note: integer index comparisons previously used ``is`` (identity), which
    is fragile and a SyntaxWarning since Python 3.8; replaced with ``==``.
    """
    def __init__(self, is_connected = False):
        self.freq = 440
        self.parts = 10
        self.group = Group(server)
        self.mul = 0.1
        self.bus = AudioBus(server, 2)
        self.synth_index = -1   # index into sound_params of the active sound
        self.synth = None
        self.synths = []
        self.is_connected = is_connected
    def add_sound(self, index = -1):
        """Instantiate the synth described by ``sound_params[index]``."""
        print('ADDING_SOUND')
        self.synth_index = index
        synth = None
        params = sound_params[index]
        if index == 0:
            # routed through the audio bus and a reverb tail
            print('playing sound index ' + str(index))
            print(params['type'])
            synth = Synth(server, params['type'], { "out": self.bus }, target=self.group)
            synth.set("parts", 5)
            reverb = Synth(server, 'reverb', { "in": self.bus, "out": 0 }, target=self.group, action=ADD_TO_TAIL)
        elif index == 4:  # TODO: only for custom sounds
            print('playing custom sound')
            opts = {}
            opts['diff'] = params['default']
            opts['gain'] = 0.5
            synth = Synth(server, params['type'], opts, target=self.group)
        else:
            print('playing sound index ' + str(index))
            opts = {}
            opts["osc"] = 0.2
            opts[params['param']] = params['default']
            # customized for the sine sound
            if index == 3:
                opts['gain'] = 0.2
            else:
                opts['gain'] = 0.5
            synth = Synth(server, params['type'], opts, target=self.group)
        self.synths.append(synth)
        self.synth = synth
    def perturb_sound(self):
        """Jump the active parameter to a random entry of `freqs`."""
        indices = len(freqs)
        sound = sound_params[self.synth_index]
        param = sound['param']
        freq_i = random.randint(0, indices - 1)
        new_param = freqs[freq_i]
        print("Setting to " + str(new_param) + 'hz.')
        # when acting as a connected client, broadcast the change
        if self.is_connected:
            sio.emit('update', new_param)
        self.set_sound(param, new_param)
    def set_sound(self, param, new_param):
        """Set `param` on the active synth."""
        self.synth.set(param, new_param)
    def clear_all_synths(self):
        """Free the whole group (all synths at once)."""
        print("Freeing group")
        self.group.free()
    def perform_action(self, action):
        """Increase, decrease, or keep the active parameter, clamped near its default."""
        # (a dead `global freq_i` declaration was removed; freq_i is never used here)
        print("performing action")
        print(self.synth_index)
        sound = sound_params[self.synth_index]
        dev = sound['dev']
        param = sound['param']
        default = sound['default']
        if action == "INCREASE_PARAM":
            old_param = self.synth.get(param)
            new_param = old_param * (1 + dev)
            print("checking_max")
            if not new_param > default * 1.5:
                print('Setting ' + param + " to " + str(new_param) + " from " + str(old_param))
                self.synth.set(param, new_param)
            print("( + ) INCREASE_PARAM")
        elif action == "DECREASE_PARAM":
            old_param = self.synth.get(param)
            new_param = old_param * (1 - dev)
            print("checking_min")
            # NOTE(review): the lower bound also uses default * 1.5 — looks like
            # it should mirror the max check (e.g. default / 1.5); confirm intent.
            if not new_param < default * 1.5:
                print(new_param > default * 1.5)
                print('Setting ' + param + " to " + str(new_param) + " from " + str(old_param))
                self.synth.set(param, new_param)
            print("( - ) DECREASE_PARAM")
        elif action == "SAME_PARAM":
            print("( = ) SAME_PARAM")
| [
"subprocess.Popen",
"random.randint",
"socketio.Client",
"os.getcwd",
"supercollider.AudioBus",
"numpy.ones",
"time.time",
"time.sleep",
"numpy.hstack",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"supercollider.Synth",
"supercollider.Server",
"supercollider.Group",
"agent... | [((1992, 2010), 'supercollider.Server', 'Server', ([], {'port': '(57110)'}), '(port=57110)\n', (1998, 2010), False, 'from supercollider import Server, Synth, Buffer, AudioBus, ADD_TO_TAIL, Group\n'), ((2065, 2082), 'socketio.Client', 'socketio.Client', ([], {}), '()\n', (2080, 2082), False, 'import socketio\n'), ((222, 233), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (231, 233), False, 'import os\n'), ((266, 277), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (275, 277), False, 'import os\n'), ((2200, 2211), 'time.time', 'time.time', ([], {}), '()\n', (2209, 2211), False, 'import time\n'), ((2469, 2480), 'time.time', 'time.time', ([], {}), '()\n', (2478, 2480), False, 'import time\n'), ((3244, 3298), 'subprocess.Popen', 'subprocess.Popen', (["['node', '../streamer/networker.js']"], {}), "(['node', '../streamer/networker.js'])\n", (3260, 3298), False, 'import subprocess\n'), ((3359, 3376), 'socketio.Client', 'socketio.Client', ([], {}), '()\n', (3374, 3376), False, 'import socketio\n'), ((4413, 4472), 'subprocess.Popen', 'subprocess.Popen', (["['node', '../streamer/networker.js', key]"], {}), "(['node', '../streamer/networker.js', key])\n", (4429, 4472), False, 'import subprocess\n'), ((4520, 4537), 'socketio.Client', 'socketio.Client', ([], {}), '()\n', (4535, 4537), False, 'import socketio\n'), ((5537, 5554), 'socketio.Client', 'socketio.Client', ([], {}), '()\n', (5552, 5554), False, 'import socketio\n'), ((7617, 7631), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7629, 7631), True, 'from matplotlib import pyplot as plt\n'), ((7872, 7901), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_fnirs.png"""'], {}), "('plot_fnirs.png')\n", (7883, 7901), True, 'from matplotlib import pyplot as plt\n'), ((8287, 8380), 'agent.Agent', 'Agent', ([], {'lr': '(0.1)', 'input_dims': '[input_dims]', 'gamma': '(0.99)', 'n_actions': '(3)', 'l1_size': '(128)', 'l2_size': '(128)'}), '(lr=0.1, input_dims=[input_dims], gamma=0.99, n_actions=3, 
l1_size=128,\n l2_size=128)\n', (8292, 8380), False, 'from agent import Agent\n'), ((10516, 10555), 'time.sleep', 'time.sleep', (['self.observation_time_delay'], {}), '(self.observation_time_delay)\n', (10526, 10555), False, 'import time\n'), ((11802, 11814), 'numpy.mean', 'np.mean', (['seq'], {}), '(seq)\n', (11809, 11814), True, 'import numpy as np\n'), ((11821, 11846), 'numpy.mean', 'np.mean', (['self.history[-1]'], {}), '(self.history[-1])\n', (11828, 11846), True, 'import numpy as np\n'), ((12231, 12244), 'supercollider.Group', 'Group', (['server'], {}), '(server)\n', (12236, 12244), False, 'from supercollider import Server, Synth, Buffer, AudioBus, ADD_TO_TAIL, Group\n'), ((12373, 12392), 'supercollider.AudioBus', 'AudioBus', (['server', '(2)'], {}), '(server, 2)\n', (12381, 12392), False, 'from supercollider import Server, Synth, Buffer, AudioBus, ADD_TO_TAIL, Group\n'), ((13715, 13745), 'random.randint', 'random.randint', (['(0)', '(indices - 1)'], {}), '(0, indices - 1)\n', (13729, 13745), False, 'import random\n'), ((2684, 2694), 'numpy.ones', 'np.ones', (['w'], {}), '(w)\n', (2691, 2694), True, 'import numpy as np\n'), ((3517, 3530), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3527, 3530), False, 'import time\n'), ((3598, 3609), 'time.time', 'time.time', ([], {}), '()\n', (3607, 3609), False, 'import time\n'), ((4678, 4691), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4688, 4691), False, 'import time\n'), ((4759, 4770), 'time.time', 'time.time', ([], {}), '()\n', (4768, 4770), False, 'import time\n'), ((5773, 5786), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5783, 5786), False, 'import time\n'), ((5854, 5865), 'time.time', 'time.time', ([], {}), '()\n', (5863, 5865), False, 'import time\n'), ((6642, 6656), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (6652, 6656), False, 'import time\n'), ((7540, 7571), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_rewards.png"""'], {}), "('plot_rewards.png')\n", (7551, 
7571), True, 'from matplotlib import pyplot as plt\n'), ((7575, 7587), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7585, 7587), True, 'from matplotlib import pyplot as plt\n'), ((7665, 7704), 'numpy.array', 'np.array', (['fnirs_buffer[5:]'], {'dtype': 'float'}), '(fnirs_buffer[5:], dtype=float)\n', (7673, 7704), True, 'import numpy as np\n'), ((7722, 7756), 'numpy.array', 'np.array', (['freq_buffer'], {'dtype': 'float'}), '(freq_buffer, dtype=float)\n', (7730, 7756), True, 'import numpy as np\n'), ((12732, 12799), 'supercollider.Synth', 'Synth', (['server', "params['type']", "{'out': self.bus}"], {'target': 'self.group'}), "(server, params['type'], {'out': self.bus}, target=self.group)\n", (12737, 12799), False, 'from supercollider import Server, Synth, Buffer, AudioBus, ADD_TO_TAIL, Group\n'), ((12839, 12933), 'supercollider.Synth', 'Synth', (['server', '"""reverb"""', "{'in': self.bus, 'out': 0}"], {'target': 'self.group', 'action': 'ADD_TO_TAIL'}), "(server, 'reverb', {'in': self.bus, 'out': 0}, target=self.group,\n action=ADD_TO_TAIL)\n", (12844, 12933), False, 'from supercollider import Server, Synth, Buffer, AudioBus, ADD_TO_TAIL, Group\n'), ((4195, 4206), 'time.time', 'time.time', ([], {}), '()\n', (4204, 4206), False, 'import time\n'), ((5345, 5356), 'time.time', 'time.time', ([], {}), '()\n', (5354, 5356), False, 'import time\n'), ((6310, 6321), 'time.time', 'time.time', ([], {}), '()\n', (6319, 6321), False, 'import time\n'), ((6786, 6799), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6796, 6799), False, 'import time\n'), ((7500, 7535), 'numpy.hstack', 'np.hstack', (['self.band_history_scores'], {}), '(self.band_history_scores)\n', (7509, 7535), True, 'import numpy as np\n'), ((11675, 11688), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (11683, 11688), True, 'import numpy as np\n'), ((13163, 13217), 'supercollider.Synth', 'Synth', (['server', "params['type']", 'opts'], {'target': 'self.group'}), "(server, params['type'], 
opts, target=self.group)\n", (13168, 13217), False, 'from supercollider import Server, Synth, Buffer, AudioBus, ADD_TO_TAIL, Group\n'), ((13479, 13533), 'supercollider.Synth', 'Synth', (['server', "params['type']", 'opts'], {'target': 'self.group'}), "(server, params['type'], opts, target=self.group)\n", (13484, 13533), False, 'from supercollider import Server, Synth, Buffer, AudioBus, ADD_TO_TAIL, Group\n'), ((11247, 11260), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (11255, 11260), True, 'import numpy as np\n'), ((11427, 11440), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (11435, 11440), True, 'import numpy as np\n'), ((11518, 11531), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (11526, 11531), True, 'import numpy as np\n'), ((7197, 7219), 'numpy.array', 'np.array', (['fnirs_buffer'], {}), '(fnirs_buffer)\n', (7205, 7219), True, 'import numpy as np\n'), ((7321, 7342), 'numpy.array', 'np.array', (['freq_buffer'], {}), '(freq_buffer)\n', (7329, 7342), True, 'import numpy as np\n'), ((11291, 11304), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (11299, 11304), True, 'import numpy as np\n'), ((11562, 11575), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (11570, 11575), True, 'import numpy as np\n')] |
"""Data Equivalence Tests"""
from __future__ import print_function
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
import inspect
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mne.utils import _TempDir
from mne.fiff import Raw, pick_types
from mne.fiff.brainvision import read_raw_brainvision
# Locate the test data directory shipped next to this test module.
FILE = inspect.getfile(inspect.currentframe())
data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
vhdr_path = op.join(data_dir, 'test.vhdr')
elp_path = op.join(data_dir, 'test_elp.txt')
eeg_bin = op.join(data_dir, 'test_bin_raw.fif')
# Channel names of the BrainVision test recording, in file order.
ch_names = ['FP1', 'VEOGt', 'F7', 'GND', 'F8',
            'FC5', 'F3', 'FZ', 'F4', 'FC6',
            'FC1', 'FCZ', 'FC2', 'CP5', 'C3',
            'CZ', 'C4', 'CP6', 'CP1', 'CPZ',
            'CP2', 'P7', 'P3', 'PZ', 'P4',
            'P8', 'O1', 'POZ', 'O2', 'A1',
            'A2', 'HEOGL', 'HEOGR', 'VEOGb']
# Scratch directory for files written during the tests.
tempdir = _TempDir()
def test_brainvision_data():
    """Test that BrainVision data equal the MNE-C generated FIF reference."""
    raw_py = read_raw_brainvision(vhdr_path, elp_fname=elp_path,
                                  ch_names=ch_names, preload=True)
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_py, times_py = raw_py[picks]
    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr
    # this fif was generated using MNE-C
    raw_bin = Raw(eeg_bin, preload=True)
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_bin, times_bin = raw_bin[picks]
    # both readers must yield the same samples and time axis
    assert_array_almost_equal(data_py, data_bin)
    assert_array_almost_equal(times_py, times_bin)
def test_read_segment():
    """Test writing raw eeg files when preload is False."""
    # read lazily, save, and reload the saved copy
    raw1 = read_raw_brainvision(vhdr_path, preload=False)
    raw1_file = op.join(tempdir, 'raw1.fif')
    raw1.save(raw1_file, overwrite=True)
    raw11 = Raw(raw1_file, preload=True)
    data1, times1 = raw1[:, :]
    data11, times11 = raw11[:, :]
    # saved copy must match the lazily-read original
    assert_array_almost_equal(data1, data11, 8)
    assert_array_almost_equal(times1, times11)
    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))
    # preloaded read must be identical to the lazy read
    raw2 = read_raw_brainvision(vhdr_path, preload=True)
    raw2_file = op.join(tempdir, 'raw2.fif')
    raw2.save(raw2_file, overwrite=True)
    data2, times2 = raw2[:, :]
    assert_array_equal(data1, data2)
    assert_array_equal(times1, times2)
    # and both saved files must reload to the same data
    raw1 = Raw(raw1_file, preload=True)
    raw2 = Raw(raw2_file, preload=True)
    assert_array_equal(raw1._data, raw2._data)
| [
"mne.fiff.brainvision.read_raw_brainvision",
"os.path.abspath",
"mne.utils._TempDir",
"numpy.testing.assert_array_equal",
"mne.fiff.Raw",
"inspect.currentframe",
"numpy.testing.assert_array_almost_equal",
"mne.fiff.pick_types",
"os.path.join"
] | [((509, 539), 'os.path.join', 'op.join', (['data_dir', '"""test.vhdr"""'], {}), "(data_dir, 'test.vhdr')\n", (516, 539), True, 'import os.path as op\n'), ((551, 584), 'os.path.join', 'op.join', (['data_dir', '"""test_elp.txt"""'], {}), "(data_dir, 'test_elp.txt')\n", (558, 584), True, 'import os.path as op\n'), ((595, 632), 'os.path.join', 'op.join', (['data_dir', '"""test_bin_raw.fif"""'], {}), "(data_dir, 'test_bin_raw.fif')\n", (602, 632), True, 'import os.path as op\n'), ((957, 967), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (965, 967), False, 'from mne.utils import _TempDir\n'), ((416, 438), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (436, 438), False, 'import inspect\n'), ((1063, 1151), 'mne.fiff.brainvision.read_raw_brainvision', 'read_raw_brainvision', (['vhdr_path'], {'elp_fname': 'elp_path', 'ch_names': 'ch_names', 'preload': '(True)'}), '(vhdr_path, elp_fname=elp_path, ch_names=ch_names,\n preload=True)\n', (1083, 1151), False, 'from mne.fiff.brainvision import read_raw_brainvision\n'), ((1194, 1254), 'mne.fiff.pick_types', 'pick_types', (['raw_py.info'], {'meg': '(False)', 'eeg': '(True)', 'exclude': '"""bads"""'}), "(raw_py.info, meg=False, eeg=True, exclude='bads')\n", (1204, 1254), False, 'from mne.fiff import Raw, pick_types\n'), ((1428, 1454), 'mne.fiff.Raw', 'Raw', (['eeg_bin'], {'preload': '(True)'}), '(eeg_bin, preload=True)\n', (1431, 1454), False, 'from mne.fiff import Raw, pick_types\n'), ((1467, 1527), 'mne.fiff.pick_types', 'pick_types', (['raw_py.info'], {'meg': '(False)', 'eeg': '(True)', 'exclude': '"""bads"""'}), "(raw_py.info, meg=False, eeg=True, exclude='bads')\n", (1477, 1527), False, 'from mne.fiff import Raw, pick_types\n'), ((1574, 1618), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['data_py', 'data_bin'], {}), '(data_py, data_bin)\n', (1599, 1618), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1623, 1669), 
'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['times_py', 'times_bin'], {}), '(times_py, times_bin)\n', (1648, 1669), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((1772, 1818), 'mne.fiff.brainvision.read_raw_brainvision', 'read_raw_brainvision', (['vhdr_path'], {'preload': '(False)'}), '(vhdr_path, preload=False)\n', (1792, 1818), False, 'from mne.fiff.brainvision import read_raw_brainvision\n'), ((1835, 1863), 'os.path.join', 'op.join', (['tempdir', '"""raw1.fif"""'], {}), "(tempdir, 'raw1.fif')\n", (1842, 1863), True, 'import os.path as op\n'), ((1917, 1945), 'mne.fiff.Raw', 'Raw', (['raw1_file'], {'preload': '(True)'}), '(raw1_file, preload=True)\n', (1920, 1945), False, 'from mne.fiff import Raw, pick_types\n'), ((2015, 2058), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['data1', 'data11', '(8)'], {}), '(data1, data11, 8)\n', (2040, 2058), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((2063, 2105), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['times1', 'times11'], {}), '(times1, times11)\n', (2088, 2105), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((2188, 2233), 'mne.fiff.brainvision.read_raw_brainvision', 'read_raw_brainvision', (['vhdr_path'], {'preload': '(True)'}), '(vhdr_path, preload=True)\n', (2208, 2233), False, 'from mne.fiff.brainvision import read_raw_brainvision\n'), ((2250, 2278), 'os.path.join', 'op.join', (['tempdir', '"""raw2.fif"""'], {}), "(tempdir, 'raw2.fif')\n", (2257, 2278), True, 'import os.path as op\n'), ((2355, 2387), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['data1', 'data2'], {}), '(data1, data2)\n', (2373, 2387), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((2392, 2426), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['times1', 'times2'], {}), '(times1, 
times2)\n', (2410, 2426), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((2439, 2467), 'mne.fiff.Raw', 'Raw', (['raw1_file'], {'preload': '(True)'}), '(raw1_file, preload=True)\n', (2442, 2467), False, 'from mne.fiff import Raw, pick_types\n'), ((2479, 2507), 'mne.fiff.Raw', 'Raw', (['raw2_file'], {'preload': '(True)'}), '(raw2_file, preload=True)\n', (2482, 2507), False, 'from mne.fiff import Raw, pick_types\n'), ((2512, 2554), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['raw1._data', 'raw2._data'], {}), '(raw1._data, raw2._data)\n', (2530, 2554), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((470, 486), 'os.path.abspath', 'op.abspath', (['FILE'], {}), '(FILE)\n', (480, 486), True, 'import os.path as op\n')] |
"""
nbkode.events
~~~~~~~~~~~~
Methods for dealing with events.
Adapted from: https://github.com/scipy/scipy/blob/v1.5.4/scipy/integrate/_ivp/ivp.py
:copyright: 2020 by nbkode Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from typing import Callable, Iterable
import numpy as np
from numpy import ndarray
from .nbcompat import NO_NUMBA, is_jitted, numba, zeros
@numba.njit()
def _dummy_event(t, y):
    # Trivial event function (first state component); useful as a placeholder.
    return y[0]
if NO_NUMBA:
    # Fall back to a plain Python list when numba is unavailable.
    TypedList = list
else:
    TypedList = numba.typed.List
# Numba signature of an event function: float64(t: float64, y: float64[:]).
_event_type = numba.types.float64(numba.types.float64, numba.types.float64[:])
@numba.njit()
def event_at_sol(t, func, interpolate, rhs, cache, *args):
    """Helper function to find the root for the event function along the solution.
    Parameters
    ----------
    t : float
        timepoint
    func : callable
        jitted event function.
    interpolate : callable
        jitted interpolation function of the solution
    rhs : callable
        right hand side of the dynamical system.
    cache : AlignedBuffer
        cache of the solver.
    args
        extra arguments required for the interpolation function.
    Returns
    -------
    float
        The event function evaluated at the interpolated state y(t).
    """
    # Evaluate the event function on the interpolated solution at time t.
    return func(t, interpolate(t, rhs, cache, *args))
# TODO: Remove this UGLY HACK
@numba.njit()
def _get_tl(val):
    """Helper function to create typed lists. Should be removed when
    we can make empty list to work.
    """
    # Appending one element lets numba infer the list's item type.
    out = TypedList()
    out.append(val)
    return out
@numba.njit()
def _empty_list(val):
    """Helper function to create typed lists. Should be removed when
    we can make empty list to work.
    """
    # Build a one-element typed list from val, then empty it: the item type
    # information survives, yielding a correctly typed empty list.
    out = _get_tl(val)
    out.pop()
    return out
if NO_NUMBA:
    _EVENT_SPEC = None
else:
    # Field typing for the Event jitclass defined below.
    _EVENT_SPEC = [
        ("func", _event_type.as_type()),
        ("is_terminal", numba.bool_),
        ("direction", numba.int_),
        ("last_t", numba.float64),
        ("last_value", numba.float64),
        ("t", numba.types.ListType(numba.float64)),
        ("y", numba.types.ListType(numba.float64[::1])),
    ]
@numba.jitclass(_EVENT_SPEC)
class Event:
    """Helper class to deal with event.
    An event occurs at the zeros of a continuous function of time and state.
    Parameters
    ----------
    func : callable
        jitted function to calculate the event function.
    terminal: bool
        Whether to terminate integration if this event occurs.
    direction: int
        Direction of a zero crossing. If `direction` is positive,
        `event` will only trigger when going from negative to positive,
        and vice versa if `direction` is negative. If 0, then either
        direction will trigger event.
    init_t
    init_value
    Attributes
    ----------
    t : numba typed list of float
        Times at which this event fired.
    y : numba typed list of float arrays
        Interpolated states at those times.
    """
    def __init__(self, func, is_terminal, direction, init_t, init_y):
        self.func = func
        self.is_terminal = is_terminal
        self.direction = direction
        # last evaluated point; used as the bracket start for root finding
        self.last_t = init_t
        self.last_value = func(init_t, init_y)
        self.t = _empty_list(init_t)
        self.y = _empty_list(init_y)
    def evaluate(self, interpolate, rhs, cache, *args):
        # Check for a sign change of the event function over the last step.
        t = cache.t
        y = cache.y
        value = self.func(t, y)
        up = (self.last_value <= 0.0) & (value >= 0.0)
        down = (self.last_value >= 0.0) & (value <= 0.0)
        either = up | down
        # honour the requested crossing direction
        trigger = (
            up & (self.direction > 0)
            | down & (self.direction < 0)
            | either & (self.direction == 0)
        )
        if trigger:
            if value == 0:
                root = t
            else:
                # bisect between the previous and current time points
                root = zeros.bisect(
                    event_at_sol,
                    self.last_t,
                    t,
                    args=(self.func, interpolate, rhs, cache, *args),
                )
            # This is required to avoid duplicates
            if not (self.t and self.t[-1] == root):
                self.t.append(root)
                self.y.append(interpolate(root, rhs, cache, *args))
        self.last_t = t
        self.last_value = value
        # True only for a triggered *terminal* event (signals stop)
        return trigger and self.is_terminal
    @property
    def last_event(self):
        # Most recent (t, y) at which the event fired, or (nan, empty) if none.
        if self.t:
            return self.t[-1], self.y[-1]
        return np.nan, np.empty(0) * np.nan
if NO_NUMBA:
    _EVENT_HANDLER_SPEC = None
else:
    # Field typing for the EventHandler jitclass defined below.
    _EVENT_HANDLER_SPEC = [
        ("events", numba.types.ListType(Event.class_type.instance_type)),
        (
            "last_event",
            numba.types.Tuple((numba.types.float64, numba.types.float64[:])),
        ),
    ]
@numba.jitclass(_EVENT_HANDLER_SPEC)
class EventHandler:
    """Helper class to deal with multiple events.
    N is the number of events to be tracked.
    Parameters
    ----------
    events : numba typed list of Event (length N)
    """
    def __init__(self, events):
        self.events = events
        # (t, y) of the last terminal event; (nan, empty) until one fires
        self.last_event = np.nan, np.empty(0) * np.nan
    def evaluate(self, interpolate, rhs, cache, *args):
        """
        Parameters
        ----------
        interpolate : callable
            interpolator function.
        rhs : callable
            Right-hand side of the system. The calling signature is ``fun(t, y)``.
            Here ``t`` is a scalar, and the ndarray ``y`` has a shape (n,);
            then ``fun`` must return array_like with shape (n,).
        cache : AlignedBuffer
        args
            extra arguments provided to interpolate.
        Returns
        -------
        bool
            True if it should terminate.
        """
        terminate = False
        min_t = -np.inf
        # NOTE(review): min_t is never updated, so every triggering terminal
        # event overwrites last_event; if the *earliest* event was intended,
        # min_t should be set to t inside the branch — confirm.
        for ndx, event in enumerate(self.events):
            if event.evaluate(interpolate, rhs, cache, *args):
                terminate = True
                t, y = event.last_event
                if t > min_t:
                    self.last_event = t, y
        return terminate
def build_handler(events: Iterable[Callable], t: float, y: ndarray) -> EventHandler:
    """Standardize event functions and extract is_terminal and direction."""
    # allow a single callable instead of an iterable of callables
    if callable(events):
        events = (events,)
    evs = TypedList()
    if events is not None:
        for ndx, event in enumerate(events):
            # optional attributes, following scipy's solve_ivp convention
            try:
                is_terminal = event.terminal
            except AttributeError:
                is_terminal = False
            try:
                direction = int(np.sign(event.direction))
            except AttributeError:
                direction = 0
            # jit-compile plain Python event functions on the fly
            if not is_jitted(event):
                event = numba.njit()(event)
            evs.append(Event(event, is_terminal, direction, t, y))
    return EventHandler(evs)
| [
"numpy.empty",
"numpy.sign"
] | [((4358, 4369), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (4366, 4369), True, 'import numpy as np\n'), ((5005, 5016), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (5013, 5016), True, 'import numpy as np\n'), ((6473, 6497), 'numpy.sign', 'np.sign', (['event.direction'], {}), '(event.direction)\n', (6480, 6497), True, 'import numpy as np\n')] |
import pickle
import os
import math
import random
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
import context
from src.device import Device, Device_Type
from src.packet import Packet, Packet_Type
class TCPML():
    """Per-host TCP-like state: packet queues, congestion window and step flags."""

    def __init__(self):
        """Start with empty queues, a unit window and a 10-tick timeout."""
        # Packet bookkeeping.
        self.packets_to_send = []
        self.packets_in_flight = []
        self.pckts_to_resend = []
        # Congestion-control state.
        self.window_size = 1
        self.timeout = 10
        # What happened during the most recent simulation step.
        self.ack_recv_flag = False
        self.ack_timeout_flag = False
class HostML(Device):
    """Network host whose TCP congestion window is chosen by a trained
    scikit-learn model (loaded from model/model.pickle)."""

    def __init__(self, ip: str, buffer_cap=5):
        """Create a host with the given IP and incoming-buffer capacity."""
        super().__init__(ip)
        self.connected_router = None
        self.outgoing_buffer = list()
        self.incoming_buffer = list()
        self.buffer_cap = buffer_cap
        self.tcp = TCPML()
        self.def_seg_no = 1  # next segment number for generated packets
        model_path = os.path.join(os.path.dirname(__file__), os.pardir, 'model/model.pickle')
        # Context manager closes the file handle (the previous bare open() leaked it).
        with open(model_path, 'rb') as model_file:
            self.model = pickle.load(model_file)
        self.poly_features = PolynomialFeatures(degree=2, interaction_only=True)

    def link(self, other: Device):
        """Attach this host to a router."""
        self.connected_router = other

    def get_connected_router(self):
        return self.connected_router

    def device_type(self):
        return Device_Type.HOST

    def send_pckt(self, pckt: Packet):
        """Queue a packet for transmission under TCP control."""
        self.tcp.packets_to_send.append(pckt)

    def send_random_packet(self, to_device: Device):
        """Queue a DATA packet carrying the next segment number."""
        pckt = Packet(self.def_seg_no, self, to_device, Packet_Type.DATA)
        self.send_pckt(pckt)
        self.def_seg_no = self.def_seg_no + 1

    def receive_pckt(self, pckt: Packet):
        """Accept a packet unless the incoming buffer is full (tail drop)."""
        if len(self.incoming_buffer) < self.buffer_cap:
            self.incoming_buffer.append(pckt)

    def __str__(self):
        msg = "Host IP: {}\r\n".format(self.ip)
        msg = msg + "Connected to {}\r\n".format(self.connected_router.get_ip())
        return msg

    def step(self):
        """Advance one tick: process incoming packets, resend timeouts,
        let the model pick the window size, and transmit."""
        super().step()
        self.tcp.ack_recv_flag = False
        self.tcp.ack_timeout_flag = False
        # --- handle incoming packets ---
        for pckt in self.incoming_buffer:
            if pckt.get_pckt_type() == Packet_Type.DATA:
                # acknowledge received data (swap from/to on the ACK)
                ack_pack = Packet(pckt.get_seg_no(), pckt.get_to(), pckt.get_from(), Packet_Type.ACK)
                self.outgoing_buffer.append(ack_pack)
            elif pckt.get_pckt_type() == Packet_Type.ACK:
                # remove the acknowledged packet from the in-flight list...
                seg_no = pckt.get_seg_no()
                index = -1
                for i in range(len(self.tcp.packets_in_flight)):
                    pckt2 = self.tcp.packets_in_flight[i][0]
                    if pckt2.get_seg_no() == seg_no:
                        index = i
                        break
                if index >= 0:
                    # adapt the timeout to the observed round-trip time
                    # (was indexed with the stale loop variable `i`)
                    self.tcp.timeout = self.clock - self.tcp.packets_in_flight[index][1]
                    self.tcp.packets_in_flight.pop(index)
                # ...and from the send queue
                index = -1
                for i in range(len(self.tcp.packets_to_send)):
                    pckt2 = self.tcp.packets_to_send[i]
                    if pckt2.get_seg_no() == seg_no:
                        index = i
                        break
                if index >= 0:
                    self.tcp.packets_to_send.pop(index)
                self.tcp.ack_recv_flag = True
        self.incoming_buffer.clear()
        # --- re-queue any timed-out packets ---
        for i in range(len(self.tcp.packets_in_flight)):
            pckt, t = self.tcp.packets_in_flight[i]
            if self.clock - t > self.tcp.timeout:
                self.tcp.pckts_to_resend.append(i)
        for i in self.tcp.pckts_to_resend:
            pckt = self.tcp.packets_in_flight[i][0]
            self.tcp.packets_to_send.insert(0, pckt)
        for i in sorted(self.tcp.pckts_to_resend, reverse=True):
            del self.tcp.packets_in_flight[i]
        if len(self.tcp.pckts_to_resend) > 0:
            self.tcp.ack_timeout_flag = True
        # --- let the model choose the next congestion window ---
        model_input = np.array([[
            self.tcp.window_size,
            self.tcp.ack_recv_flag,
            self.tcp.ack_timeout_flag
        ]], dtype=np.float32)
        model_input = self.poly_features.fit_transform(model_input)
        model_output = self.model.predict(model_input)
        self.tcp.window_size = int(model_output[0])
        self.tcp.pckts_to_resend.clear()
        if self.tcp.window_size < 1:
            self.tcp.window_size = 1  # minimum window size
        # --- transmit, but only when nothing is currently in flight ---
        if len(self.tcp.packets_in_flight) == 0:
            for i in range(self.tcp.window_size):
                if len(self.tcp.packets_to_send) == 0:
                    break
                pckt = self.tcp.packets_to_send.pop(0)
                self.outgoing_buffer.append(pckt)
                self.tcp.packets_in_flight.append((pckt, self.clock))
        for pckt in self.outgoing_buffer:
            self.connected_router.receive_pckt(pckt)
        self.outgoing_buffer.clear()
if __name__ == "__main__":
    # Smoke test: build a single host and run one simulation step.
    h = HostML("1")
    h.step()
"sklearn.preprocessing.PolynomialFeatures",
"os.path.dirname",
"src.packet.Packet",
"numpy.array"
] | [((1043, 1094), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(2)', 'interaction_only': '(True)'}), '(degree=2, interaction_only=True)\n', (1061, 1094), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1467, 1525), 'src.packet.Packet', 'Packet', (['self.def_seg_no', 'self', 'to_device', 'Packet_Type.DATA'], {}), '(self.def_seg_no, self, to_device, Packet_Type.DATA)\n', (1473, 1525), False, 'from src.packet import Packet, Packet_Type\n'), ((4720, 4828), 'numpy.array', 'np.array', (['[[self.tcp.window_size, self.tcp.ack_recv_flag, self.tcp.ack_timeout_flag]]'], {'dtype': 'np.float32'}), '([[self.tcp.window_size, self.tcp.ack_recv_flag, self.tcp.\n ack_timeout_flag]], dtype=np.float32)\n', (4728, 4828), True, 'import numpy as np\n'), ((896, 921), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (911, 921), False, 'import os\n')] |
"""Reads and writes Cerfacs' XDMF files.
"""
import os
import warnings
import numpy as np
import h5py
import meshio
from meshio._common import num_nodes_per_cell
from pyhip.commands.readers import read_hdf5_mesh
from pyhip.commands.writers import write_hdf5
from pyhip.commands.operations import hip_exit
from pyhip.commands.mesh_idcard import extract_hdf_meshinfo
from pyhip.hipster import pyhip_cmd
import yamio
# Coordinate-axis index -> HDF5 dataset name.
AXIS_MAP = {index: axis for index, axis in enumerate('xyz')}
# meshio element names and their hip counterparts, in both directions.
meshio_to_hip_type = {
    'line': 'bi',
    'triangle': 'tri',
    'quad': 'qua',
    'tetra': 'tet',
    'hexahedron': 'hex',
}
hip_to_meshio_type = {hip_name: meshio_name
                      for meshio_name, hip_name in meshio_to_hip_type.items()}
class HipReader:
    """Reads a hip-style HDF5 mesh into a :class:`yamio.Mesh`."""

    def read(self, filename):
        """Read the mesh whose data live in the ``.h5`` file sharing `filename`'s stem.

        Parameters
        ----------
        filename : str
            Path to the mesh file (typically the ``.xmf`` companion).

        Returns
        -------
        yamio.Mesh
        """
        # os.path.splitext is equivalent to the previous split('.')-based code
        # but also handles extensionless names (which used to yield just '.h5').
        h5_filename = os.path.splitext(filename)[0] + '.h5'
        with h5py.File(h5_filename, 'r') as h5_file:
            cells = self._get_cells(h5_file)
            points = self._get_points(h5_file)
            bnd_patches = self._get_bnd_patches(h5_file)
        return yamio.Mesh(points, cells, bnd_patches=bnd_patches)

    def _get_cells(self, h5_file):
        """Read the (single) volume connectivity group as a one-element CellBlock list."""
        conns_basename = 'Connectivity'
        conns_name = list(h5_file[conns_basename].keys())[0]
        # dataset names look like '<hip_type>-><target>', e.g. 'tet->node'
        elem_type = hip_to_meshio_type[conns_name.split('-')[0]]
        conns_path = f'{conns_basename}/{conns_name}'
        conns = self._read_conns(h5_file, conns_path, elem_type)
        return [meshio.CellBlock(elem_type, conns)]

    def _read_conns(self, h5_file, conns_path, elem_type):
        """Read a flat connectivity dataset, reshaped to (n_cells, nodes_per_cell)."""
        n_nodes_cell = num_nodes_per_cell[elem_type]
        conns = np.array(h5_file[conns_path][()].reshape(-1, n_nodes_cell),
                         dtype=int)
        return self._get_corrected_conns(conns, elem_type)

    def _get_points(self, h5_file):
        """Stack the per-axis coordinate datasets into an (n_points, dim) array."""
        coords_basename = 'Coordinates'
        axes = list(h5_file[coords_basename].keys())
        return np.array([h5_file[f'{coords_basename}/{axis}'][()] for axis in axes]).T

    def _get_corrected_conns(self, conns, elem_type):
        """Apply per-element-type node reordering and shift to 0-based indices."""
        conns = correct_cell_conns_reading.get(elem_type, lambda x: x)(conns)
        conns -= 1  # hip stores 1-based node indices
        return conns

    def _get_bnd_patches(self, h5_file):
        """Collect boundary patches as ``{label: CellBlock}``, or None if absent."""
        bnd_basename = 'Boundary'
        # find the boundary connectivity dataset (e.g. 'bnd_tri->node')
        for name in h5_file[bnd_basename].keys():
            if name.endswith('->node'):
                bnd_conns_name = name
                break
        else:
            return None
        hip_elem_type = bnd_conns_name.split('-')[0].split('_')[1]
        elem_type = hip_to_meshio_type[hip_elem_type]
        conns_path = f'{bnd_basename}/{bnd_conns_name}'
        conns = self._read_conns(h5_file, conns_path, elem_type)
        # patch labels are stored as padded byte strings
        patch_labels = [name.decode('utf-8').strip() for name in h5_file[f'{bnd_basename}/PatchLabels'][()]]
        # slice the flat connectivity per patch via the last-index table
        last_indices = h5_file[f'{bnd_basename}/bnd_{hip_elem_type}_lidx'][()]
        fidx = 0
        bnd_patches = {}
        for patch_label, lidx in zip(patch_labels, last_indices):
            bnd_patches[patch_label] = meshio.CellBlock(elem_type, conns[fidx:lidx])
            fidx = lidx
        return bnd_patches
class HipWriter:
def write(self, filename, mesh, commands=()):
"""
Args:
commands (array-like): Additional operations to be performed
between reading and writing within `hip`.
Notes:
If patches do not exist, then the file can still be written, but
several Hip features will not be available.
"""
pre_read_commands = []
file_basename = filename.split('.')[0]
tmp_filename = f'{file_basename}_tmp.mesh.h5'
with h5py.File(tmp_filename, 'w') as h5_file:
# write mesh topology (conns)
self._write_conns(h5_file, mesh)
# write mesh coordinates
self._write_coords(h5_file, mesh)
# write boundary data (only in h5 file)
if not hasattr(mesh, 'bnd_patches') or not mesh.bnd_patches:
h5_file.create_group('Boundary')
pre_read_commands.append('set check 0')
else:
self._write_bnd_patches(h5_file, mesh.bnd_patches)
# use pyhip to complete the file
for command in pre_read_commands:
pyhip_cmd(command)
read_hdf5_mesh(tmp_filename)
for command in commands:
pyhip_cmd(command)
write_hdf5(file_basename)
hip_exit()
# delete tmp file
os.remove(tmp_filename)
# validate mesh (volume)
# TODO: remove when new version of hip is available
try:
mesh_filename = f'{file_basename}.mesh.h5'
mesh_info, *_ = extract_hdf_meshinfo(mesh_filename)
if mesh_info['Metric']['Element volume [m3]'].min < 0:
raise Exception('Invalid grid: elements with negative volume')
except KeyError:
warnings.warn("Mesh validation was not performed.")
def _write_conns(self, h5_file, mesh):
# ignores mixed case
elem_type = mesh.cells[0].type
conns = mesh.cells[0].data.copy()
conns = correct_cell_conns_writing.get(elem_type, lambda x: x)(conns)
conns += 1
hip_elem_type = meshio_to_hip_type[elem_type]
h5_path = f'/Connectivity/{hip_elem_type}->node'
h5_file.create_dataset(h5_path, data=conns.ravel())
def _write_coords(self, h5_file, mesh):
points = mesh.points
for axis in range(points.shape[1]):
h5_file.create_dataset(f'/Coordinates/{AXIS_MAP[axis]}',
data=points[:, axis])
def _write_bnd_patches(self, h5_file, bnd_patches):
"""
Notes:
Only writes to Boundary and let's hip take care of everything else.
"""
# collect info
patch_labels = list(bnd_patches.keys())
bnd_node_groups = []
for patch_nodes in bnd_patches.values():
if isinstance(patch_nodes, meshio.CellBlock):
bnd_node_groups.append(np.unique(patch_nodes.data.ravel()))
else:
bnd_node_groups.append(patch_nodes)
nodes = np.concatenate(bnd_node_groups, axis=0)
group_dims = np.cumsum([len(node_groups) for node_groups in bnd_node_groups],
dtype=int)
# write to h5
h5_file.create_dataset('Boundary/PatchLabels', data=patch_labels,
dtype='S24')
h5_file.create_dataset('Boundary/bnode->node', data=nodes + 1)
h5_file.create_dataset('Boundary/bnode_lidx', data=group_dims)
def _correct_tetra_conns_reading(cells):
new_cells = cells.copy()
new_cells[:, [1, 2]] = new_cells[:, [2, 1]]
return new_cells
def _correct_tetra_conns_writing(cells):
new_cells = cells.copy()
new_cells[:, [2, 1]] = new_cells[:, [1, 2]]
return new_cells
# uses meshio names
correct_cell_conns_reading = {'tetra': _correct_tetra_conns_reading}
correct_cell_conns_writing = {'tetra': _correct_tetra_conns_writing}
| [
"pyhip.commands.readers.read_hdf5_mesh",
"os.remove",
"h5py.File",
"yamio.Mesh",
"pyhip.commands.writers.write_hdf5",
"numpy.array",
"pyhip.commands.mesh_idcard.extract_hdf_meshinfo",
"warnings.warn",
"meshio.CellBlock",
"pyhip.hipster.pyhip_cmd",
"numpy.concatenate",
"pyhip.commands.operation... | [((1087, 1137), 'yamio.Mesh', 'yamio.Mesh', (['points', 'cells'], {'bnd_patches': 'bnd_patches'}), '(points, cells, bnd_patches=bnd_patches)\n', (1097, 1137), False, 'import yamio\n'), ((4420, 4448), 'pyhip.commands.readers.read_hdf5_mesh', 'read_hdf5_mesh', (['tmp_filename'], {}), '(tmp_filename)\n', (4434, 4448), False, 'from pyhip.commands.readers import read_hdf5_mesh\n'), ((4521, 4546), 'pyhip.commands.writers.write_hdf5', 'write_hdf5', (['file_basename'], {}), '(file_basename)\n', (4531, 4546), False, 'from pyhip.commands.writers import write_hdf5\n'), ((4555, 4565), 'pyhip.commands.operations.hip_exit', 'hip_exit', ([], {}), '()\n', (4563, 4565), False, 'from pyhip.commands.operations import hip_exit\n'), ((4601, 4624), 'os.remove', 'os.remove', (['tmp_filename'], {}), '(tmp_filename)\n', (4610, 4624), False, 'import os\n'), ((6300, 6339), 'numpy.concatenate', 'np.concatenate', (['bnd_node_groups'], {'axis': '(0)'}), '(bnd_node_groups, axis=0)\n', (6314, 6339), True, 'import numpy as np\n'), ((882, 909), 'h5py.File', 'h5py.File', (['h5_filename', '"""r"""'], {}), "(h5_filename, 'r')\n", (891, 909), False, 'import h5py\n'), ((1477, 1511), 'meshio.CellBlock', 'meshio.CellBlock', (['elem_type', 'conns'], {}), '(elem_type, conns)\n', (1493, 1511), False, 'import meshio\n'), ((1942, 2011), 'numpy.array', 'np.array', (["[h5_file[f'{coords_basename}/{axis}'][()] for axis in axes]"], {}), "([h5_file[f'{coords_basename}/{axis}'][()] for axis in axes])\n", (1950, 2011), True, 'import numpy as np\n'), ((3136, 3181), 'meshio.CellBlock', 'meshio.CellBlock', (['elem_type', 'conns[fidx:lidx]'], {}), '(elem_type, conns[fidx:lidx])\n', (3152, 3181), False, 'import meshio\n'), ((3768, 3796), 'h5py.File', 'h5py.File', (['tmp_filename', '"""w"""'], {}), "(tmp_filename, 'w')\n", (3777, 3796), False, 'import h5py\n'), ((4393, 4411), 'pyhip.hipster.pyhip_cmd', 'pyhip_cmd', (['command'], {}), '(command)\n', (4402, 4411), False, 'from pyhip.hipster 
import pyhip_cmd\n'), ((4494, 4512), 'pyhip.hipster.pyhip_cmd', 'pyhip_cmd', (['command'], {}), '(command)\n', (4503, 4512), False, 'from pyhip.hipster import pyhip_cmd\n'), ((4815, 4850), 'pyhip.commands.mesh_idcard.extract_hdf_meshinfo', 'extract_hdf_meshinfo', (['mesh_filename'], {}), '(mesh_filename)\n', (4835, 4850), False, 'from pyhip.commands.mesh_idcard import extract_hdf_meshinfo\n'), ((5034, 5085), 'warnings.warn', 'warnings.warn', (['"""Mesh validation was not performed."""'], {}), "('Mesh validation was not performed.')\n", (5047, 5085), False, 'import warnings\n')] |
# ==================================================================
## {Description} -Mean Shift Intro - Practical Machine Learning Tutorial with Python p.41
# ==================================================================
## {License_info}
# ==================================================================
## Author: {G}
## Copyright: Copyright {year}, {project_name}
## Credits: [{credit_list}]
## License: {license}
## Version: {mayor}.{minor}.{rel}
## Maintainer: {maintainer}
## Email: {contact_email}
## Status: {dev_status}
# ==================================================================
# Import Libraries
# -----------------------------------------------------
# Installing packages -
# -----------------------------------------------------
import os
import os.path as path
ResourceDir = os.getcwd()
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
import numpy as np
# -----------------------------------------------------
# Python Script Starting Point - P41
# -----------------------------------------------------
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
style.use('ggplot')
X, y = make_blobs(n_samples=100, centers=3, n_features=2)
##X = np.array([[1, 2],
## [1.5, 1.8],
## [5, 8],
## [8, 8],
## [1, 0.6],
## [9, 11],
## [8, 2],
## [10, 2],
## [9, 3]])
##plt.scatter(X[:, 0],X[:, 1], marker = "x", s=150, linewidths = 5, zorder = 10)
##plt.show()
'''
1. Start at every datapoint as a cluster center
2. take mean of radius around cluster, setting that as new cluster center
3. Repeat #2 until convergence.
'''
class Mean_Shift:
def __init__(self, radius = None, radius_norm_step = 100):
self.radius = radius
self.radius_norm_step = radius_norm_step
def fit(self,data):
if self.radius == None:
all_data_centroid = np.average(data,axis=0)
all_data_norm = np.linalg.norm(all_data_centroid)
self.radius = all_data_norm/self.radius_norm_step
print(self.radius)
centroids = {}
for i in range(len(data)):
centroids[i] = data[i]
weights = [i for i in range(self.radius_norm_step)][::-1]
while True:
new_centroids = []
for i in centroids:
in_bandwidth = []
centroid = centroids[i]
for featureset in data:
distance = np.linalg.norm(featureset-centroid)
if distance == 0:
distance = 0.00000000001
weight_index = int(distance/self.radius)
if weight_index > self.radius_norm_step-1:
weight_index = self.radius_norm_step-1
to_add = (weights[weight_index]**2)*[featureset]
in_bandwidth +=to_add
new_centroid = np.average(in_bandwidth,axis=0)
new_centroids.append(tuple(new_centroid))
uniques = sorted(list(set(new_centroids)))
to_pop = []
for i in uniques:
for ii in [i for i in uniques]:
if i == ii:
pass
elif np.linalg.norm(np.array(i)-np.array(ii)) <= self.radius:
#print(np.array(i), np.array(ii))
to_pop.append(ii)
break
for i in to_pop:
try:
uniques.remove(i)
except:
pass
prev_centroids = dict(centroids)
centroids = {}
for i in range(len(uniques)):
centroids[i] = np.array(uniques[i])
optimized = True
for i in centroids:
if not np.array_equal(centroids[i], prev_centroids[i]):
optimized = False
if optimized:
break
self.centroids = centroids
self.classifications = {}
for i in range(len(self.centroids)):
self.classifications[i] = []
for featureset in data:
#compare distance to either centroid
distances = [np.linalg.norm(featureset-self.centroids[centroid]) for centroid in self.centroids]
#print(distances)
classification = (distances.index(min(distances)))
# featureset that belongs to that cluster
self.classifications[classification].append(featureset)
def predict(self,data):
#compare distance to either centroid
distances = [np.linalg.norm(data-self.centroids[centroid]) for centroid in self.centroids]
classification = (distances.index(min(distances)))
return classification
clf = Mean_Shift()
clf.fit(X)
centroids = clf.centroids
print(centroids)
colors = 10*['r','g','b','c','k','y']
for classification in clf.classifications:
color = colors[classification]
for featureset in clf.classifications[classification]:
plt.scatter(featureset[0],featureset[1], marker = "x", color=color, s=150, linewidths = 5, zorder = 10)
for c in centroids:
plt.scatter(centroids[c][0],centroids[c][1], color='k', marker = "*", s=150, linewidths = 5)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.style.use",
"numpy.average",
"os.getcwd",
"matplotlib.pyplot.scatter",
"numpy.linalg.norm",
"numpy.array",
"sklearn.datasets.samples_generator.make_blobs",
"numpy.array_equal"
] | [((813, 824), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (822, 824), False, 'import os\n'), ((886, 905), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (895, 905), False, 'from matplotlib import style\n'), ((1214, 1233), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1223, 1233), False, 'from matplotlib import style\n'), ((1242, 1292), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'n_samples': '(100)', 'centers': '(3)', 'n_features': '(2)'}), '(n_samples=100, centers=3, n_features=2)\n', (1252, 1292), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((5431, 5441), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5439, 5441), True, 'import matplotlib.pyplot as plt\n'), ((5337, 5430), 'matplotlib.pyplot.scatter', 'plt.scatter', (['centroids[c][0]', 'centroids[c][1]'], {'color': '"""k"""', 'marker': '"""*"""', 's': '(150)', 'linewidths': '(5)'}), "(centroids[c][0], centroids[c][1], color='k', marker='*', s=150,\n linewidths=5)\n", (5348, 5430), True, 'import matplotlib.pyplot as plt\n'), ((5208, 5310), 'matplotlib.pyplot.scatter', 'plt.scatter', (['featureset[0]', 'featureset[1]'], {'marker': '"""x"""', 'color': 'color', 's': '(150)', 'linewidths': '(5)', 'zorder': '(10)'}), "(featureset[0], featureset[1], marker='x', color=color, s=150,\n linewidths=5, zorder=10)\n", (5219, 5310), True, 'import matplotlib.pyplot as plt\n'), ((2044, 2068), 'numpy.average', 'np.average', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2054, 2068), True, 'import numpy as np\n'), ((2096, 2129), 'numpy.linalg.norm', 'np.linalg.norm', (['all_data_centroid'], {}), '(all_data_centroid)\n', (2110, 2129), True, 'import numpy as np\n'), ((4779, 4826), 'numpy.linalg.norm', 'np.linalg.norm', (['(data - self.centroids[centroid])'], {}), '(data - self.centroids[centroid])\n', (4793, 4826), True, 'import numpy as np\n'), ((3069, 3101), 'numpy.average', 'np.average', 
(['in_bandwidth'], {'axis': '(0)'}), '(in_bandwidth, axis=0)\n', (3079, 3101), True, 'import numpy as np\n'), ((3876, 3896), 'numpy.array', 'np.array', (['uniques[i]'], {}), '(uniques[i])\n', (3884, 3896), True, 'import numpy as np\n'), ((4383, 4436), 'numpy.linalg.norm', 'np.linalg.norm', (['(featureset - self.centroids[centroid])'], {}), '(featureset - self.centroids[centroid])\n', (4397, 4436), True, 'import numpy as np\n'), ((2615, 2652), 'numpy.linalg.norm', 'np.linalg.norm', (['(featureset - centroid)'], {}), '(featureset - centroid)\n', (2629, 2652), True, 'import numpy as np\n'), ((3983, 4030), 'numpy.array_equal', 'np.array_equal', (['centroids[i]', 'prev_centroids[i]'], {}), '(centroids[i], prev_centroids[i])\n', (3997, 4030), True, 'import numpy as np\n'), ((3420, 3431), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (3428, 3431), True, 'import numpy as np\n'), ((3432, 3444), 'numpy.array', 'np.array', (['ii'], {}), '(ii)\n', (3440, 3444), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""This script generates a sklearn random forest
model that predict values that were created using the
formula (a + b) / 2
The two arrays a and b must have values with range [0, 256].
"""
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
from sklearn.externals import joblib
def compute_efficiency(model_result, measurement):
diff = model_result - measurement
eff = 1- sum(diff * diff)/((measurement.var()) * len(measurement))
return(eff)
print("Create training data")
# Train a value adder that represents the formula (a + b) / 2
a = np.random.randint(-1, 257, 1000)
b = np.random.randint(-1, 257, 1000)
# Create the predicting data that is used for training
c = (a + b)/2
# Cast to integer
y = c.astype(int)
print("Train random forest model")
model = RandomForestRegressor(n_estimators=100, max_depth=12, max_features="log2", n_jobs=16,
min_samples_split=2, min_samples_leaf=1, verbose=0)
# This is the training data with two arrays
X = pd.DataFrame()
X["a"] = a
X["b"] = b
# Fit the model and compute the model efficiency
model = model.fit(X, y)
# Predict values
predicted_values = model.predict(X)
# Compute the score of the model
score = model.score(X, y)
# Compute the mean square error
mse = mean_squared_error(predicted_values, y)
print("Model score", score, "MSE", mse)
print("Perform 10-fold cross validation")
# Perform the cross validation and compute the model efficiency
cv_score = cross_val_score(model, X, y, cv=10)
cv_predicted_values = cross_val_predict(model, X, y, cv=10)
# Compute the efficiency of the cross validation
cv_eff = compute_efficiency(cv_predicted_values, y)
# Compute the mean square error
cv_mse = mean_squared_error(cv_predicted_values, y)
print("CV score", cv_eff, "MSE", cv_mse)
print("Save the model as compressed joblib object")
# Save the model with compression
joblib.dump(value=model, filename='rf_add_model.pkl.xz', compress=("xz", 3))
| [
"pandas.DataFrame",
"sklearn.externals.joblib.dump",
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.cross_val_predict",
"sklearn.ensemble.RandomForestRegressor",
"numpy.random.randint",
"sklearn.metrics.mean_squared_error"
] | [((772, 804), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(257)', '(1000)'], {}), '(-1, 257, 1000)\n', (789, 804), True, 'import numpy as np\n'), ((809, 841), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(257)', '(1000)'], {}), '(-1, 257, 1000)\n', (826, 841), True, 'import numpy as np\n'), ((991, 1132), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)', 'max_depth': '(12)', 'max_features': '"""log2"""', 'n_jobs': '(16)', 'min_samples_split': '(2)', 'min_samples_leaf': '(1)', 'verbose': '(0)'}), "(n_estimators=100, max_depth=12, max_features='log2',\n n_jobs=16, min_samples_split=2, min_samples_leaf=1, verbose=0)\n", (1012, 1132), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1207, 1221), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1219, 1221), True, 'import pandas as pd\n'), ((1469, 1508), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['predicted_values', 'y'], {}), '(predicted_values, y)\n', (1487, 1508), False, 'from sklearn.metrics import mean_squared_error\n'), ((1669, 1704), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X', 'y'], {'cv': '(10)'}), '(model, X, y, cv=10)\n', (1684, 1704), False, 'from sklearn.model_selection import cross_val_score\n'), ((1727, 1764), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['model', 'X', 'y'], {'cv': '(10)'}), '(model, X, y, cv=10)\n', (1744, 1764), False, 'from sklearn.model_selection import cross_val_predict\n'), ((1907, 1949), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['cv_predicted_values', 'y'], {}), '(cv_predicted_values, y)\n', (1925, 1949), False, 'from sklearn.metrics import mean_squared_error\n'), ((2080, 2156), 'sklearn.externals.joblib.dump', 'joblib.dump', ([], {'value': 'model', 'filename': '"""rf_add_model.pkl.xz"""', 'compress': "('xz', 3)"}), "(value=model, filename='rf_add_model.pkl.xz', compress=('xz', 3))\n", (2091, 
2156), False, 'from sklearn.externals import joblib\n')] |
import tensorflow as tf
if __name__=='__main__':
tf.compat.v1.enable_eager_execution()
import random
import numpy as np
from tensorflow import keras
import math
def step_decay(epoch, initial_lrate=0.1, drop=0.5, epochs_drop=10.0):
'''
Specify the learning step decay
Example usage:
epochs = np.arange(1,100)
initial_lrate = 0.1
lr = [step_decay(e,drop=initial_lrate/0.125) for e in epochs]
plt.plot(epochs,lr)
'''
# drop = exp_decay(epoch,drop)
lrate = initial_lrate * math.pow(drop,
math.floor((1 + epoch) / epochs_drop))
return lrate
def exp_decay(epoch, initial_lrate=0.01):
'''
Specify the learning exp. decay
Example usage:
epochs = np.arange(1,100)
lr = [exp_decay(e) for e in epochs]
plt.plot(epochs,lr)
'''
k = 0.1
t = epoch
lrate = initial_lrate * np.exp(-k * t)
return lrate
def build_model(input_shape=(14,), seed=49, num_layers=3, num_hidden=100, num_class=2, optimizer=None):
'''
Create basic model consisting of a set of Dense layers
:param input_shape: shape of input tensor
:param seed: number used for seed initialisation
:param num_layers: number of Dense layers
:param num_hidden: number of units per dense layer
:param num_class: number of classes (used in the output layer)
:param optimizer: optimiser to be used
:return: compiled model
'''
tf.compat.v1.reset_default_graph()
graph_level_seed = seed
operation_level_seed = seed
random.seed(operation_level_seed)
np.random.seed(operation_level_seed)
tf.compat.v1.set_random_seed(graph_level_seed)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(shape=input_shape))
for i in range(num_layers):
model.add(
tf.keras.layers.Dense(num_hidden, activation=tf.nn.relu, kernel_regularizer=keras.regularizers.l2(0.03)))
model.add(tf.keras.layers.Dense(num_class, activation=tf.nn.softmax))
if optimizer is None:
optimizer = tf.keras.optimizers.RMSprop(0.01, decay=0.005)
loss = tf.keras.losses.categorical_crossentropy
model.compile(loss=loss,
optimizer=optimizer,
metrics=['accuracy'])
return model
def clone_model(model):
'''
Clone and compile the model, including the same parameters, and hyperparameters (e.g. optimiser and loss)
:param model: original model (keras)
:return: cloned model
'''
model_orig = tf.keras.models.clone_model(model)
model_orig.build(input_shape=model.input_shape)
model_orig.compile(optimizer=model.optimizer, loss=model.loss, metrics=model.metrics)
return model_orig
# TODO: cleanup the weird comments
def fit_model(model, X_train, Y_train, EPOCHS=20, batch_size=256, verbose=0, use_checkpoint=False):
# TODO: start using learning rate scheculer
# if use_lr_scheduler:
# from keras.callbacks import LearningRateScheduler
# lrate = LearningRateScheduler(exp_decay)
callbacks = []
if use_checkpoint:
best_weights_filepath = './best_weights.hdf5'
# if os.path.exists(best_weights_filepath):
# os.remove(best_weights_filepath)
saveBestModel = keras.callbacks.ModelCheckpoint(best_weights_filepath, monitor='val_loss', verbose=0,
save_best_only=True, mode='auto')
callbacks.append(saveBestModel)
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=100, verbose=1, mode='auto')
callbacks.append(early_stop)
history = model.fit(X_train, Y_train, epochs=EPOCHS, batch_size=batch_size,
validation_split=0.1, verbose=verbose,
callbacks=callbacks)
# reload best weights
# model.load_weights(best_weights_filepath)
return history
| [
"tensorflow.keras.optimizers.RMSprop",
"numpy.random.seed",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.keras.models.clone_model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Sequential",
"math.floor",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.compat.v1.set_rand... | [((54, 91), 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), '()\n', (89, 91), True, 'import tensorflow as tf\n'), ((1453, 1487), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (1485, 1487), True, 'import tensorflow as tf\n'), ((1552, 1585), 'random.seed', 'random.seed', (['operation_level_seed'], {}), '(operation_level_seed)\n', (1563, 1585), False, 'import random\n'), ((1590, 1626), 'numpy.random.seed', 'np.random.seed', (['operation_level_seed'], {}), '(operation_level_seed)\n', (1604, 1626), True, 'import numpy as np\n'), ((1631, 1677), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['graph_level_seed'], {}), '(graph_level_seed)\n', (1659, 1677), True, 'import tensorflow as tf\n'), ((1691, 1712), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (1710, 1712), True, 'import tensorflow as tf\n'), ((2525, 2559), 'tensorflow.keras.models.clone_model', 'tf.keras.models.clone_model', (['model'], {}), '(model)\n', (2552, 2559), True, 'import tensorflow as tf\n'), ((3510, 3601), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(100)', 'verbose': '(1)', 'mode': '"""auto"""'}), "(monitor='val_loss', patience=100, verbose=1,\n mode='auto')\n", (3539, 3601), False, 'from tensorflow import keras\n'), ((895, 909), 'numpy.exp', 'np.exp', (['(-k * t)'], {}), '(-k * t)\n', (901, 909), True, 'import numpy as np\n'), ((1727, 1767), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1748, 1767), True, 'import tensorflow as tf\n'), ((1954, 2012), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_class'], {'activation': 'tf.nn.softmax'}), '(num_class, activation=tf.nn.softmax)\n', (1975, 2012), True, 'import tensorflow as tf\n'), ((2061, 2107), 
'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', (['(0.01)'], {'decay': '(0.005)'}), '(0.01, decay=0.005)\n', (2088, 2107), True, 'import tensorflow as tf\n'), ((3276, 3399), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['best_weights_filepath'], {'monitor': '"""val_loss"""', 'verbose': '(0)', 'save_best_only': '(True)', 'mode': '"""auto"""'}), "(best_weights_filepath, monitor='val_loss',\n verbose=0, save_best_only=True, mode='auto')\n", (3307, 3399), False, 'from tensorflow import keras\n'), ((575, 612), 'math.floor', 'math.floor', (['((1 + epoch) / epochs_drop)'], {}), '((1 + epoch) / epochs_drop)\n', (585, 612), False, 'import math\n'), ((1909, 1936), 'tensorflow.keras.regularizers.l2', 'keras.regularizers.l2', (['(0.03)'], {}), '(0.03)\n', (1930, 1936), False, 'from tensorflow import keras\n')] |
import numpy as np
class Atom:
"""
The class "Atom" defines atom objects which have the attributes:
:param id: atom id number
:param element: chemical element
:param pos: x,y,z-coordinates of atom position
:param mag_mom: magnetic moment of atom (only used in class: vasp)
:param dyn: selective dynamics to be applied to this atom (only used in class:vasp)
"""
def __init__(
self,
id: int = 0,
element: str = "H",
position: np.ndarray = np.zeros(shape=(1, 3)),
velocity: np.ndarray = np.zeros(shape=(1, 3)),
magnetic_moment: np.ndarray = np.zeros(shape=(1, 3)),
):
self.id = id
self.element = element
self.position = position
self.velocity = velocity
self.magnetic_moment = magnetic_moment
return
def set_id(self, new_id: int = 0):
"""
Changes or assigns a new atom ID to an atom.
:param new_id: new atom id
:return: none
"""
self.id = new_id
return self
def set_element(self, new_element: str = "H"):
"""
Changes chemical element of atom
:param new_element: new element
:return: none
"""
self.element = new_element
return
def set_position(self, new_position: np.ndarray = np.zeros(shape=(1, 3))):
"""
Changes or updates atomic position to new specified atomic position.
:param new_pos: new atomic position
:return: none
"""
self.position = new_position
return
def displace(self, displacement: np.ndarray = np.zeros(shape=(1, 3))):
"""
Displace an atom by certain values. Default is (0,0,0)
:param displacement: Displacement vector to be applied to atom
:return: none
"""
new_position = self.position + displacement
return self.set_position(new_position)
def set_velocity(self, new_velocity: np.ndarray = np.zeros(shape=(1, 3))):
self.velocity = new_velocity
return
def accelerate(self, jolt: np.ndarray = np.zeros(shape=(1, 3))):
new_velocity = self.velocity + jolt
return self.set_velocity(new_velocity)
def set_magmom(self, new_magnetic_moment: np.ndarray = np.zeros(shape=(1, 3))):
self.magnetic_moment = new_magnetic_moment
return
def create(
id: int = 0,
element: str = "H",
position: np.ndarray = np.zeros(shape=(1, 3)),
velocity: np.ndarray = np.zeros(shape=(1, 3)),
magnetic_moment: np.ndarray = np.zeros(shape=(1, 3)),
) -> Atom:
return Atom(
id=id,
element=element,
position=position,
velocity=velocity,
magnetic_moment=magnetic_moment,
)
def displace_and_add(
atom: Atom,
displacement: np.ndarray = np.zeros(shape=(1, 3)),
) -> Atom:
new_atom = create(
atom.id,
atom.element,
atom.position,
atom.velocity,
atom.magnetic_moment,
)
new_atom.displace(displacement)
return new_atom
| [
"numpy.zeros"
] | [((2480, 2502), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2488, 2502), True, 'import numpy as np\n'), ((2531, 2553), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2539, 2553), True, 'import numpy as np\n'), ((2589, 2611), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2597, 2611), True, 'import numpy as np\n'), ((2853, 2875), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2861, 2875), True, 'import numpy as np\n'), ((508, 530), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (516, 530), True, 'import numpy as np\n'), ((563, 585), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (571, 585), True, 'import numpy as np\n'), ((625, 647), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (633, 647), True, 'import numpy as np\n'), ((1347, 1369), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (1355, 1369), True, 'import numpy as np\n'), ((1650, 1672), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (1658, 1672), True, 'import numpy as np\n'), ((2009, 2031), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2017, 2031), True, 'import numpy as np\n'), ((2131, 2153), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2139, 2153), True, 'import numpy as np\n'), ((2307, 2329), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2315, 2329), True, 'import numpy as np\n')] |
import json
from functools import partial
from pathlib import Path
from datetime import datetime
from itertools import repeat
from collections import OrderedDict
import torch
from PIL import Image
import numpy as np
import torch.nn.functional as F
def map_fn(batch, fn):
if isinstance(batch, dict):
for k in batch.keys():
batch[k] = map_fn(batch[k], fn)
return batch
elif isinstance(batch, list):
return [map_fn(e, fn) for e in batch]
else:
return fn(batch)
def to(data, device):
if isinstance(data, dict):
return {k: to(data[k], device) for k in data.keys()}
elif isinstance(data, list):
return [to(v, device) for v in data]
else:
return data.to(device)
eps = 1e-6
def preprocess_roi(depth_prediction, depth_gt: torch.Tensor, roi):
if roi is not None:
if isinstance(depth_prediction, list):
depth_prediction = [dpr[:, :, roi[0]:roi[1], roi[2]:roi[3]] for dpr in depth_prediction]
else:
depth_prediction = depth_prediction[:, :, roi[0]:roi[1], roi[2]:roi[3]]
depth_gt = depth_gt[:, :, roi[0]:roi[1], roi[2]:roi[3]]
return depth_prediction, depth_gt
def get_absolute_depth(depth_prediction, depth_gt: torch.Tensor, max_distance=None):
if max_distance is not None:
if isinstance(depth_prediction, list):
depth_prediction = [torch.clamp_min(dpr, 1 / max_distance) for dpr in depth_prediction]
else:
depth_prediction = torch.clamp_min(depth_prediction, 1 / max_distance)
depth_gt = torch.clamp_min(depth_gt, 1 / max_distance)
if isinstance(depth_prediction, list):
return [1 / dpr for dpr in depth_prediction], 1 / depth_gt
else:
return 1 / depth_prediction, 1 / depth_gt
def get_positive_depth(depth_prediction: torch.Tensor, depth_gt: torch.Tensor):
if isinstance(depth_prediction, list):
depth_prediction = [torch.nn.functional.relu(dpr) for dpr in depth_prediction]
else:
depth_prediction = torch.nn.functional.relu(depth_prediction)
depth_gt = torch.nn.functional.relu(depth_gt)
return depth_prediction, depth_gt
def depthmap_to_points(depth: torch.Tensor, intrinsics: torch.Tensor, flatten=False):
    """Back-project a (N, C, H, W) depth map to 3D camera-space points.

    NOTE(review): ``DepthWarper`` and ``pixel2cam`` are not defined or
    imported anywhere in this module -- presumably they come from kornia.
    As written this function raises NameError; verify the missing import.
    """
    n, c, h, w = depth.shape
    grid = DepthWarper._create_meshgrid(h, w).expand(n, -1, -1, -1).to(depth.device)
    points = pixel2cam(depth, torch.inverse(intrinsics), grid)
    if not flatten:
        return points
    else:
        # One point per pixel, flattened over the spatial dimensions.
        return points.view(n, h * w, 3)
def save_frame_for_tsdf(dir: Path, index, keyframe, depth, pose, crop=None, min_distance=None, max_distance=None):
    """Dump one frame (color JPEG, depth PNG, pose txt) in TSDF-fusion layout.

    The value transformations below imply ``keyframe`` is a CHW tensor with
    values in [-0.5, 0.5] and ``depth`` is inverse depth -- confirm at the
    call site.  Distances outside [min_distance, max_distance] are zeroed
    (zero marks invalid depth).
    """
    if crop is not None:
        keyframe = keyframe[:, crop[0]:crop[1], crop[2]:crop[3]]
        depth = depth[crop[0]:crop[1], crop[2]:crop[3]]
    # Undo the +-0.5 normalization and convert CHW -> HWC uint8.
    keyframe = ((keyframe + .5) * 255).to(torch.uint8).permute(1, 2, 0)
    # Invert to metric depth and store as centimeters in int16.
    depth = (1 / depth * 100).to(torch.int16)
    depth[depth < 0] = 0
    if min_distance is not None:
        depth[depth < min_distance * 100] = 0
    if max_distance is not None:
        depth[depth > max_distance * 100] = 0
    Image.fromarray(keyframe.numpy()).save(dir / f"frame-{index:06d}.color.jpg")
    Image.fromarray(depth.numpy()).save(dir / f"frame-{index:06d}.depth.png")
    # The pose is inverted before writing -- presumably converting to the
    # convention the TSDF tool expects; TODO confirm (world-to-cam vs cam-to-world).
    np.savetxt(dir / f"frame-{index:06d}.pose.txt", torch.inverse(pose).numpy())
def save_intrinsics_for_tsdf(dir: Path, intrinsics, crop=None):
    """Write the upper-left 3x3 of the camera intrinsics to ``camera-intrinsics.txt``.

    When ``crop`` is given, the principal point is shifted by the crop offsets.
    Bug fix: the input tensor is cloned first -- previously the caller's
    intrinsics were mutated in place on every cropped save.
    """
    intrinsics = intrinsics.clone()
    if crop is not None:
        intrinsics[0, 2] -= crop[2]
        intrinsics[1, 2] -= crop[0]
    np.savetxt(dir / f"camera-intrinsics.txt", intrinsics[:3, :3].numpy())
def get_mask(pred: torch.Tensor, gt: torch.Tensor, max_distance=None, pred_all_valid=True):
    """Bool mask of pixels to ignore: GT == 0, GT beyond ``max_distance``
    (inverse depth below 1/max_distance) and, unless ``pred_all_valid``,
    pixels where the prediction is zero."""
    invalid = gt.eq(0)
    if max_distance:
        invalid = invalid | gt.lt(1 / max_distance)
    if not pred_all_valid:
        invalid = invalid | pred.eq(0)
    return invalid
def mask_mean(t: torch.Tensor, m: torch.Tensor, dim=None):
    """Mean of ``t`` over ``dim``, counting only elements where ``m`` is False."""
    masked = t.clone()
    masked[m] = 0
    if dim is None:
        dim = list(range(len(t.shape)))
    total = 1
    for d in dim:
        total *= t.shape[d]
    valid = total - torch.sum(m.to(torch.float), dim=dim)
    return torch.sum(masked, dim=dim) / valid
def conditional_flip(x, condition, inplace=True):
    """Horizontally flip (last dim) the batch entries selected by ``condition``.

    Mutates ``x`` and returns None when ``inplace``; otherwise returns a
    flipped copy and leaves ``x`` untouched.
    """
    if inplace:
        x[condition, :, :, :] = x[condition, :, :, :].flip(3)
        return None
    out = x.clone()
    out[condition, :, :, :] = x[condition, :, :, :].flip(3)
    return out
def create_mask(c: int, height: int, width: int, border_radius: int, device):
    """Mask of shape (c, 1, height, width): ones inside, zeros in a border of
    width ``border_radius`` on every side."""
    inner = torch.ones(c, 1, height - 2 * border_radius, width - 2 * border_radius, device=device)
    pad = [border_radius] * 4
    return torch.nn.functional.pad(inner, pad)
def median_scaling(data_dict):
    """Rescale each predicted map so its median over valid GT pixels matches
    the ground-truth median; returns a shallow-copied dict with "result"
    replaced."""
    target = data_dict["target"]
    prediction = data_dict["result"]
    valid = target > 0
    per_sample = [torch.median(target[b, valid[b]]) / torch.median(prediction[b, valid[b]])
                  for b in range(target.shape[0])]
    ratios = valid.new_tensor(per_sample, dtype=torch.float32)
    out = dict(data_dict)
    out["result"] = prediction * ratios.view(-1, 1, 1, 1)
    return out
# Shortcut that prepends a batch dimension (dim 0) to a tensor.
unsqueezer = partial(torch.unsqueeze, dim=0)
class DS_Wrapper(torch.utils.data.Dataset):
    """View over ``dataset`` restricted to [start, end), taking every n-th item."""

    def __init__(self, dataset, start=0, end=-1, every_nth=1):
        super().__init__()
        self.dataset = dataset
        self.start = start
        # end == -1 means "until the end of the wrapped dataset".
        self.end = len(self.dataset) if end == -1 else end
        self.every_nth = every_nth

    def __getitem__(self, index: int):
        return self.dataset.__getitem__(self.start + index * self.every_nth)

    def __len__(self):
        # Ceiling division of the covered span by the stride.
        full, remainder = divmod(self.end - self.start, self.every_nth)
        return full + (1 if remainder else 0)
class DS_Merger(torch.utils.data.Dataset):
    """Zip several datasets: indexing yields a generator of the i-th element
    of each wrapped dataset.  Length follows the first dataset."""

    def __init__(self, datasets):
        super().__init__()
        self.datasets = datasets

    def __getitem__(self, index: int):
        # Bug fix: the original added ``self.start``, an attribute this class
        # never defines, so every lookup raised AttributeError.  Index each
        # wrapped dataset directly instead.
        return (ds.__getitem__(index) for ds in self.datasets)

    def __len__(self):
        return len(self.datasets[0])
class LossWrapper(torch.nn.Module):
    """Module that evaluates a loss function and runs backward() inside
    forward(), scaling the loss by the number of devices so gradients from
    parallel replicas average correctly."""

    def __init__(self, loss_function, **kwargs):
        super().__init__()
        # Extra keyword arguments forwarded to the loss function on every call.
        self.kwargs = kwargs
        self.loss_function = loss_function
        # Number of replicas the loss is divided by; 1.0 by default, adjusted
        # externally when the wrapper is replicated across devices.
        self.num_devices = 1.0

    def forward(self, data):
        loss_dict = self.loss_function(data, **self.kwargs)
        # Scale every loss entry so summing across replicas yields the mean.
        loss_dict = map_fn(loss_dict, lambda x: (x / self.num_devices))
        if loss_dict["loss"].requires_grad:
            # Backpropagate here so each replica accumulates its gradients
            # before returning.
            loss_dict["loss"].backward()
        # Detach in place so callers cannot keep the graph alive.
        loss_dict["loss"].detach_()
        return data, loss_dict
class ValueFader:
    """Piecewise-linear schedule: interpolates ``values`` between ``steps``.

    ``steps`` are epoch breakpoints (ascending); past the last one the final
    value is held constant.
    """

    def __init__(self, steps, values):
        self.steps = steps
        self.values = values
        self.epoch = 0

    def set_epoch(self, epoch):
        """Remember the current epoch for parameter-less get_value() calls."""
        self.epoch = epoch

    def get_value(self, epoch=None):
        """Return the interpolated value at ``epoch`` (default: stored epoch)."""
        if epoch is None:
            epoch = self.epoch
        if epoch >= self.steps[-1]:
            return self.values[-1]
        idx = 0
        while idx < len(self.steps) - 1 and epoch >= self.steps[idx + 1]:
            idx += 1
        span = float(self.steps[idx + 1] - self.steps[idx])
        frac = float(epoch - self.steps[idx]) / span
        return (1 - frac) * self.values[idx] + frac * self.values[idx + 1]
def pose_distance_thresh(data_dict, spatial_thresh=.6, rotational_thresh=.05):
    """Flag batch elements whose keyframe + frame poses span a large baseline.

    Returns a bool tensor: True when the translation components spread more
    than ``spatial_thresh`` or the rotated forward (z) axes spread more than
    ``rotational_thresh`` across the frame set.
    """
    # Stack keyframe pose with the other frame poses along a new frame dim.
    poses = torch.stack([data_dict["keyframe_pose"]] + data_dict["poses"], dim=1)
    forward = poses.new_tensor([0, 0, 1], dtype=torch.float32)
    # Extent (max - min, then norm) of the translation columns over frames.
    spatial_expanse = torch.norm(torch.max(poses[..., :3, 3], dim=1)[0] - torch.min(poses[..., :3, 3], dim=1)[0], dim=1)
    # Extent of the rotated forward axis over frames.
    rotational_expanse = torch.norm(torch.max(poses[..., :3, :3] @ forward, dim=1)[0] - torch.min(poses[..., :3, :3] @ forward, dim=1)[0], dim=1)
    return (spatial_expanse > spatial_thresh) | (rotational_expanse > rotational_thresh)
def dilate_mask(m: torch.Tensor, size: int = 15):
    """Binary-dilate mask ``m`` with a square ``size`` x ``size`` element.

    Thresholds the input at 0.5, convolves with an all-ones kernel (same
    padding) and returns a bool tensor of any-hit positions.
    """
    kernel = m.new_ones((1, 1, size, size), dtype=torch.float32)
    binary = (m >= 0.5).to(dtype=torch.float32)
    hits = F.conv2d(binary, kernel, padding=(size // 2, size // 2))
    return hits > 0
def operator_on_dict(dict_0: dict, dict_1: dict, operator, default=0):
    """Apply ``operator`` key-wise over the union of both dicts, substituting
    ``default`` for missing keys."""
    merged = {}
    for key in set(dict_0) | set(dict_1):
        merged[key] = operator(dict_0.get(key, default), dict_1.get(key, default))
    return merged
# String digits "1".."9"; filter_state_dict drops state-dict keys that start
# with one of these.
numbers = [f"{i:d}" for i in range(1, 10, 1)]
def filter_state_dict(state_dict, data_parallel=False):
    """Normalize checkpoint keys: optionally strip a 7-char DataParallel
    ``module.`` prefix, drop keys starting with a digit 1-9, and remove the
    leading ``0.`` from keys that start with ``0``."""
    if data_parallel:
        state_dict = {key[7:]: value for key, value in state_dict.items()}
    cleaned = {}
    for key, value in state_dict.items():
        # Inlined digit set (module-level ``numbers`` holds "1".."9").
        if key[0] in "123456789":
            continue
        cleaned[key[2:] if key.startswith("0") else key] = value
    return cleaned
def seed_rng(seed):
    """Seed torch, Python's ``random`` and NumPy RNGs for reproducibility.

    Bug fix: NumPy was previously always seeded with 0, silently ignoring
    ``seed`` and making different-seed runs share NumPy randomness.
    """
    torch.manual_seed(seed)
    import random
    random.seed(seed)
    np.random.seed(seed)
def ensure_dir(dirname):
    """Create ``dirname`` (with parents) unless it already exists."""
    path = Path(dirname)
    if not path.is_dir():
        path.mkdir(parents=True, exist_ok=False)
def read_json(fname):
    """Parse the JSON file at Path ``fname`` into an OrderedDict tree."""
    with fname.open('rt') as fp:
        return json.load(fp, object_hook=OrderedDict)
def write_json(content, fname):
    """Pretty-print ``content`` as JSON to Path ``fname`` (key order kept)."""
    with fname.open('wt') as fp:
        json.dump(content, fp, indent=4, sort_keys=False)
def inf_loop(data_loader):
    """Endlessly yield batches by restarting ``data_loader`` when exhausted."""
    while True:
        yield from data_loader
class Timer:
    """Simple wall-clock stopwatch based on ``datetime.now``."""

    def __init__(self):
        self.cache = datetime.now()

    def check(self):
        """Return seconds since the last check/reset and restart the clock."""
        now = datetime.now()
        elapsed = (now - self.cache).total_seconds()
        self.cache = now
        return elapsed

    def reset(self):
        """Restart the clock without reporting elapsed time."""
        self.cache = datetime.now()
| [
"torch.clamp_min",
"numpy.random.seed",
"pathlib.Path",
"torch.inverse",
"torch.nn.functional.pad",
"torch.ones",
"torch.median",
"random.seed",
"torch.nn.functional.relu",
"datetime.datetime.now",
"functools.partial",
"json.dump",
"torch.manual_seed",
"torch.max",
"torch.sum",
"torch.... | [((5072, 5103), 'functools.partial', 'partial', (['torch.unsqueeze'], {'dim': '(0)'}), '(torch.unsqueeze, dim=0)\n', (5079, 5103), False, 'from functools import partial\n'), ((2108, 2142), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['depth_gt'], {}), '(depth_gt)\n', (2132, 2142), False, 'import torch\n'), ((4468, 4558), 'torch.ones', 'torch.ones', (['c', '(1)', '(height - 2 * border_radius)', '(width - 2 * border_radius)'], {'device': 'device'}), '(c, 1, height - 2 * border_radius, width - 2 * border_radius,\n device=device)\n', (4478, 4558), False, 'import torch\n'), ((4566, 4661), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['mask', '[border_radius, border_radius, border_radius, border_radius]'], {}), '(mask, [border_radius, border_radius, border_radius,\n border_radius])\n', (4589, 4661), False, 'import torch\n'), ((7326, 7395), 'torch.stack', 'torch.stack', (["([data_dict['keyframe_pose']] + data_dict['poses'])"], {'dim': '(1)'}), "([data_dict['keyframe_pose']] + data_dict['poses'], dim=1)\n", (7337, 7395), False, 'import torch\n'), ((8731, 8754), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (8748, 8754), False, 'import torch\n'), ((8777, 8794), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (8788, 8794), False, 'import random\n'), ((8799, 8816), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (8813, 8816), True, 'import numpy as np\n'), ((8858, 8871), 'pathlib.Path', 'Path', (['dirname'], {}), '(dirname)\n', (8862, 8871), False, 'from pathlib import Path\n'), ((9303, 9322), 'itertools.repeat', 'repeat', (['data_loader'], {}), '(data_loader)\n', (9309, 9322), False, 'from itertools import repeat\n'), ((1587, 1630), 'torch.clamp_min', 'torch.clamp_min', (['depth_gt', '(1 / max_distance)'], {}), '(depth_gt, 1 / max_distance)\n', (1602, 1630), False, 'import torch\n'), ((2050, 2092), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['depth_prediction'], {}), 
'(depth_prediction)\n', (2074, 2092), False, 'import torch\n'), ((2413, 2438), 'torch.inverse', 'torch.inverse', (['intrinsics'], {}), '(intrinsics)\n', (2426, 2438), False, 'import torch\n'), ((4042, 4063), 'torch.sum', 'torch.sum', (['t'], {'dim': 'dim'}), '(t, dim=dim)\n', (4051, 4063), False, 'import torch\n'), ((9028, 9070), 'json.load', 'json.load', (['handle'], {'object_hook': 'OrderedDict'}), '(handle, object_hook=OrderedDict)\n', (9037, 9070), False, 'import json\n'), ((9149, 9202), 'json.dump', 'json.dump', (['content', 'handle'], {'indent': '(4)', 'sort_keys': '(False)'}), '(content, handle, indent=4, sort_keys=False)\n', (9158, 9202), False, 'import json\n'), ((9409, 9423), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9421, 9423), False, 'from datetime import datetime\n'), ((9460, 9474), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9472, 9474), False, 'from datetime import datetime\n'), ((9619, 9633), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9631, 9633), False, 'from datetime import datetime\n'), ((1516, 1567), 'torch.clamp_min', 'torch.clamp_min', (['depth_prediction', '(1 / max_distance)'], {}), '(depth_prediction, 1 / max_distance)\n', (1531, 1567), False, 'import torch\n'), ((1954, 1983), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['dpr'], {}), '(dpr)\n', (1978, 1983), False, 'import torch\n'), ((1403, 1441), 'torch.clamp_min', 'torch.clamp_min', (['dpr', '(1 / max_distance)'], {}), '(dpr, 1 / max_distance)\n', (1418, 1441), False, 'import torch\n'), ((3313, 3332), 'torch.inverse', 'torch.inverse', (['pose'], {}), '(pose)\n', (3326, 3332), False, 'import torch\n'), ((4813, 4845), 'torch.median', 'torch.median', (['target[i, mask[i]]'], {}), '(target[i, mask[i]])\n', (4825, 4845), False, 'import torch\n'), ((4848, 4884), 'torch.median', 'torch.median', (['prediction[i, mask[i]]'], {}), '(prediction[i, mask[i]])\n', (4860, 4884), False, 'import torch\n'), ((7492, 7527), 'torch.max', 
'torch.max', (['poses[..., :3, 3]'], {'dim': '(1)'}), '(poses[..., :3, 3], dim=1)\n', (7501, 7527), False, 'import torch\n'), ((7533, 7568), 'torch.min', 'torch.min', (['poses[..., :3, 3]'], {'dim': '(1)'}), '(poses[..., :3, 3], dim=1)\n', (7542, 7568), False, 'import torch\n'), ((7616, 7662), 'torch.max', 'torch.max', (['(poses[..., :3, :3] @ forward)'], {'dim': '(1)'}), '(poses[..., :3, :3] @ forward, dim=1)\n', (7625, 7662), False, 'import torch\n'), ((7668, 7714), 'torch.min', 'torch.min', (['(poses[..., :3, :3] @ forward)'], {'dim': '(1)'}), '(poses[..., :3, :3] @ forward, dim=1)\n', (7677, 7714), False, 'import torch\n')] |
import os
import os.path as osp
import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from torchvision.transforms import functional as F
import numpy as np
import numpy.linalg as LA
import cv2
import json
import csv
import matplotlib.pyplot as plt
from pylsd import lsd
import datasets.transforms as T
def center_crop(img):
    """Return the largest centered square crop of an H x W x C image."""
    h, w = img.shape[0:2]
    side = min(h, w)
    if h > w:
        # Tall image: keep full width, trim rows symmetrically.
        ul_x = 0
        ul_y = int(np.floor(h / 2 - side / 2))
        x_lo, x_hi = ul_x, w - 1
        y_lo, y_hi = ul_y, ul_y + side - 1
    else:
        # Wide (or square) image: keep full height, trim columns.
        ul_x = int(np.floor(w / 2 - side / 2))
        ul_y = 0
        x_lo, x_hi = ul_x, ul_x + side - 1
        y_lo, y_hi = ul_y, h - 1
    return img[y_lo:y_hi + 1, x_lo:x_hi + 1, :]
def create_masks(image):
    """Return an all-zero uint8 mask of shape (1, H, W) matching ``image``.

    Bug fix: ``height`` and ``width`` were undefined free variables, so any
    call raised NameError; they are now taken from the image itself.
    """
    height, width = image.shape[0], image.shape[1]
    masks = torch.zeros((1, height, width), dtype=torch.uint8)
    return masks
def filter_length(segs, min_line_length=10):
    """Keep only segments longer than ``min_line_length``; return the four
    endpoint columns (x1, y1, x2, y2) of the survivors."""
    lengths = LA.norm(segs[:, 2:4] - segs[:, :2], axis=1)
    kept = segs[lengths > min_line_length]
    return kept[:, :4]
def normalize_segs(segs, pp, rho):
    """Shift segment endpoints by the principal point ``pp`` and scale by ``rho``."""
    center = np.array([pp[0], pp[1], pp[0], pp[1]], dtype=np.float32)
    return rho * (segs - center)
def normalize_safe_np(v, axis=-1, eps=1e-6):
    """Normalize ``v`` along ``axis``, guarding against division by ~zero."""
    norm = LA.norm(v, axis=axis, keepdims=True)
    return v / np.maximum(norm, eps)
def segs2lines_np(segs):
    """Convert segments (x1, y1, x2, y2) into normalized homogeneous line
    equations via the cross product of the two endpoints."""
    count = len(segs)
    homog = np.ones((count, 1))
    p1 = np.concatenate([segs[:, :2], homog], axis=-1)
    p2 = np.concatenate([segs[:, 2:], homog], axis=-1)
    return normalize_safe_np(np.cross(p1, p2))
def sample_segs_np(segs, num_sample, use_prob=True):
    """Sample exactly ``num_sample`` segments.

    With fewer input segments than requested, the output is zero-padded and
    the mask marks the valid rows; otherwise segments are drawn with
    replacement, weighted by their length.  Returns (segments, mask).
    """
    num_segs = len(segs)
    if num_sample > num_segs:
        sampled = np.zeros([num_sample, 4], dtype=np.float32)
        sampled[:num_segs] = segs
        mask = np.zeros([num_sample, 1], dtype=np.float32)
        mask[:num_segs] = np.ones([num_segs, 1], dtype=np.float32)
        return sampled, mask
    lengths = LA.norm(segs[:, 2:] - segs[:, :2], axis=-1)
    prob = lengths / np.sum(lengths)
    chosen = np.random.choice(segs.shape[0], num_sample, replace=True, p=prob)
    return segs[chosen], np.ones([num_sample, 1], dtype=np.float32)
def sample_vert_segs_np(segs, thresh_theta=22.5):
    """Keep segments whose line direction lies within ``thresh_theta`` degrees
    of vertical (small angle between the line normal's components)."""
    lines = segs2lines_np(segs)
    a, b = lines[:, 0], lines[:, 1]
    theta = np.arctan2(np.abs(b), np.abs(a))
    return segs[theta < np.radians(thresh_theta)]
class ImageDataset(Dataset):
    """Single-image dataset that detects LSD line segments and prepares the
    inputs for a line-based model.

    ``__getitem__`` returns ``self.transform(image, extra, target)`` where
    ``target`` carries the sampled segments (optionally vertical-only), their
    homogeneous line equations, validity masks and bookkeeping sizes.
    """

    def __init__(self, cfg, image_path, return_masks=False, transform=None):
        self.input_width = cfg.DATASETS.INPUT_WIDTH
        self.input_height = cfg.DATASETS.INPUT_HEIGHT
        self.min_line_length = cfg.DATASETS.MIN_LINE_LENGTH
        self.num_input_lines = cfg.DATASETS.NUM_INPUT_LINES
        self.num_input_vert_lines = cfg.DATASETS.NUM_INPUT_VERT_LINE
        self.vert_line_angle = cfg.DATASETS.VERT_LINE_ANGLE
        self.return_vert_lines = cfg.DATASETS.RETURN_VERT_LINES
        self.return_masks = return_masks
        self.transform = transform
        # Single-image "dataset": exactly one entry.
        self.list_filename = [image_path,]

    def __getitem__(self, idx):
        target = {}
        extra = {}
        filename = self.list_filename[idx]
        image = cv2.imread(filename)
        # The print() message is only evaluated if the assertion fails.
        assert image is not None, print(filename)
        image = image[:,:,::-1] # convert to rgb
        org_image = image
        org_h, org_w = image.shape[0], image.shape[1]
        org_sz = np.array([org_h, org_w])
        crop_image = center_crop(org_image)
        crop_h, crop_w = crop_image.shape[0], crop_image.shape[1]
        crop_sz = np.array([crop_h, crop_w])
        image = cv2.resize(image, dsize=(self.input_width, self.input_height))
        input_sz = np.array([self.input_height, self.input_width])
        # preprocess: normalize pixel coordinates around the principal point,
        # scaling so the short image side spans 2 units.
        # NOTE(review): ratio_x / ratio_y are computed but never used below.
        ratio_x = float(self.input_width)/float(org_w)
        ratio_y = float(self.input_height)/float(org_h)
        pp = (org_w/2, org_h/2)
        rho = 2.0/np.minimum(org_w,org_h)
        # detect line and preprocess
        # NOTE(review): org_image was flipped BGR->RGB above but is converted
        # here with COLOR_BGR2GRAY -- verify the intended channel order
        # (grayscale channel weights differ slightly).
        gray = cv2.cvtColor(org_image, cv2.COLOR_BGR2GRAY)
        org_segs = lsd(gray, scale=0.5)
        org_segs = filter_length(org_segs, self.min_line_length)
        num_segs = len(org_segs)
        assert len(org_segs) > 10, print(len(org_segs))
        segs = normalize_segs(org_segs, pp=pp, rho=rho)
        # whole segs: length-weighted sample of all detected segments.
        sampled_segs, line_mask = sample_segs_np(
            segs, self.num_input_lines)
        sampled_lines = segs2lines_np(sampled_segs)
        # vertical directional segs; fall back to all segments when fewer
        # than two near-vertical ones were found.
        vert_segs = sample_vert_segs_np(segs, thresh_theta=self.vert_line_angle)
        if len(vert_segs) < 2:
            vert_segs = segs
        sampled_vert_segs, vert_line_mask = sample_segs_np(
            vert_segs, self.num_input_vert_lines)
        sampled_vert_lines = segs2lines_np(sampled_vert_segs)
        if self.return_masks:
            masks = create_masks(image)
        image = np.ascontiguousarray(image)
        if self.return_vert_lines:
            target['segs'] = torch.from_numpy(np.ascontiguousarray(sampled_vert_segs)).contiguous().float()
            target['lines'] = torch.from_numpy(np.ascontiguousarray(sampled_vert_lines)).contiguous().float()
            target['line_mask'] = torch.from_numpy(np.ascontiguousarray(vert_line_mask)).contiguous().float()
        else:
            target['segs'] = torch.from_numpy(np.ascontiguousarray(sampled_segs)).contiguous().float()
            target['lines'] = torch.from_numpy(np.ascontiguousarray(sampled_lines)).contiguous().float()
            target['line_mask'] = torch.from_numpy(np.ascontiguousarray(line_mask)).contiguous().float()
        if self.return_masks:
            target['masks'] = masks
        target['org_img'] = org_image
        target['org_sz'] = org_sz
        target['crop_sz'] = crop_sz
        target['input_sz'] = input_sz
        target['img_path'] = filename
        target['filename'] = filename
        extra['lines'] = target['lines'].clone()
        extra['line_mask'] = target['line_mask'].clone()
        return self.transform(image, extra, target)

    def __len__(self):
        return len(self.list_filename)
def make_transform():
    """Standard eval transform: to-tensor followed by ImageNet mean/std
    normalization."""
    return T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
def build_image(image_path, cfg):
    """Build a single-image ImageDataset with the standard transform."""
    dataset = ImageDataset(cfg, image_path, return_masks=cfg.MODELS.MASKS, transform=make_transform())
    return dataset
| [
"numpy.maximum",
"numpy.abs",
"numpy.sum",
"numpy.floor",
"numpy.ones",
"numpy.linalg.norm",
"datasets.transforms.Normalize",
"cv2.cvtColor",
"numpy.random.choice",
"torch.zeros",
"cv2.resize",
"numpy.radians",
"numpy.minimum",
"numpy.cross",
"pylsd.lsd",
"numpy.min",
"datasets.trans... | [((422, 432), 'numpy.min', 'np.min', (['sz'], {}), '(sz)\n', (428, 432), True, 'import numpy as np\n'), ((901, 951), 'torch.zeros', 'torch.zeros', (['(1, height, width)'], {'dtype': 'torch.uint8'}), '((1, height, width), dtype=torch.uint8)\n', (912, 951), False, 'import torch\n'), ((1029, 1072), 'numpy.linalg.norm', 'LA.norm', (['(segs[:, 2:4] - segs[:, :2])'], {'axis': '(1)'}), '(segs[:, 2:4] - segs[:, :2], axis=1)\n', (1036, 1072), True, 'import numpy.linalg as LA\n'), ((1185, 1241), 'numpy.array', 'np.array', (['[pp[0], pp[1], pp[0], pp[1]]'], {'dtype': 'np.float32'}), '([pp[0], pp[1], pp[0], pp[1]], dtype=np.float32)\n', (1193, 1241), True, 'import numpy as np\n'), ((1332, 1368), 'numpy.linalg.norm', 'LA.norm', (['v'], {'axis': 'axis', 'keepdims': '(True)'}), '(v, axis=axis, keepdims=True)\n', (1339, 1368), True, 'import numpy.linalg as LA\n'), ((1378, 1397), 'numpy.maximum', 'np.maximum', (['de', 'eps'], {}), '(de, eps)\n', (1388, 1397), True, 'import numpy as np\n'), ((1481, 1510), 'numpy.expand_dims', 'np.expand_dims', (['ones'], {'axis': '(-1)'}), '(ones, axis=-1)\n', (1495, 1510), True, 'import numpy as np\n'), ((1520, 1564), 'numpy.concatenate', 'np.concatenate', (['[segs[:, :2], ones]'], {'axis': '(-1)'}), '([segs[:, :2], ones], axis=-1)\n', (1534, 1564), True, 'import numpy as np\n'), ((1573, 1617), 'numpy.concatenate', 'np.concatenate', (['[segs[:, 2:], ones]'], {'axis': '(-1)'}), '([segs[:, 2:], ones], axis=-1)\n', (1587, 1617), True, 'import numpy as np\n'), ((1629, 1645), 'numpy.cross', 'np.cross', (['p1', 'p2'], {}), '(p1, p2)\n', (1637, 1645), True, 'import numpy as np\n'), ((1784, 1827), 'numpy.zeros', 'np.zeros', (['[num_sample, 4]'], {'dtype': 'np.float32'}), '([num_sample, 4], dtype=np.float32)\n', (1792, 1827), True, 'import numpy as np\n'), ((1839, 1882), 'numpy.zeros', 'np.zeros', (['[num_sample, 1]'], {'dtype': 'np.float32'}), '([num_sample, 1], dtype=np.float32)\n', (1847, 1882), True, 'import numpy as np\n'), ((2527, 
2551), 'numpy.radians', 'np.radians', (['thresh_theta'], {}), '(thresh_theta)\n', (2537, 2551), True, 'import numpy as np\n'), ((1978, 2018), 'numpy.ones', 'np.ones', (['[num_segs, 1]'], {'dtype': 'np.float32'}), '([num_segs, 1], dtype=np.float32)\n', (1985, 2018), True, 'import numpy as np\n'), ((2051, 2094), 'numpy.linalg.norm', 'LA.norm', (['(segs[:, 2:] - segs[:, :2])'], {'axis': '(-1)'}), '(segs[:, 2:] - segs[:, :2], axis=-1)\n', (2058, 2094), True, 'import numpy.linalg as LA\n'), ((2155, 2220), 'numpy.random.choice', 'np.random.choice', (['segs.shape[0]', 'num_sample'], {'replace': '(True)', 'p': 'prob'}), '(segs.shape[0], num_sample, replace=True, p=prob)\n', (2171, 2220), True, 'import numpy as np\n'), ((2270, 2312), 'numpy.ones', 'np.ones', (['[num_sample, 1]'], {'dtype': 'np.float32'}), '([num_sample, 1], dtype=np.float32)\n', (2277, 2312), True, 'import numpy as np\n'), ((2487, 2496), 'numpy.abs', 'np.abs', (['b'], {}), '(b)\n', (2493, 2496), True, 'import numpy as np\n'), ((2497, 2506), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (2503, 2506), True, 'import numpy as np\n'), ((3409, 3429), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (3419, 3429), False, 'import cv2\n'), ((3635, 3659), 'numpy.array', 'np.array', (['[org_h, org_w]'], {}), '([org_h, org_w])\n', (3643, 3659), True, 'import numpy as np\n'), ((3798, 3824), 'numpy.array', 'np.array', (['[crop_h, crop_w]'], {}), '([crop_h, crop_w])\n', (3806, 3824), True, 'import numpy as np\n'), ((3859, 3921), 'cv2.resize', 'cv2.resize', (['image'], {'dsize': '(self.input_width, self.input_height)'}), '(image, dsize=(self.input_width, self.input_height))\n', (3869, 3921), False, 'import cv2\n'), ((3941, 3988), 'numpy.array', 'np.array', (['[self.input_height, self.input_width]'], {}), '([self.input_height, self.input_width])\n', (3949, 3988), True, 'import numpy as np\n'), ((4273, 4316), 'cv2.cvtColor', 'cv2.cvtColor', (['org_image', 'cv2.COLOR_BGR2GRAY'], {}), '(org_image, 
cv2.COLOR_BGR2GRAY)\n', (4285, 4316), False, 'import cv2\n'), ((4336, 4356), 'pylsd.lsd', 'lsd', (['gray'], {'scale': '(0.5)'}), '(gray, scale=0.5)\n', (4339, 4356), False, 'from pylsd import lsd\n'), ((5227, 5254), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image'], {}), '(image)\n', (5247, 5254), True, 'import numpy as np\n'), ((493, 530), 'numpy.floor', 'np.floor', (['(sz[0] / 2 - side_length / 2)'], {}), '(sz[0] / 2 - side_length / 2)\n', (501, 530), True, 'import numpy as np\n'), ((642, 679), 'numpy.floor', 'np.floor', (['(sz[1] / 2 - side_length / 2)'], {}), '(sz[1] / 2 - side_length / 2)\n', (650, 679), True, 'import numpy as np\n'), ((2116, 2131), 'numpy.sum', 'np.sum', (['lengths'], {}), '(lengths)\n', (2122, 2131), True, 'import numpy as np\n'), ((4181, 4205), 'numpy.minimum', 'np.minimum', (['org_w', 'org_h'], {}), '(org_w, org_h)\n', (4191, 4205), True, 'import numpy as np\n'), ((6555, 6567), 'datasets.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (6565, 6567), True, 'import datasets.transforms as T\n'), ((6577, 6634), 'datasets.transforms.Normalize', 'T.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (6588, 6634), True, 'import datasets.transforms as T\n'), ((5345, 5384), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['sampled_vert_segs'], {}), '(sampled_vert_segs)\n', (5365, 5384), True, 'import numpy as np\n'), ((5454, 5494), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['sampled_vert_lines'], {}), '(sampled_vert_lines)\n', (5474, 5494), True, 'import numpy as np\n'), ((5568, 5604), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['vert_line_mask'], {}), '(vert_line_mask)\n', (5588, 5604), True, 'import numpy as np\n'), ((5687, 5721), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['sampled_segs'], {}), '(sampled_segs)\n', (5707, 5721), True, 'import numpy as np\n'), ((5791, 5826), 'numpy.ascontiguousarray', 'np.ascontiguousarray', 
(['sampled_lines'], {}), '(sampled_lines)\n', (5811, 5826), True, 'import numpy as np\n'), ((5900, 5931), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['line_mask'], {}), '(line_mask)\n', (5920, 5931), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 18 15:04:55 2015

@author: Ben
"""
# Minimal clearplot example: sample y = sqrt(x) on [0, 10).
import clearplot.plot_functions as pf
import numpy as np
x = np.arange(0,10,0.01)
y = np.sqrt(x)
pf.plot('bare_bones.png', x, y); | [
"numpy.arange",
"clearplot.plot_functions.plot",
"numpy.sqrt"
] | [((144, 166), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.01)'], {}), '(0, 10, 0.01)\n', (153, 166), True, 'import numpy as np\n'), ((169, 179), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (176, 179), True, 'import numpy as np\n'), ((180, 211), 'clearplot.plot_functions.plot', 'pf.plot', (['"""bare_bones.png"""', 'x', 'y'], {}), "('bare_bones.png', x, y)\n", (187, 211), True, 'import clearplot.plot_functions as pf\n')] |
import cv2
import numpy as np
class HandDetection:
    """Builds an HSV color histogram of the user's hand from nine sample
    patches drawn on the camera frame."""

    def __init__(self):
        self.trained_hand = False
        # North-west / south-east corners (row and column arrays) of the nine
        # 10x10 sampling rectangles; filled in by draw_hand_rect().
        self.hand_row_nw = None
        self.hand_row_se = None
        self.hand_col_nw = None
        self.hand_col_se = None
        self.hand_hist = None

    def draw_hand_rect(self, frame):
        """Draw nine green 10x10 sampling rectangles on ``frame`` (a 3x3 grid
        around the frame center) and remember their positions."""
        rows, cols, _ = frame.shape
        self.hand_row_nw = np.array([6 * rows / 20, 6 * rows / 20, 6 * rows / 20,
                                     10 * rows / 20, 10 * rows / 20, 10 * rows / 20,
                                     14 * rows / 20, 14 * rows / 20, 14 * rows / 20])
        self.hand_col_nw = np.array([9 * cols / 20, 10 * cols / 20, 11 * cols / 20,
                                     9 * cols / 20, 10 * cols / 20, 11 * cols / 20,
                                     9 * cols / 20, 10 * cols / 20, 11 * cols / 20])
        self.hand_row_se = self.hand_row_nw + 10
        self.hand_col_se = self.hand_col_nw + 10
        size = self.hand_row_nw.size
        for i in range(size):
            cv2.rectangle(frame, (int(self.hand_col_nw[i]), int(self.hand_row_nw[i])), (int(self.hand_col_se[i]),
                                                                                        int(self.hand_row_se[i])), (0, 255, 0), 1)
        frame_final = frame
        return frame_final

    def train_hand(self, frame):
        """Compute the hand histogram from ``frame`` and mark training done."""
        self.set_hand_hist(frame)
        self.trained_hand = True

    def set_hand_hist(self, frame):
        # TODO use constants, only do HSV for ROI
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Stack the nine 10x10 HSV patches into a single 90x10 ROI image.
        roi = np.zeros([90, 10, 3], dtype=hsv.dtype)
        size = self.hand_row_nw.size
        for i in range(size):
            roi[i * 10:i * 10 + 10, 0:10] = hsv[int(self.hand_row_nw[i]):int(self.hand_row_nw[i]) + 10, int(self.hand_col_nw[i]):int(self.hand_col_nw[i]) + 10]
        # 2-D histogram over hue (180 bins) and saturation (256 bins).
        self.hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(self.hand_hist, self.hand_hist, 0, 255, cv2.NORM_MINMAX) | [
"cv2.cvtColor",
"cv2.calcHist",
"numpy.zeros",
"numpy.array",
"cv2.normalize"
] | [((387, 542), 'numpy.array', 'np.array', (['[6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 10 * rows / 20, 10 * rows / \n 20, 10 * rows / 20, 14 * rows / 20, 14 * rows / 20, 14 * rows / 20]'], {}), '([6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 10 * rows / 20, 10 *\n rows / 20, 10 * rows / 20, 14 * rows / 20, 14 * rows / 20, 14 * rows / 20])\n', (395, 542), True, 'import numpy as np\n'), ((645, 800), 'numpy.array', 'np.array', (['[9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / \n 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / 20]'], {}), '([9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 *\n cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / 20])\n', (653, 800), True, 'import numpy as np\n'), ((1548, 1586), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (1560, 1586), False, 'import cv2\n'), ((1602, 1640), 'numpy.zeros', 'np.zeros', (['[90, 10, 3]'], {'dtype': 'hsv.dtype'}), '([90, 10, 3], dtype=hsv.dtype)\n', (1610, 1640), True, 'import numpy as np\n'), ((1901, 1964), 'cv2.calcHist', 'cv2.calcHist', (['[roi]', '[0, 1]', 'None', '[180, 256]', '[0, 180, 0, 256]'], {}), '([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])\n', (1913, 1964), False, 'import cv2\n'), ((1974, 2044), 'cv2.normalize', 'cv2.normalize', (['self.hand_hist', 'self.hand_hist', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(self.hand_hist, self.hand_hist, 0, 255, cv2.NORM_MINMAX)\n', (1987, 2044), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""Supports the Vector Electric Field Instrument (VEFI)
onboard the Communication and Navigation Outage Forecasting
System (C/NOFS) satellite. Downloads data from the
NASA Coordinated Data Analysis Web (CDAWeb).
Parameters
----------
platform : string
'cnofs'
name : string
'vefi'
tag : string
Select measurement type, one of {'dc_b'}
Note
----
- tag = 'dc_b': 1 second DC magnetometer data
Warnings
--------
- Limited cleaning routine.
- Module not written by VEFI team.
"""
from __future__ import print_function
from __future__ import absolute_import
import pandas as pds
import numpy as np
import pysat
import sys
import functools
from . import nasa_cdaweb_methods as cdw
# pysat instrument identification.
platform = 'cnofs'
name = 'vefi'
tags = {'dc_b':'DC Magnetometer data - 1 second'}
sat_ids = {'':['dc_b']}
test_dates = {'':{'dc_b':pysat.datetime(2009,1,1)}}

# support list files routine
# use the default CDAWeb method
fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
supported_tags = {'':{'dc_b':fname}}
list_files = functools.partial(cdw.list_files,
                               supported_tags=supported_tags)

# support load routine
# use the default CDAWeb method
load = cdw.load

# support download routine
# use the default CDAWeb method
basic_tag = {'dir':'/pub/data/cnofs/vefi/bfield_1sec',
             'remote_fname':'{year:4d}/'+fname,
             'local_fname':fname}
# NOTE(review): unlike the list_files table above, this mapping is keyed by
# tag only (no '' sat_id level) -- verify against cdw.download's expectations.
supported_tags = {'dc_b':basic_tag}
download = functools.partial(cdw.download, supported_tags)
def clean(inst):
    """Return VEFI data cleaned to the specified level.

    Removes interpolated samples (``B_flag`` != 0) when ``inst.clean_level``
    is 'dusty' or 'clean'.

    Parameters
    -----------
    inst : (pysat.Instrument)
        Instrument class object, whose attribute clean_level is used to
        return the desired level of data selectivity.

    Returns
    --------
    Void : (NoneType)
        data in inst is modified in-place.
    """
    if inst.clean_level in ('dusty', 'clean'):
        keep, = np.where(inst['B_flag'] == 0)
        inst.data = inst[keep, :]
    return None
| [
"pysat.datetime",
"functools.partial",
"numpy.where"
] | [((1070, 1134), 'functools.partial', 'functools.partial', (['cdw.list_files'], {'supported_tags': 'supported_tags'}), '(cdw.list_files, supported_tags=supported_tags)\n', (1087, 1134), False, 'import functools\n'), ((1480, 1527), 'functools.partial', 'functools.partial', (['cdw.download', 'supported_tags'], {}), '(cdw.download, supported_tags)\n', (1497, 1527), False, 'import functools\n'), ((859, 885), 'pysat.datetime', 'pysat.datetime', (['(2009)', '(1)', '(1)'], {}), '(2009, 1, 1)\n', (873, 885), False, 'import pysat\n'), ((2106, 2135), 'numpy.where', 'np.where', (["(inst['B_flag'] == 0)"], {}), "(inst['B_flag'] == 0)\n", (2114, 2135), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import astropy.io.fits as pyfits
import pylab
import specter.psf
import sys
import argparse
import string
import os.path
def readpsf(filename):
    """Load a specter PSF object from ``filename`` based on its PSFTYPE header.

    A missing PSFTYPE keyword is treated as an empty string.  Raises
    ValueError for unknown PSF types instead of silently returning None
    (which previously surfaced later as a confusing AttributeError).
    """
    try:
        psftype = pyfits.open(filename)[0].header["PSFTYPE"]
    except KeyError:
        psftype = ""
    print("PSF Type=", psftype)
    if psftype == "GAUSS-HERMITE":
        return specter.psf.GaussHermitePSF(filename)
    elif psftype == "SPOTGRID":
        return specter.psf.SpotGridPSF(filename)
    raise ValueError("unsupported PSFTYPE %r in %s" % (psftype, filename))
# Command-line interface: compare the PSF spot shapes of two specter files.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--psf1', type = str, default = None, required = True,
                    help = 'path of psf file')
parser.add_argument('--psf2', type = str, default = None, required = True,
                    help = 'path of psf second file')
parser.add_argument('-o','--output', type = str, default = None, required = False,
                    help = 'path to output image (png) file')

# Fiber indices to compare in each file, and the wavelengths to sample.
fibers1=[0,10,19]
# NOTE(review): this assignment is dead -- overwritten on the next line.
# It looks like a leftover for PSFs with ~500 fibers; confirm before removal.
fibers2=[0,250,490]
fibers2=[0,10,19]
waves=[5500,6700,7600]

args = parser.parse_args()

psf1=readpsf(args.psf1)
psf2=readpsf(args.psf2)
name1=os.path.basename(args.psf1)
name2=os.path.basename(args.psf2)
# One subplot per (wavelength, fiber) pair: blue contours for psf1, red for
# psf2, at 10%/50%/90% of psf1's peak, in a +-5 pixel window around each spot.
f,a = pylab.subplots(len(waves),len(fibers1),sharex=True, sharey=True)

for i in range(len(fibers1)) :
    fiber1=fibers1[i]
    fiber2=fibers2[i]
    print("fiber1 %d fiber2 %d"%(fiber1,fiber2))
    for j in range(len(waves)) :
        # Rows are ordered with the longest wavelength at the top.
        wave=waves[len(waves)-1-j]
        xy1=psf1.xy(fiber1,wave)
        xy2=psf2.xy(fiber2,wave)
        print("for psf1, xy=",xy1)
        print("for psf2, xy=",xy2)
        # Evaluate each PSF on a 51x51 grid centered on its own spot.
        hw=5.
        n1d=51
        x=np.tile(np.linspace(-hw,hw,51),(n1d,1))
        y=x.T
        fpix1=psf1._value(x+xy1[0],y+xy1[1],fiber1,wave)
        fpix2=psf2._value(x+xy2[0],y+xy2[1],fiber2,wave)
        # Normalize both footprints to unit sum so shapes are comparable.
        fpix1 /= np.sum(fpix1)
        fpix2 /= np.sum(fpix2)
        aa=a[j,i]
        #aa.imshow(fpix1,origin=0,interpolation="nearest",extent=(-hw,hw,-hw,hw),aspect="auto")
        levels = np.array([0.1,0.5,0.9])*np.max(fpix1)
        aa.contour(x,y,fpix1,colors='b',levels=levels)
        aa.contour(x,y,fpix2,colors='r',levels=levels)
        if False :
            #aa.set_title("#%d / #%d"%(fiber1,fiber2))
            aa.text(-hw+0.3,hw-2,"%s Fiber #%d"%(name1,fiber1),fontsize=10,color="b")
            aa.text(-hw+0.3,hw-1.2,"%s Fiber #%d"%(name2,fiber2),fontsize=10,color="r")
        if True :
            # Annotate each panel with the CCD coordinates of both spots.
            aa.text(-hw+0.3,-hw+1.7,"x y psf:",fontsize=10,color="k")
            aa.text(-hw+0.3,-hw+0.3,"%4.1f %4.1f %s"%(xy1[0],xy1[1],name1),fontsize=10,color="b")
            aa.text(-hw+0.3,-hw+1.,"%4.1f %4.1f %s"%(xy2[0],xy2[1],name2),fontsize=10,color="r")
            #aa.legend(loc="upper left",fontsize="small")
        if i==0 :
            aa.set_ylabel("%dA"%wave)
        if j==0 :
            aa.set_title("fibers %d&%d"%(fiber1,fiber2))
        #aa.set_xlim([-hw,hw])
        #aa.set_ylim([-hw,hw])
f.subplots_adjust(hspace=0,wspace=0)
if args.output is not None :
fig.savefig(args.output)
pylab.show()
| [
"pylab.show",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.max",
"numpy.array",
"astropy.io.fits.open",
"numpy.linspace"
] | [((505, 584), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (528, 584), False, 'import argparse\n'), ((3088, 3100), 'pylab.show', 'pylab.show', ([], {}), '()\n', (3098, 3100), False, 'import pylab\n'), ((1865, 1878), 'numpy.sum', 'np.sum', (['fpix1'], {}), '(fpix1)\n', (1871, 1878), True, 'import numpy as np\n'), ((1896, 1909), 'numpy.sum', 'np.sum', (['fpix2'], {}), '(fpix2)\n', (1902, 1909), True, 'import numpy as np\n'), ((1688, 1712), 'numpy.linspace', 'np.linspace', (['(-hw)', 'hw', '(51)'], {}), '(-hw, hw, 51)\n', (1699, 1712), True, 'import numpy as np\n'), ((2042, 2067), 'numpy.array', 'np.array', (['[0.1, 0.5, 0.9]'], {}), '([0.1, 0.5, 0.9])\n', (2050, 2067), True, 'import numpy as np\n'), ((2066, 2079), 'numpy.max', 'np.max', (['fpix1'], {}), '(fpix1)\n', (2072, 2079), True, 'import numpy as np\n'), ((213, 234), 'astropy.io.fits.open', 'pyfits.open', (['filename'], {}), '(filename)\n', (224, 234), True, 'import astropy.io.fits as pyfits\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.animation import FuncAnimation
from fplanck import fokker_planck, boundary, gaussian_pdf
# Physical setup: a Brownian particle in a cosine potential.
nm = 1e-9
viscosity = 8e-4
radius = 50*nm
# Stokes drag coefficient 6*pi*eta*r for a sphere in a viscous fluid
drag = 6*np.pi*viscosity*radius

# Cosine potential with period 2*pi*L and amplitude 5e-21 J
L = 20*nm
U = lambda x: 5e-21*np.cos(x/L)

sim = fokker_planck(temperature=300, drag=drag, extent=600*nm,
            resolution=10*nm, boundary=boundary.reflecting, potential=U)

### steady-state solution
steady = sim.steady_state()

### time-evolved solution
# initial condition: Gaussian centered at -150 nm with 30 nm width
pdf = gaussian_pdf(-150*nm, 30*nm)
p0 = pdf(sim.grid[0])

Nsteps = 200
time, Pt = sim.propagate_interval(pdf, 10e-3, Nsteps=Nsteps)

### animation
fig, ax = plt.subplots()

# steady-state (black dashed) and initial PDF (red dashed) for reference
ax.plot(sim.grid[0]/nm, steady, color='k', ls='--', alpha=.5)
ax.plot(sim.grid[0]/nm, p0, color='red', ls='--', alpha=.3)
line, = ax.plot(sim.grid[0]/nm, p0, lw=2, color='C3')

def update(i):
    # frame i of the animation: show the PDF at time step i
    line.set_ydata(Pt[i])
    return [line]

# keep a reference to the animation so it is not garbage-collected
anim = FuncAnimation(fig, update, frames=range(Nsteps), interval=30)

ax.set(xlabel='x (nm)', ylabel='normalized PDF')
ax.margins(x=0)
plt.show()
| [
"fplanck.gaussian_pdf",
"fplanck.fokker_planck",
"matplotlib.pyplot.show",
"numpy.cos",
"matplotlib.pyplot.subplots"
] | [((306, 431), 'fplanck.fokker_planck', 'fokker_planck', ([], {'temperature': '(300)', 'drag': 'drag', 'extent': '(600 * nm)', 'resolution': '(10 * nm)', 'boundary': 'boundary.reflecting', 'potential': 'U'}), '(temperature=300, drag=drag, extent=600 * nm, resolution=10 *\n nm, boundary=boundary.reflecting, potential=U)\n', (319, 431), False, 'from fplanck import fokker_planck, boundary, gaussian_pdf\n'), ((524, 556), 'fplanck.gaussian_pdf', 'gaussian_pdf', (['(-150 * nm)', '(30 * nm)'], {}), '(-150 * nm, 30 * nm)\n', (536, 556), False, 'from fplanck import fokker_planck, boundary, gaussian_pdf\n'), ((674, 688), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (686, 688), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1072), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1070, 1072), True, 'import matplotlib.pyplot as plt\n'), ((288, 301), 'numpy.cos', 'np.cos', (['(x / L)'], {}), '(x / L)\n', (294, 301), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import time
import sys
import numpy as np
import LX200
from LX200.LX200Utils import *
# Connect to the telescope over the serial port.
port = LX200.LXSerial(debug=False)
port.connect('/dev/tty.usbserial')
scope = LX200.Telescope(port, "LX200GPS", debug=False)

# Query current pointing and time state from the mount.
alt = from_lx200_angle(scope.get_Altitude())
az = from_lx200_angle(scope.get_AZ())
ra = scope.get_RA()
dec = to_lx200_hms_angle(from_lx200_angle(scope.get_Dec()))
lst = scope.get_sidereal_time()
localtime = scope.get_local_time_24()

# Plane-parallel airmass approximation: sec(z) = 1/sin(alt), alt in degrees.
airmass = 1.0/np.sin(np.pi*alt/180.0)
# Hour angle = LST - RA.
ha = to_lx200_hms_angle(from_lx200_angle(lst) - from_lx200_angle(ra))

# Append one record; the context manager guarantees the log file is closed
# even if the write fails (the original leaked the handle on error).
with open("lx200.log", "a") as output:
    output.write("%s %s %s %s %s %.2f %.2f %.2f\n" % (localtime, lst, ra, dec, ha, alt, az, airmass))

port.close()
| [
"LX200.LXSerial",
"numpy.sin",
"LX200.Telescope"
] | [((117, 144), 'LX200.LXSerial', 'LX200.LXSerial', ([], {'debug': '(False)'}), '(debug=False)\n', (131, 144), False, 'import LX200\n'), ((189, 235), 'LX200.Telescope', 'LX200.Telescope', (['port', '"""LX200GPS"""'], {'debug': '(False)'}), "(port, 'LX200GPS', debug=False)\n", (204, 235), False, 'import LX200\n'), ((484, 511), 'numpy.sin', 'np.sin', (['(np.pi * alt / 180.0)'], {}), '(np.pi * alt / 180.0)\n', (490, 511), True, 'import numpy as np\n')] |
import numpy as np
import em
import common
# Data with missing entries and the corresponding complete (gold) data.
X = np.loadtxt("test_incomplete.txt")
X_gold = np.loadtxt("test_complete.txt")

# K: number of mixture components; n, d: number of samples and features.
K = 4
n, d = X.shape
seed = 0

# TODO: Your code here
| [
"numpy.loadtxt"
] | [((48, 81), 'numpy.loadtxt', 'np.loadtxt', (['"""test_incomplete.txt"""'], {}), "('test_incomplete.txt')\n", (58, 81), True, 'import numpy as np\n'), ((91, 122), 'numpy.loadtxt', 'np.loadtxt', (['"""test_complete.txt"""'], {}), "('test_complete.txt')\n", (101, 122), True, 'import numpy as np\n')] |
"""
Example of running optical and contact cluster stuff on gromacs file
"""
from __future__ import absolute_import, division, print_function
import os.path as op
import numpy as np
import numpy.testing as npt
import pdb
import gsd.hoomd
import sys
import clustering as cl
import random
import scipy
import time
#from context import clustering as cl
#from context import smoluchowski as smol
from cdistances import conOptDistanceCython,alignDistancesCython
#import imp
#cl = imp.load_source('cl','/home/rachael/Analysis_and_run_code/analysis/cluster_analysis/clustering/clustering.py')
data_path ='/home/rachael/coarsegraining/CG/active_learning/martini-assembly/dfmi/4_production' #folder where trajectory is
#trajectory should not have any water
#this can be done as follows:
#gmx trjconv -f after_eq.gro -o after_eq_whole.gro -pbc whole -s md.tpr
#choose protein
#gmx trjconv -f md.xtc -o md_whole.xtc -pbc whole -s md.tpr
#choose protein
#grompp -f md_dummy.mdp -c after_eq_whole.gro -p CG_dfmi_prot.top -o md_dummy.tpr
#where md_dummy is the same as the mdp file except with water removed, same
#for the topology file
def run_ang_spread():
    """
    Try running on an xtc trajectory (from a pull simulation)
    """
    # trajectory and (water-stripped) run-input files
    trj = op.join(data_path,'md_whole.xtc')
    tpr = op.join(data_path,'md_dummy.tpr')
    molno = 100   # number of molecules in the system
    ats = 33      # atoms per molecule
    tstart = 0
    ttotal = 4000
    # atom index ranges used for the angle-spread measurement
    cainds = range(12,23)
    oainds = range(0,3)
    # NOTE(review): these two angle-spread filenames are overwritten by the
    # '*-CIDs.dat' assignments below before ever being used — writeAngSpread
    # therefore writes into the cluster-ID files; confirm this is intended.
    cfname = op.join(data_path,'angle-spread-contact.dat')
    ofname = op.join(data_path,'angle-spread-optical.dat')
    # pairs of atom triplets defining centers of mass for the optical metric
    comIDs = np.array([[12,13,14],[16,17,18],[20,21,22]])
    # squared cutoff distances for each cluster definition
    cldict = {'contact':0.5*0.5,'optical':0.7*0.7}
    cfname = op.join(data_path,'contact-CIDs.dat')
    ofname = op.join(data_path,'optical-CIDs.dat')
    start = time.time()
    syst = cl.SnapSystem(trj,ats,molno,cldict,compairs=comIDs,
                         ttotal=ttotal,tstart=tstart,tpr=tpr)
    end = time.time()
    print("Time to setup: "+str(end-start)+"\n")
    # read precomputed cluster IDs for both cluster definitions
    start = time.time()
    syst.get_clusters_from_file('contact',cfname)
    end = time.time()
    print("Time to get contact: "+str(end-start)+"\n")
    start = time.time()
    syst.get_clusters_from_file('optical',ofname)
    end = time.time()
    print("Time to get optical: "+str(end-start)+"\n")
    # compute and write the angle-spread metric for each definition
    start = time.time()
    syst.writeAngSpread('contact',cfname,cainds)
    syst.writeAngSpread('optical',ofname,oainds)
    end = time.time()
    print("Time to get angle spread: "+str(end-start))
# Run the analysis only when executed as a script, not on import.
if __name__ == "__main__":
    run_ang_spread()
"numpy.array",
"os.path.join",
"clustering.SnapSystem",
"time.time"
] | [((1238, 1272), 'os.path.join', 'op.join', (['data_path', '"""md_whole.xtc"""'], {}), "(data_path, 'md_whole.xtc')\n", (1245, 1272), True, 'import os.path as op\n'), ((1282, 1316), 'os.path.join', 'op.join', (['data_path', '"""md_dummy.tpr"""'], {}), "(data_path, 'md_dummy.tpr')\n", (1289, 1316), True, 'import os.path as op\n'), ((1441, 1487), 'os.path.join', 'op.join', (['data_path', '"""angle-spread-contact.dat"""'], {}), "(data_path, 'angle-spread-contact.dat')\n", (1448, 1487), True, 'import os.path as op\n'), ((1500, 1546), 'os.path.join', 'op.join', (['data_path', '"""angle-spread-optical.dat"""'], {}), "(data_path, 'angle-spread-optical.dat')\n", (1507, 1546), True, 'import os.path as op\n'), ((1559, 1611), 'numpy.array', 'np.array', (['[[12, 13, 14], [16, 17, 18], [20, 21, 22]]'], {}), '([[12, 13, 14], [16, 17, 18], [20, 21, 22]])\n', (1567, 1611), True, 'import numpy as np\n'), ((1674, 1712), 'os.path.join', 'op.join', (['data_path', '"""contact-CIDs.dat"""'], {}), "(data_path, 'contact-CIDs.dat')\n", (1681, 1712), True, 'import os.path as op\n'), ((1725, 1763), 'os.path.join', 'op.join', (['data_path', '"""optical-CIDs.dat"""'], {}), "(data_path, 'optical-CIDs.dat')\n", (1732, 1763), True, 'import os.path as op\n'), ((1784, 1795), 'time.time', 'time.time', ([], {}), '()\n', (1793, 1795), False, 'import time\n'), ((1807, 1905), 'clustering.SnapSystem', 'cl.SnapSystem', (['trj', 'ats', 'molno', 'cldict'], {'compairs': 'comIDs', 'ttotal': 'ttotal', 'tstart': 'tstart', 'tpr': 'tpr'}), '(trj, ats, molno, cldict, compairs=comIDs, ttotal=ttotal,\n tstart=tstart, tpr=tpr)\n', (1820, 1905), True, 'import clustering as cl\n'), ((1923, 1934), 'time.time', 'time.time', ([], {}), '()\n', (1932, 1934), False, 'import time\n'), ((1996, 2007), 'time.time', 'time.time', ([], {}), '()\n', (2005, 2007), False, 'import time\n'), ((2068, 2079), 'time.time', 'time.time', ([], {}), '()\n', (2077, 2079), False, 'import time\n'), ((2147, 2158), 'time.time', 'time.time', ([], 
{}), '()\n', (2156, 2158), False, 'import time\n'), ((2219, 2230), 'time.time', 'time.time', ([], {}), '()\n', (2228, 2230), False, 'import time\n'), ((2303, 2314), 'time.time', 'time.time', ([], {}), '()\n', (2312, 2314), False, 'import time\n'), ((2423, 2434), 'time.time', 'time.time', ([], {}), '()\n', (2432, 2434), False, 'import time\n')] |
import numpy as np
import pytest
import torch
from theissues import training
def test_build_sequences_splits_returns_indices():
    """Split points for length-2 sequences over a 5-token buffer."""
    token_buf = torch.LongTensor([1, 2, 3, 1, 2])
    splits = training.build_sequences_splits(token_buf, 2)
    assert splits.tolist() == [1, 4]
def test_build_sequences_splits_handles_out_of_range():
    """No split points exist when the requested length exceeds the buffer."""
    token_buf = torch.LongTensor([1, 2, 3, 1, 2])
    splits = training.build_sequences_splits(token_buf, 4)
    assert splits.tolist() == []
def test_sequence_gather_indices_gathers_sequences():
    """Gather indices wrap around the 3-token buffer for each start index."""
    gathered = training.build_sequence_gather_indices(3, [0, 1], 4)
    expected = [
        [0, 1],
        [1, 2],
        [2, 0],
        [0, 1],
    ]
    assert gathered.tolist() == expected
def test_build_sequence_mask_after_masks_tokens():
    """Positions after the first occurrence of token 3 are masked out."""
    seqs = torch.LongTensor([
        [0, 0],
        [1, 1],
        [3, 2],
        [3, 2],
        [2, 2],
    ])
    mask = training.build_sequence_mask_after(seqs, 3)
    # a column with no 3 stays fully unmasked
    assert mask.tolist() == [
        [1, 1],
        [1, 1],
        [1, 1],
        [0, 1],
        [0, 1],
    ]
def test_select_uniqueish_tokens_raises_for_invalid_min():
    """Uniqueness bounds outside the closed interval [0, 1] raise ValueError."""
    rng = np.random.Generator(np.random.PCG64(123))
    seq = ["a", "b"]
    cands = ["a"]
    # the interval endpoints themselves are accepted
    training.select_uniqueish_token(rng, cands, seq, 0.0)
    training.select_uniqueish_token(rng, cands, seq, 1.0)
    # anything just outside is rejected
    with pytest.raises(ValueError):
        training.select_uniqueish_token(rng, cands, seq, -0.01)
    with pytest.raises(ValueError):
        training.select_uniqueish_token(rng, cands, seq, 1.01)
def test_select_uniqueish_tokens_rejects_below_min():
    """The candidate is refused when adding it would exceed the uniqueness cap."""
    rng = np.random.Generator(np.random.PCG64(123))
    # appending "a" would make it 2/3 of the sequence, above the 0.5 bound
    chosen = training.select_uniqueish_token(rng, ["a"], ["a", "b"], 0.5)
    assert chosen is None
def test_select_uniqueish_tokens_accepts_new_token_above_min():
    """With no uniqueness constraint, the seeded RNG accepts "b"."""
    rng = np.random.Generator(np.random.PCG64(123))
    # 66% acceptance chance; with seed 123 the second draw succeeds
    chosen = training.select_uniqueish_token(rng, ["a", "b"], ["a", "b"], 0.0)
    assert chosen == "b"
def test_select_uniqueish_tokens_accepts_new_token_with_min_1():
    """A token absent from the sequence satisfies even the strictest bound."""
    rng = np.random.Generator(np.random.PCG64(123))
    chosen = training.select_uniqueish_token(rng, ["c"], ["a", "b"], 1.0)
    assert chosen == "c"
| [
"theissues.training.build_sequence_gather_indices",
"torch.LongTensor",
"numpy.random.PCG64",
"theissues.training.build_sequence_mask_after",
"pytest.raises",
"theissues.training.select_uniqueish_token",
"theissues.training.build_sequences_splits"
] | [((144, 177), 'torch.LongTensor', 'torch.LongTensor', (['[1, 2, 3, 1, 2]'], {}), '([1, 2, 3, 1, 2])\n', (160, 177), False, 'import torch\n'), ((192, 234), 'theissues.training.build_sequences_splits', 'training.build_sequences_splits', (['tokens', '(2)'], {}), '(tokens, 2)\n', (223, 234), False, 'from theissues import training\n'), ((360, 393), 'torch.LongTensor', 'torch.LongTensor', (['[1, 2, 3, 1, 2]'], {}), '([1, 2, 3, 1, 2])\n', (376, 393), False, 'import torch\n'), ((408, 450), 'theissues.training.build_sequences_splits', 'training.build_sequences_splits', (['tokens', '(4)'], {}), '(tokens, 4)\n', (439, 450), False, 'from theissues import training\n'), ((590, 651), 'theissues.training.build_sequence_gather_indices', 'training.build_sequence_gather_indices', (['num_tokens', '[0, 1]', '(4)'], {}), '(num_tokens, [0, 1], 4)\n', (628, 651), False, 'from theissues import training\n'), ((824, 882), 'torch.LongTensor', 'torch.LongTensor', (['[[0, 0], [1, 1], [3, 2], [3, 2], [2, 2]]'], {}), '([[0, 0], [1, 1], [3, 2], [3, 2], [2, 2]])\n', (840, 882), False, 'import torch\n'), ((979, 1027), 'theissues.training.build_sequence_mask_after', 'training.build_sequence_mask_after', (['sequences', '(3)'], {}), '(sequences, 3)\n', (1013, 1027), False, 'from theissues import training\n'), ((1403, 1466), 'theissues.training.select_uniqueish_token', 'training.select_uniqueish_token', (['rng', 'candidates', 'sequence', '(0.0)'], {}), '(rng, candidates, sequence, 0.0)\n', (1434, 1466), False, 'from theissues import training\n'), ((1471, 1534), 'theissues.training.select_uniqueish_token', 'training.select_uniqueish_token', (['rng', 'candidates', 'sequence', '(1.0)'], {}), '(rng, candidates, sequence, 1.0)\n', (1502, 1534), False, 'from theissues import training\n'), ((1308, 1328), 'numpy.random.PCG64', 'np.random.PCG64', (['(123)'], {}), '(123)\n', (1323, 1328), True, 'import numpy as np\n'), ((1557, 1582), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', 
(1570, 1582), False, 'import pytest\n'), ((1592, 1657), 'theissues.training.select_uniqueish_token', 'training.select_uniqueish_token', (['rng', 'candidates', 'sequence', '(-0.01)'], {}), '(rng, candidates, sequence, -0.01)\n', (1623, 1657), False, 'from theissues import training\n'), ((1667, 1692), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1680, 1692), False, 'import pytest\n'), ((1702, 1766), 'theissues.training.select_uniqueish_token', 'training.select_uniqueish_token', (['rng', 'candidates', 'sequence', '(1.01)'], {}), '(rng, candidates, sequence, 1.01)\n', (1733, 1766), False, 'from theissues import training\n'), ((1853, 1873), 'numpy.random.PCG64', 'np.random.PCG64', (['(123)'], {}), '(123)\n', (1868, 1873), True, 'import numpy as np\n'), ((2011, 2074), 'theissues.training.select_uniqueish_token', 'training.select_uniqueish_token', (['rng', 'candidates', 'sequence', '(0.5)'], {}), '(rng, candidates, sequence, 0.5)\n', (2042, 2074), False, 'from theissues import training\n'), ((2179, 2199), 'numpy.random.PCG64', 'np.random.PCG64', (['(123)'], {}), '(123)\n', (2194, 2199), True, 'import numpy as np\n'), ((2334, 2397), 'theissues.training.select_uniqueish_token', 'training.select_uniqueish_token', (['rng', 'candidates', 'sequence', '(0.0)'], {}), '(rng, candidates, sequence, 0.0)\n', (2365, 2397), False, 'from theissues import training\n'), ((2502, 2522), 'numpy.random.PCG64', 'np.random.PCG64', (['(123)'], {}), '(123)\n', (2517, 2522), True, 'import numpy as np\n'), ((2584, 2647), 'theissues.training.select_uniqueish_token', 'training.select_uniqueish_token', (['rng', 'candidates', 'sequence', '(1.0)'], {}), '(rng, candidates, sequence, 1.0)\n', (2615, 2647), False, 'from theissues import training\n')] |
# Core functionality
import logging
from marvin import config
from astropy.io import fits
import astropy.units as u
from astropy.cosmology import WMAP9
import numpy as np
from extinction import fm07 as extinction_law
from marvin.tools.image import Image
from marvin.tools.maps import Maps
from marvin.tools.cube import Cube
from marvin.utils.general.general import get_drpall_table, get_dapall_file
from .DKAnalogMixin import DKAnalogMixin
import os
from astropy.table import Table
class DK_MWAnalogs(DKAnalogMixin):
"""
Core DAP and DRP Data product holder
Parameters
----------
filename_drp: 'str', optional, must be keyword
filename of DRP file
filename_dap: 'str', optional, must be keyword
filename of DAP file
no_analog: `bool`, optional, must be keyword
if True, only loads dap and drp tables
drpver: 'str', optional, must be keyword
DRP Version to load
dapver: 'str', optional, must be keyword
DAP Version to load
latest: 'bool', optional, must be keyword
if True, use latest sample selection method
filename_targets: 'str', optional, must be keyword
MaNGA Target list file
filename_gz: 'str', optional, must be keyword
Galaxy Zoo Morphological Classifications file
sersic: 'bool', optional must be keyword
if True, uses Sersic Mass
"""
def __init__(self, filename_drp = None, filename_dap = None, no_analog = False,
drpver = None, dapver = None, latest = True,
filename_targets = None, filename_gz = None, sersic = False,
**kwargs):
# Get or set filenames for DRP all file
if filename_drp is None:
if drpver is None:
self.drpver, _ = config.lookUpVersions()
logging.warning("Using DRP Version: {}".format(self.drpver))
else:
self.drpver = drpver
self.filename_drp = os.path.join(os.environ['SAS_BASE_DIR'], 'mangawork', 'manga', 'spectro', 'redux',
self.drpver, 'drpall-{}.fits'.format(self.drpver))
else:
self.filename_drp = filename_drp
# Get or set filenames for DAP all file
if filename_dap is None:
if dapver is None:
_, self.dapver = config.lookUpVersions()
logging.warning("Using DAP Version: {}".format(self.dapver))
else:
self.dapver = dapver
self.filename_dap = os.path.join(os.environ['SAS_BASE_DIR'], 'mangawork', 'manga', 'spectro', 'analysis',
self.drpver, self.dapver, 'dapall-{0}-{1}.fits'.format(self.drpver,self.dapver))
else:
self.filename_dap = filename_dap
# Get or set filename for Target File List
if filename_targets is None:
self.filename_targets = os.path.join(os.environ['SAS_BASE_DIR'], 'mangawork', 'manga', 'target',
'v1_2_27', 'MaNGA_targets_extNSA_tiled_ancillary.fits')
else:
self.filename_targets = filename_targets
# Get or set filename for Galaxy Zoo VAC
if filename_gz is None:
self.filename_gz = os.path.join(os.environ['SAS_BASE_DIR'], 'dr15', 'manga', 'morphology',
'galaxyzoo', 'MaNGA_gz-v1_0_1.fits')
else:
self.filename_gz = filename_gz
# Load Data
try:
self.drp = Table.read(self.filename_drp)
except FileNotFoundError:
logging.warning("DRP File not found, trying to download")
self.drp = get_drpall_table()
try:
self.dap = Table.read(self.filename_dap)
except FileNotFoundError:
logging.warning("DAP File not found, trying to download")
self.filename = get_dapall_file(self.drpver, self.dapver)
try:
self.dap = Table.read(self.filename_dap)
except FileNotFoundError:
if (self.drpver == "v2_4_e") & (self.dapver == "2.2.1"):
self.dap = Table.read("https://data.sdss.org/sas/dr15/manga/spectro/analysis/v2_4_3/2.2.1/dapall-v2_4_3-2.2.1.fits")
try:
self.targets = Table.read(self.filename_targets)
except FileNotFoundError:
logging.warning("Target Data File not found")
self.targets = Table()
try:
self.gz = Table.read(self.filename_gz)
except FileNotFoundError:
logging.warning("Galaxy Zoo Morphology Data File not found")
self.gz = Table()
if not no_analog:
self.sersic = sersic
# Set Ind Dictionary of Targets by MangaID
self.ind_dict_target = dict((k.rstrip(),i) for i,k in enumerate(self.targets['MANGAID']))
# Set some Milky Way Stellar Mass Estimates
self.mw_stellar_mass = 6.43 * 10**10 * u.solMass
self.mw_stellar_mass_err = 0.63 * 10**10 * u.solMass
self.mw_stellar_mass_jbh = 5.0 * 10**10 * u.solMass
self.mw_stellar_mass_jbh_err = 1.0 * 10**10 * u.solMass
self.targets_gz = self.targets_in_gz()
if latest:
self.barred_targets_mask = self.get_barred_galaxies_mask()
self.nonbarred_targets_mask = self.get_nonbarred_galaxies_mask()
# At least Green Valley Selection
# Set Color Keys:
color_keys = ["<KEY>", "r", "i", "z"]
# Cosmology Correction
h = 1*u.mag* u.littleh
cor = 5.* np.log10(h.to(u.mag,u.with_H0(WMAP9.H0)).value) * u.mag
barred_targets_phot = Table(
self.targets_gz[self.barred_targets_mask]['NSA_ELPETRO_ABSMAG'] * u.mag + cor,
names = color_keys
)
nonbarred_targets_phot = Table(
self.targets_gz[self.nonbarred_targets_mask]['NSA_ELPETRO_ABSMAG'] * u.mag + cor,
names = color_keys
)
# Remove Red Galaxies with NUV - r > 5
self.barred_targets_full = self.targets_gz[self.barred_targets_mask][(barred_targets_phot["N"] - barred_targets_phot["r"]) <= 5]
self.nonbarred_targets_full = self.targets_gz[self.nonbarred_targets_mask][(nonbarred_targets_phot["N"] - nonbarred_targets_phot["r"]) <= 5]
# Stellar Masses
self.barred_targets_stellar_mass = (10**(self.barred_targets_full["NSA_ELPETRO_MASS"]) *\
u.solMass* u.littleh**-2).to(u.solMass, u.with_H0(WMAP9.H0))
self.nonbarred_targets_stellar_mass = (10**(self.nonbarred_targets_full["NSA_ELPETRO_MASS"]) *\
u.solMass* u.littleh**-2).to(u.solMass, u.with_H0(WMAP9.H0))
#Get closest nonbarred galaxy by mass for each barred galaxy
all_gals_drawn = []
for gal_mass in self.barred_targets_stellar_mass:
dif = (np.log10(gal_mass.value) -
np.log10(self.nonbarred_targets_stellar_mass.value))
ind = np.argsort(np.abs(dif))
added = False
ell = 0
while not added:
if ind[ell] not in all_gals_drawn:
all_gals_drawn.append(ind[ell])
added = True
else:
ell += 1
self.nonbarred_targets_selected = self.nonbarred_targets_full[all_gals_drawn]
self.nonbarred_targets_selected_stellar_mass = self.nonbarred_targets_stellar_mass[all_gals_drawn]
barred_IDs = [mangaid.decode("utf-8").rstrip() for mangaid in self.barred_targets_full["MANGAID"].data]
nonbarred_IDs = [mangaid.decode("utf-8").rstrip() for mangaid in
self.nonbarred_targets_selected["MANGAID"].data]
ind_dict_drp = dict((k,i) for i,k in enumerate(self.drp['mangaid']))
barred_in_drp = set(ind_dict_drp).intersection(barred_IDs)
bar_in_drp_ind = [ind_dict_drp[x] for x in barred_in_drp]
nonbarred_in_drp = set(ind_dict_drp).intersection(nonbarred_IDs)
nonbar_in_drp_ind = [ind_dict_drp[x] for x in nonbarred_in_drp]
barred_plateifus = [plateifu.decode("utf").rstrip() for plateifu in self.drp[bar_in_drp_ind]["plateifu"].data]
nonbarred_plateifus = [plateifu.decode("utf").rstrip() for plateifu in self.drp[nonbar_in_drp_ind]["plateifu"].data]
ind_dict_dap = dict((k,i) for i,k in enumerate(self.dap['PLATEIFU']))
barred_sample_dap_ind = np.array([ind_dict_dap[plateifu] for plateifu in barred_plateifus])
nonbarred_sample_dap_ind = np.array([ind_dict_dap[plateifu] for plateifu in nonbarred_plateifus])
if config.release == "MPL-8":
bad_barred_ind = ind_dict_dap["10507-12705"]
good_barred_mask = np.array([ind != bad_barred_ind for ind in barred_sample_dap_ind])
barred_sample_dap_ind = barred_sample_dap_ind[good_barred_mask]
bad_nonbarred_inds = [ind_dict_dap[bad] for bad in ["8332-12704", "8616-3704", "10498-12704"]]
good_nonbarred_mask = np.array([ind not in bad_nonbarred_inds for ind in nonbarred_sample_dap_ind])
nonbarred_sample_dap_ind = nonbarred_sample_dap_ind[good_nonbarred_mask]
self.barred_sample = self.dap[barred_sample_dap_ind]
self.nonbarred_sample = self.dap[nonbarred_sample_dap_ind]
argsort = np.argsort(self.barred_sample["NSA_Z"])
self.barred_sample = self.barred_sample[argsort]
argsort = np.argsort(self.nonbarred_sample["NSA_Z"])
self.nonbarred_sample = self.nonbarred_sample[argsort]
self.barred_stellar_mass = (self.drp[self.barred_sample["DRPALLINDX"]]["nsa_elpetro_mass"] *\
u.solMass* u.littleh**-2).to(u.solMass, u.with_H0(WMAP9.H0))
self.nonbarred_stellar_mass = (self.drp[self.nonbarred_sample["DRPALLINDX"]]["nsa_elpetro_mass"] *\
u.solMass* u.littleh**-2).to(u.solMass, u.with_H0(WMAP9.H0))
self.in_mass_range_barred = self.barred_stellar_mass <= (self.mw_stellar_mass + self.mw_stellar_mass_err * 2.5)
self.in_mass_range_barred &= self.barred_stellar_mass > self.mw_stellar_mass - self.mw_stellar_mass_err * 2.5
self.in_mass_range_nonbarred = self.nonbarred_stellar_mass <= self.mw_stellar_mass + self.mw_stellar_mass_err * 2.5
self.in_mass_range_nonbarred &= self.nonbarred_stellar_mass > self.mw_stellar_mass - self.mw_stellar_mass_err * 2.5
self.dk_sample = self.barred_sample[self.in_mass_range_barred]
self.dk_sample_nobar = self.nonbarred_sample[self.in_mass_range_nonbarred]
self.barred_sfr = (self.barred_sample["SFR_TOT"] * u.solMass / u.yr * u.littleh**-2).to(u.solMass / u.yr, u.with_H0(WMAP9.H0))
self.nonbarred_sfr = (self.nonbarred_sample["SFR_TOT"] * u.solMass / u.yr * u.littleh**-2).to(u.solMass / u.yr, u.with_H0(WMAP9.H0))
self.dk_sample_sfr = self.barred_sfr[self.in_mass_range_barred]
self.dk_sample_nobar_sfr = self.nonbarred_sfr[self.in_mass_range_nonbarred]
self.barred_sfr_1re = (self.barred_sample["SFR_1RE"] * u.solMass / u.yr * u.littleh**-2).to(u.solMass / u.yr, u.with_H0(WMAP9.H0))
self.nonbarred_sfr_1re = (self.nonbarred_sample["SFR_1RE"] * u.solMass / u.yr * u.littleh**-2).to(u.solMass / u.yr, u.with_H0(WMAP9.H0))
self.dk_sample_sfr_1re = self.barred_sfr_1re[self.in_mass_range_barred]
self.dk_sample_nobar_sfr_1re = self.nonbarred_sfr_1re[self.in_mass_range_nonbarred]
else:
# Set Full Barred and Unbarred Sample
self.barred_sample = self.get_barred_galaxies_dap()
argsort = np.argsort(self.barred_sample["NSA_Z"])
self.barred_sample = self.barred_sample[argsort]
self.nonbarred_sample = self.get_barred_galaxies_dap(nonbarred = True)
argsort = np.argsort(self.nonbarred_sample["NSA_Z"])
self.nonbarred_sample = self.nonbarred_sample[argsort]
# At least Green Valley Selection
# Set Color Keys:
color_keys = ["F", "N", "u", "g", "r", "i", "z"]
# Cosmology Correction
h = 1*u.mag* u.littleh
cor = 5.* np.log10(h.to(u.mag,u.with_H0(WMAP9.H0)).value) * u.mag
barred_sample_phot = Table(
self.drp[self.barred_sample["DRPALLINDX"]]['nsa_elpetro_absmag'] * u.mag + cor,
names = color_keys
)
nonbarred_sample_phot = Table(
self.drp[self.nonbarred_sample["DRPALLINDX"]]['nsa_elpetro_absmag'] * u.mag + cor,
names = color_keys
)
# Remove Red Galaxies with NUV - r > 5
self.barred_sample = self.barred_sample[(barred_sample_phot["N"] - barred_sample_phot["r"]) <= 5]
self.nonbarred_sample = self.nonbarred_sample[(nonbarred_sample_phot["N"] - nonbarred_sample_phot["r"]) <= 5]
# Stellar Masses
self.barred_stellar_mass = (self.drp[self.barred_sample["DRPALLINDX"]]["nsa_elpetro_mass"] * \
u.solMass* u.littleh**-2).to(u.solMass, u.with_H0(WMAP9.H0))
self.nonbarred_stellar_mass = (self.drp[self.nonbarred_sample["DRPALLINDX"]]["nsa_elpetro_mass"] * \
u.solMass* u.littleh**-2).to(u.solMass, u.with_H0(WMAP9.H0))
# Mass Selections
self.in_mass_range_barred = self.barred_stellar_mass <= self.mw_stellar_mass + self.mw_stellar_mass_err
self.in_mass_range_barred &= self.barred_stellar_mass > self.mw_stellar_mass - self.mw_stellar_mass_err
self.in_mass_range_nonbarred = self.nonbarred_stellar_mass <= self.mw_stellar_mass + self.mw_stellar_mass_err
self.in_mass_range_nonbarred &= self.nonbarred_stellar_mass > self.mw_stellar_mass - self.mw_stellar_mass_err
#JBH
self.in_mass_range_barred_jbh = self.barred_stellar_mass <= self.mw_stellar_mass_jbh + self.mw_stellar_mass_jbh_err
self.in_mass_range_barred_jbh &= self.barred_stellar_mass > self.mw_stellar_mass_jbh - self.mw_stellar_mass_jbh_err
self.in_mass_range_nonbarred_jbh = self.nonbarred_stellar_mass <= self.mw_stellar_mass_jbh + self.mw_stellar_mass_jbh_err
self.in_mass_range_nonbarred_jbh &= self.nonbarred_stellar_mass > self.mw_stellar_mass_jbh - self.mw_stellar_mass_jbh_err
self.dk_sample = self.barred_sample[self.in_mass_range_barred]
self.dk_sample_nobar = self.nonbarred_sample[self.in_mass_range_nonbarred]
#JBH
self.dk_sample_jbh = self.barred_sample[self.in_mass_range_barred_jbh]
self.dk_sample_jbh_nobar = self.nonbarred_sample[self.in_mass_range_nonbarred_jbh]
#SFR
self.barred_sfr = (self.barred_sample["SFR_TOT"] * u.solMass / u.yr * u.littleh**-2).to(u.solMass / u.yr, u.with_H0(WMAP9.H0))
self.nonbarred_sfr = (self.nonbarred_sample["SFR_TOT"] * u.solMass / u.yr * u.littleh**-2).to(u.solMass / u.yr, u.with_H0(WMAP9.H0))
self.dk_sample_sfr = self.barred_sfr[self.in_mass_range_barred]
self.dk_sample_nobar_sfr = self.nonbarred_sfr[self.in_mass_range_nonbarred]
self.dk_sample_jbh_sfr = self.barred_sfr[self.in_mass_range_barred_jbh]
self.dk_sample_jbh_nobar_sfr = self.nonbarred_sfr[self.in_mass_range_nonbarred_jbh]
def targets_in_drp(self):
"""
return Table of Targets that are in the DRP ALL FILE
"""
data_targets_drp_ind = [self.ind_dict_target[x] for x in self.drp['mangaid']]
return self.targets[data_targets_drp_ind]
def targets_in_dap(self):
"""
return Table of Targets that are in the DRP ALL FILE
"""
data_targets_dap_ind = [self.ind_dict_target[x] for x in self.dap['MANGAID']]
return self.targets[data_targets_dap_ind]
def targets_in_gz(self):
"""
return Table of Targets that are in the Galaxy Zoo Catalog
"""
data_targets_gz_ind = [self.ind_dict_target[x] for x in self.gz['MANGAID']]
return self.targets[data_targets_gz_ind]
def get_barred_galaxies_mask(self):
"""
Return Barred Galaxy Mask from self.gz
"""
return (self.gz['t01_smooth_or_features_a02_features_or_disk_debiased'] > 0.430) & \
(self.gz['t02_edgeon_a05_no_debiased'] > 0.715) & \
(self.gz['t02_edgeon_a05_no_count'] >= 20) & \
(self.gz['t03_bar_a06_bar_debiased'] >= 0.8)
def get_nonbarred_galaxies_mask(self):
"""
Return Non-Barred Galaxy Mask from self.gz
"""
return (self.gz['t01_smooth_or_features_a02_features_or_disk_debiased'] > 0.430) & \
(self.gz['t02_edgeon_a05_no_debiased'] > 0.715) & \
(self.gz['t02_edgeon_a05_no_count'] >= 20) & \
(self.gz['t03_bar_a06_bar_debiased'] <= 0.2)
def get_barred_galaxies_dap(self, nonbarred = False):
"""
Return DAP Table for barred/nonbarred galaxies
"""
bar_mask = self.get_barred_galaxies_mask()
no_bar_mask = self.get_nonbarred_galaxies_mask()
barred_IDs = [mangaid.decode("utf-8").rstrip() for mangaid in self.targets_gz[bar_mask]["MANGAID"].data]
nonbarred_IDs = [mangaid.decode("utf-8").rstrip() for mangaid in self.targets_gz[no_bar_mask]["MANGAID"].data]
ind_dict_drp = dict((k,i) for i,k in enumerate(self.drp['mangaid']))
barred_in_drp = set(ind_dict_drp).intersection(barred_IDs)
bar_in_drp_ind = [ind_dict_drp[x] for x in barred_in_drp]
nonbarred_in_drp = set(ind_dict_drp).intersection(nonbarred_IDs)
nonbar_in_drp_ind = [ind_dict_drp[x] for x in nonbarred_in_drp]
barred_plateifus = [plateifu.decode("utf").rstrip() for plateifu in self.drp[bar_in_drp_ind]["plateifu"].data]
nonbarred_plateifus = [plateifu.decode("utf").rstrip() for plateifu in self.drp[nonbar_in_drp_ind]["plateifu"].data]
ind_dict_dap = dict((k,i) for i,k in enumerate(self.dap['PLATEIFU']))
bad_barred_ind = ind_dict_dap["10507-12705"]
barred_sample_dap_ind = np.array([ind_dict_dap[plateifu] for plateifu in barred_plateifus])
good_barred_mask = np.array([ind != bad_barred_ind for ind in barred_sample_dap_ind])
barred_sample_dap_ind = barred_sample_dap_ind[good_barred_mask]
bad_nonbarred_inds = [ind_dict_dap[bad] for bad in ["8332-12704", "8616-3704", "10498-12704"]]
nonbarred_sample_dap_ind = np.array([ind_dict_dap[plateifu] for plateifu in nonbarred_plateifus])
good_nonbarred_mask = np.array([ind not in bad_nonbarred_inds for ind in nonbarred_sample_dap_ind])
nonbarred_sample_dap_ind = nonbarred_sample_dap_ind[good_nonbarred_mask]
barred_sample = self.dap[barred_sample_dap_ind]
nonbarred_sample = self.dap[nonbarred_sample_dap_ind]
if nonbarred:
return nonbarred_sample
else:
return barred_sample
def get_stellar_mass_mwa_mask(self, sersic = False):
"""
Return mask of galaxies in self.targets_gz that fit within stellar mass range of McMillan (2011) MW Stellar Mass estimates
"""
if sersic:
key = "NSA_SERSIC_MASS"
else:
key = 'NSA_ELPETRO_MASS'
NSA_STELLAR_MASS = (10**self.targets_gz[key] * u.solMass* u.littleh**-2).to(u.solMass, u.with_H0(WMAP9.H0))
return (NSA_STELLAR_MASS < (self.mw_stellar_mass + self.mw_stellar_mass_err)) & \
(NSA_STELLAR_MASS > (self.mw_stellar_mass - self.mw_stellar_mass_err))
    def get_jbh_stellar_mass_mwa_mask(self, sersic = False):
        """
        Return mask of galaxies in self.targets_gz that fit within stellar mass
        range of the JBH (2016) MW Stellar Mass estimates
        (presumably Bland-Hawthorn & Gerhard 2016 — confirm)

        sersic : bool, if True use the NSA Sersic stellar mass column
            instead of the elliptical Petrosian one
        """
        if sersic:
            key = "NSA_SERSIC_MASS"
        else:
            key = 'NSA_ELPETRO_MASS'
        # Catalog stores log10(mass) in h^-2 units; convert to solar masses
        # with the WMAP9 Hubble constant via the littleh equivalency.
        NSA_STELLAR_MASS = (10**self.targets_gz[key] * u.solMass* u.littleh**-2).to(u.solMass, u.with_H0(WMAP9.H0))
        return (NSA_STELLAR_MASS < (self.mw_stellar_mass_jbh + self.mw_stellar_mass_jbh_err)) & \
            (NSA_STELLAR_MASS > (self.mw_stellar_mass_jbh - self.mw_stellar_mass_jbh_err))
def determine_mass_mwa(self, reset = False, jbh = False, barred = True, no_morph = False, sersic = False):
"""
Return dap and drp entries with Stellar Mass within MW range
Parameters
----------
reset: 'bool', optional, must be keyword
if True, resets self.dap_mass_mwa to this
jbh: 'bool', optional, must be keyword
if True, uses JBH Stellarr Mass estimate
barred: 'bool', optional, must be keyword
if True, returns barred galaxies only
if False, returns non-barred galaxies only
no_morph: 'bool', optional, must be keyword
if True, doesn't consider morphology information and returns only mass cuts
sersic: 'bool', if True, uses sersic mass
"""
if no_morph:
if reset:
if not jbh:
self.mass_targets = self.targets_gz[self.get_stellar_mass_mwa_mask(sersic = sersic)]
else:
self.mass_jbh_targets = self.targets_gz[self.get_jbh_stellar_mass_mwa_mask(sersic = sersic)]
else:
if not jbh:
return self.targets_gz[self.get_stellar_mass_mwa_mask(sersic = sersic)]
else:
return self.targerts_gz[self.get_jbh_stellar_mass_mwa_mask(sersic = sersic)]
else:
if not jbh:
if barred:
mwa_gz = self.gz[np.logical_and(self.get_stellar_mass_mwa_mask(sersic = sersic), self.get_barred_galaxies_mask())]
else:
mwa_gz = self.gz[np.logical_and(self.get_stellar_mass_mwa_mask(sersic = sersic), self.get_nonbarred_galaxies_mask())]
else:
if barred:
mwa_gz = self.gz[np.logical_and(self.get_jbh_stellar_mass_mwa_mask(sersic = sersic), self.get_barred_galaxies_mask())]
else:
mwa_gz = self.gz[np.logical_and(self.get_jbh_stellar_mass_mwa_mask(sersic = sersic), self.get_nonbarred_galaxies_mask())]
ind_dict_dap = dict((k,i) for i,k in enumerate(self.dap['MANGAID']))
inter_bar_stellar_dap = set(ind_dict_dap).intersection(mwa_gz['MANGAID'])
bar_stellar_dap_ind = [ind_dict_dap[x] for x in inter_bar_stellar_dap]
bar_stellar_drp_ind = self.dap['DRPALLINDX'][bar_stellar_dap_ind]
if reset:
if not jbh:
if barred:
self.mass_mwa_dap = self.dap[bar_stellar_dap_ind]
self.mass_mwa_drp = self.drp[bar_stellar_drp_ind]
else:
self.mass_mwa_nobar_dap = self.dap[bar_stellar_dap_ind]
self.mass_mwa_nobar_drp = self.drp[bar_stellar_drp_ind]
else:
if barred:
self.mass_jbh_mwa_dap = self.dap[bar_stellar_dap_ind]
self.mass_jbh_mwa_drp = self.drp[bar_stellar_drp_ind]
else:
self.mass_jbh_mwa_nobar_dap = self.dap[bar_stellar_dap_ind]
self.mass_jbh_mwa_nobar_drp = self.drp[bar_stellar_drp_ind]
return self.dap[bar_stellar_dap_ind], self.drp[bar_stellar_drp_ind]
def get_mass_images(self, return_images = True, barred = True, jbh = False):
"""
Get images of Mass based MWAs
Parameters
----------
return_images: 'bool', optional, must be keyword
if True, returns images
if False, adds images to class
jbh: 'bool', optional, must be keyword
if True, uses JBH Stellarr Mass estimate
barred: 'bool', optional, must be keyword
if True, returns barred galaxies only
if False, returns non-barred galaxies only
"""
if jbh:
if barred:
images = Image.from_list(self.mass_jbh_mwa_dap["PLATEIFU"])
if return_images:
return images
else:
self.mass_jbh_mwa_images = images
else:
images = Image.from_list(self.mass_jbh_mwa_nobar_dap["PLATEIFU"])
if return_images:
return images
else:
self.mass_jbh_mwa_nobar_images = images
else:
if barred:
images = Image.from_list(self.mass_mwa_dap["PLATEIFU"])
if return_images:
return images
else:
self.mass_mwa_images = images
else:
images = Image.from_list(self.mass_mwa_nobar_dap["PLATEIFU"])
if return_images:
return images
else:
self.mass_mwa_nobar_images = images
| [
"astropy.units.with_H0",
"marvin.tools.image.Image.from_list",
"astropy.table.Table",
"numpy.abs",
"logging.warning",
"marvin.config.lookUpVersions",
"numpy.argsort",
"numpy.array",
"marvin.utils.general.general.get_dapall_file",
"marvin.utils.general.general.get_drpall_table",
"numpy.log10",
... | [((19120, 19187), 'numpy.array', 'np.array', (['[ind_dict_dap[plateifu] for plateifu in barred_plateifus]'], {}), '([ind_dict_dap[plateifu] for plateifu in barred_plateifus])\n', (19128, 19187), True, 'import numpy as np\n'), ((19215, 19283), 'numpy.array', 'np.array', (['[(ind != bad_barred_ind) for ind in barred_sample_dap_ind]'], {}), '([(ind != bad_barred_ind) for ind in barred_sample_dap_ind])\n', (19223, 19283), True, 'import numpy as np\n'), ((19493, 19563), 'numpy.array', 'np.array', (['[ind_dict_dap[plateifu] for plateifu in nonbarred_plateifus]'], {}), '([ind_dict_dap[plateifu] for plateifu in nonbarred_plateifus])\n', (19501, 19563), True, 'import numpy as np\n'), ((19594, 19673), 'numpy.array', 'np.array', (['[(ind not in bad_nonbarred_inds) for ind in nonbarred_sample_dap_ind]'], {}), '([(ind not in bad_nonbarred_inds) for ind in nonbarred_sample_dap_ind])\n', (19602, 19673), True, 'import numpy as np\n'), ((2904, 3036), 'os.path.join', 'os.path.join', (["os.environ['SAS_BASE_DIR']", '"""mangawork"""', '"""manga"""', '"""target"""', '"""v1_2_27"""', '"""MaNGA_targets_extNSA_tiled_ancillary.fits"""'], {}), "(os.environ['SAS_BASE_DIR'], 'mangawork', 'manga', 'target',\n 'v1_2_27', 'MaNGA_targets_extNSA_tiled_ancillary.fits')\n", (2916, 3036), False, 'import os\n'), ((3242, 3354), 'os.path.join', 'os.path.join', (["os.environ['SAS_BASE_DIR']", '"""dr15"""', '"""manga"""', '"""morphology"""', '"""galaxyzoo"""', '"""MaNGA_gz-v1_0_1.fits"""'], {}), "(os.environ['SAS_BASE_DIR'], 'dr15', 'manga', 'morphology',\n 'galaxyzoo', 'MaNGA_gz-v1_0_1.fits')\n", (3254, 3354), False, 'import os\n'), ((3495, 3524), 'astropy.table.Table.read', 'Table.read', (['self.filename_drp'], {}), '(self.filename_drp)\n', (3505, 3524), False, 'from astropy.table import Table\n'), ((3707, 3736), 'astropy.table.Table.read', 'Table.read', (['self.filename_dap'], {}), '(self.filename_dap)\n', (3717, 3736), False, 'from astropy.table import Table\n'), ((4274, 4307), 
'astropy.table.Table.read', 'Table.read', (['self.filename_targets'], {}), '(self.filename_targets)\n', (4284, 4307), False, 'from astropy.table import Table\n'), ((4471, 4499), 'astropy.table.Table.read', 'Table.read', (['self.filename_gz'], {}), '(self.filename_gz)\n', (4481, 4499), False, 'from astropy.table import Table\n'), ((20395, 20414), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (20404, 20414), True, 'import astropy.units as u\n'), ((21034, 21053), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (21043, 21053), True, 'import astropy.units as u\n'), ((1783, 1806), 'marvin.config.lookUpVersions', 'config.lookUpVersions', ([], {}), '()\n', (1804, 1806), False, 'from marvin import config\n'), ((2338, 2361), 'marvin.config.lookUpVersions', 'config.lookUpVersions', ([], {}), '()\n', (2359, 2361), False, 'from marvin import config\n'), ((3571, 3628), 'logging.warning', 'logging.warning', (['"""DRP File not found, trying to download"""'], {}), "('DRP File not found, trying to download')\n", (3586, 3628), False, 'import logging\n'), ((3652, 3670), 'marvin.utils.general.general.get_drpall_table', 'get_drpall_table', ([], {}), '()\n', (3668, 3670), False, 'from marvin.utils.general.general import get_drpall_table, get_dapall_file\n'), ((3783, 3840), 'logging.warning', 'logging.warning', (['"""DAP File not found, trying to download"""'], {}), "('DAP File not found, trying to download')\n", (3798, 3840), False, 'import logging\n'), ((3869, 3910), 'marvin.utils.general.general.get_dapall_file', 'get_dapall_file', (['self.drpver', 'self.dapver'], {}), '(self.drpver, self.dapver)\n', (3884, 3910), False, 'from marvin.utils.general.general import get_drpall_table, get_dapall_file\n'), ((4354, 4399), 'logging.warning', 'logging.warning', (['"""Target Data File not found"""'], {}), "('Target Data File not found')\n", (4369, 4399), False, 'import logging\n'), ((4427, 4434), 'astropy.table.Table', 'Table', ([], {}), 
'()\n', (4432, 4434), False, 'from astropy.table import Table\n'), ((4546, 4606), 'logging.warning', 'logging.warning', (['"""Galaxy Zoo Morphology Data File not found"""'], {}), "('Galaxy Zoo Morphology Data File not found')\n", (4561, 4606), False, 'import logging\n'), ((4629, 4636), 'astropy.table.Table', 'Table', ([], {}), '()\n', (4634, 4636), False, 'from astropy.table import Table\n'), ((5745, 5852), 'astropy.table.Table', 'Table', (["(self.targets_gz[self.barred_targets_mask]['NSA_ELPETRO_ABSMAG'] * u.mag + cor)"], {'names': 'color_keys'}), "(self.targets_gz[self.barred_targets_mask]['NSA_ELPETRO_ABSMAG'] * u.\n mag + cor, names=color_keys)\n", (5750, 5852), False, 'from astropy.table import Table\n'), ((5951, 6060), 'astropy.table.Table', 'Table', (["(self.targets_gz[self.nonbarred_targets_mask]['NSA_ELPETRO_ABSMAG'] * u.mag +\n cor)"], {'names': 'color_keys'}), "(self.targets_gz[self.nonbarred_targets_mask]['NSA_ELPETRO_ABSMAG'] *\n u.mag + cor, names=color_keys)\n", (5956, 6060), False, 'from astropy.table import Table\n'), ((8908, 8975), 'numpy.array', 'np.array', (['[ind_dict_dap[plateifu] for plateifu in barred_plateifus]'], {}), '([ind_dict_dap[plateifu] for plateifu in barred_plateifus])\n', (8916, 8975), True, 'import numpy as np\n'), ((9019, 9089), 'numpy.array', 'np.array', (['[ind_dict_dap[plateifu] for plateifu in nonbarred_plateifus]'], {}), '([ind_dict_dap[plateifu] for plateifu in nonbarred_plateifus])\n', (9027, 9089), True, 'import numpy as np\n'), ((9874, 9913), 'numpy.argsort', 'np.argsort', (["self.barred_sample['NSA_Z']"], {}), "(self.barred_sample['NSA_Z'])\n", (9884, 9913), True, 'import numpy as np\n'), ((10005, 10047), 'numpy.argsort', 'np.argsort', (["self.nonbarred_sample['NSA_Z']"], {}), "(self.nonbarred_sample['NSA_Z'])\n", (10015, 10047), True, 'import numpy as np\n'), ((12389, 12428), 'numpy.argsort', 'np.argsort', (["self.barred_sample['NSA_Z']"], {}), "(self.barred_sample['NSA_Z'])\n", (12399, 12428), True, 'import numpy as 
np\n'), ((12607, 12649), 'numpy.argsort', 'np.argsort', (["self.nonbarred_sample['NSA_Z']"], {}), "(self.nonbarred_sample['NSA_Z'])\n", (12617, 12649), True, 'import numpy as np\n'), ((13071, 13179), 'astropy.table.Table', 'Table', (["(self.drp[self.barred_sample['DRPALLINDX']]['nsa_elpetro_absmag'] * u.mag + cor\n )"], {'names': 'color_keys'}), "(self.drp[self.barred_sample['DRPALLINDX']]['nsa_elpetro_absmag'] * u.\n mag + cor, names=color_keys)\n", (13076, 13179), False, 'from astropy.table import Table\n'), ((13277, 13387), 'astropy.table.Table', 'Table', (["(self.drp[self.nonbarred_sample['DRPALLINDX']]['nsa_elpetro_absmag'] * u.\n mag + cor)"], {'names': 'color_keys'}), "(self.drp[self.nonbarred_sample['DRPALLINDX']]['nsa_elpetro_absmag'] *\n u.mag + cor, names=color_keys)\n", (13282, 13387), False, 'from astropy.table import Table\n'), ((25213, 25263), 'marvin.tools.image.Image.from_list', 'Image.from_list', (["self.mass_jbh_mwa_dap['PLATEIFU']"], {}), "(self.mass_jbh_mwa_dap['PLATEIFU'])\n", (25228, 25263), False, 'from marvin.tools.image import Image\n'), ((25451, 25507), 'marvin.tools.image.Image.from_list', 'Image.from_list', (["self.mass_jbh_mwa_nobar_dap['PLATEIFU']"], {}), "(self.mass_jbh_mwa_nobar_dap['PLATEIFU'])\n", (25466, 25507), False, 'from marvin.tools.image import Image\n'), ((25720, 25766), 'marvin.tools.image.Image.from_list', 'Image.from_list', (["self.mass_mwa_dap['PLATEIFU']"], {}), "(self.mass_mwa_dap['PLATEIFU'])\n", (25735, 25766), False, 'from marvin.tools.image import Image\n'), ((25950, 26002), 'marvin.tools.image.Image.from_list', 'Image.from_list', (["self.mass_mwa_nobar_dap['PLATEIFU']"], {}), "(self.mass_mwa_nobar_dap['PLATEIFU'])\n", (25965, 26002), False, 'from marvin.tools.image import Image\n'), ((3955, 3984), 'astropy.table.Table.read', 'Table.read', (['self.filename_dap'], {}), '(self.filename_dap)\n', (3965, 3984), False, 'from astropy.table import Table\n'), ((6701, 6720), 'astropy.units.with_H0', 'u.with_H0', 
(['WMAP9.H0'], {}), '(WMAP9.H0)\n', (6710, 6720), True, 'import astropy.units as u\n'), ((6919, 6938), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (6928, 6938), True, 'import astropy.units as u\n'), ((9234, 9302), 'numpy.array', 'np.array', (['[(ind != bad_barred_ind) for ind in barred_sample_dap_ind]'], {}), '([(ind != bad_barred_ind) for ind in barred_sample_dap_ind])\n', (9242, 9302), True, 'import numpy as np\n'), ((9534, 9613), 'numpy.array', 'np.array', (['[(ind not in bad_nonbarred_inds) for ind in nonbarred_sample_dap_ind]'], {}), '([(ind not in bad_nonbarred_inds) for ind in nonbarred_sample_dap_ind])\n', (9542, 9613), True, 'import numpy as np\n'), ((10332, 10351), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (10341, 10351), True, 'import astropy.units as u\n'), ((10553, 10572), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (10562, 10572), True, 'import astropy.units as u\n'), ((11388, 11407), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (11397, 11407), True, 'import astropy.units as u\n'), ((11537, 11556), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (11546, 11556), True, 'import astropy.units as u\n'), ((11858, 11877), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (11867, 11877), True, 'import astropy.units as u\n'), ((12011, 12030), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (12020, 12030), True, 'import astropy.units as u\n'), ((13948, 13967), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (13957, 13967), True, 'import astropy.units as u\n'), ((14148, 14167), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (14157, 14167), True, 'import astropy.units as u\n'), ((15783, 15802), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (15792, 15802), True, 'import astropy.units as 
u\n'), ((15932, 15951), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (15941, 15951), True, 'import astropy.units as u\n'), ((7148, 7172), 'numpy.log10', 'np.log10', (['gal_mass.value'], {}), '(gal_mass.value)\n', (7156, 7172), True, 'import numpy as np\n'), ((7199, 7250), 'numpy.log10', 'np.log10', (['self.nonbarred_targets_stellar_mass.value'], {}), '(self.nonbarred_targets_stellar_mass.value)\n', (7207, 7250), True, 'import numpy as np\n'), ((7289, 7300), 'numpy.abs', 'np.abs', (['dif'], {}), '(dif)\n', (7295, 7300), True, 'import numpy as np\n'), ((4127, 4242), 'astropy.table.Table.read', 'Table.read', (['"""https://data.sdss.org/sas/dr15/manga/spectro/analysis/v2_4_3/2.2.1/dapall-v2_4_3-2.2.1.fits"""'], {}), "(\n 'https://data.sdss.org/sas/dr15/manga/spectro/analysis/v2_4_3/2.2.1/dapall-v2_4_3-2.2.1.fits'\n )\n", (4137, 4242), False, 'from astropy.table import Table\n'), ((5670, 5689), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (5679, 5689), True, 'import astropy.units as u\n'), ((12997, 13016), 'astropy.units.with_H0', 'u.with_H0', (['WMAP9.H0'], {}), '(WMAP9.H0)\n', (13006, 13016), True, 'import astropy.units as u\n')] |
import numpy as np
import cv2
import glob
import config
import random
from multiprocessing.pool import ThreadPool
# Directory containing one .txt annotation file per image.
LABEL_PATH ='./labels'
# Directory containing the matching image files (same basenames, no .txt).
IMG_PATH = './imgs'
# add dynamic scaling and shifting later
# Augmentation ranges: uniform scale factor and pixel translation.
SCALE_RANGE = [0.6, 1.4]
SHIFT_RANGE = [-200, 200]
def random_warp(img, label, scale_range, shift_range, imgsize):
    """Apply a random scale + translation to an image and its boxes.

    img : input image (HxWxC array)
    label : boxes as rows of [cx, cy, w, h]; cx/cy are shifted then all
        four values are scaled
    scale_range, shift_range : [low, high] ranges for the uniform draws
    imgsize : (width, height) of the warped output

    Returns (warped_image, warped_boxes).
    """
    s = np.random.uniform(low=scale_range[0], high=scale_range[1])
    tx = np.random.uniform(low=shift_range[0], high=shift_range[1])
    ty = np.random.uniform(low=shift_range[0], high=shift_range[1])
    # 2x3 affine: translate by (tx, ty) then scale everything by s.
    affine = np.float32([[s, 0, tx * s], [0, s, ty * s]])
    warped = cv2.warpAffine(img, affine, dsize=imgsize)
    shifted = (label + np.float32([tx, ty, 0, 0])) * s
    return warped, shifted
def filter_label(label, category, segs, imgsize):
    """Keep only boxes whose full extent lies inside the image.

    label : sequence of [cx, cy, w, h] boxes (center + size, pixels)
    category : class ids, parallel to label
    segs : segmentation vectors, parallel to label
    imgsize : (width, height) of the image

    Returns (boxes as np.float32, kept categories, kept segmentations).
    Boxes touching the border exactly are kept (strict containment test).
    """
    kept_boxes = []
    kept_cats = []
    kept_segs = []
    for box, cat, seg in zip(label, category, segs):
        cx, cy, w, h = box[0], box[1], box[2], box[3]
        inside = (cx - 0.5 * w >= 0 and cx + 0.5 * w <= imgsize[0]
                  and cy - 0.5 * h >= 0 and cy + 0.5 * h <= imgsize[1])
        if inside:
            kept_boxes.append(box)
            kept_cats.append(cat)
            kept_segs.append(seg)
    return np.float32(kept_boxes), kept_cats, kept_segs
def compute_iou_simple(w1, h1, w2, h2):
    """Shape-only IoU of two boxes assumed to share the same center.

    The intersection is approximated as min(w1, w2) * min(h1, h2), which
    is exact for co-centered axis-aligned boxes; a small epsilon keeps
    the division safe when the union is zero.
    """
    inter = min(w1, w2) * min(h1, h2)
    union = w1 * h1 + w2 * h2 - inter
    return inter / (union + 1e-5)
def parse_single_label(labelmap, maskmap, lab, category, seg):
    """Write one ground-truth box into the dense label/mask grids.

    Channel layout per grid cell (A = number of anchors):
    [0, A): objectness, [A, 5A): per-anchor (dx, dy, dw, dh),
    [5A, 5A + A*categories): per-anchor class one-hot,
    then A blocks of config.seg_size segmentation values.

    lab : [cx, cy, w, h] box in pixels; category : class id;
    seg : flat segmentation vector of length config.seg_size.
    maskmap gets 1s on every channel this box writes (loss masking —
    TODO confirm against the training code).
    """
    w,h = lab[2], lab[3]
    out_channel = len(config.anchor_shape) * len(config.anchor_scale)
    # determine which anchor: pick the anchor (w, h) with the best
    # center-aligned IoU against the box shape
    shapes = config.anchor_shape
    scales = config.anchor_scale
    whs = []
    for s in shapes:
        for sc in scales:
            whs.append([sc*np.sqrt(s), sc/np.sqrt(s)])
    ious = np.float32([compute_iou_simple(w,h,w1,h1) for w1,h1 in whs])
    idx = np.argmax(ious)
    wh = whs[idx]
    # determine dx,dy: offset of the box center from the cell center
    x,y = int(lab[0]), int(lab[1])
    stride = config.stride
    col = x//stride
    row = y//stride
    dx = x - col * stride - 0.5 * stride
    dy = y - row * stride - 0.5 * stride
    # determine dw, dh: log-ratio of box size to the matched anchor size
    dw = w / wh[0]
    dh = h / wh[1]
    dw = np.log(dw)
    dh = np.log(dh)
    xywh_idx = out_channel * 1 + idx*4
    # determine category (class) one-hot position for this anchor
    category_idx = out_channel * 5 + idx*config.categories + category
    # determine segmentation (instance) block start for this anchor
    seg_idx = out_channel * (5 + config.categories) + config.seg_size * idx
    # assign label map
    labelmap[row, col, idx] = 1
    labelmap[row, col, xywh_idx:xywh_idx+4] = np.float32([dx,dy,dw,dh])
    labelmap[row, col, category_idx] = 1
    labelmap[row, col, seg_idx:seg_idx+config.seg_size] = seg
    # assign mask map
    maskmap[row, col, :out_channel] = 1
    maskmap[row, col, xywh_idx:xywh_idx+4] = 1
    maskmap[row, col, out_channel*5+idx*config.categories : out_channel*5+(idx+1)*config.categories] = 1
    maskmap[row, col, seg_idx:seg_idx+config.seg_size] = 1
def parse_label(labelmap, maskmap, lab, categories, segs):
    """Rasterize every ground-truth box of one sample into the label/mask grids."""
    for idx, single in enumerate(lab):
        parse_single_label(labelmap, maskmap, single, categories[idx], segs[idx])
def process(batch, label_size):
    """Load, augment and encode one mini-batch.

    batch : list of (filename, boxes, categories, segs) tuples
    label_size : (rows, cols) of the output label grid

    Returns [images as np.float32, label maps, mask maps].
    """
    n = len(batch)
    depth = len(config.anchor_shape) * len(config.anchor_scale) * (5 + config.categories + config.seg_size)
    label_maps = np.zeros([n, label_size[0], label_size[1], depth]).astype(np.float32)
    mask_maps = np.zeros([n, label_size[0], label_size[1], depth]).astype(np.float32)
    images = []
    for sample_idx, (fname, boxes, category, segs) in enumerate(batch):
        img = cv2.imread(fname, 1)
        boxes = np.float32(boxes)
        # Halve very large images (and their boxes) before augmenting.
        if img.shape[0] > 1000 and img.shape[1] > 1000:
            img = cv2.resize(img, None, fx=0.5, fy=0.5)
            boxes = boxes * 0.5
        kept = []
        # Re-draw random warps until at least one box survives the crop filter.
        while len(kept) == 0:
            warped, warped_boxes = random_warp(img, boxes, SCALE_RANGE, SHIFT_RANGE, (512, 512))
            kept, kept_cats, kept_segs = filter_label(warped_boxes, category, segs, config.image_size)
        images.append(warped)
        parse_label(label_maps[sample_idx], mask_maps[sample_idx], kept, kept_cats, kept_segs)
    return [np.float32(images), label_maps, mask_maps]
def sigmoid(x):
    """Logistic function 1 / (1 + exp(-x)); works elementwise on arrays."""
    return 1. / (1. + np.exp(-x))
def visualize(img, label, sig=False):
    """Draw decoded boxes on img and rebuild a segmentation heat map.

    img : image to annotate (converted to uint8)
    label : dense label grid with the layout written by parse_single_label
    sig : if True, pass the segmentation values through a sigmoid first
        (use for raw network outputs)

    Returns (annotated image, uint8 segmentation map).
    """
    img = np.uint8(img)
    segs = np.zeros([img.shape[0], img.shape[1]]).astype(np.uint8)
    out_channel = len(config.anchor_shape) * len(config.anchor_scale)
    stride = config.stride
    # Anchor (w, h) pairs in the same order used by parse_single_label.
    whs = []
    for s in config.anchor_shape:
        for sc in config.anchor_scale:
            whs.append([sc * np.sqrt(s), sc / np.sqrt(s)])
    # BUGFIX: segmentation channels start after the class one-hot block
    # (see parse_single_label: out_channel * (5 + categories) + ...);
    # the old offset out_channel*5 read class scores as segmentation.
    seg_base = out_channel * (5 + config.categories)
    shape_label = label.shape
    for r in range(shape_label[0]):
        for c in range(shape_label[1]):
            for idx in range(out_channel):
                if label[r, c, idx] > 0:
                    xywh = label[r, c, out_channel + idx * 4:out_channel + idx * 4 + 4]
                    seg = label[r, c, seg_base + idx * config.seg_size:seg_base + (idx + 1) * config.seg_size]
                    if sig:
                        seg = sigmoid(seg)
                    # NOTE: assumes config.seg_size == 64 (8x8 patch) — confirm.
                    seg = seg.reshape([8, 8]) * 255
                    seg = np.uint8(seg)
                    # Decode box center/size relative to cell center and anchor.
                    x = c * stride + 0.5 * stride + xywh[0]
                    y = r * stride + 0.5 * stride + xywh[1]
                    w = np.exp(xywh[2]) * whs[idx][0]
                    h = np.exp(xywh[3]) * whs[idx][1]
                    x1, y1, x2, y2 = int(x - 0.5 * w), int(y - 0.5 * h), int(x + 0.5 * w), int(y + 0.5 * h)
                    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    seg = cv2.resize(seg, (int(x2 - x1), int(y2 - y1)))
                    # Skip boxes spilling outside the image bounds.
                    if y1 < 0 or x1 < 0 or y2 > config.image_size[0] or x2 > config.image_size[1]:
                        continue
                    # Merge with max so overlapping boxes keep the strongest response.
                    segs[y1:y2, x1:x2] = np.amax(np.stack([seg, segs[y1:y2, x1:x2]], axis=-1), axis=-1)
    return img, segs
class DataReader():
    """Loads annotation metadata once, then serves randomly-sampled batches
    that are prepared asynchronously on a single background thread."""

    def __init__(self, bsize):
        print('Loading meta files...')
        self.data = []
        for label_file in glob.glob(LABEL_PATH + '/*.txt'):
            label_file = label_file.replace('\\', '/')
            # The image path mirrors the label path, minus the .txt extension.
            name = label_file.replace(LABEL_PATH, IMG_PATH).replace('.txt', '')
            label, classes, seg = self.read_label(label_file)
            self.data.append([name, label, classes, seg])
        self.pool = ThreadPool(processes=1)
        self.bsize = bsize
        print('Meta files loaded.')
        self.pre_fetch()

    def read_label(self, file):
        """Parse one annotation file.

        Each line is tab-separated: x, y, w, h, class_id, then
        config.seg_size segmentation values.
        Returns (boxes as np.float32, class ids, segmentation lists).
        """
        res = []
        classes = []
        segs = []
        # BUGFIX: the file handle was previously opened without ever being
        # closed; use a context manager so it is released promptly.
        with open(file) as f:
            for line in f:
                fields = line.strip().split('\t')
                bbox = [float(v) for v in fields[:4]]
                classes.append(int(fields[4]))
                res.append(bbox)
                seg = [float(v) for v in fields[5:]]
                segs.append(seg)
                assert len(seg) == config.seg_size
        res = np.float32(res)
        return res, classes, segs

    def pre_fetch(self):
        """Kick off asynchronous preparation of the next random batch."""
        batch = random.sample(self.data, self.bsize)
        self.p = self.pool.apply_async(process, args=(batch, config.output_shape))

    def get_next(self):
        """Block for the batch in flight, then schedule the next one."""
        batch = self.p.get()
        self.pre_fetch()
        return batch
if __name__=='__main__':
    # Smoke test: pull one batch and display the decoded annotations.
    reader = DataReader(1)
    img, label, mask = reader.get_next()
    print(img.shape)
    print(label.shape)
    img = img[0]
    label = label[0]
    # BUGFIX: visualize returns (image, segmentation); the old code passed
    # the whole tuple to cv2.imshow, which fails at runtime.
    img, segs = visualize(img, label)
    cv2.imshow('a', img)
    cv2.waitKey(0)
| [
"numpy.stack",
"numpy.random.uniform",
"numpy.uint8",
"numpy.log",
"multiprocessing.pool.ThreadPool",
"numpy.argmax",
"cv2.waitKey",
"random.sample",
"numpy.float32",
"numpy.zeros",
"cv2.rectangle",
"cv2.warpAffine",
"cv2.imread",
"numpy.exp",
"glob.glob",
"cv2.imshow",
"cv2.resize",... | [((328, 386), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'scale_range[0]', 'high': 'scale_range[1]'}), '(low=scale_range[0], high=scale_range[1])\n', (345, 386), True, 'import numpy as np\n'), ((398, 456), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'shift_range[0]', 'high': 'shift_range[1]'}), '(low=shift_range[0], high=shift_range[1])\n', (415, 456), True, 'import numpy as np\n'), ((468, 526), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'shift_range[0]', 'high': 'shift_range[1]'}), '(low=shift_range[0], high=shift_range[1])\n', (485, 526), True, 'import numpy as np\n'), ((532, 602), 'numpy.float32', 'np.float32', (['[[scale, 0, trans_x * scale], [0, scale, trans_y * scale]]'], {}), '([[scale, 0, trans_x * scale], [0, scale, trans_y * scale]])\n', (542, 602), True, 'import numpy as np\n'), ((606, 643), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'H'], {'dsize': 'imgsize'}), '(img, H, dsize=imgsize)\n', (620, 643), False, 'import cv2\n'), ((1249, 1264), 'numpy.float32', 'np.float32', (['res'], {}), '(res)\n', (1259, 1264), True, 'import numpy as np\n'), ((1835, 1850), 'numpy.argmax', 'np.argmax', (['ious'], {}), '(ious)\n', (1844, 1850), True, 'import numpy as np\n'), ((2111, 2121), 'numpy.log', 'np.log', (['dw'], {}), '(dw)\n', (2117, 2121), True, 'import numpy as np\n'), ((2128, 2138), 'numpy.log', 'np.log', (['dh'], {}), '(dh)\n', (2134, 2138), True, 'import numpy as np\n'), ((2478, 2506), 'numpy.float32', 'np.float32', (['[dx, dy, dw, dh]'], {}), '([dx, dy, dw, dh])\n', (2488, 2506), True, 'import numpy as np\n'), ((4034, 4047), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (4042, 4047), True, 'import numpy as np\n'), ((6630, 6650), 'cv2.imshow', 'cv2.imshow', (['"""a"""', 'img'], {}), "('a', img)\n", (6640, 6650), False, 'import cv2\n'), ((6652, 6666), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6663, 6666), False, 'import cv2\n'), ((3447, 3467), 'cv2.imread', 'cv2.imread', 
(['fname', '(1)'], {}), '(fname, 1)\n', (3457, 3467), False, 'import cv2\n'), ((3476, 3491), 'numpy.float32', 'np.float32', (['lab'], {}), '(lab)\n', (3486, 3491), True, 'import numpy as np\n'), ((3882, 3898), 'numpy.float32', 'np.float32', (['imgs'], {}), '(imgs)\n', (3892, 3898), True, 'import numpy as np\n'), ((5499, 5531), 'glob.glob', 'glob.glob', (["(LABEL_PATH + '/*.txt')"], {}), "(LABEL_PATH + '/*.txt')\n", (5508, 5531), False, 'import glob\n'), ((5757, 5780), 'multiprocessing.pool.ThreadPool', 'ThreadPool', ([], {'processes': '(1)'}), '(processes=1)\n', (5767, 5780), False, 'from multiprocessing.pool import ThreadPool\n'), ((6172, 6187), 'numpy.float32', 'np.float32', (['res'], {}), '(res)\n', (6182, 6187), True, 'import numpy as np\n'), ((6249, 6285), 'random.sample', 'random.sample', (['self.data', 'self.bsize'], {}), '(self.data, self.bsize)\n', (6262, 6285), False, 'import random\n'), ((663, 699), 'numpy.float32', 'np.float32', (['[trans_x, trans_y, 0, 0]'], {}), '([trans_x, trans_y, 0, 0])\n', (673, 699), True, 'import numpy as np\n'), ((3547, 3584), 'cv2.resize', 'cv2.resize', (['img', 'None'], {'fx': '(0.5)', 'fy': '(0.5)'}), '(img, None, fx=0.5, fy=0.5)\n', (3557, 3584), False, 'import cv2\n'), ((3963, 3973), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3969, 3973), True, 'import numpy as np\n'), ((4056, 4094), 'numpy.zeros', 'np.zeros', (['[img.shape[0], img.shape[1]]'], {}), '([img.shape[0], img.shape[1]])\n', (4064, 4094), True, 'import numpy as np\n'), ((4755, 4768), 'numpy.uint8', 'np.uint8', (['seg'], {}), '(seg)\n', (4763, 4768), True, 'import numpy as np\n'), ((5010, 5064), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (5023, 5064), False, 'import cv2\n'), ((1731, 1741), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (1738, 1741), True, 'import numpy as np\n'), ((1746, 1756), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (1753, 1756), True, 
'import numpy as np\n'), ((4329, 4339), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (4336, 4339), True, 'import numpy as np\n'), ((4344, 4354), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (4351, 4354), True, 'import numpy as np\n'), ((4861, 4876), 'numpy.exp', 'np.exp', (['xywh[2]'], {}), '(xywh[2])\n', (4867, 4876), True, 'import numpy as np\n'), ((4900, 4915), 'numpy.exp', 'np.exp', (['xywh[3]'], {}), '(xywh[3])\n', (4906, 4915), True, 'import numpy as np\n'), ((5316, 5360), 'numpy.stack', 'np.stack', (['[seg, segs[y1:y2, x1:x2]]'], {'axis': '(-1)'}), '([seg, segs[y1:y2, x1:x2]], axis=-1)\n', (5324, 5360), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
def eval(x):
    """Return True when x falls inside one of the two accepted intervals
    (0.21111, 0.3232) or (0.4957, 0.9191), endpoints excluded.

    NOTE: the name shadows the builtin eval(); kept unchanged because the
    script below calls it by this name.
    """
    return 0.21111 < x < 0.3232 or 0.4957 < x < 0.9191
# Sample sizes for the three Monte-Carlo runs (one subplot each).
N=[100,500,1000]
fig = plt.figure(figsize=(20,5))
for i in range(3):
    # Collect the uniform draws that land inside the accepted intervals.
    x = np.array([])
    d = np.random.rand(N[i])
    for j in range(N[i]):
        if eval(d[j]):
            x=np.hstack((x,d[j]))
    # Every point gets equal probability mass 1/N for plotting.
    y = np.array([1/N[i] for k in range(len(x))])
    e = np.array([1/N[i] for k in range(len(d))])
    # Acceptance fraction — estimates the total length of the intervals.
    print(len(x)/len(d))
    ax = fig.add_subplot(1,3,i+1)
    # Red circles: all draws; green diamonds: accepted draws.
    ax.scatter(x=d,y=e,marker='o',c='r')
    ax.scatter(x=x,y=y,marker='d',c='g')
    ax.set_title('Scatter: N=%i' %N[i])
    ax.set_xlabel('$x$')
    ax.set_ylabel('$Prob.$=%f' %(1/N[i]))
plt.show()
fig.savefig('Naive.png',bbox_inches='tight')
| [
"matplotlib.pyplot.show",
"numpy.hstack",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.random.rand"
] | [((227, 254), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (237, 254), True, 'import matplotlib.pyplot as plt\n'), ((757, 767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (765, 767), True, 'import matplotlib.pyplot as plt\n'), ((281, 293), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (289, 293), True, 'import numpy as np\n'), ((302, 322), 'numpy.random.rand', 'np.random.rand', (['N[i]'], {}), '(N[i])\n', (316, 322), True, 'import numpy as np\n'), ((386, 406), 'numpy.hstack', 'np.hstack', (['(x, d[j])'], {}), '((x, d[j]))\n', (395, 406), True, 'import numpy as np\n')] |
from . import utils
from .klines import KLines
import numpy as np
class XLines(object):
    """Model selection wrapper around KLines: fits one KLines per candidate
    number of lines and keeps the candidate with the best clustering score."""

    def __init__(self, candidates, alpha0=0., init_alpha="one", max_iter=10, tol=0.001,
                 clustering_n_init=3, clustering_init="estimate", metric="silhouette",
                 verbose=0):
        """
        Initialize a XLines object

        Parameters
        ---
        candidates : array-like, list of candidate k for which we will try to fit a KLines
        alpha0 : float, initial value for alpha if init_alpha is None or "update"
        init_alpha : {None, "one", "all", "update"}, if not None test a few alpha0 and
            initialize the iterations with the best one.
            If "one", this step is done only for the first candidate and later
            ones use the resulting orientation as alpha0.
            If "update", first candidate uses alpha0 and later ones use the
            updated alpha0.
        max_iter : int or array-like, maximum number of iteration
        tol : float, tolerance to stop convergence
        clustering_n_init : int, number of times the KMeans algorithms will
            be run with different centroid seeds (n_init for KMeans)
        clustering_init : {'estimate' or 'k-means++'}, initialization method
            for KMeans. 'k-means++' is the standard for KMeans while 'estimate'
            will use the centroids from the previous step
        metric : the metric to use for scoring, either 'silhouette' or 'CH'

        Attributes
        ---
        best_k_ : best candidate for the number of lines
        best_model_ : the best KLines model
        scores_ : a list of each candidate model scores
        """
        self.candidates = candidates
        self.n_candidates = len(candidates)
        self.alpha0 = alpha0
        self.init_alpha = init_alpha
        self.max_iter = max_iter
        self.tol = tol
        self.clustering_n_init = clustering_n_init
        self.clustering_init = clustering_init
        self.metric = metric
        self.verbose = verbose
        self.best_k_ = None
        self.best_model_ = None
        self.scores_ = None
        # Working copy of alpha0; fit() may update it between candidates.
        self._alpha0 = self.alpha0
        # Validate inputs early so bad settings fail at construction time.
        if metric not in ["silhouette", "CH"]:
            raise ValueError("metric should be 'silhouette' or 'CH'")
        if self.clustering_init not in ["k-means++", "estimate"]:
            raise ValueError("clustering_init should be either 'k-means++' or 'estimate' - got {} instead".format(clustering_init))

    def fit(self, X):
        """
        Fitting algorithm: fit a KLines on X for each candidate k

        Parameters
        ---
        X : array-like, shape (n_samples, n_features)

        Returns
        ---
        best_model : a fitted KLines model that corresponds to the highest score
        best_k : the candidate k yielding the best model
        """
        self.scores_ = []
        sub_verbose = max(0, self.verbose - 1)
        models = {}
        # Decide, per candidate, whether KLines should search for a good
        # initial orientation or start directly from self._alpha0.
        if self.init_alpha is None or self.init_alpha == "update":
            alpha_initializations = [False] * self.n_candidates
        elif self.init_alpha == "one":
            alpha_initializations = [True] + [False] * (self.n_candidates - 1)
        elif self.init_alpha == "all":
            alpha_initializations = [True] * self.n_candidates
        else:
            raise ValueError("init_alpha must be None, 'one', 'all' or 'update' - got {} instead".format(self.init_alpha))
        # Should the first fitted candidate update alpha0 for the next ones?
        update_alpha0 = self.init_alpha in ("one", "update")
        # Maximum number of iterations for each candidate.
        if type(self.max_iter) is int:
            max_iterations = [self.max_iter] * self.n_candidates
        elif len(self.max_iter) != self.n_candidates:
            raise ValueError("max_iter should be an integer or a list of the same size of the number of candidates")
        else:
            max_iterations = self.max_iter
        # Fit a KLines model for each candidate k.
        for idx, k in enumerate(self.candidates):
            if self.verbose > 1:
                print("-Test {} components".format(k))
            model = KLines(k,
                           alpha0=self._alpha0, init_alpha=alpha_initializations[idx],
                           max_iter=max_iterations[idx], tol=self.tol,
                           clustering_n_init=self.clustering_n_init, clustering_init=self.clustering_init,
                           metric=self.metric, verbose=sub_verbose
                           )
            model.fit(X)
            models[k] = model
            self.scores_.append(model.score())
            # Update alpha0 after the first candidate if requested.
            if update_alpha0:
                self._alpha0 = model.alpha_
                update_alpha0 = False
        # Select the best-scoring model.
        self.best_k_ = self.candidates[np.argmax(self.scores_)]
        self.best_model_ = models[self.best_k_]
        if self.verbose:
            print("-Results:")
            print("Candidate scores: {}".format(self.scores_))
            print("Best model with {} components".format(self.best_k_))
            print("Best model with orientation: {:.2f}".format(utils.rad2deg(self.best_model_.alpha_)))
        return self.best_model_, self.best_k_

    def predict(self, X):
        """
        Predict the closest cluster each sample in X belongs to

        Parameters
        ---
        X : array-like, shape (n_samples, n_features)

        Returns
        ---
        labels : array, index of the cluster each sample belongs to
        """
        if self.best_model_ is None:
            raise RuntimeError("Model needs to be fitted first")
        return self.best_model_.predict(X)
| [
"numpy.argmax"
] | [((4988, 5011), 'numpy.argmax', 'np.argmax', (['self.scores_'], {}), '(self.scores_)\n', (4997, 5011), True, 'import numpy as np\n')] |
import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
_num_samples = 41
_num_embeddings = 11
_sequence_length = 3
# Sample access functions
def get_sample(index):
np.random.seed(2019101500+index)
return np.random.randint(_num_embeddings, size=_sequence_length)
def num_samples():
return _num_samples
def sample_dims():
return (_sequence_length,)
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
mini_batch_size = num_samples() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Input data
x = lbann.Identity(lbann.Input())
x_lbann = x
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# ------------------------------------------
# No padding index
# ------------------------------------------
# Embeddings
np.random.seed(20191015)
embedding_dim = 5
embeddings = np.random.normal(size=(_num_embeddings,embedding_dim))
# LBANN implementation
embedding_weights = lbann.Weights(
optimizer=lbann.SGD(),
initializer=lbann.ValueInitializer(values=tools.str_list(np.nditer(embeddings)))
)
x = x_lbann
y = lbann.Embedding(x,
weights=embedding_weights,
num_embeddings=_num_embeddings,
embedding_dim=embedding_dim)
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='no padding index'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i)
y = embeddings[x,:]
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Padding index 0
# ------------------------------------------
# Embeddings
np.random.seed(201910152)
embedding_dim = 7
padding_idx = 0
embeddings = np.random.normal(size=(_num_embeddings,embedding_dim))
# LBANN implementation
# Note: Embedding layer gradients are not exact if a padding index
# is set. Avoid gradient checking by not using an optimizer.
embedding_weights = lbann.Weights(
optimizer=None,
initializer=lbann.ValueInitializer(values=tools.str_list(np.nditer(embeddings)))
)
x = x_lbann
y = lbann.Embedding(x,
weights=embedding_weights,
num_embeddings=_num_embeddings,
embedding_dim=embedding_dim,
padding_idx=padding_idx)
z = lbann.L2Norm2(y)
metrics.append(lbann.Metric(z, name='padding index = 0'))
# NumPy implementation
vals = []
for i in range(num_samples()):
x = get_sample(i)
y = np.where((x==padding_idx).reshape((-1,1)), 0, embeddings[x,:])
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Gradient checking
# ------------------------------------------
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
# ------------------------------------------
# Construct model
# ------------------------------------------
# Construct model
num_epochs = 0
return lbann.Model(num_epochs,
layers=lbann.traverse_layer_graph(x_lbann),
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func
| [
"tools.create_python_data_reader",
"numpy.random.seed",
"os.path.realpath",
"os.path.dirname",
"tools.create_tests",
"numpy.nditer",
"numpy.finfo",
"numpy.random.randint",
"numpy.mean",
"numpy.random.normal",
"tools.numpy_l2norm2"
] | [((123, 149), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (139, 149), False, 'import os\n'), ((164, 193), 'os.path.dirname', 'os.path.dirname', (['current_file'], {}), '(current_file)\n', (179, 193), False, 'import os\n'), ((6033, 6079), 'tools.create_tests', 'tools.create_tests', (['setup_experiment', '__file__'], {}), '(setup_experiment, __file__)\n', (6051, 6079), False, 'import tools\n'), ((650, 684), 'numpy.random.seed', 'np.random.seed', (['(2019101500 + index)'], {}), '(2019101500 + index)\n', (664, 684), True, 'import numpy as np\n'), ((694, 751), 'numpy.random.randint', 'np.random.randint', (['_num_embeddings'], {'size': '_sequence_length'}), '(_num_embeddings, size=_sequence_length)\n', (711, 751), True, 'import numpy as np\n'), ((1797, 1821), 'numpy.random.seed', 'np.random.seed', (['(20191015)'], {}), '(20191015)\n', (1811, 1821), True, 'import numpy as np\n'), ((1861, 1916), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(_num_embeddings, embedding_dim)'}), '(size=(_num_embeddings, embedding_dim))\n', (1877, 1916), True, 'import numpy as np\n'), ((2615, 2628), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (2622, 2628), True, 'import numpy as np\n'), ((3020, 3045), 'numpy.random.seed', 'np.random.seed', (['(201910152)'], {}), '(201910152)\n', (3034, 3045), True, 'import numpy as np\n'), ((3105, 3160), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(_num_embeddings, embedding_dim)'}), '(size=(_num_embeddings, embedding_dim))\n', (3121, 3160), True, 'import numpy as np\n'), ((4067, 4080), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (4074, 4080), True, 'import numpy as np\n'), ((226, 254), 'os.path.dirname', 'os.path.dirname', (['current_dir'], {}), '(current_dir)\n', (241, 254), False, 'import os\n'), ((2559, 2581), 'tools.numpy_l2norm2', 'tools.numpy_l2norm2', (['y'], {}), '(y)\n', (2578, 2581), False, 'import tools\n'), ((4011, 4033), 'tools.numpy_l2norm2', 'tools.numpy_l2norm2', 
(['y'], {}), '(y)\n', (4030, 4033), False, 'import tools\n'), ((2649, 2669), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2657, 2669), True, 'import numpy as np\n'), ((4101, 4121), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (4109, 4121), True, 'import numpy as np\n'), ((5402, 5511), 'tools.create_python_data_reader', 'tools.create_python_data_reader', (['lbann', 'current_file', '"""get_sample"""', '"""num_samples"""', '"""sample_dims"""', '"""train"""'], {}), "(lbann, current_file, 'get_sample',\n 'num_samples', 'sample_dims', 'train')\n", (5433, 5511), False, 'import tools\n'), ((5633, 5741), 'tools.create_python_data_reader', 'tools.create_python_data_reader', (['lbann', 'current_file', '"""get_sample"""', '"""num_samples"""', '"""sample_dims"""', '"""test"""'], {}), "(lbann, current_file, 'get_sample',\n 'num_samples', 'sample_dims', 'test')\n", (5664, 5741), False, 'import tools\n'), ((2079, 2100), 'numpy.nditer', 'np.nditer', (['embeddings'], {}), '(embeddings)\n', (2088, 2100), True, 'import numpy as np\n'), ((3452, 3473), 'numpy.nditer', 'np.nditer', (['embeddings'], {}), '(embeddings)\n', (3461, 3473), True, 'import numpy as np\n')] |
# <NAME>
import argparse, sys, os
import numpy as np
import pylab as plt
from glob import glob
from spectral.io import envi
from scipy.stats import norm
from scipy.linalg import solve, inv
from astropy import modeling
from sklearn.linear_model import RANSACRegressor
from skimage.filters import threshold_otsu
from numpy import nanmedian
import json
from numba import jit
from lowess import lowess
from PIL import Image
def find_header(infile):
if os.path.exists(infile+'.hdr'):
return infile+'.hdr'
elif os.path.exists('.'.join(infile.split('.')[:-1])+'.hdr'):
return '.'.join(infile.split('.')[:-1])+'.hdr'
else:
raise FileNotFoundError('Did not find header file')
def moving_average(x, w=5):
return np.convolve(x, np.ones(w), 'same') / w
def polymax(y, plot=False):
series = moving_average(y)
ctr = np.argmax(series)
halfwid = 16
segment = y[max(0,ctr-halfwid):min(ctr+halfwid+1,len(y)-1)]
x = np.arange(len(segment))
p = np.polyfit(x,segment,6)
noisefree = np.polyval(p,x)
if plot:
plt.plot(x, segment, 'ko')
plt.plot(x, noisefree, 'r')
plt.show()
return noisefree.max(), np.std(noisefree-segment)
# Reference columns of the focal plane array used for
# radiometric calibration. Avoid the center (due to
# symmetric ghosting) and avoid the divot from 1015-1035.
reference_cols = np.concatenate((np.arange(140,340),
np.arange(940,1015),
np.arange(1035,1140)),axis=0)
def main():
description = "Calculate Flat field"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('input')
parser.add_argument('--cue_channel',default=50,type=int)
parser.add_argument('--background',type=str)
parser.add_argument('--config',type=str)
parser.add_argument('--mask_image',type=str,default=None)
parser.add_argument('output')
args = parser.parse_args()
infile = envi.open(find_header(args.input))
if int(infile.metadata['data type']) == 2:
dtype = np.uint16
elif int(infile.metadata['data type']) == 4:
dtype = np.float32
else:
raise ValueError('Unsupported data type')
if infile.metadata['interleave'] != 'bil':
raise ValueError('Unsupported interleave')
rows = int(infile.metadata['bands'])
columns = int(infile.metadata['samples'])
lines = int(infile.metadata['lines'])
nframe = rows * columns
margin=2
meta = {'interleave':'bsq', 'data type':4}
flat = np.ones((rows,columns)) * -9999
noise = np.ones((rows,columns)) * -9999
DN_average, DN_noise = [],[]
if args.mask_image is not None:
mask = np.asarray(Image.open(args.mask_image))
print(mask.shape)
if len(mask.shape)>2:
mask = mask.max(axis=2)
if mask.shape[0] != lines or mask.shape[1] != columns:
raise IndexError('mask does not match image')
n = max(mask.sum(axis=0))
foreground = np.zeros((rows,columns))
foreground_counts = np.zeros((rows,columns))
foreground_sq = np.zeros((rows,columns))
background = np.zeros((rows,columns))
background_counts = np.zeros((rows,columns))
background_sq = np.zeros((rows,columns))
with open(args.input,'rb') as fin:
# Accumulate n brightest observations of the source
for line in range(lines):
frame = np.fromfile(fin, count=nframe, dtype=dtype)
frame = np.array(frame.reshape((rows, columns)),dtype=np.float32)
use = np.where(mask[line,:]>0)
if len(use)<1:
continue
foreground[:,use] = foreground[:,use] + frame[:,use]
foreground_sq[:,use] = foreground_sq[:,use] + pow(frame[:,use],2)
foreground_counts[:,use] = foreground_counts[:,use] + 1
foreground = foreground / foreground_counts
foreground_sq = foreground_sq / foreground_counts
if args.background is not None:
with open(args.background,'rb') as fin:
for line in range(lines):
frame = np.fromfile(fin, count=nframe, dtype=dtype)
frame = np.array(frame.reshape((rows, columns)),dtype=np.float32)
use = np.where(mask[line,:]>0)
if len(use)<1:
continue
background[:,use] = background[:,use] + frame[:,use]
background_sq[:,use] = background_sq[:,use] + pow(frame[:,use],2)
background_counts[:,use] = background_counts[:,use] + 1
background = background / background_counts
background_sq = background_sq / background_counts
foreground_sd = np.sqrt(foreground_sq + pow(foreground,2))
background_sd = np.sqrt(background_sq + pow(background,2))
for row in range(rows):
flat[row,:] = foreground[row,:] - background[row,:]
noise[row,:] = np.sqrt(pow(foreground_sd[row,:],2)-pow(background_sd[row,:],2))
ref = nanmedian(flat[row, reference_cols])
ref_noise = nanmedian(noise[row, reference_cols])
print('row',row,'reference average is',ref)
flat[row,:] = ref / flat[row,:]
DN_average.append(ref)
DN_noise.append(ref_noise)
else:
foreground = np.ones((lines,rows,columns))
background = np.ones((lines,rows,columns))
with open(args.input,'rb') as fin:
# Accumulate n brightest observations of the source
for line in range(lines):
frame = np.fromfile(fin, count=nframe, dtype=dtype)
frame = np.array(frame.reshape((rows, columns)),dtype=np.float32)
foreground[line,:,:] = frame
if args.background is not None:
with open(args.background,'rb') as fin:
# Accumulate n brightest observations of the background
for line in range(lines):
bg = np.fromfile(fin, count=nframe, dtype=dtype)
bg = np.array(bg.reshape((rows, columns)), dtype=np.float32)
background[line,:,:] = bg
for row in range(rows):
for col in range(columns):
y = np.squeeze(foreground[:,row,col])
bg_y = np.squeeze(background[:,row,col])
fg, resid_fg = polymax(y,plot=False)#(row==150 and col==200))
bg, resid_bg = polymax(bg_y,plot=False)#(row==150 and col==200))
flat[row,col] = fg - bg
noise[row,col] = resid_fg
ref = nanmedian(flat[row, reference_cols])
ref_noise = nanmedian(noise[row, reference_cols])
print('row',row,'reference average is',ref)
flat[row,:] = ref / flat[row,:]
DN_average.append(ref)
DN_noise.append(ref_noise)
flat[np.logical_not(np.isfinite(flat))] = -9999
meta['average_DNs'] = np.array(DN_average)
meta['stdev_DNs'] = np.array(DN_noise)
envi.save_image(args.output+'.hdr',np.array(flat,dtype=np.float32),
metadata=meta,ext='',force=True)
if __name__ == '__main__':
main()
| [
"pylab.show",
"argparse.ArgumentParser",
"numpy.nanmedian",
"numpy.polyfit",
"numpy.polyval",
"numpy.argmax",
"numpy.std",
"numpy.fromfile",
"os.path.exists",
"numpy.ones",
"numpy.zeros",
"PIL.Image.open",
"numpy.isfinite",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.squeeze... | [((452, 483), 'os.path.exists', 'os.path.exists', (["(infile + '.hdr')"], {}), "(infile + '.hdr')\n", (466, 483), False, 'import argparse, sys, os\n'), ((840, 857), 'numpy.argmax', 'np.argmax', (['series'], {}), '(series)\n', (849, 857), True, 'import numpy as np\n'), ((979, 1004), 'numpy.polyfit', 'np.polyfit', (['x', 'segment', '(6)'], {}), '(x, segment, 6)\n', (989, 1004), True, 'import numpy as np\n'), ((1019, 1035), 'numpy.polyval', 'np.polyval', (['p', 'x'], {}), '(p, x)\n', (1029, 1035), True, 'import numpy as np\n'), ((1591, 1639), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (1614, 1639), False, 'import argparse, sys, os\n'), ((7161, 7181), 'numpy.array', 'np.array', (['DN_average'], {}), '(DN_average)\n', (7169, 7181), True, 'import numpy as np\n'), ((7206, 7224), 'numpy.array', 'np.array', (['DN_noise'], {}), '(DN_noise)\n', (7214, 7224), True, 'import numpy as np\n'), ((1056, 1082), 'pylab.plot', 'plt.plot', (['x', 'segment', '"""ko"""'], {}), "(x, segment, 'ko')\n", (1064, 1082), True, 'import pylab as plt\n'), ((1091, 1118), 'pylab.plot', 'plt.plot', (['x', 'noisefree', '"""r"""'], {}), "(x, noisefree, 'r')\n", (1099, 1118), True, 'import pylab as plt\n'), ((1127, 1137), 'pylab.show', 'plt.show', ([], {}), '()\n', (1135, 1137), True, 'import pylab as plt\n'), ((1166, 1193), 'numpy.std', 'np.std', (['(noisefree - segment)'], {}), '(noisefree - segment)\n', (1172, 1193), True, 'import numpy as np\n'), ((1393, 1412), 'numpy.arange', 'np.arange', (['(140)', '(340)'], {}), '(140, 340)\n', (1402, 1412), True, 'import numpy as np\n'), ((1441, 1461), 'numpy.arange', 'np.arange', (['(940)', '(1015)'], {}), '(940, 1015)\n', (1450, 1461), True, 'import numpy as np\n'), ((1490, 1511), 'numpy.arange', 'np.arange', (['(1035)', '(1140)'], {}), '(1035, 1140)\n', (1499, 1511), True, 'import numpy as np\n'), ((2551, 2575), 'numpy.ones', 'np.ones', (['(rows, columns)'], 
{}), '((rows, columns))\n', (2558, 2575), True, 'import numpy as np\n'), ((2595, 2619), 'numpy.ones', 'np.ones', (['(rows, columns)'], {}), '((rows, columns))\n', (2602, 2619), True, 'import numpy as np\n'), ((3024, 3049), 'numpy.zeros', 'np.zeros', (['(rows, columns)'], {}), '((rows, columns))\n', (3032, 3049), True, 'import numpy as np\n'), ((3077, 3102), 'numpy.zeros', 'np.zeros', (['(rows, columns)'], {}), '((rows, columns))\n', (3085, 3102), True, 'import numpy as np\n'), ((3126, 3151), 'numpy.zeros', 'np.zeros', (['(rows, columns)'], {}), '((rows, columns))\n', (3134, 3151), True, 'import numpy as np\n'), ((3172, 3197), 'numpy.zeros', 'np.zeros', (['(rows, columns)'], {}), '((rows, columns))\n', (3180, 3197), True, 'import numpy as np\n'), ((3225, 3250), 'numpy.zeros', 'np.zeros', (['(rows, columns)'], {}), '((rows, columns))\n', (3233, 3250), True, 'import numpy as np\n'), ((3274, 3299), 'numpy.zeros', 'np.zeros', (['(rows, columns)'], {}), '((rows, columns))\n', (3282, 3299), True, 'import numpy as np\n'), ((5496, 5527), 'numpy.ones', 'np.ones', (['(lines, rows, columns)'], {}), '((lines, rows, columns))\n', (5503, 5527), True, 'import numpy as np\n'), ((5547, 5578), 'numpy.ones', 'np.ones', (['(lines, rows, columns)'], {}), '((lines, rows, columns))\n', (5554, 5578), True, 'import numpy as np\n'), ((7264, 7296), 'numpy.array', 'np.array', (['flat'], {'dtype': 'np.float32'}), '(flat, dtype=np.float32)\n', (7272, 7296), True, 'import numpy as np\n'), ((745, 755), 'numpy.ones', 'np.ones', (['w'], {}), '(w)\n', (752, 755), True, 'import numpy as np\n'), ((2725, 2752), 'PIL.Image.open', 'Image.open', (['args.mask_image'], {}), '(args.mask_image)\n', (2735, 2752), False, 'from PIL import Image\n'), ((5194, 5230), 'numpy.nanmedian', 'nanmedian', (['flat[row, reference_cols]'], {}), '(flat[row, reference_cols])\n', (5203, 5230), False, 'from numpy import nanmedian\n'), ((5254, 5291), 'numpy.nanmedian', 'nanmedian', (['noise[row, reference_cols]'], {}), 
'(noise[row, reference_cols])\n', (5263, 5291), False, 'from numpy import nanmedian\n'), ((6813, 6849), 'numpy.nanmedian', 'nanmedian', (['flat[row, reference_cols]'], {}), '(flat[row, reference_cols])\n', (6822, 6849), False, 'from numpy import nanmedian\n'), ((6873, 6910), 'numpy.nanmedian', 'nanmedian', (['noise[row, reference_cols]'], {}), '(noise[row, reference_cols])\n', (6882, 6910), False, 'from numpy import nanmedian\n'), ((7107, 7124), 'numpy.isfinite', 'np.isfinite', (['flat'], {}), '(flat)\n', (7118, 7124), True, 'import numpy as np\n'), ((3478, 3521), 'numpy.fromfile', 'np.fromfile', (['fin'], {'count': 'nframe', 'dtype': 'dtype'}), '(fin, count=nframe, dtype=dtype)\n', (3489, 3521), True, 'import numpy as np\n'), ((3626, 3653), 'numpy.where', 'np.where', (['(mask[line, :] > 0)'], {}), '(mask[line, :] > 0)\n', (3634, 3653), True, 'import numpy as np\n'), ((5755, 5798), 'numpy.fromfile', 'np.fromfile', (['fin'], {'count': 'nframe', 'dtype': 'dtype'}), '(fin, count=nframe, dtype=dtype)\n', (5766, 5798), True, 'import numpy as np\n'), ((6460, 6495), 'numpy.squeeze', 'np.squeeze', (['foreground[:, row, col]'], {}), '(foreground[:, row, col])\n', (6470, 6495), True, 'import numpy as np\n'), ((6516, 6551), 'numpy.squeeze', 'np.squeeze', (['background[:, row, col]'], {}), '(background[:, row, col])\n', (6526, 6551), True, 'import numpy as np\n'), ((4221, 4264), 'numpy.fromfile', 'np.fromfile', (['fin'], {'count': 'nframe', 'dtype': 'dtype'}), '(fin, count=nframe, dtype=dtype)\n', (4232, 4264), True, 'import numpy as np\n'), ((4377, 4404), 'numpy.where', 'np.where', (['(mask[line, :] > 0)'], {}), '(mask[line, :] > 0)\n', (4385, 4404), True, 'import numpy as np\n'), ((6182, 6225), 'numpy.fromfile', 'np.fromfile', (['fin'], {'count': 'nframe', 'dtype': 'dtype'}), '(fin, count=nframe, dtype=dtype)\n', (6193, 6225), True, 'import numpy as np\n')] |
import cv2
import csv
import torch
import numpy as np
from random import random, shuffle, randint
from torch.utils.data import DataLoader, Dataset
from django.db.models import Sum, Count
from django.db.models import Q
from data_labelling.models import DepthImage, FRONT_NORMAL_CAMERA_TYPE_V2, FRONT_ZOOM_CAMERA_TYPE_V2
torch.set_num_threads(5)
class DepthDataset(Dataset) :
"""
Implementation of Pytorch way of loading data in parallel instead of a single threaded training loop
"""
def __init__(self, H, W) :
self.qs = DepthImage.objects.filter(Q(camera_type=FRONT_NORMAL_CAMERA_TYPE_V2) | Q(camera_type=FRONT_ZOOM_CAMERA_TYPE_V2))
self.length = self.qs.count()
self.image_labels = list(self.qs)
self.H = H
self.W = W
self.DISTANCE_UNIT = 10 # 10 m = 1 unit
@property
def writer(self):
return csv.writer(open('offending_image_labels.csv'), delimiter=',')
def __len__(self) :
return self.length
def __getitem__(self, index) :
try :
image_label = self.image_labels[index]
speed = image_label.obd_speed * 1000/3600
time = (image_label.image3.get_timestamp() - image_label.image2.get_timestamp())
distance = speed * time / self.DISTANCE_UNIT
image1 = image_label.image1.get_image_data()
image2 = image_label.image2.get_image_data()
image3 = image_label.image3.get_image_data()
image4 = image_label.image4.get_image_data()
object_mask1 = np.zeros_like(image1)
object_mask1 = image_label.draw_mask(object_mask1, image_label.object_mask1)[:, :, 0]
object_mask2 = np.zeros_like(image2)
object_mask2 = image_label.draw_mask(object_mask2, image_label.object_mask2)[:, :, 0]
object_mask3 = np.zeros_like(image3)
object_mask3 = image_label.draw_mask(object_mask3, image_label.object_mask3)[:, :, 0]
object_mask4 = np.zeros_like(image4)
object_mask4 = image_label.draw_mask(object_mask4, image_label.object_mask4)[:, :, 0]
# image1 = cv2.GaussianBlur(image1,(5,5),cv2.BORDER_DEFAULT)
# image2 = cv2.GaussianBlur(image2,(5,5),cv2.BORDER_DEFAULT)
# image3 = cv2.GaussianBlur(image3,(5,5),cv2.BORDER_DEFAULT)
# image4 = cv2.GaussianBlur(image4,(5,5),cv2.BORDER_DEFAULT)
kernel = np.ones((21, 21), np.uint8)
object_mask1 = cv2.dilate(object_mask1, kernel, iterations=1)
object_mask2 = cv2.dilate(object_mask2, kernel, iterations=1)
object_mask3 = cv2.dilate(object_mask3, kernel, iterations=1)
object_mask4 = cv2.dilate(object_mask4, kernel, iterations=1)
object_mask1 = np.array(object_mask1 > 0.1, dtype='uint8')
object_mask2 = np.array(object_mask2 > 0.1, dtype='uint8')
object_mask3 = np.array(object_mask3 > 0.1, dtype='uint8')
object_mask4 = np.array(object_mask4 > 0.1, dtype='uint8')
image1, object_mask1 = self.reshape(image1, object_mask1)
image2, object_mask2 = self.reshape(image2, object_mask2)
image3, object_mask3 = self.reshape(image3, object_mask3)
image4, object_mask4 = self.reshape(image4, object_mask4)
image1 = torch.ByteTensor(image1.transpose((2, 0, 1)).copy())
mask1 = torch.ByteTensor(object_mask1.copy())
image2, mask2 = torch.ByteTensor(image2.transpose((2, 0, 1)).copy()), torch.ByteTensor(object_mask2.copy())
image3, mask3 = torch.ByteTensor(image3.transpose((2, 0, 1)).copy()), torch.ByteTensor(object_mask3.copy())
image4, mask4 = torch.ByteTensor(image4.transpose((2, 0, 1)).copy()), torch.ByteTensor(object_mask4.copy())
return image1, image2, image3, image4, mask1, mask2, mask3, mask4, torch.FloatTensor([distance])
except Exception as e:
print(e)
print(image_label.pk)
def reshape(self, image, mask) :
image = cv2.resize(image, (self.W, self.H))
mask = cv2.resize(mask, (self.W, self.H))
return image, mask | [
"numpy.zeros_like",
"cv2.dilate",
"torch.FloatTensor",
"numpy.ones",
"django.db.models.Q",
"torch.set_num_threads",
"numpy.array",
"cv2.resize"
] | [((319, 343), 'torch.set_num_threads', 'torch.set_num_threads', (['(5)'], {}), '(5)\n', (340, 343), False, 'import torch\n'), ((4313, 4348), 'cv2.resize', 'cv2.resize', (['image', '(self.W, self.H)'], {}), '(image, (self.W, self.H))\n', (4323, 4348), False, 'import cv2\n'), ((4364, 4398), 'cv2.resize', 'cv2.resize', (['mask', '(self.W, self.H)'], {}), '(mask, (self.W, self.H))\n', (4374, 4398), False, 'import cv2\n'), ((1725, 1746), 'numpy.zeros_like', 'np.zeros_like', (['image1'], {}), '(image1)\n', (1738, 1746), True, 'import numpy as np\n'), ((1872, 1893), 'numpy.zeros_like', 'np.zeros_like', (['image2'], {}), '(image2)\n', (1885, 1893), True, 'import numpy as np\n'), ((2019, 2040), 'numpy.zeros_like', 'np.zeros_like', (['image3'], {}), '(image3)\n', (2032, 2040), True, 'import numpy as np\n'), ((2166, 2187), 'numpy.zeros_like', 'np.zeros_like', (['image4'], {}), '(image4)\n', (2179, 2187), True, 'import numpy as np\n'), ((2625, 2652), 'numpy.ones', 'np.ones', (['(21, 21)', 'np.uint8'], {}), '((21, 21), np.uint8)\n', (2632, 2652), True, 'import numpy as np\n'), ((2680, 2726), 'cv2.dilate', 'cv2.dilate', (['object_mask1', 'kernel'], {'iterations': '(1)'}), '(object_mask1, kernel, iterations=1)\n', (2690, 2726), False, 'import cv2\n'), ((2754, 2800), 'cv2.dilate', 'cv2.dilate', (['object_mask2', 'kernel'], {'iterations': '(1)'}), '(object_mask2, kernel, iterations=1)\n', (2764, 2800), False, 'import cv2\n'), ((2828, 2874), 'cv2.dilate', 'cv2.dilate', (['object_mask3', 'kernel'], {'iterations': '(1)'}), '(object_mask3, kernel, iterations=1)\n', (2838, 2874), False, 'import cv2\n'), ((2902, 2948), 'cv2.dilate', 'cv2.dilate', (['object_mask4', 'kernel'], {'iterations': '(1)'}), '(object_mask4, kernel, iterations=1)\n', (2912, 2948), False, 'import cv2\n'), ((2989, 3032), 'numpy.array', 'np.array', (['(object_mask1 > 0.1)'], {'dtype': '"""uint8"""'}), "(object_mask1 > 0.1, dtype='uint8')\n", (2997, 3032), True, 'import numpy as np\n'), ((3060, 3103), 
'numpy.array', 'np.array', (['(object_mask2 > 0.1)'], {'dtype': '"""uint8"""'}), "(object_mask2 > 0.1, dtype='uint8')\n", (3068, 3103), True, 'import numpy as np\n'), ((3131, 3174), 'numpy.array', 'np.array', (['(object_mask3 > 0.1)'], {'dtype': '"""uint8"""'}), "(object_mask3 > 0.1, dtype='uint8')\n", (3139, 3174), True, 'import numpy as np\n'), ((3202, 3245), 'numpy.array', 'np.array', (['(object_mask4 > 0.1)'], {'dtype': '"""uint8"""'}), "(object_mask4 > 0.1, dtype='uint8')\n", (3210, 3245), True, 'import numpy as np\n'), ((602, 644), 'django.db.models.Q', 'Q', ([], {'camera_type': 'FRONT_NORMAL_CAMERA_TYPE_V2'}), '(camera_type=FRONT_NORMAL_CAMERA_TYPE_V2)\n', (603, 644), False, 'from django.db.models import Q\n'), ((647, 687), 'django.db.models.Q', 'Q', ([], {'camera_type': 'FRONT_ZOOM_CAMERA_TYPE_V2'}), '(camera_type=FRONT_ZOOM_CAMERA_TYPE_V2)\n', (648, 687), False, 'from django.db.models import Q\n'), ((4126, 4155), 'torch.FloatTensor', 'torch.FloatTensor', (['[distance]'], {}), '([distance])\n', (4143, 4155), False, 'import torch\n')] |
import numpy as np
import pybdv
from pybdv.bdv_datasets import BdvDataset
def on_the_fly_2d():
"""Minimal example for writing to a single timepoint and setup slice by slice.
"""
x = np.random.rand(64, 128, 128)
scale_factors = [
(1, 2, 2), (1, 2, 2)
]
path = "./data.n5"
pybdv.initialize_bdv(path, shape=x.shape, dtype=x.dtype,
downscale_factors=scale_factors, chunks=(1, 64, 64))
ds = BdvDataset(path, setup_id=0, timepoint=0)
for z in range(x.shape[0]):
# TODO support better slicing
ds[z:z+1, 0:128, 0:128] = x[z:z+1, 0:128, 0:128]
def on_the_fly_3d():
"""Writing sub-regions to multiple timepoints and setups.
"""
shape = (64, 64, 64)
scale_factors = [(2, 2, 2), (2, 2, 2)]
n_setups = 2
n_timepoints = 2
path = "./data.n5"
# we use a nested dict to store the BdvDatasets for the individual
# setup/timepoint configurations
datasets = {setup_id: {} for setup_id in range(n_setups)}
for setup_id in range(n_setups):
for tp in range(n_timepoints):
pybdv.initialize_bdv(path, shape=shape, dtype="float32",
setup_id=setup_id, timepoint=tp,
downscale_factors=scale_factors, chunks=(32, 32, 32))
datasets[setup_id][tp] = BdvDataset(path, setup_id=setup_id, timepoint=tp)
# write sub-region to setup 0, timepoint 0
datasets[0][0][12:20, 32:64, 3:10] = np.random.rand(8, 32, 7)
# write sub-region to setup 1, timepoint 0
datasets[1][0][17:33, 0:32, 5:17] = np.random.rand(16, 32, 12)
# write sub-region to setup 1, timepoint 1
datasets[1][1][15:45, 32:48, 11:19] = np.random.rand(30, 16, 8)
if __name__ == '__main__':
# on_the_fly_2d()
on_the_fly_3d()
| [
"numpy.random.rand",
"pybdv.bdv_datasets.BdvDataset",
"pybdv.initialize_bdv"
] | [((196, 224), 'numpy.random.rand', 'np.random.rand', (['(64)', '(128)', '(128)'], {}), '(64, 128, 128)\n', (210, 224), True, 'import numpy as np\n'), ((311, 425), 'pybdv.initialize_bdv', 'pybdv.initialize_bdv', (['path'], {'shape': 'x.shape', 'dtype': 'x.dtype', 'downscale_factors': 'scale_factors', 'chunks': '(1, 64, 64)'}), '(path, shape=x.shape, dtype=x.dtype, downscale_factors=\n scale_factors, chunks=(1, 64, 64))\n', (331, 425), False, 'import pybdv\n'), ((455, 496), 'pybdv.bdv_datasets.BdvDataset', 'BdvDataset', (['path'], {'setup_id': '(0)', 'timepoint': '(0)'}), '(path, setup_id=0, timepoint=0)\n', (465, 496), False, 'from pybdv.bdv_datasets import BdvDataset\n'), ((1495, 1519), 'numpy.random.rand', 'np.random.rand', (['(8)', '(32)', '(7)'], {}), '(8, 32, 7)\n', (1509, 1519), True, 'import numpy as np\n'), ((1608, 1634), 'numpy.random.rand', 'np.random.rand', (['(16)', '(32)', '(12)'], {}), '(16, 32, 12)\n', (1622, 1634), True, 'import numpy as np\n'), ((1725, 1750), 'numpy.random.rand', 'np.random.rand', (['(30)', '(16)', '(8)'], {}), '(30, 16, 8)\n', (1739, 1750), True, 'import numpy as np\n'), ((1109, 1256), 'pybdv.initialize_bdv', 'pybdv.initialize_bdv', (['path'], {'shape': 'shape', 'dtype': '"""float32"""', 'setup_id': 'setup_id', 'timepoint': 'tp', 'downscale_factors': 'scale_factors', 'chunks': '(32, 32, 32)'}), "(path, shape=shape, dtype='float32', setup_id=setup_id,\n timepoint=tp, downscale_factors=scale_factors, chunks=(32, 32, 32))\n", (1129, 1256), False, 'import pybdv\n'), ((1356, 1405), 'pybdv.bdv_datasets.BdvDataset', 'BdvDataset', (['path'], {'setup_id': 'setup_id', 'timepoint': 'tp'}), '(path, setup_id=setup_id, timepoint=tp)\n', (1366, 1405), False, 'from pybdv.bdv_datasets import BdvDataset\n')] |
# Copyright (c) 2019, Build-A-Cell. All rights reserved.
# See LICENSE file in the project root directory for details.
from warnings import warn
from .sbmlutil import *
import warnings
import copy
import numpy as np
class Species(object):
    """ A formal species object for a CRN

    A Species must have a name. They may also have a materialtype (such as DNA,
    RNA, Protein), and a list of attributes.

    :param name: identifier for the species.
    :param material_type: optional category such as "dna", "rna", "protein".
    :param attributes: optional iterable of string (or None) attributes.
        A fresh internal list is always built, so callers' lists are never
        shared or mutated.
    :param initial_concentration: starting concentration (default 0).
    """
    def __init__(self, name, material_type="", attributes=None,
                 initial_concentration=0):
        self.name = name
        self.material_type = material_type
        self.initial_concentration = initial_concentration
        if material_type == "complex":
            warn("species which are formed of two species or more should be "
                 "called using the chemical_reaction_network.complex "
                 "constructor for attribute inheritance purposes.")

        # Always start from a fresh list: the original used a mutable default
        # argument (attributes=[]), which is shared across all calls.
        self.attributes = []
        if attributes is not None:
            for attribute in attributes:
                self.add_attribute(attribute)

    def __repr__(self):
        """Return a printable identifier: material_type, name, and non-None
        attributes joined by underscores, with quote characters stripped."""
        txt = self.material_type + "_" + self.name
        for attribute in self.attributes:
            if attribute is not None:
                txt += "_" + str(attribute)
        # Bug fix: the original called txt.replace("'", "") but discarded the
        # result (str.replace returns a new string), so quotes were never
        # actually removed from the name.
        return txt.replace("'", "")

    def add_attribute(self, attribute):
        """Append a single attribute, which must be a string or None.

        NOTE: validation uses assert, so it is skipped under ``python -O``.
        """
        assert isinstance(attribute, str) or attribute is None, "Attribute: %s must be a string or None" % attribute
        self.attributes.append(attribute)

    def __eq__(self, other):
        """
        Overrides the default implementation

        Two species are equivalent if they have the same name, type, and
        attributes. Attributes are compared as a set, so their order is
        irrelevant.

        :param other: Species instance
        :return: boolean
        """
        return (isinstance(other, Species)
                and self.material_type == other.material_type
                and self.name == other.name
                and set(self.attributes) == set(other.attributes))

    def __hash__(self):
        # Bug fix: __hash__ must agree with __eq__, which ignores attribute
        # order. The original hashed repr(self), which preserves order, so
        # equal species could hash differently. Hash an order-insensitive key.
        return hash((self.material_type, self.name, frozenset(self.attributes)))
class ComplexSpecies(Species):
    """A Species formed as a complex of one or more other Species.

    When no explicit name is given, the name is autogenerated from the
    constituent species (sorted by name, so it is order independent).
    Unless overridden, the complex inherits the union of its constituents'
    attributes.
    """
    def __init__(self, species, name = None, material_type = "complex",
                 attributes = None, initial_concentration = 0):
        """
        :param species: non-empty list of constituent Species
        :param name: optional explicit name; autogenerated from the
            constituents if None
        :param material_type: material classification (default "complex")
        :param attributes: optional attribute list; if None, the union of
            the constituents' attributes is inherited
        :param initial_concentration: initial concentration (default 0)
        """
        if len(species) < 1:
            # Bug fix: the message previously claimed "2 or more" species
            # were required, contradicting the `< 1` check above.
            raise ValueError("chemical_reaction_network.complex requires 1 "
                             "or more species in its constructor.")

        if name is None:
            # Autogenerate an order-independent name from a sorted copy of
            # the constituent list (the caller's list is left untouched).
            # Bug fix: previously an explicitly supplied name also had the
            # constituent names appended and its last character stripped.
            sorted_species = sorted(species, key=lambda s: s.name)
            parts = []
            for s in sorted_species:
                if s.material_type != "complex":
                    parts.append(f"{s.material_type}_{s.name}")
                else:
                    parts.append(s.name)
            name = "_".join(parts)
        self.name = name
        self.material_type = material_type
        self.initial_concentration = initial_concentration

        if attributes is None:
            # Inherit the deduplicated union of constituent attributes.
            attributes = []
            for s in species:
                attributes += s.attributes
            attributes = list(set(attributes))
        # Drop None placeholders; bug fix: use a new list rather than
        # remove() so a caller-supplied attributes list is never mutated.
        self.attributes = [a for a in attributes if a is not None]
class Reaction(object):
    """ An abstract representation of a chemical reaction in a CRN
    A reaction has the form:
       \sum_i n_i I_i --> \sum_i m_i O_i @ rate = k
       where n_i is the count of the ith input, I_i, and m_i is the count of the
       ith output, O_i.
    If the reaction is reversible, the reverse reaction is also included:
       \sum_i m_i O_i --> \sum_i n_i I_i @ rate = k_rev
    """
    def __init__(self, inputs, outputs, k = 0, input_coefs = None,
                 output_coefs = None, k_rev = 0, propensity_type = "massaction",
                 rate_formula = None, propensity_params = None):
        """Create a reaction from lists of input and output Species.

        :param inputs: list of input Species; repeats imply stoichiometry
            unless input_coefs is given
        :param outputs: list of output Species; repeats imply stoichiometry
            unless output_coefs is given
        :param k: forward rate constant (must end up > 0)
        :param input_coefs: optional explicit input stoichiometric coefficients
        :param output_coefs: optional explicit output stoichiometric coefficients
        :param k_rev: reverse rate constant; > 0 marks the reaction reversible
            (only valid for massaction propensities)
        :param propensity_type: "massaction" (default), "hillpositive",
            "hillnegative", "proportionalhillpositive",
            "proportionalhillnegative", or "general"
        :param rate_formula: unused placeholder parameter
        :param propensity_params: dict of parameters required by the
            non-massaction propensity types
        """
        # Reconcile k with propensity_params["k"]: copy whichever one was
        # supplied into the other, and reject inconsistent values.
        if k != 0 and propensity_params != None and "k" not in propensity_params:
            propensity_params["k"] = k
        elif k == 0 and propensity_params != None and "k" in propensity_params:
            k = propensity_params["k"]
        elif k != 0 and propensity_params != None and k != propensity_params['k']:
            print("k=", k, "propensity_params[k]", propensity_params["k"], "propensity_params", propensity_params)
            raise ValueError("Inconsistent rate constants: propensity_params['k'] != k.")
        # Validate that propensity_params contains exactly what the declared
        # propensity type needs; each branch documents its required keys.
        if propensity_type == "massaction" and propensity_params != None:
            warn("ValueWarning: propensity_params dictionary passed into a "
                 "massaction propensity. Massaction propensities do not "
                 "require a param dictionary.")
        elif propensity_type != "massaction" and propensity_params == None:
            raise ValueError("Non-massaction propensities require a propensity_params dictionary passed to the propensity_params keyword.")
        elif propensity_type != "massaction" and k_rev != 0:
            raise ValueError("Invalid reversible reaction for propensity "
                             f"type = {propensity_type}. Only massaction "
                             "propensities support the reversible rate k_rev. "
                             "Consider creating two seperate reactions "
                             "instead.")
        elif propensity_type == "hillpositive":
            if not ("k" in propensity_params and "s1" in propensity_params and "K" in propensity_params \
                    and "n" in propensity_params):
                raise ValueError("hillpositive propensities, p(s1; k, K, n) "
                                 "= k*s1^n/(s1^n + K), require the following "
                                 "propensity_params: "
                                 "'k':rate constant (float)"
                                 "'s1':species (chemical_reaction_network.species), "
                                 "'n':cooperativity(float), "
                                 "and 'K':dissociationc constant (float).")
        elif propensity_type == "hillnegative":
            if not ("k" in propensity_params and "s1" in propensity_params and "K" in propensity_params \
                    and "n" in propensity_params):
                raise ValueError("hillnegative propensities, "
                                 "p(s1; k, K, n) = k*1/(s1^n + K), require "
                                 "the following propensity_params:"
                                 "'k':rate constant (float)"
                                 "'s1':species (chemical_reaction_network.species), "
                                 "'n':cooperativity(float), "
                                 "and 'K':dissociationc constant (float)")
        elif propensity_type == "proportionalhillpositive":
            if not ("k" in propensity_params and "s1" in propensity_params and "d" in propensity_params \
                    and "K" in propensity_params \
                    and "n" in propensity_params):
                raise ValueError("proportionalhillpositive propensities, "
                    "p(s1, d; k, K, n) = k*d*s1^n/(s1^n + K), require the "
                    "following propensity_params: "
                    "'k':rate constant (float)"
                    "'s1':species (chemical_reaction_network.species), "
                    "'d':species (chemical_reaction_network.species), "
                    "'n':cooperativity(float), "
                    "and 'K':dissociationc onstant (float)")
        elif propensity_type == "proportionalhillnegative":
            if not ("k" in propensity_params and "s1" in propensity_params and "d" in propensity_params \
                    and "K" in propensity_params \
                    and "n" in propensity_params):
                raise ValueError("proportionalhillnegative propensities, "
                    "p(s1, d; k, K, n) = k*d/(s1^n + K), require the "
                    "following propensity_params: "
                    "'k':rate constant (float)"
                    "'s1':species (chemical_reaction_network.species), "
                    "'d':species (chemical_reaction_network.species), "
                    "'n':cooperativity(float), "
                    "and 'K':dissociationc onstant (float)")
        elif propensity_type == "general":
            if "rate" not in propensity_params:
                raise ValueError("general propensities, p(s) = k * f(s), "
                                 "require the propensity_params: "
                                 "'rate':f(s) where f(s) is an SBML compatable function "
                                 "of arbitrary species, "
                                 "s (use repr(chemical_reaction_network.species) to get "
                                 "the proper text representation of a species name).")
        elif propensity_type != "massaction":
            raise ValueError(f"Unknown propensity type: {propensity_type}.")
        self.propensity_type = propensity_type
        self.propensity_params = propensity_params
        # Check that inputs and outputs only contain species
        if any(not isinstance(s, Species) for s in inputs + outputs):
            raise ValueError("A non-species object was used as a species.")
        # internal representation of a reaction
        #self.inputs and self.outputs should be ordered lists.
        # Duplicates are collapsed here; counts go into the *_coefs lists.
        self.inputs = []
        for s in inputs:
            if s not in self.inputs:
                self.inputs.append(s)
        self.outputs = []
        for s in outputs:
            if s not in self.outputs:
                self.outputs.append(s)
        #self.input_coefs[i] is the number of self.inputs[i] into the reaction
        self.input_coefs = None
        #self.output coefs is analogous to above
        self.output_coefs = None
        # Check that rates are valid
        if k <= 0:
            raise ValueError(f"Reaction rate <= 0: k={k}")
        else:
            self.k = k
        if k_rev > 0:
            self.reversible = True
            self.k_r = k_rev
        else:
            self.k_r = 0
            self.reversible = False
        # TODO input coefficients should be stored with the species a dictionary (same for the output )
        # Set input coefficients
        if input_coefs is None:
            # Default: stoichiometry is the multiplicity in the inputs list.
            self.input_coefs = [inputs.count(s) for s in self.inputs]
        elif input_coefs is not None and len(input_coefs) == len(self.inputs):
            self.input_coefs = input_coefs
        elif len(input_coefs) == len(inputs) \
                and len(self.inputs) != len(inputs):
            raise ValueError("Input species and input_coefs contain "
                             "contradictory counts.")
        else:
            raise ValueError(f"len(input_coefs) ({len(input_coefs)}) doesn't "
                             f"match len(self.inputs) ({len(self.inputs)}).")
        # Set Output Coefs
        if output_coefs is None:
            self.output_coefs = [outputs.count(s) for s in self.outputs]
        elif output_coefs is not None \
                and len(output_coefs) == len(self.outputs):
            self.output_coefs = output_coefs
        elif len(output_coefs) == len(outputs) \
                and len(self.outputs) != len(outputs):
            raise ValueError("Output species and output_coefs contain "
                             "contradictory counts.")
        else:
            raise ValueError(f"len(output_coefs) ({len(output_coefs)}) doesn't "
                             f"match len(self.outputs) ({len(self.outputs)}).")
    def __repr__(self, **kwargs):
        """Return a human-readable arrow notation string for the reaction,
        followed by a description of its rate/propensity."""
        txt = ""
        for i in range(len(self.inputs)):
            if self.input_coefs[i] > 1:
                txt += str(self.input_coefs[i]) + "*" + str(self.inputs[i])
            else:
                txt += str(self.inputs[i])
            if i < len(self.inputs) - 1:
                txt += " + "
        if self.reversible:
            txt += " <--> "
        else:
            txt += " --> "
        for i in range(len(self.outputs)):
            if self.output_coefs[i] > 1:
                txt += str(self.output_coefs[i]) + "*" + str(self.outputs[i])
            else:
                txt += str(self.outputs[i])
            if i < len(self.outputs) - 1:
                txt += " + "
        tab = (" " * 8)
        txt += tab
        # Each propensity type renders its own rate-law description.
        if self.propensity_type == "massaction":
            input_func_args = ""
            input_prod = f"{self.k}"
            for i in range(len(self.inputs)):
                input_func_args += f"{self.inputs[i]}"
                if self.input_coefs[i] > 1:
                    input_prod+=f"*{self.inputs[i]}^{self.input_coefs[i]}"
                else:
                    input_prod+=f"*{self.inputs[i]}"
                if i < len(self.inputs)-1:
                    input_func_args += ","
            if len(self.inputs) > 0:
                input_func_args = "("+input_func_args+")"
            txt += f"massaction: k_f{input_func_args}={input_prod}"
            if self.reversible:
                output_func_args = ""
                output_prod = f"{self.k_r}"
                for i in range(len(self.outputs)):
                    output_func_args += f"{self.outputs[i]}"
                    if self.output_coefs[i] > 1:
                        output_prod+=f"*{self.outputs[i]}^{self.output_coefs[i]}"
                    else:
                        output_prod+=f"*{self.outputs[i]}"
                    if i < len(self.outputs)-1:
                        output_func_args += ","
                if len(self.outputs) > 0:
                    output_func_args = "("+output_func_args+")"
                txt += f"{tab}k_r{output_func_args}={output_prod}"
        elif self.propensity_type == "hillpositive":
            s1 = repr(self.propensity_params["s1"])
            kd = str(self.propensity_params["K"])
            n = str(self.propensity_params["n"])
            txt += f"hillpositive: k({s1})={self.k}*{s1}^{n}/({kd}+{s1}^{n})"
        elif self.propensity_type == "hillnegative":
            s1 = repr(self.propensity_params["s1"])
            kd = str(self.propensity_params["K"])
            n = str(self.propensity_params["n"])
            txt += f"hillnegative: k({s1})={self.k}*1/({kd}+{s1}^{n})"
        elif self.propensity_type == "proportionalhillpositive":
            s1 = repr(self.propensity_params["s1"])
            s2 = repr(self.propensity_params["d"])
            kd = str(self.propensity_params["K"])
            n = str(self.propensity_params["n"])
            txt += (f"proportionalhillpositive: k({s1}, "
                    f"{s2})={self.k}*{s2}*{s1}^{n}/({kd}+{s1}^{n})")
        elif self.propensity_type == "proportionalhillnegative":
            s1 = repr(self.propensity_params["s1"])
            s2 = repr(self.propensity_params["d"])
            kd = str(self.propensity_params["K"])
            n = str(self.propensity_params["n"])
            txt += (f"proportionalhillnegative: k({s1}, "
                    f"{s2})={self.k}*{s2}/({kd}+{s1}^{n})")
        elif self.propensity_type == "general":
            eq = self.propensity_params["rate"]
            txt += f"general: k(x)={self.k}*{eq}"
        else:
            raise ValueError("Unknown Propensity Type: "
                             f"{self.propensity_type}.")
        return txt
    def __eq__(self, other):
        """Overrides the default implementation.
           Two reactions are equivalent if they have the same inputs, outputs,
           and rates."""
        complexes_equal = Reaction.complex_set_equality(self.inputs,
                                                      self.input_coefs,
                                                      other.inputs,
                                                      other.input_coefs) \
                           and Reaction.complex_set_equality(self.outputs,
                                                      self.output_coefs,
                                                      other.outputs,
                                                      other.output_coefs)
        rates_equal = (other.k == self.k and other.k_r == self.k_r)
        propensity_types_equal = (self.propensity_type == other.propensity_type)
        # must both be reactions with the same rates and numbers of inputs and
        # outputs.
        if not isinstance(other, Reaction):
            return False
        if complexes_equal and rates_equal and propensity_types_equal:
            return True
        elif complexes_equal and propensity_types_equal:
            #warn("Two reactions with the same inputs and outputs but different "
            #     "rates are formally different, but may be undesired:"
            #     f"{repr(self)} and {repr(other)}.")
            return False
        # If the reactions are reversible inverses of eachother, one's forward
        # reaction could be the other's reverse
        elif self.reversible and other.reversible:
            reverse_complex_equal = Reaction.complex_set_equality(self.inputs,
                                                      self.input_coefs,
                                                      other.outputs,
                                                      other.output_coefs)\
                           and Reaction.complex_set_equality(self.outputs,
                                                      self.output_coefs,
                                                      other.inputs,
                                                      other.input_coefs)
            reverse_rates_equal = (other.k == self.k_r and other.k_r == self.k)
            if reverse_complex_equal and reverse_rates_equal:
                return True
            elif reverse_complex_equal:
                warn("Two reversible reactions with the same inputs and outputs"
                     " (reversed) but different rates are formally equal, but "
                     f"may be undesired:{repr(self)} and {repr(other)}")
                return True
            else:
                return False
        else:
            return False
    @staticmethod
    def complex_set_equality(c1, c1_coefs, c2, c2_coefs):
        """Checks to see if two formal complexes (reaction input or output sets) are equal."""
        if len(c1) != len(c2):
            return False
        else:
            for i in range(len(c1)):
                s1 = c1[i]
                coef1 = c1_coefs[i]
                if s1 not in c2 or coef1 != c2_coefs[c2.index(s1)]:
                    return False
        return True
    def pyrepr(self):
        """Return the reaction as a list of
        (input reprs, input coefs, output reprs, output coefs, rate) tuples;
        reversible reactions contribute a forward and a reverse tuple."""
        if self.reversible:
            return [
                ([repr(i) for i in self.inputs], self.input_coefs,
                 [repr(i) for i in self.outputs], self.output_coefs,
                 self.k),
                ([repr(i) for i in self.outputs], self.output_coefs,
                 [repr(i) for i in self.inputs], self.input_coefs,
                 self.k_r)]
        else:
            return [([repr(i) for i in self.inputs], self.input_coefs,
                    [repr(i) for i in self.outputs],
                    self.output_coefs, self.k)]
class ChemicalReactionNetwork(object):
    """ A chemical reaction network is a container of species and reactions
    chemical reaction networks can be compiled into SBML or represented
    conveniently as python tuple objects.
    reaction types:
       mass action: standard mass action semantics where the propensity of a
                reaction is given by deterministic propensity =
                        k \Prod_{inputs i} [S_i]^a_i
                stochastic propensity =
                        k \Prod_{inputs i} (S_i)!/(S_i - a_i)!
               where a_i is the spectrometric coefficient of species i
    """
    def __init__(self, species, reactions, warnings = False):
        """Validate the given species and reactions and index the species.

        :param species: list of Species in the network
        :param reactions: list of Reaction objects
        :param warnings: if True, warn about reaction species missing
            from the species list
        """
        self.species, self.reactions = ChemicalReactionNetwork.check_crn_validity(reactions, species, warnings=warnings)
        # TODO check whether we need this data structure
        self.species2index = {}
        for i in range(len(self.species)):
            self.species2index[str(self.species[i])] = i
    @staticmethod
    def check_crn_validity(reactions, species, warnings = False):
        """Return (checked_species, checked_reactions): species are
        deduplicated and type-checked; reactions are type-checked and
        (optionally) checked for species missing from the CRN."""
        # Check to make sure species are valid and only have a count of 1
        checked_species = []
        if not all(isinstance(s, Species) for s in species):
            print(species)
            raise ValueError("A non-species object was used as a species!")
        for s in species:
            if species.count(s) > 1:
                pass
                #warn("Species "+str(s)+" duplicated in CRN definition.
                # Duplicates have been removed.")
            if s not in checked_species:
                checked_species.append(s)
        # Check to make sure reactions are valid meaning:
        #   only have a count of 1
        #   all species in the inputs/outputs are also in the species list
        checked_reactions = []
        if not all(isinstance(r, Reaction) for r in reactions):
            raise ValueError("A non-reaction object was used as a reaction!")
        for r in reactions:
            if reactions.count(r) > 1:
                warn(f"Reaction {r} may be duplicated in CRN definitions. Duplicates "
                     "have NOT been removed.")
            checked_reactions.append(r)
            #if r not in checked_reactions:
            #    checked_reactions.append(r)
            for s in r.inputs:
                if s not in checked_species and warnings:
                    warn(f"Reaction {repr(r)} contains a species {repr(s)} "
                         "which is not in the CRN.")
            for s in r.outputs:
                if s not in checked_species and warnings:
                    warn(f"Reaction {repr(r)} contains a species {repr(s)} "
                         "which is not in the CRN.")
        return checked_species, checked_reactions
    def __repr__(self):
        # Render as "Species = ..." followed by one reaction per line.
        txt = "Species = "
        for s in self.species:
            txt += repr(s) + ", "
        txt = txt[:-2] + '\n'
        txt += "Reactions = [\n"
        for r in self.reactions:
            txt += "\t" + repr(r) + "\n"
        txt += "]"
        return txt
    def pyrepr(self):
        """Return (species reprs, reaction tuples) as plain python objects."""
        reactions = []
        for r in self.reactions:
            reactions += r.pyrepr()
        species = [str(s) for s in self.species]
        return species, reactions
    # TODO check whether we need this method
    def species_index(self, species):
        """Return the integer index of a species, rebuilding the cached
        index map if the species list has changed size."""
        if len(self.species2index) != len(self.species):
            self.species2index = {}
            for i in range(len(self.species)):
                self.species2index[str(self.species[i])] = i
        return self.species2index[str(species)]
    def initial_condition_vector(self, init_cond_dict):
        """Return a list of initial concentrations ordered like
        self.species; species absent from init_cond_dict get 0.0."""
        x0 = [0.0] * len(self.species)
        for idx, s in enumerate(self.species):
            if s in init_cond_dict:
                x0[idx] = init_cond_dict[s]
        return x0
    def get_all_species_containing(self, species, return_as_strings = False):
        """Returns all species (complexes and otherwise) containing a given species
           (or string).
        """
        return_list = []
        if not isinstance(species, Species):
            raise ValueError('species argument must be an instance of Species!')
        for s in self.species:
            # substring match on reprs: complexes embed their constituents'
            # reprs in their own name
            if repr(species) in repr(s):
                if return_as_strings:
                    return_list.append(repr(s))
                else:
                    return_list.append(s)
        return return_list
    def generate_sbml_model(self, stochastic_model = False, **keywords):
        """Compile the CRN into a libsbml (document, model) pair using the
        sbmlutil helpers; reversible massaction reactions are split into
        two SBML reactions."""
        document, model = create_sbml_model(**keywords)
        for s in self.species:
            add_species(model=model, compartment=model.getCompartment(0),
                species=s, initial_concentration=s.initial_concentration)
        rxn_count = 0
        for r in self.reactions:
            rxn_id = "r" + str(rxn_count)
            add_reaction(model, r.inputs, r.input_coefs, r.outputs,
                         r.output_coefs, rxn_id, r.k,
                         stochastic = stochastic_model,
                         propensity_type=r.propensity_type,
                         propensity_params = r.propensity_params)
            rxn_count += 1
            if r.reversible and r.propensity_type == "massaction":
                add_reaction(model, r.outputs, r.output_coefs, r.inputs,
                             r.input_coefs, rxn_id, r.k_r,
                             stochastic=stochastic_model,
                             propensity_type=r.propensity_type)
                rxn_count += 1
        if document.getNumErrors():
            warn('SBML model generated has errors. Use document.getErrorLog() to print all errors.')
        return document, model
    def write_sbml_file(self, file_name = None, **keywords):
        """Serialize the compiled SBML model to file_name; returns True."""
        document, _ = self.generate_sbml_model(**keywords)
        sbml_string = libsbml.writeSBMLToString(document)
        with open(file_name, 'w') as f:
            f.write(sbml_string)
        return True
    def create_bioscrape_model(self):
        """Convert the CRN into a bioscrape Model (requires bioscrape).

        Reversible reactions must be massaction; they are expanded into a
        forward and a reverse bioscrape reaction."""
        from bioscrape.types import Model
        species_list = []
        initial_condition_dict = {}
        for s in self.species:
            species_list.append(repr(s))
            if s.initial_concentration is None:
                initial_condition_dict[repr(s)] = 0
            else:
                initial_condition_dict[repr(s)] = s.initial_concentration
        reaction_list = []
        reaction_counter = 0
        rate_list = []
        for rxn in self.reactions:
            # Expand coefficients back into repeated reactant/product names,
            # the representation bioscrape expects.
            reactants = []
            for i in range(len(rxn.inputs)):
                reactants += [repr(rxn.inputs[i])]*int(rxn.input_coefs[i])
            products = []
            for i in range(len(rxn.outputs)):
                products += [repr(rxn.outputs[i])]*int(rxn.output_coefs[i])
            prop_type = rxn.propensity_type
            if rxn.propensity_params == None:
                prop_params = {}
            else:
                prop_params = {}
                for k in rxn.propensity_params:
                    v = rxn.propensity_params[k]
                    # Species params are passed by repr; everything else is
                    # coerced to str or float for bioscrape.
                    if isinstance(v, Species):
                        prop_params[k] = repr(v)
                    elif isinstance(v, str):
                        prop_params[k] = v
                    else:
                        prop_params[k] = float(v)
            prop_params['propensity_type'] = rxn.propensity_type
            prop_params['k'] = rxn.k
            reaction_list.append((reactants, products, prop_type,
                                  dict(prop_params)))
            if rxn.reversible and rxn.propensity_type == "massaction":
                prop_params['k'] = rxn.k_r
                reaction_list.append((products, reactants, prop_type,
                                      dict(prop_params)))
            elif rxn.reversible:
                raise ValueError("Only massaction irreversible reactions are "
                                 "supported for automatic bioscrape simulation."
                                 " Consider creating two seperate reactions.")
        model = Model(species = species_list, reactions = reaction_list,
                      initial_condition_dict = initial_condition_dict)
        return model
    def simulate_with_bioscrape(self, timepoints, initial_condition_dict = {},
                                stochastic = False, return_dataframe = True,
                                safe = False, via_sbml = True):
        """Simulate the CRN with bioscrape and return the simulation result
        (a dataframe when return_dataframe is True)."""
        from bioscrape.simulator import py_simulate_model
        m = self.create_bioscrape_model()
        m.set_species(initial_condition_dict)
        # safe mode is only meaningful for stochastic simulation
        if not stochastic and safe:
            safe = False
        result = py_simulate_model(timepoints, Model = m,
                                   stochastic = stochastic,
                                   return_dataframe = return_dataframe,
                                   safe = safe)
        return result
    def simulate_with_bioscrape_via_sbml(self, timepoints, file = None,
                initial_condition_dict = {}, return_dataframe = True,
                stochastic = False):
        """Simulate by first writing the CRN to SBML (or using the given
        SBML file/handle) and loading it into bioscrape; returns
        (result, bioscrape model)."""
        import bioscrape
        if file is None:
            self.write_sbml_file(file_name ="temp_sbml_file.xml")
            file_name = "temp_sbml_file.xml"
        elif isinstance(file, str):
            file_name = file
        else:
            file_name = file.name
        m = bioscrape.types.Model(sbml_filename = file_name)
        m.set_species(initial_condition_dict)
        result = bioscrape.simulator.py_simulate_model(timepoints, Model = m,
                                            stochastic = stochastic,
                                            return_dataframe = return_dataframe)
        return result, m
    def runsim_bioscrape(self, timepoints, file, simtype = "deterministic",
                    species_to_plot = [], plot_show = True):
        '''
        To simulate using bioscrape.
        Returns the data for all species and bioscrape model object which can be
        used to find out species indexes.
        NOTE : Needs bioscrape package installed to simulate.
        TODO : Returns result and model
        '''
        import matplotlib.pyplot as plt
        try:
            import bioscrape
        except:
            print("Bioscrape package must be installed to run simulations "
                  "using bioscrape.")
        if isinstance(file, str):
            filename = file
        else:
            filename = file.name
        m = bioscrape.sbmlutil.import_sbml(filename)
        s = bioscrape.simulator.ModelCSimInterface(m)
        if simtype == 'deterministic':
            s.py_prep_deterministic_simulation()
            s.py_set_initial_time(timepoints[0])
            sim = bioscrape.simulator.DeterministicSimulator()
            result = sim.py_simulate(s, timepoints)
            result = result.py_get_result()
            if plot_show:
                if species_to_plot:
                    for species in species_to_plot:
                        ind = m.get_species_index(species)
                        plt.plot(timepoints,result[:,ind])
                    plt.title(str(species_to_plot) + ' vs time')
                    plt.show()
                else:
                    plt.plot(timepoints, result)
                    plt.show()
            return result, m
        elif simtype == 'stochastic':
            warnings.warn("For stochastic simulation of SBML models using "
                          "bioscrape, it is highly recommended to NOT use "
                          "reversible reactions as the SSA algorithm might not "
                          "work for such cases.")
            sim = bioscrape.simulator.SSASimulator()
            s.py_set_initial_time(timepoints[0])
            result = sim.py_simulate(s,timepoints)
            result = result.py_get_result()
            if plot_show:
                if species_to_plot:
                    for species in species_to_plot:
                        ind = m.get_species_index(species)
                        plt.plot(timepoints,result[:,ind])
                    plt.title(str(species_to_plot) + ' vs time')
                    plt.show()
                else:
                    plt.plot(timepoints, result)
                    plt.show()
            return result, m
        else:
            raise ValueError("Optional argument 'simtype' must be either "
                             "deterministic or stochastic")
    def runsim_roadrunner(self, timepoints, filename, species_to_plot = []):
        '''
        To simulate using roadrunner.
        Returns the data for all species and bioscrape model object which can be
        used to find out species indexes.
        NOTE : Needs roadrunner package installed to simulate.
        TODO : species_to_plot not implemented.
        TODO : plot_show not implemented
        TODO : bioscrape.convert_to_sbml not implemented (possibly available
               in later versions of bioscrape)
        '''
        try:
            import roadrunner
        except:
            print('roadrunner is not installed.')
        rr = roadrunner.RoadRunner(filename)
        if species_to_plot:
            rr.timeCourseSelections = ['time', species_to_plot]
        result = rr.simulate(timepoints[0],timepoints[-1],len(timepoints))
        # Column 0 is time; column 1 is the first selected species.
        res_ar = np.array(result)
        return res_ar[:,0],res_ar[:,1]
| [
"bioscrape.simulator.DeterministicSimulator",
"matplotlib.pyplot.show",
"roadrunner.RoadRunner",
"bioscrape.types.Model",
"bioscrape.sbmlutil.import_sbml",
"matplotlib.pyplot.plot",
"bioscrape.simulator.SSASimulator",
"copy.copy",
"bioscrape.simulator.py_simulate_model",
"numpy.array",
"bioscrap... | [((27320, 27424), 'bioscrape.types.Model', 'Model', ([], {'species': 'species_list', 'reactions': 'reaction_list', 'initial_condition_dict': 'initial_condition_dict'}), '(species=species_list, reactions=reaction_list, initial_condition_dict\n =initial_condition_dict)\n', (27325, 27424), False, 'from bioscrape.types import Model\n'), ((27914, 28025), 'bioscrape.simulator.py_simulate_model', 'py_simulate_model', (['timepoints'], {'Model': 'm', 'stochastic': 'stochastic', 'return_dataframe': 'return_dataframe', 'safe': 'safe'}), '(timepoints, Model=m, stochastic=stochastic,\n return_dataframe=return_dataframe, safe=safe)\n', (27931, 28025), False, 'from bioscrape.simulator import py_simulate_model\n'), ((28627, 28673), 'bioscrape.types.Model', 'bioscrape.types.Model', ([], {'sbml_filename': 'file_name'}), '(sbml_filename=file_name)\n', (28648, 28673), False, 'import bioscrape\n'), ((28740, 28861), 'bioscrape.simulator.py_simulate_model', 'bioscrape.simulator.py_simulate_model', (['timepoints'], {'Model': 'm', 'stochastic': 'stochastic', 'return_dataframe': 'return_dataframe'}), '(timepoints, Model=m, stochastic=\n stochastic, return_dataframe=return_dataframe)\n', (28777, 28861), False, 'import bioscrape\n'), ((29743, 29783), 'bioscrape.sbmlutil.import_sbml', 'bioscrape.sbmlutil.import_sbml', (['filename'], {}), '(filename)\n', (29773, 29783), False, 'import bioscrape\n'), ((29796, 29837), 'bioscrape.simulator.ModelCSimInterface', 'bioscrape.simulator.ModelCSimInterface', (['m'], {}), '(m)\n', (29834, 29837), False, 'import bioscrape\n'), ((32382, 32413), 'roadrunner.RoadRunner', 'roadrunner.RoadRunner', (['filename'], {}), '(filename)\n', (32403, 32413), False, 'import roadrunner\n'), ((32598, 32614), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (32606, 32614), True, 'import numpy as np\n'), ((705, 879), 'warnings.warn', 'warn', (['"""species which are formed of two species or more should be called using the 
chemical_reaction_network.complex constructor for attribute inheritance purposes."""'], {}), "(\n 'species which are formed of two species or more should be called using the chemical_reaction_network.complex constructor for attribute inheritance purposes.'\n )\n", (709, 879), False, 'from warnings import warn\n'), ((2740, 2758), 'copy.copy', 'copy.copy', (['species'], {}), '(species)\n', (2749, 2758), False, 'import copy\n'), ((4690, 4846), 'warnings.warn', 'warn', (['"""ValueWarning: propensity_params dictionary passed into a massaction propensity. Massaction propensities do not require a param dictionary."""'], {}), "(\n 'ValueWarning: propensity_params dictionary passed into a massaction propensity. Massaction propensities do not require a param dictionary.'\n )\n", (4694, 4846), False, 'from warnings import warn\n'), ((24821, 24919), 'warnings.warn', 'warn', (['"""SBML model generated has errors. Use document.getErrorLog() to print all errors."""'], {}), "(\n 'SBML model generated has errors. Use document.getErrorLog() to print all errors.'\n )\n", (24825, 24919), False, 'from warnings import warn\n'), ((29993, 30037), 'bioscrape.simulator.DeterministicSimulator', 'bioscrape.simulator.DeterministicSimulator', ([], {}), '()\n', (30035, 30037), False, 'import bioscrape\n'), ((21270, 21373), 'warnings.warn', 'warn', (['f"""Reaction {r} may be duplicated in CRN definitions. Duplicates have NOT been removed."""'], {}), "(\n f'Reaction {r} may be duplicated in CRN definitions. 
Duplicates have NOT been removed.'\n )\n", (21274, 21373), False, 'from warnings import warn\n'), ((30643, 30836), 'warnings.warn', 'warnings.warn', (['"""For stochastic simulation of SBML models using bioscrape, it is highly recommended to NOT use reversible reactions as the SSA algorithm might not work for such cases."""'], {}), "(\n 'For stochastic simulation of SBML models using bioscrape, it is highly recommended to NOT use reversible reactions as the SSA algorithm might not work for such cases.'\n )\n", (30656, 30836), False, 'import warnings\n'), ((30932, 30966), 'bioscrape.simulator.SSASimulator', 'bioscrape.simulator.SSASimulator', ([], {}), '()\n', (30964, 30966), False, 'import bioscrape\n'), ((30451, 30461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30459, 30461), True, 'import matplotlib.pyplot as plt\n'), ((30504, 30532), 'matplotlib.pyplot.plot', 'plt.plot', (['timepoints', 'result'], {}), '(timepoints, result)\n', (30512, 30532), True, 'import matplotlib.pyplot as plt\n'), ((30553, 30563), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30561, 30563), True, 'import matplotlib.pyplot as plt\n'), ((30331, 30367), 'matplotlib.pyplot.plot', 'plt.plot', (['timepoints', 'result[:, ind]'], {}), '(timepoints, result[:, ind])\n', (30339, 30367), True, 'import matplotlib.pyplot as plt\n'), ((31428, 31438), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31436, 31438), True, 'import matplotlib.pyplot as plt\n'), ((31481, 31509), 'matplotlib.pyplot.plot', 'plt.plot', (['timepoints', 'result'], {}), '(timepoints, result)\n', (31489, 31509), True, 'import matplotlib.pyplot as plt\n'), ((31530, 31540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31538, 31540), True, 'import matplotlib.pyplot as plt\n'), ((31308, 31344), 'matplotlib.pyplot.plot', 'plt.plot', (['timepoints', 'result[:, ind]'], {}), '(timepoints, result[:, ind])\n', (31316, 31344), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
import os
import sys
import hashlib
from collections import OrderedDict
sys.path.insert(0, os.pardir)
sys.path.insert(0, os.path.join(os.pardir, 'openmoc'))
from testing_harness import TestHarness
from input_set import SimpleLatticeInput
import openmoc.process as process
import numpy as np
class MeshTallyProcessTestHarness(TestHarness):
    """An eigenvalue calculation with a mesh tally of the fission rates
    using the openmoc.process module."""
    def __init__(self):
        super(MeshTallyProcessTestHarness, self).__init__()
        self.input_set = SimpleLatticeInput()
        # Change spacing to avoid having rays start on lattice planes
        # Those rays are problematic because they cross through fuel pins
        # parallelly to sector planes.
        self.spacing = 0.12
    def _run_openmoc(self):
        """Run an OpenMOC eigenvalue calculation."""
        super(MeshTallyProcessTestHarness, self)._run_openmoc()
    def _get_results(self, num_iters=True, keff=True, fluxes=True,
                     num_fsrs=True, num_tracks=True, num_segments=True,
                     hash_output=True):
        """Digest info from the mesh tallies and return as a string."""
        # Create OpenMOC Mesh on which to tally fission rates
        # (4x4 cells covering the [-2, 2] x [-2, 2] lattice)
        mesh = process.Mesh()
        mesh.dimension = [4, 4]
        mesh.lower_left = [-2., -2.]
        mesh.upper_right = [2., 2.]
        mesh.width = [1., 1.]
        # Tally OpenMOC fission rates on the Mesh
        fiss_rates = mesh.tally_fission_rates(self.solver)
        # Append fission rates to the output string
        outstr = 'Fission Rate Mesh Tally\n'
        rates = ['{0:12.6E}'.format(rate) for rate in fiss_rates.ravel()]
        outstr += '\n'.join(rates) + '\n'
        # Retrieve the Materials and number of groups from the geometry
        materials = self.input_set.geometry.getAllMaterials()
        num_groups = self.input_set.geometry.getNumEnergyGroups()
        # Aggregate the total cross sections for each Material
        # into a dictionary to pass to the mesh tally
        # Structure: reaction name -> material id -> per-group coefficients
        domains_to_coeffs = OrderedDict(
            {'flux' : {},
             'total' : {},
             'nu-fission': {},
             'scatter' : {}})
        for material_id in materials:
            for rxn in domains_to_coeffs:
                domains_to_coeffs[rxn][material_id] = np.zeros(num_groups)
            for group in range(num_groups):
                # flux coefficient of 1 yields the plain flux tally
                domains_to_coeffs['flux'][material_id][group] = 1.
                domains_to_coeffs['total'][material_id][group] = \
                    materials[material_id].getSigmaTByGroup(group+1)
                domains_to_coeffs['nu-fission'][material_id][group] = \
                    materials[material_id].getNuSigmaFByGroup(group+1)
                # Add up reaction rates for scattering to all energy groups
                scatter = 0
                for gprime in range(num_groups):
                    scatter += materials[material_id].getSigmaSByGroup(
                        group + 1, gprime + 1)
                domains_to_coeffs['scatter'][material_id][group] = scatter
        # Tally volume-averaged OpenMOC rates on the Mesh
        tallies = OrderedDict()
        for rxn, coeffs in domains_to_coeffs.items():
            tallies[rxn] = mesh.tally_on_mesh(self.solver, coeffs,
                                               domain_type='material',
                                               volume='integrated')
        # Append reaction rates to the output string
        for rxn, rates in tallies.items():
            outstr += rxn.title() + ' Rate Mesh Tally\n'
            rates = ['{0:12.6E}'.format(rate) for rate in rates.ravel()]
            outstr += '\n'.join(rates) + '\n'
        return outstr
# Run the mesh-tally regression test when executed as a script.
if __name__ == '__main__':
    harness = MeshTallyProcessTestHarness()
    harness.main()
| [
"input_set.SimpleLatticeInput",
"openmoc.process.Mesh",
"numpy.zeros",
"sys.path.insert",
"collections.OrderedDict",
"os.path.join"
] | [((95, 124), 'sys.path.insert', 'sys.path.insert', (['(0)', 'os.pardir'], {}), '(0, os.pardir)\n', (110, 124), False, 'import sys\n'), ((144, 178), 'os.path.join', 'os.path.join', (['os.pardir', '"""openmoc"""'], {}), "(os.pardir, 'openmoc')\n", (156, 178), False, 'import os\n'), ((587, 607), 'input_set.SimpleLatticeInput', 'SimpleLatticeInput', ([], {}), '()\n', (605, 607), False, 'from input_set import SimpleLatticeInput\n'), ((1296, 1310), 'openmoc.process.Mesh', 'process.Mesh', ([], {}), '()\n', (1308, 1310), True, 'import openmoc.process as process\n'), ((2117, 2188), 'collections.OrderedDict', 'OrderedDict', (["{'flux': {}, 'total': {}, 'nu-fission': {}, 'scatter': {}}"], {}), "({'flux': {}, 'total': {}, 'nu-fission': {}, 'scatter': {}})\n", (2128, 2188), False, 'from collections import OrderedDict\n'), ((3224, 3237), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3235, 3237), False, 'from collections import OrderedDict\n'), ((2389, 2409), 'numpy.zeros', 'np.zeros', (['num_groups'], {}), '(num_groups)\n', (2397, 2409), True, 'import numpy as np\n')] |
import io
import textwrap
from collections import namedtuple
import numpy as np
import pandas as pd
ProcessedModel = namedtuple("ProcessedModel", "params info name")
def _get_models_multiindex():
df = pd.DataFrame(
data=np.ones((3, 4)), columns=["value", "ci_lower", "ci_upper", "p_value"]
)
df.index = pd.MultiIndex.from_tuples(
[("p_1", "v_1"), ("p_1", "v_2"), ("p_2", "v_2")]
)
info = {"n_obs": 400}
mod1 = ProcessedModel(params=df, info=info, name="m1")
mod2 = ProcessedModel(params=df, info=info, name="m2")
models = [mod1, mod2]
return models
def _get_models_single_index():
df = pd.DataFrame(
data=np.ones((3, 4)), columns=["value", "ci_lower", "ci_upper", "p_value"]
)
df.index = [f"p{i}" for i in [1, 2, 3]]
info = {"n_obs": 400}
mod1 = ProcessedModel(params=df, info=info, name="m1")
mod2 = ProcessedModel(params=df, info=info, name="m2")
models = [mod1, mod2]
return models
def _get_models_multiindex_multi_column():
df = pd.DataFrame(
data=np.ones((3, 4)), columns=["value", "ci_lower", "ci_upper", "p_value"]
)
df.index = pd.MultiIndex.from_tuples(
[("p_1", "v_1"), ("p_1", "v_2"), ("p_2", "v_2")]
)
info = {"n_obs": 400}
mod1 = ProcessedModel(params=df.iloc[1:], info=info, name="m1")
mod2 = ProcessedModel(params=df, info=info, name="m2")
mod3 = ProcessedModel(params=df, info=info, name="m2")
models = [mod1, mod2, mod3]
return models
def _read_csv_string(string, index_cols=None):
string = textwrap.dedent(string)
return pd.read_csv(io.StringIO(string), index_col=index_cols)
| [
"textwrap.dedent",
"io.StringIO",
"pandas.MultiIndex.from_tuples",
"numpy.ones",
"collections.namedtuple"
] | [((119, 167), 'collections.namedtuple', 'namedtuple', (['"""ProcessedModel"""', '"""params info name"""'], {}), "('ProcessedModel', 'params info name')\n", (129, 167), False, 'from collections import namedtuple\n'), ((327, 402), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('p_1', 'v_1'), ('p_1', 'v_2'), ('p_2', 'v_2')]"], {}), "([('p_1', 'v_1'), ('p_1', 'v_2'), ('p_2', 'v_2')])\n", (352, 402), True, 'import pandas as pd\n'), ((1155, 1230), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('p_1', 'v_1'), ('p_1', 'v_2'), ('p_2', 'v_2')]"], {}), "([('p_1', 'v_1'), ('p_1', 'v_2'), ('p_2', 'v_2')])\n", (1180, 1230), True, 'import pandas as pd\n'), ((1569, 1592), 'textwrap.dedent', 'textwrap.dedent', (['string'], {}), '(string)\n', (1584, 1592), False, 'import textwrap\n'), ((1616, 1635), 'io.StringIO', 'io.StringIO', (['string'], {}), '(string)\n', (1627, 1635), False, 'import io\n'), ((236, 251), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (243, 251), True, 'import numpy as np\n'), ((675, 690), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (682, 690), True, 'import numpy as np\n'), ((1064, 1079), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (1071, 1079), True, 'import numpy as np\n')] |
"""
Spatial Analysis - Sales Prices in Washington State
Copyright (c) 2022 Cannlytics
Authors: <NAME> <<EMAIL>>
Created: 2/16/2022
Updated: 2/16/2022
License: MIT License <https://opensource.org/licenses/MIT>
Description: This script creates a histogram of flower prices from the
Washington State traceability data (2021-01-31 to 11-10-2021).
Data sources:
- Random sample of sales items
https://cannlytics.page.link/cds53
"""
# External imports.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Define the plot style.
plt.style.use('fivethirtyeight')
plt.rcParams.update({
'font.family': 'Times New Roman',
'font.size': 32,
})
#--------------------------------------------------------------------------
# Analyze the data.
#--------------------------------------------------------------------------
# Specify where the data lives.
DATA_DIR = 'D:\\leaf-data'
DATA_FILE = f'{DATA_DIR}/samples/random-sales-items-2022-02-16.csv'
# Read in the data.
data = pd.read_csv(DATA_FILE)
#--------------------------------------------------------------------------
# Clean the data.
#--------------------------------------------------------------------------
# Determine wholesale vs retail transactions.
data = data.loc[data['sale_type'] != 'wholesale']
# Drop observations with negative prices and prices in the upper quantile.
data = data.loc[data.price_total > 0]
data = data[data.price_total < data.price_total.quantile(.95)]
# Add a date column.
data['date'] = pd.to_datetime(data['created_at'])
data['day'] = data['date'].dt.date
# Identify flower sales.
sample_type = 'usable_marijuana'
sample_type_data = data.loc[data.intermediate_type == sample_type]
#--------------------------------------------------------------------------
# Create a histogram of flower prices.
#--------------------------------------------------------------------------
def pdf(mu, sigma, bins):
"""Calculate a PDF given mean, standard deviation, and number of bins."""
return 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(bins - mu)**2 / (2 * sigma**2))
# Identify the series.
series = sample_type_data['price_total'].loc[
(sample_type_data['date'] >= pd.to_datetime('2021-01-01')) &
(sample_type_data['date'] <= pd.to_datetime('2022-01-01'))
]
# Define a color palette.
colors = sns.color_palette('Set2', n_colors=10)
green = colors[0]
orange = colors[9]
# Create a histogram.
fig, ax = plt.subplots(figsize=(19.8, 12))
n, bins, patches = ax.hist(
series,
bins=200,
density=1,
color=green,
alpha=0.8,
edgecolor='#ccc',
)
# Calculate interesting statistics.
sigma = series.std()
mu = series.mean()
median = np.percentile(series, 50)
lower = np.percentile(series, 10)
upper = np.percentile(series, 90)
# Plot the PDF.
pdf_values = pdf(mu, sigma, bins)
ax.plot(bins, pdf_values, '--', color=orange, alpha=.6)
# Shade the inner 90% of values.
pdf_bins = bins[(bins >= lower) & (bins <= upper)]
ax.fill_between(
pdf_bins,
pdf(mu, sigma, pdf_bins),
0,
alpha=.3,
color=orange
)
# Annotate the median and lower and upper percentiles.
summary_stats = series.describe()
median = summary_stats['50%']
ax.annotate(
'Median: $%0.2f' % round(median, 2),
xy=(median, pdf(mu, sigma, median) + 0.005),
fontsize=32,
horizontalalignment='center',
verticalalignment='bottom',
)
ax.annotate(
'Q10: $%0.2f' % round(lower, 2),
xy=(lower, pdf(mu, sigma, lower) - 0.005),
fontsize=32,
horizontalalignment='center',
verticalalignment='bottom',
)
ax.annotate(
'Q90: $%0.2f' % round(upper, 2),
xy=(upper, pdf(mu, sigma, upper) + 0.005),
fontsize=32,
horizontalalignment='center',
verticalalignment='bottom',
)
# Add text.
ax.set_xlabel('Price ($)')
ax.set_ylabel('Density')
ax.set_title(
'Cannabis Flower Sale Prices in Washington State in 2021',
fontsize=42,
pad=24,
)
plt.text(
0,
-0.0575,
"""Data: A random sample of 36,481 “usable marijuana” sale items.
Data Source: Washington State Traceability Data from January 2021 to November 2021.
Notes: The top 5% of sale item observations by price were excluded as outliers.
The estimated probability distribution is depicted by the dotted orange line.""",
fontsize=32,
)
fig.savefig(
f'{DATA_DIR}/figures/histogram_{sample_type}_prices_2021.png',
format='png',
dpi=300,
bbox_inches='tight',
)
plt.show()
| [
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.percentile",
"matplotlib.pyplot.text",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.rcParams.update",
"pandas.to_datetime",
"numpy.exp",
"seaborn.color_palette",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] | [((580, 612), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (593, 612), True, 'import matplotlib.pyplot as plt\n'), ((613, 685), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.family': 'Times New Roman', 'font.size': 32}"], {}), "({'font.family': 'Times New Roman', 'font.size': 32})\n", (632, 685), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1049), 'pandas.read_csv', 'pd.read_csv', (['DATA_FILE'], {}), '(DATA_FILE)\n', (1038, 1049), True, 'import pandas as pd\n'), ((1533, 1567), 'pandas.to_datetime', 'pd.to_datetime', (["data['created_at']"], {}), "(data['created_at'])\n", (1547, 1567), True, 'import pandas as pd\n'), ((2350, 2388), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {'n_colors': '(10)'}), "('Set2', n_colors=10)\n", (2367, 2388), True, 'import seaborn as sns\n'), ((2459, 2491), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(19.8, 12)'}), '(figsize=(19.8, 12))\n', (2471, 2491), True, 'import matplotlib.pyplot as plt\n'), ((2703, 2728), 'numpy.percentile', 'np.percentile', (['series', '(50)'], {}), '(series, 50)\n', (2716, 2728), True, 'import numpy as np\n'), ((2737, 2762), 'numpy.percentile', 'np.percentile', (['series', '(10)'], {}), '(series, 10)\n', (2750, 2762), True, 'import numpy as np\n'), ((2771, 2796), 'numpy.percentile', 'np.percentile', (['series', '(90)'], {}), '(series, 90)\n', (2784, 2796), True, 'import numpy as np\n'), ((3935, 4289), 'matplotlib.pyplot.text', 'plt.text', (['(0)', '(-0.0575)', '"""Data: A random sample of 36,481 “usable marijuana” sale items.\nData Source: Washington State Traceability Data from January 2021 to November 2021.\nNotes: The top 5% of sale item observations by price were excluded as outliers.\nThe estimated probability distribution is depicted by the dotted orange line."""'], {'fontsize': '(32)'}), '(0, -0.0575,\n """Data: A random sample of 36,481 “usable marijuana” sale 
items.\nData Source: Washington State Traceability Data from January 2021 to November 2021.\nNotes: The top 5% of sale item observations by price were excluded as outliers.\nThe estimated probability distribution is depicted by the dotted orange line."""\n , fontsize=32)\n', (3943, 4289), True, 'import matplotlib.pyplot as plt\n'), ((4438, 4448), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4446, 4448), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2117), 'numpy.exp', 'np.exp', (['(-(bins - mu) ** 2 / (2 * sigma ** 2))'], {}), '(-(bins - mu) ** 2 / (2 * sigma ** 2))\n', (2079, 2117), True, 'import numpy as np\n'), ((2217, 2245), 'pandas.to_datetime', 'pd.to_datetime', (['"""2021-01-01"""'], {}), "('2021-01-01')\n", (2231, 2245), True, 'import pandas as pd\n'), ((2282, 2310), 'pandas.to_datetime', 'pd.to_datetime', (['"""2022-01-01"""'], {}), "('2022-01-01')\n", (2296, 2310), True, 'import pandas as pd\n'), ((2051, 2069), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2058, 2069), True, 'import numpy as np\n')] |
import numpy as np
# from modules.Node import Node
def sigmoid(sigma):
"""
sigmoid 激活函数
:param sigma: 求和值
:return: 激活值
"""
return 1.0 / (1 + np.exp(- sigma))
def norm2(point):
"""
欧几里得范数
:param point: 输出向量
:return: 范数值
"""
return np.vdot(point, point)
def classify(output, tag1, tag2):
"""
分类函数
:param output: 输出向量
:param tag1: 类别 1 的向量
:param tag2: 类别 2 的向量
:return: 类别
"""
if norm2(output - tag1) > norm2(output - tag2):
return tag2
else:
return tag1
class Network(object):
def __init__(self):
self.weight_ = []
self.x_ = []
self.matrix_ = []
self.tag_ = []
self.delta_ = []
self.output = None
self.rate = 0.9
def load_matrix(self, matrix_=np.array([[1, 2, 3],
[-1, 3, 4],
[2, 5, 2]])):
self.matrix_ = matrix_
return self
def load_tag(self, tag_=np.array([[1],
[0],
[1]])):
self.tag_ = tag_
return self
def set_structure(self, structure):
structure.insert(0, len(self.matrix_[0]))
structure.append(len(self.tag_[0]))
for index in range(len(structure) - 1):
self.weight_.append(np.random.random((structure[index + 1], structure[index])) * 2 - 1)
self.x_ = [0 for i in range(len(self.weight_) + 1)]
self.delta_ = [0 for i in range(len(self.weight_))]
return self
def forward(self, sample_index):
for index in range(len(self.weight_) + 1):
if index == 0:
self.x_[index] = self.matrix_[sample_index]
else:
self.x_[index] = sigmoid(np.dot(self.weight_[index - 1], self.x_[index - 1]))
# print(self.x_)
# print(self.weight_)
return self
def backward(self, sample_index):
for index in range(len(self.weight_) - 1, -1, -1):
output = self.x_[index + 1]
if index == len(self.weight_) - 1:
delta = (self.tag_[sample_index] - output) * output * (1 - output)
else:
delta = output * (1 - output) * np.dot(self.weight_[index + 1], self.delta_[index + 1])
self.delta_[index] = delta
self.weight_[index] += self.rate * np.outer(delta, self.x_[index])
print(self.delta_)
def main():
network = Network()
network.load_matrix().load_tag().set_structure([2]).forward(0).backward(0)
if __name__ == '__main__':
main()
| [
"numpy.outer",
"numpy.vdot",
"numpy.random.random",
"numpy.array",
"numpy.exp",
"numpy.dot"
] | [((285, 306), 'numpy.vdot', 'np.vdot', (['point', 'point'], {}), '(point, point)\n', (292, 306), True, 'import numpy as np\n'), ((819, 863), 'numpy.array', 'np.array', (['[[1, 2, 3], [-1, 3, 4], [2, 5, 2]]'], {}), '([[1, 2, 3], [-1, 3, 4], [2, 5, 2]])\n', (827, 863), True, 'import numpy as np\n'), ((1034, 1059), 'numpy.array', 'np.array', (['[[1], [0], [1]]'], {}), '([[1], [0], [1]])\n', (1042, 1059), True, 'import numpy as np\n'), ((169, 183), 'numpy.exp', 'np.exp', (['(-sigma)'], {}), '(-sigma)\n', (175, 183), True, 'import numpy as np\n'), ((2445, 2476), 'numpy.outer', 'np.outer', (['delta', 'self.x_[index]'], {}), '(delta, self.x_[index])\n', (2453, 2476), True, 'import numpy as np\n'), ((1841, 1892), 'numpy.dot', 'np.dot', (['self.weight_[index - 1]', 'self.x_[index - 1]'], {}), '(self.weight_[index - 1], self.x_[index - 1])\n', (1847, 1892), True, 'import numpy as np\n'), ((2303, 2358), 'numpy.dot', 'np.dot', (['self.weight_[index + 1]', 'self.delta_[index + 1]'], {}), '(self.weight_[index + 1], self.delta_[index + 1])\n', (2309, 2358), True, 'import numpy as np\n'), ((1398, 1456), 'numpy.random.random', 'np.random.random', (['(structure[index + 1], structure[index])'], {}), '((structure[index + 1], structure[index]))\n', (1414, 1456), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
import sys
import argparse
import torchvision.models as models
from torchvision import transforms as trn
from PIL import Image
import numpy as np
from big_sleep.biggan import BigGAN
from torchvision.utils import save_image
parser = argparse.ArgumentParser()
parser.add_argument('--imageSize', type=int, default=512, help='the height / width of the input image to network')
parser.add_argument('--trunc', type=float, default=0.7, help='truncation, between 0.4 and 1')
parser.add_argument('--lat1', required=True, help='path to startpoint latents')
parser.add_argument('--lat2', required=True, help='path to endpoint latents')
parser.add_argument('--steps', type=int, default=200, help='number of intermediate steps')
parser.add_argument('--startFileIndex', type=int, default=0, help='index to begin file naming')
parser.add_argument('--fileName', type=str, default="file", help='file name')
opt = parser.parse_args()
assert(opt.imageSize in [256,512])
imgSize = opt.imageSize
startFileIndex = opt.startFileIndex
fileName = opt.fileName
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# ladataan latenssitiedot
lat1 = torch.load(opt.lat1)
lat2 = torch.load(opt.lat2)
best1 = lat1.best
best2 = lat2.best
noise1 = lat1.normu.to(device)
noise2 = lat2.normu.to(device)
class1 = lat1.cls.to(device)
class2 = lat2.cls.to(device)
# ladataan biggan
# load biggan
model = BigGAN.from_pretrained(f'biggan-deep-{imgSize}')
model.eval()
truncation = opt.trunc
model.to(device)
#n_delta = (noise2 - noise1) / opt.steps
#c_delta = (class2 - class1) / opt.steps
#noise_vector = noise1
#class_vector = class1
alphas = np.linspace(0., 1., opt.steps)
with torch.no_grad():
for i in range(0, opt.steps):
# Generate an image
alpha = alphas[i]
nv = (1-alpha)* noise1 + alpha * noise2
cv = (1-alpha)* class1 + alpha * class2
output = model(nv, torch.sigmoid(cv), truncation)
# save it
output = output.to('cpu')
output = (output + 1)/2
save_image(output, fileName + "."+str(startFileIndex)+".png")
startFileIndex+= 1
#noise_vector += n_delta
#class_vector += c_delta
| [
"argparse.ArgumentParser",
"torch.load",
"torch.sigmoid",
"torch.cuda.is_available",
"big_sleep.biggan.BigGAN.from_pretrained",
"numpy.linspace",
"torch.no_grad"
] | [((295, 320), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (318, 320), False, 'import argparse\n'), ((1191, 1211), 'torch.load', 'torch.load', (['opt.lat1'], {}), '(opt.lat1)\n', (1201, 1211), False, 'import torch\n'), ((1219, 1239), 'torch.load', 'torch.load', (['opt.lat2'], {}), '(opt.lat2)\n', (1229, 1239), False, 'import torch\n'), ((1443, 1491), 'big_sleep.biggan.BigGAN.from_pretrained', 'BigGAN.from_pretrained', (['f"""biggan-deep-{imgSize}"""'], {}), "(f'biggan-deep-{imgSize}')\n", (1465, 1491), False, 'from big_sleep.biggan import BigGAN\n'), ((1686, 1718), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'opt.steps'], {}), '(0.0, 1.0, opt.steps)\n', (1697, 1718), True, 'import numpy as np\n'), ((1119, 1144), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1142, 1144), False, 'import torch\n'), ((1723, 1738), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1736, 1738), False, 'import torch\n'), ((1930, 1947), 'torch.sigmoid', 'torch.sigmoid', (['cv'], {}), '(cv)\n', (1943, 1947), False, 'import torch\n')] |
######
## Inspired by the SVHN example on torchvision
## for AFEW dataset with folders of frame images
#####
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import torch.nn.functional as F
from torch.utils.data.sampler import SubsetRandomSampler, Sampler
import os
from list import list_dir, list_files
from PIL import Image
import numpy as np
import pickle
import multiprocessing as mp
# Thanks to Hassony2 for the pytorch transforms for videos, consistant across frames
import videotransforms.video_transforms as vidtrans
import videotransforms.volume_transforms as voltrans
import videotransforms.tensor_transforms as tentrans
import random
class AFEW(data.Dataset):
"""
Args:
root (string): Root directory of dataset where directory
``AFEW`` exists.
subset (string): train or test
"""
## This is a bit tricky because the train and val come from training folder
## and the test comes from val folder
def __init__(self, config, workers_delight_mp, subset):
self.workers_delight_mp = workers_delight_mp
self. tmp_dir = config.tmp_dir
self.weight_init_value = config.weight_init_value
self.exploration_epochs = config.exploration_epochs
self.inv_temperature = config.inv_temperature
#data
self.subset = subset # training set or test set
self.valid_size = config.valid_size
# alphabetical order anyway
self.label_map= {'Angry': 0,'Disgust':1, 'Fear':2, 'Happy':3,
'Neutral':4, 'Sad':5, 'Surprise':6}
self.afew_dir = config.afew_dir
################################################################################
if self.subset == "train_val":
self.subset_dir = os.path.join(self.afew_dir,"train")
elif self.subset == "test":
self.subset_dir = os.path.join(self.afew_dir,"val")
else:
#if self.subset not in ["train_val","test"] :
raise ValueError('Wrong subset entered! Please use subset="train_val" '
'subset="test"')
if len(list_dir(self.subset_dir)) != 7:
print("Wrong number of emotion directories ....")
exit()
################################################################################
# temporal uniformization of samples : clip sampling
self.num_frames = config.num_frames # number of frames in each sample of the batch
print("number of frames in training clips: ", self.num_frames)
# spatial uniformization of samples : resize and crop
self.resize_size = config.resize_size
self.crop_size = config.crop_size
# split validation and training from train set, store samples folder names (sid)
if self.subset == "train_val":
self.fold_splitting()
# Actual building of the dataset entries, store filenames of frames
self.build_dataset()
# Prepare the weighting, initializing
if self.subset == 'train_val' :
# clip exploration check boxes and init distribs to uniform
self.init_sampling_distribution_and_roadmap()
self.setup_video_transforms()
print("dataset created")
def fold_splitting (self):
# splits the train folder in training and validation samples
# returns a list of the sids (sample identifier) in valid_split_sids
# first look into the folders to split the datasets train and valid
# we split to maintain the emotion distribution in both
# not exact because of discarded samples down below
sids_in_emotion = {} # dict of samples (by sample identifiers) in an emotion directory
valid_split_sids = [] # list of samples in the valid split
# ['Angry','Disgust','Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']
for emotion_dir in list_dir(self.subset_dir,prefix=True):
emotion_samples = []
for video_id in list_dir(emotion_dir,prefix=False):
emotion_samples.append(video_id)
#print(video_id)
emotion_name = os.path.basename(os.path.normpath(emotion_dir))
sids_in_emotion[emotion_name] = emotion_samples
samples = sids_in_emotion[emotion_name]
random.shuffle(samples) # this is the random splitting
valid_split_sids.extend( samples[ : int(self.valid_size*len(samples)) ] )
#train_idx.extend( samples[ int(self.valid_size*len(samples)) : ] ) # useless
#print(str(len(valid_idx)) + " samples for valid...")
#print(str(len(train_idx)) + " samples for train...")
# sids_in_emotion contains the entire videos list for each emotion
# valid_split_sids contains the sub group that is assigned to validation subset
self.valid_split_sids = valid_split_sids
def build_dataset (self):
# create dataset entries for all samples
# load the filenames of each frames and store some info abour length etc
if self.subset == 'train_val':
self.train_indices = []
self.valid_indices = []
self.dataset = [] # the main collection
for emotion_dir in list_dir(self.subset_dir,prefix=True):
sample_emotion = os.path.basename(emotion_dir)
print("Loading emotion :" , sample_emotion)
for video_dir in list_dir(emotion_dir,prefix=True):
#nb_available_frames = len(list_files(video_dir,".png"))
file_list = list_files(video_dir,".png",prefix=True) # stores frames path
video_name = os.path.basename(os.path.normpath(video_dir))
# flag to know if sample is in validation, train or test set
if self.subset == "train_val":
if video_name in self.valid_split_sids :
group = "validation"
else:
group = "training"
else:
group = "test"
if self.subset == "train_val" and len(file_list) < 1:
# discard empty sample... face detection failed or something
# in train set only for no cheat
continue
if group == "training" :
self.train_indices.append(len(self.dataset))
elif group == "validation" :
self.valid_indices.append(len(self.dataset))
sample_label = self.label_map[sample_emotion]
sample = {'files': file_list, 'label': sample_label, 'length': len(file_list), 'group': group, 'sid': video_name}
self.dataset.append(sample)
print("afew loaded : " + str(len(self.dataset))+ " samples")
print(self.subset)
def init_sampling_distribution_and_roadmap(self):
# We loop through the entire dataset to initialize temporal distributions of the correct size for sampling clips
# and the roadmap is a list of positions for the deterministic exploration that initializes the distributions
# keep checks of what clips we saw to have smooth exploration in warmup...
self.exploration_roadmap_mp = {} # a dic of shared arrays, sid : array roadmap
# The running estimates of clip scores (weights for softmax sampling distributions)
self.temporal_weights_dict_mp = {} # dict of shared distribs , sid : array distrib
for sample in self.dataset :
if sample['group'] == 'training':
# for each epoch of deterministic exploration,
# a flag tells us if we checked the n^th clip
self.exploration_roadmap_mp[sample['sid']] = mp.Array('i', self.exploration_epochs) # is zeroed
# just an array for each training video,
# with a score value for each possible clip (video_length-clip_length+1)
# init value is useless since we use deterministic exploration to score "all" clips
self.temporal_weights_dict_mp[sample['sid']] = mp.Array('f', max( 1, sample['length']-(self.num_frames-1)) ) # is zeroed
print("Initialized temporal sampling distributions for "+ str(len(self.temporal_weights_dict_mp))+" videos.")
def setup_video_transforms (self):
print("Preparing video transforms...")
self.mean = torch.FloatTensor( [0.2572, 0.2000, 0.1644])
self.std = torch.FloatTensor([1,1,1])
self.num_channels = 3
self.pad_path = None # just put zeros
# Initialize transforms
video_transform_list = [
vidtrans.RandomHorizontalFlip(),
vidtrans.RandomRotation(20),
vidtrans.Resize(self.resize_size),
vidtrans.RandomCrop(self.crop_size),
vidtrans.ColorJitter(0.2, 0.2, 0.2, 0.1),
voltrans.ClipToTensor(channel_nb=self.num_channels),
#transforms.Normalize(mean, std)
# we normalise in getitem because videos are not supported ?
]
# Initialize transforms
ResAndTens = [
vidtrans.Resize(self.resize_size),
vidtrans.CenterCrop(self.crop_size),
voltrans.ClipToTensor(channel_nb=self.num_channels),
]
# Transforms for train (data augmentation) and for eval
self.no_aug_transform = vidtrans.Compose(ResAndTens)
self.yes_aug_transform = vidtrans.Compose(video_transform_list)
def __getitem__(self, index):
# this getitem has lots of if cases because of different clip-sampling phases
# eval vs training, and warmup / exploration / stochastic_softmax
sample = self.dataset[index]
sid = sample['sid']
file_list = sample['files']
sample_frames = [] # frames that will be sampled for training clip
if sample['group'] == "training" :
transform = self.yes_aug_transform
else: # val and test
transform = self.no_aug_transform
self.test_max_num_frames = 999
idx_explo = -1 # an index to know which clip has been explorated during deterministic exploration, init -1 if not used ...
if sample['group'] == "test" or sample['group']== "validation":
# test and validation
# we do not uniformize samples : full length inference, batch = 1
# validation on short clips is nice for time savings through
# just add workers and batch size below, and remove valid from this if statement
# we dont pad in eval mode
if len(file_list) <= self.test_max_num_frames :
clip_start = 0
sample_frames = file_list
else :
# crop in time for memory limit if VERY long video
clip_start = random.randint(0, len(file_list)-self.test_max_num_frames)
clip = [file_list[f] for f in range(clip_start, clip_start + self.test_max_num_frames)]
sample_frames.extend(clip)
else :
# train with clip-sampling, batches ...
# contiguous clips
# We read the sampling phase (warmup, exploration, stochastic_softmax)
# from a file that synchronizes the dataset.sampler.getitem workers with the training loop
fn = os.path.join(self.tmp_dir, "workers_delight.wut")
with open( fn , "r") as file:
s = file.read()
if s == "stochastic_softmax" :
self.sampling_phase = "softmax"
elif s == "exploration":
self.sampling_phase = "explore"
elif s == "warmup":
self.sampling_phase = "warm-up"
else :
print("warmup worker delight file corrupt ? unknown sampling phase")
exit()
if (self.workers_delight_mp[:] != self.sampling_phase) and sample['group']!='test':
print("mp sampling phase dont match")
print("in wut is : ", self.sampling_phase)
print("in mp is : ", self.workers_delight_mp[:])
exit()
if len(file_list) <= self.num_frames:
# pad training clip if shorter
#zero-pad if shorter, pad_path is just None, will be black image
clip_start = 0
idx_explo = 0
sample_frames.extend( file_list )
sample_frames.extend( [self.pad_path] * (self.num_frames-len(file_list)) )
else :
# training-clip sampling from long videos
#weighted temporal sampling
if sample['group'] == "training":
# Always true, but simplifies modifications
# now we have several strategies depending on phases :
# warmup, exploration, stochastic_softmax
if not ( len(self.temporal_weights_dict_mp[sid][:]) == len(file_list)-(self.num_frames-1) or len(self.temporal_weights_dict_mp[sid][:]) == 1) :
# number of weights in temporal distribution doesnt match with video length
# this means init_sampling_distribution_and_roadmap failed ?
# or something broken
print(" ERROR : afew.py : the samples do no correspond in getitem and the temporal_weights mp infos ...")
print(self.temporal_weights_dict_mp[sid].size)
print(len(file_list))
exit(0)
weights = self.temporal_weights_dict_mp[sid]
if self.sampling_phase == "warm-up":
# random uniform sampling
clip_start = random.randint(0, len(file_list)-self.num_frames)
clip = [file_list[f] for f in range(clip_start, clip_start + self.num_frames)]
sample_frames.extend(clip)
elif self.sampling_phase == "explore" :
# the exploration_roadmap specifies which clip have not been explored
# random choice from the avilable ones, working with indexes
clips_to_explore = [idx for (idx, explored) in enumerate(self.exploration_roadmap_mp[sid]) if not explored]
idx = random.choice(clips_to_explore)
idx_explo = idx # to indicate this idx has now been explored
self.exploration_roadmap_mp[sid][idx_explo] = 1 # from 0 to 1, 1 flag means explored ... I wanted bools but idk how to multiprocess that, not that I know with arrays
# translate roadmap idx to frame position
clip_start = int( (len(file_list)-(self.num_frames-1)-1) * idx /(self.exploration_epochs-1))
# de 0 en t0 à nbclips-1 en t= nbepochs-1
clip = [file_list[f] for f in range(clip_start, clip_start + self.num_frames)]
sample_frames.extend(clip)
elif self.sampling_phase == "softmax" :
# THIS IS TEMPORAL STOCHASTIC SOFTMAX SAMPLING
weights_tensor = torch.FloatTensor(weights) * self.inv_temperature
distrib = torch.distributions.Categorical( logits = weights_tensor) # logits : it is turned into probas with softmax
clip_start = distrib.sample().item() # sample the clip position with the softmax distribution based on the temporal weights (scores)
clip = [file_list[f] for f in range(clip_start, clip_start + self.num_frames)]
sample_frames.extend(clip)
else :
print("error : unknown sampling phase for temporal stochastic softmax, in afew.py")
exit()
else :
# Never used, but can be used for short clip validation (faster)
# Uniform sampling
# normal case, inference, uniform sampling
clip_start = random.randint(0, len(file_list)-self.num_frames)
clip = [file_list[f] for f in range(clip_start, clip_start + self.num_frames)]
sample_frames.extend(clip)
# end of the switch to decide the frames we take, now we load for real
# load images from frame file path
# create a padding frame, in case it is needed
pad_array = np.zeros((self.crop_size[0],self.crop_size[1],3),np.uint8)
sample_images = []
for frame_file in sample_frames:
#print(frame_file)
if frame_file is None:
image = Image.fromarray(pad_array)
else:
image = Image.open( frame_file ) #.convert('RGB') #rgb for channeles first
sample_images.append(image)
sample_tensor = transform(sample_images) # data-augment or not depending on eval or train
#sample_tensor_no_aug = self.no_aug_transform(sample_images) # also give no_aug tensor if you want to score on clean samples
# normalisation
sample_data = [(sample_tensor[c]-self.mean[c])/self.std[c] for c in range(self.num_channels)]
#sample_data = [sample_tensor[c] for c in range(self.num_channels)]
# we normalise on cpu...
sample_data = torch.stack(sample_data)
# alternative padding method, maybe better to normalize before 0-padding
#sample_data = F.pad(sample_data, (0,0, 0,0, 0,self.num_frames - sample_data.size(1)), mode = 'constant', value=0 )
#sample_data_no_aug = [(sample_tensor_no_aug[c]-self.mean[c])/self.std[c] for c in range(self.num_channels)]
#sample_data_no_aug = torch.stack(sample_data_no_aug)
# This is to be sent to the training loop
loaded_item = {'data': sample_data, 'label': sample['label'],
'sid': sample['sid'], # ID / name of the video
'temporal_position': clip_start, # positon of the clip, for stochastic sampling : update the distributions with obtained scores
'idx_explo' : idx_explo, # update exploration roadmap
}
return loaded_item
def __len__(self):
return len(self.dataset)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.afew_dir)
return fmt_str
############################################################################
###### Data_loader modified from github.com/kevinzakka/recurrent-visual-attention
############################################################################
def get_train_valid_loader(config, workers_delight_mp):
    """Build the training and validation DataLoaders over the AFEW train/val split.

    Args:
        config: experiment configuration; must expose ``valid_size`` in [0, 1]
            and ``batch_size``.
        workers_delight_mp: shared multiprocessing state forwarded to the AFEW
            dataset constructor.

    Returns:
        ``((train_loader, valid_loader), temporal_weights_dict_mp)`` where the
        train loader samples shuffled training indices in batches and the
        validation loader walks the (pre-shuffled) validation indices one at a
        time in a fixed order.

    Raises:
        ValueError: if ``config.valid_size`` is outside [0, 1].
    """
    # Validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if not 0 <= config.valid_size <= 1:
        raise ValueError("[!] config.valid_size should be in the range [0, 1].")
    # One dataset object backs both loaders; the samplers select the split.
    dataset = AFEW(config, workers_delight_mp, subset="train_val")
    # Shuffle once up front; the validation sampler then iterates this fixed
    # order deterministically on every pass.
    random.shuffle(dataset.valid_indices)
    random.shuffle(dataset.train_indices)
    train_sampler = SubsetRandomSampler(dataset.train_indices)
    valid_sampler = SubsetNotRandomSampler(dataset.valid_indices)
    print("Train batch size : ", config.batch_size)
    train_loader = torch.utils.data.DataLoader(
        dataset, batch_size=config.batch_size, sampler=train_sampler,
        num_workers=4, pin_memory=True, drop_last=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        dataset, batch_size=1, sampler=valid_sampler,
        num_workers=1, pin_memory=True,
    )
    return (train_loader, valid_loader), dataset.temporal_weights_dict_mp
def get_test_loader(config, workers_delight_mp):
    """Return a DataLoader over the AFEW test split.

    Clips are served one at a time (batch size 1) in file order, with no
    shuffling, so evaluation is deterministic.
    """
    # NOTE(review): workers_delight_mp is accepted for signature symmetry with
    # get_train_valid_loader but the test set is built without shared state.
    test_set = AFEW(config, workers_delight_mp=None, subset="test")
    loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True,
    )
    return loader
# for validation, more readable ~
class SubsetNotRandomSampler(Sampler):
    """Samples elements from a given list of indices in their given order.

    Deterministic counterpart of ``SubsetRandomSampler``: every iteration
    yields the same index sequence, which keeps validation passes readable
    and reproducible.

    Arguments:
        indices (sequence): a sequence of indices
    """

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        # Hand back a plain iterator over the stored sequence; no shuffling.
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
| [
"list.list_files",
"torch.distributions.Categorical",
"random.shuffle",
"videotransforms.video_transforms.Resize",
"videotransforms.volume_transforms.ClipToTensor",
"list.list_dir",
"os.path.join",
"videotransforms.video_transforms.Compose",
"videotransforms.video_transforms.ColorJitter",
"torch.u... | [((19285, 19322), 'random.shuffle', 'random.shuffle', (['dataset.valid_indices'], {}), '(dataset.valid_indices)\n', (19299, 19322), False, 'import random\n'), ((19327, 19364), 'random.shuffle', 'random.shuffle', (['dataset.train_indices'], {}), '(dataset.train_indices)\n', (19341, 19364), False, 'import random\n'), ((19386, 19428), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['dataset.train_indices'], {}), '(dataset.train_indices)\n', (19405, 19428), False, 'from torch.utils.data.sampler import SubsetRandomSampler, Sampler\n'), ((19569, 19711), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'config.batch_size', 'sampler': 'train_sampler', 'num_workers': '(4)', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(dataset, batch_size=config.batch_size, sampler=\n train_sampler, num_workers=4, pin_memory=True, drop_last=True)\n', (19596, 19711), False, 'import torch\n'), ((19752, 19861), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'sampler': 'valid_sampler', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(dataset, batch_size=1, sampler=valid_sampler,\n num_workers=1, pin_memory=True)\n', (19779, 19861), False, 'import torch\n'), ((20119, 20220), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(dataset, batch_size=1, shuffle=False,\n num_workers=1, pin_memory=True)\n', (20146, 20220), False, 'import torch\n'), ((3946, 3984), 'list.list_dir', 'list_dir', (['self.subset_dir'], {'prefix': '(True)'}), '(self.subset_dir, prefix=True)\n', (3954, 3984), False, 'from list import list_dir, list_files\n'), ((5290, 5328), 'list.list_dir', 'list_dir', (['self.subset_dir'], {'prefix': '(True)'}), '(self.subset_dir, prefix=True)\n', (5298, 5328), False, 'from list import list_dir, list_files\n'), ((8486, 8526), 
'torch.FloatTensor', 'torch.FloatTensor', (['[0.2572, 0.2, 0.1644]'], {}), '([0.2572, 0.2, 0.1644])\n', (8503, 8526), False, 'import torch\n'), ((8550, 8578), 'torch.FloatTensor', 'torch.FloatTensor', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (8567, 8578), False, 'import torch\n'), ((9484, 9512), 'videotransforms.video_transforms.Compose', 'vidtrans.Compose', (['ResAndTens'], {}), '(ResAndTens)\n', (9500, 9512), True, 'import videotransforms.video_transforms as vidtrans\n'), ((9546, 9584), 'videotransforms.video_transforms.Compose', 'vidtrans.Compose', (['video_transform_list'], {}), '(video_transform_list)\n', (9562, 9584), True, 'import videotransforms.video_transforms as vidtrans\n'), ((16686, 16747), 'numpy.zeros', 'np.zeros', (['(self.crop_size[0], self.crop_size[1], 3)', 'np.uint8'], {}), '((self.crop_size[0], self.crop_size[1], 3), np.uint8)\n', (16694, 16747), True, 'import numpy as np\n'), ((17571, 17595), 'torch.stack', 'torch.stack', (['sample_data'], {}), '(sample_data)\n', (17582, 17595), False, 'import torch\n'), ((1810, 1846), 'os.path.join', 'os.path.join', (['self.afew_dir', '"""train"""'], {}), "(self.afew_dir, 'train')\n", (1822, 1846), False, 'import os\n'), ((4047, 4082), 'list.list_dir', 'list_dir', (['emotion_dir'], {'prefix': '(False)'}), '(emotion_dir, prefix=False)\n', (4055, 4082), False, 'from list import list_dir, list_files\n'), ((4365, 4388), 'random.shuffle', 'random.shuffle', (['samples'], {}), '(samples)\n', (4379, 4388), False, 'import random\n'), ((5359, 5388), 'os.path.basename', 'os.path.basename', (['emotion_dir'], {}), '(emotion_dir)\n', (5375, 5388), False, 'import os\n'), ((5475, 5509), 'list.list_dir', 'list_dir', (['emotion_dir'], {'prefix': '(True)'}), '(emotion_dir, prefix=True)\n', (5483, 5509), False, 'from list import list_dir, list_files\n'), ((8734, 8765), 'videotransforms.video_transforms.RandomHorizontalFlip', 'vidtrans.RandomHorizontalFlip', ([], {}), '()\n', (8763, 8765), True, 'import 
videotransforms.video_transforms as vidtrans\n'), ((8779, 8806), 'videotransforms.video_transforms.RandomRotation', 'vidtrans.RandomRotation', (['(20)'], {}), '(20)\n', (8802, 8806), True, 'import videotransforms.video_transforms as vidtrans\n'), ((8820, 8853), 'videotransforms.video_transforms.Resize', 'vidtrans.Resize', (['self.resize_size'], {}), '(self.resize_size)\n', (8835, 8853), True, 'import videotransforms.video_transforms as vidtrans\n'), ((8867, 8902), 'videotransforms.video_transforms.RandomCrop', 'vidtrans.RandomCrop', (['self.crop_size'], {}), '(self.crop_size)\n', (8886, 8902), True, 'import videotransforms.video_transforms as vidtrans\n'), ((8916, 8956), 'videotransforms.video_transforms.ColorJitter', 'vidtrans.ColorJitter', (['(0.2)', '(0.2)', '(0.2)', '(0.1)'], {}), '(0.2, 0.2, 0.2, 0.1)\n', (8936, 8956), True, 'import videotransforms.video_transforms as vidtrans\n'), ((8970, 9021), 'videotransforms.volume_transforms.ClipToTensor', 'voltrans.ClipToTensor', ([], {'channel_nb': 'self.num_channels'}), '(channel_nb=self.num_channels)\n', (8991, 9021), True, 'import videotransforms.volume_transforms as voltrans\n'), ((9223, 9256), 'videotransforms.video_transforms.Resize', 'vidtrans.Resize', (['self.resize_size'], {}), '(self.resize_size)\n', (9238, 9256), True, 'import videotransforms.video_transforms as vidtrans\n'), ((9270, 9305), 'videotransforms.video_transforms.CenterCrop', 'vidtrans.CenterCrop', (['self.crop_size'], {}), '(self.crop_size)\n', (9289, 9305), True, 'import videotransforms.video_transforms as vidtrans\n'), ((9319, 9370), 'videotransforms.volume_transforms.ClipToTensor', 'voltrans.ClipToTensor', ([], {'channel_nb': 'self.num_channels'}), '(channel_nb=self.num_channels)\n', (9340, 9370), True, 'import videotransforms.volume_transforms as voltrans\n'), ((11432, 11481), 'os.path.join', 'os.path.join', (['self.tmp_dir', '"""workers_delight.wut"""'], {}), "(self.tmp_dir, 'workers_delight.wut')\n", (11444, 11481), False, 'import os\n'), 
((1912, 1946), 'os.path.join', 'os.path.join', (['self.afew_dir', '"""val"""'], {}), "(self.afew_dir, 'val')\n", (1924, 1946), False, 'import os\n'), ((2165, 2190), 'list.list_dir', 'list_dir', (['self.subset_dir'], {}), '(self.subset_dir)\n', (2173, 2190), False, 'from list import list_dir, list_files\n'), ((4209, 4238), 'os.path.normpath', 'os.path.normpath', (['emotion_dir'], {}), '(emotion_dir)\n', (4225, 4238), False, 'import os\n'), ((5612, 5654), 'list.list_files', 'list_files', (['video_dir', '""".png"""'], {'prefix': '(True)'}), "(video_dir, '.png', prefix=True)\n", (5622, 5654), False, 'from list import list_dir, list_files\n'), ((7821, 7859), 'multiprocessing.Array', 'mp.Array', (['"""i"""', 'self.exploration_epochs'], {}), "('i', self.exploration_epochs)\n", (7829, 7859), True, 'import multiprocessing as mp\n'), ((16904, 16930), 'PIL.Image.fromarray', 'Image.fromarray', (['pad_array'], {}), '(pad_array)\n', (16919, 16930), False, 'from PIL import Image\n'), ((16973, 16995), 'PIL.Image.open', 'Image.open', (['frame_file'], {}), '(frame_file)\n', (16983, 16995), False, 'from PIL import Image\n'), ((5720, 5747), 'os.path.normpath', 'os.path.normpath', (['video_dir'], {}), '(video_dir)\n', (5736, 5747), False, 'import os\n'), ((14498, 14529), 'random.choice', 'random.choice', (['clips_to_explore'], {}), '(clips_to_explore)\n', (14511, 14529), False, 'import random\n'), ((15470, 15524), 'torch.distributions.Categorical', 'torch.distributions.Categorical', ([], {'logits': 'weights_tensor'}), '(logits=weights_tensor)\n', (15501, 15524), False, 'import torch\n'), ((15386, 15412), 'torch.FloatTensor', 'torch.FloatTensor', (['weights'], {}), '(weights)\n', (15403, 15412), False, 'import torch\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.