code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import time
import numpy as np
import paddle
from env import DinoGame
from model import Model
resize_shape = (1, 30, 90) # 训练缩放的大小
save_model_path = "models/model.pdparams" # 保存模型路径
FPS = 25 # 控制游戏截图帧数
def main():
# 初始化游戏
env = DinoGame()
# 图像输入形状和动作维度
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
# 创建模型
model = Model(obs_dim, action_dim)
model.load_dict(paddle.load(save_model_path))
model.eval()
# 开始游戏
obs = env.reset()
episode_reward = 0
done = False
last_time = time.time()
# 游戏未结束执行一直执行游戏
while not done:
obs = np.expand_dims(obs, axis=0)
obs = paddle.to_tensor(obs, dtype='float32')
action = model(obs)
action = paddle.argmax(action).numpy()[0]
obs, reward, done, info = env.step(action)
episode_reward += reward
# 防止截图太快
# fps_now = 1 / (time.time() - last_time)
# if fps_now > FPS:
# time.sleep(1 / FPS - 1 / fps_now)
# last_time = time.time()
print("最终得分为:{:.2f}".format(episode_reward))
if __name__ == '__main__':
main()
| [
"paddle.load",
"paddle.argmax",
"model.Model",
"numpy.expand_dims",
"time.time",
"env.DinoGame",
"paddle.to_tensor"
] | [((244, 254), 'env.DinoGame', 'DinoGame', ([], {}), '()\n', (252, 254), False, 'from env import DinoGame\n'), ((378, 404), 'model.Model', 'Model', (['obs_dim', 'action_dim'], {}), '(obs_dim, action_dim)\n', (383, 404), False, 'from model import Model\n'), ((562, 573), 'time.time', 'time.time', ([], {}), '()\n', (571, 573), False, 'import time\n'), ((425, 453), 'paddle.load', 'paddle.load', (['save_model_path'], {}), '(save_model_path)\n', (436, 453), False, 'import paddle\n'), ((628, 655), 'numpy.expand_dims', 'np.expand_dims', (['obs'], {'axis': '(0)'}), '(obs, axis=0)\n', (642, 655), True, 'import numpy as np\n'), ((670, 708), 'paddle.to_tensor', 'paddle.to_tensor', (['obs'], {'dtype': '"""float32"""'}), "(obs, dtype='float32')\n", (686, 708), False, 'import paddle\n'), ((754, 775), 'paddle.argmax', 'paddle.argmax', (['action'], {}), '(action)\n', (767, 775), False, 'import paddle\n')] |
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
from mpi4py import MPI
import numpy as np
from nest_elephant_tvb.transformation.communication.mpi_io_external import MPICommunicationExtern
class ConsumerNestData(MPICommunicationExtern):
"""
Class for the receiving data from Nest and transfer them to the translation function process.
"""
def simulation_time(self):
"""
Receive data from Nest and add them in a shared buffer.
"""
self.logger.info("Consumer Nest : simulation time")
self.logger.info("ConsumerNestData -- I am rank: "+ str(self.port_comms[0].Get_rank()))
status_ = MPI.Status()
num_sending = self.port_comms[0].Get_remote_size() # The total number of the rank in Nest MPI_COMM_WORLD
check = np.empty(1, dtype='b') # variable to get the state of Nest
shape = np.empty(1, dtype='i') # variable to receive the shape of the data
count = 0 # count the number of run
while True:
self.logger.info("Consumer Nest : loop start : wait all")
self.port_comms[0].Recv([check, 1, MPI.CXX_BOOL], source=0, tag=MPI.ANY_TAG, status=status_)
state_nest = status_.Get_tag()
for source in range(1, num_sending):
# improvement: We do not care which source sends first, give MPI the freedom to send in whichever order.
self.port_comms[0].Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
if state_nest != status_.Get_tag():
raise Exception('Abnormal state : the state of Nest is different between rank')
if status_.Get_tag() == 0:
# INTERNAL :ready to write in the buffer
self.logger.info("Consumer Nest : prepare buffer")
self.communication_internal.send_spikes_ready()
if self.communication_internal.send_spike_exit:
self.logger.info("Consumer Nest : break")
break
self.logger.info("Consumer Nest : start get data")
for source in range(num_sending):
# send 'ready' to the nest rank
self.port_comms[0].Send([np.array(True, dtype='b'), MPI.BOOL], dest=source, tag=0)
# receive package size info
self.port_comms[0].Recv([shape, 1, MPI.INT], source=source, tag=0, status=status_)
self.logger.info("Consumer Nest : shape : "+str(self.communication_internal.shape_buffer))
# Add data in the buffer
self.port_comms[0].Recv([self.communication_internal.databuffer[self.communication_internal.shape_buffer[0]:], MPI.DOUBLE],
source=source, tag=0, status=status_)
self.communication_internal.shape_buffer[0] += shape[0] # move head
self.logger.info("Consumer Nest : end receive data")
# INTERNAL : end to write in the buffer
self.communication_internal.send_spikes()
elif status_.Get_tag() == 1:
count += 1
self.logger.info("Consumer Nest : receive end " + str(count))
elif status_.Get_tag() == 2:
self.logger.info("Consumer Nest : end simulation")
# INTERNAL : close the communication
self.communication_internal.send_spikes_end()
break
else:
raise Exception("Abnormal tag : bad mpi tag" + str(status_.Get_tag()))
self.logger.info('Consumer Nest : End of receive function')
class ProducerDataNest(MPICommunicationExtern):
"""
Class for sending data to Nest. The data are from the translated function.
"""
def __init__(self, id_first_spike_detector, *arg, **karg):
"""
Consume dat/spikes trains from Nest
:param id_first_spike_detector: id of the first spike detector
:param arg: other parameters
:param karg: other parameters
"""
super().__init__(*arg, **karg)
self.id_first_spike_detector = id_first_spike_detector
self.logger.info('Produce Nest : end init')
def simulation_time(self):
"""
Send data to Nest from a shared buffer
"""
self.logger.info('Produce Nest : simulation')
self.logger.info("ProducerNestData -- I am rank: "+ str(self.port_comms[0].Get_rank()))
# initialisation variable before the loop
status_ = MPI.Status()
source_sending = np.arange(0, self.port_comms[0].Get_remote_size(), 1) # list of all the rank of Nest MPI_COMM_WORLD
check = np.empty(1,dtype='b') # variable to get the state of Nest
while True:
self.logger.info('Produce Nest : loop start : wait all')
self.port_comms[0].Recv([check, 1, MPI.CXX_BOOL], source=0, tag=MPI.ANY_TAG, status=status_)
state_nest = status_.Get_tag()
for source in source_sending[1:]:
# improvement: We do not care which source sends first, give MPI the freedom to send in whichever order.
self.port_comms[0].Recv([check, 1, MPI.CXX_BOOL], source=source, tag=MPI.ANY_TAG, status=status_)
if state_nest != status_.Get_tag():
raise Exception('Abnormal state : the state of Nest is different between rank')
self.logger.info("Produce Nest : Get check : status : "+str(status_.Get_tag()))
if status_.Get_tag() == 0:
# INTERNAL : get the data to send
# (here is spike trains but Nest can receive other type of data.
# For the other type of data, the format to send it is different
self.logger.info("Produce Nest : start to send ")
spikes_times = self.communication_internal.get_spikes()
self.logger.info("Produce Nest : shape buffer "+str(self.communication_internal.shape_buffer[0]))
if self.communication_internal.shape_buffer[0] == -1:
break
self.logger.info("Produce Nest : spike time")
# Waiting for some processes of Nest to receive the spikes
for source in source_sending:
# improvement: We do not care which source sends first,
# give MPI the freedom to send in whichever order.
# receive list ids
size_list = np.empty(1, dtype='i')
self.port_comms[0].Recv([size_list, 1, MPI.INT], source=source, tag=0, status=status_)
if size_list[0] != 0:
list_id = np.empty(size_list, dtype='i')
self.port_comms[0].Recv([list_id, size_list, MPI.INT], source=status_.Get_source(),
tag=0, status=status_)
# Select the good spike train and send it
self.logger.info("Produce Nest : rank " + str(source) + " list_id " + str(list_id)
+ " spikes :" + str(spikes_times[0]))
data = []
shape = []
for i in list_id:
shape += [len(spikes_times[i-self.id_first_spike_detector])]
data += [spikes_times[i-self.id_first_spike_detector]]
send_shape = np.array(np.concatenate(([np.sum(shape)], shape)), dtype='i')
# firstly send the size of the spikes train
self.port_comms[0].Send([send_shape, MPI.INT], dest=status_.Get_source(), tag=list_id[0])
# secondly send the spikes train
data = np.concatenate(data).astype('d')
self.port_comms[0].Send([data, MPI.DOUBLE], dest=source, tag=list_id[0])
self.logger.info("Produce Nest : end sending")
self.communication_internal.get_spikes_release()
elif status_.Get_tag() == 1:
# ending the run of Nest
self.logger.info("Produce Nest : end run")
elif status_.Get_tag() == 2:
self.logger.info("Produce Nest : end simulation")
# INTERNAL : close the buffer
self.communication_internal.get_spikes_end()
self.logger.info("Produce Nest : send false")
break
else:
raise Exception("Abnormal tag : bad mpi tag : "+str(status_.Get_tag()))
| [
"numpy.sum",
"mpi4py.MPI.Status",
"numpy.empty",
"numpy.array",
"numpy.concatenate"
] | [((830, 842), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (840, 842), False, 'from mpi4py import MPI\n'), ((973, 995), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""b"""'}), "(1, dtype='b')\n", (981, 995), True, 'import numpy as np\n'), ((1049, 1071), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""i"""'}), "(1, dtype='i')\n", (1057, 1071), True, 'import numpy as np\n'), ((4761, 4773), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (4771, 4773), False, 'from mpi4py import MPI\n'), ((4916, 4938), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""b"""'}), "(1, dtype='b')\n", (4924, 4938), True, 'import numpy as np\n'), ((6741, 6763), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""i"""'}), "(1, dtype='i')\n", (6749, 6763), True, 'import numpy as np\n'), ((6947, 6977), 'numpy.empty', 'np.empty', (['size_list'], {'dtype': '"""i"""'}), "(size_list, dtype='i')\n", (6955, 6977), True, 'import numpy as np\n'), ((2431, 2456), 'numpy.array', 'np.array', (['(True)'], {'dtype': '"""b"""'}), "(True, dtype='b')\n", (2439, 2456), True, 'import numpy as np\n'), ((8057, 8077), 'numpy.concatenate', 'np.concatenate', (['data'], {}), '(data)\n', (8071, 8077), True, 'import numpy as np\n'), ((7751, 7764), 'numpy.sum', 'np.sum', (['shape'], {}), '(shape)\n', (7757, 7764), True, 'import numpy as np\n')] |
#import joblib
import pickle
import numpy as np
tasks = [# Pushing
{'task': 'push' , 'obj_init_pos': np.array([0, 0.6 , 0.02]) , 'goal_pos': np.array([0, 0.81, 0.02]) , 'door_pos': np.array([0, 1.0, 0.3])} ,
{'task': 'push' , 'obj_init_pos': np.array([0, 0.6 , 0.02]) , 'goal_pos': np.array([-0.15, 0.77 , 0.02]) , 'door_pos': np.array([0, 1.0, 0.3]) } ,
{'task': 'push' , 'obj_init_pos': np.array([0, 0.6 , 0.02]) , 'goal_pos': np.array([0.15, 0.77 , 0.02]) , 'door_pos': np.array([0, 1.0, 0.3]) } ,
#Door
{'task': 'door' , 'door_pos': np.array([0, 1.0, 0.3]) , 'padded_target_angle': np.array([0.29 , 0, 0]) , 'obj_init_pos': np.array([0, 0.6 , 0.02]) } ,
{'task': 'door' , 'door_pos': np.array([0, 1.0, 0.3]) , 'padded_target_angle': np.array([0.6, 0 , 0] ) , 'obj_init_pos': np.array([0, 0.6 , 0.02]) } ,
{'task': 'door' , 'door_pos': np.array([0, 1.0, 0.3]) , 'padded_target_angle': np.array([0.87 , 0 ,0]) , 'obj_init_pos': np.array([0, 0.6 , 0.02]) }
]
#joblib.dump(tasks , 'push_door_v1.pkl')
fobj = open('push_door_v1.pkl' , 'wb')
pickle.dump(tasks, fobj)
| [
"pickle.dump",
"numpy.array"
] | [((1081, 1105), 'pickle.dump', 'pickle.dump', (['tasks', 'fobj'], {}), '(tasks, fobj)\n', (1092, 1105), False, 'import pickle\n'), ((105, 129), 'numpy.array', 'np.array', (['[0, 0.6, 0.02]'], {}), '([0, 0.6, 0.02])\n', (113, 129), True, 'import numpy as np\n'), ((145, 170), 'numpy.array', 'np.array', (['[0, 0.81, 0.02]'], {}), '([0, 0.81, 0.02])\n', (153, 170), True, 'import numpy as np\n'), ((186, 209), 'numpy.array', 'np.array', (['[0, 1.0, 0.3]'], {}), '([0, 1.0, 0.3])\n', (194, 209), True, 'import numpy as np\n'), ((252, 276), 'numpy.array', 'np.array', (['[0, 0.6, 0.02]'], {}), '([0, 0.6, 0.02])\n', (260, 276), True, 'import numpy as np\n'), ((292, 321), 'numpy.array', 'np.array', (['[-0.15, 0.77, 0.02]'], {}), '([-0.15, 0.77, 0.02])\n', (300, 321), True, 'import numpy as np\n'), ((338, 361), 'numpy.array', 'np.array', (['[0, 1.0, 0.3]'], {}), '([0, 1.0, 0.3])\n', (346, 361), True, 'import numpy as np\n'), ((405, 429), 'numpy.array', 'np.array', (['[0, 0.6, 0.02]'], {}), '([0, 0.6, 0.02])\n', (413, 429), True, 'import numpy as np\n'), ((445, 473), 'numpy.array', 'np.array', (['[0.15, 0.77, 0.02]'], {}), '([0.15, 0.77, 0.02])\n', (453, 473), True, 'import numpy as np\n'), ((490, 513), 'numpy.array', 'np.array', (['[0, 1.0, 0.3]'], {}), '([0, 1.0, 0.3])\n', (498, 513), True, 'import numpy as np\n'), ((563, 586), 'numpy.array', 'np.array', (['[0, 1.0, 0.3]'], {}), '([0, 1.0, 0.3])\n', (571, 586), True, 'import numpy as np\n'), ((613, 635), 'numpy.array', 'np.array', (['[0.29, 0, 0]'], {}), '([0.29, 0, 0])\n', (621, 635), True, 'import numpy as np\n'), ((655, 679), 'numpy.array', 'np.array', (['[0, 0.6, 0.02]'], {}), '([0, 0.6, 0.02])\n', (663, 679), True, 'import numpy as np\n'), ((719, 742), 'numpy.array', 'np.array', (['[0, 1.0, 0.3]'], {}), '([0, 1.0, 0.3])\n', (727, 742), True, 'import numpy as np\n'), ((769, 790), 'numpy.array', 'np.array', (['[0.6, 0, 0]'], {}), '([0.6, 0, 0])\n', (777, 790), True, 'import numpy as np\n'), ((811, 835), 'numpy.array', 
'np.array', (['[0, 0.6, 0.02]'], {}), '([0, 0.6, 0.02])\n', (819, 835), True, 'import numpy as np\n'), ((875, 898), 'numpy.array', 'np.array', (['[0, 1.0, 0.3]'], {}), '([0, 1.0, 0.3])\n', (883, 898), True, 'import numpy as np\n'), ((925, 947), 'numpy.array', 'np.array', (['[0.87, 0, 0]'], {}), '([0.87, 0, 0])\n', (933, 947), True, 'import numpy as np\n'), ((967, 991), 'numpy.array', 'np.array', (['[0, 0.6, 0.02]'], {}), '([0, 0.6, 0.02])\n', (975, 991), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
t = torch.tensor([[0, 0], [1, 2], [2, 3]])
idx = torch.tensor([0, 1, 2, 0, 0, 1])
t[idx]
import gym
import gym_car_intersect
import numpy as np
env = gym.make('CarIntersect-v1')
env.reset()
for _ in range(500):
env.render()
a = np.random.choice(5)
_, _, done, _ = env.step(a)
if done:
env.reset()
| [
"gym.make",
"torch.tensor",
"numpy.random.choice"
] | [((40, 78), 'torch.tensor', 'torch.tensor', (['[[0, 0], [1, 2], [2, 3]]'], {}), '([[0, 0], [1, 2], [2, 3]])\n', (52, 78), False, 'import torch\n'), ((85, 117), 'torch.tensor', 'torch.tensor', (['[0, 1, 2, 0, 0, 1]'], {}), '([0, 1, 2, 0, 0, 1])\n', (97, 117), False, 'import torch\n'), ((188, 215), 'gym.make', 'gym.make', (['"""CarIntersect-v1"""'], {}), "('CarIntersect-v1')\n", (196, 215), False, 'import gym\n'), ((274, 293), 'numpy.random.choice', 'np.random.choice', (['(5)'], {}), '(5)\n', (290, 293), True, 'import numpy as np\n')] |
# Original code from https://github.com/araffin/robotics-rl-srl
# Authors: <NAME>, <NAME>, <NAME>
from constants import *
import cv2
import numpy as np
import torch
import os
def create_figure_and_sliders(name, state_dim):
"""
Creating a window for the latent space visualization,
and another one for the sliders to control it.
:param name: name of model (str)
:param state_dim: (int)
:return:
"""
# opencv gui setup
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(name, 500, 500)
cv2.namedWindow('slider for ' + name)
# add a slider for each component of the latent space
for i in range(state_dim):
# the sliders MUST be between 0 and max, so we placed max at 100, and start at 50
# So that when we substract 50 and divide 10 we get [-5,5] for each component
cv2.createTrackbar(str(i), 'slider for ' + name, 50, 100, (lambda a: None))
def main():
all_subdirs = ['vae/' + d for d in os.listdir('vae/') if os.path.isdir('vae/' + d)]
latest_subdir = max(all_subdirs, key=os.path.getmtime)
vae = torch.load(latest_subdir + '/saved_models/epoch_10_env_0', map_location={'cuda:0': 'cpu'})
all_subdirs = ['forward/' + d for d in os.listdir('forward/') if os.path.isdir('forward/' + d)]
latest_subdir = max(all_subdirs, key=os.path.getmtime)
ff = torch.load(latest_subdir + '/saved_models/epoch_300_env_0', map_location={'cuda:0': 'cpu'})
fig_name = "Decoder for the VAE"
# TODO: load data to infer bounds
bound_min = -10
bound_max = 10
state = np.array([0.,0.])
create_figure_and_sliders(fig_name, 2)
should_exit = False
while not should_exit:
# stop if escape is pressed
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
if k == ord('z'):
print('up')
action = [2]
next_state = ff.forward(torch.Tensor(state).reshape(-1,2),torch.Tensor(action).reshape(-1,1))
state = next_state
if k == ord('q'):
print('left')
action = [1]
next_state = ff.forward(torch.Tensor(state).reshape(-1,2),torch.Tensor(action).reshape(-1,1))
state = next_state
if k == ord('d'):
print('right')
action = [3]
next_state = ff.forward(torch.Tensor(state).reshape(-1,2),torch.Tensor(action).reshape(-1,1))
state = next_state
if k == ord('s'):
print('down')
action = [0]
next_state = ff.forward(torch.Tensor(state).reshape(-1,2),torch.Tensor(action).reshape(-1,1))
state = next_state
reconstructed_image = vae.forward(torch.Tensor(state), decode=True).detach().numpy().reshape(3,64,64).transpose((1,2,0))
# stop if user closed a window
if (cv2.getWindowProperty(fig_name, 0) < 0) or (cv2.getWindowProperty('slider for ' + fig_name, 0) < 0):
should_exit = True
break
cv2.imshow(fig_name, reconstructed_image)
# gracefully close
cv2.destroyAllWindows()
def translate_action(action):
if action == 2:
res = 'up'
if action == 0:
res = 'down'
if action == 1:
res = 'left'
if action == 3:
res = 'right'
return res
if __name__ == '__main__':
main() | [
"os.path.isdir",
"cv2.waitKey",
"torch.load",
"cv2.imshow",
"torch.Tensor",
"numpy.array",
"cv2.resizeWindow",
"cv2.destroyAllWindows",
"cv2.getWindowProperty",
"os.listdir",
"cv2.namedWindow"
] | [((457, 497), 'cv2.namedWindow', 'cv2.namedWindow', (['name', 'cv2.WINDOW_NORMAL'], {}), '(name, cv2.WINDOW_NORMAL)\n', (472, 497), False, 'import cv2\n'), ((502, 534), 'cv2.resizeWindow', 'cv2.resizeWindow', (['name', '(500)', '(500)'], {}), '(name, 500, 500)\n', (518, 534), False, 'import cv2\n'), ((539, 576), 'cv2.namedWindow', 'cv2.namedWindow', (["('slider for ' + name)"], {}), "('slider for ' + name)\n", (554, 576), False, 'import cv2\n'), ((1101, 1196), 'torch.load', 'torch.load', (["(latest_subdir + '/saved_models/epoch_10_env_0')"], {'map_location': "{'cuda:0': 'cpu'}"}), "(latest_subdir + '/saved_models/epoch_10_env_0', map_location={\n 'cuda:0': 'cpu'})\n", (1111, 1196), False, 'import torch\n'), ((1363, 1459), 'torch.load', 'torch.load', (["(latest_subdir + '/saved_models/epoch_300_env_0')"], {'map_location': "{'cuda:0': 'cpu'}"}), "(latest_subdir + '/saved_models/epoch_300_env_0', map_location={\n 'cuda:0': 'cpu'})\n", (1373, 1459), False, 'import torch\n'), ((1583, 1603), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1591, 1603), True, 'import numpy as np\n'), ((3076, 3099), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3097, 3099), False, 'import cv2\n'), ((3006, 3047), 'cv2.imshow', 'cv2.imshow', (['fig_name', 'reconstructed_image'], {}), '(fig_name, reconstructed_image)\n', (3016, 3047), False, 'import cv2\n'), ((981, 999), 'os.listdir', 'os.listdir', (['"""vae/"""'], {}), "('vae/')\n", (991, 999), False, 'import os\n'), ((1003, 1028), 'os.path.isdir', 'os.path.isdir', (["('vae/' + d)"], {}), "('vae/' + d)\n", (1016, 1028), False, 'import os\n'), ((1236, 1258), 'os.listdir', 'os.listdir', (['"""forward/"""'], {}), "('forward/')\n", (1246, 1258), False, 'import os\n'), ((1262, 1291), 'os.path.isdir', 'os.path.isdir', (["('forward/' + d)"], {}), "('forward/' + d)\n", (1275, 1291), False, 'import os\n'), ((1745, 1759), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1756, 1759), False, 'import 
cv2\n'), ((2848, 2882), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['fig_name', '(0)'], {}), '(fig_name, 0)\n', (2869, 2882), False, 'import cv2\n'), ((2892, 2942), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (["('slider for ' + fig_name)", '(0)'], {}), "('slider for ' + fig_name, 0)\n", (2913, 2942), False, 'import cv2\n'), ((1917, 1936), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (1929, 1936), False, 'import torch\n'), ((1951, 1971), 'torch.Tensor', 'torch.Tensor', (['action'], {}), '(action)\n', (1963, 1971), False, 'import torch\n'), ((2132, 2151), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (2144, 2151), False, 'import torch\n'), ((2166, 2186), 'torch.Tensor', 'torch.Tensor', (['action'], {}), '(action)\n', (2178, 2186), False, 'import torch\n'), ((2348, 2367), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (2360, 2367), False, 'import torch\n'), ((2382, 2402), 'torch.Tensor', 'torch.Tensor', (['action'], {}), '(action)\n', (2394, 2402), False, 'import torch\n'), ((2563, 2582), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (2575, 2582), False, 'import torch\n'), ((2597, 2617), 'torch.Tensor', 'torch.Tensor', (['action'], {}), '(action)\n', (2609, 2617), False, 'import torch\n'), ((2709, 2728), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (2721, 2728), False, 'import torch\n')] |
import geometry # local module
import data_generator # local module
import numpy as np
from keras import backend as K
from keras.preprocessing.image import Iterator
K.set_image_data_format('channels_first')
class DirectoryIterator(Iterator):
"""Iterator yielding data from a Numpy array.
Builds on keras.preprocessing.image.NumpyArrayIterator
but does not directly subclass
Inputs
------
load_fn: takes in an index (correspond to the y index)
and returns an image. This is designed to allow
you to read in images from disk, but it isn't required
y: Numpy array of targets data
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
num_channels: the number of channels of the image `load_fn`
will return
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
output_image_shape: None, or list/tuple
if list/tuple, should have ndim=2, of (height x width)
if None, defaults to input image shape
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self, load_fn, y, image_data_generator,
num_channels,
batch_size=32, shuffle=False, seed=None,
output_image_shape=None,
data_format=None,
save_to_dir=None, save_prefix='', save_format='png'):
if data_format is None:
data_format = K.image_data_format()
self.load_fn = load_fn
self.epoch_size = len(y)
self.num_channels = num_channels
channels_axis = 3 if data_format == 'channels_last' else 1
self.channels_axis = channels_axis
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
self.image_data_generator = image_data_generator
self.output_image_shape = output_image_shape
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(DirectoryIterator, self).__init__(self.epoch_size, batch_size,
shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_shape = [None]*4
batch_shape[0] = len(index_array)
batch_shape[self.channels_axis] = self.num_channels
image_axes = (2, 3) if self.channels_axis == 1 else (3, 4)
if self.output_image_shape is None:
for image_axis in image_axes:
batch_shape[image_axis] = self.x.shape[image_axis]
else:
for i, image_axis in enumerate(image_axes):
batch_shape[image_axis] = self.output_image_shape[i]
batch_x = np.zeros(batch_shape,
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.load_fn(j)
x = self.image_data_generator.random_transform(
x.astype(
K.floatx()
)
)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format
)
img.save(os.path.join(self.save_to_dir, fname))
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
class ImageDataGeneratorLoadable(data_generator.ImageDataGenerator):
"""Like data_generatory.ImageDataGenerator, except this loads in images
using an arbitrary load function (passed to `flow`) rather than assuming
images are preloaded into memory.
"""
def flow(self, load_fn, num_channels, y=None, batch_size=32, shuffle=True,
seed=None, save_to_dir=None, save_prefix='', save_format='png'):
return DirectoryIterator(
load_fn, y, self,
num_channels,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
output_image_shape=self.output_image_shape,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format)
def fit(self, *args, **kwargs):
raise NotImplementedError("Look at data_generator.ImageDataGenerator to adapt this")
| [
"keras.backend.image_data_format",
"keras.backend.floatx",
"numpy.asarray",
"keras.backend.set_image_data_format",
"numpy.random.randint"
] | [((169, 210), 'keras.backend.set_image_data_format', 'K.set_image_data_format', (['"""channels_first"""'], {}), "('channels_first')\n", (192, 210), True, 'from keras import backend as K\n'), ((2021, 2042), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (2040, 2042), True, 'from keras import backend as K\n'), ((2307, 2320), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (2317, 2320), True, 'import numpy as np\n'), ((3397, 3407), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3405, 3407), True, 'from keras import backend as K\n'), ((3592, 3602), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3600, 3602), True, 'from keras import backend as K\n'), ((4047, 4073), 'numpy.random.randint', 'np.random.randint', (['(10000.0)'], {}), '(10000.0)\n', (4064, 4073), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import params
metaparams = {
'figure size' : params.fgz,
'bins' : params.bins,
'font size' : params.fsz,
'marker size' : params.ms,
'alpha' : params.alpha,
'marker' : params.mkr,
'color array' : params.colarr,
'line color' : params.lcol
}
class scama(object):
def __init__(self, data=None, fgz=None, bins=None, fsz=None, ms=None, alpha=None, mkr=None, colarr=None, lcol=None):
# --- initiation
self.fgz = fgz if fgz else metaparams['figure size']
self.bins = bins if bins else metaparams['bins']
self.fsz = fsz if fsz else metaparams['font size']
self.ms = ms if ms else metaparams['marker size']
self.alpha = alpha if alpha else metaparams['alpha']
self.mkr = mkr if mkr else metaparams['marker']
self.colarr = colarr if colarr else metaparams['color array']
self.lcol = lcol if lcol else metaparams['line color']
# --- set up data
data = data if data else np.load('test/data.npy')
self.mat = data[:,:-1] # data matrix
self.lbl = data[:,-1] # labels array
self.ns, self.nf = self.mat.shape
self.labels = np.unique(self.lbl) # labels keys
self.feat = np.arange(self.nf) # features id
self.draw_figure()
return
def draw_figure(self):
v1, v2 = np.meshgrid(self.feat, self.feat)
nb = self.nf * v2 + v1 + 1
hidx = np.diag(nb)
trix = np.tril(nb,-1)
sidx = trix[trix>0]
def find_vars(axid, nb):
u1, u2 = np.where(nb==axid)
return u1, u2
def setup_histo(axid, nb):
global fig, plt
v1, v2 = find_vars(axid=axid, nb=nb)
ax = plt.subplot(self.nf, self.nf, axid)
ax.spines["right"].set_color("none")
ax.spines["left"].set_color("none")
ax.spines["top"].set_color("none")
ax.tick_params(labelbottom=False, bottom=True, direction='in')
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.xaxis.set_label_position("top")
for k in range(self.labels.size):
this_c = self.mat[:,v1[0]][self.lbl==self.labels[k]]
ax.hist(this_c, bins=self.bins, color=self.colarr[k], alpha=self.alpha, linewidth=1., histtype='step')
ax.set_xlabel('C'+str(v1[0]), fontsize=self.fsz[0])
if v1[0]==self.nf-1:
ax.tick_params(labelbottom=True, bottom=True, direction='in')
plt.xticks(rotation=-60)
else:
ax.tick_params(labelbottom=False, bottom=True, direction='in')
ax.set_xticks([])
return
def setup_scatter(axid, nb):
global fig, plt
v1, v2 = find_vars(axid=axid, nb=nb)
ax = plt.subplot(self.nf, self.nf, axid)
ax.tick_params(labelbottom=False, labeltop=False, labelleft=False, labelright=False, top=True, bottom=True, left=True, right=True)
if v1 == self.nf-1:
ax.tick_params(labelbottom=True, labeltop=False, labelleft=False, labelright=False, top=True, bottom=True, left=True, right=True)
if v2 == 0:
ax.tick_params(labelbottom=False, labeltop=False, labelleft=True, labelright=False, top=True, bottom=True, left=True, right=True)
if ((v1 == self.nf-1) and (v2 == 0)):
ax.tick_params(labelbottom=True, labeltop=False, labelleft=True, labelright=False, top=True, bottom=True, left=True, right=True)
for k in range(self.labels.size):
this_x = self.mat[:,v2[0]][self.lbl==self.labels[k]]
this_y = self.mat[:,v1[0]][self.lbl==self.labels[k]]
ax.scatter(this_x, this_y, color=self.colarr[k], alpha=self.alpha, s=self.ms, marker=self.mkr)
plt.xticks(rotation=-60)
return
global fig, plt
fig = plt.figure(figsize=(self.fgz, self.fgz))
plt.subplots_adjust(wspace=0., hspace=0.)
for ak in hidx:
setup_histo(axid=ak, nb=nb)
for ak in sidx:
setup_scatter(axid=ak, nb=nb)
plt.show()
return
| [
"matplotlib.pyplot.subplot",
"numpy.load",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.tril",
"matplotlib.ticker.NullLocator",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.arange",
"matplotlib.pyplot.subplots_adjust",
"numpy.diag",
"matplotlib.pyplot.xticks",
"numpy.unique"
] | [((1353, 1372), 'numpy.unique', 'np.unique', (['self.lbl'], {}), '(self.lbl)\n', (1362, 1372), True, 'import numpy as np\n'), ((1402, 1420), 'numpy.arange', 'np.arange', (['self.nf'], {}), '(self.nf)\n', (1411, 1420), True, 'import numpy as np\n'), ((1507, 1540), 'numpy.meshgrid', 'np.meshgrid', (['self.feat', 'self.feat'], {}), '(self.feat, self.feat)\n', (1518, 1540), True, 'import numpy as np\n'), ((1579, 1590), 'numpy.diag', 'np.diag', (['nb'], {}), '(nb)\n', (1586, 1590), True, 'import numpy as np\n'), ((1600, 1615), 'numpy.tril', 'np.tril', (['nb', '(-1)'], {}), '(nb, -1)\n', (1607, 1615), True, 'import numpy as np\n'), ((3683, 3723), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(self.fgz, self.fgz)'}), '(figsize=(self.fgz, self.fgz))\n', (3693, 3723), True, 'import matplotlib.pyplot as plt\n'), ((3726, 3769), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(wspace=0.0, hspace=0.0)\n', (3745, 3769), True, 'import matplotlib.pyplot as plt\n'), ((3870, 3880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3878, 3880), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1211), 'numpy.load', 'np.load', (['"""test/data.npy"""'], {}), "('test/data.npy')\n", (1194, 1211), True, 'import numpy as np\n'), ((1678, 1698), 'numpy.where', 'np.where', (['(nb == axid)'], {}), '(nb == axid)\n', (1686, 1698), True, 'import numpy as np\n'), ((1814, 1849), 'matplotlib.pyplot.subplot', 'plt.subplot', (['self.nf', 'self.nf', 'axid'], {}), '(self.nf, self.nf, axid)\n', (1825, 1849), True, 'import matplotlib.pyplot as plt\n'), ((2715, 2750), 'matplotlib.pyplot.subplot', 'plt.subplot', (['self.nf', 'self.nf', 'axid'], {}), '(self.nf, self.nf, axid)\n', (2726, 2750), True, 'import matplotlib.pyplot as plt\n'), ((3620, 3644), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(-60)'}), '(rotation=-60)\n', (3630, 3644), True, 'import matplotlib.pyplot as plt\n'), ((2064, 2084), 
'matplotlib.ticker.NullLocator', 'ticker.NullLocator', ([], {}), '()\n', (2082, 2084), True, 'import matplotlib.ticker as ticker\n'), ((2477, 2501), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(-60)'}), '(rotation=-60)\n', (2487, 2501), True, 'import matplotlib.pyplot as plt\n')] |
""" Setup the preCICE interface and execute the coupled solver.
Usage: python3 aeroelastic_two_way.py config.xml participant-name mesh-name
"""
from __future__ import division
# ensure packages are available at linux distribution level (not using virtual environment)
import argparse
import subprocess
import numpy as np
import precice
from local_context import PRECICE_FOLDER
from precice_post import write_solver_output_to_file
# Map each preCICE participant name to the python interpreter used to execute
# its solver module (local windows virtual environment executables).
SOLVER_PYTHON_PATH = {
    "SolverOne": "venv/Scripts/python.exe",
    "SolverTwo": "venv/Scripts/python.exe",
}
def read_from_file(file):
    """Read a numeric array from a whitespace-delimited text file.

    :param file: path to the text file to read
    :return: numpy array with the file contents, or None if reading failed
    """
    try:
        return np.loadtxt(file)
    except Exception as error:
        # note: the original concatenated str + Exception, which itself raised
        # a TypeError inside the handler - format the message explicitly
        print(f"Could not read solver input file - {error}")
def write_to_file(file, data):
    """Write a numpy array to a text file.

    Failures (including a non-ndarray input) are reported to stdout rather
    than raised, matching the best-effort behaviour of read_from_file.

    :param file: path to the text file to write
    :param data: numpy array to write out
    """
    try:
        # isinstance check simplified from isinstance(data, type(np.array([0.0])))
        assert isinstance(data, np.ndarray), "data should be of type np.ndarray"
        np.savetxt(file, data, fmt="%s")
    except Exception as error:
        # note: the original concatenated str + Exception, which itself raised
        # a TypeError inside the handler - format the message explicitly
        print(f"Could not write solver output file - {error}")
def execute_python(solver, module, solver_output):
    """Run *module* with the participant's local windows venv python.

    The solver module is executed from the parent folder with its stdout
    captured and appended to the running output history.

    :param solver: participant name, used to look up the interpreter path
    :param module: name of the python module file to execute
    :param solver_output: list of previously captured solver outputs
    :return: solver_output with the new stdout text appended
    """
    command = SOLVER_PYTHON_PATH[solver] + " " + module
    completed = subprocess.run(
        command,
        cwd="../.",
        shell=True,
        check=True,
        capture_output=True,
    )
    solver_output.append(completed.stdout.decode("utf-8"))
    return solver_output
def main(args):
    """Set up and execute a coupled solver process.

    Initialises the preCICE coupling interface for the named participant,
    runs the participant's solver once (if needed) to generate its mesh node
    file, then exchanges force/displacement data with the partner solver via
    preCICE until the coupled iteration completes.

    :param args: parsed command line arguments providing the preCICE xml
        configuration file name, the participant name and the mesh name
    """
    configuration_file_name = args.configurationFileName
    participant_name = args.participantName
    mesh_name = args.meshName
    # -----------------------------------
    # check that the run folder exist - else create it
    if not PRECICE_FOLDER.is_dir():
        PRECICE_FOLDER.mkdir(parents=True)
    # -----------------------------------
    # setup the preCICE coupling interface; the two participants read/write
    # the same two data fields with swapped roles
    if participant_name == "SolverOne":
        write_data_name = "dataOne"
        read_data_name = "dataTwo"
    if participant_name == "SolverTwo":
        read_data_name = "dataOne"
        write_data_name = "dataTwo"
    solver_process_index = 0
    solver_process_size = 1
    interface = precice.Interface(
        participant_name,
        configuration_file_name,
        solver_process_index,
        solver_process_size,
    )
    mesh_id = interface.get_mesh_id(mesh_name)
    dimensions = interface.get_dimensions()
    # -----------------------------------
    # initialise the solver variables: execute the solver once if its node
    # file does not exist yet, then read the mesh node coordinates back in
    solver_output = []
    if participant_name == "SolverOne":
        if not (PRECICE_FOLDER / "solver_1_nodes.txt").is_file():
            solver_output = execute_python(
                solver=participant_name,
                module="strip_theory_aerodynamics.py",
                solver_output=solver_output,
            )
        vertices = read_from_file(
            file=PRECICE_FOLDER / "solver_1_nodes.txt"
        )  # node x,y,z
        read_data = np.zeros((len(vertices), dimensions))  # displacements
        write_data = np.zeros((len(vertices), dimensions))  # forces
    if participant_name == "SolverTwo":
        if not (PRECICE_FOLDER / "solver_2_nodes.txt").is_file():
            solver_output = execute_python(
                solver=participant_name,
                module="parametric_box.py",
                solver_output=solver_output,
            )
        vertices = read_from_file(
            file=PRECICE_FOLDER / "solver_2_nodes.txt"
        )  # node x,y,z
        read_data = np.zeros((len(vertices), dimensions))  # forces
        write_data = np.zeros((len(vertices), dimensions))  # displacements
    vertex_ids = interface.set_mesh_vertices(mesh_id, vertices)
    read_data_id = interface.get_data_id(read_data_name, mesh_id)
    write_data_id = interface.get_data_id(write_data_name, mesh_id)
    dt = interface.initialize()
    iter_counter = 0
    # -----------------------------------
    # iterate until convergence is reached
    while interface.is_coupling_ongoing():
        if interface.is_action_required(precice.action_write_iteration_checkpoint()):
            print(f"{participant_name}: Writing iteration checkpoint")
            write_solver_output_to_file(
                solver=participant_name, output=solver_output, folder=PRECICE_FOLDER
            )
            interface.mark_action_fulfilled(precice.action_write_iteration_checkpoint())
        if interface.is_read_data_available():
            read_data = interface.read_block_vector_data(read_data_id, vertex_ids)
        if participant_name == "SolverOne":
            # write displacement input data to file
            write_to_file(
                file=PRECICE_FOLDER / "solver_1_displacement.txt", data=read_data
            )
            # execute the aerodynamic analysis with the updated displacements
            solver_output = execute_python(
                solver=participant_name,
                module="strip_theory_aerodynamics.py",
                solver_output=solver_output,
            )
            # update write_data with the force array
            write_data = read_from_file(file=PRECICE_FOLDER / "solver_1_forces.txt")
        if participant_name == "SolverTwo":
            # write force input data to file
            write_to_file(file=PRECICE_FOLDER / "solver_2_forces.txt", data=read_data)
            # execute the structural analysis (parametric box model) with the
            # updated forces
            solver_output = execute_python(
                solver=participant_name,
                module="parametric_box.py",
                solver_output=solver_output,
            )
            # update write_data with the displacement array
            write_data = read_from_file(
                file=PRECICE_FOLDER / "solver_2_displacements.txt"
            )
        if interface.is_write_data_required(dt):
            interface.write_block_vector_data(write_data_id, vertex_ids, write_data)
        print(f"{participant_name} advancing in time")
        dt = interface.advance(dt)
        iter_counter += 1
        if interface.is_action_required(precice.action_read_iteration_checkpoint()):
            print(f"{participant_name}: Reading iteration checkpoint")
            interface.mark_action_fulfilled(precice.action_read_iteration_checkpoint())
    # -----------------------------------
    # save iteration history
    write_solver_output_to_file(
        solver=participant_name, output=solver_output, folder=PRECICE_FOLDER
    )
    interface.finalize()
    print(f"{participant_name}: Closing python solver ...")
if __name__ == "__main__":
    # -----------------------------------
    # read the command line arguments and define configuration variables
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "configurationFileName", help="Name of the xml config file.", type=str
    )
    parser.add_argument("participantName", help="Name of the solver.", type=str)
    parser.add_argument("meshName", help="Name of the mesh.", type=str)
    try:
        args = parser.parse_args()
    except SystemExit:
        # argparse exits on a parsing error - show the expected usage and stop
        print("")
        print(
            "Usage: python3 aeroelastic_two_way.py config.xml participant-name mesh-name"
        )
        quit()
    # -----------------------------------
    # Execute the coupled analysis
    main(args=args)
| [
"subprocess.run",
"local_context.PRECICE_FOLDER.mkdir",
"argparse.ArgumentParser",
"numpy.savetxt",
"precice_post.write_solver_output_to_file",
"precice.action_write_iteration_checkpoint",
"numpy.array",
"numpy.loadtxt",
"precice.action_read_iteration_checkpoint",
"precice.Interface",
"local_con... | [((1220, 1339), 'subprocess.run', 'subprocess.run', (["(SOLVER_PYTHON_PATH[solver] + ' ' + module)"], {'cwd': '"""../."""', 'shell': '(True)', 'check': '(True)', 'capture_output': '(True)'}), "(SOLVER_PYTHON_PATH[solver] + ' ' + module, cwd='../.', shell\n =True, check=True, capture_output=True)\n", (1234, 1339), False, 'import subprocess\n'), ((2229, 2336), 'precice.Interface', 'precice.Interface', (['participant_name', 'configuration_file_name', 'solver_process_index', 'solver_process_size'], {}), '(participant_name, configuration_file_name,\n solver_process_index, solver_process_size)\n', (2246, 2336), False, 'import precice\n'), ((6466, 6567), 'precice_post.write_solver_output_to_file', 'write_solver_output_to_file', ([], {'solver': 'participant_name', 'output': 'solver_output', 'folder': 'PRECICE_FOLDER'}), '(solver=participant_name, output=solver_output,\n folder=PRECICE_FOLDER)\n', (6493, 6567), False, 'from precice_post import write_solver_output_to_file\n'), ((6822, 6847), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6845, 6847), False, 'import argparse\n'), ((640, 656), 'numpy.loadtxt', 'np.loadtxt', (['file'], {}), '(file)\n', (650, 656), True, 'import numpy as np\n'), ((970, 1002), 'numpy.savetxt', 'np.savetxt', (['file', 'data'], {'fmt': '"""%s"""'}), "(file, data, fmt='%s')\n", (980, 1002), True, 'import numpy as np\n'), ((1777, 1800), 'local_context.PRECICE_FOLDER.is_dir', 'PRECICE_FOLDER.is_dir', ([], {}), '()\n', (1798, 1800), False, 'from local_context import PRECICE_FOLDER\n'), ((1810, 1844), 'local_context.PRECICE_FOLDER.mkdir', 'PRECICE_FOLDER.mkdir', ([], {'parents': '(True)'}), '(parents=True)\n', (1830, 1844), False, 'from local_context import PRECICE_FOLDER\n'), ((4105, 4148), 'precice.action_write_iteration_checkpoint', 'precice.action_write_iteration_checkpoint', ([], {}), '()\n', (4146, 4148), False, 'import precice\n'), ((4234, 4335), 'precice_post.write_solver_output_to_file', 
'write_solver_output_to_file', ([], {'solver': 'participant_name', 'output': 'solver_output', 'folder': 'PRECICE_FOLDER'}), '(solver=participant_name, output=solver_output,\n folder=PRECICE_FOLDER)\n', (4261, 4335), False, 'from precice_post import write_solver_output_to_file\n'), ((6186, 6228), 'precice.action_read_iteration_checkpoint', 'precice.action_read_iteration_checkpoint', ([], {}), '()\n', (6226, 6228), False, 'import precice\n'), ((898, 913), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (906, 913), True, 'import numpy as np\n'), ((4406, 4449), 'precice.action_write_iteration_checkpoint', 'precice.action_write_iteration_checkpoint', ([], {}), '()\n', (4447, 4449), False, 'import precice\n'), ((6346, 6388), 'precice.action_read_iteration_checkpoint', 'precice.action_read_iteration_checkpoint', ([], {}), '()\n', (6386, 6388), False, 'import precice\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import xarray as xr
from ooi_data_explorations.common import inputs, load_gc_thredds, m2m_collect, m2m_request, get_vocabulary, \
update_dataset, ENCODINGS
from ooi_data_explorations.qartod.qc_processing import parse_qc
# load configuration settings
# Variable-level attributes (long_name, units, comments, etc.) applied to the
# renamed variables by the flort_* processing functions in this module; keys
# are the post-rename variable names.
ATTRS = dict({
    'raw_backscatter': {
        'long_name': 'Raw Optical Backscatter at 700 nm',
        'units': 'counts',
        'comment': 'Raw optical backscatter measurements at 700 nm.',
        'data_product_identifier': 'FLUBSCT_L0'
    },
    'raw_chlorophyll': {
        'long_name': 'Raw Chlorophyll Fluorescence',
        'units': 'counts',
        'comment': 'Raw chlorophyll fluorescence (470 nm excitation/ 695 nm emission) measurements.',
        'data_product_identifier': 'CHLAFLO_L0'
    },
    'raw_cdom': {
        'long_name': 'Raw CDOM Fluorescence',
        'units': 'counts',
        'comment': 'Raw CDOM fluorescence (370 nm excitation/ 460 nm emission) measurements.',
        'data_product_identifier': 'CDOMFLO_L0'
    },
    'estimated_chlorophyll': {
        'long_name': 'Estimated Chlorophyll Concentration',
        'standard_name': 'mass_concentration_of_chlorophyll_in_sea_water',
        'units': 'ug L-1',
        'comment': ('Estimated chlorophyll concentration based upon a calibration curve derived from a fluorescent ' +
                    'proxy approximately equal to 25 ug/l of a Thalassiosira weissflogii phytoplankton culture. ' +
                    'This measurement is considered to be an estimate only of the true chlorophyll concentration.'),
        'data_product_identifier': 'CHLAFLO_L1',
        'ancillary_variables': 'raw_chlorophyll estimated_chlorophyll_qc_executed estimated_chlorophyll_qc_results'
    },
    'fluorometric_cdom': {
        'long_name': 'Fluorometric CDOM Concentration',
        'standard_name': ('concentration_of_colored_dissolved_organic_matter_in_sea_water_expressed_as_equivalent' +
                          '_mass_fraction_of_quinine_sulfate_dihydrate'),
        'units': 'ppb',
        'comment': ('More commonly referred to as Chromophoric Dissolved Organic Matter (CDOM). CDOM plays an ' +
                    'important role in the carbon cycling and biogeochemistry of coastal waters. It occurs ' +
                    'naturally in aquatic environments primarily as a result of tannins released from decaying ' +
                    'plant and animal matter, and can enter coastal areas in river run-off containing organic ' +
                    'materials leached from soils.'),
        'data_product_identifier': 'CDOMFLO_L1',
        'ancillary_variables': 'raw_cdom fluorometric_cdom_qc_executed fluorometric_cdom_qc_results'
    },
    'beta_700': {
        'long_name': 'Volume Scattering Function at 700 nm',
        'standard_name': 'volume_scattering_function_of_radiative_flux_in_sea_water',
        'units': 'm-1 sr-1',
        'comment': ('Radiative flux is the sum of shortwave and longwave radiative fluxes. Scattering of ' +
                    'radiation is its deflection from its incident path without loss of energy. The volume ' +
                    'scattering function is the intensity (flux per unit solid angle) of scattered radiation per ' +
                    'unit length of scattering medium, normalised by the incident radiation flux.'),
        'data_product_identifier': 'FLUBSCT_L1',
        'ancillary_variables': 'raw_backscatter beta_700_qc_executed beta_700_qc_results'
    },
    'bback': {
        'long_name': 'Total Optical Backscatter at 700 nm',
        'units': 'm-1',
        'comment': ('Total (particulate + water) optical backscatter at 700 nm, derived from the Volume ' +
                    'Scattering Function and corrected for effects of temperature and salinity.'),
        'data_product_identifier': 'FLUBSCT_L2',
        'ancillary_variables': 'beta_700 temperature salinity bback_qc_executed bback_qc_results'
    },
    'practical_salinity': {
        'long_name': 'Practical Salinity',
        'standard_name': 'sea_water_practical_salinity',
        'units': '1',
        'comment': ('Normally this would be seawater salinity data from a co-located CTD. However, data from ' +
                    'that sensor is unavailable. This value has been filled with NaNs to preserve the structure ' +
                    'of the data set.'),
        'data_product_identifier': 'PRACSAL_L2'
    },
    'seawater_temperature': {
        'long_name': 'Seawater Temperature',
        'standard_name': 'sea_water_temperature',
        'units': 'degree_Celsius',
        'comment': ('Normally this would be seawater temperature data from a co-located CTD. However, data from ' +
                    'that sensor is unavailable. This value has been filled with NaNs to preserve the structure ' +
                    'of the data set.'),
        'data_product_identifier': 'TEMPWAT_L1'
    }
})
def quality_checks(ds):
    """
    Assess the raw signals and the derived bio-optical parameters using a
    subset of the QARTOD flags to indicate the quality:

        1 = Pass
        3 = Suspect or of High Interest
        4 = Fail

    The returned flags represent the worst case assessment of the data
    quality for each observation.

    :param ds: dataset with the raw signal data and the calculated
        bio-optical parameters
    :return beta_flag: QARTOD quality flags for the backscatter measurements
    :return cdom_flag: QARTOD quality flags for the CDOM measurements
    :return chl_flag: QARTOD quality flags for the chlorophyll measurements
    """
    max_counts = 4115  # counts should be greater than 0 and less than 4120 +/- 5
    # start all three flag arrays at 1 (pass, no errors)
    passing = ds['time'].astype('int32') * 0 + 1
    beta_flag = passing.copy()
    cdom_flag = passing.copy()
    chl_flag = passing.copy()
    # raw measurements at zero or above the saturation threshold are off scale
    beta_flag[(ds.raw_backscatter == 0) | (ds.raw_backscatter > max_counts)] = 4
    cdom_flag[(ds.raw_cdom == 0) | (ds.raw_cdom > max_counts)] = 4
    chl_flag[(ds.raw_chlorophyll == 0) | (ds.raw_chlorophyll > max_counts)] = 4
    # derived measurements outside the vendor measurement ranges fail
    beta_flag[(ds.bback < 0) | (ds.bback > 5)] = 4
    cdom_flag[(ds.fluorometric_cdom < 0) | (ds.fluorometric_cdom > 375)] = 4
    chl_flag[(ds.estimated_chlorophyll < 0) | (ds.estimated_chlorophyll > 50)] = 4
    return beta_flag, cdom_flag, chl_flag
def flort_datalogger(ds, burst=True):
    """
    Takes flort data recorded by the data loggers used in the CGSN/EA moorings
    and cleans up the data set to make it more user-friendly. Primary task is
    renaming parameters and dropping some that are of limited use. Additionally,
    re-organize some of the variables to permit better assessments of the data.

    :param ds: initial flort data set downloaded from OOI via the M2M system
    :param burst: resample the data to the defined time interval
    :return ds: cleaned up data set
    """
    # drop some of the variables:
    #   internal_timestamp == superseded by time, redundant so can remove
    #   suspect_timestamp = not used
    #   measurement_wavelength_* == metadata, move into variable attributes.
    #   pressure_depth == variable assigned if this was a FLORT on a CSPP, not with moorings
    #   seawater_scattering_coefficient == not used
    ds = ds.drop(['internal_timestamp', 'suspect_timestamp', 'measurement_wavelength_beta',
                  'measurement_wavelength_cdom', 'measurement_wavelength_chl', 'seawater_scattering_coefficient'])
    # check for data from a co-located CTD, if not present add NaN-filled
    # placeholders with appropriate attributes to preserve the set structure
    if 'temp' not in ds.variables:
        ds['temp'] = ('time', ds['deployment'] * np.nan)
        ds['practical_salinity'] = ('time', ds['deployment'] * np.nan)
    # lots of renaming here to get a better defined data set with cleaner attributes
    rename = {
        'temp': 'seawater_temperature',
        'raw_signal_chl': 'raw_chlorophyll',
        'fluorometric_chlorophyll_a': 'estimated_chlorophyll',
        'fluorometric_chlorophyll_a_qc_executed': 'estimated_chlorophyll_qc_executed',
        'fluorometric_chlorophyll_a_qc_results': 'estimated_chlorophyll_qc_results',
        'raw_signal_cdom': 'raw_cdom',
        'raw_signal_beta': 'raw_backscatter',
        'total_volume_scattering_coefficient': 'beta_700',
        'total_volume_scattering_coefficient_qc_executed': 'beta_700_qc_executed',
        'total_volume_scattering_coefficient_qc_results': 'beta_700_qc_results',
        'optical_backscatter': 'bback',
        'optical_backscatter_qc_executed': 'bback_qc_executed',
        'optical_backscatter_qc_results': 'bback_qc_results',
    }
    ds = ds.rename(rename)
    # reset some attributes using the values defined in ATTRS above
    for key, value in ATTRS.items():
        for atk, atv in value.items():
            if key in ds.variables:
                ds[key].attrs[atk] = atv
    # add the original variable name as an attribute, if renamed
    for key, value in rename.items():
        ds[value].attrs['ooinet_variable_name'] = key
    # parse the OOI QC variables and add QARTOD style QC summary flags to the data, converting the
    # bitmap represented flags into an integer value representing pass == 1, suspect or of high
    # interest == 3, and fail == 4.
    ds = parse_qc(ds)
    # create qc flags for the data and add them to the OOI qc flags, keeping
    # the worst case (maximum) flag value for each observation
    beta_flag, cdom_flag, chl_flag = quality_checks(ds)
    ds['beta_700_qc_summary_flag'] = ('time', (np.array([ds.beta_700_qc_summary_flag,
                                                         beta_flag])).max(axis=0))
    ds['fluorometric_cdom_qc_summary_flag'] = ('time', (np.array([ds.fluorometric_cdom_qc_summary_flag,
                                                                  cdom_flag])).max(axis=0))
    ds['estimated_chlorophyll_qc_summary_flag'] = ('time', (np.array([ds.estimated_chlorophyll_qc_summary_flag,
                                                                      chl_flag])).max(axis=0))
    if burst:
        # re-sample the data collected in burst mode using a 15-minute median average
        burst = ds.resample(time='900s', skipna=True).median(dim='time', keep_attrs=True)
        # for each of the three FLORT measurements, calculate stats (min, max, and the standard deviation)
        # for each of the bursts
        cdom = ds['fluorometric_cdom'].resample(time='900s', skipna=True)
        cdom = np.array([cdom.min('time').values, cdom.max('time').values, cdom.std('time').values])
        chl = ds['estimated_chlorophyll'].resample(time='900s', skipna=True)
        chl = np.array([chl.min('time').values, chl.max('time').values, chl.std('time').values])
        beta = ds['beta_700'].resample(time='900s', skipna=True)
        beta = np.array([beta.min('time').values, beta.max('time').values, beta.std('time').values])
        # create a data set with the burst statistics for the variables
        stats = xr.Dataset({
            'fluorometric_cdom_burst_stats': (['time', 'stats'], cdom.T),
            'estimated_chlorophyll_burst_stats': (['time', 'stats'], chl.T),
            'beta_700_burst_stats': (['time', 'stats'], beta.T)
        }, coords={'time': burst['time'], 'stats': np.arange(0, 3).astype('int32')})
        # add the stats into the burst averaged data set, and then remove the missing rows
        burst = burst.merge(stats)
        burst = burst.where(~np.isnan(burst.deployment), drop=True)
        # save the newly average data
        ds = burst
    return ds
def flort_instrument(ds):
    """
    Restructure FLORT data recorded by the Sea-Bird Electronics SBE16Plus used
    in the CGSN/EA moorings into a more user-friendly data set. Variables of
    limited use are removed, the remaining ones are renamed with updated
    attributes, and QARTOD-style quality summary flags are added.

    :param ds: initial flort data set downloaded from OOI via the M2M system
    :return ds: cleaned up data set
    """
    # variables of limited use: the internal timestamp duplicates time, the
    # suspect timestamp is unused, the measurement wavelengths are recorded in
    # the variable attributes instead, and the scattering coefficient is unused
    ds = ds.reset_coords()
    ds = ds.drop(['internal_timestamp', 'suspect_timestamp', 'measurement_wavelength_beta',
                  'measurement_wavelength_cdom', 'measurement_wavelength_chl', 'seawater_scattering_coefficient'])
    # rename the remaining variables to better defined names
    rename = {
        'temp': 'seawater_temperature',
        'raw_signal_chl': 'raw_chlorophyll',
        'fluorometric_chlorophyll_a': 'estimated_chlorophyll',
        'fluorometric_chlorophyll_a_qc_executed': 'estimated_chlorophyll_qc_executed',
        'fluorometric_chlorophyll_a_qc_results': 'estimated_chlorophyll_qc_results',
        'raw_signal_cdom': 'raw_cdom',
        'raw_signal_beta': 'raw_backscatter',
        'total_volume_scattering_coefficient': 'beta_700',
        'total_volume_scattering_coefficient_qc_executed': 'beta_700_qc_executed',
        'total_volume_scattering_coefficient_qc_results': 'beta_700_qc_results',
        'optical_backscatter': 'bback',
        'optical_backscatter_qc_executed': 'bback_qc_executed',
        'optical_backscatter_qc_results': 'bback_qc_results',
    }
    ds = ds.rename(rename)
    # apply the pre-defined attributes and record the original OOINet names
    for name in ATTRS:
        if name in ds.variables:
            ds[name].attrs.update(ATTRS[name])
    for original_name, new_name in rename.items():
        ds[new_name].attrs['ooinet_variable_name'] = original_name
    # convert the OOI bitmap QC flags into QARTOD-style summary flags (pass ==
    # 1, suspect or of high interest == 3, fail == 4) and merge them with the
    # results of the additional quality checks, keeping the worst case value
    ds = parse_qc(ds)
    beta_flag, cdom_flag, chl_flag = quality_checks(ds)
    for name, flag in [('beta_700', beta_flag), ('fluorometric_cdom', cdom_flag),
                       ('estimated_chlorophyll', chl_flag)]:
        summary = name + '_qc_summary_flag'
        ds[summary] = ('time', np.array([ds[summary], flag]).max(axis=0))
    return ds
def flort_cspp(ds):
    """
    Takes FLORT data recorded by the CSPP loggers used by the Endurance Array
    and cleans up the data set to make it more user-friendly. Primary task is
    renaming parameters and dropping some that are of limited use. Additionally,
    re-organize some of the variables to permit better assessments of the data.

    :param ds: initial FLORT data set downloaded from OOI via the M2M system
    :return ds: cleaned up data set
    """
    # drop some of the variables:
    #   internal_timestamp == superseded by time, redundant so can remove
    #   suspect_timestamp = not used
    #   measurement_wavelength_* == metadata, move into variable attributes.
    #   seawater_scattering_coefficient == not used
    ds = ds.reset_coords()
    ds = ds.drop(['internal_timestamp', 'suspect_timestamp', 'measurement_wavelength_beta',
                  'measurement_wavelength_cdom', 'measurement_wavelength_chl', 'seawater_scattering_coefficient'])
    # lots of renaming here to get a better defined data set with cleaner attributes
    rename = {
        'pressure': 'seawater_pressure',
        'pressure_qc_executed': 'seawater_pressure_qc_executed',
        'pressure_qc_results': 'seawater_pressure_qc_results',
        'temperature': 'seawater_temperature',
        'salinity': 'practical_salinity',
        'raw_signal_chl': 'raw_chlorophyll',
        'fluorometric_chlorophyll_a': 'estimated_chlorophyll',
        'fluorometric_chlorophyll_a_qc_executed': 'estimated_chlorophyll_qc_executed',
        'fluorometric_chlorophyll_a_qc_results': 'estimated_chlorophyll_qc_results',
        'raw_signal_cdom': 'raw_cdom',
        'raw_signal_beta': 'raw_backscatter',
        'total_volume_scattering_coefficient': 'beta_700',
        'total_volume_scattering_coefficient_qc_executed': 'beta_700_qc_executed',
        'total_volume_scattering_coefficient_qc_results': 'beta_700_qc_results',
        'optical_backscatter': 'bback',
        'optical_backscatter_qc_executed': 'bback_qc_executed',
        'optical_backscatter_qc_results': 'bback_qc_results',
    }
    ds = ds.rename(rename)
    # reset some attributes using the values defined in ATTRS above
    for key, value in ATTRS.items():
        for atk, atv in value.items():
            if key in ds.variables:
                ds[key].attrs[atk] = atv
    # add the original variable name as an attribute, if renamed
    for key, value in rename.items():
        ds[value].attrs['ooinet_variable_name'] = key
    # parse the OOI QC variables and add QARTOD style QC summary flags to the data, converting the
    # bitmap represented flags into an integer value representing pass == 1, suspect or of high
    # interest == 3, and fail == 4.
    ds = parse_qc(ds)
    # create qc flags for the data and add them to the OOI qc flags, keeping
    # the worst case (maximum) flag value for each observation
    beta_flag, cdom_flag, chl_flag = quality_checks(ds)
    ds['beta_700_qc_summary_flag'] = ('time', (np.array([ds.beta_700_qc_summary_flag,
                                                         beta_flag])).max(axis=0))
    ds['fluorometric_cdom_qc_summary_flag'] = ('time', (np.array([ds.fluorometric_cdom_qc_summary_flag,
                                                                  cdom_flag])).max(axis=0))
    ds['estimated_chlorophyll_qc_summary_flag'] = ('time', (np.array([ds.estimated_chlorophyll_qc_summary_flag,
                                                                      chl_flag])).max(axis=0))
    return ds
def flort_wfp(ds):
    """
    Takes FLORT data recorded by the Wire-Following Profilers (used by CGSN/EA
    as part of the coastal and global arrays) and cleans up the data set to
    make it more user-friendly. Primary task is renaming parameters and
    dropping some that are of limited use. Additionally, re-organize some of
    the variables to permit better assessments of the data.

    :param ds: initial FLORT data set downloaded from OOI via the M2M system
    :return ds: cleaned up data set
    """
    # drop some of the variables:
    #   internal_timestamp == superseded by time, redundant so can remove
    #   suspect_timestamp = not used
    #   measurement_wavelength_* == metadata, move into variable attributes.
    #   seawater_scattering_coefficient == not used
    #   raw_internal_temp == not available, NaN filled
    ds = ds.reset_coords()
    ds = ds.drop(['internal_timestamp', 'suspect_timestamp', 'measurement_wavelength_beta',
                  'measurement_wavelength_cdom', 'measurement_wavelength_chl', 'seawater_scattering_coefficient',
                  'raw_internal_temp'])
    # lots of renaming here to get a better defined data set with cleaner attributes
    rename = {
        'int_ctd_pressure': 'seawater_pressure',
        'ctdpf_ckl_seawater_temperature': 'seawater_temperature',
        'raw_signal_chl': 'raw_chlorophyll',
        'fluorometric_chlorophyll_a': 'estimated_chlorophyll',
        'fluorometric_chlorophyll_a_qc_executed': 'estimated_chlorophyll_qc_executed',
        'fluorometric_chlorophyll_a_qc_results': 'estimated_chlorophyll_qc_results',
        'raw_signal_cdom': 'raw_cdom',
        'raw_signal_beta': 'raw_backscatter',
        'total_volume_scattering_coefficient': 'beta_700',
        'total_volume_scattering_coefficient_qc_executed': 'beta_700_qc_executed',
        'total_volume_scattering_coefficient_qc_results': 'beta_700_qc_results',
        'optical_backscatter': 'bback',
        'optical_backscatter_qc_executed': 'bback_qc_executed',
        'optical_backscatter_qc_results': 'bback_qc_results',
    }
    ds = ds.rename(rename)
    # reset some attributes using the values defined in ATTRS above
    for key, value in ATTRS.items():
        for atk, atv in value.items():
            if key in ds.variables:
                ds[key].attrs[atk] = atv
    # add the original variable name as an attribute, if renamed
    for key, value in rename.items():
        ds[value].attrs['ooinet_variable_name'] = key
    # parse the OOI QC variables and add QARTOD style QC summary flags to the data, converting the
    # bitmap represented flags into an integer value representing pass == 1, suspect or of high
    # interest == 3, and fail == 4.
    ds = parse_qc(ds)
    # create qc flags for the data and add them to the OOI qc flags, keeping
    # the worst case (maximum) flag value for each observation
    beta_flag, cdom_flag, chl_flag = quality_checks(ds)
    ds['beta_700_qc_summary_flag'] = ('time', (np.array([ds.beta_700_qc_summary_flag,
                                                         beta_flag])).max(axis=0))
    ds['fluorometric_cdom_qc_summary_flag'] = ('time', (np.array([ds.fluorometric_cdom_qc_summary_flag,
                                                                  cdom_flag])).max(axis=0))
    ds['estimated_chlorophyll_qc_summary_flag'] = ('time', (np.array([ds.estimated_chlorophyll_qc_summary_flag,
                                                                      chl_flag])).max(axis=0))
    return ds
def main(argv=None):
    """Download, clean up and save FLORT data from OOI to a NetCDF file.

    The data are requested either for a full deployment (downloaded from the
    OOI Gold Copy THREDDS catalog) or for an explicit start/stop date range
    (requested via the M2M API). The platform-specific clean-up function is
    selected from the node name and the resulting data set is written to the
    output file named on the command line.

    :param argv: command line arguments (defaults to sys.argv when None)
    :raises SyntaxError: if neither a deployment number nor a start and stop
        date are specified
    :raises SystemExit: if no data are available for the request
    """
    args = inputs(argv)
    site = args.site
    node = args.node
    sensor = args.sensor
    method = args.method
    stream = args.stream
    deploy = args.deploy
    start = args.start
    stop = args.stop
    burst = args.burst

    # check that we are specifying either a deployment number or a date range.
    # NOTE: the original test (`if not deploy or (start and stop)`) rejected
    # the valid date-range-only case (making the M2M branch below unreachable)
    # and returned the exception instead of raising it.
    if not deploy and not (start and stop):
        raise SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')

    if deploy:
        # a deployment number was specified, get the data from the Gold Copy THREDDS server
        flort = load_gc_thredds(site, node, sensor, method, stream, ('.*deployment%04d.*FLORT.*\\.nc$' % deploy))

        # check to see if we downloaded any data
        if not flort:
            exit_text = ('Data unavailable for %s-%s-%s, %s, %s, deployment %d.' % (site, node, sensor, method,
                                                                                    stream, deploy))
            raise SystemExit(exit_text)
    else:
        # otherwise, request the data for download from OOINet via the M2M API using the specified dates
        r = m2m_request(site, node, sensor, method, stream, start, stop)
        if not r:
            exit_text = ('Request failed for %s-%s-%s, %s, %s, from %s to %s.' % (site, node, sensor, method,
                                                                                  stream, start, stop))
            raise SystemExit(exit_text)

        # Valid M2M request, start downloading the data
        flort = m2m_collect(r, '.*FLORT.*\\.nc$')

        # check to see if we downloaded any data
        if not flort:
            exit_text = ('Data unavailable for %s-%s-%s, %s, %s, from %s to %s.' % (site, node, sensor, method,
                                                                                    stream, start, stop))
            raise SystemExit(exit_text)

    # clean-up and reorganize the data based on the platform the sensor is on
    if node == 'SP001':
        # this FLORT is part of a CSPP
        flort = flort_cspp(flort)
    elif node == 'WFP01':
        # this FLORT is part of a Wire-Following Profiler
        flort = flort_wfp(flort)
    else:
        # this FLORT is on one of the moorings
        if method in ['telemetered', 'recovered_host']:
            flort = flort_datalogger(flort, burst)
        else:
            flort = flort_instrument(flort)

    # add the vocabulary metadata (e.g. the site depth) to the data set
    vocab = get_vocabulary(site, node, sensor)[0]
    flort = update_dataset(flort, vocab['maxdepth'])

    # save the data to disk, creating the output directory if needed
    out_file = os.path.abspath(args.outfile)
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    flort.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)
# run the module as a command line utility
if __name__ == '__main__':
    main()
| [
"ooi_data_explorations.common.update_dataset",
"os.path.abspath",
"ooi_data_explorations.common.m2m_collect",
"os.path.dirname",
"numpy.isnan",
"ooi_data_explorations.common.m2m_request",
"ooi_data_explorations.common.load_gc_thredds",
"numpy.array",
"ooi_data_explorations.qartod.qc_processing.parse... | [((9894, 9906), 'ooi_data_explorations.qartod.qc_processing.parse_qc', 'parse_qc', (['ds'], {}), '(ds)\n', (9902, 9906), False, 'from ooi_data_explorations.qartod.qc_processing import parse_qc\n'), ((14730, 14742), 'ooi_data_explorations.qartod.qc_processing.parse_qc', 'parse_qc', (['ds'], {}), '(ds)\n', (14738, 14742), False, 'from ooi_data_explorations.qartod.qc_processing import parse_qc\n'), ((18159, 18171), 'ooi_data_explorations.qartod.qc_processing.parse_qc', 'parse_qc', (['ds'], {}), '(ds)\n', (18167, 18171), False, 'from ooi_data_explorations.qartod.qc_processing import parse_qc\n'), ((21585, 21597), 'ooi_data_explorations.qartod.qc_processing.parse_qc', 'parse_qc', (['ds'], {}), '(ds)\n', (21593, 21597), False, 'from ooi_data_explorations.qartod.qc_processing import parse_qc\n'), ((22343, 22355), 'ooi_data_explorations.common.inputs', 'inputs', (['argv'], {}), '(argv)\n', (22349, 22355), False, 'from ooi_data_explorations.common import inputs, load_gc_thredds, m2m_collect, m2m_request, get_vocabulary, update_dataset, ENCODINGS\n'), ((24837, 24877), 'ooi_data_explorations.common.update_dataset', 'update_dataset', (['flort', "vocab['maxdepth']"], {}), "(flort, vocab['maxdepth'])\n", (24851, 24877), False, 'from ooi_data_explorations.common import inputs, load_gc_thredds, m2m_collect, m2m_request, get_vocabulary, update_dataset, ENCODINGS\n'), ((24922, 24951), 'os.path.abspath', 'os.path.abspath', (['args.outfile'], {}), '(args.outfile)\n', (24937, 24951), False, 'import os\n'), ((22978, 23078), 'ooi_data_explorations.common.load_gc_thredds', 'load_gc_thredds', (['site', 'node', 'sensor', 'method', 'stream', "('.*deployment%04d.*FLORT.*\\\\.nc$' % deploy)"], {}), "(site, node, sensor, method, stream, \n '.*deployment%04d.*FLORT.*\\\\.nc$' % deploy)\n", (22993, 23078), False, 'from ooi_data_explorations.common import inputs, load_gc_thredds, m2m_collect, m2m_request, get_vocabulary, update_dataset, 
ENCODINGS\n'), ((23528, 23588), 'ooi_data_explorations.common.m2m_request', 'm2m_request', (['site', 'node', 'sensor', 'method', 'stream', 'start', 'stop'], {}), '(site, node, sensor, method, stream, start, stop)\n', (23539, 23588), False, 'from ooi_data_explorations.common import inputs, load_gc_thredds, m2m_collect, m2m_request, get_vocabulary, update_dataset, ENCODINGS\n'), ((23934, 23967), 'ooi_data_explorations.common.m2m_collect', 'm2m_collect', (['r', '""".*FLORT.*\\\\.nc$"""'], {}), "(r, '.*FLORT.*\\\\.nc$')\n", (23945, 23967), False, 'from ooi_data_explorations.common import inputs, load_gc_thredds, m2m_collect, m2m_request, get_vocabulary, update_dataset, ENCODINGS\n'), ((24787, 24821), 'ooi_data_explorations.common.get_vocabulary', 'get_vocabulary', (['site', 'node', 'sensor'], {}), '(site, node, sensor)\n', (24801, 24821), False, 'from ooi_data_explorations.common import inputs, load_gc_thredds, m2m_collect, m2m_request, get_vocabulary, update_dataset, ENCODINGS\n'), ((24978, 25003), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (24993, 25003), False, 'import os\n'), ((25026, 25051), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (25041, 25051), False, 'import os\n'), ((10079, 10129), 'numpy.array', 'np.array', (['[ds.beta_700_qc_summary_flag, beta_flag]'], {}), '([ds.beta_700_qc_summary_flag, beta_flag])\n', (10087, 10129), True, 'import numpy as np\n'), ((10257, 10316), 'numpy.array', 'np.array', (['[ds.fluorometric_cdom_qc_summary_flag, cdom_flag]'], {}), '([ds.fluorometric_cdom_qc_summary_flag, cdom_flag])\n', (10265, 10316), True, 'import numpy as np\n'), ((10456, 10518), 'numpy.array', 'np.array', (['[ds.estimated_chlorophyll_qc_summary_flag, chl_flag]'], {}), '([ds.estimated_chlorophyll_qc_summary_flag, chl_flag])\n', (10464, 10518), True, 'import numpy as np\n'), ((12010, 12036), 'numpy.isnan', 'np.isnan', (['burst.deployment'], {}), '(burst.deployment)\n', (12018, 12036), True, 'import 
numpy as np\n'), ((14915, 14965), 'numpy.array', 'np.array', (['[ds.beta_700_qc_summary_flag, beta_flag]'], {}), '([ds.beta_700_qc_summary_flag, beta_flag])\n', (14923, 14965), True, 'import numpy as np\n'), ((15093, 15152), 'numpy.array', 'np.array', (['[ds.fluorometric_cdom_qc_summary_flag, cdom_flag]'], {}), '([ds.fluorometric_cdom_qc_summary_flag, cdom_flag])\n', (15101, 15152), True, 'import numpy as np\n'), ((15292, 15354), 'numpy.array', 'np.array', (['[ds.estimated_chlorophyll_qc_summary_flag, chl_flag]'], {}), '([ds.estimated_chlorophyll_qc_summary_flag, chl_flag])\n', (15300, 15354), True, 'import numpy as np\n'), ((18344, 18394), 'numpy.array', 'np.array', (['[ds.beta_700_qc_summary_flag, beta_flag]'], {}), '([ds.beta_700_qc_summary_flag, beta_flag])\n', (18352, 18394), True, 'import numpy as np\n'), ((18522, 18581), 'numpy.array', 'np.array', (['[ds.fluorometric_cdom_qc_summary_flag, cdom_flag]'], {}), '([ds.fluorometric_cdom_qc_summary_flag, cdom_flag])\n', (18530, 18581), True, 'import numpy as np\n'), ((18721, 18783), 'numpy.array', 'np.array', (['[ds.estimated_chlorophyll_qc_summary_flag, chl_flag]'], {}), '([ds.estimated_chlorophyll_qc_summary_flag, chl_flag])\n', (18729, 18783), True, 'import numpy as np\n'), ((21770, 21820), 'numpy.array', 'np.array', (['[ds.beta_700_qc_summary_flag, beta_flag]'], {}), '([ds.beta_700_qc_summary_flag, beta_flag])\n', (21778, 21820), True, 'import numpy as np\n'), ((21948, 22007), 'numpy.array', 'np.array', (['[ds.fluorometric_cdom_qc_summary_flag, cdom_flag]'], {}), '([ds.fluorometric_cdom_qc_summary_flag, cdom_flag])\n', (21956, 22007), True, 'import numpy as np\n'), ((22147, 22209), 'numpy.array', 'np.array', (['[ds.estimated_chlorophyll_qc_summary_flag, chl_flag]'], {}), '([ds.estimated_chlorophyll_qc_summary_flag, chl_flag])\n', (22155, 22209), True, 'import numpy as np\n'), ((11820, 11835), 'numpy.arange', 'np.arange', (['(0)', '(3)'], {}), '(0, 3)\n', (11829, 11835), True, 'import numpy as np\n')] |
import numpy as np
import os
import cv2
import pickle as pkl
import torch
from tqdm import tqdm
import pandas as pd
# Detection imports
from hpe3d.models import hmr
from hpe3d.utils.img_utils import FakeCamera
from hpe3d.utils.kp_utils import get_joints_from_bvh, bbox_from_kp2d
import hpe3d.utils.config as cfg
# Run the network on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# HMR regression model; called as spin(norm) below and returns
# (rotmat, poses, betas, camera) for an image crop.
spin = hmr()
# Root directory of the MHAD dataset (configured in hpe3d.utils.config).
dataset_path = cfg.ROOT_MHAD
def read_calib(cam):
    """Load the extrinsic and intrinsic calibration for one camera.

    Parameters
    ----------
    cam : str
        Camera identifier (e.g. 'k01'); selects the calibration files under
        <dataset_path>/Calibration.

    Returns
    -------
    R : (3, 3) ndarray
        World-to-camera rotation.
    T : (3,) ndarray
        World-to-camera translation, in the file's units (millimeters:
        callers divide by 1000 to get meters).
    K : ndarray
        Camera intrinsic matrix read from the OpenCV YAML file.
    """
    ext_name = os.path.join(dataset_path, 'Calibration', 'RwTw_%s.txt' % cam)
    int_name = os.path.join(dataset_path, 'Calibration', 'camcfg_%s.yml' % cam)
    # Extrinsics file has two 'name=v1 v2 ...' lines: rotation row-major,
    # then translation.
    with open(ext_name) as fh:
        rw = fh.readline().split('=')[1].rstrip().split(' ')
        tw = fh.readline().split('=')[1].rstrip().split(' ')
    R = np.array(rw, dtype=float).reshape(3, 3)
    T = np.array(tw, dtype=float)
    # cv2.FileStorage is not a context manager; release it explicitly so the
    # handle is not leaked (the original also shadowed the 'fh' name above).
    storage = cv2.FileStorage(int_name, cv2.FILE_STORAGE_READ)
    try:
        K = storage.getNode('Camera_1').getNode('K').mat()
    finally:
        storage.release()
    return R, T, K
def main(data_file):
    """Run the HMR/SPIN model over one MHAD Kinect sequence and pickle results.

    ``data_file`` is an underscore-separated sequence name; the first field is
    ignored and the remaining three are subject, action and repetition.
    Predictions and ground-truth annotations are written to
    <ROOT_HPE3D>/data/Predictions/pred_mhad_<kin>_<sub>_<act>_<rep>.pkl.
    """
    info = data_file.split('_')
    _, sub, act, rep = info
    # Kinect k01 is the frontal view kinect camera of the dataset
    kin = 'k01'
    rel_img_path = os.path.join('Kinect', kin, sub.upper(), act.upper(), rep.upper())
    # First frame is only used to determine the image size below.
    first_image = cv2.imread(os.path.join(dataset_path,
                                        rel_img_path,
                                        'kin_%s_%s_%s_%s_color_00000.ppm' % (kin, sub, act, rep)),
                           cv2.IMREAD_UNCHANGED)
    # Mocap <-> Kinect timestamp correspondence file; column 2 holds, per
    # Kinect frame, the matching mocap row index.
    ts_corr = os.path.join(dataset_path, 'Kinect', 'Correspondences',
                           'corr_moc_kin%s_%s_%s_%s.txt' % (kin[-2:], sub, act, rep))
    corr = pd.read_csv(ts_corr, sep=' ', header=None)[2]
    ds_length = len(corr)
    # joints reading
    j_file = os.path.join(dataset_path, 'Mocap', 'SkeletalData', data_file + '_pos.csv')
    joints_df = pd.read_csv(j_file)
    R, T, K = read_calib(kin)
    # Homogeneous world->camera transform; MHAD translations are stored in mm.
    T_C_W = np.eye(4, dtype=float)
    T_C_W[:3, :3] = R
    T_C_W[:3, 3] = T / 1000.  # mm to meter
    # save information in dictionary:
    pred_betas = []
    pred_poses = []
    pred_rotmat = []
    pred_camera = []
    # gt_poses / gt_betas are never filled in this script (no SMPL ground
    # truth is produced for MHAD); they are kept for a uniform output schema.
    gt_poses = []
    gt_betas = []
    gt_bboxes = []
    gt_kp2d = []
    gt_kp3d = []
    gt_T_CW = []
    gt_K = K
    image_size = list(first_image.shape[:-1])
    image_names = []
    depth_names = []
    for i in tqdm(range(ds_length)):
        image_name = 'kin_%s_%s_%s_%s_color_%05i.ppm' % (kin, sub, act, rep, i)
        depth_name = 'kin_%s_%s_%s_%s_depth_%05i.pgm' % (kin, sub, act, rep, i)
        image = cv2.imread(os.path.join(dataset_path, rel_img_path, image_name), cv2.IMREAD_UNCHANGED)
        # Depth not needed during preprocessing
        # depth = cv2.imread(os.path.join(dataset_path, rel_img_path, depth_name), cv2.IMREAD_UNCHANGED)
        # Mocap joints (world frame) for the mocap row matching this frame.
        joints_W = get_joints_from_bvh(joints_df.iloc[corr.iloc[i]])
        # World -> camera frame, then perspective division and intrinsics to
        # obtain 2D keypoints in pixel coordinates.
        joints_C = np.einsum('ij,bj->bi', R, joints_W) + T
        joints_C_hom = joints_C / joints_C[:, -1:]
        kp_2d = np.einsum('ij,bj->bi', gt_K, joints_C_hom)
        gt_bbox = [bbox_from_kp2d(kp_2d, image_size)]
        # Crops/normalizes the frame around the bbox for the network
        # (presumably det = crop, norm = network input — see FakeCamera).
        cam = FakeCamera(gt_bbox, K_r=gt_K, img_size=image_size)
        det, norm = cam.preprocess_image(image[np.newaxis])
        rotmat, poses, betas, camera = spin(norm)
        pred_betas.append(betas.squeeze().cpu().detach().numpy())
        pred_poses.append(poses.squeeze().cpu().detach().numpy())
        pred_rotmat.append(rotmat.squeeze().cpu().detach().numpy())
        pred_camera.append(camera.squeeze().cpu().detach().numpy())
        gt_bboxes.append(gt_bbox)
        gt_kp2d.append(kp_2d)
        gt_kp3d.append(joints_W)
        gt_T_CW.append(T_C_W)
        image_names.append(os.path.join(rel_img_path, image_name))
        depth_names.append(os.path.join(rel_img_path, depth_name))
    seq_pred = dict([
        ('pred_betas', pred_betas),
        ('pred_poses', pred_poses),
        ('pred_rotmats', pred_rotmat),
        ('pred_cameras', pred_camera),
        ('gt_poses', gt_poses),
        ('gt_betas', gt_betas),
        ('gt_bboxes', gt_bboxes),
        ('gt_kp2d', gt_kp2d),
        ('gt_kp3d', gt_kp3d),
        ('gt_T_CW', gt_T_CW),
        ('gt_K', gt_K),
        ('image_size', image_size),
        ('image_names', image_names),
        ('depth_names', depth_names)
    ])
    pred_folder = os.path.join(cfg.ROOT_HPE3D, 'data', 'Predictions')
    try:
        os.mkdir(pred_folder)
    except FileExistsError:
        pass
    ds_name = 'mhad'
    fname = 'pred_%s_%s_%s_%s_%s.pkl' % (ds_name, kin, sub, act, rep)
    with open(os.path.join(pred_folder, fname), 'wb') as handle:
        pkl.dump(seq_pred, handle)
if __name__ == "__main__":
    # Every '.bvh' mocap file names one sequence; strip the extension and
    # process each sequence in turn.
    data_folder = os.path.join(dataset_path, 'Mocap', 'SkeletalData')
    data_files = [f[:-4] for f in os.listdir(data_folder) if f.endswith('.bvh')]
    for data_file in data_files:
        print(data_file)
        main(data_file)
| [
"hpe3d.utils.img_utils.FakeCamera",
"os.mkdir",
"pickle.dump",
"pandas.read_csv",
"numpy.einsum",
"hpe3d.utils.kp_utils.get_joints_from_bvh",
"cv2.FileStorage",
"hpe3d.models.hmr",
"torch.cuda.is_available",
"numpy.array",
"torch.device",
"numpy.eye",
"hpe3d.utils.kp_utils.bbox_from_kp2d",
... | [((407, 412), 'hpe3d.models.hmr', 'hmr', ([], {}), '()\n', (410, 412), False, 'from hpe3d.models import hmr\n'), ((348, 373), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (371, 373), False, 'import torch\n'), ((324, 344), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (336, 344), False, 'import torch\n'), ((379, 398), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (391, 398), False, 'import torch\n'), ((482, 544), 'os.path.join', 'os.path.join', (['dataset_path', '"""Calibration"""', "('RwTw_%s.txt' % cam)"], {}), "(dataset_path, 'Calibration', 'RwTw_%s.txt' % cam)\n", (494, 544), False, 'import os\n'), ((560, 624), 'os.path.join', 'os.path.join', (['dataset_path', '"""Calibration"""', "('camcfg_%s.yml' % cam)"], {}), "(dataset_path, 'Calibration', 'camcfg_%s.yml' % cam)\n", (572, 624), False, 'import os\n'), ((881, 929), 'cv2.FileStorage', 'cv2.FileStorage', (['int_name', 'cv2.FILE_STORAGE_READ'], {}), '(int_name, cv2.FILE_STORAGE_READ)\n', (896, 929), False, 'import cv2\n'), ((1532, 1651), 'os.path.join', 'os.path.join', (['dataset_path', '"""Kinect"""', '"""Correspondences"""', "('corr_moc_kin%s_%s_%s_%s.txt' % (kin[-2:], sub, act, rep))"], {}), "(dataset_path, 'Kinect', 'Correspondences', \n 'corr_moc_kin%s_%s_%s_%s.txt' % (kin[-2:], sub, act, rep))\n", (1544, 1651), False, 'import os\n'), ((1792, 1867), 'os.path.join', 'os.path.join', (['dataset_path', '"""Mocap"""', '"""SkeletalData"""', "(data_file + '_pos.csv')"], {}), "(dataset_path, 'Mocap', 'SkeletalData', data_file + '_pos.csv')\n", (1804, 1867), False, 'import os\n'), ((1884, 1903), 'pandas.read_csv', 'pd.read_csv', (['j_file'], {}), '(j_file)\n', (1895, 1903), True, 'import pandas as pd\n'), ((1946, 1968), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (1952, 1968), True, 'import numpy as np\n'), ((4343, 4394), 'os.path.join', 'os.path.join', (['cfg.ROOT_HPE3D', '"""data"""', '"""Predictions"""'], 
{}), "(cfg.ROOT_HPE3D, 'data', 'Predictions')\n", (4355, 4394), False, 'import os\n'), ((4714, 4765), 'os.path.join', 'os.path.join', (['dataset_path', '"""Mocap"""', '"""SkeletalData"""'], {}), "(dataset_path, 'Mocap', 'SkeletalData')\n", (4726, 4765), False, 'import os\n'), ((1282, 1384), 'os.path.join', 'os.path.join', (['dataset_path', 'rel_img_path', "('kin_%s_%s_%s_%s_color_00000.ppm' % (kin, sub, act, rep))"], {}), "(dataset_path, rel_img_path, 'kin_%s_%s_%s_%s_color_00000.ppm' %\n (kin, sub, act, rep))\n", (1294, 1384), False, 'import os\n'), ((1685, 1727), 'pandas.read_csv', 'pd.read_csv', (['ts_corr'], {'sep': '""" """', 'header': 'None'}), "(ts_corr, sep=' ', header=None)\n", (1696, 1727), True, 'import pandas as pd\n'), ((2839, 2888), 'hpe3d.utils.kp_utils.get_joints_from_bvh', 'get_joints_from_bvh', (['joints_df.iloc[corr.iloc[i]]'], {}), '(joints_df.iloc[corr.iloc[i]])\n', (2858, 2888), False, 'from hpe3d.utils.kp_utils import get_joints_from_bvh, bbox_from_kp2d\n'), ((3015, 3057), 'numpy.einsum', 'np.einsum', (['"""ij,bj->bi"""', 'gt_K', 'joints_C_hom'], {}), "('ij,bj->bi', gt_K, joints_C_hom)\n", (3024, 3057), True, 'import numpy as np\n'), ((3128, 3178), 'hpe3d.utils.img_utils.FakeCamera', 'FakeCamera', (['gt_bbox'], {'K_r': 'gt_K', 'img_size': 'image_size'}), '(gt_bbox, K_r=gt_K, img_size=image_size)\n', (3138, 3178), False, 'from hpe3d.utils.img_utils import FakeCamera\n'), ((4412, 4433), 'os.mkdir', 'os.mkdir', (['pred_folder'], {}), '(pred_folder)\n', (4420, 4433), False, 'import os\n'), ((4639, 4665), 'pickle.dump', 'pkl.dump', (['seq_pred', 'handle'], {}), '(seq_pred, handle)\n', (4647, 4665), True, 'import pickle as pkl\n'), ((2590, 2642), 'os.path.join', 'os.path.join', (['dataset_path', 'rel_img_path', 'image_name'], {}), '(dataset_path, rel_img_path, image_name)\n', (2602, 2642), False, 'import os\n'), ((2908, 2943), 'numpy.einsum', 'np.einsum', (['"""ij,bj->bi"""', 'R', 'joints_W'], {}), "('ij,bj->bi', R, joints_W)\n", (2917, 2943), 
True, 'import numpy as np\n'), ((3078, 3111), 'hpe3d.utils.kp_utils.bbox_from_kp2d', 'bbox_from_kp2d', (['kp_2d', 'image_size'], {}), '(kp_2d, image_size)\n', (3092, 3111), False, 'from hpe3d.utils.kp_utils import get_joints_from_bvh, bbox_from_kp2d\n'), ((3714, 3752), 'os.path.join', 'os.path.join', (['rel_img_path', 'image_name'], {}), '(rel_img_path, image_name)\n', (3726, 3752), False, 'import os\n'), ((3781, 3819), 'os.path.join', 'os.path.join', (['rel_img_path', 'depth_name'], {}), '(rel_img_path, depth_name)\n', (3793, 3819), False, 'import os\n'), ((4580, 4612), 'os.path.join', 'os.path.join', (['pred_folder', 'fname'], {}), '(pred_folder, fname)\n', (4592, 4612), False, 'import os\n'), ((4800, 4823), 'os.listdir', 'os.listdir', (['data_folder'], {}), '(data_folder)\n', (4810, 4823), False, 'import os\n'), ((844, 856), 'numpy.array', 'np.array', (['tw'], {}), '(tw)\n', (852, 856), True, 'import numpy as np\n'), ((791, 803), 'numpy.array', 'np.array', (['rw'], {}), '(rw)\n', (799, 803), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
from thes_graphics.heat_map_plot.plot_heat_map import plot_heat_map
# Number of heat-map cells; rounded up to the next perfect square so the
# values can be arranged on a square grid.
num_values = 9
reshape_len = math.ceil(math.sqrt(num_values))
# Keep num_values an int: np.linspace requires an integer sample count
# (math.pow returned a float, which raises TypeError on modern NumPy).
num_values = reshape_len ** 2
# Evenly spaced sample values over [-10, 20], laid out on the square grid.
arr = np.linspace(-10, 20, num_values)
arr_reshaped = np.reshape(arr, (reshape_len, reshape_len))
fig = plt.figure(1)
plot_heat_map(
    fig=fig,
    prior_skill_dist=((-5, -5), (5, 5)),
    heat_values=arr_reshaped,
    log=True
)
| [
"math.sqrt",
"matplotlib.pyplot.figure",
"numpy.reshape",
"numpy.linspace",
"thes_graphics.heat_map_plot.plot_heat_map.plot_heat_map"
] | [((215, 247), 'numpy.linspace', 'np.linspace', (['(-10)', '(20)', 'num_values'], {}), '(-10, 20, num_values)\n', (226, 247), True, 'import numpy as np\n'), ((304, 347), 'numpy.reshape', 'np.reshape', (['arr', '(reshape_len, reshape_len)'], {}), '(arr, (reshape_len, reshape_len))\n', (314, 347), True, 'import numpy as np\n'), ((355, 368), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (365, 368), True, 'import matplotlib.pyplot as plt\n'), ((369, 469), 'thes_graphics.heat_map_plot.plot_heat_map.plot_heat_map', 'plot_heat_map', ([], {'fig': 'fig', 'prior_skill_dist': '((-5, -5), (5, 5))', 'heat_values': 'arr_reshaped', 'log': '(True)'}), '(fig=fig, prior_skill_dist=((-5, -5), (5, 5)), heat_values=\n arr_reshaped, log=True)\n', (382, 469), False, 'from thes_graphics.heat_map_plot.plot_heat_map import plot_heat_map\n'), ((266, 287), 'math.sqrt', 'math.sqrt', (['num_values'], {}), '(num_values)\n', (275, 287), False, 'import math\n'), ((182, 203), 'math.sqrt', 'math.sqrt', (['num_values'], {}), '(num_values)\n', (191, 203), False, 'import math\n')] |
# Joint IDs and Connectivity
#
# <NAME> <<EMAIL>>
import numpy as np
def get_joint_names_dict(joint_names):
    """Map each joint name in *joint_names* to its position index."""
    lookup = {}
    for index, name in enumerate(joint_names):
        lookup[name] = index
    return lookup
def get_ikea_joint_names():
    """Return the 17 joint names of the IKEA skeleton, in index order.

    Joint 0 is the nose; the remaining joints come in left/right pairs.
    """
    paired_parts = ["eye", "ear", "shoulder", "elbow", "wrist",
                    "hip", "knee", "ankle"]
    names = ["nose"]
    for part in paired_parts:
        names.append("left " + part)
        names.append("right " + part)
    return names
def get_ikea_connectivity():
    """Return the skeleton edges (pairs of joint indices) for the IKEA joint set."""
    edges = ((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (0, 6),
             (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (5, 11),
             (6, 12), (11, 12), (11, 13), (12, 14), (13, 15), (14, 16))
    return [list(edge) for edge in edges]
def get_ikea_joint_groups_names():
    """Return the joint-group names, matching get_ikea_joint_groups() order."""
    return "head shoulder elbow wrist hip knee ankle".split()
def get_ikea_joint_groups():
    """Return joint indices grouped by body part: head first, then left/right pairs."""
    groups = [list(range(5))]  # head: nose, both eyes, both ears
    for left in range(5, 17, 2):  # each remaining part is a (left, right) pair
        groups.append([left, left + 1])
    return groups
def get_ikea_joint_hflip_names():
    """Return the left<->right joint-name mapping used for horizontal flips."""
    flip = {}
    for part in ("eye", "ear", "shoulder", "elbow", "wrist",
                 "hip", "knee", "ankle"):
        flip["left " + part] = "right " + part
        flip["right " + part] = "left " + part
    return flip
def get_body25_joint_names():
    """Return the body-25 joint names (25 keypoints plus 'background'), in index order."""
    return (
        "nose, neck, right shoulder, right elbow, right wrist, "
        "left shoulder, left elbow, left wrist, mid hip, right hip, "
        "right knee, right ankle, left hip, left knee, left ankle, "
        "right eye, left eye, right ear, left ear, left big toe, "
        "left small toe, left heel, right big toe, right small toe, "
        "right heel, background"
    ).split(", ")
def get_body25_connectivity():
    """Return the skeleton edges (pairs of joint indices) for the body-25 joint set."""
    edges = ((0, 1), (1, 2), (2, 3), (3, 4), (1, 5), (5, 6), (6, 7),
             (1, 8), (8, 9), (9, 10), (10, 11), (8, 12), (12, 13), (13, 14),
             (0, 15), (0, 16), (15, 17), (16, 18), (2, 9), (5, 12),
             (11, 22), (11, 23), (11, 24), (14, 19), (14, 20), (14, 21))
    return [list(edge) for edge in edges]
def get_body21_joint_names():
    """Return the 21 body-21 joint names in index order."""
    return (
        "nose, neck, right shoulder, right elbow, right wrist, "
        "left shoulder, left elbow, left wrist, mid hip, right hip, "
        "right knee, right ankle, left hip, left knee, left ankle, "
        "right eye, left eye, right ear, left ear, "
        "neck (lsp), top of head (lsp)"
    ).split(", ")
def get_hmmr_joint_names():
    """Return the 25 HMMR joint names in index order."""
    return (
        "right ankle, right knee, right hip, left hip, left knee, "
        "left ankle, right wrist, right elbow, right shoulder, "
        "left shoulder, left elbow, left wrist, neck, top of head, nose, "
        "left eye, right eye, left ear, right ear, left big toe, "
        "right big toe, left small toe, right small toe, "
        "left heel, right heel"
    ).split(", ")
def get_h36m_joint_names():
    """Return the 17 Human3.6M joint names in index order.

    'spine' is the midpoint of neck and mid hip; 'head' is the midpoint of
    the two ears.
    """
    return (
        "mid hip, right hip, right knee, right ankle, "
        "left hip, left knee, left ankle, spine, neck, nose, head, "
        "left shoulder, left elbow, left wrist, "
        "right shoulder, right elbow, right wrist"
    ).split(", ")
def get_pose_colors(mode='rgb'):
    """
    Parameters
    ----------
    mode : rgb | bgr color format to return

    Returns
    -------
    list of part colors for skeleton visualization

    Raises
    ------
    ValueError if *mode* is neither 'rgb' nor 'bgr'.
    """

    # colormap from OpenPose: https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/3c9441ae62197b478b15c551e81c748ac6479561/include/openpose/pose/poseParametersRender.hpp
    # 24 RGB rows with float values in [0, 255].
    colors = np.array(
        [
            [255., 0., 85.],
            # [255., 0., 0.],
            [255., 85., 0.],
            [255., 170., 0.],
            [255., 255., 0.],
            [170., 255., 0.],
            [85., 255., 0.],
            [0., 255., 0.],
            [255., 0., 0.],
            [0., 255., 85.],
            [0., 255., 170.],
            [0., 255., 255.],
            [0., 170., 255.],
            [0., 85., 255.],
            [0., 0., 255.],
            [255., 0., 170.],
            [170., 0., 255.],
            [255., 0., 255.],
            [85., 0., 255.],
            [0., 0., 255.],
            [0., 0., 255.],
            [0., 0., 255.],
            [0., 255., 255.],
            [0., 255., 255.],
            [0., 255., 255.]])

    if mode == 'rgb':
        return colors
    elif mode == 'bgr':
        # Swap the R and B columns in place to convert RGB -> BGR.
        colors[:, [0, 2]] = colors[:, [2, 0]]
        return colors
    else:
raise ValueError('Invalid color mode, please specify rgb or bgr') | [
"numpy.array"
] | [((5516, 6039), 'numpy.array', 'np.array', (['[[255.0, 0.0, 85.0], [255.0, 85.0, 0.0], [255.0, 170.0, 0.0], [255.0, 255.0,\n 0.0], [170.0, 255.0, 0.0], [85.0, 255.0, 0.0], [0.0, 255.0, 0.0], [\n 255.0, 0.0, 0.0], [0.0, 255.0, 85.0], [0.0, 255.0, 170.0], [0.0, 255.0,\n 255.0], [0.0, 170.0, 255.0], [0.0, 85.0, 255.0], [0.0, 0.0, 255.0], [\n 255.0, 0.0, 170.0], [170.0, 0.0, 255.0], [255.0, 0.0, 255.0], [85.0, \n 0.0, 255.0], [0.0, 0.0, 255.0], [0.0, 0.0, 255.0], [0.0, 0.0, 255.0], [\n 0.0, 255.0, 255.0], [0.0, 255.0, 255.0], [0.0, 255.0, 255.0]]'], {}), '([[255.0, 0.0, 85.0], [255.0, 85.0, 0.0], [255.0, 170.0, 0.0], [\n 255.0, 255.0, 0.0], [170.0, 255.0, 0.0], [85.0, 255.0, 0.0], [0.0, \n 255.0, 0.0], [255.0, 0.0, 0.0], [0.0, 255.0, 85.0], [0.0, 255.0, 170.0],\n [0.0, 255.0, 255.0], [0.0, 170.0, 255.0], [0.0, 85.0, 255.0], [0.0, 0.0,\n 255.0], [255.0, 0.0, 170.0], [170.0, 0.0, 255.0], [255.0, 0.0, 255.0],\n [85.0, 0.0, 255.0], [0.0, 0.0, 255.0], [0.0, 0.0, 255.0], [0.0, 0.0, \n 255.0], [0.0, 255.0, 255.0], [0.0, 255.0, 255.0], [0.0, 255.0, 255.0]])\n', (5524, 6039), True, 'import numpy as np\n')] |
import calendar
import pickle as pkl
import pandas as pd
import numpy as np
import random
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score
# Reproducibility: seed both RNG sources used below.
seed = 42
random.seed(seed)
np.random.seed(seed)
#load datafile and create pickle file.
df = pd.read_csv('./kickstarter.csv')
file1 = open("model.pkl", "wb")
file2 = open("encoder.pkl", "wb")
file3 = open("scaler.pkl", "wb")
#convert months and days to a numeric equivalent.
# calendar.month_name[0] is '', so January maps to 1; Monday maps to 0.
months = list(calendar.month_name)
days = list(calendar.day_name)
df['month'] = df['month'].map(lambda x: months.index(x))
df['day'] = df['day'].map(lambda x: days.index(x))
#declare encoder for the non-numeric fields to be a binary.
# NOTE(review): the fitted encoder is pickled for downstream use but is not
# part of the pipeline below — the classifier trains on numeric columns only.
encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
encoder.fit(df[['category', 'subcategory', 'month', 'day', 'hour', 'state']])
#Create the min max scalar and apply it to our parameters. Drop all uneeded columns and store the column to be predicted as our y.
X = df.drop(columns=['Unnamed: 0', 'id', 'title', 'category', 'subcategory', 'blurb', 'launch', 'deadline', 'state', 'city', 'backers', 'pledged', 'ongoing', 'location', 'success'])
columns = X.columns
X = pd.DataFrame(X, columns=columns)
y = df['success']
#separate training and testing data for the model.
kf = KFold(n_splits=10)
scaler = MinMaxScaler()
scaler.fit(X)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)
#Create model, train it, and test it.
model = HistGradientBoostingClassifier()#learning_rate=0.1, loss='binary_crossentropy', max_bins=255, max_depth=3, max_iter=100, max_leaf_nodes=31, min_samples_leaf=10)
#hyperparameters had slightly lower results of 71.96 average accuracy
model.fit(X,y)
# 10-fold cross-validated accuracy of the scale+classify pipeline.
pipeline = Pipeline([('scaler', scaler), ('HGB Classifier', model)])
score = cross_val_score(pipeline, X, y, cv=kf, scoring='accuracy').mean()
print(score)
#pickle the model for future use
pkl.dump(model, file1)
pkl.dump(encoder, file2)
pkl.dump(scaler, file3)
file1.close()
file2.close()
file3.close() | [
"pandas.DataFrame",
"pickle.dump",
"sklearn.ensemble.HistGradientBoostingClassifier",
"numpy.random.seed",
"pandas.read_csv",
"sklearn.model_selection.cross_val_score",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.model_selection.KFold",
"random.seed",
"s... | [((477, 494), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (488, 494), False, 'import random\n'), ((495, 515), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (509, 515), True, 'import numpy as np\n'), ((560, 592), 'pandas.read_csv', 'pd.read_csv', (['"""./kickstarter.csv"""'], {}), "('./kickstarter.csv')\n", (571, 592), True, 'import pandas as pd\n'), ((988, 1040), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""', 'sparse': '(False)'}), "(handle_unknown='ignore', sparse=False)\n", (1001, 1040), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1457, 1489), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'columns'}), '(X, columns=columns)\n', (1469, 1489), True, 'import pandas as pd\n'), ((1565, 1583), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (1570, 1583), False, 'from sklearn.model_selection import train_test_split, KFold, cross_val_score\n'), ((1593, 1607), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1605, 1607), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1761, 1793), 'sklearn.ensemble.HistGradientBoostingClassifier', 'HistGradientBoostingClassifier', ([], {}), '()\n', (1791, 1793), False, 'from sklearn.ensemble import HistGradientBoostingClassifier\n'), ((2018, 2075), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('scaler', scaler), ('HGB Classifier', model)]"], {}), "([('scaler', scaler), ('HGB Classifier', model)])\n", (2026, 2075), False, 'from sklearn.pipeline import Pipeline\n'), ((2197, 2219), 'pickle.dump', 'pkl.dump', (['model', 'file1'], {}), '(model, file1)\n', (2205, 2219), True, 'import pickle as pkl\n'), ((2220, 2244), 'pickle.dump', 'pkl.dump', (['encoder', 'file2'], {}), '(encoder, file2)\n', (2228, 2244), True, 'import pickle as pkl\n'), ((2245, 2268), 'pickle.dump', 'pkl.dump', (['scaler', 'file3'], {}), '(scaler, file3)\n', (2253, 2268), True, 
'import pickle as pkl\n'), ((2084, 2142), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['pipeline', 'X', 'y'], {'cv': 'kf', 'scoring': '"""accuracy"""'}), "(pipeline, X, y, cv=kf, scoring='accuracy')\n", (2099, 2142), False, 'from sklearn.model_selection import train_test_split, KFold, cross_val_score\n')] |
"""
Benchmark the communication bandwidth with Ray + NCCL.
We use the python binding cupy.nccl to call NCCL.
Usage:
python3 profile_communication.py
"""
import argparse
import time
import os
import cupy as cp
from cupy.cuda import nccl
import numpy as np
import ray
MB = 1 << 20  # bytes in a mebibyte
GB = 1 << 30  # bytes in a gibibyte
def do_all_reduce(comm, in_buffer, out_buffer):
    """All-reduce *in_buffer* into *out_buffer* over *comm*.

    float32 elements, reduction op code 0 (NCCL sum), launched on the
    null CUDA stream (asynchronous).
    """
    comm.allReduce(
        in_buffer.data.ptr,
        out_buffer.data.ptr,
        in_buffer.size,
        nccl.NCCL_FLOAT32,
        0,
        cp.cuda.Stream.null.ptr,
    )
def do_all_gather(comm, in_buffer, out_buffer):
    """All-gather *in_buffer* from every rank into *out_buffer* over *comm*.

    float32 elements, launched on the null CUDA stream (asynchronous).
    """
    comm.allGather(
        in_buffer.data.ptr,
        out_buffer.data.ptr,
        in_buffer.size,
        nccl.NCCL_FLOAT32,
        cp.cuda.Stream.null.ptr,
    )
def do_send_recv(comm, buf, is_sender):
    """Point-to-point transfer of *buf* within a 2-rank communicator.

    The sender transmits to local rank 1, the receiver reads from local
    rank 0 (ranks are relative to *comm*'s group, where the sender is
    always placed at index 0 — see GpuHost.init_communicator).
    """
    if is_sender:
        comm.send(buf.data.ptr, buf.size, nccl.NCCL_FLOAT32,
                  1, cp.cuda.Stream.null.ptr)
    else:
        comm.recv(buf.data.ptr, buf.size, nccl.NCCL_FLOAT32,
                  0, cp.cuda.Stream.null.ptr)
@ray.remote(num_gpus=1)
class GpuHost:
    """Ray actor pinned to one GPU that runs NCCL bandwidth micro-benchmarks."""
    def __init__(self, rank, world_size, nccl_uuid_list):
        # rank: global index of this actor's GPU; world_size: total GPU count.
        # nccl_uuid_list: NCCL unique IDs pre-generated by the driver and shared
        # by all actors, so the i-th communicator created on every rank uses
        # the same ID.
        self.rank = rank
        self.world_size = world_size
        self.nccl_uuid_list = nccl_uuid_list
        self.ct = 0  # number of IDs already consumed from nccl_uuid_list
    def init_communicator(self, groups):
        """Create the NCCL communicator for the group containing this rank.

        Returns None (creating nothing) when the group spec is invalid:
        a device id beyond the world size, or a device appearing in more
        than one group. Every rank must call this with identical *groups*
        so the shared UUID sequence stays aligned across ranks.
        """
        if np.max(groups) >= self.world_size:
            return None
        if len(set(np.ravel(groups))) < len(np.ravel(groups)):
            return None
        comm = None
        for group in groups:
            # Consume one shared UUID per group on every rank to stay in sync,
            # even for groups this rank does not belong to.
            nccl_uuid = self.nccl_uuid_list[self.ct]
            self.ct += 1
            for device_id in group:
                if self.rank == device_id:
                    assert comm is None  # a rank may belong to at most one group
                    comm = cp.cuda.nccl.NcclCommunicator(
                        len(group), nccl_uuid, group.index(self.rank))
        cp.cuda.Device(0).synchronize()
        return comm
    def profile_allreduce(self, size, dtype, groups):
        """Benchmark all-reduce of *size* elements of *dtype*; rank 0 prints bandwidth."""
        comm = self.init_communicator(groups)
        if comm is None:
            return
        in_buffer = cp.ones(int(size), dtype)
        out_buffer = cp.ones(int(size), dtype)
        # Two warm-up calls so setup costs are excluded from the timing.
        do_all_reduce(comm, in_buffer, out_buffer)
        do_all_reduce(comm, in_buffer, out_buffer)
        # Enough iterations to move ~1 GiB total, clamped to [10, 8192].
        number = min(max(10, int((1 << 30) / (size * dtype().nbytes))), 1 << 13)
        cp.cuda.Device(0).synchronize()
        tic = time.time()
        for i in range(number):
            do_all_reduce(comm, in_buffer, out_buffer)
        cp.cuda.Device(0).synchronize()
        toc = time.time()
        if self.rank == 0:
            num_devices = len(groups[0])
            time_cost = (toc - tic) / number
            array_size = size * dtype().nbytes
            # Standard all-reduce bandwidth convention: 2*(n-1)/n of the
            # array crosses each link per operation.
            communication_size = 2 * array_size * (num_devices - 1) / num_devices
            bandwidth = communication_size / time_cost
            print(f"AllReduce: {groups}\tBytes: {array_size / GB:.5f} GB\t"
                  f"Time: {time_cost:.5f} s\tBandwidth: {bandwidth / (1<<30):.2f} GB/s")
    def profile_allgather(self, size, dtype, groups):
        """Benchmark all-gather producing *size* elements of *dtype*; rank 0 prints bandwidth."""
        comm = self.init_communicator(groups)
        if comm is None:
            return
        # Each rank contributes size/n elements; the gathered output has size elements.
        in_buffer = cp.ones(int(size) // len(groups[0]), dtype)
        out_buffer = cp.ones(int(size), dtype)
        do_all_gather(comm, in_buffer, out_buffer)
        number = min(max(10, int((1 << 30) / (size * dtype().nbytes))), 1 << 13)
        cp.cuda.Device(0).synchronize()
        tic = time.time()
        for i in range(number):
            do_all_gather(comm, in_buffer, out_buffer)
        cp.cuda.Device(0).synchronize()
        toc = time.time()
        if self.rank == 0:
            num_devices = len(groups[0])
            time_cost = (toc - tic) / number
            array_size = size * dtype().nbytes
            # All-gather moves (n-1)/n of the output array per device.
            communication_size = array_size * (num_devices - 1) / num_devices
            bandwidth = communication_size / time_cost
            print(f"AllGather: {groups}\tBytes: {array_size / GB:.5f} GB\t"
                  f"Time: {time_cost:.5f} s\tBandwidth: {bandwidth / (1<<30):.2f} GB/s")
    def profile_send_recv(self, size, dtype, from_rank, to_rank):
        """Benchmark a single point-to-point transfer; the sender prints bandwidth."""
        groups = [[from_rank, to_rank]]
        comm = self.init_communicator(groups)
        if comm is None:
            return
        buf = cp.ones(int(size), dtype)
        # Two warm-up transfers before timing.
        do_send_recv(comm, buf, self.rank == from_rank)
        do_send_recv(comm, buf, self.rank == from_rank)
        number = min(max(10, int((1 << 30) / (size * dtype().nbytes))), 1 << 13)
        cp.cuda.Device(0).synchronize()
        tic = time.time()
        for i in range(number):
            do_send_recv(comm, buf, self.rank == from_rank)
        cp.cuda.Device(0).synchronize()
        toc = time.time()
        if self.rank == from_rank:
            time_cost = (toc - tic) / number
            array_size = size * dtype().nbytes
            communication_size = array_size
            bandwidth = communication_size / time_cost
            print(f"SendRecv: {groups}\tBytes: {array_size / GB:.5f} GB\t"
                  f"Time: {time_cost:.5f} s\tBandwidth: {bandwidth / (1<<30):.2f} GB/s")
    def profile_multi_send_recv(self, size, dtype, groups):
        """Benchmark several concurrent 2-rank send/recv pairs; the first sender prints aggregate bandwidth."""
        comm = self.init_communicator(groups)
        time.sleep(1)
        # Second communicator spanning all participating ranks; its tiny
        # all-reduce acts as a barrier after each round of transfers.
        comm_sync = self.init_communicator([list(np.ravel(groups))])
        if comm is None or comm_sync is None:
            return
        assert all(len(group) == 2 for group in groups)
        senders = set(group[0] for group in groups)
        receivers = set(group[1] for group in groups)  # unused; kept for symmetry
        buf = cp.ones(int(size), dtype)
        buf_sync = cp.ones(1, dtype)
        # Warm up transfers and the synchronizing all-reduce.
        do_send_recv(comm, buf, self.rank in senders)
        do_send_recv(comm, buf, self.rank in senders)
        do_all_reduce(comm_sync, buf_sync, buf_sync)
        number = min(max(10, int((1 << 30) / (size * dtype().nbytes))), 1 << 13)
        cp.cuda.Device(0).synchronize()
        tic = time.time()
        for i in range(number):
            do_send_recv(comm, buf, self.rank in senders)
            do_all_reduce(comm_sync, buf_sync, buf_sync)
        cp.cuda.Device(0).synchronize()
        toc = time.time()
        if self.rank == groups[0][0]:
            time_cost = (toc - tic) / number
            array_size = size * dtype().nbytes
            communication_size = array_size
            # All pairs run concurrently, so the aggregate bandwidth scales
            # with the number of pairs.
            bandwidth = len(groups) * communication_size / time_cost
            print(f"SendRecv: {groups}\tBytes: {array_size / GB:.5f} GB\t"
                  f"Time: {time_cost:.5f} s\tBandwidth: {bandwidth / (1<<30):.2f} GB/s")
    def profile(self):
        """Run the benchmark suite (all-reduce, single and concurrent send/recv) at 2**29 float32 elements (2 GiB)."""
        # All-reduce
        for i in range(29, 30):
            self.profile_allreduce(1 << i, cp.float32, [list(range(self.world_size))])
            self.profile_allreduce(1 << i, cp.float32, [list(range(self.world_size//2))])
            #self.profile_allreduce(1 << i, cp.float32, [[0, 3]])
            #self.profile_allreduce(1 << i, cp.float32, [[0, 4], [1, 5], [2, 6], [3, 7]])
            #self.profile_allreduce(1 << i, cp.float32, [[0, 2, 4, 6], [1, 3, 5, 7]])
            #self.profile_allreduce(1 << i, cp.float32, [[0, 1, 2, 3], [4, 5, 6, 7]])
            #self.profile_allreduce(1 << i, cp.float32, [[0, 1, 2, 3, 4, 5, 6, 7]])
        # single Send-recv
        for i in range(29, 30):
            self.profile_send_recv(1 << i, cp.float32, 0, 1)
            self.profile_send_recv(1 << i, cp.float32, 0, self.world_size - 1)
        # multiple p2p Send-recv
        for i in range(29, 30):
            self.profile_multi_send_recv(1 << i, cp.float32, [[0, 1], [2, 3]])
            self.profile_multi_send_recv(1 << i, cp.float32, [[0, self.world_size - 4], [1, self.world_size - 3]])
            self.profile_multi_send_recv(1 << i, cp.float32, [[0, self.world_size - 2], [1, self.world_size - 1]])
            self.profile_multi_send_recv(1 << i, cp.float32, [[0, self.world_size - 4], [1, self.world_size - 3], [2, self.world_size - 2], [3, self.world_size - 1]])
            self.profile_multi_send_recv(1 << i, cp.float32, [[0, self.world_size - 8], [1, self.world_size - 7], [2, self.world_size - 6], [3, self.world_size - 5]])
            self.profile_multi_send_recv(1 << i, cp.float32, [[0, self.world_size - 8], [1, self.world_size - 7], [2, self.world_size - 6], [3, self.world_size - 5],
                                                              [4, self.world_size - 4], [5, self.world_size - 3], [6, self.world_size - 2], [7, self.world_size - 1]])
    def sync(self):
        """No-op; ray.get on this call blocks until prior actor tasks complete."""
        return
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--efa", action="store_true",
                        help="Use AWS EFS on p3.24 or p4.24 instances")
    parser.add_argument("--debug", action="store_true",
                        help="Print nccl debug information")
    args = parser.parse_args()
    # Attach to an already-running ray cluster and spawn one GpuHost per GPU.
    ray.init(address="auto")
    num_gpus = int(ray.cluster_resources()["GPU"])
    # Pre-generate a pool of NCCL unique ids shared by all workers so any
    # subset of them can form a communicator.
    nccl_uuid_list = [cp.cuda.nccl.get_unique_id() for _ in range(500)]
    workers = []
    for i in range(num_gpus):
        if args.efa:
            # AWS EFA transport: route NCCL through libfabric.
            env_vars = {
                "FI_PROVIDER": "efa",
                "FI_EFA_USE_DEVICE_RDMA": "1",
                "LD_LIBRARY_PATH": os.environ.get("LD_LIBRARY_PATH", ""),  # For libnccl-net.so
            }
        else:
            # Plain TCP sockets with extra threads per connection.
            env_vars = {
                "NCCL_SOCKET_NTHREADS": "4",
                "NCCL_NSOCKS_PERTHREAD": "4",
                "LD_LIBRARY_PATH": os.environ.get("LD_LIBRARY_PATH", ""),
            }
        if args.debug:
            env_vars["NCCL_DEBUG"] = "INFO"
        workers.append(GpuHost.options(runtime_env={"env_vars": env_vars})\
                       .remote(i, num_gpus, nccl_uuid_list))
    # Run the benchmarks, then block until every worker is done.
    ray.get([w.profile.remote() for w in workers])
    ray.get([w.sync.remote() for w in workers])
| [
"ray.init",
"ray.remote",
"argparse.ArgumentParser",
"numpy.ravel",
"cupy.ones",
"cupy.cuda.nccl.get_unique_id",
"time.sleep",
"time.time",
"os.environ.get",
"numpy.max",
"cupy.cuda.Device",
"ray.cluster_resources"
] | [((1030, 1052), 'ray.remote', 'ray.remote', ([], {'num_gpus': '(1)'}), '(num_gpus=1)\n', (1040, 1052), False, 'import ray\n'), ((8530, 8555), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8553, 8555), False, 'import argparse\n'), ((8803, 8827), 'ray.init', 'ray.init', ([], {'address': '"""auto"""'}), "(address='auto')\n", (8811, 8827), False, 'import ray\n'), ((2367, 2378), 'time.time', 'time.time', ([], {}), '()\n', (2376, 2378), False, 'import time\n'), ((2520, 2531), 'time.time', 'time.time', ([], {}), '()\n', (2529, 2531), False, 'import time\n'), ((3440, 3451), 'time.time', 'time.time', ([], {}), '()\n', (3449, 3451), False, 'import time\n'), ((3593, 3604), 'time.time', 'time.time', ([], {}), '()\n', (3602, 3604), False, 'import time\n'), ((4550, 4561), 'time.time', 'time.time', ([], {}), '()\n', (4559, 4561), False, 'import time\n'), ((4708, 4719), 'time.time', 'time.time', ([], {}), '()\n', (4717, 4719), False, 'import time\n'), ((5226, 5239), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5236, 5239), False, 'import time\n'), ((5598, 5615), 'cupy.ones', 'cp.ones', (['(1)', 'dtype'], {}), '(1, dtype)\n', (5605, 5615), True, 'import cupy as cp\n'), ((5914, 5925), 'time.time', 'time.time', ([], {}), '()\n', (5923, 5925), False, 'import time\n'), ((6123, 6134), 'time.time', 'time.time', ([], {}), '()\n', (6132, 6134), False, 'import time\n'), ((8902, 8930), 'cupy.cuda.nccl.get_unique_id', 'cp.cuda.nccl.get_unique_id', ([], {}), '()\n', (8928, 8930), True, 'import cupy as cp\n'), ((1306, 1320), 'numpy.max', 'np.max', (['groups'], {}), '(groups)\n', (1312, 1320), True, 'import numpy as np\n'), ((8847, 8870), 'ray.cluster_resources', 'ray.cluster_resources', ([], {}), '()\n', (8868, 8870), False, 'import ray\n'), ((1409, 1425), 'numpy.ravel', 'np.ravel', (['groups'], {}), '(groups)\n', (1417, 1425), True, 'import numpy as np\n'), ((1837, 1854), 'cupy.cuda.Device', 'cp.cuda.Device', (['(0)'], {}), '(0)\n', (1851, 1854), True, 
'import cupy as cp\n'), ((2321, 2338), 'cupy.cuda.Device', 'cp.cuda.Device', (['(0)'], {}), '(0)\n', (2335, 2338), True, 'import cupy as cp\n'), ((2474, 2491), 'cupy.cuda.Device', 'cp.cuda.Device', (['(0)'], {}), '(0)\n', (2488, 2491), True, 'import cupy as cp\n'), ((3394, 3411), 'cupy.cuda.Device', 'cp.cuda.Device', (['(0)'], {}), '(0)\n', (3408, 3411), True, 'import cupy as cp\n'), ((3547, 3564), 'cupy.cuda.Device', 'cp.cuda.Device', (['(0)'], {}), '(0)\n', (3561, 3564), True, 'import cupy as cp\n'), ((4504, 4521), 'cupy.cuda.Device', 'cp.cuda.Device', (['(0)'], {}), '(0)\n', (4518, 4521), True, 'import cupy as cp\n'), ((4662, 4679), 'cupy.cuda.Device', 'cp.cuda.Device', (['(0)'], {}), '(0)\n', (4676, 4679), True, 'import cupy as cp\n'), ((5868, 5885), 'cupy.cuda.Device', 'cp.cuda.Device', (['(0)'], {}), '(0)\n', (5882, 5885), True, 'import cupy as cp\n'), ((6077, 6094), 'cupy.cuda.Device', 'cp.cuda.Device', (['(0)'], {}), '(0)\n', (6091, 6094), True, 'import cupy as cp\n'), ((9166, 9203), 'os.environ.get', 'os.environ.get', (['"""LD_LIBRARY_PATH"""', '""""""'], {}), "('LD_LIBRARY_PATH', '')\n", (9180, 9203), False, 'import os\n'), ((9406, 9443), 'os.environ.get', 'os.environ.get', (['"""LD_LIBRARY_PATH"""', '""""""'], {}), "('LD_LIBRARY_PATH', '')\n", (9420, 9443), False, 'import os\n'), ((1384, 1400), 'numpy.ravel', 'np.ravel', (['groups'], {}), '(groups)\n', (1392, 1400), True, 'import numpy as np\n'), ((5289, 5305), 'numpy.ravel', 'np.ravel', (['groups'], {}), '(groups)\n', (5297, 5305), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""DeepLM io."""
import numpy as np
def load_bal_from_file(filename, feature_dim, camera_dim, point_dim, double=True):
    """Parse a BAL-style bundle-adjustment text file.

    Expected layout: a header line "num_cameras num_points num_observations",
    then one observation per line ("cam_idx point_idx x y"), then every camera
    parameter on its own line, then every point coordinate on its own line.

    :param filename: path of the text file to read
    :param feature_dim: row width of the returned feature array
    :param camera_dim: number of parameters per camera
    :param point_dim: number of coordinates per 3D point
    :param double: use float64 storage when True, float32 otherwise
    :return: (points, cameras, features, point_indices, camera_indices)
    """
    dtype = np.float64 if double else np.float32
    with open(filename, 'r') as f:
        num_cameras, num_points, num_observations = (int(v) for v in f.readline().strip().split())
        point_indices = []
        cam_indices = []
        t_camera = np.zeros((num_cameras, camera_dim)).astype(dtype)
        t_point = np.zeros((num_points, point_dim)).astype(dtype)
        t_feat = np.zeros((num_observations, feature_dim)).astype(dtype)
        for obs_idx in range(num_observations):
            if obs_idx % 1000 == 0:
                # progress on a single, rewritten console line
                print("\r Load observation {} of {}".format(obs_idx, num_observations), end="", flush=True)
            cam_idx, point_idx, x, y = f.readline().strip().split()
            point_indices.append(int(point_idx))
            cam_indices.append(int(cam_idx))
            t_feat[obs_idx] = [float(x), float(y)]
        for cam in range(num_cameras):
            t_camera[cam] = [float(f.readline().strip().split()[0]) for _ in range(camera_dim)]
        for pt in range(num_points):
            t_point[pt] = [float(f.readline().strip().split()[0]) for _ in range(point_dim)]
    return t_point, t_camera, t_feat, point_indices, cam_indices
| [
"numpy.zeros"
] | [((1065, 1100), 'numpy.zeros', 'np.zeros', (['(num_cameras, camera_dim)'], {}), '((num_cameras, camera_dim))\n', (1073, 1100), True, 'import numpy as np\n'), ((1133, 1166), 'numpy.zeros', 'np.zeros', (['(num_points, point_dim)'], {}), '((num_points, point_dim))\n', (1141, 1166), True, 'import numpy as np\n'), ((1198, 1239), 'numpy.zeros', 'np.zeros', (['(num_observations, feature_dim)'], {}), '((num_observations, feature_dim))\n', (1206, 1239), True, 'import numpy as np\n')] |
import pickle
import re
import tqdm
from typing import List, Tuple
import sling
import time
import os
from collections import defaultdict
import numpy as np
class SlingExtractor(object):
    """Lookup helper over a sling knowledge base (entities, properties, types)."""

    def load_kb(self, root_dir: str = 'local/data/e/wiki'):
        """Load the sling store and English phrase table from root_dir, then freeze it."""
        print('loading and indexing kb ...')
        start = time.time()
        self.kb = sling.Store()
        self.kb.load(os.path.join(root_dir, 'kb.sling'))
        self.phrase = sling.PhraseTable(self.kb, os.path.join(root_dir, 'en', 'phrase-table.repo'))
        self.kb.freeze()
        self.extract_property_names()
        print('loading took', (time.time() - start), 'secs')

    def extract_property_names(self):
        """Index all property frames (ids starting with 'P') as {id: [names]}."""
        print('storing property names ...')
        start = time.time()
        self.property_names = defaultdict(list)
        for frame in self.kb:
            if 'id' in frame and frame.id.startswith('P'):
                self.property_names[frame.id].append(frame.name)
        print('found', str(len(self.property_names)), 'properties')
        print('took', (time.time() - start), 'sec')

    @staticmethod
    def get_frame_id(frame):
        """Return a frame's id, following one 'is' indirection if needed; None otherwise."""
        if 'id' in frame:
            return frame.id
        if 'is' in frame:
            if type(frame['is']) != sling.Frame:
                return None
            if 'id' in frame['is']:
                return frame['is'].id
        return None

    @staticmethod
    def get_date_property(prop, tail):
        """Return (property_id, int_date) when prop targets /w/time, else None."""
        if 'target' not in prop:
            return None
        if prop.target.id != '/w/time':
            return None
        prop_id = SlingExtractor.get_frame_id(prop)
        if type(tail) == int:
            return (prop_id, tail)
        elif type(tail) == sling.Frame and 'is' in tail and type(tail['is']) == int:
            return (prop_id, tail['is'])
        return None

    @staticmethod
    def get_canonical_property(prop, tail):
        """Return (P-id, Q-id) for an entity-valued property, else None."""
        if type(prop) != sling.Frame or type(tail) != sling.Frame:
            return None
        prop_id = SlingExtractor.get_frame_id(prop)
        tail_id = SlingExtractor.get_frame_id(tail)
        if prop_id is None:
            return None
        if tail_id is None:
            return None
        if not prop_id.startswith('P') or not tail_id.startswith('Q'):
            return None
        return (prop_id, tail_id)

    def get_type(self, wid) -> str:
        """Return the name of the first P31/P279 type of entity wid, or None."""
        for type_prop in ['P31', 'P279']:
            try:
                return self.kb[self.kb[wid][type_prop].id].name
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            except Exception:
                pass
        return None

    def get_name(self, wid) -> str:
        """Return the human-readable name of entity wid."""
        return self.kb[wid].name

    def iter_property(self, wid: str, type: str = 'can', shuffle: bool = False):
        """List (property_id, [values]) groups of entity wid.

        :param wid: wikidata id of the entity
        :param type: 'can' for entity-valued properties, 'time' for date-valued ones
        :param shuffle: randomize the order of the returned groups
        :return: list of (property_id, [tail values]) pairs (empty if wid unknown)
        """
        tup_li: List[Tuple] = []
        if self.kb[wid] is None:
            return []
        for prop, tail in self.kb[wid]:
            tup = self.get_canonical_property(prop, tail)
            if tup is not None and type == 'can':
                # optional self.filter restricts to a whitelist of property ids
                if not hasattr(self, 'filter') or tup[0] in self.filter:
                    tup_li.append(tup)
                continue
            if tup is None:
                tup = self.get_date_property(prop, tail)
                if tup is not None and type == 'time':
                    if not hasattr(self, 'filter') or tup[0] in self.filter:
                        tup_li.append(tup)
                    continue
        # group tail values by property id
        group = defaultdict(list)
        for k, v in tup_li:
            group[k].append(v)
        result = list(group.items())
        if shuffle:
            np.random.shuffle(result)
        return result
def load_dbpedia_mapping(ttl_path='examples/language_model/wikitext103_seg/instance-types_specific.ttl'):
    """Parse a DBpedia instance-types ttl dump into {entity qid -> ontology category}.

    Each valid line is a 4-token triple; the qid is the last path segment of the
    subject URI and the category the last path segment of the object URI.

    :param ttl_path: path of the ttl file. Generalized from the previously
        hard-coded location; the default preserves the old behavior.
    :return: dict mapping qid strings to category strings
    """
    mapping = {}
    with open(ttl_path, encoding='utf-8') as mapping_f:
        for line in mapping_f:
            ls = line.strip().split()
            if len(ls) != 4:
                continue  # skip comments / malformed triples
            qid = ls[0].rstrip('>').split('/')[-1]
            category = ls[2].rstrip('>').split('/')[-1]
            mapping[qid] = category
    return mapping
# The DBpedia mapping is currently unused; domains are taken from the sling KB.
# mapping = load_dbpedia_mapping()
# Module-level singleton: the KB is loaded once at import time and reused by
# process_splits below.
se = SlingExtractor()
se.load_kb(root_dir='/home/fangzhex/sling_data')
def process_splits(pkl_path):
    """Segment one wikitext split into sections and dump parallel output files.

    Writes .txt (section texts), .txt.docid (wikidata id per section),
    .txt.sec (section headers) and .txt.dom (';'-joined P31/P279 domains)
    next to the input pickle.

    :param pkl_path: path of the split pickle; each entry is expected to be
        (token_list, [(..., doc_qid), ...]) as produced upstream — TODO confirm.
    """
    # NOTE(review): pickle.load is only safe on trusted, locally produced files.
    with open(pkl_path, 'rb') as pkl_file:  # BUG FIX: the handle was never closed
        data = pickle.load(pkl_file)
    SECTION_PATTERN = re.compile(r"= = [^=]+ = =")
    segmented_texts = []
    corresponding_doc_ids = []
    section_names = []
    domain_types = []
    domain_not_found_count = 0
    for doc in tqdm.tqdm(data):
        text = ' '.join(doc[0])
        doc_id = doc[1][0][1]
        starts = [0]
        for m in re.finditer(SECTION_PATTERN, text):
            section_names.append(m.group(0))
            # collect 'instance of' (P31) / 'subclass of' (P279) targets as domains
            domains = []
            for prop_id, values in se.iter_property(doc_id, type='can'):
                if prop_id in {'P31', 'P279'}:
                    domains.extend(values)
            if len(domains) == 0:
                domain_not_found_count += 1
            domain_types.append(';'.join(domains))
            starts.append(m.start())
            # the text between the two most recent section starts
            segmented_texts.append(text[starts[-2]:starts[-1]])
            corresponding_doc_ids.append(doc_id)
    print('domain not found:', domain_not_found_count)
    # BUG FIX: rstrip('.pkl') strips any trailing '.', 'p', 'k', 'l' characters
    # (e.g. 'model.pkl' -> 'mode'); strip the suffix explicitly instead.
    base = pkl_path[:-len('.pkl')] if pkl_path.endswith('.pkl') else pkl_path
    outputs = [
        ('.txt', segmented_texts),
        ('.txt.docid', corresponding_doc_ids),
        ('.txt.sec', section_names),
        ('.txt.dom', domain_types),
    ]
    for suffix, out_lines in outputs:
        # BUG FIX: write through a context manager so handles are closed/flushed
        with open(base + suffix, 'w', encoding='utf-8') as out_file:
            out_file.write('\n'.join(out_lines))
if __name__ == '__main__':
    # Process all three wikitext-103 splits; requires the module-level `se`
    # (sling KB) to have loaded successfully at import time.
    process_splits('examples/language_model/wikitext103_seg/test.pkl')
    process_splits('examples/language_model/wikitext103_seg/valid.pkl')
    process_splits('examples/language_model/wikitext103_seg/train.pkl')
| [
"tqdm.tqdm",
"sling.Store",
"re.finditer",
"time.time",
"collections.defaultdict",
"os.path.join",
"numpy.random.shuffle",
"re.compile"
] | [((4187, 4214), 're.compile', 're.compile', (['"""= = [^=]+ = ="""'], {}), "('= = [^=]+ = =')\n", (4197, 4214), False, 'import re\n'), ((4363, 4378), 'tqdm.tqdm', 'tqdm.tqdm', (['data'], {}), '(data)\n', (4372, 4378), False, 'import tqdm\n'), ((310, 321), 'time.time', 'time.time', ([], {}), '()\n', (319, 321), False, 'import time\n'), ((340, 353), 'sling.Store', 'sling.Store', ([], {}), '()\n', (351, 353), False, 'import sling\n'), ((734, 745), 'time.time', 'time.time', ([], {}), '()\n', (743, 745), False, 'import time\n'), ((776, 793), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (787, 793), False, 'from collections import defaultdict\n'), ((3351, 3368), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3362, 3368), False, 'from collections import defaultdict\n'), ((4487, 4521), 're.finditer', 're.finditer', (['SECTION_PATTERN', 'text'], {}), '(SECTION_PATTERN, text)\n', (4498, 4521), False, 'import re\n'), ((375, 409), 'os.path.join', 'os.path.join', (['root_dir', '"""kb.sling"""'], {}), "(root_dir, 'kb.sling')\n", (387, 409), False, 'import os\n'), ((460, 509), 'os.path.join', 'os.path.join', (['root_dir', '"""en"""', '"""phrase-table.repo"""'], {}), "(root_dir, 'en', 'phrase-table.repo')\n", (472, 509), False, 'import os\n'), ((3497, 3522), 'numpy.random.shuffle', 'np.random.shuffle', (['result'], {}), '(result)\n', (3514, 3522), True, 'import numpy as np\n'), ((605, 616), 'time.time', 'time.time', ([], {}), '()\n', (614, 616), False, 'import time\n'), ((1039, 1050), 'time.time', 'time.time', ([], {}), '()\n', (1048, 1050), False, 'import time\n')] |
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
"""
All reading and writing operations of kapture objects in CSV like files
"""
import datetime
import io
import os
import os.path as path
import re
from collections import namedtuple
from typing import Any, List, Optional, Set, Type, Union
import numpy as np
import quaternion
import sys
import kapture
import kapture.io.features
# Package-wide logger re-exported for this module.
logger = kapture.logger
# Non-None when a tracer/debugger is attached; table_from_file uses it to
# return eagerly materialized rows for easier inspection.
gettrace = getattr(sys, 'gettrace', None)
# file names conventions: sub-path (relative to the kapture root) of the csv
# file storing each kapture data type.
CSV_FILENAMES = {
    # sensor definitions and raw records, under sensors/
    kapture.Sensors: path.join('sensors', 'sensors.txt'),
    kapture.Trajectories: path.join('sensors', 'trajectories.txt'),
    kapture.Rigs: path.join('sensors', 'rigs.txt'),
    kapture.RecordsCamera: path.join('sensors', 'records_camera.txt'),
    kapture.RecordsDepth: path.join('sensors', 'records_depth.txt'),
    kapture.RecordsLidar: path.join('sensors', 'records_lidar.txt'),
    kapture.RecordsWifi: path.join('sensors', 'records_wifi.txt'),
    kapture.RecordsBluetooth: path.join('sensors', 'records_bluetooth.txt'),
    kapture.RecordsGnss: path.join('sensors', 'records_gnss.txt'),
    kapture.RecordsAccelerometer: path.join('sensors', 'records_accelerometer.txt'),
    kapture.RecordsGyroscope: path.join('sensors', 'records_gyroscope.txt'),
    kapture.RecordsMagnetic: path.join('sensors', 'records_magnetic.txt'),
    # reconstruction outputs, under reconstruction/
    kapture.Points3d: path.join('reconstruction', 'points3d.txt'),
    kapture.Keypoints: path.join('reconstruction', 'keypoints', 'keypoints.txt'),
    kapture.Descriptors: path.join('reconstruction', 'descriptors', 'descriptors.txt'),
    kapture.GlobalFeatures: path.join('reconstruction', 'global_features', 'global_features.txt'),
    kapture.Observations: path.join('reconstruction', 'observations.txt'),
}
def get_csv_fullpath(kapture_type: Any, kapture_dirpath: str = '') -> str:
    """
    Build the full path of the csv file storing the given kapture data type,
    i.e. the kapture root joined with that type's conventional sub-path.

    :param kapture_type: type of kapture data (kapture.RecordsCamera, kapture.Trajectories, ...)
    :param kapture_dirpath: root kapture path
    :return: full path of csv file for that type of data
    """
    return path.join(kapture_dirpath, CSV_FILENAMES[kapture_type])
# Right-justify widths used per column family when writing csv tables.
PADDINGS = {
    'timestamp': [8],
    'device_id': [3],
    'pose': [4, 4, 4, 4, 4, 4, 4],
}
# Header line written at the top of every kapture csv file.
KAPTURE_FORMAT_1 = "# kapture format: 1.0"
# Regex extracting the version number from such a header line.
KAPTURE_FORMAT_PARSING_RE = '# kapture format\\:\\s*(?P<version>\\d+\\.\\d+)'
# Line separator for the kapture csv files
kapture_linesep = '\n'
def get_version_from_header(header_string: str) -> Optional[str]:
    """
    Extract the kapture format version from a header line.

    :param header_string: a header string
    :return: version as string (i.e. '2.1') if found, None otherwise
    """
    match = re.search(KAPTURE_FORMAT_PARSING_RE, header_string)
    return match['version'] if match else None
def get_version_from_csv_file(csv_file_path: str) -> Optional[str]:
    """
    Read the kapture format version from the first line of a csv file.

    :param csv_file_path: path to the csv file
    :return: version as string (i.e. '2.1') if found, None otherwise
            (also None when the file does not exist)
    """
    if not path.isfile(csv_file_path):
        return None
    with open(csv_file_path) as opened:
        return get_version_from_header(opened.readline())
def current_format_version() -> str:
    """
    Give the kapture format version this module writes,
    parsed back out of the canonical header line.

    :return: format version
    """
    return get_version_from_header(KAPTURE_FORMAT_1)
def kapture_format_version(kapture_dirpath: str) -> Optional[str]:
    """
    Read the kapture format version of a dataset from its sensors csv file.

    :param kapture_dirpath: kapture directory root path
    :return: kapture format version if found.
    """
    sensors_path = path.join(kapture_dirpath, CSV_FILENAMES[kapture.Sensors])
    return get_version_from_csv_file(sensors_path)
def float_safe(representation) -> Optional[float]:
    """
    Attempt a float conversion; yield None instead of raising on failure.
    https://stackoverflow.com/questions/6330071/safe-casting-in-python

    :param representation: value to cast
    :return: float value if castable, None otherwise
    """
    try:
        value = float(representation)
    except (ValueError, TypeError):
        return None
    return value
def float_array_or_none(representation_list) -> Optional[List[float]]:
    """
    Convert a list of float representations to floats, or return None
    as soon as any single value is invalid.
    https://stackoverflow.com/questions/6330071/safe-casting-in-python

    :param representation_list: list of values to convert
    :return: list of floats, or None if any value is invalid
    """
    values = []
    for representation in representation_list:
        value = float_safe(representation)
        if value is None:
            return None
        values.append(value)
    return values
def table_to_file(file, table, header=None, padding=None) -> int:
    """
    Write a table (iterable of iterables) to an already-opened file.
    When a header is given, the kapture format line is written first.

    :param file: file id opened in write mode (with open(filepath, 'w') as file:)
    :param table: an iterable of iterable (consumed once if a generator)
    :param header: row added at the beginning of the file (+\n)
    :param padding: right-justify widths, one per column of the table rows
    :return: number of records written
    """
    if header:
        file.write(KAPTURE_FORMAT_1 + kapture_linesep)
        file.write(header + kapture_linesep)
    nb_records = 0
    for row in table:
        cells = row
        if padding:
            cells = [str(cell).rjust(padding[idx]) for idx, cell in enumerate(row)]
        file.write(', '.join(f'{cell}' for cell in cells) + kapture_linesep)
        nb_records += 1
    return nb_records
# comma separator, with optional surrounding whitespace
SPLIT_PATTERN = re.compile(r'\s*,\s*')


def table_from_file(file):
    """
    Read all rows of a kapture csv file, skipping comments and blank lines.

    :param file: file id opened in read mode (with open(filepath, 'r') as file:)
    :return: list of rows, each a list of stripped string cells
    """
    raw_lines = file.readlines()
    if gettrace is not None and gettrace():
        # debugger attached: split with the compiled pattern and materialize now
        kept = (line.rstrip("\n\r") for line in raw_lines if line.strip() and not line.startswith('#'))
        return [re.split(SPLIT_PATTERN, line) for line in kept]
    rows = []
    for raw_line in raw_lines:
        # drop comment lines and empty lines
        if not raw_line.strip() or raw_line.startswith('#'):
            continue
        # trim trailing EOL, then split on commas and strip each cell
        rows.append([cell.strip() for cell in raw_line.rstrip("\n\r").split(',')])
    return rows
def get_last_line(opened_file: io.TextIOBase, max_line_size: int = 128) -> str:
    """
    Return the last line of an opened text file without disturbing its position.

    :param opened_file: an opened file (as returned by the builtin open)
    :param max_line_size: the maximum size of a line
    :return: last line if found, empty string otherwise
    """
    lookback = 2 * max_line_size
    saved_pos = opened_file.tell()
    # measure the file
    opened_file.seek(0, os.SEEK_END)
    file_size = opened_file.tell()
    # big file: jump close to the end; small file: scan from the start
    scan_from = file_size - lookback if file_size > lookback else 0
    opened_file.seek(scan_from, os.SEEK_SET)
    last_line = ''
    while True:
        line = opened_file.readline()
        if not line:
            break
        last_line = line
    # restore the caller's position
    opened_file.seek(saved_pos, os.SEEK_SET)
    return last_line
########################################################################################################################
# poses ###############################################################################################################
def pose_to_list(pose: kapture.PoseTransform) -> List[Union[float, str]]:
    """
    Flatten a 6D pose into csv cells: 4 rotation values then 3 translation
    values; missing parts are written as empty strings.

    :param pose: 6D pose
    :return: list of float (or empty-string placeholders)
    """
    assert (isinstance(pose, kapture.PoseTransform))
    rotation = 4 * [''] if pose.r is None else pose.r_raw
    translation = 3 * [''] if pose.t is None else pose.t_raw
    return rotation + translation
########################################################################################################################
# Sensor ###############################################################################################################
def sensor_to_list(sensor: kapture.Sensor) -> List[str]:
    """
    Serialize a sensor into csv cells: name, type, then its parameters.

    :param sensor: sensor to serialize
    :return: list of strings
    """
    assert (isinstance(sensor, kapture.Sensor))
    fields = [sensor.name or '', sensor.sensor_type]
    fields.extend(str(v) for v in sensor.sensor_params)
    return fields
########################################################################################################################
# Sensors ##############################################################################################################
def sensors_to_file(filepath: str, sensors: kapture.Sensors) -> None:
    """
    Write the sensors to a CSV file, creating parent directories as needed.

    :param filepath: output file path
    :param sensors: sensors to write
    """
    assert (isinstance(sensors, kapture.Sensors))
    os.makedirs(path.dirname(filepath), exist_ok=True)
    header = '# sensor_id, name, sensor_type, [sensor_params]+'
    rows = ([sensor_id] + sensor_to_list(sensor)
            for sensor_id, sensor in sensors.items())
    with open(filepath, 'w') as file:
        table_to_file(file, rows, header=header)
def sensors_from_file(filepath: str) -> kapture.Sensors:
    """
    Read sensors from a CSV file.

    :param filepath: input file path
    :return: sensors
    """
    sensors = kapture.Sensors()
    with open(filepath) as file:
        rows = table_from_file(file)
    # each row: sensor_id, name, sensor_type, [sensor_params]+
    for sensor_id, name, sensor_type, *sensor_params in rows:
        sensors[sensor_id] = kapture.create_sensor(sensor_type=sensor_type,
                                                  sensor_params=sensor_params,
                                                  name=name)
    return sensors
########################################################################################################################
# Rig ##################################################################################################################
def rigs_to_file(filepath: str, rigs: kapture.Rigs) -> None:
    """
    Write rigs to a CSV file: one row per (rig, sensor) pose.

    :param filepath: output file path
    :param rigs: rigs to write
    """
    assert (isinstance(rigs, kapture.Rigs))
    os.makedirs(path.dirname(filepath), exist_ok=True)
    header = '# rig_id, sensor_id, qw, qx, qy, qz, tx, ty, tz'
    padding = PADDINGS['device_id'] + PADDINGS['device_id'] + PADDINGS['pose']
    rows = ([rig_id, sensor_id] + pose_to_list(pose)
            for rig_id, rig in rigs.items()
            for sensor_id, pose in rig.items())
    with open(filepath, 'w') as file:
        table_to_file(file, rows, header=header, padding=padding)
def rigs_from_file(filepath: str, sensor_ids: Optional[Set[str]] = None) -> kapture.Rigs:
    """
    Reads rigs from CSV file.

    :param filepath: input file path
    :param sensor_ids: input set of valid sensor ids.
                    If a rig id collides one of them, raise error.
                    If a sensor in rig is not in sensor_ids, it is ignored.
                    If None, no collision check nor filtering is done.
    :return: rigs
    """
    # rig_id, sensor_id, qw, qx, qy, qz, tx, ty, tz
    rigs = kapture.Rigs()
    with open(filepath) as file:
        table = table_from_file(file)
        for rig_id, sensor_id, qw, qx, qy, qz, tx, ty, tz in table:
            # BUG FIX: the collision check used to evaluate `rig_id in sensor_ids`
            # unconditionally, raising TypeError when sensor_ids is None (the default).
            if sensor_ids is not None and rig_id in sensor_ids:
                raise ValueError(f'collision between a sensor ID and rig ID ({rig_id})')
            rotation = float_array_or_none([qw, qx, qy, qz])
            translation = float_array_or_none([tx, ty, tz])
            pose = kapture.PoseTransform(rotation, translation)
            rigs[str(rig_id), sensor_id] = pose
    if sensor_ids is not None:
        # expunge all undesired sensors
        rig_ids = set(rigs)
        for rig_id in rig_ids:
            for sensor_id in set(rigs[rig_id]):
                if sensor_id not in sensor_ids and sensor_id not in rig_ids:
                    logger.debug(f'dropping sensor {sensor_id} from rig {rig_id} because it is unknown sensor.')
                    del rigs[rig_id][sensor_id]
    return rigs
########################################################################################################################
# Trajectories #########################################################################################################
def trajectories_to_file(filepath: str, trajectories: kapture.Trajectories) -> None:
    """
    Write trajectories to a CSV file, sorted by (timestamp, device), and
    log the number of records and the elapsed time.

    :param filepath: output file path
    :param trajectories: trajectories to write
    """
    assert (isinstance(trajectories, kapture.Trajectories))
    saving_start = datetime.datetime.now()
    os.makedirs(path.dirname(filepath), exist_ok=True)
    header = '# timestamp, device_id, qw, qx, qy, qz, tx, ty, tz'
    padding = PADDINGS['timestamp'] + PADDINGS['device_id'] + PADDINGS['pose']
    rows = (
        [timestamp, sensor_id] + pose_to_list(trajectories[(timestamp, sensor_id)])
        for timestamp, sensor_id in sorted(trajectories.key_pairs())
    )
    with open(filepath, 'w') as file:
        nb_records = table_to_file(file, rows, header=header, padding=padding)
    saving_elapsed = datetime.datetime.now() - saving_start
    logger.debug(f'wrote {nb_records:12,d} {type(trajectories)} in {saving_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
def trajectories_from_file(filepath: str, device_ids: Optional[Set[str]] = None) -> kapture.Trajectories:
    """
    Reads trajectories from CSV file.

    :param filepath: input file path
    :param device_ids: input set of valid device ids (rig or sensor).
                    If the trajectories contains unknown devices, they will be ignored.
                    If no device_ids given, everything is loaded.
    :return: trajectories
    """
    loading_start = datetime.datetime.now()
    with open(filepath) as file:
        table = table_from_file(file)
        nb_records = 0
        trajectories = kapture.Trajectories()
        # timestamp, device_id, qw, qx, qy, qz, tx, ty, tz
        for timestamp, device_id, qw, qx, qy, qz, tx, ty, tz in table:
            if device_ids is not None and device_id not in device_ids:
                # just ignore
                continue
            # bypass __init__: _r and _t are assigned directly below
            pose = kapture.PoseTransform.__new__(kapture.PoseTransform)
            if qw != '' and qx != '' and qy != '' and qz != '':
                rotation = quaternion.from_float_array([float(qw), float(qx), float(qy), float(qz)])
            else:
                rotation = None
            pose._r = rotation
            if tx != '' and ty != '' and tz != '':
                # BUG FIX: np.float was removed in numpy 1.24; it was a plain
                # alias of the builtin float, which is used here instead.
                trans = np.array([[float(tx)], [float(ty)], [float(tz)]], dtype=float)
            else:
                trans = None
            pose._t = trans
            trajectories.setdefault(int(timestamp), {})[device_id] = pose
            nb_records += 1
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'{nb_records:12,d} {kapture.Trajectories} in {loading_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
    return trajectories
########################################################################################################################
# Records Camera #######################################################################################################
def records_camera_to_file(filepath: str, records_camera: kapture.RecordsCamera) -> None:
    """
    Write camera records (timestamp, device_id, image_path) to a CSV file.

    :param filepath: output file path
    :param records_camera: camera records to write
    """
    assert (isinstance(records_camera, kapture.RecordsCamera))
    saving_start = datetime.datetime.now()
    header = '# timestamp, device_id, image_path'
    rows = (
        [timestamp, sensor_id, records_camera[(timestamp, sensor_id)]]
        for timestamp, sensor_id in sorted(records_camera.key_pairs())
    )
    with open(filepath, 'w') as file:
        nb_records = table_to_file(file, rows, header=header)
    saving_elapsed = datetime.datetime.now() - saving_start
    logger.debug(f'wrote {nb_records:12,d} {type(records_camera)} in {saving_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
def records_camera_from_file(filepath: str, camera_ids: Optional[Set[str]] = None) -> kapture.RecordsCamera:
    """
    Read camera records from a CSV file.

    :param filepath: input file path
    :param camera_ids: set of valid camera device ids; records of unknown
                       devices are ignored. When None, everything is loaded.
    :return: camera records
    """
    records_camera = kapture.RecordsCamera()
    loading_start = datetime.datetime.now()
    nb_records = 0
    with open(filepath) as file:
        # each row: timestamp, device_id, image_path
        for timestamp, device_id, image_path in table_from_file(file):
            if camera_ids is None or device_id in camera_ids:
                records_camera[(int(timestamp), str(device_id))] = image_path
                nb_records += 1
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'{nb_records:12,d} {kapture.RecordsCamera} in {loading_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
    return records_camera
########################################################################################################################
# Records Depth #######################################################################################################
def records_depth_to_file(filepath: str, records_depth: kapture.RecordsDepth) -> None:
    """
    Write depth records (timestamp, device_id, depth_map_path) to a CSV file.

    :param filepath: output file path
    :param records_depth: depth records to write
    """
    assert (isinstance(records_depth, kapture.RecordsDepth))
    saving_start = datetime.datetime.now()
    header = '# timestamp, device_id, depth_map_path'
    rows = (
        [timestamp, sensor_id, records_depth[(timestamp, sensor_id)]]
        for timestamp, sensor_id in sorted(records_depth.key_pairs())
    )
    with open(filepath, 'w') as file:
        nb_records = table_to_file(file, rows, header=header)
    saving_elapsed = datetime.datetime.now() - saving_start
    logger.debug(f'wrote {nb_records:12,d} {type(records_depth)} in {saving_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
def records_depth_from_file(filepath: str, camera_ids: Optional[Set[str]] = None) -> kapture.RecordsDepth:
    """
    Read depth records from a CSV file.

    :param filepath: input file path
    :param camera_ids: set of valid camera device ids; records of unknown
                       devices are ignored. When None, everything is loaded.
    :return: depth records
    """
    records_depth = kapture.RecordsDepth()
    loading_start = datetime.datetime.now()
    nb_records = 0
    with open(filepath) as file:
        # each row: timestamp, device_id, depth_map_path
        for timestamp, device_id, depth_map_path in table_from_file(file):
            if camera_ids is None or device_id in camera_ids:
                records_depth[(int(timestamp), str(device_id))] = depth_map_path
                nb_records += 1
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'{nb_records:12,d} {kapture.RecordsDepth} in {loading_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
    return records_depth
########################################################################################################################
# Records Lidar ########################################################################################################
def records_lidar_to_file(filepath: str, records_lidar: kapture.RecordsLidar) -> None:
    """
    Write lidar records (timestamp, device_id, point_cloud_path) to a CSV file.

    :param filepath: output file path
    :param records_lidar: lidar records to write
    """
    assert (isinstance(records_lidar, kapture.RecordsLidar))
    saving_start = datetime.datetime.now()
    header = '# timestamp, device_id, point_cloud_path'
    rows = (
        [timestamp, sensor_id, records_lidar[(timestamp, sensor_id)]]
        for timestamp, sensor_id in sorted(records_lidar.key_pairs())
    )
    with open(filepath, 'w') as file:
        nb_records = table_to_file(file, rows, header=header)
    saving_elapsed = datetime.datetime.now() - saving_start
    logger.debug(f'wrote {nb_records:12,d} {type(records_lidar)} in {saving_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
def records_lidar_from_file(filepath: str, lidar_ids: Optional[Set[str]] = None
                            ) -> kapture.RecordsLidar:
    """
    Reads lidar records from a CSV file.

    :param filepath: input file path
    :param lidar_ids: input set of valid device ids. Any record of other than the given ones will be ignored.
                      If omitted, then it loads all devices.
    :return: Lidar records
    """
    records_lidar = kapture.RecordsLidar()
    loading_start = datetime.datetime.now()
    nb_records = 0
    with open(filepath) as file:
        # each row is: timestamp, device_id, point_cloud_path
        for timestamp, device_id, point_cloud_path in table_from_file(file):
            if lidar_ids is not None and device_id not in lidar_ids:
                continue  # unknown device: ignore the record
            records_lidar[(int(timestamp), str(device_id))] = point_cloud_path
            nb_records += 1
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'{nb_records:12,d} {kapture.RecordsLidar} in {loading_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
    return records_lidar
########################################################################################################################
def records_generic_to_file(filepath: str, records: kapture.RecordsBase) -> None:
    """
    Writes generic sensor records to a CSV file.

    :param filepath: path where to save records file.
    :param records: records to save; any kapture.RecordsBase subtype.
    """
    assert isinstance(records, kapture.RecordsBase)
    saving_start = datetime.datetime.now()
    header = '# timestamp, device_id, ' + ', '.join(f.name for f in records.record_type.fields())
    # flatten to one CSV row per (timestamp, sensor, record) triplet
    # (comprehension instead of the former manual append loop)
    table = [
        [timestamp, sensor_id] + [str(v) for v in record.astuple()]
        for timestamp, sensor_id, record in kapture.flatten(records, is_sorted=True)
    ]
    with open(filepath, 'w') as file:
        nb_records = table_to_file(file, table, header=header)
    saving_elapsed = datetime.datetime.now() - saving_start
    logger.debug(f'wrote {nb_records:12,d} {type(records)} in {saving_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
def records_generic_from_file(records_type: Type, filepath: str, sensor_ids: Optional[Set[str]] = None
                              ) -> Union[kapture.RecordsBase,
                                         kapture.RecordsGnss,
                                         kapture.RecordsGyroscope,
                                         kapture.RecordsAccelerometer,
                                         kapture.RecordsMagnetic]:
    """
    Reads generic records data from a CSV file.

    :param records_type: type of records expected (eg RecordsWifi)
    :param filepath: input file path
    :param sensor_ids: input set of valid device ids. Any record of other than the given ones will be ignored.
                       If omitted, then it loads all devices.
    :return: records
    """
    records = records_type()
    loading_start = datetime.datetime.now()
    nb_records = 0
    with open(filepath) as file:
        # each row is: timestamp, device_id, followed by the record fields
        for timestamp_str, device_id_str, *data in table_from_file(file):
            device_id = str(device_id_str)
            if sensor_ids is not None and device_id not in sensor_ids:
                continue  # not a requested sensor: skip
            records[int(timestamp_str), device_id] = records_type.record_type(*data)
            nb_records += 1
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'{nb_records:12,d} {records_type} in {loading_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
    return records
# Records Wifi #########################################################################################################
def records_wifi_to_file(filepath: str, records_wifi: kapture.RecordsWifi) -> None:
    """
    Writes wifi records to a CSV file.

    :param filepath: output file path
    :param records_wifi: wifi records to save
    """
    assert isinstance(records_wifi, kapture.RecordsWifi)
    saving_start = datetime.datetime.now()
    header = '# timestamp, device_id, BSSID, frequency, RSSI, SSID, scan_time_start, scan_time_end'
    # one CSV row per access point seen at each (timestamp, sensor) pair
    table = [
        [timestamp, sensor_id, bssid] + [str(v) for v in record.astuple()]
        for timestamp, sensor_id in sorted(records_wifi.key_pairs())
        for bssid, record in records_wifi[timestamp, sensor_id].items()
    ]
    with open(filepath, 'w') as file:
        nb_records = table_to_file(file, table, header=header)
    saving_elapsed = datetime.datetime.now() - saving_start
    logger.debug(f'wrote {nb_records:12,d} {type(records_wifi)} in {saving_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
def records_wifi_from_file(filepath: str, sensor_ids: Optional[Set[str]] = None
                           ) -> kapture.RecordsWifi:
    """
    Reads wifi records from a CSV file.

    :param filepath: input file path
    :param sensor_ids: input set of valid device ids. Any record of other than the given ones will be ignored.
                       If omitted, then it loads all devices.
    :return: Wifi records
    """
    records_wifi = kapture.RecordsWifi()
    loading_start = datetime.datetime.now()
    nb_records = 0
    with open(filepath) as file:
        # each row is: timestamp, device_id, BSSID, frequency, RSSI, SSID, scan_time_start, scan_time_end
        for timestamp_str, device_id_str, bssid, frequency, rssi, ssid, scan_start, scan_end in table_from_file(file):
            record_key = (int(timestamp_str), str(device_id_str))
            if sensor_ids is not None and record_key[1] not in sensor_ids:
                continue  # not a requested sensor: skip
            if record_key not in records_wifi:
                records_wifi[record_key] = kapture.RecordWifi()
            records_wifi[record_key][bssid] = kapture.RecordWifiSignal(
                frequency, rssi, ssid, scan_start, scan_end)
            nb_records += 1
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'{nb_records:12,d} {kapture.RecordsWifi} in {loading_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
    return records_wifi
# Records Bluetooth ####################################################################################################
def records_bluetooth_to_file(filepath: str, records_bluetooth: kapture.RecordsBluetooth) -> None:
    """
    Writes Bluetooth records to file

    :param filepath: output file path.
    :param records_bluetooth: records to save
    """
    assert isinstance(records_bluetooth, kapture.RecordsBluetooth)
    saving_start = datetime.datetime.now()
    header = '# timestamp, device_id, address, RSSI, name'
    table = []
    for timestamp, sensor_id in sorted(records_bluetooth.key_pairs()):
        for address, bt_record in records_bluetooth[timestamp, sensor_id].items():
            table.append([timestamp, sensor_id, address] + [str(v) for v in bt_record.astuple()])
    with open(filepath, 'w') as file:
        nb_records = table_to_file(file, table, header=header)
    saving_elapsed = datetime.datetime.now() - saving_start
    # fix: apply the comma -> space substitution to the whole message.
    # previously the .replace was attached only to the second f-string fragment,
    # so the {nb_records:12,d} thousands separators were left as commas,
    # unlike every other *_to_file logger in this module.
    logger.debug(f'wrote {nb_records:12,d} {type(records_bluetooth)} in {saving_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
def records_bluetooth_from_file(filepath: str, sensor_ids: Optional[Set[str]] = None
                                ) -> kapture.RecordsBluetooth:
    """
    Reads Bluetooth records from a CSV file.

    :param filepath: input file path
    :param sensor_ids: input set of valid device ids. Any record of other than the given ones will be ignored.
                       If omitted, then it loads all devices.
    :return: Bluetooth records
    """
    records_bluetooth = kapture.RecordsBluetooth()
    loading_start = datetime.datetime.now()
    nb_records = 0
    with open(filepath) as file:
        # each row is: timestamp, device_id, address, RSSI, name
        for timestamp_str, device_id_str, address, rssi, name in table_from_file(file):
            record_key = (int(timestamp_str), str(device_id_str))
            if sensor_ids is not None and record_key[1] not in sensor_ids:
                continue  # not a requested sensor: skip
            if record_key not in records_bluetooth:
                records_bluetooth[record_key] = kapture.RecordBluetooth()
            records_bluetooth[record_key][address] = kapture.RecordBluetoothSignal(rssi=rssi, name=name)
            nb_records += 1
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'{nb_records:12,d} {kapture.RecordsBluetooth} in {loading_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
    return records_bluetooth
# GNSS #################################################################################################################
def records_gnss_to_file(filepath: str, records_gnss: kapture.RecordsGnss) -> None:
    """
    Writes GNSS records to file, using the generic record CSV layout.

    :param filepath: output file path.
    :param records_gnss: records to save
    """
    records_generic_to_file(filepath, records_gnss)
def records_gnss_from_file(filepath: str, sensor_ids: Optional[Set[str]] = None
                           ) -> kapture.RecordsGnss:
    """
    Reads GNSS records from a CSV file, using the generic record CSV layout.

    :param filepath: input file path
    :param sensor_ids: set of valid device ids; records of any other device are ignored.
                       If omitted, all devices are loaded.
    :return: GNSS records
    """
    records = records_generic_from_file(kapture.RecordsGnss, filepath, sensor_ids)
    return records
# Accelerometer ########################################################################################################
def records_accelerometer_to_file(filepath: str, records_accelerometer: kapture.RecordsAccelerometer) -> None:
    """
    Writes accelerometer records to file, using the generic record CSV layout.

    :param filepath: output file path.
    :param records_accelerometer: records to save
    """
    records_generic_to_file(filepath, records_accelerometer)
def records_accelerometer_from_file(filepath: str, sensor_ids: Optional[Set[str]] = None
                                    ) -> kapture.RecordsAccelerometer:
    """
    Reads accelerometer records from a CSV file, using the generic record CSV layout.

    :param filepath: input file path
    :param sensor_ids: set of valid device ids; records of any other device are ignored.
                       If omitted, all devices are loaded.
    :return: accelerometer records
    """
    records = records_generic_from_file(kapture.RecordsAccelerometer, filepath, sensor_ids)
    return records
# Gyroscope ########################################################################################################
def records_gyroscope_to_file(filepath: str, records_gyroscope: kapture.RecordsGyroscope) -> None:
    """
    Writes gyroscope records to file, using the generic record CSV layout.

    :param filepath: output file path.
    :param records_gyroscope: records to save
    """
    records_generic_to_file(filepath, records_gyroscope)
def records_gyroscope_from_file(filepath: str, sensor_ids: Optional[Set[str]] = None
                                ) -> kapture.RecordsGyroscope:
    """
    Reads gyroscope records from a CSV file, using the generic record CSV layout.

    :param filepath: input file path
    :param sensor_ids: set of valid device ids; records of any other device are ignored.
                       If omitted, all devices are loaded.
    :return: gyroscope records
    """
    records = records_generic_from_file(kapture.RecordsGyroscope, filepath, sensor_ids)
    return records
# Magnetic ########################################################################################################
def records_magnetic_to_file(filepath: str, records_magnetic: kapture.RecordsMagnetic) -> None:
    """
    Writes magnetic records to file, using the generic record CSV layout.

    :param filepath: output file path.
    :param records_magnetic: records to save
    """
    records_generic_to_file(filepath, records_magnetic)
def records_magnetic_from_file(filepath: str, sensor_ids: Optional[Set[str]] = None
                               ) -> kapture.RecordsMagnetic:
    """
    Reads magnetic records from a CSV file, using the generic record CSV layout.

    :param filepath: input file path
    :param sensor_ids: set of valid device ids; records of any other device are ignored.
                       If omitted, all devices are loaded.
    :return: magnetic records
    """
    records = records_generic_from_file(kapture.RecordsMagnetic, filepath, sensor_ids)
    return records
########################################################################################################################
# features #############################################################################################################
ImageFeatureConfig = namedtuple('ImageFeatureConfig', ['name', 'dtype', 'dsize'])
# config file ##########################################################################################################
def image_features_config_to_file(config_filepath: str, config: ImageFeatureConfig) -> None:
    """
    Writes a feature config file, ie. name, data type, and data size.

    :param config_filepath: input path to config file to write.
    :param config: input config to be written.
    """
    os.makedirs(path.dirname(config_filepath), exist_ok=True)
    header = "# name, dtype, dsize"
    # np.dtype instances stringify directly; python/numpy scalar types use their class name
    dtype_repr = str(config.dtype) if isinstance(config.dtype, np.dtype) else config.dtype.__name__
    with open(config_filepath, 'wt') as file:
        table_to_file(file, [[config.name, dtype_repr, str(config.dsize)]], header=header)
def image_features_config_from_file(config_filepath: str) -> ImageFeatureConfig:
    """
    Reads a feature config file, ie. name, data type, and data size.

    :param config_filepath: input path to config file to read.
    :return: config The read config.
    :raises FileNotFoundError: if the config file does not exist.
    :raises ValueError: if the stored dtype name cannot be resolved.
    """
    if not path.exists(config_filepath):
        raise FileNotFoundError(f'{path.basename(config_filepath)} file is missing')
    with open(config_filepath, 'rt') as file:
        table = table_from_file(file)
        line = list(table)[0]
    assert len(line) == 3
    name, dtype_name, dsize = line[0], line[1], int(line[2])
    # resolve the stored dtype name without eval() on file content:
    # - 'float'/'int' map to the python builtins, as the removed numpy aliases
    #   (np.float / np.int, dropped in numpy 1.24) used to;
    # - any other name ('float32', 'float64', 'int32', 'uint8', ...) is resolved
    #   through numpy's dtype machinery.
    builtin_types = {'float': float, 'int': int}
    if dtype_name in builtin_types:
        dtype = builtin_types[dtype_name]
    else:
        try:
            dtype = np.dtype(dtype_name).type
        except TypeError:
            raise ValueError(f'Expect data type, got {dtype_name!r}')
    return ImageFeatureConfig(name, dtype, dsize)
# files #########################################################################################################
def image_feature_to_file(config_filepath: str,
                          image_features: Union[kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures]
                          ) -> None:
    """
    Writes the config file of an image-feature container.

    :param config_filepath: eg. /path/to/keypoints/keypoints.txt
    :param image_features: feature data whose (type_name, dtype, dsize) triplet is saved.
    """
    image_features_config_to_file(
        config_filepath,
        ImageFeatureConfig(image_features.type_name, image_features.dtype, image_features.dsize))
def image_features_from_dir(kapture_type: Type[Union[kapture.Keypoints,
                                                     kapture.Descriptors,
                                                     kapture.GlobalFeatures]],
                            kapture_dirpath: str,
                            image_filenames: Optional[Set[str]]
                            ) -> kapture.ImageFeatures:
    """
    Reads and builds ImageFeatures from image_filenames if given, or directly from actual files in root_dirpath.

    :param kapture_type: kapture class type.
    :param kapture_dirpath: input path to kapture root directory.
    :param image_filenames: None or set of image relative paths.
    :return: Features
    """
    # the config file lives next to the feature data directory
    config_filepath = get_csv_fullpath(kapture_type, kapture_dirpath)
    config = image_features_config_from_file(config_filepath)
    if image_filenames is None:
        # no filter given: take every feature file found on disk
        found_filenames = kapture.io.features.image_ids_from_feature_dirpath(kapture_type, kapture_dirpath)
    else:
        # keep only the requested images whose feature file actually exists
        found_filenames = (
            image_name
            for image_name in image_filenames
            if path.exists(kapture.io.features.get_features_fullpath(kapture_type, kapture_dirpath, image_name)))
    return kapture_type(config.name, config.dtype, config.dsize, set(found_filenames))
########################################################################################################################
# keypoints ############################################################################################################
def keypoints_to_file(config_filepath: str, keypoints: kapture.Keypoints) -> None:
    """
    Writes the keypoints config to a CSV file.

    :param config_filepath: path to the keypoints config file
    :param keypoints: keypoints whose config is saved
    """
    return image_feature_to_file(config_filepath, keypoints)
def keypoints_from_dir(kapture_dirpath: str, images_paths: Optional[Set[str]]) -> kapture.Keypoints:
    """
    Reads and builds keypoints from images_paths if given, or directly from actual files in kapture_dirpath.

    :param kapture_dirpath: root path of kapture
    :param images_paths: optional set of image file names
    :return: Keypoints
    """
    return image_features_from_dir(kapture.Keypoints, kapture_dirpath, images_paths)
########################################################################################################################
# descriptors ##########################################################################################################
def descriptors_to_file(config_filepath: str, descriptors: kapture.Descriptors) -> None:
    """
    Writes the descriptors config to a CSV file.

    :param config_filepath: path to the descriptors config file
    :param descriptors: descriptors whose config is saved
    """
    return image_feature_to_file(config_filepath, descriptors)
def descriptors_from_dir(kapture_dirpath: str, images_paths: Set[str]) -> kapture.Descriptors:
    """
    Reads and builds descriptors from images_paths if given, or directly from actual files in kapture_dirpath.

    :param kapture_dirpath: root path of kapture
    :param images_paths: optional set of image file names
    :return: Descriptors
    """
    return image_features_from_dir(kapture.Descriptors, kapture_dirpath, images_paths)
########################################################################################################################
# global_features ######################################################################################################
def global_features_to_file(config_filepath: str, global_features: kapture.GlobalFeatures) -> None:
    """
    Writes the global features config to a CSV file.

    :param config_filepath: path to the global features config file
    :param global_features: global features whose config is saved
    """
    return image_feature_to_file(config_filepath, global_features)
def global_features_from_dir(kapture_dirpath: str, images_paths: Set[str]) -> kapture.GlobalFeatures:
    """
    Reads and builds global features from images_paths if given, or directly from actual files in kapture_dirpath.

    :param kapture_dirpath: root path of kapture
    :param images_paths: optional set of image file names
    :return: Global features
    """
    return image_features_from_dir(kapture.GlobalFeatures, kapture_dirpath, images_paths)
########################################################################################################################
# matches ##############################################################################################################
def matches_from_dir(kapture_dirpath: str,
                     image_filenames: Optional[Set[str]] = None,
                     matches_pairsfile_path: Optional[str] = None) -> kapture.Matches:
    """
    Reads and builds Matches from images_filenames if given, or directly from actual files in kapture_dirpath.

    :param kapture_dirpath: root path of kapture
    :param image_filenames: optional list of image file names
    :param matches_pairsfile_path: text file in the csv format; where each line is image_name1, image_name2, score
    :return: Matches
    """
    if matches_pairsfile_path is None:
        # populate files from disk
        match_pairs_generator = kapture.io.features.matching_pairs_from_dirpath(kapture_dirpath)
    else:
        loading_start = datetime.datetime.now()
        with open(matches_pairsfile_path, 'r') as fid:
            table = table_from_file(fid)
            # get matches list from pairsfile
            # pairs are normalized so the lexicographically smaller image name comes first
            match_pairs_generator = ((query_name, map_name) if query_name < map_name else (map_name, query_name)
                                     for query_name, map_name, _ in table)
            # keeps only the one that actually exists on disk
            match_pairs_generator = (image_pair
                                     for image_pair in match_pairs_generator
                                     if path.isfile(kapture.io.features.get_matches_fullpath(image_pair,
                                                                                             kapture_dirpath))
                                     )
        # NOTE(review): len(table) below assumes table_from_file returns a sized container,
        # and match_pairs_generator is consumed after the file is closed — confirm both
        # against table_from_file's implementation before restructuring this function.
        loading_elapsed = datetime.datetime.now() - loading_start
        logger.debug(f'{len(table)} {kapture.Matches} in {loading_elapsed.total_seconds():.3f} seconds')
    if image_filenames is not None:
        # retains only files that correspond to known images
        match_pairs_generator = (
            image_pair
            for image_pair in match_pairs_generator
            if image_pair[0] in image_filenames and image_pair[1] in image_filenames
        )
    # materialize the (lazily filtered) pairs into the final set
    match_pairs = set(match_pairs_generator)
    return kapture.Matches(match_pairs)
########################################################################################################################
# points3d #############################################################################################################
def points3d_to_file(filepath: str, points3d: kapture.Points3d) -> None:
    """
    Writes 3d points to a CSV file.

    :param filepath: path to CSV file
    :param points3d: the 3d points
    """
    assert isinstance(points3d, kapture.Points3d)
    os.makedirs(path.dirname(filepath), exist_ok=True)
    # one row per point: position (X, Y, Z) followed by color (R, G, B)
    np.savetxt(filepath, points3d.as_array(), delimiter=',', header='X, Y, Z, R, G, B')
def points3d_from_file(filepath: str) -> kapture.Points3d:
    """
    Reads 3d points from a CSV file.

    :param filepath: path to CSV file
    :return: the 3d points
    """
    loading_start = datetime.datetime.now()
    # np.float was removed in numpy 1.24; it was a deprecated alias of the builtin float,
    # which numpy maps to float64 — so behavior is unchanged.
    data = np.loadtxt(filepath, dtype=float, delimiter=',', comments='#')
    data = data.reshape((-1, 6))  # make sure of the shape, even if single line file.
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'{kapture.Points3d} in {loading_elapsed.total_seconds():.3f} seconds')
    return kapture.Points3d(data)
########################################################################################################################
# observations #########################################################################################################
def observations_to_file(observations_filepath: str, observations: kapture.Observations) -> None:
    """
    Writes observations to a CSV file.

    :param observations_filepath: input path to CSV file of observation to write.
                                  Containing directory is created if needed.
    :param observations: input observations to be written.
    """
    assert path.basename(observations_filepath) == 'observations.txt'
    assert isinstance(observations, kapture.Observations)
    saving_start = datetime.datetime.now()
    header = '# point3d_id, [image_path, feature_id]*'
    # one row per 3D point: its id followed by the flattened (image, keypoint) pairs
    rows = (
        [str(point3d_idx)] + [str(element) for pair in observations[point3d_idx] for element in pair]
        for point3d_idx in sorted(observations.keys())
    )
    os.makedirs(path.dirname(observations_filepath), exist_ok=True)
    with open(observations_filepath, 'w') as file:
        nb_records = table_to_file(file, rows, header=header)
    saving_elapsed = datetime.datetime.now() - saving_start
    logger.debug(f'wrote {nb_records:12,d} {type(observations)} in {saving_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
def observations_from_file(observations_filepath: str, images_paths_with_keypoints: Optional[Set[str]] = None
                           ) -> kapture.Observations:
    """
    Reads observations from a CSV file.

    :param observations_filepath: path to CSV file to read.
    :param images_paths_with_keypoints: input set of image names (ids) that have keypoints.
                                        If given, used to filter out irrelevant observations.
                                        You can get it from set(kapture.keypoints)
    :return: observations
    """
    assert path.basename(observations_filepath) == 'observations.txt'
    assert images_paths_with_keypoints is None \
        or (isinstance(images_paths_with_keypoints, set) and len(images_paths_with_keypoints) > 0)
    observations = kapture.Observations()
    loading_start = datetime.datetime.now()
    nb_records = 0
    with open(observations_filepath) as file:
        # each row is: point3d_id, then alternating image_path / feature_id columns
        for points3d_id_str, *pairs in table_from_file(file):
            if len(pairs) < 2:
                continue  # point without any complete observation pair
            points3d_id = int(points3d_id_str)
            for image_path, keypoint_id in zip(pairs[0::2], pairs[1::2]):
                if images_paths_with_keypoints is not None and image_path not in images_paths_with_keypoints:
                    # image_path does not exist in kapture (perhaps it was removed), ignore it
                    continue
                observations.add(points3d_id, image_path, int(keypoint_id))
                nb_records += 1
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'{nb_records:12,d} {kapture.Observations} in {loading_elapsed.total_seconds():.3f} seconds'
                 .replace(',', ' '))
    return observations
########################################################################################################################
# Kapture Write ########################################################################################################
# maps each kapture attribute type to the function that serializes it to its CSV file
KAPTURE_ATTRIBUTE_WRITERS = {
    kapture.Sensors: sensors_to_file,
    kapture.Rigs: rigs_to_file,
    kapture.Trajectories: trajectories_to_file,
    kapture.RecordsCamera: records_camera_to_file,
    kapture.RecordsDepth: records_depth_to_file,
    kapture.RecordsLidar: records_lidar_to_file,
    kapture.RecordsWifi: records_wifi_to_file,
    kapture.RecordsBluetooth: records_bluetooth_to_file,
    kapture.RecordsGnss: records_gnss_to_file,
    kapture.RecordsAccelerometer: records_accelerometer_to_file,
    kapture.RecordsGyroscope: records_gyroscope_to_file,
    kapture.RecordsMagnetic: records_magnetic_to_file,
    kapture.Keypoints: keypoints_to_file,
    kapture.Descriptors: descriptors_to_file,
    kapture.GlobalFeatures: global_features_to_file,
    kapture.Points3d: points3d_to_file,
    kapture.Observations: observations_to_file,
}
# maps each kapture attribute type to its attribute name on the kapture.Kapture object
KAPTURE_ATTRIBUTE_NAMES = {  # used to list attributes to be saved
    kapture.Sensors: 'sensors',
    kapture.Rigs: 'rigs',
    kapture.Trajectories: 'trajectories',
    kapture.RecordsCamera: 'records_camera',
    kapture.RecordsDepth: 'records_depth',
    kapture.RecordsLidar: 'records_lidar',
    kapture.RecordsWifi: 'records_wifi',
    kapture.RecordsBluetooth: 'records_bluetooth',
    kapture.RecordsGnss: 'records_gnss',
    kapture.RecordsAccelerometer: 'records_accelerometer',
    kapture.RecordsGyroscope: 'records_gyroscope',
    kapture.RecordsMagnetic: 'records_magnetic',
    kapture.Keypoints: 'keypoints',
    kapture.Descriptors: 'descriptors',
    kapture.GlobalFeatures: 'global_features',
    kapture.Points3d: 'points3d',
    kapture.Observations: 'observations',
}
def kapture_to_dir(kapture_dirpath: str, kapture_data: kapture.Kapture) -> None:
    """
    Saves kapture data to given directory.

    :param kapture_dirpath: kapture directory root path
    :param kapture_data: input kapture data
    """
    kapture_subtype_to_filepaths = {kapture_class: path.join(kapture_dirpath, filename)
                                    for kapture_class, filename in CSV_FILENAMES.items()}
    saving_start = datetime.datetime.now()
    # save each member of kapture data
    for kapture_class, kapture_member_name in KAPTURE_ATTRIBUTE_NAMES.items():
        # idiomatic getattr() instead of calling __getattribute__ directly
        part_data = getattr(kapture_data, kapture_member_name)
        if part_data is not None:
            # this part is populated: save it with its dedicated writer
            logger.debug(f'saving {kapture_member_name} ...')
            write_function = KAPTURE_ATTRIBUTE_WRITERS[kapture_class]
            write_function(kapture_subtype_to_filepaths[kapture_class], part_data)
    saving_elapsed = datetime.datetime.now() - saving_start
    logger.info(f'Saved in {saving_elapsed.total_seconds():.3f} seconds in "{kapture_dirpath}"')
# Kapture Read #########################################################################################################
# list all data members of kapture.
# every kapture attribute type that kapture_from_dir knows how to load from disk
KAPTURE_LOADABLE_TYPES = {
    kapture.Sensors,
    kapture.Rigs,
    kapture.Trajectories,
    kapture.RecordsCamera,
    kapture.RecordsDepth,
    kapture.RecordsLidar,
    kapture.RecordsWifi,
    kapture.RecordsBluetooth,
    kapture.RecordsGnss,
    kapture.RecordsAccelerometer,
    kapture.RecordsGyroscope,
    kapture.RecordsMagnetic,
    kapture.Keypoints,
    kapture.Descriptors,
    kapture.GlobalFeatures,
    kapture.Matches,
    kapture.Points3d,
    kapture.Observations,
}
def kapture_from_dir(
        kapture_dir_path: str,
        matches_pairs_file_path: Optional[str] = None,
        skip_list: Optional[List[Type[Union[kapture.Rigs,
                                            kapture.Trajectories,
                                            kapture.RecordsCamera,
                                            kapture.RecordsDepth,
                                            kapture.RecordsLidar,
                                            kapture.RecordsWifi,
                                            kapture.RecordsBluetooth,
                                            kapture.RecordsGnss,
                                            kapture.RecordsAccelerometer,
                                            kapture.RecordsGyroscope,
                                            kapture.RecordsMagnetic,
                                            kapture.Keypoints,
                                            kapture.Descriptors,
                                            kapture.GlobalFeatures,
                                            kapture.Matches,
                                            kapture.Points3d,
                                            kapture.Observations
                                            ]]]] = None
) -> kapture.Kapture:
    """
    Reads and return kapture data from directory.

    :param kapture_dir_path: kapture directory root path
    :param matches_pairs_file_path: text file in the csv format; where each line is image_name1, image_name2, score
    :param skip_list: Input option for expert only. Skip the load of specified parts.
    :return: kapture data read
    """
    if skip_list is None:
        # avoid a mutable default argument; None means "skip nothing" (same as the former [] default)
        skip_list = []
    if not path.isdir(kapture_dir_path):
        raise FileNotFoundError(f'No kapture directory {kapture_dir_path}')
    csv_file_paths = {dtype: path.join(kapture_dir_path, filename)
                      for dtype, filename in CSV_FILENAMES.items()}
    data_dir_paths = {dtype: path.join(kapture_dir_path, dir_name)
                      for dtype, dir_name in kapture.io.features.FEATURES_DATA_DIRNAMES.items()}
    # keep only those in load_only and that exists
    kapture_data_paths = {**data_dir_paths, **csv_file_paths}  # make sure files take precedence over dirs
    kapture_loadable_data = {
        kapture_type
        for kapture_type in KAPTURE_LOADABLE_TYPES
        if kapture_type not in skip_list and path.exists(kapture_data_paths[kapture_type])
    }
    kapture_data = kapture.Kapture()
    loading_start = datetime.datetime.now()
    # sensors: loaded first because everything else references sensor ids
    sensor_ids = None
    sensors_file_path = csv_file_paths[kapture.Sensors]
    if sensors_file_path:
        logger.debug(f'loading sensors {sensors_file_path} ...')
        kapture_data.__version__ = get_version_from_csv_file(sensors_file_path)
        kapture_data.sensors = sensors_from_file(sensors_file_path)
        sensor_ids = set(kapture_data.sensors.keys()) if kapture_data.sensors is not None else set()
    if sensor_ids is None:
        # no need to continue, everything else depends on sensors
        raise FileNotFoundError(f'File {sensors_file_path} is missing or empty in {kapture_dir_path}')
    # rigs
    if kapture.Rigs in kapture_loadable_data:
        rigs_file_path = csv_file_paths[kapture.Rigs]
        logger.debug(f'loading rigs {rigs_file_path} ...')
        assert sensor_ids is not None
        kapture_data.rigs = rigs_from_file(rigs_file_path, sensor_ids)
        # update sensor_ids with rig_id so trajectories may reference rigs too
        sensor_ids.update(kapture_data.rigs.keys())
    # trajectories
    if kapture.Trajectories in kapture_loadable_data:
        trajectories_file_path = csv_file_paths[kapture.Trajectories]
        logger.debug(f'loading trajectories {trajectories_file_path} ...')
        assert sensor_ids is not None
        kapture_data.trajectories = trajectories_from_file(trajectories_file_path, sensor_ids)
    _load_all_records(csv_file_paths, kapture_loadable_data, kapture_data)
    _load_features_and_desc_and_matches(data_dir_paths, kapture_dir_path, matches_pairs_file_path,
                                        kapture_loadable_data, kapture_data)
    _load_points3d_and_observations(csv_file_paths, kapture_loadable_data, kapture_data)
    loading_elapsed = datetime.datetime.now() - loading_start
    logger.debug(f'Loaded in {loading_elapsed.total_seconds():.3f} seconds from "{kapture_dir_path}"')
    return kapture_data
def get_sensor_ids_of_type(sensor_type: str, sensors: kapture.Sensors) -> Set[str]:
    """
    Get the sensors of a certain kapture type ('camera', 'lidar', ...)

    :param sensor_type: type of sensor
    :param sensors: sensors to process
    :return: sensors identifiers
    """
    # set comprehension instead of set([list-comprehension]): same result, no intermediate list
    return {sensor_id
            for sensor_id in sensors.keys()
            if sensors[sensor_id].sensor_type == sensor_type}
def _load_all_records(csv_file_paths, kapture_loadable_data, kapture_data) -> None:
    """
    Loads all records from disk to the kapture in memory.

    Each record family is loaded only if listed in kapture_loadable_data, and filtered
    to the sensor ids of its matching sensor type.

    :param csv_file_paths: file paths of the CVS records files, keyed by kapture type
    :param kapture_loadable_data: set of kapture types to load
    :param kapture_data: the kapture object to load into (mutated in place)
    """
    # records camera
    if kapture.RecordsCamera in kapture_loadable_data:
        records_camera_file_path = csv_file_paths[kapture.RecordsCamera]
        logger.debug(f'loading images {records_camera_file_path} ...')
        assert kapture_data.sensors is not None
        sensor_ids = get_sensor_ids_of_type(kapture.SENSOR_TYPE_CAMERA, kapture_data.sensors)
        assert sensor_ids is not None
        kapture_data.records_camera = records_camera_from_file(csv_file_paths[kapture.RecordsCamera], sensor_ids)
    # records depth
    if kapture.RecordsDepth in kapture_loadable_data:
        records_depth_file_path = csv_file_paths[kapture.RecordsDepth]
        logger.debug(f'loading depth {records_depth_file_path} ...')
        assert kapture_data.sensors is not None
        sensor_ids = get_sensor_ids_of_type(kapture.SENSOR_TYPE_DEPTH_CAM, kapture_data.sensors)
        assert sensor_ids is not None
        kapture_data.records_depth = records_depth_from_file(csv_file_paths[kapture.RecordsDepth], sensor_ids)
    # records lidar
    # NOTE(review): the sensor type below is a string literal while camera/depth use
    # kapture.SENSOR_TYPE_* constants — confirm whether matching constants exist.
    if kapture.RecordsLidar in kapture_loadable_data:
        records_lidar_file_path = csv_file_paths[kapture.RecordsLidar]
        logger.debug(f'loading lidar {records_lidar_file_path} ...')
        assert kapture_data.sensors is not None
        sensor_ids = get_sensor_ids_of_type('lidar', kapture_data.sensors)
        assert sensor_ids is not None
        kapture_data.records_lidar = records_lidar_from_file(records_lidar_file_path, sensor_ids)
    # records Wifi
    if kapture.RecordsWifi in kapture_loadable_data:
        records_wifi_file_path = csv_file_paths[kapture.RecordsWifi]
        logger.debug(f'loading wifi {records_wifi_file_path} ...')
        assert kapture_data.sensors is not None
        sensor_ids = get_sensor_ids_of_type('wifi', kapture_data.sensors)
        assert sensor_ids is not None
        kapture_data.records_wifi = records_wifi_from_file(records_wifi_file_path, sensor_ids)
    # records bluetooth
    if kapture.RecordsBluetooth in kapture_loadable_data:
        records_bluetooth_file_path = csv_file_paths[kapture.RecordsBluetooth]
        logger.debug(f'loading bluetooth {records_bluetooth_file_path} ...')
        assert kapture_data.sensors is not None
        sensor_ids = get_sensor_ids_of_type('bluetooth', kapture_data.sensors)
        assert sensor_ids is not None
        kapture_data.records_bluetooth = records_bluetooth_from_file(records_bluetooth_file_path, sensor_ids)
    # records GNSS
    if kapture.RecordsGnss in kapture_loadable_data:
        records_gnss_file_path = csv_file_paths[kapture.RecordsGnss]
        logger.debug(f'loading GNSS {records_gnss_file_path} ...')
        assert kapture_data.sensors is not None
        # GNSS sensors carry their EPSG code as first sensor parameter
        epsg_codes = {sensor_id: sensor.sensor_params[0]
                      for sensor_id, sensor in kapture_data.sensors.items()
                      if sensor.sensor_type == 'gnss'}
        if len(epsg_codes) > 0:
            kapture_data.records_gnss = records_gnss_from_file(records_gnss_file_path, set(epsg_codes.keys()))
        else:
            logger.warning('no declared GNSS sensors: all GNSS data will be ignored')
    # records Accelerometer
    if kapture.RecordsAccelerometer in kapture_loadable_data:
        records_accelerometer_file_path = csv_file_paths[kapture.RecordsAccelerometer]
        logger.debug(f'loading Accelerations {records_accelerometer_file_path} ...')
        assert kapture_data.sensors is not None
        sensor_ids = get_sensor_ids_of_type('accelerometer', kapture_data.sensors)
        assert sensor_ids is not None
        kapture_data.records_accelerometer = records_accelerometer_from_file(records_accelerometer_file_path,
                                                                             sensor_ids)
    # records Gyroscope
    if kapture.RecordsGyroscope in kapture_loadable_data:
        records_gyroscope_file_path = csv_file_paths[kapture.RecordsGyroscope]
        logger.debug(f'loading Gyroscope {records_gyroscope_file_path} ...')
        assert kapture_data.sensors is not None
        sensor_ids = get_sensor_ids_of_type('gyroscope', kapture_data.sensors)
        assert sensor_ids is not None
        kapture_data.records_gyroscope = records_gyroscope_from_file(records_gyroscope_file_path, sensor_ids)
    # records Magnetic
    if kapture.RecordsMagnetic in kapture_loadable_data:
        records_magnetic_file_path = csv_file_paths[kapture.RecordsMagnetic]
        logger.debug(f'loading Magnetic {records_magnetic_file_path} ...')
        assert kapture_data.sensors is not None
        sensor_ids = get_sensor_ids_of_type('magnetic', kapture_data.sensors)
        assert sensor_ids is not None
        kapture_data.records_magnetic = records_magnetic_from_file(records_magnetic_file_path, sensor_ids)
def _load_features_and_desc_and_matches(data_dir_paths, kapture_dir_path, matches_pairs_file_path,
                                        kapture_loadable_data, kapture_data) -> None:
    """
    Loads features, descriptors, key points and matches from disk to the kapture in memory

    :param data_dir_paths: file paths of the data records files
    :param kapture_dir_path: kapture top directory path
    :param matches_pairs_file_path: text file in the csv format; where each line is image_name1, image_name2, score
    :param kapture_loadable_data: the data to load
    :param kapture_data: to the kapture object to load into
    """
    # Every image-local feature type is indexed by the image names listed in
    # the camera records; without camera records there is nothing to index.
    if kapture_data.records_camera is None:
        image_filenames = set()
    else:
        image_filenames = {image_name
                           for _, _, image_name in kapture.flatten(kapture_data.records_camera)}
    # keypoints
    if kapture.Keypoints in kapture_loadable_data:
        logger.debug(f'loading keypoints {data_dir_paths[kapture.Keypoints]} ...')
        assert kapture_data.records_camera is not None
        kapture_data.keypoints = keypoints_from_dir(kapture_dir_path, image_filenames)
    # descriptors
    if kapture.Descriptors in kapture_loadable_data:
        logger.debug(f'loading descriptors {data_dir_paths[kapture.Descriptors]} ...')
        assert kapture_data.records_camera is not None
        kapture_data.descriptors = descriptors_from_dir(kapture_dir_path, image_filenames)
    # global_features
    if kapture.GlobalFeatures in kapture_loadable_data:
        logger.debug(f'loading global features {data_dir_paths[kapture.GlobalFeatures]} ...')
        assert kapture_data.records_camera is not None
        kapture_data.global_features = global_features_from_dir(kapture_dir_path, image_filenames)
    # matches
    if kapture.Matches in kapture_loadable_data:
        logger.debug(f'loading matches {data_dir_paths[kapture.Matches]} ...')
        assert kapture_data.records_camera is not None
        kapture_data.matches = matches_from_dir(kapture_dir_path, image_filenames, matches_pairs_file_path)
def _load_points3d_and_observations(csv_file_paths, kapture_loadable_data, kapture_data) -> None:
    """
    Loads the 3-D points and the observations from their csv files
    into the in-memory kapture object.

    :param csv_file_paths: file paths of the csv files, indexed by kapture type
    :param kapture_loadable_data: the data types requested for loading
    :param kapture_data: the kapture object to load into
    """
    # points3d
    if kapture.Points3d in kapture_loadable_data:
        points3d_file_path = csv_file_paths[kapture.Points3d]
        logger.debug(f'loading points 3d {points3d_file_path} ...')
        kapture_data.points3d = points3d_from_file(points3d_file_path)
    # observations (reference both keypoints and 3-D points, so both must be loaded)
    if kapture.Observations in kapture_loadable_data:
        observations_file_path = csv_file_paths[kapture.Observations]
        logger.debug(f'loading observations {observations_file_path} ...')
        assert kapture_data.keypoints is not None
        assert kapture_data.points3d is not None
        kapture_data.observations = observations_from_file(observations_file_path, kapture_data.keypoints)
| [
"kapture.RecordsDepth",
"kapture.io.features.matching_pairs_from_dirpath",
"kapture.PoseTransform",
"kapture.io.features.get_matches_fullpath",
"kapture.Points3d",
"kapture.RecordsWifi",
"os.path.isfile",
"kapture.RecordsLidar",
"kapture.PoseTransform.__new__",
"os.path.join",
"kapture.RecordWif... | [((5754, 5777), 're.compile', 're.compile', (['"""\\\\s*,\\\\s*"""'], {}), "('\\\\s*,\\\\s*')\n", (5764, 5777), False, 'import re\n'), ((34394, 34454), 'collections.namedtuple', 'namedtuple', (['"""ImageFeatureConfig"""', "['name', 'dtype', 'dsize']"], {}), "('ImageFeatureConfig', ['name', 'dtype', 'dsize'])\n", (34404, 34454), False, 'from collections import namedtuple\n'), ((530, 565), 'os.path.join', 'path.join', (['"""sensors"""', '"""sensors.txt"""'], {}), "('sensors', 'sensors.txt')\n", (539, 565), True, 'import os.path as path\n'), ((593, 633), 'os.path.join', 'path.join', (['"""sensors"""', '"""trajectories.txt"""'], {}), "('sensors', 'trajectories.txt')\n", (602, 633), True, 'import os.path as path\n'), ((653, 685), 'os.path.join', 'path.join', (['"""sensors"""', '"""rigs.txt"""'], {}), "('sensors', 'rigs.txt')\n", (662, 685), True, 'import os.path as path\n'), ((714, 756), 'os.path.join', 'path.join', (['"""sensors"""', '"""records_camera.txt"""'], {}), "('sensors', 'records_camera.txt')\n", (723, 756), True, 'import os.path as path\n'), ((784, 825), 'os.path.join', 'path.join', (['"""sensors"""', '"""records_depth.txt"""'], {}), "('sensors', 'records_depth.txt')\n", (793, 825), True, 'import os.path as path\n'), ((853, 894), 'os.path.join', 'path.join', (['"""sensors"""', '"""records_lidar.txt"""'], {}), "('sensors', 'records_lidar.txt')\n", (862, 894), True, 'import os.path as path\n'), ((921, 961), 'os.path.join', 'path.join', (['"""sensors"""', '"""records_wifi.txt"""'], {}), "('sensors', 'records_wifi.txt')\n", (930, 961), True, 'import os.path as path\n'), ((993, 1038), 'os.path.join', 'path.join', (['"""sensors"""', '"""records_bluetooth.txt"""'], {}), "('sensors', 'records_bluetooth.txt')\n", (1002, 1038), True, 'import os.path as path\n'), ((1065, 1105), 'os.path.join', 'path.join', (['"""sensors"""', '"""records_gnss.txt"""'], {}), "('sensors', 'records_gnss.txt')\n", (1074, 1105), True, 'import os.path as path\n'), 
((1141, 1190), 'os.path.join', 'path.join', (['"""sensors"""', '"""records_accelerometer.txt"""'], {}), "('sensors', 'records_accelerometer.txt')\n", (1150, 1190), True, 'import os.path as path\n'), ((1222, 1267), 'os.path.join', 'path.join', (['"""sensors"""', '"""records_gyroscope.txt"""'], {}), "('sensors', 'records_gyroscope.txt')\n", (1231, 1267), True, 'import os.path as path\n'), ((1298, 1342), 'os.path.join', 'path.join', (['"""sensors"""', '"""records_magnetic.txt"""'], {}), "('sensors', 'records_magnetic.txt')\n", (1307, 1342), True, 'import os.path as path\n'), ((1366, 1409), 'os.path.join', 'path.join', (['"""reconstruction"""', '"""points3d.txt"""'], {}), "('reconstruction', 'points3d.txt')\n", (1375, 1409), True, 'import os.path as path\n'), ((1434, 1491), 'os.path.join', 'path.join', (['"""reconstruction"""', '"""keypoints"""', '"""keypoints.txt"""'], {}), "('reconstruction', 'keypoints', 'keypoints.txt')\n", (1443, 1491), True, 'import os.path as path\n'), ((1518, 1579), 'os.path.join', 'path.join', (['"""reconstruction"""', '"""descriptors"""', '"""descriptors.txt"""'], {}), "('reconstruction', 'descriptors', 'descriptors.txt')\n", (1527, 1579), True, 'import os.path as path\n'), ((1609, 1678), 'os.path.join', 'path.join', (['"""reconstruction"""', '"""global_features"""', '"""global_features.txt"""'], {}), "('reconstruction', 'global_features', 'global_features.txt')\n", (1618, 1678), True, 'import os.path as path\n'), ((1706, 1753), 'os.path.join', 'path.join', (['"""reconstruction"""', '"""observations.txt"""'], {}), "('reconstruction', 'observations.txt')\n", (1715, 1753), True, 'import os.path as path\n'), ((2304, 2340), 'os.path.join', 'path.join', (['kapture_dirpath', 'filename'], {}), '(kapture_dirpath, filename)\n', (2313, 2340), True, 'import os.path as path\n'), ((2895, 2946), 're.search', 're.search', (['KAPTURE_FORMAT_PARSING_RE', 'header_string'], {}), '(KAPTURE_FORMAT_PARSING_RE, header_string)\n', (2904, 2946), False, 'import 
re\n'), ((3259, 3285), 'os.path.isfile', 'path.isfile', (['csv_file_path'], {}), '(csv_file_path)\n', (3270, 3285), True, 'import os.path as path\n'), ((3853, 3911), 'os.path.join', 'path.join', (['kapture_dirpath', 'CSV_FILENAMES[kapture.Sensors]'], {}), '(kapture_dirpath, CSV_FILENAMES[kapture.Sensors])\n', (3862, 3911), True, 'import os.path as path\n'), ((9998, 10015), 'kapture.Sensors', 'kapture.Sensors', ([], {}), '()\n', (10013, 10015), False, 'import kapture\n'), ((11726, 11740), 'kapture.Rigs', 'kapture.Rigs', ([], {}), '()\n', (11738, 11740), False, 'import kapture\n'), ((13181, 13204), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13202, 13204), False, 'import datetime\n'), ((14397, 14420), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14418, 14420), False, 'import datetime\n'), ((16207, 16230), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16228, 16230), False, 'import datetime\n'), ((17242, 17265), 'kapture.RecordsCamera', 'kapture.RecordsCamera', ([], {}), '()\n', (17263, 17265), False, 'import kapture\n'), ((17286, 17309), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17307, 17309), False, 'import datetime\n'), ((18478, 18501), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (18499, 18501), False, 'import datetime\n'), ((19510, 19532), 'kapture.RecordsDepth', 'kapture.RecordsDepth', ([], {}), '()\n', (19530, 19532), False, 'import kapture\n'), ((19553, 19576), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (19574, 19576), False, 'import datetime\n'), ((20751, 20774), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20772, 20774), False, 'import datetime\n'), ((21772, 21794), 'kapture.RecordsLidar', 'kapture.RecordsLidar', ([], {}), '()\n', (21792, 21794), False, 'import kapture\n'), ((21815, 21838), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (21836, 21838), False, 'import 
datetime\n'), ((22925, 22948), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (22946, 22948), False, 'import datetime\n'), ((23102, 23142), 'kapture.flatten', 'kapture.flatten', (['records'], {'is_sorted': '(True)'}), '(records, is_sorted=True)\n', (23117, 23142), False, 'import kapture\n'), ((24378, 24401), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24399, 24401), False, 'import datetime\n'), ((25489, 25512), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (25510, 25512), False, 'import datetime\n'), ((26632, 26653), 'kapture.RecordsWifi', 'kapture.RecordsWifi', ([], {}), '()\n', (26651, 26653), False, 'import kapture\n'), ((26674, 26697), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26695, 26697), False, 'import datetime\n'), ((28178, 28201), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (28199, 28201), False, 'import datetime\n'), ((29335, 29361), 'kapture.RecordsBluetooth', 'kapture.RecordsBluetooth', ([], {}), '()\n', (29359, 29361), False, 'import kapture\n'), ((29382, 29405), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29403, 29405), False, 'import datetime\n'), ((43541, 43569), 'kapture.Matches', 'kapture.Matches', (['match_pairs'], {}), '(match_pairs)\n', (43556, 43569), False, 'import kapture\n'), ((44423, 44446), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (44444, 44446), False, 'import datetime\n'), ((44458, 44523), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {'dtype': 'np.float', 'delimiter': '""","""', 'comments': '"""#"""'}), "(filepath, dtype=np.float, delimiter=',', comments='#')\n", (44468, 44523), True, 'import numpy as np\n'), ((44772, 44794), 'kapture.Points3d', 'kapture.Points3d', (['data'], {}), '(data)\n', (44788, 44794), False, 'import kapture\n'), ((45558, 45581), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (45579, 45581), False, 'import datetime\n'), 
((47016, 47038), 'kapture.Observations', 'kapture.Observations', ([], {}), '()\n', (47036, 47038), False, 'import kapture\n'), ((47059, 47082), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (47080, 47082), False, 'import datetime\n'), ((50447, 50470), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (50468, 50470), False, 'import datetime\n'), ((54032, 54049), 'kapture.Kapture', 'kapture.Kapture', ([], {}), '()\n', (54047, 54049), False, 'import kapture\n'), ((54070, 54093), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (54091, 54093), False, 'import datetime\n'), ((4248, 4269), 'numpy.float', 'float', (['representation'], {}), '(representation)\n', (4253, 4269), False, 'from numpy import float, float32, float64, int32, uint8\n'), ((9690, 9712), 'os.path.dirname', 'path.dirname', (['filepath'], {}), '(filepath)\n', (9702, 9712), True, 'import os.path as path\n'), ((11120, 11142), 'os.path.dirname', 'path.dirname', (['filepath'], {}), '(filepath)\n', (11132, 11142), True, 'import os.path as path\n'), ((13540, 13562), 'os.path.dirname', 'path.dirname', (['filepath'], {}), '(filepath)\n', (13552, 13562), True, 'import os.path as path\n'), ((14538, 14560), 'kapture.Trajectories', 'kapture.Trajectories', ([], {}), '()\n', (14558, 14560), False, 'import kapture\n'), ((15476, 15499), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15497, 15499), False, 'import datetime\n'), ((17752, 17775), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17773, 17775), False, 'import datetime\n'), ((20026, 20049), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20047, 20049), False, 'import datetime\n'), ((22296, 22319), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (22317, 22319), False, 'import datetime\n'), ((24911, 24934), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (24932, 24934), False, 'import datetime\n'), 
((27519, 27542), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (27540, 27542), False, 'import datetime\n'), ((30122, 30145), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (30143, 30145), False, 'import datetime\n'), ((34884, 34913), 'os.path.dirname', 'path.dirname', (['config_filepath'], {}), '(config_filepath)\n', (34896, 34913), True, 'import os.path as path\n'), ((35488, 35516), 'os.path.exists', 'path.exists', (['config_filepath'], {}), '(config_filepath)\n', (35499, 35516), True, 'import os.path as path\n'), ((37822, 37907), 'kapture.io.features.image_ids_from_feature_dirpath', 'kapture.io.features.image_ids_from_feature_dirpath', (['kapture_type', 'kapture_dirpath'], {}), '(kapture_type,\n kapture_dirpath)\n', (37872, 37907), False, 'import kapture\n'), ((42109, 42173), 'kapture.io.features.matching_pairs_from_dirpath', 'kapture.io.features.matching_pairs_from_dirpath', (['kapture_dirpath'], {}), '(kapture_dirpath)\n', (42156, 42173), False, 'import kapture\n'), ((42208, 42231), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (42229, 42231), False, 'import datetime\n'), ((44077, 44099), 'os.path.dirname', 'path.dirname', (['filepath'], {}), '(filepath)\n', (44089, 44099), True, 'import os.path as path\n'), ((44632, 44655), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (44653, 44655), False, 'import datetime\n'), ((45422, 45458), 'os.path.basename', 'path.basename', (['observations_filepath'], {}), '(observations_filepath)\n', (45435, 45458), True, 'import os.path as path\n'), ((45818, 45853), 'os.path.dirname', 'path.dirname', (['observations_filepath'], {}), '(observations_filepath)\n', (45830, 45853), True, 'import os.path as path\n'), ((46790, 46826), 'os.path.basename', 'path.basename', (['observations_filepath'], {}), '(observations_filepath)\n', (46803, 46826), True, 'import os.path as path\n'), ((47904, 47927), 'datetime.datetime.now', 'datetime.datetime.now', ([], 
{}), '()\n', (47925, 47927), False, 'import datetime\n'), ((50301, 50337), 'os.path.join', 'path.join', (['kapture_dirpath', 'filename'], {}), '(kapture_dirpath, filename)\n', (50310, 50337), True, 'import os.path as path\n'), ((50952, 50975), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (50973, 50975), False, 'import datetime\n'), ((53249, 53277), 'os.path.isdir', 'path.isdir', (['kapture_dir_path'], {}), '(kapture_dir_path)\n', (53259, 53277), True, 'import os.path as path\n'), ((53384, 53421), 'os.path.join', 'path.join', (['kapture_dir_path', 'filename'], {}), '(kapture_dir_path, filename)\n', (53393, 53421), True, 'import os.path as path\n'), ((53519, 53556), 'os.path.join', 'path.join', (['kapture_dir_path', 'dir_name'], {}), '(kapture_dir_path, dir_name)\n', (53528, 53556), True, 'import os.path as path\n'), ((55811, 55834), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (55832, 55834), False, 'import datetime\n'), ((10233, 10323), 'kapture.create_sensor', 'kapture.create_sensor', ([], {'sensor_type': 'sensor_type', 'sensor_params': 'sensor_params', 'name': 'name'}), '(sensor_type=sensor_type, sensor_params=sensor_params,\n name=name)\n', (10254, 10323), False, 'import kapture\n'), ((12146, 12190), 'kapture.PoseTransform', 'kapture.PoseTransform', (['rotation', 'translation'], {}), '(rotation, translation)\n', (12167, 12190), False, 'import kapture\n'), ((13722, 13745), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13743, 13745), False, 'import datetime\n'), ((14836, 14888), 'kapture.PoseTransform.__new__', 'kapture.PoseTransform.__new__', (['kapture.PoseTransform'], {}), '(kapture.PoseTransform)\n', (14865, 14888), False, 'import kapture\n'), ((16572, 16595), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16593, 16595), False, 'import datetime\n'), ((18845, 18868), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (18866, 18868), False, 'import 
datetime\n'), ((21120, 21143), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (21141, 21143), False, 'import datetime\n'), ((23352, 23375), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (23373, 23375), False, 'import datetime\n'), ((25986, 26009), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (26007, 26009), False, 'import datetime\n'), ((27371, 27450), 'kapture.RecordWifiSignal', 'kapture.RecordWifiSignal', (['frequency', 'RSSI', 'SSID', 'scan_time_start', 'scan_time_end'], {}), '(frequency, RSSI, SSID, scan_time_start, scan_time_end)\n', (27395, 27450), False, 'import kapture\n'), ((28654, 28677), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (28675, 28677), False, 'import datetime\n'), ((30019, 30070), 'kapture.RecordBluetoothSignal', 'kapture.RecordBluetoothSignal', ([], {'rssi': 'RSSI', 'name': 'name'}), '(rssi=RSSI, name=name)\n', (30048, 30070), False, 'import kapture\n'), ((46009, 46032), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (46030, 46032), False, 'import datetime\n'), ((53602, 53652), 'kapture.io.features.FEATURES_DATA_DIRNAMES.items', 'kapture.io.features.FEATURES_DATA_DIRNAMES.items', ([], {}), '()\n', (53650, 53652), False, 'import kapture\n'), ((27294, 27314), 'kapture.RecordWifi', 'kapture.RecordWifi', ([], {}), '()\n', (27312, 27314), False, 'import kapture\n'), ((29930, 29955), 'kapture.RecordBluetooth', 'kapture.RecordBluetooth', ([], {}), '()\n', (29953, 29955), False, 'import kapture\n'), ((43034, 43057), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (43055, 43057), False, 'import datetime\n'), ((53960, 54005), 'os.path.exists', 'path.exists', (['kapture_data_paths[kapture_type]'], {}), '(kapture_data_paths[kapture_type])\n', (53971, 54005), True, 'import os.path as path\n'), ((35553, 35583), 'os.path.basename', 'path.basename', (['config_filepath'], {}), '(config_filepath)\n', (35566, 35583), True, 'import 
os.path as path\n'), ((38085, 38173), 'kapture.io.features.get_features_fullpath', 'kapture.io.features.get_features_fullpath', (['kapture_type', 'kapture_dirpath', 'image_name'], {}), '(kapture_type, kapture_dirpath,\n image_name)\n', (38126, 38173), False, 'import kapture\n'), ((62319, 62363), 'kapture.flatten', 'kapture.flatten', (['kapture_data.records_camera'], {}), '(kapture_data.records_camera)\n', (62334, 62363), False, 'import kapture\n'), ((15009, 15018), 'numpy.float', 'float', (['qw'], {}), '(qw)\n', (15014, 15018), False, 'from numpy import float, float32, float64, int32, uint8\n'), ((15020, 15029), 'numpy.float', 'float', (['qx'], {}), '(qx)\n', (15025, 15029), False, 'from numpy import float, float32, float64, int32, uint8\n'), ((15031, 15040), 'numpy.float', 'float', (['qy'], {}), '(qy)\n', (15036, 15040), False, 'from numpy import float, float32, float64, int32, uint8\n'), ((15042, 15051), 'numpy.float', 'float', (['qz'], {}), '(qz)\n', (15047, 15051), False, 'from numpy import float, float32, float64, int32, uint8\n'), ((42801, 42870), 'kapture.io.features.get_matches_fullpath', 'kapture.io.features.get_matches_fullpath', (['image_pair', 'kapture_dirpath'], {}), '(image_pair, kapture_dirpath)\n', (42841, 42870), False, 'import kapture\n'), ((15222, 15231), 'numpy.float', 'float', (['tx'], {}), '(tx)\n', (15227, 15231), False, 'from numpy import float, float32, float64, int32, uint8\n'), ((15235, 15244), 'numpy.float', 'float', (['ty'], {}), '(ty)\n', (15240, 15244), False, 'from numpy import float, float32, float64, int32, uint8\n'), ((15248, 15257), 'numpy.float', 'float', (['tz'], {}), '(tz)\n', (15253, 15257), False, 'from numpy import float, float32, float64, int32, uint8\n')] |
# Demo script: detect straight lines in an image with the standard Hough
# transform and draw them in red over the original picture.
import numpy as np
import cv2
import scipy.ndimage as ndi
# Load the picture and reduce it to a single grey channel.
img = cv2.imread('pictures/02.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Median filter (2x2 window) suppresses speckle noise before thresholding.
smooth = ndi.filters.median_filter(gray, size=2)
# Boolean mask of very bright pixels (threshold 200 of 255).
edges = smooth > 200
# Hough transform: rho resolution 0.5 px, theta resolution 1 degree,
# accumulator threshold 120 votes.
lines = cv2.HoughLines(edges.astype(np.uint8), 0.5, np.pi/180, 120)
for line in lines:
    for rho,theta in line:
        print(rho, theta)
        # Convert the (rho, theta) normal form of each line into two
        # far-apart cartesian endpoints so cv2.line can draw across the image.
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        # Draw the line in red (BGR colour), 2 px thick.
        cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
# Show the result
cv2.imshow('Measure Size', img)
cv2.waitKey(0) | [
"cv2.line",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imread",
"numpy.sin",
"numpy.cos",
"cv2.imshow",
"scipy.ndimage.filters.median_filter"
] | [((65, 94), 'cv2.imread', 'cv2.imread', (['"""pictures/02.png"""'], {}), "('pictures/02.png')\n", (75, 94), False, 'import cv2\n'), ((102, 139), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (114, 139), False, 'import cv2\n'), ((149, 188), 'scipy.ndimage.filters.median_filter', 'ndi.filters.median_filter', (['gray'], {'size': '(2)'}), '(gray, size=2)\n', (174, 188), True, 'import scipy.ndimage as ndi\n'), ((642, 673), 'cv2.imshow', 'cv2.imshow', (['"""Measure Size"""', 'img'], {}), "('Measure Size', img)\n", (652, 673), False, 'import cv2\n'), ((674, 688), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (685, 688), False, 'import cv2\n'), ((364, 377), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (370, 377), True, 'import numpy as np\n'), ((390, 403), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (396, 403), True, 'import numpy as np\n'), ((580, 629), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (588, 629), False, 'import cv2\n')] |
import logging
from functools import partial
import numpy as np
from matplotlib.ticker import AutoLocator, MaxNLocator, LogLocator
from matplotlib.ticker import (LogFormatterMathtext, ScalarFormatter,
FuncFormatter)
from ..core.data import CategoricalComponent
from ..core.decorators import memoize
def get_extent(view, transpose=False):
    """Return the matplotlib-style extent (x0, x1, y0, y1) of a 2D view.

    :param view: tuple containing (among possible non-slice entries) two
                 slices, ordered (y, x)
    :param transpose: if True, swap the roles of the x and y slices
    """
    sy, sx = [s for s in view if isinstance(s, slice)]
    if transpose:
        sy, sx = sx, sy
    return (sx.start, sx.stop, sy.start, sy.stop)
def view_cascade(data, view):
    """ Return a set of views progressively zoomed out of input at roughly
    constant pixel count

    :param data: Data object to view
    :param view: Original view into data

    :rtype: tuple of (zoomed-out view, original view)
    """
    shp = data.shape
    v2 = list(view)
    logging.debug("image shape: %s, view: %s", shp, view)

    # choose stride length that roughly samples entire image
    # at roughly the same pixel count.
    # NOTE: floor division (//) keeps the step an int -- the original `/`
    # was Python-2 integer division; in Python 3 it yields a float, and a
    # float is not a valid slice step.
    step = max(shp[i - 1] * v.step // max(v.stop - v.start, 1)
               for i, v in enumerate(view) if isinstance(v, slice))
    step = max(step, 1)

    for i, v in enumerate(v2):
        if not isinstance(v, slice):
            # non-slice entries (e.g. an attribute index) pass through as-is
            continue
        v2[i] = slice(0, shp[i - 1], step)

    return tuple(v2), view
def small_view(data, attribute):
    """
    Extract a downsampled view from a dataset, for quick
    statistical summaries

    :param data: Data-like object indexable as ``data[attribute, view]``
                 with a ``shape`` attribute
    :param attribute: the attribute/component to extract
    :return: the downsampled values (at most ~50 samples per axis)
    """
    shp = data.shape
    # `s // 50` (not `s / 50`): slice steps must be ints in Python 3.
    view = tuple(slice(None, None, max(s // 50, 1)) for s in shp)
    return data[attribute, view]
def small_view_array(data):
    """
    Same as small_view, except using a numpy array as input

    :param data: array-like input
    :return: a strided (at most ~50 samples per axis) view of the array
    """
    shp = data.shape
    # `s // 50` (not `s / 50`): slice steps must be ints in Python 3.
    view = tuple(slice(None, None, max(s // 50, 1)) for s in shp)
    return np.asarray(data)[view]
def fast_limits(data, plo, phi):
    """Quickly estimate percentiles in an array,
    using a downsampled version

    :param data: array-like
    :param plo: Lo percentile
    :param phi: High percentile

    :rtype: Tuple of floats. Approximate values of each percentile in
            data[component]
    """
    try:
        from scipy import stats
    except ImportError:
        raise ImportError("Scale clipping requires SciPy")

    shp = data.shape
    # `s // 50` (not `s / 50`): slice steps must be ints in Python 3.
    view = tuple(slice(None, None, max(s // 50, 1)) for s in shp)
    values = np.asarray(data)[view]
    if not np.isfinite(values).any():
        # no finite values at all: fall back to a default unit range
        return (0.0, 1.0)

    limits = (-np.inf, np.inf)
    lo = stats.scoreatpercentile(values.flat, plo, limit=limits)
    hi = stats.scoreatpercentile(values.flat, phi, limit=limits)
    return lo, hi
def visible_limits(artists, axis):
    """Determines the data limits for the data in a set of artists

    Ignores non-visible artists

    Assumes each artist has a get_data method which returns a tuple of x,y

    :param artists: An iterable collection of artists
    :param axis: Which axis to compute. 0=xaxis, 1=yaxis

    :rtype: A tuple of min, max for the requested axis, or None if
            no data present
    """
    collected = []
    for artist in artists:
        if not artist.visible:
            continue
        xy = artist.get_data()
        assert isinstance(xy, tuple)
        values = xy[axis]
        if values.size > 0:
            collected.append(values)

    if not collected:
        return

    stacked = np.hstack(collected)
    if stacked.size == 0:
        return

    finite = stacked[np.isfinite(stacked)]
    if finite.size == 0:
        return

    lo, hi = np.nanmin(finite), np.nanmax(finite)
    if not np.isfinite(lo):
        return
    return lo, hi
def tick_linker(all_categories, pos, *args):
    """Map a tick position onto its category label.

    Positions outside the range of `all_categories` yield an empty string.
    """
    index = int(pos)
    if -len(all_categories) <= index < len(all_categories):
        return all_categories[index]
    return ''
def update_ticks(axes, coord, components, is_log):
    """ Changes the axes to have the proper tick formatting based on the
    type of component.

    :param axes: A matplotlib axis object to alter
    :param coord: 'x' or 'y'
    :param components: A list() of components that are plotted along this axis
    :param is_log: Boolean for log-scale.

    :return: None, or the number of categories if all components are
             Categorical
    """
    if coord == 'x':
        axis = axes.xaxis
    elif coord == 'y':
        axis = axes.yaxis
    else:
        raise TypeError("coord must be one of x,y")

    is_cat = all(isinstance(comp, CategoricalComponent) for comp in components)
    if is_log:
        axis.set_major_locator(LogLocator())
        axis.set_major_formatter(LogFormatterMathtext())
    elif is_cat:
        # builtin `object`, not `np.object`: the numpy alias was deprecated
        # in NumPy 1.20 and removed in 1.24 (raises AttributeError).
        all_categories = np.empty((0,), dtype=object)
        for comp in components:
            all_categories = np.union1d(comp._categories, all_categories)
        locator = MaxNLocator(10, integer=True)
        locator.view_limits(0, all_categories.shape[0])
        format_func = partial(tick_linker, all_categories)
        formatter = FuncFormatter(format_func)
        axis.set_major_locator(locator)
        axis.set_major_formatter(formatter)
        return all_categories.shape[0]
    else:
        axis.set_major_locator(AutoLocator())
        axis.set_major_formatter(ScalarFormatter())
| [
"functools.partial",
"matplotlib.ticker.AutoLocator",
"logging.debug",
"scipy.stats.scoreatpercentile",
"numpy.empty",
"numpy.asarray",
"matplotlib.ticker.LogLocator",
"matplotlib.ticker.MaxNLocator",
"numpy.isfinite",
"numpy.nanmin",
"numpy.hstack",
"matplotlib.ticker.FuncFormatter",
"numpy... | [((840, 893), 'logging.debug', 'logging.debug', (['"""image shape: %s, view: %s"""', 'shp', 'view'], {}), "('image shape: %s, view: %s', shp, view)\n", (853, 893), False, 'import logging\n'), ((2458, 2513), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['values.flat', 'plo'], {'limit': 'limits'}), '(values.flat, plo, limit=limits)\n', (2481, 2513), False, 'from scipy import stats\n'), ((2523, 2578), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['values.flat', 'phi'], {'limit': 'limits'}), '(values.flat, phi, limit=limits)\n', (2546, 2578), False, 'from scipy import stats\n'), ((3308, 3323), 'numpy.hstack', 'np.hstack', (['data'], {}), '(data)\n', (3317, 3323), True, 'import numpy as np\n'), ((1770, 1786), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1780, 1786), True, 'import numpy as np\n'), ((2333, 2349), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2343, 2349), True, 'import numpy as np\n'), ((3379, 3396), 'numpy.isfinite', 'np.isfinite', (['data'], {}), '(data)\n', (3390, 3396), True, 'import numpy as np\n'), ((3450, 3465), 'numpy.nanmin', 'np.nanmin', (['data'], {}), '(data)\n', (3459, 3465), True, 'import numpy as np\n'), ((3467, 3482), 'numpy.nanmax', 'np.nanmax', (['data'], {}), '(data)\n', (3476, 3482), True, 'import numpy as np\n'), ((3494, 3509), 'numpy.isfinite', 'np.isfinite', (['lo'], {}), '(lo)\n', (3505, 3509), True, 'import numpy as np\n'), ((4461, 4473), 'matplotlib.ticker.LogLocator', 'LogLocator', ([], {}), '()\n', (4471, 4473), False, 'from matplotlib.ticker import AutoLocator, MaxNLocator, LogLocator\n'), ((4508, 4530), 'matplotlib.ticker.LogFormatterMathtext', 'LogFormatterMathtext', ([], {}), '()\n', (4528, 4530), False, 'from matplotlib.ticker import LogFormatterMathtext, ScalarFormatter, FuncFormatter\n'), ((4574, 4605), 'numpy.empty', 'np.empty', (['(0,)'], {'dtype': 'np.object'}), '((0,), dtype=np.object)\n', (4582, 4605), True, 'import numpy as np\n'), ((4730, 
4759), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', (['(10)'], {'integer': '(True)'}), '(10, integer=True)\n', (4741, 4759), False, 'from matplotlib.ticker import AutoLocator, MaxNLocator, LogLocator\n'), ((4838, 4874), 'functools.partial', 'partial', (['tick_linker', 'all_categories'], {}), '(tick_linker, all_categories)\n', (4845, 4874), False, 'from functools import partial\n'), ((4895, 4921), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['format_func'], {}), '(format_func)\n', (4908, 4921), False, 'from matplotlib.ticker import LogFormatterMathtext, ScalarFormatter, FuncFormatter\n'), ((2364, 2383), 'numpy.isfinite', 'np.isfinite', (['values'], {}), '(values)\n', (2375, 2383), True, 'import numpy as np\n'), ((4667, 4711), 'numpy.union1d', 'np.union1d', (['comp._categories', 'all_categories'], {}), '(comp._categories, all_categories)\n', (4677, 4711), True, 'import numpy as np\n'), ((5087, 5100), 'matplotlib.ticker.AutoLocator', 'AutoLocator', ([], {}), '()\n', (5098, 5100), False, 'from matplotlib.ticker import AutoLocator, MaxNLocator, LogLocator\n'), ((5135, 5152), 'matplotlib.ticker.ScalarFormatter', 'ScalarFormatter', ([], {}), '()\n', (5150, 5152), False, 'from matplotlib.ticker import LogFormatterMathtext, ScalarFormatter, FuncFormatter\n')] |
import numpy as np
import warnings
from scipy.io import loadmat
from LFSpy import LocalFeatureSelection
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.pipeline import Pipeline
from sklearn import datasets
import matplotlib.pyplot as plt
np.random.seed(905)
def load_dataset(name, m=0):
    '''
    Loads a test/demo dataset.

    :param name: dataset to load: 'sample' (a matlab_Data .mat file) or 'iris'
    :param m: number of Gaussian noise variables to append to the iris data
              (ignored for 'sample')
    :return: tuple (training_data, training_labels, testing_data, testing_labels)
    :raises ValueError: if `name` is not a known dataset
    '''
    print('Loading dataset ' + name + '...')
    # Compare strings with '==', not 'is': identity comparison against a
    # literal only works by accident of CPython string interning.
    if name == 'sample':
        mat = loadmat('matlab_Data')
        training_data = mat['Train'].T
        training_labels = mat['TrainLables'][0]
        testing_data = mat['Test'].T
        testing_labels = mat['TestLables'][0]
    elif name == 'iris':
        # we only take the first two classes for binary classification
        train_idx = np.arange(0, 100, 2)
        test_idx = np.arange(1, 100, 2)
        iris = datasets.load_iris()
        if m > 0:
            iris.data = add_noise_vars(iris.data, m)
        training_data = iris.data[train_idx, :]
        training_labels = iris.target[train_idx]
        testing_data = iris.data[test_idx, :]
        testing_labels = iris.target[test_idx]
    else:
        # previously an unknown name fell through to an UnboundLocalError
        raise ValueError('Unknown dataset name: ' + repr(name))
    return training_data, training_labels, testing_data, testing_labels
def add_noise_vars(x, m, std_range=(0, 3)):
    '''
    Adds m Gaussian noise variables to data array x. Gaussian distributions
    have zero mean with standard deviations sampled from a uniform
    distribution on std_range.

    :param x: 2-D data array of shape (n_samples, n_features)
    :param m: number of noise columns to append
    :param std_range: (low, high) bounds for the uniform draw of the
                      noise standard deviations
    :return: array of shape (n_samples, n_features + m)
    '''
    # The default is now an immutable tuple (mutable default arguments are
    # an anti-pattern); callers passing a list are unaffected.
    if m <= 0:
        # np.stack([]) raises on an empty list, so short-circuit: no noise.
        return x
    n = x.shape[0]
    stds = np.random.uniform(low=std_range[0], high=std_range[1], size=m)
    noise_vars = [np.random.normal(loc=0.0, scale=s, size=(n,)) for s in stds]
    return np.hstack((x, np.stack(noise_vars).T))
def results_lfspy(x_train, y_train, x_test, y_test):
    '''
    Train and test an LFS model with default parameters on the given data.

    Returns:
        (accuracy, predicted_labels) on the test split.
    '''
    print('Training and testing an LFS model with default parameters.\nThis may take a few minutes...')
    model = Pipeline([('classifier', LocalFeatureSelection(rr_seed=777))])
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)
    accuracy = model.score(x_test, y_test)
    return accuracy, predictions
def results_rforest(x_train, y_train, x_test, y_test):
    '''
    Train and test a random forest with default parameters on the given data.

    Returns:
        (accuracy, predicted_labels) on the test split.
    '''
    print('Training and testing a Random Forest with default parameters.')
    model = Pipeline([('classifier', RandomForestClassifier(random_state=777))])
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)
    accuracy = model.score(x_test, y_test)
    return accuracy, predictions
def results_fsvm(x_train, y_train, x_test, y_test):
    '''
    Train and test a linear SVM preceded by F-statistic feature selection
    (top 25% of features kept) on the given data.

    Returns:
        (accuracy, predicted_labels) on the test split.
    '''
    print('Training and testing a SVM with default parameters with F-stat feature selection (25% of features selected).')
    n_keep = int(0.25 * x_train.shape[1])
    selector = SelectKBest(f_classif, k=n_keep)
    model = Pipeline([('feat_sel', selector), ('classifier', LinearSVC(random_state=777))])
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)
    accuracy = model.score(x_test, y_test)
    return accuracy, predictions
# Run all three classifiers on the MATLAB sample data.
training_data, training_labels, testing_data, testing_labels = load_dataset('sample')
score_lfs, y_pred_lfs = results_lfspy(training_data, training_labels, testing_data, testing_labels)
score_rfc, y_pred_rfc = results_rforest(training_data, training_labels, testing_data, testing_labels)
score_svm, y_pred_svm = results_fsvm(training_data, training_labels, testing_data, testing_labels)
# Repeat on the binary (two-class) iris data.
training_data, training_labels, testing_data, testing_labels = load_dataset('iris')
score_lfs_iris, y_pred_lfs_iris = results_lfspy(training_data, training_labels, testing_data, testing_labels)
score_rfc_iris, y_pred_rfc_iris = results_rforest(training_data, training_labels, testing_data, testing_labels)
score_svm_iris, y_pred_svm_iris = results_fsvm(training_data, training_labels, testing_data, testing_labels)
# Plot the comparison of results
scores = [score_lfs, score_rfc, score_svm]
scores_iris = [score_lfs_iris, score_rfc_iris, score_svm_iris]
def plotScores(scores, title=None):
    '''
    Bar-plot the accuracies of the three classifiers (LFS, RFC, SVM) and
    save the figure to '<title>.png'.
    '''
    classifier_names = ['LFS', 'RFC', 'SVM']
    plt.figure()
    plt.bar(classifier_names, scores)
    plt.ylim([0, 1])
    # Print each accuracy value inside its bar.
    for idx, acc in enumerate(scores):
        plt.text(idx - 0.1, 0.4, '{:.{}f}'.format(acc, 2), size=12)
    plt.title(title, fontsize=14)
    plt.savefig(title + '.png', bbox_inches='tight', pad_inches=0.1, dpi=300)
    return None
plotScores(scores, 'Sample Data Classification Accuracies')
plotScores(scores_iris, 'Iris Data Classification Accuracies')
# %% Compare across number of noise variables on Iris dataset
# Re-run all three classifiers while appending 0..1000 Gaussian noise
# features (in steps of 25) to the iris data.
Score_LFS = []
Score_SVM = []
Score_RFC = []
mlist = np.arange(0, 1001, 25)
for m in mlist:
    training_data, training_labels, testing_data, testing_labels = load_dataset('iris', m=m)
    s1, _ = results_lfspy(training_data, training_labels, testing_data, testing_labels)
    s2, _ = results_rforest(training_data, training_labels, testing_data, testing_labels)
    s3, _ = results_fsvm(training_data, training_labels, testing_data, testing_labels)
    Score_LFS.append(s1)
    Score_RFC.append(s2)
    Score_SVM.append(s3)
# Plot the results
plt.figure()
plt.plot(mlist, Score_LFS)
plt.plot(mlist, Score_RFC)
plt.plot(mlist, Score_SVM)
# Vertical dashed reference line at m = 100.
plt.vlines(100, 0, 1.2, linestyles='dashed')
plt.ylim([0,1.2])
#plt.xlabel('Number of Noise Features')
plt.title('Classification Accuracy by Number of Added Noise Variables', fontsize=14)
plt.legend(['LFS','Random Forest','SVM'], loc='lower right')
plt.savefig('IrisData_noise_Results.png', bbox_inches='tight', pad_inches=0.1, dpi=300)
# %% GenData Experiment
def create_dataset(n_samples, n_features, n_informative, n_redundant, n_repeated, n_clusters):
    '''
    Generate a synthetic binary-classification dataset with the requested
    mix of informative, redundant and repeated features.

    Returns:
        (X, Y): feature matrix and binary labels.
    '''
    generator_args = dict(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_informative,
        n_redundant=n_redundant,
        n_repeated=n_repeated,
        n_classes=2,
        n_clusters_per_class=n_clusters,
        weights=None,
        flip_y=0.10,      # 10% label noise
        class_sep=1.,
        hypercube=True,
        shift=None,
        scale=1.0,
        shuffle=True,
        random_state=321,  # fixed seed for reproducibility
    )
    X, Y = datasets.make_classification(**generator_args)
    return X, Y
def runExperiment():
    '''
    Example experiment showing the effect of redundant and repeated
    variables in a p >> n setting, for 1-3 clusters per class.

    Returns:
        scores: array of shape (n_cluster_settings, n_param_settings, 3)
                holding test accuracies for LFS, RFC and SVM.
    '''
    cluster_counts = [1, 2, 3]
    # Each setting: [n_samples, n_features, n_informative, n_redundant, n_repeated]
    settings = [[50, 5, 5, 0, 0],
                [50, 45, 5, 40, 0],
                [50, 45, 5, 0, 40],
                [50, 45, 5, 20, 20]]
    scores = np.zeros((len(cluster_counts), len(settings), 3), dtype=float)
    for c_idx, n_clusters in enumerate(cluster_counts):
        for s_idx, setting in enumerate(settings):
            X, Y = create_dataset(setting[0], setting[1], setting[2],
                                  setting[3], setting[4], n_clusters)
            # First 40 samples train, remaining 10 test.
            x_train, y_train = X[:40], Y[:40]
            x_test, y_test = X[40:], Y[40:]
            scores[c_idx, s_idx, 0], _ = results_lfspy(x_train, y_train, x_test, y_test)
            scores[c_idx, s_idx, 1], _ = results_rforest(x_train, y_train, x_test, y_test)
            scores[c_idx, s_idx, 2], _ = results_fsvm(x_train, y_train, x_test, y_test)
    return scores
def plotScoresGrouped(scores, title=None):
    '''
    Plot classification scores grouped by setting.

    One subplot row per cluster-count setting; within each row, three bars
    (LFS, RFC, SVM) per experimental setting, with the accuracy value
    printed inside each bar. Saves the figure to 'GenDataCompare4.png'.
    '''
    # scores has shape (n_cluster_settings, n_param_settings, 3 classifiers)
    n, l, nclf = scores.shape
    ind = np.arange(l)
    fig, ax = plt.subplots(n,1,figsize=(8,6))
    for c in range(n):
        slfs = np.squeeze(scores[c,:,0])
        srfc = np.squeeze(scores[c,:,1])
        ssvm = np.squeeze(scores[c,:,2])
        width = 0.3
        # Three side-by-side bars per setting, offset by one bar width.
        ax[c].bar(ind - width, slfs, width, label='LFS')
        ax[c].bar(ind, srfc, width, label='RFC')
        ax[c].bar(ind + width, ssvm, width, label='SVM')
        ax[c].set_ylim([0,1])
        ax[c].set(ylabel='{} Cluster(s)'.format(c+1))
        # Annotate each bar with its accuracy (2 decimal places).
        for i, v in enumerate(slfs):
            ax[c].text(i - width - 0.13, 0.3, '{:.{}f}'.format(v,2), size=12)
        for i, v in enumerate(srfc):
            ax[c].text(i - 0.13, 0.3, '{:.{}f}'.format(v,2), size=12)
        for i, v in enumerate(ssvm):
            ax[c].text(i + width -0.13, 0.3, '{:.{}f}'.format(v,2), size=12)
        # Title/legend on the first row only; x labels on the last row only.
        if c == 0:
            fig.suptitle(title, fontsize=14)
            ax[c].legend()
            ax[c].xaxis.set_visible(False)
        if c == 1:
            plt.ylabel('Accuracy')
            ax[c].xaxis.set_visible(False)
        if c == 2:
            plt.xticks(range(4), labels=['r=0, s=0','r=40, s=0','r=0, s=40', 'r=20, s=20'])
    plt.savefig('GenDataCompare4.png', bbox_inches='tight', pad_inches=0.1, dpi=300)
    return None
# Run the generated-data experiment and plot the grouped accuracies.
scores = runExperiment()
plotScoresGrouped(scores, 'Comparison with Generated Datasets')
| [
"matplotlib.pyplot.title",
"sklearn.datasets.load_iris",
"numpy.random.seed",
"scipy.io.loadmat",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.random.normal",
"LFSpy.LocalFeatureSelection",
"sklearn.svm.LinearSVC",
"matplotlib.pyplot.subplots",
"sklearn.ensemble.... | [((351, 370), 'numpy.random.seed', 'np.random.seed', (['(905)'], {}), '(905)\n', (365, 370), True, 'import numpy as np\n'), ((4708, 4730), 'numpy.arange', 'np.arange', (['(0)', '(1001)', '(25)'], {}), '(0, 1001, 25)\n', (4717, 4730), True, 'import numpy as np\n'), ((5210, 5222), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5220, 5222), True, 'import matplotlib.pyplot as plt\n'), ((5223, 5249), 'matplotlib.pyplot.plot', 'plt.plot', (['mlist', 'Score_LFS'], {}), '(mlist, Score_LFS)\n', (5231, 5249), True, 'import matplotlib.pyplot as plt\n'), ((5250, 5276), 'matplotlib.pyplot.plot', 'plt.plot', (['mlist', 'Score_RFC'], {}), '(mlist, Score_RFC)\n', (5258, 5276), True, 'import matplotlib.pyplot as plt\n'), ((5277, 5303), 'matplotlib.pyplot.plot', 'plt.plot', (['mlist', 'Score_SVM'], {}), '(mlist, Score_SVM)\n', (5285, 5303), True, 'import matplotlib.pyplot as plt\n'), ((5304, 5348), 'matplotlib.pyplot.vlines', 'plt.vlines', (['(100)', '(0)', '(1.2)'], {'linestyles': '"""dashed"""'}), "(100, 0, 1.2, linestyles='dashed')\n", (5314, 5348), True, 'import matplotlib.pyplot as plt\n'), ((5349, 5367), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.2]'], {}), '([0, 1.2])\n', (5357, 5367), True, 'import matplotlib.pyplot as plt\n'), ((5409, 5497), 'matplotlib.pyplot.title', 'plt.title', (['"""Classification Accuracy by Number of Added Noise Variables"""'], {'fontsize': '(14)'}), "('Classification Accuracy by Number of Added Noise Variables',\n fontsize=14)\n", (5418, 5497), True, 'import matplotlib.pyplot as plt\n'), ((5494, 5556), 'matplotlib.pyplot.legend', 'plt.legend', (["['LFS', 'Random Forest', 'SVM']"], {'loc': '"""lower right"""'}), "(['LFS', 'Random Forest', 'SVM'], loc='lower right')\n", (5504, 5556), True, 'import matplotlib.pyplot as plt\n'), ((5555, 5647), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""IrisData_noise_Results.png"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)', 'dpi': '(300)'}), 
"('IrisData_noise_Results.png', bbox_inches='tight', pad_inches=\n 0.1, dpi=300)\n", (5566, 5647), True, 'import matplotlib.pyplot as plt\n'), ((1574, 1636), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'std_range[0]', 'high': 'std_range[1]', 'size': 'm'}), '(low=std_range[0], high=std_range[1], size=m)\n', (1591, 1636), True, 'import numpy as np\n'), ((2035, 2069), 'LFSpy.LocalFeatureSelection', 'LocalFeatureSelection', ([], {'rr_seed': '(777)'}), '(rr_seed=777)\n', (2056, 2069), False, 'from LFSpy import LocalFeatureSelection\n'), ((2085, 2116), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('classifier', lfs)]"], {}), "([('classifier', lfs)])\n", (2093, 2116), False, 'from sklearn.pipeline import Pipeline\n'), ((2404, 2444), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(777)'}), '(random_state=777)\n', (2426, 2444), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2460, 2491), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('classifier', rfc)]"], {}), "([('classifier', rfc)])\n", (2468, 2491), False, 'from sklearn.pipeline import Pipeline\n'), ((2823, 2850), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(777)'}), '(random_state=777)\n', (2832, 2850), False, 'from sklearn.svm import LinearSVC\n'), ((2929, 2979), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('feat_sel', sel), ('classifier', svm)]"], {}), "([('feat_sel', sel), ('classifier', svm)])\n", (2937, 2979), False, 'from sklearn.pipeline import Pipeline\n'), ((4159, 4171), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4169, 4171), True, 'import matplotlib.pyplot as plt\n'), ((4176, 4214), 'matplotlib.pyplot.bar', 'plt.bar', (["['LFS', 'RFC', 'SVM']", 'scores'], {}), "(['LFS', 'RFC', 'SVM'], scores)\n", (4183, 4214), True, 'import matplotlib.pyplot as plt\n'), ((4217, 4233), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (4225, 4233), True, 'import matplotlib.pyplot as plt\n'), 
((4345, 4374), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(14)'}), '(title, fontsize=14)\n', (4354, 4374), True, 'import matplotlib.pyplot as plt\n'), ((4379, 4452), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(title + '.png')"], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)', 'dpi': '(300)'}), "(title + '.png', bbox_inches='tight', pad_inches=0.1, dpi=300)\n", (4390, 4452), True, 'import matplotlib.pyplot as plt\n'), ((5851, 6175), 'sklearn.datasets.make_classification', 'datasets.make_classification', ([], {'n_samples': 'n_samples', 'n_features': 'n_features', 'n_informative': 'n_informative', 'n_redundant': 'n_redundant', 'n_repeated': 'n_repeated', 'n_classes': '(2)', 'n_clusters_per_class': 'n_clusters', 'weights': 'None', 'flip_y': '(0.1)', 'class_sep': '(1.0)', 'hypercube': '(True)', 'shift': 'None', 'scale': '(1.0)', 'shuffle': '(True)', 'random_state': '(321)'}), '(n_samples=n_samples, n_features=n_features,\n n_informative=n_informative, n_redundant=n_redundant, n_repeated=\n n_repeated, n_classes=2, n_clusters_per_class=n_clusters, weights=None,\n flip_y=0.1, class_sep=1.0, hypercube=True, shift=None, scale=1.0,\n shuffle=True, random_state=321)\n', (5879, 6175), False, 'from sklearn import datasets\n'), ((7941, 7953), 'numpy.arange', 'np.arange', (['l'], {}), '(l)\n', (7950, 7953), True, 'import numpy as np\n'), ((7968, 8002), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n', '(1)'], {'figsize': '(8, 6)'}), '(n, 1, figsize=(8, 6))\n', (7980, 8002), True, 'import matplotlib.pyplot as plt\n'), ((9142, 9227), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""GenDataCompare4.png"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)', 'dpi': '(300)'}), "('GenDataCompare4.png', bbox_inches='tight', pad_inches=0.1, dpi=300\n )\n", (9153, 9227), True, 'import matplotlib.pyplot as plt\n'), ((532, 554), 'scipy.io.loadmat', 'loadmat', (['"""matlab_Data"""'], {}), "('matlab_Data')\n", (539, 554), False, 'from scipy.io import 
loadmat\n'), ((1655, 1699), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 's', 'size': '[n]'}), '(loc=0.0, scale=s, size=[n])\n', (1671, 1699), True, 'import numpy as np\n'), ((8043, 8070), 'numpy.squeeze', 'np.squeeze', (['scores[c, :, 0]'], {}), '(scores[c, :, 0])\n', (8053, 8070), True, 'import numpy as np\n'), ((8084, 8111), 'numpy.squeeze', 'np.squeeze', (['scores[c, :, 1]'], {}), '(scores[c, :, 1])\n', (8094, 8111), True, 'import numpy as np\n'), ((8125, 8152), 'numpy.squeeze', 'np.squeeze', (['scores[c, :, 2]'], {}), '(scores[c, :, 2])\n', (8135, 8152), True, 'import numpy as np\n'), ((850, 870), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(2)'], {}), '(0, 100, 2)\n', (859, 870), True, 'import numpy as np\n'), ((890, 910), 'numpy.arange', 'np.arange', (['(1)', '(100)', '(2)'], {}), '(1, 100, 2)\n', (899, 910), True, 'import numpy as np\n'), ((935, 955), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (953, 955), False, 'from sklearn import datasets\n'), ((8960, 8982), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (8970, 8982), True, 'import matplotlib.pyplot as plt\n'), ((1741, 1761), 'numpy.stack', 'np.stack', (['noise_vars'], {}), '(noise_vars)\n', (1749, 1761), True, 'import numpy as np\n')] |
import numpy as np
import sys
import yaml
import argparse
import torch
# Command-line interface: input LASER checkpoint and output Marian file paths.
parser = argparse.ArgumentParser(description='Convert LASER model to Marian weight file.')
parser.add_argument('--laser', help='Path to LASER PyTorch model', required=True)
parser.add_argument('--marian', help='Output path for Marian weight file', required=True)
args = parser.parse_args()
# Load the LASER checkpoint; it contains 'params', 'dictionary' and 'model'.
laser = torch.load(args.laser)
# Build the Marian model configuration from the LASER hyper-parameters.
config = dict()
config["type"] = "laser"
config["input-types"] = ["sequence"]
config["dim-vocabs"] = [laser["params"]["num_embeddings"]]
config["version"] = "laser2marian.py conversion"
config["enc-depth"] = laser["params"]["num_layers"]
config["enc-cell"] = "lstm"
config["dim-emb"] = laser["params"]["embed_dim"]
config["dim-rnn"] = laser["params"]["hidden_size"]
# Write the vocabulary as a YAML file next to the Marian weights.
yaml.dump(laser["dictionary"], open(args.marian + ".vocab.yml", "w"))
# Collected Marian-named weight matrices, filled in by convert().
marianModel = dict()
def transposeOrder(mat):
    """Return a C-contiguous copy of mat with its axes reversed.

    np.transpose only returns a view; copying in C order forces the row
    order change into actual memory layout.
    """
    axis_reversed = np.transpose(mat)    # view only, no data movement yet
    return axis_reversed.copy(order="C")  # materialise in row-major order
def convert(pd, srcs, trg, transpose=True, bias=False, lstm=False):
    """Convert the named PyTorch parameters and store the result in the
    global marianModel dict under the Marian name `trg`.

    Args:
        pd: dict of PyTorch tensors (the LASER state dict).
        srcs: parameter names in `pd`; their values are summed element-wise
              (used to merge PyTorch's two LSTM bias vectors into one).
        trg: target key in marianModel.
        transpose: transpose 2-D weight matrices into Marian's layout
                   (ignored when bias=True).
        bias: treat the vector as a bias, stored as a 1 x dim row.
        lstm: swap the first two gate slices of the last axis (PyTorch
              orders gates i,f,g,o; this converter wants f,i,g,o).
    """
    num = pd[srcs[0]].detach().numpy()
    # Sum all listed source parameters (e.g. bias_ih + bias_hh).
    for i in range(1, len(srcs)):
        num += pd[srcs[i]].detach().numpy()
    # FIX: removed dead local `out = num` that was never used.
    if bias:
        num = np.atleast_2d(num)
    else:
        if transpose:
            num = transposeOrder(num)  # transpose with row order change
        # NOTE(review): this gate swap sits inside the non-bias branch, so it
        # is skipped for biases even when lstm=True is passed by the caller
        # below — confirm whether Marian expects bias gates reordered too.
        if lstm:  # different gate order in pytorch than marian
            stateDim = int(num.shape[-1] / 4)
            i = np.copy(num[:, 0*stateDim:1*stateDim])
            f = np.copy(num[:, 1*stateDim:2*stateDim])
            num[:, 0*stateDim:1*stateDim] = f
            num[:, 1*stateDim:2*stateDim] = i
    marianModel[trg] = num
# Dump checkpoint keys and parameter shapes for inspection.
for k in laser:
    print(k)
for k in laser["model"]:
    print(k, laser["model"][k].shape)
# Convert embeddings plus each (bidirectional) LSTM layer's weights/biases.
convert(laser["model"], ["embed_tokens.weight"], "encoder_Wemb", transpose=False)
for i in range(laser["params"]["num_layers"]):
    convert(laser["model"], [f"lstm.weight_ih_l{i}"], f"encoder_lstm_l{i}_W", lstm=True)
    convert(laser["model"], [f"lstm.weight_hh_l{i}"], f"encoder_lstm_l{i}_U", lstm=True)
    convert(laser["model"], [f"lstm.bias_ih_l{i}", f"lstm.bias_hh_l{i}"], f"encoder_lstm_l{i}_b", bias=True, lstm=True) # needs to be summed!
    convert(laser["model"], [f"lstm.weight_ih_l{i}_reverse"], f"encoder_lstm_l{i}_reverse_W", lstm=True)
    convert(laser["model"], [f"lstm.weight_hh_l{i}_reverse"], f"encoder_lstm_l{i}_reverse_U", lstm=True)
    convert(laser["model"], [f"lstm.bias_ih_l{i}_reverse", f"lstm.bias_hh_l{i}_reverse"], f"encoder_lstm_l{i}_reverse_b", bias=True, lstm=True) # needs to be summed!
for m in marianModel:
    print(m, marianModel[m].shape)
configYamlStr = yaml.dump(config, default_flow_style=False)
# Embed the YAML config in the .npz as int8 character data under the
# 'special:model.yml' key.
desc = list(configYamlStr)
npDesc = np.chararray((len(desc),))
npDesc[:] = desc
npDesc.dtype = np.int8
marianModel["special:model.yml"] = npDesc
print("\nMarian config:")
print(configYamlStr)
print("Saving Marian model to %s" % (args.marian,))
np.savez(args.marian, **marianModel)
"argparse.ArgumentParser",
"numpy.copy",
"torch.load",
"yaml.dump",
"numpy.transpose",
"numpy.savez",
"numpy.atleast_2d"
] | [((82, 168), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert LASER model to Marian weight file."""'}), "(description=\n 'Convert LASER model to Marian weight file.')\n", (105, 168), False, 'import argparse\n'), ((372, 394), 'torch.load', 'torch.load', (['args.laser'], {}), '(args.laser)\n', (382, 394), False, 'import torch\n'), ((2719, 2762), 'yaml.dump', 'yaml.dump', (['config'], {'default_flow_style': '(False)'}), '(config, default_flow_style=False)\n', (2728, 2762), False, 'import yaml\n'), ((3008, 3044), 'numpy.savez', 'np.savez', (['args.marian'], {}), '(args.marian, **marianModel)\n', (3016, 3044), True, 'import numpy as np\n'), ((898, 915), 'numpy.transpose', 'np.transpose', (['mat'], {}), '(mat)\n', (910, 915), True, 'import numpy as np\n'), ((1277, 1295), 'numpy.atleast_2d', 'np.atleast_2d', (['num'], {}), '(num)\n', (1290, 1295), True, 'import numpy as np\n'), ((1517, 1559), 'numpy.copy', 'np.copy', (['num[:, 0 * stateDim:1 * stateDim]'], {}), '(num[:, 0 * stateDim:1 * stateDim])\n', (1524, 1559), True, 'import numpy as np\n'), ((1568, 1610), 'numpy.copy', 'np.copy', (['num[:, 1 * stateDim:2 * stateDim]'], {}), '(num[:, 1 * stateDim:2 * stateDim])\n', (1575, 1610), True, 'import numpy as np\n')] |
import numpy as np
# Tunable configuration constants for a lane-detection pipeline.
IMG_SHAPE = (1280, 720) # x, y
# Lane pixel extraction thresholds:
SATURATION_THRESHOLD = (155, 255)
SOBEL_X_ABS_SCALED_THRESHOLD = (40, 140)
# Key x and y coordinates for perspective transform:
LANE_START_X_LEFT = 185
LANE_START_X_RIGHT = IMG_SHAPE[0] - 150
LANE_WIDTH = LANE_START_X_RIGHT - LANE_START_X_LEFT
X_MIDDLE = (LANE_START_X_LEFT + LANE_WIDTH // 2)
APEX_Y = 450
APEX_X_OFFSET_LEFT = 65 # Relative to middle of lane
APEX_X_OFFSET_RIGHT = 30
DST_X_OFFSET = 300
DST_X_LEFT = DST_X_OFFSET
DST_X_RIGHT = IMG_SHAPE[0] - DST_X_OFFSET
# Perspective transform points (also usable as polygons):
# Counter clockwise from topleft:
# Source trapezoid in the camera image maps to a rectangle (DST) in the
# warped, top-down view.
LANE_AREA_SRC = np.array([
    (X_MIDDLE - APEX_X_OFFSET_LEFT, APEX_Y),
    (LANE_START_X_LEFT, IMG_SHAPE[1] - 1),
    (LANE_START_X_RIGHT, IMG_SHAPE[1] - 1),
    (X_MIDDLE + APEX_X_OFFSET_RIGHT, APEX_Y),
], dtype=np.float32)
LANE_AREA_DST = np.array([
    (DST_X_LEFT, 0),
    (DST_X_LEFT, IMG_SHAPE[1] - 1),
    (DST_X_RIGHT, IMG_SHAPE[1] - 1),
    (DST_X_RIGHT, 0),
], dtype=np.float32)
# From warped (perspective transformed) pixel space to approximate
# real-world space (meters per pixel):
YM_PER_PIX = 30 / IMG_SHAPE[1]
XM_PER_PIX = 3.7 / (DST_X_RIGHT - DST_X_LEFT)
# Sanity check for distance in meters between lane lines:
# US spec is 3.7 m
LANE_DIST_MIN_M = 2.7
LANE_DIST_MAX_M = 4.7
# Number of "bad" frames we're willing to tolerate in the video before
# resetting:
NUM_BAD_FRAMES_UNTIL_RESET = 25
| [
"numpy.array"
] | [((673, 872), 'numpy.array', 'np.array', (['[(X_MIDDLE - APEX_X_OFFSET_LEFT, APEX_Y), (LANE_START_X_LEFT, IMG_SHAPE[1] -\n 1), (LANE_START_X_RIGHT, IMG_SHAPE[1] - 1), (X_MIDDLE +\n APEX_X_OFFSET_RIGHT, APEX_Y)]'], {'dtype': 'np.float32'}), '([(X_MIDDLE - APEX_X_OFFSET_LEFT, APEX_Y), (LANE_START_X_LEFT, \n IMG_SHAPE[1] - 1), (LANE_START_X_RIGHT, IMG_SHAPE[1] - 1), (X_MIDDLE +\n APEX_X_OFFSET_RIGHT, APEX_Y)], dtype=np.float32)\n', (681, 872), True, 'import numpy as np\n'), ((900, 1033), 'numpy.array', 'np.array', (['[(DST_X_LEFT, 0), (DST_X_LEFT, IMG_SHAPE[1] - 1), (DST_X_RIGHT, IMG_SHAPE[1\n ] - 1), (DST_X_RIGHT, 0)]'], {'dtype': 'np.float32'}), '([(DST_X_LEFT, 0), (DST_X_LEFT, IMG_SHAPE[1] - 1), (DST_X_RIGHT, \n IMG_SHAPE[1] - 1), (DST_X_RIGHT, 0)], dtype=np.float32)\n', (908, 1033), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.framework import ops
def unpool_with_argmax(pool, ind, name = None, ksize=[1, 2, 2, 1], upsample=[-1,-1]):
    """
    Unpooling layer after max_pool_with_argmax.
    Args:
        pool: max pooled output tensor (NHWC, static shape required)
        ind: argmax indices, as produced by tf.nn.max_pool_with_argmax
        ksize: ksize is the same as for the pool
        upsample: explicit [width, height] for the output; if both are <= 0
                  the output is the input size scaled by ksize
    Return:
        unpool: unpooling tensor
    """
    upsample_w = upsample[0]
    upsample_h = upsample[1]
    with tf.variable_scope(name):
        input_shape = pool.get_shape().as_list()
        if (upsample_h > 0 or upsample_w > 0):
            output_shape = (input_shape[0],upsample_h,upsample_w, input_shape[3])
        else:
            output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])
        # Scatter the pooled values back to their argmax positions: flatten
        # the pooled tensor, pair each flat index with its batch id, and
        # scatter into a zero tensor of flattened output size.
        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]
        pool_ = tf.reshape(pool, [flat_input_size])
        # Batch id for every element (argmax indices are per-image flat).
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype), shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret
"""
@ops.RegisterGradient("MaxPoolWithArgmax")
def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
return gen_nn_ops._max_pool_grad_with_argmax(op.inputs[0],
grad,
op.outputs[1],
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"))
"""
DEFAULT_PADDING = 'SAME'
trainable = True
def make_var(name, shape):
    '''Create a new TensorFlow variable.

    The module-level `trainable` flag controls whether the variable joins
    the TRAINABLE_VARIABLES collection.
    '''
    # BUG FIX: `trainable` was passed positionally, landing in the `dtype`
    # parameter of tf.get_variable; it must be passed by keyword.
    return tf.get_variable(name, shape, trainable=trainable)
def validate_padding(padding):
    '''Assert that `padding` is one of the supported schemes.'''
    supported = ('SAME', 'VALID')
    assert padding in supported
def conv(input,
         k_h,
         k_w,
         c_o,
         s_h,
         s_w,
         name,
         relu=True,
         padding=DEFAULT_PADDING,
         group=1,
         biased=True):
    '''2-D convolution layer (NHWC).

    Args:
        input: input tensor.
        k_h, k_w: kernel height/width.
        c_o: number of output channels.
        s_h, s_w: vertical/horizontal stride.
        name: variable scope name.
        relu: apply ReLU after the convolution.
        padding: 'SAME' or 'VALID'.
        group: number of channel groups (grouped convolution).
        biased: add a learned per-channel bias.
    '''
    # Verify that the padding is acceptable
    validate_padding(padding)
    # Get the number of channels in the input
    c_i = input.get_shape()[-1]
    # Verify that the grouping parameter is valid
    assert c_i % group == 0
    assert c_o % group == 0
    # Convolution for a given input and kernel
    convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
    with tf.variable_scope(name) as scope:
        kernel = make_var('weights', shape=[k_h, k_w, c_i / group, c_o])
        if group == 1:
            # This is the common-case. Convolve the input without any further complications.
            output = convolve(input, kernel)
        else:
            # Split the input into groups and then convolve each of them independently
            # NOTE(review): tf.split(3, group, ...) / tf.concat(3, ...) use the
            # pre-TF-1.0 argument order (axis first), while the rest of this
            # file uses TF-1.x APIs — confirm the target TF version.
            input_groups = tf.split(3, group, input)
            kernel_groups = tf.split(3, group, kernel)
            output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
            # Concatenate the groups
            output = tf.concat(3, output_groups)
        # Add the biases
        if biased:
            biases = make_var('biases', [c_o])
            output = tf.nn.bias_add(output, biases)
        if relu:
            # ReLU non-linearity
            output = tf.nn.relu(output, name=scope.name)
        return output
def relu(input, name):
    '''Apply the element-wise ReLU non-linearity, named `name` in the graph.'''
    return tf.nn.relu(input, name=name)
def max_pool(input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
    '''Max-pool `input` (NHWC) with a k_h x k_w window and s_h x s_w strides.'''
    validate_padding(padding)
    return tf.nn.max_pool(input,
                          ksize=[1, k_h, k_w, 1],
                          strides=[1, s_h, s_w, 1],
                          padding=padding,
                          name=name)
def batch_normalization(input, name, scale_offset=True, relu=False):
    '''Inference-only batch normalization.

    With mean=0 and variance=1 hard-coded below, this reduces to
    output = input * scale / sqrt(1 + eps) + offset.
    '''
    # NOTE: Currently, only inference is supported
    with tf.variable_scope(name) as scope:
        shape = [input.get_shape()[-1]]
        if scale_offset:
            # NOTE(review): the scale variable is named 'mean' and the offset
            # variable 'variance' — presumably to match an external
            # checkpoint's parameter naming; confirm before renaming.
            scale = make_var('mean', shape=shape)
            offset = make_var('variance', shape=shape)
        else:
            scale, offset = (None, None)
        output = tf.nn.batch_normalization(
            input,
            #mean=make_var('mean', shape=shape),
            #variance=make_var('variance', shape=shape),
            mean=0.0,
            variance=1.0,
            offset=offset,
            scale=scale,
            # TODO: This is the default Caffe batch norm eps
            # Get the actual eps from parameters
            variance_epsilon=1e-5,
            name=name)
        if relu:
            output = tf.nn.relu(output)
        return output
| [
"tensorflow.nn.batch_normalization",
"tensorflow.range",
"tensorflow.nn.relu",
"tensorflow.reshape",
"tensorflow.nn.bias_add",
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.ones_like",
"tensorflow.nn.max_pool",
"tensorflow.nn.conv2d",
"tensorflow.split",
"tensorflow.scatter_nd"... | [((2161, 2200), 'tensorflow.get_variable', 'tf.get_variable', (['name', 'shape', 'trainable'], {}), '(name, shape, trainable)\n', (2176, 2200), True, 'import tensorflow as tf\n'), ((3892, 3920), 'tensorflow.nn.relu', 'tf.nn.relu', (['input'], {'name': 'name'}), '(input, name=name)\n', (3902, 3920), True, 'import tensorflow as tf\n'), ((4035, 4138), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['input'], {'ksize': '[1, k_h, k_w, 1]', 'strides': '[1, s_h, s_w, 1]', 'padding': 'padding', 'name': 'name'}), '(input, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1],\n padding=padding, name=name)\n', (4049, 4138), True, 'import tensorflow as tf\n'), ((561, 584), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (578, 584), True, 'import tensorflow as tf\n'), ((920, 940), 'numpy.prod', 'np.prod', (['input_shape'], {}), '(input_shape)\n', (927, 940), True, 'import numpy as np\n'), ((1057, 1092), 'tensorflow.reshape', 'tf.reshape', (['pool', '[flat_input_size]'], {}), '(pool, [flat_input_size])\n', (1067, 1092), True, 'import tensorflow as tf\n'), ((1259, 1294), 'tensorflow.reshape', 'tf.reshape', (['b', '[flat_input_size, 1]'], {}), '(b, [flat_input_size, 1])\n', (1269, 1294), True, 'import tensorflow as tf\n'), ((1310, 1347), 'tensorflow.reshape', 'tf.reshape', (['ind', '[flat_input_size, 1]'], {}), '(ind, [flat_input_size, 1])\n', (1320, 1347), True, 'import tensorflow as tf\n'), ((1363, 1386), 'tensorflow.concat', 'tf.concat', (['[b, ind_]', '(1)'], {}), '([b, ind_], 1)\n', (1372, 1386), True, 'import tensorflow as tf\n'), ((1402, 1453), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['ind_', 'pool_'], {'shape': 'flat_output_shape'}), '(ind_, pool_, shape=flat_output_shape)\n', (1415, 1453), True, 'import tensorflow as tf\n'), ((1468, 1497), 'tensorflow.reshape', 'tf.reshape', (['ret', 'output_shape'], {}), '(ret, output_shape)\n', (1478, 1497), True, 'import tensorflow as tf\n'), ((2869, 2922), 'tensorflow.nn.conv2d', 
'tf.nn.conv2d', (['i', 'k', '[1, s_h, s_w, 1]'], {'padding': 'padding'}), '(i, k, [1, s_h, s_w, 1], padding=padding)\n', (2881, 2922), True, 'import tensorflow as tf\n'), ((2932, 2955), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (2949, 2955), True, 'import tensorflow as tf\n'), ((4368, 4391), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (4385, 4391), True, 'import tensorflow as tf\n'), ((4644, 4767), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['input'], {'mean': '(0.0)', 'variance': '(1.0)', 'offset': 'offset', 'scale': 'scale', 'variance_epsilon': '(1e-05)', 'name': 'name'}), '(input, mean=0.0, variance=1.0, offset=offset,\n scale=scale, variance_epsilon=1e-05, name=name)\n', (4669, 4767), True, 'import tensorflow as tf\n'), ((1126, 1168), 'tensorflow.range', 'tf.range', (['output_shape[0]'], {'dtype': 'ind.dtype'}), '(output_shape[0], dtype=ind.dtype)\n', (1134, 1168), True, 'import tensorflow as tf\n'), ((1215, 1232), 'tensorflow.ones_like', 'tf.ones_like', (['ind'], {}), '(ind)\n', (1227, 1232), True, 'import tensorflow as tf\n'), ((3328, 3353), 'tensorflow.split', 'tf.split', (['(3)', 'group', 'input'], {}), '(3, group, input)\n', (3336, 3353), True, 'import tensorflow as tf\n'), ((3382, 3408), 'tensorflow.split', 'tf.split', (['(3)', 'group', 'kernel'], {}), '(3, group, kernel)\n', (3390, 3408), True, 'import tensorflow as tf\n'), ((3557, 3584), 'tensorflow.concat', 'tf.concat', (['(3)', 'output_groups'], {}), '(3, output_groups)\n', (3566, 3584), True, 'import tensorflow as tf\n'), ((3697, 3727), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['output', 'biases'], {}), '(output, biases)\n', (3711, 3727), True, 'import tensorflow as tf\n'), ((3799, 3834), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], {'name': 'scope.name'}), '(output, name=scope.name)\n', (3809, 3834), True, 'import tensorflow as tf\n'), ((5102, 5120), 'tensorflow.nn.relu', 'tf.nn.relu', (['output'], 
{}), '(output)\n', (5112, 5120), True, 'import tensorflow as tf\n')] |
import unittest
import yaml
import numpy as np
import network_attack_simulator.envs.loader as loader
from network_attack_simulator.envs.loader import INTERNET, DMZ, SENSITIVE, USER
from network_attack_simulator.envs.machine import Machine
class LoaderTestCase(unittest.TestCase):
def test_load_yaml_file_non_existent(self):
invalid_file = "invalid_file"
with self.assertRaises(FileNotFoundError): # noqa
loader.load_yaml_file(invalid_file)
def test_load_yaml_file_invalid(self):
invalid_file = "inputs/invalid_yaml.yaml"
with self.assertRaises(yaml.YAMLError):
loader.load_yaml_file(invalid_file)
def test_load_yaml_file_valid(self):
file = "inputs/valid_yaml.yaml"
expected = {"brackets": []}
actual = loader.load_yaml_file(file)
self.assertDictEqual(expected, actual)
def test_check_config_valid_invalid_key_number(self):
config_small = {"invalid_key": "some_data"}
expected_num_keys = len(loader.VALID_CONFIG_KEYS)
config_large = {}
for i in range(expected_num_keys + 1):
config_large["invalid_key" + str(i)] = "some_data" + str(i)
with self.assertRaises(KeyError):
loader.check_config_valid(config_small)
loader.check_config_valid(config_large)
def test_check_config_valid_invalid_key(self):
expected_num_keys = len(loader.VALID_CONFIG_KEYS)
config = {}
for i in range(expected_num_keys):
config["invalid_key" + str(i)] = "some_data" + str(i)
with self.assertRaises(KeyError):
loader.check_config_valid(config)
def test_check_config_valid_invalid_value_type(self):
config = {}
for k in loader.VALID_CONFIG_KEYS.keys():
config[k] = "some_data"
with self.assertRaises(TypeError):
loader.check_config_valid(config)
def test_check_config_valid_invalid_subnets(self):
config = self.get_valid_config_dict()
config["subnets"] = ["invalid_type"]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
config["subnets"] = [1.0]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
config["subnets"] = [-1]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
config["subnets"] = [0]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
def test_check_config_valid_invalid_topology(self):
config = self.get_valid_config_dict()
# invalid number of rows
config["topology"] = [[1]]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
# not 2D array
config["topology"] = [1, 1, 1]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
# invalid number of columns
config["topology"] = [[1, 1],
[1, 1],
[1, 1]]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
# invalid number of columns V2
config["topology"] = [[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1]]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
# invalid value
config["topology"] = [[1.0, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
# invalid value V2
config["topology"] = [[1, -1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
# invalid value V3
config["topology"] = [[1, 1, 1, 1],
[1, 1, 2, 1],
[1, 1, 1, 1]]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
# invalid value V4
config["topology"] = [[1, 1, 1, 1],
[1, 1, 1, 1],
[1, "invalid", 1, 1]]
with self.assertRaises(ValueError):
loader.check_config_valid(config)
def test_check_config_valid_invalid_services(self):
    """Non-positive service counts must raise ValueError."""
    config = self.get_valid_config_dict()
    for bad_services in (-1, 0):
        config["services"] = bad_services
        with self.assertRaises(ValueError):
            loader.check_config_valid(config)
def test_check_config_valid_invalid_sensitive_machines(self):
    """Every malformed sensitive_machines list must raise ValueError."""
    config = self.get_valid_config_dict()
    bad_machine_lists = [
        # too few sensitive machines
        [],
        # too many sensitive machines
        [[0, 0, 1], [1, 0, 1], [2, 0, 1], [3, 0, 1]],
        # tuple too short
        [[0, 0], [1, 0, 1], [2, 0, 1]],
        # tuple too long
        [[0, 0, 0, 0], [1, 0, 1], [2, 0, 1]],
        # subnet id not an int
        [["str_a", 0, 0], [1, 0, 1], [2, 0, 1]],
        # subnet id too small
        [[0, 0, 0], [-1, 0, 1], [2, 0, 1]],
        # subnet id too large
        [[0, 0, 0], [1, 0, 1], [3, 0, 1]],
        # machine id not an int
        [[0, "str_a", 0], [1, 0, 1], [2, 0, 1]],
        # machine id too small
        [[0, 0, 0], [1, -1, 1], [2, 0, 1]],
        # machine id too large
        [[0, 0, 0], [1, 0, 1], [2, 2, 1]],
        # value not an int or float
        [[0, 0, "str_a"], [1, 0, 1], [2, 0, 1]],
        # value too small
        [[0, 0, 0], [1, 0, 1], [2, 0, 1]],
        # value too small V2
        [[0, 0, 100], [1, 0, -1.0], [2, 0, 1]],
        # duplicate machine addresses
        [[0, 0, 1], [1, 0, 1], [0, 0, 1]],
        # duplicate machine addresses V2
        [[0, 0, 1], [1, 0, 1], [0, 0, 5]],
    ]
    for machines in bad_machine_lists:
        config["sensitive_machines"] = machines
        with self.assertRaises(ValueError):
            loader.check_config_valid(config)
def test_check_config_valid_valid_config(self):
    """A well-formed config must pass validation without raising."""
    try:
        loader.check_config_valid(self.get_valid_config_dict())
    except Exception:
        self.fail()
def test_load_config_from_file_valid_config(self):
    """Loading the reference YAML file yields the expected config dict."""
    loaded = loader.load_config("inputs/valid_config.yaml")
    self.assertDictEqual(self.get_valid_config_dict(), loaded)
def test_load_config_invalid_subnets(self):
    """A YAML file with malformed subnets must fail to load."""
    with self.assertRaises(ValueError):
        loader.load_config("inputs/invalid_config_subnets.yaml")
def test_load_config_invalid_topology(self):
    """A YAML file with a malformed topology must fail to load."""
    with self.assertRaises(ValueError):
        loader.load_config("inputs/invalid_config_topology.yaml")
def test_load_config_invalid_services(self):
    """A YAML file with a wrong-typed services entry must fail to load."""
    with self.assertRaises(TypeError):
        loader.load_config("inputs/invalid_config_services.yaml")
def test_load_config_invalid_sensitive_machines(self):
    """A YAML file with malformed sensitive machines must fail to load."""
    with self.assertRaises(ValueError):
        loader.load_config("inputs/invalid_config_sensitive_machines.yaml")
def test_generate_topology(self):
    """generate_topology builds the expected subnet adjacency matrix."""
    cases = [
        ([1, 1, 1, 1],
         np.asarray([[1, 1, 0, 0],
                     [1, 1, 1, 1],
                     [0, 1, 1, 1],
                     [0, 1, 1, 1]])),
        ([1, 1, 1, 5, 3],
         np.asarray([[1, 1, 0, 0, 0],
                     [1, 1, 1, 1, 0],
                     [0, 1, 1, 1, 0],
                     [0, 1, 1, 1, 1],
                     [0, 0, 0, 1, 1]])),
    ]
    for subnets, expected in cases:
        self.assertTrue(np.array_equal(expected, loader.generate_topology(subnets)))
def test_correllated_configs(self):
    """Exploratory check of machine-config correlation knobs.

    Prints (does not assert) how the number of distinct configurations,
    duplicate configurations, and exploitable services vary with the
    generator hyper-parameters alpha_H, alpha_V and lambda_V.
    The RNG is reseeded before every generation so runs are comparable.
    """
    # nM = machines in the network, nS = services/exploits
    nM = 40
    nS = 10
    # parameter sweeps to compare
    alpha_H = [0.5, 1, 2.5, 5, 10]
    alpha_V = [0.5, 1, 2.5, 5, 10]
    lambda_V = [1, 2.5, 5, 10]
    subnets = loader.generate_subnets(nM)
    s_machines = loader.generate_sensitive_machines(subnets, 1, 1)
    # test generating network with uniform dist of configs
    print("\nTest Uniform dist of configurations")
    np.random.seed(1)
    machines = loader.generate_machines(subnets, nS, s_machines, uniform=True, alpha_H=0.5,
                                        alpha_V=2.0, lambda_V=1.0)
    num_configs, max_same = self.num_similiar_configs(machines)
    print("\tUniform: num_configs={0}, max_same_configs={1}".format(num_configs, max_same))
    max_vulns, avg_vulns = self.num_exploits_avail(nS, nM, machines)
    print("\tUniform: max_vulns={0}, avg_vulns={1}".format(max_vulns, avg_vulns))
    # for each compare number of similar configs for range of alpha_H
    print("Test alpha_H")
    for h in alpha_H:
        np.random.seed(1)
        machines = loader.generate_machines(subnets, nS, s_machines, uniform=False, alpha_H=h,
                                            alpha_V=2.0, lambda_V=1.0)
        num_configs, max_same = self.num_similiar_configs(machines)
        print("\talpha_H={0}, num_configs={1}, max_same_configs={2}"
              .format(h, num_configs, max_same))
    # for each compare number of similar configs and services for range of alpha_V
    print("Test alpha_V")
    for v in alpha_V:
        np.random.seed(1)
        machines = loader.generate_machines(subnets, nS, s_machines, uniform=False, alpha_H=2.0,
                                            alpha_V=v, lambda_V=1.0)
        num_configs, max_same = self.num_similiar_configs(machines)
        print("\talpha_V={0}, num_configs={1}, max_same_configs={2}"
              .format(v, num_configs, max_same))
    # for each compare number of services per machine for range of lambda_V
    print("Test lambda_V")
    for l in lambda_V:
        np.random.seed(1)
        machines = loader.generate_machines(subnets, nS, s_machines, uniform=False, alpha_H=2.0,
                                            alpha_V=2.0, lambda_V=l)
        max_vulns, avg_vulns = self.num_exploits_avail(nS, nM, machines)
        print("\tlambda_V={0}, max_vulns={1}, avg_vulns={2}".format(l, max_vulns, avg_vulns))
def test_generate_firewall_one_exploit(self):
    """With one exploit, only traffic into INTERNET (or INTERNET past DMZ) is blocked."""
    config = loader.generate_config(3, 1, 1, 1)
    firewall = config['firewall']
    num_subnets = USER + 1
    for src in range(num_subnets):
        for dest in range(num_subnets):
            blocked = ((src != INTERNET and dest == INTERNET)
                       or (src == INTERNET and dest > DMZ))
            for service in firewall[src][dest]:
                if blocked:
                    self.assertFalse(service)
                else:
                    self.assertTrue(service)
def test_generate_firewall_two_exploits_none_restricted(self):
    """High restrictiveness still only blocks INTERNET-bound / past-DMZ traffic."""
    config = loader.generate_config(3, 2, 1, 1, restrictiveness=5)
    firewall = config['firewall']
    num_subnets = USER + 1
    for src in range(num_subnets):
        for dest in range(num_subnets):
            blocked = ((src != INTERNET and dest == INTERNET)
                       or (src == INTERNET and dest > DMZ))
            for service in firewall[src][dest]:
                if blocked:
                    self.assertFalse(service)
                else:
                    self.assertTrue(service)
def test_generate_firewall_two_exploits_one_restricted(self):
    """With restrictiveness 1, only services running on the destination are allowed."""
    subnets = [1, 1, 1, 1]
    n_subnets = len(subnets)
    n_services = 2
    machines = {
        (DMZ, 0): Machine((DMZ, 0), [True, False], 0),
        (SENSITIVE, 0): Machine((SENSITIVE, 0), [True, False], 0),
        (USER, 0): Machine((USER, 0), [False, True], 0),
    }
    firewall = loader.generate_firewalls(subnets, n_services, machines, 1)
    expected = np.full((n_subnets, n_subnets, n_services), False)
    for src in range(n_subnets):
        for dest in range(n_subnets):
            if src == dest:
                # traffic within a subnet is always allowed
                expected[src][dest][0] = True
                expected[src][dest][1] = True
            elif dest == INTERNET or (src == INTERNET and dest > DMZ):
                continue
            else:
                dest_services = machines[(dest, 0)]._services
                expected[src][dest][0] = dest_services[0]
                expected[src][dest][1] = dest_services[1]
    self.assertTrue(np.equal(expected, firewall).all())
def test_generate_firewall_two_exploits_three_restricted(self):
    """With restrictiveness 3, everything is allowed except INTERNET-bound / past-DMZ."""
    subnets = [1, 1, 1, 1]
    n_subnets = len(subnets)
    n_services = 2
    machines = {
        (DMZ, 0): Machine((DMZ, 0), [True, False], 0),
        (SENSITIVE, 0): Machine((SENSITIVE, 0), [True, False], 0),
        (USER, 0): Machine((USER, 0), [False, True], 0),
    }
    firewall = loader.generate_firewalls(subnets, n_services, machines, 3)
    expected = np.full((n_subnets, n_subnets, n_services), True)
    for src in range(n_subnets):
        for dest in range(n_subnets):
            if (src != INTERNET and dest == INTERNET) or (src == INTERNET and dest > DMZ):
                expected[src][dest][0] = False
                expected[src][dest][1] = False
    self.assertTrue(np.equal(expected, firewall).all())
def num_similiar_configs(self, machines):
    """Return (#distinct service configs, max duplicate count) over machines.

    Note: the duplicate count excludes the first occurrence of each config,
    i.e. a config seen k times contributes k-1.
    """
    distinct_configs = []
    extra_counts = []
    for machine in machines.values():
        services = machine._services
        if services in distinct_configs:
            extra_counts[distinct_configs.index(services)] += 1
        else:
            distinct_configs.append(services)
            extra_counts.append(0)
    return len(distinct_configs), max(extra_counts)
def num_exploits_avail(self, nS, nM, machines):
    """Return (max, mean) count of machines running each of the nS services.

    ``nM`` is accepted for interface compatibility but unused.
    """
    counts = np.zeros(nS)
    for machine in machines.values():
        services = machine._services
        for idx in range(nS):
            if services[idx]:
                counts[idx] += 1
    return np.max(counts), np.average(counts)
def get_valid_config_dict(self):
    """Build a config dict with a known-valid value for every expected key.

    Fix: the original compared strings with ``is`` (``k is "subnets"``),
    which only works by accident of CPython string interning and raises a
    SyntaxWarning on modern Pythons; it also silently reused a stale
    ``value`` for any unexpected key. Use an explicit mapping so an
    unknown key fails loudly with KeyError.
    """
    valid_values = {
        "subnets": [1, 1, 1],
        "topology": [[1, 1, 1, 1],
                     [0, 1, 1, 1],
                     [0, 1, 1, 1]],
        "services": 2,
        "sensitive_machines": [[1, 0, 9000], [2, 0, 5000]],
    }
    config = {}
    for key in loader.VALID_CONFIG_KEYS:
        config[key] = valid_values[key]
    return config
def print_firewall(self, firewall):
    """Dump the firewall's allowed-service lists, one (src, dest) pair per line."""
    print()
    n = len(firewall)
    for src in range(n):
        for dest in range(n):
            print("{0} -> {1}: {2}".format(src, dest, firewall[src][dest]))
# Run the full test suite when this module is executed as a script.
if __name__ == "__main__":
    unittest.main()
| [
"numpy.random.seed",
"network_attack_simulator.envs.loader.generate_config",
"network_attack_simulator.envs.loader.generate_firewalls",
"network_attack_simulator.envs.loader.generate_topology",
"unittest.main",
"numpy.full",
"network_attack_simulator.envs.loader.generate_machines",
"numpy.equal",
"n... | [((17553, 17568), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17566, 17568), False, 'import unittest\n'), ((801, 828), 'network_attack_simulator.envs.loader.load_yaml_file', 'loader.load_yaml_file', (['file'], {}), '(file)\n', (822, 828), True, 'import network_attack_simulator.envs.loader as loader\n'), ((1759, 1790), 'network_attack_simulator.envs.loader.VALID_CONFIG_KEYS.keys', 'loader.VALID_CONFIG_KEYS.keys', ([], {}), '()\n', (1788, 1790), True, 'import network_attack_simulator.envs.loader as loader\n'), ((8528, 8574), 'network_attack_simulator.envs.loader.load_config', 'loader.load_config', (['"""inputs/valid_config.yaml"""'], {}), "('inputs/valid_config.yaml')\n", (8546, 8574), True, 'import network_attack_simulator.envs.loader as loader\n'), ((9464, 9532), 'numpy.asarray', 'np.asarray', (['[[1, 1, 0, 0], [1, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1]]'], {}), '([[1, 1, 0, 0], [1, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1]])\n', (9474, 9532), True, 'import numpy as np\n'), ((9643, 9676), 'network_attack_simulator.envs.loader.generate_topology', 'loader.generate_topology', (['subnets'], {}), '(subnets)\n', (9667, 9676), True, 'import network_attack_simulator.envs.loader as loader\n'), ((9790, 9891), 'numpy.asarray', 'np.asarray', (['[[1, 1, 0, 0, 0], [1, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 1], [0, 0,\n 0, 1, 1]]'], {}), '([[1, 1, 0, 0, 0], [1, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1,\n 1], [0, 0, 0, 1, 1]])\n', (9800, 9891), True, 'import numpy as np\n'), ((10029, 10062), 'network_attack_simulator.envs.loader.generate_topology', 'loader.generate_topology', (['subnets'], {}), '(subnets)\n', (10053, 10062), True, 'import network_attack_simulator.envs.loader as loader\n'), ((10326, 10353), 'network_attack_simulator.envs.loader.generate_subnets', 'loader.generate_subnets', (['nM'], {}), '(nM)\n', (10349, 10353), True, 'import network_attack_simulator.envs.loader as loader\n'), ((10375, 10424), 
'network_attack_simulator.envs.loader.generate_sensitive_machines', 'loader.generate_sensitive_machines', (['subnets', '(1)', '(1)'], {}), '(subnets, 1, 1)\n', (10409, 10424), True, 'import network_attack_simulator.envs.loader as loader\n'), ((10552, 10569), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (10566, 10569), True, 'import numpy as np\n'), ((10589, 10696), 'network_attack_simulator.envs.loader.generate_machines', 'loader.generate_machines', (['subnets', 'nS', 's_machines'], {'uniform': '(True)', 'alpha_H': '(0.5)', 'alpha_V': '(2.0)', 'lambda_V': '(1.0)'}), '(subnets, nS, s_machines, uniform=True, alpha_H=0.5,\n alpha_V=2.0, lambda_V=1.0)\n', (10613, 10696), True, 'import network_attack_simulator.envs.loader as loader\n'), ((12755, 12791), 'network_attack_simulator.envs.loader.generate_config', 'loader.generate_config', (['nM', 'nS', '(1)', '(1)'], {}), '(nM, nS, 1, 1)\n', (12777, 12791), True, 'import network_attack_simulator.envs.loader as loader\n'), ((13389, 13444), 'network_attack_simulator.envs.loader.generate_config', 'loader.generate_config', (['nM', 'nS', '(1)', '(1)'], {'restrictiveness': '(5)'}), '(nM, nS, 1, 1, restrictiveness=5)\n', (13411, 13444), True, 'import network_attack_simulator.envs.loader as loader\n'), ((14154, 14189), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(DMZ, 0)', '[True, False]', '(0)'], {}), '((DMZ, 0), [True, False], 0)\n', (14161, 14189), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((14225, 14266), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(SENSITIVE, 0)', '[True, False]', '(0)'], {}), '((SENSITIVE, 0), [True, False], 0)\n', (14232, 14266), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((14297, 14333), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(USER, 0)', '[False, True]', '(0)'], {}), '((USER, 0), [False, True], 0)\n', (14304, 14333), False, 'from network_attack_simulator.envs.machine import 
Machine\n'), ((14353, 14418), 'network_attack_simulator.envs.loader.generate_firewalls', 'loader.generate_firewalls', (['subnets', 'nS', 'machines', 'restrictiveness'], {}), '(subnets, nS, machines, restrictiveness)\n', (14378, 14418), True, 'import network_attack_simulator.envs.loader as loader\n'), ((14439, 14485), 'numpy.full', 'np.full', (['(num_subnets, num_subnets, nS)', '(False)'], {}), '((num_subnets, num_subnets, nS), False)\n', (14446, 14485), True, 'import numpy as np\n'), ((15281, 15316), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(DMZ, 0)', '[True, False]', '(0)'], {}), '((DMZ, 0), [True, False], 0)\n', (15288, 15316), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((15352, 15393), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(SENSITIVE, 0)', '[True, False]', '(0)'], {}), '((SENSITIVE, 0), [True, False], 0)\n', (15359, 15393), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((15424, 15460), 'network_attack_simulator.envs.machine.Machine', 'Machine', (['(USER, 0)', '[False, True]', '(0)'], {}), '((USER, 0), [False, True], 0)\n', (15431, 15460), False, 'from network_attack_simulator.envs.machine import Machine\n'), ((15480, 15545), 'network_attack_simulator.envs.loader.generate_firewalls', 'loader.generate_firewalls', (['subnets', 'nS', 'machines', 'restrictiveness'], {}), '(subnets, nS, machines, restrictiveness)\n', (15505, 15545), True, 'import network_attack_simulator.envs.loader as loader\n'), ((15566, 15611), 'numpy.full', 'np.full', (['(num_subnets, num_subnets, nS)', '(True)'], {}), '((num_subnets, num_subnets, nS), True)\n', (15573, 15611), True, 'import numpy as np\n'), ((16477, 16489), 'numpy.zeros', 'np.zeros', (['nS'], {}), '(nS)\n', (16485, 16489), True, 'import numpy as np\n'), ((16775, 16807), 'network_attack_simulator.envs.loader.VALID_CONFIG_KEYS.items', 'loader.VALID_CONFIG_KEYS.items', ([], {}), '()\n', (16805, 16807), True, 'import 
network_attack_simulator.envs.loader as loader\n'), ((440, 475), 'network_attack_simulator.envs.loader.load_yaml_file', 'loader.load_yaml_file', (['invalid_file'], {}), '(invalid_file)\n', (461, 475), True, 'import network_attack_simulator.envs.loader as loader\n'), ((630, 665), 'network_attack_simulator.envs.loader.load_yaml_file', 'loader.load_yaml_file', (['invalid_file'], {}), '(invalid_file)\n', (651, 665), True, 'import network_attack_simulator.envs.loader as loader\n'), ((1244, 1283), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config_small'], {}), '(config_small)\n', (1269, 1283), True, 'import network_attack_simulator.envs.loader as loader\n'), ((1296, 1335), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config_large'], {}), '(config_large)\n', (1321, 1335), True, 'import network_attack_simulator.envs.loader as loader\n'), ((1629, 1662), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (1654, 1662), True, 'import network_attack_simulator.envs.loader as loader\n'), ((1883, 1916), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (1908, 1916), True, 'import network_attack_simulator.envs.loader as loader\n'), ((2121, 2154), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (2146, 2154), True, 'import network_attack_simulator.envs.loader as loader\n'), ((2246, 2279), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (2271, 2279), True, 'import network_attack_simulator.envs.loader as loader\n'), ((2370, 2403), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (2395, 2403), True, 'import network_attack_simulator.envs.loader as 
loader\n'), ((2493, 2526), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (2518, 2526), True, 'import network_attack_simulator.envs.loader as loader\n'), ((2754, 2787), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (2779, 2787), True, 'import network_attack_simulator.envs.loader as loader\n'), ((2906, 2939), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (2931, 2939), True, 'import network_attack_simulator.envs.loader as loader\n'), ((3146, 3179), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (3171, 3179), True, 'import network_attack_simulator.envs.loader as loader\n'), ((3404, 3437), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (3429, 3437), True, 'import network_attack_simulator.envs.loader as loader\n'), ((3652, 3685), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (3677, 3685), True, 'import network_attack_simulator.envs.loader as loader\n'), ((3902, 3935), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (3927, 3935), True, 'import network_attack_simulator.envs.loader as loader\n'), ((4151, 4184), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (4176, 4184), True, 'import network_attack_simulator.envs.loader as loader\n'), ((4408, 4441), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (4433, 4441), True, 'import network_attack_simulator.envs.loader as loader\n'), ((4657, 4690), 
'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (4682, 4690), True, 'import network_attack_simulator.envs.loader as loader\n'), ((4805, 4838), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (4830, 4838), True, 'import network_attack_simulator.envs.loader as loader\n'), ((5087, 5120), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (5112, 5120), True, 'import network_attack_simulator.envs.loader as loader\n'), ((5419, 5452), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (5444, 5452), True, 'import network_attack_simulator.envs.loader as loader\n'), ((5619, 5652), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (5644, 5652), True, 'import network_attack_simulator.envs.loader as loader\n'), ((5824, 5857), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (5849, 5857), True, 'import network_attack_simulator.envs.loader as loader\n'), ((6038, 6071), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (6063, 6071), True, 'import network_attack_simulator.envs.loader as loader\n'), ((6252, 6285), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (6277, 6285), True, 'import network_attack_simulator.envs.loader as loader\n'), ((6465, 6498), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (6490, 6498), True, 'import network_attack_simulator.envs.loader as loader\n'), ((6683, 6716), 'network_attack_simulator.envs.loader.check_config_valid', 
'loader.check_config_valid', (['config'], {}), '(config)\n', (6708, 6716), True, 'import network_attack_simulator.envs.loader as loader\n'), ((6898, 6931), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (6923, 6931), True, 'import network_attack_simulator.envs.loader as loader\n'), ((7112, 7145), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (7137, 7145), True, 'import network_attack_simulator.envs.loader as loader\n'), ((7334, 7367), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (7359, 7367), True, 'import network_attack_simulator.envs.loader as loader\n'), ((7543, 7576), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (7568, 7576), True, 'import network_attack_simulator.envs.loader as loader\n'), ((7760, 7793), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (7785, 7793), True, 'import network_attack_simulator.envs.loader as loader\n'), ((7961, 7994), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (7986, 7994), True, 'import network_attack_simulator.envs.loader as loader\n'), ((8165, 8198), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (8190, 8198), True, 'import network_attack_simulator.envs.loader as loader\n'), ((8323, 8356), 'network_attack_simulator.envs.loader.check_config_valid', 'loader.check_config_valid', (['config'], {}), '(config)\n', (8348, 8356), True, 'import network_attack_simulator.envs.loader as loader\n'), ((8779, 8803), 'network_attack_simulator.envs.loader.load_config', 'loader.load_config', (['file'], {}), '(file)\n', (8797, 8803), True, 
'import network_attack_simulator.envs.loader as loader\n'), ((8963, 8987), 'network_attack_simulator.envs.loader.load_config', 'loader.load_config', (['file'], {}), '(file)\n', (8981, 8987), True, 'import network_attack_simulator.envs.loader as loader\n'), ((9146, 9170), 'network_attack_simulator.envs.loader.load_config', 'loader.load_config', (['file'], {}), '(file)\n', (9164, 9170), True, 'import network_attack_simulator.envs.loader as loader\n'), ((9350, 9374), 'network_attack_simulator.envs.loader.load_config', 'loader.load_config', (['file'], {}), '(file)\n', (9368, 9374), True, 'import network_attack_simulator.envs.loader as loader\n'), ((9702, 9734), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (9716, 9734), True, 'import numpy as np\n'), ((10088, 10120), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (10102, 10120), True, 'import numpy as np\n'), ((11203, 11220), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (11217, 11220), True, 'import numpy as np\n'), ((11244, 11350), 'network_attack_simulator.envs.loader.generate_machines', 'loader.generate_machines', (['subnets', 'nS', 's_machines'], {'uniform': '(False)', 'alpha_H': 'h', 'alpha_V': '(2.0)', 'lambda_V': '(1.0)'}), '(subnets, nS, s_machines, uniform=False, alpha_H=h,\n alpha_V=2.0, lambda_V=1.0)\n', (11268, 11350), True, 'import network_attack_simulator.envs.loader as loader\n'), ((11749, 11766), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (11763, 11766), True, 'import numpy as np\n'), ((11790, 11897), 'network_attack_simulator.envs.loader.generate_machines', 'loader.generate_machines', (['subnets', 'nS', 's_machines'], {'uniform': '(False)', 'alpha_H': '(2.0)', 'alpha_V': 'v', 'lambda_V': '(1.0)'}), '(subnets, nS, s_machines, uniform=False, alpha_H=\n 2.0, alpha_V=v, lambda_V=1.0)\n', (11814, 11897), True, 'import network_attack_simulator.envs.loader as loader\n'), ((12290, 
12307), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (12304, 12307), True, 'import numpy as np\n'), ((12331, 12438), 'network_attack_simulator.envs.loader.generate_machines', 'loader.generate_machines', (['subnets', 'nS', 's_machines'], {'uniform': '(False)', 'alpha_H': '(2.0)', 'alpha_V': '(2.0)', 'lambda_V': 'l'}), '(subnets, nS, s_machines, uniform=False, alpha_H=\n 2.0, alpha_V=2.0, lambda_V=l)\n', (12355, 12438), True, 'import network_attack_simulator.envs.loader as loader\n'), ((16664, 16677), 'numpy.max', 'np.max', (['vulns'], {}), '(vulns)\n', (16670, 16677), True, 'import numpy as np\n'), ((16679, 16696), 'numpy.average', 'np.average', (['vulns'], {}), '(vulns)\n', (16689, 16696), True, 'import numpy as np\n'), ((15016, 15044), 'numpy.equal', 'np.equal', (['expected', 'firewall'], {}), '(expected, firewall)\n', (15024, 15044), True, 'import numpy as np\n'), ((15916, 15944), 'numpy.equal', 'np.equal', (['expected', 'firewall'], {}), '(expected, firewall)\n', (15924, 15944), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import altair as alt
import matplotlib.pyplot as plt
def get_first_row(s):
    """Return the first (positional) element of the pandas Series ``s``."""
    first_value = s.iloc[0]
    return first_value
#Reads the first line of line of data and determines if data is categorical, quantitative or nominal
def auto_get_data_type(df):
type_dict = dict()
columns = list(df.columns)
for column in columns:
value = get_first_row(df[column])
if isinstance(value, str):
if value.isnumeric():
type_dict[column] = 'Q'
else:
type_dict[column] = 'C'
else:
type_dict[column] = 'Q'
return type_dict
# Manually enter whether each column is categorical, quantitative, index or time
def manual_entry_data_type(df):
    """Prompt the user for every column's variable type and return the mapping."""
    prompt = 'Enter the variable type for {} (Quantitative/Categorical/Index/Time) Q/C/I/T:'
    type_dict = dict()
    for column in list(df.columns):
        type_dict[column] = input(prompt.format(column))
    return type_dict
def get_df_column_list(df):
    """Return the DataFrame's column labels as a plain Python list."""
    return df.columns.tolist()
def manual_data_type_entry(df):
    """Interactively build or correct the column -> variable-type mapping.

    Modes (chosen at the prompt):
      F - first-time entry: ask for every column's type.
      C - correction: repeatedly update individual columns.
      S - skip: print and return the (empty) mapping unchanged.

    Bug fixes vs. the original:
      * it called the misspelled ``manual_enter_data_type`` (NameError);
        the helper defined in this module is ``manual_entry_data_type``.
      * ``type_dict`` was referenced before assignment in the 'C' and
        'S' branches (UnboundLocalError); it is now initialised up front.
    """
    type_dict = dict()
    value = input('First time data entry (F), Correction (C), Skip this (S):')
    if value == 'F':
        type_dict = manual_entry_data_type(df)
    elif value == 'C':
        correction = 'y'
        while correction == 'y':
            variable = input('Enter variable name:')
            value = input('Enter variable type:')
            type_dict[variable] = value
            correction = input('Update more variables(y/n):')
    elif value == 'S':
        print('Cool! here is dict:', type_dict)
    return type_dict
def get_column_names_for_variable_type(columns, type_dict, variable_type):
    """Return the keys of ``type_dict`` whose value equals ``variable_type``.

    ``columns`` is accepted for interface compatibility but unused.
    Fix: the original used an identity comparison (``value is
    variable_type``), which only works by accident of CPython string
    interning; compare with ``==`` instead.
    """
    return [key for key, value in type_dict.items() if value == variable_type]
def get_data_for_variables(df, data_type_dict, variable_type):
    """Collect per-column data for every column of the given variable type.

    For 'C' columns: a frequency table (count of the index column per level).
    For 'Q' columns: the cleaned numeric Series.
    Returns (dict of column -> data, list of matching column names).
    Note: implicitly returns None for any other ``variable_type``.
    """
    columns = get_df_column_list(df)
    var_columns = get_column_names_for_variable_type(columns, data_type_dict, variable_type)
    index_column = get_index_column(columns, data_type_dict)
    data_dict = dict()
    if variable_type == 'C':
        for column in var_columns:
            grouped = df.groupby(column).agg({index_column: 'count'})
            data_dict[column] = grouped.reset_index()
        return data_dict, var_columns
    if variable_type == 'Q':
        for column in var_columns:
            data_dict[column] = clean_quantitative_data(df[column])
        return data_dict, var_columns
def get_index_column(columns, type_dict):
    """Return the first column whose declared type is 'I' (index).

    ``columns`` is accepted for interface compatibility but unused.
    Fix: replaced the identity comparison ``value is 'I'`` (reliant on
    CPython string interning, SyntaxWarning on 3.8+) with ``==``.
    Raises IndexError if no index column is declared (unchanged).
    """
    index_columns = [key for key, value in type_dict.items() if value == 'I']
    return index_columns[0]
def get_time_column(columns, type_dict):
    """Return the first column whose declared type is 'T' (time).

    ``columns`` is accepted for interface compatibility but unused.
    Fix: replaced the identity comparison ``value is 'T'`` (reliant on
    CPython string interning, SyntaxWarning on 3.8+) with ``==``.
    Raises IndexError if no time column is declared (unchanged).
    """
    time_columns = [key for key, value in type_dict.items() if value == 'T']
    return time_columns[0]
def create_sorted_bar_chart(df, x_name, y_name, color='orange'):
    """Build an Altair bar chart with the y axis sorted by descending x."""
    base = alt.Chart(df)
    bars = base.mark_bar(color=color)
    chart = bars.encode(x=x_name, y=alt.Y(y_name, sort='-x'))
    return chart
def get_x_y_column_names(df):
    """Return (x, y) axis names: x is the second column, y the first."""
    columns = list(df.columns)
    return columns[1], columns[0]
def show_sorted_bar_chart(df):
    """Build the default (orange) descending bar chart for a two-column frame."""
    x_name, y_name = get_x_y_column_names(df)
    return create_sorted_bar_chart(df, x_name, y_name, color='orange')
def clean_quantitative_data(s):
    """Coerce ``s`` to floats (unparseable entries -> NaN) and drop the NaNs."""
    numeric = pd.to_numeric(s, errors='coerce', downcast='float')
    return numeric.dropna()
def clean_dataframe_for_timeseries(df, data_type_dict):
    """In place: parse 'T' columns as datetimes, coerce 'Q' columns to floats.

    Columns of any other declared type are left untouched.
    """
    for column in list(df.columns):
        var_type = data_type_dict[column]
        if var_type == 'T':
            df[column] = pd.to_datetime(df[column])
        elif var_type == 'Q':
            df[column] = pd.to_numeric(df[column], errors='coerce', downcast='float')
def get_central_tendency_for_variable(s):
    """Return (mean, median, mode) of the Series; mode is itself a Series."""
    mean_value = s.mean()
    median_value = s.median()
    mode_values = s.mode()
    return mean_value, median_value, mode_values
def get_spread_for_variable(s):
    """Return (sample standard deviation, sample variance) of the Series."""
    std_value = s.std()
    var_value = s.var()
    return std_value, var_value
def get_skew_kurt_for_variable(s):
    """Return (kurtosis, skewness) of the Series — note the order: kurtosis first."""
    kurtosis_value = s.kurtosis()
    skewness_value = s.skew()
    return kurtosis_value, skewness_value
def get_summary_statistics(s):
    """Return a DataFrame of summary statistics for ``s``.

    Scalars are broadcast against the mode Series, so the frame has one
    row per modal value.
    """
    summary = {
        'mean': s.mean(),
        'median': s.median(),
        'mode': s.mode(),
        'std': s.std(),
        'var': s.var(),
        'kurtosis': s.kurtosis(),
        'skewness': s.skew(),
    }
    return pd.DataFrame(summary)
def run_non_graphical_EDA_categorical(df,data_type_dict):
    """Print frequency tables for every categorical ('C') column of ``df``."""
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('                                   Univariate Non-Graphical EDA                                   ')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
    print('-------------------------------CATEGORICAL-------------------------------------------------\n')
    columns = get_df_column_list(df)
    # frequencies are counts of the declared index column per category level
    index_column = get_index_column(columns,data_type_dict)
    # NOTE(review): get_data_for_variables already builds per-column count tables;
    # the loop below recomputes them with a 'Frequency' label, so the initial
    # dict contents are discarded — redundant work, kept for behavioural parity.
    cat_data_dict,cat_data_columns = get_data_for_variables(df=df,data_type_dict=data_type_dict,variable_type='C')
    for column in cat_data_columns:
        cata_data_summary = df.groupby(column).agg(
            Frequency=pd.NamedAgg(column=index_column, aggfunc='count')).reset_index()
        cat_data_dict[column] = cata_data_summary
        print('---------------------------------------------------------------------------------------\n')
        print(cata_data_summary)
def run_non_graphical_EDA_quantitative(df, data_type_dict):
    """Print summary statistics for every quantitative ('Q') column of *df*."""
    print('-------------------------------QUANTITATIVE-------------------------------------------------\n')
    all_columns = get_df_column_list(df)
    idx_col = get_index_column(all_columns, data_type_dict)
    quant_dict, quant_cols = get_data_for_variables(df=df, data_type_dict=data_type_dict, variable_type='Q')
    header = '---------------------------Summary Statistics for {}-----------------------------------\n'
    for col in quant_cols:
        print(header.format(col))
        # Coerce to numeric (dropping junk) before computing statistics.
        print(get_summary_statistics(clean_quantitative_data(df[col])))
def run_graphical_EDA_categorical(df, data_type_dict):
    """Render a sorted bar chart for each categorical ('C') variable.

    For summaries longer than 20 rows the user is asked interactively whether
    to truncate to the top 20 before charting.
    """
    all_columns = get_df_column_list(df)
    index_column = get_index_column(all_columns, data_type_dict)
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('                                 Univariate Graphical EDA                                  ')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
    print('-------------------------------CATEGORICAL-------------------------------------------------\n')
    cat_data_dict, cat_data_columns = get_data_for_variables(df=df, data_type_dict=data_type_dict, variable_type='C')
    for cat_variable in cat_data_columns:
        summary = cat_data_dict[cat_variable]
        # Sort descending on the frequency column (the second column).
        freq_col = list(summary.columns)[1]
        summary = summary.sort_values(by=freq_col, ascending=False)
        if len(summary) > 20:
            answer = input('The summary data for {} has many rows. Would you like see top 20 only?(y/n)'.format(cat_variable))
            if answer == 'y':
                summary = summary.iloc[:20]
        print('-------------------------------------------------------------------------------------\n')
        show_sorted_bar_chart(summary).display()
def run_graphical_EDA_quantitative(df, data_type_dict):
    """Plot a histogram for every quantitative ('Q') column of *df*.

    After the first pass, the user may interactively strip outliers
    (|z-score| >= 3) and re-plot, repeating per column until satisfied.
    """
    print('-------------------------------QUANTITATIVE-------------------------------------------------\n')
    all_columns = get_df_column_list(df)
    index_column = get_index_column(all_columns, data_type_dict)
    quant_data_dict, quant_data_columns = get_data_for_variables(df=df, data_type_dict=data_type_dict, variable_type='Q')
    for column in quant_data_columns:
        print('---------------------------Histogram for {}-----------------------------------\n'.format(column))
        quantitative_data = clean_quantitative_data(df[column])
        plt.hist(np.array(quantitative_data))
        plt.show()
    outlier_val = input('Would you like to see the distribution without outliers? (y/n):')
    if outlier_val == 'y':
        for column in quant_data_columns:
            print('---------------------------Histogram for {} without outliers-----------------------------------\n'.format(column))
            quantitative_data = clean_quantitative_data(df[column])
            # Drop values beyond 3 standard deviations (z-score filter).
            quantitative_data_no_outliers = quantitative_data[(np.abs(stats.zscore(quantitative_data)) < 3)]
            plt.hist(np.array(quantitative_data_no_outliers))
            plt.show()
            outlier_val = input('remove more outliers for {}?(y/n):'.format(column))
            # Re-apply the z-score filter to the already-trimmed data until
            # the user declines; each pass tightens the distribution further.
            while outlier_val == 'y':
                quantitative_data_no_outliers = quantitative_data_no_outliers[(np.abs(stats.zscore(quantitative_data_no_outliers)) < 3)]
                plt.hist(np.array(quantitative_data_no_outliers))
                plt.show()
                outlier_val = input('remove more outliers for {}?(y/n):'.format(column))
def truncate_data(df):
    """Cap *df* at its first 5000 rows; shorter inputs pass through unchanged."""
    return df[:5000] if len(df) > 5000 else df
def run_time_series_EDA(df, data_type_dict):
    """Display a line chart of each quantitative column against the time column.

    Data is truncated to 5000 rows before charting to keep Altair responsive.
    """
    all_columns = get_df_column_list(df)
    time_column = get_time_column(all_columns, data_type_dict)
    quant_dict, quant_cols = get_data_for_variables(df=df, data_type_dict=data_type_dict, variable_type='Q')
    for col in quant_cols:
        print('---------------------------Time Series for {}-----------------------------------\n'.format(col))
        series_df = df[[time_column, col]]
        line = alt.Chart(truncate_data(series_df)).mark_line().encode(
            x=time_column + ':T',
            y=col + ':Q'
        )
        line.display()
def run_non_graphical_EDA_info(df, data_type_dict):
    """Run the categorical then quantitative non-graphical EDA passes."""
    for step in (run_non_graphical_EDA_categorical, run_non_graphical_EDA_quantitative):
        step(df, data_type_dict)
    return None
def run_graphical_EDA_info(df, data_type_dict):
    """Run the categorical, quantitative, and time-series graphical EDA passes."""
    for step in (run_graphical_EDA_categorical, run_graphical_EDA_quantitative, run_time_series_EDA):
        step(df, data_type_dict)
    return None
def run_preliminary_EDA(df):
    """Preview *df*, collect column types interactively, then run the full EDA."""
    print(df.head())
    type_map = manual_entry_data_type(df)
    run_non_graphical_EDA_info(df, type_map)
    run_graphical_EDA_info(df, type_map)
    return None
"pandas.DataFrame",
"pandas.NamedAgg",
"matplotlib.pyplot.show",
"altair.Y",
"altair.Chart",
"pandas.to_datetime",
"numpy.array",
"pandas.to_numeric"
] | [((4392, 4418), 'pandas.DataFrame', 'pd.DataFrame', (['summary_dict'], {}), '(summary_dict)\n', (4404, 4418), True, 'import pandas as pd\n'), ((8313, 8323), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8321, 8323), True, 'import matplotlib.pyplot as plt\n'), ((2979, 3003), 'altair.Y', 'alt.Y', (['y_name'], {'sort': '"""-x"""'}), "(y_name, sort='-x')\n", (2984, 3003), True, 'import altair as alt\n'), ((3372, 3423), 'pandas.to_numeric', 'pd.to_numeric', (['s'], {'errors': '"""coerce"""', 'downcast': '"""float"""'}), "(s, errors='coerce', downcast='float')\n", (3385, 3423), True, 'import pandas as pd\n'), ((3649, 3675), 'pandas.to_datetime', 'pd.to_datetime', (['df[column]'], {}), '(df[column])\n', (3663, 3675), True, 'import pandas as pd\n'), ((8276, 8303), 'numpy.array', 'np.array', (['quantitative_data'], {}), '(quantitative_data)\n', (8284, 8303), True, 'import numpy as np\n'), ((8874, 8884), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8882, 8884), True, 'import matplotlib.pyplot as plt\n'), ((3728, 3788), 'pandas.to_numeric', 'pd.to_numeric', (['df[column]'], {'errors': '"""coerce"""', 'downcast': '"""float"""'}), "(df[column], errors='coerce', downcast='float')\n", (3741, 3788), True, 'import pandas as pd\n'), ((8821, 8860), 'numpy.array', 'np.array', (['quantitative_data_no_outliers'], {}), '(quantitative_data_no_outliers)\n', (8829, 8860), True, 'import numpy as np\n'), ((9227, 9237), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9235, 9237), True, 'import matplotlib.pyplot as plt\n'), ((2895, 2908), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (2904, 2908), True, 'import altair as alt\n'), ((9170, 9209), 'numpy.array', 'np.array', (['quantitative_data_no_outliers'], {}), '(quantitative_data_no_outliers)\n', (9178, 9209), True, 'import numpy as np\n'), ((5249, 5298), 'pandas.NamedAgg', 'pd.NamedAgg', ([], {'column': 'index_column', 'aggfunc': '"""count"""'}), "(column=index_column, aggfunc='count')\n", (5260, 
5298), True, 'import pandas as pd\n')] |
import math
import logging
import re
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pygest as ge
from pygest.convenience import bids_val, dict_from_bids, short_cmp, p_string
from pygest.algorithms import pct_similarity
from scipy.stats import ttest_ind
def mantel_correlogram(X, Y, by, bins=8, r_method='Pearson', fig_size=(8, 5), save_as=None,
                       title='Mantel Correlogram', xlabel='distance bins', ylabel='correlation',
                       logger=None):
    """ Return a Mantel correlogram between vector_a and vector_b, over by

    :param X: For our purposes, usually an expression vector. Can be any vector of floats
    :param Y: For our purposes, usually a connectivity vector. Can be any vector of floats
    :param by: For our purposes, usually a distance vector. Can be any vector of floats
    :param bins: The number of bins can be specified
    :param r_method: The correlation can be calculated as 'Pearson', 'Spearman', or 'Kendall'
    :param tuple fig_size: size of desired plot, in inches (width, height)
    :param save_as: A file name for saving out the correlogram
    :param title: The title of the plot
    :param xlabel: The x-axis (usually distance bins) label
    :param ylabel: The y-axis (X vs Y correlations) label
    :param logger: We can log notes to your logger or ours.
    :return: matplotlib (Figure, Axes) objects containing the correlogram plot
    """
    # Attach to the proper logger
    if logger is None:
        logger = logging.getLogger('pygest')
    # Figure out the boundaries of the distance bins.
    # dist_min is rounded DOWN to a multiple of `bins`; dist_max is rounded UP.
    dist_min = math.floor(min(by)) - (math.floor(min(by)) % bins)
    dist_max = math.ceil(max(by)) + bins - (math.ceil(max(by)) % bins)
    # NOTE(review): the step (dist_max / bins) equals the true bin width only
    # when dist_min is 0; for dist_min > 0 this yields fewer/wider bins than
    # requested — confirm whether that is intended.
    dist_x_axis = np.arange(dist_min, dist_max, dist_max / bins)
    logger.info("({:0.2f} - {:0.2f}) -> ({} - {}), {}".format(
        min(by), max(by), dist_min, dist_max, dist_x_axis
    ))

    # Calculate correlations for each distance bin separately.
    ds = []     # bin-center x positions
    prs = []    # per-bin correlation values
    ns = []     # per-bin sample counts
    rlos = []   # per-bin lower confidence bounds on r
    rhis = []   # per-bin upper confidence bounds on r
    for a in dist_x_axis:
        # Create distance filters for this particular bin
        by_filter = np.logical_and(by >= a, by < a + dist_max / bins)
        logger.info("  {ts:,} of {all:,} distances are between {sm:0.1f} and {lg:0.1f}.".format(
            ts=np.count_nonzero(by_filter),
            all=len(by),
            sm=a, lg=a + dist_max / bins
        ))

        # Filter vectors for this bin.
        _Y = Y[by_filter]
        _X = X[by_filter]
        ns.append(len(_X))
        logger.info("  using {:,} (in distance range) of the {:,} original values.".format(
            len(_Y),
            len(Y)
        ))

        # Calculate the correlations for this distance bin
        _r = ge.corr(_X, _Y, method=r_method)
        prs.append(_r)
        ds.append(a + dist_max / bins / 2)

        # Since r values are not going to be normally distributed (except maybe right at zero)
        # we need to transform them to Fisher's normal z' and back.
        z_prime = 0.50 * math.log((1 + _r) / (1 - _r))
        z_se = 1 / math.sqrt(len(_X) - 3)
        z_lo = z_prime - z_se
        z_hi = z_prime + z_se
        # Inverse Fisher transform: r = (e^(2z) - 1) / (e^(2z) + 1) == tanh(z)
        r_lo = (math.exp(2 * z_lo) - 1) / (math.exp(2 * z_lo) + 1)
        r_hi = (math.exp(2 * z_hi) - 1) / (math.exp(2 * z_hi) + 1)
        rlos.append(r_lo)
        rhis.append(r_hi)

        logger.info("  r = {:0.4f} ({:0.3f} - {:0.3f}) for these {} sample-sample relationships.".format(
            _r, r_lo, r_hi, len(_X)))

    # Calculate an overall r for comparison
    r = ge.corr(X, Y, method=r_method)

    # Build the plot
    fig = plt.figure(figsize=fig_size)
    ax = fig.add_subplot(111)
    ax.axis([dist_min, dist_max, -1.0, 1.0])
    ax.axhline(y=0, xmin=0, xmax=1, linestyle=':', color='gray')
    # ax.axhline(y=spearman, xmin=0, xmax=1, linestyle='--', color='green')
    # ax.axhline(y=kendall, xmin=0, xmax=1, linestyle='--', color='red')
    # Horizontal dashed line marks the all-distances correlation.
    oline = ax.axhline(y=r, xmin=0, xmax=1, linestyle='--', color='black', linewidth=2)
    # sline, = ax.plot(ds, srs, linestyle='-', marker='o', color='green')
    # kline, = ax.plot(ds, krs, linestyle='-', marker='o', color='red')
    pline, = ax.plot(ds, prs, linestyle='-', marker='o', color='black', linewidth=2)
    # Draw I-beam error bars (vertical CI line plus short caps) per bin.
    ax.vlines(x=ds, ymin=rlos, ymax=rhis, linewidth=1, color='black')
    ax.hlines(y=rhis, xmin=[x - 1 for x in ds], xmax=[x + 1 for x in ds], linewidth=1, color='black')
    ax.hlines(y=rlos, xmin=[x - 1 for x in ds], xmax=[x + 1 for x in ds], linewidth=1, color='black')
    # Annotate each bin with its sample count near the bottom of the plot.
    for i, n in enumerate(ns):
        ax.annotate('n=', (ds[i], -0.90), ha='center')
        ax.annotate(n, (ds[i], -0.97), ha='center')
    ax.legend((pline, oline), ('Pearson r', 'all distances'), loc='upper center')
    # ax.legend((pline, sline, kline, oline), ('Pearson r', 'Spearman r', 'Kendall tau', 'all distances'))
    ax.set_xticks(tuple(np.append(dist_x_axis, dist_max)))
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if save_as is not None:
        fig.savefig(save_as)
    # fig.close()
    return fig, ax
def conn_vs_expr_scatter(X, Y, xd, yd, save_as=None,
                         title='Connectivity and Expression', xlabel='expression', ylabel='connectivity',
                         logger=None):
    """ Scatter the many values for Y vs X in background and yd vs xd in foreground (darker).

    This is helpful to visualize connectivity values and expression values juxtaposed.
    Overlaying xd and yd can show how a subset of X and Y may lie in a particular area
    of the plot or have a slightly different correlation.

    :param X: A vector of expression correlations (assumed to be a numpy array
        so that `m * X + b` broadcasts — TODO confirm with callers)
    :param Y: A vector of connectivity values
    :param xd: A vector of expression correlations, a subset of X to call out
    :param yd: A vector of connectivity values, a subset of Y to call out
    :param save_as: The file name if you'd like to save the plot generated
    :param title: Override the default title
    :param xlabel: Override the default x label
    :param ylabel: Override the default y label
    :param logger: Catch logging output and divert it wherever you like
    :return: matplotlib (Figure, Axes) objects containing the regression plot
    """
    # Attach to the proper logger
    if logger is None:
        logger = logging.getLogger('pygest')

    # BUG FIX: the counts were swapped — xd/yd is the foreground subset,
    # X/Y the background; the message now reports them in the right order.
    logger.info("Plotting {} foreground points over {} background points.".format(
        len(xd), len(X)
    ))

    fig = plt.figure(figsize=(8, 5))
    ax = fig.add_subplot(111)
    ax.axis([min(X), max(X), -1.0, 1.0])

    # Set the axes and plot the grid first (this supersedes the limits above).
    ax.axis([0.6, 1.0, -0.4, 1.0])
    ax.axhline(y=0, xmin=0, xmax=1, linestyle=':', color='gray')
    # plt.axvline(x=0, ymin=0, ymax=1, linestyle=':', color='gray')
    # BUG FIX: ax.title is a Text attribute, not a method; calling it raised
    # TypeError. The correct call is ax.set_title().
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

    # Plot the points next
    ax.plot(X, Y, '.', color='lightblue')
    ax.plot(xd, yd, '.', color='gray')

    # And put the fit lines over the top of everything.
    m, b = np.polyfit(X, Y, 1)
    md, bd = np.polyfit(xd, yd, 1)
    ax.plot(X, m * X + b, '-', color='blue')
    ax.plot(xd, md * xd + bd, '-', color='black')

    # Add annotations: slope and correlation for the full set (blue) ...
    r = np.corrcoef(X, Y)[0, 1]
    ax.annotate("all:", (0.61, 0.97), ha="left", va="top", color="blue")
    ax.annotate("m = {:0.3f}".format(m), (0.62, 0.91), ha="left", va="top", color="blue")
    ax.annotate("r = {:0.3f}".format(r), (0.62, 0.85), ha="left", va="top", color="blue")
    # ... and for the highlighted subset (black).
    rd = np.corrcoef(xd, yd)[0, 1]
    ax.annotate("dist:", (0.61, 0.78), ha="left", va="top", color="black")
    ax.annotate("m = {:0.3f}".format(md), (0.62, 0.72), ha="left", va="top", color="black")
    ax.annotate("r = {:0.3f}".format(rd), (0.62, 0.66), ha="left", va="top", color="black")

    if save_as is not None:
        logger.info("Saving plot to {}".format(save_as))
        fig.savefig(save_as)

    return fig, ax
def heat_map(expression_df,
             title="Heat Map", fig_size=(5, 8), c_map="Reds",
             save_as=None, logger=None):
    """ Render *expression_df* as a seaborn heat map, optionally saving it.

    :param pandas.DataFrame expression_df: A pandas DataFrame containing data for the plot
    :param str title: Override the default plot title with one of your choosing
    :param tuple fig_size: Dimensions (mostly relative) of figure generated
    :param str c_map: A seaborn color scheme string
    :param str save_as: If provided, the plot will be saved to this filename
    :param logging.Logger logger: If provided, logging will be directed to this logger
    :return fig, ax: matplotlib figure and axes objects
    """
    # Fall back to the package logger when none is supplied.
    logger = logging.getLogger('pygest') if logger is None else logger

    figure, axes = plt.subplots(figsize=fig_size)
    sns.set_style('white')
    sns.heatmap(expression_df, annot=False, ax=axes, cmap=c_map)
    axes.set_title(title)

    if save_as is not None:
        logger.info("Saving heat map to {}".format(save_as))
        figure.savefig(save_as)

    return figure, axes
def overlay_normal(ax, data, c="red"):
    """ Overlay a normal KDE (matched mean/sd) plus mean and ±2sd markers on *ax*.

    :param matplotlib.Axes ax: The axes object to draw onto
    :param data: The original data for basing our normal distribution
    :param str c: A string referring to a seaborn color
    :return: The same axes passed as an argument, but with a normal curve drawn over it
    """
    mu = np.mean(data)
    sd = np.std(data)
    # Sample a synthetic normal with the data's moments and draw its density.
    synthetic = np.random.normal(loc=mu, scale=sd, size=2048)
    sns.kdeplot(synthetic, color=c, ax=ax)
    # Mark the mean and the ±2-sd points with thin vertical lines.
    for x_pos in (mu - 2 * sd, mu, mu + 2 * sd):
        ax.vlines(x=x_pos, ymin=0, ymax=5.0, linewidth=0.5, color=c)
    return ax
def distribution_plot(data,
                      title="Distribution", fig_size=(5, 5), c="red",
                      save_as=None, logger=None):
    """ Plot a rug/histogram/KDE of *data* with a fitted normal curve overlaid.

    :param pandas.DataFrame data: A pandas DataFrame containing data for the plot
    :param str title: Override the default plot title with one of your choosing
    :param tuple fig_size: Dimensions (mostly relative) of figure generated
    :param str c: A seaborn color string
    :param str save_as: If provided, the plot will be saved to this filename
    :param logging.Logger logger: If provided, logging will be directed to this logger
    :return fig, ax: matplotlib figure and axes objects
    """
    # Density plots can take a long time to build with big samples; subsample if necessary
    max_points = 1024
    if logger is None:
        logger = logging.getLogger('pygest')

    # The normal overlay uses the opposite hue so it stands out from the data.
    contrast = "blue" if c == "red" else "red"
    fig, ax = plt.subplots(figsize=fig_size)
    sns.set_style('white')
    if len(data) > max_points:
        subsample = np.random.choice(data, max_points)
    else:
        subsample = data
    ax = overlay_normal(sns.distplot(subsample, hist=True, rug=True, color=c), subsample, c=contrast)
    ax.set_title(title)

    if save_as is not None:
        logger.info("Saving distribution plot to {}".format(save_as))
        fig.savefig(save_as)

    return fig, ax
def heat_and_density_plot(value_matrix, density_position='top',
                          title="Heat Map", fig_size=(6, 4), ratio=3, c_map="Reds",
                          save_as=None, logger=None):
    """ Build, save, and return a heat map plot with a density plot on one edge.

    :param value_matrix: A DataFrame or matrix containing data for the plot
    :param str density_position: Which edge gets the density_plot?
    :param str title: Override the default plot title with one of your choosing
    :param tuple fig_size: Dimensions (mostly relative) of figure generated
    :param integer ratio: This number-to-one heat map to density plot size
    :param str c_map: A seaborn color scheme string
    :param str save_as: If provided, the plot will be saved to this filename
    :param logging.Logger logger: If provided, logging will be directed to this logger
    :return fig: matplotlib figure object
    """
    # Attach to the proper logger
    if logger is None:
        logger = logging.getLogger('pygest')

    fig = plt.figure(figsize=fig_size)
    # Carve the figure into a (ratio x ratio) heat map plus a 1-wide/1-tall
    # strip for the density plot, on whichever edge was requested.
    if density_position == 'left':
        gs = plt.GridSpec(ratio, ratio + 1)
        ax_main = fig.add_subplot(gs[:, 1:])
        ax_dens = fig.add_subplot(gs[:, 0])
        go_vertical = True
    elif density_position == 'right':
        gs = plt.GridSpec(ratio, ratio + 1)
        ax_main = fig.add_subplot(gs[:, :-1])
        ax_dens = fig.add_subplot(gs[:, -1])
        go_vertical = True
    elif density_position == 'bottom':
        gs = plt.GridSpec(ratio + 1, ratio)
        ax_main = fig.add_subplot(gs[:-1, :])
        ax_dens = fig.add_subplot(gs[-1, :])
        go_vertical = False
    else:  # density_position == 'top' or some invalid setting triggering 'top' default
        # GridSpec is set with nrows, ncols
        gs = plt.GridSpec(ratio + 1, ratio)
        # For a top-density, use [all rows after the 0th x all columns] for main
        ax_main = fig.add_subplot(gs[1:, :])
        # For a top-density, use [0th row x all columns] for density plot
        ax_dens = fig.add_subplot(gs[0, :])
        go_vertical = False

    # Density plots can take a long time to build with big samples; subsample if necessary
    max_density_length = 1024
    if isinstance(value_matrix, pd.DataFrame):
        # FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed
        # in 1.0; to_numpy() is the supported equivalent.
        value_matrix = value_matrix.to_numpy()
    if value_matrix.shape[0] == value_matrix.shape[1]:
        # Square matrices are assumed symmetric; use only the lower triangle
        # (excluding the diagonal) for the density estimate.
        value_vector = value_matrix[np.tril_indices(n=value_matrix.shape[0], k=-1)]
    else:
        value_vector = value_matrix.flatten()
    if len(value_vector) <= max_density_length:
        sub_vector = value_vector
    else:
        sub_vector = np.random.choice(value_vector, max_density_length)

    sns.set_style('white')
    # Derive a single-color name from the colormap, e.g. "Reds" -> "red".
    c = c_map.lower()[:-1]
    sns.heatmap(value_matrix, annot=False, ax=ax_main, cmap=c_map,
                xticklabels=False, yticklabels=False)
    sns.set_style('white')
    ax_dens = overlay_normal(sns.distplot(
        sub_vector, hist=True, rug=True, color=c, ax=ax_dens, vertical=go_vertical
    ), sub_vector, c=c)
    ax_dens.set_title(title)

    if save_as is not None:
        logger.info("Saving heat map to {}".format(save_as))
        fig.savefig(save_as)

    return fig
def whack_a_probe_plot(donor, hemisphere, samples, conns, conss=None, nulls=None, fig_size=(16, 9),
                       save_as=None, logger=None):
    """ Plot increasing correlations by different whack-a-probe algorithms.

    :param donor: The donor of interest
    :param hemisphere: The donor's hemisphere of interest
    :param samples: The subset (cor, sub, all) of donor's samples to represent
    :param conns: A list of tuples, each tuple (name, DataFrame), each DataFrame representing rising correlations
    :param conss: A list of tuples, each tuple (name, DataFrame), each DataFrame representing rising correlations
    :param nulls: A list of tuples, each tuple (name, DataFrame), each DataFrame representing rising correlations
    :param fig_size: The size, in inches, of the figure (width, height)
    :param str save_as: If provided, the plot will be saved to this filename
    :param logging.Logger logger: If provided, logging will be directed to this logger
    :return: matplotlib figure object
    """
    if logger is None:
        logger = logging.getLogger("pygest")

    fig, ax = plt.subplots(figsize=fig_size)

    # Plot a single horizontal line at y=0
    ax.axhline(0, 0, 17000, color='gray')

    # Helper: draw one (name, DataFrame) curve; the legend label carries the
    # peak r for 'max' curves and the trough r for 'min' curves.
    def plot_curves(the_curve, ls, lc):
        if 'max' in the_curve[0]:
            legend_label = "{}, max r={:0.3f}".format(
                the_curve[0][6:], max(list(the_curve[1]['r' if 'r' in the_curve[1].columns else 'b']))
            )
        elif 'min' in the_curve[0]:
            legend_label = "{}, min r={:0.3f}".format(
                the_curve[0][6:], min(list(the_curve[1]['r' if 'r' in the_curve[1].columns else 'b']))
            )
        else:
            legend_label = the_curve[0][6:]
        ax.plot(list(the_curve[1].index), list(the_curve[1]['r' if 'r' in the_curve[1].columns else 'b']),
                label=legend_label, linestyle=ls, color=lc)

    # Plot the nulls first, so they are in the background.
    # BUG FIX: the format string had no placeholder, so it always printed
    # "nulls = " with no value; "{}" restores the intended output.
    print("nulls = {}".format(nulls))
    if nulls is not None and len(nulls) > 0:
        for a_null in nulls:
            if 'smrt' in a_null[0]:
                plot_curves(a_null, ls=':', lc='lightgray')
            elif 'once' in a_null[0]:
                plot_curves(a_null, ls=':', lc='lightgray')
            else:
                plot_curves(a_null, ls=':', lc='yellow')

    # Also, plot the averaged null, our expected tortured r-value if we are only begging noise to confess
    def plot_mean_curves(mm, f, the_nulls):
        the_filter = [mm in x[0] for x in the_nulls]
        if sum(the_filter) > 0:
            the_nulls = [i for (i, v) in zip(the_nulls, the_filter) if v]
            mean_the_nulls = np.mean([x[1]['r' if 'r' in x[1].columns else 'b'] for x in the_nulls], axis=0)
            ll = "{}, mean {} r={:0.3f}".format('shuffled', mm, f(mean_the_nulls))
            ax.plot(list(the_nulls[0][1].index), mean_the_nulls, linestyle=':', color='darkgray', label=ll)

    plot_mean_curves("max", max, nulls)
    plot_mean_curves("min", min, nulls)

    if conns is not None and len(conns) > 0:
        for a_real in conns:
            if 'smrt' in a_real[0]:
                plot_curves(a_real, ls='-', lc='black')
            elif 'once' in a_real[0]:
                plot_curves(a_real, ls='--', lc='black')
            else:
                plot_curves(a_real, ls='-', lc='yellow')
    if conss is not None and len(conss) > 0:
        for a_real in conss:
            if 'smrt' in a_real[0]:
                plot_curves(a_real, ls='-', lc='green')
            elif 'once' in a_real[0]:
                plot_curves(a_real, ls='--', lc='green')
            else:
                plot_curves(a_real, ls='-', lc='yellow')

    # Tweak the legend, then add it to the axes, too
    def leg_sort(t):
        """ Sort the legend in a way that maps to peaks of lines visually. """
        score = 0
        if 'smrt' in t[0]:
            score += 3
        elif 'once' in t[0]:
            score += 2
        else:
            score += 1
        if 'max' in t[0]:
            score *= -1
        elif 'min' in t[0]:
            score *= 1
        return score

    handles, labels = ax.get_legend_handles_labels()
    # sort both labels and handles by labels
    labels, handles = zip(*sorted(zip(labels, handles), key=leg_sort))
    ax.legend(handles, labels, loc=2)

    # Finish it off with a title
    ax.set_title("{}, {} hemisphere, {} set".format(donor, hemisphere, samples))

    if save_as is not None:
        logger.info("Saving whack-a-probe plot to {}".format(save_as))
        fig.savefig(save_as)

    return fig
def light_dark_palettes(central_palette=None):
    """ Return two palettes, one lighter and one darker than central_palette, but with the same scheme.

    :param central_palette: A seaborn color palette, defaults to seaborn's colorblind palette.
    :returns light_palette, dark_palette: Two palettes, one lighter and one darker
    """
    # Seaborn handles palettes, strings, or even None to generate a palette.
    base = sns.color_palette(central_palette)
    # Darken: pull each channel one third of the way toward 0.
    darker = [tuple(channel - channel / 3 for channel in color) for color in base]
    # Lighten: push each channel one third of the way toward 1.
    lighter = [tuple(channel + (1.0 - channel) / 3 for channel in color) for color in base]
    return lighter, darker
def curve_properties(df, shuffle_name, palette="colorblind"):
    """ Return appropriate properties for curves and boxplots representing a given shuffle type.

    :param pandas.DataFrame df: A dataframe with a 'path' column holding paths to result files by 'shuf'
    :param str shuffle_name: The name of the shuffle type key
    :param palette: The name of the seaborn color palette to use
    :returns dict: dict with labelled properties of a curve
    """
    pal_light, pal_dark = light_dark_palettes(palette)
    props = {
        "files": list(df.loc[df['shuf'] == shuffle_name, 'path']),
        "shuf": shuffle_name,
    }
    # Real (unshuffled) results are solid black; every shuffle type is dotted
    # with its own palette hue.
    if shuffle_name == "none":
        props.update({"linestyle": "-", "color": "black", "light_color": "gray", })
        return props
    # Palette indices: 0 blue, 2 green, 3 red, 5 brown, 6 pink, 7 gray.
    hue_for_shuffle = {"dist": 3, "agno": 6, "smsh": 5, "edge": 2}
    if shuffle_name.startswith("be"):
        idx, style = 0, ":"
    elif shuffle_name in hue_for_shuffle:
        idx, style = hue_for_shuffle[shuffle_name], ":"
    else:
        # Unknown shuffle types fall back to gray with the original "." style.
        idx, style = 7, "."
    props.update({"linestyle": style, "color": pal_dark[idx], "light_color": pal_light[idx], })
    return props
def push_plot_via_dict(data, d):
    """ Use settings from a json file to specify a push plot.

    :param data: and instance of the pygest.Data object
    :param d: a dictionary specifying configuration for plot
    :return: 0 for success, integer error code for failure
    """
    print("Pretending to make a plot from {}".format(d))
    palette = ['black', 'blue', 'red', 'green', 'gray', 'orange', 'violet']
    curve_specs = []
    # Each key in d['intra'] names a BIDS field; each of its values becomes
    # one curve filtered on that field (colors cycle through the palette).
    for spec_key, subgroups in d['intra'].items():
        for idx, subgroup in enumerate(subgroups):
            filters = d['controls'].copy()
            filters[spec_key] = subgroup
            curve_specs.append({
                'files': data.derivatives(filters),
                'color': palette[idx % len(palette)],
                'linestyle': ':',
                'label_keys': [spec_key]
            })
    out_path = os.path.join(d['outdir'], d['filename'])
    for spec in curve_specs:
        print("Curves in {}".format(out_path))
        print(spec)
    fig, ax = push_plot(curve_specs, title=d['title'])
    fig.savefig(out_path)
    return 0
def push_vs_null_plot(data, donor, hem, samp, algo='smrt', comp='conn', mask='none',
                      label_keys=None):
    """ Use reasonable defaults to generate a push_plot for a particular dataset.

    This function does all of the gathering of files and generation of lists
    for sending to push_plot.

    :param data: an instance of the pygest.Data object
    :param donor: a string representing the donor of interest
    :param hem: a single character representing left or right hemisphere
    :param samp: 'cor' or 'sub' to indicate which sample set to use
    :param algo: 'smrt' for the smart efficient algorithm, 'once' or 'evry' for alternatives
    :param comp: 'conn' for connectivity, or 'cons' for connectivity similarity comparisons
    :param mask: 'none' for full data, or 'fine', 'coarse' or an integer for masked data
    :param label_keys: A list of keys can limit the size of the legend
    :return figure: axes of the plot
    """
    bids_filters = {'sub': donor, 'hem': hem, 'samp': samp, 'algo': algo, 'comp': comp,
                    'mask': mask, 'adj': 'none', 'exclusions': ['test', 'NULL', ], }

    # Gather result files for the real data plus each shuffle flavor.
    results = {}
    for shuffle_type in ['none', 'agno', 'dist', 'edge', ]:
        results[shuffle_type] = data.derivatives(bids_filters, shuffle=shuffle_type)
        print("Retrieved {} results for {} shuffles.".format(len(results[shuffle_type]), shuffle_type))

    # Shuffled curves are dotted and colored; the real curve is solid black.
    plottables = [
        {'files': results['agno'], 'color': 'gray', 'linestyle': ':',
         'label': 'shuffle (n={})'.format(len(results['agno']))},
        {'files': results['dist'], 'color': 'red', 'linestyle': ':',
         'label': 'weighted (n={})'.format(len(results['dist']))},
        {'files': results['edge'], 'color': 'blue', 'linestyle': ':',
         'label': 'edges (n={})'.format(len(results['edge']))},
        {'files': results['none'], 'color': 'black', 'linestyle': '-',
         'label_keys': ['comp', ]},
    ]
    plot_title = "{}_{}_{}_{}_{} actual vs shuffling".format(donor, hem, samp, comp, mask)
    return push_plot(plottables, plot_title, label_keys=label_keys, fig_size=(8, 5))
def push_plot(push_sets, title="Push Plot", label_keys=None, plot_overlaps=False, fig_size=(16, 12),
              save_as=None, push_x_to=None):
    """ Draw a plot with multiple push results overlaid for comparison.

    :param push_sets: a list of dicts, each dict contains ('files', optional 'color', optional 'linestyle')
    :param title: override the default "Push Plot" title with something more descriptive
    :param label_keys: if specified, labels will be generated from these keys and the files in push_sets
    :param plot_overlaps: If true, calculate pct_overlap for each group and annotate the plot with them
    :param fig_size: override the default (16, 9) fig_size
    :param save_as: if specified, the plot will be drawn into the file provided
    :param push_x_to: if specified, an optimization from (0 to 100) will plot as (push_x_to - 100 to push_x_to)
    :return: figure, axes of the plot
    """
    fig, ax = plt.subplots(figsize=fig_size)
    fig.tight_layout()

    # Plot a single horizontal line at y=0
    ax.axhline(0, 0, 17000, color='gray')

    if len(push_sets) == 0:
        return fig, ax

    # Plot each push_set; settings (linestyle/color/label) persist from one
    # set to the next if a later dict omits them.
    ls = '-'
    lc = 'black'
    label = ''
    curve_list = []
    for i, push_set in enumerate(push_sets):
        if 'linestyle' in push_set:
            ls = push_set['linestyle']
        if 'color' in push_set:
            lc = push_set['color']
        if 'label' in push_set:
            label = push_set['label']
        if 'label_keys' in push_set:
            label_keys = push_set['label_keys']
        if len(push_set) > 0:
            ax, df = plot_pushes(push_set['files'], axes=ax, label=label, label_keys=label_keys,
                                 linestyle=ls, color=lc, push_x_to=push_x_to)
            df['push_set'] = i
            if len(df) > 0:
                curve_list.append(df)
    # NOTE(review): pd.concat raises ValueError when curve_list is empty
    # (i.e. every push_set produced an empty frame) — confirm callers
    # guarantee at least one non-empty set.
    all_curves = pd.concat(curve_list, axis=0, sort=True)

    # Append summary statistics to a label
    def label_add_summary(x, d):
        if (len(d) == 0) or (len(d['best_score']) == 0):
            return "{} empty".format(x)
        if plot_overlaps and (len(d['f']) > 1):
            return "{}={:0.2f} with {:0.1%} overlap (n={})".format(
                x, np.mean(d['best_score']), pct_similarity(d['f']), len(d.index)
            )
        else:
            return "{}={:0.2f} (n={})".format(
                x, np.mean(d['best_score']), len(d.index)
            )

    # Tweak the legend, then add it to the axes, too
    def legend_sort_val(t):
        """ Sort the legend in a way that maps to peaks of lines visually. """
        # Parse the r-value out of the label text produced by label_add_summary.
        val = re.compile(r"^.*r=(\S+) .*$").search(t[0]).groups()[0]
        # Return the negative so high values are first in the vertical legend.
        return float(val) * -1.0

    max_handles = []
    min_handles = []
    max_labels = []
    min_labels = []
    null_handles = []
    null_labels = []
    handles, labels = ax.get_legend_handles_labels()
    # Add summary statistics to labels
    labels = [label_add_summary(x, all_curves[all_curves['group'] == x]) for x in labels]
    # sort both labels and handles by labels
    if len(labels) > 0 and len(handles) > 0:
        labels, handles = zip(*sorted(zip(labels, handles), key=legend_sort_val))
        # Partition entries into max-curves, min-curves, and everything else
        # so each family gets its own legend box below.
        for i, l in enumerate(labels):
            if "max" in l:
                max_handles.append(handles[i])
                max_labels.append(labels[i])
            elif "min" in l:
                min_handles.append(handles[i])
                min_labels.append(labels[i])
            else:
                null_handles.append(handles[i])
                null_labels.append(labels[i])
    # Add a full legend (that will be emptied) and three separate legends with appropriate labels in each.
    ax.legend(handles, labels, loc=7)
    if len(max_labels) > 0:
        ax.add_artist(ax.legend(max_handles, max_labels, loc=2))
    if len(null_labels) > 0:
        ax.add_artist(ax.legend(null_handles, null_labels, loc=6))
    if len(min_labels) > 0:
        ax.add_artist(ax.legend(min_handles, min_labels, loc=3))

    # Finish it off with a title
    ax.set_title(title)

    if save_as is not None:
        fig.savefig(save_as)

    return fig, ax
def plot_pushes(files, axes=None, label='', label_keys=None, linestyle='-', color='black', push_x_to=None):
    """ Plot the push results from a list of tsv files onto axes. This plots as many curves
    as are in files, and is called repeatedly (each time with different curves) on a
    single figure by push_plot.
    :param list files: A list of full file paths to tsv data holding push results
    :param axes: matplotlib axes for plotting to
    :param label: if supplied, override the calculated label for use in the legend for this set of results
    :param label_keys: if supplied, calculated the label from these fields
    :param linestyle: this linestyle will be used to plot these results
    :param color: this color will be used to plot these results
    :param push_x_to: if specified, an optimization from (0 to 100) will plot as (push_x_to - 100 to push_x_to)
    :returns axes, pd.DataFrame: the axes containing the representations of results in files,
        and one summary row per file ('f', 'measure', 'tgt', 'best_score', 'best_index', 'group')
    """
    if axes is None:
        fig, axes = plt.subplots()
    if len(files) == 0:
        # Nothing to plot; return the (possibly fresh) axes and an empty summary.
        return axes, pd.DataFrame()
    # Remember values for duplicate labels so we can average them at the end if necessary.
    summary_list = []
    # label_values = {}
    # label_files = {}
    for f in files:
        df = pd.read_csv(f, sep='\t', index_col=0)
        # Results hold either Mantel 'r' or 'b' values; prefer 'r' when present.
        measure = 'r' if 'r' in df.columns else 'b'
        summary = {'f': f, 'measure': measure, 'tgt': bids_val('tgt', f)}
        if push_x_to is not None:
            # Shift indices higher to allow alignment of variable-length plots to the right side.
            df.index = df.index + (push_x_to - df.index.max())
        # The 'tgt' BIDS field says whether this run maximized or minimized the measure.
        # The [5:] slice skips the first five rows when locating the peak index,
        # presumably to avoid noisy early values — TODO confirm.
        if summary['tgt'] == 'max':
            the_best_score = df[measure].max()
            the_best_index = df[measure][5:].idxmax()
        elif summary['tgt'] == 'min':
            the_best_score = df[measure].min()
            the_best_index = df[measure][5:].idxmin()
        else:
            the_best_score = 0.0
            the_best_index = 0
        summary['best_score'] = the_best_score
        summary['best_index'] = the_best_index
        # If a label is not provided, create it, and in a way we can modify it later.
        # These labels are not tied to the axes, yet, and exist only in the local function
        if label == '':
            if label_keys is None:
                # default values, if they aren't specified
                label_keys = ['tgt', 'alg', 'msk', ]
            label_group = "_".join([short_cmp(bids_val(k, f)) for k in label_keys])
            label_group = label_group + ", {} {}".format(bids_val('tgt', f), measure)
            # try:
            #     label_values[label_group].append(best_score)
            # except KeyError:
            #     # If the label_group does not yet appear in label_values, start the list
            #     label_values[label_group] = [best_score, ]
            # try:
            #     label_files[label_group].append(f)
            # except KeyError:
            #     # If the label_group does not yet appear in label_files, start the list
            #     label_files[label_group] = [f, ]
        else:
            label_group = label
        summary['group'] = label_group
        # Plot the curve on the axes
        real_handles, axes_labels = axes.get_legend_handles_labels()
        # Existing labels may carry "=<score>" suffixes added by push_plot; compare
        # only the part before '=' to decide whether this group is already labeled.
        if label_group in [x.split("=")[0] for x in axes_labels]:
            # If a label already exists, just plot the line without a label.
            axes.plot(list(df.index), list(df[measure]), linestyle=linestyle, color=color)
        else:
            # If there's no label, make one and plot the line with it.
            axes.plot(list(df.index), list(df[measure]), linestyle=linestyle, color=color, label=label_group)
        summary_list.append(summary)
    summaries = pd.DataFrame(summary_list)
    # Plot a center-point for the average 2D index,score in this group.
    if len(summaries.index) > 0:
        for grp in list(set(summaries['group'])):
            grp_df = summaries[summaries['group'] == grp]
            x_pos = np.mean(grp_df['best_index'])
            y_pos = np.mean(grp_df['best_score'])
            axes.plot(x_pos, y_pos, marker="D", markeredgecolor="white", markeredgewidth=2.0,
                      markersize=6.0, markerfacecolor=color)
    return axes, summaries
def box_and_swarm(figure, placement, label, variable, data, shuffles, high_score=1.0, orientation="v",
                  lim=None, ps=True, push_ordinate_to=None, palette="colorblind"):
    """ Create an axes object with a swarm plot drawn over a box plot of the same data.

    :param figure: matplotlib figure to add the axes to
    :param placement: [left, bottom, width, height] rectangle for figure.add_axes
    :param label: axes label, also used as the axis title text
    :param variable: name of the column in `data` to plot
    :param data: pandas DataFrame with at least 'shuf' and `variable` columns
        (and 'n' when orientation == "h")
    :param shuffles: ordered list of shuffle-type strings defining column order
    :param high_score: fallback ordinate used when a group has no values
    :param orientation: "v" for vertical columns (with p-value annotations),
        "h" for horizontal rows (with mean/top annotations)
    :param lim: if supplied, fixed limits for the value axis
    :param ps: if True (and vertical), annotate each shuffle column with a
        t-test p-value against the 'none' (actual) group
    :param push_ordinate_to: if supplied, shift all values so the max lands here
    :param palette: seaborn palette name passed through to curve_properties
    :return: the newly created matplotlib axes
    """
    # print("DEBUG: shuffles (should be a list of strings):")
    # print(shuffles)
    annot_columns = []
    for i, shuf in enumerate(shuffles):
        # Pre-compute x positions for annotation lines ('xo') and p-value text ('xp').
        x_dict = {"xo": float(i), "xp": float(i * 0.5)}
        # curve_properties is expected to return a dict including 'color' and
        # 'light_color' (used below); presumably it also carries 'shuf' — TODO confirm.
        prop = curve_properties(data, shuf, palette=palette)
        prop.update(x_dict)  # updates prop dict in place, but returns None, not the updated dict
        # print("  got '{}' property for '{}'".format(type(prop), shuf))
        annot_columns.append(prop)
    shuffle_color_boxes = [d['light_color'] for d in annot_columns]
    shuffle_color_points = [d['color'] for d in annot_columns]
    if push_ordinate_to is not None:
        # NOTE: mutates the caller's DataFrame column in place.
        data[variable] = data[variable] + push_ordinate_to - data[variable].max()
    ax = figure.add_axes(placement, label=label)
    if orientation == "v":
        sns.swarmplot(data=data, x='shuf', y=variable,
                      order=shuffles, palette=shuffle_color_points, size=3.0, ax=ax)
        sns.boxplot(data=data, x='shuf', y=variable, order=shuffles, palette=shuffle_color_boxes, ax=ax)
        ax.set_ylabel(None)
        ax.set_xlabel(label)
        if lim is not None:
            ax.set_ylim(lim)
    else:
        sns.swarmplot(data=data, x=variable, y='shuf', order=shuffles, palette=shuffle_color_points, ax=ax)
        sns.boxplot(data=data, x=variable, y='shuf', order=shuffles, palette=shuffle_color_boxes, ax=ax)
        ax.set_xlabel(None)
        ax.set_ylabel(label)
        if lim is not None:
            ax.set_xlim(lim)
    """ Calculate p-values for each column in the above plots, and annotate accordingly. """
    if ps & (orientation == "v"):
        gap = 0.06
        actual_results = data[data['shuf'] == 'none'][variable].values
        try:
            global_max_y = max(data[variable].values)
        except ValueError:
            # Empty data; fall back to the nominal high score.
            global_max_y = high_score
        for i, col in enumerate(annot_columns):
            # NOTE(review): col.get("shuf", "") here vs col['shuf'] in the "h"
            # branch below — inconsistent; a missing 'shuf' key would select an
            # empty group here but raise KeyError there. Verify curve_properties.
            shuffle_results = data[data['shuf'] == col.get("shuf", "")]
            try:
                # max_y = max(data[data['phase'] == 'train'][y].values)
                local_max_y = max(shuffle_results[variable].values)
            except ValueError:
                local_max_y = high_score
            try:
                y_pval = max(max(shuffle_results[variable].values), max(actual_results)) + gap
            except ValueError:
                y_pval = high_score + gap
            try:
                # Two-sample t-test: actual vs this shuffle's distribution.
                t, p = ttest_ind(actual_results, shuffle_results[variable].values)
                # print("    plotting, full p = {}".format(p))
                p_annotation = p_string(p, use_asterisks=False)
            except TypeError:
                p_annotation = "p N/A"
            # y_pline = y_pval + 0.01 + (gap * i)
            # Stack each comparison's bracket a little higher than the last.
            y_pline = global_max_y + 0.01 + (i * gap)
            if i > 0:
                ax.hlines(y_pline, 0.0, col['xo'], colors='gray', linewidth=1)
                ax.vlines(0.0, y_pval, y_pline, colors='gray', linewidth=1)
                ax.vlines(col['xo'], local_max_y + gap, y_pline, colors='gray', linewidth=1)
                ax.text(gap + (i * 0.01), y_pline + 0.01, p_annotation, ha='left', va='bottom')
    elif orientation == "h":
        for i, col in enumerate(annot_columns):
            shuffle_results = data[data['shuf'] == col['shuf']]
            try:
                local_min_x = min(shuffle_results[variable].values)
            except ValueError:
                local_min_x = 0
            try:
                local_mean = np.mean(shuffle_results[variable].values)
                local_n = int(np.mean(shuffle_results['n'].values))
            except ValueError:
                local_mean = 0
                local_n = 0
            # Annotate each row with its mean and distance from the top-n count.
            s = "mean {:,.0f} (top {:,.0f})".format(local_mean, local_n - local_mean)
            ax.text(local_min_x - 500, i, s, ha='right', va='center')
    return ax
def plot_a_vs_b(data, label, a_value, b_value, base_set):
    """ Overlay push results for two values of one BIDS factor:
    a_value in black solid lines, b_value in red dotted lines.
    """
    # Fetch the derivative result files for each of the two factor values.
    files_a = data.derivatives({**base_set, label: a_value}, shuffle='none', as_df=False)
    files_b = data.derivatives({**base_set, label: b_value}, shuffle='none', as_df=False)
    # b is drawn first so a's black curves land on top.
    push_sets = [
        {'files': files_b, 'linestyle': ':', 'color': 'red'},
        {'files': files_a, 'linestyle': '-', 'color': 'black'},
    ]
    return push_plot(
        push_sets,
        title="{} vs {} {}s".format(a_value, b_value, label),
        label_keys=[label, ],
        fig_size=(10, 8),
        plot_overlaps=True,
    )
def plot_a_vs_null(data, label, a_value, base_set):
    """ Overlay actual push results (black solid lines) against their null
    distributions: distance-shuffled in green and agnostic-shuffled in red
    dotted lines.
    """
    # One derivatives query per shuffle type, all for the same factor value.
    real_files = data.derivatives({**base_set, label: a_value}, shuffle='none', as_df=False)
    dist_files = data.derivatives({**base_set, label: a_value}, shuffle='dist', as_df=False)
    agno_files = data.derivatives({**base_set, label: a_value}, shuffle='agno', as_df=False)
    # Nulls are drawn first so the actual (black) curves land on top.
    push_sets = [
        {'files': agno_files, 'linestyle': ':', 'color': 'red'},
        {'files': dist_files, 'linestyle': ':', 'color': 'green'},
        {'files': real_files, 'linestyle': '-', 'color': 'black'},
    ]
    return push_plot(
        push_sets,
        title="{} vs null {}s".format(a_value, label),
        label_keys=[label, 'shuffle'],
        fig_size=(10, 8),
        plot_overlaps=True,
    )
def plot_a_vs_null_and_test(pygest_data, df, fig_size=(12, 8), addon=None):
    """ Plot actual training curves (black solid lines) against their shuffle null
    distributions (dotted lines), flanked by train/test box-and-swarm panes.

    :param pygest_data: a pygest data object supporting .derivatives() queries
    :param df: DataFrame of results; expected columns include 'train_file',
        'factor', 'phase', 'value', 'score', 'overlap' (and 'algo' when addon is set)
    :param fig_size: figure size passed through to push_plot
    :param addon: if supplied, an alternate algorithm name (e.g. 'once'/'evry')
        whose results are overlaid in gray/secondary colors alongside 'smrt'
    :return: (figure, (rising_axes, train_axes, test_axes))
    """
    # Extract some characteristics from the data.
    main_traits = dict_from_bids(df['train_file'].unique()[0])
    # The first non-empty 'factor' value names the BIDS key being compared.
    factor = [f for f in df['factor'].unique() if len(f) > 0][0]
    # train_value = d[factor]
    train_value = df[df['phase'] == 'train']['value'].values[0]
    test_value = df[df['phase'] == 'test']['value'].values[0]
    descriptor = '_'.join([main_traits['sub'], main_traits['hem'], main_traits['ctx']])
    if factor not in ['sub', 'hem', 'ctx']:
        descriptor = descriptor + ' (' + factor + '=' + train_value + ')'
    # a: the single actual training result; b/c/d: edge/dist/agno shuffle nulls.
    a = [df['train_file'].unique()[0], ]
    b = pygest_data.derivatives(main_traits, shuffle='edge', as_df=False)
    c = pygest_data.derivatives(main_traits, shuffle='dist', as_df=False)
    d = pygest_data.derivatives(main_traits, shuffle='agno', as_df=False)
    if addon is None:
        fig, ax = push_plot([
            {'files': d, 'linestyle': ':', 'color': 'green'},
            {'files': c, 'linestyle': ':', 'color': 'red'},
            {'files': b, 'linestyle': ':', 'color': 'orchid'},
            {'files': a, 'linestyle': '-', 'color': 'black'}, ],
            title=descriptor,
            label_keys=[factor, 'shuffle'],
            fig_size=fig_size,
            plot_overlaps=False,
        )
    else:
        # Duplicate queries for the addon algorithm ('smrt' is replaced in the filename).
        aa = [df['train_file'].unique()[0].replace('smrt', addon), ]
        bb = pygest_data.derivatives({**main_traits, 'alg': addon}, shuffle='edge', as_df=False)
        cc = pygest_data.derivatives({**main_traits, 'alg': addon}, shuffle='dist', as_df=False)
        dd = pygest_data.derivatives({**main_traits, 'alg': addon}, shuffle='agno', as_df=False)
        fig, ax = push_plot([
            {'files': dd, 'linestyle': ':', 'color': 'burlywood'},
            {'files': cc, 'linestyle': ':', 'color': 'gray'},
            {'files': bb, 'linestyle': ':', 'color': 'gray'},
            {'files': d, 'linestyle': ':', 'color': 'green'},
            {'files': c, 'linestyle': ':', 'color': 'red'},
            {'files': b, 'linestyle': ':', 'color': 'orchid'},
            {'files': aa, 'linestyle': '-', 'color': 'black'},
            {'files': a, 'linestyle': '-', 'color': 'black'}, ],
            title=descriptor,
            label_keys=[factor, 'shuffle', 'alg'],
            fig_size=fig_size,
            plot_overlaps=False,
        )
    # Move and resize rising plot of training data to make room for new box plots
    ax.set_position([0.04, 0.12, 0.48, 0.80])
    ax.set_yticklabels([])
    ax.set_label('rise')
    ax.set_xlabel('Training')
    ax.set_ylabel('Mantel Correlation')
    ax.yaxis.tick_right()
    # Create two box plots, one with training data, one with test data
    # (the_ax = ax_train, the_order = train_order, the_palette = train_color)
    the_order = {
        "train": ['train', 'edge', 'dist', 'agno'],
        "grays": ['train', 'edge', 'dist', 'agno'],
        "test": ['test', 'r_edge', 'r_dist', 'r_agno', 'random'],
    }
    the_palette = {
        "train": sns.color_palette(['black', 'orchid', 'red', 'green']),
        "test": sns.color_palette(['black', 'orchid', 'red', 'green', 'cyan']),
        "grays": sns.color_palette(['black', 'burlywood', 'gray', 'gray']),
    }

    def the_plots(the_data, tt, the_ax):
        """ Repeatable set of boxplot and swarmplot axes; just pass in data. """
        sns.boxplot(x='phase', y='score', data=the_data, ax=the_ax, order=the_order[tt], palette=the_palette[tt])
        sns.swarmplot(x='phase', y='score', data=the_data, ax=the_ax, order=the_order[tt], palette=the_palette[tt])
        the_ax.set_ylabel(None)
        the_ax.set_xlabel(tt)
        # Keep this pane's y-limits in sync with the main rising plot.
        the_ax.set_ylim(ax.get_ylim())
    # Train pane
    ax_train = fig.add_axes([0.54, 0.12, 0.17, 0.80], label='train')
    if addon is None:
        the_plots(the_data=df[df['value'] == train_value], tt="train", the_ax=ax_train)
    else:
        the_plots(the_data=df[(df['algo'] == 'smrt') & (df['value'] == train_value)], tt="train", the_ax=ax_train)
        the_plots(the_data=df[(df['algo'] == addon) & (df['value'] == train_value)], tt="grays", the_ax=ax_train)
    ax_train.set_yticklabels([])
    ax_train.yaxis.tick_right()
    ax_train.set_title("train ({})".format("=".join([factor, train_value])))
    # Test pane
    ax_test = fig.add_axes([0.75, 0.12, 0.23, 0.80], label='test')
    if addon is None:
        the_plots(the_data=df[df['value'] == test_value], tt="test", the_ax=ax_test)
    else:
        the_plots(the_data=df[(df['algo'] == 'smrt') & (df['value'] == test_value)], tt="test", the_ax=ax_test)
    ax_test.set_title("test ({})".format("=".join([factor, test_value])))
    # With 'addon', we get 'once' or 'evry' rows, but we ignore those for these calculations.
    if addon is not None:
        df = df[df['algo'] == 'smrt']
    # Calculate overlaps for each column in the test boxplot, and annotate accordingly
    # These have moved from the rising plot axes legend
    overlap_columns = [
        {'phase': 'test', 'x': 0.0},
        {'phase': 'r_edge', 'x': 1.0},
        {'phase': 'r_dist', 'x': 2.0},
        {'phase': 'r_agno', 'x': 3.0},
        {'phase': 'random', 'x': 4.0},
    ]
    for col in overlap_columns:
        overlaps = df[df['phase'] == col['phase']]['overlap'].values
        max_y = max(df[df['phase'] == 'train']['score'].values)
        y_overlap = max(df[df['phase'] == col['phase']]['score'].values)
        try:
            overlap_annotation = "{:0.1%}\nsimilar".format(np.nanmean(overlaps))
        except TypeError:
            overlap_annotation = "similarity\nN/A"
        # Place the annotation just below the top point if it would overrun the pane.
        if y_overlap > max_y:
            ax_test.text(col['x'], y_overlap - 0.02, overlap_annotation, ha='center', va='top')
        else:
            ax_test.text(col['x'], y_overlap + 0.02, overlap_annotation, ha='center', va='bottom')
    return fig, (ax, ax_train, ax_test)
def plot_train_vs_test(df, mask_results=False, title="Title", fig_size=(12, 8), ymin=None, ymax=None):
    """ Plot train results in black solid lines and shuffle nulls in colored dotted lines,
    with three flanking box-and-swarm panes (top/train/test scores by shuffle).

    :param df: DataFrame of results; expected columns include 'shuffle', 'path',
        'top_score', 'test_overlap', 'phase', and the train/test score columns below
    :param mask_results: if True, use 'masked_train_score'/'masked_test_score'
        instead of 'train_score'/'test_score'
    :param title: figure suptitle
    :param fig_size: figure size passed through to push_plot
    :param ymin: if supplied, fixed lower y-limit for all four panes
    :param ymax: if supplied, fixed upper y-limit for all four panes
    :return: (figure, (rising_axes, top_axes, train_axes, test_axes)), or None
        if the required score columns are missing
    """
    # We can calculate train and test results by pure data or masked data.
    if mask_results:
        s_train = 'masked_train_score'
        s_test = 'masked_test_score'
        s_axis_label_mod = " (masked)"
    else:
        s_train = 'train_score'
        s_test = 'test_score'
        s_axis_label_mod = ""
    if s_train not in df.columns or s_test not in df.columns:
        print("Plotting train-vs-test, but don't have {} and {} columns!".format(s_train, s_test))
        return None
    # Calculate (or blindly accept) the range of the y-axis, which must be the same for all four axes.
    # NOTE(review): if df is empty AND ymax/ymin are None, the limits stay None
    # and set_ylim below receives None bounds — verify callers never pass an empty df.
    if (ymax is None) and (len(df.index) > 0):
        highest_possible_score = max(max(df['top_score']), max(df[s_train]), max(df[s_test]))
    else:
        highest_possible_score = ymax
    if (ymin is None) and (len(df.index) > 0):
        lowest_possible_score = min(min(df['top_score']), min(df[s_train]), min(df[s_test]))
    else:
        lowest_possible_score = ymin
    y_limits = (lowest_possible_score, highest_possible_score)
    # Plot the first pane, rising lines representing rising Mantel correlations as probes are dropped.
    a = df.loc[df['shuffle'] == 'none', 'path']
    b = df.loc[df['shuffle'] == 'edge', 'path']
    c = df.loc[df['shuffle'] == 'dist', 'path']
    d = df.loc[df['shuffle'] == 'agno', 'path']
    fig, ax = push_plot([
        {'files': list(d), 'linestyle': ':', 'color': 'green'},
        {'files': list(c), 'linestyle': ':', 'color': 'red'},
        {'files': list(b), 'linestyle': ':', 'color': 'orchid'},
        {'files': list(a), 'linestyle': '-', 'color': 'black'}, ],
        # title="Split-half train vs test results",
        label_keys=['shuffle'],
        fig_size=fig_size,
        title="",
        plot_overlaps=False,
    )
    fig.suptitle(title, fontsize=10)
    # Move and resize rising plot of training data to make room for new box plots
    ax.set_position([0.04, 0.12, 0.39, 0.80])
    ax.set_yticklabels([])
    ax.set_label('rise')
    ax.set_xlabel('Training')
    ax.set_ylabel('Mantel Correlation')
    ax.yaxis.tick_right()
    ax.set_ylim(bottom=y_limits[0], top=y_limits[1])
    # Create two box plots, one with training data, one with test data
    shuffle_order = ['none', 'edge', 'dist', 'agno']
    shuffle_color_boxes = sns.color_palette(['gray', 'orchid', 'red', 'green'])
    shuffle_color_points = sns.color_palette(['black', 'orchid', 'red', 'green'])
    """ Training box and swarm plots """
    ax_top = fig.add_axes([0.43, 0.12, 0.15, 0.80], label='top')
    sns.boxplot(x='shuffle', y='top_score', data=df, ax=ax_top,
                order=shuffle_order, palette=shuffle_color_boxes)
    sns.swarmplot(x='shuffle', y='top_score', data=df, ax=ax_top,
                  order=shuffle_order, palette=shuffle_color_points)
    ax_top.set_yticklabels([])
    ax_top.yaxis.tick_right()
    ax_top.set_ylabel(None)
    ax_top.set_xlabel('Training')
    # ax_train.set_title("train")
    ax_top.set_ylim(ax.get_ylim())
    """ Train box and swarm plots """
    ax_train = fig.add_axes([0.62, 0.12, 0.15, 0.80], label='train')
    sns.boxplot(x='shuffle', y=s_train, data=df, ax=ax_train,
                order=shuffle_order, palette=shuffle_color_boxes)
    sns.swarmplot(x='shuffle', y=s_train, data=df, ax=ax_train,
                  order=shuffle_order, palette=shuffle_color_points)
    ax_train.set_ylabel(None)
    ax_train.set_xlabel('Train' + s_axis_label_mod)
    # ax_test.set_title("test")
    ax_train.set_ylim(ax.get_ylim())
    """ Test box and swarm plots """
    ax_test = fig.add_axes([0.81, 0.12, 0.15, 0.80], label='test')
    sns.boxplot(x='shuffle', y=s_test, data=df, ax=ax_test,
                order=shuffle_order, palette=shuffle_color_boxes)
    sns.swarmplot(x='shuffle', y=s_test, data=df, ax=ax_test,
                  order=shuffle_order, palette=shuffle_color_points)
    ax_test.set_ylabel(None)
    ax_test.set_xlabel('Test' + s_axis_label_mod)
    # ax_test.set_title("test")
    ax_test.set_ylim(ax.get_ylim())
    """ Calculate overlaps and p-values for each column in the test boxplot, and annotate accordingly. """
    # 'xo' is the column's x position; 'xp' is where its p-value text is centered.
    overlap_columns = [
        {'shuffle': 'none', 'xo': 0.0, 'xp': 0.0},
        {'shuffle': 'edge', 'xo': 1.0, 'xp': 0.5},
        {'shuffle': 'dist', 'xo': 2.0, 'xp': 1.0},
        {'shuffle': 'agno', 'xo': 3.0, 'xp': 1.5},
    ]
    actual_results = df[df['shuffle'] == 'none'][s_test].values
    for i, col in enumerate(overlap_columns):
        # Average all 'test_overlap' values for a given shuffle
        overlaps = df[df['shuffle'] == col['shuffle']]['test_overlap'].values
        test_scores = df[df['shuffle'] == col['shuffle']][s_test].values
        try:
            max_y = max(df[df['phase'] == 'train'][s_test].values)
        except ValueError:
            max_y = highest_possible_score
        try:
            y_overlap = max(test_scores)
            y_pval = max(max(test_scores), max(actual_results))
        except ValueError:
            y_overlap = highest_possible_score
            y_pval = highest_possible_score
        try:
            overlap_annotation = "{:0.1%}\nsimilar".format(np.nanmean(overlaps))
            # Two-sample t-test: actual ('none') test scores vs this shuffle's test scores.
            t, p = ttest_ind(actual_results, test_scores)
            print("    plotting, full p = {}".format(p))
            p_annotation = p_string(p)
        except TypeError:
            overlap_annotation = "similarity\nN/A"
            p_annotation = "p N/A"
        if y_overlap > max_y:
            y_overlap = y_overlap - 0.02
            ax_test.text(col['xo'], y_overlap, overlap_annotation, ha='center', va='top')
        else:
            # expected path, annotate just above the swarm's top-most point.
            y_overlap = y_overlap + 0.02
            ax_test.text(col['xo'], y_overlap, overlap_annotation, ha='center', va='bottom')
        if i > 0:
            # Draw a comparison bracket from the 'none' column to this one, stacked upward.
            gap = 0.04
            y_pval = y_pval + 0.06
            y_pline = y_pval + 0.01 + (gap * i)
            ax_test.hlines(y_pline, 0.0, col['xo'], colors='k', linewidth=1)
            ax_test.vlines(0.0, y_pval, y_pline, colors='k', linewidth=1)
            ax_test.vlines(col['xo'], y_overlap + 0.06, y_pline, colors='k', linewidth=1)
            ax_test.text(col['xp'], y_pline, p_annotation, ha='center', va='bottom')
    return fig, (ax, ax_top, ax_train, ax_test)
| [
"pygest.algorithms.pct_similarity",
"seaborn.heatmap",
"seaborn.kdeplot",
"numpy.polyfit",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"os.path.join",
"numpy.nanmean",
"pandas.DataFrame",
"numpy.std",
"seaborn.swarmplot",
"numpy.append",
"pygest.convenien... | [((1797, 1843), 'numpy.arange', 'np.arange', (['dist_min', 'dist_max', '(dist_max / bins)'], {}), '(dist_min, dist_max, dist_max / bins)\n', (1806, 1843), True, 'import numpy as np\n'), ((3612, 3642), 'pygest.corr', 'ge.corr', (['X', 'Y'], {'method': 'r_method'}), '(X, Y, method=r_method)\n', (3619, 3642), True, 'import pygest as ge\n'), ((3675, 3703), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_size'}), '(figsize=fig_size)\n', (3685, 3703), True, 'import matplotlib.pyplot as plt\n'), ((6536, 6562), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (6546, 6562), True, 'import matplotlib.pyplot as plt\n'), ((7095, 7114), 'numpy.polyfit', 'np.polyfit', (['X', 'Y', '(1)'], {}), '(X, Y, 1)\n', (7105, 7114), True, 'import numpy as np\n'), ((7128, 7149), 'numpy.polyfit', 'np.polyfit', (['xd', 'yd', '(1)'], {}), '(xd, yd, 1)\n', (7138, 7149), True, 'import numpy as np\n'), ((8810, 8840), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'fig_size'}), '(figsize=fig_size)\n', (8822, 8840), True, 'import matplotlib.pyplot as plt\n'), ((8845, 8867), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (8858, 8867), True, 'import seaborn as sns\n'), ((8872, 8930), 'seaborn.heatmap', 'sns.heatmap', (['expression_df'], {'annot': '(False)', 'ax': 'ax', 'cmap': 'c_map'}), '(expression_df, annot=False, ax=ax, cmap=c_map)\n', (8883, 8930), True, 'import seaborn as sns\n'), ((9608, 9646), 'seaborn.kdeplot', 'sns.kdeplot', (['norm_data'], {'color': 'c', 'ax': 'ax'}), '(norm_data, color=c, ax=ax)\n', (9619, 9646), True, 'import seaborn as sns\n'), ((10991, 11021), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'fig_size'}), '(figsize=fig_size)\n', (11003, 11021), True, 'import matplotlib.pyplot as plt\n'), ((11026, 11048), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (11039, 11048), True, 'import seaborn as sns\n'), 
((12429, 12457), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_size'}), '(figsize=fig_size)\n', (12439, 12457), True, 'import matplotlib.pyplot as plt\n'), ((14084, 14106), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (14097, 14106), True, 'import seaborn as sns\n'), ((14138, 14243), 'seaborn.heatmap', 'sns.heatmap', (['value_matrix'], {'annot': '(False)', 'ax': 'ax_main', 'cmap': 'c_map', 'xticklabels': '(False)', 'yticklabels': '(False)'}), '(value_matrix, annot=False, ax=ax_main, cmap=c_map, xticklabels=\n False, yticklabels=False)\n', (14149, 14243), True, 'import seaborn as sns\n'), ((14259, 14281), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (14272, 14281), True, 'import seaborn as sns\n'), ((15711, 15741), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'fig_size'}), '(figsize=fig_size)\n', (15723, 15741), True, 'import matplotlib.pyplot as plt\n'), ((19820, 19854), 'seaborn.color_palette', 'sns.color_palette', (['central_palette'], {}), '(central_palette)\n', (19837, 19854), True, 'import seaborn as sns\n'), ((26948, 26978), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'fig_size'}), '(figsize=fig_size)\n', (26960, 26978), True, 'import matplotlib.pyplot as plt\n'), ((27891, 27931), 'pandas.concat', 'pd.concat', (['curve_list'], {'axis': '(0)', 'sort': '(True)'}), '(curve_list, axis=0, sort=True)\n', (27900, 27931), True, 'import pandas as pd\n'), ((34044, 34070), 'pandas.DataFrame', 'pd.DataFrame', (['summary_list'], {}), '(summary_list)\n', (34056, 34070), True, 'import pandas as pd\n'), ((48736, 48789), 'seaborn.color_palette', 'sns.color_palette', (["['gray', 'orchid', 'red', 'green']"], {}), "(['gray', 'orchid', 'red', 'green'])\n", (48753, 48789), True, 'import seaborn as sns\n'), ((48817, 48871), 'seaborn.color_palette', 'sns.color_palette', (["['black', 'orchid', 'red', 'green']"], {}), "(['black', 'orchid', 'red', 'green'])\n", (48834, 
48871), True, 'import seaborn as sns\n'), ((48983, 49097), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""shuffle"""', 'y': '"""top_score"""', 'data': 'df', 'ax': 'ax_top', 'order': 'shuffle_order', 'palette': 'shuffle_color_boxes'}), "(x='shuffle', y='top_score', data=df, ax=ax_top, order=\n shuffle_order, palette=shuffle_color_boxes)\n", (48994, 49097), True, 'import seaborn as sns\n'), ((49113, 49230), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""shuffle"""', 'y': '"""top_score"""', 'data': 'df', 'ax': 'ax_top', 'order': 'shuffle_order', 'palette': 'shuffle_color_points'}), "(x='shuffle', y='top_score', data=df, ax=ax_top, order=\n shuffle_order, palette=shuffle_color_points)\n", (49126, 49230), True, 'import seaborn as sns\n'), ((49548, 49660), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""shuffle"""', 'y': 's_train', 'data': 'df', 'ax': 'ax_train', 'order': 'shuffle_order', 'palette': 'shuffle_color_boxes'}), "(x='shuffle', y=s_train, data=df, ax=ax_train, order=\n shuffle_order, palette=shuffle_color_boxes)\n", (49559, 49660), True, 'import seaborn as sns\n'), ((49676, 49791), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""shuffle"""', 'y': 's_train', 'data': 'df', 'ax': 'ax_train', 'order': 'shuffle_order', 'palette': 'shuffle_color_points'}), "(x='shuffle', y=s_train, data=df, ax=ax_train, order=\n shuffle_order, palette=shuffle_color_points)\n", (49689, 49791), True, 'import seaborn as sns\n'), ((50065, 50174), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""shuffle"""', 'y': 's_test', 'data': 'df', 'ax': 'ax_test', 'order': 'shuffle_order', 'palette': 'shuffle_color_boxes'}), "(x='shuffle', y=s_test, data=df, ax=ax_test, order=shuffle_order,\n palette=shuffle_color_boxes)\n", (50076, 50174), True, 'import seaborn as sns\n'), ((50191, 50304), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""shuffle"""', 'y': 's_test', 'data': 'df', 'ax': 'ax_test', 'order': 'shuffle_order', 'palette': 'shuffle_color_points'}), "(x='shuffle', 
y=s_test, data=df, ax=ax_test, order=\n shuffle_order, palette=shuffle_color_points)\n", (50204, 50304), True, 'import seaborn as sns\n'), ((1560, 1587), 'logging.getLogger', 'logging.getLogger', (['"""pygest"""'], {}), "('pygest')\n", (1577, 1587), False, 'import logging\n'), ((2205, 2254), 'numpy.logical_and', 'np.logical_and', (['(by >= a)', '(by < a + dist_max / bins)'], {}), '(by >= a, by < a + dist_max / bins)\n', (2219, 2254), True, 'import numpy as np\n'), ((2808, 2840), 'pygest.corr', 'ge.corr', (['_X', '_Y'], {'method': 'r_method'}), '(_X, _Y, method=r_method)\n', (2815, 2840), True, 'import pygest as ge\n'), ((6382, 6409), 'logging.getLogger', 'logging.getLogger', (['"""pygest"""'], {}), "('pygest')\n", (6399, 6409), False, 'import logging\n'), ((7276, 7293), 'numpy.corrcoef', 'np.corrcoef', (['X', 'Y'], {}), '(X, Y)\n', (7287, 7293), True, 'import numpy as np\n'), ((7562, 7581), 'numpy.corrcoef', 'np.corrcoef', (['xd', 'yd'], {}), '(xd, yd)\n', (7573, 7581), True, 'import numpy as np\n'), ((8767, 8794), 'logging.getLogger', 'logging.getLogger', (['"""pygest"""'], {}), "('pygest')\n", (8784, 8794), False, 'import logging\n'), ((10903, 10930), 'logging.getLogger', 'logging.getLogger', (['"""pygest"""'], {}), "('pygest')\n", (10920, 10930), False, 'import logging\n'), ((11109, 11151), 'numpy.random.choice', 'np.random.choice', (['data', 'max_density_length'], {}), '(data, max_density_length)\n', (11125, 11151), True, 'import numpy as np\n'), ((11176, 11228), 'seaborn.distplot', 'sns.distplot', (['sub_data'], {'hist': '(True)', 'rug': '(True)', 'color': 'c'}), '(sub_data, hist=True, rug=True, color=c)\n', (11188, 11228), True, 'import seaborn as sns\n'), ((12390, 12417), 'logging.getLogger', 'logging.getLogger', (['"""pygest"""'], {}), "('pygest')\n", (12407, 12417), False, 'import logging\n'), ((12506, 12536), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['ratio', '(ratio + 1)'], {}), '(ratio, ratio + 1)\n', (12518, 12536), True, 'import 
matplotlib.pyplot as plt\n'), ((14028, 14078), 'numpy.random.choice', 'np.random.choice', (['value_vector', 'max_density_length'], {}), '(value_vector, max_density_length)\n', (14044, 14078), True, 'import numpy as np\n'), ((14311, 14404), 'seaborn.distplot', 'sns.distplot', (['sub_vector'], {'hist': '(True)', 'rug': '(True)', 'color': 'c', 'ax': 'ax_dens', 'vertical': 'go_vertical'}), '(sub_vector, hist=True, rug=True, color=c, ax=ax_dens, vertical\n =go_vertical)\n', (14323, 14404), True, 'import seaborn as sns\n'), ((15668, 15695), 'logging.getLogger', 'logging.getLogger', (['"""pygest"""'], {}), "('pygest')\n", (15685, 15695), False, 'import logging\n'), ((23618, 23658), 'os.path.join', 'os.path.join', (["d['outdir']", "d['filename']"], {}), "(d['outdir'], d['filename'])\n", (23630, 23658), False, 'import os\n'), ((31261, 31275), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (31273, 31275), True, 'import matplotlib.pyplot as plt\n'), ((31531, 31568), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(f, sep='\\t', index_col=0)\n", (31542, 31568), True, 'import pandas as pd\n'), ((35659, 35773), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'data': 'data', 'x': '"""shuf"""', 'y': 'variable', 'order': 'shuffles', 'palette': 'shuffle_color_points', 'size': '(3.0)', 'ax': 'ax'}), "(data=data, x='shuf', y=variable, order=shuffles, palette=\n shuffle_color_points, size=3.0, ax=ax)\n", (35672, 35773), True, 'import seaborn as sns\n'), ((35799, 35900), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'data', 'x': '"""shuf"""', 'y': 'variable', 'order': 'shuffles', 'palette': 'shuffle_color_boxes', 'ax': 'ax'}), "(data=data, x='shuf', y=variable, order=shuffles, palette=\n shuffle_color_boxes, ax=ax)\n", (35810, 35900), True, 'import seaborn as sns\n'), ((36028, 36132), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'data': 'data', 'x': 'variable', 'y': '"""shuf"""', 'order': 'shuffles', 'palette': 'shuffle_color_points', 
'ax': 'ax'}), "(data=data, x=variable, y='shuf', order=shuffles, palette=\n shuffle_color_points, ax=ax)\n", (36041, 36132), True, 'import seaborn as sns\n'), ((36136, 36237), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'data', 'x': 'variable', 'y': '"""shuf"""', 'order': 'shuffles', 'palette': 'shuffle_color_boxes', 'ax': 'ax'}), "(data=data, x=variable, y='shuf', order=shuffles, palette=\n shuffle_color_boxes, ax=ax)\n", (36147, 36237), True, 'import seaborn as sns\n'), ((43393, 43447), 'seaborn.color_palette', 'sns.color_palette', (["['black', 'orchid', 'red', 'green']"], {}), "(['black', 'orchid', 'red', 'green'])\n", (43410, 43447), True, 'import seaborn as sns\n'), ((43465, 43527), 'seaborn.color_palette', 'sns.color_palette', (["['black', 'orchid', 'red', 'green', 'cyan']"], {}), "(['black', 'orchid', 'red', 'green', 'cyan'])\n", (43482, 43527), True, 'import seaborn as sns\n'), ((43546, 43603), 'seaborn.color_palette', 'sns.color_palette', (["['black', 'burlywood', 'gray', 'gray']"], {}), "(['black', 'burlywood', 'gray', 'gray'])\n", (43563, 43603), True, 'import seaborn as sns\n'), ((43742, 43852), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""phase"""', 'y': '"""score"""', 'data': 'the_data', 'ax': 'the_ax', 'order': 'the_order[tt]', 'palette': 'the_palette[tt]'}), "(x='phase', y='score', data=the_data, ax=the_ax, order=the_order\n [tt], palette=the_palette[tt])\n", (43753, 43852), True, 'import seaborn as sns\n'), ((43856, 43968), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""phase"""', 'y': '"""score"""', 'data': 'the_data', 'ax': 'the_ax', 'order': 'the_order[tt]', 'palette': 'the_palette[tt]'}), "(x='phase', y='score', data=the_data, ax=the_ax, order=\n the_order[tt], palette=the_palette[tt])\n", (43869, 43968), True, 'import seaborn as sns\n'), ((3096, 3125), 'math.log', 'math.log', (['((1 + _r) / (1 - _r))'], {}), '((1 + _r) / (1 - _r))\n', (3104, 3125), False, 'import math\n'), ((4937, 4969), 'numpy.append', 'np.append', 
(['dist_x_axis', 'dist_max'], {}), '(dist_x_axis, dist_max)\n', (4946, 4969), True, 'import numpy as np\n'), ((9558, 9571), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (9565, 9571), True, 'import numpy as np\n'), ((9579, 9591), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (9585, 9591), True, 'import numpy as np\n'), ((9834, 9847), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (9841, 9847), True, 'import numpy as np\n'), ((12704, 12734), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['ratio', '(ratio + 1)'], {}), '(ratio, ratio + 1)\n', (12716, 12734), True, 'import matplotlib.pyplot as plt\n'), ((13811, 13857), 'numpy.tril_indices', 'np.tril_indices', ([], {'n': 'value_matrix.shape[0]', 'k': '(-1)'}), '(n=value_matrix.shape[0], k=-1)\n', (13826, 13857), True, 'import numpy as np\n'), ((31321, 31335), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (31333, 31335), True, 'import pandas as pd\n'), ((31675, 31693), 'pygest.convenience.bids_val', 'bids_val', (['"""tgt"""', 'f'], {}), "('tgt', f)\n", (31683, 31693), False, 'from pygest.convenience import bids_val, dict_from_bids, short_cmp, p_string\n'), ((34305, 34334), 'numpy.mean', 'np.mean', (["grp_df['best_index']"], {}), "(grp_df['best_index'])\n", (34312, 34334), True, 'import numpy as np\n'), ((34355, 34384), 'numpy.mean', 'np.mean', (["grp_df['best_score']"], {}), "(grp_df['best_score'])\n", (34362, 34384), True, 'import numpy as np\n'), ((51631, 51669), 'scipy.stats.ttest_ind', 'ttest_ind', (['actual_results', 'test_scores'], {}), '(actual_results, test_scores)\n', (51640, 51669), False, 'from scipy.stats import ttest_ind\n'), ((51754, 51765), 'pygest.convenience.p_string', 'p_string', (['p'], {}), '(p)\n', (51762, 51765), False, 'from pygest.convenience import bids_val, dict_from_bids, short_cmp, p_string\n'), ((3244, 3262), 'math.exp', 'math.exp', (['(2 * z_lo)'], {}), '(2 * z_lo)\n', (3252, 3262), False, 'import math\n'), ((3271, 3289), 'math.exp', 'math.exp', (['(2 * z_lo)'], 
{}), '(2 * z_lo)\n', (3279, 3289), False, 'import math\n'), ((3311, 3329), 'math.exp', 'math.exp', (['(2 * z_hi)'], {}), '(2 * z_hi)\n', (3319, 3329), False, 'import math\n'), ((3338, 3356), 'math.exp', 'math.exp', (['(2 * z_hi)'], {}), '(2 * z_hi)\n', (3346, 3356), False, 'import math\n'), ((9740, 9753), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (9747, 9753), True, 'import numpy as np\n'), ((9907, 9920), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (9914, 9920), True, 'import numpy as np\n'), ((12905, 12935), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(ratio + 1)', 'ratio'], {}), '(ratio + 1, ratio)\n', (12917, 12935), True, 'import matplotlib.pyplot as plt\n'), ((13200, 13230), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(ratio + 1)', 'ratio'], {}), '(ratio + 1, ratio)\n', (13212, 13230), True, 'import matplotlib.pyplot as plt\n'), ((17344, 17423), 'numpy.mean', 'np.mean', (["[x[1]['r' if 'r' in x[1].columns else 'b'] for x in the_nulls]"], {'axis': '(0)'}), "([x[1]['r' if 'r' in x[1].columns else 'b'] for x in the_nulls], axis=0)\n", (17351, 17423), True, 'import numpy as np\n'), ((23483, 23523), 'os.path.join', 'os.path.join', (["d['outdir']", "d['filename']"], {}), "(d['outdir'], d['filename'])\n", (23495, 23523), False, 'import os\n'), ((28241, 28265), 'numpy.mean', 'np.mean', (["d['best_score']"], {}), "(d['best_score'])\n", (28248, 28265), True, 'import numpy as np\n'), ((28267, 28289), 'pygest.algorithms.pct_similarity', 'pct_similarity', (["d['f']"], {}), "(d['f'])\n", (28281, 28289), False, 'from pygest.algorithms import pct_similarity\n'), ((28398, 28422), 'numpy.mean', 'np.mean', (["d['best_score']"], {}), "(d['best_score'])\n", (28405, 28422), True, 'import numpy as np\n'), ((37271, 37330), 'scipy.stats.ttest_ind', 'ttest_ind', (['actual_results', 'shuffle_results[variable].values'], {}), '(actual_results, shuffle_results[variable].values)\n', (37280, 37330), False, 'from scipy.stats import ttest_ind\n'), ((37425, 37457), 
'pygest.convenience.p_string', 'p_string', (['p'], {'use_asterisks': '(False)'}), '(p, use_asterisks=False)\n', (37433, 37457), False, 'from pygest.convenience import bids_val, dict_from_bids, short_cmp, p_string\n'), ((45866, 45886), 'numpy.nanmean', 'np.nanmean', (['overlaps'], {}), '(overlaps)\n', (45876, 45886), True, 'import numpy as np\n'), ((51590, 51610), 'numpy.nanmean', 'np.nanmean', (['overlaps'], {}), '(overlaps)\n', (51600, 51610), True, 'import numpy as np\n'), ((2367, 2394), 'numpy.count_nonzero', 'np.count_nonzero', (['by_filter'], {}), '(by_filter)\n', (2383, 2394), True, 'import numpy as np\n'), ((9761, 9773), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (9767, 9773), True, 'import numpy as np\n'), ((9928, 9940), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (9934, 9940), True, 'import numpy as np\n'), ((32830, 32848), 'pygest.convenience.bids_val', 'bids_val', (['"""tgt"""', 'f'], {}), "('tgt', f)\n", (32838, 32848), False, 'from pygest.convenience import bids_val, dict_from_bids, short_cmp, p_string\n'), ((38333, 38374), 'numpy.mean', 'np.mean', (['shuffle_results[variable].values'], {}), '(shuffle_results[variable].values)\n', (38340, 38374), True, 'import numpy as np\n'), ((32735, 32749), 'pygest.convenience.bids_val', 'bids_val', (['k', 'f'], {}), '(k, f)\n', (32743, 32749), False, 'from pygest.convenience import bids_val, dict_from_bids, short_cmp, p_string\n'), ((38405, 38441), 'numpy.mean', 'np.mean', (["shuffle_results['n'].values"], {}), "(shuffle_results['n'].values)\n", (38412, 38441), True, 'import numpy as np\n'), ((28626, 28655), 're.compile', 're.compile', (['"""^.*r=(\\\\S+) .*$"""'], {}), "('^.*r=(\\\\S+) .*$')\n", (28636, 28655), False, 'import re\n')] |
from typing import (Callable, Optional, List, TypeVar, Tuple, Dict, Union)
from cytoolz.curried import ( # type: ignore
curry, compose, flip, nth, concat, itemmap, groupby, filter)
from returns.maybe import Maybe, Nothing
import numpy as np
import torch
from .config import Config
from .base import Data
# Generic type variable used for config values and grouping keys.
A = TypeVar("A")
# A result set: parallel (data item, score) pairs.
Result = List[Tuple[Data, float]]
# Short alias for torch.Tensor, used in the annotations below.
Tensor = torch.Tensor
# NOTE(review): deliberately rebinds the builtin name `getattr` to a curried,
# argument-flipped version: getattr(name, obj) == builtins.getattr(obj, name),
# and getattr(name) is a partial application usable as a key function.
# This shadows the builtin for the remainder of this module.
getattr = compose(curry, flip)(getattr)
fst = nth(0)  # first element of a 2-tuple
snd = nth(1)  # second element of a 2-tuple
def sigmoid(x: np.ndarray) -> np.ndarray:
    """Elementwise logistic sigmoid: 1 / (1 + e**(-x))."""
    neg_exp = np.exp(-x)
    return np.reciprocal(1.0 + neg_exp)
def get_config(config: Optional[Config], default_cfg: A, name: str) -> A:
    """Read attribute `name` from `config`, falling back to `default_cfg`.

    The fallback is used when `config` is None or when the attribute's
    value is None. Note `getattr` here is this module's curried, flipped
    version, so getattr(name, cfg) == builtins.getattr(cfg, name).
    """
    maybe_val: Maybe[A] = Maybe.from_optional(config).bind_optional(
        lambda cfg: getattr(name, cfg))
    return default_cfg if maybe_val == Nothing else maybe_val.unwrap()
def datas_filter(pred: Callable[[Data], bool],
                 datas: List[Data]) -> List[Data]:
    """Return the items of `datas` for which `pred` is true, as a new list."""
    return [item for item in datas if pred(item)]
def group_data(attr: str, datas: List[Data],
               scores: np.ndarray) -> Dict[A, Result]:
    """Group (data, score) pairs by the value of attribute `attr` on the data.

    Uses the module's flipped curried `getattr`, so the grouping key for a
    pair is builtins.getattr(pair[0], attr).
    """
    key_of = lambda pair: getattr(attr, fst(pair))
    return groupby(key_of, zip(datas, scores))
def ungroup_data(fn, grp_datas):
    """Apply `fn` to every (key, pairs) item, then flatten back to lists.

    `fn` maps one dict item to a new (key, pairs) item; the remapped groups
    are concatenated and unzipped into parallel (datas, scores) lists.
    """
    remapped = dict(fn(item) for item in grp_datas.items())
    flattened = [pair for pairs in remapped.values() for pair in pairs]
    datas, scores = map(list, zip(*flattened))
    return datas, scores
def pair_sim(mat1: Tensor, mat2: Tensor) -> Tensor:
    """All-pairs cosine similarity between the rows of `mat1` and `mat2`."""
    # Insert a broadcast axis so every row of mat1 is compared with mat2.
    lhs = mat1.unsqueeze(1)
    return torch.cosine_similarity(lhs, mat2, dim=-1)
def from_vstack(mat: Union[np.ndarray, List[np.ndarray]]) -> Tensor:
    """Vertically stack `mat` with numpy and wrap the result as a tensor."""
    stacked = np.vstack(mat)
    return torch.from_numpy(stacked)
def l2_approx(x: Tensor, mat: Tensor, mat_t: Tensor) -> Tensor:
    """Least-squares solution of mat @ w ≈ x via the normal equations.

    `mat_t` is expected to be the transpose of `mat` (supplied by the
    caller to avoid recomputing it).
    """
    gram_inv = torch.inverse(mat_t @ mat)
    return gram_inv @ mat_t @ x
def rescale_scores(
        t_min: float = 1,
        t_max: float = 2,
        inverse: bool = False) -> Callable[[np.ndarray], np.ndarray]:
    """Build a function that linearly rescales scores into [t_min, t_max].

    With inverse=False the smallest raw score maps to t_min and the largest
    to t_max; with inverse=True the mapping is reversed (smallest -> t_max).
    """
    def dispatch(scores: np.ndarray) -> np.ndarray:
        lo, hi = min(scores), max(scores)
        span = t_max - t_min
        if inverse:
            return (lo - scores) / (hi - lo) * span + t_max
        return (scores - lo) / (hi - lo) * span + t_min
    return dispatch
@curry
def cos_sim(v1: np.ndarray, v2: np.ndarray) -> np.ndarray:
    """Cosine similarity between vectors `v1` and `v2` (curried)."""
    denominator = np.linalg.norm(v1) * np.linalg.norm(v2)
    return np.dot(v1, v2) / denominator
def top_subset_sum(arr: Tensor, t: float) -> List[int]:
    """Indices of the largest entries of `arr` whose sum first reaches `t`.

    Entries are considered in descending order of value; the shortest prefix
    whose sum is >= t is returned (as a list of original indices). If even
    the total sum stays below `t`, all indices are returned in
    descending-value order. An empty input yields an empty list.
    """
    vals, idxs = torch.sort(arr, descending=True)
    # A single cumulative-sum pass replaces the original O(n^2) loop that
    # re-summed every prefix with torch.sum(arr[idxs[:i]]).
    running = torch.cumsum(vals, dim=0)
    reached = (running >= t).nonzero()
    if reached.numel() == 0:
        return idxs.tolist()
    cutoff = int(reached[0].item()) + 1
    return idxs[:cutoff].tolist()
| [
"numpy.dot",
"torch.sum",
"cytoolz.curried.nth",
"returns.maybe.Maybe.from_optional",
"numpy.exp",
"numpy.linalg.norm",
"torch.sort",
"typing.TypeVar",
"torch.inverse",
"cytoolz.curried.filter",
"cytoolz.curried.compose",
"cytoolz.curried.itemmap"
] | [((316, 328), 'typing.TypeVar', 'TypeVar', (['"""A"""'], {}), "('A')\n", (323, 328), False, 'from typing import Callable, Optional, List, TypeVar, Tuple, Dict, Union\n'), ((432, 438), 'cytoolz.curried.nth', 'nth', (['(0)'], {}), '(0)\n', (435, 438), False, 'from cytoolz.curried import curry, compose, flip, nth, concat, itemmap, groupby, filter\n'), ((445, 451), 'cytoolz.curried.nth', 'nth', (['(1)'], {}), '(1)\n', (448, 451), False, 'from cytoolz.curried import curry, compose, flip, nth, concat, itemmap, groupby, filter\n'), ((396, 416), 'cytoolz.curried.compose', 'compose', (['curry', 'flip'], {}), '(curry, flip)\n', (403, 416), False, 'from cytoolz.curried import curry, compose, flip, nth, concat, itemmap, groupby, filter\n'), ((2295, 2327), 'torch.sort', 'torch.sort', (['arr'], {'descending': '(True)'}), '(arr, descending=True)\n', (2305, 2327), False, 'import torch\n'), ((1441, 1477), 'cytoolz.curried.compose', 'compose', (['torch.from_numpy', 'np.vstack'], {}), '(torch.from_numpy, np.vstack)\n', (1448, 1477), False, 'from cytoolz.curried import curry, compose, flip, nth, concat, itemmap, groupby, filter\n'), ((2166, 2180), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (2172, 2180), True, 'import numpy as np\n'), ((514, 524), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (520, 524), True, 'import numpy as np\n'), ((622, 649), 'returns.maybe.Maybe.from_optional', 'Maybe.from_optional', (['config'], {}), '(config)\n', (641, 649), False, 'from returns.maybe import Maybe, Nothing\n'), ((917, 929), 'cytoolz.curried.filter', 'filter', (['pred'], {}), '(pred)\n', (923, 929), False, 'from cytoolz.curried import curry, compose, flip, nth, concat, itemmap, groupby, filter\n'), ((1558, 1584), 'torch.inverse', 'torch.inverse', (['(mat_t @ mat)'], {}), '(mat_t @ mat)\n', (1571, 1584), False, 'import torch\n'), ((2184, 2202), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (2198, 2202), True, 'import numpy as np\n'), ((2205, 2223), 
'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (2219, 2223), True, 'import numpy as np\n'), ((2371, 2395), 'torch.sum', 'torch.sum', (['arr[idxs[:i]]'], {}), '(arr[idxs[:i]])\n', (2380, 2395), False, 'import torch\n'), ((1181, 1203), 'cytoolz.curried.itemmap', 'itemmap', (['fn', 'grp_datas'], {}), '(fn, grp_datas)\n', (1188, 1203), False, 'from cytoolz.curried import curry, compose, flip, nth, concat, itemmap, groupby, filter\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# List as: python3
"""Tests for model_search.metric_fns."""
from absl.testing import parameterized
from model_search import metric_fns
import numpy as np
import tensorflow.compat.v2 as tf
class MetricFnsTest(tf.test.TestCase, parameterized.TestCase):
  """Unit tests for the metric_fn factories in model_search.metric_fns."""

  # pylint: disable=g-long-lambda
  # tf.constant must be called in a lambda, otherwise the Op would be created
  # in a different graph from where it would be used, which is not allowed.
  @parameterized.named_parameters(
      {
          'testcase_name':
              'int64_label_single_task',
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
          'predictions_fn':
              lambda: {
                  'predictions': tf.constant([1, 0, 0, 0, 0], dtype=tf.int64),
              },
          'expected_metric_dict': {
              'accuracy': np.float32(0.2)
          }
      }, {
          'testcase_name':
              'string_label_single_task',
          'label_vocabulary': ['A', 'B', 'C', 'D', 'E'],
          'labels_fn':
              lambda: tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
          'predictions_fn':
              lambda: {
                  'predictions': tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
              },
          'expected_metric_dict': {
              'accuracy': np.float32(0.2)
          }
      }, {
          'testcase_name':
              'string_label_no_vocab_single_task',
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
          'predictions_fn':
              lambda: {
                  'predictions': tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
              },
          'expected_metric_dict': {}
      }, {
          'testcase_name':
              'int64_label_multi_task',
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: {
                  'task_a': tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
                  'task_b': tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
              },
          'predictions_fn':
              lambda: {
                  'predictions':
                      tf.constant([1, 0, 0, 0, 0], dtype=tf.int64),
                  'predictions/task_a':
                      tf.constant([1, 0, 0, 0, 0], dtype=tf.int64),
                  'predictions/task_b':
                      tf.constant([1, 1, 1, 0, 0], dtype=tf.int64),
              },
          'expected_metric_dict': {
              'accuracy/task_a': np.float32(0.2),
              'accuracy/task_b': np.float32(0.6),
          },
      }, {
          'testcase_name':
              'string_label_multi_task',
          'label_vocabulary': {
              'task_a': ['A', 'B', 'C', 'D', 'E'],
              'task_b': ['F', 'G', 'H', 'I', 'J'],
          },
          'labels_fn':
              lambda: {
                  'task_a':
                      tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
                  'task_b':
                      tf.constant(['F', 'G', 'H', 'I', 'J'], dtype=tf.string),
              },
          'predictions_fn':
              lambda: {
                  'predictions':
                      tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
                  'predictions/task_a':
                      tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
                  'predictions/task_b':
                      tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
              },
          'expected_metric_dict': {
              'accuracy/task_a': np.float32(0.2),
              'accuracy/task_b': np.float32(0.2),
          },
      }, {
          'testcase_name':
              'mixed_label_multi_task',
          'label_vocabulary': {
              'task_a': ['A', 'B', 'C', 'D', 'E'],
          },
          'labels_fn':
              lambda: {
                  'task_a':
                      tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
                  'task_b':
                      tf.constant([1, 1, 0, 0, 0], dtype=tf.int64),
              },
          'predictions_fn':
              lambda: {
                  'predictions':
                      tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
                  'predictions/task_a':
                      tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
                  'predictions/task_b':
                      tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
              },
          'expected_metric_dict': {
              'accuracy/task_a': np.float32(0.2),
              'accuracy/task_b': np.float32(0.4),
          },
      }, {
          'testcase_name':
              'string_no_vocab_multi_task',
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: {
                  'task_a':
                      tf.constant(['A', 'B', 'C', 'D', 'E'], dtype=tf.string),
                  'task_b':
                      tf.constant([1, 1, 0, 0, 0], dtype=tf.int64),
              },
          'predictions_fn':
              lambda: {
                  'predictions':
                      tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
                  'predictions/task_a':
                      tf.constant([0, 0, 0, 0, 0], dtype=tf.int64),
                  'predictions/task_b':
                      tf.constant([1, 1, 1, 1, 1], dtype=tf.int64),
              },
          'expected_metric_dict': {
              'accuracy/task_b': np.float32(0.4),
          },
      })
  # pylint: enable=g-long-lambda
  def test_make_accuracy_metric_fn(self, label_vocabulary, labels_fn,
                                   predictions_fn, expected_metric_dict):
    """Accuracy metric_fn yields the expected per-task accuracy values."""
    # Force graph mode
    with tf.compat.v1.Graph().as_default():
      metric_fn = metric_fns.make_accuracy_metric_fn(label_vocabulary)
      actual_metric_dict = metric_fn(labels_fn(), predictions_fn())
      with self.test_session() as sess:
        sess.run(tf.compat.v1.initializers.local_variables())
        sess.run(tf.compat.v1.initializers.tables_initializer())
        actual_metric_dict_val = sess.run(actual_metric_dict)
        # Keep only the value tensor of each (update_op, value) metric pair.
        actual_metric_dict_val_clean = {
            metric_key: metric_val[1]
            for metric_key, metric_val in actual_metric_dict_val.items()
        }
        self.assertEqual(expected_metric_dict, actual_metric_dict_val_clean)

  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          'testcase_name':
              'roc_perfect',
          'metric_fn_factory':
              metric_fns.make_auc_roc_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant([1, 0], dtype=tf.int64),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
              },
          'expected_metric_dict': {
              'auc_roc': np.float32(1.0)
          }
      }, {
          'testcase_name':
              'roc_perfect_vocab',
          'metric_fn_factory':
              metric_fns.make_auc_roc_metric_fn,
          'label_vocabulary': ['ZERO', 'ONE'],
          'labels_fn':
              lambda: tf.constant(['ONE', 'ZERO'], dtype=tf.string),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
              },
          'expected_metric_dict': {
              'auc_roc': np.float32(1.0)
          }
      }, {
          'testcase_name':
              'roc_random',
          'metric_fn_factory':
              metric_fns.make_auc_roc_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant([1, 0], dtype=tf.int64),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.5, 0.5], [0.5, 0.5]], dtype=tf.float32),
              },
          'expected_metric_dict': {
              'auc_roc': np.float32(0.5)
          }
      }, {
          'testcase_name':
              'pr_perfect',
          'metric_fn_factory':
              metric_fns.make_auc_pr_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant([1, 0], dtype=tf.int64),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
              },
          'expected_metric_dict': {
              'auc_pr': np.float32(1.0)
          }
      }, {
          'testcase_name':
              'pr_perfect_vocab',
          'metric_fn_factory':
              metric_fns.make_auc_pr_metric_fn,
          'label_vocabulary': ['ZERO', 'ONE'],
          'labels_fn':
              lambda: tf.constant(['ONE', 'ZERO'], dtype=tf.string),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
              },
          'expected_metric_dict': {
              'auc_pr': np.float32(1.0)
          }
      }, {
          'testcase_name':
              'pr_random',
          'metric_fn_factory':
              metric_fns.make_auc_pr_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant([1, 0], dtype=tf.int64),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.5, 0.5], [0.5, 0.5]], dtype=tf.float32),
              },
          'expected_metric_dict': {
              'auc_pr': np.float32(0.5)
          }
      })
  # pylint: enable=g-long-lambda
  def test_auc_metric_fn(self, metric_fn_factory, label_vocabulary, labels_fn,
                         predictions_fn, expected_metric_dict):
    """AUC-ROC/PR metric_fns yield the expected AUC values."""
    # Force graph mode
    with tf.compat.v1.Graph().as_default():
      metric_fn = metric_fn_factory(label_vocabulary)
      actual_metric_dict = metric_fn(labels_fn(), predictions_fn())
      with self.test_session() as sess:
        sess.run(tf.compat.v1.initializers.local_variables())
        sess.run(tf.compat.v1.initializers.tables_initializer())
        actual_metric_dict_val = sess.run(actual_metric_dict)
        # Keep only the value tensor of each (update_op, value) metric pair.
        actual_metric_dict_val_clean = {
            metric_key: metric_val[1]
            for metric_key, metric_val in actual_metric_dict_val.items()
        }
        self.assertAllClose(expected_metric_dict, actual_metric_dict_val_clean)

  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      {
          'testcase_name':
              'roc_multi_task',
          'metric_fn_factory':
              metric_fns.make_auc_roc_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: {
                  'task_a': tf.constant([1, 0], dtype=tf.int64),
                  'task_b': tf.constant([1, 0], dtype=tf.int64),
              },
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
                  'probabilities/task_a':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
                  'probabilities/task_b':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
              },
          'exception_class':
              NotImplementedError,
      }, {
          'testcase_name':
              'roc_rank3_prob_tensor',
          'metric_fn_factory':
              metric_fns.make_auc_roc_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant([1, 0], dtype=tf.int64),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[[0.5, 0.5], [0.5, 0.5]],
                                   [[0.5, 0.5], [0.5, 0.5]]],
                                  dtype=tf.float32),
              },
          'exception_class':
              ValueError,
      }, {
          'testcase_name':
              'roc_prob_tensor_3_classes',
          'metric_fn_factory':
              metric_fns.make_auc_roc_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant([2, 1, 0], dtype=tf.int64),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]],
                                  dtype=tf.float32),
              },
          'exception_class':
              ValueError,
      }, {
          'testcase_name':
              'pr_multi_task',
          'metric_fn_factory':
              metric_fns.make_auc_pr_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: {
                  'task_a': tf.constant([1, 0], dtype=tf.int64),
                  'task_b': tf.constant([1, 0], dtype=tf.int64),
              },
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
                  'probabilities/task_a':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
                  'probabilities/task_b':
                      tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32),
              },
          'exception_class':
              NotImplementedError,
      }, {
          'testcase_name':
              'pr_rank3_prob_tensor',
          'metric_fn_factory':
              metric_fns.make_auc_pr_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant([1, 0], dtype=tf.int64),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[[0.5, 0.5], [0.5, 0.5]],
                                   [[0.5, 0.5], [0.5, 0.5]]],
                                  dtype=tf.float32),
              },
          'exception_class':
              ValueError,
      }, {
          'testcase_name':
              'pr_prob_tensor_3_classes',
          'metric_fn_factory':
              metric_fns.make_auc_pr_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant([2, 1, 0], dtype=tf.int64),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]],
                                  dtype=tf.float32),
              },
          'exception_class':
              ValueError,
      }, {
          'testcase_name':
              'roc_string_label_no_vocab',
          'metric_fn_factory':
              metric_fns.make_auc_roc_metric_fn,
          'label_vocabulary':
              None,
          'labels_fn':
              lambda: tf.constant(['ONE', 'ZERO'], dtype=tf.string),
          'predictions_fn':
              lambda: {
                  'probabilities':
                      tf.constant([[1.0, 0.0], [0.0, 1.0]], dtype=tf.float32),
              },
          'exception_class':
              ValueError,
      })
  # pylint: enable=g-long-lambda
  def test_auc_metric_fn_error(self, metric_fn_factory, label_vocabulary,
                               labels_fn, predictions_fn, exception_class):
    """AUC metric_fns reject unsupported label/probability configurations."""
    with self.assertRaises(exception_class):
      metric_fn = metric_fn_factory(label_vocabulary)
      metric_fn(labels_fn(), predictions_fn())

  def test_create_num_parameters_metric_fn_no_tower(self):
    """num_parameters counts all trainable variables when no tower is given."""
    # Force graph mode
    with tf.compat.v1.Graph().as_default():
      _ = tf.compat.v1.get_variable(
          name='w', shape=[10, 2], dtype=tf.float32, trainable=True)
      _ = tf.compat.v1.get_variable(
          name='b', shape=[2], dtype=tf.float32, trainable=True)
      metric_fn = metric_fns.create_num_parameters_metric_fn(None)
      metrics_dict = metric_fn(None, None)
      with self.test_session() as sess:
        # 22 = 10 * 2 (w) + 2 (b): every trainable variable is counted.
        self.assertEqual(22, sess.run(metrics_dict['num_parameters'][1]))

  def test_create_num_parameters_metric_fn_with_tower(self):
    """num_parameters counts only variables under the named tower prefix."""
    # Force graph mode
    with tf.compat.v1.Graph().as_default():
      _ = tf.compat.v1.get_variable(
          name='Phoenix/name', shape=[10, 2], dtype=tf.float32, trainable=True)
      _ = tf.compat.v1.get_variable(
          name='b', shape=[2], dtype=tf.float32, trainable=True)
      metric_fn = metric_fns.create_num_parameters_metric_fn('name')
      metrics_dict = metric_fn(None, None)
      with self.test_session() as sess:
        # 20 = 10 * 2: only the 'Phoenix/name' variable counts; 'b' is outside.
        self.assertEqual(20, sess.run(metrics_dict['num_parameters'][1]))

  def test_combine_metric_fns(self):
    """combine_metric_fns merges the metric dicts returned by each metric_fn."""
    # Force graph mode
    with tf.compat.v1.Graph().as_default():

      def metric_fn_1(labels, predictions, weights=None):
        del labels
        del predictions
        del weights
        one = tf.constant(1, dtype=tf.int32)
        return {'foo1': (one, one)}

      def metric_fn_2(labels, predictions, weights=None):
        del labels
        del predictions
        del weights
        two = tf.constant(2, dtype=tf.int32)
        return {'foo2': (two, two)}

      metric_fn_combined = metric_fns.combine_metric_fns(
          [metric_fn_1, metric_fn_2])
      metrics_dict = metric_fn_combined(None, None)
      with self.test_session() as sess:
        self.assertEqual(1, sess.run(metrics_dict['foo1'][1]))
        self.assertEqual(2, sess.run(metrics_dict['foo2'][1]))
if __name__ == '__main__':
  # Enable TF2 behavior globally; individual tests force graph mode where
  # the v1 metric ops require it.
  tf.enable_v2_behavior()
  tf.test.main()
| [
"model_search.metric_fns.combine_metric_fns",
"model_search.metric_fns.create_num_parameters_metric_fn",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.constant",
"numpy.float32",
"tensorflow.compat.v2.compat.v1.initializers.local_variables",
"tensor... | [((18365, 18388), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (18386, 18388), True, 'import tensorflow.compat.v2 as tf\n'), ((18391, 18405), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (18403, 18405), True, 'import tensorflow.compat.v2 as tf\n'), ((6458, 6510), 'model_search.metric_fns.make_accuracy_metric_fn', 'metric_fns.make_accuracy_metric_fn', (['label_vocabulary'], {}), '(label_vocabulary)\n', (6492, 6510), False, 'from model_search import metric_fns\n'), ((16509, 16597), 'tensorflow.compat.v2.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': '"""w"""', 'shape': '[10, 2]', 'dtype': 'tf.float32', 'trainable': '(True)'}), "(name='w', shape=[10, 2], dtype=tf.float32,\n trainable=True)\n", (16534, 16597), True, 'import tensorflow.compat.v2 as tf\n'), ((16615, 16700), 'tensorflow.compat.v2.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': '"""b"""', 'shape': '[2]', 'dtype': 'tf.float32', 'trainable': '(True)'}), "(name='b', shape=[2], dtype=tf.float32, trainable=True\n )\n", (16640, 16700), True, 'import tensorflow.compat.v2 as tf\n'), ((16726, 16774), 'model_search.metric_fns.create_num_parameters_metric_fn', 'metric_fns.create_num_parameters_metric_fn', (['None'], {}), '(None)\n', (16768, 16774), False, 'from model_search import metric_fns\n'), ((17071, 17171), 'tensorflow.compat.v2.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': '"""Phoenix/name"""', 'shape': '[10, 2]', 'dtype': 'tf.float32', 'trainable': '(True)'}), "(name='Phoenix/name', shape=[10, 2], dtype=tf.\n float32, trainable=True)\n", (17096, 17171), True, 'import tensorflow.compat.v2 as tf\n'), ((17188, 17273), 'tensorflow.compat.v2.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': '"""b"""', 'shape': '[2]', 'dtype': 'tf.float32', 'trainable': '(True)'}), "(name='b', shape=[2], dtype=tf.float32, trainable=True\n )\n", (17213, 17273), True, 'import 
tensorflow.compat.v2 as tf\n'), ((17299, 17349), 'model_search.metric_fns.create_num_parameters_metric_fn', 'metric_fns.create_num_parameters_metric_fn', (['"""name"""'], {}), "('name')\n", (17341, 17349), False, 'from model_search import metric_fns\n'), ((18046, 18103), 'model_search.metric_fns.combine_metric_fns', 'metric_fns.combine_metric_fns', (['[metric_fn_1, metric_fn_2]'], {}), '([metric_fn_1, metric_fn_2])\n', (18075, 18103), False, 'from model_search import metric_fns\n'), ((1226, 1270), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 1, 1, 1, 1]'], {'dtype': 'tf.int64'}), '([1, 1, 1, 1, 1], dtype=tf.int64)\n', (1237, 1270), True, 'import tensorflow.compat.v2 as tf\n'), ((1482, 1497), 'numpy.float32', 'np.float32', (['(0.2)'], {}), '(0.2)\n', (1492, 1497), True, 'import numpy as np\n'), ((1692, 1747), 'tensorflow.compat.v2.constant', 'tf.constant', (["['A', 'B', 'C', 'D', 'E']"], {'dtype': 'tf.string'}), "(['A', 'B', 'C', 'D', 'E'], dtype=tf.string)\n", (1703, 1747), True, 'import tensorflow.compat.v2 as tf\n'), ((1959, 1974), 'numpy.float32', 'np.float32', (['(0.2)'], {}), '(0.2)\n', (1969, 1974), True, 'import numpy as np\n'), ((2171, 2226), 'tensorflow.compat.v2.constant', 'tf.constant', (["['A', 'B', 'C', 'D', 'E']"], {'dtype': 'tf.string'}), "(['A', 'B', 'C', 'D', 'E'], dtype=tf.string)\n", (2182, 2226), True, 'import tensorflow.compat.v2 as tf\n'), ((3208, 3223), 'numpy.float32', 'np.float32', (['(0.2)'], {}), '(0.2)\n', (3218, 3223), True, 'import numpy as np\n'), ((3258, 3273), 'numpy.float32', 'np.float32', (['(0.6)'], {}), '(0.6)\n', (3268, 3273), True, 'import numpy as np\n'), ((4247, 4262), 'numpy.float32', 'np.float32', (['(0.2)'], {}), '(0.2)\n', (4257, 4262), True, 'import numpy as np\n'), ((4297, 4312), 'numpy.float32', 'np.float32', (['(0.2)'], {}), '(0.2)\n', (4307, 4312), True, 'import numpy as np\n'), ((5223, 5238), 'numpy.float32', 'np.float32', (['(0.2)'], {}), '(0.2)\n', (5233, 5238), True, 'import numpy as np\n'), ((5273, 
5288), 'numpy.float32', 'np.float32', (['(0.4)'], {}), '(0.4)\n', (5283, 5288), True, 'import numpy as np\n'), ((6157, 6172), 'numpy.float32', 'np.float32', (['(0.4)'], {}), '(0.4)\n', (6167, 6172), True, 'import numpy as np\n'), ((7346, 7381), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (7357, 7381), True, 'import tensorflow.compat.v2 as tf\n'), ((7627, 7642), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (7637, 7642), True, 'import numpy as np\n'), ((7900, 7945), 'tensorflow.compat.v2.constant', 'tf.constant', (["['ONE', 'ZERO']"], {'dtype': 'tf.string'}), "(['ONE', 'ZERO'], dtype=tf.string)\n", (7911, 7945), True, 'import tensorflow.compat.v2 as tf\n'), ((8191, 8206), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (8201, 8206), True, 'import numpy as np\n'), ((8460, 8495), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (8471, 8495), True, 'import tensorflow.compat.v2 as tf\n'), ((8741, 8756), 'numpy.float32', 'np.float32', (['(0.5)'], {}), '(0.5)\n', (8751, 8756), True, 'import numpy as np\n'), ((9009, 9044), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (9020, 9044), True, 'import tensorflow.compat.v2 as tf\n'), ((9289, 9304), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (9299, 9304), True, 'import numpy as np\n'), ((9560, 9605), 'tensorflow.compat.v2.constant', 'tf.constant', (["['ONE', 'ZERO']"], {'dtype': 'tf.string'}), "(['ONE', 'ZERO'], dtype=tf.string)\n", (9571, 9605), True, 'import tensorflow.compat.v2 as tf\n'), ((9850, 9865), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (9860, 9865), True, 'import numpy as np\n'), ((10117, 10152), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (10128, 10152), True, 'import tensorflow.compat.v2 
as tf\n'), ((10397, 10412), 'numpy.float32', 'np.float32', (['(0.5)'], {}), '(0.5)\n', (10407, 10412), True, 'import numpy as np\n'), ((12464, 12499), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (12475, 12499), True, 'import tensorflow.compat.v2 as tf\n'), ((13092, 13130), 'tensorflow.compat.v2.constant', 'tf.constant', (['[2, 1, 0]'], {'dtype': 'tf.int64'}), '([2, 1, 0], dtype=tf.int64)\n', (13103, 13130), True, 'import tensorflow.compat.v2 as tf\n'), ((14545, 14580), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (14556, 14580), True, 'import tensorflow.compat.v2 as tf\n'), ((15171, 15209), 'tensorflow.compat.v2.constant', 'tf.constant', (['[2, 1, 0]'], {'dtype': 'tf.int64'}), '([2, 1, 0], dtype=tf.int64)\n', (15182, 15209), True, 'import tensorflow.compat.v2 as tf\n'), ((15749, 15794), 'tensorflow.compat.v2.constant', 'tf.constant', (["['ONE', 'ZERO']"], {'dtype': 'tf.string'}), "(['ONE', 'ZERO'], dtype=tf.string)\n", (15760, 15794), True, 'import tensorflow.compat.v2 as tf\n'), ((17748, 17778), 'tensorflow.compat.v2.constant', 'tf.constant', (['(1)'], {'dtype': 'tf.int32'}), '(1, dtype=tf.int32)\n', (17759, 17778), True, 'import tensorflow.compat.v2 as tf\n'), ((17951, 17981), 'tensorflow.compat.v2.constant', 'tf.constant', (['(2)'], {'dtype': 'tf.int32'}), '(2, dtype=tf.int32)\n', (17962, 17981), True, 'import tensorflow.compat.v2 as tf\n'), ((6405, 6425), 'tensorflow.compat.v2.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (6423, 6425), True, 'import tensorflow.compat.v2 as tf\n'), ((6636, 6679), 'tensorflow.compat.v2.compat.v1.initializers.local_variables', 'tf.compat.v1.initializers.local_variables', ([], {}), '()\n', (6677, 6679), True, 'import tensorflow.compat.v2 as tf\n'), ((6698, 6744), 'tensorflow.compat.v2.compat.v1.initializers.tables_initializer', 'tf.compat.v1.initializers.tables_initializer', ([], 
{}), '()\n', (6742, 6744), True, 'import tensorflow.compat.v2 as tf\n'), ((1357, 1401), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([1, 0, 0, 0, 0], dtype=tf.int64)\n', (1368, 1401), True, 'import tensorflow.compat.v2 as tf\n'), ((1834, 1878), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([0, 0, 0, 0, 0], dtype=tf.int64)\n', (1845, 1878), True, 'import tensorflow.compat.v2 as tf\n'), ((2313, 2357), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([0, 0, 0, 0, 0], dtype=tf.int64)\n', (2324, 2357), True, 'import tensorflow.compat.v2 as tf\n'), ((2616, 2660), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 1, 1, 1, 1]'], {'dtype': 'tf.int64'}), '([1, 1, 1, 1, 1], dtype=tf.int64)\n', (2627, 2660), True, 'import tensorflow.compat.v2 as tf\n'), ((2690, 2734), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 1, 1, 1, 1]'], {'dtype': 'tf.int64'}), '([1, 1, 1, 1, 1], dtype=tf.int64)\n', (2701, 2734), True, 'import tensorflow.compat.v2 as tf\n'), ((2860, 2904), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([1, 0, 0, 0, 0], dtype=tf.int64)\n', (2871, 2904), True, 'import tensorflow.compat.v2 as tf\n'), ((2968, 3012), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([1, 0, 0, 0, 0], dtype=tf.int64)\n', (2979, 3012), True, 'import tensorflow.compat.v2 as tf\n'), ((3076, 3120), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 1, 1, 0, 0]'], {'dtype': 'tf.int64'}), '([1, 1, 1, 0, 0], dtype=tf.int64)\n', (3087, 3120), True, 'import tensorflow.compat.v2 as tf\n'), ((3611, 3666), 'tensorflow.compat.v2.constant', 'tf.constant', (["['A', 'B', 'C', 'D', 'E']"], {'dtype': 'tf.string'}), "(['A', 'B', 'C', 'D', 'E'], dtype=tf.string)\n", (3622, 3666), True, 'import tensorflow.compat.v2 as tf\n'), ((3718, 3773), 
'tensorflow.compat.v2.constant', 'tf.constant', (["['F', 'G', 'H', 'I', 'J']"], {'dtype': 'tf.string'}), "(['F', 'G', 'H', 'I', 'J'], dtype=tf.string)\n", (3729, 3773), True, 'import tensorflow.compat.v2 as tf\n'), ((3899, 3943), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([0, 0, 0, 0, 0], dtype=tf.int64)\n', (3910, 3943), True, 'import tensorflow.compat.v2 as tf\n'), ((4007, 4051), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([0, 0, 0, 0, 0], dtype=tf.int64)\n', (4018, 4051), True, 'import tensorflow.compat.v2 as tf\n'), ((4115, 4159), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 1, 1, 1, 1]'], {'dtype': 'tf.int64'}), '([1, 1, 1, 1, 1], dtype=tf.int64)\n', (4126, 4159), True, 'import tensorflow.compat.v2 as tf\n'), ((4598, 4653), 'tensorflow.compat.v2.constant', 'tf.constant', (["['A', 'B', 'C', 'D', 'E']"], {'dtype': 'tf.string'}), "(['A', 'B', 'C', 'D', 'E'], dtype=tf.string)\n", (4609, 4653), True, 'import tensorflow.compat.v2 as tf\n'), ((4705, 4749), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 1, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([1, 1, 0, 0, 0], dtype=tf.int64)\n', (4716, 4749), True, 'import tensorflow.compat.v2 as tf\n'), ((4875, 4919), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([0, 0, 0, 0, 0], dtype=tf.int64)\n', (4886, 4919), True, 'import tensorflow.compat.v2 as tf\n'), ((4983, 5027), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([0, 0, 0, 0, 0], dtype=tf.int64)\n', (4994, 5027), True, 'import tensorflow.compat.v2 as tf\n'), ((5091, 5135), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 1, 1, 1, 1]'], {'dtype': 'tf.int64'}), '([1, 1, 1, 1, 1], dtype=tf.int64)\n', (5102, 5135), True, 'import tensorflow.compat.v2 as tf\n'), ((5532, 5587), 'tensorflow.compat.v2.constant', 'tf.constant', (["['A', 'B', 'C', 'D', 'E']"], 
{'dtype': 'tf.string'}), "(['A', 'B', 'C', 'D', 'E'], dtype=tf.string)\n", (5543, 5587), True, 'import tensorflow.compat.v2 as tf\n'), ((5639, 5683), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 1, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([1, 1, 0, 0, 0], dtype=tf.int64)\n', (5650, 5683), True, 'import tensorflow.compat.v2 as tf\n'), ((5809, 5853), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([0, 0, 0, 0, 0], dtype=tf.int64)\n', (5820, 5853), True, 'import tensorflow.compat.v2 as tf\n'), ((5917, 5961), 'tensorflow.compat.v2.constant', 'tf.constant', (['[0, 0, 0, 0, 0]'], {'dtype': 'tf.int64'}), '([0, 0, 0, 0, 0], dtype=tf.int64)\n', (5928, 5961), True, 'import tensorflow.compat.v2 as tf\n'), ((6025, 6069), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 1, 1, 1, 1]'], {'dtype': 'tf.int64'}), '([1, 1, 1, 1, 1], dtype=tf.int64)\n', (6036, 6069), True, 'import tensorflow.compat.v2 as tf\n'), ((10642, 10662), 'tensorflow.compat.v2.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (10660, 10662), True, 'import tensorflow.compat.v2 as tf\n'), ((10856, 10899), 'tensorflow.compat.v2.compat.v1.initializers.local_variables', 'tf.compat.v1.initializers.local_variables', ([], {}), '()\n', (10897, 10899), True, 'import tensorflow.compat.v2 as tf\n'), ((10918, 10964), 'tensorflow.compat.v2.compat.v1.initializers.tables_initializer', 'tf.compat.v1.initializers.tables_initializer', ([], {}), '()\n', (10962, 10964), True, 'import tensorflow.compat.v2 as tf\n'), ((7492, 7547), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (7503, 7547), True, 'import tensorflow.compat.v2 as tf\n'), ((8056, 8111), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (8067, 8111), True, 'import tensorflow.compat.v2 as 
tf\n'), ((8606, 8661), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.5, 0.5], [0.5, 0.5]]'], {'dtype': 'tf.float32'}), '([[0.5, 0.5], [0.5, 0.5]], dtype=tf.float32)\n', (8617, 8661), True, 'import tensorflow.compat.v2 as tf\n'), ((9155, 9210), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (9166, 9210), True, 'import tensorflow.compat.v2 as tf\n'), ((9716, 9771), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (9727, 9771), True, 'import tensorflow.compat.v2 as tf\n'), ((10263, 10318), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.5, 0.5], [0.5, 0.5]]'], {'dtype': 'tf.float32'}), '([[0.5, 0.5], [0.5, 0.5]], dtype=tf.float32)\n', (10274, 10318), True, 'import tensorflow.compat.v2 as tf\n'), ((11604, 11639), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (11615, 11639), True, 'import tensorflow.compat.v2 as tf\n'), ((11669, 11704), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (11680, 11704), True, 'import tensorflow.compat.v2 as tf\n'), ((11832, 11887), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (11843, 11887), True, 'import tensorflow.compat.v2 as tf\n'), ((11953, 12008), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (11964, 12008), True, 'import tensorflow.compat.v2 as tf\n'), ((12074, 12129), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (12085, 12129), True, 'import 
tensorflow.compat.v2 as tf\n'), ((12610, 12698), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]]]'], {'dtype': 'tf.float32'}), '([[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]]], dtype=tf.\n float32)\n', (12621, 12698), True, 'import tensorflow.compat.v2 as tf\n'), ((13241, 13306), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], dtype=tf.float32)\n', (13252, 13306), True, 'import tensorflow.compat.v2 as tf\n'), ((13687, 13722), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (13698, 13722), True, 'import tensorflow.compat.v2 as tf\n'), ((13752, 13787), 'tensorflow.compat.v2.constant', 'tf.constant', (['[1, 0]'], {'dtype': 'tf.int64'}), '([1, 0], dtype=tf.int64)\n', (13763, 13787), True, 'import tensorflow.compat.v2 as tf\n'), ((13915, 13970), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (13926, 13970), True, 'import tensorflow.compat.v2 as tf\n'), ((14036, 14091), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (14047, 14091), True, 'import tensorflow.compat.v2 as tf\n'), ((14157, 14212), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0], [1.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)\n', (14168, 14212), True, 'import tensorflow.compat.v2 as tf\n'), ((14691, 14779), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]]]'], {'dtype': 'tf.float32'}), '([[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]]], dtype=tf.\n float32)\n', (14702, 14779), True, 'import tensorflow.compat.v2 as tf\n'), ((15320, 15385), 
'tensorflow.compat.v2.constant', 'tf.constant', (['[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]'], {'dtype': 'tf.float32'}), '([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], dtype=tf.float32)\n', (15331, 15385), True, 'import tensorflow.compat.v2 as tf\n'), ((15905, 15960), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[1.0, 0.0], [0.0, 1.0]]'], {'dtype': 'tf.float32'}), '([[1.0, 0.0], [0.0, 1.0]], dtype=tf.float32)\n', (15916, 15960), True, 'import tensorflow.compat.v2 as tf\n'), ((16464, 16484), 'tensorflow.compat.v2.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (16482, 16484), True, 'import tensorflow.compat.v2 as tf\n'), ((17026, 17046), 'tensorflow.compat.v2.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (17044, 17046), True, 'import tensorflow.compat.v2 as tf\n'), ((17577, 17597), 'tensorflow.compat.v2.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (17595, 17597), True, 'import tensorflow.compat.v2 as tf\n')] |
import math
import numpy as np
from .traj import Trajectory
# UED cross sections (computed by ELSEPA for a 3.7 MeV e- beam with default settings)
_ued_cross_sections = {
1: 3.92943e-04,
2: 5.96348e-04,
3: 3.89833e-03,
4: 6.17327e-03,
5: 7.76737e-03,
6: 8.74560e-03,
7: 9.42320e-03,
8: 9.92602e-03,
9: 1.03156e-02,
10: 1.06265e-02,
11: 1.69220e-02,
12: 2.20789e-02,
13: 2.80195e-02,
14: 3.22411e-02,
15: 3.54220e-02,
16: 3.79121e-02,
17: 3.99156e-02,
18: 4.15608e-02,
19: 5.48441e-02,
20: 6.56685e-02,
21: 6.76687e-02,
22: 6.88909e-02,
23: 6.97234e-02,
24: 6.49267e-02,
25: 7.07457e-02,
26: 7.10577e-02,
27: 7.12812e-02,
28: 7.14359e-02,
29: 6.67471e-02,
30: 7.15914e-02,
31: 7.91437e-02,
32: 8.51549e-02,
33: 9.02497e-02,
34: 9.46627e-02,
35: 9.85306e-02,
36: 1.01948e-01,
37: 1.20694e-01,
38: 1.36372e-01,
39: 1.42990e-01,
40: 1.47529e-01,
41: 1.44643e-01,
42: 1.47227e-01,
43: 1.49397e-01,
44: 1.51232e-01,
45: 1.52791e-01,
46: 1.47081e-01,
47: 1.55245e-01,
48: 1.63144e-01,
49: 1.74926e-01,
50: 1.84575e-01,
51: 1.92955e-01,
52: 2.00383e-01,
53: 2.07039e-01,
54: 2.13039e-01,
55: 2.40272e-01,
56: 2.62970e-01,
57: 2.73268e-01,
58: 2.64265e-01,
59: 2.64055e-01,
60: 2.63588e-01,
61: 2.62944e-01,
62: 2.62170e-01,
63: 2.61295e-01,
64: 2.68502e-01,
65: 2.59327e-01,
66: 2.58262e-01,
67: 2.57156e-01,
68: 2.56016e-01,
69: 2.54849e-01,
70: 2.53659e-01,
71: 2.60687e-01,
72: 2.65547e-01,
73: 2.69569e-01,
74: 2.73027e-01,
75: 2.76042e-01,
76: 2.78691e-01,
77: 2.81022e-01,
78: 2.76923e-01,
79: 2.78661e-01,
80: 2.86460e-01,
81: 3.00666e-01,
82: 3.12359e-01,
83: 3.22665e-01,
84: 3.31940e-01,
85: 3.40371e-01,
86: 3.48076e-01,
87: 3.78187e-01,
88: 4.03532e-01,
89: 4.18951e-01,
90: 4.30842e-01,
91: 4.24330e-01,
92: 4.25599e-01,
93: 4.26351e-01,
94: 4.17340e-01,
}
def compute_ued_simple(
traj: Trajectory,
key: str,
R: np.ndarray,
alpha: float,
ABpairs=None,
) -> Trajectory:
"""Compute the simple pairwise-distance form of the UED cross section,
with Gaussian blurring in R.
Params:
traj: the Trajectory object to compute the property for (modified in
place)
key: the name of the property
R: the distances to collocate the
UED cross section to.
alpha: the Guassian blurring exponent
ABpairs: a restricted list of atom pair indices
to include in the computation, or None for all atom pairs.
Return:
traj: reference to the input Trajectory object. The property
key is set to computed UED property.
"""
for frame in traj.frames:
N = frame.N
xyz = frame.xyz
# Which pair indices?
if ABpairs is None:
ABpairs2 = []
for A in range(len(N)):
for B in range(A):
ABpairs2.append((A, B))
else:
ABpairs2 = ABpairs
# Compute UED cross section
V = np.zeros_like(R)
for A, B in ABpairs2:
rAB = xyz[A, :] - xyz[B, :]
RAB = math.sqrt(sum(rAB ** 2))
SAB = math.sqrt(_ued_cross_sections[N[A]] * _ued_cross_sections[N[B]]) / RAB
V += SAB * math.sqrt(alpha / math.pi) * np.exp(-alpha * (R - RAB) ** 2)
frame.properties[key] = V
return traj
| [
"numpy.zeros_like",
"numpy.exp",
"math.sqrt"
] | [((3287, 3303), 'numpy.zeros_like', 'np.zeros_like', (['R'], {}), '(R)\n', (3300, 3303), True, 'import numpy as np\n'), ((3435, 3499), 'math.sqrt', 'math.sqrt', (['(_ued_cross_sections[N[A]] * _ued_cross_sections[N[B]])'], {}), '(_ued_cross_sections[N[A]] * _ued_cross_sections[N[B]])\n', (3444, 3499), False, 'import math\n'), ((3558, 3589), 'numpy.exp', 'np.exp', (['(-alpha * (R - RAB) ** 2)'], {}), '(-alpha * (R - RAB) ** 2)\n', (3564, 3589), True, 'import numpy as np\n'), ((3529, 3555), 'math.sqrt', 'math.sqrt', (['(alpha / math.pi)'], {}), '(alpha / math.pi)\n', (3538, 3555), False, 'import math\n')] |
"""
Copyright (c) 2018-2021, <NAME>. All rights reserved.
Licensed under BSD-3 Clause, https://opensource.org/licenses/BSD-3-Clause
"""
# Import the libSIA python bindings and numpy
import pysia as sia
import numpy as np
import argparse
# Import plotting helpers
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import animation, rc
from mpl_toolkits.mplot3d import Axes3D
# Colormap for particles
cmap = cm.plasma
def create_dynamics(q: float, dt: float) -> sia.NonlinearGaussianDynamicsCT:
"""Creates the system dynamics model"""
# Lorenz attractor chaotic parameters
rho = 28
sig = 10
bet = 8 / 3
# Lorenz attractor dynamics equation. For NonlinearGaussian systems, we
# pass a lambda function to the constructor
f = lambda x, u: np.array([
sig * (x[1] - x[0]),
x[0] * (rho - x[2]) - x[1],
x[0] * x[1] - bet * x[2],
])
# Suppose that noise is added to all 3 channels
Q = q * np.identity(3)
# Create the system
return sia.NonlinearGaussianDynamicsCT(f, Q, dt)
def create_measurement(r: float, dt: float) -> sia.NonlinearGaussianMeasurementCT:
"""Creates the system measurement model"""
# Suppose we measure a linear combination of states in the measurement
# equation. For NonlinearGaussian systems, we pass a lambda function to
# the constructor
h = lambda x: np.array([
x[0] - x[1],
x[2],
])
R = r * np.identity(2)
# Create the system
return sia.NonlinearGaussianMeasurementCT(h, R, dt)
def create_estimators(dynamics: sia.NonlinearGaussianDynamicsCT,
measurement: sia.NonlinearGaussianMeasurementCT,
num_particles: int,
resample_threshold: float,
roughening_factor: float,
buffer_size: int):
"""Creates the estimators"""
# Initialize a gaussian belief
gaussian = sia.Gaussian(mean=np.array([0, 0, 20]),
covariance=1e3 * np.identity(3))
# Initialize the extended kalman filter
ekf = sia.EKF(dynamics=dynamics, measurement=measurement, state=gaussian)
# Initialize a particle belief
particles = sia.Particles.uniform(lower=np.array([-30, -30, -10]),
upper=np.array([30, 30, 50]),
num_particles=num_particles,
weighted_stats=True)
# Initialize the particle filter
pf = sia.PF(dynamics=dynamics,
measurement=measurement,
particles=particles,
resample_threshold=resample_threshold,
roughening_factor=roughening_factor)
# Initial true state
x = np.array([-10, 5, 20])
state = sia.Gaussian(3)
state.setMean(x)
# Initialize the runner
runner = sia.Runner({"ekf": ekf, "pf": pf}, buffer_size=buffer_size)
return (runner, state, ekf, pf)
def create_animate_3d_sim(dynamics, measurement, runner, state, ekf, pf, num_steps, dpi):
"""Creates an animation function for the 3d sim plot"""
# Set up the figure, the axis, and the plot element we want to animate
particles = pf.getBelief()
xp = particles.values()
wp = particles.weights()
x = state.mean()
fig = plt.figure(figsize=(1280 / dpi, 720 / dpi), dpi=dpi)
fig.patch.set_facecolor('black')
ax0 = fig.add_subplot(111, projection='3d')
ax0.patch.set_facecolor('black')
ax0.set_xlim([-30, 30])
ax0.set_ylim([-30, 30])
ax0.set_zlim([0, 50])
cs = 100 * wp / max(wp)
scatter = ax0.scatter(xp[0, :],
xp[1, :],
xp[2, :],
color=cmap(cs.astype(int)),
marker='.',
s=25,
alpha=1,
linewidths=None)
line, = ax0.plot(x[0], x[1], x[2], '-w')
point, = ax0.plot(x[0], x[1], x[2], '.w', ms=25)
plt.tight_layout()
plt.axis('off')
# Render the animation
return animation.FuncAnimation(fig,
step_animate_3d_sim,
fargs=(dynamics, measurement, runner, state, ekf, pf,
scatter, line, point, num_steps),
frames=num_steps,
interval=20,
blit=False)
def step_animate_3d_sim(i, dynamics, measurement, runner, state, ekf, pf, scatter, line,
point, num_steps):
"""Animation function for 3d sim. This is called sequentially."""
recorder = runner.recorder()
particles = pf.getBelief()
if i > 0:
# There is not forcing term to the system so we just assign zeros
u = np.zeros(3)
x = state.mean()
# This steps the system state, takes a measurement and steps each estimator
state.setMean(runner.stepAndEstimate(dynamics, measurement, x, u))
# Update state trajectory plot
line.set_data(recorder.getStates()[:2, :])
line.set_3d_properties(recorder.getStates()[2, :])
# Update the state point
point.set_data(recorder.getStates()[:2, -1])
point.set_3d_properties(recorder.getStates()[2, -1])
# Update particle plot
xp = particles.values()
wp = particles.weights()
scatter._offsets3d = (xp[0, :], xp[1, :], xp[2, :])
cs = 100 * wp / max(wp)
scatter._facecolor3d = cmap(cs.astype(int))
scatter._edgecolor3d = cmap(cs.astype(int))
def plot_estimates(dynamics, measurement, runner, state, ekf, pf, num_steps, dpi):
"""Plots estimates of the particle filter and ekf"""
# Set up the figure, the axis, and the plot element we want to animate
recorder = runner.recorder()
state = recorder.getStates()
pf_mean = recorder.getEstimateMeans("pf")
pf_mode = recorder.getEstimateModes("pf")
pf_var = recorder.getEstimateVariances("pf")
ekf_mean = recorder.getEstimateMeans("ekf")
ekf_var = recorder.getEstimateVariances("ekf")
fig = plt.figure(figsize=(1280 / dpi, 720 / dpi), dpi=dpi)
t = np.linspace(1, num_steps, num_steps)
ax = [0] * 3
for i in range(3):
ax[i] = fig.add_subplot(3, 1, (i + 1))
plt.sca(ax[i])
ax[i].fill_between(t,
pf_mean[i, :] - 3 * np.sqrt(pf_var[i, :]),
pf_mean[i, :] + 3 * np.sqrt(pf_var[i, :]),
alpha=0.2,
label="PF")
ax[i].plot(t, pf_mode[i, :], lw=1)
ax[i].fill_between(t,
ekf_mean[i, :] - 3 * np.sqrt(ekf_var[i, :]),
ekf_mean[i, :] + 3 * np.sqrt(ekf_var[i, :]),
alpha=0.2,
label="EKF")
ax[i].plot(t, ekf_mean[i, :], lw=1)
ax[i].plot(t, state[i, :], "-k", label="Truth")
if i == 2:
ax[i].legend(frameon=False, loc='lower center', ncol=3)
plt.ylabel("State " + str(i))
plt.axis("on")
plt.box(on=None)
ax[i].get_xaxis().set_visible(False)
ax[i].get_yaxis().set_visible(True)
ax[i].set_yticks([])
def main(num_steps: int, dt: float, process_noise: float, measurement_noise: float,
num_particles: int, resample_threshold: float,
roughening_factor: float, video_name: str, dpi: int,
show_plots: bool):
""""Run estimators on a Lorenz attractor estimation problem"""
# Create the system
dynamics = create_dynamics(process_noise, dt)
measurement = create_measurement(measurement_noise, dt)
# Setup the estimators
runner, state, ekf, pf = create_estimators(dynamics, measurement, num_particles,
resample_threshold,
roughening_factor, num_steps)
# Create the animation function
anim = create_animate_3d_sim(dynamics, measurement, runner, state, ekf, pf, num_steps,
dpi)
# Render and save the animation
Writer = animation.writers['ffmpeg']
writer = Writer(fps=30,
metadata=dict(title='Particle filter Lorenz attractor',
artist='<NAME>'),
bitrate=5000,
extra_args=['-vcodec', 'libx264'])
anim.save(video_name, writer=writer, dpi=dpi)
# Plot the estimates
plot_estimates(dynamics, measurement, runner, state, ekf, pf, num_steps, dpi)
# Show the animation
if show_plots:
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run estimators on a Lorenz attractor estimation problem")
parser.add_argument('--num_steps',
action="store",
dest="num_steps",
default=300,
type=int,
help="Number of time steps to animate")
parser.add_argument('--dt',
action="store",
dest="dt",
default=0.01,
type=float,
help="Time step (s)")
parser.add_argument('--process_noise',
action="store",
dest="process_noise",
default=1E-1,
type=float,
help="Process noise variance")
parser.add_argument('--measurement_noise',
action="store",
dest="measurement_noise",
default=1E1,
type=float,
help="Process noise variance")
parser.add_argument('--num_particles',
action="store",
dest="num_particles",
default=1000,
type=int,
help="Number of particles to initialize")
parser.add_argument('--resample_threshold',
action="store",
dest="resample_threshold",
default=0.4,
type=float,
help="Threshold [0, 1] to resample particles")
parser.add_argument('--roughening_factor',
action="store",
dest="roughening_factor",
default=2E-3,
type=float,
help="Magnitude of roughening [0, \infty]")
parser.add_argument('--video_name',
action="store",
dest="video_name",
default="lorenz-animated.mp4",
type=str,
help="Name of the rendered video file")
parser.add_argument('--dpi',
action="store",
dest="dpi",
default=150,
type=int,
help="Resolution of the images to render")
parser.add_argument('--show_plots',
action="store",
dest="show_plots",
default=True,
type=bool,
help="Show and animate plots")
args = parser.parse_args()
main(num_steps=args.num_steps,
dt=args.dt,
process_noise=args.process_noise,
measurement_noise=args.measurement_noise,
num_particles=args.num_particles,
resample_threshold=args.resample_threshold,
roughening_factor=args.roughening_factor,
video_name=args.video_name,
dpi=args.dpi,
show_plots=args.show_plots)
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.box",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pysia.NonlinearGaussianDynamicsCT",
"numpy.identity",
"numpy.linspace",
"pysia.Runner",
"matplotlib.pyplot.show",
"pysia.EKF",
"pysia.PF",
... | [((1032, 1073), 'pysia.NonlinearGaussianDynamicsCT', 'sia.NonlinearGaussianDynamicsCT', (['f', 'Q', 'dt'], {}), '(f, Q, dt)\n', (1063, 1073), True, 'import pysia as sia\n'), ((1514, 1558), 'pysia.NonlinearGaussianMeasurementCT', 'sia.NonlinearGaussianMeasurementCT', (['h', 'R', 'dt'], {}), '(h, R, dt)\n', (1548, 1558), True, 'import pysia as sia\n'), ((2116, 2183), 'pysia.EKF', 'sia.EKF', ([], {'dynamics': 'dynamics', 'measurement': 'measurement', 'state': 'gaussian'}), '(dynamics=dynamics, measurement=measurement, state=gaussian)\n', (2123, 2183), True, 'import pysia as sia\n'), ((2532, 2683), 'pysia.PF', 'sia.PF', ([], {'dynamics': 'dynamics', 'measurement': 'measurement', 'particles': 'particles', 'resample_threshold': 'resample_threshold', 'roughening_factor': 'roughening_factor'}), '(dynamics=dynamics, measurement=measurement, particles=particles,\n resample_threshold=resample_threshold, roughening_factor=roughening_factor)\n', (2538, 2683), True, 'import pysia as sia\n'), ((2778, 2800), 'numpy.array', 'np.array', (['[-10, 5, 20]'], {}), '([-10, 5, 20])\n', (2786, 2800), True, 'import numpy as np\n'), ((2813, 2828), 'pysia.Gaussian', 'sia.Gaussian', (['(3)'], {}), '(3)\n', (2825, 2828), True, 'import pysia as sia\n'), ((2892, 2951), 'pysia.Runner', 'sia.Runner', (["{'ekf': ekf, 'pf': pf}"], {'buffer_size': 'buffer_size'}), "({'ekf': ekf, 'pf': pf}, buffer_size=buffer_size)\n", (2902, 2951), True, 'import pysia as sia\n'), ((3336, 3388), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1280 / dpi, 720 / dpi)', 'dpi': 'dpi'}), '(figsize=(1280 / dpi, 720 / dpi), dpi=dpi)\n', (3346, 3388), True, 'import matplotlib.pyplot as plt\n'), ((4034, 4052), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4050, 4052), True, 'import matplotlib.pyplot as plt\n'), ((4057, 4072), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4065, 4072), True, 'import matplotlib.pyplot as plt\n'), ((4112, 4300), 
'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'step_animate_3d_sim'], {'fargs': '(dynamics, measurement, runner, state, ekf, pf, scatter, line, point, num_steps\n )', 'frames': 'num_steps', 'interval': '(20)', 'blit': '(False)'}), '(fig, step_animate_3d_sim, fargs=(dynamics,\n measurement, runner, state, ekf, pf, scatter, line, point, num_steps),\n frames=num_steps, interval=20, blit=False)\n', (4135, 4300), False, 'from matplotlib import animation, rc\n'), ((6173, 6225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1280 / dpi, 720 / dpi)', 'dpi': 'dpi'}), '(figsize=(1280 / dpi, 720 / dpi), dpi=dpi)\n', (6183, 6225), True, 'import matplotlib.pyplot as plt\n'), ((6234, 6270), 'numpy.linspace', 'np.linspace', (['(1)', 'num_steps', 'num_steps'], {}), '(1, num_steps, num_steps)\n', (6245, 6270), True, 'import numpy as np\n'), ((8759, 8858), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run estimators on a Lorenz attractor estimation problem"""'}), "(description=\n 'Run estimators on a Lorenz attractor estimation problem')\n", (8782, 8858), False, 'import argparse\n'), ((799, 889), 'numpy.array', 'np.array', (['[sig * (x[1] - x[0]), x[0] * (rho - x[2]) - x[1], x[0] * x[1] - bet * x[2]]'], {}), '([sig * (x[1] - x[0]), x[0] * (rho - x[2]) - x[1], x[0] * x[1] - \n bet * x[2]])\n', (807, 889), True, 'import numpy as np\n'), ((981, 995), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (992, 995), True, 'import numpy as np\n'), ((1398, 1427), 'numpy.array', 'np.array', (['[x[0] - x[1], x[2]]'], {}), '([x[0] - x[1], x[2]])\n', (1406, 1427), True, 'import numpy as np\n'), ((1463, 1477), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (1474, 1477), True, 'import numpy as np\n'), ((4879, 4890), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4887, 4890), True, 'import numpy as np\n'), ((6366, 6380), 'matplotlib.pyplot.sca', 'plt.sca', (['ax[i]'], {}), '(ax[i])\n', (6373, 6380), 
True, 'import matplotlib.pyplot as plt\n'), ((7156, 7170), 'matplotlib.pyplot.axis', 'plt.axis', (['"""on"""'], {}), "('on')\n", (7164, 7170), True, 'import matplotlib.pyplot as plt\n'), ((7179, 7195), 'matplotlib.pyplot.box', 'plt.box', ([], {'on': 'None'}), '(on=None)\n', (7186, 7195), True, 'import matplotlib.pyplot as plt\n'), ((8706, 8716), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8714, 8716), True, 'import matplotlib.pyplot as plt\n'), ((1978, 1998), 'numpy.array', 'np.array', (['[0, 0, 20]'], {}), '([0, 0, 20])\n', (1986, 1998), True, 'import numpy as np\n'), ((2264, 2289), 'numpy.array', 'np.array', (['[-30, -30, -10]'], {}), '([-30, -30, -10])\n', (2272, 2289), True, 'import numpy as np\n'), ((2335, 2357), 'numpy.array', 'np.array', (['[30, 30, 50]'], {}), '([30, 30, 50])\n', (2343, 2357), True, 'import numpy as np\n'), ((2045, 2059), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (2056, 2059), True, 'import numpy as np\n'), ((6458, 6479), 'numpy.sqrt', 'np.sqrt', (['pf_var[i, :]'], {}), '(pf_var[i, :])\n', (6465, 6479), True, 'import numpy as np\n'), ((6528, 6549), 'numpy.sqrt', 'np.sqrt', (['pf_var[i, :]'], {}), '(pf_var[i, :])\n', (6535, 6549), True, 'import numpy as np\n'), ((6749, 6771), 'numpy.sqrt', 'np.sqrt', (['ekf_var[i, :]'], {}), '(ekf_var[i, :])\n', (6756, 6771), True, 'import numpy as np\n'), ((6821, 6843), 'numpy.sqrt', 'np.sqrt', (['ekf_var[i, :]'], {}), '(ekf_var[i, :])\n', (6828, 6843), True, 'import numpy as np\n')] |
import os
import pandas as pd
import numpy as np
import torch
import torch.utils.data as data
from torchsample.transforms import RandomRotate, RandomTranslate, RandomFlip, ToTensor, Compose, RandomAffine
from torchvision import transforms
INPUT_DIM = 224
MAX_PIXEL_VAL = 255
MEAN = 58.09
STDDEV = 49.73
class MRData():
"""This class used to load MRnet dataset from `./images` dir
"""
def __init__(self,task = 'acl', train = True, transform = None, weights = None):
"""Initialize the dataset
Args:
plane : along which plane to load the data
task : for which task to load the labels
train : whether to load the train or val data
transform : which transforms to apply
weights (Tensor) : Give wieghted loss to postive class eg. `weights=torch.tensor([2.223])`
"""
self.planes=['axial', 'coronal', 'sagittal']
self.diseases = ['abnormal','acl','meniscus']
self.records = {'abnormal' : None, 'acl' : None, 'meniscus' : None}
# an empty dictionary
self.image_path={}
if train:
for disease in self.diseases:
self.records[disease] = pd.read_csv('./images/train-{}.csv'.format(disease),header=None, names=['id', 'label'])
'''
self.image_path[<plane>]= dictionary {<plane>: path to folder containing
image for that plane}
'''
for plane in self.planes:
self.image_path[plane] = './images/train/{}/'.format(plane)
else:
for disease in self.diseases:
self.records[disease] = pd.read_csv('./images/valid-{}.csv'.format(disease),header=None, names=['id', 'label'])
'''
self.image_path[<plane>]= dictionary {<plane>: path to folder containing
image for that plane}
'''
for plane in self.planes:
self.image_path[plane] = './images/valid/{}/'.format(plane)
self.transform = transform
for disease in self.diseases:
self.records[disease]['id'] = self.records[disease]['id'].map(
lambda i: '0' * (4 - len(str(i))) + str(i))
# empty dictionary
self.paths={}
for plane in self.planes:
self.paths[plane] = [self.image_path[plane] + filename +
'.npy' for filename in self.records['acl']['id'].tolist()]
self.labels = {'abnormal' : None, 'acl' : None, 'meniscus' : None}
for disease in self.diseases:
self.labels[disease] = self.records[disease]['label'].tolist()
weights_ = []
for disease in self.diseases:
pos = sum(self.labels[disease])
neg = len(self.labels[disease]) - pos
weights_.append(neg/pos)
# Find the wieghts of pos and neg classes
if weights:
self.weights = torch.FloatTensor(weights)
else:
self.weights = torch.FloatTensor(weights_)
print('Weights for loss is : ', self.weights)
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.records['acl'])
def __getitem__(self, index):
"""
Returns `(images,labels)` pair
where image is a list [imgsPlane1,imgsPlane2,imgsPlane3]
and labels is a list [gt,gt,gt]
"""
img_raw = {}
for plane in self.planes:
img_raw[plane] = np.load(self.paths[plane][index])
img_raw[plane] = self._resize_image(img_raw[plane])
label = []
for disease in self.diseases:
label.append(self.labels[disease][index])
label = torch.FloatTensor(label)
return [img_raw[plane] for plane in self.planes], label
def _resize_image(self, image):
"""Resize the image to `(3,224,224)` and apply
transforms if possible.
"""
# Resize the image
pad = int((image.shape[2] - INPUT_DIM)/2)
image = image[:,pad:-pad,pad:-pad]
image = (image-np.min(image))/(np.max(image)-np.min(image))*MAX_PIXEL_VAL
image = (image - MEAN) / STDDEV
if self.transform:
image = self.transform(image)
else:
image = np.stack((image,)*3, axis=1)
image = torch.FloatTensor(image)
return image
def load_data(task : str):
# Define the Augmentation here only
augments = Compose([
transforms.Lambda(lambda x: torch.Tensor(x)),
RandomRotate(25),
RandomTranslate([0.11, 0.11]),
RandomFlip(),
transforms.Lambda(lambda x: x.repeat(3, 1, 1, 1).permute(1, 0, 2, 3)),
])
print('Loading Train Dataset of {} task...'.format(task))
train_data = MRData(task, train=True, transform=augments)
train_loader = data.DataLoader(
train_data, batch_size=1, num_workers=4, shuffle=True
)
print('Loading Validation Dataset of {} task...'.format(task))
val_data = MRData(task, train=False)
val_loader = data.DataLoader(
val_data, batch_size=1, num_workers=4, shuffle=False
)
return train_loader, val_loader, train_data.weights, val_data.weights
| [
"numpy.stack",
"numpy.load",
"torch.utils.data.DataLoader",
"torch.FloatTensor",
"torchsample.transforms.RandomRotate",
"torchsample.transforms.RandomTranslate",
"torchsample.transforms.RandomFlip",
"numpy.min",
"numpy.max",
"torch.Tensor"
] | [((5026, 5096), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_data'], {'batch_size': '(1)', 'num_workers': '(4)', 'shuffle': '(True)'}), '(train_data, batch_size=1, num_workers=4, shuffle=True)\n', (5041, 5096), True, 'import torch.utils.data as data\n'), ((5237, 5306), 'torch.utils.data.DataLoader', 'data.DataLoader', (['val_data'], {'batch_size': '(1)', 'num_workers': '(4)', 'shuffle': '(False)'}), '(val_data, batch_size=1, num_workers=4, shuffle=False)\n', (5252, 5306), True, 'import torch.utils.data as data\n'), ((3888, 3912), 'torch.FloatTensor', 'torch.FloatTensor', (['label'], {}), '(label)\n', (3905, 3912), False, 'import torch\n'), ((4515, 4539), 'torch.FloatTensor', 'torch.FloatTensor', (['image'], {}), '(image)\n', (4532, 4539), False, 'import torch\n'), ((3066, 3092), 'torch.FloatTensor', 'torch.FloatTensor', (['weights'], {}), '(weights)\n', (3083, 3092), False, 'import torch\n'), ((3134, 3161), 'torch.FloatTensor', 'torch.FloatTensor', (['weights_'], {}), '(weights_)\n', (3151, 3161), False, 'import torch\n'), ((3649, 3682), 'numpy.load', 'np.load', (['self.paths[plane][index]'], {}), '(self.paths[plane][index])\n', (3656, 3682), True, 'import numpy as np\n'), ((4461, 4491), 'numpy.stack', 'np.stack', (['((image,) * 3)'], {'axis': '(1)'}), '((image,) * 3, axis=1)\n', (4469, 4491), True, 'import numpy as np\n'), ((4717, 4733), 'torchsample.transforms.RandomRotate', 'RandomRotate', (['(25)'], {}), '(25)\n', (4729, 4733), False, 'from torchsample.transforms import RandomRotate, RandomTranslate, RandomFlip, ToTensor, Compose, RandomAffine\n'), ((4743, 4772), 'torchsample.transforms.RandomTranslate', 'RandomTranslate', (['[0.11, 0.11]'], {}), '([0.11, 0.11])\n', (4758, 4772), False, 'from torchsample.transforms import RandomRotate, RandomTranslate, RandomFlip, ToTensor, Compose, RandomAffine\n'), ((4782, 4794), 'torchsample.transforms.RandomFlip', 'RandomFlip', ([], {}), '()\n', (4792, 4794), False, 'from torchsample.transforms import 
RandomRotate, RandomTranslate, RandomFlip, ToTensor, Compose, RandomAffine\n'), ((4258, 4271), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (4264, 4271), True, 'import numpy as np\n'), ((4274, 4287), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4280, 4287), True, 'import numpy as np\n'), ((4288, 4301), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (4294, 4301), True, 'import numpy as np\n'), ((4691, 4706), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (4703, 4706), False, 'import torch\n')] |
import pandas
import numpy as np
from keras.preprocessing import image
from keras.layers import Conv2D, Flatten, Dense, Dropout, MaxPool2D
from keras.optimizers import Adam
from keras.models import Sequential
from keras import regularizers
import scipy.misc
import tensorflow as tf
import cv2
from subprocess import call
import os
import matplotlib.pyplot as plt
from skimage import io
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
import time
from timeit import default_timer as timer

# Character-recognition CNN: 62 classes (digits 0-9, then A-Z, then a-z)
# on 28x28 single-channel input.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1), data_format='channels_last'))
model.add(MaxPool2D((2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPool2D((2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPool2D((2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(62, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.load_weights('./weights/z-1-weights-0.8679.h5')

# Class-index -> character lookup table: indices 0-9 map to the digits,
# 10-35 to upper-case letters, 36-61 to lower-case letters.
mapp = {}
a = 'abcdefghijklmnopqrstuvwxyz'
count = 0
for x in range(10):
    mapp[x] = count
    count += 1
for y in a:
    mapp[count] = y.upper()
    count += 1
for y in a:
    mapp[count] = y
    count += 1

ans = []
vid = cv2.VideoCapture(0)
# Grab webcam frames until 'q' is pressed, classifying each new frame once.
# (The previous version re-classified the ENTIRE frame history every
# iteration - quadratic work that also appended duplicate predictions to
# `ans`, inflating the final ratio.)
while cv2.waitKey(1) != ord('q'):
    ret, frame = vid.read()
    if not ret:  # camera disconnected / stream ended
        break
    cv2.imshow('frame', frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Resize to the network input size and add batch + channel axes.
    sample = cv2.resize(gray, (28, 28))[np.newaxis, :, :, np.newaxis]
    prediction = np.argmax(model.predict(sample), axis=1)[0]
    ans.append(mapp.get(prediction))
    print(mapp.get(prediction), " : PREDICTED")
vid.release()  # release the camera handle (was leaked before)
print("ENDING")
coun = 0
for cc in ans:
    if cc == "k" or cc == 'K':
        coun += 1
print("Correct Ratio: ", coun, " : ", len(ans))
cv2.destroyAllWindows()
| [
"numpy.argmax",
"cv2.waitKey",
"keras.layers.Dropout",
"cv2.cvtColor",
"keras.layers.MaxPool2D",
"keras.layers.Flatten",
"cv2.imshow",
"cv2.VideoCapture",
"keras.layers.Dense",
"keras.layers.Conv2D",
"keras.models.Sequential",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((734, 746), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (744, 746), False, 'from keras.models import Sequential\n'), ((2234, 2253), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2250, 2253), False, 'import cv2\n'), ((3214, 3237), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3235, 3237), False, 'import cv2\n'), ((758, 854), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': '(28, 28, 1)', 'data_format': '"""channels_last"""'}), "(32, (3, 3), activation='relu', input_shape=(28, 28, 1), data_format=\n 'channels_last')\n", (764, 854), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((854, 871), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {}), '((2, 2))\n', (863, 871), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((882, 894), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (889, 894), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((907, 944), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (913, 944), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((953, 970), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {}), '((2, 2))\n', (962, 970), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((981, 993), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (988, 993), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((1006, 1044), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (1012, 1044), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((1053, 1070), 'keras.layers.MaxPool2D', 'MaxPool2D', (['(2, 2)'], {}), '((2, 2))\n', (1062, 1070), False, 'from 
keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((1081, 1093), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1088, 1093), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((1106, 1115), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1113, 1115), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((1212, 1241), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1217, 1241), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((1253, 1284), 'keras.layers.Dense', 'Dense', (['(62)'], {'activation': '"""softmax"""'}), "(62, activation='softmax')\n", (1258, 1284), False, 'from keras.layers import Conv2D, Dense, MaxPool2D, Dropout, Flatten\n'), ((2261, 2275), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2272, 2275), False, 'import cv2\n'), ((2349, 2375), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (2359, 2375), False, 'import cv2\n'), ((2446, 2485), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (2458, 2485), False, 'import cv2\n'), ((2724, 2754), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (2733, 2754), True, 'import numpy as np\n'), ((2537, 2564), 'cv2.resize', 'cv2.resize', (['image', '(28, 28)'], {}), '(image, (28, 28))\n', (2547, 2564), False, 'import cv2\n')] |
"""
Module containing tasks for morphological operations
Credits:
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2019-2020 <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import itertools as it
from enum import Enum
from typing import Optional, Callable, Union
import skimage.morphology
import skimage.filters.rank
import numpy as np
from eolearn.core import EOTask, MapFeatureTask
class ErosionTask(EOTask):
    """Erode labelled regions of a mask feature.

    Every label selected for erosion is binary-eroded with a disk-shaped
    structuring element; pixels that no longer belong to any label after
    erosion are replaced with ``no_data_label``.

    :param mask_feature: The mask which is to be eroded
    :type mask_feature: (FeatureType, str)
    :param disk_radius: Radius of the erosion disk (in pixels). Default is set to `1`
    :type disk_radius: int
    :param erode_labels: List of labels to erode. If `None`, all unique labels are eroded. Default is `None`
    :type erode_labels: list(int)
    :param no_data_label: Value used to replace eroded pixels. Default is set to `0`
    :type no_data_label: int
    """

    def __init__(self, mask_feature, disk_radius=1, erode_labels=None, no_data_label=0):
        radius_is_valid = isinstance(disk_radius, int) and disk_radius >= 1
        if not radius_is_valid:
            raise ValueError("Disk radius should be an integer larger than 0!")

        self.mask_type, self.mask_name, self.new_mask_name = self.parse_renamed_feature(mask_feature)
        self.disk = skimage.morphology.disk(disk_radius)
        self.erode_labels = erode_labels
        self.no_data_label = no_data_label

    def execute(self, eopatch):
        labels = eopatch[(self.mask_type, self.mask_name)].squeeze().copy()

        present = np.unique(labels)
        # Labels selected for erosion; the no-data label itself is never eroded.
        targets = set(self.erode_labels if self.erode_labels else present) - {self.no_data_label}
        # Labels that are carried over untouched.
        passthrough = set(present) - targets - {self.no_data_label}

        masks = [skimage.morphology.binary_erosion(labels == label, self.disk) for label in targets]
        masks += [labels == label for label in passthrough]

        covered = np.logical_or.reduce(masks, axis=0)
        labels[~covered] = self.no_data_label

        eopatch[(self.mask_type, self.new_mask_name)] = np.expand_dims(labels, axis=-1)
        return eopatch
class MorphologicalOperations(Enum):
    """Enumeration of the supported morphological operations."""

    OPENING = "opening"
    CLOSING = "closing"
    DILATION = "dilation"
    EROSION = "erosion"
    MEDIAN = "median"

    @classmethod
    def get_operation(cls, morph_type):
        """Resolve a morphological operation type to the scikit-image function implementing it.

        :param morph_type: Morphological operation type
        :type morph_type: MorphologicalOperations
        :return: function
        """
        lookup = dict()
        lookup[cls.OPENING] = skimage.morphology.opening
        lookup[cls.CLOSING] = skimage.morphology.closing
        lookup[cls.DILATION] = skimage.morphology.dilation
        lookup[cls.EROSION] = skimage.morphology.erosion
        lookup[cls.MEDIAN] = skimage.filters.rank.median
        return lookup[morph_type]
class MorphologicalStructFactory:
    """Factory helpers that build structuring elements for morphological operations."""

    @staticmethod
    def get_disk(radius):
        """Build a disk-shaped structuring element.

        :param radius: Radius of disk
        :type radius: int
        :return: Array in which neighbourhood members are 1 and every other entry is 0.
        :rtype: numpy.ndarray
        """
        return skimage.morphology.disk(radius)

    @staticmethod
    def get_diamond(radius):
        """Build a diamond-shaped structuring element.

        :param radius: Radius of diamond
        :type radius: int
        :return: Array in which neighbourhood members are 1 and every other entry is 0.
        :rtype: numpy.ndarray
        """
        return skimage.morphology.diamond(radius)

    @staticmethod
    def get_rectangle(width, height):
        """Build a rectangular structuring element consisting only of ones.

        :param width: Width of rectangle
        :type width: int
        :param height: Height of rectangle
        :type height: int
        :return: Array of ones, i.e. every pixel belongs to the neighbourhood.
        :rtype: numpy.ndarray
        """
        return skimage.morphology.rectangle(width, height)

    @staticmethod
    def get_square(width):
        """Build a square structuring element consisting only of ones.

        :param width: Size of square
        :type width: int
        :return: Array of ones, i.e. every pixel belongs to the neighbourhood.
        :rtype: numpy.ndarray
        """
        return skimage.morphology.square(width)
class MorphologicalFilterTask(MapFeatureTask):
    """Applies a morphological operation to every 2D slice of the given mask features."""

    def __init__(
        self,
        input_features,
        output_features=None,
        *,
        morph_operation: Union[MorphologicalOperations, Callable],
        struct_elem: Optional[np.ndarray] = None,
    ):
        """
        :param input_features: Input features to be processed.
        :param output_features: Outputs of input features. When omitted the `input_features` are overwritten.
        :param morph_operation: A morphological operation, either an enum member or a callable.
        :param struct_elem: Structuring element used by the operation; usually produced by a
            MorphologicalStructFactory method.
        """
        output_features = input_features if output_features is None else output_features
        super().__init__(input_features, output_features)

        self.morph_operation = (
            MorphologicalOperations.get_operation(morph_operation)
            if isinstance(morph_operation, MorphologicalOperations)
            else morph_operation
        )
        self.struct_elem = struct_elem

    def map_method(self, feature):
        """Applies the morphological operation to a raster feature."""
        result = feature.copy()
        if result.ndim == 3:
            for band in range(result.shape[2]):
                result[..., band] = self.morph_operation(result[..., band], self.struct_elem)
        elif result.ndim == 4:
            for frame in range(result.shape[0]):
                for band in range(result.shape[3]):
                    result[frame, ..., band] = self.morph_operation(result[frame, ..., band], self.struct_elem)
        else:
            raise ValueError(f"Invalid number of dimensions: {result.ndim}")

        return result
| [
"numpy.logical_or.reduce",
"numpy.expand_dims",
"numpy.unique"
] | [((1865, 1889), 'numpy.unique', 'np.unique', (['feature_array'], {}), '(feature_array)\n', (1874, 1889), True, 'import numpy as np\n'), ((2332, 2388), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(eroded_masks + other_masks)'], {'axis': '(0)'}), '(eroded_masks + other_masks, axis=0)\n', (2352, 2388), True, 'import numpy as np\n'), ((2503, 2541), 'numpy.expand_dims', 'np.expand_dims', (['feature_array'], {'axis': '(-1)'}), '(feature_array, axis=-1)\n', (2517, 2541), True, 'import numpy as np\n')] |
import types
import json
import sys
import re
import numpy
from datetime import datetime
import odc
from urllib.parse import urlparse, parse_qs
# Logging Levels
Trace = 0
Debug = 1
Info = 2
Warn = 3
Error = 4
Critical = 5
# Exact string values for Event parameters which are passed as strings
# EventTypes, ConnectState,Binary,Analog,Counter,FrozenCounter,BinaryOutputStatus,AnalogOutputStatus,ControlRelayOutputBlock and others...
# QualityFlags, ONLINE,RESTART,COMM_LOST,REMOTE_FORCED,LOCAL_FORCE,OVERRANGE,REFERENCE_ERR,ROLLOVER,DISCONTINUITY,CHATTER_FILTER
# ConnectState, PORT_UP,CONNECTED,DISCONNECTED,PORT_DOWN
# ControlCode, NUL,NUL_CANCEL,PULSE_ON,PULSE_ON_CANCEL,PULSE_OFF,PULSE_OFF_CANCEL,LATCH_ON,LATCH_ON_CANCEL,LATCH_OFF,LATCH_OFF_CANCEL,
# CLOSE_PULSE_ON,CLOSE_PULSE_ON_CANCEL,TRIP_PULSE_ON,TRIP_PULSE_ON_CANCEL,UNDEFINED
#
# This is a Simulator for an RTU and the equipment connected to it.
#
class SimPortClass:
    """Simulate an RTU and the equipment connected to it behind an ODC PyPort.

    The C/C++ host requires __init__, Config, Enable, Disable, EventHandler,
    TimerHandler and RestRequestHandler to be defined, as it calls them directly.
    ODC publishes odc.log, odc.SetTimer and odc.PublishEvent to this module.
    """

    # Worker logging methods. They are defined first so they are available to
    # the code below (no forward declarations in Python).
    def LogTrace(self, message):
        odc.log(self.guid, Trace, message)
    def LogError(self, message):
        odc.log(self.guid, Error, message)
    def LogDebug(self, message):
        odc.log(self.guid, Debug, message)
    def LogInfo(self, message):
        odc.log(self.guid, Info, message)
    def LogWarn(self, message):
        odc.log(self.guid, Warn, message)
    def LogCritical(self, message):
        odc.log(self.guid, Critical, message)

    # Required Method
    def __init__(self, odcportguid, objectname):
        self.objectname = objectname    # Documentation/error use only.
        self.guid = odcportguid         # Handed back on every odc call so ODC can route to this PyPort.
        # NOTE(review): Enable()/Disable() set self.enabled (lower case), leaving
        # this attribute stale - confirm which spelling the host actually reads.
        self.Enabled = False
        self.i = 0
        self.ConfigDict = {}            # Parsed JSON configuration.
        self.LogDebug("PyPortRtuSim - SimPortClass Init Called - {}".format(objectname))
        self.LogDebug("Python sys.path - {}".format(sys.path))
        return

    # Required Method
    def Config(self, MainJSON, OverrideJSON):
        """ The JSON values are passed as strings (stripped of comments), which we then load into a dictionary for processing
        Note that this does not handle Inherits JSON entries correctly (Inherits is effectily an Include file entry)"""
        self.LogDebug("Passed Main JSON Config information - Len {} , {}".format(len(MainJSON), MainJSON))
        self.LogDebug("Passed Override JSON Config information - Len {} , {}".format(len(OverrideJSON), OverrideJSON))
        # Load JSON into Dicts
        Override = {}
        try:
            if len(MainJSON) != 0:
                self.ConfigDict = json.loads(MainJSON)
            if len(OverrideJSON) != 0:
                Override = json.loads(OverrideJSON)
        except Exception:  # was a bare except: - narrowed so SystemExit/KeyboardInterrupt are not swallowed
            self.LogError("Exception on parsing JSON Config data - {}".format(sys.exc_info()[0]))
            return
        self.LogDebug("JSON Config strings Parsed")
        # Now use the override config settings to adjust or add to the MainConfig. Only root json values can be adjusted.
        # So you cannot change a single value in a Binary point definition without rewriting the whole "Binaries" json key.
        self.ConfigDict.update(Override)  # Merges with Override doing just that - no recursion into sub dictionaries
        self.LogDebug("Combined (Merged) JSON Config {}".format(json.dumps(self.ConfigDict)))
        # Now extract what is needed for this instance, or just reference the ConfigDict when needed.
        return

    # Worker: publish the currently-held value of every configured point.
    def SendInitialState(self):
        if "Analogs" in self.ConfigDict:
            for x in self.ConfigDict["Analogs"]:
                odc.PublishEvent(self.guid, "Analog", x["Index"], "|ONLINE|", str(x["Value"]))
        if "Binaries" in self.ConfigDict:
            for x in self.ConfigDict["Binaries"]:
                odc.PublishEvent(self.guid, "Binary", x["Index"], "|ONLINE|", str(x["Value"]))
        return

    #-------------------Worker Methods------------------
    # Get the current state for a digital bit, matching by the passed parameters.
    def GetBinaryValue(self, Type, Number, BitID):
        for x in self.ConfigDict["Binaries"]:
            if (x["Type"] == Type) and (x["Number"] == Number) and (x["BitID"] == BitID):
                # we have a match!
                return x["Value"]
        raise Exception("Could not find a matching binary value for {}, {}, {}".format(Type, Number, BitID))

    # Get the current state for a digital bit, matching by the ODC index.
    def GetBinaryValueUsingIndex(self, Index):
        for x in self.ConfigDict["Binaries"]:
            if (x["Index"] == Index):
                # we have a match!
                return x["Value"]
        raise Exception("Could not find a matching binary value for Index {}".format(Index))

    def SetBinaryValue(self, Type, Number, BitID, Value):
        """Set a digital bit matched by Type/Number/BitID, publishing an event on change."""
        for x in self.ConfigDict["Binaries"]:
            if (x["Type"] == Type) and (x["Number"] == Number) and (x["BitID"] == BitID):
                if type(Value) is str:  # Handle string values
                    Value = int(Value)
                if (x["Value"] == Value):
                    self.LogDebug("No Change in value of bit {}".format(BitID))
                    return  # nothing to do, the bit has not changed
                x["Value"] = Value
                odc.PublishEvent(self.guid, "Binary", x["Index"], "|ONLINE|", str(Value))  # Need to send this change through as an event
                return
        raise Exception("Could not find a matching binary value for {}, {}, {}".format(Type, Number, BitID))

    def SetBinaryValueUsingIndex(self, Index, Value):
        """Set a digital bit matched by ODC index, publishing an event on change."""
        for x in self.ConfigDict["Binaries"]:
            if (x["Index"] == Index):
                # we have a match!
                if type(Value) is str:  # Handle string values
                    Value = int(Value)
                if (x["Value"] == Value):
                    # BUGFIX: previously formatted the undefined name BitID, which
                    # raised a NameError whenever an unchanged value arrived.
                    self.LogDebug("No Change in value of bit {}".format(Index))
                    return  # nothing to do, the bit has not changed
                x["Value"] = Value
                odc.PublishEvent(self.guid, "Binary", x["Index"], "|ONLINE|", str(Value))  # Need to send this change through as an event
                return
        raise Exception("Could not find a matching binary value for Index {}".format(Index))

    def GetAnalogValue(self, Type, Number):
        """Look up an analog value by Type and point Number (Sim types match on Index)."""
        for x in self.ConfigDict["Analogs"]:
            if (x["Type"] == Type) and (Type == "TapChanger"):
                if x["Number"] == Number:
                    return x["Value"]
            elif (x["Type"] == Type) and (Type == "Sim"):
                if x["Index"] == Number:  # No number for Sim types, revert to ODCIndex
                    return x["Value"]
        raise Exception("Could not find a matching analog field for {}, {}".format(Type, Number))

    # Get the current analog value, matching by the ODC index.
    def GetAnalogValueUsingIndex(self, Index):
        for x in self.ConfigDict["Analogs"]:
            if (x["Index"] == Index):
                # we have a match!
                return x["Value"]
        raise Exception("Could not find a matching analog field for Index {}".format(Index))

    def SetAnalogValue(self, Type, Number, Value):
        """Set an analog value matched by Type/Number, always publishing an event."""
        for x in self.ConfigDict["Analogs"]:
            if (x["Type"] == Type) and (Type == "Sim"):
                if x["Index"] == Number:  # No number for Sim types, revert to ODCIndex
                    if type(Value) is str:  # Handle string values
                        Value = int(Value)
                    x["Value"] = Value
                    odc.PublishEvent(self.guid, "Analog", x["Index"], "|ONLINE|", str(Value))  # Need to send this change through as an event
                    return
            elif (x["Type"] == Type) and (x["Number"] == Number):
                if type(Value) is str:  # Handle string values
                    Value = int(Value)
                x["Value"] = Value
                odc.PublishEvent(self.guid, "Analog", x["Index"], "|ONLINE|", str(Value))  # Need to send this change through as an event
                return
        raise Exception("Could not find a matching analog field for {}, {}".format(Type, Number))

    def SetAnalogValueUsingIndex(self, Index, Value):
        """Set an analog value matched by ODC index, publishing an event on change."""
        for x in self.ConfigDict["Analogs"]:
            if (x["Index"] == Index):
                if type(Value) is str:  # Handle string values
                    Value = int(Value)
                if (x["Value"] == Value):
                    self.LogDebug("No Change in value of analog {}".format(Index))
                    return  # nothing to do, the value has not changed
                x["Value"] = Value
                odc.PublishEvent(self.guid, "Analog", x["Index"], "|ONLINE|", str(Value))  # Need to send this change through as an event
                return
        raise Exception("Could not find a matching Value field for {}".format(Index))

    def UpdateAnalogValue(self, x, secs):
        # use the secs, value, mean and std dev to calculate a new value - also if there is actually a new value.
        try:
            newval = numpy.random.normal(x["Mean"], x["StdDev"])
            if (newval != x["Value"]):
                x["Value"] = newval
                odc.PublishEvent(self.guid, "Analog", x["Index"], "|ONLINE|", str(x["Value"]))  # Need to send this change through as an event
        except (RuntimeError, TypeError, NameError, Exception) as e:
            self.LogError("Exception - {}".format(e))
            return  # dont change anything
        return

    def UpdateAnalogSimValues(self, secs):
        """Count down each Sim point's timer by `secs` and regenerate any that expired."""
        for x in self.ConfigDict["Analogs"]:
            if (x["Type"] == "Sim"):
                if "Timer" in x:
                    x["Timer"] -= secs * 1000
                    if (x["Timer"] <= 0):
                        # Timer expired! Send Event
                        x["Timer"] = x["UpdateIntervalms"]
                        self.UpdateAnalogValue(x, secs)
                else:
                    if ("UpdateIntervalms" in x):
                        if (x["UpdateIntervalms"] != 0):  # If 0 or not a field, do not do updates.
                            # Need to randomise the start count down period so they dont fire all at once.
                            x["Timer"] = numpy.random.randint(0, x["UpdateIntervalms"])
        return

    # Return a string indicating what the two binary bits mean
    def CBGetCombinedState(self, Bit1, Bit0):
        CBState = [["Maintenance", "Closed"], ["Open", "Fault"]]
        return CBState[Bit1][Bit0]

    def CBSetState(self, Number, Command):
        """Drive both state bits of circuit breaker `Number` to the named state."""
        CBState = {"MaintenanceZeros": [0, 0],
                   "Maintenance": [0, 0],
                   "Closed": [1, 0],
                   "Open": [0, 1],
                   "FaultOnes": [1, 1],
                   "Fault": [1, 1]}
        StateBits = CBState[Command]
        self.LogDebug("Setting CBState of {} to {}".format(Number, Command))
        self.SetBinaryValue("CB", Number, 0, StateBits[0])
        self.SetBinaryValue("CB", Number, 1, StateBits[1])
        return

    def GetFBType(self, Number):
        """Return the feedback type ("ANA" or "BCD") configured for tap changer `Number`."""
        for x in self.ConfigDict["BinaryControls"]:
            if (x["Type"] == "TapChanger") and (x["Number"] == Number):
                return x["FB"]
        return "Error"

    def GetBCDValue(self, Number):
        """Assemble an integer from the BCD bit fields of tap changer `Number`."""
        val = 0
        for x in self.ConfigDict["Binaries"]:
            if (x["Type"] == "TapChanger") and (x["Number"] == Number):
                val += x["Value"] << x["BitID"]
        return val

    def SetBCDValue(self, Number, val):
        """Scatter the bits of `val` into the BCD bit fields of tap changer `Number`."""
        for x in self.ConfigDict["Binaries"]:
            if (x["Type"] == "TapChanger") and (x["Number"] == Number):
                x["Value"] = 1 & (val >> x["BitID"])
        return

    def GetTapChangerValue(self, Number, FBType=None):
        """Read a tap changer position via its analog (ANA) or bit-field (BCD) feedback."""
        if not FBType:
            FBType = self.GetFBType(Number)
        if (FBType == "ANA"):
            return self.GetAnalogValue("TapChanger", Number)
        if (FBType == "BCD"):
            return self.GetBCDValue(Number)
        self.LogError("Illegal Type passed to GetTapChangerValue - {}".format(FBType))
        return

    def SetTapChangerValue(self, Number, val, FBType=None):
        """Write a tap changer position via its analog (ANA) or bit-field (BCD) feedback."""
        if not FBType:
            FBType = self.GetFBType(Number)
        if (FBType == "ANA"):
            self.SetAnalogValue("TapChanger", Number, val)
            return
        if (FBType == "BCD"):
            self.SetBCDValue(Number, val)
            return
        self.LogError("Illegal Feedback Type passed to SetTapChangerValue - {}".format(FBType))
        return

    def TapChangerTapUp(self, Number, FBType, Max):
        # Does the tapup command and limits the value to the maximum allowed.
        self.LogDebug("TapUpCommand")
        val = self.GetTapChangerValue(Number, FBType)
        val += 1
        if (val <= Max):
            self.SetTapChangerValue(Number, val, FBType)
        return

    def TapChangerTapDown(self, Number, FBType, Min):
        # Does the tapdown command and limits the value to the minimum allowed.
        self.LogDebug("TapDownCommand")
        val = self.GetTapChangerValue(Number, FBType)
        val -= 1
        if (val >= Min):
            self.SetTapChangerValue(Number, val, FBType)
        return

    # Returns values from loaded config. "Type" : "CB", "Number" : 1, "Command":"Trip"
    def GetControlDetailsUsingIndex(self, Index):
        for x in self.ConfigDict["BinaryControls"]:
            if (x["Index"] == Index):
                # we have a match!
                return x
        # BUGFIX: message previously used "{}}" which appended a stray "}" to the index.
        raise Exception("Could not find a matching Number and/or Command field for Index {}".format(Index))

    #---------------- Required Methods --------------------
    def Operational(self):
        """ This is called from ODC once ODC is ready for us to be fully operational - normally after Build is complete"""
        self.LogTrace("Port Operational - {}".format(datetime.now().isoformat(" ")))
        self.SendInitialState()
        #odc.SetTimer(self.guid, 1, 60*1000)    # Set up to run for first time after 1 minute
        return

    # Required Method
    def Enable(self):
        self.LogTrace("Enabled - {}".format(datetime.now().isoformat(" ")))
        self.enabled = True
        return

    # Required Method
    def Disable(self):
        self.LogDebug("Disabled - {}".format(datetime.now().isoformat(" ")))
        self.enabled = False
        return

    # Needs to return True or False, which will be translated into CommandStatus::SUCCESS or CommandStatus::UNDEFINED
    # EventType (string) Index (int), Time (msSinceEpoch), Quality (string) Payload (string) Sender (string)
    # There is no callback available, the ODC code expects this method to return without delay.
    # Required Method
    def EventHandler(self, EventType, Index, Time, Quality, Payload, Sender):
        """Apply an incoming ODC event (control, analog or binary) to the simulated plant."""
        self.LogDebug("EventHander: {}, {}, {} {} {} - {}".format(self.guid, Sender, Index, EventType, Quality, Payload))
        try:
            if (EventType == "ConnectState"):
                self.LogDebug("ConnectState Event {}".format(Payload))  # PORT_UP/CONNECTED/DISCONNECTED/PORT_DOWN
                if (Payload == "CONNECTED"):
                    self.SendInitialState()
                return True
            if not re.search("ONLINE", Quality):
                self.LogDebug("Quality not ONLINE, ignoring event")
                return True
            if (EventType == "ControlRelayOutputBlock"):
                # Need to work out which Circuit Breaker and what command
                x = self.GetControlDetailsUsingIndex(Index)
                if x["Type"] == "CB":
                    # {"Index": 0, "Type" : "CB", "Number" : 1, "Command":"Trip"},
                    if x["Command"] == "Trip":
                        self.CBSetState(x["Number"], "Open")
                    elif x["Command"] == "Close":
                        self.CBSetState(x["Number"], "Closed")
                    else:
                        self.LogDebug("Command received not recognised - {}".format(x["Command"]))
                elif x["Type"] == "TapChanger":
                    # {"Index": 2, "Type" : "TapChanger", "Number" : 1, "FB": "ANA", "Max": 32, "Command":"TapUp"},
                    self.LogDebug("Tap Changer")
                    if x["Command"] == "TapUp":
                        self.TapChangerTapUp(x["Number"], x["FB"], x["Max"])
                    elif x["Command"] == "TapDown":
                        self.TapChangerTapDown(x["Number"], x["FB"], x["Min"])
                    else:
                        self.LogDebug("Command received not recognised - {}".format(x["Command"]))
                elif x["Type"] == "CONTROL":
                    # The old style control does not go anywhere.
                    self.LogDebug("CONTROL Signal")
                else:
                    self.LogDebug("Command type received not recognised - {}".format(x["Type"]))
            elif (EventType == "Analog"):
                self.SetAnalogValueUsingIndex(Index, Payload)
            elif (EventType == "Binary"):
                self.SetBinaryValueUsingIndex(Index, Payload)
            else:
                self.LogDebug("Event is not one we are interested in - Ignoring")
        except (RuntimeError, TypeError, NameError, Exception) as e:
            self.LogError("Exception - {}".format(e))
            return False
        # Any changes that were made to the state, triggered events when they were made.
        return True

    # Will be called at the appropriate time by the ASIO handler system. Will be passed an id for the timeout,
    # so you can have multiple timers running.
    # Required Method
    def TimerHandler(self, TimerId):
        self.LogDebug("TimerHander: ID {}, {}".format(TimerId, self.guid))
        # This will be used to send Sim value changes that are the result of mean and std deviation values at the UpdateRate
        # Run once every 10 seconds?
        if (TimerId == 1):
            self.UpdateAnalogSimValues(10)  # Pass in seconds since last run
            odc.SetTimer(self.guid, 1, 10 * 1000)  # Set up to run again
        return

    # The Rest response interface - the following method will be called whenever the restful interface
    # (a single interface for all PythonPorts) gets called.
    # It will be decoded sufficiently so that it is passed to the correct PythonPort (us)
    #
    # We return the response that we want sent back to the caller. This will be a JSON string for GET.
    # A null (empty) string will be reported as a bad request by the c++ code.
    # Required Method
    def RestRequestHandler(self, eurl, content):
        """Serve GET /PyPortRtuSim/status and POST /PyPortRtuSim/set requests as JSON."""
        Response = {}  # Empty Dict
        url = ""
        try:
            HttpMethod = ""
            if ("GET" in eurl):
                url = eurl.replace("GET ", "", 1)
                HttpMethod = "GET"
            elif ("POST" in eurl):
                url = eurl.replace("POST ", "", 1)
                HttpMethod = "POST"
            else:
                self.LogError("PyPort only supports GET and POST Http Requests")
                return ""  # Will cause an error response to the request

            urlp = urlparse(url)  # Split into sections that we can reference. Mainly to get parameters

            # We have only one get option at the moment - Status with Number as the parameter.
            if (HttpMethod == "GET" and "/PyPortRtuSim/status" in eurl):
                self.LogDebug("GET url {} ".format(url))
                qs = parse_qs(urlp.query)
                Number = int(qs["Number"][0])
                Type = qs["Type"][0]
                if Type == "CB":
                    self.LogDebug("Circuit Breaker Status Request - {}".format(Number))
                    Response["Bit0"] = self.GetBinaryValue("CB", Number, 0)
                    Response["Bit1"] = self.GetBinaryValue("CB", Number, 1)
                    Response["State"] = self.CBGetCombinedState(Response["Bit1"], Response["Bit0"])
                    return json.dumps(Response)
                if Type == "TapChanger":
                    self.LogDebug("Tap Changer Status Request - {}".format(Number))
                    Response["State"] = self.GetTapChangerValue(Number)
                    return json.dumps(Response)
                if Type == "Analog":
                    Response["State"] = self.GetAnalogValueUsingIndex(Number)
                    return json.dumps(Response)
                if Type == "Binary":
                    Response["State"] = self.GetBinaryValueUsingIndex(Number)
                    return json.dumps(Response)

            elif (HttpMethod == "POST" and "/PyPortRtuSim/set" in eurl):
                # Any failure in decoding the content will trigger an exception, and an error to the PUSH request
                self.LogDebug("POST url {} content {}".format(url, content))
                jPayload = {}
                jPayload = json.loads(content)
                if type(jPayload["Number"]) is str:
                    Number = int(jPayload["Number"])
                else:
                    Number = jPayload["Number"]
                Type = jPayload["Type"]
                """{"Type" : "CB", "Number" : 1, "State" : "Open"/"Closed"/"MaintenanceZeros"/"Maintenance"/"FaultOnes"/"Fault" } """
                if Type == "CB":
                    self.CBSetState(Number, jPayload["State"])
                    Response["Result"] = "OK"
                    return json.dumps(Response)
                if Type == "TapChanger":
                    self.SetTapChangerValue(Number, jPayload["State"])
                    Response["Result"] = "OK"
                    return json.dumps(Response)
                if Type == "Analog":
                    self.SetAnalogValueUsingIndex(Number, jPayload["State"])
                    Response["Result"] = "OK"
                    return json.dumps(Response)
                if Type == "Binary":
                    self.SetBinaryValueUsingIndex(Number, jPayload["State"])
                    Response["Result"] = "OK"
                    return json.dumps(Response)
            else:
                self.LogError("Illegal http request")
        except (RuntimeError, TypeError, NameError, Exception) as e:
            self.LogError("Exception - {}".format(e))
            Response["Result"] = "Exception - {}".format(e)
            return ""
return "" | [
"json.loads",
"json.dumps",
"urllib.parse.parse_qs",
"odc.log",
"datetime.datetime.now",
"numpy.random.randint",
"numpy.random.normal",
"sys.exc_info",
"odc.SetTimer",
"re.search",
"urllib.parse.urlparse"
] | [((1497, 1531), 'odc.log', 'odc.log', (['self.guid', 'Trace', 'message'], {}), '(self.guid, Trace, message)\n', (1504, 1531), False, 'import odc\n'), ((1575, 1609), 'odc.log', 'odc.log', (['self.guid', 'Error', 'message'], {}), '(self.guid, Error, message)\n', (1582, 1609), False, 'import odc\n'), ((1652, 1686), 'odc.log', 'odc.log', (['self.guid', 'Debug', 'message'], {}), '(self.guid, Debug, message)\n', (1659, 1686), False, 'import odc\n'), ((1729, 1762), 'odc.log', 'odc.log', (['self.guid', 'Info', 'message'], {}), '(self.guid, Info, message)\n', (1736, 1762), False, 'import odc\n'), ((1804, 1837), 'odc.log', 'odc.log', (['self.guid', 'Warn', 'message'], {}), '(self.guid, Warn, message)\n', (1811, 1837), False, 'import odc\n'), ((1884, 1921), 'odc.log', 'odc.log', (['self.guid', 'Critical', 'message'], {}), '(self.guid, Critical, message)\n', (1891, 1921), False, 'import odc\n'), ((9697, 9740), 'numpy.random.normal', 'numpy.random.normal', (["x['Mean']", "x['StdDev']"], {}), "(x['Mean'], x['StdDev'])\n", (9716, 9740), False, 'import numpy\n'), ((18668, 18705), 'odc.SetTimer', 'odc.SetTimer', (['self.guid', '(1)', '(10 * 1000)'], {}), '(self.guid, 1, 10 * 1000)\n', (18680, 18705), False, 'import odc\n'), ((19782, 19795), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (19790, 19795), False, 'from urllib.parse import urlparse, parse_qs\n'), ((3136, 3156), 'json.loads', 'json.loads', (['MainJSON'], {}), '(MainJSON)\n', (3146, 3156), False, 'import json\n'), ((3223, 3247), 'json.loads', 'json.loads', (['OverrideJSON'], {}), '(OverrideJSON)\n', (3233, 3247), False, 'import json\n'), ((3875, 3902), 'json.dumps', 'json.dumps', (['self.ConfigDict'], {}), '(self.ConfigDict)\n', (3885, 3902), False, 'import json\n'), ((15861, 15889), 're.search', 're.search', (['"""ONLINE"""', 'Quality'], {}), "('ONLINE', Quality)\n", (15870, 15889), False, 'import re\n'), ((20116, 20136), 'urllib.parse.parse_qs', 'parse_qs', (['urlp.query'], {}), '(urlp.query)\n', 
(20124, 20136), False, 'from urllib.parse import urlparse, parse_qs\n'), ((20622, 20642), 'json.dumps', 'json.dumps', (['Response'], {}), '(Response)\n', (20632, 20642), False, 'import json\n'), ((20868, 20888), 'json.dumps', 'json.dumps', (['Response'], {}), '(Response)\n', (20878, 20888), False, 'import json\n'), ((21032, 21052), 'json.dumps', 'json.dumps', (['Response'], {}), '(Response)\n', (21042, 21052), False, 'import json\n'), ((21196, 21216), 'json.dumps', 'json.dumps', (['Response'], {}), '(Response)\n', (21206, 21216), False, 'import json\n'), ((21539, 21558), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (21549, 21558), False, 'import json\n'), ((14502, 14516), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14514, 14516), False, 'from datetime import datetime\n'), ((14766, 14780), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14778, 14780), False, 'from datetime import datetime\n'), ((14933, 14947), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14945, 14947), False, 'from datetime import datetime\n'), ((22080, 22100), 'json.dumps', 'json.dumps', (['Response'], {}), '(Response)\n', (22090, 22100), False, 'import json\n'), ((22286, 22306), 'json.dumps', 'json.dumps', (['Response'], {}), '(Response)\n', (22296, 22306), False, 'import json\n'), ((22496, 22516), 'json.dumps', 'json.dumps', (['Response'], {}), '(Response)\n', (22506, 22516), False, 'import json\n'), ((22706, 22726), 'json.dumps', 'json.dumps', (['Response'], {}), '(Response)\n', (22716, 22726), False, 'import json\n'), ((3341, 3355), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3353, 3355), False, 'import sys\n'), ((10871, 10917), 'numpy.random.randint', 'numpy.random.randint', (['(0)', "x['UpdateIntervalms']"], {}), "(0, x['UpdateIntervalms'])\n", (10891, 10917), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import logging
import numpy as np
import os
import re
from collections import OrderedDict
from hashlib import md5
from astropy.io import fits
from astropy.stats import biweight_scale, sigma_clip
from scipy import interpolate, ndimage, polyfit, poly1d, optimize as op, signal
from ..robust_polyfit import polyfit as rpolyfit
from .spectrum import Spectrum1D
logger = logging.getLogger(__name__)  # module-level logger, named after this module
# NOTE(review): the commented-out block below is an earlier, simpler draft of
# find_peaks (threshold on the 1st derivative only); kept for reference.
#def find_peaks(flux, cont=None, noise=None, detection_sigma=2.0):
#    # Subtract continuum:
#    if cont is not None: flux = flux - cont
#
#    # Find threshold for peak finding
#    if noise is None:
#        clipped = sigma_clip(flux)
#        noise = biweight_scale(clipped)
#    thresh = detection_sigma * noise
#
#    # 1st derivative for peak finding
#    dflux = np.gradient(flux)
#    ii1 = flux > thresh
#    ii2 = dflux >= 0
#    ii3 = np.zeros_like(ii2)
#    ii3[:-1] = dflux[1:] < 0
#    peaklocs = ii1 & ii2 & ii3
#    peaklocs[mask] = False
#    peakindices = np.where(peaklocs)[0]
def find_peaks(flux,
               window=51, niter=5,
               clip_iter=5, clip_sigma_upper=5.0, clip_sigma_lower=5.0,
               detection_sigma=3.0,
               min_peak_dist_sigma=5.0,
               gaussian_width=1.0,
               make_fig=False):
    """
    Iteratively find emission peaks in a 1-D flux array and model them as a
    sum of Gaussians on a flat continuum.

    Procedure:
    * Subtract a running median filter of width ``window``.
    * Repeat ``niter`` times:
      - sigma-clip the residual to estimate the noise level
        (``clip_iter``, ``clip_sigma_upper``, ``clip_sigma_lower``);
      - locate local maxima above ``detection_sigma * noise``;
      - drop candidates within ``min_peak_dist_sigma`` fitted sigmas of an
        already-accepted peak;
      - add one Gaussian (initial stddev ``gaussian_width``) per new peak
        and refit the whole composite model.

    Returns:
        allpeakx: fitted peak positions (pixel units).
        fullmodel: the composite continuum + Gaussians model.
        fig: only when ``make_fig`` is True, a diagnostic figure with one
            panel per iteration.
    """
    # Bug fix: these names were used below but never imported anywhere in
    # this module, so the function raised NameError on first call.
    from astropy.modeling import models, fitting
    import matplotlib.pyplot as plt
    # Data to fit: the flux with a running median removed.
    xarr = np.arange(len(flux))
    flux = flux - signal.medfilt(flux, window)
    continuum = models.Linear1D(slope=0, intercept=0)
    fullmodel = continuum
    allpeakx = []
    allpeaksigma = []
    fitter = fitting.LevMarLSQFitter()
    if make_fig: fig, axes = plt.subplots(niter)
    for iiter in range(niter):
        # Residual after removing the peaks modelled so far.
        tflux = flux - fullmodel(xarr)
        # Robust noise estimate from the sigma-clipped residual.
        # NOTE(review): astropy >= 3.1 renamed ``iters`` to ``maxiters``;
        # ``iters`` is kept to match the astropy version this was written for.
        cflux = sigma_clip(tflux,
                           iters=clip_iter,
                           sigma_upper=clip_sigma_upper,
                           sigma_lower=clip_sigma_lower)
        noise = np.std(cflux)
        # A peak is where the derivative crosses from >= 0 to < 0 and the
        # residual exceeds the detection threshold.
        deriv = np.gradient(tflux)
        peaklocs = (deriv[:-1] >= 0) & (deriv[1:] < 0) & \
            (tflux[:-1] > detection_sigma * noise)
        peakx = np.where(peaklocs)[0]
        peaky = flux[:-1][peaklocs]
        # Prune candidates too close (in units of fitted sigma) to peaks
        # accepted in earlier iterations.
        peaks_to_keep = np.ones_like(peakx, dtype=bool)
        for ix, x in enumerate(peakx):
            z = (x - np.array(allpeakx)) / np.array(allpeaksigma)
            if np.any(np.abs(z) < min_peak_dist_sigma):
                peaks_to_keep[ix] = False
        peakx = peakx[peaks_to_keep]
        peaky = peaky[peaks_to_keep]
        # Add one Gaussian per surviving candidate.
        for x, y in zip(peakx, peaky):
            g = models.Gaussian1D(amplitude=y, mean=x,
                              stddev=gaussian_width)
            fullmodel = fullmodel + g
        print("iter {}: {} peaks (found {}, added {})".format(
            iiter, fullmodel.n_submodels()-1,
            len(peaks_to_keep), len(peakx)))
        # Refit the entire composite model to the median-subtracted flux.
        fullmodel = fitter(fullmodel, xarr, flux, maxiter=200*(fullmodel.parameters.size+1))
        print(fitter.fit_info["message"], fitter.fit_info["ierr"])
        # Extract the per-peak parameters (amplitude, mean, stddev are
        # adjacent in the flattened parameter vector).
        peak_x_indices = np.where(["mean_" in param for param in fullmodel.param_names])[0]
        peak_y_indices = peak_x_indices - 1
        peak_sigma_indices = peak_x_indices + 1
        allpeakx = fullmodel.parameters[peak_x_indices]
        allpeaky = fullmodel.parameters[peak_y_indices]
        allpeaksigma = fullmodel.parameters[peak_sigma_indices]
        # Diagnostic panel for this iteration.
        if make_fig:
            try:
                ax = axes[iiter]
            except (TypeError, IndexError):
                # niter == 1: plt.subplots returns a single Axes, not an array.
                ax = axes
            ax.plot(xarr, flux)
            ax.plot(peakx, peaky, 'ro')
            ax.plot(xarr, fullmodel(xarr), lw=1)
            ax.axhspan(-noise, +noise, color='k', alpha=.2)
            ax.plot(xarr, flux - fullmodel(xarr))
            ax.vlines(allpeakx, allpeaky*1.1, allpeaky*1.1+300, color='r', lw=1)
    if make_fig: return allpeakx, fullmodel, fig
    return allpeakx, fullmodel
| [
"astropy.stats.sigma_clip",
"numpy.ones_like",
"numpy.abs",
"numpy.std",
"scipy.signal.medfilt",
"logging.getLogger",
"numpy.where",
"numpy.array",
"numpy.gradient"
] | [((525, 552), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (542, 552), False, 'import logging\n'), ((2138, 2166), 'scipy.signal.medfilt', 'signal.medfilt', (['flux', 'window'], {}), '(flux, window)\n', (2152, 2166), False, 'from scipy import interpolate, ndimage, polyfit, poly1d, optimize as op, signal\n'), ((2529, 2627), 'astropy.stats.sigma_clip', 'sigma_clip', (['tflux'], {'iters': 'clip_iter', 'sigma_upper': 'clip_sigma_upper', 'sigma_lower': 'clip_sigma_lower'}), '(tflux, iters=clip_iter, sigma_upper=clip_sigma_upper,\n sigma_lower=clip_sigma_lower)\n', (2539, 2627), False, 'from astropy.stats import biweight_scale, sigma_clip\n'), ((2722, 2735), 'numpy.std', 'np.std', (['cflux'], {}), '(cflux)\n', (2728, 2735), True, 'import numpy as np\n'), ((2856, 2874), 'numpy.gradient', 'np.gradient', (['tflux'], {}), '(tflux)\n', (2867, 2874), True, 'import numpy as np\n'), ((3133, 3164), 'numpy.ones_like', 'np.ones_like', (['peakx'], {'dtype': 'bool'}), '(peakx, dtype=bool)\n', (3145, 3164), True, 'import numpy as np\n'), ((3001, 3019), 'numpy.where', 'np.where', (['peaklocs'], {}), '(peaklocs)\n', (3009, 3019), True, 'import numpy as np\n'), ((4083, 4148), 'numpy.where', 'np.where', (["[('mean_' in param) for param in fullmodel.param_names]"], {}), "([('mean_' in param) for param in fullmodel.param_names])\n", (4091, 4148), True, 'import numpy as np\n'), ((3242, 3264), 'numpy.array', 'np.array', (['allpeaksigma'], {}), '(allpeaksigma)\n', (3250, 3264), True, 'import numpy as np\n'), ((3222, 3240), 'numpy.array', 'np.array', (['allpeakx'], {}), '(allpeakx)\n', (3230, 3240), True, 'import numpy as np\n'), ((3287, 3296), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (3293, 3296), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
r"""myfile.exam
To edit examination paper
-------------------------------
Path: examsystem/exam.py
Author: William/2016-01-02
"""
import collections
import pathlib
import datetime
import copy
import numpy as np
from pylatex import *
from pylatex.base_classes import *
from pylatex.utils import *
import pylatex_ext
from base import *
class Solve(Environment):
    """LaTeX ``solve`` environment (defined by the exampaper package).

    Usage: ``Solve(data='the solution')``.
    """
    # Content is raw LaTeX, so do not escape it; items are joined with a
    # LaTeX line break.
    escape = False
    content_separator = "\\\\\n"
class ExamPaper(pylatex_ext.XeDocument):
    """Examination paper document (XeLaTeX, ctexart class).

    Builds the paper header, score table and four problem sections
    (fill-in, true/false, choice, calculation) from the ``fill``,
    ``truefalse``, ``choice`` and ``calculation`` attributes, each
    expected to be an iterable of Problem objects when present.
    """
    def __init__(self, subject='', title=None, *args, **kwargs):
        """
        Argument:
            subject: str, the name of the subject of the examination;
                also used as the default output file name prefix.
            title: str; when None it is generated automatically from the
                current semester.
        """
        super(ExamPaper, self).__init__(documentclass='ctexart', document_options='12pt,a4paper', *args, **kwargs)
        self.latex_name = 'document'
        # Content appended to the paper is raw LaTeX; do not escape it.
        self.escape = False
        self.subject = subject
        if title is None:
            # Derive the default title from the current semester.
            import semester
            s = semester.Semester()
            title = '''浙江工业大学之江学院%s试卷'''%s.totex()
        self.title = title
        self.usepackage(('mathrsfs, amsfonts, amsmath, amssymb', 'enumerate', 'analysis, algebra', 'exampaper',
                         'fancyhdr', 'geometry'))
        # Page geometry, fancy page style, centered header and page-count footer.
        self.preamble.append(Command('geometry', 'left=3.3cm,right=3.3cm,top=2.3cm,foot=1.5cm'))
        self.preamble.append(Command('pagestyle', 'fancy'))
        self.preamble.append(Command('chead', NoEscape(Command('textbf', '浙 江 工 业 大 学 之 江 学 院 考 试 命 题 纸').dumps())))
        self.preamble.append(Command('cfoot', NoEscape(r'\footnotesize{第~\thepage~页~(共~\pageref{LastPage}~页)}')))
        self.preamble.append(Command('renewcommand', arguments=Arguments(NoEscape(r'\headrulewidth'), '0pt')))
        # header = PageStyle("header")
        # with header.create(Foot("C")):
        #     ft = Command('footnotesize', arguments=NoEscape('第~\\thepage~页~(共~\pageref{LastPage}~页)'))
        #     header.append(ft)
        # self.preamble.append(header)
    def build(self):
        """Assemble the paper: header first, then each section that exists."""
        # the head of the paper
        self.make_head()
        # make problems
        if hasattr(self, 'fill'):
            self.make_fill()
            self.append('\n\n')
        if hasattr(self, 'truefalse'):
            self.make_truefalse()
            self.append('\n\n')
        if hasattr(self, 'choice'):
            self.make_choice()
            self.append('\n\n')
        if hasattr(self, 'calculation'):
            self.make_calculation()
    def make_head(self):
        """Emit the title, course-information table and the score table."""
        self.append(Center(data=pylatex_ext.large(bold(NoEscape(self.title)))))
        table = Tabular('lclc')
        table.escape = False
        table.add_row(r'二级学院:&理学院&专业名称:&信计专业'.split('&'))
        table.add_row(r'课程名称:&Python程序设计&课程代码:&10800570'.split('&'))
        table.add_row(r'主讲教师:&宋丛威&&'.split('&'))
        # NOTE(review): the course-info table above is rebound below before it
        # is ever appended to the document -- confirm whether it should be
        # appended as well.
        table = Tabular('|c|c|c|c|c|c|')
        table.escape = False
        table.add_hline()
        table.add_row(r'\sws{题号} \sws{一} \sws{二} \sws{三} \sws{四} \sws{总评}'.split())
        table.add_hline()
        table.add_row((MultiRow(2, data='计分'), '', '', '', '', ''))
        table.add_empty_row()
        table.add_hline()
        self.append(Center(data=table))
        self.append(Command('thispagestyle', 'plain'))
    def make_fill(self):
        # make filling problems
        self.append('\\noindent 一、填空题 (每空 2 分, 共 20 分):')
        with self.create(Enumerate(options='1)')) as enum:
            enum.escape = False
            for p in self.fill:
                enum.add_item(NoEscape(p.totex()))
    def make_truefalse(self):
        # make true-false problem
        self.append('\\noindent 二、判断题 (每空 2 分, 共 10 分):')
        with self.create(Enumerate(options='1)')) as enum:
            enum.escape = False
            for p in self.truefalse:
                enum.add_item(NoEscape(p.totex()))
    def make_choice(self):
        # make choice problems
        self.append('\\noindent 三、选择题 (每空 2 分, 共 10 分):')
        with self.create(Enumerate(options='1)')) as enum:
            enum.escape = False
            for p in self.choice:
                enum.add_item(NoEscape(p.totex()))
    def make_calculation(self):
        # make calculation problems
        self.append('\\noindent 四、计算题 (每题 10 分, 共 60 分):')
        with self.create(Enumerate(options='1)')) as enum:
            enum.escape = False
            for p in self.calculation:
                if p.solution is None:
                    # No printed solution: leave vertical space for working.
                    enum.add_item(NoEscape(p.totex() + '\n\n' + Command('vspace', '10cm').dumps()))
                else:
                    enum.add_item(NoEscape(p.totex()))
    def write(self, filename=None):
        # Default file name: "<subject>exam".
        if filename is None:
            filename = self.subject + 'exam'
        super(ExamPaper, self).write(filename)
    def topdf(self, filename=None):
        # Default file name: "<subject>exam".
        if filename is None:
            filename = self.subject + 'exam'
        super(ExamPaper, self).topdf(filename)
class Problem(BaseTemplate):
    """An exam problem: a template plus parameters, with an optional solution.

    Attributes:
        realm: topic/category of the problem (free-form).
        point: score assigned to the problem (default 10).
        solution: None, a Solution instance, or a Solution subclass that is
            instantiated from this problem at render time.
    """
    def __init__(self, template='', parameter=None, realm=None):
        # Bug fix: ``parameter={}`` was a mutable default shared by every
        # Problem created without an explicit parameter dict.
        parameter = {} if parameter is None else parameter
        super(Problem, self).__init__(template, parameter)
        self.realm = realm
        self.point = 10
        self.solution = None  # None | Solution instance | Solution subclass
    def totex(self):
        """Render the problem as LaTeX, appending a solve environment when a
        solution is attached."""
        solution = self.solution
        if solution:  # with solution
            # Accept either a Solution subclass (instantiate it from this
            # problem) or a ready-made Solution instance.  The original code
            # called issubclass() unconditionally, which raises TypeError
            # when ``solution`` is an instance rather than a class.
            if isinstance(solution, type) and issubclass(solution, Solution):
                # solution is a class
                solution = solution.fromProblem(self)
            return super(Problem, self).totex() + '\n\n' + Solve(data=solution.totex()).dumps()
        else:  # without solution
            return super(Problem, self).totex()
class Solution(BaseTemplate):
    """Solution attached to a :class:`Problem`.

    Extends:
        BaseTemplate
    """

    @classmethod
    def fromProblem(cls, problem):
        """Alternate constructor: build a solution sharing the problem's
        parameters, then derive its template from the problem."""
        sol = cls(parameter=problem.parameter)
        sol.genTemplate(problem)
        return sol

    def genTemplate(self, problem=None):
        """Hook for subclasses; the base class leaves the template as-is."""
        pass
# Locations of the problem banks and generated papers, under the user's home.
USER_FOLDER = pathlib.Path.home()
BANK_FOLDER = USER_FOLDER / 'Teaching/examsystem/bank'
PAPER_FOLDER = USER_FOLDER / 'Teaching/考试'
import json, yaml
class OtherSolution(Solution):
    # The solution reuses the problem's own template verbatim; the answer
    # substituted into the parameters is what differs from the problem.
    def genTemplate(self, problem):
        self.template = problem.template
class OtherProblem(Problem):
    """Problem loaded from a YAML bank; solutions mirror the problem template.

    Class attributes:
        solution: Solution subclass used at render time (OtherSolution).
        mask: LaTeX command used to blank out masked parameters.
        mask_flag: when True, totex() replaces every key in ``masked``
            with ``mask`` (e.g. to print a blank exam paper).
        masked: parameter keys to blank out when masking is on.
    """
    solution = OtherSolution
    mask = Command('mypar', '')
    mask_flag = False
    masked = {'answer'}
    def totex(self):
        """Render to LaTeX, masking the answer fields when mask_flag is set."""
        if self.mask_flag:
            for k in self.masked:
                self[k] = self.mask
        return super(OtherProblem, self).totex()
    def __setstate__(self, state):
        # Restore from a YAML mapping: append an answer placeholder to the
        # template; the answer value itself is installed by subclasses.
        self.template, self.parameter, answer = state['template'] + '~~{{answer}}', state.get('parameter', {}), state['answer']
        self.solution = None
    @staticmethod
    def random(filename='', n=1, encoding='utf-8', *args, **kwargs):
        """Draw ``n`` distinct problems at random from a YAML bank file."""
        filename = (BANK_FOLDER / filename).with_suffix('.yaml')
        # Bug fix: PyYAML >= 6 requires an explicit Loader argument.
        # The full Loader is kept (not safe_load) because bank files
        # construct Problem objects via __setstate__ -- this can run
        # arbitrary code, so only load trusted, local bank files.
        problems = yaml.load(filename.read_text(encoding=encoding), Loader=yaml.Loader)
        ret = []
        for _ in range(n):
            # Sample without replacement, preserving the numpy RNG stream.
            p = np.random.choice(problems)
            problems.remove(p)
            ret.append(p)
        return ret
    @classmethod
    def read_yaml(cls, filename, encoding='utf-8', *args, **kwargs):
        """Load and return all problems from a YAML bank file."""
        filename = (BANK_FOLDER / filename).with_suffix('.yaml')
        # Same trust caveat as in ``random``; Loader is mandatory in PyYAML 6.
        return yaml.load(filename.read_text(encoding=encoding), Loader=yaml.Loader)
class TrueFalseProblem(OtherProblem):
    """True/false problem; the answer renders as a LaTeX macro call."""
    def __setstate__(self, state):
        super(TrueFalseProblem, self).__setstate__(state)
        # Default to 'true'; boolean answers map onto the macro names.
        answer = state.get('answer', 'true')
        if isinstance(answer, bool):
            answer = 'true' if answer else 'false'
        self.parameter.update({'answer': Command(answer)})
class ChoiceProblem(OtherProblem):
    """Multiple-choice problem: the options are listed below the statement."""
    def __setstate__(self, state):
        answer = state['answer']
        # Render the options as "(a) text~~(b) text~~..." on their own line.
        choices = '~~'.join('(%s) %s' % pair for pair in state['options'].items())
        self.template = state['template'] + '~~{{answer}}\\\\\n' + choices
        self.parameter = state.get('parameter', {})
        self.solution = None
        self.parameter.update({'answer': Command('mypar', answer)})
class FillProblem(OtherProblem):
    """Fill-in-the-blank problem; each answer is typeset on an underline."""
    # Masking replaces the answers with empty auto-length underlines.
    mask = Command('autolenunderline', '')
    def __setstate__(self, state):
        answers = state.get('answer', {})
        self.template = state['template']
        self.parameter = state.get('parameter', {})
        self.solution = None
        self.masked = set(answers)
        # Each answer goes on an auto-sized underline, without escaping.
        self.parameter.update(
            {key: Command('autolenunderline', NoEscape(text))
             for key, text in answers.items()})
# with open('bank/python_choice.yaml', encoding='utf-8') as fo:
# problem = yaml.load(fo)[0]
| [
"pathlib.Path",
"semester.Semester",
"numpy.random.choice"
] | [((6332, 6349), 'pathlib.Path', 'pathlib.Path', (['"""~"""'], {}), "('~')\n", (6344, 6349), False, 'import pathlib\n'), ((1147, 1166), 'semester.Semester', 'semester.Semester', ([], {}), '()\n', (1164, 1166), False, 'import semester\n'), ((7458, 7484), 'numpy.random.choice', 'np.random.choice', (['problems'], {}), '(problems)\n', (7474, 7484), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot as plt
from movement_handling import move, confine_particles, get_dead_indices
from prey.agent import Prey
import keyboard
from matplotlib.font_manager import FontProperties
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "cm"
plt.rcParams.update({'font.size': 12})
it = FontProperties()
it.set_family('serif')
it.set_name('Times New Roman')
it.set_style('italic')
it.set_size(16)
times = FontProperties()
times.set_family('serif')
times.set_name('Times New Roman')
times.set_size(16)
box_size = 100
radius = 5
speed = 0.5
prey_radius = 0.5
n_prey = 100
indices = np.arange(n_prey)
position = (np.array((box_size, box_size)) / 2).reshape((1, 2))
v = np.array((0, speed), dtype=float).reshape((1, 2))
angle = np.pi / 2
angle_diff = 0.07
period = np.pi * 2
kills_history = np.zeros((0, 2))
loss_history = np.zeros((0, 2))
prey = Prey(box_size, n_prey, buffer_size=5000, buffer_behaviour="discard_old", epsilon_final=0.15, episode_length=800,
preprocessing="box_size", network_memory=30)
colormap = plt.cm.get_cmap('viridis')
predator_color = np.array([0.5, 0.1, 0.2, 1])
colors = np.array([colormap(0.2 + 0.6 * i/(n_prey-1)) for i in range(n_prey)] + [predator_color])
fig, ax = plt.subplots()
ax.set_xlim(0, box_size)
ax.set_ylim(0, box_size)
ax.set_aspect('equal', adjustable='box')
generation = 1
s = np.array([10*prey.radius**2] * n_prey + [20*radius**2])
scatter_plot = ax.scatter([None] * (1+n_prey), [None] * (1+n_prey), s=s, facecolors=colors, edgecolors='k')
fig.suptitle(r"Generation ${}$: $\epsilon = {}$ Previous generation kills: ${}$".format(
generation, prey.get_epsilon(), 0))
fig.canvas.draw()
plt.show(block=False)
def keyboard_event():
    """Poll the keyboard and steer the predator.

    'a'/left arrow turns counter-clockwise, 'd'/right arrow clockwise.
    Mutates the global heading ``angle`` and the velocity array ``v`` in
    place, and wraps ``angle`` back into (-2*pi, 2*pi).
    """
    global angle
    # The two original branches differed only in the sign of the increment,
    # so the heading/velocity update is factored out.
    delta = 0.0
    if keyboard.is_pressed('a') or keyboard.is_pressed('left arrow'):
        delta = angle_diff
    elif keyboard.is_pressed('d') or keyboard.is_pressed('right arrow'):
        delta = -angle_diff
    if delta:
        angle += delta
        v[0, 0] = np.cos(angle) * speed
        v[0, 1] = np.sin(angle) * speed
    if abs(angle) > period:
        angle -= np.sign(angle) * period
# Main loop: run until the interactive figure window is closed.
while plt.fignum_exists(fig.number):
    keyboard_event()
    # Keep everything inside the arena, then advance positions by velocity.
    confine_particles(position, v, box_size, box_size, radius)
    move(position, v)
    prey.respawn()
    prey.select_actions(position[0], radius, v[0])
    dead_indices = get_dead_indices(prey.agents, prey.int_max)
    reinitialize = prey.reward()
    reinforced_loss = prey.reinforce(train_size=500)
    # Record training loss against fractional generation time.
    if reinforced_loss is not None:
        t = generation + prey.count / prey.episode_length
        loss_history = np.append(loss_history, np.array([[t, reinforced_loss]]), axis=0)
    # Episode finished: log the kills and start the next generation.
    if reinitialize is not None:
        kills_history = np.append(kills_history, np.array([[generation, dead_indices.size]]), axis=0)
        generation += 1
        fig.suptitle(r"Generation ${}$: $\epsilon = {}$ Previous generation kills: ${}$".format(
            generation, round(prey.get_epsilon(), 2), dead_indices.size))
        colors = np.array([colormap(0.2 + 0.6 * i / (n_prey - 1)) for i in range(n_prey)] + [predator_color])
    # Redraw only surviving prey plus the predator (index n_prey).
    inds = indices[~np.isin(indices, dead_indices)]
    all_inds = np.append(inds, n_prey)
    scatter_plot.set_offsets(np.append(prey.positions[inds], position, axis=0))
    scatter_plot.set_facecolor(colors[all_inds])
    scatter_plot.set_sizes(s[all_inds])
    fig.canvas.draw()
    fig.canvas.flush_events()
# After the window is closed: summary plots of kills and training loss.
stats_figure, stats_ax = plt.subplots(1, 2, sharex=True)
stats_figure.set_size_inches(12.5, 5)
# Left panel: cumulative kill count over generations.
stats_ax[0].plot(kills_history[:, 0], np.cumsum(kills_history[:, 1]), 'k')
stats_ax[0].set_title("Number of kills per generation")
stats_ax[0].set_ylabel("Kills")
# Right panel: reinforcement loss over (fractional) generations.
stats_ax[1].plot(loss_history[:, 0], loss_history[:, 1], 'k')
stats_ax[1].set_title("Loss per generation")
stats_ax[1].set_ylabel("Loss")
stats_figure.text(0.5, 0.02, 'Generation', ha='center')
plt.show()
| [
"numpy.isin",
"numpy.sin",
"numpy.arange",
"movement_handling.confine_particles",
"matplotlib.font_manager.FontProperties",
"numpy.append",
"numpy.cumsum",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.fignum_exists",
"matplotlib.pyplot.show",
"numpy.cos... | [((303, 341), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 12}"], {}), "({'font.size': 12})\n", (322, 341), True, 'from matplotlib import pyplot as plt\n'), ((348, 364), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (362, 364), False, 'from matplotlib.font_manager import FontProperties\n'), ((467, 483), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (481, 483), False, 'from matplotlib.font_manager import FontProperties\n'), ((644, 661), 'numpy.arange', 'np.arange', (['n_prey'], {}), '(n_prey)\n', (653, 661), True, 'import numpy as np\n'), ((851, 867), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (859, 867), True, 'import numpy as np\n'), ((883, 899), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (891, 899), True, 'import numpy as np\n'), ((908, 1073), 'prey.agent.Prey', 'Prey', (['box_size', 'n_prey'], {'buffer_size': '(5000)', 'buffer_behaviour': '"""discard_old"""', 'epsilon_final': '(0.15)', 'episode_length': '(800)', 'preprocessing': '"""box_size"""', 'network_memory': '(30)'}), "(box_size, n_prey, buffer_size=5000, buffer_behaviour='discard_old',\n epsilon_final=0.15, episode_length=800, preprocessing='box_size',\n network_memory=30)\n", (912, 1073), False, 'from prey.agent import Prey\n'), ((1089, 1115), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (1104, 1115), True, 'from matplotlib import pyplot as plt\n'), ((1133, 1161), 'numpy.array', 'np.array', (['[0.5, 0.1, 0.2, 1]'], {}), '([0.5, 0.1, 0.2, 1])\n', (1141, 1161), True, 'import numpy as np\n'), ((1271, 1285), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1283, 1285), True, 'from matplotlib import pyplot as plt\n'), ((1397, 1460), 'numpy.array', 'np.array', (['([10 * prey.radius ** 2] * n_prey + [20 * radius ** 2])'], {}), '([10 * prey.radius ** 2] * n_prey + [20 * radius ** 2])\n', (1405, 1460), True, 
'import numpy as np\n'), ((1710, 1731), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1718, 1731), True, 'from matplotlib import pyplot as plt\n'), ((2209, 2238), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['fig.number'], {}), '(fig.number)\n', (2226, 2238), True, 'from matplotlib import pyplot as plt\n'), ((3533, 3564), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '(True)'}), '(1, 2, sharex=True)\n', (3545, 3564), True, 'from matplotlib import pyplot as plt\n'), ((3960, 3970), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3968, 3970), True, 'from matplotlib import pyplot as plt\n'), ((2266, 2324), 'movement_handling.confine_particles', 'confine_particles', (['position', 'v', 'box_size', 'box_size', 'radius'], {}), '(position, v, box_size, box_size, radius)\n', (2283, 2324), False, 'from movement_handling import move, confine_particles, get_dead_indices\n'), ((2329, 2346), 'movement_handling.move', 'move', (['position', 'v'], {}), '(position, v)\n', (2333, 2346), False, 'from movement_handling import move, confine_particles, get_dead_indices\n'), ((2437, 2480), 'movement_handling.get_dead_indices', 'get_dead_indices', (['prey.agents', 'prey.int_max'], {}), '(prey.agents, prey.int_max)\n', (2453, 2480), False, 'from movement_handling import move, confine_particles, get_dead_indices\n'), ((3262, 3285), 'numpy.append', 'np.append', (['inds', 'n_prey'], {}), '(inds, n_prey)\n', (3271, 3285), True, 'import numpy as np\n'), ((3641, 3671), 'numpy.cumsum', 'np.cumsum', (['kills_history[:, 1]'], {}), '(kills_history[:, 1])\n', (3650, 3671), True, 'import numpy as np\n'), ((730, 763), 'numpy.array', 'np.array', (['(0, speed)'], {'dtype': 'float'}), '((0, speed), dtype=float)\n', (738, 763), True, 'import numpy as np\n'), ((1780, 1804), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""a"""'], {}), "('a')\n", (1799, 1804), False, 'import keyboard\n'), ((1808, 1841), 
'keyboard.is_pressed', 'keyboard.is_pressed', (['"""left arrow"""'], {}), "('left arrow')\n", (1827, 1841), False, 'import keyboard\n'), ((3315, 3364), 'numpy.append', 'np.append', (['prey.positions[inds]', 'position'], {'axis': '(0)'}), '(prey.positions[inds], position, axis=0)\n', (3324, 3364), True, 'import numpy as np\n'), ((674, 704), 'numpy.array', 'np.array', (['(box_size, box_size)'], {}), '((box_size, box_size))\n', (682, 704), True, 'import numpy as np\n'), ((1889, 1902), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1895, 1902), True, 'import numpy as np\n'), ((1929, 1942), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1935, 1942), True, 'import numpy as np\n'), ((1960, 1984), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""d"""'], {}), "('d')\n", (1979, 1984), False, 'import keyboard\n'), ((1988, 2022), 'keyboard.is_pressed', 'keyboard.is_pressed', (['"""right arrow"""'], {}), "('right arrow')\n", (2007, 2022), False, 'import keyboard\n'), ((2177, 2191), 'numpy.sign', 'np.sign', (['angle'], {}), '(angle)\n', (2184, 2191), True, 'import numpy as np\n'), ((2709, 2741), 'numpy.array', 'np.array', (['[[t, reinforced_loss]]'], {}), '([[t, reinforced_loss]])\n', (2717, 2741), True, 'import numpy as np\n'), ((2834, 2877), 'numpy.array', 'np.array', (['[[generation, dead_indices.size]]'], {}), '([[generation, dead_indices.size]])\n', (2842, 2877), True, 'import numpy as np\n'), ((3215, 3245), 'numpy.isin', 'np.isin', (['indices', 'dead_indices'], {}), '(indices, dead_indices)\n', (3222, 3245), True, 'import numpy as np\n'), ((2070, 2083), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2076, 2083), True, 'import numpy as np\n'), ((2110, 2123), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2116, 2123), True, 'import numpy as np\n')] |
from . import config, utils
import IPython as ipy
import matplotlib.pyplot as plt
import torch
import torch_geometric as tg
import torch_geometric.data
from tqdm.auto import tqdm
import copy
import itertools as it
import os
import time
import numpy as np
def train_epoch(model, opt, loader, max_grad_norm=config.max_grad_norm):
    """Train ``model`` for one pass over ``loader``.

    Each batch is a (graph, features, lower-bound, upper-bound) tuple;
    gradients are clipped to ``max_grad_norm`` before every optimizer step.
    Returns the list of per-batch loss values.
    """
    model.train()
    batch_losses = []
    for graph, feats, lower, upper in tqdm(loader, desc='batches', leave=False):
        graph = graph.to(config.device)
        feats = feats.to(config.device)
        lower = lower.to(config.device)
        upper = upper.to(config.device)
        prediction = model(graph, feats)
        loss = model.criterion(lower, upper, prediction)
        batch_losses.append(loss.item())
        # Standard step: clear grads, backprop, clip, update.
        opt.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        opt.step()
    return batch_losses
def train_loop(model, opt, loader, n_epochs=None, max_grad_norm=config.max_grad_norm):
    """Run train_epoch repeatedly, re-plotting the loss curve after each epoch.

    Runs forever when ``n_epochs`` is None (stop with an interrupt);
    otherwise stops after ``n_epochs`` epochs.
    """
    losses = []
    ectr = 0
    etime = 0
    while True:
        tic = time.time()
        # Bug fix: max_grad_norm was accepted but never forwarded, so
        # train_epoch silently used its own default instead.
        new_losses = train_epoch(model, opt, loader, max_grad_norm=max_grad_norm)
        toc = time.time()
        # Left panel: this epoch's losses; right panel: all iterations so far.
        plt.figure(figsize=(12,4))
        plt.subplot(121)
        plt.plot(new_losses)
        plt.xlabel('batch')
        plt.ylabel('loss')
        plt.title('Last Epoch')
        losses += new_losses
        plt.subplot(122)
        plt.plot(losses)
        plt.xlabel('iteration')
        plt.ylabel('loss')
        plt.title('All Epochs')
        ipy.display.clear_output(wait=True)
        plt.show()
        ectr += 1
        if ectr == n_epochs:
            break
        etime += toc-tic
        print(f'epoch: {ectr}\ttotal time: {etime:.3f} s\ttime per epoch: {etime/ectr:.3f} s')
def train_epoch_val(model, opt, loader, val_loader, lrs=None, dump_path=None, max_grad_norm=config.max_grad_norm):
    """Train for one epoch while tracking a per-batch validation loss.

    For every training batch, one validation batch (cycled from
    ``val_loader``) is evaluated under ``no_grad`` before the training
    step; ``lrs`` (a learning-rate scheduler), when given, is stepped once
    per batch.  Returns ``(losses, val_losses)`` as per-batch lists.

    NOTE(review): ``dump_path`` is accepted but never used in this body.
    NOTE(review): the model stays in train() mode during the validation
    forward pass, so dropout/batch-norm behave as in training -- confirm
    this is intended.
    """
    model.train()
    losses = []
    val_losses = []
    # for batch, val_batch in zip(tqdm(loader, desc='batches', leave=False), it.cycle(val_loader)):#
    for batch, val_batch in zip(loader, it.cycle(val_loader)):
        val_g, val_h, val_lb, val_ub = (x.to(config.device) for x in val_batch)
        with torch.no_grad():
            val_pred = model(val_g, val_h)
            val_loss = model.criterion(val_lb, val_ub, val_pred)
            val_losses.append(val_loss.item())
        g, h, lb, ub = (x.to(config.device) for x in batch)
        pred = model(g, h)
        loss = model.criterion(lb, ub, pred)
        losses.append(loss.item())
        opt.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        opt.step()
        if lrs is not None:
            lrs.step()
    return losses, val_losses
def train_loop_val(model, opt, loader, val_loader, n_epochs=None, max_grad_norm=config.max_grad_norm):
    """Run train_epoch_val repeatedly, re-plotting train/val loss curves.

    Runs forever when ``n_epochs`` is None (stop with an interrupt);
    otherwise stops after ``n_epochs`` epochs.
    """
    losses = []
    val_losses = []
    ectr = 0
    etime = 0
    while True:
        tic = time.time()
        # Bug fix: max_grad_norm was accepted but never forwarded, so
        # train_epoch_val silently used its own default instead.
        new_losses, new_val_losses = train_epoch_val(model, opt, loader, val_loader,
                                                    max_grad_norm=max_grad_norm)
        toc = time.time()
        # Left panel: this epoch; right panel: all iterations so far.
        plt.figure(figsize=(12,4))
        plt.subplot(121)
        plt.plot(new_losses, label='train')
        plt.plot(new_val_losses, label='val')
        plt.legend()
        plt.xlabel('batch')
        plt.ylabel('loss')
        plt.title('Last Epoch')
        losses += new_losses
        val_losses += new_val_losses
        plt.subplot(122)
        plt.plot(losses, label='train')
        plt.plot(val_losses, label='val')
        plt.legend()
        plt.xlabel('iteration')
        plt.ylabel('loss')
        plt.title('All Epochs')
        ipy.display.clear_output(wait=True)
        plt.show()
        ectr += 1
        if ectr == n_epochs:
            break
        etime += toc-tic
        print(f'epoch: {ectr}\ttotal time: {etime:.3f} s\ttime per epoch: {etime/ectr:.3f} s')
def train_full(model, loader, val_loader, lr, weight_decay, cycle_patience, step_size_up, step_size_down, dump_path=None, lrs=None, max_grad_norm=config.max_grad_norm):
    """Full training run with cyclic LR, early stopping and checkpointing.

    Builds an AdamW optimizer and (unless ``lrs`` is given) a CyclicLR
    scheduler.  Per training batch, one cycled validation batch is
    evaluated; the best model (lowest per-batch validation loss) is kept
    and restored when the loss has not improved for
    ``cycle_patience`` * (step_size_up + step_size_down) batches.
    When ``dump_path`` is set, best/current model state, loss histories,
    the scheduler and the loss-curve figure are saved there each epoch.
    Returns ``(all_losses, all_val_losses)``.

    NOTE(review): if the very first val_loss comparison against +inf is
    False (e.g. NaN loss), ``best_model`` would be referenced before
    assignment on a later patience expiry -- confirm inputs are finite.
    """
    tqdm.write(f'dump path: {dump_path}')
    opt = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    lrs = lrs if lrs is not None else torch.optim.lr_scheduler.CyclicLR(opt, base_lr=0, max_lr=lr, step_size_up=step_size_up, step_size_down=step_size_down, cycle_momentum=False)
    all_losses = []
    all_val_losses = []
    ectr = 0
    best_val_loss = float('inf')
    bctr = 0
    tic = time.time()
    while True:
        ectr += 1
        losses = []
        val_losses = []
        for batch, val_batch in zip(loader, it.cycle(val_loader)):
            # Validation batch first (no grad), to drive early stopping.
            val_g, val_h, val_lb, val_ub = (x.to(config.device) for x in val_batch)
            with torch.no_grad():
                val_pred = model(val_g, val_h)
                val_loss = model.criterion(val_lb, val_ub, val_pred)
                val_losses.append(val_loss.item())
            if val_loss >= best_val_loss:
                bctr += 1
                # Patience exhausted: restore the best snapshot and stop.
                if bctr > cycle_patience * (step_size_up+step_size_down):
                    model.load_state_dict(best_model)
                    return all_losses, all_val_losses
            else:
                # New best: reset patience and snapshot the weights.
                bctr = 0
                best_model = copy.deepcopy(model.state_dict())
                if dump_path is not None:
                    torch.save(best_model, os.path.join(dump_path, 'best_model.pt'))
                best_val_loss = val_loss
            # Training step on the actual batch.
            g, h, lb, ub = (x.to(config.device) for x in batch)
            pred = model(g, h)
            loss = model.criterion(lb, ub, pred)
            losses.append(loss.item())
            opt.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            opt.step()
            lrs.step()
        all_losses += losses
        all_val_losses += val_losses
        # Epoch summary plot; y-limit scaled to the epoch's first val loss.
        plt.figure()
        plt.plot(all_losses, label='train')
        plt.plot(all_val_losses, label='val')
        plt.legend()
        plt.xlabel('iteration')
        plt.ylabel('loss')
        if val_losses[0] <= 1:
            plt.ylim((0,1))
        elif val_losses[0] <= 5:
            plt.ylim((0,5))
        elif val_losses[0] <= 10:
            plt.ylim((0,10))
        elif val_losses[0] <= 100:
            plt.ylim((0,100))
        plt.title(f'epoch:{ectr} | time:{int(time.time()-tic)}s | val:{val_loss:.3} | best:{best_val_loss:.3} | patience:{bctr//(step_size_up+step_size_down)}/{cycle_patience}')
        if utils.is_notebook() and not dump_path:
            ipy.display.clear_output(wait=True)
            plt.show()
        else:
            assert dump_path is not None
            plt.savefig(os.path.join(dump_path, 'loss_curve.png'))
            plt.close()
        # Persist everything needed to resume/inspect the run.
        if dump_path is not None:
            torch.save(all_losses, os.path.join(dump_path, 'losses.pt'))
            torch.save(all_val_losses, os.path.join(dump_path, 'val_losses.pt'))
            torch.save(lrs, os.path.join(dump_path, 'lrs.pt'))
            torch.save(model.state_dict(), os.path.join(dump_path, 'model.pt'))
            with open(os.path.join(dump_path, 'losses.txt'),'a') as file:
                file.write(f'train:{np.average(losses)}\t val:{np.average(val_losses)}\n') | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"itertools.cycle",
"torch.no_grad",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show",
"numpy.average",
"torch.optim.lr_scheduler.CyclicLR",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"tqdm.auto.tqdm",
"matpl... | [((389, 430), 'tqdm.auto.tqdm', 'tqdm', (['loader'], {'desc': '"""batches"""', 'leave': '(False)'}), "(loader, desc='batches', leave=False)\n", (393, 430), False, 'from tqdm.auto import tqdm\n'), ((4004, 4041), 'tqdm.auto.tqdm.write', 'tqdm.write', (['f"""dump path: {dump_path}"""'], {}), "(f'dump path: {dump_path}')\n", (4014, 4041), False, 'from tqdm.auto import tqdm\n'), ((4422, 4433), 'time.time', 'time.time', ([], {}), '()\n', (4431, 4433), False, 'import time\n'), ((991, 1002), 'time.time', 'time.time', ([], {}), '()\n', (1000, 1002), False, 'import time\n'), ((1070, 1081), 'time.time', 'time.time', ([], {}), '()\n', (1079, 1081), False, 'import time\n'), ((1091, 1118), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (1101, 1118), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1142), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1137, 1142), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1171), 'matplotlib.pyplot.plot', 'plt.plot', (['new_losses'], {}), '(new_losses)\n', (1159, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1180, 1199), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""batch"""'], {}), "('batch')\n", (1190, 1199), True, 'import matplotlib.pyplot as plt\n'), ((1208, 1226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (1218, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1235, 1258), 'matplotlib.pyplot.title', 'plt.title', (['"""Last Epoch"""'], {}), "('Last Epoch')\n", (1244, 1258), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1312), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1307, 1312), True, 'import matplotlib.pyplot as plt\n'), ((1321, 1337), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (1329, 1337), True, 'import matplotlib.pyplot as plt\n'), ((1346, 1369), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), 
"('iteration')\n", (1356, 1369), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1396), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (1388, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1405, 1428), 'matplotlib.pyplot.title', 'plt.title', (['"""All Epochs"""'], {}), "('All Epochs')\n", (1414, 1428), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1472), 'IPython.display.clear_output', 'ipy.display.clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (1461, 1472), True, 'import IPython as ipy\n'), ((1481, 1491), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1489, 1491), True, 'import matplotlib.pyplot as plt\n'), ((1990, 2010), 'itertools.cycle', 'it.cycle', (['val_loader'], {}), '(val_loader)\n', (1998, 2010), True, 'import itertools as it\n'), ((2892, 2903), 'time.time', 'time.time', ([], {}), '()\n', (2901, 2903), False, 'import time\n'), ((3003, 3014), 'time.time', 'time.time', ([], {}), '()\n', (3012, 3014), False, 'import time\n'), ((3024, 3051), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (3034, 3051), True, 'import matplotlib.pyplot as plt\n'), ((3059, 3075), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (3070, 3075), True, 'import matplotlib.pyplot as plt\n'), ((3084, 3119), 'matplotlib.pyplot.plot', 'plt.plot', (['new_losses'], {'label': '"""train"""'}), "(new_losses, label='train')\n", (3092, 3119), True, 'import matplotlib.pyplot as plt\n'), ((3128, 3165), 'matplotlib.pyplot.plot', 'plt.plot', (['new_val_losses'], {'label': '"""val"""'}), "(new_val_losses, label='val')\n", (3136, 3165), True, 'import matplotlib.pyplot as plt\n'), ((3174, 3186), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3184, 3186), True, 'import matplotlib.pyplot as plt\n'), ((3195, 3214), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""batch"""'], {}), "('batch')\n", (3205, 3214), True, 'import matplotlib.pyplot as plt\n'), 
((3223, 3241), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (3233, 3241), True, 'import matplotlib.pyplot as plt\n'), ((3250, 3273), 'matplotlib.pyplot.title', 'plt.title', (['"""Last Epoch"""'], {}), "('Last Epoch')\n", (3259, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3357, 3373), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (3368, 3373), True, 'import matplotlib.pyplot as plt\n'), ((3382, 3413), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {'label': '"""train"""'}), "(losses, label='train')\n", (3390, 3413), True, 'import matplotlib.pyplot as plt\n'), ((3422, 3455), 'matplotlib.pyplot.plot', 'plt.plot', (['val_losses'], {'label': '"""val"""'}), "(val_losses, label='val')\n", (3430, 3455), True, 'import matplotlib.pyplot as plt\n'), ((3464, 3476), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3474, 3476), True, 'import matplotlib.pyplot as plt\n'), ((3485, 3508), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), "('iteration')\n", (3495, 3508), True, 'import matplotlib.pyplot as plt\n'), ((3517, 3535), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (3527, 3535), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3567), 'matplotlib.pyplot.title', 'plt.title', (['"""All Epochs"""'], {}), "('All Epochs')\n", (3553, 3567), True, 'import matplotlib.pyplot as plt\n'), ((3576, 3611), 'IPython.display.clear_output', 'ipy.display.clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (3600, 3611), True, 'import IPython as ipy\n'), ((3620, 3630), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3628, 3630), True, 'import matplotlib.pyplot as plt\n'), ((4167, 4312), 'torch.optim.lr_scheduler.CyclicLR', 'torch.optim.lr_scheduler.CyclicLR', (['opt'], {'base_lr': '(0)', 'max_lr': 'lr', 'step_size_up': 'step_size_up', 'step_size_down': 'step_size_down', 'cycle_momentum': '(False)'}), '(opt, base_lr=0, max_lr=lr, step_size_up=\n 
step_size_up, step_size_down=step_size_down, cycle_momentum=False)\n', (4200, 4312), False, 'import torch\n'), ((5855, 5867), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5865, 5867), True, 'import matplotlib.pyplot as plt\n'), ((5876, 5911), 'matplotlib.pyplot.plot', 'plt.plot', (['all_losses'], {'label': '"""train"""'}), "(all_losses, label='train')\n", (5884, 5911), True, 'import matplotlib.pyplot as plt\n'), ((5920, 5957), 'matplotlib.pyplot.plot', 'plt.plot', (['all_val_losses'], {'label': '"""val"""'}), "(all_val_losses, label='val')\n", (5928, 5957), True, 'import matplotlib.pyplot as plt\n'), ((5966, 5978), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5976, 5978), True, 'import matplotlib.pyplot as plt\n'), ((5987, 6010), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), "('iteration')\n", (5997, 6010), True, 'import matplotlib.pyplot as plt\n'), ((6019, 6037), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (6029, 6037), True, 'import matplotlib.pyplot as plt\n'), ((2107, 2122), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2120, 2122), False, 'import torch\n'), ((4565, 4585), 'itertools.cycle', 'it.cycle', (['val_loader'], {}), '(val_loader)\n', (4573, 4585), True, 'import itertools as it\n'), ((6090, 6106), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (6098, 6106), True, 'import matplotlib.pyplot as plt\n'), ((6548, 6583), 'IPython.display.clear_output', 'ipy.display.clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (6572, 6583), True, 'import IPython as ipy\n'), ((6596, 6606), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6604, 6606), True, 'import matplotlib.pyplot as plt\n'), ((6741, 6752), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6750, 6752), True, 'import matplotlib.pyplot as plt\n'), ((4702, 4717), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4715, 4717), False, 'import torch\n'), ((6151, 
6167), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 5)'], {}), '((0, 5))\n', (6159, 6167), True, 'import matplotlib.pyplot as plt\n'), ((6686, 6727), 'os.path.join', 'os.path.join', (['dump_path', '"""loss_curve.png"""'], {}), "(dump_path, 'loss_curve.png')\n", (6698, 6727), False, 'import os\n'), ((6831, 6867), 'os.path.join', 'os.path.join', (['dump_path', '"""losses.pt"""'], {}), "(dump_path, 'losses.pt')\n", (6843, 6867), False, 'import os\n'), ((6908, 6948), 'os.path.join', 'os.path.join', (['dump_path', '"""val_losses.pt"""'], {}), "(dump_path, 'val_losses.pt')\n", (6920, 6948), False, 'import os\n'), ((6978, 7011), 'os.path.join', 'os.path.join', (['dump_path', '"""lrs.pt"""'], {}), "(dump_path, 'lrs.pt')\n", (6990, 7011), False, 'import os\n'), ((7056, 7091), 'os.path.join', 'os.path.join', (['dump_path', '"""model.pt"""'], {}), "(dump_path, 'model.pt')\n", (7068, 7091), False, 'import os\n'), ((6213, 6230), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 10)'], {}), '((0, 10))\n', (6221, 6230), True, 'import matplotlib.pyplot as plt\n'), ((7115, 7152), 'os.path.join', 'os.path.join', (['dump_path', '"""losses.txt"""'], {}), "(dump_path, 'losses.txt')\n", (7127, 7152), False, 'import os\n'), ((5323, 5363), 'os.path.join', 'os.path.join', (['dump_path', '"""best_model.pt"""'], {}), "(dump_path, 'best_model.pt')\n", (5335, 5363), False, 'import os\n'), ((6277, 6295), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 100)'], {}), '((0, 100))\n', (6285, 6295), True, 'import matplotlib.pyplot as plt\n'), ((6353, 6364), 'time.time', 'time.time', ([], {}), '()\n', (6362, 6364), False, 'import time\n'), ((7203, 7221), 'numpy.average', 'np.average', (['losses'], {}), '(losses)\n', (7213, 7221), True, 'import numpy as np\n'), ((7230, 7252), 'numpy.average', 'np.average', (['val_losses'], {}), '(val_losses)\n', (7240, 7252), True, 'import numpy as np\n')] |
'''
datecreated: 190930
objective: want to use opencv to make some kind of animated plotting tool.
KJG190930: using cv2 is MUCH MUCH faster, will use this instead of matplotlib
KJG190930: at this point, will use tkinter to try and control the rectangle
KJG191001: tkinter now functional, with multi-key input. now capable of having
high-quality graphing window along with manual input control
applications:
* animated plot
* live updating
* user / computer controlled animation
* computer controlled demo
THINGS TO IMPLEMENT
status | description
done | plot fast-updating (60Hz+) plot area
done | have a rotating rectangle
done | use polygons instead of "rect", in custom function
done | be able to control item with keyboard (one key)
'''
import threading # handling two different sequences at once (getting frames, displaying them)
import tkinter as tk # keyboard control
import cv2
import time
import numpy as np
RED = (0,0,255) # for use with opencv (BGR)
BLU = (255,0,0)
GRN = (0,255,0)
WHT = (255,255,255)
BLK = (0,0,0)
CVFONT = cv2.FONT_HERSHEY_SIMPLEX
IMW=400
IMH=300
def qs(img,title='CLOSE WITH KEYBOARD'):
    """Quick-show helper: display `img` in a window titled `title`, block
    until any key is pressed, then close every OpenCV window."""
    cv2.imshow(title,img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def rect2(img, center, dims, angle, color, *kargs):
    """Draw a rotated rectangle outline onto `img`.

    Parameters
    ----------
    img : ndarray
        Image (OpenCV layout) to draw on, modified in place.
    center : (xc, yc)
        Pixel coordinates of the rectangle centre.
    dims : (w, h)
        Full width and height of the rectangle.
    angle : float
        Rotation angle in degrees.
    color : tuple
        BGR colour for the outline.
    *kargs :
        Ignored; kept for backward compatibility with existing callers.

    General steps:
    1. build the four corners of a w x h rectangle centred at the origin
    2. rotate them by `angle`
    3. translate to `center` and draw as a closed polyline

    BUG FIX: `color` was previously ignored (RED was hard-coded in the
    cv2.polylines call); it is now honoured.  The visible caller passes RED,
    so existing behaviour is unchanged.
    """
    xc, yc = center
    w, h = dims
    theta = np.radians(angle)
    c, s = np.cos(theta), np.sin(theta)
    # homogeneous 2-D rotation matrix
    R = np.array([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1]])  # 3x3
    # half-size corner offsets (closed loop: first point repeated)
    pts = np.array([
        [-w, h, 1],
        [w, h, 1],
        [w, -h, 1],
        [-w, -h, 1],
        [-w, h, 1]]) / 2
    # rotate points about the origin, then translate to the requested centre
    pts2 = pts @ R
    pts2[:, 0] += xc
    pts2[:, 1] += yc
    pts2 = pts2[:, :2].reshape((-1, 1, 2)).astype(int)
    cv2.polylines(img, [pts2], True, color)
class Timer:
    """Simple wall-clock stopwatch with total elapsed time and lap times."""

    def __init__(self):
        start = time.time()
        self.t0 = start      # moment the timer was created
        self._lap = start    # moment of the most recent lap() call

    def now(self):
        """Return seconds elapsed since the timer was created."""
        return time.time() - self.t0

    def lap(self):
        """Return seconds since the previous lap() call and reset the lap
        reference point."""
        current = time.time()
        elapsed = current - self._lap
        self._lap = current
        return elapsed
class Global:
    """Mutable shared-state holder passed between the tkinter (keyboard) and
    OpenCV (display) threads."""
    def __init__(self):
        # current keypad direction code written by the keyboard handlers
        self.var=0
gvar = Global()  # single shared instance read/written by both threads
class KBControl_r0:
    """First-revision keyboard controller: single-key left/right input.

    user note: tkinter should only be used in the main thread and has issues
    working with the threading module; do not run this class in a separate
    thread.
    src: https://stackoverflow.com/questions/45799121/runtimeerror-calling-tcl-from-different-appartment-tkinter-and-threading
    """

    def __init__(self):
        self.R = tk.Tk()
        self.F = tk.Frame(self.R, width=100, height=100)
        # one handler per key: a = left, d = right, q = quit
        for key, handler in (('a', self.leftKey),
                             ('d', self.rightKey),
                             ('q', self.quit)):
            self.F.bind(key, handler)
        self.F.focus_set()
        self.F.pack()
        self.var_dir = tk.IntVar()

    def getstatus(self):
        """Debug helper: print the current direction value."""
        print('value:', self.var_dir.get())

    def leftKey(self, event):
        """'a' pressed: publish direction 0."""
        self.var_dir.set(0)
        gvar.var = 0

    def rightKey(self, event):
        """'d' pressed: publish direction 1."""
        self.var_dir.set(1)
        gvar.var = 1

    def quit(self, event):
        """'q' pressed: stop the tkinter main loop."""
        self.R.quit()

    def run(self):
        """Enter the (blocking) tkinter event loop."""
        self.R.mainloop()
class KBControl_r1:
    """Multi-key keyboard controller: maps held WASD chords onto numeric
    keypad direction codes and publishes them through `gvar`."""

    def __init__(self):
        self.R = tk.Tk()
        self.V = tk.StringVar()
        self.V.set('0')  # initial direction readout
        self.a_label = tk.Label(self.R,textvariable = self.V ).pack()  # live readout label
        self.history = []   # keys currently held down, in press order
        self.v_dir = ''
        self.F = tk.Frame(self.R, width=200, height=200)  # frame in main window
        self.F.bind("<KeyPress>", self.keydown)     # key pressed
        self.F.bind("<KeyRelease>", self.keyup)     # key released
        self.F.bind('q', self.quit)
        self.F.pack()         # activate the frame
        self.F.focus_set()    # give the frame keyboard focus

    def keyup(self, e):
        """A key was released: drop it from the held-key list and republish
        the resulting direction."""
        if e.char in self.history:
            self.history.remove(e.char)
        self.v_dir = self.direction(self.history)
        gvar.var = self.v_dir
        self.V.set(self.v_dir)
        # here, the updated command would be sent to the serial port

    def keydown(self, e):
        """A key was pressed: add it to the held-key list (once) and
        republish the resulting direction."""
        if e.char not in self.history:
            self.history.append(e.char)
        self.v_dir = self.direction(self.history)
        gvar.var = self.v_dir
        self.V.set(self.v_dir)
        # here, the updated command would be sent to the serial port

    def direction(self, e):
        """Map the list of currently pressed keys to a keypad digit string.

        keypad layout:
            1 2 3
            4 5 6
            7 8 9
              0
        Single keys: w='2' (N), a='4' (W), s='8' (S), d='6' (E); two-key
        chords give the diagonals; any other combination maps to '0'.
        """
        if len(e) == 1:
            singles = {'w': '2', 'a': '4', 's': '8', 'd': '6'}
            return singles.get(e[0], '0')
        if len(e) == 2:
            chords = {
                frozenset(('w', 'a')): '1',  # NWEST
                frozenset(('w', 'd')): '3',  # NEAST
                frozenset(('s', 'a')): '7',  # SWEST
                frozenset(('s', 'd')): '9',  # SEAST
            }
            return chords.get(frozenset(e), '0')
        return '0'

    def quit(self, e):
        """'q' pressed: stop the tkinter main loop."""
        self.R.quit()

    def run(self):
        """Enter the (blocking) tkinter event loop."""
        self.R.mainloop()
class DisplayWindow:
    """OpenCV display loop: renders a timing HUD, the shared direction value
    and a rotating rectangle.  run() is designed to be launched in a
    background thread."""

    def __init__(self):
        # rectangle state: centred in the image, fixed size
        self.xc = IMW / 2
        self.yc = IMH / 2
        self.w = 50
        self.h = 100

    def run(self):
        """Redraw frames as fast as possible until 'q' is pressed in the
        cv2 window."""
        while True:
            lap = timer.lap()
            # white canvas in OpenCV (H, W, BGR) layout
            frame = np.ones((IMH, IMW, 3)) * 255
            cv2.putText(frame, str(round(lap, 3)), (50, 30), CVFONT, 1, BLU)
            cv2.putText(frame, str(round(timer.now(), 3)), (50, 60), CVFONT, 1, BLU)
            cv2.putText(frame, str(gvar.var), (50, 90), CVFONT, 1, BLU)
            cv2.circle(frame, (int(IMW / 2), int(IMH / 2)), 10, GRN)
            # rectangle spins at half a revolution per second
            rect2(frame, (self.xc, self.yc), (self.w, self.h), timer.now() * 180, RED)
            pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
            pts = pts.reshape((-1, 1, 2))  # cv2 polygon point format
            cv2.imshow("press 'q' to exit", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
# Module-level shared objects: DisplayWindow.run reads `timer` and `gvar`.
timer = Timer()
dw = DisplayWindow()
kbc = KBControl_r1()
#
# Daemon thread: the OpenCV display loop dies automatically when tkinter exits.
thread_dw=threading.Thread(target=dw.run,daemon=True) # kill this window if tkinter closes
thread_dw.start()
#
#
# # print('ready to exit')
kbc.run() # tkinter must run in the main thread, so this blocking call comes last
| [
"tkinter.StringVar",
"threading.Thread",
"numpy.radians",
"cv2.polylines",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.ones",
"time.time",
"numpy.sin",
"numpy.array",
"numpy.cos",
"tkinter.Frame",
"tkinter.IntVar",
"cv2.imshow",
"tkinter.Label",
"tkinter.Tk"
] | [((7387, 7431), 'threading.Thread', 'threading.Thread', ([], {'target': 'dw.run', 'daemon': '(True)'}), '(target=dw.run, daemon=True)\n', (7403, 7431), False, 'import threading\n'), ((1155, 1177), 'cv2.imshow', 'cv2.imshow', (['title', 'img'], {}), '(title, img)\n', (1165, 1177), False, 'import cv2\n'), ((1181, 1195), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1192, 1195), False, 'import cv2\n'), ((1200, 1223), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1221, 1223), False, 'import cv2\n'), ((1450, 1467), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (1460, 1467), True, 'import numpy as np\n'), ((1510, 1554), 'numpy.array', 'np.array', (['[[c, -s, 0], [s, c, 0], [0, 0, 1]]'], {}), '([[c, -s, 0], [s, c, 0], [0, 0, 1]])\n', (1518, 1554), True, 'import numpy as np\n'), ((1821, 1858), 'cv2.polylines', 'cv2.polylines', (['img', '[pts2]', '(True)', 'RED'], {}), '(img, [pts2], True, RED)\n', (1834, 1858), False, 'import cv2\n'), ((1476, 1489), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1482, 1489), True, 'import numpy as np\n'), ((1490, 1503), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1496, 1503), True, 'import numpy as np\n'), ((1588, 1658), 'numpy.array', 'np.array', (['[[-w, h, 1], [w, h, 1], [w, -h, 1], [-w, -h, 1], [-w, h, 1]]'], {}), '([[-w, h, 1], [w, h, 1], [w, -h, 1], [-w, -h, 1], [-w, h, 1]])\n', (1596, 1658), True, 'import numpy as np\n'), ((1910, 1921), 'time.time', 'time.time', ([], {}), '()\n', (1919, 1921), False, 'import time\n'), ((1955, 1966), 'time.time', 'time.time', ([], {}), '()\n', (1964, 1966), False, 'import time\n'), ((2198, 2209), 'time.time', 'time.time', ([], {}), '()\n', (2207, 2209), False, 'import time\n'), ((2688, 2695), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (2693, 2695), True, 'import tkinter as tk\n'), ((2713, 2752), 'tkinter.Frame', 'tk.Frame', (['self.R'], {'width': '(100)', 'height': '(100)'}), '(self.R, width=100, height=100)\n', (2721, 2752), True, 
'import tkinter as tk\n'), ((2935, 2946), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (2944, 2946), True, 'import tkinter as tk\n'), ((3353, 3360), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (3358, 3360), True, 'import tkinter as tk\n'), ((3378, 3392), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (3390, 3392), True, 'import tkinter as tk\n'), ((3630, 3669), 'tkinter.Frame', 'tk.Frame', (['self.R'], {'width': '(200)', 'height': '(200)'}), '(self.R, width=200, height=200)\n', (3638, 3669), True, 'import tkinter as tk\n'), ((2052, 2063), 'time.time', 'time.time', ([], {}), '()\n', (2061, 2063), False, 'import time\n'), ((2154, 2165), 'time.time', 'time.time', ([], {}), '()\n', (2163, 2165), False, 'import time\n'), ((7067, 7126), 'numpy.array', 'np.array', (['[[10, 5], [20, 30], [70, 20], [50, 10]]', 'np.int32'], {}), '([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)\n', (7075, 7126), True, 'import numpy as np\n'), ((7206, 7243), 'cv2.imshow', 'cv2.imshow', (['"""press \'q\' to exit"""', 'bkgd'], {}), '("press \'q\' to exit", bkgd)\n', (7216, 7243), False, 'import cv2\n'), ((3463, 3500), 'tkinter.Label', 'tk.Label', (['self.R'], {'textvariable': 'self.V'}), '(self.R, textvariable=self.V)\n', (3471, 3500), True, 'import tkinter as tk\n'), ((6652, 6674), 'numpy.ones', 'np.ones', (['(IMH, IMW, 3)'], {}), '((IMH, IMW, 3))\n', (6659, 6674), True, 'import numpy as np\n'), ((7258, 7272), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7269, 7272), False, 'import cv2\n')] |
import numpy as np
import matplotlib.pyplot as plt

# Read both columns of the histogram dump in one pass:
# column 0 = bin labels, column 1 = binned values.
bin_labels, hist_data = np.loadtxt(
    fname="histogramdata", dtype=float, usecols=(0, 1), unpack=True
)
# Round the labels to two decimals for a tidier x-axis.
bin_labels = [round(x, 2) for x in bin_labels]
plt.plot(bin_labels, hist_data)
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.plot"
] | [((77, 134), 'numpy.loadtxt', 'np.loadtxt', ([], {'fname': '"""histogramdata"""', 'dtype': 'float', 'usecols': '(0)'}), "(fname='histogramdata', dtype=float, usecols=0)\n", (87, 134), True, 'import numpy as np\n'), ((150, 207), 'numpy.loadtxt', 'np.loadtxt', ([], {'fname': '"""histogramdata"""', 'dtype': 'float', 'usecols': '(1)'}), "(fname='histogramdata', dtype=float, usecols=1)\n", (160, 207), True, 'import numpy as np\n'), ((257, 288), 'matplotlib.pyplot.plot', 'plt.plot', (['bin_labels', 'hist_data'], {}), '(bin_labels, hist_data)\n', (265, 288), True, 'import matplotlib.pyplot as plt\n'), ((289, 299), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (297, 299), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pylab as plt
def bin_data(data, minimum=None, maximum=None, bin_size=None, bin_number=100, normalised=True):
    """Return the (normalised) number count of a data set within defined bins.

    Parameters
    ----------
    data : array_like
        The data to be binned.
    minimum : float
        A given minimum for the bin edges (defaults to min(data)).
    maximum : float
        A given maximum for the bin edges (defaults to max(data)).
    bin_size : float
        The size of the bins; if given it overrides `bin_number`.
    bin_number : int
        The number of bins to be used.
    normalised : bool
        If true, return a normalised histogram instead of raw number counts.

    Returns
    -------
    bin_centres : array_like
        The central value of each bin.
    binned_data : array_like
        The binned number count (or normalised histogram) of the data set.
    """
    lo = np.min(data) if minimum is None else minimum
    hi = np.max(data) if maximum is None else maximum
    if bin_size is None:
        edges = np.linspace(lo, hi, bin_number + 1)
    else:
        # fixed-width bins; drop any arange overshoot past the maximum
        edges = np.arange(lo, hi + bin_size, bin_size)
        edges = edges[edges <= hi]
    binned_data, _ = np.histogram(data, bins=edges, density=normalised)
    bin_centres = 0.5 * (edges[1:] + edges[:-1])
    return bin_centres, binned_data
class HistMST:
    """Bins minimum-spanning-tree statistics (degree, edge length, branch
    length, branch shape) into histograms, with an optional group mode for
    averaging over repeated realisations of the same model."""

    def __init__(self):
        """Initialise the MST histogram class with all settings unset."""
        # mode flags and bin-edge information, populated later by setup()
        for attr in ("uselog", "use_s_sqrt", "usenorm",
                     "d_min", "d_max", "num_d_bins",
                     "l_min", "l_max", "num_l_bins",
                     "b_min", "b_max", "num_b_bins",
                     "s_min", "s_max", "num_s_bins",
                     "logl_min", "logl_max", "logb_min", "logb_max"):
            setattr(self, attr, None)
        self.group_mode = False

    def setup(self, uselog=False, use_sqrt_s=True, usenorm=True, d_min=0.5, d_max=6.5, num_d_bins=6,
              l_min=0., l_max=None, num_l_bins=100, b_min=0., b_max=None, num_b_bins=100,
              s_min=0., s_max=1., num_s_bins=50, logl_min=None, logl_max=None, logb_min=None, logb_max=None):
        """Configure the bin sizes for the MST statistics.

        Parameters
        ----------
        uselog : bool
            Whether to use log bins for l and b.
        use_sqrt_s : bool
            Whether to use the sqrt(1-s) projection of s or s itself.
        usenorm : bool
            Whether to normalise the histograms.
        d_min, d_max : float
            Degree bin range (use half-integer values).
        num_d_bins : int
            Number of degree bins; should equal d_max - d_min.
        l_min, l_max : float
            Edge-length bin range (l_max is inferred from the data if None).
        num_l_bins : int
            Number of edge-length bins.
        b_min, b_max : float
            Branch-length bin range (b_max is inferred from the data if None).
        num_b_bins : int
            Number of branch-length bins.
        s_min, s_max : float
            Branch-shape bin range.
        num_s_bins : int
            Number of branch-shape bins.
        logl_min, logl_max, logb_min, logb_max : float
            Log10 limits for edge/branch lengths (inferred if None).
        """
        # every keyword argument maps onto an identically named attribute
        settings = dict(locals())
        settings.pop("self")
        for name, value in settings.items():
            setattr(self, name, value)

    def get_hist(self, d, l, b, s):
        """Bin one MST realisation and return the histograms as a dictionary.

        Parameters
        ----------
        d, l, b, s : array_like
            Distributions of degree, edge length, branch length and branch
            shape respectively.

        Returns
        -------
        mst_hist : dict
            Dictionary of MST binned histograms plus the mode flags.
        """
        # infer any unset limits from the data (stored for subsequent calls)
        if self.l_max is None:
            self.l_max = 1.05*l.max()
        if self.b_max is None:
            self.b_max = 1.05*b.max()
        if self.logl_min is None:
            self.logl_min = np.log10(0.95*l.min())
        if self.logl_max is None:
            self.logl_max = np.log10(1.05*l.max())
        if self.logb_min is None:
            self.logb_min = np.log10(0.95*b.min())
        if self.logb_max is None:
            self.logb_max = np.log10(1.05*b.max())
        # degree histogram
        x_d, y_d = bin_data(d, minimum=self.d_min, maximum=self.d_max,
                            bin_number=self.num_d_bins, normalised=self.usenorm)
        # edge-length / branch-length histograms, in linear or log10 bins
        if self.uselog:
            x_logl, y_l = bin_data(np.log10(l), minimum=self.logl_min, maximum=self.logl_max,
                                    bin_number=self.num_l_bins, normalised=self.usenorm)
            x_logb, y_b = bin_data(np.log10(b), minimum=self.logb_min, maximum=self.logb_max,
                                    bin_number=self.num_b_bins, normalised=self.usenorm)
            x_l = 10.**x_logl
            x_b = 10.**x_logb
        else:
            x_l, y_l = bin_data(l, minimum=self.l_min, maximum=self.l_max,
                                bin_number=self.num_l_bins, normalised=self.usenorm)
            x_b, y_b = bin_data(b, minimum=self.b_min, maximum=self.b_max,
                                bin_number=self.num_b_bins, normalised=self.usenorm)
        # branch-shape histogram, optionally in the sqrt(1 - s) projection
        if self.use_sqrt_s:
            shape_sample = np.sqrt(1.-s)
        else:
            shape_sample = s
        x_s, y_s = bin_data(shape_sample, minimum=self.s_min, maximum=self.s_max,
                            bin_number=self.num_s_bins, normalised=self.usenorm)
        mst_hist = {
            "uselog": self.uselog, "use_sqrt_s": self.use_sqrt_s,
            "usenorm": self.usenorm, "isgroup": False,
            "x_d": x_d, "y_d": y_d, "x_l": x_l, "y_l": y_l,
            "x_b": x_b, "y_b": y_b, "x_s": x_s, "y_s": y_s,
        }
        if self.group_mode:
            # remember the bin centres and accumulate this realisation
            self.x_d = mst_hist['x_d']
            self.x_l = mst_hist['x_l']
            self.x_b = mst_hist['x_b']
            self.x_s = mst_hist['x_s']
            self.group_y_d.append(mst_hist['y_d'])
            self.group_y_l.append(mst_hist['y_l'])
            self.group_y_b.append(mst_hist['y_b'])
            self.group_y_s.append(mst_hist['y_s'])
        return mst_hist

    def start_group(self):
        """Begin group mode: accumulate histograms from repeated realisations
        of data points drawn from the same model, so their mean and standard
        deviation can be computed."""
        self.group_mode = True
        self.group_y_d = []
        self.group_y_l = []
        self.group_y_b = []
        self.group_y_s = []

    def end_group(self):
        """End group mode and return the mean and standard deviation of the
        accumulated MST histograms.

        Returns
        -------
        mst_hist : dict
            Dictionary of the mean and standard deviation of the MST binned
            histograms.
        """
        self.group_mode = False
        # stack accumulated realisations: rows = realisations, cols = bins
        self.group_y_d = np.array(self.group_y_d)
        self.group_y_l = np.array(self.group_y_l)
        self.group_y_b = np.array(self.group_y_b)
        self.group_y_s = np.array(self.group_y_s)
        mst_hist = {"uselog": self.uselog, "use_sqrt_s": self.use_sqrt_s,
                    "usenorm": self.usenorm, "isgroup": True}
        for key, stack, centres in (("d", self.group_y_d, self.x_d),
                                    ("l", self.group_y_l, self.x_l),
                                    ("b", self.group_y_b, self.x_b),
                                    ("s", self.group_y_s, self.x_s)):
            mst_hist["x_" + key] = centres
            mst_hist["y_" + key] = np.mean(stack, axis=0)
            mst_hist["y_" + key + "_std"] = np.std(stack, axis=0)
        return mst_hist

    def clean(self):
        """Reset every HistMST variable to its freshly-constructed state."""
        self.__init__()
| [
"numpy.std",
"numpy.histogram",
"numpy.max",
"numpy.arange",
"numpy.min",
"numpy.linspace",
"numpy.array",
"numpy.mean",
"numpy.where",
"numpy.log10",
"numpy.sqrt"
] | [((1293, 1347), 'numpy.histogram', 'np.histogram', (['data'], {'bins': '_bin_edge', 'density': 'normalised'}), '(data, bins=_bin_edge, density=normalised)\n', (1305, 1347), True, 'import numpy as np\n'), ((915, 927), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (921, 927), True, 'import numpy as np\n'), ((970, 982), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (976, 982), True, 'import numpy as np\n'), ((1028, 1073), 'numpy.linspace', 'np.linspace', (['minimum', 'maximum', '(bin_number + 1)'], {}), '(minimum, maximum, bin_number + 1)\n', (1039, 1073), True, 'import numpy as np\n'), ((1121, 1169), 'numpy.arange', 'np.arange', (['minimum', '(maximum + bin_size)', 'bin_size'], {}), '(minimum, maximum + bin_size, bin_size)\n', (1130, 1169), True, 'import numpy as np\n'), ((8209, 8233), 'numpy.array', 'np.array', (['self.group_y_d'], {}), '(self.group_y_d)\n', (8217, 8233), True, 'import numpy as np\n'), ((8259, 8283), 'numpy.array', 'np.array', (['self.group_y_l'], {}), '(self.group_y_l)\n', (8267, 8283), True, 'import numpy as np\n'), ((8309, 8333), 'numpy.array', 'np.array', (['self.group_y_b'], {}), '(self.group_y_b)\n', (8317, 8333), True, 'import numpy as np\n'), ((8359, 8383), 'numpy.array', 'np.array', (['self.group_y_s'], {}), '(self.group_y_s)\n', (8367, 8383), True, 'import numpy as np\n'), ((8403, 8434), 'numpy.mean', 'np.mean', (['self.group_y_d'], {'axis': '(0)'}), '(self.group_y_d, axis=0)\n', (8410, 8434), True, 'import numpy as np\n'), ((8454, 8485), 'numpy.mean', 'np.mean', (['self.group_y_l'], {'axis': '(0)'}), '(self.group_y_l, axis=0)\n', (8461, 8485), True, 'import numpy as np\n'), ((8505, 8536), 'numpy.mean', 'np.mean', (['self.group_y_b'], {'axis': '(0)'}), '(self.group_y_b, axis=0)\n', (8512, 8536), True, 'import numpy as np\n'), ((8556, 8587), 'numpy.mean', 'np.mean', (['self.group_y_s'], {'axis': '(0)'}), '(self.group_y_s, axis=0)\n', (8563, 8587), True, 'import numpy as np\n'), ((8606, 8636), 'numpy.std', 'np.std', 
(['self.group_y_d'], {'axis': '(0)'}), '(self.group_y_d, axis=0)\n', (8612, 8636), True, 'import numpy as np\n'), ((8655, 8685), 'numpy.std', 'np.std', (['self.group_y_l'], {'axis': '(0)'}), '(self.group_y_l, axis=0)\n', (8661, 8685), True, 'import numpy as np\n'), ((8704, 8734), 'numpy.std', 'np.std', (['self.group_y_b'], {'axis': '(0)'}), '(self.group_y_b, axis=0)\n', (8710, 8734), True, 'import numpy as np\n'), ((8753, 8783), 'numpy.std', 'np.std', (['self.group_y_s'], {'axis': '(0)'}), '(self.group_y_s, axis=0)\n', (8759, 8783), True, 'import numpy as np\n'), ((1188, 1218), 'numpy.where', 'np.where', (['(_bin_edge <= maximum)'], {}), '(_bin_edge <= maximum)\n', (1196, 1218), True, 'import numpy as np\n'), ((6215, 6226), 'numpy.log10', 'np.log10', (['l'], {}), '(l)\n', (6223, 6226), True, 'import numpy as np\n'), ((6362, 6373), 'numpy.log10', 'np.log10', (['b'], {}), '(b)\n', (6370, 6373), True, 'import numpy as np\n'), ((6745, 6761), 'numpy.sqrt', 'np.sqrt', (['(1.0 - s)'], {}), '(1.0 - s)\n', (6752, 6761), True, 'import numpy as np\n')] |
from PIL import Image
import numpy as np
import os
def change_type(ann_org, ann_dir):
    """Re-save every annotation image from `ann_org` into `ann_dir`.

    Each image is round-tripped through a numpy array so that Pillow picks
    the natural mode for the raw pixel data when re-saving (the per-file
    print shows the before/after modes).

    Parameters
    ----------
    ann_org : str
        Source directory containing the annotation images (flat layout is
        assumed: output paths are built as ann_org + '/' + filename).
    ann_dir : str
        Destination directory; created if it does not exist.
    """
    # exist_ok=True already tolerates a pre-existing directory, so the
    # original try/except wrapper was redundant
    os.makedirs(ann_dir, exist_ok=True)
    print("create dir " + ann_dir)
    # BUG FIX: the original looped `for ... in os.walk(...): continue` and
    # then used the leaked loop variable `files` outside the loop, so only
    # the last directory visited was processed (and `files` was undefined
    # when the walk yielded nothing).  Process the files inside the walk.
    for root, dirs, files in os.walk(ann_org):
        for file in files:
            img_path_org = ann_org+'/'+file
            img_path_dir = ann_dir+'/'+file
            img_org = Image.open(img_path_org)
            img_arr = np.array(img_org)
            img_dir = Image.fromarray(img_arr)
            img_dir.save(img_path_dir)
            print('{} mode {} change to {} mode {}'.format(img_path_org,img_org.mode,img_path_dir,img_dir.mode))
if __name__ == '__main__':
    # Flat source/destination directories for the one-off conversion run.
    src_root = './base_annotations'
    dst_root = './base_annotations_L'
    change_type(src_root, dst_root)
"os.makedirs",
"os.walk",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray"
] | [((235, 251), 'os.walk', 'os.walk', (['ann_org'], {}), '(ann_org)\n', (242, 251), False, 'import os\n'), ((105, 140), 'os.makedirs', 'os.makedirs', (['ann_dir'], {'exist_ok': '(True)'}), '(ann_dir, exist_ok=True)\n', (116, 140), False, 'import os\n'), ((391, 415), 'PIL.Image.open', 'Image.open', (['img_path_org'], {}), '(img_path_org)\n', (401, 415), False, 'from PIL import Image\n'), ((434, 451), 'numpy.array', 'np.array', (['img_org'], {}), '(img_org)\n', (442, 451), True, 'import numpy as np\n'), ((470, 494), 'PIL.Image.fromarray', 'Image.fromarray', (['img_arr'], {}), '(img_arr)\n', (485, 494), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from PIL import Image
import random
import matplotlib.pyplot as plt
"""
Created on Mon May 21 18:41:43 2018
@author: <NAME>
VSM code with TensorFlow-v1.4
"""
def findTopK(scores, trainids, k):
    """Return the ids of the top-k scored items, excluding training items.

    Fetches len(trainids) + k + 1 candidates so that, after removing every
    training id, at least k recommendations remain.

    :param scores: 1-D tensor of item scores
    :param trainids: tensor of item ids seen during training (to exclude)
    :param k: number of recommendations to return
    :return: tensor of the top-k recommended item ids
    """
    pool_size = tf.add(tf.size(trainids), k + 1)
    top = tf.nn.top_k(input=scores, k=pool_size, sorted=True)
    candidates, _ = tf.setdiff1d(top.indices, trainids)
    return candidates[0:k]
def calPrecision(prediction, groundtruth):
    """Calculate precision@K for a top-K prediction list.

    :param prediction: tensor holding the top-K predicted item ids
    :param groundtruth: tensor holding the positive item ids
    :return: scalar tensor, (#ground-truth items hit by prediction) / K
    """
    # ground-truth items NOT present in the prediction
    missed = tf.setdiff1d(groundtruth, prediction).out
    hit_count = tf.size(groundtruth) - tf.size(missed)
    k = tf.size(prediction)
    return tf.divide(tf.cast(hit_count, dtype=tf.float32), tf.cast(k, dtype=tf.float32))
def calRecall(prediction, groundtruth):
    """Calculate recall@K: the fraction of ground-truth items recovered by
    the top-K prediction list.

    :param prediction: tensor holding the top-K predicted item ids
    :param groundtruth: tensor holding the positive item ids
    :return: scalar tensor, (#ground-truth items hit) / #ground-truth items
    """
    missed = tf.setdiff1d(groundtruth, prediction).out
    hit_count = tf.size(groundtruth) - tf.size(missed)
    total = tf.size(groundtruth)
    return tf.divide(tf.cast(hit_count, dtype=tf.float32), tf.cast(total, dtype=tf.float32))
def calNDCG(prediction, groundtruth):
    """Compute NDCG of a top-K prediction list with binary relevance.

    IDCG assumes every ground-truth item is ranked first; each hit in the
    prediction contributes log(2)/log(rank + 2), where rank is its 0-based
    position in `prediction`.  Returns the scalar tensor DCG / IDCG.
    """
    # Compute IDCG: sum over i in [0, |groundtruth|) of log(2)/log(i + 2)
    range = tf.cast(tf.add(tf.range(start=0, limit=tf.size(groundtruth)), 2), dtype=tf.float32)
    range_ = tf.cast(tf.tile(input=[2], multiples=[tf.size(groundtruth)]), dtype=tf.float32)
    idcg = tf.reduce_sum(tf.divide(tf.log(range_), tf.log(range)))
    # Compute DCG part: setdiff1d(prediction, non_hits).idx recovers the
    # 0-based rank of each hit inside `prediction`
    non_hits = tf.setdiff1d(prediction,groundtruth).out
    hits = tf.setdiff1d(prediction, non_hits).idx
    ranks = tf.cast(tf.add(hits, 2), dtype=tf.float32)
    ranks_ = tf.cast(tf.tile(input=[2], multiples=[tf.size(ranks)]), dtype=tf.float32)
    dcg = tf.reduce_sum(tf.divide(tf.log(ranks_), tf.log(ranks)))
    return tf.divide(dcg, idcg)
def calAUC(scores_with_train, groundtruth, trainids):
    """Compute AUC via the Mann-Whitney rank-sum formula.

    Items are ordered by ascending score (top_k over the negated scores),
    training items are removed, and the 1-based ranks of the positive
    (ground-truth) items among the remaining candidates are summed:
        AUC = (rank_sum - M*(M+1)/2) / (M*N)
    with M positives and N negatives.
    """
    # ascending-score ordering of all item ids (top_k of the negated scores)
    image_id_list = tf.nn.top_k(input=tf.multiply(scores_with_train,-1), k=tf.size(scores_with_train)).indices
    # drop the training items while preserving the sorted order
    no_train_ids_sorted, _ = tf.setdiff1d(image_id_list, trainids)
    # residual_part = negative items; rank_pos = 0-based positions of positives
    residual_part = tf.setdiff1d(no_train_ids_sorted, groundtruth).out
    rank_pos = tf.setdiff1d(no_train_ids_sorted, residual_part).idx
    pos_sum = tf.cast(tf.reduce_sum(tf.add(rank_pos, 1)), dtype=tf.float32)  # 1-based rank sum
    M = tf.cast(tf.size(groundtruth), dtype=tf.float32)  # number of positives
    N = tf.cast(tf.size(residual_part), dtype=tf.float32)  # number of negatives
    return tf.divide(pos_sum-(M+1)*M/2.0, M*N)
def weightedMultiply(tensors, weights):
    """Scale each row of `tensors` by the matching entry of `weights`.

    :param tensors: [tf.float32-castable] matrix whose row i is scaled by
        weights[i]
    :param weights: [tf.float32-castable] vector of per-row weights
    :return: [tf.float32] tensor of the same shape as `tensors` with every
        row scaled

    Implementation: diag(weights) @ tensors multiplies row i by weights[i].

    NOTE(review): the original docstring mentioned a final reduce-sum, but
    no summation is performed -- the weighted rows are returned as-is.
    """
    diag = tf.cast(tf.matrix_diag(weights), dtype=tf.float32)
    rows = tf.cast(tensors, tf.float32)
    return tf.matmul(diag, rows)
def computeOffsetAndTarget(shape, box):
    """Convert a fractional bounding box into pixel crop parameters.

    box indices 1/2 are horizontal fractions, 3/4 vertical fractions
    (index 0 appears to be a label and is unused here).

    :param shape: (height, width, ...) of the image in pixels.
    :param box: sequence of fractions as described above.
    :return: (offset_height, offset_width, target_height, target_width) ints.
    """
    height, width = shape[0], shape[1]
    top, bottom = box[3], box[4]
    left, right = box[1], box[2]
    return (int(top * height),
            int(left * width),
            int((bottom - top) * height),
            int((right - left) * width))
def per_image_standardization(img):
    """Randomly flip an image horizontally (p=0.5) and standardize it.

    Standardization mirrors tf.image.per_image_standardization: subtract
    the mean and divide by max(stddev, 1/num_elements) so near-constant
    images do not blow up.

    Bug fix: the original only standardized RGB images and silently
    returned the (possibly flipped) PIL Image for any other mode; now
    every mode is standardized and an ndarray is always returned.

    :param img: image as a numpy array (converted to PIL internally).
    :return: standardized image as a numpy float array.
    """
    img = Image.fromarray(img)
    # Random horizontal flip for data augmentation.
    if random.randint(0, 1) == 1:
        img = img.transpose(Image.FLIP_LEFT_RIGHT)
    img_arr = np.array(img)
    # .size is height*width*channels for any mode, matching the old
    # hand-computed RGB-only count.
    num_elements = img_arr.size
    adjusted_std = max(np.std(img_arr), 1 / num_elements)
    return np.array((img_arr - np.mean(img_arr)) / adjusted_std)
def padRemoverInt_np(vector):
    """Drop zero entries from a 1-D sequence and return the rest as ints.

    :param vector: array-like of numbers (assumed 1-D).
    :return: np.ndarray of the non-zero entries, truncated to int.
    """
    values = np.asarray(vector)
    kept = [int(value) for value in values if value != 0]
    return np.asarray(kept)
def padRemoverInt(tensor):
    '''
    Remove padding from a tensor before sending to network
    Ex: input = [[1, 2], [0, 0], [0, 0], [3, 4], [5, 6]]
        output = [[1, 2], [3, 4], [5, 6]]
    Assumes rows are either entirely zero (padding) or entirely non-zero;
    otherwise the reshape below would misalign elements.
    :param tensor: Tensor from which to remove zero slices
    :return: Tensor with padding rows removed
    '''
    # Indices of every non-zero element (rank-2 indices for a matrix).
    nonpad_ids = tf.to_int32(tf.where(tensor > 0))
    tensor_shape = tensor.get_shape().as_list()
    # Gather the surviving scalars, then restore the trailing dimensions.
    result = tf.gather_nd(tensor, indices=nonpad_ids)
    result = tf.reshape(result, shape=[-1] + tensor_shape[1:])
    return result
def countNonZero1d_np(arr):
    """Count the non-zero entries of a 1-D sequence.

    :param arr: array-like of numbers.
    :return: int count of elements different from zero.
    """
    # np.count_nonzero performs the element test and reduction in C,
    # replacing the manual O(n) Python loop.
    return int(np.count_nonzero(np.asarray(arr)))
def findOptimal(result, all_loss, conf):
    """Report the best evaluation epoch for each metric and plot curves.

    :param result: rows of [precision@5, precision@10, recall@5, recall@10],
        one row per evaluation.
    :param all_loss: per-evaluation loss values.
    :param conf: config object with `epochs` and `eval_epoch` attributes.
    """
    result = np.asarray(result, dtype=np.float32)
    # Best value seen so far and its 1-based evaluation index, per metric.
    best_pre5 = 0
    epoch_best_pre5 = 0
    best_pre10 = 0
    epoch_best_pre10 = 0
    best_rec5 = 0
    epoch_best_rec5 = 0
    best_rec10 = 0
    epoch_best_rec10 = 0
    for i in range(result.shape[0]):
        cur_result = result[i]
        if cur_result[0] > best_pre5:
            best_pre5 = cur_result[0]
            epoch_best_pre5 = i + 1
        if cur_result[1] > best_pre10:
            best_pre10 = cur_result[1]
            epoch_best_pre10 = i + 1
        if cur_result[2] > best_rec5:
            best_rec5 = cur_result[2]
            epoch_best_rec5 = i + 1
        if cur_result[3] > best_rec10:
            best_rec10 = cur_result[3]
            epoch_best_rec10 = i + 1
    # The parenthesised number is the total count of evaluations performed.
    print('\n\n')
    print('Optimal precision5 and epoch:  ', result[epoch_best_pre5 - 1], ' ', epoch_best_pre5, '(',
          int(conf.epochs / conf.eval_epoch), ')')
    print('Optimal precision10 and epoch:  ', result[epoch_best_pre10 - 1], ' ', epoch_best_pre10, '(',
          int(conf.epochs / conf.eval_epoch), ')')
    print('Optimal recall5 and epoch:  ', result[epoch_best_rec5 - 1], ' ', epoch_best_rec5, '(',
          int(conf.epochs / conf.eval_epoch), ')')
    print('Optimal recall10 and epoch:  ', result[epoch_best_rec10 - 1], ' ', epoch_best_rec10, '(',
          int(conf.epochs / conf.eval_epoch), ')')
    # Plot the loss curve followed by each metric curve.
    all_loss = np.asarray(all_loss, dtype=np.float32)
    drawFigure(all_loss, 'Loss tendency.', 'VSM')
    drawFigure(result[:, 0], 'Precision@5', 'VSM')
    drawFigure(result[:, 1], 'Precision@10', 'VSM')
    drawFigure(result[:, 2], 'Recall@5', 'VSM')
    drawFigure(result[:, 3], 'Recall@10', 'VSM')
def drawFigure(losses, title, model):
    """Plot a per-epoch metric curve and display it.

    :param losses: sequence of per-epoch values.
    :param title: y-axis label describing the metric.
    :param model: figure title (model name).
    """
    values = np.asarray(losses)
    epochs = np.arange(1, values.size + 1)
    plt.xlim(1, values.size)
    plt.title(model)
    plt.xlabel('Epoch')
    plt.ylabel(title)
    plt.grid(True)
    plt.plot(epochs, values)
    plt.show()
| [
"matplotlib.pyplot.title",
"tensorflow.gather_nd",
"tensorflow.reshape",
"tensorflow.setdiff1d",
"tensorflow.matmul",
"tensorflow.multiply",
"tensorflow.divide",
"numpy.arange",
"numpy.mean",
"random.randint",
"tensorflow.size",
"numpy.std",
"tensorflow.matrix_diag",
"tensorflow.cast",
"... | [((435, 472), 'tensorflow.setdiff1d', 'tf.setdiff1d', (['recommend_ids', 'trainids'], {}), '(recommend_ids, trainids)\n', (447, 472), True, 'import tensorflow as tf\n'), ((2005, 2025), 'tensorflow.divide', 'tf.divide', (['dcg', 'idcg'], {}), '(dcg, idcg)\n', (2014, 2025), True, 'import tensorflow as tf\n'), ((2225, 2262), 'tensorflow.setdiff1d', 'tf.setdiff1d', (['image_id_list', 'trainids'], {}), '(image_id_list, trainids)\n', (2237, 2262), True, 'import tensorflow as tf\n'), ((2609, 2654), 'tensorflow.divide', 'tf.divide', (['(pos_sum - (M + 1) * M / 2.0)', '(M * N)'], {}), '(pos_sum - (M + 1) * M / 2.0, M * N)\n', (2618, 2654), True, 'import tensorflow as tf\n'), ((3355, 3378), 'tensorflow.matrix_diag', 'tf.matrix_diag', (['weights'], {}), '(weights)\n', (3369, 3378), True, 'import tensorflow as tf\n'), ((3398, 3436), 'tensorflow.cast', 'tf.cast', (['dia_weights'], {'dtype': 'tf.float32'}), '(dia_weights, dtype=tf.float32)\n', (3405, 3436), True, 'import tensorflow as tf\n'), ((3452, 3480), 'tensorflow.cast', 'tf.cast', (['tensors', 'tf.float32'], {}), '(tensors, tf.float32)\n', (3459, 3480), True, 'import tensorflow as tf\n'), ((3562, 3593), 'tensorflow.matmul', 'tf.matmul', (['dia_weights', 'tensors'], {}), '(dia_weights, tensors)\n', (3571, 3593), True, 'import tensorflow as tf\n'), ((4061, 4081), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (4076, 4081), False, 'from PIL import Image\n'), ((4094, 4114), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4108, 4114), False, 'import random\n'), ((4304, 4317), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4312, 4317), True, 'import numpy as np\n'), ((4411, 4426), 'numpy.array', 'np.array', (['img_t'], {}), '(img_t)\n', (4419, 4426), True, 'import numpy as np\n'), ((4507, 4525), 'numpy.asarray', 'np.asarray', (['vector'], {}), '(vector)\n', (4517, 4525), True, 'import numpy as np\n'), ((4643, 4661), 'numpy.asarray', 'np.asarray', (['result'], {}), 
'(result)\n', (4653, 4661), True, 'import numpy as np\n'), ((5081, 5121), 'tensorflow.gather_nd', 'tf.gather_nd', (['tensor'], {'indices': 'nonpad_ids'}), '(tensor, indices=nonpad_ids)\n', (5093, 5121), True, 'import tensorflow as tf\n'), ((5136, 5185), 'tensorflow.reshape', 'tf.reshape', (['result'], {'shape': '([-1] + tensor_shape[1:])'}), '(result, shape=[-1] + tensor_shape[1:])\n', (5146, 5185), True, 'import tensorflow as tf\n'), ((5428, 5464), 'numpy.asarray', 'np.asarray', (['result'], {'dtype': 'np.float32'}), '(result, dtype=np.float32)\n', (5438, 5464), True, 'import numpy as np\n'), ((6834, 6872), 'numpy.asarray', 'np.asarray', (['all_loss'], {'dtype': 'np.float32'}), '(all_loss, dtype=np.float32)\n', (6844, 6872), True, 'import numpy as np\n'), ((7178, 7196), 'numpy.asarray', 'np.asarray', (['losses'], {}), '(losses)\n', (7188, 7196), True, 'import numpy as np\n'), ((7206, 7230), 'numpy.arange', 'np.arange', (['(1)', '(y.size + 1)'], {}), '(1, y.size + 1)\n', (7215, 7230), True, 'import numpy as np\n'), ((7234, 7253), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', 'y.size'], {}), '(1, y.size)\n', (7242, 7253), True, 'import matplotlib.pyplot as plt\n'), ((7259, 7275), 'matplotlib.pyplot.title', 'plt.title', (['model'], {}), '(model)\n', (7268, 7275), True, 'import matplotlib.pyplot as plt\n'), ((7281, 7300), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (7291, 7300), True, 'import matplotlib.pyplot as plt\n'), ((7306, 7323), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['title'], {}), '(title)\n', (7316, 7323), True, 'import matplotlib.pyplot as plt\n'), ((7329, 7343), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7337, 7343), True, 'import matplotlib.pyplot as plt\n'), ((7349, 7363), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (7357, 7363), True, 'import matplotlib.pyplot as plt\n'), ((7369, 7379), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7377, 7379), True, 
'import matplotlib.pyplot as plt\n'), ((828, 865), 'tensorflow.setdiff1d', 'tf.setdiff1d', (['groundtruth', 'prediction'], {}), '(groundtruth, prediction)\n', (840, 865), True, 'import tensorflow as tf\n'), ((885, 905), 'tensorflow.size', 'tf.size', (['groundtruth'], {}), '(groundtruth)\n', (892, 905), True, 'import tensorflow as tf\n'), ((908, 923), 'tensorflow.size', 'tf.size', (['nonhit'], {}), '(nonhit)\n', (915, 923), True, 'import tensorflow as tf\n'), ((948, 982), 'tensorflow.cast', 'tf.cast', (['hit_num'], {'dtype': 'tf.float32'}), '(hit_num, dtype=tf.float32)\n', (955, 982), True, 'import tensorflow as tf\n'), ((1106, 1143), 'tensorflow.setdiff1d', 'tf.setdiff1d', (['groundtruth', 'prediction'], {}), '(groundtruth, prediction)\n', (1118, 1143), True, 'import tensorflow as tf\n'), ((1163, 1183), 'tensorflow.size', 'tf.size', (['groundtruth'], {}), '(groundtruth)\n', (1170, 1183), True, 'import tensorflow as tf\n'), ((1186, 1201), 'tensorflow.size', 'tf.size', (['nonhit'], {}), '(nonhit)\n', (1193, 1201), True, 'import tensorflow as tf\n'), ((1226, 1260), 'tensorflow.cast', 'tf.cast', (['hit_num'], {'dtype': 'tf.float32'}), '(hit_num, dtype=tf.float32)\n', (1233, 1260), True, 'import tensorflow as tf\n'), ((1690, 1727), 'tensorflow.setdiff1d', 'tf.setdiff1d', (['prediction', 'groundtruth'], {}), '(prediction, groundtruth)\n', (1702, 1727), True, 'import tensorflow as tf\n'), ((1743, 1777), 'tensorflow.setdiff1d', 'tf.setdiff1d', (['prediction', 'non_hits'], {}), '(prediction, non_hits)\n', (1755, 1777), True, 'import tensorflow as tf\n'), ((1803, 1818), 'tensorflow.add', 'tf.add', (['hits', '(2)'], {}), '(hits, 2)\n', (1809, 1818), True, 'import tensorflow as tf\n'), ((2284, 2330), 'tensorflow.setdiff1d', 'tf.setdiff1d', (['no_train_ids_sorted', 'groundtruth'], {}), '(no_train_ids_sorted, groundtruth)\n', (2296, 2330), True, 'import tensorflow as tf\n'), ((2351, 2399), 'tensorflow.setdiff1d', 'tf.setdiff1d', (['no_train_ids_sorted', 'residual_part'], {}), 
'(no_train_ids_sorted, residual_part)\n', (2363, 2399), True, 'import tensorflow as tf\n'), ((2498, 2518), 'tensorflow.size', 'tf.size', (['groundtruth'], {}), '(groundtruth)\n', (2505, 2518), True, 'import tensorflow as tf\n'), ((2555, 2577), 'tensorflow.size', 'tf.size', (['residual_part'], {}), '(residual_part)\n', (2562, 2577), True, 'import tensorflow as tf\n'), ((4996, 5016), 'tensorflow.where', 'tf.where', (['(tensor > 0)'], {}), '(tensor > 0)\n', (5004, 5016), True, 'import tensorflow as tf\n'), ((992, 1011), 'tensorflow.size', 'tf.size', (['prediction'], {}), '(prediction)\n', (999, 1011), True, 'import tensorflow as tf\n'), ((1270, 1290), 'tensorflow.size', 'tf.size', (['groundtruth'], {}), '(groundtruth)\n', (1277, 1290), True, 'import tensorflow as tf\n'), ((1618, 1632), 'tensorflow.log', 'tf.log', (['range_'], {}), '(range_)\n', (1624, 1632), True, 'import tensorflow as tf\n'), ((1634, 1647), 'tensorflow.log', 'tf.log', (['range'], {}), '(range)\n', (1640, 1647), True, 'import tensorflow as tf\n'), ((1961, 1975), 'tensorflow.log', 'tf.log', (['ranks_'], {}), '(ranks_)\n', (1967, 1975), True, 'import tensorflow as tf\n'), ((1977, 1990), 'tensorflow.log', 'tf.log', (['ranks'], {}), '(ranks)\n', (1983, 1990), True, 'import tensorflow as tf\n'), ((2441, 2460), 'tensorflow.add', 'tf.add', (['rank_pos', '(1)'], {}), '(rank_pos, 1)\n', (2447, 2460), True, 'import tensorflow as tf\n'), ((4342, 4358), 'numpy.mean', 'np.mean', (['img_arr'], {}), '(img_arr)\n', (4349, 4358), True, 'import numpy as np\n'), ((4366, 4381), 'numpy.std', 'np.std', (['img_arr'], {}), '(img_arr)\n', (4372, 4381), True, 'import numpy as np\n'), ((5272, 5285), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (5280, 5285), True, 'import numpy as np\n'), ((336, 353), 'tensorflow.size', 'tf.size', (['trainids'], {}), '(trainids)\n', (343, 353), True, 'import tensorflow as tf\n'), ((2122, 2156), 'tensorflow.multiply', 'tf.multiply', (['scores_with_train', '(-1)'], {}), 
'(scores_with_train, -1)\n', (2133, 2156), True, 'import tensorflow as tf\n'), ((2159, 2185), 'tensorflow.size', 'tf.size', (['scores_with_train'], {}), '(scores_with_train)\n', (2166, 2185), True, 'import tensorflow as tf\n'), ((1443, 1463), 'tensorflow.size', 'tf.size', (['groundtruth'], {}), '(groundtruth)\n', (1450, 1463), True, 'import tensorflow as tf\n'), ((1540, 1560), 'tensorflow.size', 'tf.size', (['groundtruth'], {}), '(groundtruth)\n', (1547, 1560), True, 'import tensorflow as tf\n'), ((1890, 1904), 'tensorflow.size', 'tf.size', (['ranks'], {}), '(ranks)\n', (1897, 1904), True, 'import tensorflow as tf\n')] |
import io
import cv2
import numpy as np
from PIL import Image
import skvideo.io
from tqdm import tqdm
import argparse
from utils import process_image
# Command-line interface: input/output video paths and an optional
# cap on the number of frames to process.
parser = argparse.ArgumentParser()
parser.add_argument("--input", dest='input', type=str, default="test.mp4")
parser.add_argument("--output", dest='output', type=str, default="output.mp4")
parser.add_argument("--limit", dest='limit', type=int, default=None)
args = parser.parse_args()
# Probe the container metadata and open a streaming frame reader.
probe = skvideo.io.ffprobe(args.input)
videogen = skvideo.io.vreader(args.input)
# H.264 / yuv420p output for broad player compatibility.
writer = skvideo.io.FFmpegWriter(args.output, outputdict={
    '-vcodec': 'libx264',
    '-pix_fmt': 'yuv420p',
})
total = int(probe['video']['@nb_frames'])
# Process at most --limit frames (all of them when the flag is absent).
maximum = args.limit if args.limit else total
current = 0
for frame in tqdm(videogen, total=maximum):
    # Normalize each frame to RGB before handing it to process_image.
    image = Image.fromarray(frame).convert('RGB')
    result_array = process_image(np.asarray(image))
    writer.writeFrame(result_array)
    current += 1
    if current == maximum:
        break
# Flush and finalize the output container.
writer.close()
| [
"PIL.Image.fromarray",
"numpy.asarray",
"tqdm.tqdm",
"argparse.ArgumentParser"
] | [((161, 186), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (184, 186), False, 'import argparse\n'), ((749, 778), 'tqdm.tqdm', 'tqdm', (['videogen'], {'total': 'maximum'}), '(videogen, total=maximum)\n', (753, 778), False, 'from tqdm import tqdm\n'), ((863, 880), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (873, 880), True, 'import numpy as np\n'), ((792, 814), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (807, 814), False, 'from PIL import Image\n')] |
#!/usr/bin/env python3
from sys import argv
import re
import struct
from numpy import array
def readsepheader(filename):
    """Parse a SEP header file into a dict.

    Scans every whitespace-separated token for ``key=value`` pairs, then
    coerces the fields this tool relies on: ``esize`` to int, ``in`` and
    ``data_format`` unquoted, and the axis fields ``n1..n3`` (int),
    ``d1..d3`` / ``o1..o3`` (float).

    :param filename: path to the SEP header file.
    :return: dict of header fields with coerced types.
    :raises KeyError: if a required field is missing from the header.
    """
    header = {}
    # Context manager closes the handle (the original leaked it).
    with open(filename, "r") as f:
        for line in f:
            for token in line.split():
                match = re.match("(.+)=(.+)", token)
                if match is not None:
                    header[match.group(1)] = match.group(2)
    header['esize'] = int(header['esize'])
    header['in'] = header['in'].strip("\"")
    header['data_format'] = header['data_format'].strip("\"")
    # Coerce the per-axis size / sampling / origin fields.
    for k in range(1, 4):
        header['n' + str(k)] = int(header['n' + str(k)])
        header['d' + str(k)] = float(header['d' + str(k)])
        header['o' + str(k)] = float(header['o' + str(k)])
    return header
def by4(f):
    """Yield successive 4-byte records from binary file object f until EOF."""
    while True:
        rec = f.read(4)
        if not rec:
            break
        yield rec
def readsepdata(filename, n1, n2, n3):
    """Read native 32-bit floats from a SEP binary data file.

    :param filename: path to the raw float32 data file.
    :param n1: fastest axis length.
    :param n2: middle axis length.
    :param n3: slowest axis length.
    :return: float64 ndarray of shape (n3, n2, n1).
    """
    # Local import: the file-level import only brings in numpy.array.
    import numpy as np
    # np.fromfile replaces the 4-bytes-at-a-time struct.unpack loop with
    # a single C-level read (same values: float32 -> float64 is exact).
    data = np.fromfile(filename, dtype=np.float32)
    return data.astype(np.float64).reshape(n3, n2, n1)
def writeascii(filename, header, a):
    """Write the grid description as ASCII, one value per line, in the
    order n1 n2 n3 d1 d2 d3 o1 o2 o3.

    :param filename: output path.
    :param header: dict containing keys n1..n3, d1..d3, o1..o3.
    :param a: data cube; accepted for interface compatibility but not
        written here (only the header values are emitted).
    """
    keys = ['n1', 'n2', 'n3', 'd1', 'd2', 'd3', 'o1', 'o2', 'o3']
    # Context manager guarantees the file is flushed and closed even if
    # a write fails (the original relied on an explicit close()).
    with open(filename, 'w') as f:
        f.writelines("{}\n".format(header[key]) for key in keys)
if __name__=="__main__":
    # argv[1] is the SEP header file; the header's 'in' field names the
    # binary data file to load.
    hdr=readsepheader(argv[1])
    a=readsepdata(hdr['in'],hdr['n1'],hdr['n2'],hdr['n3'])
    # NOTE(review): /dev/shm is Linux-specific shared memory -- this
    # script will fail on other platforms.
    writeascii("/dev/shm/temp.file",hdr,a)
| [
"struct.unpack",
"numpy.array",
"re.match"
] | [((272, 296), 're.match', 're.match', (['"""(.+)=(.+)"""', 'i'], {}), "('(.+)=(.+)', i)\n", (280, 296), False, 'import re\n'), ((1145, 1168), 'struct.unpack', 'struct.unpack', (['"""f"""', 'rec'], {}), "('f', rec)\n", (1158, 1168), False, 'import struct\n'), ((1212, 1220), 'numpy.array', 'array', (['a'], {}), '(a)\n', (1217, 1220), False, 'from numpy import array\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pyprobml_utils import save_fig
# Demonstrate the change-of-variables formula: X ~ Uniform(-1, 1),
# Y = X^2, for which p_Y(y) = 1 / (2*sqrt(y)) on (0, 1].
xs = np.linspace(-1,1,21)
a = -1
b = 1
# Uniform density on [a, b].
px = 1/(b-a) * np.ones(len(xs))
fn = lambda x: x**2
ys = fn(xs)
#analytic
# NOTE(review): xs contains 0, so ys contains 0 and this division
# produces an inf/divide-by-zero warning at that point.
ppy = 1/(2*np.sqrt(ys))
#monte carlo
n = 1000
samples = np.random.uniform(a,b, size=n)
samples2 = fn(samples)
# Three panels: source density, analytic transformed density,
# Monte-Carlo histogram of the transformed samples.
fig, ax = plt.subplots(nrows=1, ncols=3)
ax[0].plot(xs, px, "-")
ax[1].plot(ys, ppy, "-")
sns.distplot(samples2, kde=False, ax=ax[2], bins=20, norm_hist=True, hist_kws=dict(edgecolor="k", linewidth=0.5))
save_fig("changeOfVars.pdf")
plt.show()
| [
"numpy.random.uniform",
"pyprobml_utils.save_fig",
"matplotlib.pyplot.show",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] | [((115, 137), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(21)'], {}), '(-1, 1, 21)\n', (126, 137), True, 'import numpy as np\n'), ((284, 315), 'numpy.random.uniform', 'np.random.uniform', (['a', 'b'], {'size': 'n'}), '(a, b, size=n)\n', (301, 315), True, 'import numpy as np\n'), ((349, 379), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)'}), '(nrows=1, ncols=3)\n', (361, 379), True, 'import matplotlib.pyplot as plt\n'), ((544, 572), 'pyprobml_utils.save_fig', 'save_fig', (['"""changeOfVars.pdf"""'], {}), "('changeOfVars.pdf')\n", (552, 572), False, 'from pyprobml_utils import save_fig\n'), ((573, 583), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (581, 583), True, 'import matplotlib.pyplot as plt\n'), ((238, 249), 'numpy.sqrt', 'np.sqrt', (['ys'], {}), '(ys)\n', (245, 249), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri May 7 20:15:19 2021
@author: Christian
"""
import hysteresis as hys
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import Button
# from matplotlib.animation import FuncAnimation
import numpy as np
# Add this function to somewhere else
def init_Animation():
    """Create and return a fresh (figure, axes) pair for an animation."""
    return plt.subplots()
def getAnixy(rawFrames, skipFrames):
    """Downsample a 2-D frame array, keeping every skipFrames-th row."""
    return rawFrames[::skipFrames, :]
def getAniFrames(x, targetdx):
    """
    Return a frame value for (roughly) every targetdx step of x.
    Useful for pre-processing data recorded with a variable timestep.
    No linear interpolation is used for intermediate frames: the current
    sample is repeated once for every targetdx interval it covers.
    """
    frames = []
    step = 0
    for value in x:
        while step * targetdx < value:
            frames.append(value)
            step += 1
    return np.array(frames)
class AnimationBase:
    """Shared plumbing for hysteresis animations: owns the figure/axes
    pair and wires mouse clicks to the subclass-provided pause toggle."""
    def __init__(self):
        # True while the animation should be advancing.
        self.play = True
        # Replace with imported function
        fig, ax = init_Animation()
        # Connect clicking
        # fig.canvas.mpl_connect('button_press_event', self.on_click)
        fig.canvas.mpl_connect('button_press_event', self.toggle_pause)
        self.fig = fig
        self.ax = ax
    def togglePlay(self):
        # Flip the play flag (True <-> False).
        self.play = self.play == False
    def on_click(self, event):
        # Return the pixel coordinates of a mouse-click event.
        xclick = event.x
        yclick = event.y
        return xclick, yclick
        # print(xclick, yclick)
    # To be overwritten
    def toggle_pause(self,event, *args, **kwargs):
        # Subclasses hook play/pause behaviour here; the base is a no-op.
        pass
# # Check where the click happened
# (xm,ym),(xM,yM) = plotSlider.label.clipbox.get_points()
# if xm < event.x < xM and ym < event.y < yM:
# # Event happened within the slider, ignore since it is handled in update_slider
# return
# else:
# # Toggle on off based on clicking
# global is_paused
# if is_paused == True:
# is_paused=False
# elif is_paused == False:
# is_paused=True
# class FrameHelper():
# def __init__(self, pointsPerFrame = 1, skipFrames = 1, skipStart = 0, skipEnd = 0):
# self.pointsPerFrame = pointsPerFrame
class Animation(AnimationBase):
    """Animate a single hysteresis curve being traced out point by point.

    NOTE(review): skipStart and skipEnd are accepted but currently unused.
    """
    def __init__(self, Curve, pointsPerFrame = 1, skipFrames = 1, skipStart = 0, skipEnd = 0, interval = 50):
        super().__init__()
        self.Curve = Curve
        self.xy = Curve.xy
        self.pointsPerFrame = pointsPerFrame
        self.interval = interval
        # Keep only every skipFrames-th xy point for the animation.
        xyAni = getAnixy(Curve.xy, skipFrames)
        self.xyAni = xyAni
        # self.xyAni = self.xy
        self.Nframes = int(len(self.xyAni) / pointsPerFrame)
        self.frames = np.arange(self.Nframes)
        self.lines = []
        # self.fig.canvas.mpl_connect('button_press_event', self.toggle_pause)
    # def setAniXY()
    def initfunc(self):
        # Create the line artist, initially showing the full curve.
        line = plt.plot(self.xyAni[:,0], self.xyAni[:,1])[0]
        self.lines.append(line) # def initAnimation(self):
        return line,
    def update(self, frame):
        # Reveal the first frame*pointsPerFrame points of the curve.
        points = int(frame*self.pointsPerFrame)
        newXY = self.xyAni[:points,:]
        line = self.lines[0]
        line.set_data(newXY[:,0], newXY[:,1])
        return line,
    def Animate(self):
        # Start the animation on this object's figure.
        self.ani = animation.FuncAnimation(self.fig, self.update, self.frames, self.initfunc,
                                      interval=self.interval, blit=True)
    # def toggle_pause(self, *args, **kwargs):
    #     self.togglePlay()
    #     if self.play:
    #         self.ani.resume()
    #     else:
    #         self.ani.pause()
class JointAnimation(AnimationBase):
    """Animate several hysteresis curves being traced out simultaneously.

    The frame count is derived from the first curve's (downsampled)
    length; all curves are assumed to have compatible lengths.
    NOTE(review): skipStart and skipEnd are accepted but currently unused.
    """
    def __init__(self, Curves, pointsPerFrame = 1, skipFrames = 1, skipStart = 0, skipEnd = 0, interval = 50):
        super().__init__()
        self.pointsPerFrame = pointsPerFrame
        self.interval = interval
        self.Curves = Curves
        self.Ncurves = len(Curves)
        xyAni = getAnixy(Curves[0].xy, skipFrames)
        self.Nframes = int(len(xyAni) / pointsPerFrame)
        self.frames = np.arange(self.Nframes)
        self.xyCurves = [None]*self.Ncurves
        self.lines = []
        # Store the downsampled xy data of every curve.
        for ii in range(self.Ncurves):
            curve = self.Curves[ii]
            self.xyCurves[ii] = getAnixy(curve.xy, skipFrames)
    def initfunc(self):
        # One line artist per curve, initially showing the full data.
        for ii in range(self.Ncurves):
            tempXY = self.xyCurves[ii]
            line = plt.plot(tempXY[:,0], tempXY[:,1])[0]
            self.lines.append(line)
        return self.lines
    def update(self, frame):
        # Reveal the first frame*pointsPerFrame points of every curve.
        points = int(frame*self.pointsPerFrame)
        lines = [None]*self.Ncurves
        for ii in range(self.Ncurves):
            tempXY = self.xyCurves[ii]
            newXY = tempXY[:points,:]
            line = self.lines[ii]
            line.set_data(newXY[:,0], newXY[:,1])
            lines[ii] = line
        return lines
    def Animate(self):
        # Start the animation on this object's figure.
        # Bug fix: the frame delay previously hard-coded 50 ms, silently
        # ignoring the `interval` argument stored by the constructor.
        self.ani = animation.FuncAnimation(self.fig, self.update, self.frames, self.initfunc,
                                      interval=self.interval, blit=True)
# axSlider = plt.axes([0.25, .03, 0.50, 0.02])
# plotSlider = Slider(axSlider, 'Time', framesTime[FrameStart], framesTime[FrameEnd], valinit=framesTime[FrameStart])
# # Slider Location and size relative to plot
# # [x, y, xsize, ysize]
# axSlider = plt.axes([0.25, .03, 0.50, 0.02])
# plotSlider = Slider(axSlider, 'Time', framesTime[FrameStart], framesTime[FrameEnd], valinit=framesTime[FrameStart])
# # Animation controls
# global is_paused
# is_paused = False # True if user has taken control of the animation
# def on_click(event):
# # Check where the click happened
# (xm,ym),(xM,yM) = plotSlider.label.clipbox.get_points()
# if xm < event.x < xM and ym < event.y < yM:
# # Event happened within the slider, ignore since it is handled in update_slider
# return
# else:
# # Toggle on off based on clicking
# global is_paused
# if is_paused == True:
# is_paused=False
# elif is_paused == False:
# is_paused=True
# def animate2D_slider(Time):
# """
# The slider value is liked with the plot - we update the plot by updating
# the slider.
# """
# global is_paused
# is_paused=True
# # Convert time to frame
# TimeStep = (np.abs(framesTime - Time)).argmin()
# # The current node coordinants in (x,y) or (x,y,z)
# CurrentNodeCoords = nodes[:,1:] + Disp[TimeStep,:,:]
# # Update Plots
# # update node locations
# EqfigNodes.set_xdata(CurrentNodeCoords[:,0])
# EqfigNodes.set_ydata(CurrentNodeCoords[:,1])
# # Get new node mapping
# # I don't like doing this loop every time - there has to be a faster way
# xy_labels = {}
# for jj in range(Nnodes):
# xy_labels[nodeLabels[jj]] = CurrentNodeCoords[jj,:]
# # Define the surface
# SurfCounter = 0
# # update element locations
# for jj in range(Nele):
# # Get the node number for the first and second node connected by the element
# TempNodes = elements[jj][1:]
# # This is the xy coordinates of each node in the group
# TempNodeCoords = [xy_labels[node] for node in TempNodes]
# coords_x = [xy[0] for xy in TempNodeCoords]
# coords_y = [xy[1] for xy in TempNodeCoords]
# # Update element lines
# EqfigLines[jj].set_xdata(coords_x)
# EqfigLines[jj].set_ydata(coords_y)
# # print('loop start')
# # Update the surface if necessary
# if 2 < len(TempNodes):
# tempxy = np.column_stack([coords_x, coords_y])
# EqfigSurfaces[SurfCounter].xy = tempxy
# SurfCounter += 1
# # update time Text
# # time_text.set_text("Time= "+'%.2f' % time[TimeStep]+ " s")
# # redraw canvas while idle
# fig.canvas.draw_idle()
# return EqfigNodes, EqfigLines, EqfigSurfaces, EqfigText
# Saving
# if Movie != "none":
# MovefileName = Movie + '.mp4'
# ODBdir = Model+"_ODB" # ODB Dir name
# Movfile = os.path.join(ODBdir, LoadCase, MovefileName)
# print("Saving the animation movie as "+MovefileName+" in "+ODBdir+"->"+LoadCase+" folder")
# ani.save(Movfile, writer='ffmpeg')
| [
"matplotlib.pyplot.plot",
"matplotlib.animation.FuncAnimation",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.subplots"
] | [((370, 384), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (382, 384), True, 'import matplotlib.pyplot as plt\n'), ((945, 965), 'numpy.array', 'np.array', (['NframesOut'], {}), '(NframesOut)\n', (953, 965), True, 'import numpy as np\n'), ((2977, 3000), 'numpy.arange', 'np.arange', (['self.Nframes'], {}), '(self.Nframes)\n', (2986, 3000), True, 'import numpy as np\n'), ((3649, 3762), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.update', 'self.frames', 'self.initfunc'], {'interval': 'self.interval', 'blit': '(True)'}), '(self.fig, self.update, self.frames, self.initfunc,\n interval=self.interval, blit=True)\n', (3672, 3762), True, 'import matplotlib.animation as animation\n'), ((4480, 4503), 'numpy.arange', 'np.arange', (['self.Nframes'], {}), '(self.Nframes)\n', (4489, 4503), True, 'import numpy as np\n'), ((5816, 5918), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.update', 'self.frames', 'self.initfunc'], {'interval': '(50)', 'blit': '(True)'}), '(self.fig, self.update, self.frames, self.initfunc,\n interval=50, blit=True)\n', (5839, 5918), True, 'import matplotlib.animation as animation\n'), ((3193, 3237), 'matplotlib.pyplot.plot', 'plt.plot', (['self.xyAni[:, 0]', 'self.xyAni[:, 1]'], {}), '(self.xyAni[:, 0], self.xyAni[:, 1])\n', (3201, 3237), True, 'import matplotlib.pyplot as plt\n'), ((5084, 5120), 'matplotlib.pyplot.plot', 'plt.plot', (['tempXY[:, 0]', 'tempXY[:, 1]'], {}), '(tempXY[:, 0], tempXY[:, 1])\n', (5092, 5120), True, 'import matplotlib.pyplot as plt\n')] |
from keras import backend as K
import os
def set_keras_backend(backend):
    """Switch Keras to the given backend (e.g. 'cntk', 'tensorflow').

    Sets the KERAS_BACKEND environment variable and reloads keras.backend
    so the change takes effect, then verifies the switch succeeded.

    :param backend: name of the backend Keras should use.
    :raises AssertionError: if the backend did not actually change.
    """
    if K.backend() != backend:
        os.environ['KERAS_BACKEND'] = backend
        # importlib.reload exists on Python 3.4+; fall back to imp.reload
        # for 3.0-3.3.  (The original nested try/except re-imported the
        # same name and its NameError branch was unreachable.)
        try:
            from importlib import reload
        except ImportError:
            from imp import reload
        reload(K)
        assert K.backend() == backend
# Select the CNTK backend before any model code runs.
set_keras_backend("cntk")
# Use TensorFlow-style (channels-last) image dimension ordering.
K.set_image_dim_ordering('tf')
import pandas as pd
import numpy as np
from timeit import default_timer as timer
from keras.callbacks import ModelCheckpoint
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Input, Dense, Flatten, Embedding
from keras.layers.pooling import GlobalMaxPooling1D,MaxPooling1D
from keras.layers.convolutional import Convolution1D
from keras.layers.core import Lambda
from keras import optimizers
from keras.models import Model
from keras.regularizers import l1
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from sklearn import svm, metrics
from sklearn.model_selection import train_test_split
from IPython.display import SVG
import pydot
from keras.utils.vis_utils import model_to_dot
import re
import io
from nltk.tokenize import TweetTokenizer
from nltk.tokenize import RegexpTokenizer
import num2words
# Fix the RNG seed so runs are reproducible.
random_seed=1
np.random.seed(random_seed)
# NOTE(review): HOMEPATH is Windows-specific; this lookup raises
# KeyError on most non-Windows shells.
base_path = os.environ['HOMEPATH']
data_folder='data'
data_dir = os.path.join(base_path, data_folder)
embedding_folder = os.path.join(base_path, 'vectors')
model_identifier = 'SSWE_Basic_Keras_w_CNTK'
if not os.path.exists(embedding_folder):
    os.makedirs(embedding_folder)
# Model / training hyperparameters.
max_sequence_length = 15 # each sentence of the input should be padded to have at least this many tokens
embedding_dim = 50 # Embedding layer size
no_filters = 15 # No of filters for the convolution layer
filter_size = 5 # Filter size for the convolution layer
trainable = True # flag specifying whether the embedding layer weights should be changed during the training or not
batch_size = 64 # batch size can be increased to have better gpu utilization
no_epochs = 5 # No of training epochs
# Data preprocessing
pos_emoticons=["(^.^)","(^-^)","(^_^)","(^_~)","(^3^)","(^o^)","(~_^)","*)",":)",":*",":-*",":]",":^)",":}",
":>",":3",":b",":-b",":c)",":D",":-D",":O",":-O",":o)",":p",":-p",":P",":-P",":Þ",":-Þ",":X",
":-X",";)",";-)",";]",";D","^)","^.~","_)m"," ~.^","<=8","<3","<333","=)","=///=","=]","=^_^=",
"=<_<=","=>.<="," =>.>="," =3","=D","=p","0-0","0w0","8D","8O","B)","C:","d'-'","d(>w<)b",":-)",
"d^_^b","qB-)","X3","xD","XD","XP","ʘ‿ʘ","❤","💜","💚","💕","💙","💛","💓","💝","💖","💞",
"💘","💗","😗","😘","😙","😚","😻","😀","😁","😃","☺","😄","😆","😇","😉","😊","😋","😍",
"😎","😏","😛","😜","😝","😮","😸","😹","😺","😻","😼","👍"]
neg_emoticons=["--!--","(,_,)","(-.-)","(._.)","(;.;)9","(>.<)","(>_<)","(>_>)","(¬_¬)","(X_X)",":&",":(",":'(",
":-(",":-/",":-@[1]",":[",":\\",":{",":<",":-9",":c",":S",";(",";*(",";_;","^>_>^","^o)","_|_",
"`_´","</3","<=3","=/","=\\",">:(",">:-(","💔","☹️","😌","😒","😓","😔","😕","😖","😞","😟",
"😠","😡","😢","😣","😤","😥","😦","😧","😨","😩","😪","😫","😬","😭","😯","😰","😱","😲",
"😳","😴","😷","😾","😿","🙀","💀","👎"]
# Emails
emailsRegex=re.compile(r'[\w\.-]+@[\w\.-]+')
# Mentions
userMentionsRegex=re.compile(r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)')
# Urls.  Bug fix: the pattern was written as 'r(f|ht)...' -- the raw-
# string prefix ended up INSIDE a normal string, so http/ftp URLs only
# matched when preceded by a literal 'r'.  It may still not handle all
# the cases, like t.co without http.
urlsRegex=re.compile(r'(f|ht)(tp)(s?)(://)(.*)[.|/][^ ]+')
#Numerics
numsRegex=re.compile(r"\b\d+\b")
# Any single punctuation character sandwiched between word characters.
punctuationNotEmoticonsRegex=re.compile(r'(?<=\w)[^\s\w](?![^\s\w])')
# Map every emoticon to a sentiment-tagged token, e.g. ' POS_EMOTICON_ONE '.
emoticonsDict = {}
for i,each in enumerate(pos_emoticons):
    emoticonsDict[each]=' POS_EMOTICON_'+num2words.num2words(i).upper()+' '
for i,each in enumerate(neg_emoticons):
    emoticonsDict[each]=' NEG_EMOTICON_'+num2words.num2words(i).upper()+' '
# Build one alternation pattern over all (escaped) emoticons; `rep` maps
# each escaped form back to its replacement token.
rep = dict((re.escape(k), v) for k, v in emoticonsDict.items())
emoticonsPattern = re.compile("|".join(rep.keys()))
def read_data(filename):
    """Read raw tweets from a file and normalize them.

    Each line is lower-cased and stripped, emoticons are replaced with
    sentiment tokens, then mentions/emails/URLs/numbers/punctuation are
    replaced with placeholder tokens, runs of 3+ repeated characters are
    collapsed to 2, and the result is re-tokenized with TweetTokenizer.

    :param filename: path to the file with one tweet per line.
    :return: list of cleaned, space-joined token strings.
    """
    with open(filename, 'r') as f:
        all_lines=f.readlines()
        padded_lines=[]
        for line in all_lines:
            # Emoticons first, so punctuation stripping cannot mangle them.
            line = emoticonsPattern.sub(lambda m: rep[re.escape(m.group(0))], line.lower().strip())
            line = userMentionsRegex.sub(' USER ', line )
            line = emailsRegex.sub(' EMAIL ', line )
            line=urlsRegex.sub(' URL ', line)
            line=numsRegex.sub(' NUM ',line)
            line=punctuationNotEmoticonsRegex.sub(' PUN ',line)
            # Collapse elongated words, e.g. 'soooo' -> 'soo'.
            line=re.sub(r'(.)\1{2,}', r'\1\1',line)
            words_tokens=[token for token in TweetTokenizer().tokenize(line)]
            line= ' '.join(token for token in words_tokens )
            padded_lines.append(line)
        return padded_lines
def read_labels(filename):
    """Read one tweet label per line; the positive class (4) becomes 1."""
    labels = np.genfromtxt(filename, delimiter='\n')
    labels[labels == 4] = 1
    return labels
# Loading Training and Validation Data
texts = []
labels = []
nb_train_samples = 0
nb_valid_samples = 0
print ('Loading Training Labels')
train_labels=read_labels(data_dir+'\\training_label.csv')
print ('Loading Training data')
train_texts=read_data(data_dir+'//training_text.csv')
print (len(train_labels), len(train_texts))
print ("Using Keras tokenizer to tokenize and build word index")
tokenizer = Tokenizer(lower=False, filters='\n\t?"!')
train_texts=[each for each in train_texts]
tokenizer.fit_on_texts(train_texts)
sorted_voc = [wc[0] for wc in sorted(tokenizer.word_counts.items(),reverse=True, key= lambda x:x[1]) ]
tokenizer.word_index = dict(list(zip(sorted_voc, list(range(2, len(sorted_voc) + 2)))))
tokenizer.word_index['<PAD>']=0
tokenizer.word_index['<UNK>']=1
word_index = tokenizer.word_index
reverse_dictionary={v:k for (k,v) in tokenizer.word_index.items()}
vocab_size=len(tokenizer.word_index.keys())
print ('Size of the vocab is', vocab_size)
# Shuffling /Padding the data
print ('Padding sentences and shuffling the data')
sequences = tokenizer.texts_to_sequences(train_texts)
#Pad the sentences to have consistent length
data = pad_sequences(sequences, maxlen=max_sequence_length, padding='post')
labels = to_categorical(np.asarray(train_labels))
indices = np.arange(len(labels))
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
train_x, valid_x, train_y, valid_y=train_test_split(data, labels, test_size=0.2, random_state=random_seed)
train_x=np.array(train_x).astype('float32')
valid_x=np.array(valid_x).astype('float32')
train_y=np.array(train_y)
valid_y=np.array(valid_y)
embedding_matrix = np.zeros((len(word_index) , embedding_dim))
training_word_index=tokenizer.word_index.copy()
# Model Instantiation
print ('Initializing the model')
mcp = ModelCheckpoint('./model_chkpoint', monitor="val_acc", save_best_only=True, save_weights_only=False)
#Creating network
model = Sequential()
model.add(Embedding(len(word_index)+2,
embedding_dim,
input_length=max_sequence_length,
trainable=trainable, name='embedding'))
model.add(Convolution1D(no_filters, filter_size, activation='relu'))
model.add(MaxPooling1D(max_sequence_length - filter_size))
model.add(Flatten())
model.add(Dense(no_filters, activation='tanh'))
model.add(Dense(len(labels[0]), activation='softmax'))
optim=optimizers.Adam(lr=0.1, )
model.compile(loss='categorical_crossentropy',
optimizer=optim,
metrics=['acc'])
model.summary()
# Training
start=timer()
hist=model.fit(train_x, train_y,nb_epoch=no_epochs, batch_size=batch_size,validation_data=(valid_x, valid_y),callbacks=[mcp])
end=timer()
# Exporting the Embedding Matrix and Vocabulary
def export_embeddings(model_orig):
""" export embeddings to file"""
embedding_weights=pd.DataFrame(model_orig.layers[0].get_weights()[0]).reset_index()
word_indices_df=pd.DataFrame.from_dict(training_word_index,orient='index').reset_index()
word_indices_df.columns=['word','index']
print (word_indices_df.shape,embedding_weights.shape)
merged=pd.merge(word_indices_df,embedding_weights)
print (merged.shape)
merged=merged[[each for each in merged.columns if each!='index']]
merged.to_csv(embedding_folder+'//embeddings_{}.tsv'.format(model_identifier), sep='\t',
index=False, header=False,float_format='%.6f',encoding='utf-8')
return embedding_weights, word_indices_df, merged
embedding_weights, word_indices_df, merged_df=export_embeddings(model) | [
"imp.reload",
"numpy.random.seed",
"keras.preprocessing.sequence.pad_sequences",
"sklearn.model_selection.train_test_split",
"keras.backend.set_image_dim_ordering",
"keras.layers.pooling.MaxPooling1D",
"os.path.join",
"pandas.merge",
"os.path.exists",
"numpy.genfromtxt",
"keras.layers.Flatten",
... | [((552, 582), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (576, 582), True, 'from keras import backend as K\n'), ((1539, 1566), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1553, 1566), True, 'import numpy as np\n'), ((1633, 1669), 'os.path.join', 'os.path.join', (['base_path', 'data_folder'], {}), '(base_path, data_folder)\n', (1645, 1669), False, 'import os\n'), ((1690, 1724), 'os.path.join', 'os.path.join', (['base_path', '"""vectors"""'], {}), "(base_path, 'vectors')\n", (1702, 1724), False, 'import os\n'), ((3550, 3585), 're.compile', 're.compile', (['"""[\\\\w\\\\.-]+@[\\\\w\\\\.-]+"""'], {}), "('[\\\\w\\\\.-]+@[\\\\w\\\\.-]+')\n", (3560, 3585), False, 'import re\n'), ((3613, 3680), 're.compile', 're.compile', (['"""(?<=^|(?<=[^a-zA-Z0-9-_\\\\.]))@([A-Za-z]+[A-Za-z0-9]+)"""'], {}), "('(?<=^|(?<=[^a-zA-Z0-9-_\\\\.]))@([A-Za-z]+[A-Za-z0-9]+)')\n", (3623, 3680), False, 'import re\n'), ((3698, 3746), 're.compile', 're.compile', (['"""r(f|ht)(tp)(s?)(://)(.*)[.|/][^ ]+"""'], {}), "('r(f|ht)(tp)(s?)(://)(.*)[.|/][^ ]+')\n", (3708, 3746), False, 'import re\n'), ((3830, 3854), 're.compile', 're.compile', (['"""\\\\b\\\\d+\\\\b"""'], {}), "('\\\\b\\\\d+\\\\b')\n", (3840, 3854), False, 'import re\n'), ((3883, 3927), 're.compile', 're.compile', (['"""(?<=\\\\w)[^\\\\s\\\\w](?![^\\\\s\\\\w])"""'], {}), "('(?<=\\\\w)[^\\\\s\\\\w](?![^\\\\s\\\\w])')\n", (3893, 3927), False, 'import re\n'), ((5938, 5979), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'lower': '(False)', 'filters': '"""\n\t?"!"""'}), '(lower=False, filters=\'\\n\\t?"!\')\n', (5947, 5979), False, 'from keras.preprocessing.text import Tokenizer\n'), ((6695, 6763), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'maxlen': 'max_sequence_length', 'padding': '"""post"""'}), "(sequences, maxlen=max_sequence_length, padding='post')\n", (6708, 6763), False, 'from 
keras.preprocessing.sequence import pad_sequences\n'), ((6847, 6873), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (6864, 6873), True, 'import numpy as np\n'), ((6956, 7027), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(0.2)', 'random_state': 'random_seed'}), '(data, labels, test_size=0.2, random_state=random_seed)\n', (6972, 7027), False, 'from sklearn.model_selection import train_test_split\n'), ((7124, 7141), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (7132, 7141), True, 'import numpy as np\n'), ((7150, 7167), 'numpy.array', 'np.array', (['valid_y'], {}), '(valid_y)\n', (7158, 7167), True, 'import numpy as np\n'), ((7341, 7445), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""./model_chkpoint"""'], {'monitor': '"""val_acc"""', 'save_best_only': '(True)', 'save_weights_only': '(False)'}), "('./model_chkpoint', monitor='val_acc', save_best_only=True,\n save_weights_only=False)\n", (7356, 7445), False, 'from keras.callbacks import ModelCheckpoint\n'), ((7469, 7481), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7479, 7481), False, 'from keras.models import Sequential\n'), ((7953, 7976), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': '(0.1)'}), '(lr=0.1)\n', (7968, 7976), False, 'from keras import optimizers\n'), ((8122, 8129), 'timeit.default_timer', 'timer', ([], {}), '()\n', (8127, 8129), True, 'from timeit import default_timer as timer\n'), ((8260, 8267), 'timeit.default_timer', 'timer', ([], {}), '()\n', (8265, 8267), True, 'from timeit import default_timer as timer\n'), ((1778, 1810), 'os.path.exists', 'os.path.exists', (['embedding_folder'], {}), '(embedding_folder)\n', (1792, 1810), False, 'import os\n'), ((1816, 1845), 'os.makedirs', 'os.makedirs', (['embedding_folder'], {}), '(embedding_folder)\n', (1827, 1845), False, 'import os\n'), ((5413, 5452), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': 
'"""\n"""'}), "(filename, delimiter='\\n')\n", (5426, 5452), True, 'import numpy as np\n'), ((6788, 6812), 'numpy.asarray', 'np.asarray', (['train_labels'], {}), '(train_labels)\n', (6798, 6812), True, 'import numpy as np\n'), ((7704, 7761), 'keras.layers.convolutional.Convolution1D', 'Convolution1D', (['no_filters', 'filter_size'], {'activation': '"""relu"""'}), "(no_filters, filter_size, activation='relu')\n", (7717, 7761), False, 'from keras.layers.convolutional import Convolution1D\n'), ((7773, 7820), 'keras.layers.pooling.MaxPooling1D', 'MaxPooling1D', (['(max_sequence_length - filter_size)'], {}), '(max_sequence_length - filter_size)\n', (7785, 7820), False, 'from keras.layers.pooling import GlobalMaxPooling1D, MaxPooling1D\n'), ((7832, 7841), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7839, 7841), False, 'from keras.layers import Input, Dense, Flatten, Embedding\n'), ((7853, 7889), 'keras.layers.Dense', 'Dense', (['no_filters'], {'activation': '"""tanh"""'}), "(no_filters, activation='tanh')\n", (7858, 7889), False, 'from keras.layers import Input, Dense, Flatten, Embedding\n'), ((8684, 8728), 'pandas.merge', 'pd.merge', (['word_indices_df', 'embedding_weights'], {}), '(word_indices_df, embedding_weights)\n', (8692, 8728), True, 'import pandas as pd\n'), ((81, 92), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (90, 92), True, 'from keras import backend as K\n'), ((7036, 7053), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (7044, 7053), True, 'import numpy as np\n'), ((7080, 7097), 'numpy.array', 'np.array', (['valid_x'], {}), '(valid_x)\n', (7088, 7097), True, 'import numpy as np\n'), ((217, 226), 'imp.reload', 'reload', (['K'], {}), '(K)\n', (223, 226), False, 'from imp import reload\n'), ((502, 513), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (511, 513), True, 'from keras import backend as K\n'), ((4244, 4256), 're.escape', 're.escape', (['k'], {}), '(k)\n', (4253, 4256), False, 'import re\n'), ((5029, 
5065), 're.sub', 're.sub', (['"""(.)\\\\1{2,}"""', '"""\\\\1\\\\1"""', 'line'], {}), "('(.)\\\\1{2,}', '\\\\1\\\\1', line)\n", (5035, 5065), False, 'import re\n'), ((8497, 8556), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['training_word_index'], {'orient': '"""index"""'}), "(training_word_index, orient='index')\n", (8519, 8556), True, 'import pandas as pd\n'), ((360, 369), 'imp.reload', 'reload', (['K'], {}), '(K)\n', (366, 369), False, 'from imp import reload\n'), ((4025, 4047), 'num2words.num2words', 'num2words.num2words', (['i'], {}), '(i)\n', (4044, 4047), False, 'import num2words\n'), ((4146, 4168), 'num2words.num2words', 'num2words.num2words', (['i'], {}), '(i)\n', (4165, 4168), False, 'import num2words\n'), ((477, 486), 'imp.reload', 'reload', (['K'], {}), '(K)\n', (483, 486), False, 'from imp import reload\n'), ((5117, 5133), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {}), '()\n', (5131, 5133), False, 'from nltk.tokenize import TweetTokenizer\n')] |
#
# Copyright 2019-2020 <NAME>
# 2018, 2020 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import pytest
from muFFT import FFT
from NuMPI import MPI
from ContactMechanics import PeriodicFFTElasticHalfSpace
from NuMPI.Tools import Reduction
@pytest.fixture
def pnp(comm):
return Reduction(comm)
@pytest.fixture
def basenpoints(comm):
return (comm.Get_size() - 1) * 8
# Base number of points in order to avoid empty subdomains
# when using a lot of processors
@pytest.mark.parametrize("nx, ny", [(64, 33),
(65, 32),
(64, 64)])
# TODO: merge the serial test of the weights into this
def test_weights(comm, pnp, nx, ny,
basenpoints):
"""compares the MPI-Implemtation of the halfspace with the serial one"""
nx += basenpoints
ny += basenpoints
sx = 30.0
sy = 1.0
# equivalent Young's modulus
E_s = 1.0
substrate = PeriodicFFTElasticHalfSpace((nx, ny), E_s, (sx, sy),
fft='mpi', communicator=comm)
reference = PeriodicFFTElasticHalfSpace((nx, ny), E_s, (sx, sy),
fft="fftw",
communicator=MPI.COMM_SELF)
np.testing.assert_allclose(
reference.greens_function[substrate.fourier_slices],
substrate.greens_function, rtol=0, atol=1e-16,
err_msg="weights are different")
np.testing.assert_allclose(
reference.surface_stiffness[substrate.fourier_slices],
substrate.surface_stiffness, rtol=0, atol=1e-16,
err_msg="iweights are different")
@pytest.mark.parametrize("nx, ny", [(8, 15),
(8, 4),
(9, 4),
(113, 765)])
def test_sineWave_disp(comm, pnp, nx, ny, basenpoints):
"""
for given sinusoidal displacements, compares the pressures and the energies
to the analytical solutions
Special cases at the edges of the fourier domain are done
Parameters
----------
comm
pnp
fftengine_class
nx
ny
basenpoints
Returns
-------
"""
nx += basenpoints
ny += basenpoints
sx = 2.45 # 30.0
sy = 1.0
ATOL = 1e-10 * (nx * ny)
# equivalent Young's modulus
E_s = 1.0
for k in [(1, 0),
(0, 1),
(1, 2),
(nx // 2, 0),
(1, ny // 2),
(0, 2),
(nx // 2, ny // 2),
(0, ny // 2)]:
# print("testing wavevector ({}* np.pi * 2 / sx,
# {}* np.pi * 2 / sy) ".format(*k))
qx = k[0] * np.pi * 2 / sx
qy = k[1] * np.pi * 2 / sy
q = np.sqrt(qx ** 2 + qy ** 2)
Y, X = np.meshgrid(np.linspace(0, sy, ny + 1)[:-1],
np.linspace(0, sx, nx + 1)[:-1])
disp = np.cos(qx * X + qy * Y) + np.sin(qx * X + qy * Y)
refpressure = - disp * E_s / 2 * q
substrate = PeriodicFFTElasticHalfSpace((nx, ny), E_s, (sx, sy),
fft='mpi', communicator=comm)
fftengine = FFT((nx, ny), fft='mpi', communicator=comm)
fftengine.create_plan(1)
kpressure = substrate.evaluate_k_force(
disp[substrate.subdomain_slices]) / substrate.area_per_pt
expected_k_disp = np.zeros((nx // 2 + 1, ny), dtype=complex)
expected_k_disp[k[0], k[1]] += (.5 - .5j)*(nx * ny)
# add the symetrics
if k[0] == 0:
expected_k_disp[0, -k[1]] += (.5 + .5j)*(nx * ny)
if k[0] == nx // 2 and nx % 2 == 0:
expected_k_disp[k[0], -k[1]] += (.5 + .5j)*(nx * ny)
fft_disp = np.zeros(substrate.nb_fourier_grid_pts, order='f',
dtype=complex)
fftengine.fft(disp[substrate.subdomain_slices], fft_disp)
np.testing.assert_allclose(fft_disp,
expected_k_disp[substrate.fourier_slices],
rtol=1e-7, atol=ATOL)
expected_k_pressure = - E_s / 2 * q * expected_k_disp
np.testing.assert_allclose(
kpressure, expected_k_pressure[substrate.fourier_slices],
rtol=1e-7, atol=ATOL)
computedpressure = substrate.evaluate_force(
disp[substrate.subdomain_slices]) / substrate.area_per_pt
np.testing.assert_allclose(computedpressure,
refpressure[substrate.subdomain_slices],
atol=ATOL, rtol=1e-7)
computedenergy_kspace = \
substrate.evaluate(disp[substrate.subdomain_slices], pot=True,
forces=False)[0]
computedenergy = \
substrate.evaluate(disp[substrate.subdomain_slices], pot=True,
forces=True)[0]
refenergy = E_s / 8 * 2 * q * sx * sy
# print(substrate.nb_domain_grid_pts[-1] % 2)
# print(substrate.nb_fourier_grid_pts)
# print(substrate.fourier_locations[-1] +
# substrate.nb_fourier_grid_pts[-1] - 1)
# print(substrate.nb_domain_grid_pts[-1] // 2 )
# print(computedenergy)
# print(computedenergy_kspace)
# print(refenergy)
np.testing.assert_allclose(
computedenergy, refenergy, rtol=1e-10,
err_msg="wavevektor {} for nb_domain_grid_pts {}, "
"subdomain nb_grid_pts {}, nb_fourier_grid_pts {}"
.format(k, substrate.nb_domain_grid_pts,
substrate.nb_subdomain_grid_pts,
substrate.nb_fourier_grid_pts))
np.testing.assert_allclose(
computedenergy_kspace, refenergy,
rtol=1e-10,
err_msg="wavevektor {} for nb_domain_grid_pts {}, "
"subdomain nb_grid_pts {}, nb_fourier_grid_pts {}"
.format(k, substrate.nb_domain_grid_pts,
substrate.nb_subdomain_grid_pts,
substrate.nb_fourier_grid_pts))
@pytest.mark.parametrize("nx, ny", [(8, 8),
(17, 128),
(3, 128)])
def test_sineWave_disp_rotation_invariance(comm, pnp, nx, ny, basenpoints):
"""
for a sinusoidal displacement, test if the energy depends on if the wave is
oriented in x or y direction
Parameters
----------
comm
pnp
fftengine_class
nx
ny
basenpoints
Returns
-------
"""
nx += basenpoints
ny += basenpoints
sx = 3. # 30.0
sy = 3.
# equivalent Young's modulus
E_s = 1.0
computedenergies = []
computedenergies_kspace = []
for k in [(min(nx, ny) // 2, 0), (0, min(nx, ny) // 2)]:
qx = k[0] * np.pi * 2 / sx
qy = k[1] * np.pi * 2 / sy
Y, X = np.meshgrid(np.linspace(0, sy, ny + 1)[:-1],
np.linspace(0, sx, nx + 1)[:-1])
# At the Nyquist frequency for even number of points, the energy
# computation can only be exact for this point
disp = np.cos(qx * X + qy * Y) + np.sin(
qx * X + qy * Y)
substrate = PeriodicFFTElasticHalfSpace((nx, ny), E_s, (sx, sy),
fft='mpi', communicator=comm)
computedenergies_kspace += [
substrate.evaluate(disp[substrate.subdomain_slices], pot=True,
forces=False)[0]]
computedenergies += [
substrate.evaluate(disp[substrate.subdomain_slices], pot=True,
forces=True)[0]]
# np.testing.assert_allclose(computedpressures[0],computedpressures[1].T)
np.testing.assert_allclose(*computedenergies, rtol=1e-10)
np.testing.assert_allclose(*computedenergies_kspace, rtol=1e-10)
@pytest.mark.parametrize("nx, ny", [(64, 33),
(65, 32),
(64, 64)])
def test_sineWave_force(comm, pnp, nx, ny, basenpoints):
"""
for a given sinusoidal force, compares displacement with a reference
solution
Parameters
----------
comm
pnp
fftengine_class
nx
ny
basenpoints
Returns
-------
"""
nx += basenpoints
ny += basenpoints
sx = 2 # 30.0
sy = 1.0
# equivalent Young's modulus
E_s = 1.0
Y, X = np.meshgrid(np.linspace(0, sy, ny + 1)[:-1],
np.linspace(0, sx, nx + 1)[:-1])
qx = 1 * np.pi * 2 / sx
qy = 4 * np.pi * 2 / sy
q = np.sqrt(qx ** 2 + qy ** 2)
p = np.cos(qx * X + qy * Y)
refdisp = - p / E_s * 2 / q
substrate = PeriodicFFTElasticHalfSpace((nx, ny), E_s, (sx, sy),
fft='mpi', communicator=comm)
computeddisp = substrate.evaluate_disp(
p[substrate.subdomain_slices] * substrate.area_per_pt)
np.testing.assert_allclose(computeddisp,
refdisp[substrate.subdomain_slices], atol=1e-7,
rtol=1e-10)
# computedenergy = substrate.evaluate(p[substrate.subdomain_slices]*
# substrate.area_per_pt)
# refenergy = sx * sy/(2 * q * E_s) * 1
# np.testing.assert_allclose(computedenergy,refenergy,rtol = 1e-4)
# def test_k_force_maxq(self):
# Y, X = np.meshgrid(np.linspace(0, sy, ny + 1)[:-1],
# np.linspace(0, sx, nx + 1)[:-1])
#
# qx = 1 * np.pi * 2 / sx
# qy = ny//2 * np.pi * 2 / sy
#
# q = np.sqrt(qx ** 2 + qy ** 2)
# h=1
# disp = h*np.cos(qx * X + qy * Y)
#
# ref_k_force= np.zeros((nx, ny//2+1))
# ref_k_force[1,ny//2] = q * h *E_s /2
@pytest.mark.parametrize("nx, ny", [(64, 33),
(65, 32),
(64, 64)])
def test_multipleSineWaves_evaluate(comm, pnp, nx, ny, basenpoints):
"""
displacements: superposition of sinwaves, compares forces and energes with
analytical solution
Parameters
----------
comm
pnp
fftengine_class
nx
ny
basenpoints
Returns
-------
"""
nx += basenpoints
ny += basenpoints
sx = 2 # 30.0
sy = 1.0
# equivalent Young's modulus
E_s = 1.0
Y, X = np.meshgrid(np.linspace(0, sy, ny + 1)[:-1],
np.linspace(0, sx, nx + 1)[:-1])
disp = np.zeros((nx, ny))
refForce = np.zeros((nx, ny))
refEnergy = 0
for qx, qy in zip((1, 0, 5, nx // 2 - 1),
(4, 4, 0, ny // 2 - 2)):
qx = qx * np.pi * 2 / sx
qy = qy * np.pi * 2 / sy
q = np.sqrt(qx ** 2 + qy ** 2)
h = 1 # q**(-0.8)
disp += h * (np.cos(qx * X + qy * Y) + np.sin(qx * X + qy * Y))
refForce += h * (np.cos(qx * X + qy * Y) + np.sin(
qx * X + qy * Y)) * E_s / 2 * q
refEnergy += E_s / 8 * q * 2 * h ** 2
# * 2 because the amplitude of cos(x) + sin(x) is sqrt(2)
# max possible Wavelengths at the edge
for qx, qy in zip((nx // 2, nx // 2, 0),
(ny // 2, 0, ny // 2)):
qx = qx * np.pi * 2 / sx
qy = qy * np.pi * 2 / sy
q = np.sqrt(qx ** 2 + qy ** 2)
h = 1 # q**(-0.8)
disp += h * (np.cos(qx * X + qy * Y) + np.sin(qx * X + qy * Y))
refForce += h * (np.cos(qx * X + qy * Y) + np.sin(
qx * X + qy * Y)) * E_s / 2 * q
refEnergy += E_s / 8 * q * h ** 2 * 2
# * 2 because the amplitude of cos(x) + sin(x) is sqrt(2)
refEnergy *= sx * sy
refForce *= -sx * sy / (nx * ny)
substrate = PeriodicFFTElasticHalfSpace((nx, ny), E_s, (sx, sy),
fft='mpi', communicator=comm)
computed_E_k_space = substrate.evaluate(disp[substrate.subdomain_slices],
pot=True, forces=False)[0]
# If force is not queried this computes the energy using kspace
computed_E_realspace, computed_force = substrate.evaluate(
disp[substrate.subdomain_slices], pot=True,
forces=True)
# print("{}: Local: E_kspace: {}, E_realspace: {}"
# .format(substrate.fftengine.comm.Get_rank(),computed_E_k_space,computed_E_realspace))
# print(computed_E_k_space)
# print(refEnergy)
# if substrate.fftengine.comm.Get_rank() == 0 :
# print(computed_E_k_space)
# print(computed_E_realspace)
# print("{}: Global: E_kspace: {}, E_realspace: {}"
# .format(substrate.fftengine.comm.Get_rank(),
# computed_E_k_space, computed_E_realspace))
# Make an MPI-Reduce of the Energies !
# print(substrate.evaluate_elastic_energy(refForce, disp))
# print(0.5*np.vdot(refForce,disp))
# print(substrate.evaluate_elastic_energy(substrate.evaluate_force(disp),disp))
# print(computed_E_k_space)
# print(computed_E_realspace)
# print(refEnergy)
np.testing.assert_almost_equal(computed_E_k_space, refEnergy)
np.testing.assert_almost_equal(computed_E_realspace, refEnergy)
np.testing.assert_allclose(computed_force,
refForce[substrate.subdomain_slices], atol=1e-7,
rtol=1e-10)
| [
"NuMPI.Tools.Reduction",
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_allclose",
"numpy.zeros",
"muFFT.FFT",
"numpy.sin",
"numpy.cos",
"numpy.linspace",
"pytest.mark.parametrize",
"ContactMechanics.PeriodicFFTElasticHalfSpace",
"numpy.sqrt"
] | [((1553, 1618), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nx, ny"""', '[(64, 33), (65, 32), (64, 64)]'], {}), "('nx, ny', [(64, 33), (65, 32), (64, 64)])\n", (1576, 1618), False, 'import pytest\n'), ((2735, 2807), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nx, ny"""', '[(8, 15), (8, 4), (9, 4), (113, 765)]'], {}), "('nx, ny', [(8, 15), (8, 4), (9, 4), (113, 765)])\n", (2758, 2807), False, 'import pytest\n'), ((7220, 7284), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nx, ny"""', '[(8, 8), (17, 128), (3, 128)]'], {}), "('nx, ny', [(8, 8), (17, 128), (3, 128)])\n", (7243, 7284), False, 'import pytest\n'), ((9008, 9073), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nx, ny"""', '[(64, 33), (65, 32), (64, 64)]'], {}), "('nx, ny', [(64, 33), (65, 32), (64, 64)])\n", (9031, 9073), False, 'import pytest\n'), ((10866, 10931), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nx, ny"""', '[(64, 33), (65, 32), (64, 64)]'], {}), "('nx, ny', [(64, 33), (65, 32), (64, 64)])\n", (10889, 10931), False, 'import pytest\n'), ((1356, 1371), 'NuMPI.Tools.Reduction', 'Reduction', (['comm'], {}), '(comm)\n', (1365, 1371), False, 'from NuMPI.Tools import Reduction\n'), ((2025, 2111), 'ContactMechanics.PeriodicFFTElasticHalfSpace', 'PeriodicFFTElasticHalfSpace', (['(nx, ny)', 'E_s', '(sx, sy)'], {'fft': '"""mpi"""', 'communicator': 'comm'}), "((nx, ny), E_s, (sx, sy), fft='mpi',\n communicator=comm)\n", (2052, 2111), False, 'from ContactMechanics import PeriodicFFTElasticHalfSpace\n'), ((2168, 2264), 'ContactMechanics.PeriodicFFTElasticHalfSpace', 'PeriodicFFTElasticHalfSpace', (['(nx, ny)', 'E_s', '(sx, sy)'], {'fft': '"""fftw"""', 'communicator': 'MPI.COMM_SELF'}), "((nx, ny), E_s, (sx, sy), fft='fftw',\n communicator=MPI.COMM_SELF)\n", (2195, 2264), False, 'from ContactMechanics import PeriodicFFTElasticHalfSpace\n'), ((2353, 2522), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', 
(['reference.greens_function[substrate.fourier_slices]', 'substrate.greens_function'], {'rtol': '(0)', 'atol': '(1e-16)', 'err_msg': '"""weights are different"""'}), "(reference.greens_function[substrate.\n fourier_slices], substrate.greens_function, rtol=0, atol=1e-16, err_msg\n ='weights are different')\n", (2379, 2522), True, 'import numpy as np\n'), ((2542, 2715), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reference.surface_stiffness[substrate.fourier_slices]', 'substrate.surface_stiffness'], {'rtol': '(0)', 'atol': '(1e-16)', 'err_msg': '"""iweights are different"""'}), "(reference.surface_stiffness[substrate.\n fourier_slices], substrate.surface_stiffness, rtol=0, atol=1e-16,\n err_msg='iweights are different')\n", (2568, 2715), True, 'import numpy as np\n'), ((8878, 8935), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['*computedenergies'], {'rtol': '(1e-10)'}), '(*computedenergies, rtol=1e-10)\n', (8904, 8935), True, 'import numpy as np\n'), ((8940, 9004), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['*computedenergies_kspace'], {'rtol': '(1e-10)'}), '(*computedenergies_kspace, rtol=1e-10)\n', (8966, 9004), True, 'import numpy as np\n'), ((9733, 9759), 'numpy.sqrt', 'np.sqrt', (['(qx ** 2 + qy ** 2)'], {}), '(qx ** 2 + qy ** 2)\n', (9740, 9759), True, 'import numpy as np\n'), ((9768, 9791), 'numpy.cos', 'np.cos', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (9774, 9791), True, 'import numpy as np\n'), ((9842, 9928), 'ContactMechanics.PeriodicFFTElasticHalfSpace', 'PeriodicFFTElasticHalfSpace', (['(nx, ny)', 'E_s', '(sx, sy)'], {'fft': '"""mpi"""', 'communicator': 'comm'}), "((nx, ny), E_s, (sx, sy), fft='mpi',\n communicator=comm)\n", (9869, 9928), False, 'from ContactMechanics import PeriodicFFTElasticHalfSpace\n'), ((10080, 10186), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['computeddisp', 'refdisp[substrate.subdomain_slices]'], {'atol': '(1e-07)', 'rtol': 
'(1e-10)'}), '(computeddisp, refdisp[substrate.subdomain_slices\n ], atol=1e-07, rtol=1e-10)\n', (10106, 10186), True, 'import numpy as np\n'), ((11564, 11582), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {}), '((nx, ny))\n', (11572, 11582), True, 'import numpy as np\n'), ((11598, 11616), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {}), '((nx, ny))\n', (11606, 11616), True, 'import numpy as np\n'), ((12786, 12872), 'ContactMechanics.PeriodicFFTElasticHalfSpace', 'PeriodicFFTElasticHalfSpace', (['(nx, ny)', 'E_s', '(sx, sy)'], {'fft': '"""mpi"""', 'communicator': 'comm'}), "((nx, ny), E_s, (sx, sy), fft='mpi',\n communicator=comm)\n", (12813, 12872), False, 'from ContactMechanics import PeriodicFFTElasticHalfSpace\n'), ((14076, 14137), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed_E_k_space', 'refEnergy'], {}), '(computed_E_k_space, refEnergy)\n', (14106, 14137), True, 'import numpy as np\n'), ((14142, 14205), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['computed_E_realspace', 'refEnergy'], {}), '(computed_E_realspace, refEnergy)\n', (14172, 14205), True, 'import numpy as np\n'), ((14210, 14319), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['computed_force', 'refForce[substrate.subdomain_slices]'], {'atol': '(1e-07)', 'rtol': '(1e-10)'}), '(computed_force, refForce[substrate.\n subdomain_slices], atol=1e-07, rtol=1e-10)\n', (14236, 14319), True, 'import numpy as np\n'), ((3833, 3859), 'numpy.sqrt', 'np.sqrt', (['(qx ** 2 + qy ** 2)'], {}), '(qx ** 2 + qy ** 2)\n', (3840, 3859), True, 'import numpy as np\n'), ((4111, 4197), 'ContactMechanics.PeriodicFFTElasticHalfSpace', 'PeriodicFFTElasticHalfSpace', (['(nx, ny)', 'E_s', '(sx, sy)'], {'fft': '"""mpi"""', 'communicator': 'comm'}), "((nx, ny), E_s, (sx, sy), fft='mpi',\n communicator=comm)\n", (4138, 4197), False, 'from ContactMechanics import PeriodicFFTElasticHalfSpace\n'), ((4262, 4305), 'muFFT.FFT', 'FFT', (['(nx, ny)'], {'fft': 
'"""mpi"""', 'communicator': 'comm'}), "((nx, ny), fft='mpi', communicator=comm)\n", (4265, 4305), False, 'from muFFT import FFT\n'), ((4484, 4526), 'numpy.zeros', 'np.zeros', (['(nx // 2 + 1, ny)'], {'dtype': 'complex'}), '((nx // 2 + 1, ny), dtype=complex)\n', (4492, 4526), True, 'import numpy as np\n'), ((4829, 4894), 'numpy.zeros', 'np.zeros', (['substrate.nb_fourier_grid_pts'], {'order': '"""f"""', 'dtype': 'complex'}), "(substrate.nb_fourier_grid_pts, order='f', dtype=complex)\n", (4837, 4894), True, 'import numpy as np\n'), ((4997, 5104), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['fft_disp', 'expected_k_disp[substrate.fourier_slices]'], {'rtol': '(1e-07)', 'atol': 'ATOL'}), '(fft_disp, expected_k_disp[substrate.\n fourier_slices], rtol=1e-07, atol=ATOL)\n', (5023, 5104), True, 'import numpy as np\n'), ((5240, 5352), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['kpressure', 'expected_k_pressure[substrate.fourier_slices]'], {'rtol': '(1e-07)', 'atol': 'ATOL'}), '(kpressure, expected_k_pressure[substrate.\n fourier_slices], rtol=1e-07, atol=ATOL)\n', (5266, 5352), True, 'import numpy as np\n'), ((5504, 5617), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['computedpressure', 'refpressure[substrate.subdomain_slices]'], {'atol': 'ATOL', 'rtol': '(1e-07)'}), '(computedpressure, refpressure[substrate.\n subdomain_slices], atol=ATOL, rtol=1e-07)\n', (5530, 5617), True, 'import numpy as np\n'), ((8349, 8435), 'ContactMechanics.PeriodicFFTElasticHalfSpace', 'PeriodicFFTElasticHalfSpace', (['(nx, ny)', 'E_s', '(sx, sy)'], {'fft': '"""mpi"""', 'communicator': 'comm'}), "((nx, ny), E_s, (sx, sy), fft='mpi',\n communicator=comm)\n", (8376, 8435), False, 'from ContactMechanics import PeriodicFFTElasticHalfSpace\n'), ((11808, 11834), 'numpy.sqrt', 'np.sqrt', (['(qx ** 2 + qy ** 2)'], {}), '(qx ** 2 + qy ** 2)\n', (11815, 11834), True, 'import numpy as np\n'), ((12364, 12390), 'numpy.sqrt', 'np.sqrt', (['(qx ** 2 + 
qy ** 2)'], {}), '(qx ** 2 + qy ** 2)\n', (12371, 12390), True, 'import numpy as np\n'), ((3996, 4019), 'numpy.cos', 'np.cos', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (4002, 4019), True, 'import numpy as np\n'), ((4022, 4045), 'numpy.sin', 'np.sin', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (4028, 4045), True, 'import numpy as np\n'), ((8265, 8288), 'numpy.cos', 'np.cos', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (8271, 8288), True, 'import numpy as np\n'), ((8291, 8314), 'numpy.sin', 'np.sin', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (8297, 8314), True, 'import numpy as np\n'), ((9578, 9604), 'numpy.linspace', 'np.linspace', (['(0)', 'sy', '(ny + 1)'], {}), '(0, sy, ny + 1)\n', (9589, 9604), True, 'import numpy as np\n'), ((9634, 9660), 'numpy.linspace', 'np.linspace', (['(0)', 'sx', '(nx + 1)'], {}), '(0, sx, nx + 1)\n', (9645, 9660), True, 'import numpy as np\n'), ((11463, 11489), 'numpy.linspace', 'np.linspace', (['(0)', 'sy', '(ny + 1)'], {}), '(0, sy, ny + 1)\n', (11474, 11489), True, 'import numpy as np\n'), ((11519, 11545), 'numpy.linspace', 'np.linspace', (['(0)', 'sx', '(nx + 1)'], {}), '(0, sx, nx + 1)\n', (11530, 11545), True, 'import numpy as np\n'), ((3888, 3914), 'numpy.linspace', 'np.linspace', (['(0)', 'sy', '(ny + 1)'], {}), '(0, sy, ny + 1)\n', (3899, 3914), True, 'import numpy as np\n'), ((3948, 3974), 'numpy.linspace', 'np.linspace', (['(0)', 'sx', '(nx + 1)'], {}), '(0, sx, nx + 1)\n', (3959, 3974), True, 'import numpy as np\n'), ((8029, 8055), 'numpy.linspace', 'np.linspace', (['(0)', 'sy', '(ny + 1)'], {}), '(0, sy, ny + 1)\n', (8040, 8055), True, 'import numpy as np\n'), ((8089, 8115), 'numpy.linspace', 'np.linspace', (['(0)', 'sx', '(nx + 1)'], {}), '(0, sx, nx + 1)\n', (8100, 8115), True, 'import numpy as np\n'), ((11883, 11906), 'numpy.cos', 'np.cos', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (11889, 11906), True, 'import numpy as np\n'), ((11909, 11932), 'numpy.sin', 'np.sin', (['(qx 
* X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (11915, 11932), True, 'import numpy as np\n'), ((12439, 12462), 'numpy.cos', 'np.cos', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (12445, 12462), True, 'import numpy as np\n'), ((12465, 12488), 'numpy.sin', 'np.sin', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (12471, 12488), True, 'import numpy as np\n'), ((11959, 11982), 'numpy.cos', 'np.cos', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (11965, 11982), True, 'import numpy as np\n'), ((11985, 12008), 'numpy.sin', 'np.sin', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (11991, 12008), True, 'import numpy as np\n'), ((12515, 12538), 'numpy.cos', 'np.cos', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (12521, 12538), True, 'import numpy as np\n'), ((12541, 12564), 'numpy.sin', 'np.sin', (['(qx * X + qy * Y)'], {}), '(qx * X + qy * Y)\n', (12547, 12564), True, 'import numpy as np\n')] |
import cv2
import json
import os
import yaml
from pycocotools.coco import COCO
import numpy as np
import pycocotools.mask as maskUtils
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
class GetAnn():
    '''
    Map COCO images and annotations onto the "gaussian" dataset's category
    scheme: select images containing our target thing/stuff categories,
    remap COCO category ids to gaussian ids, and convert RLE/mask
    segmentations into polygon lists.
    '''
    def __init__(self):
        # detection ("thing") categories defined for the gaussian dataset
        self.gaussian_things_list = [
            'lane markings', 'person', 'rider', 'car', 'truck', 'bus', 'motorcycle', 'bicycle', 'robot',
            'pole', 'traffic light', 'traffic sign', 'dustbin', 'billboard', 'traffic_cone', 'road_pile',
            'garbage', 'slops', 'ladder', 'pack', 'face', 'chair', 'table', 'carpet', 'potted plant'
        ]
        # stuff categories defined for the gaussian dataset
        self.gaussian_stuff_list = [
            'road', 'pavement', 'sidewalk', 'fence',
            'sky', 'vegetation', 'building', 'wall'
        ]
        # COCO stuff names that fit one of our stuff categories but are
        # spelled differently in COCO (collapsed in get_img_ann_list)
        self.coco_fitted_stuff_list = [
            'bush', 'grass', 'tree', 'carpet', 'rug', 'sky-other', 'clouds', 'table',
            'wall-stone', 'wall-brick'
        ]
        self.target_stuff_list = self.gaussian_stuff_list + self.coco_fitted_stuff_list
        self.coco_data_dir = '/dl/data/coco'

    def get_gaussian_imgIds(self, data_type):
        '''
        Collect ids of all COCO images containing at least one target
        thing or stuff category.

        Side effect: creates self.thing_coco / self.stuff_coco, which
        get_img_ann_list() and mask2polys() rely on.

        :param data_type: COCO split name, e.g. 'train2017' or 'val2017'
        :return: set of COCO image ids
        '''
        img_ids = []
        # things come from instances_*.json
        annFile_instance = '{}/annotations/instances_{}.json'.format(self.coco_data_dir, data_type)
        self.thing_coco = COCO(annFile_instance)
        coco_things_cats = self.thing_coco.loadCats(self.thing_coco.getCatIds())
        # set for O(1) membership tests
        coco_things_catnms = {cat['name'] for cat in coco_things_cats}
        for thing in self.gaussian_things_list:
            if thing in coco_things_catnms:
                # get catid list with given category name, then its images
                catIds = self.thing_coco.getCatIds(catNms=[thing])
                img_ids.extend(self.thing_coco.getImgIds(catIds=catIds))
        # stuff comes from stuff_*.json
        annFile_stuff = '{}/annotations/stuff_{}.json'.format(self.coco_data_dir, data_type)
        self.stuff_coco = COCO(annFile_stuff)
        coco_stuff_cats = self.stuff_coco.loadCats(self.stuff_coco.getCatIds())
        coco_stuff_catnms = {cat['name'] for cat in coco_stuff_cats}
        for stuff in self.target_stuff_list:
            if stuff in coco_stuff_catnms:
                catIds = self.stuff_coco.getCatIds(catNms=[stuff])
                img_ids.extend(self.stuff_coco.getImgIds(catIds=catIds))
        return set(img_ids)

    def get_img_ann_list(self, img_ids,
                         cat_yml=r'/media/pesong/e/dl_gaussian/data/coco/cocoapi/coco2gaussian/categories.yml'):
        '''
        Build image-info and annotation lists from the COCO label jsons,
        remapping COCO category ids to gaussian category ids.

        Requires get_gaussian_imgIds() to have been called first (it
        creates self.thing_coco / self.stuff_coco).

        :param img_ids: iterable of COCO image ids
        :param cat_yml: yaml file mapping {super_category: {cat_name: id}}
        :return: (annotations list, image-info list)
        '''
        # gaussian category map {cat_name: id}
        gs_cat_map = {}
        # safe_load: file holds plain mappings only, and yaml.load()
        # without an explicit Loader is deprecated (PyYAML >= 5.1);
        # 'with' guarantees the handle is closed
        with open(cat_yml, 'r') as f:
            catseqs = yaml.safe_load(f)
        for _super, seqs in catseqs.items():  # renamed: don't shadow builtin super
            for cat_name, cat_id in seqs.items():
                gs_cat_map[cat_name] = cat_id
        img_list = []
        anns_thing = []
        anns_stuff = []
        for img_id in img_ids:
            img_list.append(self.thing_coco.loadImgs(img_id)[0])
            # thing annotations whose category maps to a gaussian thing
            coco_annIds_thing = self.thing_coco.getAnnIds(img_id)
            coco_anns_thing = self.thing_coco.loadAnns(coco_annIds_thing)
            for ann in coco_anns_thing:
                thing_name = self.thing_coco.loadCats(ann['category_id'])[0]['name']
                if thing_name in self.gaussian_things_list:
                    ann['category_id'] = gs_cat_map[thing_name]
                    anns_thing.append(ann)
            # stuff annotations, collapsing COCO synonyms into one class
            coco_annIds_stuff = self.stuff_coco.getAnnIds(img_id)
            coco_anns_stuff = self.stuff_coco.loadAnns(coco_annIds_stuff)
            for ann in coco_anns_stuff:
                stuff_name = self.stuff_coco.loadCats(ann['category_id'])[0]['name']
                if stuff_name in self.target_stuff_list:
                    # the remap cases are mutually exclusive, so elif is
                    # equivalent to the original chain of ifs
                    if stuff_name in ('bush', 'grass', 'tree'):
                        ann['category_id'] = gs_cat_map['vegetation']
                    elif stuff_name in ('carpet', 'rug'):
                        ann['category_id'] = gs_cat_map['carpet']
                    elif stuff_name in ('sky-other', 'clouds'):
                        ann['category_id'] = gs_cat_map['sky']
                    elif stuff_name in ('wall-stone', 'wall-brick'):
                        ann['category_id'] = gs_cat_map['wall']
                    elif stuff_name == 'table':
                        ann['category_id'] = gs_cat_map['table']
                    anns_stuff.append(ann)
        return anns_thing + anns_stuff, img_list

    def mask2polys(self, anns_list):
        '''
        Convert RLE/mask segmentations to polygon lists, in place.

        :param anns_list: annotations filtered to gaussian target objects
        :return: the same list, with mask segmentations replaced by polygons
        '''
        for ann in anns_list:
            if "segmentation" in ann and not isinstance(ann['segmentation'], list):
                # segmentation is a mask (RLE), not a polygon list
                t = self.stuff_coco.imgs[ann['image_id']]
                if isinstance(ann['segmentation']['counts'], list):
                    # uncompressed RLE -> compressed RLE
                    rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
                else:
                    rle = [ann['segmentation']]
                mask = maskUtils.decode(rle)
                mask = np.squeeze(mask, 2).astype(np.uint8).copy()
                # findContours returns (img, contours, hierarchy) in
                # OpenCV 3 and (contours, hierarchy) in OpenCV 2/4;
                # [-2] selects the contours in either case
                contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
                segmentation = []
                for contour in contours:
                    contour = contour.flatten().tolist()
                    # keep only valid polygons (>= 3 points, i.e. > 4
                    # coords); the original code also appended every
                    # contour unconditionally, duplicating valid ones
                    if len(contour) > 4:
                        segmentation.append(contour)
                if len(segmentation) == 0:
                    continue
                ann['segmentation'] = segmentation
        return anns_list
if __name__ == "__main__":
    # Smoke test on the validation split: select images, remap their
    # annotations, and convert masks to polygons.
    ann_mapper = GetAnn()
    val_img_ids = ann_mapper.get_gaussian_imgIds('val2017')
    val_anns, val_imgs = ann_mapper.get_img_ann_list(val_img_ids)
    refined_val_anns = ann_mapper.mask2polys(val_anns)
    print("")
# person bicycle car motorcycle airplane bus train truck boat traffic light fire
# hydrant stop sign parking meter bench bird cat dog horse sheep cow elephant bear
# zebra giraffe backpack umbrella handbag tie suitcase frisbee skis snowboard sports
# ball kite baseball bat baseball glove skateboard surfboard tennis racket bottle
# wine glass cup fork knife spoon bowl banana apple sandwich orange broccoli carrot
# hot dog pizza donut cake chair couch potted plant bed dining table toilet tv laptop
# mouse remote keyboard cell phone microwave oven toaster sink refrigerator book
# clock vase scissors teddy bear hair drier toothbrush
# banner blanket branch bridge building-other bush cabinet cage cardboard carpet
# ceiling-other ceiling-tile cloth clothes clouds counter cupboard curtain
# desk-stuff dirt door-stuff fence floor-marble floor-other floor-stone floor-tile
# floor-wood flower fog food-other fruit furniture-other grass gravel ground-other
# hill house leaves light mat metal mirror-stuff moss mountain mud napkin net paper
# pavement pillow plant-other plastic platform playingfield railing railroad river
# road rock roof rug salad sand sea shelf sky-other skyscraper snow solid-other stairs
# stone straw structural-other table tent textile-other towel tree vegetable wall-brick
# wall-concrete wall-other wall-panel wall-stone wall-tile wall-wood water-other
# waterdrops window-blind window-other wood other
# person bicycle car motorcycle bus truck traffic light chair
# table, road, pavement, bush/grass/tree (vegetation) carpet/rug(地毯), fence, pavement
#
# 大类:sky/sky wall/wall
| [
"yaml.load",
"pycocotools.mask.decode",
"pycocotools.coco.COCO",
"numpy.squeeze",
"pycocotools.mask.frPyObjects",
"cv2.findContours"
] | [((1761, 1783), 'pycocotools.coco.COCO', 'COCO', (['annFile_instance'], {}), '(annFile_instance)\n', (1765, 1783), False, 'from pycocotools.coco import COCO\n'), ((2446, 2465), 'pycocotools.coco.COCO', 'COCO', (['annFile_stuff'], {}), '(annFile_stuff)\n', (2450, 2465), False, 'from pycocotools.coco import COCO\n'), ((3373, 3385), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (3382, 3385), False, 'import yaml\n'), ((6019, 6040), 'pycocotools.mask.decode', 'maskUtils.decode', (['rle'], {}), '(rle)\n', (6035, 6040), True, 'import pycocotools.mask as maskUtils\n'), ((6143, 6205), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (6159, 6205), False, 'import cv2\n'), ((5856, 5925), 'pycocotools.mask.frPyObjects', 'maskUtils.frPyObjects', (["[ann['segmentation']]", "t['height']", "t['width']"], {}), "([ann['segmentation']], t['height'], t['width'])\n", (5877, 5925), True, 'import pycocotools.mask as maskUtils\n'), ((6065, 6084), 'numpy.squeeze', 'np.squeeze', (['mask', '(2)'], {}), '(mask, 2)\n', (6075, 6084), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Demo: propagate a 3-molecule excitonic aggregate with the Kubo-Tanimura
# hierarchy (HEOM) at two depths, then cross-check against an
# integrodifferential propagator built from the hierarchy's memory kernel.
_show_plots_ = False
import time
import numpy
import quantarhei as qr
from quantarhei.qm.liouvillespace.integrodiff.integrodiff \
    import IntegrodiffPropagator
print("")
print("***********************************************************")
print("*                                                         *")
print("*      Quantarhei's HEOM implementation demo              *")
print("*                                                         *")
print("***********************************************************")
###############################################################################
#
#   Model system definition
#
###############################################################################
# Three two-level molecules with transition energies given in 1/cm
with qr.energy_units("1/cm"):
    m1 = qr.Molecule([0.0, 10100.0])
    m2 = qr.Molecule([0.0, 10300.0])
    m3 = qr.Molecule([0.0, 10000.0])
# Aggregate is built from the molecules
agg = qr.Aggregate([m1, m2, m3])
# Resonance couplings between molecules 0-1 and 0-2 (in 1/cm)
with qr.energy_units("1/cm"):
    agg.set_resonance_coupling(0,1,80.0)
    agg.set_resonance_coupling(0,2,100.0)
# Interaction with the bath is set through bath correlation functions
# (time axis: 500 steps of 1 fs)
timea = qr.TimeAxis(0.0, 500, 1.0)
cpar1 = dict(ftype="OverdampedBrownian-HighTemperature", reorg=50,
             cortime=50, T=300)
cpar2 = dict(ftype="OverdampedBrownian-HighTemperature", reorg=50,
             cortime=50, T=300)
with qr.energy_units("1/cm"):
    cfce1 = qr.CorrelationFunction(timea, cpar1)
    cfce2 = qr.CorrelationFunction(timea, cpar2)
# molecules 1 and 2 share one bath spectral density, molecule 3 has its own
m1.set_transition_environment((0, 1), cfce1)
m2.set_transition_environment((0, 1), cfce1)
m3.set_transition_environment((0, 1), cfce2)
# Aggregate is built
agg.build()
###############################################################################
#
#   Definition of the hierarchy
#
###############################################################################
# Hamiltonian and the system-bath interaction operator is needed to
# define the Kubo-Tanimura hierarchy
ham = agg.get_Hamiltonian()
sbi = agg.get_SystemBathInteraction()
# We define the hierarchy
#Hy3 = qr.KTHierarchy(ham, sbi, 3)
#Hy4 = qr.KTHierarchy(ham, sbi, 4)
#Hy5 = qr.KTHierarchy(ham, sbi, 5)
# NOTE(review): variable names Hy6/Hy7 are historical -- they hold
# hierarchies of depth 3 and 4 respectively
Hy6 = qr.KTHierarchy(ham, sbi, 3)
print("Size of hierarchy of depth",Hy6.depth,"is",Hy6.hsize)
Hy7 = qr.KTHierarchy(ham, sbi, 4)
print("Size of hierarchy of depth",Hy7.depth,"is",Hy7.hsize)
# testing generation of hierarchy indices
#print(Hy.generate_indices(4, level=4))
#
#raise Exception()
###############################################################################
#
#   Propagation of the HEOM
#
###############################################################################
# Initial density matrix: populations placed on excitonic states 1-3
rhoi = qr.ReducedDensityMatrix(dim=ham.dim)
with qr.eigenbasis_of(ham):
    rhoi.data[2,2] = 0.8
    rhoi.data[1,1] = 0.1
    rhoi.data[3,3] = 0.1
#print(rhoi)
# Definition of the HEOM propagator
#kprop3 = qr.KTHierarchyPropagator(timea, Hy3)
#kprop4 = qr.KTHierarchyPropagator(timea, Hy4)
#kprop5 = qr.KTHierarchyPropagator(timea, Hy5)
kprop6 = qr.KTHierarchyPropagator(timea, Hy6)
kprop7 = qr.KTHierarchyPropagator(timea, Hy7)
# Propagation of the hierarchy and saving the density operator
t1 = time.time()
#rhot3 = kprop3.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
#rhot4 = kprop4.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
#rhot5 = kprop5.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
rhot6 = kprop6.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
t2 = time.time()
print("Propagated in", t2-t1,"s")
t1 = time.time()
rhot7 = kprop7.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
t2 = time.time()
print("Propagated in", t2-t1,"s")
###############################################################################
#
#   Graphical output of the results
#
###############################################################################
if _show_plots_:
    import matplotlib.pyplot as plt
    N = timea.length
    # plot in the eigenbasis (excitonic) representation
    with qr.eigenbasis_of(ham):
#        plt.plot(timea.data[0:N], rhot3.data[0:N,1,1],"-b")
#        plt.plot(timea.data[0:N], rhot3.data[0:N,2,2],"-r")
#        plt.plot(timea.data[0:N], rhot3.data[0:N,3,3],"-k")
#        plt.plot(timea.data[0:N], rhot4.data[0:N,2,2],"-r")
#        plt.plot(timea.data[0:N], rhot4.data[0:N,1,1],"-b")
#        plt.plot(timea.data[0:N], rhot4.data[0:N,3,3],"-k")
#        plt.plot(timea.data[0:N], rhot5.data[0:N,1,1],"-b")
#        plt.plot(timea.data[0:N], rhot5.data[0:N,2,2],"-r")
#        plt.plot(timea.data[0:N], rhot5.data[0:N,3,3],"-k")
        # depth-3 (solid) vs depth-4 (dashed) coherences for convergence check
        plt.plot(timea.data[0:N], rhot6.data[0:N,0,0])
        plt.plot(timea.data[0:N], rhot6.data[0:N,1,3],"-b")
        plt.plot(timea.data[0:N], rhot6.data[0:N,2,3],"-r")
        plt.plot(timea.data[0:N], rhot6.data[0:N,1,2],"-k")
        plt.plot(timea.data[0:N], rhot7.data[0:N,1,3],"--b")
        plt.plot(timea.data[0:N], rhot7.data[0:N,2,3],"--r")
        plt.plot(timea.data[0:N], rhot7.data[0:N,1,2],"--k")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,1], "-k")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,2], "-k")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,3], "-b")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,4], "-b")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,5], "-b")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,6], "-r")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,7], "-r")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,8], "-r")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,9], "-r")
        #plt.plot(timea.data[0:N], Hy.hpop[0:N,10], "-g")
        plt.show()
# Build the memory kernel from the depth-3 hierarchy and propagate the
# same initial state with an integrodifferential equation for comparison
print("Kernel generation")
ker = Hy6.get_kernel(timea)
ip8 = IntegrodiffPropagator(timea, ham, kernel=ker,
                            fft=True, timefac=3, decay_fraction=2.0)
                            #fft=False) #, cutoff_time=100)
rhot8 = ip8.propagate(rhoi)
# trace of the density matrix at every time step (should stay ~1)
trc = numpy.zeros(timea.length, dtype=qr.REAL)
for ti in range(timea.length):
    trc[ti] = numpy.real(numpy.trace(rhot8.data[ti,:,:]))
if _show_plots_:
    N = timea.length
    with qr.eigenbasis_of(ham):
        #plt.plot(timea.data[0:N], rhot8.data[0:N,0,0])
        #plt.plot(timea.data[0:N], trc[0:N],"-m")
        # selected kernel matrix elements
        plt.plot(timea.data[0:N], ker[0:N,1,1,1,1],"-m")
        plt.plot(timea.data[0:N], ker[0:N,1,2,1,2],"-m")
        plt.plot(timea.data[0:N], ker[0:N,2,2,2,2],"-m")
        plt.show()
        # kernel-based propagation (solid) vs direct HEOM (dashed)
        plt.plot(timea.data[0:N], rhot8.data[0:N,1,1],"-b")
        plt.plot(timea.data[0:N], rhot8.data[0:N,2,2],"-r")
        plt.plot(timea.data[0:N], rhot8.data[0:N,1,2],"-k")
        plt.plot(timea.data[0:N], rhot6.data[0:N,1,1],"--b")
        plt.plot(timea.data[0:N], rhot6.data[0:N,2,2],"--r")
        plt.plot(timea.data[0:N], rhot6.data[0:N,1,2],"--k")
        plt.show()
print("")
print("***********************************************************")
print("*                                                         *")
print("*      Demo finished successfully                         *")
print("*                                                         *")
print("***********************************************************")
| [
"quantarhei.qm.liouvillespace.integrodiff.integrodiff.IntegrodiffPropagator",
"quantarhei.Molecule",
"numpy.trace",
"quantarhei.ReducedDensityMatrix",
"matplotlib.pyplot.show",
"quantarhei.Aggregate",
"matplotlib.pyplot.plot",
"quantarhei.KTHierarchyPropagator",
"quantarhei.KTHierarchy",
"numpy.ze... | [((954, 980), 'quantarhei.Aggregate', 'qr.Aggregate', (['[m1, m2, m3]'], {}), '([m1, m2, m3])\n', (966, 980), True, 'import quantarhei as qr\n'), ((1211, 1237), 'quantarhei.TimeAxis', 'qr.TimeAxis', (['(0.0)', '(500)', '(1.0)'], {}), '(0.0, 500, 1.0)\n', (1222, 1237), True, 'import quantarhei as qr\n'), ((2263, 2290), 'quantarhei.KTHierarchy', 'qr.KTHierarchy', (['ham', 'sbi', '(3)'], {}), '(ham, sbi, 3)\n', (2277, 2290), True, 'import quantarhei as qr\n'), ((2358, 2385), 'quantarhei.KTHierarchy', 'qr.KTHierarchy', (['ham', 'sbi', '(4)'], {}), '(ham, sbi, 4)\n', (2372, 2385), True, 'import quantarhei as qr\n'), ((2780, 2816), 'quantarhei.ReducedDensityMatrix', 'qr.ReducedDensityMatrix', ([], {'dim': 'ham.dim'}), '(dim=ham.dim)\n', (2803, 2816), True, 'import quantarhei as qr\n'), ((3128, 3164), 'quantarhei.KTHierarchyPropagator', 'qr.KTHierarchyPropagator', (['timea', 'Hy6'], {}), '(timea, Hy6)\n', (3152, 3164), True, 'import quantarhei as qr\n'), ((3174, 3210), 'quantarhei.KTHierarchyPropagator', 'qr.KTHierarchyPropagator', (['timea', 'Hy7'], {}), '(timea, Hy7)\n', (3198, 3210), True, 'import quantarhei as qr\n'), ((3282, 3293), 'time.time', 'time.time', ([], {}), '()\n', (3291, 3293), False, 'import time\n'), ((3610, 3621), 'time.time', 'time.time', ([], {}), '()\n', (3619, 3621), False, 'import time\n'), ((3661, 3672), 'time.time', 'time.time', ([], {}), '()\n', (3670, 3672), False, 'import time\n'), ((3755, 3766), 'time.time', 'time.time', ([], {}), '()\n', (3764, 3766), False, 'import time\n'), ((5740, 5830), 'quantarhei.qm.liouvillespace.integrodiff.integrodiff.IntegrodiffPropagator', 'IntegrodiffPropagator', (['timea', 'ham'], {'kernel': 'ker', 'fft': '(True)', 'timefac': '(3)', 'decay_fraction': '(2.0)'}), '(timea, ham, kernel=ker, fft=True, timefac=3,\n decay_fraction=2.0)\n', (5761, 5830), False, 'from quantarhei.qm.liouvillespace.integrodiff.integrodiff import IntegrodiffPropagator\n'), ((5926, 5966), 'numpy.zeros', 'numpy.zeros', 
(['timea.length'], {'dtype': 'qr.REAL'}), '(timea.length, dtype=qr.REAL)\n', (5937, 5966), False, 'import numpy\n'), ((765, 788), 'quantarhei.energy_units', 'qr.energy_units', (['"""1/cm"""'], {}), "('1/cm')\n", (780, 788), True, 'import quantarhei as qr\n'), ((799, 826), 'quantarhei.Molecule', 'qr.Molecule', (['[0.0, 10100.0]'], {}), '([0.0, 10100.0])\n', (810, 826), True, 'import quantarhei as qr\n'), ((836, 863), 'quantarhei.Molecule', 'qr.Molecule', (['[0.0, 10300.0]'], {}), '([0.0, 10300.0])\n', (847, 863), True, 'import quantarhei as qr\n'), ((873, 900), 'quantarhei.Molecule', 'qr.Molecule', (['[0.0, 10000.0]'], {}), '([0.0, 10000.0])\n', (884, 900), True, 'import quantarhei as qr\n'), ((1022, 1045), 'quantarhei.energy_units', 'qr.energy_units', (['"""1/cm"""'], {}), "('1/cm')\n", (1037, 1045), True, 'import quantarhei as qr\n'), ((1440, 1463), 'quantarhei.energy_units', 'qr.energy_units', (['"""1/cm"""'], {}), "('1/cm')\n", (1455, 1463), True, 'import quantarhei as qr\n'), ((1477, 1513), 'quantarhei.CorrelationFunction', 'qr.CorrelationFunction', (['timea', 'cpar1'], {}), '(timea, cpar1)\n', (1499, 1513), True, 'import quantarhei as qr\n'), ((1526, 1562), 'quantarhei.CorrelationFunction', 'qr.CorrelationFunction', (['timea', 'cpar2'], {}), '(timea, cpar2)\n', (1548, 1562), True, 'import quantarhei as qr\n'), ((2823, 2844), 'quantarhei.eigenbasis_of', 'qr.eigenbasis_of', (['ham'], {}), '(ham)\n', (2839, 2844), True, 'import quantarhei as qr\n'), ((5665, 5675), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5673, 5675), True, 'import matplotlib.pyplot as plt\n'), ((6791, 6801), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6799, 6801), True, 'import matplotlib.pyplot as plt\n'), ((4092, 4113), 'quantarhei.eigenbasis_of', 'qr.eigenbasis_of', (['ham'], {}), '(ham)\n', (4108, 4113), True, 'import quantarhei as qr\n'), ((4672, 4720), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot6.data[0:N, 0, 0]'], {}), '(timea.data[0:N], 
rhot6.data[0:N, 0, 0])\n', (4680, 4720), True, 'import matplotlib.pyplot as plt\n'), ((4727, 4781), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot6.data[0:N, 1, 3]', '"""-b"""'], {}), "(timea.data[0:N], rhot6.data[0:N, 1, 3], '-b')\n", (4735, 4781), True, 'import matplotlib.pyplot as plt\n'), ((4787, 4841), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot6.data[0:N, 2, 3]', '"""-r"""'], {}), "(timea.data[0:N], rhot6.data[0:N, 2, 3], '-r')\n", (4795, 4841), True, 'import matplotlib.pyplot as plt\n'), ((4847, 4901), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot6.data[0:N, 1, 2]', '"""-k"""'], {}), "(timea.data[0:N], rhot6.data[0:N, 1, 2], '-k')\n", (4855, 4901), True, 'import matplotlib.pyplot as plt\n'), ((4911, 4966), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot7.data[0:N, 1, 3]', '"""--b"""'], {}), "(timea.data[0:N], rhot7.data[0:N, 1, 3], '--b')\n", (4919, 4966), True, 'import matplotlib.pyplot as plt\n'), ((4972, 5027), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot7.data[0:N, 2, 3]', '"""--r"""'], {}), "(timea.data[0:N], rhot7.data[0:N, 2, 3], '--r')\n", (4980, 5027), True, 'import matplotlib.pyplot as plt\n'), ((5033, 5088), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot7.data[0:N, 1, 2]', '"""--k"""'], {}), "(timea.data[0:N], rhot7.data[0:N, 1, 2], '--k')\n", (5041, 5088), True, 'import matplotlib.pyplot as plt\n'), ((6023, 6056), 'numpy.trace', 'numpy.trace', (['rhot8.data[ti, :, :]'], {}), '(rhot8.data[ti, :, :])\n', (6034, 6056), False, 'import numpy\n'), ((6104, 6125), 'quantarhei.eigenbasis_of', 'qr.eigenbasis_of', (['ham'], {}), '(ham)\n', (6120, 6125), True, 'import quantarhei as qr\n'), ((6241, 6294), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'ker[0:N, 1, 1, 1, 1]', '"""-m"""'], {}), "(timea.data[0:N], ker[0:N, 1, 1, 1, 1], '-m')\n", (6249, 6294), True, 'import matplotlib.pyplot as plt\n'), ((6298, 6351), 
'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'ker[0:N, 1, 2, 1, 2]', '"""-m"""'], {}), "(timea.data[0:N], ker[0:N, 1, 2, 1, 2], '-m')\n", (6306, 6351), True, 'import matplotlib.pyplot as plt\n'), ((6355, 6408), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'ker[0:N, 2, 2, 2, 2]', '"""-m"""'], {}), "(timea.data[0:N], ker[0:N, 2, 2, 2, 2], '-m')\n", (6363, 6408), True, 'import matplotlib.pyplot as plt\n'), ((6412, 6422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6420, 6422), True, 'import matplotlib.pyplot as plt\n'), ((6431, 6485), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot8.data[0:N, 1, 1]', '"""-b"""'], {}), "(timea.data[0:N], rhot8.data[0:N, 1, 1], '-b')\n", (6439, 6485), True, 'import matplotlib.pyplot as plt\n'), ((6491, 6545), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot8.data[0:N, 2, 2]', '"""-r"""'], {}), "(timea.data[0:N], rhot8.data[0:N, 2, 2], '-r')\n", (6499, 6545), True, 'import matplotlib.pyplot as plt\n'), ((6551, 6605), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot8.data[0:N, 1, 2]', '"""-k"""'], {}), "(timea.data[0:N], rhot8.data[0:N, 1, 2], '-k')\n", (6559, 6605), True, 'import matplotlib.pyplot as plt\n'), ((6612, 6667), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot6.data[0:N, 1, 1]', '"""--b"""'], {}), "(timea.data[0:N], rhot6.data[0:N, 1, 1], '--b')\n", (6620, 6667), True, 'import matplotlib.pyplot as plt\n'), ((6673, 6728), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot6.data[0:N, 2, 2]', '"""--r"""'], {}), "(timea.data[0:N], rhot6.data[0:N, 2, 2], '--r')\n", (6681, 6728), True, 'import matplotlib.pyplot as plt\n'), ((6734, 6789), 'matplotlib.pyplot.plot', 'plt.plot', (['timea.data[0:N]', 'rhot6.data[0:N, 1, 2]', '"""--k"""'], {}), "(timea.data[0:N], rhot6.data[0:N, 1, 2], '--k')\n", (6742, 6789), True, 'import matplotlib.pyplot as plt\n')] |
import natsort
import numpy as np
import torch
import os
from torchvision.transforms import transforms
from medpy.io import load
class DatasetSignalAndNoiseSamples(torch.utils.data.Dataset):
    """Dataset pairing noise samples with (scaled) signal samples.

    Each item is the noisy mixture ``signal * scale + noise`` together with
    the pure noise, both normalized by the configured divisor. Pairing is
    random during training and index-aligned for val/test.
    """

    def __init__(self, e, split):
        self.split = split
        self.path_noise_samples = os.path.join(e.path_noise_samples, split)
        self.path_signal_samples = os.path.join(e.path_signal_samples, split)
        # gather file names recursively and order them naturally
        self.names_noise_samples = natsort.natsorted(
            self._collect_file_names(self.path_noise_samples))
        self.names_signal_samples = natsort.natsorted(
            self._collect_file_names(self.path_signal_samples))
        # deterministic splits need one signal sample per noise sample
        if self.split in ('val', 'test'):
            if len(self.names_signal_samples) < len(self.names_noise_samples):
                raise Exception("There are less signal samples than noise samples for '" + self.split + "' split. Please generate more signal samples or remove some noise samples from the specified dataset.")
        self.scaling_factor_for_signal_samples = e.scaling_factor_for_signal_samples
        self.random_scaling_for_signal_samples = e.random_scaling_for_signal_samples
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0], std=[e.divisor_for_data_normalization]),
        ])

    @staticmethod
    def _collect_file_names(directory):
        # walk the whole directory tree, keeping bare file names only
        names = []
        for _, _, file_names in os.walk(directory):
            names.extend(file_names)
        return names

    def __getitem__(self, index):
        name_noise_sample = self.names_noise_samples[index]
        # deterministic pairing for evaluation, random pairing for training
        if self.split in ('val', 'test'):
            signal_index = index
        else:
            signal_index = np.random.randint(len(self.names_signal_samples))
        name_signal_sample = self.names_signal_samples[signal_index]
        noise_sample = load(os.path.join(self.path_noise_samples, name_noise_sample))[0].astype('float32')
        signal_sample = load(os.path.join(self.path_signal_samples, name_signal_sample))[0].astype('float32')
        if not self.random_scaling_for_signal_samples:
            # fixed scaling factor
            scale = self.scaling_factor_for_signal_samples
        elif self.split in ('val', 'test'):
            # decreasing with index so that the first val sample (usually
            # the one that is printed) has the highest scaling factor
            scale = (1.0 - float(index) / len(self.names_noise_samples)) * self.scaling_factor_for_signal_samples
        else:
            scale = (1.0 - np.random.rand()) * self.scaling_factor_for_signal_samples
        signal_sample = signal_sample * scale
        noisy_signal = self.transform(signal_sample + noise_sample)
        noise_sample = self.transform(noise_sample)
        return noisy_signal, noise_sample, name_noise_sample, name_signal_sample

    def __len__(self):
        """Return the number of noise samples (one dataset item each)."""
        return len(self.names_noise_samples)
| [
"os.walk",
"torchvision.transforms.transforms.ToTensor",
"torchvision.transforms.transforms.Normalize",
"numpy.random.rand",
"os.path.join",
"natsort.natsorted"
] | [((289, 330), 'os.path.join', 'os.path.join', (['e.path_noise_samples', 'split'], {}), '(e.path_noise_samples, split)\n', (301, 330), False, 'import os\n'), ((366, 408), 'os.path.join', 'os.path.join', (['e.path_signal_samples', 'split'], {}), '(e.path_signal_samples, split)\n', (378, 408), False, 'import os\n'), ((477, 509), 'os.walk', 'os.walk', (['self.path_noise_samples'], {}), '(self.path_noise_samples)\n', (484, 509), False, 'import os\n'), ((661, 694), 'os.walk', 'os.walk', (['self.path_signal_samples'], {}), '(self.path_signal_samples)\n', (668, 694), False, 'import os\n'), ((814, 857), 'natsort.natsorted', 'natsort.natsorted', (['self.names_noise_samples'], {}), '(self.names_noise_samples)\n', (831, 857), False, 'import natsort\n'), ((894, 938), 'natsort.natsorted', 'natsort.natsorted', (['self.names_signal_samples'], {}), '(self.names_signal_samples)\n', (911, 938), False, 'import natsort\n'), ((1500, 1521), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1519, 1521), False, 'from torchvision.transforms import transforms\n'), ((1523, 1593), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0]', 'std': '[e.divisor_for_data_normalization]'}), '(mean=[0], std=[e.divisor_for_data_normalization])\n', (1543, 1593), False, 'from torchvision.transforms import transforms\n'), ((2097, 2153), 'os.path.join', 'os.path.join', (['self.path_noise_samples', 'name_noise_sample'], {}), '(self.path_noise_samples, name_noise_sample)\n', (2109, 2153), False, 'import os\n'), ((2205, 2263), 'os.path.join', 'os.path.join', (['self.path_signal_samples', 'name_signal_sample'], {}), '(self.path_signal_samples, name_signal_sample)\n', (2217, 2263), False, 'import os\n'), ((2763, 2779), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2777, 2779), True, 'import numpy as np\n')] |
import os
import json
import cv2
import numpy as np
import random
from shapely import wkt
from shapely.geometry import Polygon
import torch
from torch.utils.data import Dataset, DataLoader
from utils import preprocess
import torchvision.transforms as transforms
import multiprocessing
# Force the 'spawn' start method for worker processes (required e.g. for
# CUDA tensors in DataLoader workers); the second argument (force=True)
# re-sets the method even if it was already configured.
multiprocessing.set_start_method('spawn', True)
class XView2Dataset(Dataset):
"""xView2
input: Post image
target: pixel-wise classes
"""
dmg_type = {'background': 0, 'no-damage': 1, 'minor-damage': 2, 'major-damage': 3, 'destroyed': 4,
'un-classified': 255}
diaster_type = {'earthquake': 0, 'fire': 1, 'tsunami': 2, 'volcano': 3, 'wind': 4, 'flooding': 5}
def __init__(self, root_dir, rgb_bgr='rgb', preprocessing=None, mode='train'):
assert mode in ('train', 'test')
self.mode = mode
self.root = root_dir
assert rgb_bgr in ('rgb', 'bgr')
self.rgb = bool(rgb_bgr == 'rgb')
self.preprocessing = preprocessing
self.dirs = {'train_imgs': os.path.join(self.root, 'train', 'images'),
'train_labs': os.path.join(self.root, 'train', 'labels'),
'tier3_imgs': os.path.join(self.root, 'tier3', 'images'),
'tier3_labs': os.path.join(self.root, 'tier3', 'labels'),
'test_imgs': os.path.join(self.root, 'test', 'images')}
train_imgs = [s for s in os.listdir(self.dirs['train_imgs'])]
tier3_imgs = [s for s in os.listdir(self.dirs['tier3_imgs'])]
train_labs = [s for s in os.listdir(self.dirs['train_labs'])]
tier3_labs = [s for s in os.listdir(self.dirs['tier3_labs'])]
test_imgs = [s for s in os.listdir(self.dirs['test_imgs'])]
self.sample_files = []
self.neg_sample_files = []
if self.mode == 'train':
self.add_samples_train(self.dirs['train_imgs'], self.dirs['train_labs'], train_imgs, train_labs)
self.add_samples_train(self.dirs['tier3_imgs'], self.dirs['tier3_labs'], tier3_imgs, tier3_labs)
else:
for pre in os.listdir(self.dirs['test_imgs']):
if pre[:9] != 'test_pre_':
continue
img_id = pre[9:][:-4]
post = 'test_post_' + pre[9:]
assert post in test_imgs
files = {'img_id': img_id,
'pre_img': os.path.join(self.dirs['test_imgs'], pre),
'post_img': os.path.join(self.dirs['test_imgs'], post)}
self.sample_files.append(files)
if mode == 'test':
self.data_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def add_samples_train(self, img_dirs, lab_dirs, imgs, labs):
for pre in os.listdir(img_dirs):
if pre[-17:] != '_pre_disaster.png':
continue
chop = pre[:-4].split('_')
img_id = '_'.join(chop[:2])
post = img_id + '_post_disaster.png'
pre_json = img_id + '_pre_disaster.json'
post_json = img_id + '_post_disaster.json'
assert post in imgs
assert pre_json in labs
assert post_json in labs
assert img_id not in self.sample_files
files = {'img_id': img_id,
'pre_img': os.path.join(img_dirs, pre),
'post_img': os.path.join(img_dirs, post),
'pre_json': os.path.join(lab_dirs, pre_json),
'post_json': os.path.join(lab_dirs, post_json)}
self.sample_files.append(files)
def get_sample_info(self, idx):
files = self.sample_files[idx]
pre_img = cv2.imread(files['pre_img'])
post_img = cv2.imread(files['post_img'])
if self.rgb:
pre_img = cv2.cvtColor(pre_img, cv2.COLOR_BGR2RGB)
post_img = cv2.cvtColor(post_img, cv2.COLOR_BGR2RGB)
pre_json = json.loads(open(files['pre_json']).read())
post_json = json.loads(open(files['post_json']).read())
sample = {'pre_img': pre_img, 'post_img': post_img, 'image_id': files['img_id'],
'im_width': post_json['metadata']['width'],
'im_height': post_json['metadata']['height'],
'disaster': post_json['metadata']['disaster_type'],
'pre_meta': {m: pre_json['metadata'][m] for m in pre_json['metadata']},
'post_meta': {m: post_json['metadata'][m] for m in post_json['metadata']},
'pre_builds': dict(), 'post_builds': dict(), 'builds': dict()}
for b in pre_json['features']['xy']:
buid = b['properties']['uid']
sample['pre_builds'][buid] = {p: b['properties'][p] for p in b['properties']}
poly = Polygon(wkt.loads(b['wkt']))
sample['pre_builds'][buid]['poly'] = list(poly.exterior.coords)
for b in post_json['features']['xy']:
buid = b['properties']['uid']
sample['post_builds'][buid] = {p: b['properties'][p] for p in b['properties']}
poly = Polygon(wkt.loads(b['wkt']))
sample['post_builds'][buid]['poly'] = list(poly.exterior.coords)
sample['builds'][buid] = {'poly': list(poly.exterior.coords),
'subtype': b['properties']['subtype']}
# sample['mask_img'] = self.make_mask_img(**sample)
return sample
    def __getitem__(self, idx):
        """Return one sample: pre/post images (plus damage mask in train mode)."""
        files = self.sample_files[idx]
        pre_img = cv2.imread(files['pre_img'])
        post_img = cv2.imread(files['post_img'])
        if self.rgb:
            # cv2 loads BGR; convert when the dataset was configured for RGB
            pre_img = cv2.cvtColor(pre_img, cv2.COLOR_BGR2RGB)
            post_img = cv2.cvtColor(post_img, cv2.COLOR_BGR2RGB)
        if self.mode == 'train':
            sample = self.get_sample_with_mask(files, pre_img, post_img)
            sample['image_id'] = files['img_id']
            if self.preprocessing is not None:
                # jointly augment both images AND the mask so they stay aligned
                transformed = preprocess(sample['pre_img'], sample['post_img'], sample['mask_img'],
                                         flip=self.preprocessing['flip'],
                                         scale=self.preprocessing['scale'],
                                         crop=self.preprocessing['crop'])
                sample['pre_img'] = transformed[0]
                sample['post_img'] = transformed[1]
                sample['mask_img'] = transformed[2]
        else:
            # eval/test path: plain torchvision transforms, no mask
            pre_img = self.data_transforms(pre_img)
            post_img = self.data_transforms(post_img)
            sample = {'pre_img': pre_img, 'post_img': post_img, 'image_id': files['img_id']}
        return sample
@staticmethod
def _get_building_from_json(post_json):
buildings = dict()
for b in post_json['features']['xy']:
buid = b['properties']['uid']
poly = Polygon(wkt.loads(b['wkt']))
buildings[buid] = {'poly': list(poly.exterior.coords),
'subtype': b['properties']['subtype']}
return buildings
def get_sample_with_mask(self, files, pre_img, post_img):
post_json = json.loads(open(files['post_json']).read())
sample = {'pre_img': pre_img, 'post_img': post_img, 'image_id': files['img_id'],
'disaster': self.diaster_type[post_json['metadata']['disaster_type']]}
buildings = self._get_building_from_json(post_json)
sample['mask_img'] = self.make_mask_img(**buildings)
return sample
def make_mask_img(self, **kwargs):
width = 1024
height = 1024
builings = kwargs
mask_img = np.zeros([height, width], dtype=np.uint8)
for dmg in self.dmg_type:
polys_dmg = [np.array(builings[p]['poly']).round().astype(np.int32).reshape(-1, 1, 2)
for p in builings if builings[p]['subtype'] == dmg]
cv2.fillPoly(mask_img, polys_dmg, [self.dmg_type[dmg]])
return mask_img
    def show_sample(self, **kwargs):
        """Visualization hook; intentionally a no-op for now."""
        pass
    def __len__(self):
        # Dataset size = number of (pre, post) image pairs discovered on disk.
        return len(self.sample_files)
if __name__ == '__main__':
    # Smoke test: build two dataset variants, split, and iterate a few samples.
    root_path = "/mnt/Dataset/xView2/v2"
    dataset = XView2Dataset(root_path, rgb_bgr='rgb', preprocessing={'flip': True, 'scale': None, 'crop': (513, 513)})
    dataset_test = XView2Dataset(root_path, rgb_bgr='rgb',
                                 preprocessing={'flip': False, 'scale': (0.8, 2.0), 'crop': (1024, 1024)})
    n_samples = len(dataset)
    # 85/15 random train/test split over the same underlying samples
    n_train = int(n_samples * 0.85)
    n_test = n_samples - n_train
    trainset, testset = torch.utils.data.random_split(dataset, [n_train, n_test])
    dataloader = DataLoader(trainset, batch_size=5, shuffle=True, num_workers=4)
    for i in range(n_test):
        sample = testset[i]
        # random_split wraps the dataset; map back to the original sample index
        original_idx = testset.indices[i]
        info = dataset.get_sample_info(original_idx)
        info2 = dataset_test.get_sample_info(original_idx)
        sample2 = dataset_test[original_idx]
        print(i, original_idx, sample['disaster'], sample['image_id'], sample['post_img'].shape)
        print(i, original_idx, sample2['disaster'], sample2['image_id'], sample2['post_img'].shape)
        print(i, original_idx, info['disaster'], info['image_id'])
        print(i, original_idx, info2['disaster'], info2['image_id'])
    for i, samples in enumerate(dataloader):
        print(i, samples['disaster'], samples['image_id'], samples['post_img'].shape)
| [
"torch.utils.data.DataLoader",
"cv2.cvtColor",
"multiprocessing.set_start_method",
"numpy.zeros",
"cv2.fillPoly",
"cv2.imread",
"numpy.array",
"torch.utils.data.random_split",
"torchvision.transforms.Normalize",
"utils.preprocess",
"os.path.join",
"os.listdir",
"shapely.wkt.loads",
"torchv... | [((287, 334), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', (['"""spawn"""', '(True)'], {}), "('spawn', True)\n", (319, 334), False, 'import multiprocessing\n'), ((8676, 8733), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['dataset', '[n_train, n_test]'], {}), '(dataset, [n_train, n_test])\n', (8705, 8733), False, 'import torch\n'), ((8752, 8815), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': '(5)', 'shuffle': '(True)', 'num_workers': '(4)'}), '(trainset, batch_size=5, shuffle=True, num_workers=4)\n', (8762, 8815), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2876, 2896), 'os.listdir', 'os.listdir', (['img_dirs'], {}), '(img_dirs)\n', (2886, 2896), False, 'import os\n'), ((3801, 3829), 'cv2.imread', 'cv2.imread', (["files['pre_img']"], {}), "(files['pre_img'])\n", (3811, 3829), False, 'import cv2\n'), ((3849, 3878), 'cv2.imread', 'cv2.imread', (["files['post_img']"], {}), "(files['post_img'])\n", (3859, 3878), False, 'import cv2\n'), ((5631, 5659), 'cv2.imread', 'cv2.imread', (["files['pre_img']"], {}), "(files['pre_img'])\n", (5641, 5659), False, 'import cv2\n'), ((5679, 5708), 'cv2.imread', 'cv2.imread', (["files['post_img']"], {}), "(files['post_img'])\n", (5689, 5708), False, 'import cv2\n'), ((7741, 7782), 'numpy.zeros', 'np.zeros', (['[height, width]'], {'dtype': 'np.uint8'}), '([height, width], dtype=np.uint8)\n', (7749, 7782), True, 'import numpy as np\n'), ((1025, 1067), 'os.path.join', 'os.path.join', (['self.root', '"""train"""', '"""images"""'], {}), "(self.root, 'train', 'images')\n", (1037, 1067), False, 'import os\n'), ((1104, 1146), 'os.path.join', 'os.path.join', (['self.root', '"""train"""', '"""labels"""'], {}), "(self.root, 'train', 'labels')\n", (1116, 1146), False, 'import os\n'), ((1183, 1225), 'os.path.join', 'os.path.join', (['self.root', '"""tier3"""', '"""images"""'], {}), "(self.root, 'tier3', 'images')\n", (1195, 1225), False, 
'import os\n'), ((1262, 1304), 'os.path.join', 'os.path.join', (['self.root', '"""tier3"""', '"""labels"""'], {}), "(self.root, 'tier3', 'labels')\n", (1274, 1304), False, 'import os\n'), ((1340, 1381), 'os.path.join', 'os.path.join', (['self.root', '"""test"""', '"""images"""'], {}), "(self.root, 'test', 'images')\n", (1352, 1381), False, 'import os\n'), ((2086, 2120), 'os.listdir', 'os.listdir', (["self.dirs['test_imgs']"], {}), "(self.dirs['test_imgs'])\n", (2096, 2120), False, 'import os\n'), ((3922, 3962), 'cv2.cvtColor', 'cv2.cvtColor', (['pre_img', 'cv2.COLOR_BGR2RGB'], {}), '(pre_img, cv2.COLOR_BGR2RGB)\n', (3934, 3962), False, 'import cv2\n'), ((3986, 4027), 'cv2.cvtColor', 'cv2.cvtColor', (['post_img', 'cv2.COLOR_BGR2RGB'], {}), '(post_img, cv2.COLOR_BGR2RGB)\n', (3998, 4027), False, 'import cv2\n'), ((5752, 5792), 'cv2.cvtColor', 'cv2.cvtColor', (['pre_img', 'cv2.COLOR_BGR2RGB'], {}), '(pre_img, cv2.COLOR_BGR2RGB)\n', (5764, 5792), False, 'import cv2\n'), ((5816, 5857), 'cv2.cvtColor', 'cv2.cvtColor', (['post_img', 'cv2.COLOR_BGR2RGB'], {}), '(post_img, cv2.COLOR_BGR2RGB)\n', (5828, 5857), False, 'import cv2\n'), ((8004, 8059), 'cv2.fillPoly', 'cv2.fillPoly', (['mask_img', 'polys_dmg', '[self.dmg_type[dmg]]'], {}), '(mask_img, polys_dmg, [self.dmg_type[dmg]])\n', (8016, 8059), False, 'import cv2\n'), ((1416, 1451), 'os.listdir', 'os.listdir', (["self.dirs['train_imgs']"], {}), "(self.dirs['train_imgs'])\n", (1426, 1451), False, 'import os\n'), ((1486, 1521), 'os.listdir', 'os.listdir', (["self.dirs['tier3_imgs']"], {}), "(self.dirs['tier3_imgs'])\n", (1496, 1521), False, 'import os\n'), ((1556, 1591), 'os.listdir', 'os.listdir', (["self.dirs['train_labs']"], {}), "(self.dirs['train_labs'])\n", (1566, 1591), False, 'import os\n'), ((1626, 1661), 'os.listdir', 'os.listdir', (["self.dirs['tier3_labs']"], {}), "(self.dirs['tier3_labs'])\n", (1636, 1661), False, 'import os\n'), ((1695, 1729), 'os.listdir', 'os.listdir', (["self.dirs['test_imgs']"], {}), 
"(self.dirs['test_imgs'])\n", (1705, 1729), False, 'import os\n'), ((3435, 3462), 'os.path.join', 'os.path.join', (['img_dirs', 'pre'], {}), '(img_dirs, pre)\n', (3447, 3462), False, 'import os\n'), ((3497, 3525), 'os.path.join', 'os.path.join', (['img_dirs', 'post'], {}), '(img_dirs, post)\n', (3509, 3525), False, 'import os\n'), ((3560, 3592), 'os.path.join', 'os.path.join', (['lab_dirs', 'pre_json'], {}), '(lab_dirs, pre_json)\n', (3572, 3592), False, 'import os\n'), ((3628, 3661), 'os.path.join', 'os.path.join', (['lab_dirs', 'post_json'], {}), '(lab_dirs, post_json)\n', (3640, 3661), False, 'import os\n'), ((4907, 4926), 'shapely.wkt.loads', 'wkt.loads', (["b['wkt']"], {}), "(b['wkt'])\n", (4916, 4926), False, 'from shapely import wkt\n'), ((5210, 5229), 'shapely.wkt.loads', 'wkt.loads', (["b['wkt']"], {}), "(b['wkt'])\n", (5219, 5229), False, 'from shapely import wkt\n'), ((6091, 6271), 'utils.preprocess', 'preprocess', (["sample['pre_img']", "sample['post_img']", "sample['mask_img']"], {'flip': "self.preprocessing['flip']", 'scale': "self.preprocessing['scale']", 'crop': "self.preprocessing['crop']"}), "(sample['pre_img'], sample['post_img'], sample['mask_img'], flip=\n self.preprocessing['flip'], scale=self.preprocessing['scale'], crop=\n self.preprocessing['crop'])\n", (6101, 6271), False, 'from utils import preprocess\n'), ((6980, 6999), 'shapely.wkt.loads', 'wkt.loads', (["b['wkt']"], {}), "(b['wkt'])\n", (6989, 6999), False, 'from shapely import wkt\n'), ((2398, 2439), 'os.path.join', 'os.path.join', (["self.dirs['test_imgs']", 'pre'], {}), "(self.dirs['test_imgs'], pre)\n", (2410, 2439), False, 'import os\n'), ((2478, 2520), 'os.path.join', 'os.path.join', (["self.dirs['test_imgs']", 'post'], {}), "(self.dirs['test_imgs'], post)\n", (2490, 2520), False, 'import os\n'), ((2670, 2691), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2689, 2691), True, 'import torchvision.transforms as transforms\n'), ((2709, 2775), 
'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2729, 2775), True, 'import torchvision.transforms as transforms\n'), ((7842, 7871), 'numpy.array', 'np.array', (["builings[p]['poly']"], {}), "(builings[p]['poly'])\n", (7850, 7871), True, 'import numpy as np\n')] |
from __future__ import print_function, absolute_import, unicode_literals, division

# Standard library
import csv
import glob
import json
import os
import random
import re
import string
from collections import OrderedDict
from shutil import copytree

# Third-party
import nltk
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras_preprocessing.sequence import pad_sequences
from nltk import PorterStemmer, word_tokenize
from nltk.tag import StanfordPOSTagger
from sklearn import preprocessing
from tabulate import tabulate
from tqdm import tqdm

# Project
from amt.settings import PATH_visible_not_visible_actions_csv
from classify.elmo_embeddings import load_elmo_embedding
from classify.utils import reshape_3d_to_2d
from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, \
    print_nb_actions_miniclips_train_test_eval, measure_nb_unique_actions
stemmer = PorterStemmer()
# Stanford POS tagger needs its jar/model locations exported before use
os.environ["CLASSPATH"] = "stanford-postagger-full-2018-10-16/"
os.environ["STANFORD_MODELS"] = "stanford-postagger-full-2018-10-16/models/"
st = StanfordPOSTagger('english-bidirectional-distsim.tagger')
path_visible_not_visible_actions_csv = 'data/AMT/Output/All/new_clean_visible_not_visible_actions_video_after_spam.csv'
# GloVe word vectors (50d), indexed by word; loaded eagerly at import time
glove = pd.read_table("data/glove.6B.50d.txt", sep=" ", index_col=0, header=None, quoting=csv.QUOTE_NONE)
# translation table that deletes every punctuation character
table = str.maketrans({key: None for key in string.punctuation})
# GloVe-style vectors for POS tags, indexed by tag
glove_pos = pd.read_table("data/glove_vectors.txt", sep=" ", index_col=0, header=None, quoting=csv.QUOTE_NONE)
def load_embeddings():
    """Load the GloVe 6B-50d vectors into a {word: float32 ndarray} dict."""
    index = dict()
    with open("data/glove.6B.50d.txt") as handle:
        for line in handle:
            parts = line.split()
            index[parts[0]] = np.asarray(parts[1:], dtype='float32')
    print('Loaded %s word vectors.' % len(index))
    return index
def chunks(l, n):
    """Split list *l* into consecutive chunks of at most *n* items.

    *n* is clamped to a minimum of 1; the last chunk may be shorter.
    Returns a list of lists.
    """
    n = max(1, n)
    # BUGFIX: the original used Python-2-only xrange(), which raises
    # NameError under Python 3; range() behaves the same here on both.
    return [l[i:i + n] for i in range(0, len(l), n)]
def merge_chunks(list_chunks):
    """Flatten a list of chunks back into one flat list."""
    flat = []
    for chunk in list_chunks:
        flat.extend(chunk)
    return flat
def get_word_embedding(embeddings_index, word):
    """Look up *word* in the embedding dict; return an ndarray, or None if absent."""
    vector = embeddings_index.get(word)
    return None if vector is None else np.asarray(vector)
# Retrieve embedding for a word
def vec(w, glove_emb):
    """Return the embedding row for word *w* from DataFrame *glove_emb* as an ndarray.

    BUGFIX: DataFrame/Series.as_matrix() was removed in pandas 1.0;
    to_numpy() is the supported replacement and returns the same array.
    """
    return glove_emb.loc[w].to_numpy()
def getStartEnd(action, action_context):
    """Locate (begin, end) word indices of *action* inside *action_context*.

    Action words are matched in order but need not be contiguous; a context
    word also matches when the action word is a substring of it (e.g. a stem
    inside an inflected form). Of all feasible spans, the one with the LAST
    start index is returned.

    NOTE(review): raises IndexError when no feasible span exists -- callers
    appear to assume the action always occurs in its context; confirm.
    """
    action = action.split()
    # strip punctuation (module-level `table`) and keep alphabetic tokens only
    action = [i.translate(table) for i in action if i.isalpha()]
    action_context = [i.translate(table) for i in action_context.split()]
    # every position where the first action word appears exactly
    possible_beginnings = [i for i in range(len(action_context)) if action_context[i] == action[0]]
    tenable_beginnings = []
    tenable_endings = []
    for beginning in possible_beginnings:
        current_spot = beginning
        tenable = True
        for word in action[1:]:
            ok = 0
            if word in action_context[current_spot + 1:]:
                # exact match further along in the context
                current_spot = action_context[current_spot + 1:].index(word) + current_spot + 1
                ok = 1
            else:
                # fall back to substring match
                for l in action_context[current_spot + 1:]:
                    if word in l:
                        current_spot = action_context[current_spot + 1:].index(l) + current_spot + 1
                        ok = 1
                        break
            if ok == 0:
                tenable = False
                break
        if tenable:
            tenable_beginnings.append(beginning)
            tenable_endings.append(current_spot)
    beginning = tenable_beginnings[-1]
    ending = tenable_endings[-1]
    return (beginning, ending)
def getPOSEmbeddings(action, action_context):
    """Average the glove_pos vectors of the POS tags over the action's span.

    Tags the whole context with the Stanford tagger, locates the action span
    via getStartEnd, and averages the tag vectors found in that span.
    Returns a length-50 vector (zeros when no tag is in the vocabulary).
    """
    action = action.replace("y' all", "y'all")
    tagged_sentences = st.tag(action_context.split())
    (beginning, ending) = getStartEnd(action, action_context)
    action_pos = tagged_sentences[beginning:ending + 1]
    # BUGFIX: the accumulator must be a numpy array. With the original plain
    # list, `list += ndarray` EXTENDS the list element-by-element
    # (list.__iadd__) instead of summing, and `/= count` then raises TypeError.
    pos_representation = np.zeros(50)
    count = 0
    for (word, pos) in action_pos:
        if pos in glove_pos.index:
            count += 1
            pos_representation += vec(pos, glove_pos)
    if count > 0:
        pos_representation /= count
    return pos_representation
def getContextEmbeddings(action, action_context, context_size=5):
    """Average GloVe vectors of up to *context_size* words on each side of the action span.

    Returns (right_representation, left_representation) -- note the order --
    each a length-50 vector (zeros when no context word is in the vocabulary).
    """
    action = action.replace("y' all", "y'all")
    (beginning, ending) = getStartEnd(action, action_context)
    words = action_context.split()
    if beginning - context_size < 0:
        left_context = words[0:beginning]
    else:
        left_context = words[beginning - context_size:beginning]
    if ending + 1 + context_size > len(words):
        right_context = words[ending + 1:]
    else:
        right_context = words[ending + 1:ending + 1 + context_size]
    # BUGFIX: both accumulators must be numpy arrays. With the original plain
    # lists, `list += ndarray` EXTENDS the list (list.__iadd__) instead of
    # summing element-wise, and the later `/= count` raises TypeError.
    left_representation = np.zeros(50)
    count = 0
    for word in left_context:
        if word in glove.index:
            count += 1
            left_representation += vec(word, glove)
    if count > 0:
        left_representation /= count
    right_representation = np.zeros(50)
    count = 0
    for word in right_context:
        if word in glove.index:
            count += 1
            right_representation += vec(word, glove)
    if count > 0:
        right_representation /= count
    return (right_representation, left_representation)
def get_pos_emb_all():
    """Compute a POS embedding for every (video, action) pair and dump to CSV.

    Reads the transcript context from data/dict_context.json; actions with no
    context get a zero vector. Output: data/Embeddings/new_pos_embeddings.csv.
    """
    video_list = []
    action_list = []
    pos_embedding_list = []
    with open('data/dict_context.json', 'r') as fp:
        context = json.load(fp)
    for video in tqdm(context.keys()):
        for action in context[video].keys():
            action_context = context[video][action]
            if action_context != []:
                pos_embedding = getPOSEmbeddings(action, action_context)
            else:
                # no transcript context available for this action
                pos_embedding = [0] * 50
            video_list.append(video)
            action_list.append(action)
            pos_embedding_list.append(pos_embedding)
    results_train = pd.DataFrame({'video': video_list, 'action': action_list, 'pos_embedding': pos_embedding_list})
    results_train.to_csv("data/Embeddings/new_pos_embeddings.csv")
def get_context_emb_all():
    """Compute left/right context embeddings for every (video, action) pair and dump to CSV.

    NOTE(review): getContextEmbeddings returns (right, left) but the unpack
    below assigns (left_context, right_context) -- the two sides appear to be
    swapped in the output CSV; confirm intended column semantics.
    """
    video_list = []
    action_list = []
    left_context_list = []
    right_context_list = []
    with open('data/dict_context.json', 'r') as fp:
        context = json.load(fp)
    for video in context.keys():
        for action in context[video].keys():
            action_context = context[video][action]
            if action_context != []:
                (left_context, right_context) = getContextEmbeddings(action, action_context)
            else:
                # no transcript context available for this action
                (left_context, right_context) = ([0] * 50, [0] * 50)
            video_list.append(video)
            action_list.append(action)
            left_context_list.append(left_context)
            right_context_list.append(right_context)
    results_train = pd.DataFrame({'video': video_list, 'action': action_list, 'left_context': left_context_list, \
                                  'right_context': right_context_list})
    results_train.to_csv("data/Embeddings/context_embeddings.csv")
def create_context_dict(dict_video_actions, path_context_data=None):
    """Pair each annotated action with the transcript sentence(s) it came from.

    Args:
        dict_video_actions: maps a miniclip name like "<video>_<part>mini_<n>"
            to [visible_actions, not_visible_actions].
        path_context_data: CSV whose rows are
            (index, action, end_time, video_name, sentence, start_time).
            Defaults to the module-level annotation CSV (backward compatible).

    Returns:
        (dict_video_sentence, dict_video_action_sentence): the first maps
        ("(video, part)", action) -> [visible_sentences, not_visible_sentences];
        the second maps miniclip -> [[action, sentence], ...] per label, with
        time stamps stripped from sentences.
    """
    if path_context_data is None:
        path_context_data = path_visible_not_visible_actions_csv
    dict_action_sentence = dict()
    dict_video_action_sentence = dict()
    # 'with' fixes the original's leaked file handle
    with open(path_context_data, 'r') as handle:
        reader = csv.reader(handle)
        next(reader)  # skip header row
        for row in reader:
            [action, end_time, video_name, sentence, start_time] = row[1:]
            dict_action_sentence.setdefault(video_name, []).append([action, sentence])
    dict_video_sentence = dict()
    for video_name in dict_video_actions.keys():
        # "abc_3mini_5" -> "(abc, 3)", the naming used inside the CSV
        video_name_in_file = "(" + video_name.split("_")[0] + ", " + video_name.split("_")[1].split("mini")[0] + ")"
        for index_visible_not_visible in [0, 1]:
            list_actions = dict_video_actions[video_name][index_visible_not_visible]
            if video_name_in_file not in dict_action_sentence:
                continue
            for [action, sentence] in dict_action_sentence[video_name_in_file]:
                if list_actions.count(action) == 0:
                    continue
                # BUGFIX: the original membership test used (video_name, action),
                # a key that is never inserted, so the entry was re-created (and
                # previously accumulated sentences wiped) on every match.
                if (video_name_in_file, action) not in dict_video_sentence:
                    dict_video_sentence[(video_name_in_file, action)] = [[], []]
                if video_name not in dict_video_action_sentence:
                    dict_video_action_sentence[video_name] = [[], []]
                # strip "HH:" / "MM" style time stamps from the sentence
                no_time_sentence = re.sub('[0-9][0-9]:*', '', sentence)
                dict_video_sentence[(video_name_in_file, action)][index_visible_not_visible].append(
                    no_time_sentence)
                dict_video_action_sentence[video_name][index_visible_not_visible].append(
                    [action, no_time_sentence])
    return dict_video_sentence, dict_video_action_sentence
def get_data_sentence(dict_train_data):
    """Collect, per action in *dict_train_data*, the transcript sentence list it came from.

    Order: for each miniclip, first its visible actions (label 0), then its
    not-visible ones (label 1); actions with no matching context yield [""].
    """
    # BUGFIX: create_context_dict requires the CSV path as a second argument;
    # the original call omitted it and raised TypeError at runtime.
    dict_context_data, _ = create_context_dict(dict_train_data, path_visible_not_visible_actions_csv)
    train_data_sentence = []
    for key in dict_train_data.keys():
        # "abc_3mini_5" -> "(abc, 3)", the naming used in the context dict
        video_name_in_file = "(" + key.split("_")[0] + ", " + key.split("_")[1].split("mini")[0] + ")"
        for label_idx in (0, 1):  # 0 = visible, 1 = not visible
            for action in dict_train_data[key][label_idx]:
                context_key = (video_name_in_file, action)
                if context_key in dict_context_data:
                    sentence = dict_context_data[context_key][label_idx]
                    train_data_sentence.append(sentence if sentence else [""])
                else:
                    train_data_sentence.append([""])
    return train_data_sentence
def create_action_embedding(embeddings_index, action, dimension_embedding):
    """Average the word vectors of *action* into a (1, dimension_embedding) float32 array.

    An empty action string (used for a missing prev/next action) maps to a
    constant vector of 10s so it stays distinguishable; an action whose words
    are all out-of-vocabulary maps to a random vector.
    """
    if action == "":
        return np.ones((1, dimension_embedding), dtype='float32') * 10
    unknown_words = set()
    total = np.zeros((1, dimension_embedding), dtype='float32')
    found = 0
    for token in word_tokenize(action):
        if token in unknown_words:
            continue
        coefs = embeddings_index.get(token)
        if coefs is None:
            unknown_words.add(token)
            continue
        total += np.asarray(coefs)
        found += 1
    if found != 0:
        total = total / found
    if (total == np.zeros((1,), dtype=np.float32)).all():
        # no action word was in the vocabulary -> random fallback
        total = np.random.rand(1, dimension_embedding).astype('float32')
    return total
def create_average_action_embedding(embeddings_index, list_actions):
    """Stack the averaged word embedding of every action into one (n, dim) matrix."""
    dim = len(embeddings_index.get("example"))
    matrix = np.zeros((len(list_actions), dim))
    for row, action in enumerate(list_actions):
        matrix[row] = create_action_embedding(embeddings_index, action, dim)
    return matrix
def BOW(train_data, list_word_in_vocab):
    """Binary bag-of-words matrix: one row per action, one column per vocab word."""
    vocab_size = len(list_word_in_vocab)
    matrix = np.zeros((len(train_data), vocab_size))
    for row, action in enumerate(train_data):
        tokens = set(nltk.word_tokenize(action))
        for col, vocab_word in enumerate(list_word_in_vocab):
            if vocab_word in tokens:
                matrix[row, col] = 1
    return matrix
def process_batch_data(train_data, batch_size):
    """ELMo-embed *train_data* in batches of *batch_size* and stack the results row-wise."""
    batches = chunks(train_data, batch_size)
    embeddings = load_elmo_embedding(batches[0])
    for batch in batches[1:]:
        embeddings = np.concatenate((embeddings, load_elmo_embedding(batch)), axis=0)
    return embeddings
def preprocess_pos_embeddings(train_video, path_embedding):
    """Read the POS-embedding CSV into {(video, action): ndarray of strings}.

    Only rows whose video is in *train_video* are kept. The 'pos_embedding'
    column holds a bracketed vector; it is split on commas when present,
    otherwise on whitespace.
    """
    frame = pd.read_csv(path_embedding)
    result = {}
    for _, row in frame.iterrows():
        video = row['video']
        if video not in train_video:
            continue
        action = row['action']
        # data patch: one annotation lost its leading verb
        if 'my vegetables in water instead of oil' in action:
            action = 'sauteing my vegetables in water instead of oil'
        raw = row['pos_embedding'][1:-1]
        separator = ',' if ',' in raw else None  # None = whitespace split
        result[(video, action)] = np.asarray(raw.split(separator))
    return result
def preprocess_context_embeddings(train_video, path_embedding):
    """Read left/right context embeddings into {(video, action): concatenated float vector}.

    Only rows whose video is in *train_video* are kept. Each context column
    holds a bracketed float vector, split on commas when present, otherwise on
    whitespace; the result concatenates left first, then right.
    """
    frame = pd.read_csv(path_embedding)
    result = {}
    for _, row in frame.iterrows():
        video = row['video']
        if video not in train_video:
            continue
        action = row['action']
        parts = []
        for column in ('left_context', 'right_context'):
            raw = row[column][1:-1]
            separator = ',' if ',' in raw else None  # None = whitespace split
            parts.append(np.asarray([float(x) for x in raw.split(separator)]))
        result[(video, action)] = np.concatenate(parts, axis=0)
    return result
def get_pos_embedding(train_data, dict_pos_embeddings):
    """Stack per-(video, action) POS vectors into an (n_actions, 50) matrix.

    Raises ValueError when a (video, action) pair has no precomputed vector.
    """
    matrix = np.zeros((len(train_data), 50))
    for row, (video, action, label) in enumerate(train_data):
        key = (video, action)
        if key not in dict_pos_embeddings:
            raise ValueError(str((video, action, label)) + ' not in dict_pos_embeddings!!')
        matrix[row] = dict_pos_embeddings[key]
    return matrix
def get_context_embedding(train_data, dict_context_embeddings):
    """Stack per-(video, action) context vectors into an (n_actions, 100) matrix.

    Raises ValueError when a (video, action) pair has no precomputed vector.
    NOTE: the original built an unused json_dict_context_embeddings copy of
    the keys here; that dead code has been removed.
    """
    context_embedding_size = 100
    matrix = np.zeros((len(train_data), context_embedding_size))
    for row, (video, action, label) in enumerate(train_data):
        key = (video, action)
        if key not in dict_context_embeddings:
            raise ValueError(str((video, action, label)) + 'not in dict_context_embeddings!!')
        matrix[row] = dict_context_embeddings[key]
    return matrix
def create_visual_features_matrices(train_miniclips, type_feat, avg_or_concatenate):
    """Load per-miniclip visual features (Inception and/or C3D .npy files) into one matrix.

    Args:
        train_miniclips: list of miniclip ids (optionally ending in '.mp4').
        type_feat: 'inception', 'c3d' or 'inception + c3d'; may also arrive
            wrapped in a 1-element sequence (hence the type_feat[0] checks).
        avg_or_concatenate: 'avg' averages features over frames and
            L2-normalizes; anything else zero-pads each clip to 61 frames.

    Returns:
        ndarray of shape (n_clips, dim) for 'avg', else (n_clips, 61, dim).
    """
    nb_frames = 61
    if type_feat[0] == 'inception' or type_feat == 'inception':
        print("Using inception")
        path_video_features = 'data/Video/Features/inception/'
        dimension_output = 2048
    elif type_feat[0] == 'inception + c3d' or type_feat == 'inception + c3d':
        print("Using inception + c3d")
        path_video_features = 'data/Video/Features/inception_c3d/'
        dimension_output = 6144
    elif type_feat[0] == 'c3d' or type_feat == 'c3d':
        print("Using c3d")
        path_video_features = 'data/Video/Features/c3d/'
        dimension_output = 4096
    else:
        print("Using default: inception + c3d")
        path_video_features = 'data/Video/Features/inception_c3d/'
        dimension_output = 6144
    index = 0
    if avg_or_concatenate == 'avg':
        matrix_visual_features = np.zeros(
            (len(train_miniclips), dimension_output))  # nb actions = nb miniclips
        padded_video_features = np.zeros(dimension_output)  # no need to pad if avg
    else:
        matrix_visual_features = np.zeros(
            (len(train_miniclips), nb_frames, dimension_output))  # nb actions = nb miniclips
        padded_video_features = np.zeros((nb_frames, dimension_output))
    for miniclip_id in train_miniclips:
        # features stored as one (n_frames, dim) array per miniclip
        video_features = np.load(str(path_video_features + miniclip_id.replace('.mp4', '') + '.npy'))
        # video_features = video_features[1:-1, :]
        if avg_or_concatenate == 'avg':
            avg_video_features = np.mean(video_features, axis=0)
            padded_video_features = avg_video_features
            # L2 normalize: the square elems sum to 1
            padded_video_features = preprocessing.normalize(np.asarray(padded_video_features).reshape(1,-1), norm='l2')
        else:
            # zero-pad each feature channel up to nb_frames frames
            for i in range(dimension_output):
                padded_video_features[:, i] = np.array(
                    list(video_features[:, i]) + (nb_frames - video_features.shape[0]) * [0])
        matrix_visual_features[index] = padded_video_features
        index += 1
    return matrix_visual_features
def get_visual_features(train_miniclips, test_miniclips, val_miniclips, type_feat, avg_or_concatenate):
    """Build the visual-feature matrix for each of the three data splits."""
    splits = (train_miniclips, test_miniclips, val_miniclips)
    feats = [create_visual_features_matrices(clips, type_feat, avg_or_concatenate) for clips in splits]
    return feats[0], feats[1], feats[2]
def get_matrix_word_embedding(embeddings_index, train_data, test_data, val_data):
    """Build a (vocab_size, 50) GloVe weight matrix over all splits' actions.

    Returns (embedding_matrix, max_action_length_in_words); words missing
    from *embeddings_index* keep a zero row.
    """
    [train_actions, test_actions, val_actions], _, _ = process_data(train_data, test_data, val_data)
    all_actions = train_actions + test_actions + val_actions
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(all_actions)
    vocab_size = len(tokenizer.word_index) + 1  # +1 for the padding index
    longest_action = max(all_actions, key=len)
    max_length = len(longest_action.split(" "))
    weights = np.zeros((vocab_size, 50))
    for word, idx in tokenizer.word_index.items():
        coefs = embeddings_index.get(word)
        if coefs is not None:
            weights[idx] = coefs
    return weights, max_length
def get_action_embedding(embeddings_index, train_actions, test_actions, val_actions):
    """Averaged word embeddings for each split's action list, as three matrices."""
    return tuple(create_average_action_embedding(embeddings_index, actions)
                 for actions in (train_actions, test_actions, val_actions))
def add_pos_embed(train_data, test_data, val_data, embedding_matrix_actions_train, embedding_matrix_actions_test,
                  embedding_matrix_actions_val):
    """Concatenate per-action POS embeddings onto each split's action embeddings.

    When the action-embedding matrices are None, the bare POS embeddings are
    returned instead; 3-D action matrices are flattened to 2-D first.
    """
    _, _, [train_video, test_video, val_video] = process_data(train_data, test_data, val_data)
    csv_path = 'data/Embeddings/pos_embeddings.csv'
    pos_train = get_pos_embedding(train_data, preprocess_pos_embeddings(train_video, path_embedding=csv_path))
    pos_test = get_pos_embedding(test_data, preprocess_pos_embeddings(test_video, path_embedding=csv_path))
    pos_val = get_pos_embedding(val_data, preprocess_pos_embeddings(val_video, path_embedding=csv_path))
    if embedding_matrix_actions_train is None:
        return pos_train, pos_test, pos_val
    if len(embedding_matrix_actions_train.shape) == 3:
        embedding_matrix_actions_train = reshape_3d_to_2d(embedding_matrix_actions_train)
        embedding_matrix_actions_test = reshape_3d_to_2d(embedding_matrix_actions_test)
        embedding_matrix_actions_val = reshape_3d_to_2d(embedding_matrix_actions_val)
    return (np.concatenate((embedding_matrix_actions_train, pos_train), axis=1),
            np.concatenate((embedding_matrix_actions_test, pos_test), axis=1),
            np.concatenate((embedding_matrix_actions_val, pos_val), axis=1))
def add_context_embed(train_data, test_data, val_data, embedding_matrix_actions_train, embedding_matrix_actions_test,
                      embedding_matrix_actions_val):
    """Concatenate per-action transcript-context embeddings onto each split's action embeddings.

    When the action-embedding matrices are None, the bare context embeddings
    are returned instead; 3-D action matrices are flattened to 2-D first.
    """
    _, _, [train_video, test_video, val_video] = process_data(train_data, test_data, val_data)
    csv_path = 'data/Embeddings/context_embeddings.csv'
    ctx_train = get_context_embedding(train_data, preprocess_context_embeddings(train_video, path_embedding=csv_path))
    ctx_test = get_context_embedding(test_data, preprocess_context_embeddings(test_video, path_embedding=csv_path))
    ctx_val = get_context_embedding(val_data, preprocess_context_embeddings(val_video, path_embedding=csv_path))
    if embedding_matrix_actions_train is None:
        return ctx_train, ctx_test, ctx_val
    if len(embedding_matrix_actions_train.shape) == 3:
        embedding_matrix_actions_train = reshape_3d_to_2d(embedding_matrix_actions_train)
        embedding_matrix_actions_test = reshape_3d_to_2d(embedding_matrix_actions_test)
        embedding_matrix_actions_val = reshape_3d_to_2d(embedding_matrix_actions_val)
    return (np.concatenate((embedding_matrix_actions_train, ctx_train), axis=1),
            np.concatenate((embedding_matrix_actions_test, ctx_test), axis=1),
            np.concatenate((embedding_matrix_actions_val, ctx_val), axis=1))
def pad_actions(train_actions, test_actions, val_actions):
    """Tokenize the three action lists and post-pad them to the longest action's word count."""
    all_actions = train_actions + test_actions + val_actions
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(all_actions)
    # pad to the word count of the longest (by characters) action string
    max_length = len(max(all_actions, key=len).split(" "))
    padded = [pad_sequences(tokenizer.texts_to_sequences(actions), maxlen=max_length, padding='post')
              for actions in (train_actions, test_actions, val_actions)]
    return padded[0], padded[1], padded[2]
def get_concreteness_score(list_actions, type):
    """Max concreteness score per action, filtered by POS *type*.

    *type* is one of 'all', 'noun + vb', 'vb', 'noun'. Actions missing from
    the concreteness dictionary (or with no matching POS entry) score 0.
    Returns an (n, 1) array; raises ValueError on an unknown *type* when an
    action is found in the dictionary.
    """
    with open('data/dict_action_pos_concreteness.json', 'r') as fp:
        dict_concreteness = json.load(fp)
    pos_filters = {
        'all': lambda tag: True,
        'noun + vb': lambda tag: 'VB' in tag or 'NN' in tag,
        'vb': lambda tag: 'VB' in tag,
        'noun': lambda tag: 'NN' in tag,
    }
    list_scores = []
    for action in list_actions:
        if action in dict_concreteness:
            if type not in pos_filters:
                raise ValueError("Wrong type in concreteness dict_type")
            keep = pos_filters[type]
            scores = [entry[2] for entry in dict_concreteness[action] if keep(entry[1])]
        else:
            scores = []
        list_scores.append(max(scores) if scores else 0)
    return np.array(list_scores).reshape(-1, 1)
def add_concreteness_score(train_actions, test_actions, val_actions, embedding_matrix_actions_train,
                           embedding_matrix_actions_test, embedding_matrix_actions_val, type):
    """Append a per-action concreteness-score column to each split's feature matrix.

    When the feature matrices are None, the score columns alone are returned.
    3-D (sequence-shaped) matrices are flattened to 2-D before concatenation.
    """
    score_columns = [get_concreteness_score(actions, type)
                     for actions in (train_actions, test_actions, val_actions)]
    if embedding_matrix_actions_train is None:
        return score_columns[0], score_columns[1], score_columns[2]
    matrices = [embedding_matrix_actions_train, embedding_matrix_actions_test,
                embedding_matrix_actions_val]
    if len(matrices[0].shape) == 3:
        matrices = [reshape_3d_to_2d(m) for m in matrices]
    combined = [np.concatenate((matrix, column), axis=1)
                for matrix, column in zip(matrices, score_columns)]
    return combined[0], combined[1], combined[2]
def get_embedding_next_action(embeddings_index, train_data):
    """Average-embed, for every (video, action, label) sample, the action that
    follows it within the same miniclip ('' sentinel for the last action)."""
    dict_prev_next_action = get_dict_prev_next_actions()
    # Value layout is [prev_action_label, next_action_label]; each is [action, label].
    list_next_actions = [dict_prev_next_action[(video, action, label)][1][0]
                         for (video, action, label) in train_data]
    return create_average_action_embedding(embeddings_index, list_next_actions)
def add_next_action(embeddings_index, train_data, test_data, val_data, embedding_matrix_actions_train,
                    embedding_matrix_actions_test, embedding_matrix_actions_val):
    """Append the embedding of each sample's *following* action to its features.

    When the feature matrices are None, the next-action embeddings alone are
    returned. 3-D matrices are flattened to 2-D before concatenation.
    """
    next_embeddings = [get_embedding_next_action(embeddings_index, split)
                       for split in (train_data, test_data, val_data)]
    if embedding_matrix_actions_train is None:
        return next_embeddings[0], next_embeddings[1], next_embeddings[2]
    matrices = [embedding_matrix_actions_train, embedding_matrix_actions_test,
                embedding_matrix_actions_val]
    if len(matrices[0].shape) == 3:
        matrices = [reshape_3d_to_2d(m) for m in matrices]
    # Next-action embedding is appended AFTER the existing features.
    merged = [np.concatenate((matrix, nxt), axis=1)
              for matrix, nxt in zip(matrices, next_embeddings)]
    return merged[0], merged[1], merged[2]
def get_embedding_prev_action(embeddings_index, train_data):
    """Average-embed, for every (video, action, label) sample, the action that
    precedes it within the same miniclip ('' sentinel for the first action)."""
    dict_prev_next_action = get_dict_prev_next_actions()
    # Value layout is [prev_action_label, next_action_label]; each is [action, label].
    list_prev_actions = [dict_prev_next_action[(video, action, label)][0][0]
                         for (video, action, label) in train_data]
    return create_average_action_embedding(embeddings_index, list_prev_actions)
def add_prev_action(embeddings_index, train_data, test_data, val_data, embedding_matrix_actions_train,
                    embedding_matrix_actions_test, embedding_matrix_actions_val):
    """Prepend the embedding of each sample's *preceding* action to its features.

    When the feature matrices are None, the previous-action embeddings alone
    are returned. 3-D matrices are flattened to 2-D before concatenation.
    """
    prev_embeddings = [get_embedding_prev_action(embeddings_index, split)
                       for split in (train_data, test_data, val_data)]
    if embedding_matrix_actions_train is None:
        return prev_embeddings[0], prev_embeddings[1], prev_embeddings[2]
    matrices = [embedding_matrix_actions_train, embedding_matrix_actions_test,
                embedding_matrix_actions_val]
    if len(matrices[0].shape) == 3:
        matrices = [reshape_3d_to_2d(m) for m in matrices]
    # Unlike add_next_action, the neighbour embedding comes FIRST here.
    merged = [np.concatenate((prev, matrix), axis=1)
              for matrix, prev in zip(matrices, prev_embeddings)]
    return merged[0], merged[1], merged[2]
def add_visual_features(train_data, test_data, val_data, x_train, x_test,
                        x_val, type_feat):
    """Concatenate averaged per-miniclip visual features onto the text features.

    When the text features (x_*) are None, the visual features alone are
    returned for each split. Also prints the validation-set action/feature
    pairs for debugging.
    """
    actions_split, _, miniclips_split = process_data(train_data, test_data, val_data)
    train_actions, test_actions, val_actions = actions_split
    train_miniclips, test_miniclips, val_miniclips = miniclips_split
    video_data_train, video_data_test, video_data_val = get_visual_features(
        train_miniclips, test_miniclips, val_miniclips, type_feat, avg_or_concatenate='avg')
    if x_train is None:
        # No text features to extend: visual features become the whole input.
        visual_concat_train = video_data_train
        visual_concat_test = video_data_test
        visual_concat_val = video_data_val
    else:
        visual_concat_train = np.concatenate((x_train, video_data_train), axis=1)
        visual_concat_test = np.concatenate((x_test, video_data_test), axis=1)
        visual_concat_val = np.concatenate((x_val, video_data_val), axis=1)
    # Debug dump of the validation-set action/feature pairing.
    print("Visual feature: ")
    for i, features in enumerate(video_data_val):
        print(val_actions[i], features)
    return visual_concat_train, visual_concat_test, visual_concat_val
def get_embeddings_by_type(type_embedding, add_extra,
                           embeddings_index, train_data,
                           test_data, val_data, type_concreteness):
    """Assemble the (x_train, x_test, x_val) feature matrices.

    Starts from a base text representation selected by ``type_embedding``
    ("action" = averaged word embeddings, "padding" = padded integer
    sequences, anything else = no base features), then concatenates every
    extra feature family named in ``add_extra``, in a fixed order.
    """
    [train_actions, test_actions, val_actions], _, _ = process_data(train_data, test_data, val_data)
    if type_embedding == "action":
        features = get_action_embedding(embeddings_index, train_actions, test_actions, val_actions)
    elif type_embedding == "padding":
        features = pad_actions(train_actions, test_actions, val_actions)
    else:
        print("No embedding to concatenate to. Will store only extra embeddings")
        features = (None, None, None)
    # Each stage: (key in add_extra, log message, transform on the feature triple).
    stages = [
        ("pos", "Add pos",
         lambda xs: add_pos_embed(train_data, test_data, val_data, xs[0], xs[1], xs[2])),
        ("context", "Add context",
         lambda xs: add_context_embed(train_data, test_data, val_data, xs[0], xs[1], xs[2])),
        ("concreteness", "Add concreteness: " + type_concreteness + " max score",
         lambda xs: add_concreteness_score(train_actions, test_actions, val_actions,
                                           xs[0], xs[1], xs[2], type_concreteness)),
        # Previous-action features are added first, then next-action features.
        ("prev-next-action", "Add prev-next action",
         lambda xs: add_next_action(embeddings_index, train_data, test_data, val_data,
                                    *add_prev_action(embeddings_index, train_data, test_data,
                                                     val_data, xs[0], xs[1], xs[2]))),
        ("visual-c3d-inception", "Add visual-c3d-inception",
         lambda xs: add_visual_features(train_data, test_data, val_data,
                                        xs[0], xs[1], xs[2], type_feat='inception')),
    ]
    for key, message, transform in stages:
        if key in add_extra:
            print(message)
            features = transform(features)
    return features[0], features[1], features[2]
def get_dict_prev_next_actions(path_visible_not_visible_actions_csv=PATH_visible_not_visible_actions_csv):
    """Map every (video, action, label) sample to its neighbouring actions.

    Reads the annotation CSV and, per miniclip, collects the [action, label]
    pairs in row order (label 0 = visible, 1 = not visible). For each sample
    the preceding and following pair are recorded; boundary samples get the
    sentinel ["", -1] on the missing side.

    Returns:
        OrderedDict keyed by (video, action, label) with values
        [prev_action_label, next_action_label].
    """
    df_data = pd.read_csv(path_visible_not_visible_actions_csv)
    dict_miniclip_action = OrderedDict()
    for index, row in df_data.iterrows():
        miniclip = row['Video_name']
        # Rows with neither annotation carry no action at all — skip them.
        if pd.isnull(row['Visible Actions']) and pd.isnull(row['Not Visible Actions']):
            continue
        elif pd.isnull(row['Visible Actions']):
            action = row['Not Visible Actions']
            if type(action) is str:
                # NOTE(review): .encode('utf8') yields bytes under Python 3;
                # downstream keys are then bytes — confirm that is intended.
                action = action.encode('utf8').lower()
            label = 1  # not visible
        else:
            action = row['Visible Actions']
            if type(action) is str:
                action = action.encode('utf8').lower()
            label = 0  # visible
        if miniclip not in dict_miniclip_action.keys():
            dict_miniclip_action[miniclip] = []
        dict_miniclip_action[miniclip].append([action, label])
    dict_prev_next_action = OrderedDict()
    for video in dict_miniclip_action.keys():
        list_action_labels = dict_miniclip_action[video]
        # if only action in miniclip:
        if len(list_action_labels) == 1:
            [action, label] = list_action_labels[0]
            prev_action_label = ["", -1]
            next_action_label = ["", -1]
            dict_prev_next_action[(video, action, label)] = [prev_action_label, next_action_label]
        else:
            # first action in the miniclip
            [action, label] = list_action_labels[0]
            prev_action_label = ["", -1]
            next_action_label = list_action_labels[1]
            dict_prev_next_action[(video, action, label)] = [prev_action_label, next_action_label]
            # interior actions: neighbours on both sides
            for index in range(1, len(list_action_labels) - 1):
                action, label = list_action_labels[index]
                prev_action_label = list_action_labels[index - 1]
                next_action_label = list_action_labels[index + 1]
                dict_prev_next_action[(video, action, label)] = [prev_action_label, next_action_label]
            # last action in the miniclip
            [action, label] = list_action_labels[-1]
            prev_action_label = list_action_labels[-2]
            next_action_label = ["", -1]
            dict_prev_next_action[(video, action, label)] = [prev_action_label, next_action_label]
    return dict_prev_next_action
def split_data_after_video_from_csv(path_visible_not_visible_actions_csv=PATH_visible_not_visible_actions_csv):
    """Read the annotation CSV into {video_name: [[action_bytes, label], ...]}.

    Each row contributes its visible action with label 0 and/or its
    not-visible action with label 1; actions are utf-8 encoded and lowercased.
    """
    with open(path_visible_not_visible_actions_csv) as csv_file:
        reader = csv.DictReader(csv_file)
        dict_video_actions = OrderedDict()
        for row in reader:
            video_name = ''
            visible_action = ''
            not_visible_action = ''
            for column_name, value in row.items():
                if column_name == 'Video_name':
                    video_name = value
                    dict_video_actions.setdefault(video_name, [])
                elif column_name == 'Visible Actions':
                    visible_action = value
                elif column_name == 'Not Visible Actions':
                    not_visible_action = value
            if visible_action:
                dict_video_actions[video_name].append([visible_action.encode('utf8').lower(), 0])
            if not_visible_action:
                dict_video_actions[video_name].append([not_visible_action.encode('utf8').lower(), 1])
    return dict_video_actions
def balance_data(balance, dict_video_actions, dict_train_data):
    """Balance visible (label 0) vs not-visible (label 1) actions in the training data.

    balance == "upsample": duplicates visible actions until the counts match;
    balance == "downsample": removes not-visible actions instead.

    NOTE(review): dict_train_data values appear to alias the very list objects
    stored in dict_video_actions (see split_train_test_val_data), so mutating
    through either dict changes both — the recounts below seem to rely on
    that; confirm. The while-loops pick random videos, so exact membership
    varies between runs (as the inline comments already warn).

    Returns the (mutated) dict_video_actions and dict_train_data.
    """
    nb_visible_actions, nb_not_visible_actions = get_nb_visible_not_visible(dict_train_data)
    # Integer majority/minority ratio; only the downsample branch uses it.
    if nb_not_visible_actions >= nb_visible_actions:
        ratio_visible_not_visible = int(nb_not_visible_actions / nb_visible_actions)
    else:
        ratio_visible_not_visible = int(nb_visible_actions / nb_not_visible_actions)
    if balance == "upsample":
        # Upsample data
        # First pass: duplicate every visible action once per video.
        for video_name in dict_train_data.keys():
            list_visible_actions = get_list_actions_for_label(dict_train_data, video_name, 0)
            for elem in list_visible_actions:
                dict_video_actions[video_name].append([elem, 0])
        nb_visible_actions, nb_not_visible_actions = get_nb_visible_not_visible(dict_train_data)
        diff_nb_actions = abs(nb_not_visible_actions - nb_visible_actions)
        # Second pass: add one visible action from a random video until balanced.
        while diff_nb_actions:
            # this makes the # actions to vary in Train, Test Eval after each run
            # run it once and save the list
            random_video_name = random.choice(list(dict_train_data))
            list_visible_actions = get_list_actions_for_label(dict_train_data, random_video_name, 0)
            if list_visible_actions:
                dict_video_actions[random_video_name].append([list_visible_actions[0], 0])
                diff_nb_actions -= 1
    elif balance == "downsample":
        # Downsample data --> delete the non-visible actions
        # First pass: drop every ratio-th not-visible action per video.
        for video_name in dict_train_data.keys():
            list_not_visible_actions = get_list_actions_for_label(dict_video_actions, video_name, 1)
            index = 0
            list_all_actions = dict_video_actions[video_name]
            for elem in list_not_visible_actions:
                if index % ratio_visible_not_visible == 0:
                    list_all_actions.remove([elem, 1])
                index += 1
            dict_video_actions[video_name] = list_all_actions
        nb_visible_actions, nb_not_visible_actions = get_nb_visible_not_visible(dict_train_data)
        diff_nb_actions = abs(nb_not_visible_actions - nb_visible_actions)
        # Second pass: remove one not-visible action at random until balanced.
        while (diff_nb_actions):
            # this makes the # actions to vary in Train, Test Eval after each run
            # run it once and save the list
            random_video_name = random.choice(list(dict_train_data))
            list_not_visible_actions = get_list_actions_for_label(dict_video_actions, random_video_name, 1)
            if list_not_visible_actions:
                list_all_actions = dict_video_actions[random_video_name]
                list_all_actions.remove([list_not_visible_actions[0], 1])
                diff_nb_actions -= 1
    return dict_video_actions, dict_train_data
def split_train_test_val_data(dict_video_actions, channel_test, channel_val):
    """Partition miniclips by YouTube channel number (1..10).

    A miniclip whose name contains "<channel>p" goes to the test split when
    the channel equals channel_test, to the validation split when it equals
    channel_val, and to the training split for every other channel.
    """
    dict_train_data = OrderedDict()
    dict_test_data = OrderedDict()
    dict_val_data = OrderedDict()
    for channel in range(1, 11):
        if channel in (channel_test, channel_val):
            continue
        tag = str(channel) + "p"
        # if str(channel) + "p" in key or 'p' not in key[:-3]:
        for miniclip in dict_video_actions.keys():
            if tag in miniclip:
                dict_train_data[miniclip] = dict_video_actions[miniclip]
    val_tag = str(channel_val) + "p"
    for miniclip in dict_video_actions.keys():
        if val_tag in miniclip:
            dict_val_data[miniclip] = dict_video_actions[miniclip]
    test_tag = str(channel_test) + "p"
    for miniclip in dict_video_actions.keys():
        if test_tag in miniclip:
            dict_test_data[miniclip] = dict_video_actions[miniclip]
    return dict_train_data, dict_test_data, dict_val_data
# lists triples of (miniclip, action, label)
def create_data(dict_train_data, dict_test_data, dict_val_data):
    """Flatten each split's {miniclip: [[action, label], ...]} dict into a
    list of (miniclip, action, label) triples, in dict insertion order."""
    def _flatten(split):
        # One triple per (action, label) pair.
        return [(miniclip, action, label)
                for miniclip, pairs in split.items()
                for action, label in pairs]
    return _flatten(dict_train_data), _flatten(dict_test_data), _flatten(dict_val_data)
def process_data(train_data, test_data, val_data):
    """Split lists of (video, action, label) triples into per-field lists.

    Returns three lists, each holding the train/test/val values of one field:
    [actions...], [labels...], [videos...].
    """
    def _columns(samples):
        # Separate the three fields while preserving sample order.
        videos = [video for video, _, _ in samples]
        actions = [action for _, action, _ in samples]
        labels = [label for _, _, label in samples]
        return videos, actions, labels
    train_video, train_actions, train_labels = _columns(train_data)
    test_video, test_actions, test_labels = _columns(test_data)
    val_video, val_actions, val_labels = _columns(val_data)
    return [train_actions, test_actions, val_actions], [train_labels, test_labels, val_labels], [train_video,
                                                                                                test_video, val_video]
def get_data(balance, channel_test, channel_val):
    """Load the miniclip/action annotations and build the train/test/val splits.

    Optionally balances the training split ("upsample"/"downsample"), prints
    split statistics, and returns both the per-video dicts and the flattened
    (miniclip, action, label) lists.
    """
    # The JSON file is the precomputed form of the CSV annotations.
    # dict_video_actions = split_data_after_video_from_csv()
    with open("data/miniclip_actions.json") as f:
        dict_video_actions = json.load(f)
    splits = split_train_test_val_data(dict_video_actions, channel_test, channel_val)
    dict_train_data, dict_test_data, dict_val_data = splits
    # balance only train data
    if balance:
        dict_video_actions, dict_train_data = balance_data(balance, dict_video_actions, dict_train_data)
    # print_nb_actions_miniclips_train_test_eval(dict_train_data, dict_test_data, dict_val_data)
    # measure_nb_unique_actions(dict_video_actions)
    train_data, test_data, val_data = create_data(dict_train_data, dict_test_data, dict_val_data)
    print_action_balancing_stats(balance, 0, dict_video_actions, dict_train_data, dict_test_data,
                                 dict_val_data, test_data)
    print_nb_actions_miniclips_train_test_eval(dict_train_data, dict_test_data, dict_val_data)
    return dict_video_actions, dict_train_data, dict_test_data, dict_val_data, train_data, test_data, val_data
| [
"pandas.read_csv",
"numpy.ones",
"numpy.mean",
"nltk.tag.StanfordPOSTagger",
"pandas.read_table",
"nltk.word_tokenize",
"pandas.DataFrame",
"classify.visualization.print_nb_actions_miniclips_train_test_eval",
"numpy.random.rand",
"keras.preprocessing.text.Tokenizer",
"classify.visualization.prin... | [((937, 952), 'nltk.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (950, 952), False, 'from nltk import PorterStemmer\n'), ((1101, 1158), 'nltk.tag.StanfordPOSTagger', 'StanfordPOSTagger', (['"""english-bidirectional-distsim.tagger"""'], {}), "('english-bidirectional-distsim.tagger')\n", (1118, 1158), False, 'from nltk.tag import StanfordPOSTagger\n'), ((1290, 1391), 'pandas.read_table', 'pd.read_table', (['"""data/glove.6B.50d.txt"""'], {'sep': '""" """', 'index_col': '(0)', 'header': 'None', 'quoting': 'csv.QUOTE_NONE'}), "('data/glove.6B.50d.txt', sep=' ', index_col=0, header=None,\n quoting=csv.QUOTE_NONE)\n", (1303, 1391), True, 'import pandas as pd\n'), ((1466, 1568), 'pandas.read_table', 'pd.read_table', (['"""data/glove_vectors.txt"""'], {'sep': '""" """', 'index_col': '(0)', 'header': 'None', 'quoting': 'csv.QUOTE_NONE'}), "('data/glove_vectors.txt', sep=' ', index_col=0, header=None,\n quoting=csv.QUOTE_NONE)\n", (1479, 1568), True, 'import pandas as pd\n'), ((6126, 6225), 'pandas.DataFrame', 'pd.DataFrame', (["{'video': video_list, 'action': action_list, 'pos_embedding':\n pos_embedding_list}"], {}), "({'video': video_list, 'action': action_list, 'pos_embedding':\n pos_embedding_list})\n", (6138, 6225), True, 'import pandas as pd\n'), ((7049, 7183), 'pandas.DataFrame', 'pd.DataFrame', (["{'video': video_list, 'action': action_list, 'left_context':\n left_context_list, 'right_context': right_context_list}"], {}), "({'video': video_list, 'action': action_list, 'left_context':\n left_context_list, 'right_context': right_context_list})\n", (7061, 7183), True, 'import pandas as pd\n'), ((13021, 13058), 'classify.elmo_embeddings.load_elmo_embedding', 'load_elmo_embedding', (['first_chunk_list'], {}), '(first_chunk_list)\n', (13040, 13058), False, 'from classify.elmo_embeddings import load_elmo_embedding\n'), ((13432, 13459), 'pandas.read_csv', 'pd.read_csv', (['path_embedding'], {}), '(path_embedding)\n', (13443, 13459), 
True, 'import pandas as pd\n'), ((14196, 14223), 'pandas.read_csv', 'pd.read_csv', (['path_embedding'], {}), '(path_embedding)\n', (14207, 14223), True, 'import pandas as pd\n'), ((15278, 15326), 'numpy.zeros', 'np.zeros', (['(nb_train_actions, pos_embedding_size)'], {}), '((nb_train_actions, pos_embedding_size))\n', (15286, 15326), True, 'import numpy as np\n'), ((15917, 15969), 'numpy.zeros', 'np.zeros', (['(nb_train_actions, context_embedding_size)'], {}), '((nb_train_actions, context_embedding_size))\n', (15925, 15969), True, 'import numpy as np\n'), ((19462, 19473), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (19471, 19473), False, 'from keras.preprocessing.text import Tokenizer\n'), ((19743, 19769), 'numpy.zeros', 'np.zeros', (['(vocab_size, 50)'], {}), '((vocab_size, 50))\n', (19751, 19769), True, 'import numpy as np\n'), ((21733, 21810), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_train, embedding_pos_train)'], {'axis': '(1)'}), '((embedding_matrix_actions_train, embedding_pos_train), axis=1)\n', (21747, 21810), True, 'import numpy as np\n'), ((21891, 21966), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_test, embedding_pos_test)'], {'axis': '(1)'}), '((embedding_matrix_actions_test, embedding_pos_test), axis=1)\n', (21905, 21966), True, 'import numpy as np\n'), ((22045, 22118), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_val, embedding_pos_val)'], {'axis': '(1)'}), '((embedding_matrix_actions_val, embedding_pos_val), axis=1)\n', (22059, 22118), True, 'import numpy as np\n'), ((23739, 23824), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_train, embedding_context_train)'], {'axis': '(1)'}), '((embedding_matrix_actions_train, embedding_context_train),\n axis=1)\n', (23753, 23824), True, 'import numpy as np\n'), ((23909, 23988), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_test, embedding_context_test)'], {'axis': '(1)'}), 
'((embedding_matrix_actions_test, embedding_context_test), axis=1)\n', (23923, 23988), True, 'import numpy as np\n'), ((24075, 24152), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_val, embedding_context_val)'], {'axis': '(1)'}), '((embedding_matrix_actions_val, embedding_context_val), axis=1)\n', (24089, 24152), True, 'import numpy as np\n'), ((24436, 24447), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (24445, 24447), False, 'from keras.preprocessing.text import Tokenizer\n'), ((24914, 24982), 'keras_preprocessing.sequence.pad_sequences', 'pad_sequences', (['encoded_docs_train'], {'maxlen': 'max_length', 'padding': '"""post"""'}), "(encoded_docs_train, maxlen=max_length, padding='post')\n", (24927, 24982), False, 'from keras_preprocessing.sequence import pad_sequences\n'), ((24996, 25063), 'keras_preprocessing.sequence.pad_sequences', 'pad_sequences', (['encoded_docs_test'], {'maxlen': 'max_length', 'padding': '"""post"""'}), "(encoded_docs_test, maxlen=max_length, padding='post')\n", (25009, 25063), False, 'from keras_preprocessing.sequence import pad_sequences\n'), ((25076, 25142), 'keras_preprocessing.sequence.pad_sequences', 'pad_sequences', (['encoded_docs_val'], {'maxlen': 'max_length', 'padding': '"""post"""'}), "(encoded_docs_val, maxlen=max_length, padding='post')\n", (25089, 25142), False, 'from keras_preprocessing.sequence import pad_sequences\n'), ((27144, 27214), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_train, scores_train)'], {'axis': '(1)'}), '((embedding_matrix_actions_train, scores_train), axis=1)\n', (27158, 27214), True, 'import numpy as np\n'), ((27313, 27381), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_test, scores_test)'], {'axis': '(1)'}), '((embedding_matrix_actions_test, scores_test), axis=1)\n', (27327, 27381), True, 'import numpy as np\n'), ((27478, 27544), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_val, 
scores_val)'], {'axis': '(1)'}), '((embedding_matrix_actions_val, scores_val), axis=1)\n', (27492, 27544), True, 'import numpy as np\n'), ((29179, 29269), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_train, embedding_next_actions_train)'], {'axis': '(1)'}), '((embedding_matrix_actions_train,\n embedding_next_actions_train), axis=1)\n', (29193, 29269), True, 'import numpy as np\n'), ((29348, 29436), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_test, embedding_next_actions_test)'], {'axis': '(1)'}), '((embedding_matrix_actions_test, embedding_next_actions_test),\n axis=1)\n', (29362, 29436), True, 'import numpy as np\n'), ((29513, 29599), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_val, embedding_next_actions_val)'], {'axis': '(1)'}), '((embedding_matrix_actions_val, embedding_next_actions_val),\n axis=1)\n', (29527, 29599), True, 'import numpy as np\n'), ((31198, 31288), 'numpy.concatenate', 'np.concatenate', (['(embedding_prev_actions_train, embedding_matrix_actions_train)'], {'axis': '(1)'}), '((embedding_prev_actions_train,\n embedding_matrix_actions_train), axis=1)\n', (31212, 31288), True, 'import numpy as np\n'), ((31367, 31455), 'numpy.concatenate', 'np.concatenate', (['(embedding_prev_actions_test, embedding_matrix_actions_test)'], {'axis': '(1)'}), '((embedding_prev_actions_test, embedding_matrix_actions_test),\n axis=1)\n', (31381, 31455), True, 'import numpy as np\n'), ((31532, 31618), 'numpy.concatenate', 'np.concatenate', (['(embedding_prev_actions_val, embedding_matrix_actions_val)'], {'axis': '(1)'}), '((embedding_prev_actions_val, embedding_matrix_actions_val),\n axis=1)\n', (31546, 31618), True, 'import numpy as np\n'), ((35126, 35175), 'pandas.read_csv', 'pd.read_csv', (['path_visible_not_visible_actions_csv'], {}), '(path_visible_not_visible_actions_csv)\n', (35137, 35175), True, 'import pandas as pd\n'), ((35203, 35216), 'collections.OrderedDict', 'OrderedDict', ([], {}), 
'()\n', (35214, 35216), False, 'from collections import OrderedDict\n'), ((35982, 35995), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (35993, 35995), False, 'from collections import OrderedDict\n'), ((38667, 38710), 'classify.visualization.get_nb_visible_not_visible', 'get_nb_visible_not_visible', (['dict_train_data'], {}), '(dict_train_data)\n', (38693, 38710), False, 'from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, print_nb_actions_miniclips_train_test_eval, measure_nb_unique_actions\n'), ((41390, 41403), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (41401, 41403), False, 'from collections import OrderedDict\n'), ((41425, 41438), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (41436, 41438), False, 'from collections import OrderedDict\n'), ((41459, 41472), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (41470, 41472), False, 'from collections import OrderedDict\n'), ((44676, 44799), 'classify.visualization.print_action_balancing_stats', 'print_action_balancing_stats', (['balance', '(0)', 'dict_video_actions', 'dict_train_data', 'dict_test_data', 'dict_val_data', 'test_data'], {}), '(balance, 0, dict_video_actions,\n dict_train_data, dict_test_data, dict_val_data, test_data)\n', (44704, 44799), False, 'from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, print_nb_actions_miniclips_train_test_eval, measure_nb_unique_actions\n'), ((44834, 44928), 'classify.visualization.print_nb_actions_miniclips_train_test_eval', 'print_nb_actions_miniclips_train_test_eval', (['dict_train_data', 'dict_test_data', 'dict_val_data'], {}), '(dict_train_data, dict_test_data,\n dict_val_data)\n', (44876, 44928), False, 'from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, print_nb_actions_miniclips_train_test_eval, 
measure_nb_unique_actions\n'), ((2386, 2414), 'numpy.asarray', 'np.asarray', (['embedding_vector'], {}), '(embedding_vector)\n', (2396, 2414), True, 'import numpy as np\n'), ((5656, 5669), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (5665, 5669), False, 'import json\n'), ((6485, 6498), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (6494, 6498), False, 'import json\n'), ((10739, 10760), 'nltk.word_tokenize', 'word_tokenize', (['action'], {}), '(action)\n', (10752, 10760), False, 'from nltk import word_tokenize\n'), ((10854, 10905), 'numpy.zeros', 'np.zeros', (['(1, dimension_embedding)'], {'dtype': '"""float32"""'}), "((1, dimension_embedding), dtype='float32')\n", (10862, 10905), True, 'import numpy as np\n'), ((12431, 12451), 'numpy.zeros', 'np.zeros', (['vocab_size'], {}), '(vocab_size)\n', (12439, 12451), True, 'import numpy as np\n'), ((12483, 12509), 'nltk.word_tokenize', 'nltk.word_tokenize', (['action'], {}), '(action)\n', (12501, 12509), False, 'import nltk\n'), ((13142, 13169), 'classify.elmo_embeddings.load_elmo_embedding', 'load_elmo_embedding', (['chhunk'], {}), '(chhunk)\n', (13161, 13169), False, 'from classify.elmo_embeddings import load_elmo_embedding\n'), ((13211, 13305), 'numpy.concatenate', 'np.concatenate', (['(embedding_matrix_actions_train, embedding_matrix_actions_train_1)'], {'axis': '(0)'}), '((embedding_matrix_actions_train,\n embedding_matrix_actions_train_1), axis=0)\n', (13225, 13305), True, 'import numpy as np\n'), ((17600, 17626), 'numpy.zeros', 'np.zeros', (['dimension_output'], {}), '(dimension_output)\n', (17608, 17626), True, 'import numpy as np\n'), ((17831, 17870), 'numpy.zeros', 'np.zeros', (['(nb_frames, dimension_output)'], {}), '((nb_frames, dimension_output))\n', (17839, 17870), True, 'import numpy as np\n'), ((21476, 21524), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_train'], {}), '(embedding_matrix_actions_train)\n', (21492, 21524), False, 'from classify.utils import 
reshape_3d_to_2d\n'), ((21565, 21612), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_test'], {}), '(embedding_matrix_actions_test)\n', (21581, 21612), False, 'from classify.utils import reshape_3d_to_2d\n'), ((21652, 21698), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_val'], {}), '(embedding_matrix_actions_val)\n', (21668, 21698), False, 'from classify.utils import reshape_3d_to_2d\n'), ((23478, 23526), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_train'], {}), '(embedding_matrix_actions_train)\n', (23494, 23526), False, 'from classify.utils import reshape_3d_to_2d\n'), ((23567, 23614), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_test'], {}), '(embedding_matrix_actions_test)\n', (23583, 23614), False, 'from classify.utils import reshape_3d_to_2d\n'), ((23654, 23700), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_val'], {}), '(embedding_matrix_actions_val)\n', (23670, 23700), False, 'from classify.utils import reshape_3d_to_2d\n'), ((25323, 25336), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (25332, 25336), False, 'import json\n'), ((26878, 26926), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_train'], {}), '(embedding_matrix_actions_train)\n', (26894, 26926), False, 'from classify.utils import reshape_3d_to_2d\n'), ((26967, 27014), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_test'], {}), '(embedding_matrix_actions_test)\n', (26983, 27014), False, 'from classify.utils import reshape_3d_to_2d\n'), ((27054, 27100), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_val'], {}), '(embedding_matrix_actions_val)\n', (27070, 27100), False, 'from classify.utils import reshape_3d_to_2d\n'), ((28921, 28969), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', 
(['embedding_matrix_actions_train'], {}), '(embedding_matrix_actions_train)\n', (28937, 28969), False, 'from classify.utils import reshape_3d_to_2d\n'), ((29010, 29057), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_test'], {}), '(embedding_matrix_actions_test)\n', (29026, 29057), False, 'from classify.utils import reshape_3d_to_2d\n'), ((29097, 29143), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_val'], {}), '(embedding_matrix_actions_val)\n', (29113, 29143), False, 'from classify.utils import reshape_3d_to_2d\n'), ((30940, 30988), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_train'], {}), '(embedding_matrix_actions_train)\n', (30956, 30988), False, 'from classify.utils import reshape_3d_to_2d\n'), ((31029, 31076), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_test'], {}), '(embedding_matrix_actions_test)\n', (31045, 31076), False, 'from classify.utils import reshape_3d_to_2d\n'), ((31116, 31162), 'classify.utils.reshape_3d_to_2d', 'reshape_3d_to_2d', (['embedding_matrix_actions_val'], {}), '(embedding_matrix_actions_val)\n', (31132, 31162), False, 'from classify.utils import reshape_3d_to_2d\n'), ((32396, 32447), 'numpy.concatenate', 'np.concatenate', (['(x_train, video_data_train)'], {'axis': '(1)'}), '((x_train, video_data_train), axis=1)\n', (32410, 32447), True, 'import numpy as np\n'), ((32477, 32526), 'numpy.concatenate', 'np.concatenate', (['(x_test, video_data_test)'], {'axis': '(1)'}), '((x_test, video_data_test), axis=1)\n', (32491, 32526), True, 'import numpy as np\n'), ((32555, 32602), 'numpy.concatenate', 'np.concatenate', (['(x_val, video_data_val)'], {'axis': '(1)'}), '((x_val, video_data_val), axis=1)\n', (32569, 32602), True, 'import numpy as np\n'), ((37595, 37619), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (37609, 37619), False, 'import csv\n'), ((37649, 37662), 
'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (37660, 37662), False, 'from collections import OrderedDict\n'), ((39309, 39352), 'classify.visualization.get_nb_visible_not_visible', 'get_nb_visible_not_visible', (['dict_train_data'], {}), '(dict_train_data)\n', (39335, 39352), False, 'from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, print_nb_actions_miniclips_train_test_eval, measure_nb_unique_actions\n'), ((1771, 1810), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (1781, 1810), True, 'import numpy as np\n'), ((10652, 10702), 'numpy.ones', 'np.ones', (['(1, dimension_embedding)'], {'dtype': '"""float32"""'}), "((1, dimension_embedding), dtype='float32')\n", (10659, 10702), True, 'import numpy as np\n'), ((11212, 11240), 'numpy.asarray', 'np.asarray', (['embedding_vector'], {}), '(embedding_vector)\n', (11222, 11240), True, 'import numpy as np\n'), ((14966, 15019), 'numpy.concatenate', 'np.concatenate', (['(left_context, right_context)'], {'axis': '(0)'}), '((left_context, right_context), axis=0)\n', (14980, 15019), True, 'import numpy as np\n'), ((18140, 18171), 'numpy.mean', 'np.mean', (['video_features'], {'axis': '(0)'}), '(video_features, axis=0)\n', (18147, 18171), True, 'import numpy as np\n'), ((26243, 26264), 'numpy.array', 'np.array', (['list_scores'], {}), '(list_scores)\n', (26251, 26264), True, 'import numpy as np\n'), ((35307, 35340), 'pandas.isnull', 'pd.isnull', (["row['Visible Actions']"], {}), "(row['Visible Actions'])\n", (35316, 35340), True, 'import pandas as pd\n'), ((35345, 35382), 'pandas.isnull', 'pd.isnull', (["row['Not Visible Actions']"], {}), "(row['Not Visible Actions'])\n", (35354, 35382), True, 'import pandas as pd\n'), ((35418, 35451), 'pandas.isnull', 'pd.isnull', (["row['Visible Actions']"], {}), "(row['Visible Actions'])\n", (35427, 35451), True, 'import pandas as pd\n'), ((39085, 
39143), 'classify.visualization.get_list_actions_for_label', 'get_list_actions_for_label', (['dict_train_data', 'video_name', '(0)'], {}), '(dict_train_data, video_name, 0)\n', (39111, 39143), False, 'from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, print_nb_actions_miniclips_train_test_eval, measure_nb_unique_actions\n'), ((39689, 39754), 'classify.visualization.get_list_actions_for_label', 'get_list_actions_for_label', (['dict_train_data', 'random_video_name', '(0)'], {}), '(dict_train_data, random_video_name, 0)\n', (39715, 39754), False, 'from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, print_nb_actions_miniclips_train_test_eval, measure_nb_unique_actions\n'), ((40559, 40602), 'classify.visualization.get_nb_visible_not_visible', 'get_nb_visible_not_visible', (['dict_train_data'], {}), '(dict_train_data)\n', (40585, 40602), False, 'from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, print_nb_actions_miniclips_train_test_eval, measure_nb_unique_actions\n'), ((40106, 40167), 'classify.visualization.get_list_actions_for_label', 'get_list_actions_for_label', (['dict_video_actions', 'video_name', '(1)'], {}), '(dict_video_actions, video_name, 1)\n', (40132, 40167), False, 'from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, print_nb_actions_miniclips_train_test_eval, measure_nb_unique_actions\n'), ((40946, 41014), 'classify.visualization.get_list_actions_for_label', 'get_list_actions_for_label', (['dict_video_actions', 'random_video_name', '(1)'], {}), '(dict_video_actions, random_video_name, 1)\n', (40972, 41014), False, 'from classify.visualization import print_action_balancing_stats, get_list_actions_for_label, get_nb_visible_not_visible, print_nb_actions_miniclips_train_test_eval, 
measure_nb_unique_actions\n'), ((11456, 11488), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.float32'}), '((1,), dtype=np.float32)\n', (11464, 11488), True, 'import numpy as np\n'), ((11624, 11662), 'numpy.random.rand', 'np.random.rand', (['(1)', 'dimension_embedding'], {}), '(1, dimension_embedding)\n', (11638, 11662), True, 'import numpy as np\n'), ((8778, 8819), 'nltk.re.sub', 'nltk.re.sub', (['"""[0-9][0-9]:*"""', '""""""', 'sentence'], {}), "('[0-9][0-9]:*', '', sentence)\n", (8789, 8819), False, 'import nltk\n'), ((18341, 18374), 'numpy.asarray', 'np.asarray', (['padded_video_features'], {}), '(padded_video_features)\n', (18351, 18374), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
print(f'Loading {__file__}')
def align_gisaxs_height(rang=0.3, point=31, der=False):
    """Align sample height for GISAXS: relative scan of piezo.y over
    +/- `rang` with `point` steps on the pil1M detector, then move piezo.y
    to the fitted centre (ps.cen).

    der: forwarded to the external fit helper ``ps`` — presumably selects a
    derivative fit for an edge-like profile; TODO confirm.
    """
    yield from bp.rel_scan([pil1M], piezo.y, -rang, rang, point)
    ps(der=der)
    yield from bps.mv(piezo.y, ps.cen)
def align_gisaxs_th(rang=0.3, point=31):
    """Align incident angle for GISAXS: relative scan of piezo.th over
    +/- `rang` with `point` steps on pil1M, then move piezo.th to the
    fitted peak position (ps.peak)."""
    yield from bp.rel_scan([pil1M], piezo.th, -rang, rang, point)
    ps()
    yield from bps.mv(piezo.th, ps.peak)
def align_xrr_prs(rang=0.3, point=31):
    """Align the prs rotation for XRR: relative scan of prs over +/- `rang`
    with `point` steps on pil1M, then move prs to the fitted peak
    (ps.peak)."""
    yield from bp.rel_scan([pil1M], prs, -rang, rang, point)
    ps()
    yield from bps.mv(prs, ps.peak)
def align_xrr_height(rang=0.3, point=31, der=False):
    """Align sample height for XRR: relative scan of piezo.x over
    +/- `rang` with `point` steps on pil1M, then move piezo.x to ps.peak.

    NOTE(review): this scans piezo.x and moves to ps.peak, whereas the
    GISAXS version scans piezo.y and moves to ps.cen — confirm both
    asymmetries are intentional for the XRR geometry.
    """
    yield from bp.rel_scan([pil1M], piezo.x, -rang, rang, point)
    ps(der=der)
    yield from bps.mv(piezo.x, ps.peak)
def align_gisaxs_height_hex(rang=0.3, point=31, der=False):
    """Hexapod variant of the GISAXS height alignment: relative scan of
    stage.y over +/- `rang` with `point` steps on pil1M, then move stage.y
    to the fitted centre (ps.cen)."""
    yield from bp.rel_scan([pil1M], stage.y, -rang, rang, point)
    ps(der=der)
    yield from bps.mv(stage.y, ps.cen)
def align_gisaxs_th_hex(rang=0.3, point=31):
    """Hexapod variant of the GISAXS theta alignment: relative scan of
    stage.th over +/- `rang` with `point` steps on pil1M, then move
    stage.th to the fitted peak (ps.peak)."""
    yield from bp.rel_scan([pil1M], stage.th, -rang, rang, point)
    ps()
    yield from bps.mv(stage.th, ps.peak)
def alignement_xrr(angle=0.15):
    """Full XRR sample alignment macro.

    Aligns height and prs on the direct beam (twice, with narrowing
    ranges), then refines on the reflected beam at `angle`, and finishes
    by returning prs to the corrected zero (note the + sign: XRR uses a
    negative total_angle convention).
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment(technique='xrr')
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan height on the direct beam (range presumably in piezo units — confirm)
    yield from align_xrr_height(800, 16, der=True)
    # At incident angle 0 the XRR alignment gave poor results, so the prs
    # alignment is done at an angle of 0.15 deg instead.
    yield from smi.setReflectedBeamROI(total_angle=-0.15, technique='xrr')
    yield from align_xrr_prs(1.5, 20)
    yield from smi.setDirectBeamROI()
    yield from align_xrr_height(500, 13, der=True)
    yield from smi.setReflectedBeamROI(total_angle=-0.15, technique='xrr')
    yield from align_xrr_prs(0.6, 21)
    # NOTE(review): the next two moves are consecutive — the first
    # (ps.peak + 0.15) is immediately overridden by (ps.peak - angle);
    # it may be leftover code. Confirm before removing.
    yield from bps.mv(prs, ps.peak + 0.15)
    # move to theta 0 + value
    yield from bps.mv(prs, ps.peak - angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=-angle, technique='xrr')
    # Refine prs and height on the reflected beam
    yield from align_xrr_prs(0.2, 31)
    yield from align_xrr_height(200, 21)
    yield from align_xrr_prs(0.05, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(prs, ps.cen + angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs(angle=0.15):
    """Full GISAXS sample alignment macro.

    Aligns height and theta on the direct beam (twice, narrowing ranges),
    then refines theta/height on the reflected beam at incident angle
    `angle`, and returns theta to the corrected zero (ps.cen - angle).
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment(technique='gisaxs')
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height on the direct beam
    yield from align_gisaxs_height(700, 16, der=True)
    yield from align_gisaxs_th(1, 15)
    yield from align_gisaxs_height(300, 11, der=True)
    yield from align_gisaxs_th(0.5, 16)
    # move to theta 0 + value
    yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
    # Refine theta and height on the reflected beam
    yield from align_gisaxs_th(0.2, 31)
    yield from align_gisaxs_height(300, 21)
    yield from align_gisaxs_th(0.05, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_special(angle=0.15):
    """GISAXS alignment variant that sets an explicit reflected-beam ROI
    (0.12 then 0.1 deg) before each direct-beam theta scan, otherwise the
    same flow as alignement_gisaxs."""
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment(technique='gisaxs')
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height on the direct beam
    yield from align_gisaxs_height(700, 16, der=True)
    yield from smi.setReflectedBeamROI(total_angle=0.12, technique='gisaxs')
    yield from align_gisaxs_th(1, 15)
    yield from smi.setDirectBeamROI()
    yield from align_gisaxs_height(300, 11, der=True)
    yield from smi.setReflectedBeamROI(total_angle=0.1, technique='gisaxs')
    yield from align_gisaxs_th(0.5, 16)
    # move to theta 0 + value
    yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
    # Refine theta and height on the reflected beam
    yield from align_gisaxs_th(0.2, 31)
    yield from align_gisaxs_height(300, 21)
    yield from align_gisaxs_th(0.05, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs_new(angle=0.15, he_ra_db=700, he_np_db=16, th_ra_db=0.7, th_np_db=11, th_ra_rb=700, th_np_rb=16, he_ra_rb=700, he_np_rb=16):
    """
    Standard macro for aligning the sample for GISAXS.  First aligns height
    and theta on the direct beam (twice, with narrowing ranges), then aligns
    theta and height on the reflected beam.  At the end theta returns to the
    new zero.

    angle: incident angle (deg) at which the reflected-beam alignment is done.
    he_ra_db, he_np_db, th_ra_db, th_np_db: height/theta range and number of
        points for the direct-beam alignment.
    th_ra_rb, th_np_rb, he_ra_rb, he_np_rb: height/theta range and number of
        points for the reflected-beam alignment.
        NOTE(review): these four are currently UNUSED — the reflected-beam
        scans below are hard-coded; confirm before relying on them.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment(technique='gisaxs')
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan height and theta on the direct beam, then repeat with tighter ranges.
    yield from align_gisaxs_height(he_ra_db, he_np_db, der=True)
    yield from align_gisaxs_th(th_ra_db, th_np_db)
    # Bugfix: np.int was removed in NumPy 1.24; the builtin int() truncates
    # identically, so behavior is unchanged.
    yield from align_gisaxs_height(int(0.5 * he_ra_db), int(0.7 * he_np_db), der=True)
    # NOTE(review): the point count below uses he_np_db; th_np_db may have
    # been intended — kept as-is to preserve behavior.
    yield from align_gisaxs_th(int(0.5 * th_ra_db), int(1.5 * he_np_db))
    # move to theta 0 + value
    yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle, technique='gisaxs')
    # Refine theta and height on the reflected beam.
    yield from align_gisaxs_th(0.2, 31)
    yield from align_gisaxs_height(300, 21)
    yield from align_gisaxs_th(0.05, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs_hex(angle=0.1):
    """GISAXS alignment for samples on the hexapod (stage.*).

    NOTE(review): the direct-beam theta scans and the move to the nominal
    incident angle are commented out — the reflected-beam theta scan alone
    is expected to find the peak. Confirm that is intentional.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.5, 0.5)
    smi = SMI_Beamline()
    yield from smi.modeAlignment()
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan height on the direct beam (ranges in stage units, mm presumably)
    yield from align_gisaxs_height_hex(0.700, 16, der=True)
    # yield from align_gisaxs_th_hex(1, 11)
    yield from align_gisaxs_height_hex(0.300, 11, der=True)
    # yield from align_gisaxs_th_hex(0.4, 16)
    # move to theta 0 + value
    # yield from bps.mv(stage.th, angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle)
    # Scan theta and height on the reflected beam
    yield from align_gisaxs_th_hex(0.5, 31)
    yield from align_gisaxs_height_hex(0.200, 21)
    yield from align_gisaxs_th_hex(0.1, 31)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(stage.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs_hex_short(angle = 0.12):
    """Shortened hexapod GISAXS alignment: a single direct-beam height
    scan, then theta/height refinement on the reflected beam at `angle`,
    ending with theta at the corrected zero."""
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.3, 0.3)
    smi = SMI_Beamline()
    yield from smi.modeAlignment()
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan height on the direct beam
    yield from align_gisaxs_height_hex(0.500, 21, der=True)
    # move to theta 0 + value
    yield from bps.mv(stage.th, angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle)
    # Scan theta and height on the reflected beam
    yield from align_gisaxs_th_hex(0.7, 23)
    yield from align_gisaxs_height_hex(0.15, 31)
    yield from align_gisaxs_th_hex(0.06, 25)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(stage.th, ps.cen-angle)
    yield from smi.modeMeasurement()
def quickalign_gisaxs(angle = 0.15):
    """Quick GISAXS re-alignment: skips the direct-beam scans entirely.

    NOTE(review): the first move uses ps.peak from whatever fit ran last
    (a previous alignment macro) — this macro only makes sense right after
    a full alignment. Confirm.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.3, 0.3)
    smi = SMI_Beamline()
    yield from smi.modeAlignment()
    # move to theta 0 + value
    yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle)
    # Scan height and theta on the reflected beam
    yield from align_gisaxs_height(200, 31)
    yield from align_gisaxs_th(0.1, 21)
    # Close all the matplotlib windows
    plt.close('all')
    # Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
def alignement_gisaxs_shorter(angle = 0.15):
    """Shortened GISAXS alignment: one round of direct-beam height/theta
    scans, then refinement on the reflected beam.

    NOTE(review): the move to ps.peak + angle before the reflected-beam
    ROI is commented out, so the wide theta scan (0.5 deg) must find the
    reflected peak from the current position. Confirm intentional.
    """
    sample_id(user_name='test', sample_name='test')
    det_exposure_time(0.3, 0.3)
    smi = SMI_Beamline()
    yield from smi.modeAlignment()
    # Set direct beam ROI
    yield from smi.setDirectBeamROI()
    # Scan theta and height on the direct beam
    yield from align_gisaxs_height(300, 21, der=True)
    yield from align_gisaxs_th(1, 21)
    # move to theta 0 + value
    #yield from bps.mv(piezo.th, ps.peak + angle)
    # Set reflected ROI
    yield from smi.setReflectedBeamROI(total_angle=angle)
    # Scan theta and height on the reflected beam
    yield from align_gisaxs_th(0.5, 21)
    yield from align_gisaxs_height(150, 21)
    yield from align_gisaxs_th(0.05, 16)
    # Close all the matplotlib windows
    plt.close('all')
    #Return angle
    yield from bps.mv(piezo.th, ps.cen - angle)
    yield from smi.modeMeasurement()
| [
"matplotlib.pyplot.close",
"numpy.int"
] | [((2537, 2553), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2546, 2553), True, 'import matplotlib.pyplot as plt\n'), ((3676, 3692), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3685, 3692), True, 'import matplotlib.pyplot as plt\n'), ((5028, 5044), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5037, 5044), True, 'import matplotlib.pyplot as plt\n'), ((7005, 7021), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7014, 7021), True, 'import matplotlib.pyplot as plt\n'), ((8135, 8151), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8144, 8151), True, 'import matplotlib.pyplot as plt\n'), ((9110, 9126), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9119, 9126), True, 'import matplotlib.pyplot as plt\n'), ((9830, 9846), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9839, 9846), True, 'import matplotlib.pyplot as plt\n'), ((10825, 10841), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10834, 10841), True, 'import matplotlib.pyplot as plt\n'), ((6427, 6449), 'numpy.int', 'np.int', (['(0.5 * he_ra_db)'], {}), '(0.5 * he_ra_db)\n', (6433, 6449), True, 'import numpy as np\n'), ((6449, 6471), 'numpy.int', 'np.int', (['(0.7 * he_np_db)'], {}), '(0.7 * he_np_db)\n', (6455, 6471), True, 'import numpy as np\n'), ((6516, 6538), 'numpy.int', 'np.int', (['(0.5 * th_ra_db)'], {}), '(0.5 * th_ra_db)\n', (6522, 6538), True, 'import numpy as np\n'), ((6538, 6560), 'numpy.int', 'np.int', (['(1.5 * he_np_db)'], {}), '(1.5 * he_np_db)\n', (6544, 6560), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 15:48:47 2018
@author: r.dewinter
"""
import numpy as np
def TBTD(x):
    """Two-bar truss design test problem.

    Args:
        x: sequence of three decision variables [y, x1, x2] (y: node
           height, x1/x2: cross-sectional areas).

    Returns:
        list: [np.array([volume, stress]), np.array([g1, g2, g3])] where
        the g's are the constraint values (volume - 0.1, stress - 1e5,
        stress_at_BC - 1e5); feasible when all are negative.
    """
    y, x1, x2 = x[0], x[1], x[2]
    # Bar lengths from the fixed 4/1 geometry.
    len_ac = (16 + y ** 2) ** 0.5
    len_bc = (1 + y ** 2) ** 0.5
    fvolume = x1 * len_ac + x2 * len_bc
    fstress = 20 * len_ac / (y * x1)
    fstressBC = 80 * len_bc / (y * x2)
    objectives = np.array([fvolume, fstress])
    constraints = np.array([fvolume - 0.1, fstress - 100000, fstressBC - 100000])
    return [objectives, constraints]
#rngMin = np.array([1,1e-16,1e-16])
#rngMax = np.array([3,1,1])
#nVar = 3
#parameters = np.empty((1000000,3))
#objectives = np.empty((1000000,2))
#constraints = np.empty((1000000,3))
#objectives[:] = 0
#constraints[:] = 0
#for i in range(1000000):
# x = np.random.rand(nVar)*(rngMax-rngMin)+rngMin
# parameters[i] = x
# obj, cons = TBTD(x)
# objectives[i] = obj
# constraints[i] = cons
#
#a = np.sum(constraints<0, axis=1)==3
#sum(a)
#iteration time 146.92799997329712
#15948.508999824524
#15992.766000270844
#iteration time 162.1710000038147
#13681.631999969482
#13729.1859998703 | [
"numpy.array"
] | [((434, 462), 'numpy.array', 'np.array', (['[fvolume, fstress]'], {}), '([fvolume, fstress])\n', (442, 462), True, 'import numpy as np\n'), ((464, 486), 'numpy.array', 'np.array', (['[g1, g2, g3]'], {}), '([g1, g2, g3])\n', (472, 486), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
__author__ = '<EMAIL>'
"""
加载数据
"""
import sys
import codecs
import pickle
import numpy as np
from utils import map_item2id
def load_vocs(paths):
    """
    Load pickled vocabularies.
    Args:
        paths: list of str, paths to the pickled voc files
    Returns:
        vocs: list of dict, one unpickled vocabulary per path
    """
    def _read_one(voc_path):
        with open(voc_path, 'rb') as handle:
            return pickle.load(handle)
    return [_read_one(voc_path) for voc_path in paths]
def load_lookup_tables(paths):
    """
    Load pickled lookup tables (e.g. embedding matrices).
    Args:
        paths: list of str, paths to the pickled files
    Returns:
        lookup_tables: list, one unpickled object per path
    """
    lookup_tables = []
    for path in paths:
        # Bugfix: the original passed encoding='utf-8' together with mode
        # 'rb'; binary mode does not accept an encoding argument and raises
        # "ValueError: binary mode doesn't take an encoding argument".
        with open(path, 'rb') as file_r:
            lookup_tables.append(pickle.load(file_r))
    return lookup_tables
def init_data(feature_names, vocs, max_len, model='train',path=None, test_sens=None,
              use_char_feature=False, word_len=None, sep='\t'):
    """
    Load and vectorize data (currently loads the whole data set at once).

    Args:
        feature_names: list of str, feature names.
        vocs: list of dict, one vocabulary per feature; if
            use_char_feature, the char voc comes first and is popped off;
            the label voc is expected last (used when model == 'train').
        max_len: int, maximum sentence length (longer ones are truncated).
        model: str, in ('train', 'test').
        path: str, data file path (used when model == 'train'); sentences
            are separated by blank lines, one token per line with features
            separated by `sep`, the gold label last.
        test_sens: list of tokenized sentences (used when model == 'test'),
            e.g. [[[token, pos, tag], ...], ...].
        use_char_feature: bool, whether to build the char-level feature.
        word_len: None or int, maximum word length for the char feature.
        sep: str, separator between features, default '\t'.

    Returns:
        data_dict: dict, feature name -> int32 array of shape
        (num_sentences, max_len); plus 'char' of shape
        (num_sentences, max_len, word_len) when use_char_feature, and
        'label' of shape (num_sentences, max_len) when model == 'train'.
    """
    assert model in ('train','test')
    if model=='train':
        with codecs.open(path,'r',encoding='utf8') as file_r:
            sentences = file_r.read().strip().split('\n\n')
        sentences = [[j.split(sep) for j in sen.split('\n')] for sen in sentences]
    else:
        if not test_sens:
            # Error message (Chinese): "please make sure the test corpus is non-empty"
            raise ValueError('请保证测试语料非空!!!')
        sentences = test_sens
    sentences_count = len(sentences)
    print('sentences_count1',sentences_count)
    feature_count = len(feature_names)
    data_dict = dict()
    # One zero-padded int32 matrix per feature.
    for feature_name in feature_names:
        data_dict[feature_name] = np.zeros((sentences_count, max_len), dtype='int32')
    # char feature: the char voc is consumed (popped) from vocs.
    if use_char_feature:
        data_dict['char'] = np.zeros((sentences_count,max_len,word_len),dtype='int32')
        char_voc=vocs.pop(0)
    if model == 'train':
        data_dict['label'] = np.zeros((len(sentences),max_len),dtype='int32')
    for index, items in enumerate(sentences):
        # One list per feature, plus one for the label.
        # NOTE(review): list comprehension used only for its side effect.
        one_instance_items = []
        [one_instance_items.append([]) for _ in range(len(feature_names)+1)]
        for feature_tokens in items:
            for j in range(feature_count):
                one_instance_items[j].append(feature_tokens[j])
            if model == 'train':
                one_instance_items[-1].append(feature_tokens[-1])
        # Map tokens to ids, padded/truncated to max_len.
        for i in range(len(feature_names)):
            data_dict[feature_names[i]][index,:]=map_item2id(
                one_instance_items[i],vocs[i],max_len)
        if use_char_feature:
            # Char ids per word of the first (token) feature.
            for i,word in enumerate(one_instance_items[0]):
                if i>= max_len:
                    break
                data_dict['char'][index][i,:] = map_item2id(word,char_voc,word_len)
        if model == 'train':
            data_dict['label'][index,:] = map_item2id(one_instance_items[-1],
                                                    vocs[-1],max_len)
        print('loading data: %d\r'%index)
    return data_dict
| [
"pickle.load",
"utils.map_item2id",
"numpy.zeros",
"codecs.open"
] | [((2030, 2081), 'numpy.zeros', 'np.zeros', (['(sentences_count, max_len)'], {'dtype': '"""int32"""'}), "((sentences_count, max_len), dtype='int32')\n", (2038, 2081), True, 'import numpy as np\n'), ((2158, 2219), 'numpy.zeros', 'np.zeros', (['(sentences_count, max_len, word_len)'], {'dtype': '"""int32"""'}), "((sentences_count, max_len, word_len), dtype='int32')\n", (2166, 2219), True, 'import numpy as np\n'), ((1508, 1547), 'codecs.open', 'codecs.open', (['path', '"""r"""'], {'encoding': '"""utf8"""'}), "(path, 'r', encoding='utf8')\n", (1519, 1547), False, 'import codecs\n'), ((2840, 2892), 'utils.map_item2id', 'map_item2id', (['one_instance_items[i]', 'vocs[i]', 'max_len'], {}), '(one_instance_items[i], vocs[i], max_len)\n', (2851, 2892), False, 'from utils import map_item2id\n'), ((3210, 3264), 'utils.map_item2id', 'map_item2id', (['one_instance_items[-1]', 'vocs[-1]', 'max_len'], {}), '(one_instance_items[-1], vocs[-1], max_len)\n', (3221, 3264), False, 'from utils import map_item2id\n'), ((413, 432), 'pickle.load', 'pickle.load', (['file_r'], {}), '(file_r)\n', (424, 432), False, 'import pickle\n'), ((750, 769), 'pickle.load', 'pickle.load', (['file_r'], {}), '(file_r)\n', (761, 769), False, 'import pickle\n'), ((3103, 3140), 'utils.map_item2id', 'map_item2id', (['word', 'char_voc', 'word_len'], {}), '(word, char_voc, word_len)\n', (3114, 3140), False, 'from utils import map_item2id\n')] |
import numpy as np
from collections import Counter, defaultdict
from itertools import chain, tee, islice
from scipy.sparse import csc_matrix, csr_matrix
from tqdm import tqdm
def get_ngrams(doc, ngram_range=(1,1)):
    """Yield whitespace-joined n-grams of *doc*.

    Args:
        doc: iterable of string tokens (list, tuple, or iterator).
        ngram_range: (min_n, max_n), inclusive bounds on the n-gram order.

    Yields:
        str: each n-gram as tokens joined by a single space, ordered by n
        and then by position (same order as the original tee-based code).
    """
    # Bugfix/robustness: materialize the tokens once.  The old tee-based
    # loop reset `tlst = doc` for every n, so if `doc` was a generator it
    # was exhausted after the first n and all higher-order n-grams were
    # silently dropped.  For list inputs the output is unchanged.
    tokens = list(doc)
    for n in range(ngram_range[0], ngram_range[1] + 1):
        for start in range(len(tokens) - n + 1):
            yield ' '.join(tokens[start:start + n])
def count_bag_of_ngrams(corpus, ngram_range, tokenizer, unk_token):
    """Build a (documents x n-grams) sparse count matrix over *corpus*.

    Args:
        corpus: sequence of documents, each passed through `tokenizer`.
        ngram_range: (min_n, max_n) forwarded to get_ngrams.
        tokenizer: callable mapping a document to a token iterable.
        unk_token: str or None; when a str, it is appended as the last
            vocabulary entry (its column stays empty here).

    Returns:
        (X, full_vocab): csr_matrix of counts and the token -> column dict.
    """
    # CSR components, built row by row.
    data, indices, indptr = [], [], [0]
    # Auto-growing vocabulary: unseen tokens get the next free index.
    vocab = defaultdict()
    vocab.default_factory = vocab.__len__
    for document in tqdm(corpus):
        counts = Counter(get_ngrams(tokenizer(document), ngram_range))
        for token, freq in counts.items():
            data.append(freq)
            indices.append(vocab[token])
        indptr.append(len(indices))
    vocab = dict(vocab)
    if isinstance(unk_token, str):
        vocab[unk_token] = len(vocab)
    matrix = csr_matrix((data, indices, indptr), shape=(len(corpus), len(vocab)))
    return matrix, vocab
def filter_by_counts(X,full_vocab,min_count,max_count,max_words,unk_token):
    """Keep only columns of X whose total count is within bounds.

    Columns are ranked by total frequency (descending); columns outside
    [min_count, max_count] are dropped, then the list is cut to max_words.
    When unk_token is a str, its column is forced into the kept set and
    the counts of every dropped column are folded into it.

    Args:
        X: sparse count matrix (documents x tokens); mutated in place when
           unk counts are folded in.
        full_vocab: dict, token -> column index.
        min_count, max_count: frequency bounds (max_count None = no cap).
        max_words: int or None, cap on the vocabulary size.
        unk_token: str or None.

    Returns:
        (X, vocab): the column-filtered matrix and the new token -> column
        dict (columns ordered by descending frequency).
    """
    freqs = X.sum(axis=0).A1
    # Column indices sorted by descending total frequency.
    sorted_indices = np.argsort(freqs)[::-1]
    sorted_frequencies = freqs[sorted_indices]
    if min_count <= 0 and max_count is None:
        mask = np.ones(X.shape[1],dtype=bool)
    else:
        if max_count is None:
            max_count = np.inf
        mask = np.logical_and(sorted_frequencies <= max_count,
                              sorted_frequencies >= min_count)
    sorted_indices = sorted_indices[mask]
    if max_words is not None:
        sorted_indices = sorted_indices[:max_words]
    if isinstance(unk_token,str):
        unk_idx = full_vocab[unk_token]
        if unk_idx not in sorted_indices:
            # Force the unk column in: append it if there is room, else
            # replace the least frequent kept column.
            if max_words is not None:
                if len(sorted_indices) < max_words:
                    sorted_indices = np.hstack((sorted_indices,np.array([unk_idx])))
                else:
                    sorted_indices[-1] = unk_idx
            else:
                sorted_indices = np.hstack((sorted_indices,np.array([unk_idx])))
            # Fold the counts of all dropped columns into the unk column.
            # NOTE(review): this mutates X in place (sparse-efficiency
            # warning expected on CSR input).
            mask = np.ones(X.shape[1],dtype=bool)
            mask[sorted_indices] = False
            X[:,unk_idx] += X[:,mask].sum(axis=1)
    X = X[:,sorted_indices]
    idx_to_tk = {idx:tk for tk,idx in full_vocab.items()}
    vocab = {idx_to_tk[idx]:i for i,idx in enumerate(sorted_indices)}
    return X, vocab
def filter_by_vocab(X, full_vocab, true_vocab, unk_token):
    """Restrict the columns of X to n-grams built only from *true_vocab*.

    An n-gram column is kept when every one of its unigrams is in
    true_vocab (or equals unk_token).  When unk_token is a str, the counts
    of every dropped column are folded into the unk column first.

    Args:
        X: sparse count matrix (documents x tokens); mutated in place when
           unk counts are folded in.
        full_vocab: dict, n-gram -> column index.
        true_vocab: container of allowed unigrams.
        unk_token: str or None.

    Returns:
        (X, new_true_vocab): the column-filtered matrix and the kept
        n-gram -> new column dict (kept columns in ascending old order).
    """
    kept_cols = []
    for token, col in full_vocab.items():
        if all(u in true_vocab or u == unk_token for u in token.split(' ')):
            kept_cols.append(col)
    kept_cols = sorted(kept_cols)
    keep_mask = np.zeros(X.shape[1], dtype=bool)
    keep_mask[kept_cols] = True
    if isinstance(unk_token, str):
        unk_col = full_vocab[unk_token]
        # Fold the dropped columns' counts into the unk column (in place).
        X[:, unk_col] += X[:, ~keep_mask].sum(axis=1)
    X = X[:, keep_mask]
    col_to_token = {col: tok for tok, col in full_vocab.items()}
    new_true_vocab = {col_to_token[col]: i for i, col in enumerate(kept_cols)}
    return X, new_true_vocab
class BONgramsVectorizer(object):
    """Bag-of-n-grams vectorizer turning text into sparse count vectors.

    Two modes of use: when vocab=None, the vocabulary is inferred from the
    corpus subject to min_count, max_count, max_words and ngram_range.
    When vocab is a list of tokens, only those tokens are counted, and if
    unk_token is a string it is included in the vocabulary and collects
    the counts of out-of-vocabulary tokens.
    """
    def __init__(self, tokenizer=None, min_count=0.,
                 max_count=None, max_words=None, ngram_range=(1,1),
                 vocab=None, unk_token=None):
        # tokenizer defaults to identity (documents are already token lists).
        if tokenizer is None:
            self.tokenizer = lambda x: x
        elif not callable(tokenizer):
            raise ValueError('Tokenizer must be callable or None.')
        else:
            self.tokenizer = tokenizer
        self.min_count = min_count
        self.max_count = max_count
        self.max_words = max_words
        self.ngram_range = ngram_range
        # infer_vocab: True when the vocabulary must be learned in fit.
        if vocab is None:
            self.vocab = None
            self.infer_vocab = True
        else:
            self.vocab = {tk:idx for idx,tk in enumerate(vocab)}
            self.infer_vocab = False
        if unk_token is None or isinstance(unk_token,str):
            self.unk_token = unk_token
        else:
            raise ValueError('unk_token must be string or None')
    def fit_transform(self,corpus):
        """Learn/apply the vocabulary and return the document-term matrix.

        Counts all n-grams, then filters columns either to the provided
        vocab (filter_by_vocab) or by frequency (filter_by_counts; note
        this runs in both branches). Stores the final vocab on self.
        """
        min_count = self.min_count
        max_count = self.max_count
        max_words = self.max_words
        ngram_range = self.ngram_range
        tokenizer = self.tokenizer
        unk_token = self.unk_token
        true_vocab = self.vocab
        X, full_vocab = count_bag_of_ngrams(corpus, ngram_range, tokenizer,
                                            unk_token)
        if not self.infer_vocab:
            X, true_vocab = filter_by_vocab(X,full_vocab,true_vocab,unk_token)
        else:
            true_vocab = full_vocab
        X, true_vocab = filter_by_counts(X,true_vocab,min_count,max_count,
                                     max_words,unk_token)
        self.vocab = true_vocab
        return X
    def fit(self,corpus):
        """Learn the vocabulary from *corpus* (result matrix discarded)."""
        _ = self.fit_transform(corpus)
    def transform(self,corpus):
        """Vectorize *corpus* with the already-fitted vocabulary.

        Unknown tokens are counted in a temporary extra column; if
        unk_token is set that column is copied onto the unk column,
        otherwise the unknown counts are silently dropped.
        """
        vocab = self.vocab
        unk_token = self.unk_token
        tokenizer = self.tokenizer
        ngram_range = self.ngram_range
        # CSR components for the co-occurrence matrix:
        data = []
        indices = []
        indptr = [0]
        # Count tokens; unseen tokens all map to the extra column unk_idx.
        unk_idx = len(vocab)
        for doc in tqdm(corpus):
            features = dict(Counter(get_ngrams(tokenizer(doc),ngram_range)))
            data.extend(features.values())
            indices.extend([vocab.get(tk,unk_idx) for tk in features])
            indptr.append(len(indices))
        # Build the matrix. The bare string blocks below are dead
        # alternative implementations kept for reference (no-op statements).
        """ X = csr_matrix((data,indices,indptr),shape=(len(corpus),len(vocab)+1))
        if isinstance(unk_token,str):
            X[:,vocab[unk_token]] = X[:,-1]
            X.resize(X.shape[0],X.shape[1]-1)
        X = X.tocsr() """
        """ X = csr_matrix((data,indices,indptr),shape=(len(corpus),len(vocab)+1)).T
        if isinstance(unk_token,str):
            X[vocab[unk_token],:] = X[-1,:]
            X.resize(X.shape[0]-1,X.shape[1])
        X = X.T.tocsr() """
        """ X = csr_matrix((data,indices,indptr),shape=(len(corpus),len(vocab)+1)).tocsc()
        if isinstance(unk_token,str):
            X[:,vocab[unk_token]] = X[:,-1]
            X.resize(X.shape[0],X.shape[1]-1)
        X = X.tocsr() """
        X = csr_matrix((data,indices,indptr),shape=(len(corpus),len(vocab)+1)).tolil()
        if isinstance(unk_token,str):
            X[:,vocab[unk_token]] = X[:,-1]
        X.resize(X.shape[0],X.shape[1]-1)
        X = X.tocsr()
        return X
| [
"tqdm.tqdm",
"numpy.logical_and",
"numpy.zeros",
"numpy.ones",
"collections.defaultdict",
"numpy.argsort",
"numpy.array",
"itertools.islice",
"itertools.tee"
] | [((705, 718), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (716, 718), False, 'from collections import Counter, defaultdict\n'), ((801, 813), 'tqdm.tqdm', 'tqdm', (['corpus'], {}), '(corpus)\n', (805, 813), False, 'from tqdm import tqdm\n'), ((2818, 2850), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {'dtype': 'bool'}), '(X.shape[1], dtype=bool)\n', (2826, 2850), True, 'import numpy as np\n'), ((1398, 1415), 'numpy.argsort', 'np.argsort', (['freqs'], {}), '(freqs)\n', (1408, 1415), True, 'import numpy as np\n'), ((1518, 1549), 'numpy.ones', 'np.ones', (['X.shape[1]'], {'dtype': 'bool'}), '(X.shape[1], dtype=bool)\n', (1525, 1549), True, 'import numpy as np\n'), ((1612, 1697), 'numpy.logical_and', 'np.logical_and', (['(sorted_frequencies <= max_count)', '(sorted_frequencies >= min_count)'], {}), '(sorted_frequencies <= max_count, sorted_frequencies >= min_count\n )\n', (1626, 1697), True, 'import numpy as np\n'), ((2191, 2222), 'numpy.ones', 'np.ones', (['X.shape[1]'], {'dtype': 'bool'}), '(X.shape[1], dtype=bool)\n', (2198, 2222), True, 'import numpy as np\n'), ((5294, 5306), 'tqdm.tqdm', 'tqdm', (['corpus'], {}), '(corpus)\n', (5298, 5306), False, 'from tqdm import tqdm\n'), ((317, 326), 'itertools.tee', 'tee', (['tlst'], {}), '(tlst)\n', (320, 326), False, 'from itertools import chain, tee, islice\n'), ((346, 358), 'itertools.islice', 'islice', (['a', 'n'], {}), '(a, n)\n', (352, 358), False, 'from itertools import chain, tee, islice\n'), ((2157, 2176), 'numpy.array', 'np.array', (['[unk_idx]'], {}), '([unk_idx])\n', (2165, 2176), True, 'import numpy as np\n'), ((2035, 2054), 'numpy.array', 'np.array', (['[unk_idx]'], {}), '([unk_idx])\n', (2043, 2054), True, 'import numpy as np\n')] |
import logging
import numpy as np
from typing import Sequence
from tenacity import retry, wait_random_exponential, retry_if_result
import geode.models as m
from geode.utils import marshall_to, point_to_str
from .distance_matrix import map_from_distance_matrix_response
from .geocoding import map_from_address
from .models import GoogleGeocodingResponse, GoogleDistanceMatrixResponse, GoogleStatus
logger = logging.getLogger()
def is_over_query_limit(x):
    """Return True when the response status signals Google throttling or
    an oversized request (triggers a tenacity retry)."""
    throttled = (GoogleStatus.OVER_QUERY_LIMIT, GoogleStatus.MAX_ELEMENTS_EXCEEDED)
    return x.status in throttled
# random wait alleviates retry bursts
# Retry policies: exponential backoff with jitter (0.1s base, 2s cap),
# retried while the response status is a rate/size-limit status.
GEOCODE_RETRY = retry(
    wait=wait_random_exponential(multiplier=0.1, min=0.1, max=2, exp_base=1.5),
    retry=retry_if_result(is_over_query_limit),
)
MATRIX_RETRY = retry(
    wait=wait_random_exponential(multiplier=0.1, min=0.1, max=2, exp_base=1.5),
    retry=retry_if_result(is_over_query_limit),
)
class Client(m.distance_matrix.Client, m.geocoding.Client):
    """Async Google Maps client implementing the geode geocoding and
    distance-matrix interfaces. Requests go through an aiohttp-style
    `session` passed to each call; geocode and distance-matrix calls are
    wrapped with tenacity retry policies at construction time."""
    type_: str
    base_url: str
    geocoding_path = 'maps/api/geocode/json'
    distance_matrix_path = 'maps/api/distancematrix/json'
    # Separator Google expects between points in a query parameter.
    point_sep = '|'
    key: str
    area_max: int
    factor_max: int
    # client_id: str
    # secret: str
    def __init__(
            self,
            type_='google',
            base_url='https://maps.googleapis.com/',
            key='',
            area_max=625,
            factor_max=380,
            geocode_retry=GEOCODE_RETRY,
            matrix_retry=MATRIX_RETRY
    ):
        """
        Args:
            type_: provider identifier string.
            base_url: API root URL.
            key: Google API key sent with every request.
            area_max, factor_max: partitioning limits — presumably consumed
                by m.distance_matrix.partition; confirm against that decorator.
            geocode_retry, matrix_retry: tenacity retry decorators applied
                to the raw request methods.
        """
        self.type_ = type_
        self.base_url = base_url
        self.key = key
        self.area_max = area_max
        self.factor_max = factor_max
        # Wrap the raw request coroutines with the retry policies.
        self._geocode = geocode_retry(self._geocode)
        self._distance_matrix = matrix_retry(self._distance_matrix)
    async def request(self, path, params, session=None):
        """GET base_url + path with the API key merged into *params*."""
        return await session.get(
            self.base_url + path,
            params=dict(
                key=self.key,
                **params
            )
        )
    async def _geocode(self, location: m.Location, session=None) -> GoogleGeocodingResponse:
        """Raw geocoding request; accepts an address string or a point."""
        if isinstance(location, str):
            res = await self.request(self.geocoding_path, dict(address=location), session=session)
        else:
            res = await self.request(self.geocoding_path, dict(address=point_to_str(location)), session=session)
        return marshall_to(GoogleGeocodingResponse, await res.json())
    async def geocode(self, location: m.Location, session=None) -> Sequence[m.geocoding.Result]:
        """Geocode *location* and map Google results to geode results."""
        data = await self._geocode(location, session)
        return list(map(map_from_address, data.results))
    async def _distance_matrix(self, origins: np.ndarray, destinations: np.ndarray,
                               session=None) -> GoogleDistanceMatrixResponse:
        """Raw distance-matrix request for origins x destinations points."""
        res = await self.request(
            self.distance_matrix_path,
            dict(
                origins=self.point_sep.join(map(point_to_str, origins)),
                destinations=self.point_sep.join(map(point_to_str, destinations))
            ),
            session=session
        )
        return marshall_to(GoogleDistanceMatrixResponse, await res.json())
    @m.distance_matrix.partition
    async def distance_matrix(self, origins: np.ndarray, destinations: np.ndarray,
                              session=None) -> m.distance_matrix.Result:
        """Distance matrix between *origins* and *destinations*.

        Short-circuits with an empty distances array when either side is
        empty (avoids a pointless API call).
        """
        if len(origins) == 0 or len(destinations) == 0:
            return m.distance_matrix.Result(
                origins=origins,
                destinations=destinations,
                distances=np.array([])
            )
        data = await self._distance_matrix(origins, destinations, session)
        result = map_from_distance_matrix_response(data)
        return m.distance_matrix.Result(
            origins=origins,
            destinations=destinations,
            distances=result
        )
| [
"geode.models.distance_matrix.Result",
"tenacity.retry_if_result",
"numpy.array",
"tenacity.wait_random_exponential",
"geode.utils.point_to_str",
"logging.getLogger"
] | [((408, 427), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (425, 427), False, 'import logging\n'), ((633, 702), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'multiplier': '(0.1)', 'min': '(0.1)', 'max': '(2)', 'exp_base': '(1.5)'}), '(multiplier=0.1, min=0.1, max=2, exp_base=1.5)\n', (656, 702), False, 'from tenacity import retry, wait_random_exponential, retry_if_result\n'), ((714, 750), 'tenacity.retry_if_result', 'retry_if_result', (['is_over_query_limit'], {}), '(is_over_query_limit)\n', (729, 750), False, 'from tenacity import retry, wait_random_exponential, retry_if_result\n'), ((786, 855), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'multiplier': '(0.1)', 'min': '(0.1)', 'max': '(2)', 'exp_base': '(1.5)'}), '(multiplier=0.1, min=0.1, max=2, exp_base=1.5)\n', (809, 855), False, 'from tenacity import retry, wait_random_exponential, retry_if_result\n'), ((867, 903), 'tenacity.retry_if_result', 'retry_if_result', (['is_over_query_limit'], {}), '(is_over_query_limit)\n', (882, 903), False, 'from tenacity import retry, wait_random_exponential, retry_if_result\n'), ((3744, 3834), 'geode.models.distance_matrix.Result', 'm.distance_matrix.Result', ([], {'origins': 'origins', 'destinations': 'destinations', 'distances': 'result'}), '(origins=origins, destinations=destinations,\n distances=result)\n', (3768, 3834), True, 'import geode.models as m\n'), ((3567, 3579), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3575, 3579), True, 'import numpy as np\n'), ((2314, 2336), 'geode.utils.point_to_str', 'point_to_str', (['location'], {}), '(location)\n', (2326, 2336), False, 'from geode.utils import marshall_to, point_to_str\n')] |
"""
Module implements the scaler.
"""
from typing import List, Union
import numpy as np
from monty.json import MSONable
class StandardScaler(MSONable):
    """Standardize targets to zero mean and unit variance per feature.

    Mirrors the sklearn fit/transform interface and adds MSONable
    dict (de)serialization.
    """

    def __init__(self, mean: Union[List, np.ndarray] = None, std: Union[List, np.ndarray] = None):
        """
        Args:
            mean: per-feature mean values.
            std: per-feature standard deviations.
        """
        self.mean = mean
        self.std = std

    def fit(self, target: Union[List, np.ndarray]) -> None:
        """Estimate mean and std from an (m x n) target array.

        Args:
            target: m samples by n feature dimensions.
        """
        self.mean = np.mean(target, axis=0)
        self.std = np.std(target, axis=0)

    def transform(self, target: np.ndarray) -> np.ndarray:
        """Return (target - mean) / std.

        Raises:
            ValueError: if the scaler has not been fitted/parameterized.
        """
        if self.mean is None or self.std is None:
            raise ValueError("No parameters is given.")
        return (target - self.mean) / self.std

    def inverse_transform(self, transformed_target: np.ndarray) -> np.ndarray:
        """Undo transform(): return transformed_target * std + mean.

        Raises:
            ValueError: if the scaler has not been fitted/parameterized.
        """
        if self.mean is None or self.std is None:
            raise ValueError("No parameters is given.")
        return transformed_target * self.std + self.mean

    def __repr__(self):
        return "{}(mean={}, std={})".format(type(self).__name__, self.mean, self.std)

    def as_dict(self):
        """Dict representation of StandardScaler (MSON format)."""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "params": {"mean": self.mean.tolist(), "std": self.std.tolist()},
        }

    @classmethod
    def from_dict(cls, d):
        """Rebuild a StandardScaler from a dict produced by as_dict().

        Args:
            d (dict): dict representation of StandardScaler.
        """
        return cls(**d["params"])
class DummyScaler(MSONable):
    """
    No-op scaler: fit does nothing, transform and inverse_transform pass
    data through unchanged. Kept API-compatible with StandardScaler.
    """

    def fit(self, target: Union[List, np.ndarray]) -> None:
        """
        No-op fit, present only for API compatibility.

        Args:
            target (ndarray): The (mxn) ndarray. m is the number of samples,
                n is the number of feature dimensions.
        """
        return

    def transform(self, target: Union[List, np.ndarray]) -> Union[List, np.ndarray]:
        """
        Return the target unchanged.

        Args:
            target (ndarray): The (mxn) ndarray. m is the number of samples,
                n is the number of feature dimensions.
        """
        return target

    def inverse_transform(self, transformed_target: Union[List, np.ndarray]) -> Union[List, np.ndarray]:
        """
        Return the transformed target unchanged.

        Args:
            transformed_target (ndarray): The (mxn) ndarray. m is the number
                of samples, n is the number of feature dimensions.
        """
        return transformed_target

    def __repr__(self):
        return f"{self.__class__.__name__}"

    def as_dict(self):
        """
        Serialize the instance into a dictionary.
        """
        return {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "params": {}}

    @classmethod
    def from_dict(cls, d):
        """
        Deserialize from a dictionary.

        Args:
            d: Dict, dictionary containing class initialization parameters
        """
        return cls()
| [
"numpy.std",
"numpy.mean"
] | [((835, 858), 'numpy.mean', 'np.mean', (['target'], {'axis': '(0)'}), '(target, axis=0)\n', (842, 858), True, 'import numpy as np\n'), ((873, 895), 'numpy.std', 'np.std', (['target'], {'axis': '(0)'}), '(target, axis=0)\n', (879, 895), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import pytest
import rpy2.robjects as robjects
from spatstat_interface.interface import SpatstatInterface
from spatstat_interface.utils import to_pandas_data_frame
@pytest.fixture
def spatstat():
    """Provide a spatstat session with the 'core' and 'geom' subpackages loaded."""
    interface = SpatstatInterface(update=True)
    interface.import_package("core", "geom", update=True)
    return interface
def test_spatstat_ppp_to_pandas_df(spatstat):
    """Converting a spatstat pair-correlation result yields a pandas DataFrame."""
    lo, hi = 0, 1
    bounds_r = robjects.FloatVector([lo, hi])
    window = spatstat.geom.owin(xrange=bounds_r, yrange=bounds_r)
    # 100 uniform coordinates on [lo, hi], reused for both axes.
    coords = list(lo + (hi - lo) * np.random.rand(100))
    coords_r = robjects.FloatVector(coords)
    pattern = spatstat.geom.ppp(coords_r, coords_r, window=window)
    pcf_result = spatstat.core.pcf_ppp(pattern)
    converted = to_pandas_data_frame(pcf_result)
    assert isinstance(converted, pd.DataFrame)
def test_simulate_dpp_gaussian(spatstat):
    """Simulating a Gaussian determinantal point process completes without error."""
    dpp_model = spatstat.core.dppGauss(**{"lambda": 100, "alpha": 0.05, "d": 2})
    unit_interval = robjects.FloatVector([0, 1])
    window = spatstat.geom.owin(xrange=unit_interval, yrange=unit_interval)
    spatstat.core.simulate_dppm(dpp_model, W=window)
    assert True
| [
"numpy.random.rand",
"spatstat_interface.interface.SpatstatInterface",
"rpy2.robjects.FloatVector",
"spatstat_interface.utils.to_pandas_data_frame"
] | [((253, 283), 'spatstat_interface.interface.SpatstatInterface', 'SpatstatInterface', ([], {'update': '(True)'}), '(update=True)\n', (270, 283), False, 'from spatstat_interface.interface import SpatstatInterface\n'), ((438, 461), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['B'], {}), '(B)\n', (458, 461), True, 'import rpy2.robjects as robjects\n'), ((595, 621), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['X_np'], {}), '(X_np)\n', (615, 621), True, 'import rpy2.robjects as robjects\n'), ((740, 769), 'spatstat_interface.utils.to_pandas_data_frame', 'to_pandas_data_frame', (['pcf_ppp'], {}), '(pcf_ppp)\n', (760, 769), False, 'from spatstat_interface.utils import to_pandas_data_frame\n'), ((968, 996), 'rpy2.robjects.FloatVector', 'robjects.FloatVector', (['[0, 1]'], {}), '([0, 1])\n', (988, 996), True, 'import rpy2.robjects as robjects\n'), ((566, 585), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (580, 585), True, 'import numpy as np\n')] |
import ultra
import move
import time
import numpy as np
# One-time setup of the move module before any motion commands are issued.
move.setup()
def rotateDegRight():
    """Turn one scan step to the right, stop, and let the chassis settle."""
    move.move(100, 'no', 'right', 0.8)
    time.sleep(0.0181)  # empirically tuned turn duration for one step
    move.motorStop()
    time.sleep(0.05)  # settle time before the next command
def rotateDegLeft():
    """Turn one scan step to the left, stop, and let the chassis settle."""
    move.move(100, 'no', 'left', 0.8)
    time.sleep(0.01935)  # empirically tuned turn duration for one step
    move.motorStop()
    time.sleep(0.05)  # settle time before the next command
def RadarScan():
    """Spin through 360 steps and return a list of [x, y] obstacle points.

    Each ultrasonic reading is scaled by 50 (per the original comment, one
    grid unit corresponds to 2 cm); readings beyond 50 units are treated as
    "no obstacle" and recorded as [0, 0].

    Returns:
        list[list[int]]: one [x, y] point per scan step.
    """
    results = []
    for i in range(360):
        rotateDegRight()
        dist = 50 * ultra.checkdist()  # each value is 2 cm
        if dist > 50:
            dist = 0
        # BUG FIX: the original referenced an undefined name `x` in the trig
        # expressions; the step index `i` (degrees turned so far) is the angle.
        angle = i * np.pi / 180
        results.append([int(dist * np.sin(angle)), int(dist * np.cos(angle))])
    return results
def createMapping(x_loc, y_loc, map):
    """Scan the surroundings and rasterize consecutive hits as lines into `map`.

    A [0, 0] point means "no reading" and breaks the current line segment, so
    lines are only drawn between consecutive valid scan points.
    """
    points = RadarScan()
    prev = points[0]
    for point in points[1:]:
        if point[0] == 0 and point[1] == 0:
            # Gap in the scan: do not connect across it.
            prev = None
            continue
        if prev is not None:
            plotLine(prev[0], prev[1], point[0], point[1], map, x_loc, y_loc)
        prev = point
def plotLine(x0, y0, x1, y1, grid, x_loc, y_loc):
    """Rasterize the segment (x0, y0)-(x1, y1) into `grid` using Bresenham.

    Coordinates are relative to the robot at (x_loc, y_loc); the robot sits
    at the centre of the grid, which is marked with 9. Every visited cell is
    marked with 1. y grows upward, so rows are counted down from the centre.

    Fixes over the original:
      * the centre mark indexed the row with ``num_cols`` (only correct for
        square grids) — row now uses ``num_rows``;
      * removed a leftover debug ``print`` of the grid shape.
    """
    num_rows, num_cols = grid.shape
    # Mark the robot's position at the grid centre.
    grid[num_rows // 2 - 1][num_cols // 2 - 1] = 9
    dx = abs(x1 - x0)
    sx = 1 if x0 < x1 else -1
    dy = -abs(y1 - y0)
    sy = 1 if y0 < y1 else -1
    err = dx + dy
    while True:
        grid[int(num_rows / 2 - y_loc - y0 - 1)][int(num_cols / 2 + x_loc + x0 - 1)] = 1
        if x0 == x1 and y0 == y1:
            break
        e2 = 2 * err
        if e2 >= dy:
            err += dy
            x0 += sx
        if e2 <= dx:
            err += dx
            y0 += sy
if __name__ == '__main__':
try:
grid = np.zeros(100,100)
createMapping(0,0,grid)
print(grid)
except KeyboardInterrupt:
move.destroy() | [
"move.setup",
"numpy.zeros",
"time.sleep",
"move.move",
"move.motorStop",
"numpy.sin",
"numpy.cos",
"ultra.checkdist",
"move.destroy"
] | [((56, 68), 'move.setup', 'move.setup', ([], {}), '()\n', (66, 68), False, 'import move\n'), ((100, 134), 'move.move', 'move.move', (['(100)', '"""no"""', '"""right"""', '(0.8)'], {}), "(100, 'no', 'right', 0.8)\n", (109, 134), False, 'import move\n'), ((140, 158), 'time.sleep', 'time.sleep', (['(0.0181)'], {}), '(0.0181)\n', (150, 158), False, 'import time\n'), ((166, 182), 'move.motorStop', 'move.motorStop', ([], {}), '()\n', (180, 182), False, 'import move\n'), ((191, 207), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (201, 207), False, 'import time\n'), ((237, 270), 'move.move', 'move.move', (['(100)', '"""no"""', '"""left"""', '(0.8)'], {}), "(100, 'no', 'left', 0.8)\n", (246, 270), False, 'import move\n'), ((276, 295), 'time.sleep', 'time.sleep', (['(0.01935)'], {}), '(0.01935)\n', (286, 295), False, 'import time\n'), ((303, 319), 'move.motorStop', 'move.motorStop', ([], {}), '()\n', (317, 319), False, 'import move\n'), ((328, 344), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (338, 344), False, 'import time\n'), ((1669, 1687), 'numpy.zeros', 'np.zeros', (['(100)', '(100)'], {}), '(100, 100)\n', (1677, 1687), True, 'import numpy as np\n'), ((447, 464), 'ultra.checkdist', 'ultra.checkdist', ([], {}), '()\n', (462, 464), False, 'import ultra\n'), ((1777, 1791), 'move.destroy', 'move.destroy', ([], {}), '()\n', (1789, 1791), False, 'import move\n'), ((562, 585), 'numpy.sin', 'np.sin', (['(x * np.pi / 180)'], {}), '(x * np.pi / 180)\n', (568, 585), True, 'import numpy as np\n'), ((592, 615), 'numpy.cos', 'np.cos', (['(x * np.pi / 180)'], {}), '(x * np.pi / 180)\n', (598, 615), True, 'import numpy as np\n')] |
"""
Adapted from keras example cifar10_cnn.py
Train ResNet-18 on the CIFAR10 small images dataset.
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10.py
"""
from __future__ import print_function
from tensorflow import keras
from tensorflow.keras.datasets import cifar10,cifar100
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# from tensorflow.keras.utils import np_utils
from tensorflow.keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping
import sys
import os
# Make the parent directory and the repository root importable for the
# pretrained_teachers / dataset / utils packages below.
sys.path.insert(1, os.getcwd()+'/..')
sys.path.insert(1, os.getcwd()[:-12])
import numpy as np
import argparse
import time
import pretrained_teachers.style_nets as style_nets
# LSF batch-job id used to tag run artifacts; empty when not running under LSF.
lsbjob = os.getenv('LSB_JOBID')
lsbjob = '' if lsbjob is None else lsbjob
import pretrained_teachers.cifar10_resnet50_lowResBaseline as cifar10_resnet50
from dataset.dataset_utils import bad_res102
from utils.feature_learning_utils import default_parameters
# Extend the project-wide default CLI parser with teacher-specific flags.
# Boolean flags come in --flag / --no-flag pairs writing the same dest.
default_parser = default_parameters()
parser = argparse.ArgumentParser(parents=[default_parser])
parser.add_argument('--resblocks16', default=3, type=int, help='resblocks')
parser.add_argument('--resblocks8', default=3, type=int, help='resblocks')
parser.add_argument('--n_classes', default=10, type=int, help='epochs')
parser.add_argument('--layer_norm_res16', dest='layer_norm_res16', action='store_true')
parser.add_argument('--no-layer_norm_res16', dest='layer_norm_res16', action='store_false')
parser.add_argument('--layer_norm_res8', dest='layer_norm_res8', action='store_true')
parser.add_argument('--no-layer_norm_res8', dest='layer_norm_res8', action='store_false')
parser.add_argument('--skip_conn16', dest='skip_conn16', action='store_true')
parser.add_argument('--no-skip_conn16', dest='skip_conn16', action='store_false')
parser.add_argument('--skip_conn8', dest='skip_conn8', action='store_true')
parser.add_argument('--no-skip_conn8', dest='skip_conn8', action='store_false')
parser.add_argument('--compile_on_spot', dest='compile_on_spot', action='store_true')
parser.add_argument('--no-compile_on_spot', dest='compile_on_spot', action='store_false')
parser.set_defaults(data_augmentation=True,
                    layer_norm_res=True,layer_norm_res16=True,layer_norm_res8=True,
                    layer_norm_2=True,
                    skip_conn=True,skip_conn8=True,skip_conn16=True,
                    dense_interface=False,
                    last_maxpool_en=False,
                    compile_on_spot=True,
                    resblocks = 5,
                    epochs = 200,
                    nl = 'elu',
                    stopping_patience = 30,
                    learning_patience = 10,
                    rotation_range = 5,
                    width_shift_range = 0.15,
                    height_shift_range = 0.15)
# CLI arguments are parsed once at import time into a plain dict; train_teacher
# reads its defaults from this dict.
teacher_config = parser.parse_args()
teacher_config = vars(teacher_config)
def train_teacher(n_classes = teacher_config['n_classes'], network_topology=teacher_config['network_topology']):
    """Train a CIFAR-10/100 teacher network and save its weights.

    Args:
        n_classes: 10 or 100 — selects the CIFAR variant.
        network_topology: 'resnet50_on_imagenet', 'v2' or 'default' — which
            architecture builder to use.

    Returns:
        The trained Keras model; weights are also saved to
        ``pretrained_teachers/{topology}_cifar{n}_teacher.hdf``.

    NOTE(review): both defaults are evaluated once at import time from the
    parsed CLI arguments in ``teacher_config``.
    """
    print('teacher_config ',teacher_config)
    # Unique run tag: LSF job id + manual suffix + unix timestamp.
    this_run_suffix = lsbjob+'__'+teacher_config['manual_suffix']+ str(int(time.time()))
    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=teacher_config['learning_patience'], min_lr=0.5e-6)
    early_stopper = EarlyStopping(min_delta=0.001, patience=teacher_config['stopping_patience'])
    # NOTE(review): the format string has one placeholder but two arguments —
    # this_run_suffix is silently dropped from the CSV log file name.
    csv_logger = CSVLogger('resnet_cifar10_{}.csv'.format(teacher_config['n_classes'],this_run_suffix))
    batch_size = 32
    validate_at_last = 5000  # number of training samples held out for validation
    nb_classes = teacher_config['n_classes']
    nb_epoch = teacher_config['epochs']
    data_augmentation = True
    # input image dimensions
    img_rows, img_cols = 32, 32
    # The CIFAR images are RGB.
    img_channels = 3
    # The data, shuffled and split between train and test sets:
    if n_classes==10:
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    elif n_classes==100:
        (X_train, y_train), (X_test, y_test) = cifar100.load_data()
    else:
        error  # deliberate NameError: only 10 or 100 classes are supported
    # Optionally degrade image resolution before training (res == -1 disables).
    if teacher_config['res']!=-1:
        X_train = np.array([bad_res102(xx, (teacher_config['res'], teacher_config['res'])) for xx in X_train])
        X_test = np.array([bad_res102(xx, (teacher_config['res'], teacher_config['res'])) for xx in X_test])
    # Hold out the last `validate_at_last` samples as the validation split.
    Y_train = y_train[:-validate_at_last]
    Y_val = y_train[-validate_at_last:]
    X_val = X_train[-validate_at_last:].astype('float32')
    X_train = X_train[:-validate_at_last].astype('float32')
    # subtract mean and normalize
    if network_topology == 'resnet50_on_imagenet':
        X_train = cifar10_resnet50.preprocess_image_input(X_train)
        X_val = cifar10_resnet50.preprocess_image_input(X_val)
    else:
        mean_image = np.mean(X_train, axis=0)
        X_train -= mean_image
        X_val -= mean_image
        X_train /= teacher_config['dataset_norm']
        X_val /= teacher_config['dataset_norm']
    # Build the requested architecture from the CLI configuration.
    if network_topology == 'resnet50_on_imagenet':
        model = cifar10_resnet50.define_compile_split_model(res=(teacher_config['res'] if teacher_config['res']!=-1 else 32),
                                                            metrics=['sparse_categorical_accuracy'],
                                                            n_classes=teacher_config['n_classes'])
    elif network_topology == 'v2':
        model =style_nets.parametric_net_befe_v2(dropout1=teacher_config['dropout1'],
                              dropout2=teacher_config['dropout2'],
                              resblocks16=teacher_config['resblocks16'],
                              resblocks8=teacher_config['resblocks8'],
                              layer_norm_res16=teacher_config['layer_norm_res16'],
                              layer_norm_res8=teacher_config['layer_norm_res8'],
                              layer_norm_2=teacher_config['layer_norm_2'],
                              skip_conn16=teacher_config['skip_conn16'],
                              skip_conn8=teacher_config['skip_conn8'],
                              dense_interface=teacher_config['dense_interface'],
                              last_maxpool_en=teacher_config['last_maxpool_en'],
                              nl=teacher_config['nl'],
                              last_layer_size=teacher_config['last_layer_size'])
    elif network_topology == 'default':
        model =style_nets.parametric_net_befe(dropout1=teacher_config['dropout1'],
                           dropout2=teacher_config['dropout2'],
                           resblocks=teacher_config['resblocks'],
                           layer_norm_res=teacher_config['layer_norm_res'],
                           layer_norm_2=teacher_config['layer_norm_2'],
                           skip_conn=teacher_config['skip_conn'],
                           dense_interface=teacher_config['dense_interface'],
                           nl=teacher_config['nl'],
                           last_layer_size=teacher_config['last_layer_size'],
                           last_maxpool_en = teacher_config['last_maxpool_en'])
    else:
        error  # deliberate NameError: unknown network topology
    if teacher_config['compile_on_spot']:
        model.compile(loss='sparse_categorical_crossentropy', #todo
                      optimizer='adam',
                      metrics=['sparse_categorical_accuracy'])
    if not teacher_config['data_augmentation']:
        print('Not using data augmentation.')
        model.fit(X_train, Y_train,
                  batch_size=batch_size,
                  epochs=nb_epoch,
                  validation_data=(X_val, Y_val),
                  shuffle=True,
                  callbacks=[lr_reducer, early_stopper, csv_logger])
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=teacher_config['rotation_range'],  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=teacher_config['width_shift_range'],  # randomly shift images horizontally (fraction of total width)
            height_shift_range=teacher_config['height_shift_range'],  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        # Compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(X_train)
        # Fit the model on the batches generated by datagen.flow().
        model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                            steps_per_epoch=X_train.shape[0] // batch_size,
                            validation_data=(X_val, Y_val),
                            epochs=nb_epoch, verbose=2, # max_q_size=100, #todo
                            callbacks=[lr_reducer, early_stopper, csv_logger])
    model.save('pretrained_teachers/{}_cifar{}_teacher.hdf'.format(network_topology,n_classes))
    return model
if __name__ == '__main__':
    # Train a teacher with the CLI-configured defaults when run as a script.
    train_teacher()
| [
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"utils.feature_learning_utils.default_parameters",
"argparse.ArgumentParser",
"dataset.dataset_utils.bad_res102",
"os.getcwd",
"tensorflow.keras.datasets.cifar100.load_data",
"pretrained_teachers.style_nets.parametric_net_befe",
"pretrained_te... | [((794, 816), 'os.getenv', 'os.getenv', (['"""LSB_JOBID"""'], {}), "('LSB_JOBID')\n", (803, 816), False, 'import os\n'), ((1063, 1083), 'utils.feature_learning_utils.default_parameters', 'default_parameters', ([], {}), '()\n', (1081, 1083), False, 'from utils.feature_learning_utils import default_parameters\n'), ((1093, 1142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'parents': '[default_parser]'}), '(parents=[default_parser])\n', (1116, 1142), False, 'import argparse\n'), ((3386, 3462), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'min_delta': '(0.001)', 'patience': "teacher_config['stopping_patience']"}), "(min_delta=0.001, patience=teacher_config['stopping_patience'])\n", (3399, 3462), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping\n'), ((626, 637), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (635, 637), False, 'import os\n'), ((664, 675), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (673, 675), False, 'import os\n'), ((3991, 4010), 'tensorflow.keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (4008, 4010), False, 'from tensorflow.keras.datasets import cifar10, cifar100\n'), ((4697, 4745), 'pretrained_teachers.cifar10_resnet50_lowResBaseline.preprocess_image_input', 'cifar10_resnet50.preprocess_image_input', (['X_train'], {}), '(X_train)\n', (4736, 4745), True, 'import pretrained_teachers.cifar10_resnet50_lowResBaseline as cifar10_resnet50\n'), ((4762, 4808), 'pretrained_teachers.cifar10_resnet50_lowResBaseline.preprocess_image_input', 'cifar10_resnet50.preprocess_image_input', (['X_val'], {}), '(X_val)\n', (4801, 4808), True, 'import pretrained_teachers.cifar10_resnet50_lowResBaseline as cifar10_resnet50\n'), ((4840, 4864), 'numpy.mean', 'np.mean', (['X_train'], {'axis': '(0)'}), '(X_train, axis=0)\n', (4847, 4864), True, 'import numpy as np\n'), ((5098, 5297), 
'pretrained_teachers.cifar10_resnet50_lowResBaseline.define_compile_split_model', 'cifar10_resnet50.define_compile_split_model', ([], {'res': "(teacher_config['res'] if teacher_config['res'] != -1 else 32)", 'metrics': "['sparse_categorical_accuracy']", 'n_classes': "teacher_config['n_classes']"}), "(res=teacher_config['res'] if \n teacher_config['res'] != -1 else 32, metrics=[\n 'sparse_categorical_accuracy'], n_classes=teacher_config['n_classes'])\n", (5141, 5297), True, 'import pretrained_teachers.cifar10_resnet50_lowResBaseline as cifar10_resnet50\n'), ((9741, 10128), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'featurewise_center': '(False)', 'samplewise_center': '(False)', 'featurewise_std_normalization': '(False)', 'samplewise_std_normalization': '(False)', 'zca_whitening': '(False)', 'rotation_range': "teacher_config['rotation_range']", 'width_shift_range': "teacher_config['width_shift_range']", 'height_shift_range': "teacher_config['height_shift_range']", 'horizontal_flip': '(True)', 'vertical_flip': '(False)'}), "(featurewise_center=False, samplewise_center=False,\n featurewise_std_normalization=False, samplewise_std_normalization=False,\n zca_whitening=False, rotation_range=teacher_config['rotation_range'],\n width_shift_range=teacher_config['width_shift_range'],\n height_shift_range=teacher_config['height_shift_range'],\n horizontal_flip=True, vertical_flip=False)\n", (9759, 10128), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3279, 3291), 'numpy.sqrt', 'np.sqrt', (['(0.1)'], {}), '(0.1)\n', (3286, 3291), True, 'import numpy as np\n'), ((4083, 4103), 'tensorflow.keras.datasets.cifar100.load_data', 'cifar100.load_data', ([], {}), '()\n', (4101, 4103), False, 'from tensorflow.keras.datasets import cifar10, cifar100\n'), ((6781, 7420), 'pretrained_teachers.style_nets.parametric_net_befe_v2', 'style_nets.parametric_net_befe_v2', ([], {'dropout1': "teacher_config['dropout1']", 
'dropout2': "teacher_config['dropout2']", 'resblocks16': "teacher_config['resblocks16']", 'resblocks8': "teacher_config['resblocks8']", 'layer_norm_res16': "teacher_config['layer_norm_res16']", 'layer_norm_res8': "teacher_config['layer_norm_res8']", 'layer_norm_2': "teacher_config['layer_norm_2']", 'skip_conn16': "teacher_config['skip_conn16']", 'skip_conn8': "teacher_config['skip_conn8']", 'dense_interface': "teacher_config['dense_interface']", 'last_maxpool_en': "teacher_config['last_maxpool_en']", 'nl': "teacher_config['nl']", 'last_layer_size': "teacher_config['last_layer_size']"}), "(dropout1=teacher_config['dropout1'],\n dropout2=teacher_config['dropout2'], resblocks16=teacher_config[\n 'resblocks16'], resblocks8=teacher_config['resblocks8'],\n layer_norm_res16=teacher_config['layer_norm_res16'], layer_norm_res8=\n teacher_config['layer_norm_res8'], layer_norm_2=teacher_config[\n 'layer_norm_2'], skip_conn16=teacher_config['skip_conn16'], skip_conn8=\n teacher_config['skip_conn8'], dense_interface=teacher_config[\n 'dense_interface'], last_maxpool_en=teacher_config['last_maxpool_en'],\n nl=teacher_config['nl'], last_layer_size=teacher_config['last_layer_size'])\n", (6814, 7420), True, 'import pretrained_teachers.style_nets as style_nets\n'), ((3218, 3229), 'time.time', 'time.time', ([], {}), '()\n', (3227, 3229), False, 'import time\n'), ((4195, 4257), 'dataset.dataset_utils.bad_res102', 'bad_res102', (['xx', "(teacher_config['res'], teacher_config['res'])"], {}), "(xx, (teacher_config['res'], teacher_config['res']))\n", (4205, 4257), False, 'from dataset.dataset_utils import bad_res102\n'), ((4305, 4367), 'dataset.dataset_utils.bad_res102', 'bad_res102', (['xx', "(teacher_config['res'], teacher_config['res'])"], {}), "(xx, (teacher_config['res'], teacher_config['res']))\n", (4315, 4367), False, 'from dataset.dataset_utils import bad_res102\n'), ((8099, 8580), 'pretrained_teachers.style_nets.parametric_net_befe', 'style_nets.parametric_net_befe', ([], 
{'dropout1': "teacher_config['dropout1']", 'dropout2': "teacher_config['dropout2']", 'resblocks': "teacher_config['resblocks']", 'layer_norm_res': "teacher_config['layer_norm_res']", 'layer_norm_2': "teacher_config['layer_norm_2']", 'skip_conn': "teacher_config['skip_conn']", 'dense_interface': "teacher_config['dense_interface']", 'nl': "teacher_config['nl']", 'last_layer_size': "teacher_config['last_layer_size']", 'last_maxpool_en': "teacher_config['last_maxpool_en']"}), "(dropout1=teacher_config['dropout1'],\n dropout2=teacher_config['dropout2'], resblocks=teacher_config[\n 'resblocks'], layer_norm_res=teacher_config['layer_norm_res'],\n layer_norm_2=teacher_config['layer_norm_2'], skip_conn=teacher_config[\n 'skip_conn'], dense_interface=teacher_config['dense_interface'], nl=\n teacher_config['nl'], last_layer_size=teacher_config['last_layer_size'],\n last_maxpool_en=teacher_config['last_maxpool_en'])\n", (8129, 8580), True, 'import pretrained_teachers.style_nets as style_nets\n')] |
import pandas as pd
import numpy as np
from itertools import product, combinations
from functools import reduce
from operator import and_
class BaseSwapAuditor():
    """Baseclass for shared functionality between all swap auditors.

    Stability trackers map a key to a ``(non_changes, total, marginal_map)``
    tuple, where ``marginal_map`` maps a marginal key to a
    ``(non_changes, total)`` pair for that marginal alone.
    """
    def __init__(self, data, predictor, id_column, protected_classes, target_col):
        # TODO: Add safety checks beyond data
        # Counter for individuals stability
        if not isinstance(data, pd.DataFrame):
            raise ValueError("The 'data' field must be a pandas dataframe.")
        self.data = data
        self.predictor = predictor
        self.id_column = id_column
        self.target_col = target_col
        self.protected_classes = protected_classes
        self.individual_stability = {}  # id -> (non_changes, total, marginal_map)
        self.subgroup_stability = {}    # subgroup key -> (non_changes, total, marginal_map)
        self.all_marginals = None       # filled by _calculate_marginal_subsets
        self.intersectional_classes = None
        self._calculate_intersectional_classes()
        self.subgroup_frames = None
        self._create_subgroup_datasets(data, self.intersectional_classes)
        # Feature columns fed to the predictor (everything except id and target).
        self.prediction_cols = data.columns.difference([self.id_column]+[self.target_col])
        # Cache one prediction per individual, keyed by id.
        prediction_list = self.predictor.predict(self.data[self.prediction_cols])
        self.prediction_dict = {name: pred for name, pred in zip(self.data[self.id_column], prediction_list)}
    def calculate_stability_individual(self, id, marginal_features):
        """Abstract: subclasses implement the per-individual swap audit."""
        raise NotImplementedError
    def _calculate_intersectional_classes(self):
        """Enumerate all value combinations of the protected classes."""
        # Create list of lists of possible values for each protected class
        values_for_classes = [self.data[c].unique() for c in self.protected_classes]
        self.intersectional_classes = [x for x in product(*values_for_classes)]
    def _calculate_marginal_subsets(self, marginal_features, k=None):
        """Store every non-empty subset of marginal_features in self.all_marginals."""
        # Calculates all subsets of features for use as marginals.
        # TODO: Add a k cap on marginal length
        all_marginals = []
        if k is None:
            for i in range(1, len(marginal_features)+1):
                els = [list(x) for x in combinations(marginal_features, i)]
                all_marginals.extend(els)
        else:
            raise NotImplementedError
        self.all_marginals = all_marginals
    def _retrieve_subgroup_individual(self, sample, protected_classes):
        """Return the subgroup tuple for a single-row `sample` dataframe."""
        return tuple(int(sample.iloc[0][c]) for c in protected_classes)
    def _sg_key(self, sg):
        """String key for a subgroup tuple (concatenated values)."""
        return ''.join([str(x) for x in sg])
    def _marginal_key(self, marginal):
        """String key for a marginal (concatenated feature names)."""
        return ''.join(marginal)
    def _create_subgroup_datasets(self, data, subgroups):
        """Split `data` into one dataframe per subgroup and init their trackers."""
        def apply_conditions(df, cond_list):
            # AND all boolean masks together and filter the dataframe.
            return df[reduce(and_, cond_list)]
        list_subgroup_frames = {}
        for sg in subgroups:
            # For each subgroup, create a conditional list with their
            # protected class values
            conds = []
            for i, pc in enumerate(self.protected_classes):
                condition = data[pc] == sg[i]
                conds.append(condition)
            subgroup_frame = apply_conditions(data, conds)
            list_subgroup_frames[self._sg_key(sg)] = subgroup_frame
            # Also, create subgroup stability tracker here
            self.subgroup_stability[self._sg_key(sg)] = (0, 0, {})
        self.subgroup_frames = list_subgroup_frames
    def _calculate_stability(self, original, frame):
        """Count how many entries of `frame` equal the original prediction."""
        return (frame == original).sum()
    def _stringify_marginals(self, marginals, percent=True):
        """Render a marginal_map as text, as ratios or as raw (count, total)."""
        string = "Marginals:\n\n"
        if percent:
            for m, vals in marginals.items():
                changed, total = vals
                string += m + ": " + str(changed/total)
                string += "\n"
        else:
            for m, vals in marginals.items():
                changed, total = vals
                string += m + ": " + "(" + str(changed) + ", " + str(total) + ")"
                string += "\n"
        return string
    def _calculate_subgroup_stability(self):
        """Aggregate the per-individual trackers into the subgroup trackers."""
        for id, vals in self.individual_stability.items():
            sample = self.data.loc[self.data[self.id_column].isin([id])]
            subgroup = self._retrieve_subgroup_individual(sample, self.protected_classes)
            all_non_changes, all_total, marginal_map = self.subgroup_stability[self._sg_key(subgroup)]
            ind_non_changes, ind_total, ind_marginal_map = vals
            for marginal, m_vals in ind_marginal_map.items():
                a, b = m_vals
                if self._marginal_key(marginal) not in marginal_map:
                    marginal_map[self._marginal_key(marginal)] = (0,0)
                s, p = marginal_map[self._marginal_key(marginal)]
                marginal_map[self._marginal_key(marginal)] = (s + a, p + b)
            self.subgroup_stability[self._sg_key(subgroup)] = (all_non_changes+ind_non_changes, all_total+ind_total, marginal_map)
    def _retrieve_stability(self, x, mappings, percent):
        """Look up key `x` in a tracker; return (stability, printable marginals)."""
        stability = None
        pretty_print_marginals = ""
        if x not in mappings:
            raise ValueError("Individual not in stability tracker.")
        all_non_changes, all_total, marginal_map = mappings[x]
        if percent:
            stability = all_non_changes/all_total
            pretty_print_marginals = self._stringify_marginals(marginal_map, percent=True)
        else:
            stability = (all_non_changes, all_total)
            pretty_print_marginals = self._stringify_marginals(marginal_map, percent=False)
        return stability, pretty_print_marginals
    def _retrieve_stability_individual(self, ind, percent=True):
        """Stability of one individual (ratio when percent, else raw counts)."""
        return self._retrieve_stability(ind, self.individual_stability, percent)
    def _retrieve_stability_subgroup(self, sg, percent=True):
        """Stability of one subgroup key (ratio when percent, else raw counts)."""
        return self._retrieve_stability(sg, self.subgroup_stability, percent)
    def _track_metrics(self, stability, predictions, marginal, x, mappings, individual=True):
        """Fold one batch of swap results into an individual or subgroup tracker.

        `stability` is the non-change count for this batch; `predictions` only
        contributes its length (batch size) to the totals.
        """
        if individual:
            if x not in self.individual_stability:
                self.individual_stability[int(x)] = (0, 0, {})
            all_non_changes, all_total, marginal_map = mappings[x]
        else:
            all_non_changes, all_total, marginal_map = mappings[self._sg_key(x)]
        all_non_changes += stability
        all_total += len(predictions)
        if self._marginal_key(marginal) not in marginal_map:
            marginal_map[self._marginal_key(marginal)] = (0,0)
        s, p = marginal_map[self._marginal_key(marginal)]
        marginal_map[self._marginal_key(marginal)] = (s + stability, p + len(predictions))
        if individual:
            mappings[x] = (all_non_changes, all_total, marginal_map)
        else:
            mappings[self._sg_key(x)] = (all_non_changes, all_total, marginal_map)
class NaiveSwapAuditor(BaseSwapAuditor):
    """Naive swap auditor.
    Provides functionality to compute all of the swaps for each individual.
    Approx O(n^2*(2^k-1)) time. Slow - n=1000, |pc|=2, |kmarginal|=3 ~45 mins.
    """
    def __init__(self, data, predictor, id_column, protected_classes, target_col):
        # Calculate the intersectional classes and feature subsets for marginals
        super().__init__(data, predictor, id_column, protected_classes, target_col)
    def calculate_stability_individual(self, id):
        """Exhaustively swap one individual against every other subgroup.

        For each marginal subset, each member of every other subgroup donates
        its marginal values to copies of the individual; the prediction on
        each swapped copy is compared against the original prediction.
        """
        # Split data into individual and rest
        sample = self.data.loc[self.data[self.id_column].isin([id])]
        if id not in self.individual_stability:
            self.individual_stability[int(id)] = (0, 0, {})
        #Original prediction
        original = self.prediction_dict[id]
        # print("Original prediction: " + str(original))
        subgroup = self._retrieve_subgroup_individual(sample, self.protected_classes)
        for sg in self.intersectional_classes:
            if subgroup != sg:
                sg_frame = self.subgroup_frames[self._sg_key(sg)]
                if len(sg_frame) == 0:
                    continue
                # Here we create a copy of sgdataframe, and replace all rows values
                # with x's values except marginal. Essentially, we are left
                # with a bunch of duplicate, swapped x's
                for marginal in self.all_marginals:
                    x_copy = sg_frame.copy()
                    x_repeated = sample.loc[sample.index.repeat(len(x_copy))]
                    columns_to_reassign = x_copy.columns.difference(marginal)
                    x_copy.loc[:,columns_to_reassign] = x_repeated.loc[:,columns_to_reassign].values
                    predictions = self.predictor.predict(x_copy[self.prediction_cols])
                    stability = self._calculate_stability(original, predictions)
                    # Do individual metric tracking
                    self._track_metrics(stability, predictions, marginal, int(id), self.individual_stability, individual=True)
                    # Do groupwise metric tracking
                    # NOTE: Removing this here, as it can be post calculated for Naive but not randomized. Makes
                    # For a fair comparison not to factor in runtime of this operation
                    # self._track_metrics(stability, predictions, marginal, sg, self.subgroup_stability, individual=False)
                    del x_copy
    def calculate_all_stability(self, marginal_features):
        """Run the exhaustive swap audit for every individual in the dataset."""
        # Calculate the intersectional classes and feature subsets for marginals
        self._calculate_marginal_subsets(marginal_features)
        for _, row in self.data.iterrows():
            self.calculate_stability_individual(row[self.id_column])
class RandomizedSamplingSwapAuditor(BaseSwapAuditor):
    """Randomized swap auditor.

    Runs the randomized swapping process for t iterations to
    achieve a stability estimate within epsilon of the true value
    with 1-delta probability. Defaults to a +-0.1 approximation,
    with 90% success probability.
    """
    def __init__(self, data, predictor, id_column, protected_classes, target_col):
        # Calculate the intersectional classes and feature subsets for marginals
        super().__init__(data, predictor, id_column, protected_classes, target_col)

    def calculate_stability_individual(self, id, t):
        """Estimate one individual's stability from ``t`` random swap partners.

        Parameters
        ----------
        id : value matched against ``self.id_column``.
        t : int
            Number of partner rows sampled (with replacement) per marginal.
        """
        # Select the single row belonging to this individual.
        sample = self.data.loc[self.data[self.id_column].isin([id])]
        if id not in self.individual_stability:
            self.individual_stability[int(id)] = (0, 0, {})
        # Prediction for the unmodified individual (precomputed by the base class).
        original = self.prediction_dict[id]
        # print("Original prediction: " + str(original))
        subgroup = self._retrieve_subgroup_individual(sample, self.protected_classes)
        # Create a frame with all samples from *other* subgroups to draw from.
        non_sg_frames = []
        for sg in self.intersectional_classes:
            if subgroup != sg:
                sg_frame = self.subgroup_frames[self._sg_key(sg)]
                non_sg_frames.append(sg_frame)
        all_other_frames = pd.concat(non_sg_frames)
        # For each marginal feature subset:
        for marginal in self.all_marginals:
            # "For t iterations" - equivalent to t samples (with replacement)
            # of the pooled other-subgroup frame.
            other_subgroup_samples = all_other_frames.sample(n=t, replace=True)
            # Repeat the individual's row t times so the positional .values
            # assignment below lines up row-for-row.
            x_repeated = sample.loc[sample.index.repeat(len(other_subgroup_samples))]
            columns_to_reassign = other_subgroup_samples.columns.difference(marginal)
            other_subgroup_samples.loc[:,columns_to_reassign] = x_repeated.loc[:,columns_to_reassign].values
            predictions = self.predictor.predict(other_subgroup_samples[self.prediction_cols])
            stability = self._calculate_stability(original, predictions)
            # Do individual metric tracking
            self._track_metrics(stability, predictions, marginal, int(id), self.individual_stability, individual=True)
            del other_subgroup_samples

    def calculate_all_stability(self, marginal_features, delta=0.1, epsilon=0.1):
        """Audit every record with a sample size derived from (delta, epsilon).

        Parameters
        ----------
        marginal_features : features from which the marginal subsets are built.
        delta : float
            Allowed failure probability (estimate within epsilon w.p. 1-delta).
        epsilon : float
            Target additive approximation error.
        """
        # Calculate the intersectional classes and feature subsets for marginals
        self._calculate_marginal_subsets(marginal_features)
        n = len(self.data)
        # Hoeffding-style sample size: t = ceil( ln(2n/delta) / (2*epsilon^2) ).
        t = int(np.ceil((np.log((2*n)/delta))/(2*epsilon**2)))
        print("Iterations: " + str(t))
        for _, row in self.data.iterrows():
            self.calculate_stability_individual(row[self.id_column], t)
class RandomizedGroupSwapAuditor(BaseSwapAuditor):
    """Randomized group swap auditor.

    This should be the most efficient randomized algorithm, but also the most
    difficult one to analyze. Performs a real swap, so no wasted iterations -
    every measurement step produces information for both swapped x <=> y.
    """
    def __init__(self, data, predictor, id_column, protected_classes, target_col):
        # Calculate the intersectional classes and feature subsets for marginals
        super().__init__(data, predictor, id_column, protected_classes, target_col)

    def run_group_experiment(self, marginal):
        """Run one pairwise swap experiment between every ordered subgroup pair.

        For each (sg_1, sg_2) pair, equal-size samples of both subgroup frames
        exchange all column values except ``marginal``; both swapped frames are
        re-predicted and every row's outcome is folded into
        ``self.individual_stability``.
        """
        def _measure_individual_by_row(id, swap_prediction):
            # Compare one swapped row's prediction to the cached original and
            # record a single (stable/unstable) observation for that id.
            original = self.prediction_dict[id]
            stable = (swap_prediction == original)
            self._track_metrics(stability=stable, predictions=[1], marginal=marginal, x=int(id), mappings=self.individual_stability)
        for sg_1 in self.intersectional_classes:
            sg1_frame = self.subgroup_frames[self._sg_key(sg_1)]
            for sg_2 in self.intersectional_classes:
                if sg_1 != sg_2:
                    sg2_frame = self.subgroup_frames[self._sg_key(sg_2)]
                    # Swap in equal-size batches: the smaller subgroup bounds the size.
                    sample_group_size = min(len(sg1_frame), len(sg2_frame))
                    swap_frame_sg1 = sg1_frame.sample(n=sample_group_size, replace=False)
                    swap_frame_sg2 = sg2_frame.sample(n=sample_group_size, replace=False)
                    columns_to_reassign = swap_frame_sg2.columns.difference(marginal)
                    # NOTE(review): in each branch the second assignment copies from
                    # the *full, unsampled* frame (sg1_frame / sg2_frame), not from
                    # the sampled-and-permuted swap frame — so the two sides are not
                    # paired row-for-row. Presumably intentional extra randomization;
                    # verify against the algorithm's analysis.
                    if len(sg1_frame) < len(sg2_frame):
                        swap_frame_sg1.loc[:,columns_to_reassign] = swap_frame_sg2.loc[:,columns_to_reassign].values
                        swap_frame_sg2.loc[:,columns_to_reassign] = sg1_frame.loc[:,columns_to_reassign].values
                    else:
                        swap_frame_sg2.loc[:,columns_to_reassign] = swap_frame_sg1.loc[:,columns_to_reassign].values
                        swap_frame_sg1.loc[:,columns_to_reassign] = sg2_frame.loc[:,columns_to_reassign].values
                    sg1_predictions = self.predictor.predict(swap_frame_sg1[self.prediction_cols])
                    sg2_predictions = self.predictor.predict(swap_frame_sg2[self.prediction_cols])
                    # Attach predictions so the per-row measurement can read them.
                    swap_frame_sg1.insert(0, "Predictions", sg1_predictions, True)
                    swap_frame_sg2.insert(0, "Predictions", sg2_predictions, True)
                    swap_frame_sg1.apply(lambda x: _measure_individual_by_row(x[self.id_column], x['Predictions']), axis=1)
                    swap_frame_sg2.apply(lambda x: _measure_individual_by_row(x[self.id_column], x['Predictions']), axis=1)
                    del swap_frame_sg1
                    del swap_frame_sg2

    def calculate_all_stability(self, marginal_features, delta=0.1, epsilon=0.1, t=None):
        """Repeat the group swap experiment ``t`` times for every marginal.

        Parameters
        ----------
        marginal_features : features from which the marginal subsets are built.
        delta, epsilon : float
            Concentration-bound parameters used to derive ``t`` when not given.
        t : int or None
            Explicit iteration count; overrides the derived bound.
        """
        # Calculate the intersectional classes and feature subsets for marginals
        self._calculate_marginal_subsets(marginal_features)
        # NOTE(review): n is computed but unused — the bound below uses the
        # number of subgroups m, not the dataset size.
        n = len(self.data)
        # Calculate number of t iterations with delta, epsilon, n
        # TODO: Verify this iteration calculation
        if t is None:
            m = len(self.subgroup_frames)
            t = int(np.ceil((np.log((2*m)/delta))/(2*epsilon**2)))
        print("Iterations: " + str(t))
        for marginal in self.all_marginals:
            for i in range(t):
                self.run_group_experiment(marginal)
"numpy.log",
"itertools.combinations",
"itertools.product",
"functools.reduce",
"pandas.concat"
] | [((11113, 11137), 'pandas.concat', 'pd.concat', (['non_sg_frames'], {}), '(non_sg_frames)\n', (11122, 11137), True, 'import pandas as pd\n'), ((1683, 1711), 'itertools.product', 'product', (['*values_for_classes'], {}), '(*values_for_classes)\n', (1690, 1711), False, 'from itertools import product, combinations\n'), ((2635, 2658), 'functools.reduce', 'reduce', (['and_', 'cond_list'], {}), '(and_, cond_list)\n', (2641, 2658), False, 'from functools import reduce\n'), ((12400, 12421), 'numpy.log', 'np.log', (['(2 * n / delta)'], {}), '(2 * n / delta)\n', (12406, 12421), True, 'import numpy as np\n'), ((2045, 2079), 'itertools.combinations', 'combinations', (['marginal_features', 'i'], {}), '(marginal_features, i)\n', (2057, 2079), False, 'from itertools import product, combinations\n'), ((15912, 15933), 'numpy.log', 'np.log', (['(2 * m / delta)'], {}), '(2 * m / delta)\n', (15918, 15933), True, 'import numpy as np\n')] |
# Original implementation by <NAME> can be found using the following link: https://github.com/ryansmcgee/seirsplus
# Copyright (c) 2020 by <NAME>, <NAME>, BIOMATH, Ghent University. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as numpy
import numpy as np
import scipy as scipy
import scipy.integrate
import pandas as pd
import random
from random import choices
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import copy
import multiprocessing
from pandas.plotting import register_matplotlib_converters
# Let matplotlib plot pandas datetime/period objects directly.
register_matplotlib_converters()
from .utils import read_coordinates_nis
from ..optimization import pso
# set color schemes: colour-blind-safe Okabe-Ito palette
# From Color Universal Design (CUD): https://jfly.uni-koeln.de/color/
orange = "#E69F00"
light_blue = "#56B4E9"
green = "#009E73"
yellow = "#F0E442"
blue = "#0072B2"
red = "#D55E00"
pink = "#CC79A7"
black = "#000000"
Okabe_Ito = (orange, light_blue, green, yellow, blue, red, pink, black)
# Make the palette the default matplotlib color cycle for this process.
plt.rcParams["axes.prop_cycle"] = matplotlib.cycler('color', Okabe_Ito)
# increase font sizes
# the code below is not wrong, but kinda annoying if you continuously import
# this model in a notebook using the load_ext magic
#multiplier = 1.5
#keys = ("font.size", )
#for key in keys:
#    plt.rcParams[key] *= multiplier
plt.rcParams["font.size"] = 15
plt.rcParams["lines.linewidth"] = 3
from .base import BaseModel
class COVID19_SEIRD(BaseModel):
    """
    Biomath extended SEIRD model for COVID-19
    Deterministic implementation

    Parameters
    ----------
    To initialise the model, provide following inputs:

    states : dictionary
        contains the initial values of all non-zero model states
        e.g. {'S': N, 'E': np.ones(n_stratification)} with N being the total population and n_stratifications the number of stratified layers
        initialising zeros is thus not required
        S : susceptible
        E : exposed
        I : infected
        A : asymptomatic
        M : mild
        ER: emergency room, buffer ward (hospitalized state)
        C : cohort
        C_icurec : cohort after recovery from ICU
        ICU : intensive care
        R : recovered
        D : deceased
        H_in : new hospitalizations
        H_out : new hospital discharges
        H_tot : total patients in Belgian hospitals

    parameters : dictionary
        containing the values of all parameters (both stratified and not)
        these can be obtained with the function parameters.get_COVID19_SEIRD_parameters()

        Non-stratified parameters
        -------------------------
        beta : probability of infection when encountering an infected person
        sigma : length of the latent period
        omega : length of the pre-symptomatic infectious period
        zeta : effect of re-susceptibility and seasonality
        a : probability of an asymptomatic cases
        m : probability of an initially mild infection (m=1-a)
        da : duration of the infection in case of asymptomatic
        dm : duration of the infection in case of mild
        der : duration of stay in emergency room/buffer ward
        dc : average length of a hospital stay when not in ICU
        dICU_R : average length of a hospital stay in ICU in case of recovery
        dICU_D: average length of a hospital stay in ICU in case of death
        dhospital : time before a patient reaches the hospital

        Age-stratified parameters
        -------------------------
        s: relative susceptibility to infection
        a : probability of a subclinical infection
        h : probability of hospitalisation for a mild infection
        c : probability of hospitalisation in Cohort (non-ICU)
        m_C : mortality in Cohort
        m_ICU : mortality in ICU
    """

    # ...state variables and parameters
    state_names = ['S', 'E', 'I', 'A', 'M', 'ER', 'C', 'C_icurec','ICU', 'R', 'D','H_in','H_out','H_tot']
    parameter_names = ['beta', 'sigma', 'omega', 'zeta','da', 'dm', 'der', 'dc_R','dc_D','dICU_R', 'dICU_D', 'dICUrec','dhospital']
    parameters_stratified_names = [['s','a','h', 'c', 'm_C','m_ICU']]
    stratification = ['Nc']
    apply_compliance_to = 'Nc'

    # ..transitions/equations
    @staticmethod
    def integrate(t, S, E, I, A, M, ER, C, C_icurec, ICU, R, D, H_in, H_out, H_tot,
                  beta, sigma, omega, zeta, da, dm, der, dc_R, dc_D, dICU_R, dICU_D, dICUrec,
                  dhospital, s, a, h, c, m_C, m_ICU, Nc):
        """
        Biomath extended SEIRD model for COVID-19
        *Deterministic implementation*

        Returns the time derivative of every model state, in the same
        order as ``state_names``.
        """
        # calculate total population (the deceased compartment D is excluded)
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~
        T = S + E + I + A + M + ER + C + C_icurec + ICU + R

        # Force of infection per age class. Hoisted so the matrix product is
        # evaluated once instead of twice (it appears in both dS and dE).
        infections = beta*s*np.matmul(Nc,((I+A)/T)*S)

        # Compute the rates of change in every population compartment
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        dS  = - infections + zeta*R
        dE  = infections - E/sigma
        dI = (1/sigma)*E - (1/omega)*I
        dA = (a/omega)*I - A/da
        dM = ((1-a)/omega)*I - M*((1-h)/dm) - M*h/dhospital
        dER = M*(h/dhospital) - (1/der)*ER
        dC = c*(1/der)*ER - (1-m_C)*C*(1/dc_R) - m_C*C*(1/dc_D)
        dC_icurec = ((1-m_ICU)/dICU_R)*ICU - C_icurec*(1/dICUrec)
        dICUstar = (1-c)*(1/der)*ER - (1-m_ICU)*ICU/dICU_R - m_ICU*ICU/dICU_D
        dR = A/da + ((1-h)/dm)*M + (1-m_C)*C*(1/dc_R) + C_icurec*(1/dICUrec) - zeta*R
        dD = (m_ICU/dICU_D)*ICU + (m_C/dc_D)*C
        # Bookkeeping states: daily admissions, daily discharges, hospital load.
        dH_in = M*(h/dhospital) - H_in
        dH_out = (1-m_C)*C*(1/dc_R) + m_C*C*(1/dc_D) + (m_ICU/dICU_D)*ICU + C_icurec*(1/dICUrec) - H_out
        dH_tot = M*(h/dhospital) - (1-m_C)*C*(1/dc_R) - m_C*C*(1/dc_D) - (m_ICU/dICU_D)*ICU - C_icurec*(1/dICUrec)

        return (dS, dE, dI, dA, dM, dER, dC, dC_icurec, dICUstar, dR, dD, dH_in, dH_out, dH_tot)
class COVID19_SEIRD_sto(BaseModel):
    """
    Biomath extended SEIRD model for COVID-19
    Stochastic (chain-binomial) implementation

    Parameters
    ----------
    To initialise the model, provide following inputs:

    states : dictionary
        contains the initial values of all non-zero model states
        e.g. {'S': N, 'E': np.ones(n_stratification)} with N being the total population and n_stratifications the number of stratified layers
        initialising zeros is thus not required
    parameters : dictionary
        containing the values of all parameters (both stratified and not)
        these can be obtained with the function parameters.get_COVID19_SEIRD_parameters()
    """

    # ...state variables and parameters
    state_names = ['S', 'E', 'I', 'A', 'M', 'ER', 'C', 'C_icurec','ICU', 'R', 'D', 'H_in', 'H_out', 'H_tot']
    parameter_names = ['beta', 'sigma', 'omega', 'zeta', 'da', 'dm', 'der', 'dc_R', 'dc_D', 'dICU_R', 'dICU_D', 'dICUrec', 'dhospital']
    parameters_stratified_names = [['s','a','h', 'c', 'm_C','m_ICU']]
    stratification = ['Nc']
    apply_compliance_to = 'Nc'

    # ..transitions/equations
    @staticmethod
    def integrate(t, S, E, I, A, M, ER, C, C_icurec, ICU, R, D, H_in, H_out, H_tot,
                  beta, sigma, omega, zeta, da, dm, der, dc_R, dc_D, dICU_R, dICU_D, dICUrec,
                  dhospital, s, a, h, c, m_C, m_ICU, Nc):
        """
        BIOMATH extended SEIRD model for COVID-19
        *Antwerp University stochastic implementation*

        Advances the model one discrete timestep: per-individual transition
        probabilities 1-exp(-l*rate) feed binomial draws per age bin, and
        the new value of every state is returned (same order as
        ``state_names``).
        """
        # Define solver parameters
        # ~~~~~~~~~~~~~~~~~~~~~~~~
        l = 1.0 # length of discrete timestep
        n = 1 # number of draws to average in one timestep (slows down calculations but converges to deterministic results when > 20)
        T = S + E + I + A + M + ER + C + C_icurec + ICU + R # calculate total population per age bin using 2D array

        # Make a dictionary containing the transitions and their propensities
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        keys = ['StoE','EtoI','ItoA','ItoM','AtoR','MtoR','MtoER','ERtoC','ERtoICU','CtoR','ICUtoCicurec','CicurectoR','CtoD','ICUtoD','RtoS']
        probabilities = [1 - np.exp( - l*s*beta*np.matmul(Nc,((I+A)/T)) ),
                        (1 - np.exp(- l * (1/sigma) ))*np.ones(S.size),
                        1 - np.exp(- l * a * (1/omega) ),
                        1 - np.exp(- l * (1-a)* (1/omega) ),
                        (1 - np.exp(- l * (1/da) ))*np.ones(S.size),
                        (1 - np.exp(- l * (1-h)* (1/dm) ))*np.ones(S.size),
                        1 - np.exp(- l * h * (1/dhospital) ),
                        1 - np.exp(- l * c * (1/der) ),
                        1 - np.exp(- l * (1-c) * (1/der) ),
                        (1 - np.exp(- l * (1-m_C) * (1/dc_R) ))*np.ones(S.size),
                        (1 - np.exp(- l * (1-m_ICU) * (1/dICU_R) ))*np.ones(S.size),
                        (1 - np.exp(- l * (1/dICUrec) ))*np.ones(S.size),
                        (1 - np.exp(- l * m_C * (1/dc_D) ))*np.ones(S.size),
                        (1 - np.exp(- l * m_ICU * (1/dICU_D) ))*np.ones(S.size),
                        (1 - np.exp(- l * zeta ))*np.ones(S.size),
                        ]
        states = [S,E,I,I,A,M,M,ER,ER,C,ICU,C_icurec,C,ICU,R]
        propensity={}
        for i in range(len(keys)):
            prop=[]
            for j in range(S.size):
                if states[i][j]<=0:
                    prop.append(0)
                else:
                    draw=np.array([])
                    # BUGFIX: this loop previously used `l` as its index,
                    # shadowing the timestep length defined above. The index
                    # is unused, so a throwaway name is used instead.
                    for _ in range(n):
                        draw = np.append(draw,np.random.binomial(states[i][j],probabilities[i][j]))
                    draw = np.rint(np.mean(draw))
                    prop.append( draw )
            propensity.update({keys[i]: np.asarray(prop)})

        # calculate the states at timestep k+1
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        S_new = S - propensity['StoE'] + propensity['RtoS']
        E_new = E + propensity['StoE'] - propensity['EtoI']
        I_new = I + propensity['EtoI'] - propensity['ItoA'] - propensity['ItoM']
        A_new = A + propensity['ItoA'] - propensity['AtoR']
        M_new = M + propensity['ItoM'] - propensity['MtoR'] - propensity['MtoER']
        ER_new = ER + propensity['MtoER'] - propensity['ERtoC'] - propensity['ERtoICU']
        C_new = C + propensity['ERtoC'] - propensity['CtoR'] - propensity['CtoD']
        C_icurec_new = C_icurec + propensity['ICUtoCicurec'] - propensity['CicurectoR']
        ICU_new = ICU + propensity['ERtoICU'] - propensity['ICUtoCicurec'] - propensity['ICUtoD']
        R_new = R + propensity['AtoR'] + propensity['MtoR'] + propensity['CtoR'] + propensity['CicurectoR'] - propensity['RtoS']
        D_new = D + propensity['ICUtoD'] + propensity['CtoD']
        H_in_new = propensity['ERtoC'] + propensity['ERtoICU']
        H_out_new = propensity['CtoR'] + propensity['CicurectoR']
        H_tot_new = H_tot + H_in_new - H_out_new - propensity['ICUtoD'] - propensity['CtoD']

        # Add protection against states < 0 (clip in place)
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        output = (S_new, E_new, I_new, A_new, M_new, ER_new, C_new, C_icurec_new,ICU_new, R_new, D_new,H_in_new,H_out_new,H_tot_new)
        for i in range(len(output)):
            output[i][output[i]<0] = 0

        return output
class COVID19_SEIRD_sto_spatial(BaseModel):
    """
    BIOMATH stochastic extended SEIRD model for COVID-19, spatially explicit

    Parameters
    ----------
    To initialise the model, provide following inputs:

    states : dictionary
        contains the initial values of all non-zero model states
        e.g. {'S': N, 'E': np.ones(n_stratification)} with N being the total population and n_stratifications the number of stratified layers
        initialising zeros is thus not required
    parameters : dictionary
        containing the values of all parameters (both stratified and not)
        these can be obtained with the function parameters.get_COVID19_SEIRD_parameters()
    """

    # ...state variables and parameters
    state_names = ['S', 'E', 'I', 'A', 'M', 'ER', 'C', 'C_icurec','ICU', 'R', 'D','H_in','H_out','H_tot']
    parameter_names = ['beta', 'sigma', 'omega', 'zeta','da', 'dm', 'der', 'dc_R','dc_D','dICU_R', 'dICU_D', 'dICUrec','dhospital']
    parameters_stratified_names = [None, ['s','a','h', 'c', 'm_C','m_ICU']]
    stratification = ['place','Nc']
    coordinates = [read_coordinates_nis()]
    coordinates.append(None)
    apply_compliance_to = 'Nc'

    # ..transitions/equations
    @staticmethod
    def integrate(t, S, E, I, A, M, ER, C, C_icurec, ICU, R, D, H_in, H_out, H_tot,
                  beta, sigma, omega, zeta, da, dm, der, dc_R, dc_D, dICU_R, dICU_D, dICUrec,
                  dhospital, s, a, h, c, m_C, m_ICU, place, Nc):
        """
        BIOMATH extended SEIRD model for COVID-19
        *Antwerp University stochastic implementation*

        Spatial variant: states are (place, age) matrices; the infection
        pressure is mixed across places through the ``place`` matrix before
        the chain-binomial draws. Returns the new value of every state.
        """
        # Define solver parameters
        # ~~~~~~~~~~~~~~~~~~~~~~~~
        l = 1.0 # length of discrete timestep
        n = 1 # number of draws to average in one timestep (slows down calculations but converges to deterministic results when > 20)
        T = S + E + I + A + M + ER + C + C_icurec + ICU + R # calculate total population per age bin using 2D array

        # Make a dictionary containing the propensities of the system
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        keys = ['StoE','EtoI','ItoA','ItoM','AtoR','MtoR','MtoER','ERtoC','ERtoICU','CtoR','ICUtoCicurec','CicurectoR','CtoD','ICUtoD','RtoS']
        # Per-place infection exponents, then mixed across places via `place`.
        matrix_1 = np.zeros([place.shape[0],Nc.shape[0]])
        for i in range(place.shape[0]):
            matrix_1[i,:] = -l*s*beta*np.matmul(Nc,((I[i,:]+A[i,:])/T[i,:]))
        matrix_2 = np.matmul(place,matrix_1)
        probabilities = [1 - np.exp(matrix_2),
                        (1 - np.exp(- l * (1/sigma) ))*np.ones([place.shape[0],Nc.shape[0]]),
                        1 - np.exp(- l * a * (1/omega) )*np.ones([place.shape[0],Nc.shape[0]]),
                        1 - np.exp(- l * (1-a)* (1/omega) )*np.ones([place.shape[0],Nc.shape[0]]),
                        (1 - np.exp(- l * (1/da) ))*np.ones([place.shape[0],Nc.shape[0]]),
                        (1 - np.exp(- l * (1-h)* (1/dm) ))*np.ones([place.shape[0],Nc.shape[0]]),
                        1 - np.exp(- l * h * (1/dhospital) )*np.ones([place.shape[0],Nc.shape[0]]),
                        1 - np.exp(- l * c * (1/der) )*np.ones([place.shape[0],Nc.shape[0]]),
                        1 - np.exp(- l * (1-c) * (1/der) )*np.ones([place.shape[0],Nc.shape[0]]),
                        (1 - np.exp(- l * (1-m_C) * (1/dc_R) ))*np.ones([place.shape[0],Nc.shape[0]]),
                        (1 - np.exp(- l * (1-m_ICU) * (1/dICU_R) ))*np.ones([place.shape[0],Nc.shape[0]]),
                        (1 - np.exp(- l * (1/dICUrec) ))*np.ones([place.shape[0],Nc.shape[0]]),
                        (1 - np.exp(- l * m_C * (1/dc_D) ))*np.ones([place.shape[0],Nc.shape[0]]),
                        (1 - np.exp(- l * m_ICU * (1/dICU_D) ))*np.ones([place.shape[0],Nc.shape[0]]),
                        (1 - np.exp(- l * zeta ))*np.ones([place.shape[0],Nc.shape[0]]),
                        ]
        states = [S,E,I,I,A,M,M,ER,ER,C,ICU,C_icurec,C,ICU,R]
        propensity={}
        for i in range(len(keys)):
            prop=np.zeros([place.shape[0],Nc.shape[0]])
            for j in range(place.shape[0]):
                for k in range(Nc.shape[0]):
                    if states[i][j][k]<=0:
                        prop[j,k]=0
                    else:
                        draw=np.array([])
                        # BUGFIX: this loop previously used `l` as its index,
                        # shadowing the timestep length defined above. The
                        # index is unused, so a throwaway name is used instead.
                        for _ in range(n):
                            draw = np.append(draw,np.random.binomial(states[i][j][k],probabilities[i][j][k]))
                        draw = np.rint(np.mean(draw))
                        prop[j,k] = draw
            propensity.update({keys[i]: np.asarray(prop)})

        # calculate the states at timestep k+1
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        S_new = S - propensity['StoE'] + propensity['RtoS']
        E_new = E + propensity['StoE'] - propensity['EtoI']
        I_new = I + propensity['EtoI'] - propensity['ItoA'] - propensity['ItoM']
        A_new = A + propensity['ItoA'] - propensity['AtoR']
        M_new = M + propensity['ItoM'] - propensity['MtoR'] - propensity['MtoER']
        ER_new = ER + propensity['MtoER'] - propensity['ERtoC'] - propensity['ERtoICU']
        C_new = C + propensity['ERtoC'] - propensity['CtoR'] - propensity['CtoD']
        C_icurec_new = C_icurec + propensity['ICUtoCicurec'] - propensity['CicurectoR']
        ICU_new = ICU + propensity['ERtoICU'] - propensity['ICUtoCicurec'] - propensity['ICUtoD']
        R_new = R + propensity['AtoR'] + propensity['MtoR'] + propensity['CtoR'] + propensity['CicurectoR'] - propensity['RtoS']
        D_new = D + propensity['ICUtoD'] + propensity['CtoD']
        H_in_new = propensity['ERtoC'] + propensity['ERtoICU']
        H_out_new = propensity['CtoR'] + propensity['CicurectoR']
        H_tot_new = H_tot + H_in_new - H_out_new - propensity['ICUtoD'] - propensity['CtoD']

        # Add protection against states < 0 (clip in place)
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        output = (S_new, E_new, I_new, A_new, M_new, ER_new, C_new, C_icurec_new,ICU_new, R_new, D_new,H_in_new,H_out_new,H_tot_new)
        for i in range(len(output)):
            output[i][output[i]<0] = 0

        return output
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
| [
"numpy.random.binomial",
"matplotlib.cycler",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"pandas.plotting.register_matplotlib_converters",
"numpy.mean",
"numpy.array",
"numpy.exp",
"numpy.matmul"
] | [((655, 687), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (685, 687), False, 'from pandas.plotting import register_matplotlib_converters\n'), ((1103, 1140), 'matplotlib.cycler', 'matplotlib.cycler', (['"""color"""', 'Okabe_Ito'], {}), "('color', Okabe_Ito)\n", (1120, 1140), False, 'import matplotlib\n'), ((13605, 13644), 'numpy.zeros', 'np.zeros', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (13613, 13644), True, 'import numpy as np\n'), ((13780, 13806), 'numpy.matmul', 'np.matmul', (['place', 'matrix_1'], {}), '(place, matrix_1)\n', (13789, 13806), True, 'import numpy as np\n'), ((15384, 15423), 'numpy.zeros', 'np.zeros', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (15392, 15423), True, 'import numpy as np\n'), ((4961, 4991), 'numpy.matmul', 'np.matmul', (['Nc', '((I + A) / T * S)'], {}), '(Nc, (I + A) / T * S)\n', (4970, 4991), True, 'import numpy as np\n'), ((5017, 5047), 'numpy.matmul', 'np.matmul', (['Nc', '((I + A) / T * S)'], {}), '(Nc, (I + A) / T * S)\n', (5026, 5047), True, 'import numpy as np\n'), ((8224, 8239), 'numpy.ones', 'np.ones', (['S.size'], {}), '(S.size)\n', (8231, 8239), True, 'import numpy as np\n'), ((8269, 8297), 'numpy.exp', 'np.exp', (['(-l * a * (1 / omega))'], {}), '(-l * a * (1 / omega))\n', (8275, 8297), True, 'import numpy as np\n'), ((8327, 8361), 'numpy.exp', 'np.exp', (['(-l * (1 - a) * (1 / omega))'], {}), '(-l * (1 - a) * (1 / omega))\n', (8333, 8361), True, 'import numpy as np\n'), ((8412, 8427), 'numpy.ones', 'np.ones', (['S.size'], {}), '(S.size)\n', (8419, 8427), True, 'import numpy as np\n'), ((8488, 8503), 'numpy.ones', 'np.ones', (['S.size'], {}), '(S.size)\n', (8495, 8503), True, 'import numpy as np\n'), ((8533, 8565), 'numpy.exp', 'np.exp', (['(-l * h * (1 / dhospital))'], {}), '(-l * h * (1 / dhospital))\n', (8539, 8565), True, 'import numpy as np\n'), ((8595, 8621), 'numpy.exp', 'np.exp', 
(['(-l * c * (1 / der))'], {}), '(-l * c * (1 / der))\n', (8601, 8621), True, 'import numpy as np\n'), ((8651, 8683), 'numpy.exp', 'np.exp', (['(-l * (1 - c) * (1 / der))'], {}), '(-l * (1 - c) * (1 / der))\n', (8657, 8683), True, 'import numpy as np\n'), ((8747, 8762), 'numpy.ones', 'np.ones', (['S.size'], {}), '(S.size)\n', (8754, 8762), True, 'import numpy as np\n'), ((8832, 8847), 'numpy.ones', 'np.ones', (['S.size'], {}), '(S.size)\n', (8839, 8847), True, 'import numpy as np\n'), ((8906, 8921), 'numpy.ones', 'np.ones', (['S.size'], {}), '(S.size)\n', (8913, 8921), True, 'import numpy as np\n'), ((8983, 8998), 'numpy.ones', 'np.ones', (['S.size'], {}), '(S.size)\n', (8990, 8998), True, 'import numpy as np\n'), ((9064, 9079), 'numpy.ones', 'np.ones', (['S.size'], {}), '(S.size)\n', (9071, 9079), True, 'import numpy as np\n'), ((9131, 9146), 'numpy.ones', 'np.ones', (['S.size'], {}), '(S.size)\n', (9138, 9146), True, 'import numpy as np\n'), ((13722, 13766), 'numpy.matmul', 'np.matmul', (['Nc', '((I[i, :] + A[i, :]) / T[i, :])'], {}), '(Nc, (I[i, :] + A[i, :]) / T[i, :])\n', (13731, 13766), True, 'import numpy as np\n'), ((13836, 13852), 'numpy.exp', 'np.exp', (['matrix_2'], {}), '(matrix_2)\n', (13842, 13852), True, 'import numpy as np\n'), ((13909, 13947), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (13916, 13947), True, 'import numpy as np\n'), ((14195, 14233), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14202, 14233), True, 'import numpy as np\n'), ((14293, 14331), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14300, 14331), True, 'import numpy as np\n'), ((14688, 14726), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14695, 14726), True, 'import numpy as np\n'), ((14795, 14833), 'numpy.ones', 'np.ones', (['[place.shape[0], 
Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14802, 14833), True, 'import numpy as np\n'), ((14891, 14929), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14898, 14929), True, 'import numpy as np\n'), ((14990, 15028), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14997, 15028), True, 'import numpy as np\n'), ((15093, 15131), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (15100, 15131), True, 'import numpy as np\n'), ((15182, 15220), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (15189, 15220), True, 'import numpy as np\n'), ((8198, 8222), 'numpy.exp', 'np.exp', (['(-l * (1 / sigma))'], {}), '(-l * (1 / sigma))\n', (8204, 8222), True, 'import numpy as np\n'), ((8389, 8410), 'numpy.exp', 'np.exp', (['(-l * (1 / da))'], {}), '(-l * (1 / da))\n', (8395, 8410), True, 'import numpy as np\n'), ((8458, 8489), 'numpy.exp', 'np.exp', (['(-l * (1 - h) * (1 / dm))'], {}), '(-l * (1 - h) * (1 / dm))\n', (8464, 8489), True, 'import numpy as np\n'), ((8712, 8747), 'numpy.exp', 'np.exp', (['(-l * (1 - m_C) * (1 / dc_R))'], {}), '(-l * (1 - m_C) * (1 / dc_R))\n', (8718, 8747), True, 'import numpy as np\n'), ((8793, 8832), 'numpy.exp', 'np.exp', (['(-l * (1 - m_ICU) * (1 / dICU_R))'], {}), '(-l * (1 - m_ICU) * (1 / dICU_R))\n', (8799, 8832), True, 'import numpy as np\n'), ((8878, 8904), 'numpy.exp', 'np.exp', (['(-l * (1 / dICUrec))'], {}), '(-l * (1 / dICUrec))\n', (8884, 8904), True, 'import numpy as np\n'), ((8952, 8981), 'numpy.exp', 'np.exp', (['(-l * m_C * (1 / dc_D))'], {}), '(-l * m_C * (1 / dc_D))\n', (8958, 8981), True, 'import numpy as np\n'), ((9029, 9062), 'numpy.exp', 'np.exp', (['(-l * m_ICU * (1 / dICU_D))'], {}), '(-l * m_ICU * (1 / dICU_D))\n', (9035, 9062), True, 'import numpy as np\n'), ((9110, 9127), 'numpy.exp', 'np.exp', 
(['(-l * zeta)'], {}), '(-l * zeta)\n', (9116, 9127), True, 'import numpy as np\n'), ((9467, 9479), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9475, 9479), True, 'import numpy as np\n'), ((9749, 9765), 'numpy.asarray', 'np.asarray', (['prop'], {}), '(prop)\n', (9759, 9765), True, 'import numpy as np\n'), ((13883, 13907), 'numpy.exp', 'np.exp', (['(-l * (1 / sigma))'], {}), '(-l * (1 / sigma))\n', (13889, 13907), True, 'import numpy as np\n'), ((13976, 14004), 'numpy.exp', 'np.exp', (['(-l * a * (1 / omega))'], {}), '(-l * a * (1 / omega))\n', (13982, 14004), True, 'import numpy as np\n'), ((14005, 14043), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14012, 14043), True, 'import numpy as np\n'), ((14072, 14106), 'numpy.exp', 'np.exp', (['(-l * (1 - a) * (1 / omega))'], {}), '(-l * (1 - a) * (1 / omega))\n', (14078, 14106), True, 'import numpy as np\n'), ((14104, 14142), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14111, 14142), True, 'import numpy as np\n'), ((14172, 14193), 'numpy.exp', 'np.exp', (['(-l * (1 / da))'], {}), '(-l * (1 / da))\n', (14178, 14193), True, 'import numpy as np\n'), ((14263, 14294), 'numpy.exp', 'np.exp', (['(-l * (1 - h) * (1 / dm))'], {}), '(-l * (1 - h) * (1 / dm))\n', (14269, 14294), True, 'import numpy as np\n'), ((14360, 14392), 'numpy.exp', 'np.exp', (['(-l * h * (1 / dhospital))'], {}), '(-l * h * (1 / dhospital))\n', (14366, 14392), True, 'import numpy as np\n'), ((14393, 14431), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14400, 14431), True, 'import numpy as np\n'), ((14460, 14486), 'numpy.exp', 'np.exp', (['(-l * c * (1 / der))'], {}), '(-l * c * (1 / der))\n', (14466, 14486), True, 'import numpy as np\n'), ((14487, 14525), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14494, 14525), 
True, 'import numpy as np\n'), ((14554, 14586), 'numpy.exp', 'np.exp', (['(-l * (1 - c) * (1 / der))'], {}), '(-l * (1 - c) * (1 / der))\n', (14560, 14586), True, 'import numpy as np\n'), ((14585, 14623), 'numpy.ones', 'np.ones', (['[place.shape[0], Nc.shape[0]]'], {}), '([place.shape[0], Nc.shape[0]])\n', (14592, 14623), True, 'import numpy as np\n'), ((14653, 14688), 'numpy.exp', 'np.exp', (['(-l * (1 - m_C) * (1 / dc_R))'], {}), '(-l * (1 - m_C) * (1 / dc_R))\n', (14659, 14688), True, 'import numpy as np\n'), ((14756, 14795), 'numpy.exp', 'np.exp', (['(-l * (1 - m_ICU) * (1 / dICU_R))'], {}), '(-l * (1 - m_ICU) * (1 / dICU_R))\n', (14762, 14795), True, 'import numpy as np\n'), ((14863, 14889), 'numpy.exp', 'np.exp', (['(-l * (1 / dICUrec))'], {}), '(-l * (1 / dICUrec))\n', (14869, 14889), True, 'import numpy as np\n'), ((14959, 14988), 'numpy.exp', 'np.exp', (['(-l * m_C * (1 / dc_D))'], {}), '(-l * m_C * (1 / dc_D))\n', (14965, 14988), True, 'import numpy as np\n'), ((15058, 15091), 'numpy.exp', 'np.exp', (['(-l * m_ICU * (1 / dICU_D))'], {}), '(-l * m_ICU * (1 / dICU_D))\n', (15064, 15091), True, 'import numpy as np\n'), ((15161, 15178), 'numpy.exp', 'np.exp', (['(-l * zeta)'], {}), '(-l * zeta)\n', (15167, 15178), True, 'import numpy as np\n'), ((15638, 15650), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15646, 15650), True, 'import numpy as np\n'), ((15923, 15939), 'numpy.asarray', 'np.asarray', (['prop'], {}), '(prop)\n', (15933, 15939), True, 'import numpy as np\n'), ((8142, 8168), 'numpy.matmul', 'np.matmul', (['Nc', '((I + A) / T)'], {}), '(Nc, (I + A) / T)\n', (8151, 8168), True, 'import numpy as np\n'), ((9654, 9667), 'numpy.mean', 'np.mean', (['draw'], {}), '(draw)\n', (9661, 9667), True, 'import numpy as np\n'), ((15831, 15844), 'numpy.mean', 'np.mean', (['draw'], {}), '(draw)\n', (15838, 15844), True, 'import numpy as np\n'), ((9565, 9618), 'numpy.random.binomial', 'np.random.binomial', (['states[i][j]', 'probabilities[i][j]'], {}), 
'(states[i][j], probabilities[i][j])\n', (9583, 9618), True, 'import numpy as np\n'), ((15736, 15795), 'numpy.random.binomial', 'np.random.binomial', (['states[i][j][k]', 'probabilities[i][j][k]'], {}), '(states[i][j][k], probabilities[i][j][k])\n', (15754, 15795), True, 'import numpy as np\n')] |
from librosa import core
import numpy as np
def change_speed(input_signal, rate):
    """Change the playback speed of an audio signal.

    Parameters
    ----------
    input_signal : numpy.array
        Input array, must have numerical (integer or float) type.
    rate : numeric
        Desired rate of change to the speed.
        To increase the speed, pass in a value greater than 1.0.
        To decrease the speed, pass in a value between 0.0 and 1.0.

    Returns
    -------
    numpy.array representing the audio signal with changed speed.

    Raises
    ------
    TypeError
        If ``input_signal`` is not an array of integers or floats.
    Exception
        If ``rate`` is not strictly positive.
    """
    if input_signal.dtype.kind not in 'iuf':
        raise TypeError("'input_signal' must be an array of integers or floats")
    if rate <= 0:
        raise Exception('rate must be a positive number')
    # Convert an integer signal to a -1.0..1.0 float: the STFT expects
    # floating-point input.
    if input_signal.dtype.kind in 'iu':
        # BUG FIX: np.iinfo must be queried with the *integer* dtype of the
        # input; np.iinfo('float32') raises ValueError, so every integer
        # input used to crash here.
        i = np.iinfo(input_signal.dtype)
        abs_max = 2 ** (i.bits - 1)
        offset = i.min + abs_max
        input_signal = (input_signal.astype('float32') - offset) / abs_max
    # Transform signal to frequency domain
    frequency_domain_signal = core.stft(input_signal)
    # Change speed with the phase vocoding method
    fds_changed_speed = core.phase_vocoder(frequency_domain_signal, rate)
    # Transform frequency domain signal back to time domain
    output_signal = core.istft(fds_changed_speed, dtype=input_signal.dtype)
    return output_signal
| [
"librosa.core.phase_vocoder",
"librosa.core.istft",
"numpy.iinfo",
"librosa.core.stft"
] | [((1146, 1169), 'librosa.core.stft', 'core.stft', (['input_signal'], {}), '(input_signal)\n', (1155, 1169), False, 'from librosa import core\n'), ((1245, 1294), 'librosa.core.phase_vocoder', 'core.phase_vocoder', (['frequency_domain_signal', 'rate'], {}), '(frequency_domain_signal, rate)\n', (1263, 1294), False, 'from librosa import core\n'), ((1376, 1431), 'librosa.core.istft', 'core.istft', (['fds_changed_speed'], {'dtype': 'input_signal.dtype'}), '(fds_changed_speed, dtype=input_signal.dtype)\n', (1386, 1431), False, 'from librosa import core\n'), ((907, 926), 'numpy.iinfo', 'np.iinfo', (['"""float32"""'], {}), "('float32')\n", (915, 926), True, 'import numpy as np\n')] |
import numpy as np
import nengo
import nengo_function_space as nfs
# Shared evaluation grid: every function below is sampled on these points.
domain = np.linspace(-1, 1, 200)


def gaussian(mag, mean, sd):
    """Evaluate a Gaussian bump of height ``mag``, centre ``mean`` and
    standard deviation ``sd`` on the shared ``domain`` grid."""
    centred = domain - mean
    return mag * np.exp(-centred ** 2 / (2 * sd ** 2))
# build the function space: functions are approximated by weights over
# n_basis basis functions derived from random draws of `gaussian`.
fs = nfs.FunctionSpace(
    nfs.Function(
        gaussian,
        mean=nengo.dists.Uniform(-1, 1),
        sd=nengo.dists.Uniform(0.05, 0.2),
        mag=1),
    n_basis=10)
model = nengo.Network()
with model:
    # create an ensemble to represent the weights over the basis functions
    memory = nengo.Ensemble(n_neurons=2000, dimensions=fs.n_basis)
    # use separate distributions for the encoders and the evaluation points.
    # TODO: why?
    # NOTE(review): encoders use a fixed narrow sd (0.05) while eval points
    # vary sd and magnitude — presumably to tune neuron selectivity; confirm.
    memory.encoders = fs.project(
        nfs.Function(gaussian,
                     mean=nengo.dists.Uniform(-1, 1),
                     sd=0.05,
                     mag=1))
    memory.eval_points = fs.project(
        nfs.Function(gaussian,
                     mean=nengo.dists.Uniform(-1, 1),
                     sd=nengo.dists.Uniform(0.1, 0.2),
                     mag=nengo.dists.Uniform(0, 1)))
    # input function parameterised as [mag, mean, sd]
    stimulus = fs.make_input([1, 0, 0.2])
    nengo.Connection(stimulus.output, memory)
    # set up integration: the recurrent connection makes `memory` hold its value
    nengo.Connection(memory, memory, synapse=0.1)
    # create a node to give a plot of the represented function
    plot = fs.make_plot_node(domain=domain, lines=2, n_pts=50)
    nengo.Connection(memory, plot[:fs.n_basis], synapse=0.1)
    nengo.Connection(stimulus.output, plot[fs.n_basis:], synapse=0.1)
| [
"nengo.dists.Uniform",
"numpy.exp",
"numpy.linspace",
"nengo.Network",
"nengo.Connection",
"nengo.Ensemble"
] | [((78, 101), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(200)'], {}), '(-1, 1, 200)\n', (89, 101), True, 'import numpy as np\n'), ((422, 437), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (435, 437), False, 'import nengo\n'), ((538, 591), 'nengo.Ensemble', 'nengo.Ensemble', ([], {'n_neurons': '(2000)', 'dimensions': 'fs.n_basis'}), '(n_neurons=2000, dimensions=fs.n_basis)\n', (552, 591), False, 'import nengo\n'), ((1189, 1230), 'nengo.Connection', 'nengo.Connection', (['stimulus.output', 'memory'], {}), '(stimulus.output, memory)\n', (1205, 1230), False, 'import nengo\n'), ((1261, 1306), 'nengo.Connection', 'nengo.Connection', (['memory', 'memory'], {'synapse': '(0.1)'}), '(memory, memory, synapse=0.1)\n', (1277, 1306), False, 'import nengo\n'), ((1438, 1494), 'nengo.Connection', 'nengo.Connection', (['memory', 'plot[:fs.n_basis]'], {'synapse': '(0.1)'}), '(memory, plot[:fs.n_basis], synapse=0.1)\n', (1454, 1494), False, 'import nengo\n'), ((1499, 1564), 'nengo.Connection', 'nengo.Connection', (['stimulus.output', 'plot[fs.n_basis:]'], {'synapse': '(0.1)'}), '(stimulus.output, plot[fs.n_basis:], synapse=0.1)\n', (1515, 1564), False, 'import nengo\n'), ((173, 218), 'numpy.exp', 'np.exp', (['(-(domain - mean) ** 2 / (2 * sd ** 2))'], {}), '(-(domain - mean) ** 2 / (2 * sd ** 2))\n', (179, 218), True, 'import numpy as np\n'), ((310, 336), 'nengo.dists.Uniform', 'nengo.dists.Uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (329, 336), False, 'import nengo\n'), ((349, 379), 'nengo.dists.Uniform', 'nengo.dists.Uniform', (['(0.05)', '(0.2)'], {}), '(0.05, 0.2)\n', (368, 379), False, 'import nengo\n'), ((785, 811), 'nengo.dists.Uniform', 'nengo.dists.Uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (804, 811), False, 'import nengo\n'), ((990, 1016), 'nengo.dists.Uniform', 'nengo.dists.Uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1009, 1016), False, 'import nengo\n'), ((1050, 1079), 'nengo.dists.Uniform', 'nengo.dists.Uniform', (['(0.1)', '(0.2)'], {}), 
'(0.1, 0.2)\n', (1069, 1079), False, 'import nengo\n'), ((1114, 1139), 'nengo.dists.Uniform', 'nengo.dists.Uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1133, 1139), False, 'import nengo\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# test mpi4py
# exc: mpiexec.openmpi -n 6 ./test02.py
import mpi4py.MPI as mpi
from numpy import array
from point import Point
# Rank 0 sends a heterogeneous payload (custom object, numpy array, dict)
# to rank 1, which prints every item it receives. All other ranks idle.
comm = mpi.COMM_WORLD
rank = comm.rank
siz = comm.size

if rank == 0:
    # Modernized: f-strings instead of %-formatting (output unchanged).
    print(f"[{rank}] nb of procs: {siz}")
    # mpi4py pickles arbitrary Python objects for the lowercase send/recv API.
    vals = [Point(1, 5, 3.14), array([3, 4, 8]), {1: 'un', 2: 'deux', 3: 'trois'}]
    comm.send(vals, dest=1)
elif rank == 1:
    vals = comm.recv(source=0)
    for v in vals:
        print(f"[{rank}]", v)
| [
"numpy.array",
"point.Point"
] | [((311, 328), 'point.Point', 'Point', (['(1)', '(5)', '(3.14)'], {}), '(1, 5, 3.14)\n', (316, 328), False, 'from point import Point\n'), ((329, 345), 'numpy.array', 'array', (['[3, 4, 8]'], {}), '([3, 4, 8])\n', (334, 345), False, 'from numpy import array\n')] |
import numpy as np
import sklearn.mixture
import torch
from postproc.ml_tools.gmm import GaussianMixture
import unittest
def test_em_matches_sklearn():
    """
    Assert that log-probabilities (E-step) and parameter updates (M-step)
    approximately match those of sklearn's diagonal-covariance GMM.
    """
    d = 20
    n_components = np.random.randint(1, 100)
    # (n, k, d): singleton component axis so the torch model can broadcast.
    x = torch.randn(40, 1, d)
    # (n, d)
    x_np = np.squeeze(x.data.numpy())

    var_init = torch.ones(1, n_components, d) - .4

    model = GaussianMixture(n_components, d, var_init=var_init)
    # BUG FIX: torch tensors expose ``.data``, not ``.dat`` — every
    # ``.dat.numpy()`` access below raised AttributeError.
    model_sk = sklearn.mixture.GaussianMixture(
        n_components,
        covariance_type="diag",
        init_params="random",
        means_init=np.squeeze(model.mu.data.numpy()),
        precisions_init=np.squeeze(1. / np.sqrt(var_init.data.numpy())))
    model_sk._initialize_parameters(x_np, np.random.RandomState())

    log_prob_sk = model_sk._estimate_log_prob(x_np)
    log_prob = model._estimate_log_prob(x)

    # Test whether log-probabilities are approximately equal
    np.testing.assert_almost_equal(np.squeeze(log_prob.data.numpy()),
                                   log_prob_sk,
                                   decimal=2,
                                   verbose=True)

    _, log_resp_sk = model_sk._e_step(x_np)
    _, log_resp = model._e_step(x)

    # Test whether E-steps are approximately equal
    np.testing.assert_almost_equal(np.squeeze(log_resp.data.numpy()),
                                   log_resp_sk,
                                   decimal=0,
                                   verbose=True)

    model_sk._m_step(x_np, log_prob_sk)
    pi_sk = model_sk.weights_
    mu_sk = model_sk.means_
    # BUG FIX: the variance reference must be ``covariances_``; it was
    # previously (incorrectly) read from ``means_``.
    var_sk = model_sk.covariances_

    pi, mu, var = model._m_step(x, log_prob)

    # Test whether pi ..
    np.testing.assert_almost_equal(np.squeeze(pi.data.numpy()),
                                   pi_sk,
                                   decimal=1,
                                   verbose=True)
    # .. mu ..
    np.testing.assert_almost_equal(np.squeeze(mu.data.numpy()),
                                   mu_sk,
                                   decimal=1,
                                   verbose=True)
    # .. and var are approximately equal
    np.testing.assert_almost_equal(np.squeeze(var.data.numpy()),
                                   var_sk,
                                   decimal=1,
                                   verbose=True)
class CpuCheck(unittest.TestCase):
    """
    Basic tests for CPU.
    """

    def testPredictClasses(self):
        """
        Assert that torch.FloatTensor is handled correctly.
        """
        data = torch.randn(4, 2)
        k = np.random.randint(1, 100)
        gmm = GaussianMixture(k, data.size(1))
        gmm.fit(data)
        labels = gmm.predict(data)
        # class memberships must come back as a flat vector of length n
        self.assertEqual(torch.Tensor(data.size(0)).size(), labels.size())

    def testPredictProbabilities(self):
        """
        Assert that torch.FloatTensor is handled correctly when returning class probabilities.
        """
        data = torch.randn(4, 2)
        k = np.random.randint(1, 100)
        gmm = GaussianMixture(k, data.size(1))
        gmm.fit(data)
        probabilities = gmm.predict(data, probs=True)
        # the probability matrix must have shape (n, k)
        self.assertEqual(torch.Tensor(data.size(0), k).size(),
                         probabilities.size())
class GpuCheck(unittest.TestCase):
    """
    Basic tests for GPU.
    """

    def testPredictClasses(self):
        """
        Assert that torch.cuda.FloatTensor is handled correctly.
        """
        data = torch.randn(4, 2).cuda()
        k = np.random.randint(1, 100)
        gmm = GaussianMixture(k, data.size(1)).cuda()
        gmm.fit(data)
        labels = gmm.predict(data)
        # class memberships must come back as a flat vector of length n
        self.assertEqual(torch.Tensor(data.size(0)).size(), labels.size())

    def testPredictProbabilities(self):
        """
        Assert that torch.cuda.FloatTensor is handled correctly when returning class probabilities.
        """
        data = torch.randn(4, 2).cuda()
        k = np.random.randint(1, 100)
        gmm = GaussianMixture(k, data.size(1)).cuda()
        gmm.fit(data)
        probabilities = gmm.predict(data, probs=True)
        # the probability matrix must have shape (n, k)
        self.assertEqual(torch.Tensor(data.size(0), k).size(),
                         probabilities.size())
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"torch.ones",
"torch.randn",
"numpy.random.RandomState",
"numpy.random.randint",
"postproc.ml_tools.gmm.GaussianMixture"
] | [((314, 339), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (331, 339), True, 'import numpy as np\n'), ((367, 388), 'torch.randn', 'torch.randn', (['(40)', '(1)', 'd'], {}), '(40, 1, d)\n', (378, 388), False, 'import torch\n'), ((507, 558), 'postproc.ml_tools.gmm.GaussianMixture', 'GaussianMixture', (['n_components', 'd'], {'var_init': 'var_init'}), '(n_components, d, var_init=var_init)\n', (522, 558), False, 'from postproc.ml_tools.gmm import GaussianMixture\n'), ((4692, 4707), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4705, 4707), False, 'import unittest\n'), ((458, 488), 'torch.ones', 'torch.ones', (['(1)', 'n_components', 'd'], {}), '(1, n_components, d)\n', (468, 488), False, 'import torch\n'), ((1006, 1029), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (1027, 1029), True, 'import numpy as np\n'), ((2814, 2831), 'torch.randn', 'torch.randn', (['(4)', '(2)'], {}), '(4, 2)\n', (2825, 2831), False, 'import torch\n'), ((2855, 2880), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (2872, 2880), True, 'import numpy as np\n'), ((3293, 3310), 'torch.randn', 'torch.randn', (['(4)', '(2)'], {}), '(4, 2)\n', (3304, 3310), False, 'import torch\n'), ((3334, 3359), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (3351, 3359), True, 'import numpy as np\n'), ((3875, 3900), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (3892, 3900), True, 'import numpy as np\n'), ((4373, 4398), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (4390, 4398), True, 'import numpy as np\n'), ((3827, 3844), 'torch.randn', 'torch.randn', (['(4)', '(2)'], {}), '(4, 2)\n', (3838, 3844), False, 'import torch\n'), ((4325, 4342), 'torch.randn', 'torch.randn', (['(4)', '(2)'], {}), '(4, 2)\n', (4336, 4342), False, 'import torch\n')] |
import unittest
import numpy as np
import pytest
import tiledb
from slaid.commons import Mask
from slaid.commons.base import ImageInfo, Slide
from slaid.commons.dask import Mask as DaskMask
from slaid.commons.ecvl import BasicSlide as EcvlSlide
from slaid.commons.openslide import BasicSlide as OpenSlide
IMAGE = 'tests/data/test.tif'
@pytest.mark.parametrize('slide_cls,args', [(Slide, (EcvlSlide, )),
                                            (Slide, (OpenSlide, ))])
def test_slide_level(slide):
    """Each pyramid level's array size must equal the level dimensions
    reversed (arrays are indexed row-major, dimensions are (w, h))."""
    # NOTE(review): the parametrize ids feed a `slide` fixture defined in
    # conftest — confirm the indirection if renaming.
    for level in range(slide.level_count):
        level_array = slide[level]
        assert level_array.size == slide.level_dimensions[level][::-1]
@pytest.mark.parametrize('image_info', [
    ImageInfo.create('bgr', 'yx', 'first'),
    ImageInfo.create('rgb', 'yx', 'first'),
    ImageInfo.create('rgb', 'yx', 'last'),
    ImageInfo.create('bgr', 'yx', 'last')
])
@pytest.mark.parametrize('slide_cls,args', [(Slide, (EcvlSlide, )),
                                            (Slide, (OpenSlide, ))])
def test_slice_slide(slide, image_info):
    """Slicing a slide level must preserve the slice size and lay channels
    out first or last according to the target image info."""
    if image_info.channel == ImageInfo.CHANNEL.FIRST:
        expected_shape = (3, 10, 20)
    else:
        expected_shape = (10, 20, 3)
    for level in range(slide.level_count):
        level_array = slide[level]

        cropped = level_array[:10, :20].convert(image_info)
        assert cropped.size == (10, 20)
        assert cropped.array.shape == expected_shape

        offset_crop = level_array[1:10, 1:20].convert(image_info)
        assert offset_crop.size == (9, 19)

    # A full slice of level 0 must match the region read through the image API.
    image = slide.read_region((0, 0), 0, slide.dimensions)
    assert (slide[0][:, :].array == image.to_array()).all()
class TestMask:
    # Mask implementation under test; subclasses override this.
    cls = Mask

    @pytest.mark.skip(reason="update how mask are loaded/dumped")
    def test_dumps_to_tiledb(self, array, tmp_path):
        """Dumping a mask must persist both the array and its metadata."""
        mask = self.cls(array, 1, 1, 0.8, False)
        mask.to_tiledb(str(tmp_path))
        with tiledb.open(str(tmp_path), 'r') as stored:
            assert (stored == np.array(mask.array)).all()
            assert stored.meta['extraction_level'] == mask.extraction_level
            assert stored.meta['level_downsample'] == mask.level_downsample
            assert stored.meta['threshold'] == mask.threshold
            assert 'model' not in stored.meta.keys()

    @pytest.mark.skip(reason="update how mask are loaded/dumped")
    def test_creates_from_tiledb(self, tiledb_path):
        """Loading from tiledb must round-trip the stored array."""
        mask = self.cls.from_tiledb(tiledb_path)
        with tiledb.open(tiledb_path, 'r') as stored:
            assert (mask.array[:] == stored[:]).all()
class TestDaskMask(TestMask):
    # Re-run the TestMask suite against the dask-backed Mask implementation.
    cls = DaskMask

    @pytest.mark.skip(reason="update how mask are loaded/dumped")
    def test_dumps_to_tiledb(self, dask_array, tmp_path):
        # Same check as the base class, but fed the dask array fixture.
        super().test_dumps_to_tiledb(dask_array, tmp_path)
def test_filter(mask):
    """Thresholding a mask yields 0 below the cutoff and 1 at/above it."""
    thresholded = mask >= 3
    assert (thresholded[0, :] == 0).all()
    assert (thresholded[1:, :] == 1).all()
def test_filter_rescale(mask):
    """Rescaling a thresholded mask to (9, 9) must expand each source pixel
    block-wise, preserving the True/False layout."""
    # Removed leftover debug print() statements that cluttered test output.
    filter_ = mask >= 5
    filter_.rescale((9, 9))
    expected = [
        [False, False, False, False, False, False, False, False, False],
        [False, False, False, False, False, False, False, False, False],
        [False, False, False, False, False, False, False, False, False],
        [False, False, False, False, False, False, True, True, True],
        [False, False, False, False, False, False, True, True, True],
        [False, False, False, False, False, False, True, True, True],
        [True, True, True, True, True, True, True, True, True],
        [True, True, True, True, True, True, True, True, True],
        [True, True, True, True, True, True, True, True, True],
    ]
    assert (filter_[:, :] == np.array(expected)).all()
if __name__ == '__main__':
    # Allow running this module directly via unittest (pytest is the
    # primary runner for the parametrized tests above).
    unittest.main()
| [
"unittest.main",
"tiledb.open",
"numpy.array",
"slaid.commons.base.ImageInfo.create",
"pytest.mark.parametrize",
"pytest.mark.skip"
] | [((341, 435), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""slide_cls,args"""', '[(Slide, (EcvlSlide,)), (Slide, (OpenSlide,))]'], {}), "('slide_cls,args', [(Slide, (EcvlSlide,)), (Slide, (\n OpenSlide,))])\n", (364, 435), False, 'import pytest\n'), ((851, 945), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""slide_cls,args"""', '[(Slide, (EcvlSlide,)), (Slide, (OpenSlide,))]'], {}), "('slide_cls,args', [(Slide, (EcvlSlide,)), (Slide, (\n OpenSlide,))])\n", (874, 945), False, 'import pytest\n'), ((1868, 1928), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""update how mask are loaded/dumped"""'}), "(reason='update how mask are loaded/dumped')\n", (1884, 1928), False, 'import pytest\n'), ((2450, 2510), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""update how mask are loaded/dumped"""'}), "(reason='update how mask are loaded/dumped')\n", (2466, 2510), False, 'import pytest\n'), ((2776, 2836), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""update how mask are loaded/dumped"""'}), "(reason='update how mask are loaded/dumped')\n", (2792, 2836), False, 'import pytest\n'), ((3949, 3964), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3962, 3964), False, 'import unittest\n'), ((678, 716), 'slaid.commons.base.ImageInfo.create', 'ImageInfo.create', (['"""bgr"""', '"""yx"""', '"""first"""'], {}), "('bgr', 'yx', 'first')\n", (694, 716), False, 'from slaid.commons.base import ImageInfo, Slide\n'), ((722, 760), 'slaid.commons.base.ImageInfo.create', 'ImageInfo.create', (['"""rgb"""', '"""yx"""', '"""first"""'], {}), "('rgb', 'yx', 'first')\n", (738, 760), False, 'from slaid.commons.base import ImageInfo, Slide\n'), ((766, 803), 'slaid.commons.base.ImageInfo.create', 'ImageInfo.create', (['"""rgb"""', '"""yx"""', '"""last"""'], {}), "('rgb', 'yx', 'last')\n", (782, 803), False, 'from slaid.commons.base import ImageInfo, Slide\n'), ((809, 846), 'slaid.commons.base.ImageInfo.create', 'ImageInfo.create', 
(['"""bgr"""', '"""yx"""', '"""last"""'], {}), "('bgr', 'yx', 'last')\n", (825, 846), False, 'from slaid.commons.base import ImageInfo, Slide\n'), ((2626, 2655), 'tiledb.open', 'tiledb.open', (['tiledb_path', '"""r"""'], {}), "(tiledb_path, 'r')\n", (2637, 2655), False, 'import tiledb\n'), ((3890, 3908), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (3898, 3908), True, 'import numpy as np\n'), ((2153, 2173), 'numpy.array', 'np.array', (['mask.array'], {}), '(mask.array)\n', (2161, 2173), True, 'import numpy as np\n')] |
import unittest, os, json
from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import gc
# Eliminate TF warning in tests
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class BNNInferenceTest(unittest.TestCase):
	def setUp(self):
		"""Load the test config, set lens-parameter paths, and simulate a
		training run so downstream tests find a consistent config state."""
		# Open up the config file.
		self.root_path = os.path.dirname(os.path.abspath(__file__))+'/test_data/'
		with open(self.root_path+'test.json','r') as json_f:
			self.cfg = json.load(json_f)
		self.batch_size = self.cfg['training_params']['batch_size']
		# Paths for the (normalized) lens-parameter csv files.
		self.normalized_param_path = self.root_path + 'normed_metadata.csv'
		self.normalization_constants_path = self.root_path + 'norm.csv'
		self.lens_params_path = self.root_path + 'metadata.csv'
		self.lens_params = ['external_shear_gamma_ext','external_shear_psi_ext',
			'lens_mass_center_x','lens_mass_center_y',
			'lens_mass_e1','lens_mass_e2',
			'lens_mass_gamma','lens_mass_theta_E']
		self.num_params = len(self.lens_params)
		# Point the config at the test fixtures and force a diagonal BNN.
		self.cfg['dataset_params']['normalization_constants_path'] = 'norm.csv'
		self.cfg['training_params']['final_params'] = self.lens_params
		self.cfg['training_params']['bnn_type'] = 'diag'
		self.tf_record_path = self.root_path+self.cfg['validation_params'][
			'tf_record_path']
		# Simulate training; the tf record itself is deleted right after so
		# each test recreates what it needs.
		self.final_params = ['external_shear_g1','external_shear_g2',
			'lens_mass_center_x','lens_mass_center_y','lens_mass_e1',
			'lens_mass_e2','lens_mass_gamma','lens_mass_theta_E_log']
		model_trainer.prepare_tf_record(self.cfg, self.root_path,
			self.tf_record_path,self.final_params,'train')
		os.remove(self.tf_record_path)
		# Fix both numpy and TF seeds for reproducible statistical checks.
		np.random.seed(2)
		tf.random.set_seed(2)
	def tearDown(self):
		"""Release per-test state so memory does not accumulate across tests."""
		# Cleanup for memory: drop the config, reset the TF graph, force a GC.
		self.cfg = None
		tf.keras.backend.clear_session()
		gc.collect()
	def test_fix_flip_pairs(self):
		"""Check that fix_flip_pairs restores flipped parameter pairs and
		leaves non-pair parameters untouched."""
		# Check that fix_flip_pairs always selects the best possible configuration
		# to return.
		self.infer_class = bnn_inference.InferenceClass(self.cfg,
			lite_class=True)
		# Delete the tf record file made during the initialization of the
		# inference class.
		os.remove(self.root_path+'tf_record_test_val')
		os.remove(self.root_path+'new_metadata.csv')
		# Get rid of the normalization file.
		os.remove(self.normalization_constants_path)
		# Get the set of all flip pairs we want to check
		flip_pairs = self.cfg['training_params']['flip_pairs']
		flip_set = set()
		for flip_pair in flip_pairs:
			flip_set.update(flip_pair)
		y_test = np.ones((self.batch_size,self.num_params))
		predict_samps = np.ones((10,self.batch_size,self.num_params))
		# NOTE(review): pi is never incremented, so only sample 0 is
		# perturbed — presumably intentional; confirm.
		pi = 0
		for flip_index in flip_set:
			predict_samps[pi,:,flip_index] = -1
		# Flip pairs of points.
		self.infer_class.fix_flip_pairs(predict_samps,y_test,self.batch_size)
		# Flippable parameters should be fully restored to match y_test.
		self.assertEqual(np.sum(np.abs(predict_samps-y_test)),0)
		dont_flip_set = set(range(self.num_params))
		dont_flip_set=dont_flip_set.difference(flip_set)
		pi = 0
		for flip_index in dont_flip_set:
			predict_samps[pi,:,flip_index] = -1
		# Flip pairs of points.
		self.infer_class.fix_flip_pairs(predict_samps,y_test,self.batch_size)
		# Non-flippable parameters must NOT be corrected: each perturbed entry
		# differs from y_test by |(-1) - 1| = 2.
		self.assertEqual(np.sum(np.abs(predict_samps-y_test)),
			2*self.batch_size*len(dont_flip_set))
	def test_undo_param_norm(self):
		"""Normalize the lens parameters, then check undo_param_norm maps
		them (and tiled prediction samples) back to the originals."""
		# Test if normalizing the lens parameters works correctly.
		self.infer_class = bnn_inference.InferenceClass(self.cfg,
			lite_class=True)
		# Delete the tf record file made during the initialization of the
		# inference class.
		os.remove(self.root_path+'tf_record_test_val')
		os.remove(self.root_path+'new_metadata.csv')
		# Get rid of the normalization file.
		os.remove(self.normalization_constants_path)
		train_or_test='train'
		data_tools.normalize_lens_parameters(self.lens_params,
			self.lens_params_path,self.normalized_param_path,
			self.normalization_constants_path,train_or_test=train_or_test)
		lens_params_csv = pd.read_csv(self.lens_params_path, index_col=None)
		norm_params_csv = pd.read_csv(self.normalized_param_path, index_col=None)
		# Pull lens parameters out of the csv files.
		lens_params_numpy = []
		norms_params_numpy = []
		for lens_param in self.lens_params:
			lens_params_numpy.append(lens_params_csv[lens_param])
			norms_params_numpy.append(norm_params_csv[lens_param])
		lens_params_numpy = np.array(lens_params_numpy).T
		norms_params_numpy = np.array(norms_params_numpy).T
		predict_samps = np.tile(norms_params_numpy,(3,1,1))
		# TODO: write a good test for al_samps!
		al_samps = np.ones((3,3,self.num_params,self.num_params))
		# Try to denormalize everything
		# NOTE(review): the assertions below imply undo_param_norm mutates
		# its array arguments in place — confirm against the implementation.
		self.infer_class.undo_param_norm(predict_samps,norms_params_numpy,
			al_samps)
		self.assertAlmostEqual(np.mean(np.abs(norms_params_numpy-
			lens_params_numpy)),0)
		self.assertAlmostEqual(np.mean(np.abs(predict_samps-
			lens_params_numpy)),0)
		# Clean up the file now that we're done
		os.remove(self.normalized_param_path)
		os.remove(self.normalization_constants_path)
	def test_gen_samples_diag(self):
		"""Feed a fake diagonal-BNN model with known output statistics into
		gen_samples and verify the recovered mean/std/covariance."""
		self.infer_class = bnn_inference.InferenceClass(self.cfg)
		# Delete the tf record file made during the initialization of the
		# inference class.
		os.remove(self.root_path+'tf_record_test_val')
		os.remove(self.root_path+'new_metadata.csv')
		# Get rid of the normalization file.
		os.remove(self.normalization_constants_path)
		# First we have to make a fake model whose statistics are very well
		# defined.
		class ToyModel():
			def __init__(self,mean,covariance,batch_size,al_std):
				# We want to make sure our performance is consistent for a
				# test
				np.random.seed(4)
				self.mean=mean
				self.covariance = covariance
				self.batch_size = batch_size
				self.al_std = al_std
			def predict(self,image):
				# We won't actually be using the image. We just want it for
				# testing. Output is [gaussian draw, constant aleatoric log-std].
				return tf.constant(np.concatenate([np.random.multivariate_normal(
					self.mean,self.covariance,self.batch_size),np.zeros((
					self.batch_size,len(self.mean)))+self.al_std],axis=-1),
					tf.float32)
		# Start with a simple covariance matrix example.
		# al_std = -1000 makes the aleatoric contribution exp(-1000) ~ 0.
		mean = np.ones(self.num_params)*2
		covariance = np.diag(np.ones(self.num_params))
		al_std = -1000
		diag_model = ToyModel(mean,covariance,self.batch_size,al_std)
		# We don't want any flipping going on
		self.infer_class.flip_mat_list = [np.diag(np.ones(self.num_params))]
		# Create tf record. This won't be used, but it has to be there for
		# the function to be able to pull some images.
		# Make fake norms data
		fake_norms = {}
		for lens_param in self.lens_params:
			fake_norms[lens_param] = np.array([0.0,1.0])
		fake_norms = pd.DataFrame(data=fake_norms)
		fake_norms.to_csv(self.normalization_constants_path,index=False)
		data_tools.generate_tf_record(self.root_path,self.lens_params,
			self.lens_params_path,self.tf_record_path)
		# Replace the real model with our fake model and generate samples
		self.infer_class.model = diag_model
		self.infer_class.gen_samples(10000)
		# Make sure these samples follow the required statistics.
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean)),0,
			places=1)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-np.diag(
			covariance))),0,places=1)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-covariance)),
			0,places=1)
		self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size,
			self.num_params,self.num_params))
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov)),0)
		# Repeat this process again with a new covariance matrix and means
		mean = np.random.rand(self.num_params)
		covariance = np.random.rand(self.num_params,self.num_params)
		al_std = 0
		# Make sure covariance is positive semidefinite
		covariance = np.dot(covariance,covariance.T)
		diag_model = ToyModel(mean,covariance,self.batch_size,al_std)
		self.infer_class.model = diag_model
		self.infer_class.gen_samples(10000)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean)),0,
			places=1)
		# Covariance is the sum of two random variables
		# (epistemic draw + unit aleatoric term since exp(0) = 1).
		covariance = covariance+np.eye(self.num_params)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-np.sqrt(
			np.diag(covariance)))),0,places=1)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-covariance)),
			0,places=1)
		self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size,
			self.num_params,self.num_params))
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov-
			np.eye(self.num_params))),0)
		# Make sure our test probes things well: deliberately wrong statistics
		# must NOT pass the closeness checks above.
		wrong_mean = np.random.randn(self.num_params)
		wrong_covariance = np.random.rand(self.num_params,self.num_params)
		al_std = -1000
		# Make sure covariance is positive semidefinite
		wrong_covariance = np.dot(wrong_covariance,wrong_covariance.T)
		diag_model = ToyModel(wrong_mean,wrong_covariance,self.batch_size,
			al_std)
		self.infer_class.model = diag_model
		self.infer_class.gen_samples(10000)
		self.assertGreater(np.mean(np.abs(self.infer_class.y_pred-mean)),0.05)
		self.assertGreater(np.mean(np.abs(self.infer_class.y_std-np.sqrt(
			np.diag(covariance)))),0.05)
		self.assertGreater(np.mean(np.abs(self.infer_class.y_cov-covariance)),
			0.05)
		self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size,
			self.num_params,self.num_params))
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov)),0)
		# Clean up the files we generated
		os.remove(self.normalization_constants_path)
		os.remove(self.tf_record_path)
	def test_gen_samples_full(self):
		"""Feed a fake full-covariance BNN model into gen_samples and verify
		the covariance reconstructed from the Cholesky output."""
		self.infer_class = bnn_inference.InferenceClass(self.cfg)
		# Delete the tf record file made during the initialization of the
		# inference class.
		os.remove(self.root_path+'tf_record_test_val')
		os.remove(self.root_path+'new_metadata.csv')
		# Get rid of the normalization file.
		os.remove(self.normalization_constants_path)
		# First we have to make a fake model whose statistics are very well
		# defined.
		class ToyModel():
			def __init__(self,mean,covariance,batch_size,L_elements):
				# We want to make sure our performance is consistent for a
				# test
				np.random.seed(6)
				self.mean=mean
				self.num_params = len(mean)
				self.covariance = covariance
				self.batch_size = batch_size
				self.L_elements = L_elements
				# Number of free entries of a lower-triangular matrix.
				self.L_elements_len = int(self.num_params*(self.num_params+1)/2)
			def predict(self,image):
				# We won't actually be using the image. We just want it for
				# testing. Output is [constant mean, constant L elements].
				return tf.constant(np.concatenate([np.zeros((
					self.batch_size,self.num_params))+self.mean,np.zeros((
					self.batch_size,self.L_elements_len))+self.L_elements],
					axis=-1),tf.float32)
		# Start with a simple covariance matrix example.
		mean = np.ones(self.num_params)*2
		covariance = np.diag(np.ones(self.num_params)*0.000001)
		L_elements = np.array([np.log(1)]*self.num_params+[0]*int(
			self.num_params*(self.num_params-1)/2))
		full_model = ToyModel(mean,covariance,self.batch_size,L_elements)
		# We don't want any flipping going on
		self.infer_class.flip_mat_list = [np.diag(np.ones(self.num_params))]
		# Create tf record. This won't be used, but it has to be there for
		# the function to be able to pull some images.
		# Make fake norms data
		fake_norms = {}
		for lens_param in self.lens_params:
			fake_norms[lens_param] = np.array([0.0,1.0])
		fake_norms = pd.DataFrame(data=fake_norms)
		fake_norms.to_csv(self.normalization_constants_path,index=False)
		data_tools.generate_tf_record(self.root_path,self.lens_params,
			self.lens_params_path,self.tf_record_path)
		# Replace the real model with our fake model and generate samples
		self.infer_class.model = full_model
		self.infer_class.bnn_type = 'full'
		# self.infer_class.gen_samples(1000)

		# # Make sure these samples follow the required statistics.
		# self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean)),
		# 	0,places=1)
		# self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-1)),0,
		# 	places=1)
		# self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-np.eye(
		# 	self.num_params))),0,places=1)
		# self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size,
		# 	self.num_params,self.num_params))
		# self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov-np.eye(
		# 	self.num_params))),0)
		mean = np.zeros(self.num_params)
		loss_class = bnn_alexnet.LensingLossFunctions([],self.num_params)
		L_elements = np.ones((1,len(L_elements)))*0.2
		full_model = ToyModel(mean,covariance,self.batch_size,L_elements)
		self.infer_class.model = full_model
		self.infer_class.gen_samples(1000)
		# Calculate the corresponding covariance matrix from the precision
		# matrix's Cholesky factor: cov = (L^-T)(L^-T)^T.
		_, _, L_mat = loss_class.construct_precision_matrix(
			tf.constant(L_elements))
		L_mat = np.linalg.inv(L_mat.numpy()[0].T)
		cov_mat = np.dot(L_mat,L_mat.T)
		# Make sure these samples follow the required statistics.
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean)),0,
			places=1)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-np.sqrt(
			np.diag(cov_mat)))),0,places=1)
		self.assertAlmostEqual(np.mean(np.abs((self.infer_class.y_cov-cov_mat))),
			0,places=1)
		self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size,
			self.num_params,self.num_params))
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov-cov_mat)),
			0)
		# Clean up the files we generated
		os.remove(self.normalization_constants_path)
		os.remove(self.tf_record_path)
	def test_gen_samples_gmm(self):
		"""Test that gen_samples draws samples whose statistics match a
		two-component GMM posterior emitted by a fake model.

		Three regimes are checked: identical components, one component
		totally dominant via the logit, and an even mixture of two
		separated components.
		"""
		self.infer_class = bnn_inference.InferenceClass(self.cfg)
		# Delete the tf record file made during the initialization of the
		# inference class.
		os.remove(self.root_path+'tf_record_test_val')
		os.remove(self.root_path+'new_metadata.csv')
		# Get rid of the normalization file.
		os.remove(self.normalization_constants_path)
		# First we have to make a fake model whose statistics are very well
		# defined.
		class ToyModel():
			"""Stand-in for the BNN. predict() ignores the image and returns,
			for every example, the concatenation [samples around mean1,
			L_elements1, samples around mean2, L_elements2, pi_logit] --
			i.e. a fixed GMM parameterization with Gaussian scatter on the
			two means.
			"""
			def __init__(self,mean1,covariance1,mean2,covariance2,batch_size,
				L_elements1,L_elements2,pi_logit):
				# We want to make sure our performance is consistent for a
				# test
				np.random.seed(6)
				self.mean1=mean1
				self.mean2=mean2
				self.covariance1=covariance1
				self.covariance2=covariance2
				self.num_params = len(mean1)
				self.batch_size = batch_size
				self.L_elements1 = L_elements1
				self.L_elements2 = L_elements2
				self.pi_logit = pi_logit
				# Number of lower-triangular matrix entries per component.
				self.L_elements_len = int(self.num_params*(self.num_params+1)/2)
			def predict(self,image):
				# We won't actually be using the image. We just want it for
				# testing.
				return tf.constant(np.concatenate([
					np.random.multivariate_normal(self.mean1,self.covariance1,
						self.batch_size),
					np.zeros((
						self.batch_size,self.L_elements_len))+self.L_elements1,
					np.random.multivariate_normal(self.mean2,self.covariance2,
						self.batch_size),
					np.zeros((
						self.batch_size,self.L_elements_len))+self.L_elements2,
					np.zeros(
						(self.batch_size,1))+self.pi_logit],axis=-1),tf.float32)
		# Start with a simple covariance matrix example where both gmms
		# are the same. This is just checking the base case.
		# NOTE(review): the diagonal L_elements are log-parameterized
		# (np.log below), so np.log(1) encodes unit diagonal entries --
		# TODO confirm against the loss-function parameterization.
		mean1 = np.ones(self.num_params)*2
		mean2 = np.ones(self.num_params)*2
		covariance1 = np.diag(np.ones(self.num_params)*0.000001)
		covariance2 = np.diag(np.ones(self.num_params)*0.000001)
		L_elements1 = np.array([np.log(1)]*self.num_params+[0]*int(
			self.num_params*(self.num_params-1)/2))
		L_elements2 = np.array([np.log(1)]*self.num_params+[0]*int(
			self.num_params*(self.num_params-1)/2))
		pi_logit = 0
		gmm_model = ToyModel(mean1,covariance1,mean2,covariance2,
			self.batch_size,L_elements1,L_elements2,pi_logit)
		# We don't want any flipping going on
		self.infer_class.flip_mat_list = [np.diag(np.ones(self.num_params))]
		# Create tf record. This won't be used, but it has to be there for
		# the function to be able to pull some images.
		# Make fake norms data
		fake_norms = {}
		for lens_param in self.lens_params:
			fake_norms[lens_param] = np.array([0.0,1.0])
		fake_norms = pd.DataFrame(data=fake_norms)
		fake_norms.to_csv(self.normalization_constants_path,index=False)
		data_tools.generate_tf_record(self.root_path,self.lens_params,
			self.lens_params_path,self.tf_record_path)
		# Replace the real model with our fake model and generate samples
		self.infer_class.model = gmm_model
		self.infer_class.bnn_type = 'gmm'
		self.infer_class.gen_samples(1000)
		# Make sure these samples follow the required statistics.
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean1)),
			0,places=1)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-1)),0,
			places=1)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-np.eye(
			self.num_params))),0,places=1)
		self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size,
			self.num_params,self.num_params))
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov-np.eye(
			self.num_params))),0)
		# Now we try and example where all the samples should be drawn from one
		# of the two gmms because of the logit.
		# pi_logit strongly favors the first component, so mean2/L_elements2
		# should have no visible effect on the statistics.
		mean1 = np.ones(self.num_params)*2
		mean2 = np.ones(self.num_params)*200
		covariance1 = np.diag(np.ones(self.num_params)*0.000001)
		covariance2 = np.diag(np.ones(self.num_params)*0.000001)
		L_elements1 = np.array([np.log(1)]*self.num_params+[0]*int(
			self.num_params*(self.num_params-1)/2))
		L_elements2 = np.array([np.log(10)]*self.num_params+[0]*int(
			self.num_params*(self.num_params-1)/2))
		pi_logit = np.log(0.99999)-np.log(0.00001)
		gmm_model = ToyModel(mean1,covariance1,mean2,covariance2,
			self.batch_size,L_elements1,L_elements2,pi_logit)
		self.infer_class.model = gmm_model
		self.infer_class.gen_samples(1000)
		# Make sure these samples follow the required statistics.
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-mean1)),
			0,places=1)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-1)),0,
			places=1)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_cov-np.eye(
			self.num_params))),0,places=1)
		self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size,
			self.num_params,self.num_params))
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.al_cov-np.eye(
			self.num_params))),0)
		# Now test that it takes a combination of them correctly
		# With pi_logit near even odds, the sample mean should land between
		# mean1 (=2) and mean2 (=6) at 4, with variance inflated by the
		# component separation (hence sqrt(5) below).
		mean1 = np.ones(self.num_params)*2
		mean2 = np.ones(self.num_params)*6
		covariance1 = np.diag(np.ones(self.num_params)*0.000001)
		covariance2 = np.diag(np.ones(self.num_params)*0.000001)
		L_elements1 = np.array([np.log(10)]*self.num_params+[0]*int(
			self.num_params*(self.num_params-1)/2))
		L_elements2 = np.array([np.log(1)]*self.num_params+[0]*int(
			self.num_params*(self.num_params-1)/2))
		pi_logit = np.log(0.0001)-np.log(0.9999)
		gmm_model = ToyModel(mean1,covariance1,mean2,covariance2,
			self.batch_size,L_elements1,L_elements2,pi_logit)
		self.infer_class.model = gmm_model
		self.infer_class.gen_samples(2000)
		# Make sure these samples follow the required statistics.
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_pred-4)),
			0,places=1)
		self.assertAlmostEqual(np.mean(np.abs(self.infer_class.y_std-np.sqrt(5))),
			0,places=0)
		self.assertTupleEqual(self.infer_class.al_cov.shape,(self.batch_size,
			self.num_params,self.num_params))
		# The first Gaussian is always favored in the current parameterization,
		# so we can't test the scenario where the second is favored.
		# Clean up the files we generated
		os.remove(self.normalization_constants_path)
		os.remove(self.tf_record_path)
	def test_gen_samples_save(self):
		"""Test that gen_samples with a save path caches the draws to disk.

		A second call with the same save path must reproduce the exact same
		predict_samps (loaded from disk rather than redrawn). Also smoke-tests
		that the plotting/reporting routines run without raising.
		"""
		self.infer_class = bnn_inference.InferenceClass(self.cfg)
		# Delete the tf record file made during the initialization of the
		# inference class.
		os.remove(self.root_path+'tf_record_test_val')
		os.remove(self.root_path+'new_metadata.csv')
		# Get rid of the normalization file.
		os.remove(self.normalization_constants_path)
		# First we have to make a fake model whose statistics are very well
		# defined.
		class ToyModel():
			"""Stand-in for the BNN. predict() ignores the image and returns
			[Gaussian samples around mean, constant al_std block] for every
			example.
			"""
			def __init__(self,mean,covariance,batch_size,al_std):
				# We want to make sure our performance is consistent for a
				# test
				np.random.seed(4)
				self.mean=mean
				self.covariance = covariance
				self.batch_size = batch_size
				self.al_std = al_std
			def predict(self,image):
				# We won't actually be using the image. We just want it for
				# testing.
				return tf.constant(np.concatenate([np.random.multivariate_normal(
					self.mean,self.covariance,self.batch_size),np.zeros((
					self.batch_size,len(self.mean)))+self.al_std],axis=-1),
					tf.float32)
		# Start with a simple covariance matrix example.
		# al_std = -1000 makes the aleatoric contribution negligible --
		# presumably interpreted as a log std downstream; verify against
		# gen_samples.
		mean = np.ones(self.num_params)*2
		covariance = np.diag(np.ones(self.num_params))
		al_std = -1000
		diag_model = ToyModel(mean,covariance,self.batch_size,al_std)
		# We don't want any flipping going on
		self.infer_class.flip_mat_list = [np.diag(np.ones(self.num_params))]
		# Create tf record. This won't be used, but it has to be there for
		# the function to be able to pull some images.
		# Make fake norms data
		fake_norms = {}
		for lens_param in self.lens_params:
			fake_norms[lens_param] = np.array([0.0,1.0])
		fake_norms = pd.DataFrame(data=fake_norms)
		fake_norms.to_csv(self.normalization_constants_path,index=False)
		data_tools.generate_tf_record(self.root_path,self.lens_params,
			self.lens_params_path,self.tf_record_path)
		# Replace the real model with our fake model and generate samples
		self.infer_class.model = diag_model
		# Provide a save path to then check that we get the same data
		save_path = self.root_path + 'test_gen_samps/'
		self.infer_class.gen_samples(10000,save_path)
		pred_1 = np.copy(self.infer_class.predict_samps)
		# Generate again and make sure they are equivalent
		self.infer_class.gen_samples(10000,save_path)
		np.testing.assert_almost_equal(pred_1,self.infer_class.predict_samps)
		# Test that none of the plotting routines break
		self.infer_class.gen_coverage_plots(block=False)
		plt.close('all')
		self.infer_class.report_stats()
		self.infer_class.plot_posterior_contours(1,block=False)
		plt.close('all')
		plt.close('all')
		self.infer_class.comp_al_ep_unc(block=False)
		plt.close('all')
		self.infer_class.comp_al_ep_unc(block=False,norm_diagonal=False)
		plt.close('all')
		self.infer_class.plot_calibration(block=False,title='test')
		plt.close('all')
		# Clean up the files we generated
		os.remove(self.normalization_constants_path)
		os.remove(self.tf_record_path)
		os.remove(save_path+'pred.npy')
		os.remove(save_path+'al_cov.npy')
		os.remove(save_path+'images.npy')
		os.remove(save_path+'y_test.npy')
		os.rmdir(save_path)
def test_calc_p_dlt(self):
self.infer_class = bnn_inference.InferenceClass(self.cfg,
lite_class=True)
# Delete the tf record file made during the initialization of the
# inference class.
os.remove(self.root_path+'tf_record_test_val')
os.remove(self.root_path+'new_metadata.csv')
# Get rid of the normalization file.
os.remove(self.normalization_constants_path)
# Test that the calc_p_dlt returns the correct percentages for some
# toy examples
# Check a simple case
size = int(1e6)
self.infer_class.predict_samps = np.random.normal(size=size*2).reshape(
(size//10,10,2))
self.infer_class.predict_samps[:,:,1]=0
self.infer_class.y_pred = np.mean(self.infer_class.predict_samps,axis=0)
self.infer_class.y_test = np.array([[1,2,3,4,5,6,7,8,9,10],
[0,0,0,0,0,0,0,0,0,0]],dtype=np.float32).T
self.infer_class.calc_p_dlt(cov_emp=np.diag(np.ones(2)))
percentages = [0.682689,0.954499,0.997300,0.999936,0.999999]+[1.0]*5
for p_i in range(len(percentages)):
self.assertAlmostEqual(percentages[p_i],self.infer_class.p_dlt[p_i],
places=2)
# Shift the mean
size = int(1e6)
self.infer_class.predict_samps = np.random.normal(loc=2,
size=size*2).reshape((size//10,10,2))
self.infer_class.predict_samps[:,:,1]=0
self.infer_class.y_pred = np.mean(self.infer_class.predict_samps,axis=0)
self.infer_class.y_test = np.array([[1,2,3,4,5,6,7,8,9,10],
[0,0,0,0,0,0,0,0,0,0]],dtype=np.float32).T
self.infer_class.calc_p_dlt(cov_emp=np.diag(np.ones(2)))
percentages = [0.682689,0,0.682689,0.954499,0.997300,0.999936]+[1.0]*4
for p_i in range(len(percentages)):
self.assertAlmostEqual(percentages[p_i],self.infer_class.p_dlt[p_i],
places=2)
# Expand to higher dimensions
size = int(1e6)
self.infer_class.predict_samps = np.random.normal(loc=0,
size=size*2).reshape((size//10,10,2))
self.infer_class.predict_samps /= np.sqrt(np.sum(np.square(
self.infer_class.predict_samps),axis=-1,keepdims=True))
self.infer_class.predict_samps *= np.random.random(size=size).reshape((
size//10,10,1))*5
self.infer_class.y_pred = np.mean(self.infer_class.predict_samps,axis=0)
self.infer_class.y_test = np.array([[1,2,3,4,5,6,7,8,9,10],[0]*10]).T
self.infer_class.calc_p_dlt(cov_emp=np.diag(np.ones(2)))
percentages = [1/5,2/5,3/5,4/5,1,1]+[1.0]*4
for p_i in range(len(percentages)):
self.assertAlmostEqual(percentages[p_i],self.infer_class.p_dlt[p_i],
places=2)
# Expand to higher dimensions
size = int(1e6)
self.infer_class.predict_samps = np.random.normal(loc=0,
size=size*2).reshape((size//2,2,2))*5
self.infer_class.predict_samps[:,:,1]=0
self.infer_class.y_pred = np.mean(self.infer_class.predict_samps,axis=0)
self.infer_class.y_test = np.array([[0,np.sqrt(2)],[0]*2]).T
self.infer_class.calc_p_dlt()
percentages = [0,0.223356]
for p_i in range(len(percentages)):
self.assertAlmostEqual(percentages[p_i],self.infer_class.p_dlt[p_i],
places=2)
def test_specify_test_set_path(self):
# Pass a specific test_set_path to the inference class and make sure
# it behaves as expected.
test_set_path = self.root_path
# Check that the file doesn't already exist.
self.assertFalse(os.path.isfile(test_set_path+'tf_record_test_val'))
# We will again have to simulate training so that the desired
# normalization path exists.
model_trainer.prepare_tf_record(self.cfg, self.root_path,
self.tf_record_path,self.final_params,'train')
os.remove(self.tf_record_path)
_ = bnn_inference.InferenceClass(self.cfg,
test_set_path=test_set_path,lite_class=True)
# Check that a new tf_record was generated
self.assertTrue(os.path.isfile(test_set_path+'tf_record_test_val'))
# Check that passing a fake test_set_path raises an error.
fake_test_path = self.root_path+'fake_data'
os.mkdir(fake_test_path)
with self.assertRaises(FileNotFoundError):
_ = bnn_inference.InferenceClass(self.cfg,
test_set_path=fake_test_path,lite_class=True)
# Test cleanup
os.rmdir(fake_test_path)
os.remove(test_set_path+'tf_record_test_val')
os.remove(self.root_path+'new_metadata.csv')
os.remove(self.normalization_constants_path)
| [
"tensorflow.random.set_seed",
"os.mkdir",
"os.remove",
"numpy.random.seed",
"numpy.abs",
"pandas.read_csv",
"numpy.ones",
"gc.collect",
"os.path.isfile",
"numpy.mean",
"numpy.tile",
"numpy.random.normal",
"numpy.diag",
"ovejero.data_tools.normalize_lens_parameters",
"pandas.DataFrame",
... | [((1522, 1633), 'ovejero.model_trainer.prepare_tf_record', 'model_trainer.prepare_tf_record', (['self.cfg', 'self.root_path', 'self.tf_record_path', 'self.final_params', '"""train"""'], {}), "(self.cfg, self.root_path, self.\n tf_record_path, self.final_params, 'train')\n", (1553, 1633), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((1632, 1662), 'os.remove', 'os.remove', (['self.tf_record_path'], {}), '(self.tf_record_path)\n', (1641, 1662), False, 'import unittest, os, json\n'), ((1666, 1683), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (1680, 1683), True, 'import numpy as np\n'), ((1686, 1707), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(2)'], {}), '(2)\n', (1704, 1707), True, 'import tensorflow as tf\n'), ((1774, 1806), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (1804, 1806), True, 'import tensorflow as tf\n'), ((1809, 1821), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1819, 1821), False, 'import gc\n'), ((1968, 2023), 'ovejero.bnn_inference.InferenceClass', 'bnn_inference.InferenceClass', (['self.cfg'], {'lite_class': '(True)'}), '(self.cfg, lite_class=True)\n', (1996, 2023), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((2118, 2166), 'os.remove', 'os.remove', (["(self.root_path + 'tf_record_test_val')"], {}), "(self.root_path + 'tf_record_test_val')\n", (2127, 2166), False, 'import unittest, os, json\n'), ((2167, 2213), 'os.remove', 'os.remove', (["(self.root_path + 'new_metadata.csv')"], {}), "(self.root_path + 'new_metadata.csv')\n", (2176, 2213), False, 'import unittest, os, json\n'), ((2253, 2297), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (2262, 2297), False, 'import unittest, os, json\n'), ((2499, 2542), 'numpy.ones', 'np.ones', (['(self.batch_size, self.num_params)'], {}), '((self.batch_size, 
self.num_params))\n', (2506, 2542), True, 'import numpy as np\n'), ((2560, 2607), 'numpy.ones', 'np.ones', (['(10, self.batch_size, self.num_params)'], {}), '((10, self.batch_size, self.num_params))\n', (2567, 2607), True, 'import numpy as np\n'), ((3341, 3396), 'ovejero.bnn_inference.InferenceClass', 'bnn_inference.InferenceClass', (['self.cfg'], {'lite_class': '(True)'}), '(self.cfg, lite_class=True)\n', (3369, 3396), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((3491, 3539), 'os.remove', 'os.remove', (["(self.root_path + 'tf_record_test_val')"], {}), "(self.root_path + 'tf_record_test_val')\n", (3500, 3539), False, 'import unittest, os, json\n'), ((3540, 3586), 'os.remove', 'os.remove', (["(self.root_path + 'new_metadata.csv')"], {}), "(self.root_path + 'new_metadata.csv')\n", (3549, 3586), False, 'import unittest, os, json\n'), ((3626, 3670), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (3635, 3670), False, 'import unittest, os, json\n'), ((3698, 3877), 'ovejero.data_tools.normalize_lens_parameters', 'data_tools.normalize_lens_parameters', (['self.lens_params', 'self.lens_params_path', 'self.normalized_param_path', 'self.normalization_constants_path'], {'train_or_test': 'train_or_test'}), '(self.lens_params, self.\n lens_params_path, self.normalized_param_path, self.\n normalization_constants_path, train_or_test=train_or_test)\n', (3734, 3877), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((3893, 3943), 'pandas.read_csv', 'pd.read_csv', (['self.lens_params_path'], {'index_col': 'None'}), '(self.lens_params_path, index_col=None)\n', (3904, 3943), True, 'import pandas as pd\n'), ((3964, 4019), 'pandas.read_csv', 'pd.read_csv', (['self.normalized_param_path'], {'index_col': 'None'}), '(self.normalized_param_path, index_col=None)\n', (3975, 4019), True, 'import pandas as pd\n'), ((4396, 4434), 'numpy.tile', 
'np.tile', (['norms_params_numpy', '(3, 1, 1)'], {}), '(norms_params_numpy, (3, 1, 1))\n', (4403, 4434), True, 'import numpy as np\n'), ((4487, 4536), 'numpy.ones', 'np.ones', (['(3, 3, self.num_params, self.num_params)'], {}), '((3, 3, self.num_params, self.num_params))\n', (4494, 4536), True, 'import numpy as np\n'), ((4864, 4901), 'os.remove', 'os.remove', (['self.normalized_param_path'], {}), '(self.normalized_param_path)\n', (4873, 4901), False, 'import unittest, os, json\n'), ((4904, 4948), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (4913, 4948), False, 'import unittest, os, json\n'), ((5006, 5044), 'ovejero.bnn_inference.InferenceClass', 'bnn_inference.InferenceClass', (['self.cfg'], {}), '(self.cfg)\n', (5034, 5044), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((5136, 5184), 'os.remove', 'os.remove', (["(self.root_path + 'tf_record_test_val')"], {}), "(self.root_path + 'tf_record_test_val')\n", (5145, 5184), False, 'import unittest, os, json\n'), ((5185, 5231), 'os.remove', 'os.remove', (["(self.root_path + 'new_metadata.csv')"], {}), "(self.root_path + 'new_metadata.csv')\n", (5194, 5231), False, 'import unittest, os, json\n'), ((5271, 5315), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (5280, 5315), False, 'import unittest, os, json\n'), ((6593, 6622), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'fake_norms'}), '(data=fake_norms)\n', (6605, 6622), True, 'import pandas as pd\n'), ((6692, 6804), 'ovejero.data_tools.generate_tf_record', 'data_tools.generate_tf_record', (['self.root_path', 'self.lens_params', 'self.lens_params_path', 'self.tf_record_path'], {}), '(self.root_path, self.lens_params, self.\n lens_params_path, self.tf_record_path)\n', (6721, 6804), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((7544, 7575), 
'numpy.random.rand', 'np.random.rand', (['self.num_params'], {}), '(self.num_params)\n', (7558, 7575), True, 'import numpy as np\n'), ((7591, 7639), 'numpy.random.rand', 'np.random.rand', (['self.num_params', 'self.num_params'], {}), '(self.num_params, self.num_params)\n', (7605, 7639), True, 'import numpy as np\n'), ((7717, 7749), 'numpy.dot', 'np.dot', (['covariance', 'covariance.T'], {}), '(covariance, covariance.T)\n', (7723, 7749), True, 'import numpy as np\n'), ((8543, 8575), 'numpy.random.randn', 'np.random.randn', (['self.num_params'], {}), '(self.num_params)\n', (8558, 8575), True, 'import numpy as np\n'), ((8597, 8645), 'numpy.random.rand', 'np.random.rand', (['self.num_params', 'self.num_params'], {}), '(self.num_params, self.num_params)\n', (8611, 8645), True, 'import numpy as np\n'), ((8733, 8777), 'numpy.dot', 'np.dot', (['wrong_covariance', 'wrong_covariance.T'], {}), '(wrong_covariance, wrong_covariance.T)\n', (8739, 8777), True, 'import numpy as np\n'), ((9405, 9449), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (9414, 9449), False, 'import unittest, os, json\n'), ((9452, 9482), 'os.remove', 'os.remove', (['self.tf_record_path'], {}), '(self.tf_record_path)\n', (9461, 9482), False, 'import unittest, os, json\n'), ((9540, 9578), 'ovejero.bnn_inference.InferenceClass', 'bnn_inference.InferenceClass', (['self.cfg'], {}), '(self.cfg)\n', (9568, 9578), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((9670, 9718), 'os.remove', 'os.remove', (["(self.root_path + 'tf_record_test_val')"], {}), "(self.root_path + 'tf_record_test_val')\n", (9679, 9718), False, 'import unittest, os, json\n'), ((9719, 9765), 'os.remove', 'os.remove', (["(self.root_path + 'new_metadata.csv')"], {}), "(self.root_path + 'new_metadata.csv')\n", (9728, 9765), False, 'import unittest, os, json\n'), ((9805, 9849), 'os.remove', 'os.remove', (['self.normalization_constants_path'], 
{}), '(self.normalization_constants_path)\n', (9814, 9849), False, 'import unittest, os, json\n'), ((11330, 11359), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'fake_norms'}), '(data=fake_norms)\n', (11342, 11359), True, 'import pandas as pd\n'), ((11429, 11541), 'ovejero.data_tools.generate_tf_record', 'data_tools.generate_tf_record', (['self.root_path', 'self.lens_params', 'self.lens_params_path', 'self.tf_record_path'], {}), '(self.root_path, self.lens_params, self.\n lens_params_path, self.tf_record_path)\n', (11458, 11541), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((12295, 12320), 'numpy.zeros', 'np.zeros', (['self.num_params'], {}), '(self.num_params)\n', (12303, 12320), True, 'import numpy as np\n'), ((12336, 12389), 'ovejero.bnn_alexnet.LensingLossFunctions', 'bnn_alexnet.LensingLossFunctions', (['[]', 'self.num_params'], {}), '([], self.num_params)\n', (12368, 12389), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((12770, 12792), 'numpy.dot', 'np.dot', (['L_mat', 'L_mat.T'], {}), '(L_mat, L_mat.T)\n', (12776, 12792), True, 'import numpy as np\n'), ((13367, 13411), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (13376, 13411), False, 'import unittest, os, json\n'), ((13414, 13444), 'os.remove', 'os.remove', (['self.tf_record_path'], {}), '(self.tf_record_path)\n', (13423, 13444), False, 'import unittest, os, json\n'), ((13501, 13539), 'ovejero.bnn_inference.InferenceClass', 'bnn_inference.InferenceClass', (['self.cfg'], {}), '(self.cfg)\n', (13529, 13539), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((13631, 13679), 'os.remove', 'os.remove', (["(self.root_path + 'tf_record_test_val')"], {}), "(self.root_path + 'tf_record_test_val')\n", (13640, 13679), False, 'import unittest, os, json\n'), ((13680, 13726), 'os.remove', 'os.remove', (["(self.root_path + 
'new_metadata.csv')"], {}), "(self.root_path + 'new_metadata.csv')\n", (13689, 13726), False, 'import unittest, os, json\n'), ((13766, 13810), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (13775, 13810), False, 'import unittest, os, json\n'), ((16046, 16075), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'fake_norms'}), '(data=fake_norms)\n', (16058, 16075), True, 'import pandas as pd\n'), ((16145, 16257), 'ovejero.data_tools.generate_tf_record', 'data_tools.generate_tf_record', (['self.root_path', 'self.lens_params', 'self.lens_params_path', 'self.tf_record_path'], {}), '(self.root_path, self.lens_params, self.\n lens_params_path, self.tf_record_path)\n', (16174, 16257), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((19489, 19533), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (19498, 19533), False, 'import unittest, os, json\n'), ((19536, 19566), 'os.remove', 'os.remove', (['self.tf_record_path'], {}), '(self.tf_record_path)\n', (19545, 19566), False, 'import unittest, os, json\n'), ((19624, 19662), 'ovejero.bnn_inference.InferenceClass', 'bnn_inference.InferenceClass', (['self.cfg'], {}), '(self.cfg)\n', (19652, 19662), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((19754, 19802), 'os.remove', 'os.remove', (["(self.root_path + 'tf_record_test_val')"], {}), "(self.root_path + 'tf_record_test_val')\n", (19763, 19802), False, 'import unittest, os, json\n'), ((19803, 19849), 'os.remove', 'os.remove', (["(self.root_path + 'new_metadata.csv')"], {}), "(self.root_path + 'new_metadata.csv')\n", (19812, 19849), False, 'import unittest, os, json\n'), ((19889, 19933), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (19898, 19933), False, 'import unittest, os, json\n'), ((21210, 21239), 
'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'fake_norms'}), '(data=fake_norms)\n', (21222, 21239), True, 'import pandas as pd\n'), ((21309, 21421), 'ovejero.data_tools.generate_tf_record', 'data_tools.generate_tf_record', (['self.root_path', 'self.lens_params', 'self.lens_params_path', 'self.tf_record_path'], {}), '(self.root_path, self.lens_params, self.\n lens_params_path, self.tf_record_path)\n', (21338, 21421), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((21698, 21737), 'numpy.copy', 'np.copy', (['self.infer_class.predict_samps'], {}), '(self.infer_class.predict_samps)\n', (21705, 21737), True, 'import numpy as np\n'), ((21842, 21912), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['pred_1', 'self.infer_class.predict_samps'], {}), '(pred_1, self.infer_class.predict_samps)\n', (21872, 21912), True, 'import numpy as np\n'), ((22016, 22032), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (22025, 22032), True, 'import matplotlib.pyplot as plt\n'), ((22127, 22143), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (22136, 22143), True, 'import matplotlib.pyplot as plt\n'), ((22146, 22162), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (22155, 22162), True, 'import matplotlib.pyplot as plt\n'), ((22212, 22228), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (22221, 22228), True, 'import matplotlib.pyplot as plt\n'), ((22298, 22314), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (22307, 22314), True, 'import matplotlib.pyplot as plt\n'), ((22379, 22395), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (22388, 22395), True, 'import matplotlib.pyplot as plt\n'), ((22435, 22479), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (22444, 22479), False, 'import unittest, os, 
json\n'), ((22482, 22512), 'os.remove', 'os.remove', (['self.tf_record_path'], {}), '(self.tf_record_path)\n', (22491, 22512), False, 'import unittest, os, json\n'), ((22515, 22548), 'os.remove', 'os.remove', (["(save_path + 'pred.npy')"], {}), "(save_path + 'pred.npy')\n", (22524, 22548), False, 'import unittest, os, json\n'), ((22549, 22584), 'os.remove', 'os.remove', (["(save_path + 'al_cov.npy')"], {}), "(save_path + 'al_cov.npy')\n", (22558, 22584), False, 'import unittest, os, json\n'), ((22585, 22620), 'os.remove', 'os.remove', (["(save_path + 'images.npy')"], {}), "(save_path + 'images.npy')\n", (22594, 22620), False, 'import unittest, os, json\n'), ((22621, 22656), 'os.remove', 'os.remove', (["(save_path + 'y_test.npy')"], {}), "(save_path + 'y_test.npy')\n", (22630, 22656), False, 'import unittest, os, json\n'), ((22657, 22676), 'os.rmdir', 'os.rmdir', (['save_path'], {}), '(save_path)\n', (22665, 22676), False, 'import unittest, os, json\n'), ((22728, 22783), 'ovejero.bnn_inference.InferenceClass', 'bnn_inference.InferenceClass', (['self.cfg'], {'lite_class': '(True)'}), '(self.cfg, lite_class=True)\n', (22756, 22783), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((22878, 22926), 'os.remove', 'os.remove', (["(self.root_path + 'tf_record_test_val')"], {}), "(self.root_path + 'tf_record_test_val')\n", (22887, 22926), False, 'import unittest, os, json\n'), ((22927, 22973), 'os.remove', 'os.remove', (["(self.root_path + 'new_metadata.csv')"], {}), "(self.root_path + 'new_metadata.csv')\n", (22936, 22973), False, 'import unittest, os, json\n'), ((23013, 23057), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (23022, 23057), False, 'import unittest, os, json\n'), ((23353, 23400), 'numpy.mean', 'np.mean', (['self.infer_class.predict_samps'], {'axis': '(0)'}), '(self.infer_class.predict_samps, axis=0)\n', (23360, 23400), True, 'import numpy as np\n'), 
((23971, 24018), 'numpy.mean', 'np.mean', (['self.infer_class.predict_samps'], {'axis': '(0)'}), '(self.infer_class.predict_samps, axis=0)\n', (23978, 24018), True, 'import numpy as np\n'), ((24777, 24824), 'numpy.mean', 'np.mean', (['self.infer_class.predict_samps'], {'axis': '(0)'}), '(self.infer_class.predict_samps, axis=0)\n', (24784, 24824), True, 'import numpy as np\n'), ((25346, 25393), 'numpy.mean', 'np.mean', (['self.infer_class.predict_samps'], {'axis': '(0)'}), '(self.infer_class.predict_samps, axis=0)\n', (25353, 25393), True, 'import numpy as np\n'), ((26030, 26141), 'ovejero.model_trainer.prepare_tf_record', 'model_trainer.prepare_tf_record', (['self.cfg', 'self.root_path', 'self.tf_record_path', 'self.final_params', '"""train"""'], {}), "(self.cfg, self.root_path, self.\n tf_record_path, self.final_params, 'train')\n", (26061, 26141), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((26140, 26170), 'os.remove', 'os.remove', (['self.tf_record_path'], {}), '(self.tf_record_path)\n', (26149, 26170), False, 'import unittest, os, json\n'), ((26178, 26266), 'ovejero.bnn_inference.InferenceClass', 'bnn_inference.InferenceClass', (['self.cfg'], {'test_set_path': 'test_set_path', 'lite_class': '(True)'}), '(self.cfg, test_set_path=test_set_path,\n lite_class=True)\n', (26206, 26266), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((26491, 26515), 'os.mkdir', 'os.mkdir', (['fake_test_path'], {}), '(fake_test_path)\n', (26499, 26515), False, 'import unittest, os, json\n'), ((26678, 26702), 'os.rmdir', 'os.rmdir', (['fake_test_path'], {}), '(fake_test_path)\n', (26686, 26702), False, 'import unittest, os, json\n'), ((26705, 26752), 'os.remove', 'os.remove', (["(test_set_path + 'tf_record_test_val')"], {}), "(test_set_path + 'tf_record_test_val')\n", (26714, 26752), False, 'import unittest, os, json\n'), ((26753, 26799), 'os.remove', 'os.remove', (["(self.root_path + 
'new_metadata.csv')"], {}), "(self.root_path + 'new_metadata.csv')\n", (26762, 26799), False, 'import unittest, os, json\n'), ((26800, 26844), 'os.remove', 'os.remove', (['self.normalization_constants_path'], {}), '(self.normalization_constants_path)\n', (26809, 26844), False, 'import unittest, os, json\n'), ((517, 534), 'json.load', 'json.load', (['json_f'], {}), '(json_f)\n', (526, 534), False, 'import unittest, os, json\n'), ((4294, 4321), 'numpy.array', 'np.array', (['lens_params_numpy'], {}), '(lens_params_numpy)\n', (4302, 4321), True, 'import numpy as np\n'), ((4347, 4375), 'numpy.array', 'np.array', (['norms_params_numpy'], {}), '(norms_params_numpy)\n', (4355, 4375), True, 'import numpy as np\n'), ((6061, 6085), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (6068, 6085), True, 'import numpy as np\n'), ((6111, 6135), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (6118, 6135), True, 'import numpy as np\n'), ((6558, 6578), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (6566, 6578), True, 'import numpy as np\n'), ((8052, 8075), 'numpy.eye', 'np.eye', (['self.num_params'], {}), '(self.num_params)\n', (8058, 8075), True, 'import numpy as np\n'), ((10698, 10722), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (10705, 10722), True, 'import numpy as np\n'), ((11295, 11315), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (11303, 11315), True, 'import numpy as np\n'), ((12689, 12712), 'tensorflow.constant', 'tf.constant', (['L_elements'], {}), '(L_elements)\n', (12700, 12712), True, 'import tensorflow as tf\n'), ((15151, 15175), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (15158, 15175), True, 'import numpy as np\n'), ((15188, 15212), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (15195, 15212), True, 'import numpy as np\n'), ((16011, 16031), 'numpy.array', 'np.array', (['[0.0, 
1.0]'], {}), '([0.0, 1.0])\n', (16019, 16031), True, 'import numpy as np\n'), ((17103, 17127), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (17110, 17127), True, 'import numpy as np\n'), ((17140, 17164), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (17147, 17164), True, 'import numpy as np\n'), ((17511, 17526), 'numpy.log', 'np.log', (['(0.99999)'], {}), '(0.99999)\n', (17517, 17526), True, 'import numpy as np\n'), ((17527, 17540), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (17533, 17540), True, 'import numpy as np\n'), ((18343, 18367), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (18350, 18367), True, 'import numpy as np\n'), ((18380, 18404), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (18387, 18404), True, 'import numpy as np\n'), ((18749, 18763), 'numpy.log', 'np.log', (['(0.0001)'], {}), '(0.0001)\n', (18755, 18763), True, 'import numpy as np\n'), ((18764, 18778), 'numpy.log', 'np.log', (['(0.9999)'], {}), '(0.9999)\n', (18770, 18778), True, 'import numpy as np\n'), ((20678, 20702), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (20685, 20702), True, 'import numpy as np\n'), ((20728, 20752), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (20735, 20752), True, 'import numpy as np\n'), ((21175, 21195), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (21183, 21195), True, 'import numpy as np\n'), ((23428, 23525), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {'dtype': 'np.float32'}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n dtype=np.float32)\n', (23436, 23525), True, 'import numpy as np\n'), ((24046, 24143), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {'dtype': 'np.float32'}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0, 0, 0, 0, 
0, 0, 0, 0, 0, 0]],\n dtype=np.float32)\n', (24054, 24143), True, 'import numpy as np\n'), ((24852, 24905), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0] * 10]'], {}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [0] * 10])\n', (24860, 24905), True, 'import numpy as np\n'), ((25880, 25932), 'os.path.isfile', 'os.path.isfile', (["(test_set_path + 'tf_record_test_val')"], {}), "(test_set_path + 'tf_record_test_val')\n", (25894, 25932), False, 'import unittest, os, json\n'), ((26329, 26381), 'os.path.isfile', 'os.path.isfile', (["(test_set_path + 'tf_record_test_val')"], {}), "(test_set_path + 'tf_record_test_val')\n", (26343, 26381), False, 'import unittest, os, json\n'), ((26569, 26658), 'ovejero.bnn_inference.InferenceClass', 'bnn_inference.InferenceClass', (['self.cfg'], {'test_set_path': 'fake_test_path', 'lite_class': '(True)'}), '(self.cfg, test_set_path=fake_test_path,\n lite_class=True)\n', (26597, 26658), False, 'from ovejero import bnn_inference, data_tools, bnn_alexnet, model_trainer\n'), ((407, 432), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (422, 432), False, 'import unittest, os, json\n'), ((2811, 2841), 'numpy.abs', 'np.abs', (['(predict_samps - y_test)'], {}), '(predict_samps - y_test)\n', (2817, 2841), True, 'import numpy as np\n'), ((3152, 3182), 'numpy.abs', 'np.abs', (['(predict_samps - y_test)'], {}), '(predict_samps - y_test)\n', (3158, 3182), True, 'import numpy as np\n'), ((4685, 4731), 'numpy.abs', 'np.abs', (['(norms_params_numpy - lens_params_numpy)'], {}), '(norms_params_numpy - lens_params_numpy)\n', (4691, 4731), True, 'import numpy as np\n'), ((4771, 4812), 'numpy.abs', 'np.abs', (['(predict_samps - lens_params_numpy)'], {}), '(predict_samps - lens_params_numpy)\n', (4777, 4812), True, 'import numpy as np\n'), ((5556, 5573), 'numpy.random.seed', 'np.random.seed', (['(4)'], {}), '(4)\n', (5570, 5573), True, 'import numpy as np\n'), ((6303, 6327), 'numpy.ones', 'np.ones', (['self.num_params'], 
{}), '(self.num_params)\n', (6310, 6327), True, 'import numpy as np\n'), ((7040, 7078), 'numpy.abs', 'np.abs', (['(self.infer_class.y_pred - mean)'], {}), '(self.infer_class.y_pred - mean)\n', (7046, 7078), True, 'import numpy as np\n'), ((7228, 7271), 'numpy.abs', 'np.abs', (['(self.infer_class.y_cov - covariance)'], {}), '(self.infer_class.y_cov - covariance)\n', (7234, 7271), True, 'import numpy as np\n'), ((7429, 7460), 'numpy.abs', 'np.abs', (['self.infer_class.al_cov'], {}), '(self.infer_class.al_cov)\n', (7435, 7460), True, 'import numpy as np\n'), ((7922, 7960), 'numpy.abs', 'np.abs', (['(self.infer_class.y_pred - mean)'], {}), '(self.infer_class.y_pred - mean)\n', (7928, 7960), True, 'import numpy as np\n'), ((8219, 8262), 'numpy.abs', 'np.abs', (['(self.infer_class.y_cov - covariance)'], {}), '(self.infer_class.y_cov - covariance)\n', (8225, 8262), True, 'import numpy as np\n'), ((8962, 9000), 'numpy.abs', 'np.abs', (['(self.infer_class.y_pred - mean)'], {}), '(self.infer_class.y_pred - mean)\n', (8968, 9000), True, 'import numpy as np\n'), ((9135, 9178), 'numpy.abs', 'np.abs', (['(self.infer_class.y_cov - covariance)'], {}), '(self.infer_class.y_cov - covariance)\n', (9141, 9178), True, 'import numpy as np\n'), ((9330, 9361), 'numpy.abs', 'np.abs', (['self.infer_class.al_cov'], {}), '(self.infer_class.al_cov)\n', (9336, 9361), True, 'import numpy as np\n'), ((10094, 10111), 'numpy.random.seed', 'np.random.seed', (['(6)'], {}), '(6)\n', (10108, 10111), True, 'import numpy as np\n'), ((10748, 10772), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (10755, 10772), True, 'import numpy as np\n'), ((11040, 11064), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (11047, 11064), True, 'import numpy as np\n'), ((12886, 12924), 'numpy.abs', 'np.abs', (['(self.infer_class.y_pred - mean)'], {}), '(self.infer_class.y_pred - mean)\n', (12892, 12924), True, 'import numpy as np\n'), ((13080, 13120), 'numpy.abs', 
'np.abs', (['(self.infer_class.y_cov - cov_mat)'], {}), '(self.infer_class.y_cov - cov_mat)\n', (13086, 13120), True, 'import numpy as np\n'), ((13280, 13321), 'numpy.abs', 'np.abs', (['(self.infer_class.al_cov - cov_mat)'], {}), '(self.infer_class.al_cov - cov_mat)\n', (13286, 13321), True, 'import numpy as np\n'), ((14102, 14119), 'numpy.random.seed', 'np.random.seed', (['(6)'], {}), '(6)\n', (14116, 14119), True, 'import numpy as np\n'), ((15239, 15263), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (15246, 15263), True, 'import numpy as np\n'), ((15298, 15322), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (15305, 15322), True, 'import numpy as np\n'), ((15756, 15780), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (15763, 15780), True, 'import numpy as np\n'), ((16527, 16566), 'numpy.abs', 'np.abs', (['(self.infer_class.y_pred - mean1)'], {}), '(self.infer_class.y_pred - mean1)\n', (16533, 16566), True, 'import numpy as np\n'), ((16615, 16649), 'numpy.abs', 'np.abs', (['(self.infer_class.y_std - 1)'], {}), '(self.infer_class.y_std - 1)\n', (16621, 16649), True, 'import numpy as np\n'), ((17193, 17217), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (17200, 17217), True, 'import numpy as np\n'), ((17252, 17276), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (17259, 17276), True, 'import numpy as np\n'), ((17824, 17863), 'numpy.abs', 'np.abs', (['(self.infer_class.y_pred - mean1)'], {}), '(self.infer_class.y_pred - mean1)\n', (17830, 17863), True, 'import numpy as np\n'), ((17912, 17946), 'numpy.abs', 'np.abs', (['(self.infer_class.y_std - 1)'], {}), '(self.infer_class.y_std - 1)\n', (17918, 17946), True, 'import numpy as np\n'), ((18431, 18455), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (18438, 18455), True, 'import numpy as np\n'), ((18490, 18514), 'numpy.ones', 'np.ones', 
(['self.num_params'], {}), '(self.num_params)\n', (18497, 18514), True, 'import numpy as np\n'), ((19060, 19095), 'numpy.abs', 'np.abs', (['(self.infer_class.y_pred - 4)'], {}), '(self.infer_class.y_pred - 4)\n', (19066, 19095), True, 'import numpy as np\n'), ((20173, 20190), 'numpy.random.seed', 'np.random.seed', (['(4)'], {}), '(4)\n', (20187, 20190), True, 'import numpy as np\n'), ((20920, 20944), 'numpy.ones', 'np.ones', (['self.num_params'], {}), '(self.num_params)\n', (20927, 20944), True, 'import numpy as np\n'), ((23224, 23255), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(size * 2)'}), '(size=size * 2)\n', (23240, 23255), True, 'import numpy as np\n'), ((23836, 23874), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(2)', 'size': '(size * 2)'}), '(loc=2, size=size * 2)\n', (23852, 23874), True, 'import numpy as np\n'), ((24468, 24506), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'size': '(size * 2)'}), '(loc=0, size=size * 2)\n', (24484, 24506), True, 'import numpy as np\n'), ((24584, 24625), 'numpy.square', 'np.square', (['self.infer_class.predict_samps'], {}), '(self.infer_class.predict_samps)\n', (24593, 24625), True, 'import numpy as np\n'), ((23555, 23565), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (23562, 23565), True, 'import numpy as np\n'), ((24172, 24182), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (24179, 24182), True, 'import numpy as np\n'), ((24690, 24717), 'numpy.random.random', 'np.random.random', ([], {'size': 'size'}), '(size=size)\n', (24706, 24717), True, 'import numpy as np\n'), ((24942, 24952), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (24949, 24952), True, 'import numpy as np\n'), ((25211, 25249), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'size': '(size * 2)'}), '(loc=0, size=size * 2)\n', (25227, 25249), True, 'import numpy as np\n'), ((7157, 7176), 'numpy.diag', 'np.diag', (['covariance'], {}), '(covariance)\n', (7164, 7176), True, 'import numpy as 
np\n'), ((8455, 8478), 'numpy.eye', 'np.eye', (['self.num_params'], {}), '(self.num_params)\n', (8461, 8478), True, 'import numpy as np\n'), ((10808, 10817), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (10814, 10817), True, 'import numpy as np\n'), ((15359, 15368), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (15365, 15368), True, 'import numpy as np\n'), ((15464, 15473), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (15470, 15473), True, 'import numpy as np\n'), ((16728, 16751), 'numpy.eye', 'np.eye', (['self.num_params'], {}), '(self.num_params)\n', (16734, 16751), True, 'import numpy as np\n'), ((16943, 16966), 'numpy.eye', 'np.eye', (['self.num_params'], {}), '(self.num_params)\n', (16949, 16966), True, 'import numpy as np\n'), ((17313, 17322), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (17319, 17322), True, 'import numpy as np\n'), ((17418, 17428), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (17424, 17428), True, 'import numpy as np\n'), ((18025, 18048), 'numpy.eye', 'np.eye', (['self.num_params'], {}), '(self.num_params)\n', (18031, 18048), True, 'import numpy as np\n'), ((18240, 18263), 'numpy.eye', 'np.eye', (['self.num_params'], {}), '(self.num_params)\n', (18246, 18263), True, 'import numpy as np\n'), ((18551, 18561), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (18557, 18561), True, 'import numpy as np\n'), ((18657, 18666), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (18663, 18666), True, 'import numpy as np\n'), ((19174, 19184), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (19181, 19184), True, 'import numpy as np\n'), ((25434, 25444), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (25441, 25444), True, 'import numpy as np\n'), ((5831, 5905), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mean', 'self.covariance', 'self.batch_size'], {}), '(self.mean, self.covariance, self.batch_size)\n', (5860, 5905), True, 'import numpy as np\n'), ((8151, 8170), 'numpy.diag', 'np.diag', (['covariance'], {}), 
'(covariance)\n', (8158, 8170), True, 'import numpy as np\n'), ((9077, 9096), 'numpy.diag', 'np.diag', (['covariance'], {}), '(covariance)\n', (9084, 9096), True, 'import numpy as np\n'), ((13015, 13031), 'numpy.diag', 'np.diag', (['cov_mat'], {}), '(cov_mat)\n', (13022, 13031), True, 'import numpy as np\n'), ((14615, 14691), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mean1', 'self.covariance1', 'self.batch_size'], {}), '(self.mean1, self.covariance1, self.batch_size)\n', (14644, 14691), True, 'import numpy as np\n'), ((14781, 14857), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mean2', 'self.covariance2', 'self.batch_size'], {}), '(self.mean2, self.covariance2, self.batch_size)\n', (14810, 14857), True, 'import numpy as np\n'), ((20448, 20522), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mean', 'self.covariance', 'self.batch_size'], {}), '(self.mean, self.covariance, self.batch_size)\n', (20477, 20522), True, 'import numpy as np\n'), ((10478, 10522), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.num_params)'], {}), '((self.batch_size, self.num_params))\n', (10486, 10522), True, 'import numpy as np\n'), ((10538, 10586), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.L_elements_len)'], {}), '((self.batch_size, self.L_elements_len))\n', (10546, 10586), True, 'import numpy as np\n'), ((14703, 14751), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.L_elements_len)'], {}), '((self.batch_size, self.L_elements_len))\n', (14711, 14751), True, 'import numpy as np\n'), ((14868, 14916), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.L_elements_len)'], {}), '((self.batch_size, self.L_elements_len))\n', (14876, 14916), True, 'import numpy as np\n'), ((14946, 14976), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (14954, 14976), True, 'import numpy as np\n')] |
import numpy as np
import scipy.linalg as scl
import itertools
from numpy import ndarray
import sys
from scipy import stats
from typing import Sequence, Union, Any, Optional, List, Tuple, Dict
from sklearn.linear_model import Lasso as skLasso
import warnings
class Regressor:
    """ Base class implementing ordinary least square regression

    Ordinary least square polynomial regression using SVD decomposition.
    Supports basic analysis features such as variance, degrees of freedom
    and confidence intervals of β.

    Attributes:
        response (ndarray): The response variable to model
        orders (list of int): The polynomial order of each predictor
            to use in constructing the design matrix.
        β (ndarray): The coefficients once the system has been solved.
        design_matrix (ndarray): The constructed design matrix
        interactions (bool): Whether or not to use interactions between
            the polynomials.
        max_interaction (int): The maximum order of interactions to use.
            If None, no restriction is used.
        condition_number (float): The condition number of the projection matrix
    """
    def __init__(self, predictors: Sequence[ndarray],
                 response: ndarray) -> None:
        """ Initialize the class

        Args:
            predictors: A sequence of each predictor used for
                predicting the response.
            response: The response to model using the predictors
        """
        # A single ndarray is treated as one predictor; a sequence is
        # flattened element-wise so all predictors become 1-D arrays.
        if isinstance(predictors, np.ndarray):
            self.predictors = [predictors]
        else:
            self.predictors = [predictor.flatten() for predictor in predictors]
        self.response = response.flatten()
        # Fit state; populated by fit()/solve().
        self.orders: Optional[Sequence[Sequence[int]]] = None
        self.β: Optional[List[float]] = None
        self.design_matrix: Optional[ndarray] = None
        self.interactions: bool = False
        self.max_interaction: Optional[int] = None
        self.condition_number = 0
    def fit(self, orders: Sequence[Union[int, Sequence[int]]],
            interactions: bool = False,
            max_interaction: Optional[int] = None) -> ndarray:
        """ Perform a fitting using the exponents in orders

        Args:
            orders: The orders to use in construction of the
                design matrix. If an element is an int,
                all lower orders will be used as well.
                The constant term is always present.
            interactions: Whether or not to use interactions
                of the polynomial degrees.
            max_interaction: Maximum product of interacting exponents;
                implies interactions when not None.

        Returns:
            The fitted coefficients β.
        """
        # Construct the orders to fit
        # NOTE(review): this rewrites the caller's `orders` list in place
        # (an int n becomes [1, ..., n]) — callers reusing the list see the
        # expanded form.
        for i, order in enumerate(orders):
            if not isiterable(order):
                orders[i] = list(range(1, order+1))
        self.orders = orders
        # Construct the design matrix
        self.max_interaction = max_interaction
        if interactions or max_interaction is not None:
            self.interactions = True
        design_matrix = self.make_design_matrix(self.predictors)
        # More features than samples makes the normal equations singular.
        if design_matrix.shape[0] < design_matrix.shape[1]:
            warnings.warn("Number of features surpasses number of samples")
        self.design_matrix = self.standardize(design_matrix)
        self.β = self.solve(self.design_matrix, self.response)
        return self.β
    def standardize(self, matrix: ndarray) -> ndarray:
        """ Standardize the predictors of the matrix

        Shifts the mean of each column to zero and scales by the
        standard deviation.
        Also sets the `standardizer` attribute so the identical
        transformation can later be applied to prediction inputs.

        Args:
            matrix: The matrix to standardize
        Returns:
            The standardized matrix.
        """
        # Standardize the matrix
        # Column 0 is the constant term and must not be centered/scaled.
        mean = matrix[:, 1:].mean(axis=0)
        std = matrix[:, 1:].std(axis=0)
        def standardizer(mat: ndarray) -> ndarray:
            # Ignore the first column of constant term
            # NOTE: mutates `mat` in place and also returns it.
            mat[:, 1:] = (mat[:, 1:] - mean[np.newaxis, :])/std[np.newaxis, :]
            return mat
        # Captured closure remembers the *training* mean/std.
        self.standardizer = standardizer
        return standardizer(matrix)
    def make_design_matrix(self, predictors: List[ndarray]) -> ndarray:
        """ Construct the design matrix given predictors and orders

        The design matrix has the construction
        1 X X² ... X^N Y Y² ... Y^m XY X²Y ... X^NY XY² X²Y² ... X^NY^N
        Uses the attributes `orders` and `max_interaction`

        Args:
            predictors: The predictors, or basis functions, to use to
                construct the design matrix.
        Returns:
            The design matrix
        """
        matrix = vandermonde(predictors, self.orders)
        # Handle interaction terms
        if self.interactions:
            matrix = add_interactions(matrix, self.orders,
                                      self.max_interaction)
        return matrix
    def solve(self, design_matrix: ndarray, response: ndarray) -> ndarray:
        """ Solve the system using inversion or SVD

        Args:
            design_matrix: The matrix to solve.
            response: The response to model
        Returns:
            The solution β
        """
        self.condition_number = np.linalg.cond(design_matrix)
        # NOTE(review): cond(A) >= 1 always, so `cond < eps` is never true
        # and the inversion branch is dead — SVD is used unconditionally.
        # The intent was presumably a comparison against 1/eps; confirm
        # before changing, as SVD is the safe default either way.
        if self.condition_number < sys.float_info.epsilon:
            β = lin_reg_inv(design_matrix, response)
        else:
            β = lin_reg_svd(design_matrix, response)
        return β
    def predict(self, predictors: Union[ndarray, Sequence[ndarray]]) -> ndarray:
        """ Predict the response based on fit

        The resulting prediction matrix is standardized using the same
        standardization applied to the design matrix

        Args:
            predictors: The predictors to predict from.
        Returns:
            The predicted values, reshaped to the input's shape.

        Raises:
            AssertionError: If fit() has not been called.
            ValueError: If the number of predictors differs from training.
        """
        assert self.β is not None, "Perform fitting before predicting"
        if isinstance(predictors, np.ndarray):
            if len(self.predictors) != 1:
                raise ValueError("Must provide same amount of predictors")
            shape = predictors.shape
            predictors = [predictors]
        else:
            if len(predictors) != len(self.predictors):
                raise ValueError("Must provide same amount of predictors")
            shape = predictors[0].shape
            predictors = [predictor.flatten() for predictor in predictors]
        X = self.make_design_matrix(predictors)
        X = self.standardizer(X)
        # If the constant coefficient is taken care of elsewhere
        # (Ridge/Lasso drop the constant column but prepend β₀).
        if X.shape[1] == len(self.β) - 1:
            y = self.β[0] + X@self.β[1:]
        else:
            y = X@self.β
        return y.reshape(shape)
    def r2(self, predictors: Optional[Sequence[ndarray]] = None,
           response: Optional[ndarray] = None) -> float:
        """ Calculates the R² score

        Args:
            predictors: If no predictors are provided, the
                training predictors will be used. Must be set together
                with response.
            response: If no response is provided, the
                training response will be used. Must be set together
                with predictors.
        Returns:
            The R² score
        Raises:
            AssertionError if only one of predictors or response is provided.
        """
        if predictors is not None:
            ỹ = self.predict(predictors)
            assert response is not None, "Must provide a response"
        else:
            ỹ = self.predict(self.predictors)
        if response is not None:
            y = response
            assert predictors is not None, "Must provide predictors"
        else:
            y = self.response
        y̅ = y.mean()
        # R² = 1 - SS_res / SS_tot
        return 1 - np.sum((y - ỹ)**2) / np.sum((y - y̅)**2)
    def mse(self, predictors: Optional[Sequence[ndarray]] = None,
            response: Optional[ndarray] = None) -> float:
        """ Calculates the mean square error (MSE)

        Args:
            predictors: If no predictors are provided, the
                training predictors will be used. Must be set together
                with response.
            response: If no response is provided, the
                training response will be used. Must be set together
                with predictors.
        Returns:
            The MSE
        Raises:
            AssertionError if only one of predictors or response is provided.
        """
        if predictors is not None:
            ỹ = self.predict(predictors)
            assert response is not None, "Must provide a response"
        else:
            ỹ = self.predict(self.predictors)
        if response is not None:
            y = response
            assert predictors is not None, "Must provide predictors"
        else:
            y = self.response
        return np.mean((y - ỹ)**2)
    @property
    def SSE(self) -> float:
        """ Error sum of squares """
        return np.sum((self.response - self.predict(self.predictors))**2)
    @property
    def sigma2(self) -> float:
        """ Estimate of σ² (unbiased: SSE divided by residual df) """
        N, p = self.design_matrix.shape
        # Note that N - p = N - (k+1)
        std_err = 1/(N - p) * self.SSE
        return std_err
    @property
    def var(self) -> ndarray:
        """ The variance Var(β) = σ² diag((XᵀX)⁻¹) """
        X = self.design_matrix
        N, p = X.shape
        Σ = np.linalg.inv(X.T@X)
        return np.diag(Σ)*self.sigma2
    @property
    def tscore(self) -> ndarray:
        # t-statistic of each coefficient under H0: β_i = 0.
        tscore = self.β/np.sqrt(self.var)
        return tscore
    def ci(self, alpha: float) -> ndarray:
        """ Compute the 1-2α confidence interval

        Assumes t distribution of N df.

        Args:
            alpha: The percentile to compute
        Returns:
            The lower and upper limits of the CI as p×2 matrix
            where p is the number of predictors + intercept.
        """
        X = self.design_matrix
        N, p = X.shape
        # stats.t.interval(confidence, df) -> (lower, upper) quantiles.
        zalpha = np.asarray(stats.t.interval(alpha, N - p))
        σ = np.sqrt(self.var)
        ci = np.zeros((p, 2))
        for i, β in enumerate(self.β):
            ci[i, :] = β + zalpha*σ[i]
        return ci
    def betadict(self) -> Dict[str, float]:
        """ Get the terms of β and their coefficients

        Returns:
            Mapping from term label (e.g. 'x^2', 'x^1y^2') to coefficient.
        """
        assert self.β is not None
        assert self.orders is not None
        # Coefficient order mirrors the design-matrix column order:
        # constant, x powers, y powers, then interactions.
        coeffs = {'const': self.β[0]}
        i = 1
        for order in self.orders[0]:
            coeffs['x^'+str(order)] = self.β[i]
            i += 1
        for order in self.orders[1]:
            coeffs['y^'+str(order)] = self.β[i]
            i += 1
        if self.interactions:
            for x, y in itertools.product(*self.orders):
                if self.max_interaction is not None:
                    if x * y > self.max_interaction:
                        continue
                coeffs['x^'+str(x)+'y^'+str(y)] = self.β[i]
                i += 1
        return coeffs
    def df(self) -> ndarray:
        """ Compute the degrees of freedom as tr(H)

        H = X(XᵀX)⁻¹Xᵀ is the hat matrix; for plain OLS tr(H) equals the
        number of columns of X.
        """
        X = self.design_matrix
        assert X is not None
        H = X@np.linalg.inv(X.T@X)@X.T
        return np.trace(H)
class Ridge(Regressor):
    """ Implement Ridge regularization on top of OLS

    Fits β = (XᵀX + λI)⁻¹Xᵀy on a standardized design matrix without a
    constant column; the intercept is recovered separately as mean(y).

    Attributes:
        Same as Regressor.
        parameter: The regularization parameter λ
    """
    def __init__(self, predictors: Sequence[ndarray],
                 response: ndarray,
                 parameter: float) -> None:
        """ Initialize the matrix

        Args:
            predictors: A sequence of each predictor used for
                predicting the response.
            response: The response to model using the predictors
            parameter: The regularization parameter
        """
        super().__init__(predictors, response)
        self.parameter = parameter
    def make_design_matrix(self, predictors: ndarray) -> ndarray:
        """ Construct the design matrix given predictors and orders

        Does not have a constant term: the intercept must not be
        penalized, so it is handled separately in solve().
        The design matrix has the construction
        X X² ... X^N Y Y² ... Y^m XY X²Y ... X^NY XY² X²Y² ... X^NY^N
        Uses the attributes `orders` and `max_interaction`

        Args:
            predictors: The predictors, or basis functions, to use to
                construct the design matrix.
        Returns:
            The design matrix
        """
        matrix = super().make_design_matrix(predictors)
        # The constant coefficient can be removed
        matrix = matrix[:, 1:]
        return matrix
    def standardize(self, matrix: ndarray) -> ndarray:
        """ Standardize the predictors of the matrix

        Shifts the mean of each column to zero and scales by the
        standard deviation. Takes into account that the design matrix
        has no constant term.
        Also sets the `standardizer` attribute.

        Args:
            matrix: The matrix to standardize
        Returns:
            The standardized matrix.
        """
        # Standardize the matrix
        # Unlike the base class, every column is scaled (no constant column).
        mean = matrix.mean(axis=0)
        std = matrix.std(axis=0)
        def standardizer(mat):
            # All columns are real predictors here; standardize them all.
            mat = (mat - mean[np.newaxis, :])/std[np.newaxis, :]
            return mat
        self.standardizer = standardizer
        return standardizer(matrix)
    def solve(self, design_matrix: ndarray, response: ndarray) -> ndarray:
        """ Solve the regularized normal equations by direct inversion

        Adds regularization along the diagonal of X^T X.
        The intercept is prepended as mean(y) (valid because the
        columns of X are centered).

        Args:
            design_matrix: The matrix to solve.
            response: The response to model
        Returns:
            The solution β, intercept first
        """
        X = design_matrix
        y = response
        # if np.linalg.cond(self.design_matrix) < sys.float_info.epsilon:
        β = np.linalg.inv(X.T@X + self.parameter*np.eye(X.shape[1]))@X.T@y
        # The constant is given by 1/N Σ y_i
        β = np.array([np.mean(y), *β])
        return β
    def df(self) -> ndarray:
        # Effective degrees of freedom: tr(X(XᵀX + λI)⁻¹Xᵀ), shrinks
        # below the column count as λ grows.
        X = self.design_matrix
        assert X is not None
        H = X@np.linalg.inv(X.T@X + self.parameter*np.eye(X.shape[1]))@X.T
        return np.trace(H)
class Lasso(Ridge):
    """ Implements Lasso (L1) regularization

    Reuses the design-matrix construction and standardization of Ridge;
    only the solver differs, delegating to sklearn.linear_model.Lasso.
    """
    def solve(self, design_matrix: ndarray, response: ndarray) -> ndarray:
        """ Solve the system using sklearn's Lasso solver

        The intercept is not fitted by sklearn; it is prepended as
        mean(response), consistent with the Ridge solver.

        Args:
            design_matrix: The matrix to solve.
            response: The response to model
        Returns:
            The solution β, intercept first
        """
        lasso = skLasso(alpha=self.parameter, fit_intercept=False)
        lasso.fit(design_matrix, response)
        intercept = np.mean(response)
        return np.array([intercept, *lasso.coef_])
def vandermonde(predictors: Sequence[ndarray],
                orders: Sequence[Sequence[int]]) -> ndarray:
    """ Constructs a Vandermonde matrix

    The first column is the constant term 1; each predictor is then
    raised to each exponent in its corresponding orders list, filling
    successive columns.

    Args:
        predictors: The predictors to use. Must all have equal length.
        orders: Each predictor must be accompanied by a list of exponents.
    Returns:
        The resulting Vandermonde matrix of shape
        (n_samples, 1 + total number of exponents).
    Raises:
        ValueError or AssertionError if the input have inequal sizes.
    """
    if len(predictors) != len(orders) or not predictors:
        raise ValueError("Must provide same number of predictors as orders")
    size = predictors[0].size
    n_columns = 1 + sum(len(order) for order in orders)
    # np.ones pre-fills column 0 with the constant term.
    X = np.ones((size, n_columns))
    col = 1
    for predictor, exponents in zip(predictors, orders):
        assert predictor.size == size, "Predictors must have same size"
        for exponent in exponents:
            X[:, col] = (predictor**exponent).flatten()
            col += 1
    return X
def add_interactions(vandermonde: ndarray,
                     orders: Sequence[Sequence[int]],
                     max_interaction: Optional[int] = None) -> ndarray:
    """ Add interaction terms to the vandermonde matrix

    Args:
        vandermonde: The polynomial vandermonde matrix to use as
            basis for creating the interactions.
        orders: The orders corresponding to each column in the matrix
        max_interaction: The maximum degree of the interacting polynomial
            If None, no restriction is used
    Returns:
        The complete matrix (the input object itself when no
        interaction columns are added).
    """
    # First column is constant term
    assert len(orders) == 2, "Only two term interactions supported"
    offset = len(orders[0])
    # Collect all interaction columns first and concatenate once.
    # (The previous np.append-per-column approach copied the whole
    # matrix for every interaction term: O(k·n·m) instead of O(n·k).)
    interaction_cols = []
    for col_x, col_y in itertools.product(*orders):
        if max_interaction is not None and col_x * col_y > max_interaction:
            continue
        # NOTE: the exponent values double as column indices; this is
        # consistent only when each orders list is 1..n, which is what
        # Regressor.fit constructs.
        interaction_cols.append(vandermonde[:, col_x]
                                * vandermonde[:, col_y + offset])
    if interaction_cols:
        vandermonde = np.concatenate(
            [vandermonde, np.stack(interaction_cols, axis=1)], axis=1)
    return vandermonde
def lin_reg_inv(X: ndarray, y: ndarray):
    """ Solve the system Xβ = y via the normal equations (XᵀX)⁻¹Xᵀy """
    gram = X.T @ X
    return np.linalg.inv(gram) @ X.T @ y
def lin_reg_svd(x: ndarray, y: ndarray) -> ndarray:
    """ Solve the least-squares system Xβ = y via SVD (pseudoinverse) """
    u, s, vh = scl.svd(x)
    # Pseudoinverse of the (padded) singular-value matrix.
    sigma_pinv = scl.pinv(scl.diagsvd(s, u.shape[0], vh.shape[0]))
    return vh.T @ sigma_pinv @ u.T @ y
def isiterable(obj: Any) -> bool:
    """Return True if *obj* supports iteration, False otherwise."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
| [
"numpy.diag",
"numpy.trace",
"numpy.sum",
"numpy.eye",
"scipy.linalg.diagsvd",
"numpy.zeros",
"numpy.linalg.cond",
"numpy.append",
"scipy.linalg.svd",
"numpy.mean",
"numpy.linalg.inv",
"scipy.stats.t.interval",
"itertools.product",
"warnings.warn",
"sklearn.linear_model.Lasso",
"numpy.... | [((16602, 16628), 'itertools.product', 'itertools.product', (['*orders'], {}), '(*orders)\n', (16619, 16628), False, 'import itertools\n'), ((17198, 17208), 'scipy.linalg.svd', 'scl.svd', (['x'], {}), '(x)\n', (17205, 17208), True, 'import scipy.linalg as scl\n'), ((5198, 5227), 'numpy.linalg.cond', 'np.linalg.cond', (['design_matrix'], {}), '(design_matrix)\n', (5212, 5227), True, 'import numpy as np\n'), ((8820, 8841), 'numpy.mean', 'np.mean', (['((y - ỹ) ** 2)'], {}), '((y - ỹ) ** 2)\n', (8827, 8841), True, 'import numpy as np\n'), ((9359, 9381), 'numpy.linalg.inv', 'np.linalg.inv', (['(X.T @ X)'], {}), '(X.T @ X)\n', (9372, 9381), True, 'import numpy as np\n'), ((10003, 10020), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (10010, 10020), True, 'import numpy as np\n'), ((10033, 10049), 'numpy.zeros', 'np.zeros', (['(p, 2)'], {}), '((p, 2))\n', (10041, 10049), True, 'import numpy as np\n'), ((11116, 11127), 'numpy.trace', 'np.trace', (['H'], {}), '(H)\n', (11124, 11127), True, 'import numpy as np\n'), ((14129, 14140), 'numpy.trace', 'np.trace', (['H'], {}), '(H)\n', (14137, 14140), True, 'import numpy as np\n'), ((14736, 14786), 'sklearn.linear_model.Lasso', 'skLasso', ([], {'alpha': 'self.parameter', 'fit_intercept': '(False)'}), '(alpha=self.parameter, fit_intercept=False)\n', (14743, 14786), True, 'from sklearn.linear_model import Lasso as skLasso\n'), ((16853, 16903), 'numpy.append', 'np.append', (['vandermonde', 'product[..., None]'], {'axis': '(1)'}), '(vandermonde, product[..., None], axis=1)\n', (16862, 16903), True, 'import numpy as np\n'), ((3106, 3169), 'warnings.warn', 'warnings.warn', (['"""Number of features surpasses number of samples"""'], {}), "('Number of features surpasses number of samples')\n", (3119, 3169), False, 'import warnings\n'), ((9394, 9404), 'numpy.diag', 'np.diag', (['Σ'], {}), '(Σ)\n', (9401, 9404), True, 'import numpy as np\n'), ((9490, 9507), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), 
'(self.var)\n', (9497, 9507), True, 'import numpy as np\n'), ((9958, 9988), 'scipy.stats.t.interval', 'stats.t.interval', (['alpha', '(N - p)'], {}), '(alpha, N - p)\n', (9974, 9988), False, 'from scipy import stats\n'), ((10638, 10669), 'itertools.product', 'itertools.product', (['*self.orders'], {}), '(*self.orders)\n', (10655, 10669), False, 'import itertools\n'), ((17036, 17058), 'numpy.linalg.inv', 'np.linalg.inv', (['(X.T @ X)'], {}), '(X.T @ X)\n', (17049, 17058), True, 'import numpy as np\n'), ((7738, 7758), 'numpy.sum', 'np.sum', (['((y - ỹ) ** 2)'], {}), '((y - ỹ) ** 2)\n', (7744, 7758), True, 'import numpy as np\n'), ((7761, 7782), 'numpy.sum', 'np.sum', (['((y - y̅) ** 2)'], {}), '((y - y̅) ** 2)\n', (7767, 7782), True, 'import numpy as np\n'), ((11076, 11098), 'numpy.linalg.inv', 'np.linalg.inv', (['(X.T @ X)'], {}), '(X.T @ X)\n', (11089, 11098), True, 'import numpy as np\n'), ((13916, 13926), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (13923, 13926), True, 'import numpy as np\n'), ((14832, 14842), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (14839, 14842), True, 'import numpy as np\n'), ((17235, 17273), 'scipy.linalg.diagsvd', 'scl.diagsvd', (['s', 'u.shape[0]', 'v.shape[0]'], {}), '(s, u.shape[0], v.shape[0])\n', (17246, 17273), True, 'import scipy.linalg as scl\n'), ((13823, 13841), 'numpy.eye', 'np.eye', (['X.shape[1]'], {}), '(X.shape[1])\n', (13829, 13841), True, 'import numpy as np\n'), ((14090, 14108), 'numpy.eye', 'np.eye', (['X.shape[1]'], {}), '(X.shape[1])\n', (14096, 14108), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections.abc import Iterable
import distillation.architectures.feature_extractors.utils as futils
import distillation.architectures.feature_extractors.VGG_ImageNet as VGG_ImageNet
import distillation.architectures.feature_extractors.mobilenetv2 as mobilenet
class ResNetClassifier(nn.Module):
    """Classifier head split off a ResNet: avgpool -> reshape -> fc."""

    def __init__(self, model):
        super(ResNetClassifier, self).__init__()
        # Reuse the (possibly trained) modules of the full model.
        self.avgpool = model.avgpool
        self.reshape = model.reshape
        self.fc = model.fc

    def forward(self, x):
        pooled = self.avgpool(x)
        flat = self.reshape(pooled)
        return self.fc(flat)
class ResNetFeatureExtractor(futils.SequentialFeatureExtractorAbstractClass):
    """Sequential feature extractor over the stages of a ResNet trunk."""

    def __init__(self, model):
        # Pair each extraction point's name with its module, in
        # forward-pass order; the base class consumes parallel lists.
        named_stages = [
            ('conv1', model.conv1),
            ('bn1', model.bn1),
            ('relu', model.relu),
            ('maxpool', model.maxpool),
            ('layer1', model.layer1),
            ('layer2', model.layer2),
            ('layer3', model.layer3),
            ('layer4', model.layer4),
        ]
        all_feat_names = [name for name, _ in named_stages]
        feature_blocks = [stage for _, stage in named_stages]
        super(ResNetFeatureExtractor, self).__init__(
            all_feat_names, feature_blocks)
class VGG9Classifier(nn.Module):
    """Classifier head split off a VGG9: avgpool -> reshape -> fc."""

    def __init__(self, model):
        super(VGG9Classifier, self).__init__()
        # The VGG9 classifier is stored as a 3-module sequence.
        classifier = model._modules['classifier']
        self.avgpool = classifier[0]
        self.reshape = classifier[1]
        self.fc = classifier[2]

    def forward(self, x):
        pooled = self.avgpool(x)
        flat = self.reshape(pooled)
        return self.fc(flat)
class VGG9FeatureExtractor(futils.SequentialFeatureExtractorAbstractClass):
    """Split a VGG9's `features` sequence into extraction blocks.

    Args:
        model: VGG9 network whose `features` sequence is to be split.
        extract_from: indices (as strings or ints) of the `features`
            modules after which features should be extracted.
    """
    def __init__(self, model, extract_from):
        ext_frm = sorted([int(i) for i in extract_from]) # to make sure the order is right
        n_splits = len(ext_frm)
        # Name splits layer4, layer3, ... from the deepest extraction point.
        all_feat_names = ['layer' + str(4 - i) for i in range(len(ext_frm))]
        all_feat_names.reverse()
        # If the last module of `features` is not an extraction point, the
        # trailing modules form one extra block named 'layer_x'.
        if list(model._modules['features']._modules.keys())[-1] not in extract_from:
            n_splits+=1
            all_feat_names.append('layer_x')
        feature_blocks = [[] for i in range(n_splits)]
        j=0
        # Walk the features in order, starting a new block after each
        # extraction index (ext_frm is sorted, so j advances monotonically).
        for i, m in enumerate(model._modules['features']):
            feature_blocks[j].append(m)
            if j<len(ext_frm) and i==ext_frm[j]:
                j+=1
        # Wrap each group of modules into a single sequential block.
        feature_blocks_for_RN = [nn.Sequential(*b) for b in feature_blocks]
        super(VGG9FeatureExtractor, self).__init__(
            all_feat_names, feature_blocks_for_RN)
def split_network( model, extract_from_layer, after_relu):
    """
    Split *model* into a feature-extractor network and a classifier network.

    extract_from_layer: list of layer names from which features are to be extracted.
    after_relu: list of booleans, True means use relu after the extraction layer
        specified in extract_from_layer list.

    Returns a (feature_extractor, classifier) pair of modules.
    """
    if len(extract_from_layer) == 1:
        # Single split point: walk the model's top-level modules and cut there.
        feat_ext = []
        clf = []
        key_names = list(model._modules.keys())
        name = extract_from_layer[0]
        extract_from = key_names.index(name)
        for i in range(extract_from+1): # Feature extractor
            feat_ext.append(model._modules[key_names[i]])
        i = extract_from+1
        if after_relu[0]: # Include till next ReLU
            # An interposed BatchNorm (key starting with 'bn') is pulled into
            # the extractor together with the following ReLU.
            if key_names[i].startswith('bn'):
                feat_ext.append(model._modules[key_names[i]]) # BatchNorm
                i +=1
            feat_ext.append(model._modules[key_names[i]]) # ReLU
            i +=1
        for key in key_names[i:]: # Classifier
            clf.append(model._modules[key])
        if isinstance(model, mobilenet.MobileNetV2): # MobileNet2 for CIFAR
            # MobileNetV2 nests layers inside iterable containers; flatten one
            # level so the extractor becomes a plain sequence of layers.
            fext = nn.Sequential(*feat_ext)
            feat_ext = []
            for k in fext._modules.keys():
                if isinstance(fext._modules[k], Iterable):
                    feat_ext.extend(list(fext._modules[k]))
                else:
                    feat_ext.append(fext._modules[k])
        return nn.Sequential(*feat_ext), nn.Sequential(*clf)
    else: # For multiple extraction layers, implemented split only for VGG9 and ResNet34.
        if isinstance(model, VGG_ImageNet.VGG): # VGG9. As in VGG9 network the layers are named by digits.
            feature_ext = VGG9FeatureExtractor(model, extract_from_layer)
            classifier = VGG9Classifier(model)
            return feature_ext, classifier
        else: # ResNet34
            # Post-ReLU extraction is not supported on the ResNet path.
            assert all([(x == False) for x in after_relu])
            feature_ext = ResNetFeatureExtractor(model)
            classifier = ResNetClassifier(model)
            return feature_ext, classifier
class Reshape(nn.Module):
    """Module wrapper around ``Tensor.view`` with a fixed target shape."""

    def __init__(self, *dims):
        super(Reshape, self).__init__()
        # Target shape, captured as the positional-argument tuple.
        self.shape = dims

    def forward(self, x):
        return x.view(self.shape)
def global_pooling(x, pool_type):
    """Pool a 4D (N, C, H, W) tensor down to 1x1 spatially.

    pool_type: 'max' or 'avg'; anything else raises ValueError.
    """
    assert(x.dim() == 4)
    kernel = (x.size(2), x.size(3))
    if pool_type == 'avg':
        return F.avg_pool2d(x, kernel)
    if pool_type == 'max':
        return F.max_pool2d(x, kernel)
    raise ValueError('Unknown pooling type.')
class GlobalPooling(nn.Module):
    """Module form of ``global_pooling`` with the pool type fixed at
    construction; only 'avg' and 'max' are accepted."""

    def __init__(self, pool_type):
        super(GlobalPooling, self).__init__()
        assert(pool_type == 'avg' or pool_type == 'max')
        self.pool_type = pool_type

    def forward(self, x):
        return global_pooling(x, pool_type=self.pool_type)
class Conv2dCos(nn.Module):
def __init__(self, in_planes, out_planes, bias=False, scale=None, learn_scale=True):
super(Conv2dCos, self).__init__()
self.in_planes = in_planes
self.out_planes = out_planes
weight = torch.FloatTensor(out_planes, in_planes, 1, 1).normal_(
0.0, np.sqrt(2.0/in_planes))
self.weight = nn.Parameter(weight, requires_grad=True)
if bias:
bias = torch.FloatTensor(out_planes).fill_(0.0)
self.bias = nn.Parameter(bias, requires_grad=True)
else:
self.bias = None
if scale:
scale = torch.FloatTensor(1).fill_(scale)
self.scale = nn.Parameter(scale, requires_grad=learn_scale)
else:
self.scale = None
def forward(self, x):
weight = self.weight
x = F.normalize(x, p=2, dim=1, eps=1e-12)
weight = F.normalize(weight, p=2, dim=1, eps=1e-12)
if self.scale is not None:
weight = weight * self.scale.view(-1, 1, 1, 1)
return F.conv2d(x, weight, bias=self.bias, stride=1, padding=0)
| [
"torch.nn.Parameter",
"torch.nn.Sequential",
"torch.nn.functional.conv2d",
"torch.FloatTensor",
"torch.nn.functional.normalize",
"numpy.sqrt"
] | [((5872, 5912), 'torch.nn.Parameter', 'nn.Parameter', (['weight'], {'requires_grad': '(True)'}), '(weight, requires_grad=True)\n', (5884, 5912), True, 'import torch.nn as nn\n'), ((6353, 6390), 'torch.nn.functional.normalize', 'F.normalize', (['x'], {'p': '(2)', 'dim': '(1)', 'eps': '(1e-12)'}), '(x, p=2, dim=1, eps=1e-12)\n', (6364, 6390), True, 'import torch.nn.functional as F\n'), ((6408, 6450), 'torch.nn.functional.normalize', 'F.normalize', (['weight'], {'p': '(2)', 'dim': '(1)', 'eps': '(1e-12)'}), '(weight, p=2, dim=1, eps=1e-12)\n', (6419, 6450), True, 'import torch.nn.functional as F\n'), ((6560, 6616), 'torch.nn.functional.conv2d', 'F.conv2d', (['x', 'weight'], {'bias': 'self.bias', 'stride': '(1)', 'padding': '(0)'}), '(x, weight, bias=self.bias, stride=1, padding=0)\n', (6568, 6616), True, 'import torch.nn.functional as F\n'), ((2442, 2459), 'torch.nn.Sequential', 'nn.Sequential', (['*b'], {}), '(*b)\n', (2455, 2459), True, 'import torch.nn as nn\n'), ((3789, 3813), 'torch.nn.Sequential', 'nn.Sequential', (['*feat_ext'], {}), '(*feat_ext)\n', (3802, 3813), True, 'import torch.nn as nn\n'), ((4093, 4117), 'torch.nn.Sequential', 'nn.Sequential', (['*feat_ext'], {}), '(*feat_ext)\n', (4106, 4117), True, 'import torch.nn as nn\n'), ((4119, 4138), 'torch.nn.Sequential', 'nn.Sequential', (['*clf'], {}), '(*clf)\n', (4132, 4138), True, 'import torch.nn as nn\n'), ((5826, 5850), 'numpy.sqrt', 'np.sqrt', (['(2.0 / in_planes)'], {}), '(2.0 / in_planes)\n', (5833, 5850), True, 'import numpy as np\n'), ((6015, 6053), 'torch.nn.Parameter', 'nn.Parameter', (['bias'], {'requires_grad': '(True)'}), '(bias, requires_grad=True)\n', (6027, 6053), True, 'import torch.nn as nn\n'), ((6194, 6240), 'torch.nn.Parameter', 'nn.Parameter', (['scale'], {'requires_grad': 'learn_scale'}), '(scale, requires_grad=learn_scale)\n', (6206, 6240), True, 'import torch.nn as nn\n'), ((5753, 5799), 'torch.FloatTensor', 'torch.FloatTensor', (['out_planes', 'in_planes', '(1)', '(1)'], {}), 
'(out_planes, in_planes, 1, 1)\n', (5770, 5799), False, 'import torch\n'), ((5950, 5979), 'torch.FloatTensor', 'torch.FloatTensor', (['out_planes'], {}), '(out_planes)\n', (5967, 5979), False, 'import torch\n'), ((6135, 6155), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)'], {}), '(1)\n', (6152, 6155), False, 'import torch\n')] |
__author__ = 'joseph'
import numpy as np
class ActivationFunction(object):
    """Interface for activation functions: subclasses provide the forward
    mapping and its elementwise derivative."""

    def activation(self, x):
        """Apply the activation elementwise to x."""
        pass

    def derivative(self, x):
        """Elementwise derivative of the activation evaluated at x."""
        pass
class LinearActivation(ActivationFunction):
    """Identity activation: f(x) = x; derivative is 1 everywhere."""

    def activation(self, x):
        return x

    def derivative(self, x):
        # Array of ones with x's shape (note: float64 regardless of x's dtype).
        return np.ones(x.shape)
def sigmoid(x):
    """Logistic sigmoid, 1 / (1 + e^-x), applied elementwise."""
    return 1 / (1 + np.exp(-x))


def sig_deriv(x):
    """Sigmoid derivative: s(x) * (1 - s(x))."""
    value = sigmoid(x)
    return value * (1 - value)
class SigmoidActivation(ActivationFunction):
    """Logistic sigmoid activation; delegates to the module-level helpers."""

    def activation(self, x):
        return sigmoid(x)

    def derivative(self, x):
        return sig_deriv(x)
def relu(x):
    """Rectified linear unit: x where x > 0, else 0 (via a boolean mask)."""
    positive_mask = x > 0.0
    return x * positive_mask


def relu_deriv(x):
    """ReLU derivative as a boolean mask: True (1) where x > 0, else False (0)."""
    return x > 0.0
class ReluActivation(ActivationFunction):
    """ReLU activation; delegates to the module-level helpers."""

    def activation(self, x):
        return relu(x)

    def derivative(self, x):
        # Boolean mask; usable directly as a 0/1 factor in backprop products.
        return relu_deriv(x)
def leaky_relu(x):
    """Leaky ReLU: x where x > 0, else 0.01 * x."""
    positive_part = x * (x > 0.0)
    negative_part = 0.01 * x * (x <= 0.0)
    return positive_part + negative_part


def leaky_relu_deriv(x):
    """Leaky-ReLU derivative: 1 where x > 0, else 0.01."""
    return (0.99 * (x > 0.0)) + 0.01
class LeakyReluActivation(ActivationFunction):
    """Leaky ReLU activation (negative slope 0.01); delegates to helpers."""

    def activation(self, x):
        return leaky_relu(x)

    def derivative(self, x):
        return leaky_relu_deriv(x)
def tanh_deriv(x):
    """Derivative of tanh: 1 - tanh(x)^2 (i.e. sech^2(x))."""
    return 1.0 - np.tanh(x) ** 2.0
class TanhActivation(ActivationFunction):
    """Hyperbolic-tangent activation; derivative delegates to tanh_deriv."""

    def activation(self, x):
        return np.tanh(x)

    def derivative(self, x):
        return tanh_deriv(x)
| [
"numpy.tanh",
"numpy.exp",
"numpy.ones"
] | [((300, 316), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (307, 316), True, 'import numpy as np\n'), ((1174, 1184), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (1181, 1184), True, 'import numpy as np\n'), ((1310, 1320), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (1317, 1320), True, 'import numpy as np\n'), ((355, 365), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (361, 365), True, 'import numpy as np\n')] |
from pycolate.grid_engine import grid
import numpy as np
class MeanFieldSandpile:
    """Mean-field (random-neighbour) sandpile: grains are dropped on random
    sites; a site exceeding the threshold topples, dissipating grains and
    re-driving a random other site. Avalanche sizes (topplings per drive)
    are accumulated in ``self.avalanche_sizes``.

    When ``graphics`` is on, every drive/topple is captured as an image
    frame via the pycolate grid engine.
    """

    def __init__(
        self,
        initconfig: list,
        theshold: int,
        dissipation_amount: int,
        graphics: bool = True,
        rulebook: dict = None,
    ):
        # NOTE(review): parameter name "theshold" (sic) is kept — it is part
        # of the public signature.
        self._theshold = theshold
        self._dissipation_amount = dissipation_amount
        self._rulebook = {}
        # Gates the debug prints below (name keeps the original spelling).
        self.__dedug_messages = True
        self.avalanche_sizes = []
        # Reject configurations that already contain an over-threshold site.
        if any([i > theshold for i in initconfig]):
            raise Exception("The initial configuration is not stable.")
        else:
            self._config = initconfig
        self.__to_tople = False
        self.__want_graphics = graphics
        if self.__want_graphics:
            if rulebook == None:
                # Default colour map: sub-threshold heights white, then
                # yellow/orange/red around the threshold.
                for height in range(0, theshold):
                    self._rulebook[height] = "white"
                self._rulebook[theshold - 1] = "yellow"
                self._rulebook[theshold] = "orange"
                self._rulebook[theshold + 1] = "red"
            else:
                self._rulebook = rulebook
            self.graphics = grid(self._config, self._rulebook, 30)
            self._frame_list = []
        self.__debug("Inital config:")

    def _drive_pile(self, ignore_list: list = [], number_to_drive: int = 1):
        """Add one grain to a random site not in ignore_list, then snapshot."""
        # NOTE(review): mutable default for ignore_list — harmless here since
        # it is only read, never mutated. number_to_drive is currently unused.
        rng = np.random.default_rng()
        # NOTE(review): Generator.integers has an *exclusive* upper bound by
        # default, so the last site is never selected — confirm intended.
        site_to_drive = rng.integers(0, len(self._config) - 1)
        while site_to_drive in ignore_list:
            site_to_drive = rng.integers(0, len(self._config) - 1)
        self._config[site_to_drive] += 1
        self.__snapshot()

    def _relax_pile(self):
        """Topple at most one over-threshold site; sets the topple flag so
        cycle() knows whether to keep relaxing."""
        to_topple = [i > self._theshold for i in self._config]
        if any(to_topple):
            site_to_topple = np.where(to_topple)[0][0]
            self._config[site_to_topple] -= self._dissipation_amount
            # Mean-field rule: the displaced grain lands on a random site
            # other than the one that just toppled.
            self._drive_pile(ignore_list=[site_to_topple])
            self.__to_tople = True
        else:
            self.__to_tople = False

    def cycle(self, drives: int):
        """Drive the pile *drives* times, fully relaxing after each drive and
        recording the avalanche size (number of topplings) per drive."""
        self.__snapshot()
        for _ in range(drives):
            current_avalanche_size = 0
            self._drive_pile()
            self._relax_pile()
            while self.__to_tople:
                current_avalanche_size += 1
                self._relax_pile()
            self.avalanche_sizes.append(current_avalanche_size)

    def make_gif(self, path: str, file_name: str):
        """Write the captured frames as an animated GIF (1 s per frame)."""
        self._frame_list[0].save(
            f"{path}/{file_name}.gif",
            format="GIF",
            append_images=self._frame_list[1:],
            save_all=True,
            duration=1000,
            loop=0,
        )

    def make_frames(self, path: str):
        """Dump each captured frame as a numbered PNG (0.png, 1.png, ...)."""
        x = 0
        for image in self._frame_list:
            image.save(f"{path}/{x}.png")
            x += 1

    def __snapshot(self):
        # Capture the current configuration as an image frame; no-op when
        # graphics are disabled.
        if self.__want_graphics:
            self.graphics.draw_config(self._config)
            self._frame_list.append(self.graphics.image.copy())

    def __debug(self, debug_message):
        # Print only while the (currently always-True) debug flag is set.
        if self.__dedug_messages:
            print(f"{debug_message}: {self._config}")
if __name__ == "__main__":
    # Smoke run: four empty sites, threshold 1, dissipation 1, three drives.
    m = MeanFieldSandpile([0, 0, 0, 0], 1, 1)
    m.cycle(3)
    # Hard-coded, machine-specific output directory.
    m.make_frames(
        "/Users/mac/Library/Mobile Documents/com~apple~CloudDocs/Projects/pycolate/pycolate/pycolate/image_test"
    )
| [
"numpy.random.default_rng",
"numpy.where",
"pycolate.grid_engine.grid"
] | [((1340, 1363), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (1361, 1363), True, 'import numpy as np\n'), ((1134, 1172), 'pycolate.grid_engine.grid', 'grid', (['self._config', 'self._rulebook', '(30)'], {}), '(self._config, self._rulebook, 30)\n', (1138, 1172), False, 'from pycolate.grid_engine import grid\n'), ((1753, 1772), 'numpy.where', 'np.where', (['to_topple'], {}), '(to_topple)\n', (1761, 1772), True, 'import numpy as np\n')] |
from statsmodels.tsa.arima_model import AR
from pandas import read_csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
def main():
    """Load '2.csv', derive the vector magnitude of its first three columns,
    fit an autoregressive model to it, and plot raw vs. smoothed series."""
    data_dir = os.path.abspath(os.curdir)
    series = read_csv(data_dir + "/2.csv", header=0, index_col=0, squeeze=True)
    series.columns = ['a', 'b', 'c', 'd']
    # Euclidean magnitude of the (a, b, c) components.
    series['e'] = pow((series['a']*series['a'] + series['b']*series['b'] + series['c']*series['c']), 0.5)
    magnitudes = np.array(pd.DataFrame({'$a': series['e']}))
    plt.plot(magnitudes, '--*b')
    ar_model = AR(magnitudes)
    fitted = ar_model.fit()
    smoothed = fitted.predict()
    plt.plot(smoothed, '--*r')
    plt.legend(['original', 'smoothed'], loc=0)
    plt.grid(True)
    plt.xlabel("Indices", fontsize=12)
    plt.ylabel("Data", fontsize=12)
    plt.show()


if __name__ == '__main__':
    main()
"pandas.DataFrame",
"os.path.abspath",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"statsmodels.tsa.arima_model.AR",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((175, 201), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (190, 201), False, 'import os\n'), ((213, 279), 'pandas.read_csv', 'read_csv', (["(filepath + '/2.csv')"], {'header': '(0)', 'index_col': '(0)', 'squeeze': '(True)'}), "(filepath + '/2.csv', header=0, index_col=0, squeeze=True)\n", (221, 279), False, 'from pandas import read_csv\n'), ((427, 460), 'pandas.DataFrame', 'pd.DataFrame', (["{'$a': series['e']}"], {}), "({'$a': series['e']})\n", (439, 460), True, 'import pandas as pd\n'), ((480, 492), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (488, 492), True, 'import numpy as np\n'), ((494, 514), 'matplotlib.pyplot.plot', 'plt.plot', (['fd', '"""--*b"""'], {}), "(fd, '--*b')\n", (502, 514), True, 'import matplotlib.pyplot as plt\n'), ((526, 532), 'statsmodels.tsa.arima_model.AR', 'AR', (['fd'], {}), '(fd)\n', (528, 532), False, 'from statsmodels.tsa.arima_model import AR\n'), ((580, 599), 'matplotlib.pyplot.plot', 'plt.plot', (['r', '"""--*r"""'], {}), "(r, '--*r')\n", (588, 599), True, 'import matplotlib.pyplot as plt\n'), ((600, 643), 'matplotlib.pyplot.legend', 'plt.legend', (["['original', 'smoothed']"], {'loc': '(0)'}), "(['original', 'smoothed'], loc=0)\n", (610, 643), True, 'import matplotlib.pyplot as plt\n'), ((644, 658), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (652, 658), True, 'import matplotlib.pyplot as plt\n'), ((660, 694), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Indices"""'], {'fontsize': '(12)'}), "('Indices', fontsize=12)\n", (670, 694), True, 'import matplotlib.pyplot as plt\n'), ((695, 726), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Data"""'], {'fontsize': '(12)'}), "('Data', fontsize=12)\n", (705, 726), True, 'import matplotlib.pyplot as plt\n'), ((727, 737), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (735, 737), True, 'import matplotlib.pyplot as plt\n')] |
import json
import os
import pickle
from pathlib import Path
from typing import List, NamedTuple, Tuple
import numpy as np
import pandas as pd
import PIL.Image
import pycocotools.mask as coco_mask
from src.config.config import NUM_CLASSES
def use_sc_cam_format(data: dict, with_pillow=False):
    """
    Unpack a sample dict into the (name, image, label) triple SC-CAM expects.

    with_pillow: convert the image (assumed to expose .numpy() — e.g. a
        tensor) to an RGB PIL image before returning.
    """
    sample_name = data["input_id"]
    image = data["image"]
    if with_pillow:
        image = PIL.Image.fromarray(image.numpy(), mode="RGB")
    return sample_name, image, data["target"]
def get_onehot_multilabel(df: pd.DataFrame) -> Tuple[pd.DataFrame, List[np.ndarray]]:
    """Convert the pipe-separated 'Label' column into multi-hot int32 vectors,
    stored back into df['onehot'] and also returned as a list."""
    onehot: List[np.ndarray] = []
    for label_str in df["Label"]:
        class_ids = [int(tok) for tok in label_str.split("|")]
        vec = np.eye(NUM_CLASSES, dtype="float")[class_ids]
        vec = vec.sum(axis=0).astype(np.int32)
        # for discussion/217806
        onehot.append(np.clip(vec, 0, 1))
    df["onehot"] = onehot
    return df, onehot
def get_sub_label_save_path(
    save_folder: str, for_round_nb: int = 1
) -> Tuple[str, str, str]:
    """Build the three label-file paths for one self-training round.

    Returns (filename-list json, 20-class label npy, 200-class label npy).
    """
    prefix = "{}/label/R{}_train".format(save_folder, for_round_nb)
    id_list_path = prefix + "_filename_list.json"
    label_20_path = prefix + "_label_20.npy"
    label_200_path = prefix + "_label_200.npy"
    return id_list_path, label_20_path, label_200_path
def save_sub_labels(
    train_filename_list: List[str],
    train_label_20: np.ndarray,
    train_label_200: np.ndarray,
    save_folder: str,
    for_round_nb: int = 1,
) -> None:
    """Persist the sub-category label files produced for one training round:
    the filename list as JSON plus the 200- and 20-class label arrays."""
    id_list_path, label_20_path, label_200_path = get_sub_label_save_path(
        save_folder, for_round_nb
    )
    with open(id_list_path, "w") as fp:
        json.dump(train_filename_list, fp)
    np.save(label_200_path, train_label_200)
    np.save(label_20_path, train_label_20)
def load_sub_labels(
    save_folder: str, round_nb: int = 1
) -> Tuple[List[str], np.ndarray, np.ndarray]:
    """Load the sub-category label files saved for *round_nb*; returns
    (filename list, 20-class labels, 200-class labels)."""
    id_list_path, label_20_path, label_200_path = get_sub_label_save_path(
        save_folder, round_nb
    )
    with open(id_list_path, "r") as fp:
        train_filename_list = json.load(fp)
    return train_filename_list, np.load(label_20_path), np.load(label_200_path)
def save_segm_label(
    samples: dict,
    error_samples: list,
    save_folder: str,
    save_name: str = "segm_full.pkl",
) -> None:
    """Pickle the segmentation samples to *save_name* and the failed-sample
    list alongside it ('<name>_error.pkl')."""
    main_path = os.path.join(save_folder, save_name)
    error_path = os.path.join(save_folder, save_name.replace(".pkl", "_error.pkl"))
    with open(main_path, "wb") as fp:
        pickle.dump(samples, fp)
    with open(error_path, "wb") as fp:
        pickle.dump(error_samples, fp)
class ErrorSample(NamedTuple):
    """Record of a sample flagged as erroneous during segmentation-label
    generation (see save_segm_label / load_segm_label)."""

    # Sample identifier.
    ID: str
    # Row index of the sample in the source CSV; -1 when unknown.
    csv_idx: int = -1
def load_segm_label(
    save_folder: str, save_name: str = "segm_full.pkl"
) -> Tuple[dict, list]:
    """Load the pickled segmentation samples plus the error-sample list.

    If the error pickle is missing, fall back to a hard-coded known-bad
    sample (temporary workaround).
    """
    with open(os.path.join(save_folder, save_name), "rb") as fp:
        samples = pickle.load(fp)
    error_path = os.path.join(save_folder, save_name.replace(".pkl", "_error.pkl"))
    try:
        with open(error_path, "rb") as fp:
            error_samples = pickle.load(fp)
    except FileNotFoundError as e:
        print(f"load hard code error sample for temporay work around {e}")
        error_samples = [ErrorSample("940f418a-bba4-11e8-b2b9-ac1f6b6435d0")]
    return samples, error_samples
def get_class_mask_from_ins(
    class_ids: np.ndarray,
    confs: np.ndarray,
    rles: list,
    mask_idxs: List[int],
    conf_thresh: float = 0.1,
    num_classes: int = 19,
    is_add_true_bkg: bool = True,
    nega_class=18,
) -> Tuple[np.ndarray, List[int]]:
    """Merge per-instance COCO RLE masks into per-class masks.

    Instances of a class with confidence >= conf_thresh are merged into
    that class's channel; instances never selected by any class end up in
    the ``nega_class`` channel; when ``is_add_true_bkg`` an extra trailing
    channel marks pixels covered by no labeled channel at all.

    Returns (H x W x C uint8 mask stack, list of channel indices that
    received labels).

    NOTE(review): the empty-``rles`` early return yields a bare empty array
    rather than the (masks, classes) tuple the annotation promises —
    confirm callers handle this case.
    """
    if is_add_true_bkg:
        num_classes += 1
    if len(rles) == 0:
        return np.empty(0)
    else:
        # Fortran order matches pycocotools' RLE decode layout.
        class_masks = np.zeros(
            rles[0]["size"] + [num_classes], dtype=np.uint8, order="F"
        )
        labeled_idxs = []
        labeled_classes = []
        for class_id in set(class_ids):
            # Instances of this class passing the confidence threshold.
            ins_idx = np.where((class_ids == class_id) & (confs >= conf_thresh))[0]
            labeled_idxs.extend(ins_idx.tolist())
            class_mask = [rles[i] for i in ins_idx]
            if len(class_mask) > 0:
                class_masks[..., class_id] = coco_mask.decode(coco_mask.merge(class_mask))
                labeled_classes.append(class_id)
        # plt.imshow(coco_mask.decode(coco_mask.merge(rles)))
        # plt.imshow(class_masks.sum(axis=-1))
        # Map local instance positions back to caller-provided mask ids, then
        # find the ids never claimed by any class.
        labeled_idxs = set([mask_idxs[i] for i in set(labeled_idxs)])
        un_labeled_idxs = set(mask_idxs) - set(labeled_idxs)
        if len(un_labeled_idxs) > 0:
            ins_idx = [mask_idxs.index(i) for i in un_labeled_idxs]
            class_mask = [rles[i] for i in ins_idx]
            class_masks[..., nega_class] += coco_mask.decode(coco_mask.merge(class_mask))
            # Clamp in case the negative channel overlapped an existing mask.
            class_masks = np.clip(class_masks, 0, 1)
            labeled_classes.append(nega_class)
        # class_masks = np.ascontiguousarray(class_masks)
        if is_add_true_bkg:
            # Last channel: 1 wherever no labeled channel covers the pixel.
            class_masks[..., -1] = np.where(
                class_masks[..., labeled_classes + [nega_class]].sum(axis=-1) == 0, 1, 0
            )
            labeled_classes.append(nega_class + 1)
        return class_masks, list(set(labeled_classes))
def check_sub_label_def(
    train_label_20: np.ndarray, train_label_200: np.ndarray, k_center: int = 10
) -> None:
    """Assert that every active fine (200-way) label maps onto its coarse
    (20-way) parent, assuming k_center fine classes per coarse class."""
    fine_ids = np.where(train_label_200 == 1)[0]
    coarse_ids = np.where(train_label_20 == 1)[0]
    parents = fine_ids // k_center
    assert np.all(coarse_ids == parents)
def get_img_path(data_dir: Path, img_id: str, with_ext_data=False) -> Path:
    """Resolve the on-disk path of a training image under data_dir/train.

    Raises NotImplementedError when external data is requested — that
    layout is not wired up yet.
    """
    if with_ext_data:
        raise NotImplementedError
    return Path(data_dir, "train", img_id)
| [
"json.dump",
"numpy.load",
"numpy.save",
"json.load",
"pickle.dump",
"pycocotools.mask.merge",
"numpy.empty",
"numpy.zeros",
"numpy.clip",
"pathlib.Path",
"pickle.load",
"numpy.where",
"numpy.eye",
"os.path.join",
"numpy.all"
] | [((1823, 1863), 'numpy.save', 'np.save', (['label_200_path', 'train_label_200'], {}), '(label_200_path, train_label_200)\n', (1830, 1863), True, 'import numpy as np\n'), ((1868, 1906), 'numpy.save', 'np.save', (['label_20_path', 'train_label_20'], {}), '(label_20_path, train_label_20)\n', (1875, 1906), True, 'import numpy as np\n'), ((2233, 2256), 'numpy.load', 'np.load', (['label_200_path'], {}), '(label_200_path)\n', (2240, 2256), True, 'import numpy as np\n'), ((2278, 2300), 'numpy.load', 'np.load', (['label_20_path'], {}), '(label_20_path)\n', (2285, 2300), True, 'import numpy as np\n'), ((5469, 5500), 'numpy.all', 'np.all', (['(cls_20 == cls200_parent)'], {}), '(cls_20 == cls200_parent)\n', (5475, 5500), True, 'import numpy as np\n'), ((5676, 5711), 'pathlib.Path', 'Path', (['data_dir', 'folder_name', 'img_id'], {}), '(data_dir, folder_name, img_id)\n', (5680, 5711), False, 'from pathlib import Path\n'), ((892, 908), 'numpy.clip', 'np.clip', (['y', '(0)', '(1)'], {}), '(y, 0, 1)\n', (899, 908), True, 'import numpy as np\n'), ((1785, 1818), 'json.dump', 'json.dump', (['train_filename_list', 'f'], {}), '(train_filename_list, f)\n', (1794, 1818), False, 'import json\n'), ((2197, 2209), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2206, 2209), False, 'import json\n'), ((2576, 2599), 'pickle.dump', 'pickle.dump', (['samples', 'f'], {}), '(samples, f)\n', (2587, 2599), False, 'import pickle\n'), ((2716, 2745), 'pickle.dump', 'pickle.dump', (['error_samples', 'f'], {}), '(error_samples, f)\n', (2727, 2745), False, 'import pickle\n'), ((3021, 3035), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3032, 3035), False, 'import pickle\n'), ((3787, 3798), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (3795, 3798), True, 'import numpy as np\n'), ((3831, 3899), 'numpy.zeros', 'np.zeros', (["(rles[0]['size'] + [num_classes])"], {'dtype': 'np.uint8', 'order': '"""F"""'}), "(rles[0]['size'] + [num_classes], dtype=np.uint8, order='F')\n", (3839, 3899), True, 
'import numpy as np\n'), ((4823, 4849), 'numpy.clip', 'np.clip', (['class_masks', '(0)', '(1)'], {}), '(class_masks, 0, 1)\n', (4830, 4849), True, 'import numpy as np\n'), ((5338, 5368), 'numpy.where', 'np.where', (['(train_label_200 == 1)'], {}), '(train_label_200 == 1)\n', (5346, 5368), True, 'import numpy as np\n'), ((5385, 5414), 'numpy.where', 'np.where', (['(train_label_20 == 1)'], {}), '(train_label_20 == 1)\n', (5393, 5414), True, 'import numpy as np\n'), ((767, 801), 'numpy.eye', 'np.eye', (['NUM_CLASSES'], {'dtype': '"""float"""'}), "(NUM_CLASSES, dtype='float')\n", (773, 801), True, 'import numpy as np\n'), ((2518, 2554), 'os.path.join', 'os.path.join', (['save_folder', 'save_name'], {}), '(save_folder, save_name)\n', (2530, 2554), False, 'import os\n'), ((2953, 2989), 'os.path.join', 'os.path.join', (['save_folder', 'save_name'], {}), '(save_folder, save_name)\n', (2965, 2989), False, 'import os\n'), ((3193, 3207), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3204, 3207), False, 'import pickle\n'), ((4024, 4082), 'numpy.where', 'np.where', (['((class_ids == class_id) & (confs >= conf_thresh))'], {}), '((class_ids == class_id) & (confs >= conf_thresh))\n', (4032, 4082), True, 'import numpy as np\n'), ((4772, 4799), 'pycocotools.mask.merge', 'coco_mask.merge', (['class_mask'], {}), '(class_mask)\n', (4787, 4799), True, 'import pycocotools.mask as coco_mask\n'), ((4270, 4297), 'pycocotools.mask.merge', 'coco_mask.merge', (['class_mask'], {}), '(class_mask)\n', (4285, 4297), True, 'import pycocotools.mask as coco_mask\n')] |
from ...isa.inst import *
import numpy as np
class Fcvt_wu_s(Inst):
    """Golden (reference) model for `fcvt.wu.s`: convert a float operand to
    an unsigned 32-bit integer with saturating clamping."""
    name = 'fcvt.wu.s'

    def golden(self):
        # Only produce a result when the operand is present; otherwise fall
        # through and implicitly return None.
        if 'val1' in self.keys():
            # Negative inputs (including -inf) clamp to the minimum, 0.
            if self['val1'] < 0 or np.isneginf(self['val1']):
                return 0
            # Overflow, +inf and NaN all clamp to the maximum, 2**32 - 1.
            if self['val1'] > ((1<<32)-1) or np.isposinf(self['val1']) or np.isnan(self['val1']):
                return (1<<32)-1
            # In-range values: int() truncates toward zero.
            return int(self['val1'])
| [
"numpy.isneginf",
"numpy.isposinf",
"numpy.isnan"
] | [((184, 209), 'numpy.isneginf', 'np.isneginf', (["self['val1']"], {}), "(self['val1'])\n", (195, 209), True, 'import numpy as np\n'), ((281, 306), 'numpy.isposinf', 'np.isposinf', (["self['val1']"], {}), "(self['val1'])\n", (292, 306), True, 'import numpy as np\n'), ((310, 332), 'numpy.isnan', 'np.isnan', (["self['val1']"], {}), "(self['val1'])\n", (318, 332), True, 'import numpy as np\n')] |
# Author: <NAME>
#
# The purpose of this program is to, given a file name containing
# data collected from the instruments attached to the bubble chamber,
# create a data structure capable of storing the instruments and
# their readouts. The information for any given instrument should
# then be easily accessible and printable. Additional functionality
# for plotting any given instrument readout versus time is also
# included.
# import pdb
import numpy as np
# import matplotlib.pyplot as plt
# import SBCcode as sbc
# Print arrays in full, never truncated. Modern NumPy rejects np.nan as a
# threshold (it must be a real number); sys.maxsize is the documented way
# to disable summarization.
import sys

np.set_printoptions(threshold=sys.maxsize)
def DataTrim(dictionary, instrument):
    '''
    Return the readings of *instrument* at the samples where the trigger is
    latched (TriggerLatch == 0), as a float64 array. Unknown instruments
    yield an all-zero array of the same length.
    '''
    instrument = str(instrument)
    # Boolean mask of latched samples replaces the original counting loop;
    # the unused parallel "trim_time" array has been dropped.
    latched = np.asarray(dictionary['TriggerLatch']) == 0
    trimmed = np.zeros(np.count_nonzero(latched))
    if instrument in dictionary:
        trimmed[:] = np.asarray(dictionary[instrument])[latched]
    return trimmed
# DataTrim(d,'PT4')
def TrimAll(dictionary):
    """Trim all nine pressure-transducer channels (PT1..PT9); returns a dict
    keyed 'trimPT1'..'trimPT9'."""
    return {'trimPT' + str(i): DataTrim(dictionary, 'PT' + str(i))
            for i in range(1, 10)}
def ShowIndex(dictionary):
    """Print every key followed by the shape and dtype of its array value."""
    for key in dictionary:
        entry = dictionary[key]
        print(key + str(entry.shape) + str(entry.dtype))
def Pbin(dictionary, instrument, edge):
    '''
    Histogram the pressures of *instrument* into the given bin edges and
    store edges ('Bin...'), counts ('Count...') and per-bin live time
    ('BinTime...', count * 0.005 s) back into the dictionary. A missing
    instrument is histogrammed as a single NaN (all-zero counts).
    '''
    if instrument in dictionary:
        counts, edges = np.histogram(dictionary[instrument],
                                      bins=edge, range=(edge[0], edge[-1]))
    else:
        counts, edges = np.histogram(np.float64([0]) + np.nan,
                                      bins=edge, range=(edge[0], edge[-1]))
    # 0.005 s is the sample period, so count * 0.005 is time spent per bin.
    bin_time = counts * 0.005
    dictionary.update({'Bin' + instrument: edges,
                       'Count' + instrument: counts,
                       'BinTime' + instrument: bin_time})
    return
def BinAll(dictionary, edge):
    """Histogram every trimmed PT channel (trimPT1..trimPT9) in place."""
    for channel in range(1, 10):
        Pbin(dictionary, 'trimPT' + str(channel), edge)
    return
def tGood(dictionary, instrument='PT4'):
    '''
    Total elapsed time during which *instrument* tracked its pressure
    setpoint to within 0.3 psi, summing the preceding sample interval for
    each in-tolerance sample (the first sample is never counted).
    Returns np.float64.
    '''
    # Vectorized form of the original loop; the dead "Pgood" counter that
    # was computed but never used has been removed.
    pressure = np.asarray(dictionary[instrument], dtype=np.float64)
    setpoint = np.asarray(dictionary['PressureSetpoint'], dtype=np.float64)
    t = np.asarray(dictionary['elapsed_time'], dtype=np.float64)
    in_tolerance = np.abs(pressure[1:] - setpoint[1:]) <= 0.3
    return np.sum((t[1:] - t[:-1])[in_tolerance])
def tEvent(dictionary):
    '''
    Total time spent with the trigger latched (TriggerLatch == 0), assuming
    uniform sampling; returned as a length-1 array (matching the shape of
    the np.diff sample period, and of main()'s np.zeros(1) default).
    '''
    # Sample period taken from the first two timestamps.
    dt = np.diff(dictionary['elapsed_time'][0:2])
    # The original also computed the last low-to-high latch transition time
    # into an unused local; that dead code has been removed.
    return np.sum(dictionary['TriggerLatch'] == 0) * dt
def Pevent(dictionary, instrument):
    '''
    Pressure reading of *instrument* 40 samples before the last low-to-high
    trigger transition; NaN when the instrument is absent or no usable
    transition exists.
    '''
    rising_edges = np.nonzero(np.diff(dictionary['TriggerLatch']) == 1)[0]
    usable = (instrument in dictionary
              and rising_edges.shape[0] > 0
              and rising_edges[-1] >= 40)
    if usable:
        return dictionary[instrument][rising_edges[-1] - 40]
    return np.float64(0) + np.nan
def PumpActiveTime(dictionary):
    '''
    Time the pump spent active (PUMP == 1) in the first and second halves
    of the event, summing the preceding sample interval per active sample.
    Returns array([t_pre, t_post], dtype=float64).
    '''
    t_pre = np.float64(0)
    t_post = np.float64(0)
    if 'PUMP' in dictionary:
        pump = dictionary['PUMP']
        t = dictionary['elapsed_time']
        midpoint = int(len(pump) / 2)
        # First half starts at 1 so each sample has a preceding timestamp.
        for i in range(1, midpoint):
            if pump[i] == 1:
                t_pre = t_pre + (t[i] - t[i - 1])
        for i in range(midpoint, len(pump)):
            if pump[i] == 1:
                t_post = t_post + (t[i] - t[i - 1])
    return np.array([t_pre, t_post], dtype=np.float64)
def PumpActiveCycle(dictionary):
    '''
    Count pump cycles accumulated (positive increments of the PUMPcycles
    counter) in the first and second halves of the event.
    Returns array([pre, post], dtype=int32).
    '''
    CycleCountPre = np.int32(0)
    CycleCountPost = np.int32(0)
    if 'PUMPcycles' in dictionary:
        # int(...) fixes the Python-3 TypeError: len()/2 is a float, which
        # range() rejects. (Matches the int(len/2) used in PumpActiveTime.)
        midpoint = int(len(dictionary['PUMPcycles']) / 2)
        for i in range(1, midpoint):
            dC = dictionary['PUMPcycles'][i] - dictionary['PUMPcycles'][i - 1]
            if dC > 0:
                CycleCountPre = CycleCountPre + dC
        for i in range(midpoint, len(dictionary['PUMPcycles'])):
            dC = dictionary['PUMPcycles'][i] - dictionary['PUMPcycles'][i - 1]
            if dC > 0:
                CycleCountPost = CycleCountPost + dC
    CycleCount = np.array([CycleCountPre, CycleCountPost], dtype=np.int32)
    return CycleCount
def atTime(dictionary, time, instrument):
    '''
    Instrument reading at the given time: the exact sample when a timestamp
    matches, otherwise the mean of the two samples bracketing *time*
    (printing the upper bracket index as a debug aid).
    '''
    idx = 0
    # Advance until the first timestamp strictly after *time*.
    while time >= dictionary['elapsed_time'][idx]:
        idx = idx + 1
    if time == dictionary['elapsed_time'][idx - 1]:
        return dictionary[instrument][idx - 1]
    bracketed = (dictionary[instrument][idx] +
                 dictionary[instrument][idx - 1]) / 2
    print(idx)
    return bracketed
def Tdata(dictionary, instrument):
    '''Return (min, mean, max) of *instrument*, or (nan, nan, nan) when the
    instrument is absent from the dictionary.'''
    t_min = np.float64(0) + np.nan
    t_max = np.float64(0) + np.nan
    t_avg = np.float64(0) + np.nan
    if instrument in dictionary:
        readings = dictionary[instrument]
        t_min = min(readings)
        t_max = max(readings)
        t_avg = np.mean(readings)
    return (t_min, t_avg, t_max)
def main(dictionary, edge=np.cumsum(np.ones(88)) - 1, targetPT='PT4'):
    """Reduce one event's slow-DAQ data to summary quantities: pump activity,
    temperature stats, event/good times and per-channel pressure histograms.

    Returns a dict with the keys listed in default_output; on ANY failure
    the all-zero default_output is returned instead.

    NOTE(review): the default ``edge`` array (0..87) is evaluated once at
    definition time and shared across calls; it is only read here.
    """
    ## Issues: The 88 above should be dynamically selected to match what len(temp["BinTimetrimPT1"]) evaluated to
    ## below.  This will work *for now*...
    # Fallback result, shaped to match the success-path outputs.
    default_output = {'PumpActiveCycle': np.zeros(2, dtype=np.int32),
                      'PumpActiveTime': np.zeros(2, dtype=np.float64),
                      'TempData': np.zeros((8, 3), dtype=np.float64),
                      'tEvent': np.zeros(1, dtype=np.float64),
                      'tGood': np.zeros(1, dtype=np.float64),
                      'PressureBins': np.zeros((9, len(edge) - 1), dtype=np.float64),
                      'PressureEdge': edge,
                      'EventPressure':np.zeros(9, dtype=np.float64)}
    try:
        temp = TrimAll(dictionary['slowDAQ'])
        BinAll(temp, edge)
        # (min, mean, max) for the eight temperature channels T1..T8.
        TempData = np.ndarray(shape=(8, 3), dtype=float, order='C')
        for i in range(1, 9):
            TempData[i - 1] = Tdata(dictionary['slowDAQ'], 'T' + str(i))
        # Per-bin live times for the nine pressure channels.
        PBins = np.ndarray(shape=(9, len(temp['BinTimetrimPT1'])),
                           dtype=float, order='C')
        for i in range(1, 10):
            PBins[i - 1] = temp['BinTimetrimPT' + str(i)]
        # Pressure at compression for each channel.
        PressData = np.zeros(9)
        for i in range(1, 10):
            PressData[i - 1] = Pevent(dictionary['slowDAQ'], 'PT' + str(i))
        PAC = PumpActiveCycle(dictionary['slowDAQ'])
        PAT = PumpActiveTime(dictionary['slowDAQ'])
        EventTime = tEvent(dictionary['slowDAQ'])
        GoodTime = tGood(dictionary['slowDAQ'], targetPT)
        # NOTE(review): this local name shadows the module-level DataTrim
        # function (not used again inside main, so currently harmless).
        DataTrim = {'PumpActiveCycle': PAC,
                    'PumpActiveTime': PAT,
                    'TempData': TempData,
                    'tEvent': EventTime,
                    'tGood': GoodTime,
                    'PressureBins': PBins,
                    'PressureEdge': temp['Bintrim'+targetPT],
                    'EventPressure': PressData}
        # print(ShowIndex(DataTrim))
        # print(DataTrim['PressureBins'])
        return DataTrim
    except:
        # NOTE(review): bare except hides the failure reason entirely —
        # narrowing it (KeyError/ValueError) and logging would aid debugging.
        return default_output
| [
"numpy.set_printoptions",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.histogram",
"numpy.diff",
"numpy.array",
"numpy.mean",
"numpy.int32",
"numpy.float64",
"numpy.ndarray"
] | [((539, 576), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (558, 576), True, 'import numpy as np\n'), ((985, 999), 'numpy.zeros', 'np.zeros', (['tick'], {}), '(tick)\n', (993, 999), True, 'import numpy as np\n'), ((1017, 1031), 'numpy.zeros', 'np.zeros', (['tick'], {}), '(tick)\n', (1025, 1031), True, 'import numpy as np\n'), ((2737, 2750), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (2747, 2750), True, 'import numpy as np\n'), ((2761, 2774), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (2771, 2774), True, 'import numpy as np\n'), ((3140, 3153), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (3150, 3153), True, 'import numpy as np\n'), ((3164, 3204), 'numpy.diff', 'np.diff', (["dictionary['elapsed_time'][0:2]"], {}), "(dictionary['elapsed_time'][0:2])\n", (3171, 3204), True, 'import numpy as np\n'), ((3929, 3942), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (3939, 3942), True, 'import numpy as np\n'), ((3960, 3973), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (3970, 3973), True, 'import numpy as np\n'), ((4555, 4604), 'numpy.array', 'np.array', (['[tPumpPre, tPumpPost]'], {'dtype': 'np.float64'}), '([tPumpPre, tPumpPost], dtype=np.float64)\n', (4563, 4604), True, 'import numpy as np\n'), ((4682, 4693), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (4690, 4693), True, 'import numpy as np\n'), ((4716, 4727), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (4724, 4727), True, 'import numpy as np\n'), ((5274, 5331), 'numpy.array', 'np.array', (['[CycleCountPre, CycleCountPost]'], {'dtype': 'np.int32'}), '([CycleCountPre, CycleCountPost], dtype=np.int32)\n', (5282, 5331), True, 'import numpy as np\n'), ((1883, 1957), 'numpy.histogram', 'np.histogram', (['dictionary[instrument]'], {'bins': 'edge', 'range': '(edge[0], edge[-1])'}), '(dictionary[instrument], bins=edge, range=(edge[0], edge[-1]))\n', (1895, 1957), True, 'import numpy as np\n'), ((3219, 
3258), 'numpy.sum', 'np.sum', (["(dictionary['TriggerLatch'] == 0)"], {}), "(dictionary['TriggerLatch'] == 0)\n", (3225, 3258), True, 'import numpy as np\n'), ((3589, 3602), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (3599, 3602), True, 'import numpy as np\n'), ((5805, 5818), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (5815, 5818), True, 'import numpy as np\n'), ((5840, 5853), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (5850, 5853), True, 'import numpy as np\n'), ((5875, 5888), 'numpy.float64', 'np.float64', (['(0)'], {}), '(0)\n', (5885, 5888), True, 'import numpy as np\n'), ((6036, 6067), 'numpy.mean', 'np.mean', (['dictionary[instrument]'], {}), '(dictionary[instrument])\n', (6043, 6067), True, 'import numpy as np\n'), ((6385, 6412), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.int32'}), '(2, dtype=np.int32)\n', (6393, 6412), True, 'import numpy as np\n'), ((6454, 6483), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float64'}), '(2, dtype=np.float64)\n', (6462, 6483), True, 'import numpy as np\n'), ((6519, 6553), 'numpy.zeros', 'np.zeros', (['(8, 3)'], {'dtype': 'np.float64'}), '((8, 3), dtype=np.float64)\n', (6527, 6553), True, 'import numpy as np\n'), ((6587, 6616), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.float64'}), '(1, dtype=np.float64)\n', (6595, 6616), True, 'import numpy as np\n'), ((6649, 6678), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.float64'}), '(1, dtype=np.float64)\n', (6657, 6678), True, 'import numpy as np\n'), ((6848, 6877), 'numpy.zeros', 'np.zeros', (['(9)'], {'dtype': 'np.float64'}), '(9, dtype=np.float64)\n', (6856, 6877), True, 'import numpy as np\n'), ((6984, 7032), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(8, 3)', 'dtype': 'float', 'order': '"""C"""'}), "(shape=(8, 3), dtype=float, order='C')\n", (6994, 7032), True, 'import numpy as np\n'), ((7370, 7381), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (7378, 7381), True, 'import numpy as np\n'), ((3300, 3335), 
'numpy.diff', 'np.diff', (["dictionary['TriggerLatch']"], {}), "(dictionary['TriggerLatch'])\n", (3307, 3335), True, 'import numpy as np\n'), ((6140, 6151), 'numpy.ones', 'np.ones', (['(88)'], {}), '(88)\n', (6147, 6151), True, 'import numpy as np\n'), ((2025, 2040), 'numpy.float64', 'np.float64', (['[0]'], {}), '([0])\n', (2035, 2040), True, 'import numpy as np\n'), ((3513, 3548), 'numpy.diff', 'np.diff', (["dictionary['TriggerLatch']"], {}), "(dictionary['TriggerLatch'])\n", (3520, 3548), True, 'import numpy as np\n')] |
import random
import numpy as np
import os
import pytest
from ding.utils.plot_helper import plot
@pytest.mark.unittest
def test_plot():
    """Smoke test for ``plot``: render two labelled lines to './pic.jpg' and
    check the file was written.

    Each series is a dict with keys 'x', 'y', 'label' -- the long format
    that ``plot`` expects.
    """
    rewards1 = np.array([0, 0.1, 0, 0.2, 0.4, 0.5, 0.6, 0.9, 0.9, 0.9])
    rewards2 = np.array([0, 0, 0.1, 0.4, 0.5, 0.5, 0.55, 0.8, 0.9, 1])
    rewards = np.concatenate((rewards1, rewards2))  # concatenation array
    episode1 = range(len(rewards1))
    episode2 = range(len(rewards2))
    episode = np.concatenate((episode1, episode2))
    data1 = {}
    data1['x'] = episode
    data1['y'] = rewards
    data1['label'] = 'line1'

    rewards3 = np.random.random(10)
    rewards4 = np.random.random(10)
    rewards = np.concatenate((rewards3, rewards4))  # concatenation array
    # BUGFIX: the x axes for line2 previously reused len(rewards1)/len(rewards2)
    # (copy-paste); derive them from rewards3/rewards4 so the axis always
    # matches the data it belongs to, even if the sample sizes change.
    episode3 = range(len(rewards3))
    episode4 = range(len(rewards4))
    episode = np.concatenate((episode3, episode4))
    data2 = {}
    data2['x'] = episode
    data2['y'] = rewards
    data2['label'] = 'line2'

    data = [data1, data2]
    plot(data, 'step', 'reward_rate', 'test_pic', './pic.jpg')
    assert os.path.exists('./pic.jpg')
| [
"ding.utils.plot_helper.plot",
"os.path.exists",
"numpy.random.random",
"numpy.array",
"numpy.concatenate"
] | [((154, 210), 'numpy.array', 'np.array', (['[0, 0.1, 0, 0.2, 0.4, 0.5, 0.6, 0.9, 0.9, 0.9]'], {}), '([0, 0.1, 0, 0.2, 0.4, 0.5, 0.6, 0.9, 0.9, 0.9])\n', (162, 210), True, 'import numpy as np\n'), ((226, 281), 'numpy.array', 'np.array', (['[0, 0, 0.1, 0.4, 0.5, 0.5, 0.55, 0.8, 0.9, 1]'], {}), '([0, 0, 0.1, 0.4, 0.5, 0.5, 0.55, 0.8, 0.9, 1])\n', (234, 281), True, 'import numpy as np\n'), ((296, 332), 'numpy.concatenate', 'np.concatenate', (['(rewards1, rewards2)'], {}), '((rewards1, rewards2))\n', (310, 332), True, 'import numpy as np\n'), ((442, 478), 'numpy.concatenate', 'np.concatenate', (['(episode1, episode2)'], {}), '((episode1, episode2))\n', (456, 478), True, 'import numpy as np\n'), ((589, 609), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (605, 609), True, 'import numpy as np\n'), ((625, 645), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (641, 645), True, 'import numpy as np\n'), ((660, 696), 'numpy.concatenate', 'np.concatenate', (['(rewards3, rewards4)'], {}), '((rewards3, rewards4))\n', (674, 696), True, 'import numpy as np\n'), ((806, 842), 'numpy.concatenate', 'np.concatenate', (['(episode3, episode4)'], {}), '((episode3, episode4))\n', (820, 842), True, 'import numpy as np\n'), ((968, 1026), 'ding.utils.plot_helper.plot', 'plot', (['data', '"""step"""', '"""reward_rate"""', '"""test_pic"""', '"""./pic.jpg"""'], {}), "(data, 'step', 'reward_rate', 'test_pic', './pic.jpg')\n", (972, 1026), False, 'from ding.utils.plot_helper import plot\n'), ((1038, 1065), 'os.path.exists', 'os.path.exists', (['"""./pic.jpg"""'], {}), "('./pic.jpg')\n", (1052, 1065), False, 'import os\n')] |
from __future__ import absolute_import
from .. import BuiltinFunction, FixedNumericInput
import numpy as np
import functools
from six.moves import zip
@BuiltinFunction.register
def abs(halos, vals):
    """Per-halo absolute value: scalar ``np.abs`` for plain numbers,
    Euclidean norm over the last axis for vector-valued entries."""
    if not hasattr(vals[0], '__len__'): # Avoid norm failing if abs is called on a single number (issue 110)
        return arithmetic_unary_op(vals, np.abs)
    else:
        # vector quantities: take the L2 norm along the component axis
        return arithmetic_unary_op(vals, functools.partial(np.linalg.norm, axis=-1))
@BuiltinFunction.register
def sqrt(halos, vals):
    """Element-wise square root (delegates to numpy.sqrt)."""
    return arithmetic_unary_op(vals, np.sqrt)

@BuiltinFunction.register
def log(halos, vals):
    """Element-wise natural logarithm (delegates to numpy.log)."""
    return arithmetic_unary_op(vals, np.log)

@BuiltinFunction.register
def log10(halos, vals):
    """Element-wise base-10 logarithm (delegates to numpy.log10)."""
    return arithmetic_unary_op(vals, np.log10)

@BuiltinFunction.register
def subtract(halos, vals1, vals2):
    """Element-wise difference vals1 - vals2."""
    return arithmetic_binary_op(vals1, vals2, np.subtract)

@BuiltinFunction.register
def add(halos, vals1, vals2):
    """Element-wise sum vals1 + vals2."""
    return arithmetic_binary_op(vals1, vals2, np.add)

@BuiltinFunction.register
def divide(halos, vals1, vals2):
    """Element-wise quotient vals1 / vals2."""
    return arithmetic_binary_op(vals1, vals2, np.divide)

@BuiltinFunction.register
def multiply(halos, vals1, vals2):
    """Element-wise product vals1 * vals2."""
    return arithmetic_binary_op(vals1, vals2, np.multiply)

@BuiltinFunction.register
def greater(halos, vals1, vals2):
    """Element-wise comparison vals1 > vals2."""
    return arithmetic_binary_op(vals1, vals2, np.greater)

@BuiltinFunction.register
def less(halos, vals1, vals2):
    """Element-wise comparison vals1 < vals2."""
    return arithmetic_binary_op(vals1, vals2, np.less)

@BuiltinFunction.register
def equal(halos, vals1, vals2):
    """Element-wise equality test vals1 == vals2."""
    return arithmetic_binary_op(vals1, vals2, np.equal)

@BuiltinFunction.register
def greater_equal(halos, vals1, vals2):
    """Element-wise comparison vals1 >= vals2."""
    return arithmetic_binary_op(vals1, vals2, np.greater_equal)

@BuiltinFunction.register
def less_equal(halos, vals1, vals2):
    """Element-wise comparison vals1 <= vals2."""
    return arithmetic_binary_op(vals1, vals2, np.less_equal)

@BuiltinFunction.register
def logical_and(halos, vals1, vals2):
    """Element-wise logical AND."""
    return arithmetic_binary_op(vals1, vals2, np.logical_and)

@BuiltinFunction.register
def logical_or(halos, vals1, vals2):
    """Element-wise logical OR."""
    return arithmetic_binary_op(vals1, vals2, np.logical_or)

@BuiltinFunction.register
def logical_not(halos, vals):
    """Element-wise logical negation."""
    return arithmetic_unary_op(vals, np.logical_not)

@BuiltinFunction.register
def power(halos, vals1, vals2):
    """Element-wise exponentiation vals1 ** vals2."""
    return arithmetic_binary_op(vals1, vals2, np.power)
def arithmetic_binary_op(vals1, vals2, op):
    """Apply *op* pairwise over two parallel value lists.

    Each pair is converted to float numpy arrays before the operation;
    if either member of a pair is None the result for that pair is None.
    """
    def _apply(a, b):
        # propagate missing values instead of raising
        if a is None or b is None:
            return None
        return op(np.asarray(a, dtype=float), np.asarray(b, dtype=float))

    return [_apply(a, b) for a, b in zip(vals1, vals2)]
def arithmetic_unary_op(vals1, op):
    """Apply *op* to every entry of a value list.

    Entries are cast to float numpy arrays first; None entries stay None.
    """
    def _apply(value):
        if value is None:
            return None
        return op(np.asarray(value, dtype=float))

    return [_apply(value) for value in vals1]
"six.moves.zip",
"numpy.asarray",
"functools.partial"
] | [((2373, 2390), 'six.moves.zip', 'zip', (['vals1', 'vals2'], {}), '(vals1, vals2)\n', (2376, 2390), False, 'from six.moves import zip\n'), ((412, 454), 'functools.partial', 'functools.partial', (['np.linalg.norm'], {'axis': '(-1)'}), '(np.linalg.norm, axis=-1)\n', (429, 454), False, 'import functools\n'), ((2455, 2482), 'numpy.asarray', 'np.asarray', (['v1'], {'dtype': 'float'}), '(v1, dtype=float)\n', (2465, 2482), True, 'import numpy as np\n'), ((2500, 2527), 'numpy.asarray', 'np.asarray', (['v2'], {'dtype': 'float'}), '(v2, dtype=float)\n', (2510, 2527), True, 'import numpy as np\n'), ((2768, 2795), 'numpy.asarray', 'np.asarray', (['v1'], {'dtype': 'float'}), '(v1, dtype=float)\n', (2778, 2795), True, 'import numpy as np\n')] |
"""
In this module the data for SVMs is generated and local variables declared
"""
import os
from sklearn.datasets import make_moons
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from definitions import SEED, ROOT_DIR
NUMBER_OF_SAMPLES = 10
# Toy two-moons dataset (fixed seed for reproducibility)
Xm, Ym = make_moons(NUMBER_OF_SAMPLES, random_state=SEED, noise=0.34)
# Banknote-authentication dataset: all columns but the last are features,
# the last column is the class label
data = pd.read_csv(ROOT_DIR + '/data/data_banknote_authentication.txt', header=None)
X = data.iloc[:, :-1].to_numpy()
Y = data.iloc[:, -1:].values.ravel()
# Shared axis limits for all plots below
x_axis_limit = [-3, 3]
y_axis_limit = [-2.5, 2.5]
def make_meshgrid(x, y, h=.02):
    """Build a coordinate grid covering the data range of x and y.

    The grid extends one unit beyond the data on each side and is sampled
    with step *h*. Returns the (xx, yy) pair from numpy.meshgrid.
    """
    xs = np.arange(x.min() - 1, x.max() + 1, h)
    ys = np.arange(y.min() - 1, y.max() + 1, h)
    xx, yy = np.meshgrid(xs, ys)
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Draw the classifier's decision regions on *ax* over the (xx, yy) grid.

    Extra keyword args are forwarded to ``ax.contourf``; its return value
    is passed back to the caller.
    """
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    predictions = clf.predict(grid_points).reshape(xx.shape)
    return ax.contourf(xx, yy, predictions, **params)
def save_decision_boundary_image(X, Y, filename: str, fitted_model):
    """Plot the fitted model's decision regions under the (X, Y) scatter and
    save the figure to *filename*."""
    figure, axis = plt.subplots()
    plt.xlim(x_axis_limit)
    plt.ylim(y_axis_limit)
    grid_x, grid_y = make_meshgrid(X[:, 0], X[:, 1])
    plot_contours(axis, fitted_model, grid_x, grid_y, cmap=plt.cm.coolwarm, alpha=0.8)
    axis.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.coolwarm, s=20, marker='x')
    axis.set_title('Decision Boundary')
    figure.savefig(filename)
def save_plot_data(X, Y, title, filename):
    """Scatter-plot the data colored by label and save the figure to *filename*."""
    figure, axis = plt.subplots()
    plt.scatter(X[:, 0], X[:, 1], c=Y, s=20, marker='x')
    axis.set_title(title)
    plt.xlim(x_axis_limit)
    plt.ylim(y_axis_limit)
    figure.savefig(filename)
# Ensure the output directory for figures exists. makedirs(exist_ok=True)
# avoids the check-then-create race of the old isdir/mkdir pair and also
# creates intermediate directories if needed.
os.makedirs(ROOT_DIR + '/figures', exist_ok=True)
| [
"matplotlib.pyplot.xlim",
"os.mkdir",
"matplotlib.pyplot.ylim",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"os.path.isdir",
"sklearn.datasets.make_moons",
"numpy.arange",
"matplotlib.pyplot.subplots"
] | [((278, 338), 'sklearn.datasets.make_moons', 'make_moons', (['NUMBER_OF_SAMPLES'], {'random_state': 'SEED', 'noise': '(0.34)'}), '(NUMBER_OF_SAMPLES, random_state=SEED, noise=0.34)\n', (288, 338), False, 'from sklearn.datasets import make_moons\n'), ((347, 424), 'pandas.read_csv', 'pd.read_csv', (["(ROOT_DIR + '/data/data_banknote_authentication.txt')"], {'header': 'None'}), "(ROOT_DIR + '/data/data_banknote_authentication.txt', header=None)\n", (358, 424), True, 'import pandas as pd\n'), ((1048, 1062), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1060, 1062), True, 'import matplotlib.pyplot as plt\n'), ((1099, 1121), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x_axis_limit'], {}), '(x_axis_limit)\n', (1107, 1121), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1148), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y_axis_limit'], {}), '(y_axis_limit)\n', (1134, 1148), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1472), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1470, 1472), True, 'import matplotlib.pyplot as plt\n'), ((1477, 1529), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'Y', 's': '(20)', 'marker': '"""x"""'}), "(X[:, 0], X[:, 1], c=Y, s=20, marker='x')\n", (1488, 1529), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1580), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x_axis_limit'], {}), '(x_axis_limit)\n', (1566, 1580), True, 'import matplotlib.pyplot as plt\n'), ((1585, 1607), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y_axis_limit'], {}), '(y_axis_limit)\n', (1593, 1607), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1679), 'os.path.isdir', 'os.path.isdir', (["(ROOT_DIR + '/figures')"], {}), "(ROOT_DIR + '/figures')\n", (1656, 1679), False, 'import os\n'), ((1685, 1716), 'os.mkdir', 'os.mkdir', (["(ROOT_DIR + '/figures')"], {}), "(ROOT_DIR + '/figures')\n", (1693, 1716), False, 'import os\n'), ((694, 720), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, 
x_max, h)\n', (703, 720), True, 'import numpy as np\n'), ((722, 748), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (731, 748), True, 'import numpy as np\n')] |
import threading
import os
import numpy as np
import numpy.testing
import pytest
from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError
from qa4sm_reader.img import QA4SMImg
import pandas as pd
import matplotlib.pyplot as plt
# for profiling with cProfile, on the command line run
# python -m cProfile -o ascat_ismn_validation.profile test_validation.py
@pytest.fixture
def single_img():
    """Fixture: QA4SMComparison built from one triple-collocation result file."""
    testfile = '3-ERA5_LAND.swvl1_with_1-C3S.sm_with_2-ASCAT.sm.nc'
    testfile_path = os.path.join(os.path.dirname(__file__), '..', 'tests',
                                 'test_data', 'tc', testfile)
    return QA4SMComparison(testfile_path)
@pytest.fixture
def double_img_paths():
    """Fixture: paths to two validation results with overlapping spatial extent."""
    first = '0-ISMN.soil moisture_with_1-C3S.sm.nc'
    second = '0-ISMN.soil moisture_with_1-C3S.sm-overlap.nc'
    testfile_paths = [
        os.path.join(
            os.path.dirname(__file__),
            '..', 'tests', 'test_data', 'comparing',
            i
        ) for i in [
            first,
            second]
    ]
    # initialized with intersection
    return testfile_paths


@pytest.fixture
def double_img_overlap(double_img_paths):
    """Initialized double-image comparison with intersection"""
    return QA4SMComparison(double_img_paths)
def test_init(single_img):
    """A freshly built comparison exposes its inputs as a list."""
    assert isinstance(single_img.compared, list)


def test_check_ref(single_img):
    """_check_ref must describe the ERA5-Land reference dataset."""
    expected = {
        'short_name': 'ERA5_LAND',
        'pretty_name': 'ERA5-Land',
        'short_version': 'ERA5_LAND_TEST',
        'pretty_version': 'ERA5-Land test',
        'pretty_variable': 'swvl1',
        'mu': 'm³/m³',
        'pretty_title': 'ERA5-Land (ERA5-Land test)',
    }
    assert single_img._check_ref() == expected


def test_intersection(double_img_overlap):
    """The fixture is built on the intersection, so ``union`` must be falsy."""
    assert not double_img_overlap.union


def test_geometry(double_img_overlap):
    """Intersection and union geometries of the compared images must differ."""
    intersection_geom = double_img_overlap._combine_geometry(double_img_overlap.compared)
    union_geom = double_img_overlap._combine_geometry(double_img_overlap.compared, get_intersection=False)
    assert intersection_geom != union_geom
def test_get_pairwise(single_img, double_img_overlap):
    """_get_pairwise returns a DataFrame with two comparison columns plus
    a difference column, for both comparison flavors."""
    for comparison in (single_img, double_img_overlap):
        pair = comparison._get_pairwise("R")
        assert isinstance(pair, pd.DataFrame)
        assert len(pair.columns) == 3, "There should be one column for comparison term" \
                                       "plus the column with difference values"


def test_checks(single_img, double_img_overlap):
    """perform_checks should pass for both comparison flavors (raises otherwise)."""
    assert single_img.perform_checks()
    double_img_overlap.perform_checks(
        overlapping=True,
        union=True,
        pairwise=True
    )
def test_wrapper(single_img, double_img_overlap):
    """
    This tests the wrapper function but more in general also the
    plotting functions/table
    """
    methods = [
        'boxplot',
        'mapplot'
    ]
    for method in methods:
        out = single_img.wrapper(method, "R")
        plt.close("all")
        assert not out  # generates a plot and returns nothing
    for method in methods:
        out = double_img_overlap.wrapper(method, "R")
        plt.close("all")
        # NOTE(review): 'table' is not in `methods`, so this branch is
        # currently dead code -- confirm whether 'table' should be re-added.
        if method == "table":
            assert out is not None  # generates a pandas dataframe
        else:
            assert not out  # generates a plot and returns nothing


def test_init_union(double_img_overlap):
    """Should go at the end as it changes the attributes"""
    double_img_overlap.init_union()
    assert double_img_overlap.union
def test_pairwise_methods(double_img_paths):
    """Pairwise plotting must raise SpatialExtentError when the comparison
    was initialized on the union (get_intersection=False)."""
    comp = QA4SMComparison(
        double_img_paths,
        get_intersection=False
    )  # todo: solve unexpected behavior on perform_checks
    # flipped to True once the expected SpatialExtentError is observed
    works = False
    methods = [
        'boxplot',
        'mapplot'
    ]
    for method in methods:
        try:  # they all have same behavior
            comp.wrapper(
                method,
                metric="R"
            )
        except SpatialExtentError:
            works = True
    assert works
@pytest.fixture
def double_paths_nonoverlap():
    """Fixture: paths to two validation results with NO spatial overlap.

    Also verifies that initializing a union comparison on them does not raise.
    """
    first = '0-ISMN.soil moisture_with_1-C3S.sm.nc'
    second = '0-ISMN.soil moisture_with_1-C3S.sm-nonoverlap.nc'
    testfile_paths = [
        os.path.join(
            os.path.dirname(__file__),
            '..', 'tests', 'test_data', 'comparing',
            i
        ) for i in [
            first,
            second]
    ]
    # initialize the comparison with intersection and check that no error is raised
    QA4SMComparison(
        testfile_paths,
        get_intersection=False
    )

    return testfile_paths
def test_common_metrics(double_img_paths, double_img_overlap):
    """Check the common_metrics function in the comparison.

    Rebuilds the expected metric dict from the individual images and checks
    it against a hard-coded reference (with n_obs, tau and p_tau excluded).
    """
    metrics_list = []
    for path in double_img_paths:
        im = QA4SMImg(path)
        format_dict = {short: metric_obj.pretty_name for short, metric_obj in im.metrics.items()}
        metrics_list.append(format_dict)
    # metrics common to both images
    metrics_should = {key: val for key, val in metrics_list[0].items() if key in metrics_list[1].keys()}
    metrics_should_hardcoded = {
        'R': "Pearson's r",
        'rho': "Spearman's ρ",
        'RMSD': 'Root-mean-square deviation',
        # 'p_tau': 'Kendall tau p-value',
        'RSS': 'Residual sum of squares',
        'p_R': "Pearson's r p-value",
        'mse_corr': 'Mean square error correlation',
        'mse': 'Mean square error',
        # 'tau': 'Kendall rank correlation',
        'mse_bias': 'Mean square error bias',
        'p_rho': "Spearman's ρ p-value",
        'BIAS': 'Bias (difference of means)',
        'urmsd': 'Unbiased root-mean-square deviation',
        'mse_var': 'Mean square error variance'
    }
    assert double_img_overlap.common_metrics == metrics_should_hardcoded
    # check if n_obs is excluded:
    del metrics_should["n_obs"]
    del metrics_should["tau"]
    del metrics_should["p_tau"]
    assert metrics_should == metrics_should_hardcoded
def test_get_reference_points(double_img_overlap):
    """Check get_reference_points: shape and the first 10 (lon, lat) pairs."""
    points_should = np.array([
        [0.3361, 43.9744],
        [0.3361, 43.9744],
        [-0.0469, 43.9936],
        [-0.0469, 43.9936],
        [-0.0469, 43.9936],
        [-0.0469, 43.9936],
        [0.8878, 43.5472],
        [0.8878, 43.5472],
        [2.7283, 43.1733],
        [2.7283, 43.1733]
    ])
    assert double_img_overlap.get_reference_points().shape == (61, 2)
    np.testing.assert_array_equal(double_img_overlap.get_reference_points()[:10], points_should)
def test_get_data(double_img_overlap):
    """Check _get_data: series count, series name and first 10 values for 'R'."""
    data, ci = double_img_overlap._get_data("R").values()
    assert len(data) == 2
    data = data[0]
    name_should = "Validation 0:\n1-C3S\n(v202012)\nVariable: sm [m³/m³]\n"
    assert data.name == name_should
    data_should = [
        0.679918, 0.707091, 0.713081, 0.808353,
        0.700307, 0.852756, 0.714132, 0.621769,
        0.741732, 0.691499
    ]
    # slightly different due to array transformation from Dataframe
    numpy.testing.assert_array_almost_equal(np.array(data_should), data.iloc[:10].to_numpy(), 6)
def test_init_error(double_paths_nonoverlap):
    """Initializing an intersection comparison on non-overlapping validations
    must raise SpatialExtentError."""
    # pytest.raises states the intent directly and, unlike the old
    # try/except flag pattern, also fails cleanly if a different
    # exception type escapes.
    with pytest.raises(SpatialExtentError):
        QA4SMComparison(
            double_paths_nonoverlap
        )
# --- reload all images to reproduce test_simultaneous_netcdf_loading test ----
def load_extent_image(paths):
    """Build a comparison and render its spatial extent (mirrors a server call)."""
    comp = QA4SMComparison(paths)
    comp.visualize_extent(
        intersection=True,
        plot_points=True
    )

def load_table(paths):
    """Build a comparison twice and produce the difference table, as the
    server does in separate requests."""
    comp = QA4SMComparison(paths)
    metrics = comp.common_metrics
    comp = QA4SMComparison(paths)
    comp.diff_table(metrics=list(metrics.keys()))

def load_plots(paths):
    """Build comparisons and render boxplot and mapplot for the first common
    metric, as the server does in separate requests."""
    comp = QA4SMComparison(paths)
    metrics = comp.common_metrics
    first_called = list(metrics.keys())[0]
    comp = QA4SMComparison(paths)
    comp.wrapper(
        method="boxplot",
        metric=first_called
    )
    comp = QA4SMComparison(paths)
    comp.wrapper(
        method="mapplot",
        metric=first_called
    )
def test_simultaneous_netcdf_loading(double_img_paths):
    """Reproduce the server-side pattern of loading the same netCDF files from
    several threads at once (see comparison_view.py in the qa4sm repository).

    BUGFIX: the previous code passed ``target=func(args)`` -- i.e. it *called*
    each loader in the main thread and handed its ``None`` return value to
    Thread, so nothing ran concurrently. Pass the callable and its args
    instead, and join the threads so the test does not finish before they do.
    """
    workers = [
        threading.Thread(target=func, args=(double_img_paths,))
        for func in (load_extent_image, load_table, load_plots)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
| [
"qa4sm_reader.comparing.QA4SMComparison",
"matplotlib.pyplot.close",
"os.path.dirname",
"qa4sm_reader.img.QA4SMImg",
"numpy.array"
] | [((625, 655), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['testfile_path'], {}), '(testfile_path)\n', (640, 655), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((1225, 1258), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['double_img_paths'], {}), '(double_img_paths)\n', (1240, 1258), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((3739, 3796), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['double_img_paths'], {'get_intersection': '(False)'}), '(double_img_paths, get_intersection=False)\n', (3754, 3796), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((4659, 4714), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['testfile_paths'], {'get_intersection': '(False)'}), '(testfile_paths, get_intersection=False)\n', (4674, 4714), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((6251, 6465), 'numpy.array', 'np.array', (['[[0.3361, 43.9744], [0.3361, 43.9744], [-0.0469, 43.9936], [-0.0469, \n 43.9936], [-0.0469, 43.9936], [-0.0469, 43.9936], [0.8878, 43.5472], [\n 0.8878, 43.5472], [2.7283, 43.1733], [2.7283, 43.1733]]'], {}), '([[0.3361, 43.9744], [0.3361, 43.9744], [-0.0469, 43.9936], [-\n 0.0469, 43.9936], [-0.0469, 43.9936], [-0.0469, 43.9936], [0.8878, \n 43.5472], [0.8878, 43.5472], [2.7283, 43.1733], [2.7283, 43.1733]])\n', (6259, 6465), True, 'import numpy as np\n'), ((7652, 7674), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['paths'], {}), '(paths)\n', (7667, 7674), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((7796, 7818), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['paths'], {}), '(paths)\n', (7811, 7818), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((7864, 7886), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', 
(['paths'], {}), '(paths)\n', (7879, 7886), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((7973, 7995), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['paths'], {}), '(paths)\n', (7988, 7995), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((8084, 8106), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['paths'], {}), '(paths)\n', (8099, 8106), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((8196, 8218), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['paths'], {}), '(paths)\n', (8211, 8218), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((510, 535), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (525, 535), False, 'import os\n'), ((3141, 3157), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3150, 3157), True, 'import matplotlib.pyplot as plt\n'), ((3311, 3327), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3320, 3327), True, 'import matplotlib.pyplot as plt\n'), ((4960, 4974), 'qa4sm_reader.img.QA4SMImg', 'QA4SMImg', (['path'], {}), '(path)\n', (4968, 4974), False, 'from qa4sm_reader.img import QA4SMImg\n'), ((7260, 7281), 'numpy.array', 'np.array', (['data_should'], {}), '(data_should)\n', (7268, 7281), True, 'import numpy as np\n'), ((7396, 7436), 'qa4sm_reader.comparing.QA4SMComparison', 'QA4SMComparison', (['double_paths_nonoverlap'], {}), '(double_paths_nonoverlap)\n', (7411, 7436), False, 'from qa4sm_reader.comparing import QA4SMComparison, SpatialExtentError\n'), ((868, 893), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (883, 893), False, 'import os\n'), ((4411, 4436), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4426, 4436), False, 'import os\n')] |
import numpy as np
import argparse
import os
import math
import copy
from .. import MI
from .. import IO
from .. import sge
from .. import structures
from .. import modify_seed
from .. import type_conversions
from .. import matchmaker
from .. import statistic_tests
def handler():
    """Parse command-line arguments for the seed-optimization step.

    Returns the argparse.Namespace with all input/output paths, discretization
    and optimization parameters, and the jackknife-robustness settings.

    NOTE(review): ``n_permutations`` receives a default via ``set_defaults``
    but has no matching ``add_argument`` -- confirm whether it should be
    exposed on the command line.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("--unique_seeds_filename", help="best representatives of each family", type=str)
    parser.add_argument("--unique_profiles_filename", help="profiles of best representatives of each family",
                        type=str)
    parser.add_argument("--rna_bin_file", help="referense transcriptome in binary format", type=str)
    parser.add_argument("--exp_mask_file", help="file with binary expression file, pre-overlapped with "
                                                "the reference transcriptome", type=str)
    parser.add_argument("--optimized_seeds_folder", help="output: optimized seeds", type=str)
    parser.add_argument("--optimized_profiles_folder", help="output: profiles of optimized seeds", type=str)
    parser.add_argument("--optimized_MI_pv_zscores_folder", help="output: MI values, p-values and z-scores", type=str)
    parser.add_argument("--robustness_array_folder", help="output: vector indicating which seeds have passed the robustness test", type=str)
    parser.add_argument("--optimized_seeds_filename_template", help="", type=str)
    parser.add_argument("--optimized_profiles_filename_template", help="", type=str)
    parser.add_argument("--optimized_MI_pv_zscores_filename_template", help="", type=str)
    parser.add_argument("--robustness_array_filename_template", help="", type=str)

    parser.add_argument("--nbins", help="number of bins for discretization of expression profile", type=int)
    parser.add_argument("--maxfreq", help="maximal seed frequency in the sequences analyzed", type=float)
    parser.add_argument("--min_occurences", help="minimal number of seed occurence in the transcriptome"
                                                 " for a seed to be considered", type=int)
    parser.add_argument("--random_noseed", help="when choosing the order of positions to optimize, "
                        "do not set the random number generator to a specific seed", type=bool)
    parser.add_argument("--jackknife_n_permutations", help="number of permutations for pvalue calculation in "
                                                           "jackknife test", type=int)
    parser.add_argument("--jackknife_max_pvalue", help="maximal pvalue for jackknife test", type=float)
    parser.add_argument("--jackknife_n_samples", help="how many permutations to do in jackknife test", type=int)
    parser.add_argument("--jackknife_fraction_retain", help="what fraction of the sample to retain for each test",
                        type=float)
    parser.add_argument("--jackknife_min_fraction_passed", help="what fraction of all iterations should"
                                                                "pass to consider the motif robust", type=float)
    parser.add_argument("--size_of_chunks", help="how many seeds should 1 process take on", type=float)
    parser.add_argument("--indices_mode", help="compression in the index mode", type=bool)
    parser.add_argument("--index_bit_width", help="number of bits per one index when compressing", type=int)

    parser.set_defaults(
        # unique_seeds_filename="/Users/student/Documents/hani/programs/pyteiser/data/passed_seeds/passed_seed_4-7_4-9_4-6_14-20_combined/test_1_2_seeds_unique.bin",
        # unique_profiles_filename="/Users/student/Documents/hani/programs/pyteiser/data/passed_profiles/passed_profiles_4-7_4-9_4-6_14-20_combined/test_1_2_profiles_unique.bin",
        #
        # optimized_seeds_folder='/Users/student/Documents/hani/programs/pyteiser/data/passed_seeds/passed_seed_4-7_4-9_4-6_14-20_combined',
        # optimized_profiles_folder='/Users/student/Documents/hani/programs/pyteiser/data/passed_profiles/passed_profiles_4-7_4-9_4-6_14-20_combined',
        # optimized_MI_pv_zscores_folder='/Users/student/Documents/hani/programs/pyteiser/data/optimized_seeds_characteristics/seeds_4-7_4-9_4-6_14-20_individual',
        # robustness_array_folder='/Users/student/Documents/hani/programs/pyteiser/data/seeds_robustness/seeds_4-7_4-9_4-6_14-20_individual',
        #
        # optimized_seeds_filename_template='test_1_2_seeds_optimized',
        # optimized_profiles_filename_template='test_1_2_profiles_optimized',
        # optimized_MI_pv_zscores_filename_template='test_1_2_characteristics',
        # robustness_array_filename_template='test_1_2_robustness',
        #
        # rna_bin_file='/Users/student/Documents/hani/iTEISER/step_2_preprocessing/reference_files/reference_transcriptomes/binarized/Gencode_v28_GTEx_expressed_transcripts_from_coding_genes_3_utrs_fasta.bin',
        # exp_mask_file='/Users/student/Documents/hani/programs/pyteiser/data/mask_files/TARBP2_decay_t_score_mask.bin',
        nbins=15,
        maxfreq = 0.5, # default value from Hani's program is 0.5
        min_occurences=10,
        n_permutations=1000, # takes 1 second per 100 permutations, Hani's default number of permutations is 1*10^6
        random_noseed=0,
        jackknife_n_samples = 10,
        jackknife_fraction_retain = 0.66,
        jackknife_n_permutations=1000,
        jackknife_max_pvalue=0.0001,
        jackknife_min_fraction_passed = 0.6,
        size_of_chunks=10,
        indices_mode=False,
        index_bit_width = 24,
    )

    args = parser.parse_args()

    return args
def chunk_up_input_files(seeds_initial, profiles_initial, size_of_chunks,
                        do_chunk_seeds):
    """Split seeds and their profiles into parallel chunks for distribution.

    Returns (seed_chunks, profiles_chunks): equally indexed lists of numpy
    arrays. When do_chunk_seeds is falsy everything goes into one chunk.
    """
    total_seeds = len(seeds_initial)
    print("Starting with %d initial seeds" % total_seeds)
    n_chunks = math.ceil(total_seeds / size_of_chunks) if do_chunk_seeds else 1
    profiles_chunks = np.array_split(profiles_initial, n_chunks)
    seed_chunks = np.array_split(np.array(seeds_initial), n_chunks)
    # sanity check: each seed chunk must line up with its profile chunk
    for profile_part, seed_part in zip(profiles_chunks, seed_chunks):
        assert len(profile_part) == len(seed_part)
    return seed_chunks, profiles_chunks
def pick_one_chunk(task_id, seed_chunks, profiles_chunks):
    """Return the (seed, profile) chunk pair for a 1-based SGE task id."""
    chunk_index = int(task_id) - 1  # task ids count from 1, lists from 0
    print("Processing the chunk number ", chunk_index)
    return seed_chunks[chunk_index], profiles_chunks[chunk_index]
def are_there_better_motifs(n_modified_motifs, seqs_of_interest, discr_exp_profile, nbins,
                            bestmi, n_bestmotif, lastmyfreq,
                            min_occurences, maxfreq, do_print = True):
    """Scan a list of candidate motifs and keep the best one found so far.

    A candidate replaces the current best when its MI with the expression
    profile is higher, it occurs more than min_occurences times, and its
    frequency is either below maxfreq or below the current best's frequency.

    Returns the updated (bestmi, lastmyfreq, n_bestmotif) triple.
    """
    for curr_motif in n_modified_motifs:
        current_profile, time_spent = matchmaker.calculate_profile_one_motif(curr_motif,
                                                        seqs_of_interest,
                                                        is_degenerate = True)
        # fraction of sequences the candidate matches
        myfreq = current_profile.values.sum() / float(len(seqs_of_interest))
        tempmi = MI.mut_info(current_profile.values, discr_exp_profile, x_bins=2, y_bins=nbins)
        if tempmi > bestmi and current_profile.sum() > min_occurences and (myfreq < maxfreq or myfreq < lastmyfreq):
            n_bestmotif = structures.copy_n_motif(curr_motif)
            w_bestmotif = type_conversions.n_to_w_motif(n_bestmotif)
            bestmi = tempmi
            lastmyfreq = myfreq
            if do_print:
                print("New motif (MI = %.4f): %s" % (bestmi, w_bestmotif.print_sequence(return_string=True)))
                # w_bestmotif.print()
                # w_bestmotif.print_linear()
            #print("Current frequency: %.4f" % lastmyfreq)
    return bestmi, lastmyfreq, n_bestmotif
# optimize sequence of all the positions individually in random order
def optimize_motif_sequence(n_bestmotif, init_best_MI, seqs_of_interest,
                            discr_exp_profile, nbins, lastmyfreq,
                            min_occurences, maxfreq,
                            do_print = False, random_noseed=False):
    """Greedily optimize each motif position, visiting positions in random order.

    Returns the (bestmi, lastmyfreq, n_bestmotif) triple.

    BUGFIX: the seeding condition was inverted. ``random_noseed`` means
    "do NOT fix the RNG seed" (see the --random_noseed help text), but the
    RNG was previously seeded exactly when the flag was set and left
    unseeded -- i.e. non-reproducible -- by default.
    """
    bestmi = init_best_MI
    # seed the RNG for a reproducible position order unless the caller
    # explicitly asked for an unseeded (random) order
    if not random_noseed:
        np.random.seed(1543)
    # create a random index so that we optimize each position not from left to right but rather in random order
    k_inc = np.arange(n_bestmotif.length)
    k_shu = np.random.permutation(k_inc)

    # optimize motif
    for k in range(n_bestmotif.length):
        if do_print:
            print("Modifying position ", k+1)
        position = k_shu[k]
        w_modified_motifs = modify_seed.modify_base(n_bestmotif, position)
        n_modified_motifs = type_conversions.w_to_n_motifs_list(w_modified_motifs)
        bestmi, lastmyfreq, n_bestmotif = are_there_better_motifs(n_modified_motifs,
                                                seqs_of_interest, discr_exp_profile, nbins,
                                                bestmi, n_bestmotif, lastmyfreq,
                                                min_occurences, maxfreq,
                                                do_print = do_print)
    return bestmi, lastmyfreq, n_bestmotif
def elongate_motif(n_bestmotif, init_best_MI, seqs_of_interest,
                   discr_exp_profile, nbins, lastmyfreq,
                   min_occurences, maxfreq, do_print = False):
    """Repeatedly try to elongate the motif while elongation keeps improving MI.

    Returns the (bestmi, lastmyfreq, n_bestmotif) triple.

    BUGFIX: ``bestmi`` is now updated with the value returned by
    are_there_better_motifs. Previously the stale initial MI was carried
    through every iteration and returned, so each elongation round was
    compared against the pre-elongation MI instead of the current best.
    """
    bestmi = init_best_MI
    keep_elongating = True
    # simple emulator of do {} while {} in python: https://stackoverflow.com/questions/743164/emulate-a-do-while-loop-in-python
    while keep_elongating:
        n_elongated_motifs = modify_seed.elongate_motif(n_bestmotif)
        old_best_mi = bestmi
        old_best_motif = structures.copy_n_motif(n_bestmotif)
        bestmi, lastmyfreq, n_bestmotif = are_there_better_motifs(n_elongated_motifs,
                                                seqs_of_interest, discr_exp_profile, nbins,
                                                bestmi, n_bestmotif, lastmyfreq,
                                                min_occurences, maxfreq,
                                                do_print = do_print)
        # continue only while MI did not drop and the motif actually grew
        keep_elongating = ((bestmi >= old_best_mi) and
                           (n_bestmotif.length > old_best_motif.length))
    return bestmi, lastmyfreq, n_bestmotif
def get_characteristics(n_bestmotif, seqs_of_interest,
                        discr_exp_profile, nbins, n_permutations,
                        do_print = False):
    """Compute the final statistics of an optimized motif.

    Returns (profile, MI, p-value, z-score), where the p-value and z-score
    come from an n_permutations shuffling test of the MI value.
    """
    bestmotif_profile, _time = matchmaker.calculate_profile_one_motif(n_bestmotif, seqs_of_interest,
                                                                    is_degenerate = True)
    bestmotif_mi = MI.mut_info(bestmotif_profile.values, discr_exp_profile, x_bins=2, y_bins=nbins)
    pvalue, z_score = statistic_tests.MI_get_pvalue_and_zscore(bestmotif_profile.values, discr_exp_profile, nbins,
                                                              bestmotif_mi, n_permutations)
    if do_print:
        print("The final p-value is: %.4f, z-score is: %.3f" % (pvalue, z_score))
    return bestmotif_profile, bestmotif_mi, pvalue, z_score
def check_robustness(bestmotif_profile,
                     discr_exp_profile, nbins,
                     jackknife_n_permutations,
                     jackknife_max_pvalue,
                     jackknife_n_samples,
                     jackknife_fraction_retain,
                     jackknife_min_fraction_passed,
                     do_print = False):
    """Run the jackknife resampling test on the motif profile and report
    whether the motif counts as robust (thin wrapper around
    statistic_tests.jackknife_test)."""
    return statistic_tests.jackknife_test(
        bestmotif_profile.values, discr_exp_profile, nbins,
        jackknife_n_permutations,
        jackknife_max_pvalue,
        jackknife_n_samples,
        jackknife_fraction_retain,
        jackknife_min_fraction_passed,
        do_print = do_print)
def optimize_motifs(seeds_initial, profiles_initial,
                    discr_exp_profile, nbins, index_array, seqs_of_interest,
                    min_occurences, maxfreq,
                    n_permutations, random_noseed,
                    jackknife_n_permutations,
                    jackknife_max_pvalue,
                    jackknife_n_samples,
                    jackknife_fraction_retain,
                    jackknife_min_fraction_passed,
                    do_print = True):
    """Optimize every seed motif and collect its final statistics.

    For each seed: optimize the sequence position by position, try to
    elongate it, recompute MI / p-value / z-score, and run the jackknife
    robustness test.

    Returns (seeds_optimized, profiles_optimized, seed_charact_array,
    robustness_array), all indexed in parallel with seeds_initial.
    """
    seeds_optimized = copy.deepcopy(seeds_initial)
    profiles_optimized = np.zeros((len(seeds_initial), discr_exp_profile.shape[0]), dtype=bool)
    # seed_charact_array keeps MI values, p-values and z-scores
    seed_charact_array = np.zeros((len(seeds_initial), 3), dtype=np.float64)
    robustness_array = np.zeros(len(seeds_initial), dtype=bool)

    for i, motif in enumerate(seeds_initial):
        profile = profiles_initial[i]
        # restrict the profile to the transcripts present in the expression mask
        active_profile = profile[index_array]
        n_bestmotif = type_conversions.w_to_n_motif(seeds_initial[i])

        # initial mi value
        init_best_MI = MI.mut_info(active_profile, discr_exp_profile, x_bins=2, y_bins=nbins)
        lastmyfreq = active_profile.sum() / float(active_profile.shape[0])

        if do_print:
            w_bestmotif = type_conversions.n_to_w_motif(n_bestmotif)
            print("Optimzing the sequence of motif %d (sequence is %s). Initial MI = %.5f" %
                  (i, w_bestmotif.print_sequence(return_string=True), init_best_MI))
            #print("Initial frequency: %.4f" % lastmyfreq)

        bestmi, lastmyfreq, n_bestmotif = optimize_motif_sequence(n_bestmotif, init_best_MI, seqs_of_interest,
                                                discr_exp_profile, nbins, lastmyfreq,
                                                min_occurences, maxfreq, do_print = do_print,
                                                random_noseed = random_noseed)
        if do_print:
            print("Elongating motif %d" % i)
        bestmi, lastmyfreq, n_bestmotif = elongate_motif(n_bestmotif, bestmi, seqs_of_interest,
                                               discr_exp_profile, nbins, lastmyfreq,
                                               min_occurences, maxfreq, do_print = do_print)
        w_bestmotif = type_conversions.n_to_w_motif(n_bestmotif)

        # recompute the final profile, MI, p-value and z-score for the winner
        bestmotif_profile, bestmotif_mi, pvalue, z_score = get_characteristics(
                                                n_bestmotif, seqs_of_interest,
                                                discr_exp_profile, nbins, n_permutations,
                                                do_print=do_print)
        if do_print:
            print("Checking robustness of the optimized motif %d (sequence %s)" %
                  (i, w_bestmotif.print_sequence(return_string=True)))
        is_robust = check_robustness(bestmotif_profile,
                                     discr_exp_profile, nbins,
                                     jackknife_n_permutations,
                                     jackknife_max_pvalue,
                                     jackknife_n_samples,
                                     jackknife_fraction_retain,
                                     jackknife_min_fraction_passed,
                                     do_print = do_print)

        seeds_optimized[i] = w_bestmotif
        profiles_optimized[i] = bestmotif_profile.values
        seed_charact_array[i, : ] = np.array([bestmotif_mi, pvalue, z_score], dtype=np.float64)
        robustness_array[i] = is_robust

    return seeds_optimized, profiles_optimized, \
           seed_charact_array, robustness_array
def read_sequences(rna_bin_filename):
    """Load an RNA binary file and return its sequences in numeric form.

    The sequences are returned in the order recorded in the file.
    """
    seqs_dict, seqs_order = IO.read_rna_bin_file(rna_bin_filename)
    ordered_seqs = [seqs_dict[name] for name in seqs_order]
    return type_conversions.w_to_n_sequences_list(ordered_seqs)
def make_output_filenames(task_id,
                         optimized_seeds_filename_template,
                         optimized_profiles_filename_template,
                         optimized_MI_pv_zscores_filename_template,
                         robustness_array_filename_template,
                         optimized_seeds_folder,
                         optimized_profiles_folder,
                         optimized_MI_pv_zscores_folder,
                         robustness_array_folder
                         ):
    """Build the four per-task output file paths.

    Each path is ``<folder>/<template>_<task_id>.bin``. Returns a 4-tuple:
    (seeds path, profiles path, MI/p-value/z-score path, robustness path).
    """
    pairs = [
        (optimized_seeds_filename_template, optimized_seeds_folder),
        (optimized_profiles_filename_template, optimized_profiles_folder),
        (optimized_MI_pv_zscores_filename_template, optimized_MI_pv_zscores_folder),
        (robustness_array_filename_template, robustness_array_folder),
    ]
    # %s formatting keeps compatibility with non-string task ids
    paths = [os.path.join(folder, "%s_%s.bin" % (template, task_id))
             for template, folder in pairs]
    return tuple(paths)
def non_sge_dependent_main(
        task_id,
        rna_bin_file,
        exp_mask_file,
        unique_seeds_filename,
        nbins,
        unique_profiles_filename,
        indices_mode,
        index_bit_width,
        size_of_chunks,
        optimized_seeds_filename_template,
        optimized_profiles_filename_template,
        optimized_MI_pv_zscores_filename_template,
        robustness_array_filename_template,
        optimized_seeds_folder,
        optimized_profiles_folder,
        optimized_MI_pv_zscores_folder,
        robustness_array_folder,
        min_occurences, maxfreq,
        n_permutations, random_noseed,
        jackknife_n_permutations,
        jackknife_max_pvalue,
        jackknife_n_samples,
        jackknife_fraction_retain,
        jackknife_min_fraction_passed,
        do_chunk_seeds = True
        ):
    """Run one array-job task of the motif optimization pipeline.

    Loads sequences, expression mask, seeds and profiles; picks the chunk of
    seeds belonging to ``task_id``; optimizes it with ``optimize_motifs`` and
    writes four per-task output files (seeds, profiles, MI/p-value/z-score
    array, robustness array).
    """
    n_seqs_list = read_sequences(rna_bin_file)
    # Expression mask: boolean index over sequences plus expression values
    index_array, values_array = IO.unpack_mask_file(exp_mask_file)
    discr_exp_profile = MI.discretize_exp_profile(index_array, values_array, nbins=nbins)
    seeds_initial = IO.read_motif_file(unique_seeds_filename)
    profiles_initial = IO.unpack_profiles_file(unique_profiles_filename, indices_mode)
    # Keep only the sequences selected by the mask
    seqs_of_interest = [n_seqs_list[x] for x in range(index_array.shape[0]) if index_array[x]]
    seed_chunks, profiles_chunks = chunk_up_input_files(seeds_initial, profiles_initial, size_of_chunks,
                                                         do_chunk_seeds = do_chunk_seeds)
    # This task processes only its own chunk
    seed_right_chunk, profiles_right_chunk = pick_one_chunk(task_id, seed_chunks, profiles_chunks)
    seeds_filename_full, profiles_filename_full, \
    char_filename_full, robustness_filename_full = make_output_filenames(task_id,
                              optimized_seeds_filename_template,
                              optimized_profiles_filename_template,
                              optimized_MI_pv_zscores_filename_template,
                              robustness_array_filename_template,
                              optimized_seeds_folder,
                              optimized_profiles_folder,
                              optimized_MI_pv_zscores_folder,
                              robustness_array_folder
                              )
    seeds_optimized, profiles_optimized, \
    seed_charact_array, robustness_array = optimize_motifs(seed_right_chunk, profiles_right_chunk,
                     discr_exp_profile, nbins, index_array, seqs_of_interest,
                     min_occurences, maxfreq,
                     n_permutations, random_noseed,
                     jackknife_n_permutations,
                     jackknife_max_pvalue,
                     jackknife_n_samples,
                     jackknife_fraction_retain,
                     jackknife_min_fraction_passed,
                     do_print=True)
    # Persist the four result arrays for this task
    IO.write_list_of_seeds(seeds_optimized, seeds_filename_full)
    IO.write_array_of_profiles(profiles_optimized, profiles_filename_full,
                               indices_mode, index_bit_width)
    IO.write_np_array(seed_charact_array, char_filename_full)
    IO.write_np_array(robustness_array, robustness_filename_full)
def main():
    """Script entry point: parse arguments, read the array-job task id from
    the environment, and run the optimization for that task's seed chunk."""
    # handler() is defined elsewhere in this file; presumably it builds the
    # argparse namespace consumed below -- TODO confirm
    args = handler()
    # get the task id
    env_variables_dict = sge.get_env_variables()
    # run the optimization
    non_sge_dependent_main(
        env_variables_dict["task_id"],
        args.rna_bin_file,
        args.exp_mask_file,
        args.unique_seeds_filename,
        args.nbins,
        args.unique_profiles_filename,
        args.indices_mode,
        args.index_bit_width,
        args.size_of_chunks,
        args.optimized_seeds_filename_template,
        args.optimized_profiles_filename_template,
        args.optimized_MI_pv_zscores_filename_template,
        args.robustness_array_filename_template,
        args.optimized_seeds_folder,
        args.optimized_profiles_folder,
        args.optimized_MI_pv_zscores_folder,
        args.robustness_array_folder,
        args.min_occurences,
        args.maxfreq,
        args.n_permutations,
        args.random_noseed,
        args.jackknife_n_permutations,
        args.jackknife_max_pvalue,
        args.jackknife_n_samples,
        args.jackknife_fraction_retain,
        args.jackknife_min_fraction_passed,
        do_chunk_seeds = True
    )
if __name__ == "__main__":
    main()
| [
"copy.deepcopy",
"numpy.random.seed",
"argparse.ArgumentParser",
"math.ceil",
"numpy.arange",
"numpy.array",
"numpy.random.permutation",
"numpy.array_split",
"os.path.join"
] | [((296, 321), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (319, 321), False, 'import argparse\n'), ((6115, 6165), 'numpy.array_split', 'np.array_split', (['profiles_initial', 'number_of_chunks'], {}), '(profiles_initial, number_of_chunks)\n', (6129, 6165), True, 'import numpy as np\n'), ((6183, 6206), 'numpy.array', 'np.array', (['seeds_initial'], {}), '(seeds_initial)\n', (6191, 6206), True, 'import numpy as np\n'), ((6225, 6269), 'numpy.array_split', 'np.array_split', (['seed_array', 'number_of_chunks'], {}), '(seed_array, number_of_chunks)\n', (6239, 6269), True, 'import numpy as np\n'), ((8610, 8639), 'numpy.arange', 'np.arange', (['n_bestmotif.length'], {}), '(n_bestmotif.length)\n', (8619, 8639), True, 'import numpy as np\n'), ((8652, 8680), 'numpy.random.permutation', 'np.random.permutation', (['k_inc'], {}), '(k_inc)\n', (8673, 8680), True, 'import numpy as np\n'), ((12760, 12788), 'copy.deepcopy', 'copy.deepcopy', (['seeds_initial'], {}), '(seeds_initial)\n', (12773, 12788), False, 'import copy\n'), ((17049, 17106), 'os.path.join', 'os.path.join', (['optimized_seeds_folder', 'seed_filename_short'], {}), '(optimized_seeds_folder, seed_filename_short)\n', (17061, 17106), False, 'import os\n'), ((17136, 17200), 'os.path.join', 'os.path.join', (['optimized_profiles_folder', 'profiles_filename_short'], {}), '(optimized_profiles_folder, profiles_filename_short)\n', (17148, 17200), False, 'import os\n'), ((17226, 17291), 'os.path.join', 'os.path.join', (['optimized_MI_pv_zscores_folder', 'char_filename_short'], {}), '(optimized_MI_pv_zscores_folder, char_filename_short)\n', (17238, 17291), False, 'import os\n'), ((17323, 17387), 'os.path.join', 'os.path.join', (['robustness_array_folder', 'robustness_filename_short'], {}), '(robustness_array_folder, robustness_filename_short)\n', (17335, 17387), False, 'import os\n'), ((6012, 6052), 'math.ceil', 'math.ceil', (['(seeds_number / size_of_chunks)'], {}), '(seeds_number / 
size_of_chunks)\n', (6021, 6052), False, 'import math\n'), ((8464, 8484), 'numpy.random.seed', 'np.random.seed', (['(1543)'], {}), '(1543)\n', (8478, 8484), True, 'import numpy as np\n'), ((15652, 15711), 'numpy.array', 'np.array', (['[bestmotif_mi, pvalue, z_score]'], {'dtype': 'np.float64'}), '([bestmotif_mi, pvalue, z_score], dtype=np.float64)\n', (15660, 15711), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Main class to update the RADAR/STATION database and run queries to retrieve
specific data
Note that I use spark because there is currently no way to use SQL queries
with dask
"""
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SQLContext, DataFrame
# This could benefit from some tweaks especially if the database becomes larger
conf = SparkConf()
# Broadcast-join threshold raised to 100 MB
conf.set("spark.sql.autoBroadcastJoinThreshold", 1024*1024*100)
# NOTE(review): the app name 'Mnist_Spark_MLP' looks copy-pasted from an
# example; consider renaming it (left unchanged here)
conf.setAppName('Mnist_Spark_MLP').setMaster('local[8]')
conf.setAll([('spark.executor.memory', '8g'),
             ('spark.executor.cores', '3'),
             ('spark.cores.max', '3'),
             ('spark.driver.memory','8g')])
# Column names in Spark SQL are treated case-sensitively
conf.set("spark.sql.caseSensitive","true")
# Global imports
import glob
import yaml
import logging
logging.getLogger().setLevel(logging.INFO)
import os
import textwrap
import numpy as np
import subprocess
from datetime import datetime
import copy
import time
import fnmatch
# Local imports
from ..common import constants
from ..common.utils import chunks, timestamp_from_datestr
from ..common.utils import dict_flatten, read_df, envyaml
STATION_INFO = np.array(constants.METSTATIONS)
class TableDict(dict):
    """Dict subclass for Spark tables that registers every inserted value as
    a temporary SQL view named after its key, so SQL queries can refer to the
    table immediately after insertion."""
    def __setitem__(self, name, table):
        # Store the entry first, then expose it as a temp view under its key
        dict.__setitem__(self, name, table)
        table.createOrReplaceTempView(name)
class DataFrameWithInfo(DataFrame):
    """Spark DataFrame that carries a lazily computed, human-readable summary.

    Wraps an existing DataFrame (reusing its JVM handle and SQL context) and
    exposes an ``info`` property describing the table name, dimensions, time
    span (from the 'timestamp' column) and column names. The summary is
    built on first access and cached, because it requires a full count() and
    a collect() of the timestamp column.
    """
    def __init__(self, name, df):
        # BUGFIX: super(self.__class__, self) recurses infinitely if this
        # class is ever subclassed; name the class explicitly instead.
        super(DataFrameWithInfo, self).__init__(df._jdf, df.sql_ctx)
        self.info = None  # goes through the setter; built lazily on first read
        self.name = name
    @property
    def info(self):
        # Lazily build and cache the summary string
        if self.__info is None:  # was '== None'; identity test is correct for None
            cols = self.columns
            rows = self.count()
            # NOTE(review): assumes the table has a numeric UNIX 'timestamp'
            # column -- confirm for any new table type
            times = self.select('timestamp').collect()
            t0 = datetime.utcfromtimestamp(np.min(times))
            t1 = datetime.utcfromtimestamp(np.max(times))
            self.__info = '''
            Table {:s} info
            ----------------
            Dimension: {:d} x {:d}
            Time interval: {:s} - {:s}
            Columns: {:s}
            '''.format(self.name, rows, len(cols), str(t0),str(t1),
                ','.join(cols))
        return self.__info
    @info.setter
    def info(self, value):
        self.__info = value
class Database(object):
    """In-memory front-end to the RADAR/STATION database: loads tables as
    Spark DataFrames, runs SQL queries on them, and orchestrates SLURM jobs
    that update gauge, reference and radar data on disk."""
    def __init__(self, config_file = None):
        """
        Creates a Database instance that can be used to load data, update
        new data, run queries, etc

        Parameters
        ----------
        config_file : str (optional)
            Path of the configuration file that you want to use, can also
            be provided later and is needed only if you want to update the
            database with new data
        """
        # Uses the module-level SparkConf 'conf' defined at the top of the file
        sparkContext = SparkContext(conf = conf)
        self.sqlContext = SQLContext(sparkContext)
        # TableDict auto-registers every added table as a temp SQL view
        self.tables = TableDict()
        self.summaries = {}
        if config_file:
            self.config = envyaml(config_file)
            # NOTE(review): the config_file setter below parses the YAML a
            # second time; the line above is redundant but harmless
            self.config_file = config_file
    @property
    def config_file(self):
        # Path of the currently loaded YAML configuration file
        return self.__config_file
    @config_file.setter
    def config_file(self, config_file):
        # Re-parse the YAML whenever a new config file path is assigned
        self.config = envyaml(config_file)
        self.__config_file = config_file
    def add_tables(self, filepaths_dic, get_summaries = False):
        """
        Reads a set of data contained in a folder as a Spark DataFrame and
        adds them to the database instance

        Parameters
        ----------
        filepaths_dic : dict
            Dictionary where the keys are the name of the dataframes to add
            and the values are the wildcard patterns poiting to the files
            for example {'gauge': '/mainfolder/gauge/*.csv',
                         'radar' : '/mainfolder/radar/*.csv',
                         'reference' : /mainfolder/reference/*.parquet'}
            will add the three tables 'gauge', 'radar' and 'reference' to the
            database
        get_summaries : bool (optional)
            Currently unused; reserved for the experimental summary-caching
            code kept commented out below.
        """
        for table in filepaths_dic:
            pattern = filepaths_dic[table]
            self.tables[table] = DataFrameWithInfo(table, read_df(pattern,
                       dbsystem = 'spark', sqlContext = self.sqlContext))
            # Register as table
            # NOTE(review): TableDict.__setitem__ already registers the temp
            # view, so this call repeats the registration (harmless)
            self.tables[table].createOrReplaceTempView(table)
            # Below is experimental
#            # if get_summaries
#            if get_summaries:
#                summary_file = os.path.dirname(pattern)+'/.'+table
#                if os.path.exists(summary_file):
#                    self.summaries[table] = pd.read_csv(summary_file)
#                else:
#                    summary = self.tables[table].summary().toPandas()
#                    # Change timestamp to date and remove useless statistics
#                    dates = []
#                    if 'timestamp' in summary.columns:
#                        for i, stat in enumerate(summary['summary']):
#                            if stat not in ['min','max']:
#                                dates.append(np.nan)
#                            else:
#                                d = datetime.utcfromtimestamp(float(summary['timestamp'][i]))
#                                dates.append(d)
#                        summary['date'] = dates
#                        summary = summary.drop('timestamp',1)
#                    if 'station' in summary.columns:
#                        summary = summary.drop('station',1)
#
#                    self.summaries[table] = summary
#                    self.summaries[table].to_csv(summary_file, index=False)
def query(self, sql_query, to_memory = True, output_file = ''):
"""
Performs an SQL query on the database and returns the result and if
wanted writes it to a file
Parameters
----------
sql_query : str
Valid SQL query, all tables refered to in the query must be included
in the tables attribute of the database (i.e. they must first
be added with the add_tables command)
to_ memory : bool (optional)
If true will try to put the result into ram in the form of a pandas
dataframe, if the predicted size of the query is larger than
the parameter WARNING_RAM in common.constants this will be ignored
output_file : str (optional)
Full path of an output file where the query will be dumped into.
Must end either with .csv, .gz.csv, or .parquet, this will
determine the output format
Returns
----------
If the result fits in memory, it returns a pandas DataFrame, otherwise
a cached Spark DataFrame
"""
sql_query = self._parse_query(sql_query)
sqlDF = self.sqlContext.sql(sql_query)
shape = _spark_shape(sqlDF)
est_size = 10**-6 * (shape[0] * shape[1]) * 4
if to_memory and est_size > constants.WARNING_RAM:
logging.WARN("""Query output is larger than maximum allowed size,
returning uncached version dataframe instead""")
to_memory = False
if to_memory:
sqlDF = sqlDF.toPandas()
if '.csv' in output_file:
if '.gz' in output_file:
sqlDF.to_csv(output_file, compression = 'gzip',
index = False)
else:
sqlDF.to_csv(output_file,
index = False)
elif 'parquet' in output_file:
sqlDF.to_parquet(compression = 'GZIP')
else:
sqlDF = DataFrameWithInfo(sql_query, sqlDF)
if '.csv' in output_file:
if '.gz' in output_file:
sqlDF.write.csv(output_file, compression = 'GZIP',
header = True)
else:
sqlDF.write.csv(output_file, header = True)
elif 'parquet' in output_file:
sqlDF.write.parquet(output_file, compression = 'GZIP')
return sqlDF
def _parse_query(self, sql_query):
'''
Parses the query which could allow for custom keywords,
right now it just replaces UT with UNIX_TIMESTAMP
'''
# Search for Date) flags and replace with proper SQL keyword
sql_query = sql_query.replace('UT(','UNIX_TIMESTAMP(')
return sql_query
    def update_station_data(self, t0, t1, output_folder):
        '''
        update_station_data
            Populates the csv files that contain the point measurement data,
            that serve as base to update the database. A different file will
            be created for every station. If the file is already present the
            new data will be
            appended to the file.
        inputs:
            t0: start time in YYYYMMDD(HHMM) format (HHMM) is optional
            t1: end time in YYYYMMDD(HHMM) format (HHMM) is optional
            output_folder: where the files should be stored. If the directory
                is not empty, the new data will be merged with existing files
                if relevant
        '''
        # Normalize the folder path and make sure it exists
        if not output_folder.endswith(os.path.sep):
            output_folder += os.path.sep
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        # A config file must have been loaded (self.config set)
        try:
            self.config
        except:
            raise ValueError('Need to provide a config file to update data')
        if 'GAUGE_RETRIEVAL' in self.config:
            config_g = self.config['GAUGE_RETRIEVAL']
        else:
            raise ValueError("""Make sure you have a "GAUGE_RETRIEVAL" section in
                             your config file!""")
        tmp_folder = self.config['TMP_FOLDER']
        # Resolve the station selection keyword to a list of station ids
        # (STATION_INFO col 1 = station id, col 6 = network name)
        if config_g['STATIONS'] == 'all':
            stations = STATION_INFO[:,1]
        elif config_g['STATIONS'] == 'all_smn':
            stations = STATION_INFO[STATION_INFO[:,6] == 'SwissMetNet',1]
        elif config_g['STATIONS'] == 'all_ps':
            stations = STATION_INFO[STATION_INFO[:,6] == 'PrecipStation',1]
        else:
            stations = config_g['STATIONS']
        if type(stations) != list:
            stations = [stations]
        # Check if metadata file already present in folder
        # Check existence of previous data
        try:
            # Try to read old data
            current_tab = read_df(output_folder + '*.csv*',
                                    dbsystem = 'spark',
                                    sqlContext = self.sqlContext)
            old_data_ok = True # valid data present
        except:
            old_data_ok = False
            pass
        # Check existence of config file
        mdata_path = output_folder + '/.mdata.yml'
        try:
            old_config = envyaml(mdata_path)['GAUGE_RETRIEVAL']
        except:
            old_config = None
            pass
        overwrite = 1 # default
        if old_config and old_data_ok:
            if _compare_config(old_config, self.config,
                               ['GAUGE_RETRIEVAL','NO_DATA_FILL']):
                # Old data same format as new, don't overwrite
                overwrite = 0
            # NOTE(review): old_config holds only the GAUGE_RETRIEVAL section
            # while self.config is the full config, so this comparison is
            # very likely always True -- verify intent
            if old_config != self.config:
                # Special case
                tsteps_proc = np.array(current_tab.select('timestamp').collect(),
                                       dtype=int)
                tstamp_start_old = np.min(tsteps_proc)
                tstamp_end_old = np.max(tsteps_proc)
                tstamp_start = int(timestamp_from_datestr(t0))
                tstamp_end = int(timestamp_from_datestr(t1))
                # Warn when the new period does not fully cover the old one
                if (tstamp_start > tstamp_start_old or
                    tstamp_end < tstamp_end_old):
                    warning = """
    IMPORTANT: A previous set of tables was found in the indicated output folder
    corresponding to a different configuration file. If you continue, the old data
    will be replaced by the newly generated data, if they have the same timestamps.
    HOWEVER since the new data does not temporally cover the full extent of the old data,
    old and new data will coexist in the folder, which is ABSOLUTELY not recommended.
    If you are not sure what to do, either
    (1) delete the old data
    (2) change the current config file to match the config file of the old data
        (which is stored in the file .mdata.yaml in the specified output folder)
    (3) Rerun retrieval of station data to overlap the time period
        covered by the old data ({:s} - {:s}) and rerun the radar retrieval
    Press enter to continue, q to quit ...
    """.format(str(datetime.utcfromtimestamp(tstamp_start_old)),
               str(datetime.utcfromtimestamp(tstamp_end_old)))
                    userinput = input(textwrap.dedent(warning))
                    if userinput == 'q':
                        raise KeyboardInterrupt()
        # Write metadata file
        mdata = copy.deepcopy(config_g)
        yaml.dump(mdata, open(mdata_path,'w'))
        # Split all stations in subsets
        max_nb_jobs = config_g['MAX_NB_SLURM_JOBS']
        stations_sub = chunks(stations, max_nb_jobs)
        # Get current folder
        cwd = os.path.dirname(os.path.realpath(__file__))
        # One SLURM job per station subset, each running the R retrieval script
        for i, stations in enumerate(stations_sub):
            fname = tmp_folder + '/getdata_station_{:d}.job'.format(i)
            file = open(fname,'w')
            logging.info('Writing task file {:s}'.format(fname))
            file.write(constants.SLURM_HEADER_R)
            file.write('Rscript {:s}/retrieve_dwh_data.r {:s} {:s} {:f} "{:s}" "{:s}" {:s} {:d} {:d}'.format(
                cwd,
                t0,
                t1,
                config_g['MIN_R_HOURLY'],
                ','.join(stations),
                ','.join(config_g['VARIABLES']),
                output_folder,
                self.config['NO_DATA_FILL'],
                overwrite))
            file.close()
            logging.info('Submitting job {:d}'.format(i))
            subprocess.call('sbatch {:s}'.format(fname), shell = True)
        logging.info("""All jobs have been submitted, please wait a few hours
                     for completion...""")
    def update_reference_data(self, gauge_table_name, output_folder,
                              t0 = None, t1 = None):
        '''
        Updates the reference product table using timesteps from the gauge table
        Inputs:
            gauge_table_name: str
                name of the gauge table, must be included in the tables of
                the database, i.e. you must first add it with load_tables(..)
            output_folder: str
                directory where to store the computed radar tables
            t0: start time in YYYYMMDD(HHMM) (optional)
                starting time of the retrieval, by default all timesteps
                that are in the gauge table will be used
            t1: end time in YYYYMMDD(HHMM) (optional)
                ending time of the retrieval, by default all timesteps
                that are in the gauge table will be used
        '''
        # Normalize the output path and make sure the folder exists
        if not output_folder.endswith(os.path.sep):
            output_folder += os.path.sep
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        # A config file must have been loaded (self.config set)
        try:
            self.config
        except:
            raise ValueError('Need to provide a config file to update data')
        if gauge_table_name not in self.tables.keys():
            raise ValueError("""No table with name {:s} was found in the
                             loaded tables, make sure to add that table
                             with load_tables first""".format(gauge_table_name))
        if 'REFERENCE_RETRIEVAL' in self.config:
            config_r = self.config['REFERENCE_RETRIEVAL']
        else:
            raise ValueError("""Make sure you have a "REFERENCE_RETRIEVAL" section in
                             your config file!""")
        # Check existence of previous data
        try:
            # Try to read old data
            current_tab = read_df(output_folder + '*.parquet',
                                    dbsystem = 'spark',
                                    sqlContext = self.sqlContext)
            old_data_ok = True # valid data present
        except:
            old_data_ok = False
            pass
        logging.info('Finding unique timesteps and corresponding stations')
        tab = self.tables[gauge_table_name].select(['STATION',
                                                    'TIMESTAMP']).toPandas()
        # Optionally restrict to the requested [t0, t1] period
        if t0 != None and t1 != None and t1 > t0:
            logging.info('Limiting myself to time period {:s} - {:s}'.format(
                    str(t0), str(t1)))
            tstamp_start = int(timestamp_from_datestr(t0))
            tstamp_end = int(timestamp_from_datestr(t1))
            tab = tab.loc[(tab['TIMESTAMP'] > tstamp_start)
                          & (tab['TIMESTAMP'] <= tstamp_end)]
        # Check existence of config file
        mdata_path = output_folder + '/.mdata.yml'
        try:
            old_config = envyaml(mdata_path)
        except:
            old_config = None
            pass
        overwrite = 1 # default
        if old_config and old_data_ok:
            if _compare_config(old_config, self.config,
                    ['GAUGE_RETRIEVAL','REFERENCE_RETRIEVAL','NO_DATA_FILL']):
                # Old data same format as new, don't overwrite
                overwrite = 0
            else:
                # Special case
                tsteps_proc = current_tab.select('TIMESTAMP').collect()
                tstamp_start_old = np.min(tsteps_proc)
                tstamp_end_old = np.max(tsteps_proc)
                tstamp_start = int(np.min(tab['TIMESTAMP']))
                tstamp_end = int(np.max(tab['TIMESTAMP']))
                # Warn when the new period does not fully cover the old one
                if (tstamp_start > tstamp_start_old or
                    tstamp_end < tstamp_end_old):
                    warning = """
    IMPORTANT: A previous set of tables was found in the indicated output folder
    corresponding to a different configuration file. If you continue, the old data
    will be replaced by the newly generated data, if they have the same timestamps.
    HOWEVER since the new data does not temporally cover the full extent of the old data,
    old and new data will coexist in the folder, which is ABSOLUTELY not recommended.
    If you are not sure what to do, either
    (1) delete the old data
    (2) change the current config file to match the config file of the old data
        (which is stored in the file .mdata.yaml in the specified output folder)
    (3) Rerun retrieval of station data to overlap the time period
        covered by the old data ({:s} - {:s}) and rerun the radar retrieval
    Press enter to continue, q to quit ...
    """.format(str(datetime.utcfromtimestamp(tstamp_start_old)),
               str(datetime.utcfromtimestamp(tstamp_end_old)))
                    userinput = input(textwrap.dedent(warning))
                    if userinput == 'q':
                        raise KeyboardInterrupt()
        # Write metadata file
        mdata = copy.deepcopy(self.config)
        yaml.dump(mdata, open(mdata_path,'w'))
        # Unique timesteps; idx maps each gauge row back to its timestep
        unique_times, idx = np.unique(tab['TIMESTAMP'], return_inverse = True)
        if not len(unique_times):
            msg = '''All timesteps are already present in the already computed tables in the indicated output folder!'''
            logging.error(textwrap.dedent(msg))
            logging.error('Stopping now...')
            return
        all_stations = tab['STATION']
        # Split tasks and write taskfiles
        logging.info('Writing task files, this can take a long time')
        num_jobs = config_r['MAX_NB_SLURM_JOBS']
        tmp_folder = self.config['TMP_FOLDER']
        # Jobs are split by days, a single day is never split over several jobs
        # because the created output files are day based
        ttuples = [datetime.utcfromtimestamp(float(t)).timetuple()
                   for t in unique_times]
        days = [[str(t.tm_year) + str(t.tm_yday)] for t in ttuples]
        days_to_process = np.unique(days, axis = 0)
        days_to_process = list(days_to_process)
        if not overwrite:
            msg = '''A previous set of tables corresponding to the same config file was found, only new timestamps will be added'''
            logging.warning(textwrap.dedent(msg))
            # Find which days have already been processed and remove them
            files = glob.glob(output_folder + '*.parquet')
            for f in files:
                f = os.path.splitext(os.path.basename(f))[0]
                dt = datetime.strptime(os.path.basename(f),'%Y%m%d')
                tt = dt.timetuple()
                current_day = str(tt.tm_year) + str(tt.tm_yday)
                if current_day in days_to_process:
                    logging.warning('Day {:s} was already computed, ignoring it...'.format(f))
                    days_to_process.remove(current_day)
        days_per_job = max([1,int(np.round(len(days_to_process)/num_jobs))])
        day_counter = 0
        current_job = 0
        current_day = days[0]
        # Create task files: one line per timestep with its station list
        task_files = []
        name_file = tmp_folder + 'task_file_reference_{:d}'.format(current_job)
        task_files.append(name_file)
        ftask = open(name_file,'w')
        for i in range(len(unique_times)):
            if days[i] not in days_to_process:
                continue
            if days[i] != current_day:
                day_counter += 1
                current_day = days[i]
                if day_counter == days_per_job:
                    # New job
                    current_job += 1
                    ftask.close()
                    name_file = tmp_folder + 'task_file_reference_{:d}'.format(current_job)
                    logging.info('Writing task file {:s}'.format(name_file))
                    task_files.append(name_file)
                    # Open new task file
                    ftask = open(name_file,'w')
                    # Reset counters
                    day_counter = 0
            ftask.write('{:d},{:s} \n'.format(int(unique_times[i]),
                        ','.join(all_stations[idx == i])))
        ftask.close()
        # Get current folder
        cwd = os.path.dirname(os.path.realpath(__file__))
        # Create slurm files
        for i, tf in enumerate(task_files):
            fname = tmp_folder + '/getdata_reference_{:d}.job'.format(i)
            file = open(fname,'w')
            file.write(constants.SLURM_HEADER_PY)
            file.write('python {:s}/retrieve_reference_data.py -c {:s} -t {:s} -o {:s} '.format(
                cwd,
                self.config_file,
                tf,
                output_folder))
            file.close()
            logging.info('Submitting job {:d}'.format(i))
            subprocess.call('sbatch {:s}'.format(fname), shell = True)
    def update_radar_data(self, gauge_table_name, output_folder,
                          t0 = None, t1 = None):
        '''
        Updates the radar table using timesteps from the gauge table
        Inputs:
            gauge_table_name: str
                name of the gauge table, must be included in the tables of
                the database, i.e. you must first add it with load_tables(..)
            output_folder: str
                directory where to store the computed radar tables
            t0: start time in YYYYMMDD(HHMM) (optional)
                starting time of the retrieval, by default all timesteps
                that are in the gauge table will be used
            t1: end time in YYYYMMDD(HHMM) (optional)
                ending time of the retrieval, by default all timesteps
                that are in the gauge table will be used
        '''
        # Normalize the output path and make sure the folder exists
        if not output_folder.endswith(os.path.sep):
            output_folder += os.path.sep
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        # A config file must have been loaded (self.config set)
        try:
            self.config
        except:
            raise ValueError('Need to provide a config file to update data')
        if gauge_table_name not in self.tables.keys():
            raise ValueError("""No table with name {:s} was found in the
                             loaded tables, make sure to add that table
                             with load_tables first""".format(gauge_table_name))
        if 'RADAR_RETRIEVAL' in self.config:
            config_r = self.config['RADAR_RETRIEVAL']
        else:
            raise ValueError("""Make sure you have a "RADAR_RETRIEVAL" section in
                             your config file!""")
        logging.info('Finding unique timesteps and corresponding stations')
        tab = self.tables[gauge_table_name].select(['STATION',
                                                    'TIMESTAMP']).toPandas()
        # Optionally restrict to the requested [t0, t1] period
        if t0 != None and t1 != None and t1 > t0:
            logging.info('Limiting myself to time period {:s} - {:s}'.format(
                    str(t0), str(t1)))
            tstamp_start = int(timestamp_from_datestr(t0))
            tstamp_end = int(timestamp_from_datestr(t1))
            tab = tab.loc[(tab['TIMESTAMP'] > tstamp_start)
                          & (tab['TIMESTAMP'] <= tstamp_end)]
        # Check existence of previous data
        try:
            # Try to read old data
            current_tab = read_df(output_folder + '*.parquet',
                                    dbsystem = 'spark',
                                    sqlContext = self.sqlContext)
            old_data_ok = True # valid data present
        except:
            old_data_ok = False
            pass
        # Check existence of config file
        mdata_path = output_folder + '/.mdata.yml'
        try:
            old_config = envyaml(mdata_path)
        except:
            old_config = None
            pass
        overwrite = 1 # default
        if old_config and old_data_ok:
            if _compare_config(old_config, self.config,
                    ['GAUGE_RETRIEVAL','RADAR_RETRIEVAL',
                     'NO_DATA_FILL']):
                # Old data same format as new, don't overwrite
                overwrite = 0
            else:
                # Special case
                tsteps_proc = current_tab.select('TIMESTAMP').collect()
                tstamp_start_old = np.min(tsteps_proc)
                tstamp_end_old = np.max(tsteps_proc)
                tstamp_start = int(np.min(tab['TIMESTAMP']))
                tstamp_end = int(np.max(tab['TIMESTAMP']))
                # Warn when the new period does not fully cover the old one
                if (tstamp_start > tstamp_start_old or
                    tstamp_end < tstamp_end_old):
                    warning = """
    IMPORTANT: A previous set of tables was found in the indicated output folder
    corresponding to a different configuration file. If you continue, the old data
    will be replaced by the newly generated data, if they have the same timestamps.
    HOWEVER since the new data does not temporally cover the full extent of the old data,
    old and new data will coexist in the folder, which is ABSOLUTELY not recommended.
    If you are not sure what to do, either
    (1) delete the old data
    (2) change the current config file to match the config file of the old data
        (which is stored in the file .mdata.yaml in the specified output folder)
    (3) Rerun retrieval of station data to overlap the time period
        covered by the old data ({:s} - {:s}) and rerun the radar retrieval
    Press enter to continue, q to quit ...
    """.format(str(datetime.utcfromtimestamp(tstamp_start_old)),
               str(datetime.utcfromtimestamp(tstamp_end_old)))
                    userinput = input(textwrap.dedent(warning))
                    if userinput == 'q':
                        raise KeyboardInterrupt()
        # Write metadata file
        mdata = copy.deepcopy(self.config)
        yaml.dump(mdata, open(mdata_path,'w'))
        # Unique timesteps; idx maps each gauge row back to its timestep
        unique_times, idx = np.unique(tab['TIMESTAMP'], return_inverse = True)
        all_stations = tab['STATION']
        if not len(unique_times):
            msg = '''All timesteps are already present in the already computed tables in the indicated output folder!'''
            logging.error(textwrap.dedent(msg))
            logging.error('Stopping now...')
            return
        # Split tasks and write taskfiles
        num_jobs = config_r['MAX_NB_SLURM_JOBS']
        tmp_folder = self.config['TMP_FOLDER']
        # Jobs are split by days, a single day is never split over several jobs
        # because the created output files are day based
        ttuples = [datetime.utcfromtimestamp(float(t)).timetuple()
                   for t in unique_times]
        days = [[str(t.tm_year) + str(t.tm_yday)] for t in ttuples]
        days_to_process = np.unique(days, axis = 0)
        days_to_process = list(days_to_process)
        if not overwrite:
            msg = '''A previous set of tables corresponding to the same config file was found, only new timestamps will be added'''
            logging.warning(textwrap.dedent(msg))
            # Find which days have already been processed and remove them
            files = glob.glob(output_folder + '*.parquet')
            for f in files:
                f = os.path.splitext(os.path.basename(f))[0]
                dt = datetime.strptime(os.path.basename(f),'%Y%m%d')
                tt = dt.timetuple()
                current_day = str(tt.tm_year) + str(tt.tm_yday)
                if current_day in days_to_process:
                    logging.warning('Day {:s} was already computed, ignoring it...'.format(f))
                    days_to_process.remove(current_day)
        days_per_job = max([1,int(np.round(len(days_to_process)/num_jobs))])
        day_counter = 0
        current_job = 0
        current_day = days[0]
        logging.info('Writing task files')
        # Create task files: one line per timestep with its station list
        task_files = []
        name_file = tmp_folder + 'task_file_radar_{:d}'.format(current_job)
        task_files.append(name_file)
        ftask = open(name_file,'w')
        for i in range(len(unique_times)):
            if days[i] not in days_to_process:
                continue
            if days[i] != current_day:
                day_counter += 1
                current_day = days[i]
                if day_counter == days_per_job:
                    # New job
                    current_job += 1
                    ftask.close()
                    name_file = tmp_folder + 'task_file_radar_{:d}'.format(current_job)
                    logging.info('Writing task file {:s}'.format(name_file))
                    task_files.append(name_file)
                    ftask = open(name_file,'w')
                    # Reset counters
                    day_counter = 0
            ftask.write('{:d},{:s} \n'.format(int(unique_times[i]),
                        ','.join(all_stations[idx == i])))
        ftask.close()
        # Get current folder
        cwd = os.path.dirname(os.path.realpath(__file__))
        # Create slurm files
        for i, tf in enumerate(task_files):
            fname = tmp_folder + '/getdata_radar_{:d}.job'.format(i)
            file = open(fname,'w')
            file.write(constants.SLURM_HEADER_PY)
            file.write('python {:s}/retrieve_radar_data.py -c {:s} -t {:s} -o {:s} '.format(
                cwd,
                self.config_file,
                tf,
                output_folder))
            file.close()
        # delayed job launch
        # Throttled submission: never exceed MAX_SIMULTANEOUS_JOBS at once
        for i in range(len(task_files)):
            fname = tmp_folder + '/getdata_radar_{:d}.job'.format(i)
            logging.info('Submitting job {:d}'.format(i))
            subprocess.call('sbatch {:s}'.format(fname), shell = True)
            time.sleep(10)
            if _n_running_jobs() >= config_r['MAX_SIMULTANEOUS_JOBS']:
                logging.info('Too many jobs have been launched, waiting until some complete...')
                while True: # Loop until less jobs are running
                    time.sleep(60)
                    if _n_running_jobs() < config_r['MAX_SIMULTANEOUS_JOBS']:
                        break
def _compare_config(config1, config2, keys=None):
    """
    Compares the configuration of two data tables, by checking only the keys
    that affect the data (i.e. the radar processing, the choice of samples)

    Parameters
    ----------
    config1 : dict
        configuration dictionary 1
    config2 : dict
        configuration dictionary 2
    keys : which dict keys to check, by default all are checked

    Returns
    -------
    True if two configurations are equivalent, False otherwise
    """
    if keys is None:
        keys = list(config1.keys())
    # Keys that do not affect the data content (job scheduling / bookkeeping
    # settings such as MAX_NB_SLURM_JOBS or MAX_SIMULTANEOUS_JOBS) are ignored
    keys_no_data = ['MAX_NB_SLURM_JOBS', 'TMP_FOLDER', 'MAX_SIMULTANEOUS_JOBS']
    c1 = dict_flatten(config1)
    c2 = dict_flatten(config2)
    try:
        # Check the keys of both flattened dicts: a key present on only one
        # side raises KeyError below and makes the configs unequal
        for k in list(c1.keys()) + list(c2.keys()):
            if k not in keys:
                continue
            if any(knd in k for knd in keys_no_data):
                continue
            if c1[k] != c2[k]:
                return False
        return True
    except (KeyError, TypeError, ValueError):
        # KeyError: key missing on one side; TypeError/ValueError: values
        # that cannot be compared (the original bare `except:` also treated
        # these as "not equivalent")
        return False
def _n_running_jobs(user='$USER', job_name='getdata*'):
    """
    Gets the number of jobs currently running on CSCS

    Parameters
    ----------
    user : str
        the user on the CSCS servers
    job_name : str
        name of the job, UNIX style wildcards are supported

    Returns
    -------
    Number of jobs as an integer
    """
    raw = subprocess.check_output('squeue -u {:s}'.format(user),
                                  shell=True)
    rows = raw.decode('utf-8').split('\n')
    # Only the header line plus the trailing empty string: queue is empty
    if len(rows) == 2:
        return 0
    # Skip the header (first row) and the trailing empty entry (last row),
    # then match the job-name column (index 2) against the wildcard pattern
    return sum(1 for row in rows[1:-1]
               if fnmatch.filter([row.split()[2]], job_name))
def _spark_shape(df):
return (df.count(),len(df.columns))
| [
"pyspark.SparkContext",
"pyspark.SparkConf",
"glob.glob",
"numpy.unique",
"logging.error",
"os.path.exists",
"datetime.datetime.utcfromtimestamp",
"numpy.max",
"copy.deepcopy",
"os.path.basename",
"os.path.realpath",
"logging.WARN",
"time.sleep",
"numpy.min",
"textwrap.dedent",
"fnmatc... | [((430, 441), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (439, 441), False, 'from pyspark import SparkConf\n'), ((1214, 1245), 'numpy.array', 'np.array', (['constants.METSTATIONS'], {}), '(constants.METSTATIONS)\n', (1222, 1245), True, 'import numpy as np\n'), ((859, 878), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (876, 878), False, 'import logging\n'), ((2993, 3016), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (3005, 3016), False, 'from pyspark import SparkContext\n'), ((3045, 3069), 'pyspark.sql.SQLContext', 'SQLContext', (['sparkContext'], {}), '(sparkContext)\n', (3055, 3069), False, 'from pyspark.sql import SQLContext, DataFrame\n'), ((13853, 13876), 'copy.deepcopy', 'copy.deepcopy', (['config_g'], {}), '(config_g)\n', (13866, 13876), False, 'import copy\n'), ((15083, 15205), 'logging.info', 'logging.info', (['"""All jobs have been submitted, please wait a few hours\n for completion..."""'], {}), '(\n """All jobs have been submitted, please wait a few hours\n for completion..."""\n )\n', (15095, 15205), False, 'import logging\n'), ((17434, 17501), 'logging.info', 'logging.info', (['"""Finding unique timesteps and corresponding stations"""'], {}), "('Finding unique timesteps and corresponding stations')\n", (17446, 17501), False, 'import logging\n'), ((20523, 20549), 'copy.deepcopy', 'copy.deepcopy', (['self.config'], {}), '(self.config)\n', (20536, 20549), False, 'import copy\n'), ((20626, 20674), 'numpy.unique', 'np.unique', (["tab['TIMESTAMP']"], {'return_inverse': '(True)'}), "(tab['TIMESTAMP'], return_inverse=True)\n", (20635, 20674), True, 'import numpy as np\n'), ((21063, 21124), 'logging.info', 'logging.info', (['"""Writing task files, this can take a long time"""'], {}), "('Writing task files, this can take a long time')\n", (21075, 21124), False, 'import logging\n'), ((21573, 21596), 'numpy.unique', 'np.unique', (['days'], {'axis': '(0)'}), '(days, axis=0)\n', (21582, 21596), 
True, 'import numpy as np\n'), ((26332, 26399), 'logging.info', 'logging.info', (['"""Finding unique timesteps and corresponding stations"""'], {}), "('Finding unique timesteps and corresponding stations')\n", (26344, 26399), False, 'import logging\n'), ((29921, 29947), 'copy.deepcopy', 'copy.deepcopy', (['self.config'], {}), '(self.config)\n', (29934, 29947), False, 'import copy\n'), ((30032, 30080), 'numpy.unique', 'np.unique', (["tab['TIMESTAMP']"], {'return_inverse': '(True)'}), "(tab['TIMESTAMP'], return_inverse=True)\n", (30041, 30080), True, 'import numpy as np\n'), ((30896, 30919), 'numpy.unique', 'np.unique', (['days'], {'axis': '(0)'}), '(days, axis=0)\n', (30905, 30919), True, 'import numpy as np\n'), ((31985, 32019), 'logging.info', 'logging.info', (['"""Writing task files"""'], {}), "('Writing task files')\n", (31997, 32019), False, 'import logging\n'), ((7303, 7452), 'logging.WARN', 'logging.WARN', (['"""Query output is larger than maximum allowed size,\n returning uncached version dataframe instead"""'], {}), '(\n """Query output is larger than maximum allowed size,\n returning uncached version dataframe instead"""\n )\n', (7315, 7452), False, 'import logging\n'), ((9755, 9784), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (9769, 9784), False, 'import os\n'), ((9798, 9824), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (9809, 9824), False, 'import os\n'), ((14147, 14173), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (14163, 14173), False, 'import os\n'), ((16233, 16262), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (16247, 16262), False, 'import os\n'), ((16276, 16302), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (16287, 16302), False, 'import os\n'), ((20901, 20933), 'logging.error', 'logging.error', (['"""Stopping now..."""'], {}), "('Stopping now...')\n", (20914, 20933), False, 
'import logging\n'), ((21959, 21997), 'glob.glob', 'glob.glob', (["(output_folder + '*.parquet')"], {}), "(output_folder + '*.parquet')\n", (21968, 21997), False, 'import glob\n'), ((23867, 23893), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (23883, 23893), False, 'import os\n'), ((25555, 25584), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (25569, 25584), False, 'import os\n'), ((25598, 25624), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (25609, 25624), False, 'import os\n'), ((30345, 30377), 'logging.error', 'logging.error', (['"""Stopping now..."""'], {}), "('Stopping now...')\n", (30358, 30377), False, 'import logging\n'), ((31274, 31312), 'glob.glob', 'glob.glob', (["(output_folder + '*.parquet')"], {}), "(output_folder + '*.parquet')\n", (31283, 31312), False, 'import glob\n'), ((33182, 33208), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (33198, 33208), False, 'import os\n'), ((33981, 33995), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (33991, 33995), False, 'import time\n'), ((36469, 36501), 'fnmatch.filter', 'fnmatch.filter', (['[l[2]]', 'job_name'], {}), '([l[2]], job_name)\n', (36483, 36501), False, 'import fnmatch\n'), ((1990, 2003), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (1996, 2003), True, 'import numpy as np\n'), ((2049, 2062), 'numpy.max', 'np.max', (['times'], {}), '(times)\n', (2055, 2062), True, 'import numpy as np\n'), ((12009, 12028), 'numpy.min', 'np.min', (['tsteps_proc'], {}), '(tsteps_proc)\n', (12015, 12028), True, 'import numpy as np\n'), ((12062, 12081), 'numpy.max', 'np.max', (['tsteps_proc'], {}), '(tsteps_proc)\n', (12068, 12081), True, 'import numpy as np\n'), ((18732, 18751), 'numpy.min', 'np.min', (['tsteps_proc'], {}), '(tsteps_proc)\n', (18738, 18751), True, 'import numpy as np\n'), ((18785, 18804), 'numpy.max', 'np.max', (['tsteps_proc'], {}), '(tsteps_proc)\n', (18791, 
18804), True, 'import numpy as np\n'), ((20867, 20887), 'textwrap.dedent', 'textwrap.dedent', (['msg'], {}), '(msg)\n', (20882, 20887), False, 'import textwrap\n'), ((21842, 21862), 'textwrap.dedent', 'textwrap.dedent', (['msg'], {}), '(msg)\n', (21857, 21862), False, 'import textwrap\n'), ((28116, 28135), 'numpy.min', 'np.min', (['tsteps_proc'], {}), '(tsteps_proc)\n', (28122, 28135), True, 'import numpy as np\n'), ((28169, 28188), 'numpy.max', 'np.max', (['tsteps_proc'], {}), '(tsteps_proc)\n', (28175, 28188), True, 'import numpy as np\n'), ((30311, 30331), 'textwrap.dedent', 'textwrap.dedent', (['msg'], {}), '(msg)\n', (30326, 30331), False, 'import textwrap\n'), ((31157, 31177), 'textwrap.dedent', 'textwrap.dedent', (['msg'], {}), '(msg)\n', (31172, 31177), False, 'import textwrap\n'), ((34083, 34168), 'logging.info', 'logging.info', (['"""Too many jobs have been launched, waiting until some complete..."""'], {}), "('Too many jobs have been launched, waiting until some complete...'\n )\n", (34095, 34168), False, 'import logging\n'), ((18857, 18881), 'numpy.min', 'np.min', (["tab['TIMESTAMP']"], {}), "(tab['TIMESTAMP'])\n", (18863, 18881), True, 'import numpy as np\n'), ((18916, 18940), 'numpy.max', 'np.max', (["tab['TIMESTAMP']"], {}), "(tab['TIMESTAMP'])\n", (18922, 18940), True, 'import numpy as np\n'), ((22143, 22162), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (22159, 22162), False, 'import os\n'), ((28241, 28265), 'numpy.min', 'np.min', (["tab['TIMESTAMP']"], {}), "(tab['TIMESTAMP'])\n", (28247, 28265), True, 'import numpy as np\n'), ((28300, 28324), 'numpy.max', 'np.max', (["tab['TIMESTAMP']"], {}), "(tab['TIMESTAMP'])\n", (28306, 28324), True, 'import numpy as np\n'), ((31458, 31477), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (31474, 31477), False, 'import os\n'), ((34247, 34261), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (34257, 34261), False, 'import time\n'), ((13635, 13659), 'textwrap.dedent', 
'textwrap.dedent', (['warning'], {}), '(warning)\n', (13650, 13659), False, 'import textwrap\n'), ((20355, 20379), 'textwrap.dedent', 'textwrap.dedent', (['warning'], {}), '(warning)\n', (20370, 20379), False, 'import textwrap\n'), ((22080, 22099), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (22096, 22099), False, 'import os\n'), ((29733, 29757), 'textwrap.dedent', 'textwrap.dedent', (['warning'], {}), '(warning)\n', (29748, 29757), False, 'import textwrap\n'), ((31395, 31414), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (31411, 31414), False, 'import os\n'), ((13472, 13515), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['tstamp_start_old'], {}), '(tstamp_start_old)\n', (13497, 13515), False, 'from datetime import datetime\n'), ((13553, 13594), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['tstamp_end_old'], {}), '(tstamp_end_old)\n', (13578, 13594), False, 'from datetime import datetime\n'), ((20192, 20235), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['tstamp_start_old'], {}), '(tstamp_start_old)\n', (20217, 20235), False, 'from datetime import datetime\n'), ((20273, 20314), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['tstamp_end_old'], {}), '(tstamp_end_old)\n', (20298, 20314), False, 'from datetime import datetime\n'), ((29570, 29613), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['tstamp_start_old'], {}), '(tstamp_start_old)\n', (29595, 29613), False, 'from datetime import datetime\n'), ((29651, 29692), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['tstamp_end_old'], {}), '(tstamp_end_old)\n', (29676, 29692), False, 'from datetime import datetime\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pearsonLinearCorrelationCoefficient import pearson_linear_correlation_coefficient
def linear_regression(filename: str):
    """Fit a least-squares line y = a*x + b to a two-column CSV file.

    Args:
        filename: Path to a comma-separated file with a header row whose
            first column holds the x values and second column the y values.

    Returns:
        Tuple ``(a, b)``: slope and intercept of the fitted line.
    """
    df = pd.read_csv(filename, delimiter=',')
    # Use positional access via .iloc: the original `df_hat[0]` relied on the
    # deprecated positional fallback of Series.__getitem__ (removed in
    # pandas 3.0). Working on numpy arrays also avoids adding scratch
    # columns to the dataframe.
    x = df.iloc[:, 0].to_numpy(dtype=float)
    y = df.iloc[:, 1].to_numpy(dtype=float)
    x_hat = x.mean()
    y_hat = y.mean()
    dx = x - x_hat
    # Least-squares slope: sum(y_i * (x_i - x_mean)) / sum((x_i - x_mean)^2)
    a = np.dot(y, dx) / np.dot(dx, dx)
    b = y_hat - a * x_hat
    return a, b
def draw_linear_regression(a: float, b: float, filename: str):
    """Plot the data points from *filename* together with the line y = a*x + b.

    The figure is written to 'temp_regression_chart.png' in the working
    directory.
    """
    df = pd.read_csv(filename, delimiter=',')
    x_col, y_col = df.columns[0], df.columns[1]
    # Scatter plot of the raw samples (black circles)
    df.plot(x=x_col, y=y_col, style='o', color='k')
    # Extend the x range by 10% on each side and sample it in 10 steps
    lo = df.iloc[:, 0].min()
    lo -= 0.1 * lo
    hi = df.iloc[:, 0].max()
    hi += 0.1 * hi
    xs = np.arange(lo, hi, (hi - lo) / 10)
    ys = a * xs + b
    plt.grid()
    plt.plot(xs, ys, 'r', linewidth=2)
    plt.xlabel(x_col)
    plt.ylabel(y_col)
    plt.legend(['y=f(x)', 'regression line'])
    fig = plt.gcf()
    fig.set_size_inches(5, 3.8)
    fig.tight_layout()
    plt.savefig('temp_regression_chart.png', dpi=100)
    # plt.show()
# Announce at import time which algorithm module was loaded.
print("Algorithm loaded: Linear regression")
# Example end-to-end usage, kept as an inert module-level string (never run):
"""
filename = '../data/pearsonLinearCorrelationCoefficient.csv'
r = pearson_linear_correlation_coefficient(filename)
print(f"Correlation coefficient: {r}")
if abs(r) < 0.5:
    print("Correlation coefficient should be less than |0.5|")
    exit(0)
a, b = linear_regression(filename)
print(f"a: {a}")
print(f"b: {b}")
draw_linear_regression(a, b, filename)
"""
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] | [((207, 243), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (218, 243), True, 'import pandas as pd\n'), ((695, 731), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (706, 731), True, 'import pandas as pd\n'), ((990, 1017), 'numpy.arange', 'np.arange', (['minX', 'maxX', 'step'], {}), '(minX, maxX, step)\n', (999, 1017), True, 'import numpy as np\n'), ((1060, 1070), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1068, 1070), True, 'import matplotlib.pyplot as plt\n'), ((1075, 1127), 'matplotlib.pyplot.plot', 'plt.plot', (['regressionX', 'regressionY', '"""r"""'], {'linewidth': '(2)'}), "(regressionX, regressionY, 'r', linewidth=2)\n", (1083, 1127), True, 'import matplotlib.pyplot as plt\n'), ((1132, 1159), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['column_names[0]'], {}), '(column_names[0])\n', (1142, 1159), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1191), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['column_names[1]'], {}), '(column_names[1])\n', (1174, 1191), True, 'import matplotlib.pyplot as plt\n'), ((1196, 1237), 'matplotlib.pyplot.legend', 'plt.legend', (["['y=f(x)', 'regression line']"], {}), "(['y=f(x)', 'regression line'])\n", (1206, 1237), True, 'import matplotlib.pyplot as plt\n'), ((1248, 1257), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1255, 1257), True, 'import matplotlib.pyplot as plt\n'), ((1317, 1366), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""temp_regression_chart.png"""'], {'dpi': '(100)'}), "('temp_regression_chart.png', dpi=100)\n", (1328, 1366), True, 'import matplotlib.pyplot as plt\n')] |
from abc import ABC, abstractmethod
import numpy as np
from .blobs import Blob
from nptyping import NDArray, Float
from typing import Any
class BlobFactory(ABC):
    """Abstract class used by the 2d propagating blob model to specify blob
    parameters. Subclasses decide how the random parameters of the
    individual blobs are drawn."""
    @abstractmethod
    def sample_blobs(
        self, Ly: float, T: float, num_blobs: int, blob_shape: str, t_drain: float
    ) -> list[Blob]:
        """Create the list of Blobs used in Model.

        Parameters
        ----------
        Ly : float
            upper bound for the blob y-positions (domain extent in y)
        T : float
            upper bound for the blob arrival times
        num_blobs : int
            number of blobs to create
        blob_shape : str
            pulse shape forwarded to each Blob
        t_drain : float
            drain time forwarded to each Blob
        """
        raise NotImplementedError
class DefaultBlobFactory(BlobFactory):
    """Default implementation of BlobFactory.

    Generates blob parameters for different possible random
    distributions. All random variables are independent from each other.
    """

    def __init__(
        self,
        A_dist: str = "exp",
        W_dist: str = "exp",
        vx_dist: str = "deg",
        vy_dist: str = "normal",
        A_parameter: float = 1.0,
        W_parameter: float = 1.0,
        vx_parameter: float = 1.0,
        vy_parameter: float = 1.0,
    ) -> None:
        """The following distributions are implemented:

        exp: exponential distribution with mean 1
        gamma: gamma distribution with `free_parameter` as shape parameter and mean 1
        normal: normal distribution with zero mean and `free_parameter` as scale parameter
        uniform: uniform distribution with mean 1 and `free_parameter` as width
        ray: rayleigh distribution with mean 1
        deg: array of ones
        zeros: array of zeros
        """
        self.A_dist, self.W_dist = A_dist, W_dist
        self.vx_dist, self.vy_dist = vx_dist, vy_dist
        self.A_parameter, self.W_parameter = A_parameter, W_parameter
        self.vx_parameter, self.vy_parameter = vx_parameter, vy_parameter

    def __draw_random_variables(
        self,
        dist_type: str,
        free_parameter: float,
        num_blobs: int,
    ) -> NDArray[Any, Float[64]]:
        # Lazily-evaluated samplers: only the selected entry touches the RNG,
        # so the draw order is identical to the original if/elif chain.
        samplers = {
            "exp": lambda: np.random.exponential(scale=1, size=num_blobs),
            "gamma": lambda: np.random.gamma(
                shape=free_parameter, scale=1 / free_parameter, size=num_blobs
            ),
            "normal": lambda: np.random.normal(
                loc=0, scale=free_parameter, size=num_blobs
            ),
            "uniform": lambda: np.random.uniform(
                low=1 - free_parameter / 2, high=1 + free_parameter / 2, size=num_blobs
            ),
            "ray": lambda: np.random.rayleigh(
                scale=np.sqrt(2.0 / np.pi), size=num_blobs
            ),
            "deg": lambda: np.ones(num_blobs),
            "zeros": lambda: np.zeros(num_blobs),
        }
        if dist_type not in samplers:
            raise NotImplementedError(
                self.__class__.__name__ + ".distribution function not implemented"
            )
        return samplers[dist_type]()

    def sample_blobs(
        self, Ly: float, T: float, num_blobs: int, blob_shape: str, t_drain: float
    ) -> list[Blob]:
        draw = self.__draw_random_variables
        amplitudes = draw(self.A_dist, self.A_parameter, num_blobs)
        widths = draw(self.W_dist, self.W_parameter, num_blobs)
        vxs = draw(self.vx_dist, self.vx_parameter, num_blobs)
        vys = draw(self.vy_dist, self.vy_parameter, num_blobs)
        pos_xs = np.zeros(num_blobs)
        pos_ys = np.random.uniform(low=0.0, high=Ly, size=num_blobs)
        # Arrival times are sorted so blobs appear in chronological order
        t_inits = np.sort(np.random.uniform(low=0, high=T, size=num_blobs))
        blobs = []
        for i in range(num_blobs):
            blobs.append(
                Blob(
                    id=i,
                    blob_shape=blob_shape,
                    amplitude=amplitudes[i],
                    width_prop=widths[i],
                    width_perp=widths[i],
                    v_x=vxs[i],
                    v_y=vys[i],
                    pos_x=pos_xs[i],
                    pos_y=pos_ys[i],
                    t_init=t_inits[i],
                    t_drain=t_drain,
                )
            )
        return blobs
| [
"numpy.random.uniform",
"numpy.random.exponential",
"numpy.zeros",
"numpy.ones",
"numpy.random.gamma",
"numpy.sort",
"numpy.random.normal",
"numpy.sqrt"
] | [((3462, 3481), 'numpy.zeros', 'np.zeros', (['num_blobs'], {}), '(num_blobs)\n', (3470, 3481), True, 'import numpy as np\n'), ((3499, 3550), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': 'Ly', 'size': 'num_blobs'}), '(low=0.0, high=Ly, size=num_blobs)\n', (3516, 3550), True, 'import numpy as np\n'), ((3570, 3618), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': 'T', 'size': 'num_blobs'}), '(low=0, high=T, size=num_blobs)\n', (3587, 3618), True, 'import numpy as np\n'), ((3672, 3689), 'numpy.sort', 'np.sort', (['__t_init'], {}), '(__t_init)\n', (3679, 3689), True, 'import numpy as np\n'), ((1994, 2040), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': '(1)', 'size': 'num_blobs'}), '(scale=1, size=num_blobs)\n', (2015, 2040), True, 'import numpy as np\n'), ((2095, 2174), 'numpy.random.gamma', 'np.random.gamma', ([], {'shape': 'free_parameter', 'scale': '(1 / free_parameter)', 'size': 'num_blobs'}), '(shape=free_parameter, scale=1 / free_parameter, size=num_blobs)\n', (2110, 2174), True, 'import numpy as np\n'), ((2260, 2321), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'free_parameter', 'size': 'num_blobs'}), '(loc=0, scale=free_parameter, size=num_blobs)\n', (2276, 2321), True, 'import numpy as np\n'), ((2378, 2472), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1 - free_parameter / 2)', 'high': '(1 + free_parameter / 2)', 'size': 'num_blobs'}), '(low=1 - free_parameter / 2, high=1 + free_parameter / 2,\n size=num_blobs)\n', (2395, 2472), True, 'import numpy as np\n'), ((2666, 2684), 'numpy.ones', 'np.ones', (['num_blobs'], {}), '(num_blobs)\n', (2673, 2684), True, 'import numpy as np\n'), ((2576, 2596), 'numpy.sqrt', 'np.sqrt', (['(2.0 / np.pi)'], {}), '(2.0 / np.pi)\n', (2583, 2596), True, 'import numpy as np\n'), ((2739, 2758), 'numpy.zeros', 'np.zeros', (['num_blobs'], {}), '(num_blobs)\n', (2747, 2758), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image loader variants.
Code branched out from https://github.com/tensorflow/models/tree/master/research/deeplab
, and please refer to it for more details.
"""
import collections
import collections.abc

import nibabel as nib
import numpy as np
import SimpleITK as sitk
import six
import tensorflow as tf
class ImageReader(object):
  """Helper class that provides TensorFlow image coding utilities."""

  def __init__(self, image_format='nii.gz', channels=1):
    """Class constructor.

    Args:
      image_format: Image format.
      channels: Image channels.
    """
    self._image_format = image_format
    # Previously accepted but silently dropped; stored so callers can
    # inspect it.
    self._channels = channels

  def decode_image(self, image_path):
    """Read a medical image (.nii.gz, .dcm) and return it as an int32 array."""
    # Read the medical image containing the volume with SimpleITK
    image = sitk.ReadImage(image_path)
    # Access the numpy array:
    image_arr = sitk.GetArrayFromImage(image)
    self._decode = np.int32(image_arr)
    return self._decode

  def read_image_dims(self, image):
    """Return (height, width, depth) for a 2-D or 3-D image array.

    Raises:
      ValueError: if the image is neither 2-D nor 3-D (the original code
        crashed with UnboundLocalError in that case).
    """
    if len(image.shape) == 3:
      depth, height, width = image.shape
    elif len(image.shape) == 2:
      height, width = image.shape
      depth = 1
    else:
      raise ValueError(
          'Expected a 2-D or 3-D image, got shape %r' % (image.shape,))
    return (height, width, depth)
def _int64_list_feature(values):
  """Returns a TF-Feature of int64_list.

  Args:
    values: A scalar or list of values.

  Returns:
    A TF-Feature.
  """
  # collections.Iterable was removed in Python 3.10; the ABC lives in
  # collections.abc.
  if not isinstance(values, collections.abc.Iterable):
    values = [values]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_list_feature(values):
  """Returns a TF-Feature of bytes.

  Args:
    values: A string.

  Returns:
    A TF-Feature.
  """
  # Encode text to bytes on Python 3; anything else is passed through as-is.
  encoded = values.encode() if isinstance(values, str) and six.PY3 else values
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[encoded]))
def image_seg_to_tfexample(image_data, height, width, depth, num_slices,
                           image_format=None, seg_data=None):
  """Converts one image/segmentation pair to tf example.

  Args:
    image_data: string of image data.
    height: image height.
    width: image width.
    depth: image depth.
    num_slices: number of slices in the volume.
    image_format: optional image format string.
    seg_data: optional string of semantic segmentation data.

  Returns:
    tf example of one image/segmentation pair.
  """
  feature = dict()
  feature['image/encoded'] = _bytes_list_feature(image_data)
  feature['image/height'] = _int64_list_feature(height)
  feature['image/width'] = _int64_list_feature(width)
  feature['image/depth'] = _int64_list_feature(depth)
  feature['image/num_slices'] = _int64_list_feature(num_slices)
  # Optional entries are only added when provided.
  if seg_data is not None:
    feature['segmentation/encoded'] = _bytes_list_feature(seg_data)
  if image_format is not None:
    feature['image/format'] = _bytes_list_feature(image_format)
  return tf.train.Example(features=tf.train.Features(feature=feature))
| [
"tensorflow.train.Int64List",
"SimpleITK.ReadImage",
"tensorflow.train.Features",
"SimpleITK.GetArrayFromImage",
"numpy.int32"
] | [((1398, 1424), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['image_path'], {}), '(image_path)\n', (1412, 1424), True, 'import SimpleITK as sitk\n'), ((1476, 1505), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image'], {}), '(image)\n', (1498, 1505), True, 'import SimpleITK as sitk\n'), ((1528, 1547), 'numpy.int32', 'np.int32', (['image_arr'], {}), '(image_arr)\n', (1536, 1547), True, 'import numpy as np\n'), ((2086, 2118), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'values'}), '(value=values)\n', (2104, 2118), True, 'import tensorflow as tf\n'), ((3365, 3399), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (3382, 3399), True, 'import tensorflow as tf\n')] |
import numpy as np
import pytest
import torch
from probflow.distributions import Deterministic
from probflow.utils.torch_distributions import get_TorchDeterministic
tod = torch.distributions
def is_close(a, b, tol=1e-3):
    """Return True when |a - b| is smaller than the tolerance *tol*."""
    difference = a - b
    return np.abs(difference) < tol
def test_TorchDeterministic():
    """Tests the TorchDeterministic distribution"""
    DistCls = get_TorchDeterministic()
    d = DistCls(loc=torch.tensor([2.0]), validate_args=True)
    # A point mass at loc=2: mean is 2, spread is zero
    assert is_close(d.mean.numpy()[0], 2.0)
    assert is_close(d.stddev, 0.0)
    assert is_close(d.variance, 0.0)
    # Smoke-test the rest of the distribution API
    d.expand([5, 2])
    d.rsample()
    one = torch.tensor([1.0])
    d.log_prob(one)
    d.cdf(one)
    d.icdf(one)
    d.entropy()
dist.entropy()
def test_Deterministic():
    """Tests Deterministic distribution"""
    dist = Deterministic()

    # Default location is zero
    assert dist.loc == 0

    # Calling the probflow object yields the backend distribution
    assert isinstance(dist(), tod.distribution.Distribution)

    # All probability mass sits at loc
    zero, one = torch.zeros([1]), torch.ones([1])
    assert dist.prob(zero).numpy() == 1.0
    assert dist.prob(one).numpy() == 0.0
    assert dist.log_prob(zero).numpy() == 0.0
    assert dist.log_prob(one).numpy() == -np.inf
    assert dist.mean().numpy() == 0.0

    # Sampling: scalar by default, 1-D of length n when a size is given
    scalar = dist.sample()
    assert isinstance(scalar, torch.Tensor)
    assert scalar.ndim == 0
    for size in (10, torch.tensor([10])):
        draws = dist.sample(size)
        assert isinstance(draws, torch.Tensor)
        assert draws.ndim == 1
        assert draws.shape[0] == 10

    # The location parameter can be overridden...
    dist = Deterministic(loc=2)
    assert dist.loc == 2
    assert dist.prob(2 * torch.ones([1])).numpy() == 1.0
    assert dist.prob(torch.ones([1])).numpy() == 0.0

    # ...but only with Tensor-like objects
    with pytest.raises(TypeError):
        dist = Deterministic(loc="lalala")
| [
"probflow.utils.torch_distributions.get_TorchDeterministic",
"torch.ones",
"numpy.abs",
"pytest.raises",
"torch.zeros",
"probflow.distributions.Deterministic",
"torch.tensor"
] | [((367, 391), 'probflow.utils.torch_distributions.get_TorchDeterministic', 'get_TorchDeterministic', ([], {}), '()\n', (389, 391), False, 'from probflow.utils.torch_distributions import get_TorchDeterministic\n'), ((883, 898), 'probflow.distributions.Deterministic', 'Deterministic', ([], {}), '()\n', (896, 898), False, 'from probflow.distributions import Deterministic\n'), ((1797, 1817), 'probflow.distributions.Deterministic', 'Deterministic', ([], {'loc': '(2)'}), '(loc=2)\n', (1810, 1817), False, 'from probflow.distributions import Deterministic\n'), ((236, 249), 'numpy.abs', 'np.abs', (['(a - b)'], {}), '(a - b)\n', (242, 249), True, 'import numpy as np\n'), ((658, 677), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (670, 677), False, 'import torch\n'), ((693, 712), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (705, 712), False, 'import torch\n'), ((729, 748), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (741, 748), False, 'import torch\n'), ((1622, 1640), 'torch.tensor', 'torch.tensor', (['[10]'], {}), '([10])\n', (1634, 1640), False, 'import torch\n'), ((2000, 2024), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2013, 2024), False, 'import pytest\n'), ((2041, 2068), 'probflow.distributions.Deterministic', 'Deterministic', ([], {'loc': '"""lalala"""'}), "(loc='lalala')\n", (2054, 2068), False, 'from probflow.distributions import Deterministic\n'), ((427, 446), 'torch.tensor', 'torch.tensor', (['[2.0]'], {}), '([2.0])\n', (439, 446), False, 'import torch\n'), ((1092, 1108), 'torch.zeros', 'torch.zeros', (['[1]'], {}), '([1])\n', (1103, 1108), False, 'import torch\n'), ((1146, 1161), 'torch.ones', 'torch.ones', (['[1]'], {}), '([1])\n', (1156, 1161), False, 'import torch\n'), ((1203, 1219), 'torch.zeros', 'torch.zeros', (['[1]'], {}), '([1])\n', (1214, 1219), False, 'import torch\n'), ((1261, 1276), 'torch.ones', 'torch.ones', (['[1]'], {}), '([1])\n', (1271, 1276), False, 'import 
torch\n'), ((1921, 1936), 'torch.ones', 'torch.ones', (['[1]'], {}), '([1])\n', (1931, 1936), False, 'import torch\n'), ((1868, 1883), 'torch.ones', 'torch.ones', (['[1]'], {}), '([1])\n', (1878, 1883), False, 'import torch\n')] |
import torch
import torch.nn.functional as F
import torchvision
from tqdm import tqdm
from torch.autograd import Function
from sklearn.metrics import auc
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve
from skimage import filters
import numpy as np
from PIL import Image
from torchvision.utils import save_image
import os
import cv2
def pixel_values_in_mask(true_vessels, pre_vessels_decode, mask, train_or, dataset):
    """Select the pixels where the mask is non-zero and flatten the result.

    For the validation phase ('val') singleton dimensions are squeezed off
    first. For the 'HRF-AV' dataset the mask carries an extra leading axis,
    so only its first slice is used for the selection.
    """
    if train_or == 'val':
        true_vessels = np.squeeze(true_vessels)
        pre_vessels_decode = np.squeeze(pre_vessels_decode)
    selector = mask[0, ...] != 0 if dataset == 'HRF-AV' else mask != 0
    true_vessels = true_vessels[selector]
    pre_vessels_decode = pre_vessels_decode[selector]
    return true_vessels.flatten(), pre_vessels_decode.flatten()
def AUC_ROC(true_vessel_arr, pred_vessel_arr, average):
    """Area under the ROC curve of the flattened vessel prediction.

    ``average`` is forwarded as a keyword argument: recent scikit-learn
    versions make every parameter after (y_true, y_score) keyword-only, so
    the original positional call raises a TypeError there. Also avoids
    shadowing the function name with a local variable.
    """
    return roc_auc_score(true_vessel_arr, pred_vessel_arr, average=average)
def threshold_by_otsu(pred_vessels):
    """Binarize a vessel probability map using Otsu's automatic threshold."""
    cutoff = filters.threshold_otsu(pred_vessels)
    # 1.0 where the prediction reaches the threshold, 0.0 elsewhere
    return np.where(pred_vessels >= cutoff, 1.0, 0.0)
def AUC_PR(true_vessel_img, pred_vessel_img, average):
    """Area under the precision-recall curve, with vessels (label 1) as the
    positive class.

    ``average`` is accepted for signature compatibility but not used.
    """
    y_true = true_vessel_img.flatten()
    y_score = pred_vessel_img.flatten()
    precision, recall, _ = precision_recall_curve(y_true, y_score, pos_label=1)
    return auc(recall, precision)
def misc_measures(true_vessel_arr, pred_vessel_arr):
    """Compute binary-classification metrics from flattened label arrays.

    Returns
    -------
    tuple
        (accuracy, sensitivity, specificity, precision, G-mean, F1, MSE, IoU).
        All zeros when the metrics cannot be computed, e.g. when the
        confusion matrix does not contain both classes.
    """
    cm = confusion_matrix(true_vessel_arr, pred_vessel_arr)
    mse = mean_squared_error(true_vessel_arr, pred_vessel_arr)
    try:
        # Unpack the 2x2 confusion matrix once; a 1x1 matrix (only one
        # class present) raises IndexError and is handled below.
        tn, fp = cm[0, 0], cm[0, 1]
        fn, tp = cm[1, 0], cm[1, 1]
        acc = 1. * (tn + tp) / np.sum(cm)
        sensitivity = 1. * tp / (fn + tp)
        specificity = 1. * tn / (fp + tn)
        precision = 1. * tp / (tp + fp)
        G = np.sqrt(sensitivity * specificity)
        F1_score_2 = 2 * precision * sensitivity / (precision + sensitivity)
        iou = 1. * tp / (fn + fp + tp)
        return acc, sensitivity, specificity, precision, G, F1_score_2, mse, iou
    except (IndexError, ZeroDivisionError, FloatingPointError):
        # The original bare `except:` also swallowed e.g. KeyboardInterrupt;
        # only the expected failure modes are caught here.
        return 0, 0, 0, 0, 0, 0, 0, 0
def eval_net(epoch, net_G_1, net_G_A_1, net_G_V_1, net_G_2, net_G_A_2, net_G_V_2, net_G_3, net_G_A_3, net_G_V_3, net_G_4, net_G_A_4, net_G_V_4, net_G_5, net_G_A_5, net_G_V_5, net_G_6, net_G_A_6, net_G_V_6, net_G_7, net_G_A_7, net_G_V_7, net_G_8, net_G_A_8, net_G_V_8, dataset, loader, device, mode, train_or):
    """Evaluate an 8-member artery/vein segmentation ensemble on a loader.

    Each ensemble member is a triple (net_G_i, net_G_A_i, net_G_V_i): the
    artery and vein sub-nets produce fusion maps that are fed, detached,
    into the main generator.  The per-image softmax probabilities of the
    eight members are averaged and decoded by argmax into classes
    (0=background, 1=artery, 2=vein, 3=uncertain).  Colour previews are
    written to '<dataset>/Final_pre/small_pre/' and '.../raw_pre/'.

    Args:
        epoch: current epoch number (kept for signature compat; unused).
        net_G_*: the trained generator / artery / vein networks (8 triples).
        dataset: dataset name; also used as the output directory prefix.
        loader: DataLoader yielding dicts with 'image', 'label', 'mask',
            'name'.
        device: torch device for inference.
        mode: 'whole' upsamples the decoded map to the original mask size
            before scoring.
        train_or: forwarded to pixel_values_in_mask ('val' squeezes maps).

    Returns:
        40 floats: (acc, sens, spec, prec, G, F1, auc_roc, auc_pr, mse, iou)
        averaged over the loader, for overall / artery / vein / uncertain.
        AUC values are kept at 0 (hard labels have no meaningful AUC here).
    """
    mask_type = torch.float32 if net_G_1.n_classes == 1 else torch.long
    n_val = len(loader)
    # The 8 ensemble members as (generator, artery net, vein net) triples;
    # the eight previously copy-pasted inference blocks collapse to a loop.
    ensembles = [
        (net_G_1, net_G_A_1, net_G_V_1), (net_G_2, net_G_A_2, net_G_V_2),
        (net_G_3, net_G_A_3, net_G_V_3), (net_G_4, net_G_A_4, net_G_V_4),
        (net_G_5, net_G_A_5, net_G_V_5), (net_G_6, net_G_A_6, net_G_V_6),
        (net_G_7, net_G_A_7, net_G_V_7), (net_G_8, net_G_A_8, net_G_V_8),
    ]
    acc_a = sent_a = spet_a = pret_a = G_t_a = F1t_a = mset_a = iout_a = auc_roct_a = auc_prt_a = 0
    acc_v = sent_v = spet_v = pret_v = G_t_v = F1t_v = mset_v = iout_v = auc_roct_v = auc_prt_v = 0
    acc_u = sent_u = spet_u = pret_u = G_t_u = F1t_u = mset_u = iout_u = auc_roct_u = auc_prt_u = 0
    acc = sent = spet = pret = G_t = F1t = mset = iout = auc_roct = auc_prt = 0
    num = 0
    seg_results_small_path = dataset + '/Final_pre/small_pre/'
    seg_results_raw_path = dataset + '/Final_pre/raw_pre/'
    # exist_ok avoids the check-then-create race of the old isdir/makedirs.
    os.makedirs(seg_results_small_path, exist_ok=True)
    os.makedirs(seg_results_raw_path, exist_ok=True)
    with tqdm(total=n_val, desc='Validation round', unit='batch', leave=False) as pbar:
        for batch in loader:
            imgs, label, mask = batch['image'], batch['label'], batch['mask']
            img_name = batch['name'][0]
            ori_w, ori_h = mask.shape[2], mask.shape[3]
            imgs = imgs.to(device=device, dtype=torch.float32)
            label = label.to(device=device, dtype=mask_type)
            with torch.no_grad():
                num += 1
                # BUGFIX: reset the probability accumulator per image; it
                # previously persisted across loader iterations, leaking an
                # earlier image's averaged prediction into later ones.
                mask_pred_tensor_small_all = 0
                for net_G, net_G_A, net_G_V in ensembles:
                    _, fusion_A = net_G_A(imgs)
                    _, fusion_V = net_G_V(imgs)
                    mask_pred, _, _, _ = net_G(imgs, fusion_A.detach(), fusion_V.detach())
                    probs = F.softmax(mask_pred.clone().detach(), dim=1)
                    probs = torch.squeeze(probs)
                    mask_pred_tensor_small_all += probs.type(torch.FloatTensor)
                mask_pred_tensor_small_all = mask_pred_tensor_small_all / len(ensembles)
                _, prediction_decode = torch.max(mask_pred_tensor_small_all, 0)
                prediction_decode = prediction_decode.type(torch.FloatTensor)
                # Colour-code the decoded classes for the preview images:
                # artery->red, vein->blue, uncertain->green (BGR for OpenCV).
                h, w = prediction_decode.shape[0], prediction_decode.shape[1]
                img_r = np.zeros((h, w))
                img_g = np.zeros((h, w))
                img_b = np.zeros((h, w))
                img_r[prediction_decode == 1] = 255
                img_b[prediction_decode == 2] = 255
                img_g[prediction_decode == 3] = 255
                img_ = np.concatenate((img_b[..., np.newaxis], img_g[..., np.newaxis], img_r[..., np.newaxis]), axis=2)
                cv2.imwrite(seg_results_small_path + img_name + '.png', np.float32(img_) * 255)
                img_ww = cv2.resize(np.float32(img_) * 255, (ori_h, ori_w), interpolation=cv2.INTER_NEAREST)
                cv2.imwrite(seg_results_raw_path + img_name + '.png', img_ww)
                if mode == 'whole':
                    # Score on the full-resolution image: nearest-neighbour
                    # interpolation keeps the decoded labels hard.
                    prediction_decode = F.interpolate(torch.unsqueeze(torch.unsqueeze(prediction_decode, axis=0), axis=0), (ori_w, ori_h), mode='nearest')
                    prediction_decode = torch.squeeze(prediction_decode)
                mask_pred_softmax_cpu_decode = np.squeeze(prediction_decode.detach().cpu().numpy())
                label_cpu = np.squeeze(label.detach().cpu().numpy())
                mask_cpu = np.squeeze(mask.detach().cpu().numpy())
                count_artery = np.sum(label_cpu == 1)
                count_vein = np.sum(label_cpu == 2)
                count_uncertainty = np.sum(label_cpu == 3)
                count_total = count_artery + count_vein + count_uncertainty
                # Restrict scoring to the field-of-view mask.
                label_cpu_flatten, pred_flatten = pixel_values_in_mask(
                    label_cpu, mask_pred_softmax_cpu_decode, mask_cpu, train_or, dataset)
                # AUC metrics are not computed for hard labels; kept at 0 so
                # the returned tuple layout stays stable.
                auc_roc_a = auc_pr_a = auc_roc_v = auc_pr_v = auc_roc_u = auc_pr_u = 0
                # One-vs-rest binary maps for artery (1), vein (2), uncertain (3).
                shape = label_cpu_flatten.shape
                label_cpu_a, label_cpu_v, label_cpu_u = np.zeros(shape), np.zeros(shape), np.zeros(shape)
                pre_a, pre_v, pre_u = np.zeros(shape), np.zeros(shape), np.zeros(shape)
                label_cpu_a[label_cpu_flatten == 1] = 1
                label_cpu_v[label_cpu_flatten == 2] = 1
                label_cpu_u[label_cpu_flatten == 3] = 1
                pre_a[pred_flatten == 1] = 1
                pre_v[pred_flatten == 2] = 1
                pre_u[pred_flatten == 3] = 1
                acc_ve_a, sensitivity_ve_a, specificity_ve_a, precision_ve_a, G_ve_a, F1_score_ve_a, mse_a, iou_a = misc_measures(label_cpu_a, pre_a)
                acc_ve_v, sensitivity_ve_v, specificity_ve_v, precision_ve_v, G_ve_v, F1_score_ve_v, mse_v, iou_v = misc_measures(label_cpu_v, pre_v)
                acc_ve_u, sensitivity_ve_u, specificity_ve_u, precision_ve_u, G_ve_u, F1_score_ve_u, mse_u, iou_u = misc_measures(label_cpu_u, pre_u)
                acc_a += acc_ve_a
                sent_a += sensitivity_ve_a
                spet_a += specificity_ve_a
                pret_a += precision_ve_a
                G_t_a += G_ve_a
                F1t_a += F1_score_ve_a
                mset_a += mse_a
                iout_a += iou_a
                auc_roct_a += auc_roc_a
                auc_prt_a += auc_pr_a
                acc_v += acc_ve_v
                sent_v += sensitivity_ve_v
                spet_v += specificity_ve_v
                pret_v += precision_ve_v
                G_t_v += G_ve_v
                F1t_v += F1_score_ve_v
                mset_v += mse_v
                iout_v += iou_v
                auc_roct_v += auc_roc_v
                auc_prt_v += auc_pr_v
                # The uncertain class can be absent, making its F1 NaN; that
                # image then contributes zero for the uncertain metrics.
                if np.isnan(F1_score_ve_u):
                    acc_ve_u = sensitivity_ve_u = specificity_ve_u = precision_ve_u = 0
                    G_ve_u = F1_score_ve_u = mse_u = iou_u = 0
                    auc_roc_u = auc_pr_u = 0
                acc_u += acc_ve_u
                sent_u += sensitivity_ve_u
                spet_u += specificity_ve_u
                pret_u += precision_ve_u
                G_t_u += G_ve_u
                F1t_u += F1_score_ve_u
                mset_u += mse_u
                iout_u += iou_u
                auc_roct_u += auc_roc_u
                auc_prt_u += auc_pr_u
                # Overall metrics: pixel-count-weighted mean over the classes.
                acc += (count_artery * acc_ve_a + count_vein * acc_ve_v + count_uncertainty * acc_ve_u) / count_total
                sent += (count_artery * sensitivity_ve_a + count_vein * sensitivity_ve_v + count_uncertainty * sensitivity_ve_u) / count_total
                spet += (count_artery * specificity_ve_a + count_vein * specificity_ve_v + count_uncertainty * specificity_ve_u) / count_total
                pret += (count_artery * precision_ve_a + count_vein * precision_ve_v + count_uncertainty * precision_ve_u) / count_total
                G_t += (count_artery * G_ve_a + count_vein * G_ve_v + count_uncertainty * G_ve_u) / count_total
                F1t += (count_artery * F1_score_ve_a + count_vein * F1_score_ve_v + count_uncertainty * F1_score_ve_u) / count_total
                mset += (count_artery * mse_a + count_vein * mse_v + count_uncertainty * mse_u) / count_total
                iout += (count_artery * iou_a + count_vein * iou_v + count_uncertainty * iou_u) / count_total
                auc_roct += (count_artery * auc_roc_a + count_vein * auc_roc_v + count_uncertainty * auc_roc_u) / count_total
                auc_prt += (count_artery * auc_pr_a + count_vein * auc_pr_v + count_uncertainty * auc_pr_u) / count_total
            pbar.update(1)  # the progress bar previously never advanced
    return acc / n_val, sent / n_val, spet / n_val, pret / n_val, G_t / n_val, F1t / n_val, auc_roct / n_val, auc_prt / n_val, mset / n_val, iout / n_val, \
        acc_a / n_val, sent_a / n_val, spet_a / n_val, pret_a / n_val, G_t_a / n_val, F1t_a / n_val, auc_roct_a / n_val, auc_prt_a / n_val, mset_a / n_val, iout_a / n_val, \
        acc_v / n_val, sent_v / n_val, spet_v / n_val, pret_v / n_val, G_t_v / n_val, F1t_v / n_val, auc_roct_v / n_val, auc_prt_v / n_val, mset_v / n_val, iout_v / n_val, \
        acc_u / n_val, sent_u / n_val, spet_u / n_val, pret_u / n_val, G_t_u / n_val, F1t_u / n_val, auc_roct_u / n_val, auc_prt_u / n_val, mset_u / n_val, iout_u / n_val
| [
"numpy.sum",
"numpy.isnan",
"numpy.shape",
"torch.no_grad",
"skimage.filters.threshold_otsu",
"cv2.imwrite",
"torch.squeeze",
"torch.zeros",
"sklearn.metrics.mean_squared_error",
"tqdm.tqdm",
"sklearn.metrics.roc_auc_score",
"torch.max",
"numpy.squeeze",
"torch.unsqueeze",
"numpy.concate... | [((1092, 1148), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['true_vessel_arr', 'pred_vessel_arr', 'average'], {}), '(true_vessel_arr, pred_vessel_arr, average)\n', (1105, 1148), False, 'from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve\n'), ((1225, 1261), 'skimage.filters.threshold_otsu', 'filters.threshold_otsu', (['pred_vessels'], {}), '(pred_vessels)\n', (1247, 1261), False, 'from skimage import filters\n'), ((1283, 1311), 'numpy.zeros', 'np.zeros', (['pred_vessels.shape'], {}), '(pred_vessels.shape)\n', (1291, 1311), True, 'import numpy as np\n'), ((1583, 1605), 'sklearn.metrics.auc', 'auc', (['recall', 'precision'], {}), '(recall, precision)\n', (1586, 1605), False, 'from sklearn.metrics import auc\n'), ((1691, 1741), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['true_vessel_arr', 'pred_vessel_arr'], {}), '(true_vessel_arr, pred_vessel_arr)\n', (1707, 1741), False, 'from sklearn.metrics import confusion_matrix, mean_squared_error\n'), ((1752, 1804), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['true_vessel_arr', 'pred_vessel_arr'], {}), '(true_vessel_arr, pred_vessel_arr)\n', (1770, 1804), False, 'from sklearn.metrics import confusion_matrix, mean_squared_error\n'), ((566, 590), 'numpy.squeeze', 'np.squeeze', (['true_vessels'], {}), '(true_vessels)\n', (576, 590), True, 'import numpy as np\n'), ((620, 650), 'numpy.squeeze', 'np.squeeze', (['pre_vessels_decode'], {}), '(pre_vessels_decode)\n', (630, 650), True, 'import numpy as np\n'), ((2016, 2050), 'numpy.sqrt', 'np.sqrt', (['(sensitivity * specificity)'], {}), '(sensitivity * specificity)\n', (2023, 2050), True, 'import numpy as np\n'), ((3314, 3351), 'os.path.isdir', 'os.path.isdir', (['seg_results_small_path'], {}), '(seg_results_small_path)\n', (3327, 3351), False, 'import os\n'), ((3361, 3396), 'os.makedirs', 'os.makedirs', (['seg_results_small_path'], {}), '(seg_results_small_path)\n', (3372, 3396), False, 'import 
os\n'), ((3409, 3444), 'os.path.isdir', 'os.path.isdir', (['seg_results_raw_path'], {}), '(seg_results_raw_path)\n', (3422, 3444), False, 'import os\n'), ((3454, 3487), 'os.makedirs', 'os.makedirs', (['seg_results_raw_path'], {}), '(seg_results_raw_path)\n', (3465, 3487), False, 'import os\n'), ((3546, 3615), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_val', 'desc': '"""Validation round"""', 'unit': '"""batch"""', 'leave': '(False)'}), "(total=n_val, desc='Validation round', unit='batch', leave=False)\n", (3550, 3615), False, 'from tqdm import tqdm\n'), ((1848, 1858), 'numpy.sum', 'np.sum', (['cm'], {}), '(cm)\n', (1854, 1858), True, 'import numpy as np\n'), ((3856, 3917), 'torch.zeros', 'torch.zeros', (['(imgs.shape[0], 3, imgs.shape[2], imgs.shape[3])'], {}), '((imgs.shape[0], 3, imgs.shape[2], imgs.shape[3]))\n', (3867, 3917), False, 'import torch\n'), ((4070, 4085), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4083, 4085), False, 'import torch\n'), ((4626, 4666), 'torch.nn.functional.softmax', 'F.softmax', (['mask_pred_tensor_small'], {'dim': '(1)'}), '(mask_pred_tensor_small, dim=1)\n', (4635, 4666), True, 'import torch.nn.functional as F\n'), ((4707, 4744), 'torch.squeeze', 'torch.squeeze', (['mask_pred_tensor_small'], {}), '(mask_pred_tensor_small)\n', (4720, 4744), False, 'import torch\n'), ((5384, 5424), 'torch.nn.functional.softmax', 'F.softmax', (['mask_pred_tensor_small'], {'dim': '(1)'}), '(mask_pred_tensor_small, dim=1)\n', (5393, 5424), True, 'import torch.nn.functional as F\n'), ((5465, 5502), 'torch.squeeze', 'torch.squeeze', (['mask_pred_tensor_small'], {}), '(mask_pred_tensor_small)\n', (5478, 5502), False, 'import torch\n'), ((6142, 6182), 'torch.nn.functional.softmax', 'F.softmax', (['mask_pred_tensor_small'], {'dim': '(1)'}), '(mask_pred_tensor_small, dim=1)\n', (6151, 6182), True, 'import torch.nn.functional as F\n'), ((6223, 6260), 'torch.squeeze', 'torch.squeeze', (['mask_pred_tensor_small'], {}), '(mask_pred_tensor_small)\n', (6236, 
6260), False, 'import torch\n'), ((6916, 6956), 'torch.nn.functional.softmax', 'F.softmax', (['mask_pred_tensor_small'], {'dim': '(1)'}), '(mask_pred_tensor_small, dim=1)\n', (6925, 6956), True, 'import torch.nn.functional as F\n'), ((6997, 7034), 'torch.squeeze', 'torch.squeeze', (['mask_pred_tensor_small'], {}), '(mask_pred_tensor_small)\n', (7010, 7034), False, 'import torch\n'), ((7678, 7718), 'torch.nn.functional.softmax', 'F.softmax', (['mask_pred_tensor_small'], {'dim': '(1)'}), '(mask_pred_tensor_small, dim=1)\n', (7687, 7718), True, 'import torch.nn.functional as F\n'), ((7759, 7796), 'torch.squeeze', 'torch.squeeze', (['mask_pred_tensor_small'], {}), '(mask_pred_tensor_small)\n', (7772, 7796), False, 'import torch\n'), ((8440, 8480), 'torch.nn.functional.softmax', 'F.softmax', (['mask_pred_tensor_small'], {'dim': '(1)'}), '(mask_pred_tensor_small, dim=1)\n', (8449, 8480), True, 'import torch.nn.functional as F\n'), ((8521, 8558), 'torch.squeeze', 'torch.squeeze', (['mask_pred_tensor_small'], {}), '(mask_pred_tensor_small)\n', (8534, 8558), False, 'import torch\n'), ((9218, 9258), 'torch.nn.functional.softmax', 'F.softmax', (['mask_pred_tensor_small'], {'dim': '(1)'}), '(mask_pred_tensor_small, dim=1)\n', (9227, 9258), True, 'import torch.nn.functional as F\n'), ((9299, 9336), 'torch.squeeze', 'torch.squeeze', (['mask_pred_tensor_small'], {}), '(mask_pred_tensor_small)\n', (9312, 9336), False, 'import torch\n'), ((9996, 10036), 'torch.nn.functional.softmax', 'F.softmax', (['mask_pred_tensor_small'], {'dim': '(1)'}), '(mask_pred_tensor_small, dim=1)\n', (10005, 10036), True, 'import torch.nn.functional as F\n'), ((10077, 10114), 'torch.squeeze', 'torch.squeeze', (['mask_pred_tensor_small'], {}), '(mask_pred_tensor_small)\n', (10090, 10114), False, 'import torch\n'), ((10338, 10378), 'torch.max', 'torch.max', (['mask_pred_tensor_small_all', '(0)'], {}), '(mask_pred_tensor_small_all, 0)\n', (10347, 10378), False, 'import torch\n'), ((10513, 10579), 
'numpy.zeros', 'np.zeros', (['(prediction_decode.shape[0], prediction_decode.shape[1])'], {}), '((prediction_decode.shape[0], prediction_decode.shape[1]))\n', (10521, 10579), True, 'import numpy as np\n'), ((10603, 10669), 'numpy.zeros', 'np.zeros', (['(prediction_decode.shape[0], prediction_decode.shape[1])'], {}), '((prediction_decode.shape[0], prediction_decode.shape[1]))\n', (10611, 10669), True, 'import numpy as np\n'), ((10693, 10759), 'numpy.zeros', 'np.zeros', (['(prediction_decode.shape[0], prediction_decode.shape[1])'], {}), '((prediction_decode.shape[0], prediction_decode.shape[1]))\n', (10701, 10759), True, 'import numpy as np\n'), ((11127, 11227), 'numpy.concatenate', 'np.concatenate', (['(img_b[..., np.newaxis], img_g[..., np.newaxis], img_r[..., np.newaxis])'], {'axis': '(2)'}), '((img_b[..., np.newaxis], img_g[..., np.newaxis], img_r[...,\n np.newaxis]), axis=2)\n', (11141, 11227), True, 'import numpy as np\n'), ((11438, 11499), 'cv2.imwrite', 'cv2.imwrite', (["(seg_results_raw_path + img_name + '.png')", 'img_ww'], {}), "(seg_results_raw_path + img_name + '.png', img_ww)\n", (11449, 11499), False, 'import cv2\n'), ((11886, 11918), 'torch.squeeze', 'torch.squeeze', (['prediction_decode'], {}), '(prediction_decode)\n', (11899, 11918), False, 'import torch\n'), ((12179, 12219), 'numpy.squeeze', 'np.squeeze', (['mask_pred_softmax_cpu_decode'], {}), '(mask_pred_softmax_cpu_decode)\n', (12189, 12219), True, 'import numpy as np\n'), ((12306, 12327), 'numpy.squeeze', 'np.squeeze', (['label_cpu'], {}), '(label_cpu)\n', (12316, 12327), True, 'import numpy as np\n'), ((12411, 12431), 'numpy.squeeze', 'np.squeeze', (['mask_cpu'], {}), '(mask_cpu)\n', (12421, 12431), True, 'import numpy as np\n'), ((12480, 12502), 'numpy.sum', 'np.sum', (['(label_cpu == 1)'], {}), '(label_cpu == 1)\n', (12486, 12502), True, 'import numpy as np\n'), ((12530, 12552), 'numpy.sum', 'np.sum', (['(label_cpu == 2)'], {}), '(label_cpu == 2)\n', (12536, 12552), True, 'import numpy as 
np\n'), ((12587, 12609), 'numpy.sum', 'np.sum', (['(label_cpu == 3)'], {}), '(label_cpu == 3)\n', (12593, 12609), True, 'import numpy as np\n'), ((15436, 15459), 'numpy.isnan', 'np.isnan', (['F1_score_ve_u'], {}), '(F1_score_ve_u)\n', (15444, 15459), True, 'import numpy as np\n'), ((12021, 12048), 'numpy.shape', 'np.shape', (['prediction_decode'], {}), '(prediction_decode)\n', (12029, 12048), True, 'import numpy as np\n'), ((13503, 13536), 'numpy.zeros', 'np.zeros', (['label_cpu_flatten.shape'], {}), '(label_cpu_flatten.shape)\n', (13511, 13536), True, 'import numpy as np\n'), ((13539, 13572), 'numpy.zeros', 'np.zeros', (['label_cpu_flatten.shape'], {}), '(label_cpu_flatten.shape)\n', (13547, 13572), True, 'import numpy as np\n'), ((13575, 13608), 'numpy.zeros', 'np.zeros', (['label_cpu_flatten.shape'], {}), '(label_cpu_flatten.shape)\n', (13583, 13608), True, 'import numpy as np\n'), ((13645, 13678), 'numpy.zeros', 'np.zeros', (['label_cpu_flatten.shape'], {}), '(label_cpu_flatten.shape)\n', (13653, 13678), True, 'import numpy as np\n'), ((13681, 13714), 'numpy.zeros', 'np.zeros', (['label_cpu_flatten.shape'], {}), '(label_cpu_flatten.shape)\n', (13689, 13714), True, 'import numpy as np\n'), ((13717, 13750), 'numpy.zeros', 'np.zeros', (['label_cpu_flatten.shape'], {}), '(label_cpu_flatten.shape)\n', (13725, 13750), True, 'import numpy as np\n'), ((11292, 11308), 'numpy.float32', 'np.float32', (['img_'], {}), '(img_)\n', (11302, 11308), True, 'import numpy as np\n'), ((11350, 11366), 'numpy.float32', 'np.float32', (['img_'], {}), '(img_)\n', (11360, 11366), True, 'import numpy as np\n'), ((11769, 11811), 'torch.unsqueeze', 'torch.unsqueeze', (['prediction_decode'], {'axis': '(0)'}), '(prediction_decode, axis=0)\n', (11784, 11811), False, 'import torch\n')] |
import math
import numpy as np
from Box2D import b2FixtureDef, b2CircleShape, b2Transform
import game.physics
class Circle:
    """A circular rigid body backed by a Box2D fixture.

    Position and velocity are cached as numpy arrays so callers can do
    vector maths; the actual Box2D body is only created when
    register_in_world() is called.
    """

    def __init__(self, position, radius, weight, movable=True):
        # Kinematic state.
        self.position = np.array(position)
        self.velocity = np.array([0, 0])
        # Physical properties; density spreads the total weight over the
        # disc area so Box2D reproduces the requested mass.
        self.radius = radius
        self.weight = weight
        self.density = weight / (np.pi * radius ** 2)
        self.movable = movable

    def register_in_world(self, world, friction):
        """Create the Box2D body for this circle inside `world`."""
        if not self.movable:
            self.body = world.CreateStaticBody(
                position=self.position.tolist(),
                shapes=b2CircleShape(radius=self.radius))
            return
        fixture = b2FixtureDef(
            shape=b2CircleShape(radius=self.radius),
            density=self.density, restitution=1.0, friction=0.0)
        self.body = world.CreateDynamicBody(
            fixtures=fixture,
            bullet=True,
            position=self.position.tolist(),
            fixedRotation=True,
            angularDamping=5,
            linearDamping=friction)
        self.set_velocity(self.velocity)

    def update(self, passedTime):
        """Sync the cached position from the physics body."""
        self.position = np.array(self.body.position)

    def apply_force(self, force):
        """Apply a continuous force (vector) at the body's world centre."""
        self.body.ApplyForce(force.tolist(), self.body.worldCenter, True)

    def add_velocity(self, velocity):
        """Apply an instantaneous impulse and cache the resulting velocity."""
        self.body.ApplyLinearImpulse(velocity.tolist(), self.body.worldCenter, True)
        self.velocity = np.array(self.body.linearVelocity)

    def set_velocity(self, velocity):
        """Overwrite the body's linear velocity with `velocity`."""
        self.body.linearVelocity = velocity.tolist()
        self.velocity = velocity

    def set_position(self, position):
        """Teleport the body to `position` (rotation reset to zero)."""
        self.position = np.array(position)
        self.body.transform = [self.position.tolist(), 0]

    def get_position(self):
        return self.position

    def get_velocity(self):
        return self.body.linearVelocity

    def get_left(self):
        return self.position[0] - self.radius

    def get_right(self):
        return self.position[0] + self.radius

    def get_top(self):
        return self.position[1] - self.radius

    def get_bottom(self):
        return self.position[1] + self.radius
| [
"numpy.array",
"Box2D.b2CircleShape"
] | [((218, 236), 'numpy.array', 'np.array', (['position'], {}), '(position)\n', (226, 236), True, 'import numpy as np\n'), ((261, 277), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (269, 277), True, 'import numpy as np\n'), ((1147, 1175), 'numpy.array', 'np.array', (['self.body.position'], {}), '(self.body.position)\n', (1155, 1175), True, 'import numpy as np\n'), ((1433, 1467), 'numpy.array', 'np.array', (['self.body.linearVelocity'], {}), '(self.body.linearVelocity)\n', (1441, 1467), True, 'import numpy as np\n'), ((1656, 1674), 'numpy.array', 'np.array', (['position'], {}), '(position)\n', (1664, 1674), True, 'import numpy as np\n'), ((1053, 1086), 'Box2D.b2CircleShape', 'b2CircleShape', ([], {'radius': 'self.radius'}), '(radius=self.radius)\n', (1066, 1086), False, 'from Box2D import b2FixtureDef, b2CircleShape, b2Transform\n'), ((609, 642), 'Box2D.b2CircleShape', 'b2CircleShape', ([], {'radius': 'self.radius'}), '(radius=self.radius)\n', (622, 642), False, 'from Box2D import b2FixtureDef, b2CircleShape, b2Transform\n')] |
import librosa
import madmom
from madmom.features.beats import *
from scipy import signal
import numpy as np
def peak_picking(beat_times, total_samples, kernel_size, offset):
    """Pick peak positions in a beat-activation curve.

    Args:
        beat_times: 1-D beat-activation values, one per analysis frame.
        total_samples: number of audio samples the activations span.
        kernel_size: odd window length for the median-filter baseline.
        offset: margin a value must rise above the local median to count
            as a peak.

    Returns:
        list[int]: sample indices of the detected peaks.
    """
    # Low-pass the activation curve; the cutoff is normalised so smoothing
    # adapts to the frame rate implied by total_samples.
    cut_off_norm = len(beat_times) / total_samples * 100 / 2
    b, a = signal.butter(1, cut_off_norm)
    beat_times = signal.filtfilt(b, a, beat_times)
    # Map each activation frame to an absolute sample position.
    beat_samples = np.linspace(0, total_samples, len(beat_times), endpoint=True, dtype=int)
    n_t_medians = signal.medfilt(beat_times, kernel_size=kernel_size)
    # BUGFIX: the caller-supplied `offset` was previously overwritten with
    # a hard-coded 0.01 (making the parameter dead); it is now honoured.
    peaks = []
    for i in range(len(beat_times) - 1):
        if beat_times[i] > 0 and beat_times[i] > beat_times[i - 1] \
                and beat_times[i] > beat_times[i + 1] \
                and beat_times[i] > (n_t_medians[i] + offset):
            peaks.append(int(beat_samples[i]))
    return peaks
def analyze(y, sr):
    """Extract rhythm, noisiness and pitch features from an audio signal.

    Args:
        y: 1-D audio samples (as returned by librosa.load).
        sr: sample rate in Hz.

    Returns:
        dict with keys: sample_rate, duration, beat_samples,
        number_of_beats, tempo_float, tempo_int, zero_crossing,
        noisiness_median, noisiness_sum, notes, dominant_note.
    """
    data = {}
    data['sample_rate'] = sr
    data['duration'] = librosa.get_duration(y=y, sr=sr)
    # Beat activations from the RNN ensemble, reduced by the multi-model
    # selection processor before peak picking.
    rnn_processor = RNNBeatProcessor(post_processor=None)
    predictions = rnn_processor(y)
    mm_processor = MultiModelSelectionProcessor(num_ref_predictions=None)
    beats = mm_processor(predictions)
    data['beat_samples'] = peak_picking(beats, len(y), 5, 0.01)
    # Retry with a wider median kernel when almost no beats were found.
    if len(data['beat_samples']) < 3:
        data['beat_samples'] = peak_picking(beats, len(y), 25, 0.01)
    if data['beat_samples'] == []:
        data['beat_samples'] = [0]
    data['number_of_beats'] = len(data['beat_samples'])
    # Tempo in BPM from the beat count over the clip duration.
    data['tempo_float'] = (len(data['beat_samples']) - 1) * 60 / data['duration']
    data['tempo_int'] = int(data['tempo_float'])
    # Noisiness features based on the zero-crossing rate.
    data['zero_crossing'] = librosa.feature.zero_crossing_rate(y)[0].tolist()
    data['noisiness_median'] = float(np.median(data['zero_crossing']))
    data['noisiness_sum'] = sum(librosa.zero_crossings(y) / y.shape[0])
    # Pitch content: per-frame dominant chroma bin (0=C, 1=C#, ... 11=B).
    notes = []
    try:
        # `y` is passed by keyword: it is keyword-only in modern librosa.
        chroma = librosa.feature.chroma_cqt(y=y, n_chroma=12, bins_per_octave=12,
                                            n_octaves=8, hop_length=512)
        for col in range(chroma.shape[1]):
            notes.append(int(np.argmax(chroma[:, col])))
        data['notes'] = notes
        data['dominant_note'] = int(np.argmax(np.bincount(np.array(notes))))
    except Exception:
        # Narrowed from a bare `except:`; chroma extraction can fail on
        # very short clips, in which case fall back to a silent default.
        data['notes'] = [0]
        data['dominant_note'] = 0
    return data
| [
"librosa.zero_crossings",
"scipy.signal.filtfilt",
"numpy.argmax",
"numpy.median",
"librosa.feature.zero_crossing_rate",
"scipy.signal.medfilt",
"numpy.array",
"librosa.feature.chroma_cqt",
"scipy.signal.butter",
"librosa.get_duration"
] | [((277, 307), 'scipy.signal.butter', 'signal.butter', (['(1)', 'cut_off_norm'], {}), '(1, cut_off_norm)\n', (290, 307), False, 'from scipy import signal\n'), ((325, 358), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'beat_times'], {}), '(b, a, beat_times)\n', (340, 358), False, 'from scipy import signal\n'), ((522, 573), 'scipy.signal.medfilt', 'signal.medfilt', (['beat_times'], {'kernel_size': 'kernel_size'}), '(beat_times, kernel_size=kernel_size)\n', (536, 573), False, 'from scipy import signal\n'), ((1062, 1094), 'librosa.get_duration', 'librosa.get_duration', ([], {'y': 'y', 'sr': 'sr'}), '(y=y, sr=sr)\n', (1082, 1094), False, 'import librosa\n'), ((1977, 2009), 'numpy.median', 'np.median', (["data['zero_crossing']"], {}), "(data['zero_crossing'])\n", (1986, 2009), True, 'import numpy as np\n'), ((2150, 2245), 'librosa.feature.chroma_cqt', 'librosa.feature.chroma_cqt', (['y'], {'n_chroma': '(12)', 'bins_per_octave': '(12)', 'n_octaves': '(8)', 'hop_length': '(512)'}), '(y, n_chroma=12, bins_per_octave=12, n_octaves=8,\n hop_length=512)\n', (2176, 2245), False, 'import librosa\n'), ((2044, 2069), 'librosa.zero_crossings', 'librosa.zero_crossings', (['y'], {}), '(y)\n', (2066, 2069), False, 'import librosa\n'), ((1890, 1927), 'librosa.feature.zero_crossing_rate', 'librosa.feature.zero_crossing_rate', (['y'], {}), '(y)\n', (1924, 1927), False, 'import librosa\n'), ((2666, 2691), 'numpy.argmax', 'np.argmax', (['chroma[:, col]'], {}), '(chroma[:, col])\n', (2675, 2691), True, 'import numpy as np\n'), ((2782, 2797), 'numpy.array', 'np.array', (['notes'], {}), '(notes)\n', (2790, 2797), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 7 18:02:07 2018
@author: ron
A test function for the smoothing module
"""
from myptv.traj_smoothing_mod import smooth_trajectories
from numpy import loadtxt
def test_smoothing():
    '''
    Smoke-test for the smoothing module: smooth the bundled trajectory
    file and check the result keeps the expected 11-column row structure.
    '''
    fname = './tests/smoothing_test_files/trajectories'
    traj_list = loadtxt(fname)
    # window length 5, polynomial order 2
    sm = smooth_trajectories(traj_list, 5, 2)
    sm.smooth()
    smoothed = sm.smoothed_trajs
    assert len(smoothed) > 0
    assert all(len(row) == 11 for row in smoothed)
| [
"myptv.traj_smoothing_mod.smooth_trajectories",
"numpy.loadtxt"
] | [((391, 405), 'numpy.loadtxt', 'loadtxt', (['fname'], {}), '(fname)\n', (398, 405), False, 'from numpy import loadtxt\n'), ((448, 497), 'myptv.traj_smoothing_mod.smooth_trajectories', 'smooth_trajectories', (['traj_list', 'window', 'polyorder'], {}), '(traj_list, window, polyorder)\n', (467, 497), False, 'from myptv.traj_smoothing_mod import smooth_trajectories\n')] |
"""
Implements two-dimensional functions for modeling.
.. include:: ../include/links.rst
"""
import warnings
from IPython import embed
import numpy as np
from scipy import special
from astropy.modeling import functional_models
class Sersic2D(functional_models.Sersic2D):
    """
    A 2D Sersic distribution.

    Args:
        sb_eff (scalar-like):
            The surface brightness at 1 effective (half-light) radius.
        r_eff (scalar-like):
            The effective (half-light) radius in *arcseconds*.
        n (scalar-like):
            The Sersic index.
        center (scalar-like, optional):
            The coordinates of the Sersic center in *arcseconds* relative to
            the image center.
        ellipticity (scalar-like, optional):
            The ellipticity (1-b/a) of an elliptical Sersic distribution.
        position_angle (scalar-like, optional):
            The position angle for the elliptical Sersic distribution, defined
            as the angle from N through E in degrees.  The coordinate system
            is defined with positive offsets (in RA) toward the east, meaning
            lower pixel indices.
        unity_integral (:obj:`bool`, optional):
            Renormalize the distribution so that the integral is unity.
    """
    # `center` default is an immutable tuple (was a mutable list literal,
    # the classic shared-default pitfall); same values, same behaviour.
    def __init__(self, sb_eff, r_eff, n, center=(0, 0), ellipticity=1.0,
                 position_angle=0., unity_integral=False):
        # position_angle must be set before super().__init__ since theta is
        # derived from it (astropy measures theta from the x-axis).
        self.position_angle = position_angle
        super().__init__(amplitude=sb_eff, r_eff=r_eff, n=n, x_0=center[0],
                         y_0=center[1], ellip=ellipticity,
                         theta=np.radians(90 - self.position_angle))
        self.bn = None
        self.integral = self.get_integral()
        if unity_integral:
            # Rescale the amplitude so the analytic integral becomes 1,
            # then recompute the (now unity) cached integral.
            self.amplitude /= self.integral
            self.integral = self.get_integral()

    def get_integral(self):
        """
        The analytic integral of the Sersic profile projected on the sky.

        Also caches the Sersic normalization constant b_n on ``self.bn``.
        """
        self.bn = special.gammaincinv(2. * self.n, 0.5)
        # 2*pi*n*exp(bn)*Gamma(2n)*bn^(-2n) * amplitude * r_eff^2, with the
        # (1-ellipticity) factor accounting for the projected ellipse area.
        return 2 * np.pi * self.n * np.exp(self.bn) * self.amplitude \
                    * np.square(self.r_eff) * (1 - self.ellip) \
                    * special.gamma(2 * self.n) * np.power(self.bn, -2 * self.n)
| [
"numpy.radians",
"numpy.power",
"numpy.square",
"numpy.exp",
"scipy.special.gammaincinv",
"scipy.special.gamma"
] | [((2027, 2065), 'scipy.special.gammaincinv', 'special.gammaincinv', (['(2.0 * self.n)', '(0.5)'], {}), '(2.0 * self.n, 0.5)\n', (2046, 2065), False, 'from scipy import special\n'), ((2263, 2293), 'numpy.power', 'np.power', (['self.bn', '(-2 * self.n)'], {}), '(self.bn, -2 * self.n)\n', (2271, 2293), True, 'import numpy as np\n'), ((1599, 1635), 'numpy.radians', 'np.radians', (['(90 - self.position_angle)'], {}), '(90 - self.position_angle)\n', (1609, 1635), True, 'import numpy as np\n'), ((2237, 2262), 'scipy.special.gamma', 'special.gamma', (['(2 * self.n)'], {}), '(2 * self.n)\n', (2250, 2262), False, 'from scipy import special\n'), ((2166, 2187), 'numpy.square', 'np.square', (['self.r_eff'], {}), '(self.r_eff)\n', (2175, 2187), True, 'import numpy as np\n'), ((2101, 2116), 'numpy.exp', 'np.exp', (['self.bn'], {}), '(self.bn)\n', (2107, 2116), True, 'import numpy as np\n')] |
import numpy as np
import math
def get_num_windows(image_height, image_width, window_height, window_width, overlap=True, overlap_corners=True):
    """Compute how many windows are needed to cover an image.

    Args:
        image_height: Height of the image in pixels.
        image_width: Width of the image in pixels.
        window_height: The height of each generated window.
        window_width: The width of each generated window.
        overlap: True iff generated windows should be overlapped.
        overlap_corners: True iff the corners/edges should be mirrored and
            contain as many overlaps as the centre of the image.

    Returns:
        Tuple of (total number of windows, number of windows along the
        height, number of windows along the width).
    """
    if not overlap:
        # Edge-to-edge tiling; a partial window at the border still counts.
        rows = math.ceil(image_height / window_height)
        cols = math.ceil(image_width / window_width)
    elif overlap_corners:
        # Half-window stride plus one extra row/column so corners and edges
        # receive as many overlapping windows as the interior.
        rows = math.ceil(image_height / (window_height / 2)) + 1
        cols = math.ceil(image_width / (window_width / 2)) + 1
    else:
        # Half-window stride without the extra border windows.
        rows = max(1, math.ceil(image_height / (window_height / 2)) - 1)
        cols = max(1, math.ceil(image_width / (window_width / 2)) - 1)
    return (rows * cols, rows, cols)
def split_image_to_windows(image, window_height, window_width, overlap=True, overlap_corners=True):
    """Split an input image into multiple equal-sized windows.
    The input image can have any dimensions. It will be split into multiple
    equal-sized windows.
    Args:
        image: Input image with 3 dimensions - height, width, and channels.
        window_height: The height of each generated window.
        window_width: The width of each generated window.
        overlap: True iff generated windows should be overlapped.
        overlap_corners: True iff the corners/edges should be mirrored and contain
            as many overlaps as the centre of the image.
    Returns:
        Numpy 4-d array, dimensions are the window index, window height, window
        width, channel data. The returned shape[0] describes how many total
        windows were created.
    """
    img_height = image.shape[0]
    img_width = image.shape[1]
    (total_windows, num_height_windows, num_width_windows) = get_num_windows(img_height, img_width,
                                                                             window_height, window_width,
                                                                             overlap, overlap_corners)
    window_images = np.zeros((total_windows, window_height, window_width, image.shape[2]), image.dtype)
    # Stride between window origins: half a window when overlapping.
    src_window_height = int(window_height/2)
    src_window_width = int(window_width/2)
    # Training with overlapping windows, including overlaps for corners/edges, provides the
    # best IoU/LB scores. If training was not done using overlaps for corners/edges, then
    # predictions using overlaps without additional corner/edge windows provided the best
    # IoU/LB score.
    if not overlap_corners:
        # Without mirrored corners the stride is a full window (no overlap)
        # or half a window (overlap); border windows are zero-symmetric padded.
        src_window_height = window_height
        src_window_width = window_width
        if overlap > 0:
            src_window_height = int(src_window_height/2)
            src_window_width = int(src_window_width/2)
        cur_window = 0
        for h in range(num_height_windows):
            for w in range(num_width_windows):
                # The right-most and bottom-most windows of the image may need to
                # be padded. If we are at the edge of the input image, we will pad
                # the output windows. Determine how much is left to copy from the
                # input image, and the padding will then be the difference between
                # the end of the window, and the edge of the input image.
                image_edge_height = min(img_height, window_height + (h * src_window_height))
                image_edge_width = min(img_width, window_width + (w * src_window_width))
                window_pad_height = (window_height + (h * src_window_height)) - image_edge_height
                window_pad_width = (window_width + (w * src_window_width)) - image_edge_width
                window_images[cur_window] = np.pad(image[h * src_window_height:image_edge_height,
                                                         w * src_window_width:image_edge_width,:],
                                                   ((0,window_pad_height),(0,window_pad_width),(0,0)),
                                                   'symmetric')
                cur_window += 1
    else:
        cur_window = 0
        for h in range(num_height_windows):
            # The corners and edges need to be padded using symmetric padding.
            src_top = max(0, (h - 1) * src_window_height)
            src_bottom = min(img_height, (h + 1) * src_window_height)
            pad_top = 0
            if h == 0:
                # First row: mirror half a window above the image.
                pad_top = src_window_height
            pad_bottom = window_height - pad_top - (src_bottom - src_top)
            for w in range(num_width_windows):
                src_left = max(0, (w - 1) * src_window_width)
                src_right = min(img_width, (w + 1) * src_window_width)
                pad_left = 0
                if w == 0:
                    # First column: mirror half a window to the left.
                    pad_left = src_window_width
                pad_right = window_width - pad_left - (src_right - src_left)
                window_images[cur_window] = np.pad(image[src_top:src_bottom,src_left:src_right,:],
                                                   ((pad_top,pad_bottom),(pad_left,pad_right),(0,0)),
                                                   'symmetric')
                cur_window += 1
    return window_images
def stitch_single_image(windows, image_height, image_width, overlap=True, overlap_corners=True):
    """Stitch together an image that had been split into multiple equal-sided windows.
    Args:
        windows: All the windows of the image to be stitched together.
        image_height: The original height of the image to be stitched back together.
        image_width: The original width of the image to be stitched back together.
        overlap: True iff generated windows should be overlapped.
        overlap_corners: True iff the corners/edges should be mirrored and contain
            as many overlaps as the centre of the image.
    Returns:
        Numpy 3-d array/image of the specified image_height and image_width.
    """
    window_height = windows[0].shape[0]
    window_width = windows[0].shape[1]
    (_, num_height_windows, num_width_windows) = get_num_windows(image_height, image_width,
                                                                 window_height, window_width,
                                                                 overlap, overlap_corners)
    dest_img = np.zeros((image_height, image_width, windows[0].shape[2]), dtype=windows[0].dtype)
    cur_window = 0
    # Training with overlapping windows, including overlaps for corners/edges, provides the
    # best IoU/LB scores. If training was not done using overlaps for corners/edges, then
    # predictions using overlaps without additional corner/edge windows provided the best
    # IoU/LB score.
    dest_window_height = window_height
    dest_window_width = window_width
    if not overlap:
        # No overlapping windows
        for h in range(num_height_windows):
            # dest is full-size img, src is current window
            end_start_height = h * window_height
            end_dest_height = min(image_height, end_start_height + window_height)
            end_src_height = end_dest_height - end_start_height
            for w in range(num_width_windows):
                end_start_width = w * window_width
                end_dest_width = min(image_width, end_start_width + window_width)
                end_src_width = end_dest_width - end_start_width
                src_image = windows[cur_window]
                cur_window += 1
                # Copy the window in place, cropping any padding that was
                # added past the image border during splitting.
                dest_img[end_start_height:end_dest_height,end_start_width:end_dest_width] = src_image[0:end_src_height,0:end_src_width]
    elif overlap_corners:
        # Every destination pixel is covered by 4 overlapping windows (or
        # mirrored equivalents at the border), so each window contributes
        # a quarter of its value.
        dest_window_height = int(dest_window_height/2)
        dest_window_width = int(dest_window_width/2)
        for h in range(num_height_windows):
            # dest is full-size img, src is current window
            dest_top = max(0, (h - 1) * dest_window_height)
            dest_bottom = min(image_height, (h + 1) * dest_window_height)
            if h == 0:
                src_top = dest_window_height
            else:
                src_top = 0
            src_bottom = src_top + (dest_bottom - dest_top)
            for w in range(num_width_windows):
                dest_left = max(0, (w - 1) * dest_window_width)
                dest_right = min(image_width, (w + 1) * dest_window_width)
                if w == 0:
                    src_left = dest_window_width
                else:
                    src_left = 0
                src_right = src_left + (dest_right - dest_left)
                # Scale down the input window, the edges won't have 4 overlapping windows,
                # (they will have either 1 or 2 overlapping windows), but we discard all
                # edges anyways.
                src_image = windows[cur_window].copy()
                src_image[:,:] = src_image[:,:] / 4
                cur_window += 1
                dest_img[dest_top:dest_bottom,dest_left:dest_right] += src_image[src_top:src_bottom,src_left:src_right]
    else:
        # Overlapping windows without mirrored corners: interior pixels are
        # covered by 4 windows, edges by 2, corners by 1, so each window
        # quarter is scaled by its own overlap count before accumulation.
        if image_height > dest_window_height:
            dest_window_height = int(dest_window_height/2)
        if image_width > dest_window_width:
            dest_window_width = int(dest_window_width/2)
        for h in range(num_height_windows):
            # dest is full-size img, src is current window
            dest_start_height = h * dest_window_height
            dest_end_height = min(image_height, dest_start_height + window_height)
            src_end_height = dest_end_height - dest_start_height
            for w in range(num_width_windows):
                dest_start_width = w * dest_window_width
                dest_end_width = min(image_width, dest_start_width + window_width)
                src_end_width = dest_end_width - dest_start_width
                src_image = windows[cur_window].copy()
                # Scale down the input window based on how many windows overlap each
                # quarter of the window.
                q1 = 1
                q2 = 1
                q3 = 1
                q4 = 1
                if (h == 0 and w > 0) or (w == 0 and h > 0):
                    q1 = 2
                elif (h > 0 and w > 0):
                    q1 = 4
                if (h == 0 and w < (num_width_windows - 1)) or (h > 0 and w == (num_width_windows - 1)):
                    q2 = 2
                elif (h > 0 and w < (num_width_windows - 1)):
                    q2 = 4
                if (h < (num_height_windows - 1) and w == 0) or (h == (num_height_windows - 1) and w > 0):
                    q3 = 2
                elif (h < (num_height_windows - 1)) and w > 0:
                    q3 = 4
                if (h < (num_height_windows - 1) and w == (num_width_windows - 1)) or (h == (num_height_windows - 1) and w < (num_width_windows - 1)):
                    q4 = 2
                elif (h < (num_height_windows - 1)) and (w < (num_width_windows - 1)):
                    q4 = 4
                # q1..q4 = top-left, top-right, bottom-left, bottom-right quarters.
                src_image[0:dest_window_height,0:dest_window_width] = src_image[0:dest_window_height,0:dest_window_width] / q1
                src_image[0:dest_window_height,dest_window_width:window_width] = src_image[0:dest_window_height,dest_window_width:window_width] / q2
                src_image[dest_window_height:window_height,0:dest_window_width] = src_image[dest_window_height:window_height,0:dest_window_width] / q3
                src_image[dest_window_height:window_height,dest_window_width:window_width] = src_image[dest_window_height:window_height,dest_window_width:window_width] / q4
                cur_window += 1
                dest_img[dest_start_height:dest_end_height,dest_start_width:dest_end_width] += src_image[0:src_end_height,0:src_end_width]
    return dest_img
def stitch_all_images(windows, sizes, overlap=True, overlap_corners=True):
    """Reassemble every image from a flat sequence of windows.

    Args:
        windows: Windows for all images, concatenated in split order.
        sizes: Iterable of (image_height, image_width) original sizes.
        overlap: True iff the windows were generated with overlap.
        overlap_corners: True iff corner/edge windows were mirrored so they
            receive as many overlaps as the image centre.

    Returns:
        List of stitched numpy images, one per entry in ``sizes``.
    """
    win_h = windows[0].shape[0]
    win_w = windows[0].shape[1]
    images = []
    start = 0
    for height, width in sizes:
        # Each image consumes exactly the number of windows its size implies.
        count, _, _ = get_num_windows(height, width, win_h, win_w,
                                      overlap, overlap_corners)
        stop = start + count
        images.append(stitch_single_image(windows[start:stop], height, width,
                                          overlap, overlap_corners))
        start = stop
    return images
| [
"numpy.pad",
"numpy.zeros",
"math.ceil"
] | [((2692, 2779), 'numpy.zeros', 'np.zeros', (['(total_windows, window_height, window_width, image.shape[2])', 'image.dtype'], {}), '((total_windows, window_height, window_width, image.shape[2]),\n image.dtype)\n', (2700, 2779), True, 'import numpy as np\n'), ((6942, 7029), 'numpy.zeros', 'np.zeros', (['(image_height, image_width, windows[0].shape[2])'], {'dtype': 'windows[0].dtype'}), '((image_height, image_width, windows[0].shape[2]), dtype=windows[0]\n .dtype)\n', (6950, 7029), True, 'import numpy as np\n'), ((870, 909), 'math.ceil', 'math.ceil', (['(image_height / window_height)'], {}), '(image_height / window_height)\n', (879, 909), False, 'import math\n'), ((936, 973), 'math.ceil', 'math.ceil', (['(image_width / window_width)'], {}), '(image_width / window_width)\n', (945, 973), False, 'import math\n'), ((1027, 1072), 'math.ceil', 'math.ceil', (['(image_height / (window_height / 2))'], {}), '(image_height / (window_height / 2))\n', (1036, 1072), False, 'import math\n'), ((1101, 1144), 'math.ceil', 'math.ceil', (['(image_width / (window_width / 2))'], {}), '(image_width / (window_width / 2))\n', (1110, 1144), False, 'import math\n'), ((4343, 4519), 'numpy.pad', 'np.pad', (['image[h * src_window_height:image_edge_height, w * src_window_width:\n image_edge_width, :]', '((0, window_pad_height), (0, window_pad_width), (0, 0))', '"""symmetric"""'], {}), "(image[h * src_window_height:image_edge_height, w * src_window_width:\n image_edge_width, :], ((0, window_pad_height), (0, window_pad_width), (\n 0, 0)), 'symmetric')\n", (4349, 4519), True, 'import numpy as np\n'), ((5553, 5682), 'numpy.pad', 'np.pad', (['image[src_top:src_bottom, src_left:src_right, :]', '((pad_top, pad_bottom), (pad_left, pad_right), (0, 0))', '"""symmetric"""'], {}), "(image[src_top:src_bottom, src_left:src_right, :], ((pad_top,\n pad_bottom), (pad_left, pad_right), (0, 0)), 'symmetric')\n", (5559, 5682), True, 'import numpy as np\n'), ((1191, 1236), 'math.ceil', 'math.ceil', (['(image_height 
/ (window_height / 2))'], {}), '(image_height / (window_height / 2))\n', (1200, 1236), False, 'import math\n'), ((1273, 1316), 'math.ceil', 'math.ceil', (['(image_width / (window_width / 2))'], {}), '(image_width / (window_width / 2))\n', (1282, 1316), False, 'import math\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import uuid
import os
import glob
import math
import logging
import numpy as np
from os.path import join
import cv2
import argparse
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from line_detection_module.models.networks.DCNv2.dcn_v2 import DCN
BN_MOMENTUM = 0.1  # momentum for every BatchNorm layer in this backbone
# Per-channel normalization statistics used by pre_process; shaped (1, 1, 3)
# to broadcast over an H x W x 3 image.
mean = np.array([0.472459, 0.475080, 0.482652],
                dtype=np.float32).reshape((1, 1, 3))
std = np.array([0.255084, 0.254665, 0.257073],
               dtype=np.float32).reshape((1, 1, 3))
down_ratio = 4  # stride between the network input and its output feature map
logger = logging.getLogger(__name__)
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
    """Build the download URL for a pretrained DLA checkpoint."""
    filename = '{}-{}.pth'.format(name, hash)
    return join('http://dl.yf.io/dla/models', data, filename)
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with 1-pixel padding and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 conv+BN layers with an additive skip connection and ReLU."""

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        # Attribute names are kept stable so pretrained state dicts load.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.stride = stride

    def forward(self, x, residual=None):
        """Apply conv-bn-relu-conv-bn, add the skip input, then a final ReLU."""
        skip = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += skip
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand bottleneck with a skip connection."""

    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        # The 3x3 conv operates on a channel count reduced by `expansion`.
        bottle_planes = planes // Bottleneck.expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes, kernel_size=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation, bias=False,
                               dilation=dilation)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        """Run the three conv stages, add the skip input, apply the final ReLU."""
        skip = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += skip
        return self.relu(out)
class BottleneckX(nn.Module):
    """ResNeXt-style bottleneck: the middle 3x3 conv is grouped."""

    expansion = 2
    cardinality = 32

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BottleneckX, self).__init__()
        cardinality = BottleneckX.cardinality
        # dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
        # bottle_planes = dim * cardinality
        bottle_planes = planes * cardinality // 32
        self.conv1 = nn.Conv2d(inplanes, bottle_planes, kernel_size=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        # Grouped conv: channels are split across `cardinality` groups.
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation, bias=False,
                               dilation=dilation, groups=cardinality)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        """Run the three conv stages, add the skip input, apply the final ReLU."""
        skip = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += skip
        return self.relu(out)
class Root(nn.Module):
    """Aggregation node: concat children, 1x1 conv + BN, optional skip, ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        # The conv kernel is fixed at 1x1; kernel_size only sets the padding
        # so the spatial size is preserved.
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        """Fuse all child feature maps; optionally add the first child back."""
        children = x
        out = self.bn(self.conv(torch.cat(children, 1)))
        if self.residual:
            out += children[0]
        return self.relu(out)
class Tree(nn.Module):
    """Recursive hierarchical-aggregation node of the DLA backbone.

    A Tree of depth ``levels`` contains two subtrees (blocks when
    levels == 1) whose outputs are fused by a Root node at the leaves.
    """
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        if root_dim == 0:
            # Default: root fuses the two subtree outputs.
            root_dim = 2 * out_channels
        if level_root:
            # Level roots additionally receive the downsampled input.
            root_dim += in_channels
        if levels == 1:
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            # Recursive case: only tree1 carries the stride; tree2's root
            # also fuses tree1's output (root_dim + out_channels).
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            # 1x1 projection so the residual matches the output channels.
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
            )

    def forward(self, x, residual=None, children=None):
        """Run both subtrees; at the leaf level fuse them through the Root.

        Note: the incoming ``residual`` argument is overwritten below — the
        residual actually used is always derived from the (possibly
        downsampled/projected) input.
        """
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation backbone.

    Produces a list of 6 feature maps (level0..level5) at strides
    1, 2, 4, 8, 16, 32 relative to the input.
    """
    def __init__(self, levels, channels, num_classes=1000,
                 block=BasicBlock, residual_root=False, linear_root=False):
        super(DLA, self).__init__()
        self.channels = channels
        self.num_classes = num_classes
        # Stem: 7x7 stride-1 conv keeps full resolution for level0.
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)

    def _make_level(self, block, inplanes, planes, blocks, stride=1):
        # NOTE(review): the block classes in this file take no `downsample`
        # keyword, so this helper would raise TypeError if called; it appears
        # to be unused (levels are built via _make_conv_level and Tree).
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(
                nn.MaxPool2d(stride, stride=stride),
                nn.Conv2d(inplanes, planes,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample=downsample))
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` conv-BN-ReLU triples; only the first carries stride."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)

    def forward(self, x):
        """Return the list of all 6 level outputs (coarsest last)."""
        y = []
        x = self.base_layer(x)
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        return y

    def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
        """Load pretrained weights from a local .pth file or the DLA model zoo.

        Side effect: recreates ``self.fc`` to match the checkpoint's
        classifier head so load_state_dict succeeds. Downloads over the
        network when `name` is not a .pth path.
        """
        # fc = self.fc
        if name.endswith('.pth'):
            model_weights = torch.load(data + name)
        else:
            model_url = get_model_url(data, name, hash)
            model_weights = model_zoo.load_url(model_url)
        # Infer the checkpoint's class count from its last parameter tensor.
        num_classes = len(model_weights[list(model_weights.keys())[-1]])
        self.fc = nn.Conv2d(
            self.channels[-1], num_classes,
            kernel_size=1, stride=1, padding=0, bias=True)
        self.load_state_dict(model_weights)
        # self.fc = fc
def dla34(pretrained=True, **kwargs):  # DLA-34
    """Construct a DLA-34 backbone, optionally loading ImageNet weights.

    Note: the default ``pretrained=True`` triggers a network download.
    """
    net = DLA([1, 1, 1, 2, 2, 1],
              [16, 32, 64, 128, 256, 512],
              block=BasicBlock, **kwargs)
    if pretrained:
        net.load_pretrained_model(data='imagenet', name='dla34',
                                  hash='ba72cf86')
    return net
class Identity(nn.Module):
    """No-op module: forward returns its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        # Pass-through; useful as a structural placeholder.
        return x
def fill_fc_weights(layers):
    """Zero-initialise the bias of every Conv2d inside `layers`.

    Weights are left untouched (PyTorch's default init).
    """
    for module in layers.modules():
        if isinstance(module, nn.Conv2d) and module.bias is not None:
            nn.init.constant_(module.bias, 0)
def fill_up_weights(up):
    """Initialise a (grouped) ConvTranspose2d with bilinear-upsampling weights.

    The kernel for output channel 0 / input slot 0 is filled with a separable
    bilinear filter, then copied to every other output channel.
    """
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    c = (2 * f - 1 - f % 2) / (2. * f)
    # Separable triangular (bilinear) filter centred at c.
    for i in range(w.size(2)):
        row = 1 - math.fabs(i / f - c)
        for j in range(w.size(3)):
            w[0, 0, i, j] = row * (1 - math.fabs(j / f - c))
    # One filter per channel (depthwise layout): replicate channel 0.
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
class DeformConv(nn.Module):
    """Deformable 3x3 convolution (DCNv2) followed by BatchNorm and ReLU."""

    def __init__(self, chi, cho):
        super(DeformConv, self).__init__()
        # BN + ReLU applied after the deformable convolution.
        self.actf = nn.Sequential(
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
        self.conv = DCN(chi, cho, kernel_size=(3, 3), stride=1, padding=1,
                        dilation=1, deformable_groups=1)

    def forward(self, x):
        """Deformable conv, then BN and ReLU."""
        return self.actf(self.conv(x))
class IDAUp(nn.Module):
    """Iterative Deep Aggregation: project each level to `o` channels,
    upsample it, and merge it with the previous (finer) level."""

    def __init__(self, o, channels, up_f):
        super(IDAUp, self).__init__()
        # One (proj, up, node) triple per level past the first. Submodules
        # are created in the original order (proj, node, up) and registered
        # under the same attribute names so state dicts stay compatible.
        for level in range(1, len(channels)):
            in_ch = channels[level]
            factor = int(up_f[level])
            proj = DeformConv(in_ch, o)
            node = DeformConv(o, o)
            up = nn.ConvTranspose2d(o, o, factor * 2, stride=factor,
                                    padding=factor // 2, output_padding=0,
                                    groups=o, bias=False)
            fill_up_weights(up)  # depthwise bilinear initialisation
            setattr(self, 'proj_' + str(level), proj)
            setattr(self, 'up_' + str(level), up)
            setattr(self, 'node_' + str(level), node)

    def forward(self, layers, startp, endp):
        """Merge layers[startp:endp] in place; the result ends up in
        layers[endp - 1]."""
        for idx in range(startp + 1, endp):
            k = idx - startp
            project = getattr(self, 'proj_' + str(k))
            upsample = getattr(self, 'up_' + str(k))
            layers[idx] = upsample(project(layers[idx]))
            node = getattr(self, 'node_' + str(k))
            layers[idx] = node(layers[idx] + layers[idx - 1])
class DLAUp(nn.Module):
    """Cascade of IDAUp stages that progressively merges the DLA levels
    from coarsest to finest."""

    def __init__(self, startp, channels, scales, in_channels=None):
        super(DLAUp, self).__init__()
        self.startp = startp
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        # Build one IDAUp per merge step; `scales` and `in_channels` are
        # updated in place after each step to reflect that the merged
        # levels now share the coarser stage's channels/scale.
        # NOTE(review): `in_channels` is mutated via slice assignment — if a
        # caller passes its own list it will be modified; confirm intended.
        for i in range(len(channels) - 1):
            j = -i - 2
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(channels[j], in_channels[j:],
                          scales[j:] // scales[j]))
            scales[j + 1:] = scales[j]
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]

    def forward(self, layers):
        """Return the aggregated pyramid, finest-to-coarsest ordering.

        `layers` is modified in place by each IDAUp stage.
        """
        out = [layers[-1]]  # start with 32
        for i in range(len(layers) - self.startp - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            ida(layers, len(layers) - i - 2, len(layers))
            out.insert(0, layers[-1])
        return out
class Interpolate(nn.Module):
    """Module wrapper around F.interpolate with a fixed scale and mode."""

    def __init__(self, scale, mode):
        super(Interpolate, self).__init__()
        self.scale = scale
        self.mode = mode

    def forward(self, x):
        """Resize `x` by `self.scale` using `self.mode` (align_corners=False)."""
        return F.interpolate(x, scale_factor=self.scale, mode=self.mode,
                             align_corners=False)
class DLASeg(nn.Module):
    """DLA backbone + DLAUp/IDAUp neck + one small conv head per output
    (e.g. heatmap, width-height, offset for CenterNet-style detection)."""

    def __init__(self, base_name, heads, pretrained, down_ratio, final_kernel,
                 last_level, head_conv, out_channel=0):
        super(DLASeg, self).__init__()
        assert down_ratio in [2, 4, 8, 16]
        # first_level = log2(down_ratio): index of the coarsest level kept
        # at the requested output stride.
        self.first_level = int(np.log2(down_ratio))
        self.last_level = last_level
        # Backbone constructor is looked up by name in module globals
        # (e.g. base_name='dla34' resolves to the dla34 factory above).
        self.base = globals()[base_name](pretrained=pretrained)
        channels = self.base.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(self.first_level, channels[self.first_level:], scales)

        if out_channel == 0:
            out_channel = channels[self.first_level]

        self.ida_up = IDAUp(out_channel, channels[self.first_level:self.last_level],
                            [2 ** i for i in range(self.last_level - self.first_level)])

        self.heads = heads
        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
                # Two-layer head: 3x3 conv + ReLU, then the output conv.
                fc = nn.Sequential(
                  nn.Conv2d(channels[self.first_level], head_conv,
                    kernel_size=3, padding=1, bias=True),
                  nn.ReLU(inplace=True),
                  nn.Conv2d(head_conv, classes,
                    kernel_size=final_kernel, stride=1,
                    padding=final_kernel // 2, bias=True))
                if 'hm' in head:
                    # Heatmap head bias set so initial sigmoid output is ~0.1
                    # (the standard CenterNet focal-loss initialisation).
                    fc[-1].bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            else:
                fc = nn.Conv2d(channels[self.first_level], classes,
                  kernel_size=final_kernel, stride=1,
                  padding=final_kernel // 2, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)

    def forward(self, x):
        """Return a one-element list holding the dict of head outputs."""
        x = self.base(x)
        x = self.dla_up(x)

        y = []
        for i in range(self.last_level - self.first_level):
            y.append(x[i].clone())
        self.ida_up(y, 0, len(y))

        z = {}
        for head in self.heads:
            z[head] = self.__getattr__(head)(y[-1])
        return [z]
def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4):
    """Build a DLA-based detection network with the given output heads.

    Args:
        num_layers: DLA variant depth (e.g. 34 for dla34).
        heads: Dict mapping head name to its number of output channels.
        head_conv: Hidden channels of each head (0 = single-conv heads).
        down_ratio: Output stride relative to the input image.
    """
    arch = 'dla{}'.format(num_layers)
    return DLASeg(arch, heads,
                  pretrained=False,
                  down_ratio=down_ratio,
                  final_kernel=1,
                  last_level=5,
                  head_conv=head_conv)
def load_model(model, model_path, optimizer=None, resume=False,
               lr=None, lr_step=None):
    """Load a checkpoint into `model`, tolerating shape/key mismatches.

    Mismatched or missing parameters are replaced by the model's own
    (freshly initialised) values, with a warning printed for each.

    NOTE(review): `optimizer`, `resume`, `lr`, `lr_step` and `start_epoch`
    are currently unused here — presumably leftovers from a fuller
    training-resume implementation; confirm before removing.
    """
    start_epoch = 0
    checkpoint = torch.load(model_path, map_location="cpu")
    print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch']))
    state_dict_ = checkpoint['state_dict']
    state_dict = {}

    # convert data_parallal to model
    for k in state_dict_:
        if k.startswith('module') and not k.startswith('module_list'):
            state_dict[k[7:]] = state_dict_[k]
        else:
            state_dict[k] = state_dict_[k]
    model_state_dict = model.state_dict()

    # check loaded parameters and created model parameters
    msg = 'If you see this, your model does not fully load the ' + \
          'pre-trained weight. Please make sure ' + \
          'you have correctly specified --arch xxx ' + \
          'or set the correct --num_classes for your own dataset.'
    for k in state_dict:
        if k in model_state_dict:
            if state_dict[k].shape != model_state_dict[k].shape:
                # Shape conflict: keep the model's own tensor.
                print('Skip loading parameter {}, required shape{}, ' \
                      'loaded shape{}. {}'.format(
                    k, model_state_dict[k].shape, state_dict[k].shape, msg))
                state_dict[k] = model_state_dict[k]
        else:
            print('Drop parameter {}.'.format(k) + msg)
    for k in model_state_dict:
        if not (k in state_dict):
            # Checkpoint is missing this parameter: keep the model's own.
            print('No param {}.'.format(k) + msg)
            state_dict[k] = model_state_dict[k]
    model.load_state_dict(state_dict, strict=False)
    return model
def get_dir(src_point, rot_rad):
    """Rotate a 2-D point counter-clockwise by `rot_rad` radians.

    Returns a plain [x, y] list.
    """
    sin_r = np.sin(rot_rad)
    cos_r = np.cos(rot_rad)
    x, y = src_point[0], src_point[1]
    return [x * cos_r - y * sin_r, x * sin_r + y * cos_r]
def get_3rd_point(a, b):
    """Return the third corner of a right angle at `b` for segment a->b.

    The result is `b` plus the (a - b) vector rotated by 90 degrees.
    """
    delta = a - b
    perpendicular = np.array([-delta[1], delta[0]], dtype=np.float32)
    return b + perpendicular
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=None,
                         inv=0):
    """Compute the 2x3 affine matrix mapping a source crop to `output_size`.

    Args:
        center: (x, y) centre of the crop in source-image coordinates.
        scale: Side length of the source region; a scalar or an (sx, sy) pair.
        rot: Rotation in degrees.
        output_size: (width, height) of the destination image.
        shift: Optional fractional (x, y) shift of the crop, in units of
            `scale`. Defaults to no shift.
        inv: If truthy, return the inverse mapping (destination -> source).

    Returns:
        2x3 float affine matrix suitable for cv2.warpAffine.
    """
    # Bug fix: the default was previously a module-level np.array created
    # once at definition time (mutable default argument); use a None
    # sentinel so each call gets a fresh, private array.
    if shift is None:
        shift = np.array([0, 0], dtype=np.float32)
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale], dtype=np.float32)

    scale_tmp = scale
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]

    # Rotate a reference direction vector so the rotation is encoded in the
    # point correspondences below.
    rot_rad = np.pi * rot / 180
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)

    # Three matching point pairs fully determine an affine transform:
    # the centre, a rotated direction point, and a perpendicular third point.
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir

    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    return trans
def pre_process(image, scale, meta=None):
    """Resize, crop and normalise an image into a 1x3x512x512 network input.

    Returns (images, meta): a float32 torch tensor and a dict with the crop
    centre 'c', scale 's' and output feature-map size.

    NOTE(review): the incoming `meta` argument is ignored and overwritten
    below — confirm whether callers rely on passing it.
    """
    height, width = image.shape[0:2]
    new_height = int(height * scale)
    new_width = int(width * scale)
    # Fixed 512x512 network input (the (x | 31) + 1 rounding is disabled).
    # inp_height = (new_height | 31) + 1
    inp_height = 512
    # inp_width = (new_width | 31) + 1
    inp_width = 512
    c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
    # s = np.array([inp_width, inp_height], dtype=np.float32)
    s = max(height, width) * 1.0
    trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
    resized_image = cv2.resize(image, (new_width, new_height))
    inp_image = cv2.warpAffine(
        resized_image, trans_input, (inp_width, inp_height),
        flags=cv2.INTER_LINEAR)
    # Normalise with the module-level per-channel mean/std, then move to
    # NCHW layout with a singleton batch dimension.
    inp_image = ((inp_image / 255. - mean) / std).astype(np.float32)

    images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
    images = torch.from_numpy(images)
    meta = {'c': c, 's': s,
            'out_height': inp_height // down_ratio,
            'out_width': inp_width // down_ratio}
    return images, meta
def ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=100):
    """Decode CenterNet outputs into the top-K detections per image.

    Args:
        heat: (B, C, H, W) class heatmaps (assumed already sigmoided).
        wh: (B, 2, H, W) width/height map, or (B, C*2, H, W) if cat_spec_wh.
        reg: Optional (B, 2, H, W) sub-pixel centre offsets.
        cat_spec_wh: True iff `wh` carries per-class width/height pairs.
        K: Number of detections to keep per image.

    Returns:
        (B, K, 6) tensor of [x1, y1, x2, y2, score, class].
    """
    batch, cat, height, width = heat.size()

    # heat = torch.sigmoid(heat)
    # perform nms on heatmaps: keep only local maxima.
    heat = _nms(heat)

    scores, inds, clses, ys, xs = _topk(heat, K=K)
    if reg is not None:
        # Refine integer peak positions with the predicted offsets.
        reg = _transpose_and_gather_feat(reg, inds)
        reg = reg.view(batch, K, 2)
        xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
        ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
    else:
        # No offset head: assume the centre sits at the cell midpoint.
        xs = xs.view(batch, K, 1) + 0.5
        ys = ys.view(batch, K, 1) + 0.5
    wh = _transpose_and_gather_feat(wh, inds)
    if cat_spec_wh:
        # Pick the (w, h) pair belonging to each detection's class.
        wh = wh.view(batch, K, cat, 2)
        clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()
        wh = wh.gather(2, clses_ind).view(batch, K, 2)
    else:
        wh = wh.view(batch, K, 2)
    clses = clses.view(batch, K, 1).float()
    scores = scores.view(batch, K, 1)
    # Convert centre + size to corner coordinates.
    bboxes = torch.cat([xs - wh[..., 0:1] / 2,
                        ys - wh[..., 1:2] / 2,
                        xs + wh[..., 0:1] / 2,
                        ys + wh[..., 1:2] / 2], dim=2)
    detections = torch.cat([bboxes, scores, clses], dim=2)

    return detections
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _transpose_and_gather_feat(feat, ind):
    """Reshape (B, C, H, W) to (B, H*W, C) and gather the rows at `ind`."""
    flat = feat.permute(0, 2, 3, 1).contiguous()
    flat = flat.view(flat.size(0), -1, flat.size(3))
    return _gather_feat(flat, ind)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _topk(scores, K=40):
    """Pick the overall top-K peaks across all classes of a (B, C, H, W) map.

    Returns (scores, flat_indices, classes, ys, xs), each shaped (B, K).
    """
    batch, cat, height, width = scores.size()

    # Top-K per class first, over the flattened H*W plane.
    topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)

    topk_inds = topk_inds % (height * width)
    # Recover (y, x) from the flat index. NOTE(review): uses float `/` then
    # .int() truncation instead of integer floor division — equivalent for
    # non-negative indices below float precision limits.
    topk_ys = (topk_inds / width).int().float()
    topk_xs = (topk_inds % width).int().float()

    # Then top-K across classes; the class id is the quotient by K.
    topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
    topk_clses = (topk_ind / K).int()
    topk_inds = _gather_feat(
        topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
    topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)

    return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
def ctdet_post_process(dets, c, s, h, w, num_classes):
    """Map raw detections back to original-image coordinates, per class.

    Args:
        dets: array of shape (batch, max_dets, dim); per row, columns 0-3
            are box corners (x1, y1, x2, y2), column 4 the score and the
            last column the class id.  NOTE: modified in place.
        c: per-image centers of the input affine transform (one per batch
            element).
        s: per-image scales of the input affine transform.
        h: network output height.
        w: network output width.
        num_classes: number of classes.

    Returns:
        List with one dict per image, mapping the 1-based class id to a
        list of [x1, y1, x2, y2, score] rows (plain Python lists).
    """
    ret = []
    for i in range(dets.shape[0]):
        top_preds = {}
        # Map both box corners from output-grid coordinates back to the
        # original image via the inverse affine transform.
        dets[i, :, :2] = transform_preds(
            dets[i, :, 0:2], c[i], s[i], (w, h))
        dets[i, :, 2:4] = transform_preds(
            dets[i, :, 2:4], c[i], s[i], (w, h))
        classes = dets[i, :, -1]
        for j in range(num_classes):
            # Boolean mask selecting the detections of class j.
            inds = (classes == j)
            top_preds[j + 1] = np.concatenate([
                dets[i, inds, :4].astype(np.float32),  # coord
                dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist()  # score
        ret.append(top_preds)
    return ret
def transform_preds(coords, center, scale, output_size):
    """Apply the inverse affine transform to an array of (x, y) points.

    Args:
        coords: array of shape (N, 2) with points in output-grid
            coordinates.
        center: center parameter of the forward affine transform.
        scale: scale parameter of the forward affine transform.
        output_size: (width, height) of the network output.

    Returns:
        Array of shape ``coords.shape`` with the points mapped back to the
        original image.
    """
    inverse = get_affine_transform(center, scale, 0, output_size, inv=1)
    mapped = np.zeros(coords.shape)
    for row in range(coords.shape[0]):
        mapped[row, 0:2] = affine_transform(coords[row, 0:2], inverse)
    return mapped
def affine_transform(pt, t):
    """Apply a 2x3 affine matrix ``t`` to a single 2-D point ``pt``.

    Args:
        pt: indexable pair (x, y).
        t: affine matrix of shape (2, 3).

    Returns:
        Array with the two transformed coordinates.
    """
    homogeneous = np.array([pt[0], pt[1], 1.0], dtype=np.float32)
    transformed = t @ homogeneous
    return transformed[:2]
def merge_outputs(detections, num_classes, max_per_image):
    """Merge per-scale detections and cap the total kept per image.

    Args:
        detections: list of dicts, each mapping a 1-based class id to an
            array of shape (n, 5) rows (x1, y1, x2, y2, score).
        num_classes: number of classes (keys run from 1 to num_classes).
        max_per_image: maximum total detections to keep across all classes.

    Returns:
        Dict mapping each 1-based class id to a float32 array holding the
        detections whose score survives the per-image threshold.
    """
    results = {}
    for j in range(1, num_classes + 1):
        results[j] = np.concatenate(
            [detection[j] for detection in detections], axis=0).astype(np.float32)
    # Collect all scores across classes to find the global cut-off.
    scores = np.hstack(
        [results[j][:, 4] for j in range(1, num_classes + 1)])
    if len(scores) > max_per_image:
        kth = len(scores) - max_per_image
        # Score of the weakest detection that is still kept.
        thresh = np.partition(scores, kth)[kth]
        for j in range(1, num_classes + 1):
            keep_inds = (results[j][:, 4] >= thresh)
            results[j] = results[j][keep_inds]
    return results
def post_process(dets, meta, num_classes, scale=1):
    """Convert a raw detection tensor into per-class image-space boxes.

    Args:
        dets: detection tensor of shape (batch, max_dets, dim); detached
            and moved to CPU/numpy here.
        meta: dict with keys 'c', 's', 'out_height', 'out_width'
            describing the input affine transform.
        num_classes: number of classes.
        scale: test-time augmentation scale; box coordinates are divided
            by it to undo the scaling.

    Returns:
        Dict for the first (only) image mapping 1-based class ids to
        float32 arrays of shape (n, 5) rows (x1, y1, x2, y2, score).
    """
    dets = dets.detach().cpu().numpy()
    dets = dets.reshape(1, -1, dets.shape[2])
    dets = ctdet_post_process(
        dets.copy(), [meta['c']], [meta['s']],
        meta['out_height'], meta['out_width'], num_classes)
    for j in range(1, num_classes + 1):
        dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
        # Undo test-time scaling of the box coordinates only (not score).
        dets[0][j][:, :4] /= scale
    return dets[0]
# heads = {'hm': 1, 'wh': 2, 'reg': 2}
# head_conv = 256
# num_layers = 34
# scale = 1.0
# num_classes = 1
# max_per_image = 2000
| [
"torch.cat",
"cv2.warpAffine",
"numpy.sin",
"torch.nn.init.constant_",
"torch.utils.model_zoo.load_url",
"line_detection_module.models.networks.DCNv2.dcn_v2.DCN",
"torch.load",
"torch.nn.functional.max_pool2d",
"cv2.resize",
"numpy.partition",
"numpy.log2",
"torch.nn.Conv2d",
"torch.nn.Batch... | [((673, 700), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (690, 700), False, 'import logging\n'), ((948, 1037), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (957, 1037), False, 'from torch import nn\n'), ((17754, 17796), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cpu"""'}), "(model_path, map_location='cpu')\n", (17764, 17796), False, 'import torch\n'), ((19722, 19756), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float32'}), '([0, 0], dtype=np.float32)\n', (19730, 19756), True, 'import numpy as np\n'), ((20123, 20162), 'numpy.array', 'np.array', (['[0, dst_w * -0.5]', 'np.float32'], {}), '([0, dst_w * -0.5], np.float32)\n', (20131, 20162), True, 'import numpy as np\n'), ((20174, 20208), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (20182, 20208), True, 'import numpy as np\n'), ((20219, 20253), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (20227, 20253), True, 'import numpy as np\n'), ((21045, 21106), 'numpy.array', 'np.array', (['[new_width // 2, new_height // 2]'], {'dtype': 'np.float32'}), '([new_width // 2, new_height // 2], dtype=np.float32)\n', (21053, 21106), True, 'import numpy as np\n'), ((21295, 21337), 'cv2.resize', 'cv2.resize', (['image', '(new_width, new_height)'], {}), '(image, (new_width, new_height))\n', (21305, 21337), False, 'import cv2\n'), ((21354, 21450), 'cv2.warpAffine', 'cv2.warpAffine', (['resized_image', 'trans_input', '(inp_width, inp_height)'], {'flags': 'cv2.INTER_LINEAR'}), '(resized_image, trans_input, (inp_width, inp_height), flags=\n cv2.INTER_LINEAR)\n', (21368, 21450), False, 'import cv2\n'), ((21625, 21649), 'torch.from_numpy', 'torch.from_numpy', (['images'], 
{}), '(images)\n', (21641, 21649), False, 'import torch\n'), ((22732, 22846), 'torch.cat', 'torch.cat', (['[xs - wh[..., 0:1] / 2, ys - wh[..., 1:2] / 2, xs + wh[..., 0:1] / 2, ys + \n wh[..., 1:2] / 2]'], {'dim': '(2)'}), '([xs - wh[..., 0:1] / 2, ys - wh[..., 1:2] / 2, xs + wh[..., 0:1] /\n 2, ys + wh[..., 1:2] / 2], dim=2)\n', (22741, 22846), False, 'import torch\n'), ((22932, 22973), 'torch.cat', 'torch.cat', (['[bboxes, scores, clses]'], {'dim': '(2)'}), '([bboxes, scores, clses], dim=2)\n', (22941, 22973), False, 'import torch\n'), ((23065, 23136), 'torch.nn.functional.max_pool2d', 'nn.functional.max_pool2d', (['heat', '(kernel, kernel)'], {'stride': '(1)', 'padding': 'pad'}), '(heat, (kernel, kernel), stride=1, padding=pad)\n', (23089, 23136), False, 'from torch import nn\n'), ((25239, 25261), 'numpy.zeros', 'np.zeros', (['coords.shape'], {}), '(coords.shape)\n', (25247, 25261), True, 'import numpy as np\n'), ((25573, 25590), 'numpy.dot', 'np.dot', (['t', 'new_pt'], {}), '(t, new_pt)\n', (25579, 25590), True, 'import numpy as np\n'), ((455, 512), 'numpy.array', 'np.array', (['[0.472459, 0.47508, 0.482652]'], {'dtype': 'np.float32'}), '([0.472459, 0.47508, 0.482652], dtype=np.float32)\n', (463, 512), True, 'import numpy as np\n'), ((555, 613), 'numpy.array', 'np.array', (['[0.255084, 0.254665, 0.257073]'], {'dtype': 'np.float32'}), '([0.255084, 0.254665, 0.257073], dtype=np.float32)\n', (563, 613), True, 'import numpy as np\n'), ((1214, 1324), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': 'dilation', 'bias': '(False)', 'dilation': 'dilation'}), '(inplanes, planes, kernel_size=3, stride=stride, padding=dilation,\n bias=False, dilation=dilation)\n', (1223, 1324), False, 'from torch import nn\n'), ((1402, 1446), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {'momentum': 'BN_MOMENTUM'}), '(planes, momentum=BN_MOMENTUM)\n', (1416, 1446), False, 'from torch import nn\n'), ((1467, 1488), 
'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1474, 1488), False, 'from torch import nn\n'), ((1510, 1614), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': 'dilation', 'bias': '(False)', 'dilation': 'dilation'}), '(planes, planes, kernel_size=3, stride=1, padding=dilation, bias=\n False, dilation=dilation)\n', (1519, 1614), False, 'from torch import nn\n'), ((1691, 1735), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {'momentum': 'BN_MOMENTUM'}), '(planes, momentum=BN_MOMENTUM)\n', (1705, 1735), False, 'from torch import nn\n'), ((2343, 2404), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'bottle_planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inplanes, bottle_planes, kernel_size=1, bias=False)\n', (2352, 2404), False, 'from torch import nn\n'), ((2455, 2506), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['bottle_planes'], {'momentum': 'BN_MOMENTUM'}), '(bottle_planes, momentum=BN_MOMENTUM)\n', (2469, 2506), False, 'from torch import nn\n'), ((2528, 2650), 'torch.nn.Conv2d', 'nn.Conv2d', (['bottle_planes', 'bottle_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': 'dilation', 'bias': '(False)', 'dilation': 'dilation'}), '(bottle_planes, bottle_planes, kernel_size=3, stride=stride,\n padding=dilation, bias=False, dilation=dilation)\n', (2537, 2650), False, 'from torch import nn\n'), ((2728, 2779), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['bottle_planes'], {'momentum': 'BN_MOMENTUM'}), '(bottle_planes, momentum=BN_MOMENTUM)\n', (2742, 2779), False, 'from torch import nn\n'), ((2801, 2860), 'torch.nn.Conv2d', 'nn.Conv2d', (['bottle_planes', 'planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(bottle_planes, planes, kernel_size=1, bias=False)\n', (2810, 2860), False, 'from torch import nn\n'), ((2911, 2955), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {'momentum': 'BN_MOMENTUM'}), '(planes, momentum=BN_MOMENTUM)\n', (2925, 2955), 
False, 'from torch import nn\n'), ((2976, 2997), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2983, 2997), False, 'from torch import nn\n'), ((3846, 3907), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'bottle_planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inplanes, bottle_planes, kernel_size=1, bias=False)\n', (3855, 3907), False, 'from torch import nn\n'), ((3958, 4009), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['bottle_planes'], {'momentum': 'BN_MOMENTUM'}), '(bottle_planes, momentum=BN_MOMENTUM)\n', (3972, 4009), False, 'from torch import nn\n'), ((4031, 4173), 'torch.nn.Conv2d', 'nn.Conv2d', (['bottle_planes', 'bottle_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': 'dilation', 'bias': '(False)', 'dilation': 'dilation', 'groups': 'cardinality'}), '(bottle_planes, bottle_planes, kernel_size=3, stride=stride,\n padding=dilation, bias=False, dilation=dilation, groups=cardinality)\n', (4040, 4173), False, 'from torch import nn\n'), ((4251, 4302), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['bottle_planes'], {'momentum': 'BN_MOMENTUM'}), '(bottle_planes, momentum=BN_MOMENTUM)\n', (4265, 4302), False, 'from torch import nn\n'), ((4324, 4383), 'torch.nn.Conv2d', 'nn.Conv2d', (['bottle_planes', 'planes'], {'kernel_size': '(1)', 'bias': '(False)'}), '(bottle_planes, planes, kernel_size=1, bias=False)\n', (4333, 4383), False, 'from torch import nn\n'), ((4434, 4478), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {'momentum': 'BN_MOMENTUM'}), '(planes, momentum=BN_MOMENTUM)\n', (4448, 4478), False, 'from torch import nn\n'), ((4499, 4520), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4506, 4520), False, 'from torch import nn\n'), ((5109, 5207), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(1)'], {'stride': '(1)', 'bias': '(False)', 'padding': '((kernel_size - 1) // 2)'}), '(in_channels, out_channels, 1, stride=1, bias=False, padding=(\n kernel_size - 
1) // 2)\n', (5118, 5207), False, 'from torch import nn\n'), ((5246, 5296), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {'momentum': 'BN_MOMENTUM'}), '(out_channels, momentum=BN_MOMENTUM)\n', (5260, 5296), False, 'from torch import nn\n'), ((5317, 5338), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5324, 5338), False, 'from torch import nn\n'), ((9959, 9981), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (9972, 9981), False, 'from torch import nn\n'), ((10483, 10506), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (10496, 10506), False, 'from torch import nn\n'), ((11116, 11209), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.channels[-1]', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(self.channels[-1], num_classes, kernel_size=1, stride=1, padding=\n 0, bias=True)\n', (11125, 11209), False, 'from torch import nn\n'), ((12534, 12625), 'line_detection_module.models.networks.DCNv2.dcn_v2.DCN', 'DCN', (['chi', 'cho'], {'kernel_size': '(3, 3)', 'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'deformable_groups': '(1)'}), '(chi, cho, kernel_size=(3, 3), stride=1, padding=1, dilation=1,\n deformable_groups=1)\n', (12537, 12625), False, 'from line_detection_module.models.networks.DCNv2.dcn_v2 import DCN\n'), ((14056, 14083), 'numpy.array', 'np.array', (['scales'], {'dtype': 'int'}), '(scales, dtype=int)\n', (14064, 14083), True, 'import numpy as np\n'), ((14919, 14997), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': 'self.scale', 'mode': 'self.mode', 'align_corners': '(False)'}), '(x, scale_factor=self.scale, mode=self.mode, align_corners=False)\n', (14932, 14997), True, 'import torch.nn.functional as F\n'), ((19247, 19262), 'numpy.sin', 'np.sin', (['rot_rad'], {}), '(rot_rad)\n', (19253, 19262), True, 'import numpy as np\n'), ((19264, 19279), 'numpy.cos', 'np.cos', (['rot_rad'], {}), 
'(rot_rad)\n', (19270, 19279), True, 'import numpy as np\n'), ((19504, 19555), 'numpy.array', 'np.array', (['[-direct[1], direct[0]]'], {'dtype': 'np.float32'}), '([-direct[1], direct[0]], dtype=np.float32)\n', (19512, 19555), True, 'import numpy as np\n'), ((19881, 19923), 'numpy.array', 'np.array', (['[scale, scale]'], {'dtype': 'np.float32'}), '([scale, scale], dtype=np.float32)\n', (19889, 19923), True, 'import numpy as np\n'), ((20409, 20457), 'numpy.array', 'np.array', (['[dst_w * 0.5, dst_h * 0.5]', 'np.float32'], {}), '([dst_w * 0.5, dst_h * 0.5], np.float32)\n', (20417, 20457), True, 'import numpy as np\n'), ((25511, 25558), 'numpy.array', 'np.array', (['[pt[0], pt[1], 1.0]'], {'dtype': 'np.float32'}), '([pt[0], pt[1], 1.0], dtype=np.float32)\n', (25519, 25558), True, 'import numpy as np\n'), ((5443, 5458), 'torch.cat', 'torch.cat', (['x', '(1)'], {}), '(x, 1)\n', (5452, 5458), False, 'import torch\n'), ((7130, 7165), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['stride'], {'stride': 'stride'}), '(stride, stride=stride)\n', (7142, 7165), False, 'from torch import nn\n'), ((8329, 8402), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'channels[0]'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(3, channels[0], kernel_size=7, stride=1, padding=3, bias=False)\n', (8338, 8402), False, 'from torch import nn\n'), ((8438, 8487), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['channels[0]'], {'momentum': 'BN_MOMENTUM'}), '(channels[0], momentum=BN_MOMENTUM)\n', (8452, 8487), False, 'from torch import nn\n'), ((8501, 8522), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8508, 8522), False, 'from torch import nn\n'), ((10873, 10896), 'torch.load', 'torch.load', (['(data + name)'], {}), '(data + name)\n', (10883, 10896), False, 'import torch\n'), ((10995, 11024), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (['model_url'], {}), '(model_url)\n', (11013, 11024), True, 'import torch.utils.model_zoo as 
model_zoo\n'), ((12427, 12468), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['cho'], {'momentum': 'BN_MOMENTUM'}), '(cho, momentum=BN_MOMENTUM)\n', (12441, 12468), False, 'from torch import nn\n'), ((12482, 12503), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (12489, 12503), False, 'from torch import nn\n'), ((13013, 13114), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['o', 'o', '(f * 2)'], {'stride': 'f', 'padding': '(f // 2)', 'output_padding': '(0)', 'groups': 'o', 'bias': '(False)'}), '(o, o, f * 2, stride=f, padding=f // 2, output_padding=0,\n groups=o, bias=False)\n', (13031, 13114), False, 'from torch import nn\n'), ((15290, 15309), 'numpy.log2', 'np.log2', (['down_ratio'], {}), '(down_ratio)\n', (15297, 15309), True, 'import numpy as np\n'), ((20627, 20642), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (20637, 20642), True, 'import numpy as np\n'), ((20644, 20659), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (20654, 20659), True, 'import numpy as np\n'), ((20710, 20725), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (20720, 20725), True, 'import numpy as np\n'), ((20727, 20742), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (20737, 20742), True, 'import numpy as np\n'), ((26183, 26208), 'numpy.partition', 'np.partition', (['scores', 'kth'], {}), '(scores, kth)\n', (26195, 26208), True, 'import numpy as np\n'), ((7264, 7337), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=1, stride=1, bias=False)\n', (7273, 7337), False, 'from torch import nn\n'), ((7381, 7431), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {'momentum': 'BN_MOMENTUM'}), '(out_channels, momentum=BN_MOMENTUM)\n', (7395, 7431), False, 'from torch import nn\n'), ((9537, 9572), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['stride'], {'stride': 'stride'}), '(stride, stride=stride)\n', 
(9549, 9572), False, 'from torch import nn\n'), ((9590, 9654), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(inplanes, planes, kernel_size=1, stride=1, bias=False)\n', (9599, 9654), False, 'from torch import nn\n'), ((9698, 9742), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {'momentum': 'BN_MOMENTUM'}), '(planes, momentum=BN_MOMENTUM)\n', (9712, 9742), False, 'from torch import nn\n'), ((11881, 11909), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (11898, 11909), False, 'from torch import nn\n'), ((16580, 16704), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels[self.first_level]', 'classes'], {'kernel_size': 'final_kernel', 'stride': '(1)', 'padding': '(final_kernel // 2)', 'bias': '(True)'}), '(channels[self.first_level], classes, kernel_size=final_kernel,\n stride=1, padding=final_kernel // 2, bias=True)\n', (16589, 16704), False, 'from torch import nn\n'), ((25799, 25865), 'numpy.concatenate', 'np.concatenate', (['[detection[j] for detection in detections]'], {'axis': '(0)'}), '([detection[j] for detection in detections], axis=0)\n', (25813, 25865), True, 'import numpy as np\n'), ((26756, 26794), 'numpy.array', 'np.array', (['dets[0][j]'], {'dtype': 'np.float32'}), '(dets[0][j], dtype=np.float32)\n', (26764, 26794), True, 'import numpy as np\n'), ((10159, 10286), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'planes'], {'kernel_size': '(3)', 'stride': '(stride if i == 0 else 1)', 'padding': 'dilation', 'bias': '(False)', 'dilation': 'dilation'}), '(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1,\n padding=dilation, bias=False, dilation=dilation)\n', (10168, 10286), False, 'from torch import nn\n'), ((10352, 10396), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {'momentum': 'BN_MOMENTUM'}), '(planes, momentum=BN_MOMENTUM)\n', (10366, 10396), False, 'from torch import nn\n'), ((10414, 10435), 'torch.nn.ReLU', 
'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10421, 10435), False, 'from torch import nn\n'), ((12149, 12169), 'math.fabs', 'math.fabs', (['(i / f - c)'], {}), '(i / f - c)\n', (12158, 12169), False, 'import math\n'), ((12178, 12198), 'math.fabs', 'math.fabs', (['(j / f - c)'], {}), '(j / f - c)\n', (12187, 12198), False, 'import math\n'), ((16051, 16140), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels[self.first_level]', 'head_conv'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(channels[self.first_level], head_conv, kernel_size=3, padding=1,\n bias=True)\n', (16060, 16140), False, 'from torch import nn\n'), ((16188, 16209), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (16195, 16209), False, 'from torch import nn\n'), ((16231, 16339), 'torch.nn.Conv2d', 'nn.Conv2d', (['head_conv', 'classes'], {'kernel_size': 'final_kernel', 'stride': '(1)', 'padding': '(final_kernel // 2)', 'bias': '(True)'}), '(head_conv, classes, kernel_size=final_kernel, stride=1, padding=\n final_kernel // 2, bias=True)\n', (16240, 16339), False, 'from torch import nn\n')] |
# Year 2022
# Based on fink-broker.org code
# https://github.com/astrolabsoftware/fink-science/tree/master/fink_science/xmatch
# Adapted by <NAME>
import io
import csv
import logging
import requests
import numpy as np
import pandas as pd
def generate_csv(s: str, lists: list) -> str:
    """ Make a string (CSV formatted) given lists of data and header.
    Parameters
    ----------
    s: str
        String which will contain the data.
        Should initially contain the CSV header.
    lists: list of lists
        List containing data.
        Length of `lists` must correspond to the header.
    Returns
    ----------
    s: str
        Updated string with one row per line.
    Examples
    ----------
    >>> header = "toto,tata\\n"
    >>> lists = [[1, 2], ["cat", "dog"]]
    >>> table = generate_csv(header, lists)
    >>> print(table)
    toto,tata
    1,"cat"
    2,"dog"
    <BLANKLINE>
    """
    output = io.StringIO()
    writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
    # One CSV row per position across the input columns (side effect:
    # writerows replaces the old list-comprehension-for-side-effects).
    writer.writerows(zip(*lists))
    # csv emits \r\n line endings; normalise to \n.
    return s + output.getvalue().replace("\r", "")
def refine_search(
    ra: list,
    dec: list,
    oid: list,
    id_out: list,
    names: list,
    types: list,
    sptypes: list,
    redshift: list,
) -> list:
    """ Create a final table by merging coordinates of objects found on the
    bibliographical database, with those objects which were not found.
    Parameters
    ----------
    ra: list of float
        List of RA
    dec: list of float
        List of Dec of the same size as ra.
    oid: list of str
        List of object ID (custom)
    id_out: list of str
        List of object ID returned by the xmatch with CDS
    names: list of str
        For matches, names of the celestial objects found
    types: list of str
        For matches, astronomical types of the celestial objects found
    sptypes: list of str
        For matches, spectral types of the celestial objects found
    redshift: list of str
        For matches, astronomical redshifts of the celestial objects found
    Returns
    ----------
    out: List of Tuple
        Each tuple contains (objectId, ra, dec, name, type, sptype, redshift).
        If the object is not found in Simbad, name, type, sptype and
        redshift are marked as Unknown. In the case several objects match
        the centroid of the alert, only the first one returned by the
        service is kept.
    """
    # Map each returned id to its first occurrence once, instead of
    # calling `in` + `.index()` per alert (O(n) each, O(n^2) overall).
    # First occurrence wins, exactly like list.index did; presumably the
    # service orders multiple matches by angular distance (closest first).
    first_match = {}
    for position, out_id in enumerate(id_out):
        first_match.setdefault(out_id, position)
    out = []
    for ra_in, dec_in, id_in in zip(ra, dec, oid):
        # cast for picky Spark
        ra_in, dec_in = float(ra_in), float(dec_in)
        id_in = str(id_in)
        index = first_match.get(id_in)
        if index is not None:
            # Empty fields from the service are reported as "Unknown".
            sp_type_tmp = sptypes[index] if sptypes[index] != "" else "Unknown"
            redshift_tmp = redshift[index] if redshift[index] != "" else "Unknown"
            out.append(
                (
                    id_in,
                    ra_in,
                    dec_in,
                    str(names[index]),
                    str(types[index]),
                    str(sp_type_tmp),
                    str(redshift_tmp),
                )
            )
        else:
            # Mark as unknown if no match
            out.append(
                (id_in, ra_in, dec_in, "Unknown", "Unknown", "Unknown", "Unknown")
            )
    return out
def xmatch(
    ra: list, dec: list, id: list, extcatalog: str = "simbad", distmaxarcsec: int = 1
) -> (list, list):
    """
    Build a catalog of (ra, dec, id) in a CSV-like string,
    cross-match with `extcatalog`, and decode the output.
    See http://cdsxmatch.u-strasbg.fr/ for more information.
    Parameters
    ----------
    ra: list of float
        List of RA
    dec: list of float
        List of Dec of the same size as ra.
    id: list of str
        List of object ID (custom)
    extcatalog: str
        Name of the catalog to use for the xMatch.
        See http://cdsxmatch.u-strasbg.fr/ for more information.
    distmaxarcsec: int
        Radius used for searching match. extcatalog sources lying within
        radius of the center (ra, dec) will be considered as matches.
    Returns
    ----------
    data: list of string
        Unformatted decoded data returned by the xMatch
    header: list of string
        Unformatted decoded header returned by the xmatch
    """
    # Build the input catalog as a CSV string (header + one row per alert).
    catalog = generate_csv("""ra_in,dec_in,objectId\n""", [ra, dec, id])
    payload = {
        "request": "xmatch",
        "distMaxArcsec": distmaxarcsec,
        "selection": "all",
        "RESPONSEFORMAT": "csv",
        "cat2": extcatalog,
        "colRA1": "ra_in",
        "colDec1": "dec_in",
    }
    # Send the request!
    response = requests.post(
        "http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync",
        data=payload,
        files={"cat1": catalog},
    )
    # Decode once, then split line by line:
    # the first line is the header, the last one is empty.
    lines = response.content.decode().split("\n")
    return lines[1:-1], lines[0].split(",")
def cross_match_alerts_raw_simbad(oid: list, ra: list, dec: list) -> list:
    """ Query the CDSXmatch service to find identified objects
    in alerts. The catalog queried is the SIMBAD bibliographical database.
    We can also use the 10,000+ VizieR tables if needed :-)
    Parameters
    ----------
    oid: list of str
        List containing object ids (custom)
    ra: list of float
        List containing object ra coordinates
    dec: list of float
        List containing object dec coordinates
    Returns
    ----------
    out: List of Tuple
        Each tuple contains (objectId, ra, dec, name, type, sptype, redshift).
        If the object is not found in Simbad, the fields are marked as
        Unknown. In the case several objects match the centroid of the
        alert, only the closest is returned. An empty list is returned
        when the input is empty or the service is unreachable.
    Examples
    ----------
    >>> ra = [26.8566983, 26.24497]
    >>> dec = [-26.9677112, -26.7569436]
    >>> id = ["1", "2"]
    >>> objects = cross_match_alerts_raw_simbad(id, ra, dec)
    """
    if len(ra) == 0:
        return []
    # Catch TimeoutError and ConnectionError
    try:
        data, header = xmatch(ra, dec, oid, extcatalog="simbad", distmaxarcsec=5)
    except (ConnectionError, TimeoutError, ValueError) as ce:
        logging.warning("XMATCH failed " + repr(ce))
        return []
    # Sometimes the service is down, but without TimeoutError or
    # ConnectionError. In that case, grab the error message from the data.
    if len(data) > 0 and "504 Gateway Time-out" in data[0]:
        logging.warning("CDS xmatch service probably down")
        logging.warning(data[0])
        logging.warning("Check at http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync")
        return []
    # Bail out if the expected columns are missing.
    if "main_id" not in header:
        return []
    # Column indices of the fields of interest.
    oid_ind = header.index("objectId")
    main_id = header.index("main_id")
    main_type = header.index("main_type")
    sp_type_ind = header.index("sp_type")
    redshift_ind = header.index("redshift")
    # Split each returned row once and pick out the relevant columns
    # (the original split every row five times, once per column).
    rows = [line.split(",") for line in data]
    id_out = [row[oid_ind] for row in rows]
    names = [row[main_id] for row in rows]
    types = [row[main_type] for row in rows]
    sp_types = [row[sp_type_ind] for row in rows]
    redshifts = [row[redshift_ind] for row in rows]
    # Assign names and types to inputs
    return refine_search(ra, dec, oid, id_out, names, types, sp_types, redshifts)
def cross_match_simbad(list_idx, list_ra, list_dec):
    """ Cross-match a list of positions with SIMBAD.
    Parameters
    ----------
    list_idx: list of str
        Object identifiers.
    list_ra: list of float
        Right ascensions, in degrees.
    list_dec: list of float
        Declinations, in degrees.
    Returns
    ----------
    Tuple of four arrays (redshift, sptype, type, name), one entry per
    input object; unmatched objects carry the string "Unknown".
    """
    # xmatch better done in list (ra, dec in deg)
    matches = cross_match_alerts_raw_simbad(list_idx, list_ra, list_dec)
    # Transpose once instead of once per extracted column.
    columns = np.transpose(matches)
    xmatch_simbad_redshift = columns[-1]
    xmatch_simbad_sptype = columns[-2]
    xmatch_simbad_type = columns[-3]
    xmatch_simbad_ctlg = columns[-4]
    return (
        xmatch_simbad_redshift,
        xmatch_simbad_sptype,
        xmatch_simbad_type,
        xmatch_simbad_ctlg,
    )
def refine_search_gaia(
    ra: list,
    dec: list,
    oid: list,
    id_out: list,
    source: list,
    ragaia: list,
    decgaia: list,
    plx: list,
    plxerr: list,
    gmag: list,
    angDist: list,
) -> list:
    """ Create a final table by merging coordinates of objects found on the
    bibliographical database, with those objects which were not found.
    Parameters
    ----------
    ra: list of float
        List of RA
    dec: list of float
        List of Dec of the same size as ra.
    oid: list of str
        List of object ID (custom)
    id_out: list of str
        List of object ID returned by the xmatch with CDS
    source: list of str
        List of source ID returned by the xmatch with Gaia
    ragaia: list of float
        List of source ra returned by the xmatch with Gaia
    decgaia: list of float
        List of source dec returned by the xmatch with Gaia
    plx: list of float
        List of source parallax returned by the xmatch with Gaia
    plxerr: list of float
        List of source parallax error returned by the xmatch with Gaia
    gmag: list of float
        List of source g magnitude returned by the xmatch with Gaia
    angDist: list of float
        List of source angular distance returned by the xmatch with Gaia
    Returns
    ----------
    out: List of Tuple
        Each tuple contains (objectId, ra, dec, source, ragaia, decgaia,
        plx, plxerr, gmag, angdist). If the object is not found in Gaia,
        those fields are marked as Unknown. In the case several objects
        match the centroid of the alert, only the first one returned by
        the service is kept.
    """
    # Map each returned id to its first occurrence once, instead of
    # calling `in` + `.index()` per alert (O(n) each, O(n^2) overall).
    # First occurrence wins, exactly like list.index did.
    first_match = {}
    for position, out_id in enumerate(id_out):
        first_match.setdefault(out_id, position)
    out = []
    for ra_in, dec_in, id_in in zip(ra, dec, oid):
        # cast for picky Spark
        ra_in, dec_in = float(ra_in), float(dec_in)
        id_in = str(id_in)
        index = first_match.get(id_in)
        if index is None:
            # Mark as unknown if no match
            out.append(
                (
                    id_in,
                    ra_in,
                    dec_in,
                    "Unknown",
                    "Unknown",
                    "Unknown",
                    "Unknown",
                    "Unknown",
                    "Unknown",
                    "Unknown",
                )
            )
            continue
        # Empty fields from the service are reported as "Unknown"; the
        # column order below matches the returned tuple layout.
        columns = [source, ragaia, decgaia, plx, plxerr, gmag, angDist]
        values = [
            col[index] if col[index] != "" else "Unknown" for col in columns
        ]
        out.append((id_in, ra_in, dec_in, *values))
    return out
def cross_match_alerts_raw_gaia(oid: list, ra: list, dec: list, ctlg: str) -> list:
    """ Query the CDSXmatch service to find identified objects
    in alerts. The catalog queried is the Gaia DR database.
    Parameters
    ----------
    oid: list of str
        List containing object ids (custom)
    ra: list of float
        List containing object ra coordinates
    dec: list of float
        List containing object dec coordinates
    ctlg: str
        Name of the Gaia catalogue to query (e.g. "vizier:I/345/gaia2").
    Returns
    ----------
    out: List of Tuple
        Each tuple contains (objectId, ra, dec, source, ragaia, decgaia,
        parallax, parallaxerr, gmag, angDist). If the object is not found
        in Gaia, those fields are marked as Unknown. In the case several
        objects match the centroid of the alert, only the closest is
        returned. An empty list is returned when the input is empty or
        the service is unreachable.
    Examples
    ----------
    >>> ra = [26.8566983, 26.24497]
    >>> dec = [-26.9677112, -26.7569436]
    >>> id = ["1", "2"]
    >>> objects = cross_match_alerts_raw_gaia(id, ra, dec, "vizier:I/345/gaia2")
    """
    if len(ra) == 0:
        return []
    # Catch TimeoutError and ConnectionError
    try:
        data, header = xmatch(ra, dec, oid, extcatalog=ctlg, distmaxarcsec=2)
    except (ConnectionError, TimeoutError, ValueError) as ce:
        logging.warning("XMATCH GAIA failed " + repr(ce))
        return []
    # Sometimes the service is down, but without TimeoutError or
    # ConnectionError. In that case, grab the error message from the data.
    if len(data) > 0 and "504 Gateway Time-out" in data[0]:
        logging.warning("CDS xmatch service probably down")
        logging.warning(data[0])
        logging.warning("Check at http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync")
        return []
    # Bail out if the expected columns are missing.
    if "source_id" not in header:
        return []
    # Column indices of the fields of interest.
    oid_ind = header.index("objectId")
    source_ind = header.index("source_id")
    ragaia_ind = header.index("ra")
    decgaia_ind = header.index("dec")
    parallax_ind = header.index("parallax")
    parallaxerr_ind = header.index("parallax_error")
    gmag_ind = header.index("phot_g_mean_mag")
    angDist_ind = header.index("angDist")
    # Split each returned row once and pick out the relevant columns
    # (the original split every row eight times, once per column).
    rows = [line.split(",") for line in data]
    id_out = [row[oid_ind] for row in rows]
    source = [row[source_ind] for row in rows]
    ragaia = [row[ragaia_ind] for row in rows]
    decgaia = [row[decgaia_ind] for row in rows]
    plx = [row[parallax_ind] for row in rows]
    plxerr = [row[parallaxerr_ind] for row in rows]
    gmag = [row[gmag_ind] for row in rows]
    angDist = [row[angDist_ind] for row in rows]
    return refine_search_gaia(
        ra, dec, oid, id_out, source, ragaia, decgaia, plx, plxerr, gmag, angDist
    )
def cross_match_gaia(list_idx, list_ra, list_dec, ctlg="vizier:I/345/gaia2"):
    """ Cross-match a list of alert positions with the Gaia DR2 catalogue.

    Parameters
    ----------
    list_idx: list of str
        Object identifiers (custom).
    list_ra: list of float
        Right ascensions, in degrees.
    list_dec: list of float
        Declinations, in degrees (same length as list_ra).
    ctlg: str
        CDS catalogue name to query (default: Gaia DR2, vizier:I/345/gaia2).

    Returns
    ----------
    tuple
        Columns (source, ragaia, decgaia, plx, plxerr, gmag, angdist) of the
        match table, one entry per input object ("Unknown" when unmatched).
    """
    # xmatch better done in list (ra, dec in deg)
    matches = cross_match_alerts_raw_gaia(list_idx, list_ra, list_dec, ctlg)
    # Transpose once instead of recomputing it for every extracted column.
    columns = np.transpose(matches)
    xmatch_gaia_source = columns[3]
    xmatch_gaia_ragaia = columns[4]
    xmatch_gaia_decgaia = columns[5]
    xmatch_gaia_plx = columns[6]
    xmatch_gaia_plxerr = columns[7]
    xmatch_gaia_gmag = columns[8]
    xmatch_gaia_angdist = columns[9]
    return (
        xmatch_gaia_source,
        xmatch_gaia_ragaia,
        xmatch_gaia_decgaia,
        xmatch_gaia_plx,
        xmatch_gaia_plxerr,
        xmatch_gaia_gmag,
        xmatch_gaia_angdist,
    )
def refine_search_usno(
        ra: list, dec: list, oid: list, id_out: list, source: list, angDist: list,
) -> list:
    """ Create a final table by merging coordinates of objects found in the
    bibliographical database with those objects which were not found.

    Parameters
    ----------
    ra: list of float
        List of RA
    dec: list of float
        List of Dec of the same size as ra.
    oid: list of str
        List of object ID (custom)
    id_out: list of str
        List of object ID returned by the xmatch with CDS
    source: list of str
        List of source ID returned by the xmatch with USNO
    angDist: list of float
        List of source angular distance returned by the xmatch with USNO

    Returns
    ----------
    out: List of Tuple
        Each tuple contains (objectId, ra, dec, source, angdist).
        If the object is not found in USNO, source and angdist
        are marked as Unknown. When several rows of id_out share the same
        objectId, the first occurrence is used (the xmatch output is
        presumably sorted by angular distance -- TODO confirm upstream).
    """
    # Map each matched objectId to its FIRST occurrence, mirroring the
    # semantics of list.index() while avoiding an O(n) scan per alert
    # (the original `in` + `.index` pair was O(n * m) overall).
    first_index = {}
    for index, ident in enumerate(id_out):
        if ident not in first_index:
            first_index[ident] = index

    out = []
    for ra_in, dec_in, id_in in zip(ra, dec, oid):
        # cast for picky Spark
        ra_in, dec_in = float(ra_in), float(dec_in)
        id_in = str(id_in)
        index = first_index.get(id_in)
        if index is None:
            # Mark as unknown if no match
            out.append((id_in, ra_in, dec_in, "Unknown", "Unknown"))
        else:
            # Empty strings from the service are normalised to "Unknown".
            source_tmp = source[index] if source[index] != "" else "Unknown"
            angdist_tmp = angDist[index] if angDist[index] != "" else "Unknown"
            out.append((id_in, ra_in, dec_in, source_tmp, angdist_tmp))
    return out
def cross_match_alerts_raw_usno(oid: list, ra: list, dec: list, ctlg: str) -> list:
    """ Query the CDS xmatch service to identify alert counterparts in the
    USNO A2.0 catalogue.

    Parameters
    ----------
    oid: list of str
        List containing object ids (custom)
    ra: list of float
        List containing object ra coordinates
    dec: list of float
        List containing object dec coordinates
    ctlg: string
        String with the catalogue name.

    Returns
    ----------
    out: List of Tuple
        Each tuple contains (objectId, ra, dec, sourcename, angulardistance).
        If the object is not found in USNO the last two fields are marked
        as Unknown. In the case several objects match the centroid of the
        alert, only the closest is returned.

    Examples
    ----------
    >>> ra = [26.8566983, 26.24497]
    >>> dec = [-26.9677112, -26.7569436]
    >>> id = ["1", "2"]
    >>> objects = cross_match_alerts_raw_usno(id, ra, dec)
    """
    if len(ra) == 0:
        return []

    # The CDS service can be slow or unreachable: catch network errors and
    # return an empty match list instead of failing the whole pipeline.
    try:
        data, header = xmatch(ra, dec, oid, extcatalog=ctlg, distmaxarcsec=2)
    except (ConnectionError, TimeoutError, ValueError) as ce:
        logging.warning("XMATCH USNO failed " + repr(ce))
        return []

    # The service sometimes answers with an HTTP error payload instead of
    # raising an exception: detect it from the first line of the response.
    if len(data) > 0 and "504 Gateway Time-out" in data[0]:
        logging.warning("CDS xmatch service probably down")
        logging.warning(data[0])
        logging.warning("Check at http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync")
        return []

    # No USNO column at all means the cross-match produced nothing usable.
    if "USNO-A2.0" not in header:
        return []

    # Column positions of interest in the CSV header.
    col = {name: header.index(name) for name in ("objectId", "USNO-A2.0", "angDist")}

    # Split every row once, then pull out the columns of interest.
    fields = [np.array(row.split(",")) for row in data]
    id_out = [f[col["objectId"]] for f in fields]
    source = [f[col["USNO-A2.0"]] for f in fields]
    angDist = [f[col["angDist"]] for f in fields]

    return refine_search_usno(ra, dec, oid, id_out, source, angDist)
def cross_match_usno(list_idx, list_ra, list_dec, ctlg="vizier:I/345/usno2"):
    """ Cross-match a list of alert positions with the USNO A2.0 star catalogue.

    Parameters
    ----------
    list_idx: list of str
        Object identifiers (custom).
    list_ra: list of float
        Right ascensions, in degrees.
    list_dec: list of float
        Declinations, in degrees (same length as list_ra).
    ctlg: str
        CDS catalogue name to query (default: vizier:I/345/usno2).

    Returns
    ----------
    tuple
        Columns (source, angdist) of the match table, one entry per input
        object ("Unknown" when unmatched).
    """
    # xmatch better done in list (ra, dec in deg)
    matches = cross_match_alerts_raw_usno(list_idx, list_ra, list_dec, ctlg)
    # Transpose once instead of recomputing it for every extracted column.
    columns = np.transpose(matches)
    xmatch_usno_source = columns[3]
    xmatch_usno_angdist = columns[4]
    return (
        xmatch_usno_source,
        xmatch_usno_angdist,
    )
def cross_match_alerts_raw_generic(
    oid: list, ra: list, dec: list, ctlg: str, distmaxarcsec: float
) -> list:
    """ Query the CDS xmatch service against an arbitrary catalogue and
    return one row per input alert, joined with its closest match.

    Parameters
    ----------
    oid: list of str
        List containing object ids (custom)
    ra: list of float
        List containing object ra coordinates
    dec: list of float
        List containing object dec coordinates
    ctlg: string
        Catalogue name understood by the CDS xmatch service.
    distmaxarcsec: float
        Maximum cross-match radius, in arcseconds.

    Returns
    ----------
    df_out: pd.DataFrame or list
        Empty list on empty input or service failure; otherwise a DataFrame
        with one row per input object (ObjectId, ra, dec) joined with the
        closest catalogue match, unmatched columns filled with "Unknown".

    Raises
    ----------
    ValueError
        If the xmatch response does not contain the angDist column.
    """
    if len(ra) == 0:
        return []
    # Catch TimeoutError and ConnectionError
    try:
        data, header = xmatch(
            ra, dec, oid, extcatalog=ctlg, distmaxarcsec=distmaxarcsec
        )
    except (ConnectionError, TimeoutError, ValueError) as ce:
        logging.warning("XMATCH failed " + repr(ce))
        return []
    # Sometimes the service is down, but without TimeoutError or ConnectionError.
    # In that case, we grab the error message from the data.
    if len(data) > 0 and "504 Gateway Time-out" in data[0]:
        msg_head = "CDS xmatch service probably down"
        msg_foot = "Check at http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync"
        logging.warning(msg_head)
        logging.warning(data[0])
        logging.warning(msg_foot)
        return []
    # Parse the CSV payload into a DataFrame.
    data = [x.split(",") for x in data]
    df_search_out = pd.DataFrame(data=np.array(data), columns=header)
    if "angDist" not in df_search_out.keys():
        # Consistent with the rest of the module: log (instead of print) and
        # raise a specific, informative exception instead of a bare Exception.
        # ValueError is a subclass of Exception, so existing broad handlers
        # still catch it.
        logging.warning("Xmatch failure")
        raise ValueError("CDS xmatch output is missing the 'angDist' column")
    df_search_out["angDist"] = df_search_out["angDist"].astype(float)
    if ctlg == "vizier:II/358/smss":
        # SkyMapper also exposes an 'ObjectId' column: rename it so it does
        # not clash with the alert objectId used as merge key below.
        df_search_out = df_search_out.rename(columns={"ObjectId": "ObjectId_SMSS"})
    df_search_out = df_search_out.rename(columns={"objectId": "ObjectId"})
    # Keep only the closest match per alert (smallest angDist first).
    df_search_out_tmp = df_search_out.sort_values("angDist", ascending=True)
    df_search_out_tmp = df_search_out_tmp.groupby("ObjectId").first()
    df_search_out_tmp = df_search_out_tmp.rename(
        columns={"ra": "ra_out", "dec": "dec_out"}
    )
    # Left-join on the input alerts so unmatched objects are kept.
    df_out_tmp = pd.DataFrame()
    df_out_tmp["ObjectId"] = oid
    df_out_tmp["ObjectId"] = df_out_tmp["ObjectId"].astype(str)
    df_out_tmp["ra"] = ra
    df_out_tmp["dec"] = dec
    df_out = pd.merge(df_out_tmp, df_search_out_tmp, on="ObjectId", how="left")
    df_out = df_out.fillna("Unknown")
    df_out = df_out.drop(["ra_in", "dec_in"], axis=1)
    return df_out
| [
"pandas.DataFrame",
"io.StringIO",
"csv.writer",
"logging.warning",
"pandas.merge",
"numpy.transpose",
"numpy.array",
"requests.post"
] | [((928, 941), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (939, 941), False, 'import io\n'), ((955, 1003), 'csv.writer', 'csv.writer', (['output'], {'quoting': 'csv.QUOTE_NONNUMERIC'}), '(output, quoting=csv.QUOTE_NONNUMERIC)\n', (965, 1003), False, 'import csv\n'), ((4635, 4901), 'requests.post', 'requests.post', (['"""http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync"""'], {'data': "{'request': 'xmatch', 'distMaxArcsec': distmaxarcsec, 'selection': 'all',\n 'RESPONSEFORMAT': 'csv', 'cat2': extcatalog, 'colRA1': 'ra_in',\n 'colDec1': 'dec_in'}", 'files': "{'cat1': table}"}), "('http://cdsxmatch.u-strasbg.fr/xmatch/api/v1/sync', data={\n 'request': 'xmatch', 'distMaxArcsec': distmaxarcsec, 'selection': 'all',\n 'RESPONSEFORMAT': 'csv', 'cat2': extcatalog, 'colRA1': 'ra_in',\n 'colDec1': 'dec_in'}, files={'cat1': table})\n", (4648, 4901), False, 'import requests\n'), ((7066, 7091), 'logging.warning', 'logging.warning', (['msg_head'], {}), '(msg_head)\n', (7081, 7091), False, 'import logging\n'), ((7100, 7124), 'logging.warning', 'logging.warning', (['data[0]'], {}), '(data[0])\n', (7115, 7124), False, 'import logging\n'), ((7133, 7158), 'logging.warning', 'logging.warning', (['msg_foot'], {}), '(msg_foot)\n', (7148, 7158), False, 'import logging\n'), ((8410, 8431), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (8422, 8431), True, 'import numpy as np\n'), ((8463, 8484), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (8475, 8484), True, 'import numpy as np\n'), ((8514, 8535), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (8526, 8535), True, 'import numpy as np\n'), ((8565, 8586), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (8577, 8586), True, 'import numpy as np\n'), ((13856, 13881), 'logging.warning', 'logging.warning', (['msg_head'], {}), '(msg_head)\n', (13871, 13881), False, 'import logging\n'), ((13890, 13914), 'logging.warning', 'logging.warning', (['data[0]'], 
{}), '(data[0])\n', (13905, 13914), False, 'import logging\n'), ((13923, 13948), 'logging.warning', 'logging.warning', (['msg_foot'], {}), '(msg_foot)\n', (13938, 13948), False, 'import logging\n'), ((15518, 15539), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (15530, 15539), True, 'import numpy as np\n'), ((15568, 15589), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (15580, 15589), True, 'import numpy as np\n'), ((15619, 15640), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (15631, 15640), True, 'import numpy as np\n'), ((15666, 15687), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (15678, 15687), True, 'import numpy as np\n'), ((15716, 15737), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (15728, 15737), True, 'import numpy as np\n'), ((15764, 15785), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (15776, 15785), True, 'import numpy as np\n'), ((15815, 15836), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (15827, 15836), True, 'import numpy as np\n'), ((19542, 19567), 'logging.warning', 'logging.warning', (['msg_head'], {}), '(msg_head)\n', (19557, 19567), False, 'import logging\n'), ((19576, 19600), 'logging.warning', 'logging.warning', (['data[0]'], {}), '(data[0])\n', (19591, 19600), False, 'import logging\n'), ((19609, 19634), 'logging.warning', 'logging.warning', (['msg_foot'], {}), '(msg_foot)\n', (19624, 19634), False, 'import logging\n'), ((20632, 20653), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (20644, 20653), True, 'import numpy as np\n'), ((20683, 20704), 'numpy.transpose', 'np.transpose', (['matches'], {}), '(matches)\n', (20695, 20704), True, 'import numpy as np\n'), ((22532, 22557), 'logging.warning', 'logging.warning', (['msg_head'], {}), '(msg_head)\n', (22547, 22557), False, 'import logging\n'), ((22566, 22590), 'logging.warning', 'logging.warning', 
(['data[0]'], {}), '(data[0])\n', (22581, 22590), False, 'import logging\n'), ((22599, 22624), 'logging.warning', 'logging.warning', (['msg_foot'], {}), '(msg_foot)\n', (22614, 22624), False, 'import logging\n'), ((23444, 23458), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (23456, 23458), True, 'import pandas as pd\n'), ((23644, 23710), 'pandas.merge', 'pd.merge', (['df_out_tmp', 'df_search_out_tmp'], {'on': '"""ObjectId"""', 'how': '"""left"""'}), "(df_out_tmp, df_search_out_tmp, on='ObjectId', how='left')\n", (23652, 23710), True, 'import pandas as pd\n'), ((22722, 22736), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (22730, 22736), True, 'import numpy as np\n')] |
# --- Chatbot runtime setup -------------------------------------------------
# Loads the artefacts produced by train_chatbot.py (trained model, vocabulary,
# intent classes) and prepares the NLTK lemmatizer used to normalise input.
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from datetime import datetime
from keras.models import load_model
model = load_model('chatbot_model.h5') # Load trained model output from train_chatbot.py as an input to chatgui.py
import json
import random
intents = json.loads(open('intents.json').read()) # Load intents (tags, patterns, responses) from the json file
words = pickle.load(open('words.pkl','rb')) # vocabulary list: one entry per bag-of-words slot
classes = pickle.load(open('classes.pkl','rb')) # intent tag names, indexed like the model output
def clean_up_sentence(sentence):
    """Tokenize *sentence* and lemmatize every token in lower case.

    Returns the list of lemmatized word stems,
    e.g. "Believing" becomes "belief".
    """
    tokens = nltk.word_tokenize(sentence)  # split the pattern into words
    return [lemmatizer.lemmatize(token.lower()) for token in tokens]
def bow(sentence, words, show_details=True):
    """Return the bag-of-words vector of *sentence* over the vocabulary.

    Parameters
    ----------
    sentence: str
        Raw user input.
    words: list of str
        Vocabulary; one slot in the output vector per entry.
    show_details: bool
        If True, print every vocabulary word found in the sentence.

    Returns
    ----------
    np.ndarray
        Vector of len(words) entries: 1 where the vocabulary word occurs in
        the (tokenized, lemmatized) sentence, 0 elsewhere.
    """
    sentence_words = clean_up_sentence(sentence)  # tokenize + lemmatize
    bag = [0] * len(words)
    # Map each vocabulary word to every slot it occupies, built once:
    # O(V + T) overall instead of the original O(V * T) nested scan.
    positions = {}
    for i, w in enumerate(words):
        positions.setdefault(w, []).append(i)
    for s in sentence_words:
        for i in positions.get(s, ()):
            # assign 1 if current word is in the vocabulary position
            bag[i] = 1
            if show_details:
                print("found in bag: %s" % s)
    return np.array(bag)
def predict_class(sentence, model):
    """Classify *sentence* with *model* and return the candidate intents
    above the probability threshold, strongest first.

    Each entry is {"intent": <tag>, "probability": <str score>}.
    """
    features = bow(sentence, words, show_details=False)
    res = model.predict(np.array([features]))[0]
    ERROR_THRESHOLD = 0.25
    # Keep (index, score) pairs above the threshold, sorted by score
    # in decreasing order.
    scored = sorted(
        ((i, r) for i, r in enumerate(res) if r > ERROR_THRESHOLD),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [{"intent": classes[i], "probability": str(r)} for i, r in scored]
def getResponse(ints, intents_json):
    """Pick a random canned response for the top predicted intent.

    Parameters
    ----------
    ints: list of dict
        Predictions from predict_class, strongest first; only the top
        entry's "intent" tag is used.
    intents_json: dict
        Parsed intents.json content: {"intents": [{"tag", "responses", ...}]}.

    Returns
    ----------
    str
        A randomly chosen response for the matching tag. If the tag is not
        present in intents_json (previously an UnboundLocalError crash),
        a generic fallback message is returned instead.
    """
    tag = ints[0]['intent']
    for intent in intents_json['intents']:
        if intent['tag'] == tag:
            return random.choice(intent['responses'])
    # Defensive fallback: the predicted tag is missing from intents.json.
    return "Sorry, I didn't understand that."
def chatbot_response(msg):
    """End-to-end reply: classify *msg* and return a canned response."""
    predictions = predict_class(msg, model)
    return getResponse(predictions, intents)
import tkinter as tk#Library to create Chatbot GUI
from tkinter import *
def send():
    """Tkinter callback: read the user's message, append the exchange
    (user message + bot reply) to the chat log, and clear the entry box."""
    user_message = EntryBox.get("1.0", 'end-1c').strip()
    EntryBox.delete("0.0", END)
    if user_message == '':
        return
    # Unlock the (normally read-only) chat log to append the exchange.
    ChatLog.config(state=NORMAL)
    ChatLog.insert(END, "You: " + user_message + '\n\n')
    ChatLog.config(foreground="#442265", font=("Verdana", 12))
    reply = chatbot_response(user_message)
    ChatLog.insert(END, "Bot: " + reply + '\n\n')
    ChatLog.config(state=DISABLED)
    ChatLog.yview(END)
# --- Build the Tkinter chat window -----------------------------------------
base = Tk()
base.title("Chat With D!")
base.geometry("400x500")
base.resizable(width=FALSE, height=FALSE)
# Header banner at the top of the window.
head_label = Label(base, text="Welcome to D's Chat!",width="30",bd=8, bg="white", font="Verdana",anchor="center", fg="blue");
head_label.pack(pady=3)
ChatLog = Text(base, bd=0, bg="white", height="8", width="50", font="Arial" ) # Create Chat window (read-only transcript)
ChatLog.config(state=DISABLED)
scrollbar = Scrollbar(base, command=ChatLog.yview, cursor="heart") # Add scrollbar to Chat window
ChatLog['yscrollcommand'] = scrollbar.set
SendButton = Button(base, font=("Arial",12,'bold'), text="Send", width="10", height=5,
                    bd=0, bg="#32de97", activebackground="#3c9d9b",fg='#ffffff',
                    command= send ) # Create Button to send message (triggers send())
EntryBox = Text(base, bd=0, bg="white",width="29", height="5", font="Arial") # Create box to enter message
#EntryBox.bind("<Return>", send)
# Absolute-coordinate layout for all widgets (the window is non-resizable).
head_label.place(x=1,y=1,height=40)
scrollbar.place(x=376,y=45, height=400)
ChatLog.place(x=10,y=45, height=400, width=365)
EntryBox.place(x=10, y=450, height=45, width=265)
SendButton.place(x=285, y=450, height=45)
# Blocks until the window is closed.
base.mainloop()
| [
"keras.models.load_model",
"nltk.stem.WordNetLemmatizer",
"random.choice",
"numpy.array",
"nltk.word_tokenize"
] | [((65, 84), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (82, 84), False, 'from nltk.stem import WordNetLemmatizer\n'), ((193, 223), 'keras.models.load_model', 'load_model', (['"""chatbot_model.h5"""'], {}), "('chatbot_model.h5')\n", (203, 223), False, 'from keras.models import load_model\n'), ((552, 580), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (570, 580), False, 'import nltk\n'), ((1427, 1440), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (1435, 1440), True, 'import numpy as np\n'), ((1598, 1611), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (1606, 1611), True, 'import numpy as np\n'), ((2138, 2167), 'random.choice', 'random.choice', (["i['responses']"], {}), "(i['responses'])\n", (2151, 2167), False, 'import random\n')] |
import numpy as np
import math
import scipy.ndimage
def frequest(im, orientim, kernel_size, minWaveLength, maxWaveLength):
    """Estimate the ridge spatial frequency inside one fingerprint block.

    Based on https://pdfs.semanticscholar.org/ca0d/a7c552877e30e1c5d87dfcfb8b5972b0acd9.pdf pg.14

    Returns a block the same size as *im* filled with the estimated ridge
    frequency, or zeros when no valid wavelength (between minWaveLength
    and maxWaveLength) can be found.
    """
    rows, cols = np.shape(im)

    # Ridge orientation of the block, reconstructed from the doubled angle
    # so that orientations pi apart combine correctly.
    angle = math.atan2(np.sin(2 * orientim), np.cos(2 * orientim)) / 2

    # Rotate the block so the ridges run vertically.
    rotated = scipy.ndimage.rotate(
        im, angle / np.pi * 180 + 90, axes=(1, 0),
        reshape=False, order=3, mode='nearest')

    # Crop the central square so no invalid (rotated-in) region remains.
    crop = int(np.fix(rows / np.sqrt(2)))
    start = int(np.fix((rows - crop) / 2))
    rotated = rotated[start:start + crop][:, start:start + crop]

    # Project grey values down the (now vertical) ridges.
    proj = np.sum(rotated, axis=0)

    # Peaks of the projection: points close to their grey dilation and
    # above the mean projection value.
    dilated = scipy.ndimage.grey_dilation(proj, kernel_size,
                                            structure=np.ones(kernel_size))
    peak_thresh = 2
    is_peak = (np.abs(dilated - proj) < peak_thresh) & (proj > np.mean(proj))
    peak_idx = np.where(is_peak)
    _, n_peaks = np.shape(peak_idx)

    # Wavelength = span between first and last peak / (number of peaks - 1);
    # reject estimates outside the allowed wavelength range.
    if n_peaks >= 2:
        wavelength = (peak_idx[0][-1] - peak_idx[0][0]) / (n_peaks - 1)
        if minWaveLength <= wavelength <= maxWaveLength:
            return 1 / np.double(wavelength) * np.ones(im.shape)
    return np.zeros(im.shape)
def ridge_freq(im, mask, orient, block_size, kernel_size, minWaveLength, maxWaveLength):
    """Estimate the ridge frequency across a whole fingerprint image.

    The image is scanned in non-overlapping block_size x block_size tiles;
    each tile with a non-zero orientation gets a per-block estimate from
    frequest(). The result is the median of all strictly positive block
    estimates, broadcast over *mask*.
    """
    rows, cols = im.shape
    freq = np.zeros((rows, cols))

    for top in range(0, rows - block_size, block_size):
        for left in range(0, cols - block_size, block_size):
            tile = im[top:top + block_size][:, left:left + block_size]
            tile_angle = orient[top // block_size][left // block_size]
            # A zero orientation marks background tiles: skip them.
            if tile_angle:
                freq[top:top + block_size][:, left:left + block_size] = \
                    frequest(tile, tile_angle, kernel_size,
                             minWaveLength, maxWaveLength)

    # Keep only masked-in estimates, then take the median of the strictly
    # positive ones and broadcast it over the mask.
    freq = freq * mask
    flat = np.reshape(freq, (1, rows * cols))
    positive = flat[0][np.array(np.where(flat > 0))[1, :]]
    return np.median(positive) * mask
| [
"numpy.abs",
"numpy.sum",
"numpy.double",
"math.atan2",
"numpy.median",
"numpy.fix",
"numpy.zeros",
"numpy.ones",
"numpy.shape",
"numpy.sin",
"numpy.where",
"numpy.reshape",
"numpy.cos",
"numpy.array",
"numpy.mean",
"numpy.sqrt"
] | [((596, 608), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (604, 608), True, 'import numpy as np\n'), ((791, 811), 'numpy.cos', 'np.cos', (['(2 * orientim)'], {}), '(2 * orientim)\n', (797, 811), True, 'import numpy as np\n'), ((856, 876), 'numpy.sin', 'np.sin', (['(2 * orientim)'], {}), '(2 * orientim)\n', (862, 876), True, 'import numpy as np\n'), ((1485, 1506), 'numpy.sum', 'np.sum', (['rotim'], {'axis': '(0)'}), '(rotim, axis=0)\n', (1491, 1506), True, 'import numpy as np\n'), ((1626, 1654), 'numpy.abs', 'np.abs', (['(dilation - ridge_sum)'], {}), '(dilation - ridge_sum)\n', (1632, 1654), True, 'import numpy as np\n'), ((1762, 1778), 'numpy.where', 'np.where', (['maxpts'], {}), '(maxpts)\n', (1770, 1778), True, 'import numpy as np\n'), ((1800, 1816), 'numpy.shape', 'np.shape', (['maxind'], {}), '(maxind)\n', (1808, 1816), True, 'import numpy as np\n'), ((2654, 2676), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (2662, 2676), True, 'import numpy as np\n'), ((3231, 3265), 'numpy.reshape', 'np.reshape', (['freq', '(1, rows * cols)'], {}), '(freq, (1, rows * cols))\n', (3241, 3265), True, 'import numpy as np\n'), ((3272, 3293), 'numpy.where', 'np.where', (['(freq_1d > 0)'], {}), '(freq_1d > 0)\n', (3280, 3293), True, 'import numpy as np\n'), ((3302, 3315), 'numpy.array', 'np.array', (['ind'], {}), '(ind)\n', (3310, 3315), True, 'import numpy as np\n'), ((924, 956), 'math.atan2', 'math.atan2', (['sinorient', 'cosorient'], {}), '(sinorient, cosorient)\n', (934, 956), False, 'import math\n'), ((1293, 1321), 'numpy.fix', 'np.fix', (['((rows - cropsze) / 2)'], {}), '((rows - cropsze) / 2)\n', (1299, 1321), True, 'import numpy as np\n'), ((2117, 2135), 'numpy.zeros', 'np.zeros', (['im.shape'], {}), '(im.shape)\n', (2125, 2135), True, 'import numpy as np\n'), ((3397, 3430), 'numpy.median', 'np.median', (['non_zero_elems_in_freq'], {}), '(non_zero_elems_in_freq)\n', (3406, 3430), True, 'import numpy as np\n'), ((1586, 1606), 
'numpy.ones', 'np.ones', (['kernel_size'], {}), '(kernel_size)\n', (1593, 1606), True, 'import numpy as np\n'), ((1729, 1747), 'numpy.mean', 'np.mean', (['ridge_sum'], {}), '(ridge_sum)\n', (1736, 1747), True, 'import numpy as np\n'), ((2392, 2410), 'numpy.zeros', 'np.zeros', (['im.shape'], {}), '(im.shape)\n', (2400, 2410), True, 'import numpy as np\n'), ((1263, 1273), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1270, 1273), True, 'import numpy as np\n'), ((2335, 2352), 'numpy.ones', 'np.ones', (['im.shape'], {}), '(im.shape)\n', (2342, 2352), True, 'import numpy as np\n'), ((2311, 2332), 'numpy.double', 'np.double', (['waveLength'], {}), '(waveLength)\n', (2320, 2332), True, 'import numpy as np\n')] |
# Code example for ICP taking a sequence of point clouds relatively close
# and building a map with them.
# It assumes that: 3D point clouds are used, they were recorded in sequence
# and they are expressed in sensor frame.
import numpy as np
from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms
# Short aliases for the libpointmatcher bindings.
PM = pm.PointMatcher
PMIO = pm.PointMatcherIO
DP = PM.DataPoints
params = pms.Parametrizable.Parameters()
# Path of output directory (default: tests/align_sequence/)
# The output directory must already exist
# Leave empty to save in the current directory
output_base_directory = "tests/align_sequence/"
# Name of output files (default: align_sequence)
output_file_name = "align_sequence"
# Rigid transformation, also used below to re-orthonormalize ICP results.
rigid_trans = PM.get().TransformationRegistrar.create("RigidTransformation")
# Create filters manually to clean the global map:
# first estimate local point densities from the nearest neighbours ...
params["knn"] = "10"
params["epsilon"] = "5"
params["keepNormals"] = "0"
params["keepDensities"] = "1"
density_filter = PM.get().DataPointsFilterRegistrar.create("SurfaceNormalDataPointsFilter", params)
params.clear()
# ... then subsample so the map density stays bounded.
params["maxDensity"] = "30"
max_density_subsample = PM.get().DataPointsFilterRegistrar.create("MaxDensityDataPointsFilter",
                                                                 params)
params.clear()
# Main algorithm definition
icp = PM.ICP()
# load YAML config
config_file = "../data/default.yaml"
pms.validateFile(config_file)
icp.loadFromYaml(config_file)
# Loading the list of files
# file_info_list = PMIO.FileInfoVector("../data/carCloudList.csv", "../data/")
# or
file_info_list = PMIO.FileInfoVector("../data/cloudList.csv", "../data/")
map_point_cloud = DP()
new_cloud = DP()
T_to_map_from_new = np.identity(4)  # assumes 3D (4x4 homogeneous transform)
for i in range(len(file_info_list)):
    print(f"---------------------\nLoading: {file_info_list[i].readingFileName}")
    # It is assumed that the point cloud is expressed in sensor frame.
    new_cloud = DP.load(file_info_list[i].readingFileName)
    if map_point_cloud.getNbPoints() == 0:
        # First cloud: it becomes the initial map as-is.
        map_point_cloud = new_cloud
        continue
    # call ICP
    try:
        # We use the last transformation as a prior
        # this assumes that the point clouds were recorded in
        # sequence.
        prior = T_to_map_from_new
        T_to_map_from_new = icp(new_cloud, map_point_cloud, prior)
    except PM.ConvergenceError as CE:
        print(f"ERROR PM.ICP failed to converge: \n\t{CE}\n\n")
        continue
    # This is not necessary in this example, but could be
    # useful if the same matrix is composed in the loop
    # (it keeps the rotation part orthonormal against numeric drift).
    T_to_map_from_new = rigid_trans.correctParameters(T_to_map_from_new)
    # Move the new point cloud in the map reference
    new_cloud = rigid_trans.compute(new_cloud, T_to_map_from_new)
    # Merge point clouds to map
    map_point_cloud.concatenate(new_cloud)
    # Clean the map to bound its size and density.
    map_point_cloud = density_filter.filter(map_point_cloud)
    map_point_cloud = max_density_subsample.filter(map_point_cloud)
    # Save the map at each iteration
    output_file_name_iter = f"{output_file_name}_{i}.vtk"
    print(f"outputFileName: {output_file_name_iter}")
    map_point_cloud.save(f"{output_base_directory}{output_file_name_iter}")
| [
"numpy.identity",
"pypointmatcher.pointmatchersupport.Parametrizable.Parameters",
"pypointmatcher.pointmatchersupport.validateFile"
] | [((389, 420), 'pypointmatcher.pointmatchersupport.Parametrizable.Parameters', 'pms.Parametrizable.Parameters', ([], {}), '()\n', (418, 420), True, 'from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms\n'), ((1390, 1419), 'pypointmatcher.pointmatchersupport.validateFile', 'pms.validateFile', (['config_file'], {}), '(config_file)\n', (1406, 1419), True, 'from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms\n'), ((1699, 1713), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1710, 1713), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.