code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from envs.LQR import LQR
from utils import get_AB, linear_layers, random_AB
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
import numpy as np
try:
import wandb
except:
wandb = None
# Fix the RNG seeds so runs are reproducible (see the seed table below).
torch.manual_seed(2022)
np.random.seed(2022)
'''
Seeds:
2d : 2022
3d : 2023
4d : 2023
'''
# Hyperparameters
learning_rate = 0.0003  # Adam learning rate
gamma = 0.9             # discount factor
lmbda = 0.9             # GAE lambda
eps_clip = 0.2          # PPO clipping range for the probability ratio
K_epoch = 10            # optimization epochs per batch of collected data
rollout_len = 3         # transitions per rollout
buffer_size = 30        # rollout groups per training batch
minibatch_size = 32     # rollouts per mini-batch
class PPO(nn.Module):
    """Actor-critic PPO agent with a Gaussian policy for continuous control.

    The policy head outputs a mean squashed into [-10, 10] and a softplus
    standard deviation; a separate linear head estimates the state value.
    """

    def __init__(self, in_dim=3, hidden_dim=128, out_dim=1):
        """Build the shared trunk, the policy heads, the value head and the optimizer.

        :param in_dim: state dimensionality
        :param hidden_dim: width of the shared hidden layer(s)
        :param out_dim: action dimensionality
        """
        super(PPO, self).__init__()
        self.data = []  # buffer of rollouts collected since the last update
        self.fc1 = linear_layers(layers=[in_dim, hidden_dim])
        self.fc_mu = nn.Linear(hidden_dim, out_dim)
        self.fc_std = nn.Linear(hidden_dim, out_dim)
        self.fc_v = nn.Linear(hidden_dim, 1)
        # self.weight_init()
        # NOTE(review): beta1=0.0 disables Adam's first-moment averaging;
        # presumably deliberate for this task -- confirm before changing.
        self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(0.0, 0.999))
        self.optimization_step = 0

    def weight_init(self):
        """Orthogonally initialize the policy-head weights (currently not called)."""
        # for p in self.fc1.modules():
        #     nn.init.orthogonal_(p.weight.data)
        nn.init.orthogonal_(self.fc_mu.weight.data)
        nn.init.orthogonal_(self.fc_std.weight.data)

    def pi(self, x, softmax_dim=0):
        """Return the Gaussian policy parameters (mu, std) for state(s) ``x``."""
        x = F.relu(self.fc1(x))
        # torch.tanh replaces the deprecated F.tanh (removed from nn.functional
        # in recent PyTorch releases); output mean is bounded in [-10, 10].
        mu = 10 * torch.tanh(self.fc_mu(x))
        std = F.softplus(self.fc_std(x)) + 1e-5  # keep std strictly positive
        return mu, std

    def v(self, x):
        """Return the state-value estimate for state(s) ``x``."""
        x = F.relu(self.fc1(x))
        v = self.fc_v(x)
        return v

    def put_data(self, transition):
        """Append one rollout (a list of transitions) to the buffer."""
        self.data.append(transition)

    def make_batch(self):
        """Convert the buffered rollouts into a list of tensor mini-batches.

        NOTE(review): the accumulator lists are never cleared between the
        ``buffer_size`` iterations, so each successive mini-batch also contains
        all rollouts of the previous ones -- confirm this is intended before
        changing it, as training behaviour depends on it.
        """
        s_batch, a_batch, r_batch, s_prime_batch, prob_a_batch, done_batch = [], [], [], [], [], []
        data = []
        for j in range(buffer_size):
            for i in range(minibatch_size):
                rollout = self.data.pop()
                s_lst, a_lst, r_lst, s_prime_lst, prob_a_lst, done_lst = [], [], [], [], [], []
                for transition in rollout:
                    s, a, r, s_prime, prob_a, done = transition
                    s_lst.append(s)
                    a_lst.append([a])
                    r_lst.append([r])
                    s_prime_lst.append(s_prime)
                    prob_a_lst.append([prob_a])
                    done_mask = 0 if done else 1
                    done_lst.append([done_mask])
                s_batch.append(s_lst)
                a_batch.append(a_lst)
                r_batch.append(r_lst)
                s_prime_batch.append(s_prime_lst)
                prob_a_batch.append(prob_a_lst)
                done_batch.append(done_lst)
            mini_batch = torch.tensor(s_batch, dtype=torch.float), torch.tensor(a_batch, dtype=torch.float), \
                         torch.tensor(r_batch, dtype=torch.float), torch.tensor(s_prime_batch, dtype=torch.float), \
                         torch.tensor(done_batch, dtype=torch.float), torch.tensor(prob_a_batch, dtype=torch.float)
            data.append(mini_batch)
        return data

    def calc_advantage(self, data):
        """Append TD targets and GAE advantages to each mini-batch."""
        data_with_adv = []
        for mini_batch in data:
            s, a, r, s_prime, done_mask, old_log_prob = mini_batch
            with torch.no_grad():
                td_target = r + gamma * self.v(s_prime) * done_mask
                delta = td_target - self.v(s)
            delta = delta.numpy()
            advantage_lst = []
            advantage = 0.0
            # GAE: backward recursion over the TD errors
            for delta_t in delta[::-1]:
                advantage = gamma * lmbda * advantage + delta_t[0]
                advantage_lst.append([advantage])
            advantage_lst.reverse()
            advantage = torch.tensor(advantage_lst, dtype=torch.float)
            data_with_adv.append((s, a, r, s_prime, done_mask, old_log_prob, td_target, advantage))
        return data_with_adv

    def train_net(self):
        """Run ``K_epoch`` PPO update passes once enough rollouts are buffered."""
        if len(self.data) == minibatch_size * buffer_size:
            data = self.make_batch()
            data = self.calc_advantage(data)
            for i in range(K_epoch):
                for mini_batch in data:
                    s, a, r, s_prime, done_mask, old_log_prob, td_target, advantage = mini_batch
                    mu, std = self.pi(s, softmax_dim=1)
                    dist = Normal(mu, std)
                    log_prob = dist.log_prob(a)
                    ratio = torch.exp(log_prob - old_log_prob)  # a/b == exp(log(a)-log(b))
                    surr1 = ratio * advantage
                    surr2 = torch.clamp(ratio, 1 - eps_clip, 1 + eps_clip) * advantage
                    loss = -torch.min(surr1, surr2) + F.smooth_l1_loss(self.v(s), td_target)
                    self.optimizer.zero_grad()
                    loss.mean().backward()
                    nn.utils.clip_grad_norm_(self.parameters(), 1.0)
                    self.optimizer.step()
                    self.optimization_step += 1
def main():
    """Train the PPO agent on a 6-dimensional LQR environment and checkpoint it.

    Collects rollouts of length ``rollout_len``, triggers a PPO update every
    ``5 * rollout_len`` steps, logs the running average score, and saves the
    first model that clears the sub-optimality cost threshold after episode 1000.
    """
    # weights and bias
    log = False
    if log and wandb:
        wandb.init(project='RL',
                   entity='hzzheng',
                   tags=['2d-dt=1'])
    save_dir = 'checkpoints'
    # create environment
    dt = 0.1
    state_dim = 6
    action_dim = 1
    # A = np.array([[1.0]])
    # B = np.array(([[dt]]))
    A, B = get_AB(state_dim, action_dim, dt)
    # A, B = random_AB(state_dim, action_dim)
    sigma = 0.1
    # process-noise covariance scaled by sigma
    W = sigma * np.eye(state_dim)
    # B = np.eye(2)
    # quadratic state and action cost matrices
    Q = np.eye(state_dim) * 5.0
    R = np.eye(action_dim)
    env = LQR(A, B, Q, R, W, state_dim)
    # =====================================
    # an episode scoring above -subop_cost counts as near-optimal (checkpoint trigger)
    subop_cost = 180
    model = PPO(in_dim=state_dim, hidden_dim=128, out_dim=action_dim)
    # ckpt = torch.load('checkpoints/init.pt')
    # model.load_state_dict(ckpt['init'])
    # print('load from init.pt')
    print_interval = 10
    rollout = []
    avg_score = 0.0
    saved_flag = False
    for n_epi in range(1800):
        s = env.reset(factor=1.0)
        score = 0.0
        for i in range(5):
            for t in range(rollout_len):
                # sample an action from the current Gaussian policy
                mu, std = model.pi(torch.from_numpy(s).float())
                dist = Normal(mu, std)
                a = dist.sample()
                log_prob = dist.log_prob(a)
                s_prime, r, done, info = env.step(a.numpy())
                rollout.append((s, a, r / 1.0, s_prime, log_prob.item(), done))
                if len(rollout) == rollout_len:
                    model.put_data(rollout)
                    rollout = []
                s = s_prime
                score += r
            # print(model.pi(torch.zeros(2)))
            # no-op until the buffer holds minibatch_size * buffer_size rollouts
            model.train_net()
        # average reward per step in this episode
        score /= 5 * rollout_len
        avg_score += score
        if n_epi % print_interval == 0 and n_epi != 0:
            print("# of episode :{}, avg score : {:.1f}, opt step: {}".
                  format(n_epi, avg_score / print_interval, model.optimization_step))
            if log and wandb:
                wandb.log(
                    {
                        'Avg score': avg_score / print_interval
                    }
                )
            avg_score = 0.0
        # save the first sufficiently good policy found after episode 1000
        if n_epi > 1000 and score > -subop_cost and not saved_flag:
            state_dict = model.state_dict()
            torch.save({'policy': state_dict}, save_dir +
                       '/ppo-{}.pt'.format(n_epi))
            print('save model at %s' % (save_dir +
                                        '/ppo-{}.pt'.format(n_epi)))
            saved_flag = True
        print(f'Score :{score}')
if __name__ == '__main__':
    main()
| [
"envs.LQR.LQR",
"wandb.log",
"torch.from_numpy",
"numpy.random.seed",
"torch.manual_seed",
"torch.distributions.Normal",
"torch.exp",
"utils.linear_layers",
"torch.clamp",
"wandb.init",
"torch.nn.Linear",
"torch.min",
"numpy.eye",
"torch.no_grad",
"utils.get_AB",
"torch.tensor",
"tor... | [((279, 302), 'torch.manual_seed', 'torch.manual_seed', (['(2022)'], {}), '(2022)\n', (296, 302), False, 'import torch\n'), ((303, 323), 'numpy.random.seed', 'np.random.seed', (['(2022)'], {}), '(2022)\n', (317, 323), True, 'import numpy as np\n'), ((5253, 5286), 'utils.get_AB', 'get_AB', (['state_dim', 'action_dim', 'dt'], {}), '(state_dim, action_dim, dt)\n', (5259, 5286), False, 'from utils import get_AB, linear_layers, random_AB\n'), ((5443, 5461), 'numpy.eye', 'np.eye', (['action_dim'], {}), '(action_dim)\n', (5449, 5461), True, 'import numpy as np\n'), ((5472, 5501), 'envs.LQR.LQR', 'LQR', (['A', 'B', 'Q', 'R', 'W', 'state_dim'], {}), '(A, B, Q, R, W, state_dim)\n', (5475, 5501), False, 'from envs.LQR import LQR\n'), ((681, 723), 'utils.linear_layers', 'linear_layers', ([], {'layers': '[in_dim, hidden_dim]'}), '(layers=[in_dim, hidden_dim])\n', (694, 723), False, 'from utils import get_AB, linear_layers, random_AB\n'), ((745, 775), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'out_dim'], {}), '(hidden_dim, out_dim)\n', (754, 775), True, 'import torch.nn as nn\n'), ((798, 828), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'out_dim'], {}), '(hidden_dim, out_dim)\n', (807, 828), True, 'import torch.nn as nn\n'), ((849, 873), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(1)'], {}), '(hidden_dim, 1)\n', (858, 873), True, 'import torch.nn as nn\n'), ((1155, 1198), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.fc_mu.weight.data'], {}), '(self.fc_mu.weight.data)\n', (1174, 1198), True, 'import torch.nn as nn\n'), ((1207, 1251), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['self.fc_std.weight.data'], {}), '(self.fc_std.weight.data)\n', (1226, 1251), True, 'import torch.nn as nn\n'), ((4981, 5041), 'wandb.init', 'wandb.init', ([], {'project': '"""RL"""', 'entity': '"""hzzheng"""', 'tags': "['2d-dt=1']"}), "(project='RL', entity='hzzheng', tags=['2d-dt=1'])\n", (4991, 5041), False, 'import wandb\n'), ((5365, 5382), 'numpy.eye', 
'np.eye', (['state_dim'], {}), '(state_dim)\n', (5371, 5382), True, 'import numpy as np\n'), ((5411, 5428), 'numpy.eye', 'np.eye', (['state_dim'], {}), '(state_dim)\n', (5417, 5428), True, 'import numpy as np\n'), ((3662, 3708), 'torch.tensor', 'torch.tensor', (['advantage_lst'], {'dtype': 'torch.float'}), '(advantage_lst, dtype=torch.float)\n', (3674, 3708), False, 'import torch\n'), ((2664, 2704), 'torch.tensor', 'torch.tensor', (['s_batch'], {'dtype': 'torch.float'}), '(s_batch, dtype=torch.float)\n', (2676, 2704), False, 'import torch\n'), ((2706, 2746), 'torch.tensor', 'torch.tensor', (['a_batch'], {'dtype': 'torch.float'}), '(a_batch, dtype=torch.float)\n', (2718, 2746), False, 'import torch\n'), ((2775, 2815), 'torch.tensor', 'torch.tensor', (['r_batch'], {'dtype': 'torch.float'}), '(r_batch, dtype=torch.float)\n', (2787, 2815), False, 'import torch\n'), ((2817, 2863), 'torch.tensor', 'torch.tensor', (['s_prime_batch'], {'dtype': 'torch.float'}), '(s_prime_batch, dtype=torch.float)\n', (2829, 2863), False, 'import torch\n'), ((2892, 2935), 'torch.tensor', 'torch.tensor', (['done_batch'], {'dtype': 'torch.float'}), '(done_batch, dtype=torch.float)\n', (2904, 2935), False, 'import torch\n'), ((2937, 2982), 'torch.tensor', 'torch.tensor', (['prob_a_batch'], {'dtype': 'torch.float'}), '(prob_a_batch, dtype=torch.float)\n', (2949, 2982), False, 'import torch\n'), ((3220, 3235), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3233, 3235), False, 'import torch\n'), ((6085, 6100), 'torch.distributions.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (6091, 6100), False, 'from torch.distributions import Normal\n'), ((6899, 6951), 'wandb.log', 'wandb.log', (["{'Avg score': avg_score / print_interval}"], {}), "({'Avg score': avg_score / print_interval})\n", (6908, 6951), False, 'import wandb\n'), ((4265, 4280), 'torch.distributions.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (4271, 4280), False, 'from torch.distributions import Normal\n'), ((4357, 
4391), 'torch.exp', 'torch.exp', (['(log_prob - old_log_prob)'], {}), '(log_prob - old_log_prob)\n', (4366, 4391), False, 'import torch\n'), ((4496, 4542), 'torch.clamp', 'torch.clamp', (['ratio', '(1 - eps_clip)', '(1 + eps_clip)'], {}), '(ratio, 1 - eps_clip, 1 + eps_clip)\n', (4507, 4542), False, 'import torch\n'), ((4583, 4606), 'torch.min', 'torch.min', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (4592, 4606), False, 'import torch\n'), ((6033, 6052), 'torch.from_numpy', 'torch.from_numpy', (['s'], {}), '(s)\n', (6049, 6052), False, 'import torch\n')] |
# -*- coding:utf-8 -*-
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
class DynaMaze(object):
    """Tabular Dyna-Q agent on a small grid maze (Sutton & Barto, Fig. 8.2)."""

    def __init__(self, world_size=(6, 9), obstacle=[[0, 7], [1, 2], [1, 7], [2, 2], [2, 7], [3, 2], [4, 5]],
                 planning_steps=0):
        self.world_size = world_size          # (rows, cols) of the grid
        self.obstacle = obstacle              # blocked cells, as [row, col] pairs
        self.planning_steps = planning_steps  # simulated updates per real step
        self.num_action = 4                   # 0:up, 1:down, 2:left, 3:right
        self.actions = np.arange(self.num_action)
        self.start = np.array([2, 0])         # episode start cell
        self.goal = np.array([0, 8])          # terminal cell
        self.q_value = np.zeros((self.num_action, world_size[0], world_size[1]))
        self.gama = 0.95                      # discount factor
        self.alpha = 0.1                      # learning step size
        self.epsilon = 0.1                    # exploration rate

    def select_action(self, q_value, state):
        """Pick an action for `state` epsilon-greedily, breaking ties at random."""
        if np.random.rand() < self.epsilon:
            return np.random.choice(self.actions)
        values = q_value[:, state[0], state[1]]
        best = np.max(values)
        ties = [act for act, val in enumerate(values) if val == best]
        return np.random.choice(ties)

    def find_next_state(self, state, action):
        """Apply `action` in `state` and return (next_state, reward).

        Moves are clamped to the grid; stepping into an obstacle leaves the
        state unchanged. Reward is 1 on reaching the goal, else 0.
        """
        deltas = {0: (-1, 0), 1: (1, 0), 2: (0, -1), 3: (0, 1)}
        d_row, d_col = deltas[int(action)]
        row = min(max(state[0] + d_row, 0), self.world_size[0] - 1)
        col = min(max(state[1] + d_col, 0), self.world_size[1] - 1)
        next_state = np.array([row, col])
        if next_state.tolist() in self.obstacle:
            next_state = state
        reward = 1 if all(next_state == self.goal) else 0
        return next_state, reward

    def tabular_dyna_q(self, q_value, model):
        """Run one episode of Tabular Dyna-Q and return the number of steps taken."""
        state = self.start
        steps = 0
        while any(state != self.goal):
            steps += 1
            action = self.select_action(q_value, state)
            next_state, reward = self.find_next_state(state, action)
            # direct RL: one-step Q-learning update from the real transition
            best_next = np.max(q_value[:, next_state[0], next_state[1]])
            td_error = reward + self.gama * best_next - q_value[action, state[0], state[1]]
            q_value[action, state[0], state[1]] += self.alpha * td_error
            # model learning: remember the observed transition
            model.building(state, action, next_state, reward)
            # planning: replay simulated transitions drawn from the model
            for _ in range(self.planning_steps):
                p_state, p_action, p_next, p_reward = model.sampling()
                p_best = np.max(q_value[:, p_next[0], p_next[1]])
                p_error = p_reward + self.gama * p_best - q_value[p_action, p_state[0], p_state[1]]
                q_value[p_action, p_state[0], p_state[1]] += self.alpha * p_error
            state = next_state
        return steps
class Model(object):
    """Deterministic tabular environment model for Dyna-Q planning.

    Stores observed (state, action) -> (next_state, reward) transitions and
    samples uniformly from them.
    """

    def __init__(self):
        # nested mapping: {state_tuple: {action: [next_state_tuple, reward]}}
        self.model = {}

    def building(self, state, action, next_state, reward):
        """Record the observed transition (state, action) -> (next_state, reward)."""
        self.model.setdefault(tuple(state), {})[action] = [tuple(next_state), reward]

    def sampling(self):
        """Return a uniformly sampled previously observed transition.

        :return: (state, action, next_state, reward), states as numpy arrays
        """
        states = list(self.model)
        state = states[np.random.choice(range(len(states)))]
        actions = list(self.model[state])
        action = actions[np.random.choice(range(len(actions)))]
        next_state, reward = self.model[state][action]
        return np.array(state), action, np.array(next_state), reward
if __name__ == "__main__":
    # Reproduce Sutton & Barto Figure 8.2: episode length vs. planning steps,
    # averaged over several independent runs.
    runs = 20  # the number of runs
    episodes = 50  # the number of episodes
    plan_steps = [0, 5, 50]
    # NOTE(review): a single Model instance is shared across all planning
    # settings and runs, so transitions accumulate between configurations --
    # confirm this is intended.
    original_model = Model()
    all_runs_steps = np.zeros((len(plan_steps), episodes))
    for run in tqdm(range(runs)):
        for idx, planning in enumerate(plan_steps):
            dyna_maze = DynaMaze(planning_steps=planning)
            # work on a fresh copy so each configuration starts from zero Q-values
            q_values = dyna_maze.q_value.copy()
            for ep in range(episodes):
                all_runs_steps[idx, ep] += dyna_maze.tabular_dyna_q(q_values, original_model)
    # average the per-episode step counts over all runs
    all_runs_steps /= runs
    # print(q_values)
    plt.figure(1)
    for ith in range(len(plan_steps)):
        plt.plot(all_runs_steps[ith], label=r"%d planning steps" % plan_steps[ith])
    plt.xlabel("Episodes")
    plt.ylabel("Steps per Episode")
    plt.legend()
    plt.savefig("./images/Figure8-2.png")
    plt.show()
    plt.close()
    print("Completed!!! You can check it in the 'images' directory")
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.arange",
"numpy.array",
"numpy.random.choice",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"... | [((6297, 6310), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (6307, 6310), True, 'import matplotlib.pyplot as plt\n'), ((6438, 6460), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episodes"""'], {}), "('Episodes')\n", (6448, 6460), True, 'import matplotlib.pyplot as plt\n'), ((6465, 6496), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Steps per Episode"""'], {}), "('Steps per Episode')\n", (6475, 6496), True, 'import matplotlib.pyplot as plt\n'), ((6501, 6513), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6511, 6513), True, 'import matplotlib.pyplot as plt\n'), ((6518, 6555), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/Figure8-2.png"""'], {}), "('./images/Figure8-2.png')\n", (6529, 6555), True, 'import matplotlib.pyplot as plt\n'), ((6560, 6570), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6568, 6570), True, 'import matplotlib.pyplot as plt\n'), ((6575, 6586), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6584, 6586), True, 'import matplotlib.pyplot as plt\n'), ((579, 605), 'numpy.arange', 'np.arange', (['self.num_action'], {}), '(self.num_action)\n', (588, 605), True, 'import numpy as np\n'), ((687, 703), 'numpy.array', 'np.array', (['[2, 0]'], {}), '([2, 0])\n', (695, 703), True, 'import numpy as np\n'), ((760, 776), 'numpy.array', 'np.array', (['[0, 8]'], {}), '([0, 8])\n', (768, 776), True, 'import numpy as np\n'), ((840, 897), 'numpy.zeros', 'np.zeros', (['(self.num_action, world_size[0], world_size[1])'], {}), '((self.num_action, world_size[0], world_size[1]))\n', (848, 897), True, 'import numpy as np\n'), ((1358, 1374), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1372, 1374), True, 'import numpy as np\n'), ((2623, 2651), 'numpy.array', 'np.array', (['[x_value, y_value]'], {}), '([x_value, y_value])\n', (2631, 2651), True, 'import numpy as np\n'), ((6358, 6432), 'matplotlib.pyplot.plot', 'plt.plot', (['all_runs_steps[ith]'], 
{'label': "('%d planning steps' % plan_steps[ith])"}), "(all_runs_steps[ith], label='%d planning steps' % plan_steps[ith])\n", (6366, 6432), True, 'import matplotlib.pyplot as plt\n'), ((1480, 1510), 'numpy.random.choice', 'np.random.choice', (['self.actions'], {}), '(self.actions)\n', (1496, 1510), True, 'import numpy as np\n'), ((3705, 3753), 'numpy.max', 'np.max', (['q_value[:, next_state[0], next_state[1]]'], {}), '(q_value[:, next_state[0], next_state[1]])\n', (3711, 3753), True, 'import numpy as np\n'), ((5623, 5646), 'numpy.array', 'np.array', (['current_state'], {}), '(current_state)\n', (5631, 5646), True, 'import numpy as np\n'), ((5656, 5676), 'numpy.array', 'np.array', (['next_state'], {}), '(next_state)\n', (5664, 5676), True, 'import numpy as np\n'), ((4323, 4367), 'numpy.max', 'np.max', (['q_value[:, next_sta[0], next_sta[1]]'], {}), '(q_value[:, next_sta[0], next_sta[1]])\n', (4329, 4367), True, 'import numpy as np\n'), ((1672, 1691), 'numpy.max', 'np.max', (['state_value'], {}), '(state_value)\n', (1678, 1691), True, 'import numpy as np\n')] |
import pickle
import numpy as np
from typing import List
from time import time
from pathlib import Path
from textual.app import App
from textual import events
from widgets.box import TextBox, InfoBox, GutterBox
from utils.data import SessionDatabase, SessionDatabaseEntry, index_text_to_words, read_database
from utils.srs import update_srs_database_from_latest_session, sample_ngrams_from_srs_database
from utils.ngram import get_words_with_ngrams
class SrsTyperApp(App):
    """Terminal application for personalized typing practice based on the Spaced Repetition System (SRS)."""

    def __init__(self, full_text: str, **kwargs):
        """Store the session text to be typed; remaining setup happens in on_load."""
        # Raw text that is to be typed in the session.
        self.full_text = full_text
        self.text_length = len(full_text)
        super().__init__(**kwargs)

    async def on_load(self, _: events.Load) -> None:
        """Called before entering application mode."""
        self.data_dir = Path("data")
        if not self.data_dir.is_dir():
            self.data_dir.mkdir()
        # Words of the text and indices to map the current location in the text to a word.
        # Both are used to save the word in which typos occur.
        self.word_list, self.word_indices = index_text_to_words(self.full_text)
        # Text on the left side of the cursor (already typed), with formatting for rich rendering
        self.formatted_input = ""
        # Full text, with formatting for rich rendering
        self.formatted_text = ""
        # Left and right tags for rich rendering
        self.correct_style = ["[bold green]", "[/bold green]"]
        self.current_style = ["[black on white]", "[/black on white]"]
        self.incorrect_style = ["[bold red]", "[/bold red]"]
        # Current location in the full text
        self.current_location = 0
        # The formatted text will contain information about rendering like [bold green].
        # To correctly remove characters from the formatted text, we have to remember where in the string a new text character starts.
        # To delete more than one character in sequence, we keep a list of these locations, and pop off from the end.
        self.delete_locations = []
        # A class for storing information about what was typed in which context
        self.session_database = SessionDatabase()
        # Counters to calculate the accuracy
        self.hits = 0
        self.misses = 0
        await self.bind("escape", "quit", "Quit")

    async def on_mount(self) -> None:
        """Called when application mode is ready; lays out info, text and gutter boxes."""
        self.info = InfoBox()
        self.text = TextBox(self.full_text)
        self.gutter = GutterBox()
        grid = await self.view.dock_grid(edge="left", name="left")
        grid.add_column(fraction=1, name="left")
        grid.add_column(fraction=5, name="right")
        grid.add_row(fraction=5, name="top")
        grid.add_row(fraction=1, name="bottom")
        grid.add_areas(
            info_area="left, top-start|bottom-end",
            text_area="right, top",
            gutter_area="right, bottom",
        )
        grid.place(
            info_area=self.info,
            text_area=self.text,
            gutter_area=self.gutter,
        )

    async def on_key(self, event: events.Key) -> None:
        """Called when a key is pressed."""
        current_char = self.full_text[self.current_location]
        current_input = str(event.key)
        # handle special characters
        if current_input == "ctrl+h":
            # backspace: step back one character and truncate the formatted input
            self.current_location = max(self.current_location - 1, 0)
            try:
                delete_location = self.delete_locations.pop()
            except IndexError:
                delete_location = 0
            self.formatted_input = self.formatted_input[:delete_location]
        else:
            self.save_entry(current_input, current_char)
            if current_input == current_char:
                # correct input
                formatted_char = surround_with_style(self.correct_style,
                                                    current_char)
                self.hits += 1
            else:
                # incorrect input
                if current_char.isspace():
                    # since we cannot color in a space, we replace it with an underscore
                    current_char = "_"
                formatted_char = surround_with_style(self.incorrect_style,
                                                    current_char)
                self.misses += 1
            # new location to jump to, when deleting text is the current length of the text, before adding the new formatted char
            self.delete_locations.append(max(len(self.formatted_input), 0))
            self.formatted_input += formatted_char
            self.current_location += 1
        self.formatted_text = self.assemble_formatted_text()
        await self.text.update(self.formatted_text)
        await self.gutter.update(f"<<{current_char}>> <<{current_input}>>")
        await self.info.update(
            accuracy=self.get_accuracy(),
            speed=self.get_speed(),
            progress=self.get_progress(),
        )
        if self.current_location >= self.text_length:
            await self.exit()

    def assemble_formatted_text(self) -> str:
        """Return a renderable string based on the previous input and the remaining text."""
        try:
            return self.formatted_input \
                + surround_with_style(self.current_style, self.full_text[self.current_location]) \
                + self.full_text[self.current_location + 1:]
        except IndexError:
            # past the end of the text: nothing left to highlight
            return self.formatted_input

    def save_entry(
            self,
            current_input: str,
            current_char: str,
    ) -> None:
        """Save information about what was put in, what was expected, the current word, the character's location in the word, and a time stamp."""
        current_word_index = self.word_indices[self.current_location]
        # walk backwards to find where the current word starts
        current_search_location = self.current_location
        while self.word_indices[current_search_location] == current_word_index:
            current_search_location -= 1
        location_in_word = self.current_location - current_search_location - 1
        self.session_database.entries.append(
            SessionDatabaseEntry(
                input=current_input,
                text=current_char,
                correct=current_input == current_char,
                word=self.word_list[current_word_index],
                location_in_word=location_in_word,
                time=time(),
            ))

    def get_progress(self) -> str:
        """Percentage of typed text."""
        return f"{(self.current_location/self.text_length)*100:.1f}%"

    def get_accuracy(self) -> str:
        """Percentage of hits in typed text."""
        try:
            return f"{(self.hits/(self.hits+self.misses))*100:.1f}%"
        except ZeroDivisionError:
            # nothing typed yet
            return "100%"

    def get_speed(self) -> str:
        """Characters per minute since start."""
        try:
            start = self.session_database.entries[0].time
            return f"{int(self.current_location/(time() - start)*60)} cpm"
        except IndexError:
            # no entries yet, so no elapsed time to measure against
            return "0 cpm"

    def _save_session_database(self) -> None:
        """Pickle the session database into the data directory, named by session date."""
        database_save_path = self.data_dir / f"{self.session_database.date}_session_database.pkl"
        with open(str(database_save_path), "wb") as output_file:
            pickle.dump(self.session_database, output_file)

    async def exit(self) -> None:
        """What to do on exit: persist the session, then shut down."""
        self._save_session_database()
        await self.shutdown()

    async def action_quit(self) -> None:
        """What to do on quit: persist the session, then shut down."""
        self._save_session_database()
        await self.shutdown()
def surround_with_style(style: List[str], text: str) -> str:
    """Wrap ``text`` in a rich style pair, e.g. ["[bold red]", "[/bold red]"] -> "[bold red]text[/bold red]"."""
    return f"{style[0]}{text}{style[1]}"
if __name__ == "__main__":
    # Build a practice text: mostly words containing the SRS-weakest ngrams,
    # plus a fraction of random words for exploration.
    num_words_in_text = 20
    exploration_percentage = 0.2
    # system word list; assumes a Unix-like OS -- TODO confirm for other platforms
    word_file_path = Path("/usr/share/dict/words")
    data_dir = Path("data")
    srs_database_name = "srs_database.pkl"
    # fold the most recent session's results into the SRS database first
    update_srs_database_from_latest_session(data_dir=data_dir, srs_database_name=srs_database_name)
    srs_database = read_database(data_dir / srs_database_name)
    # sample the (1 - exploration) share of words from the weakest ngrams
    sampled_ngrams = sample_ngrams_from_srs_database(srs_database, int(num_words_in_text*(1-exploration_percentage)))
    assert word_file_path.is_file(), f"Cannot find {word_file_path.absolute()}."
    with open(str(word_file_path.absolute()), "r") as word_file:
        WORDS = word_file.read().splitlines()
    relevant_words = get_words_with_ngrams(sampled_ngrams, WORDS)
    text_words = []
    while len(sampled_ngrams) > 0:
        ngram = sampled_ngrams.pop()
        ngram_word_list = [ngram_word for ngram_word in relevant_words if ngram in ngram_word]
        # if there is no word matching the ngram, add the ngram itself
        if len(ngram_word_list) == 0:
            if len(ngram) > 0:
                text_words.append(ngram)
        else:
            word = np.random.choice(ngram_word_list)
            text_words.append(word)
    # pad the text with random words up to the target length
    num_missing_words = num_words_in_text - len(text_words)
    if num_missing_words > 0:
        text_words.extend(np.random.choice(WORDS, size=num_missing_words))
    full_text = " ".join(text_words)
    SrsTyperApp.run(full_text=full_text)
| [
"pickle.dump",
"widgets.box.GutterBox",
"utils.srs.update_srs_database_from_latest_session",
"utils.data.read_database",
"widgets.box.InfoBox",
"time.time",
"utils.data.SessionDatabase",
"pathlib.Path",
"widgets.box.TextBox",
"numpy.random.choice",
"utils.data.index_text_to_words",
"utils.ngra... | [((8263, 8292), 'pathlib.Path', 'Path', (['"""/usr/share/dict/words"""'], {}), "('/usr/share/dict/words')\n", (8267, 8292), False, 'from pathlib import Path\n'), ((8308, 8320), 'pathlib.Path', 'Path', (['"""data"""'], {}), "('data')\n", (8312, 8320), False, 'from pathlib import Path\n'), ((8369, 8468), 'utils.srs.update_srs_database_from_latest_session', 'update_srs_database_from_latest_session', ([], {'data_dir': 'data_dir', 'srs_database_name': 'srs_database_name'}), '(data_dir=data_dir,\n srs_database_name=srs_database_name)\n', (8408, 8468), False, 'from utils.srs import update_srs_database_from_latest_session, sample_ngrams_from_srs_database\n'), ((8484, 8527), 'utils.data.read_database', 'read_database', (['(data_dir / srs_database_name)'], {}), '(data_dir / srs_database_name)\n', (8497, 8527), False, 'from utils.data import SessionDatabase, SessionDatabaseEntry, index_text_to_words, read_database\n'), ((8863, 8907), 'utils.ngram.get_words_with_ngrams', 'get_words_with_ngrams', (['sampled_ngrams', 'WORDS'], {}), '(sampled_ngrams, WORDS)\n', (8884, 8907), False, 'from utils.ngram import get_words_with_ngrams\n'), ((940, 952), 'pathlib.Path', 'Path', (['"""data"""'], {}), "('data')\n", (944, 952), False, 'from pathlib import Path\n'), ((1225, 1260), 'utils.data.index_text_to_words', 'index_text_to_words', (['self.full_text'], {}), '(self.full_text)\n', (1244, 1260), False, 'from utils.data import SessionDatabase, SessionDatabaseEntry, index_text_to_words, read_database\n'), ((2298, 2315), 'utils.data.SessionDatabase', 'SessionDatabase', ([], {}), '()\n', (2313, 2315), False, 'from utils.data import SessionDatabase, SessionDatabaseEntry, index_text_to_words, read_database\n'), ((2572, 2581), 'widgets.box.InfoBox', 'InfoBox', ([], {}), '()\n', (2579, 2581), False, 'from widgets.box import TextBox, InfoBox, GutterBox\n'), ((2602, 2625), 'widgets.box.TextBox', 'TextBox', (['self.full_text'], {}), '(self.full_text)\n', (2609, 2625), False, 'from 
widgets.box import TextBox, InfoBox, GutterBox\n'), ((2648, 2659), 'widgets.box.GutterBox', 'GutterBox', ([], {}), '()\n', (2657, 2659), False, 'from widgets.box import TextBox, InfoBox, GutterBox\n'), ((7532, 7579), 'pickle.dump', 'pickle.dump', (['self.session_database', 'output_file'], {}), '(self.session_database, output_file)\n', (7543, 7579), False, 'import pickle\n'), ((7861, 7908), 'pickle.dump', 'pickle.dump', (['self.session_database', 'output_file'], {}), '(self.session_database, output_file)\n', (7872, 7908), False, 'import pickle\n'), ((9311, 9344), 'numpy.random.choice', 'np.random.choice', (['ngram_word_list'], {}), '(ngram_word_list)\n', (9327, 9344), True, 'import numpy as np\n'), ((9498, 9545), 'numpy.random.choice', 'np.random.choice', (['WORDS'], {'size': 'num_missing_words'}), '(WORDS, size=num_missing_words)\n', (9514, 9545), True, 'import numpy as np\n'), ((6611, 6617), 'time.time', 'time', ([], {}), '()\n', (6615, 6617), False, 'from time import time\n'), ((7208, 7214), 'time.time', 'time', ([], {}), '()\n', (7212, 7214), False, 'from time import time\n')] |
import math
import os
import struct
import threading
import time
import wave # write to wav
import numpy as np
import pyaudio # record
from scipy.io.wavfile import read, write
if __name__ != "__main__":
from .cli_tools import print_square
if __name__ != "__main__":
from .dsp import get_rms
def clear_console():
    """Clear the terminal, using 'cls' on Windows (nt/dos) and 'clear' elsewhere."""
    os.system('cls' if os.name in ('nt', 'dos') else 'clear')
class RecordingInterrupt(Exception):
    """Raised to abort an in-progress recording."""
    pass
class Recorder:
    """Microphone recorder built on PyAudio.
    Records audio with silence time-outs and simple RMS-level thresholding,
    supports per-channel dBSPL calibration against a reference source, and
    can record while simultaneously playing back a stimulus file.
    """
    def __init__(self):
        """Pick the default I/O devices and initialize recording parameters."""
        self.timeout = 5
        self.threshold = -960
        self.chunk = 1024
        self.bits = 16
        self.channels = 1
        self.MAX_TIMEOUT = 30
        # Scale factor mapping signed integer samples into [-1.0, 1.0).
        self.normalize = (1 / (2 ** (self.bits - 1)))
        self.data = []
        # check the proper sample format
        while True:
            if self.bits == 8:
                self.sample_format = pyaudio.paInt8
                break
            elif self.bits == 16:
                self.sample_format = pyaudio.paInt16
                break
            elif self.bits == 24:
                self.sample_format = pyaudio.paInt24
                break
            elif self.bits == 32:
                self.sample_format = pyaudio.paInt32
                break
            else:
                self.bits = int(input("Please select a valid sample format (8, 16, 24 or 32)"))
        # default device
        p = pyaudio.PyAudio()
        self.deviceIn = p.get_default_input_device_info().get("index")
        self.deviceOut = p.get_default_output_device_info().get("index")
        self.devicesIn = []
        self.devicesOut = []
        self.channelsIn = p.get_device_info_by_index(self.deviceIn)["maxInputChannels"]
        # NOTE(review): this reads maxInputChannels of the *input* device again;
        # the output device's maxOutputChannels was probably intended.
        self.channelsOut = p.get_device_info_by_index(self.deviceIn)["maxInputChannels"]
        self.channels = min(self.channelsOut, self.channelsIn)
        p.terminate()
        # not calibrated by default
        self.calibrated = []
        self.correction = []
        for i in range(self.channels):
            self.correction.append([])
            self.calibrated.append(False)
        # get audio info
        devinfo = self.get_device_info()
        # default sample rate
        self.fs = 44100
        self.available_inputs = devinfo.get("inputs")
        self.available_outputs = devinfo.get("outputs")
        self.lowtreshold = -960
        self.hightreshold = 0
        # "soglia" is Italian for threshold; only referenced by commented-out
        # monitor output below.
        self.soglia = 0
        self.rms = -960
        self._running = False
        self._is_saved = False
    def set_device(self, io, index):
        """Select the input or output device by index.
        NOTE(review): both branches test membership in ``available_inputs``
        (a list of device-info dicts), so the output branch looks like a
        copy-paste bug, and testing an int index against dicts presumably
        never matches — verify against get_device_info().
        """
        if io == "input":
            if index in self.available_inputs:
                self.deviceIn = index
        elif io == "output":
            if index in self.available_inputs:
                self.deviceOut = index
        return
    def get_device_info(self):
        """
        Returns a dictionary containing the information about the default input and output devices, along with all the
        available devices currently connected.
        The returned dictionary has two keys: 'inputs' and 'outputs', each a
        list of PortAudio device-info dicts.
        Example:
        >>> recorder = Recorder()
        >>> information = recorder.get_device_info()
        >>> first_input_index = information.get("inputs")[0].get("index")
        """
        # stored data into the recorder
        self.devicesIn = []
        self.devicesOut = []
        self.data = []
        # open the stream
        p = pyaudio.PyAudio()
        # get number of connected devices
        info = p.get_host_api_info_by_index(0)
        numdevices = info.get('deviceCount')
        # determine if each device is a input or output
        for i in range(0, numdevices):
            if p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels') > 0:
                print("INPUT: %d - %s - %d "
                      "channel(s)" % (i,
                                      p.get_device_info_by_host_api_device_index(0, i).get('name'),
                                      p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')))
                self.devicesIn.append(p.get_device_info_by_host_api_device_index(0, i))
            if p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels') > 0:
                print("OUTPUT: %d - %s - %d "
                      "channel(s)" % (i,
                                      p.get_device_info_by_host_api_device_index(0, i).get('name'),
                                      p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels')))
                self.devicesOut.append(p.get_device_info_by_host_api_device_index(0, i))
        # NOTE(review): indexing devicesIn/devicesOut by a device index assumes
        # list position == device index, which only holds when every device is
        # both input and output; also the second line reads devicesOut with
        # self.deviceIn. Verify.
        print("\n--> Selected INPUT device: %d - %s" % (self.deviceIn, self.devicesIn[self.deviceIn].get("name")))
        print("<-- Selected OUTPUT device: %d - %s" % (self.deviceOut, self.devicesOut[self.deviceIn].get("name")))
        # create dictionary with default device and available devices
        audioinfo = {'inputs': self.devicesIn,
                     'outputs': self.devicesOut,
                     }
        # close stream
        p.terminate()
        return audioinfo
    def calibrate(self, channel, reference=94, timerec=10):
        """
        Calibrate the microphone to have a direct conversion from dBFS to dBSPL.
        This is done separately for each channel. Specify it in the function arguments.
        The use of a 94dBSPL (1kHz) calibrator is strongly advised. Otherwise, please
        specify another reference value.
        """
        # recording time
        minutes = int((timerec / 60)) % 60
        hours = int((timerec / 3600))
        seconds = timerec - 3600 * hours - 60 * minutes
        c_path_short = os.getcwd()
        # NOTE(review): the path shortening splits on backslashes, so it is
        # Windows-specific.
        tree = c_path_short.split("\\")
        if len(c_path_short) > 40:
            c_dir = (tree[0] + "\\" + tree[1] + "\\...\\" + tree[-2])
        else:
            c_dir = c_path_short
        # dialog
        print("Calibrating (%0.1fdBSPL):" % reference)
        print("")
        print("-------------------------------------------------------------------")
        print("-------------------------------------------------------------------")
        print("- Sample format...................%d bits" % self.bits)
        print("- Sampling frequency..............%d Hz" % self.fs)
        print("- Samples per buffer..............%d samples" % self.chunk)
        print("- Recording time (hh:mm:ss).......%02d:%02d:%02d" % (hours, minutes, seconds))
        print("- Channel:........................%d" % channel)
        print("- Working directory...............%s" % c_dir)
        print("-------------------------------------------------------------------")
        print("-------------------------------------------------------------------")
        print("")
        # NOTE(review): nothing here waits for ENTER (no input() call), so the
        # KeyboardInterrupt handler below is effectively unreachable.
        try:
            print("Place the microphone into the calibrator and press ENTER to calibrate (CTRL+C to cancel)")
        except KeyboardInterrupt:
            print("Calibration canceled!")
            return
        # instantiate stream
        p = pyaudio.PyAudio()  # create an interface to PortAudio API
        stream = p.open(format=self.sample_format,
                        channels=self.channels,
                        rate=self.fs,
                        frames_per_buffer=self.chunk,
                        input_device_index=self.deviceIn,
                        input=True)
        frames = []  # initialize array to store frames
        # The actual recording
        current = time.time()
        maxtime = time.time() + timerec
        sum_squares_global = 0.0
        print("\nCalibrating... ", end='')
        while current <= maxtime:
            try:
                audio_data = stream.read(self.chunk)
                count = len(audio_data) / 2
                data_format = "%dh" % count
                shorts = struct.unpack(data_format, audio_data)
                shorts_array = []
                for i in range(self.channels):
                    shorts_array.append([])
                # get intensity
                # De-interleave samples into one list per channel.
                for sample in range(len(shorts)):
                    shorts_array[sample % self.channels].append(shorts[sample])
                rms = []
                for i in range(len(shorts_array)):
                    sum_squares = 0.0
                    for sample in shorts_array[i]:
                        n = sample * self.normalize
                        sum_squares += n * n
                    if i == channel:
                        sum_squares_global = sum_squares
                    # RMS level in dBFS; the 20*log10(2**0.5) term adds ~3 dB
                    # (full-scale sine correction — TODO confirm intent).
                    rms.append(
                        round(20 * math.log10(math.pow((sum_squares / self.chunk), 0.5)) + 20 * math.log10(2 ** 0.5),
                              2))
                frames.append(audio_data)
                current = time.time()
            except KeyboardInterrupt:
                # print("\nRecording stopped")
                break
        # NOTE(review): rms_global is computed but never used afterwards.
        rms_global = round(
            20 * math.log10(math.pow((sum_squares_global / self.chunk), 0.5)) + 20 * math.log10(2 ** 0.5), 2)
        # Stop and close the stream
        stream.stop_stream()
        stream.close()
        # Terminate the portaudio interface
        p.terminate()
        wf = wave.open('temp.wav', 'wb')
        wf.setnchannels(self.channels)
        wf.setsampwidth(p.get_sample_size(self.sample_format))
        wf.setframerate(self.fs)
        wf.writeframes(b''.join(frames))
        wf.close()
        print('done!\n')
        _, audio_data = read('temp.wav')
        os.remove('temp.wav')
        audio_data = audio_data[:, channel]
        self.calibrated[channel] = True  # microphone calibrated
        self.correction[channel] = reference - get_rms(audio_data)  # correction factor
        print_square("Power = %0.2fdBFS\n"
                     "dBSPL/dBFS = %0.2f" % (get_rms(audio_data), self.correction[channel]),
                     margin=[4, 4, 1, 1])
        return audio_data
    def play_and_record(self, audio_data, audio_fs):
        """
        Record while playing
        """
        # Handshake: the main thread acquires the token below, and the player
        # releases it on its first buffer write, so the recorder thread starts
        # roughly aligned with the beginning of playback.
        sem = threading.Semaphore()
        def _play_wav_semaphore(file):
            """
            Plays a wav file.
            """
            chunk = 1024
            wf = wave.open(file, 'rb')
            # instantiate PyAudio
            p = pyaudio.PyAudio()
            # open stream
            stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                            channels=wf.getnchannels(),
                            rate=wf.getframerate(),
                            output=True)
            # read data
            file_data = wf.readframes(chunk)
            # play stream
            acquiring = False
            print("\nPlaying...\n")
            while len(file_data) > 0:
                stream.write(file_data)
                if not acquiring:
                    # print("About to acquire")
                    # Signal the recorder thread that playback has started.
                    sem.release()
                    # print("acquired")
                    acquiring = True
                file_data = wf.readframes(chunk)
            # stop stream
            stream.stop_stream()
            stream.close()
            # close PyAudio
            p.terminate()
            return
        def _record_semaphore(secs, channel=0):
            """Record `secs` seconds once playback releases the semaphore."""
            # print("Record before")
            sem.acquire()
            # print("Record after")
            # instantiate stream
            p = pyaudio.PyAudio()  # create an interface to PortAudio API
            stream = p.open(format=self.sample_format,
                            channels=self.channels,
                            rate=self.fs,
                            frames_per_buffer=self.chunk,
                            input_device_index=self.deviceIn,
                            input=True)
            frames = []  # initialize array to store frames
            # The actual recording
            started = False
            # print("Waiting for speech over the threshold...")
            current = time.time()
            timeout = 5
            end = time.time() + timeout
            maxtime = time.time() + secs
            while current <= maxtime:
                try:
                    rec_data = stream.read(self.chunk)
                    count = len(rec_data) / 2
                    data_format = "%dh" % count
                    shorts = struct.unpack(data_format, rec_data)
                    shorts_array = []
                    for i in range(self.channels):
                        shorts_array.append([])
                    # get intensity
                    for sample in range(len(shorts)):
                        shorts_array[sample % self.channels].append(shorts[sample])
                    rms = []
                    for i in range(len(shorts_array)):
                        sum_squares = 0.0
                        for sample in shorts_array[i]:
                            n = sample * self.normalize
                            sum_squares += n * n
                        rms.append(
                            round(
                                20 * math.log10(math.pow((sum_squares / self.chunk), 0.5)) + 20 * math.log10(2 ** 0.5),
                                2))
                    # detects sounds over the threshold
                    if rms[channel] > self.threshold:
                        # Sound detected: push the silence deadline forward.
                        end = time.time() + timeout
                        if not started:
                            started = True
                            maxtime = time.time() + secs
                            print("Recording...\n")
                    current = time.time()
                    if started:
                        for i in range(len(rms)):
                            if self.calibrated[i]:
                                pass
                                # print("%0.2f dBSPL\t"%(rms[i]+self.correction[i]), end = ' ')
                            else:
                                pass
                                # print("%0.2f dBFS\t"%(rms[i]), end = ' ')
                        # print("\n")
                        frames.append(rec_data)
                        if current >= end:
                            print("Silence TIMEOUT")
                            break
                except KeyboardInterrupt:
                    print("\nRecording stopped")
                    break
            # Stop and close the stream
            stream.stop_stream()
            stream.close()
            # Terminate the portaudio interface
            p.terminate()
            # write recorded data into an array
            if len(frames) > 0:
                wf = wave.open(".temp_out.wav", 'wb')
                wf.setnchannels(self.channels)
                wf.setsampwidth(p.get_sample_size(self.sample_format))
                wf.setframerate(self.fs)
                wf.writeframes(b''.join(frames))
                wf.close()
                print('... done!')
                _, self.data = read(".temp_out.wav")
                os.remove(".temp_out.wav")
                # self.data = self.data[]
                return self.data
            else:
                print("No audio recorded!")
                return 0
        # Write the stimulus to a temp wav so the player thread can stream it.
        write(".temp.wav", audio_fs, audio_data)
        seconds = len(audio_data) / audio_fs
        sem.acquire()
        play = threading.Thread(target=_play_wav_semaphore, args=(".temp.wav",))
        rec = threading.Thread(target=_record_semaphore, args=(seconds,))
        play.start()
        rec.start()
        play.join()
        rec.join()
        os.remove(".temp.wav")
        return self.data
    def save(self, filename="output.wav"):
        """Write the last recorded data to `filename` as a wav file."""
        write(filename, self.fs, np.array(self.data))
        return
    def play(self):
        """
        Reproduces the last recorded data.
        NOTE(review): play_data is only imported in the __main__ block of this
        module, so calling play() from library code would raise NameError.
        """
        if len(self.data) > 0:
            play_data(self.data, self.fs)
        else:
            print("\nNo data to play! Record something first")
        return
    def record(self, seconds=None, channel=0, l_threshold=None, h_treshold=None, monitor=False):
        """Record from the input device.
        Waits for the level on `channel` to exceed `l_threshold` (dBFS), then
        records until `seconds` elapse or terminate() flips self._running.
        Returns the samples read back from a temp wav file, or 0 if nothing
        was recorded.
        NOTE(review): `h_treshold` is only defaulted, never used afterwards.
        """
        p_pow = -960
        if l_threshold is None and h_treshold is None:
            l_threshold = -960
            h_treshold = -960
            pass
        # instantiate stream
        p = pyaudio.PyAudio()  # create an interface to PortAudio API
        stream = p.open(format=self.sample_format,
                        channels=self.channels,
                        rate=self.fs,
                        frames_per_buffer=self.chunk,
                        input_device_index=self.deviceIn,
                        input=True)
        frames = []  # initialize array to store frames
        # The actual recording
        started = False
        # print("Waiting for speech over the threshold...")
        current = time.time()
        timeout = self.timeout
        end = time.time() + timeout
        if seconds is None:
            seconds = 10000
        maxtime = time.time() + seconds
        self._running = True
        self._is_saved = False
        while current <= maxtime and self._running:
            try:
                audio_data = stream.read(self.chunk)
                count = len(audio_data) / 2
                data_format = "%dh" % count
                shorts = struct.unpack(data_format, audio_data)
                shorts_array = []
                for i in range(self.channels):
                    shorts_array.append([])
                # get intensity
                for sample in range(len(shorts)):
                    shorts_array[sample % self.channels].append(shorts[sample])
                rms = []
                for i in range(len(shorts_array)):
                    sum_squares = 0.0
                    for sample in shorts_array[i]:
                        n = sample * self.normalize
                        sum_squares += n * n
                    rms.append(
                        round(20 * math.log10(math.pow((sum_squares / self.chunk), 0.5)) + 20 * math.log10(2 ** 0.5),
                              2))
                # detects sounds over the threshold
                if rms[channel] > l_threshold:
                    end = time.time() + timeout
                    if not started:
                        started = True
                        maxtime = time.time() + seconds
                        print("\nRecording...")
                current = time.time()
                if started:
                    for i in range(len(rms)):
                        if monitor:
                            if self.calibrated[i]:
                                pass
                                # print("%0.2f dBSPL - %s\t"%(rms[i]+self.correction[i], soglia))
                            else:
                                pass
                                # print("%0.2f dBFS - %s\t"%(rms[i], soglia))
                    rms_tot = rms[channel]
                    self.rms = rms_tot
                    if monitor:
                        print(rms[channel])
                    # Edge detection on the level crossing the high/low
                    # thresholds; on_positive_edge()/on_negative_edge() are
                    # override hooks.
                    if rms_tot > self.hightreshold and p_pow < self.hightreshold:
                        self.on_positive_edge()
                    elif rms_tot < self.lowtreshold and p_pow > self.lowtreshold:
                        self.on_negative_edge()
                    p_pow = rms_tot
                    # print("\n")
                    frames.append(audio_data)
                    if current >= end:
                        # Unlike play_and_record, a silence timeout does not
                        # break the loop here; on_timeout() is just notified.
                        self.on_timeout()
            except KeyboardInterrupt:
                print("\nRecording stopped")
                break
        print("RECORD ENDED")
        # Stop and close the stream
        stream.stop_stream()
        stream.close()
        # Terminate the portaudio interface
        p.terminate()
        # write recorded data into an array
        print("SAVING INTO RECORDER DATA")
        print(len(frames))
        if len(frames) > 0:
            wf = wave.open(".temp.wav", 'wb')
            wf.setnchannels(self.channels)
            wf.setsampwidth(p.get_sample_size(self.sample_format))
            wf.setframerate(self.fs)
            wf.writeframes(b''.join(frames))
            wf.close()
            print('...done!')
            _, self.data = read(".temp.wav")
            os.remove(".temp.wav")
            # self.data = self.data[:,0]
            self._is_saved = True
            return self.data
        else:
            print("No audio recorded!")
            return 0
    def on_timeout(self):
        """Hook invoked by record() when the silence timeout elapses."""
        print("TIMEOUT")
        pass
    def on_positive_edge(self):
        """Hook invoked when the level rises above self.hightreshold."""
        pass
    def on_negative_edge(self):
        """Hook invoked when the level falls below self.lowtreshold."""
        pass
    def terminate(self):
        """Stop an in-progress record() and wait until its data is saved."""
        self._running = False
        print("Saving...")
        # Busy-wait (spin) until record() has written self.data.
        while not self._is_saved:
            pass
        print("Done!")
if __name__ == "__main__":
    # Manual smoke test. These absolute imports provide print_square/get_rms
    # when the file runs as a script (the relative imports at the top are
    # skipped under __main__).
    from play import play_data
    from dsp import get_rms
    from cli_tools import print_square
    r = Recorder()
    r.timeout = 5
    # Record at most 30 s, triggering when the level exceeds -30 dBFS.
    r.record(30, l_threshold=-30, monitor=True)
    r.save("PROVA.wav")
| [
"wave.open",
"os.remove",
"threading.Thread",
"play.play_data",
"math.pow",
"os.getcwd",
"struct.unpack",
"os.system",
"time.time",
"scipy.io.wavfile.read",
"scipy.io.wavfile.write",
"math.log10",
"numpy.array",
"pyaudio.PyAudio",
"dsp.get_rms",
"threading.Semaphore"
] | [((455, 473), 'os.system', 'os.system', (['command'], {}), '(command)\n', (464, 473), False, 'import os\n'), ((1445, 1462), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (1460, 1462), False, 'import pyaudio\n'), ((3381, 3398), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (3396, 3398), False, 'import pyaudio\n'), ((5654, 5665), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5663, 5665), False, 'import os\n'), ((7000, 7017), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (7015, 7017), False, 'import pyaudio\n'), ((7450, 7461), 'time.time', 'time.time', ([], {}), '()\n', (7459, 7461), False, 'import time\n'), ((9167, 9194), 'wave.open', 'wave.open', (['"""temp.wav"""', '"""wb"""'], {}), "('temp.wav', 'wb')\n", (9176, 9194), False, 'import wave\n'), ((9439, 9455), 'scipy.io.wavfile.read', 'read', (['"""temp.wav"""'], {}), "('temp.wav')\n", (9443, 9455), False, 'from scipy.io.wavfile import read, write\n'), ((9464, 9485), 'os.remove', 'os.remove', (['"""temp.wav"""'], {}), "('temp.wav')\n", (9473, 9485), False, 'import os\n'), ((10014, 10035), 'threading.Semaphore', 'threading.Semaphore', ([], {}), '()\n', (10033, 10035), False, 'import threading\n'), ((15132, 15172), 'scipy.io.wavfile.write', 'write', (['""".temp.wav"""', 'audio_fs', 'audio_data'], {}), "('.temp.wav', audio_fs, audio_data)\n", (15137, 15172), False, 'from scipy.io.wavfile import read, write\n'), ((15255, 15320), 'threading.Thread', 'threading.Thread', ([], {'target': '_play_wav_semaphore', 'args': "('.temp.wav',)"}), "(target=_play_wav_semaphore, args=('.temp.wav',))\n", (15271, 15320), False, 'import threading\n'), ((15335, 15394), 'threading.Thread', 'threading.Thread', ([], {'target': '_record_semaphore', 'args': '(seconds,)'}), '(target=_record_semaphore, args=(seconds,))\n', (15351, 15394), False, 'import threading\n'), ((15483, 15505), 'os.remove', 'os.remove', (['""".temp.wav"""'], {}), "('.temp.wav')\n", (15492, 15505), False, 'import os\n'), ((16190, 16207), 
'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (16205, 16207), False, 'import pyaudio\n'), ((16723, 16734), 'time.time', 'time.time', ([], {}), '()\n', (16732, 16734), False, 'import time\n'), ((7480, 7491), 'time.time', 'time.time', ([], {}), '()\n', (7489, 7491), False, 'import time\n'), ((9642, 9661), 'dsp.get_rms', 'get_rms', (['audio_data'], {}), '(audio_data)\n', (9649, 9661), False, 'from dsp import get_rms\n'), ((10180, 10201), 'wave.open', 'wave.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (10189, 10201), False, 'import wave\n'), ((10252, 10269), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (10267, 10269), False, 'import pyaudio\n'), ((11361, 11378), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (11376, 11378), False, 'import pyaudio\n'), ((11937, 11948), 'time.time', 'time.time', ([], {}), '()\n', (11946, 11948), False, 'import time\n'), ((15608, 15627), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (15616, 15627), True, 'import numpy as np\n'), ((15775, 15804), 'play.play_data', 'play_data', (['self.data', 'self.fs'], {}), '(self.data, self.fs)\n', (15784, 15804), False, 'from play import play_data\n'), ((16780, 16791), 'time.time', 'time.time', ([], {}), '()\n', (16789, 16791), False, 'import time\n'), ((16876, 16887), 'time.time', 'time.time', ([], {}), '()\n', (16885, 16887), False, 'import time\n'), ((19838, 19866), 'wave.open', 'wave.open', (['""".temp.wav"""', '"""wb"""'], {}), "('.temp.wav', 'wb')\n", (19847, 19866), False, 'import wave\n'), ((20139, 20156), 'scipy.io.wavfile.read', 'read', (['""".temp.wav"""'], {}), "('.temp.wav')\n", (20143, 20156), False, 'from scipy.io.wavfile import read, write\n'), ((20169, 20191), 'os.remove', 'os.remove', (['""".temp.wav"""'], {}), "('.temp.wav')\n", (20178, 20191), False, 'import os\n'), ((7795, 7833), 'struct.unpack', 'struct.unpack', (['data_format', 'audio_data'], {}), '(data_format, audio_data)\n', (7808, 7833), False, 'import struct\n'), 
((8740, 8751), 'time.time', 'time.time', ([], {}), '()\n', (8749, 8751), False, 'import time\n'), ((11991, 12002), 'time.time', 'time.time', ([], {}), '()\n', (12000, 12002), False, 'import time\n'), ((12035, 12046), 'time.time', 'time.time', ([], {}), '()\n', (12044, 12046), False, 'import time\n'), ((14561, 14593), 'wave.open', 'wave.open', (['""".temp_out.wav"""', '"""wb"""'], {}), "('.temp_out.wav', 'wb')\n", (14570, 14593), False, 'import wave\n'), ((14895, 14916), 'scipy.io.wavfile.read', 'read', (['""".temp_out.wav"""'], {}), "('.temp_out.wav')\n", (14899, 14916), False, 'from scipy.io.wavfile import read, write\n'), ((14933, 14959), 'os.remove', 'os.remove', (['""".temp_out.wav"""'], {}), "('.temp_out.wav')\n", (14942, 14959), False, 'import os\n'), ((17193, 17231), 'struct.unpack', 'struct.unpack', (['data_format', 'audio_data'], {}), '(data_format, audio_data)\n', (17206, 17231), False, 'import struct\n'), ((18321, 18332), 'time.time', 'time.time', ([], {}), '()\n', (18330, 18332), False, 'import time\n'), ((8972, 8992), 'math.log10', 'math.log10', (['(2 ** 0.5)'], {}), '(2 ** 0.5)\n', (8982, 8992), False, 'import math\n'), ((9776, 9795), 'dsp.get_rms', 'get_rms', (['audio_data'], {}), '(audio_data)\n', (9783, 9795), False, 'from dsp import get_rms\n'), ((12291, 12327), 'struct.unpack', 'struct.unpack', (['data_format', 'rec_data'], {}), '(data_format, rec_data)\n', (12304, 12327), False, 'import struct\n'), ((13538, 13549), 'time.time', 'time.time', ([], {}), '()\n', (13547, 13549), False, 'import time\n'), ((8915, 8961), 'math.pow', 'math.pow', (['(sum_squares_global / self.chunk)', '(0.5)'], {}), '(sum_squares_global / self.chunk, 0.5)\n', (8923, 8961), False, 'import math\n'), ((18094, 18105), 'time.time', 'time.time', ([], {}), '()\n', (18103, 18105), False, 'import time\n'), ((13294, 13305), 'time.time', 'time.time', ([], {}), '()\n', (13303, 13305), False, 'import time\n'), ((18225, 18236), 'time.time', 'time.time', ([], {}), '()\n', (18234, 
18236), False, 'import time\n'), ((13437, 13448), 'time.time', 'time.time', ([], {}), '()\n', (13446, 13448), False, 'import time\n'), ((8616, 8636), 'math.log10', 'math.log10', (['(2 ** 0.5)'], {}), '(2 ** 0.5)\n', (8626, 8636), False, 'import math\n'), ((17912, 17932), 'math.log10', 'math.log10', (['(2 ** 0.5)'], {}), '(2 ** 0.5)\n', (17922, 17932), False, 'import math\n'), ((8566, 8605), 'math.pow', 'math.pow', (['(sum_squares / self.chunk)', '(0.5)'], {}), '(sum_squares / self.chunk, 0.5)\n', (8574, 8605), False, 'import math\n'), ((13095, 13115), 'math.log10', 'math.log10', (['(2 ** 0.5)'], {}), '(2 ** 0.5)\n', (13105, 13115), False, 'import math\n'), ((17862, 17901), 'math.pow', 'math.pow', (['(sum_squares / self.chunk)', '(0.5)'], {}), '(sum_squares / self.chunk, 0.5)\n', (17870, 17901), False, 'import math\n'), ((13045, 13084), 'math.pow', 'math.pow', (['(sum_squares / self.chunk)', '(0.5)'], {}), '(sum_squares / self.chunk, 0.5)\n', (13053, 13084), False, 'import math\n')] |
import pytest
import torch
import numpy as np
# Public fixture names re-exported via `from <this module> import *`,
# grouped into fixture inputs and expected-output shapes.
__all__ = [
    # input
    "schnet_batch",
    "max_atoms_in_batch",
    "neighbors",
    "neighbor_mask",
    "positions",
    "cell",
    "cell_offset",
    "r_ij",
    "f_ij",
    "random_atomic_env",
    "random_interatomic_distances",
    "random_input_dim",
    "random_output_dim",
    "random_shape",
    "random_float_input",
    "random_int_input",
    # output
    "schnet_output_shape",
    "interaction_output_shape",
    "cfconv_output_shape",
    "gaussian_smearing_shape",
]
# inputs
# from data
@pytest.fixture
def schnet_batch(example_loader):
    """First batch drawn from the example data loader."""
    batch_iterator = iter(example_loader)
    return next(batch_iterator)
# components of batch
@pytest.fixture
def max_atoms_in_batch(schnet_batch):
    """Size of the second axis of the batch's positions tensor."""
    atom_positions = schnet_batch["_positions"]
    return atom_positions.shape[1]
@pytest.fixture
def neighbors(schnet_batch):
    """Neighbor-index tensor of the example batch."""
    neighbor_indices = schnet_batch["_neighbors"]
    return neighbor_indices
@pytest.fixture
def neighbor_mask(schnet_batch):
    """Neighbor mask tensor of the example batch."""
    mask = schnet_batch["_neighbor_mask"]
    return mask
@pytest.fixture
def positions(schnet_batch):
    """Atom positions tensor of the example batch."""
    atom_positions = schnet_batch["_positions"]
    return atom_positions
@pytest.fixture
def cell(schnet_batch):
    """Unit-cell tensor of the example batch."""
    unit_cell = schnet_batch["_cell"]
    return unit_cell
@pytest.fixture
def cell_offset(schnet_batch):
    """Cell-offset tensor of the example batch."""
    offsets = schnet_batch["_cell_offset"]
    return offsets
@pytest.fixture
def r_ij(atom_distances, positions, neighbors, cell, cell_offset, neighbor_mask):
    """Interatomic distances produced by the atom_distances layer."""
    distances = atom_distances(positions, neighbors, cell, cell_offset, neighbor_mask)
    return distances
@pytest.fixture
def f_ij(gaussion_smearing_layer, r_ij):
    """Distances expanded by the Gaussian smearing layer."""
    expanded = gaussion_smearing_layer(r_ij)
    return expanded
@pytest.fixture
def random_atomic_env(batch_size, max_atoms_in_batch, n_filters):
    """Random float tensor of shape (batch, atoms, filters)."""
    shape = (batch_size, max_atoms_in_batch, n_filters)
    return torch.rand(shape)
@pytest.fixture
def random_interatomic_distances(batch_size, max_atoms_in_batch, cutoff):
    """Random distances in the half-open interval (0, 2 * cutoff]."""
    shape = (batch_size, max_atoms_in_batch, max_atoms_in_batch - 1)
    inverted = 1 - torch.rand(shape)
    return inverted * 2 * cutoff
@pytest.fixture
def random_input_dim(random_shape):
    """Last entry of the random shape."""
    *_, last_dim = random_shape
    return last_dim
@pytest.fixture
def random_output_dim():
    """Random output dimension drawn from [1, 20)."""
    drawn = np.random.randint(1, 20, 1)
    return drawn.item()
@pytest.fixture
def random_shape():
    """Random 3-entry shape with each dimension in [1, 8)."""
    dims = np.random.randint(1, 8, 3)
    return list(dims)
@pytest.fixture
def random_float_input(random_shape):
    """Random float32 tensor with the random shape."""
    float_tensor = torch.rand(random_shape, dtype=torch.float32)
    return float_tensor
@pytest.fixture
def random_int_input(random_shape):
    """Random integer tensor with values in [0, 20)."""
    low, high = 0, 20
    return torch.randint(low, high, random_shape)
# outputs
# spk.representation
@pytest.fixture
def schnet_output_shape(batch_size, max_atoms_in_batch, n_atom_basis):
    """Shape expected from the SchNet representation output."""
    expected = [batch_size, max_atoms_in_batch, n_atom_basis]
    return expected
@pytest.fixture
def interaction_output_shape(batch_size, max_atoms_in_batch, n_filters):
    """Shape expected from an interaction block output."""
    expected = [batch_size, max_atoms_in_batch, n_filters]
    return expected
@pytest.fixture
def cfconv_output_shape(batch_size, max_atoms_in_batch, n_atom_basis):
    """Shape expected from the cfconv layer output."""
    expected = [batch_size, max_atoms_in_batch, n_atom_basis]
    return expected
# spk.nn
@pytest.fixture
def gaussian_smearing_shape(batch_size, max_atoms_in_batch, n_gaussians):
    """Shape expected from the Gaussian smearing output."""
    expected = [batch_size, max_atoms_in_batch, max_atoms_in_batch - 1, n_gaussians]
    return expected
| [
"torch.randint",
"numpy.random.randint",
"torch.rand"
] | [((1579, 1634), 'torch.rand', 'torch.rand', (['(batch_size, max_atoms_in_batch, n_filters)'], {}), '((batch_size, max_atoms_in_batch, n_filters))\n', (1589, 1634), False, 'import torch\n'), ((2178, 2223), 'torch.rand', 'torch.rand', (['random_shape'], {'dtype': 'torch.float32'}), '(random_shape, dtype=torch.float32)\n', (2188, 2223), False, 'import torch\n'), ((2289, 2323), 'torch.randint', 'torch.randint', (['(0)', '(20)', 'random_shape'], {}), '(0, 20, random_shape)\n', (2302, 2323), False, 'import torch\n'), ((2083, 2109), 'numpy.random.randint', 'np.random.randint', (['(1)', '(8)', '(3)'], {}), '(1, 8, 3)\n', (2100, 2109), True, 'import numpy as np\n'), ((1994, 2021), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)', '(1)'], {}), '(1, 20, 1)\n', (2011, 2021), True, 'import numpy as np\n'), ((1753, 1821), 'torch.rand', 'torch.rand', (['(batch_size, max_atoms_in_batch, max_atoms_in_batch - 1)'], {}), '((batch_size, max_atoms_in_batch, max_atoms_in_batch - 1))\n', (1763, 1821), False, 'import torch\n')] |
import itertools
import numpy as np
from ...data.materials.CompositionEntry import CompositionEntry
class OxidationStateGuesser:
    """Guesses plausible oxidation states for a material composition.

    Charge-balanced assignments are enumerated from the known oxidation
    states of each element and ranked so that more electronegative elements
    tend to carry the more negative charges.

    Attributes
    ----------
    electronegativity : array-like
        Electronegativity value (float) for every element.
    oxidationstates : array-like
        2-D numpy array of known oxidation states for every element.
    """
    electronegativity = np.zeros(0)
    oxidationstates = np.zeros(0, dtype=object)
    def set_electronegativity(self, values):
        """Store the per-element electronegativity values.

        Parameters
        ----------
        values : array-like
            Numpy array with one electronegativity value per element.
        """
        self.electronegativity = values
    def set_oxidationstates(self, values):
        """Store the per-element oxidation-state table.

        Parameters
        ----------
        values : array-like
            2-D numpy array with the known oxidation states per element.
        """
        self.oxidationstates = values
    def get_possible_states(self, entry):
        """Enumerate charge-balanced oxidation-state assignments for `entry`.

        Candidates are ordered by sum_{i<j} (chi_i - chi_j) * (c_i - c_j),
        which favors assignments where the more electronegative element is
        the more negatively charged one.

        Parameters
        ----------
        entry : CompositionEntry
            Composition to analyze.

        Returns
        -------
        output : array-like
            Numpy array of candidate states, best-ranked first.

        Raises
        ------
        ValueError
            If `entry` is empty or not a CompositionEntry, or if the
            electronegativity / oxidation-state tables are not set.
        """
        if not entry:
            raise ValueError("Input argument cannot be empty. Please pass a "
                             "valid argument.")
        if not isinstance(entry, CompositionEntry):
            raise ValueError("Entry must be of type CompositionEntry.")
        if not self.electronegativity.size or not self.oxidationstates.size:
            raise ValueError("Electronegativity or OxidationStates values are "
                             "not initialized. Set them and try again.")
        elem_ids = entry.get_element_ids()
        elem_fracs = entry.get_element_fractions()
        # A single-element composition cannot be charge balanced.
        if len(elem_ids) == 1:
            return np.asarray([])
        per_element = [self.oxidationstates[eid] for eid in elem_ids]
        # Keep every combination whose fraction-weighted total charge is
        # numerically zero.
        balanced = [list(combo) for combo in itertools.product(*per_element)
                    if abs(np.dot(combo, elem_fracs)) < 1E-6]
        if len(balanced) < 2:
            return np.asarray(balanced)
        # Score each candidate by the electronegativity-weighted charge spread.
        scores = []
        for state in balanced:
            score = 0.0
            for i in range(len(state)):
                for j in range(i + 1, len(state)):
                    score += (self.electronegativity[elem_ids[i]] -
                              self.electronegativity[elem_ids[j]]) * (
                                  state[i] - state[j])
            scores.append(score)
        # Sorting (score, state) pairs keeps the original tie-breaking on the
        # state lists themselves.
        ordered = sorted(zip(scores, balanced))
        return np.asarray([state for _, state in ordered])
"numpy.dot",
"numpy.asarray",
"numpy.zeros",
"itertools.product"
] | [((494, 505), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (502, 505), True, 'import numpy as np\n'), ((528, 553), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'object'}), '(0, dtype=object)\n', (536, 553), True, 'import numpy as np\n'), ((3609, 3635), 'itertools.product', 'itertools.product', (['*states'], {}), '(*states)\n', (3626, 3635), False, 'import itertools\n'), ((4619, 4637), 'numpy.asarray', 'np.asarray', (['output'], {}), '(output)\n', (4629, 4637), True, 'import numpy as np\n'), ((3325, 3339), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (3335, 3339), True, 'import numpy as np\n'), ((3658, 3683), 'numpy.dot', 'np.dot', (['state', 'elem_fracs'], {}), '(state, elem_fracs)\n', (3664, 3683), True, 'import numpy as np\n'), ((3907, 3934), 'numpy.asarray', 'np.asarray', (['possible_states'], {}), '(possible_states)\n', (3917, 3934), True, 'import numpy as np\n')] |
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: compute_ell_model.py
# Authors: <NAME>
#
# Requires: Python 3.x
#
###################################################################################################
import os
import sys
import numpy as np
try:
# try and import ELL from site_packages
import ell
except:
# try and use ELL_ROOT environment variable to find ELL.
ell_root = os.getenv("ELL_ROOT")
if not ell_root:
raise Exception("Please set your ELL_ROOT environment variable")
sys.path += [os.path.join(ell_root, "build", "interfaces", "python", "package")]
import ell
class ComputeModel:
    """Wraps a compiled .ell model, exposing its Compute function through
    predict/transform methods."""
    def __init__(self, model_path):
        """Load the .ell model at `model_path` and set up state buffers."""
        self.model_path = model_path
        self.map = ell.model.Map(model_path)
        self.input_shape = self.map.GetInputShape()
        self.output_shape = self.map.GetOutputShape()
        self.state_size = 0
        # Two inputs and two outputs suggest a recurrent model (e.g. FastGRNN)
        # that threads an external hidden state through an extra input/output.
        has_external_state = self.map.NumInputs() == 2 and self.map.NumOutputs() == 2
        if has_external_state:
            self.input_size = self.input_shape.Size()
            self.output_size = self.output_shape.Size()
            self.state_size = self.map.GetInputShape(1).Size()
            self.output_buffer = ell.math.FloatVector(self.output_size)
            self.hidden_state = ell.math.FloatVector(self.state_size)
            self.new_state = ell.math.FloatVector(self.state_size)
    def predict(self, x):
        """Alias for transform()."""
        return self.transform(x)
    def transform(self, x):
        """Run the model on input array `x`; return the output as a numpy array."""
        if self.state_size:
            # Stateful path: feed the current hidden state and carry the
            # updated one forward for the next call.
            features = ell.math.FloatVector(x)
            self.map.ComputeMultiple([features, self.hidden_state],
                                     [self.output_buffer, self.new_state])
            self.hidden_state.copy_from(self.new_state)
            result = self.output_buffer
        else:
            result = self.map.Compute(x)
        return np.array(result)
    def reset(self):
        """Reset the model, clearing any external hidden state."""
        self.map.Reset()
        if self.state_size:
            self.hidden_state = ell.math.FloatVector(self.state_size)
    def get_metadata(self, name):
        """Look up metadata `name` on the map, then the model, then each node."""
        model = self.map.GetModel()
        for owner in (self.map, model):
            value = owner.GetMetadataValue(name)
            if value:
                return value
        node_iter = model.GetNodes()
        while node_iter.IsValid():
            value = node_iter.Get().GetMetadataValue(name)
            if value:
                return value
            node_iter.Next()
        return None
| [
"ell.model.Map",
"numpy.array",
"ell.math.FloatVector",
"os.path.join",
"os.getenv"
] | [((519, 540), 'os.getenv', 'os.getenv', (['"""ELL_ROOT"""'], {}), "('ELL_ROOT')\n", (528, 540), False, 'import os\n'), ((955, 980), 'ell.model.Map', 'ell.model.Map', (['model_path'], {}), '(model_path)\n', (968, 980), False, 'import ell\n'), ((2238, 2255), 'numpy.array', 'np.array', (['out_vec'], {}), '(out_vec)\n', (2246, 2255), True, 'import numpy as np\n'), ((652, 718), 'os.path.join', 'os.path.join', (['ell_root', '"""build"""', '"""interfaces"""', '"""python"""', '"""package"""'], {}), "(ell_root, 'build', 'interfaces', 'python', 'package')\n", (664, 718), False, 'import os\n'), ((1463, 1501), 'ell.math.FloatVector', 'ell.math.FloatVector', (['self.output_size'], {}), '(self.output_size)\n', (1483, 1501), False, 'import ell\n'), ((1534, 1571), 'ell.math.FloatVector', 'ell.math.FloatVector', (['self.state_size'], {}), '(self.state_size)\n', (1554, 1571), False, 'import ell\n'), ((1601, 1638), 'ell.math.FloatVector', 'ell.math.FloatVector', (['self.state_size'], {}), '(self.state_size)\n', (1621, 1638), False, 'import ell\n'), ((1947, 1970), 'ell.math.FloatVector', 'ell.math.FloatVector', (['x'], {}), '(x)\n', (1967, 1970), False, 'import ell\n'), ((2401, 2438), 'ell.math.FloatVector', 'ell.math.FloatVector', (['self.state_size'], {}), '(self.state_size)\n', (2421, 2438), False, 'import ell\n')] |
import argparse
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from qutip import identity, sigmax, sigmaz, sigmay, tensor
from qutip.qip.operations.gates import cnot
sys.path.append("../..")
from tools import *
parser = argparse.ArgumentParser()
# name of example
parser.add_argument('--name', help='example name', type=str, default='CNOTBi')
# evolution time
parser.add_argument('--evo_time', help='evolution time', type=float, default=10)
# time steps
parser.add_argument('--n_ts', help='time steps', type=int, default=200)
# initial control file
parser.add_argument('--initial_control', help='file name of initial control', type=str, default=None)
# if sos1 property holds
parser.add_argument('--sos1', help='sos1 property holds or not', type=int, default=0)
# rounding type
parser.add_argument('--type', help='type of rounding (SUR, minup, maxswitch)', type=str, default='SUR')
# minimum up time steps
parser.add_argument('--min_up', help='minimum up time steps', type=int, default=10)
# maximum number of switches
parser.add_argument('--max_switch', help='maximum number of switches', type=int, default=10)
# time limit for rounding by Gurobi
parser.add_argument('--time_limit', help='time limit for rounding by Gurobi', type=int, default=60)
args = parser.parse_args()

# a two-qubit system with target control mode as CNOT gate with summation one constraint
# Drift Hamiltonian
H_d = tensor(sigmax(), sigmax()) + tensor(sigmay(), sigmay()) + tensor(sigmaz(), sigmaz()) \
      + tensor(sigmay(), identity(2))
# The control Hamiltonians (Qobj classes)
H_c = [tensor(sigmax(), identity(2)) - tensor(sigmay(), identity(2))]
# start point for the gate evolution
X_0 = identity(4)
# Target for the gate evolution
X_targ = cnot()
# objective value type
obj_type = "UNIT"

# make sure all output folders exist
for _folder in ("../output/Rounding/", "../control/Rounding/", "../figure/Rounding/"):
    if not os.path.exists(_folder):
        os.makedirs(_folder)

# derive all output file names from the initial-control file name
base_name = args.initial_control.split('/')[-1].split('.csv')[0]
output_fig = "../figure/Rounding/" + base_name
if args.type == "SUR":
    output_num = "../output/Rounding/" + base_name + "_" + str(args.sos1) + "_SUR.log"
    output_control = "../control/Rounding/" + base_name + "_" + str(args.sos1) + "_SUR.csv"
if args.type == "minup":
    output_num = "../output/Rounding/" + base_name + "_minup" + str(args.min_up) + "_" + str(args.sos1) + ".log"
    output_control = "../control/Rounding/" + base_name + "_minup" + str(args.min_up) + "_" + str(args.sos1) + ".csv"
if args.type == "maxswitch":
    output_num = "../output/Rounding/" + base_name + "_maxswitch" + str(args.max_switch) + "_" + str(args.sos1) + ".log"
    output_control = "../control/Rounding/" + base_name + "_maxswitch" + str(args.max_switch) + "_" + str(args.sos1) + ".csv"

# round the continuous relaxation to a binary control
b_rel = np.loadtxt(args.initial_control, delimiter=',')
rounder = Rounding()  # BUG FIX: was named `round`, shadowing the builtin round()
rounder.build_rounding_optimizer(b_rel, args.evo_time, args.n_ts, args.type, args.min_up, args.max_switch,
                                 time_limit=args.time_limit, out_fig=output_fig)
if args.sos1 == 1:
    b_bin, c_time = rounder.rounding_with_sos1()
else:
    b_bin, c_time = rounder.rounding_without_sos1()

# evaluate the rounded (binary) control
h_d_mat = (tensor(sigmax(), sigmax()) + tensor(sigmay(), sigmay()) + tensor(sigmaz(), sigmaz())).full()
# NOTE(review): h_d_mat omits the tensor(sigmay(), identity(2)) term present in H_d — confirm intentional
h_c_mat = [tensor(sigmax(), identity(2)).full(), tensor(sigmay(), identity(2)).full()]
bin_result = time_evolution(h_d_mat, h_c_mat, args.n_ts, args.evo_time, b_bin, X_0.full(), False, 1)
# context manager guarantees the log file is closed even if a print fails
with open(output_num, "w+") as f:
    print("computational time", c_time, file=f)
    print("original objective", compute_obj_fid(X_targ, bin_result), file=f)
    print("total tv norm", compute_TV_norm(b_bin), file=f)
np.savetxt(output_control, b_bin, delimiter=',')
| [
"sys.path.append",
"qutip.sigmaz",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.savetxt",
"os.path.exists",
"qutip.sigmax",
"qutip.identity",
"qutip.sigmay",
"numpy.loadtxt",
"qutip.qip.operations.gates.cnot"
] | [((202, 226), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (217, 226), False, 'import sys\n'), ((260, 285), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (283, 285), False, 'import argparse\n'), ((2011, 2022), 'qutip.identity', 'identity', (['(4)'], {}), '(4)\n', (2019, 2022), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((2066, 2072), 'qutip.qip.operations.gates.cnot', 'cnot', ([], {}), '()\n', (2070, 2072), False, 'from qutip.qip.operations.gates import cnot\n'), ((3598, 3645), 'numpy.loadtxt', 'np.loadtxt', (['args.initial_control'], {'delimiter': '""","""'}), "(args.initial_control, delimiter=',')\n", (3608, 3645), True, 'import numpy as np\n'), ((4596, 4644), 'numpy.savetxt', 'np.savetxt', (['output_control', 'b_bin'], {'delimiter': '""","""'}), "(output_control, b_bin, delimiter=',')\n", (4606, 4644), True, 'import numpy as np\n'), ((2128, 2165), 'os.path.exists', 'os.path.exists', (['"""../output/Rounding/"""'], {}), "('../output/Rounding/')\n", (2142, 2165), False, 'import os\n'), ((2172, 2206), 'os.makedirs', 'os.makedirs', (['"""../output/Rounding/"""'], {}), "('../output/Rounding/')\n", (2183, 2206), False, 'import os\n'), ((2215, 2253), 'os.path.exists', 'os.path.exists', (['"""../control/Rounding/"""'], {}), "('../control/Rounding/')\n", (2229, 2253), False, 'import os\n'), ((2260, 2295), 'os.makedirs', 'os.makedirs', (['"""../control/Rounding/"""'], {}), "('../control/Rounding/')\n", (2271, 2295), False, 'import os\n'), ((2304, 2341), 'os.path.exists', 'os.path.exists', (['"""../figure/Rounding/"""'], {}), "('../figure/Rounding/')\n", (2318, 2341), False, 'import os\n'), ((2348, 2382), 'os.makedirs', 'os.makedirs', (['"""../figure/Rounding/"""'], {}), "('../figure/Rounding/')\n", (2359, 2382), False, 'import os\n'), ((1872, 1880), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (1878, 1880), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), 
((1882, 1893), 'qutip.identity', 'identity', (['(2)'], {}), '(2)\n', (1890, 1893), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1834, 1842), 'qutip.sigmaz', 'sigmaz', ([], {}), '()\n', (1840, 1842), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1844, 1852), 'qutip.sigmaz', 'sigmaz', ([], {}), '()\n', (1850, 1852), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1910, 1918), 'qutip.sigmax', 'sigmax', ([], {}), '()\n', (1916, 1918), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1920, 1931), 'qutip.identity', 'identity', (['(2)'], {}), '(2)\n', (1928, 1931), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1942, 1950), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (1948, 1950), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1952, 1963), 'qutip.identity', 'identity', (['(2)'], {}), '(2)\n', (1960, 1963), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1776, 1784), 'qutip.sigmax', 'sigmax', ([], {}), '()\n', (1782, 1784), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1786, 1794), 'qutip.sigmax', 'sigmax', ([], {}), '()\n', (1792, 1794), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1805, 1813), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (1811, 1813), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((1815, 1823), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (1821, 1823), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4159, 4167), 'qutip.sigmaz', 'sigmaz', ([], {}), '()\n', (4165, 4167), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4169, 4177), 'qutip.sigmaz', 'sigmaz', ([], {}), '()\n', (4175, 4177), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4206, 4214), 'qutip.sigmax', 'sigmax', ([], {}), '()\n', (4212, 4214), False, 
'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4216, 4227), 'qutip.identity', 'identity', (['(2)'], {}), '(2)\n', (4224, 4227), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4244, 4252), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (4250, 4252), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4254, 4265), 'qutip.identity', 'identity', (['(2)'], {}), '(2)\n', (4262, 4265), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4101, 4109), 'qutip.sigmax', 'sigmax', ([], {}), '()\n', (4107, 4109), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4111, 4119), 'qutip.sigmax', 'sigmax', ([], {}), '()\n', (4117, 4119), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4130, 4138), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (4136, 4138), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n'), ((4140, 4148), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (4146, 4148), False, 'from qutip import identity, sigmax, sigmaz, sigmay, tensor\n')] |
# This script is used to choose a mask.
import cv2
import numpy as np
import freenect
def get_video():
    """Grab one frame from the Kinect and convert it from RGB to BGR."""
    rgb_frame = freenect.sync_get_video()[0]
    return rgb_frame[:, :, ::-1]
def nothing(x):
    """No-op trackbar callback (OpenCV requires a callable)."""
    return None
# Creating a window for later use
# Creating a window for later use
cv2.namedWindow('result')

# Creating track bars for the lower/upper HSV bounds
cv2.createTrackbar('hmin', 'result', 0, 179, nothing)
cv2.createTrackbar('smin', 'result', 0, 255, nothing)
cv2.createTrackbar('vmin', 'result', 0, 255, nothing)
cv2.createTrackbar('hmax', 'result', 179, 179, nothing)
cv2.createTrackbar('smax', 'result', 255, 255, nothing)
cv2.createTrackbar('vmax', 'result', 255, 255, nothing)

while True:  # idiomatic infinite loop (was `while(1)`)
    frame = get_video()  # BUG FIX: was `frame=frame=get_video()` double assignment
    # converting to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # read the current bounds from the track bars
    hmin = cv2.getTrackbarPos('hmin', 'result')
    smin = cv2.getTrackbarPos('smin', 'result')
    vmin = cv2.getTrackbarPos('vmin', 'result')
    hmax = cv2.getTrackbarPos('hmax', 'result')
    smax = cv2.getTrackbarPos('smax', 'result')
    vmax = cv2.getTrackbarPos('vmax', 'result')
    # keep only pixels inside the selected HSV range
    lower_blue = np.array([hmin, smin, vmin])
    upper_blue = np.array([hmax, smax, vmax])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('result', result)
    # Esc key quits (parentheses required: `==` binds tighter than `&`)
    if (cv2.waitKey(5) & 0xFF) == 27:
        break
cv2.destroyAllWindows()
| [
"cv2.createTrackbar",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"freenect.sync_get_video",
"numpy.array",
"cv2.getTrackbarPos",
"cv2.destroyAllWindows",
"cv2.inRange",
"cv2.namedWindow"
] | [((249, 274), 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""'], {}), "('result')\n", (264, 274), False, 'import cv2\n'), ((377, 430), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""hmin"""', '"""result"""', '(0)', '(179)', 'nothing'], {}), "('hmin', 'result', 0, 179, nothing)\n", (395, 430), False, 'import cv2\n'), ((429, 482), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""smin"""', '"""result"""', '(0)', '(255)', 'nothing'], {}), "('smin', 'result', 0, 255, nothing)\n", (447, 482), False, 'import cv2\n'), ((481, 534), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""vmin"""', '"""result"""', '(0)', '(255)', 'nothing'], {}), "('vmin', 'result', 0, 255, nothing)\n", (499, 534), False, 'import cv2\n'), ((535, 590), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""hmax"""', '"""result"""', '(179)', '(179)', 'nothing'], {}), "('hmax', 'result', 179, 179, nothing)\n", (553, 590), False, 'import cv2\n'), ((589, 644), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""smax"""', '"""result"""', '(255)', '(255)', 'nothing'], {}), "('smax', 'result', 255, 255, nothing)\n", (607, 644), False, 'import cv2\n'), ((643, 698), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""vmax"""', '"""result"""', '(255)', '(255)', 'nothing'], {}), "('vmax', 'result', 255, 255, nothing)\n", (661, 698), False, 'import cv2\n'), ((1501, 1524), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1522, 1524), False, 'import cv2\n'), ((779, 817), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (791, 817), False, 'import cv2\n'), ((881, 917), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""hmin"""', '"""result"""'], {}), "('hmin', 'result')\n", (899, 917), False, 'import cv2\n'), ((929, 965), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""smin"""', '"""result"""'], {}), "('smin', 'result')\n", (947, 965), False, 'import cv2\n'), ((977, 1013), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""vmin"""', 
'"""result"""'], {}), "('vmin', 'result')\n", (995, 1013), False, 'import cv2\n'), ((1027, 1063), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""hmax"""', '"""result"""'], {}), "('hmax', 'result')\n", (1045, 1063), False, 'import cv2\n'), ((1075, 1111), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""smax"""', '"""result"""'], {}), "('smax', 'result')\n", (1093, 1111), False, 'import cv2\n'), ((1123, 1159), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""vmax"""', '"""result"""'], {}), "('vmax', 'result')\n", (1141, 1159), False, 'import cv2\n'), ((1211, 1239), 'numpy.array', 'np.array', (['[hmin, smin, vmin]'], {}), '([hmin, smin, vmin])\n', (1219, 1239), True, 'import numpy as np\n'), ((1256, 1284), 'numpy.array', 'np.array', (['[hmax, smax, vmax]'], {}), '([hmax, smax, vmax])\n', (1264, 1284), True, 'import numpy as np\n'), ((1297, 1337), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_blue', 'upper_blue'], {}), '(hsv, lower_blue, upper_blue)\n', (1308, 1337), False, 'import cv2\n'), ((1353, 1393), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (1368, 1393), False, 'import cv2\n'), ((1401, 1429), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'result'], {}), "('result', result)\n", (1411, 1429), False, 'import cv2\n'), ((1440, 1454), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (1451, 1454), False, 'import cv2\n'), ((123, 148), 'freenect.sync_get_video', 'freenect.sync_get_video', ([], {}), '()\n', (146, 148), False, 'import freenect\n')] |
"""
This software is an implementation of
Deep MRI brain extraction: A 3D convolutional neural network for skull stripping
You can download the paper at http://dx.doi.org/10.1016/j.neuroimage.2016.01.024
If you use this software for your projects please cite:
Kleesiek and Urban et al, Deep MRI brain extraction: A 3D convolutional neural network for skull stripping,
NeuroImage, Volume 129, April 2016, Pages 460-469.
The MIT License (MIT)
Copyright (c) 2016 <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import theano
import numpy as np
def Gregs_Regularizer(cnn, num_classes):
    """Sum the per-layer Gregs regularizer over every layer except the output one."""
    print("Experimental Gregs_Regularizer:: enabled")
    # assert bExperimental_Semanic_Hashing_enforcer==0, "dont use both!"
    assert num_classes is not None, "You need to provide a value for <num_classes> when using 'Gregs_Regularizer' in CompileOutputFunctions()"
    # currently only exists for MLPs
    return sum(layer.Gregs_Regularizer(cnn.y, num_classes) for layer in cnn.layers[:-1])
def add_balanced_regularizers_to_NLL(cnn, regularizers=(), relative_weightings=()):
    """
    Add the given regularizers to the network's loss, each scaled by an adaptive
    shared factor so that it keeps the relative magnitude given in
    <relative_weightings> with respect to the NLL.

    Pass all regularizers as elements of a list/tuple, each a symbolic theano
    variable. You may _almost_ forget to worry about them from now on (but
    remember to call cnn.Gregs_Regularizer_balance(data, labels) every x
    weight-update steps [x may be in the order of 10 or 100]).
    """
    # BUG FIX: isinstance() requires a *tuple* of types; a list raises TypeError.
    assert isinstance(regularizers, (list, tuple))
    assert len(regularizers) == len(relative_weightings), "Nope, try again."
    print("Experimental Gregs_Regularizer_balance:: enabled [You need to call CNN.Gregs_Regularizer_balance(_data_, _labels_, ...) initially and then also from time to time, to update the internal factor(s)!]")
    # one adaptive scale factor (theano shared variable) per regularizer
    cnn.Gregs_Regularizer_balance__current_factors = [theano.shared(np.float32(1e-5)) for _ in relative_weightings]
    cnn.Gregs_Regularizer_balance__relative_weightings = relative_weightings
    cnn._Gregs_Regularizer_balancer_loss_getter = theano.function(
        [cnn.x, cnn.y], [cnn.output_layer_Loss] + list(regularizers))

    def Gregs_Regularizer_balancer(*args):
        """Recompute each scale factor from the current NLL / regularizer losses."""
        ret = cnn._Gregs_Regularizer_balancer_loss_getter(*args)
        original_nll_, reg_losses = ret[0], ret[1:]
        # BUG FIX: the original iterated without the per-regularizer losses and
        # called get_value()/set_value() on the *list* of factors instead of the
        # individual shared variable.
        for theano_fact, rel_wei, reg_loss in zip(
                cnn.Gregs_Regularizer_balance__current_factors,
                cnn.Gregs_Regularizer_balance__relative_weightings,
                reg_losses):
            # target factor so that |factor * reg_loss| == rel_wei * |NLL|
            newv = rel_wei * (abs(original_nll_) / (abs(reg_loss) + 1e-11))
            # exponential smoothing of the factor update
            newv = 0.8 * theano_fact.get_value() + 0.2 * newv
            theano_fact.set_value(np.float32(newv))

    cnn.Gregs_Regularizer_balance = Gregs_Regularizer_balancer
    # BUG FIX: the original added <factor list> * <undefined name `reg`>; add each
    # regularizer scaled by its own shared factor instead.
    for factor, regularizer in zip(cnn.Gregs_Regularizer_balance__current_factors, regularizers):
        cnn.output_layer_Loss = cnn.output_layer_Loss + factor * regularizer
| [
"numpy.float32",
"theano.function"
] | [((3087, 3158), 'theano.function', 'theano.function', (['[cnn.x, cnn.y]', '([cnn.output_layer_Loss] + regularizers)'], {}), '([cnn.x, cnn.y], [cnn.output_layer_Loss] + regularizers)\n', (3102, 3158), False, 'import theano\n'), ((2905, 2922), 'numpy.float32', 'np.float32', (['(1e-05)'], {}), '(1e-05)\n', (2915, 2922), True, 'import numpy as np\n'), ((4115, 4131), 'numpy.float32', 'np.float32', (['newv'], {}), '(newv)\n', (4125, 4131), True, 'import numpy as np\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: dev/04_data_augmentation.ipynb (unless otherwise specified).
__all__ = ['Rescale', 'RandomCrop', 'ToTensor', 'Normalize', 'RandomHorizontalFlip', 'AddRandomBrightness']
# Cell
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from skimage import io, transform
from torchvision import transforms

from .dataset_ucf101 import UCF101
from .avi import AVI
# Cell
class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, the smaller image edge is matched
            to output_size, keeping the aspect ratio.
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # scale the shorter edge to output_size, preserving aspect ratio
            if h > w:
                target = (self.output_size * h / w, self.output_size)
            else:
                target = (self.output_size, self.output_size * w / h)
        else:
            target = self.output_size
        new_h, new_w = int(target[0]), int(target[1])
        # transform.resize is used because cv2.resize fails inside a DataLoader
        img = transform.resize(image, (new_h, new_w))
        return {'image': img, 'label': label}
# Cell
class RandomCrop(object):
    """Crop randomly the image in a sample with probability `p_crop`.

    NOTE: Output does not keep the same aspect ratio.

    Args:
        out_ratio: lower bound of the crop-size fraction per axis.
        p_crop: probability of actually applying the crop.
    """

    def __init__(self, out_ratio=0.7, p_crop=0.5):
        self.out_ratio = out_ratio
        self.p_crop = p_crop

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        height, width = image.shape[:2]
        # per-axis scale factors drawn uniformly from [out_ratio, 1)
        scale_w = (1 - self.out_ratio) * np.random.rand() + self.out_ratio
        scale_h = (1 - self.out_ratio) * np.random.rand() + self.out_ratio
        crop_w = int(width * scale_w)
        crop_h = int(height * scale_h)
        # top-left corner of the crop window
        left = np.random.randint(width - crop_w)
        top = np.random.randint(height - crop_h)
        if np.random.uniform() <= self.p_crop:
            image = image[top:(top + crop_h), left:(left + crop_w), :]
        return {'image': image, 'label': label}
# Cell
class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        if not isinstance(label, np.ndarray):
            # BUG FIX: np.ndarray(label) allocates an *uninitialized* array with
            # shape == label; np.asarray wraps the value itself.
            label = np.asarray(label)
        # swap color axis because
        # numpy image: H x W x C
        # torch image: C X H X W
        image = image.transpose((2, 0, 1))
        return {'image': torch.from_numpy(image).float(),
                'label': torch.from_numpy(label)}
# Cell
class Normalize(object):
    """Normalise an image to zero mean / unit std, channel-wise.

    Args:
        mean, std: channel statistics. Default values based on the default
        pytorch pretrained ResNet-50 model.
        old values:
        - mean: np.asarray([0.485, 0.456, 0.406]
        - std : np.asarray([0.229, 0.224, 0.225]
    """

    def __init__(self, mean=np.asarray([0.433, 0.4045, 0.3776], np.float32),
                 std=np.asarray([0.1519876, 0.14855877, 0.156976], np.float32)):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # pixels come in as 0-255; the pretrained backbone expects unit scale
        scaled = image / 255.0
        return {'image': (scaled - self.mean) / self.std, 'label': label}
# Cell
class RandomHorizontalFlip(object):
    """Flip the sample image left-right with probability `p_flip`."""

    def __init__(self, p_flip=.5):
        self.p = p_flip

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        do_flip = np.random.uniform() <= self.p
        if do_flip:
            image = cv2.flip(image, 1)  # flag 1 == flip around the vertical axis
        return {'image': image, 'label': label}
# Cell
class AddRandomBrightness(object):
    """Shift image brightness by a random offset in [-b_offset, b_offset].

    Pixel values are clipped back into the valid [0, 255] range afterwards.
    NOTE: the image array is modified in place (`+=`).
    """

    def __init__(self, b_offset=15):
        self.brightness = b_offset * 2  # total span of the random draw

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        offset = np.random.randint(self.brightness + 1) - self.brightness / 2.0
        image += offset
        # saturate out-of-range pixels
        image[image > 255] = 255.0
        image[image < 0] = 0.0
        return {'image': image, 'label': label}
"numpy.random.uniform",
"numpy.asarray",
"numpy.random.randint",
"skimage.transform.resize",
"numpy.random.rand",
"cv2.flip",
"numpy.ndarray"
] | [((1334, 1373), 'skimage.transform.resize', 'transform.resize', (['image', '(new_h, new_w)'], {}), '(image, (new_h, new_w))\n', (1350, 1373), False, 'from skimage import io, transform\n'), ((2244, 2274), 'numpy.random.randint', 'np.random.randint', (['(curr_w - w1)'], {}), '(curr_w - w1)\n', (2261, 2274), True, 'import numpy as np\n'), ((2285, 2315), 'numpy.random.randint', 'np.random.randint', (['(curr_h - h1)'], {}), '(curr_h - h1)\n', (2302, 2315), True, 'import numpy as np\n'), ((3311, 3358), 'numpy.asarray', 'np.asarray', (['[0.433, 0.4045, 0.3776]', 'np.float32'], {}), '([0.433, 0.4045, 0.3776], np.float32)\n', (3321, 3358), True, 'import numpy as np\n'), ((3390, 3447), 'numpy.asarray', 'np.asarray', (['[0.1519876, 0.14855877, 0.156976]', 'np.float32'], {}), '([0.1519876, 0.14855877, 0.156976], np.float32)\n', (3400, 3447), True, 'import numpy as np\n'), ((2341, 2360), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2358, 2360), True, 'import numpy as np\n'), ((2702, 2719), 'numpy.ndarray', 'np.ndarray', (['label'], {}), '(label)\n', (2712, 2719), True, 'import numpy as np\n'), ((4080, 4099), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4097, 4099), True, 'import numpy as np\n'), ((4131, 4149), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (4139, 4149), False, 'import cv2\n'), ((4574, 4612), 'numpy.random.randint', 'np.random.randint', (['(self.brightness + 1)'], {}), '(self.brightness + 1)\n', (4591, 4612), True, 'import numpy as np\n'), ((2041, 2057), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2055, 2057), True, 'import numpy as np\n'), ((2118, 2134), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2132, 2134), True, 'import numpy as np\n')] |
# <NAME>
# email: <EMAIL>
import numpy as np
class PCA:
    """
    Principal component analysis (PCA) is an unsupervised linear
    technique for dimensionality reduction on a data set. PCA computes
    the principal directions that maximize the variance of the projected data,
    and can compute the principal components by projecting the data onto them.
    This gives us a reduced dimension of the data, while preserving as much of
    the variance as possible.
    - fit(X): fits the model on the data matrix X
    - transform(X, target_d): transforms X to lower dimension target_d
    - fit_transform(X, target_d): Performs fit and transform
    """

    def __init__(self, svd=True):
        """
        Constructor for Principal Component Analysis.
        :param svd: whether to use SVD to compute PCA or not
        """
        self.svd = svd
        self.covariance = None   # covariance matrix of data fitted, (d, d)
        self.eigenvectors = None # eigenvectors of the covariance matrix
        self.eigenvalues = None  # eigenvalues of the covariance matrix
        self.mean = 0            # mean of data fitted, shape: (d,)
        self.Vh = None           # transposed unitary matrix from svd, shape: (d, d)
        self.S = None            # singular values from svd, shape: (min(n, d),)
        self.U = None            # unitary matrix from svd, shape: (n, n)

    def fit(self, X):
        """
        Fits the model to X by computing the SVD of X or by computing
        the eigendecomposition of its covariance matrix.
        :param X: the data matrix, shape: (n, d)
        """
        self.mean = np.mean(X, axis=0)
        # demean
        X_demean = X - self.mean
        if self.svd:  # note that SVD is way faster at computing PCA!
            self.U, self.S, self.Vh = np.linalg.svd(X_demean, full_matrices=True)
        else:
            self.covariance = np.cov(X_demean.T)
            # BUG FIX: use eigh instead of eig — the covariance matrix is
            # symmetric, and eig can return (numerically) complex eigenpairs.
            self.eigenvalues, self.eigenvectors = np.linalg.eigh(self.covariance)

    def transform(self, X, target_d):
        """
        Performs PCA dimensionality reduction of X to a lower dimension.
        :param X: the data matrix, shape: (n, d)
        :param target_d: the lower dimension in which we reduce to
        :return: a matrix, shape: (n, target_d)
        """
        X_demean = X - self.mean
        if self.svd:
            # indices of the singular values in descending order,
            # truncated to the target dimension target_d
            idx = self.S.argsort()[::-1][:target_d]
            # project X onto the corresponding right singular vectors
            return X_demean.dot(self.Vh[idx].T)
        # sort eigenvectors by their eigenvalues in descending order
        idx = self.eigenvalues.argsort()[::-1]
        self.eigenvalues = self.eigenvalues[idx]
        self.eigenvectors = self.eigenvectors[:, idx]
        # projection onto the top-target_d principal directions
        return X_demean.dot(self.eigenvectors[:, :target_d])

    def fit_transform(self, X, target_d):
        """
        Performs fit and transform in one routine.
        :param X: the data matrix, shape: (n, d)
        :param target_d: the lower dimension in which we reduce to
        :return: a matrix, shape: (n, target_d)
        """
        if self.svd:  # svd is much faster
            self.fit(X)
            # descending order of the singular values, truncated to target_d
            idx = self.S.argsort()[::-1][:target_d]
            # X V_k == U_k S_k, so the projection is U_ @ diag(S[idx])
            S_ = np.diag(self.S[idx])
            U_ = self.U[:, idx]
            return U_.dot(S_)
        self.fit(X)
        return self.transform(X, target_d)
| [
"numpy.linalg.eig",
"numpy.linalg.svd",
"numpy.mean",
"numpy.diag",
"numpy.cov"
] | [((1602, 1620), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1609, 1620), True, 'import numpy as np\n'), ((1835, 1878), 'numpy.linalg.svd', 'np.linalg.svd', (['X_demean'], {'full_matrices': '(True)'}), '(X_demean, full_matrices=True)\n', (1848, 1878), True, 'import numpy as np\n'), ((1996, 2014), 'numpy.cov', 'np.cov', (['X_demean.T'], {}), '(X_demean.T)\n', (2002, 2014), True, 'import numpy as np\n'), ((2130, 2160), 'numpy.linalg.eig', 'np.linalg.eig', (['self.covariance'], {}), '(self.covariance)\n', (2143, 2160), True, 'import numpy as np\n'), ((3915, 3935), 'numpy.diag', 'np.diag', (['self.S[idx]'], {}), '(self.S[idx])\n', (3922, 3935), True, 'import numpy as np\n')] |
import pickle
import cv2
import numpy as np
import torch
from bullet_envs.utils import PY_MUJOCO, env_with_distractor
from SRL4RL import user_path
from SRL4RL.utils.nn_torch import numpy2pytorch, pytorch2numpy
from SRL4RL.utils.utils import createFolder
CWH2WHC = lambda x: x.transpose(1, 2, 0)
NCWH2WHC = lambda x: x.transpose(0, 2, 3, 1)[0]
tensor2image = lambda x: NCWH2WHC(pytorch2numpy(x))
def giveEnv_name(config):
    """Build a space-separated tag string from the environment config flags."""
    parts = []
    if config["fpv"]:
        parts.append("fpv")
    if config["wallDistractor"]:
        parts.append("wallDistractor")
    if config["distractor"]:
        parts.append("withDistractor")
    if config["noise_type"] != "none":
        parts.append(config["noise_type"])
    if config["flickering"] > 0:
        parts.append("flickering-{}".format(config["flickering"]))
    # older configs predate the randomExplor flag; default to True
    if config.get("randomExplor", True):
        parts.append("randomExplor")
    return " ".join(parts)
def assert_args_envs(args):
    """Validate env-dependent argument combinations (raises AssertionError)."""
    is_turtlebot_maze = args.env_name == "TurtlebotMazeEnv-v0"
    if not is_turtlebot_maze:
        assert not args.wallDistractor, "wallDistractor with not TurtlebotMazeEnv"
    if args.distractor:
        assert args.env_name in env_with_distractor, "distractor not implemented"
def update_args_envs(args):
    """Fill in env-dependent defaults on the parsed argument namespace."""
    is_turtlebot = "Turtlebot" in args.env_name
    if is_turtlebot:
        args.fpv = True
    args.bumpDetection = is_turtlebot
    if args.env_name in PY_MUJOCO:
        if "with_reset" in args.__dict__:
            args.with_reset = True
        if args.env_name in ("HopperBulletEnv-v0", "Walker2DBulletEnv-v0"):
            args.actionRepeat = 2
        elif args.env_name == "ReacherBulletEnv-v0":
            args.actionRepeat = 1
        else:
            args.actionRepeat = 4
    else:
        args.actionRepeat = 1
    args.color = True
    # strip the "-vN" suffix for the short name
    args.new_env_name = args.env_name[:-3] if "-v" in args.env_name else args.env_name
    args.image_size = 64
    if "n_stack" not in args.__dict__:
        args.n_stack = 1
    # frame stacking is only used when actions are repeated
    args.n_stack = 3 if args.actionRepeat > 1 else 1
    return args
def reset_stack(obs, config):
    """Build an initial frame stack by repeating `obs` n_stack times on channels.

    :param obs: float observation of shape (C, H, W) or batched (N, C, H, W)
    :param config: dict with "n_stack" (int) and "color" (bool; 3 vs 1 channels)
    :return: the stacked array, or `obs` unchanged when n_stack <= 1
             (BUG FIX: the original implementation fell through and returned
             None in that case)
    """
    n_stack = config["n_stack"]
    if n_stack <= 1:
        return obs
    nc = 3 if config["color"] else 1
    shape = list(obs.shape)
    if len(shape) > 3:  # batched observation: (N, C, H, W)
        shape[1] *= n_stack
        observation_stack = np.zeros(shape, np.float32)
        for step_rep in range(n_stack):
            observation_stack[:, step_rep * nc:(step_rep + 1) * nc] = obs
    elif len(shape) == 3:  # single observation: (C, H, W)
        shape[0] *= n_stack
        observation_stack = np.zeros(shape, np.float32)
        for step_rep in range(n_stack):
            observation_stack[step_rep * nc:(step_rep + 1) * nc] = obs
    else:
        # BUG FIX: previously fell through to an UnboundLocalError
        raise ValueError("reset_stack expects a 3- or 4-dim observation")
    return observation_stack
def render_env(env, image_size, fpv, camera_id, color=True, downscaling=True):
    """Render one RGB frame from ``env`` via its ``render`` method."""
    render_kwargs = dict(
        mode="rgb_array",
        image_size=image_size,
        color=color,
        fpv=fpv,
        downscaling=downscaling,
        camera_id=camera_id,
    )
    return env.render(**render_kwargs)
def renderPybullet(envs, config, tensor=True):
    """Render one RGB observation per environment.

    Returns float arrays scaled to [0, 1] in channel-first layout:
    (n_envs, C, H, W) for a list of envs, and (C, H, W) for a single env,
    with a leading batch axis added when ``tensor`` is True. ``tensor`` only
    affects the single-env branch (matching the historical behavior).
    """
    render_kwargs = dict(
        mode="rgb_array",
        image_size=config["image_size"],
        color=config["color"],
        fpv=config["fpv"],
        camera_id=0,
    )
    if type(envs) is list:
        frames = np.array([env_.render(**render_kwargs) for env_ in envs])
        # HWC uint8 [0, 255] -> CHW float [0, 1]
        return frames.transpose(0, 3, 1, 2) / 255.0
    frame = envs.render(**render_kwargs).transpose(2, 0, 1) / 255.0
    return frame[None] if tensor else frame
def update_video(
    env=None,
    im=None,
    step=0,
    color=True,
    camera_id=0,
    video_size=588,
    video=None,
    save_images=False,
    fpv=None,
    save_dir="",
    concatIM=None,
    downscaling=True,
):
    """Append one frame to ``video`` and/or save it as a numbered PNG.

    The frame is either rendered from ``env`` (when ``im`` is None) or taken
    from ``im`` (resized to ``video_size`` if needed). When ``concatIM`` is an
    ndarray, it is resized and stacked horizontally next to the frame. With
    ``color=True`` the channel axis is reversed (RGB <-> BGR, as expected by
    the OpenCV writers used here).

    Side effects only; returns None.
    """
    if save_dir == "":
        # Default output location, named after the environment class.
        save_dir = user_path + "Downloads/" + env.__class__.__name__ + "/"
        if save_images:
            createFolder(save_dir, "")
    if im is None:
        im = env.render(
            mode="rgb_array",
            image_size=video_size,
            color=color,
            camera_id=camera_id,
            fpv=fpv,
            downscaling=downscaling,
        )
        assert im.shape[0] == video_size, "im.shape[0] is not in good size"
    else:
        # Caller supplied the frame; resize only when it does not match.
        if im.shape[0] != video_size:
            im = im.astype(np.uint8)
            im = cv2.resize(
                im, dsize=(video_size, video_size), interpolation=cv2.INTER_CUBIC
            )
    # Reverse the channel axis when color is on (RGB <-> BGR for cv2).
    im = im[:, :, ::-1].astype(np.uint8) if color else im.astype(np.uint8)
    if type(concatIM) is np.ndarray:
        "concatIM is between 0 and 255"
        concatIM = concatIM.astype(np.uint8)
        concatIM = cv2.resize(
            concatIM, dsize=(video_size, video_size), interpolation=cv2.INTER_CUBIC
        )
        concatIM = (
            concatIM[:, :, ::-1].astype(np.uint8)
            if color
            else concatIM.astype(np.uint8)
        )
        # Show the auxiliary image side by side with the main frame.
        new_im = np.hstack([im, concatIM])
    else:
        new_im = im
    if save_images:
        cv2.imwrite(save_dir + "ob_%05d" % (step) + ".png", new_im)
    if video is not None:
        video.write(new_im)
def load_cifar10(file):
"""load the cifar-10 data"""
with open(file, "rb") as fo:
data_dict = pickle.load(fo, encoding="bytes")
im = data_dict[b"data"].reshape(-1, 3, 32, 32)
im = im.transpose(0, 3, 2, 1)
return im
def cutout(img, n_holes, length):
    """
    Randomly zero out square patches of `img` (Cutout-style augmentation).

    Args:
        img (Tensor): Tensor image of size (C, H, W). Rank-4 (N, C, H, W) and
            rank-5 (E, N, C, H, W) batches are also handled, with independent
            hole positions per image.
        n_holes (int): Number of patches to cut out of each image.
        length (int): The length (in pixels) of each square patch.
    Returns:
        Tensor: Image with n_holes of dimension length x length cut out of it.
    """
    # Binary mask (1 = keep, 0 = hole) built with the same shape as img.
    mask = np.ones((img.shape), np.float32)
    # Work out the spatial size (h, w) and how many independent hole positions
    # to sample (n_obs), depending on the input rank.
    if len(img.shape) > 4:
        h = img.shape[-2]
        w = img.shape[-1]
        n_obs = img.shape[:2]
    elif len(img.shape) == 4:
        h = img.shape[-2]
        w = img.shape[-1]
        if img.shape[0] > 1:
            n_obs = img.shape[0]
        else:
            # Batch of one: sample scalar coordinates below (size=None).
            n_obs = None
    else:
        # Rank-3 input is assumed channel-LAST here (H, W, C) — note this
        # differs from the (C, H, W) wording in the docstring above.
        assert img.shape[-1] in [1, 3]
        h = img.shape[0]
        w = img.shape[1]
        n_obs = None
    for _ in range(n_holes):
        # Hole centers; size=None yields scalars, a tuple yields per-image grids.
        y = np.random.randint(h, size=(n_obs))
        x = np.random.randint(w, size=(n_obs))
        # Clip the square to the image border (holes shrink at the edges).
        y1 = np.clip(y - length // 2, 0, h)
        y2 = np.clip(y + length // 2, 0, h)
        x1 = np.clip(x - length // 2, 0, w)
        x2 = np.clip(x + length // 2, 0, w)
        if len(img.shape) == 4:
            if img.shape[0] > 1:
                for j in range(n_obs):
                    mask[j, :, y1[j] : y2[j], x1[j] : x2[j]] = 0.0
            else:
                mask[0, :, y1:y2, x1:x2] = 0.0
        elif len(img.shape) > 4:
            for ep in range(n_obs[0]):
                for j in range(n_obs[1]):
                    mask[ep, j, :, y1[ep, j] : y2[ep, j], x1[ep, j] : x2[ep, j]] = 0.0
        elif n_obs is None:
            mask[y1:y2, x1:x2] = 0.0
        # Applied inside the loop: earlier holes are re-applied each iteration,
        # which is idempotent since the mask only contains 0s and 1s.
        img = img * mask
    return img
def add_noise(x, noise_adder, config):
    """Apply observation noise and/or random flickering to ``x``.

    ``config["with_noise"]`` enables ``noise_adder``; ``config["flickering"]``
    (a probability in (0, 1]) randomly zeroes whole observations. Torch tensors
    are round-tripped through numpy and restored on their original device.
    Returns ``x`` untouched when neither effect is enabled.
    """
    noisy = config["with_noise"]
    flicker = config["flickering"]
    if not noisy and flicker <= 0.0:
        return x
    was_tensor = torch.is_tensor(x)
    if was_tensor:
        device = x.device
        x = pytorch2numpy(x)
    out = noise_adder(observation=x) if noisy else x
    if flicker > 0.0:
        if len(x.shape) > 3:
            # Batched input: flicker each observation independently.
            dropped = np.random.uniform(0, 1, size=(out.shape[0])) < flicker
            out[dropped] = out[dropped] * 0
        elif np.random.uniform(0, 1) < flicker:
            out = out * 0
    if was_tensor:
        out = numpy2pytorch(out, differentiable=False, device=device)
    return out
| [
"SRL4RL.utils.nn_torch.numpy2pytorch",
"numpy.random.uniform",
"SRL4RL.utils.nn_torch.pytorch2numpy",
"cv2.imwrite",
"SRL4RL.utils.utils.createFolder",
"numpy.zeros",
"numpy.ones",
"numpy.clip",
"numpy.hstack",
"pickle.load",
"numpy.random.randint",
"numpy.array",
"torch.is_tensor",
"cv2.r... | [((6287, 6317), 'numpy.ones', 'np.ones', (['img.shape', 'np.float32'], {}), '(img.shape, np.float32)\n', (6294, 6317), True, 'import numpy as np\n'), ((380, 396), 'SRL4RL.utils.nn_torch.pytorch2numpy', 'pytorch2numpy', (['x'], {}), '(x)\n', (393, 396), False, 'from SRL4RL.utils.nn_torch import numpy2pytorch, pytorch2numpy\n'), ((5214, 5302), 'cv2.resize', 'cv2.resize', (['concatIM'], {'dsize': '(video_size, video_size)', 'interpolation': 'cv2.INTER_CUBIC'}), '(concatIM, dsize=(video_size, video_size), interpolation=cv2.\n INTER_CUBIC)\n', (5224, 5302), False, 'import cv2\n'), ((5482, 5507), 'numpy.hstack', 'np.hstack', (['[im, concatIM]'], {}), '([im, concatIM])\n', (5491, 5507), True, 'import numpy as np\n'), ((5567, 5624), 'cv2.imwrite', 'cv2.imwrite', (["(save_dir + 'ob_%05d' % step + '.png')", 'new_im'], {}), "(save_dir + 'ob_%05d' % step + '.png', new_im)\n", (5578, 5624), False, 'import cv2\n'), ((5793, 5826), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""bytes"""'}), "(fo, encoding='bytes')\n", (5804, 5826), False, 'import pickle\n'), ((6774, 6806), 'numpy.random.randint', 'np.random.randint', (['h'], {'size': 'n_obs'}), '(h, size=n_obs)\n', (6791, 6806), True, 'import numpy as np\n'), ((6821, 6853), 'numpy.random.randint', 'np.random.randint', (['w'], {'size': 'n_obs'}), '(w, size=n_obs)\n', (6838, 6853), True, 'import numpy as np\n'), ((6870, 6900), 'numpy.clip', 'np.clip', (['(y - length // 2)', '(0)', 'h'], {}), '(y - length // 2, 0, h)\n', (6877, 6900), True, 'import numpy as np\n'), ((6914, 6944), 'numpy.clip', 'np.clip', (['(y + length // 2)', '(0)', 'h'], {}), '(y + length // 2, 0, h)\n', (6921, 6944), True, 'import numpy as np\n'), ((6958, 6988), 'numpy.clip', 'np.clip', (['(x - length // 2)', '(0)', 'w'], {}), '(x - length // 2, 0, w)\n', (6965, 6988), True, 'import numpy as np\n'), ((7002, 7032), 'numpy.clip', 'np.clip', (['(x + length // 2)', '(0)', 'w'], {}), '(x + length // 2, 0, w)\n', (7009, 7032), True, 'import numpy as 
np\n'), ((7711, 7729), 'torch.is_tensor', 'torch.is_tensor', (['x'], {}), '(x)\n', (7726, 7729), False, 'import torch\n'), ((2567, 2594), 'numpy.zeros', 'np.zeros', (['shape', 'np.float32'], {}), '(shape, np.float32)\n', (2575, 2594), True, 'import numpy as np\n'), ((4450, 4476), 'SRL4RL.utils.utils.createFolder', 'createFolder', (['save_dir', '""""""'], {}), "(save_dir, '')\n", (4462, 4476), False, 'from SRL4RL.utils.utils import createFolder\n'), ((4890, 4967), 'cv2.resize', 'cv2.resize', (['im'], {'dsize': '(video_size, video_size)', 'interpolation': 'cv2.INTER_CUBIC'}), '(im, dsize=(video_size, video_size), interpolation=cv2.INTER_CUBIC)\n', (4900, 4967), False, 'import cv2\n'), ((7806, 7822), 'SRL4RL.utils.nn_torch.pytorch2numpy', 'pytorch2numpy', (['x'], {}), '(x)\n', (7819, 7822), False, 'from SRL4RL.utils.nn_torch import numpy2pytorch, pytorch2numpy\n'), ((8360, 8415), 'SRL4RL.utils.nn_torch.numpy2pytorch', 'numpy2pytorch', (['out'], {'differentiable': '(False)', 'device': 'device'}), '(out, differentiable=False, device=device)\n', (8373, 8415), False, 'from SRL4RL.utils.nn_torch import numpy2pytorch, pytorch2numpy\n'), ((2835, 2862), 'numpy.zeros', 'np.zeros', (['shape', 'np.float32'], {}), '(shape, np.float32)\n', (2843, 2862), True, 'import numpy as np\n'), ((3726, 3739), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (3734, 3739), True, 'import numpy as np\n'), ((8059, 8101), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'out.shape[0]'}), '(0, 1, size=out.shape[0])\n', (8076, 8101), True, 'import numpy as np\n'), ((8238, 8261), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (8255, 8261), True, 'import numpy as np\n')] |
from typing import Dict, List
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def sns_displot(d, label, **kwds):
    """Draw a 36-bin histogram of ``d`` (no KDE) with black bar edges.

    Extra keyword arguments are forwarded to ``sns.distplot``.
    """
    n_bins = int(180 / 5)  # 36 bins
    sns.distplot(
        d,
        hist=True,
        kde=False,
        bins=n_bins,
        label=label,
        hist_kws={'edgecolor': 'black'},
        kde_kws={'linewidth': 2},
        **kwds,
    )
def distplot(path, data, labels=None, **kwds):
    """Plot one histogram per series in ``data`` and save the figure.

    ``data`` may be a list/ndarray (rows are flattened and plotted
    individually when multi-dimensional) or a dict mapping label -> series.
    The figure is written to ``path`` and closed.
    """
    if isinstance(data, (List, np.ndarray)):
        arr = np.array(data)
        if len(arr.shape) > 1:
            # One histogram per leading row, remaining axes flattened.
            flat = arr.reshape((arr.shape[0], -1))
            for i, series in enumerate(flat):
                sns_displot(series, None if labels is None else labels[i], **kwds)
        else:
            sns_displot(arr, None if labels is None else labels, **kwds)
    if isinstance(data, Dict):
        for name, series in data.items():
            sns_displot(series, name, **kwds)
    plt.legend()
    plt.savefig(path, bbox_inches="tight")
    plt.close()
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.savefig"
] | [((867, 879), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (877, 879), True, 'import matplotlib.pyplot as plt\n'), ((884, 922), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""'}), "(path, bbox_inches='tight')\n", (895, 922), True, 'import matplotlib.pyplot as plt\n'), ((927, 938), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (936, 938), True, 'import matplotlib.pyplot as plt\n'), ((451, 465), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (459, 465), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
import os
import torch
import torch.nn.functional as F
from torch_geometric.data import DataLoader
from torch_geometric.transforms import OneHotDegree
import argparse
import numpy as np
import time
import yaml
from models.model_cycles import SMP
from models.gin import GIN
from datasets_generation.build_cycles import FourCyclesDataset
from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix
from models import ppgn
from models.ring_gnn import RingGNN
from easydict import EasyDict as edict
# Change the following to point to the folder where the datasets are stored
if os.path.isdir('/datasets2/'):
    # Cluster layout: datasets live under /datasets2/.
    rootdir = '/datasets2/CYCLE_DETECTION/'
else:
    # Local fallback: datasets generated alongside the repository.
    rootdir = './data/datasets_kcycle_nsamples=10000/'
yaml_file = './config_cycles.yaml'
# yaml_file = './benchmark/kernel/config4cycles.yaml'
# Fix the random seeds (torch and numpy) for reproducibility.
torch.manual_seed(0)
np.random.seed(0)
# Command-line interface for the cycle-detection experiments.
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--k', type=int, default=4,
                    help="Length of the cycles to detect")
parser.add_argument('--n', type=int, help='Average number of nodes in the graphs')
parser.add_argument('--save-model', action='store_true',
                    help='Save the model once training is done')
parser.add_argument('--wandb', action='store_true',
                    help="Use weights and biases library")
parser.add_argument('--gpu', type=int, help='Id of gpu device. By default use cpu')
parser.add_argument('--lr', type=float, default=0.001, help="Initial learning rate")
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--weight-decay', type=float, default=1e-4)
parser.add_argument('--clip', type=float, default=10, help="Gradient clipping")
parser.add_argument('--name', type=str, help="Name for weights and biases")
parser.add_argument('--proportion', type=float, default=1.0,
                    help='Proportion of the training data that is kept')
parser.add_argument('--generalization', action='store_true',
                    help='Evaluate out of distribution accuracy')
args = parser.parse_args()
# Logging cadence (in epochs).
test_every_epoch = 5
print_every_epoch = 1
log_interval = 20
# Store maximum number of nodes for each pair (k, n) -- this value is used by provably powerful graph networks
# Keyed by cycle length k, then by average graph size n.
max_num_nodes = {4: {12: 12, 20: 20, 28: 28, 36: 36},
                 6: {20: 25, 31: 38, 42: 52, 56: 65},
                 8: {28: 38, 50: 56, 66: 76, 72: 90}}
# Store the maximum degree for the one-hot encoding
# Keyed by cycle length k, then by average graph size n.
max_degree = {4: {12: 4, 20: 6, 28: 7, 36: 7},
              6: {20: 4, 31: 6, 42: 8, 56: 7},
              8: {28: 4, 50: 6, 66: 7, 72: 8}}
# Store the values of n to use for generalization experiments
# (train on small graphs, validate/test on increasingly larger ones).
n_gener = {4: {'train': 20, 'val': 28, 'test': 36},
           6: {'train': 31, 'val': 42, 'test': 56},
           8: {'train': 50, 'val': 66, 'test': 72}}
# Handle the device
# Use the requested GPU only when CUDA is actually available; fall back to CPU.
use_cuda = args.gpu is not None and torch.cuda.is_available()
if use_cuda:
    device = torch.device("cuda:" + str(args.gpu))
    torch.cuda.set_device(args.gpu)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
else:
    device = "cpu"
args.device = device
# DataLoader options: pinned memory only makes sense when copying to a GPU.
args.kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
print('Device used:', device)
# Load the config file of the model
with open(yaml_file) as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
config['map_x_to_u'] = False # Not used here
config = edict(config)
print(config)
# The architecture is selected by 'model_name'; for SMP the concrete layer
# type is used as the effective model name.
model_name = config['model_name']
config.pop('model_name')
if model_name == 'SMP':
    model_name = config['layer_type']
# Build a default run name encoding the model variant, k and (optionally) n.
if args.name is None:
    if model_name != 'GIN':
        args.name = model_name
    else:
        # Distinguish the GIN ablations by their node-feature scheme.
        if config.relational_pooling > 0:
            args.name = 'RP'
        elif config.one_hot:
            args.name = 'OneHotDeg'
        elif config.identifiers:
            args.name = 'OneHotNod'
        elif config.random:
            args.name = 'Random'
        else:
            args.name = 'GIN'
    args.name = args.name + '_' + str(args.k)
    if args.n is not None:
        args.name = args.name + '_' + str(args.n)
# Create a folder for the saved models
if not os.path.isdir('./saved_models/' + args.name) and args.generalization:
    os.mkdir('./saved_models/' + args.name)
# NOTE(review): args.name is always non-empty at this point, so wandb logging
# appears to be unconditionally enabled -- confirm this is intended.
if args.name:
    args.wandb = True
if args.wandb:
    import wandb
    wandb.init(project="smp", config=config, name=args.name)
    wandb.config.update(args)
if args.n is None:
    args.n = n_gener[args.k]['train']
# num_layers == -1 is a sentinel meaning "use k layers".
if config.num_layers == -1:
    config.num_layers = args.k
def train(epoch):
    """Train the global `model` for one epoch; return the mean training loss.

    Uses the module-level `model`, `optimizer`, `train_loader`, `config`,
    `args` and `device`. When `config.relational_pooling` > 0, each batch is
    reused `config.relational_pooling` times, with the node ordering of every
    graph permuted before each pass (relational-pooling augmentation).
    """
    model.train()
    # Exponential learning-rate decay, applied once per epoch.
    lr_scheduler(args.lr, epoch, optimizer)
    loss_all = 0
    if not config.relational_pooling:
        for batch_idx, data in enumerate(train_loader):
            data = data.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, data.y)
            loss.backward()
            # Accumulate the per-graph loss re-scaled by the batch size.
            loss_all += loss.item() * data.num_graphs
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
        return loss_all / len(train_loader.dataset)
    else:
        # For relational pooling, we sample several permutations of each graph
        for batch_idx, data in enumerate(train_loader):
            for repetition in range(config.relational_pooling):
                for i in range(args.batch_size):
                    # Apply the same random permutation p to both rows and
                    # columns of graph i's feature block.
                    n_nodes = int(torch.sum(data.batch == i).item())
                    p = torch.randperm(n_nodes)
                    data.x[data.batch == i, :n_nodes] = data.x[data.batch == i, :n_nodes][p, :][:, p]
                data = data.to(device)
                optimizer.zero_grad()
                output = model(data)
                loss = F.nll_loss(output, data.y)
                loss.backward()
                loss_all += loss.item() * data.num_graphs
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
                optimizer.step()
        return loss_all / len(train_loader.dataset)
def test(loader):
    """Return the classification accuracy of the global `model` on `loader`."""
    model.eval()
    n_correct = 0
    for batch in loader:
        batch = batch.to(device)
        # Predicted class = argmax over the log-probabilities.
        preds = model(batch).max(dim=1)[1]
        n_correct += preds.eq(batch.y).sum().item()
    return n_correct / len(loader.dataset)
def lr_scheduler(lr, epoch, optimizer):
    """Set every param group's LR to ``lr`` decayed by 0.995 ** (epoch / 5)."""
    decayed = lr * 0.995 ** (epoch / 5)
    for group in optimizer.param_groups:
        group['lr'] = decayed
# Define the transform to use in the dataset
transform=None
# NOTE(review): `'GIN' or 'RP' in model_name` evaluates as `bool('GIN')` and is
# therefore ALWAYS True; the intent was probably
# `('GIN' in model_name) or ('RP' in model_name)`. Left unchanged here --
# confirm whether these node-feature transforms should apply to every model.
if 'GIN' or 'RP' in model_name:
    if config.one_hot:
        # Cannot always be used in an inductive setting,
        # because the maximal degree might be bigger than during training
        degree = max_degree[args.k][args.n]
        transform = OneHotDegree(degree, cat=False)
        config.num_input_features = degree + 1
    elif config.identifiers:
        # Cannot be used in an inductive setting
        transform = EyeTransform(max_num_nodes[args.k][args.n])
        config.num_input_features = max_num_nodes[args.k][args.n]
    elif config.random:
        # Can be used in an inductive setting
        transform = RandomId()
        transform_val = RandomId()
        transform_test = RandomId()
        config.num_input_features = 1
# Fall back to constant scalar node features when no transform was selected.
if transform is None:
    transform_val = None
    transform_test = None
    config.num_input_features = 1
# Instantiate the requested architecture. PPGN and Ring-GNN operate on dense
# adjacency matrices, so they override the transforms with DenseAdjMatrix
# padded to the max node count of each split.
if 'SMP' in model_name:
    config.use_batch_norm = args.k > 6 or args.n > 30
    model = SMP(config.num_input_features, config.num_classes, config.num_layers, config.hidden, config.layer_type,
                config.hidden_final, config.dropout_prob, config.use_batch_norm, config.use_x, config.map_x_to_u,
                config.num_towers, config.simplified).to(device)
elif model_name == 'PPGN':
    transform = DenseAdjMatrix(max_num_nodes[args.k][args.n])
    transform_val = DenseAdjMatrix(max_num_nodes[args.k][n_gener[args.k]['val']])
    transform_test = DenseAdjMatrix(max_num_nodes[args.k][n_gener[args.k]['test']])
    model = ppgn.Powerful(config.num_classes, config.num_layers, config.hidden,
                          config.hidden_final, config.dropout_prob, config.simplified)
elif model_name == 'GIN':
    config.use_batch_norm = args.k > 6 or args.n > 50
    model = GIN(config.num_input_features, config.num_classes, config.num_layers,
                config.hidden, config.hidden_final, config.dropout_prob, config.use_batch_norm)
elif model_name == 'RING_GNN':
    transform = DenseAdjMatrix(max_num_nodes[args.k][args.n])
    transform_val = DenseAdjMatrix(max_num_nodes[args.k][n_gener[args.k]['val']])
    transform_test = DenseAdjMatrix(max_num_nodes[args.k][n_gener[args.k]['test']])
    model = RingGNN(config.num_classes, config.num_layers, config.hidden, config.hidden_final, config.dropout_prob,
                    config.simplified)
model = model.to(device)
# Plain SGD with momentum; the LR itself is decayed per-epoch by lr_scheduler.
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.5, weight_decay=args.weight_decay)
# Load the data
print("Transform used:", transform)
batch_size = args.batch_size
if args.generalization:
    # Generalization runs train/test on the small-graph split and validate on
    # a larger graph size (n_gener[k]['val']).
    train_data = FourCyclesDataset(args.k, n_gener[args.k]['train'], rootdir, train=True, transform=transform)
    test_data = FourCyclesDataset(args.k, n_gener[args.k]['train'], rootdir, train=False, transform=transform)
    gener_data_val = FourCyclesDataset(args.k, n_gener[args.k]['val'], rootdir, train=False, transform=transform_val)
    train_loader = DataLoader(train_data, batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size, shuffle=False)
    gener_val_loader = DataLoader(gener_data_val, batch_size, shuffle=False)
else:
    # Standard runs use the requested n, optionally subsampling the training
    # set via --proportion.
    train_data = FourCyclesDataset(args.k, args.n, rootdir, proportion=args.proportion, train=True, transform=transform)
    test_data = FourCyclesDataset(args.k, args.n, rootdir, proportion=args.proportion, train=False, transform=transform)
    train_loader = DataLoader(train_data, batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size, shuffle=False)
start = time.time()
best_epoch = -1
best_generalization_acc = 0
for epoch in range(args.epochs):
epoch_start = time.time()
tr_loss = train(epoch)
if epoch % print_every_epoch == 0:
acc_train = test(train_loader)
current_lr = optimizer.param_groups[0]["lr"]
duration = time.time() - epoch_start
print(f'Time:{duration:2.2f} | {epoch:5d} | Loss: {tr_loss:2.5f} | Train Acc: {acc_train:2.5f} | LR: {current_lr:.6f}')
if epoch % test_every_epoch == 0:
acc_test = test(test_loader)
print(f'Test accuracy: {acc_test:2.5f}')
if args.generalization:
acc_generalization = test(gener_val_loader)
print("Validation generalization accuracy", acc_generalization)
if args.wandb:
wandb.log({"Epoch": epoch, "Duration": duration, "Train loss": tr_loss, "train accuracy": acc_train,
"Test acc": acc_test, 'Gene eval': acc_generalization})
if acc_generalization > best_generalization_acc:
print(f"New best generalization error + accuracy > 90% at epoch {epoch}")
# Remove existing models
folder = f'./saved_models/{args.name}/'
files_in_folder = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]
for file in files_in_folder:
try:
os.remove(folder + file)
except:
print("Could not remove file", file)
# Save new model
torch.save(model, f'./saved_models/{args.name}/epoch{epoch}.pkl')
print(f"Model saved at epoch {epoch}.")
best_epoch = epoch
else:
if args.wandb:
wandb.log({"Epoch": epoch, "Duration": duration, "Train loss": tr_loss, "train accuracy": acc_train,
"Test acc": acc_test})
else:
if args.wandb:
wandb.log({"Epoch": epoch, "Duration": duration, "Train loss": tr_loss, "train accuracy": acc_train})
cur_lr = optimizer.param_groups[0]["lr"]
print(f'{epoch:2.5f} | Loss: {tr_loss:2.5f} | Train Acc: {acc_train:2.5f} | LR: {cur_lr:.6f} | Test Acc: {acc_test:2.5f}')
print(f'Elapsed time: {(time.time() - start) / 60:.1f} minutes')
print('done!')
final_acc = test(test_loader)
print(f"Final accuracy: {final_acc}")
print("Done.")
if args.generalization:
new_n = n_gener[args.k]['test']
gener_data_test = FourCyclesDataset(args.k, new_n, rootdir, train=False, transform=transform_test)
gener_test_loader = DataLoader(gener_data_test, batch_size, shuffle=False)
model = torch.load(f"./saved_models/{args.name}/epoch{best_epoch}.pkl", map_location=device)
model.eval()
acc_test_generalization = test(gener_test_loader)
print(f"Generalization accuracy on {args.k} cycles with {new_n} nodes", acc_test_generalization)
if args.wandb:
wandb.run.summary['test_generalization'] = acc_test_generalization
| [
"wandb.log",
"os.mkdir",
"yaml.load",
"os.remove",
"numpy.random.seed",
"argparse.ArgumentParser",
"models.ring_gnn.RingGNN",
"models.utils.transforms.DenseAdjMatrix",
"os.path.join",
"torch.load",
"models.gin.GIN",
"torch.nn.functional.nll_loss",
"easydict.EasyDict",
"torch.cuda.set_devic... | [((634, 662), 'os.path.isdir', 'os.path.isdir', (['"""/datasets2/"""'], {}), "('/datasets2/')\n", (647, 662), False, 'import os\n'), ((858, 878), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (875, 878), False, 'import torch\n'), ((879, 896), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (893, 896), True, 'import numpy as np\n'), ((907, 932), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (930, 932), False, 'import argparse\n'), ((10127, 10138), 'time.time', 'time.time', ([], {}), '()\n', (10136, 10138), False, 'import time\n'), ((2967, 2992), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2990, 2992), False, 'import torch\n'), ((3061, 3092), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (3082, 3092), False, 'import torch\n'), ((3378, 3414), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (3387, 3414), False, 'import yaml\n'), ((3484, 3497), 'easydict.EasyDict', 'edict', (['config'], {}), '(config)\n', (3489, 3497), True, 'from easydict import EasyDict as edict\n'), ((4285, 4324), 'os.mkdir', 'os.mkdir', (["('./saved_models/' + args.name)"], {}), "('./saved_models/' + args.name)\n", (4293, 4324), False, 'import os\n'), ((4399, 4455), 'wandb.init', 'wandb.init', ([], {'project': '"""smp"""', 'config': 'config', 'name': 'args.name'}), "(project='smp', config=config, name=args.name)\n", (4409, 4455), False, 'import wandb\n'), ((4460, 4485), 'wandb.config.update', 'wandb.config.update', (['args'], {}), '(args)\n', (4479, 4485), False, 'import wandb\n'), ((9172, 9269), 'datasets_generation.build_cycles.FourCyclesDataset', 'FourCyclesDataset', (['args.k', "n_gener[args.k]['train']", 'rootdir'], {'train': '(True)', 'transform': 'transform'}), "(args.k, n_gener[args.k]['train'], rootdir, train=True,\n transform=transform)\n", (9189, 9269), False, 'from 
datasets_generation.build_cycles import FourCyclesDataset\n'), ((9282, 9380), 'datasets_generation.build_cycles.FourCyclesDataset', 'FourCyclesDataset', (['args.k', "n_gener[args.k]['train']", 'rootdir'], {'train': '(False)', 'transform': 'transform'}), "(args.k, n_gener[args.k]['train'], rootdir, train=False,\n transform=transform)\n", (9299, 9380), False, 'from datasets_generation.build_cycles import FourCyclesDataset\n'), ((9398, 9498), 'datasets_generation.build_cycles.FourCyclesDataset', 'FourCyclesDataset', (['args.k', "n_gener[args.k]['val']", 'rootdir'], {'train': '(False)', 'transform': 'transform_val'}), "(args.k, n_gener[args.k]['val'], rootdir, train=False,\n transform=transform_val)\n", (9415, 9498), False, 'from datasets_generation.build_cycles import FourCyclesDataset\n'), ((9514, 9562), 'torch_geometric.data.DataLoader', 'DataLoader', (['train_data', 'batch_size'], {'shuffle': '(True)'}), '(train_data, batch_size, shuffle=True)\n', (9524, 9562), False, 'from torch_geometric.data import DataLoader\n'), ((9581, 9629), 'torch_geometric.data.DataLoader', 'DataLoader', (['test_data', 'batch_size'], {'shuffle': '(False)'}), '(test_data, batch_size, shuffle=False)\n', (9591, 9629), False, 'from torch_geometric.data import DataLoader\n'), ((9653, 9706), 'torch_geometric.data.DataLoader', 'DataLoader', (['gener_data_val', 'batch_size'], {'shuffle': '(False)'}), '(gener_data_val, batch_size, shuffle=False)\n', (9663, 9706), False, 'from torch_geometric.data import DataLoader\n'), ((9731, 9838), 'datasets_generation.build_cycles.FourCyclesDataset', 'FourCyclesDataset', (['args.k', 'args.n', 'rootdir'], {'proportion': 'args.proportion', 'train': '(True)', 'transform': 'transform'}), '(args.k, args.n, rootdir, proportion=args.proportion,\n train=True, transform=transform)\n', (9748, 9838), False, 'from datasets_generation.build_cycles import FourCyclesDataset\n'), ((9851, 9959), 'datasets_generation.build_cycles.FourCyclesDataset', 'FourCyclesDataset', 
(['args.k', 'args.n', 'rootdir'], {'proportion': 'args.proportion', 'train': '(False)', 'transform': 'transform'}), '(args.k, args.n, rootdir, proportion=args.proportion,\n train=False, transform=transform)\n', (9868, 9959), False, 'from datasets_generation.build_cycles import FourCyclesDataset\n'), ((9975, 10023), 'torch_geometric.data.DataLoader', 'DataLoader', (['train_data', 'batch_size'], {'shuffle': '(True)'}), '(train_data, batch_size, shuffle=True)\n', (9985, 10023), False, 'from torch_geometric.data import DataLoader\n'), ((10042, 10090), 'torch_geometric.data.DataLoader', 'DataLoader', (['test_data', 'batch_size'], {'shuffle': '(False)'}), '(test_data, batch_size, shuffle=False)\n', (10052, 10090), False, 'from torch_geometric.data import DataLoader\n'), ((10234, 10245), 'time.time', 'time.time', ([], {}), '()\n', (10243, 10245), False, 'import time\n'), ((12750, 12835), 'datasets_generation.build_cycles.FourCyclesDataset', 'FourCyclesDataset', (['args.k', 'new_n', 'rootdir'], {'train': '(False)', 'transform': 'transform_test'}), '(args.k, new_n, rootdir, train=False, transform=transform_test\n )\n', (12767, 12835), False, 'from datasets_generation.build_cycles import FourCyclesDataset\n'), ((12855, 12909), 'torch_geometric.data.DataLoader', 'DataLoader', (['gener_data_test', 'batch_size'], {'shuffle': '(False)'}), '(gener_data_test, batch_size, shuffle=False)\n', (12865, 12909), False, 'from torch_geometric.data import DataLoader\n'), ((12922, 13010), 'torch.load', 'torch.load', (['f"""./saved_models/{args.name}/epoch{best_epoch}.pkl"""'], {'map_location': 'device'}), "(f'./saved_models/{args.name}/epoch{best_epoch}.pkl',\n map_location=device)\n", (12932, 13010), False, 'import torch\n'), ((4211, 4255), 'os.path.isdir', 'os.path.isdir', (["('./saved_models/' + args.name)"], {}), "('./saved_models/' + args.name)\n", (4224, 4255), False, 'import os\n'), ((6828, 6859), 'torch_geometric.transforms.OneHotDegree', 'OneHotDegree', (['degree'], {'cat': 
'(False)'}), '(degree, cat=False)\n', (6840, 6859), False, 'from torch_geometric.transforms import OneHotDegree\n'), ((7865, 7910), 'models.utils.transforms.DenseAdjMatrix', 'DenseAdjMatrix', (['max_num_nodes[args.k][args.n]'], {}), '(max_num_nodes[args.k][args.n])\n', (7879, 7910), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((7931, 7992), 'models.utils.transforms.DenseAdjMatrix', 'DenseAdjMatrix', (["max_num_nodes[args.k][n_gener[args.k]['val']]"], {}), "(max_num_nodes[args.k][n_gener[args.k]['val']])\n", (7945, 7992), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((8014, 8076), 'models.utils.transforms.DenseAdjMatrix', 'DenseAdjMatrix', (["max_num_nodes[args.k][n_gener[args.k]['test']]"], {}), "(max_num_nodes[args.k][n_gener[args.k]['test']])\n", (8028, 8076), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((8089, 8222), 'models.ppgn.Powerful', 'ppgn.Powerful', (['config.num_classes', 'config.num_layers', 'config.hidden', 'config.hidden_final', 'config.dropout_prob', 'config.simplified'], {}), '(config.num_classes, config.num_layers, config.hidden, config.\n hidden_final, config.dropout_prob, config.simplified)\n', (8102, 8222), False, 'from models import ppgn\n'), ((4951, 4977), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'data.y'], {}), '(output, data.y)\n', (4961, 4977), True, 'import torch.nn.functional as F\n'), ((7005, 7048), 'models.utils.transforms.EyeTransform', 'EyeTransform', (['max_num_nodes[args.k][args.n]'], {}), '(max_num_nodes[args.k][args.n])\n', (7017, 7048), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((7536, 7788), 'models.model_cycles.SMP', 'SMP', (['config.num_input_features', 'config.num_classes', 'config.num_layers', 'config.hidden', 'config.layer_type', 'config.hidden_final', 'config.dropout_prob', 'config.use_batch_norm', 'config.use_x', 
'config.map_x_to_u', 'config.num_towers', 'config.simplified'], {}), '(config.num_input_features, config.num_classes, config.num_layers,\n config.hidden, config.layer_type, config.hidden_final, config.\n dropout_prob, config.use_batch_norm, config.use_x, config.map_x_to_u,\n config.num_towers, config.simplified)\n', (7539, 7788), False, 'from models.model_cycles import SMP\n'), ((8336, 8494), 'models.gin.GIN', 'GIN', (['config.num_input_features', 'config.num_classes', 'config.num_layers', 'config.hidden', 'config.hidden_final', 'config.dropout_prob', 'config.use_batch_norm'], {}), '(config.num_input_features, config.num_classes, config.num_layers,\n config.hidden, config.hidden_final, config.dropout_prob, config.\n use_batch_norm)\n', (8339, 8494), False, 'from models.gin import GIN\n'), ((10423, 10434), 'time.time', 'time.time', ([], {}), '()\n', (10432, 10434), False, 'import time\n'), ((5829, 5855), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'data.y'], {}), '(output, data.y)\n', (5839, 5855), True, 'import torch.nn.functional as F\n'), ((7205, 7215), 'models.utils.transforms.RandomId', 'RandomId', ([], {}), '()\n', (7213, 7215), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((7240, 7250), 'models.utils.transforms.RandomId', 'RandomId', ([], {}), '()\n', (7248, 7250), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((7276, 7286), 'models.utils.transforms.RandomId', 'RandomId', ([], {}), '()\n', (7284, 7286), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((8549, 8594), 'models.utils.transforms.DenseAdjMatrix', 'DenseAdjMatrix', (['max_num_nodes[args.k][args.n]'], {}), '(max_num_nodes[args.k][args.n])\n', (8563, 8594), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((8615, 8676), 'models.utils.transforms.DenseAdjMatrix', 'DenseAdjMatrix', 
(["max_num_nodes[args.k][n_gener[args.k]['val']]"], {}), "(max_num_nodes[args.k][n_gener[args.k]['val']])\n", (8629, 8676), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((8698, 8760), 'models.utils.transforms.DenseAdjMatrix', 'DenseAdjMatrix', (["max_num_nodes[args.k][n_gener[args.k]['test']]"], {}), "(max_num_nodes[args.k][n_gener[args.k]['test']])\n", (8712, 8760), False, 'from models.utils.transforms import EyeTransform, RandomId, DenseAdjMatrix\n'), ((8773, 8900), 'models.ring_gnn.RingGNN', 'RingGNN', (['config.num_classes', 'config.num_layers', 'config.hidden', 'config.hidden_final', 'config.dropout_prob', 'config.simplified'], {}), '(config.num_classes, config.num_layers, config.hidden, config.\n hidden_final, config.dropout_prob, config.simplified)\n', (8780, 8900), False, 'from models.ring_gnn import RingGNN\n'), ((12236, 12341), 'wandb.log', 'wandb.log', (["{'Epoch': epoch, 'Duration': duration, 'Train loss': tr_loss,\n 'train accuracy': acc_train}"], {}), "({'Epoch': epoch, 'Duration': duration, 'Train loss': tr_loss,\n 'train accuracy': acc_train})\n", (12245, 12341), False, 'import wandb\n'), ((5566, 5589), 'torch.randperm', 'torch.randperm', (['n_nodes'], {}), '(n_nodes)\n', (5580, 5589), False, 'import torch\n'), ((10940, 11104), 'wandb.log', 'wandb.log', (["{'Epoch': epoch, 'Duration': duration, 'Train loss': tr_loss,\n 'train accuracy': acc_train, 'Test acc': acc_test, 'Gene eval':\n acc_generalization}"], {}), "({'Epoch': epoch, 'Duration': duration, 'Train loss': tr_loss,\n 'train accuracy': acc_train, 'Test acc': acc_test, 'Gene eval':\n acc_generalization})\n", (10949, 11104), False, 'import wandb\n'), ((11790, 11855), 'torch.save', 'torch.save', (['model', 'f"""./saved_models/{args.name}/epoch{epoch}.pkl"""'], {}), "(model, f'./saved_models/{args.name}/epoch{epoch}.pkl')\n", (11800, 11855), False, 'import torch\n'), ((12024, 12151), 'wandb.log', 'wandb.log', (["{'Epoch': epoch, 'Duration': duration, 
'Train loss': tr_loss,\n 'train accuracy': acc_train, 'Test acc': acc_test}"], {}), "({'Epoch': epoch, 'Duration': duration, 'Train loss': tr_loss,\n 'train accuracy': acc_train, 'Test acc': acc_test})\n", (12033, 12151), False, 'import wandb\n'), ((12527, 12538), 'time.time', 'time.time', ([], {}), '()\n', (12536, 12538), False, 'import time\n'), ((11442, 11460), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (11452, 11460), False, 'import os\n'), ((11611, 11635), 'os.remove', 'os.remove', (['(folder + file)'], {}), '(folder + file)\n', (11620, 11635), False, 'import os\n'), ((5507, 5533), 'torch.sum', 'torch.sum', (['(data.batch == i)'], {}), '(data.batch == i)\n', (5516, 5533), False, 'import torch\n'), ((11479, 11502), 'os.path.join', 'os.path.join', (['folder', 'f'], {}), '(folder, f)\n', (11491, 11502), False, 'import os\n')] |
import os
import random
import numpy as np
import pandas as pd
import logging
import torch
from tqdm import tqdm
logger = logging.getLogger(__name__)
class Example(object):
    """Container for one training/test example.

    Attributes:
        idx: integer position of the example in the dataset.
        source: natural-language description (model input).
        target: code snippet (model output).
    """

    def __init__(self, idx, source, target):
        self.idx = idx
        self.source = source
        self.target = target
def read_examples(filename):
    """Read (nl, code) pairs from a JSON-lines file into Example objects.

    Args:
        filename: path to a .jsonl file with 'code' and 'nl' columns.

    Returns:
        List of Example where ``source`` is the natural-language description
        and ``target`` is the corresponding code snippet.
    """
    df = pd.read_json(filename, lines=True)
    # Pair each description with its code row-wise; enumerate supplies the
    # example index (idiomatic replacement for range(len(...)) indexing).
    return [
        Example(idx=i, source=nl, target=code)
        for i, (nl, code) in enumerate(zip(df['nl'].tolist(), df['code'].tolist()))
    ]
class InputFeatures(object):
    """Tokenized, padded model inputs for a single example.

    Attributes:
        example_id: index of the originating Example.
        source_ids / target_ids: padded token-id sequences.
        source_mask / target_mask: attention masks (1 = real token, 0 = pad).
    """

    def __init__(self, example_id, source_ids, target_ids, source_mask, target_mask):
        self.example_id = example_id
        self.source_ids = source_ids
        self.target_ids = target_ids
        self.source_mask = source_mask
        self.target_mask = target_mask
def convert_examples_to_features(examples, tokenizer, max_source_length, max_target_length, stage=None):
    """Turn Example objects into padded, masked InputFeatures.

    Args:
        examples: list of Example with .source (nl) and .target (code).
        tokenizer: HuggingFace-style tokenizer exposing tokenize /
            convert_tokens_to_ids / cls_token / sep_token / pad_token_id.
        max_source_length: fixed encoded length of the source sequence.
        max_target_length: fixed encoded length of the target sequence.
        stage: 'train' enables logging of the first examples; 'test'
            replaces the (unknown) target with the placeholder string "None".

    Returns:
        List of InputFeatures with ids/masks padded to the fixed lengths.
    """
    features = []
    for example_index, example in enumerate(tqdm(examples, desc='convert examples to features...')):
        # Source and target encoding share the same pipeline; deduplicated
        # into a single helper below.
        source_tokens, source_ids, source_mask = _encode_padded(
            tokenizer, example.source, max_source_length)
        # At test time the gold target is unknown; encode a placeholder.
        target_text = "None" if stage == "test" else example.target
        target_tokens, target_ids, target_mask = _encode_padded(
            tokenizer, target_text, max_target_length)

        # Log the first few training examples for sanity checking.
        if example_index < 3 and stage == 'train':
            logger.info("*** Example ***")
            logger.info("idx: {}".format(example.idx))
            logger.info("source_tokens: {}".format([x.replace('\u0120', '_') for x in source_tokens]))
            logger.info("source_ids: {}".format(' '.join(map(str, source_ids))))
            logger.info("source_mask: {}".format(' '.join(map(str, source_mask))))
            logger.info("target_tokens: {}".format([x.replace('\u0120', '_') for x in target_tokens]))
            logger.info("target_ids: {}".format(' '.join(map(str, target_ids))))
            logger.info("target_mask: {}".format(' '.join(map(str, target_mask))))

        features.append(
            InputFeatures(
                example_index,
                source_ids,
                target_ids,
                source_mask,
                target_mask,
            )
        )
    return features


def _encode_padded(tokenizer, text, max_length):
    """Tokenize *text*, wrap with CLS/SEP, convert to ids, and pad.

    Returns (tokens, ids, mask); ids and mask are exactly max_length long,
    padded with tokenizer.pad_token_id and 0 respectively.
    """
    tokens = tokenizer.tokenize(text)[:max_length - 2]
    tokens = [tokenizer.cls_token] + tokens + [tokenizer.sep_token]
    ids = tokenizer.convert_tokens_to_ids(tokens)
    mask = [1] * len(ids)
    padding_length = max_length - len(ids)
    ids += [tokenizer.pad_token_id] * padding_length
    mask += [0] * padding_length
    return tokens, ids, mask
def set_seed(seed=42):
    """Seed every RNG source (random, numpy, torch, cuda) for reproducibility.

    Args:
        seed: integer seed applied to all sources.
    """
    random.seed(seed)
    # Bug fix: the env var was misspelled 'PYHTONHASHSEED', so it never had
    # any effect.  (Note: to influence str hashing it must be set before
    # interpreter start; exported here mainly for spawned subprocesses.)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # no-op on CPU-only builds
    # Make cuDNN pick deterministic algorithms at the cost of some speed.
    torch.backends.cudnn.deterministic = True
| [
"tqdm.tqdm",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed",
"pandas.read_json",
"random.seed",
"logging.getLogger"
] | [((133, 160), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (150, 160), False, 'import logging\n'), ((539, 573), 'pandas.read_json', 'pd.read_json', (['filename'], {'lines': '(True)'}), '(filename, lines=True)\n', (551, 573), True, 'import pandas as pd\n'), ((3657, 3674), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3668, 3674), False, 'import random\n'), ((3726, 3746), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3740, 3746), True, 'import numpy as np\n'), ((3752, 3775), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3769, 3775), False, 'import torch\n'), ((3781, 3809), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (3803, 3809), False, 'import torch\n'), ((1510, 1564), 'tqdm.tqdm', 'tqdm', (['examples'], {'desc': '"""convert examples to features..."""'}), "(examples, desc='convert examples to features...')\n", (1514, 1564), False, 'from tqdm import tqdm\n')] |
from PIL import Image
import numpy as np
import tensorflow as tf
class Utilities(object):
    """Helpers for loading images and training a small TF1 binary classifier.

    Builds the input placeholders and optimizer in __init__, and exposes
    graph-construction helpers (weights, biases, conv, pooling), mini-batch
    training, and image loading/augmentation from ``file_path``.
    """
    def __init__(self, file_path):
        super(Utilities, self).__init__()
        self.file_path = file_path
        self.image_height = 28
        self.image_size = (self.image_height, self.image_height)
        # Flattened 28x28 grey-scale images (784 values per row).
        self.x_input = tf.placeholder(tf.float32, [None, self.image_height * self.image_height])
        self.y_labels = tf.placeholder(tf.float32, [None, 2])  # one-hot labels, 2 classes
        self.keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
        self.batch_size = 100
        self.learning_rate = 1e-3
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.num_epochs = 25
        self.keep_prob_value = 0.6
        self.nodes_layer_1 = 64
        self.nodes_layer_2 = 32
        self.augment_data = False
        # NOTE(review): data_augmentation() reads self.augmentation_count,
        # which is never initialized here -- callers must assign it first.

    def load_images(self, num_images, file_names):
        """Load ``num_images`` files from ``self.file_path`` as flat grey arrays.

        When ``self.augment_data`` is set, images are additionally rotated by
        alternating +/-90 degrees to generate augmented samples.
        """
        data = [[]] * num_images
        angle_dir = 1
        for i in range(num_images):
            im = Image.open(self.file_path + file_names[i])
            im = im.convert(mode='L')  # grey-scale
            # Rotate by -90 or 90, alternating, to generate augmented data.
            if (self.augment_data):
                im = im.rotate(angle_dir*90)
                angle_dir = -1 * angle_dir
            resized_im = im.resize(self.image_size)
            flattened_im = np.asarray(resized_im).flatten()
            data[i] = flattened_im
        return data

    def compute_model_training(self, prediction, training_data, training_label):
        """Train ``prediction`` with mini-batch SGD; return (accuracy_op, session).

        Accuracy is reported in percent.  Evaluation inside the loop runs with
        keep_prob=1.0 (dropout off); the train step uses self.keep_prob_value.
        """
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            labels=self.y_labels, logits=prediction))
        train_step = self.optimizer.minimize(cross_entropy)
        correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(self.y_labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy = tf.multiply(accuracy, 100)  # express as percentage
        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        # Mini-batch stochastic gradient descent over the training set.
        training_data_count = len(training_data)
        num_iterations = training_data_count // self.batch_size
        input_batch = []
        output_label = []
        for j in range(self.num_epochs):
            print('Epoch %d started...' % j)
            train_accuracy = 0
            for i in range(num_iterations):
                input_batch = training_data[i * self.batch_size: min((i + 1) * self.batch_size, training_data_count)]
                output_label = training_label[i * self.batch_size: min((i + 1) * self.batch_size, training_data_count)]
                train_accuracy = train_accuracy + accuracy.eval(feed_dict={self.x_input: input_batch, self.y_labels: output_label, self.keep_prob: 1.0})
                train_step.run(feed_dict={self.x_input: input_batch, self.y_labels: output_label, self.keep_prob: self.keep_prob_value})
            print('Epoch %d completed. Accuracy: %0.2f' % (j, train_accuracy / num_iterations))
        return accuracy, sess

    def weight_variable(self, shape):
        """Weight tensor initialized from a truncated normal (stddev 0.1)."""
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(self, shape):
        """Bias tensor initialized to a small positive constant (0.1)."""
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def conv2d(self, x, W):
        """2-D convolution with stride 1 and SAME padding."""
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2x2(self, x):
        """2x2 max pooling with stride 2 and SAME padding."""
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    def data_augmentation(self, label):
        """Generate rotated copies of the first ``self.augmentation_count`` images.

        ``label`` is expected to be a 2-column array: file names and integer
        class labels (TODO confirm against the caller).
        Returns (augmented_images, one_hot_labels).
        """
        self.augment_data = True
        image_file_names = label[0:self.augmentation_count, 0]
        expected_output = label[0:self.augmentation_count, 1]
        augmented_imgs = self.load_images(self.augmentation_count, image_file_names)
        # Bug fix: np.int was removed from NumPy (it was just the builtin
        # int); astype(int) is the forward-compatible equivalent.
        b = expected_output.astype(int).clip(min=0)
        augmented_labels = np.eye(2)[b]  # one-hot encode the 2 classes
        return augmented_imgs, augmented_labels
return augmented_imgs, augmented_labels
| [
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.argmax",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"tensorflow.constant",
"PIL.Image.open",
"tensorflow.placeholder",
"tensorflow.multiply",
"tensorflow.Variable",
"tensorflow.nn.max_pool",
"tensorflow.nn.conv2d",
... | [((335, 408), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.image_height * self.image_height]'], {}), '(tf.float32, [None, self.image_height * self.image_height])\n', (349, 408), True, 'import tensorflow as tf\n'), ((467, 504), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2]'], {}), '(tf.float32, [None, 2])\n', (481, 504), True, 'import tensorflow as tf\n'), ((525, 551), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (539, 551), True, 'import tensorflow as tf\n'), ((670, 712), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (692, 712), True, 'import tensorflow as tf\n'), ((1810, 1836), 'tensorflow.multiply', 'tf.multiply', (['accuracy', '(100)'], {}), '(accuracy, 100)\n', (1821, 1836), True, 'import tensorflow as tf\n'), ((1849, 1872), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1870, 1872), True, 'import tensorflow as tf\n'), ((3147, 3185), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (3166, 3185), True, 'import tensorflow as tf\n'), ((3196, 3216), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (3207, 3216), True, 'import tensorflow as tf\n'), ((3266, 3295), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (3277, 3295), True, 'import tensorflow as tf\n'), ((3306, 3326), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (3317, 3326), True, 'import tensorflow as tf\n'), ((3365, 3421), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (3377, 3421), True, 'import tensorflow as tf\n'), ((3463, 3538), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': 
'"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (3477, 3538), True, 'import tensorflow as tf\n'), ((986, 1028), 'PIL.Image.open', 'Image.open', (['(self.file_path + file_names[i])'], {}), '(self.file_path + file_names[i])\n', (996, 1028), False, 'from PIL import Image\n'), ((1496, 1581), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'self.y_labels', 'logits': 'prediction'}), '(labels=self.y_labels, logits=prediction\n )\n', (1535, 1581), True, 'import tensorflow as tf\n'), ((1671, 1695), 'tensorflow.argmax', 'tf.argmax', (['prediction', '(1)'], {}), '(prediction, 1)\n', (1680, 1695), True, 'import tensorflow as tf\n'), ((1697, 1724), 'tensorflow.argmax', 'tf.argmax', (['self.y_labels', '(1)'], {}), '(self.y_labels, 1)\n', (1706, 1724), True, 'import tensorflow as tf\n'), ((1755, 1794), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (1762, 1794), True, 'import tensorflow as tf\n'), ((3876, 3885), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3882, 3885), True, 'import numpy as np\n'), ((1876, 1909), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1907, 1909), True, 'import tensorflow as tf\n'), ((1304, 1326), 'numpy.asarray', 'np.asarray', (['resized_im'], {}), '(resized_im)\n', (1314, 1326), True, 'import numpy as np\n')] |
"""
Contains class Image for manipulations of grey-scale images.
# Author: <NAME> (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import division
from builtins import zip
from builtins import str
from past.utils import old_div
__version__ = "$Revision$"
import logging
import numpy
import scipy
import scipy.ndimage as ndimage
from pyto.core.image import Image as BaseImage
class Image(BaseImage):
    """
    Manipulation of greyscale images.
    """

    #############################################################
    #
    # Initialization
    #
    ############################################################

    def __init__(self, data=None):
        """
        Saves data (image).

        Argument:
          - data: (ndarray) image
        """
        super(Image, self).__init__(data)

    #############################################################
    #
    # Image manipulations
    #
    ############################################################

    def limit(self, limit, mode, size=3):
        """
        Limits image data.

        Elements of self.data that are outside the limits (see below) are
        replaced by corrected values. A corrected value is obtained as a mean
        value of within-limits elements of a subarray of size given by argument
        size centered at the element to be corrected. For elements near the
        edges the subarray is shifted so that it does it still has the required
        size. If size is even the subarray is shifted towards higher indices
        in respect to the element to be corrected.

        The low and high limit values are determined from limit and mode. If
        mode is 'abs', the limiting value(s) is (are) given in argument limit.
        If mode is is 'std', the limits are set to limit times image std
        away from the image mean.

        If limit is a single value, it is ised for both low and ligh limits.
        Alternatively, if a list of two elements is given for limits, it
        specifies the low and high limits.

        Arguments:
          - limit: used to determine the upper and the lower limits on image
          values
          - mode: mode used for the determination of the limits
          - size: size of the subarray used to determine the corrected values

        Updates self.data, that is overwerites the uncorrected image.
        """
        # Note: only marginal speedup with data.squeeze()

        # determine low and high limits
        if mode == 'std':

            # limits expressed as std factors
            mean = self.data.mean()
            std = self.data.std()
            if isinstance(limit, list) or isinstance(limit, tuple):
                # separate factors for the low and high side
                low_limit = mean - limit[0] * std
                high_limit = mean + limit[1] * std
            else:
                # one symmetric factor
                low_limit = mean - limit * std
                high_limit = mean + limit * std

        elif mode == 'abs':

            # absolute limits
            if isinstance(limit, list) or isinstance(limit, tuple):
                low_limit, high_limit = limit
            else:
                raise TypeError("Argument limit: " + str(limit) + " has to be "\
                                    + "a list or a tuple in mode: " + mode + ".")

        else:
            raise ValueError("Mode: " + mode + " is not recognized.")

        # find array elements that are outside of the limits
        bad = numpy.zeros(shape=self.data.shape, dtype='bool')
        if low_limit is not None:
            bad = bad | (self.data < low_limit)
        if high_limit is not None:
            bad = bad | (self.data > high_limit)

        # correct the outsiders
        n_corr = 0
        n_uncorr = 0
        new = self.data.copy()
        bad_ind = bad.nonzero() # much faster than ndenumerate
        for ind in zip(*bad_ind): # followed by if val != 0

            # find index limits so they don't extend outside data;
            # window of extent `size` roughly centered on the bad element
            aind = numpy.array(ind)
            shape = numpy.array(self.data.shape)
            low_ind = numpy.maximum(aind - old_div((size - 1), 2), 0)
            high_ind = numpy.minimum(low_ind + size, shape)

            # enlarge limits on edges (needed?) -- keep the window at the
            # full `size` by shifting it back inside the array
            correction = size - (high_ind - low_ind)
            low_ind = numpy.where(high_ind < shape,
                                low_ind, low_ind - correction)
            high_ind = numpy.where(low_ind > 0, high_ind,
                                 high_ind + correction)

            # make index limit slices
            sl = [slice(l, h) for (l, h) in zip(low_ind, high_ind)]

            # correct data; the window must contain at least one
            # within-limits (bad == False) element to average over
            if numpy.logical_not(bad[tuple(sl)]).sum() <= 0:
                logging.debug("Element " + str(ind) +
                              " could not be corrected.")
                n_uncorr += 1
            else:
                # labels=bad with index=0 averages only the good elements
                mean = ndimage.mean(
                    self.data[tuple(sl)], bad[tuple(sl)], index=0)
                new[ind] = mean
                n_corr += 1

        # update self.data
        self.data = new

        # log
        if n_corr > 0:
            logging.info("Corrected " + str(n_corr) + " image elements.")
        if n_uncorr > 0:
            logging.info("Could not correct " + str(n_uncorr)
                         + " image elements.")

    def getStats(self, apixel=None, counte=None):
        """
        Calculates basic statistics for the data and stores them as
        attributes (mean, min, max, var, std).

        If args apixel and counte are specified also calculates mean electrons
        per A^2 (stored as self.mean_ea).
        """
        self.mean = self.data.mean()
        self.min = self.data.min()
        self.max = self.data.max()
        self.var = self.data.var()
        self.std = self.data.std()

        # calculate mean electrons per A^2; counte presumably converts
        # greyscale counts to electrons -- TODO confirm units
        if (apixel is not None) and (counte is not None):
            conversion = apixel * apixel * counte
            self.mean_ea = self.mean / float(conversion)

        return
| [
"numpy.minimum",
"past.utils.old_div",
"numpy.zeros",
"numpy.where",
"numpy.array",
"builtins.zip",
"builtins.str"
] | [((3494, 3542), 'numpy.zeros', 'numpy.zeros', ([], {'shape': 'self.data.shape', 'dtype': '"""bool"""'}), "(shape=self.data.shape, dtype='bool')\n", (3505, 3542), False, 'import numpy\n'), ((3906, 3919), 'builtins.zip', 'zip', (['*bad_ind'], {}), '(*bad_ind)\n', (3909, 3919), False, 'from builtins import zip\n'), ((4041, 4057), 'numpy.array', 'numpy.array', (['ind'], {}), '(ind)\n', (4052, 4057), False, 'import numpy\n'), ((4078, 4106), 'numpy.array', 'numpy.array', (['self.data.shape'], {}), '(self.data.shape)\n', (4089, 4106), False, 'import numpy\n'), ((4200, 4236), 'numpy.minimum', 'numpy.minimum', (['(low_ind + size)', 'shape'], {}), '(low_ind + size, shape)\n', (4213, 4236), False, 'import numpy\n'), ((4361, 4421), 'numpy.where', 'numpy.where', (['(high_ind < shape)', 'low_ind', '(low_ind - correction)'], {}), '(high_ind < shape, low_ind, low_ind - correction)\n', (4372, 4421), False, 'import numpy\n'), ((4480, 4537), 'numpy.where', 'numpy.where', (['(low_ind > 0)', 'high_ind', '(high_ind + correction)'], {}), '(low_ind > 0, high_ind, high_ind + correction)\n', (4491, 4537), False, 'import numpy\n'), ((4150, 4170), 'past.utils.old_div', 'old_div', (['(size - 1)', '(2)'], {}), '(size - 1, 2)\n', (4157, 4170), False, 'from past.utils import old_div\n'), ((4657, 4679), 'builtins.zip', 'zip', (['low_ind', 'high_ind'], {}), '(low_ind, high_ind)\n', (4660, 4679), False, 'from builtins import zip\n'), ((5225, 5236), 'builtins.str', 'str', (['n_corr'], {}), '(n_corr)\n', (5228, 5236), False, 'from builtins import str\n'), ((5332, 5345), 'builtins.str', 'str', (['n_uncorr'], {}), '(n_uncorr)\n', (5335, 5345), False, 'from builtins import str\n'), ((4813, 4821), 'builtins.str', 'str', (['ind'], {}), '(ind)\n', (4816, 4821), False, 'from builtins import str\n'), ((3211, 3221), 'builtins.str', 'str', (['limit'], {}), '(limit)\n', (3214, 3221), False, 'from builtins import str\n')] |
from src.ner.datareader import y1_set, y2_set
from src.ner.baseline_loader import entity_list
import torch
import torch.nn as nn
from tqdm import tqdm
import numpy as np
import logging
logger = logging.getLogger()  # root logger

from src.conll2002_metrics import *

# Positions of the generic tags inside the binary (B/I/O) entity tag set;
# used to map entity-wise predictions back to full y2_set labels.
O_INDEX = y1_set.index("O")
B_INDEX = y1_set.index("B-Entity")
I_INDEX = y1_set.index("I-Entity")
class BaselineTrainer(object):
    """Trainer for the entity-wise binary-slot NER baseline.

    The tagger predicts, for every entity type independently, a B/I/O tag
    per token; predictions are merged back into the full label set before
    scoring.  Early stopping is driven by dev-set F1.
    """

    def __init__(self, params, ner_tagger):
        # params is expected to expose .lr and .early_stop
        self.ner_tagger = ner_tagger
        self.lr = params.lr
        self.optimizer = torch.optim.Adam(self.ner_tagger.parameters(), lr=self.lr)
        self.loss_fn = nn.CrossEntropyLoss()
        self.early_stop = params.early_stop
        self.no_improvement_num = 0
        self.best_f1 = 0
        self.stop_training_flag = False
    
    def train_step(self, X, lengths, y):
        """Run one optimizer step over a batch; returns the mean per-example loss."""
        self.ner_tagger.train()
        predictions_for_batch = self.ner_tagger(X) # (bsz, seq_len, num_entity, num_binslot)
        
        loss_list = []
        self.optimizer.zero_grad()
        for i, length in enumerate(lengths):
            # Trim padding, then flatten (entity, position) pairs so the
            # cross entropy sees one 3-way (B/I/O) decision per pair.
            predictions = predictions_for_batch[i,:length,:,:] # (seq_len, num_entity, num_binslot)
            predictions = predictions.transpose(0,1) # (num_entity, seq_len, num_binslot)
            golds = torch.LongTensor(y[i]).cuda() # (num_entity, seq_len)

            predictions = predictions.contiguous()
            golds = golds.contiguous()
            predictions = predictions.view(predictions.size()[0]*predictions.size()[1], 3)
            golds = golds.view(golds.size()[0]*golds.size()[1])
            
            loss = self.loss_fn(predictions, golds)
            # backward() is called once per example while sharing one forward
            # graph, hence retain_graph=True; gradients accumulate until the
            # single optimizer step below.
            loss.backward(retain_graph=True)
            loss_list.append(loss.item())

        self.optimizer.step()

        return np.mean(loss_list)

    def convert_entity_based_preds_to_original_preds(self, preds):
        """
        Inputs:
            preds: predictions from the baseline model (num_entity, seq_len)
        Outputs:
            final_predictions: final predictions (seq_len)
        """
        # Positions predicted O (index 0) for every entity stay O; each
        # non-zero (entity, position) entry is rewritten as B-/I-<entity>.
        # NOTE(review): if several entities fire on the same position, the
        # later entity in entity_list silently overwrites the earlier one.
        nonzero_pois = torch.nonzero(preds) # a list of 2-d positions
        final_predictions = torch.LongTensor(preds.size()[1]).fill_(O_INDEX)
        for poi in nonzero_pois:
            entity_name_index = poi[0]
            length_poi = poi[1]
            pred = preds[poi[0]][poi[1]].item()
            entity_name = entity_list[entity_name_index]
            entity_name = "B-" + entity_name if pred == B_INDEX else "I-" + entity_name
            final_predictions[length_poi] = y2_set.index(entity_name)
        
        return final_predictions

    def evaluate(self, dataloader, istestset=False):
        """Score the dataloader with CoNLL-2002 F1.

        On the dev set (istestset=False) also updates early-stopping state.
        Returns (f1_score, stop_training_flag).
        """
        self.ner_tagger.eval()
        preds, golds = [], []
        pbar = tqdm(enumerate(dataloader), total=len(dataloader))
        # NOTE(review): the inner loop reuses loop variable `i`, shadowing
        # the batch index -- harmless here but easy to trip over.
        for i, (X, lengths, y) in pbar:
            X, lengths = X.cuda(), lengths.cuda()
            golds.extend(y)
            predictions_for_batch = self.ner_tagger(X)
            for i, length in enumerate(lengths):
                entity_based_preds = predictions_for_batch[i,:length,:,:] # (seq_len, num_entity, num_binslot)
                entity_based_preds = entity_based_preds.transpose(0,1) # (num_entity, seq_len, num_binslot)
                entity_based_preds = torch.argmax(entity_based_preds, dim=-1) # convert (num_entity, seq_len, num_binslot) ==> (num_entity, seq_len)
                final_predictions = self.convert_entity_based_preds_to_original_preds(entity_based_preds)
                preds.extend(final_predictions)

        # labels
        golds = np.concatenate(golds, axis=0)
        golds = list(golds)
        # final predictions
        preds = list(preds)

        # Build "token pred gold" lines in the CoNLL evaluation format.
        lines = []
        for pred, gold in zip(preds, golds):
            pred = pred.item()
            pred = y2_set[pred]
            gold = y2_set[gold]
            lines.append("w" + " " + pred + " " + gold)
        result = conll2002_measure(lines)
        f1_score = result["fb1"]
        
        if istestset == False:  # dev set: early-stopping bookkeeping
            if f1_score > self.best_f1:
                self.best_f1 = f1_score
                self.no_improvement_num = 0
            else:
                self.no_improvement_num += 1
                logger.info("No better model found (%d/%d)" % (self.no_improvement_num, self.early_stop))
            if self.no_improvement_num >= self.early_stop:
                self.stop_training_flag = True
        
        return f1_score, self.stop_training_flag
class BiLSTMCRFTrainer(object):
    """Trainer for a BiLSTM-CRF NER tagger with early stopping on dev F1."""

    def __init__(self, params, ner_tagger):
        # params is expected to expose .lr and .early_stop
        self.ner_tagger = ner_tagger
        self.lr = params.lr
        self.optimizer = torch.optim.Adam(self.ner_tagger.parameters(), lr=self.lr)
        self.loss_fn = nn.CrossEntropyLoss()
        self.early_stop = params.early_stop
        self.no_improvement_num = 0
        self.best_f1 = 0
        self.stop_training_flag = False

    def train_step(self, X, lengths, y):
        """Run one optimization step; returns the scalar CRF loss."""
        self.ner_tagger.train()
        emissions = self.ner_tagger(X)

        crf_loss = self.ner_tagger.crf_loss(emissions, lengths, y)
        self.optimizer.zero_grad()
        crf_loss.backward()
        self.optimizer.step()

        return crf_loss.item()

    def evaluate(self, dataloader, istestset=False):
        """Decode the dataloader and compute CoNLL-2002 F1.

        On the dev set (istestset=False) also updates the early-stopping
        counters.  Returns (f1, stop_training_flag).
        """
        self.ner_tagger.eval()
        all_preds, all_golds = [], []
        pbar = tqdm(enumerate(dataloader), total=len(dataloader))
        for _, (X, lengths, y) in pbar:
            all_golds.extend(y)
            X, lengths = X.cuda(), lengths.cuda()
            emissions = self.ner_tagger(X)
            all_preds.extend(self.ner_tagger.crf_decode(emissions, lengths))

        flat_preds = list(np.concatenate(all_preds, axis=0))
        flat_golds = list(np.concatenate(all_golds, axis=0))

        # "token pred gold" lines in the CoNLL evaluation format.
        lines = [
            "w" + " " + y2_set[p] + " " + y2_set[g]
            for p, g in zip(flat_preds, flat_golds)
        ]
        f1 = conll2002_measure(lines)["fb1"]

        if not istestset:  # dev set: early-stopping bookkeeping
            if f1 > self.best_f1:
                self.best_f1 = f1
                self.no_improvement_num = 0
            else:
                self.no_improvement_num += 1
                logger.info("No better model found (%d/%d)" % (self.no_improvement_num, self.early_stop))
            if self.no_improvement_num >= self.early_stop:
                self.stop_training_flag = True

        return f1, self.stop_training_flag
| [
"numpy.concatenate",
"torch.LongTensor",
"torch.argmax",
"torch.nn.CrossEntropyLoss",
"src.ner.datareader.y1_set.index",
"torch.nonzero",
"numpy.mean",
"src.ner.datareader.y2_set.index",
"logging.getLogger"
] | [((196, 215), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (213, 215), False, 'import logging\n'), ((264, 281), 'src.ner.datareader.y1_set.index', 'y1_set.index', (['"""O"""'], {}), "('O')\n", (276, 281), False, 'from src.ner.datareader import y1_set, y2_set\n'), ((292, 316), 'src.ner.datareader.y1_set.index', 'y1_set.index', (['"""B-Entity"""'], {}), "('B-Entity')\n", (304, 316), False, 'from src.ner.datareader import y1_set, y2_set\n'), ((327, 351), 'src.ner.datareader.y1_set.index', 'y1_set.index', (['"""I-Entity"""'], {}), "('I-Entity')\n", (339, 351), False, 'from src.ner.datareader import y1_set, y2_set\n'), ((600, 621), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (619, 621), True, 'import torch.nn as nn\n'), ((1781, 1799), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (1788, 1799), True, 'import numpy as np\n'), ((2083, 2103), 'torch.nonzero', 'torch.nonzero', (['preds'], {}), '(preds)\n', (2096, 2103), False, 'import torch\n'), ((3610, 3639), 'numpy.concatenate', 'np.concatenate', (['golds'], {'axis': '(0)'}), '(golds, axis=0)\n', (3624, 3639), True, 'import numpy as np\n'), ((4806, 4827), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4825, 4827), True, 'import torch.nn as nn\n'), ((5780, 5809), 'numpy.concatenate', 'np.concatenate', (['preds'], {'axis': '(0)'}), '(preds, axis=0)\n', (5794, 5809), True, 'import numpy as np\n'), ((5854, 5883), 'numpy.concatenate', 'np.concatenate', (['golds'], {'axis': '(0)'}), '(golds, axis=0)\n', (5868, 5883), True, 'import numpy as np\n'), ((2563, 2588), 'src.ner.datareader.y2_set.index', 'y2_set.index', (['entity_name'], {}), '(entity_name)\n', (2575, 2588), False, 'from src.ner.datareader import y1_set, y2_set\n'), ((3285, 3325), 'torch.argmax', 'torch.argmax', (['entity_based_preds'], {'dim': '(-1)'}), '(entity_based_preds, dim=-1)\n', (3297, 3325), False, 'import torch\n'), ((1256, 1278), 'torch.LongTensor', 'torch.LongTensor', 
(['y[i]'], {}), '(y[i])\n', (1272, 1278), False, 'import torch\n')] |
import os.path as osp
import numpy as np
import random
import matplotlib.pyplot as plt
import torchvision
from torch.utils import data
from PIL import Image
Image.MAX_IMAGE_PIXELS = None # Disable DecompressionBombError
import os
class imagenetDataset(data.Dataset):
    """Folder dataset of JPEG images yielding (bgr_norm, rgb, size) tuples."""

    def __init__(self, root='/home/gabriel/data/imagenet', max_iters=None, crop_size=(256, 256), mean=(128, 128, 128), scale=True, mirror=True):
        self.root = root
        self.crop_size = crop_size
        valid_extensions = ["JPEG"]
        self.img_ids = [fn for fn in os.listdir(root) if fn.split(".")[-1] in valid_extensions]
        if max_iters is not None:
            # Repeat the file list until it covers max_iters samples.
            repeat = int(np.ceil(float(max_iters) / len(self.img_ids)))
            self.img_ids = self.img_ids * repeat
        self.scale = scale
        self.mean = mean
        self.files = [{"img": osp.join(self.root, name), "name": name} for name in self.img_ids]

    def __len__(self):
        return len(self.files)

    def __scale__(self):
        """Randomly grow (10%) or shrink (20%) the crop when scaling is on."""
        if not self.scale:
            return self.crop_size
        r = random.random()
        if r > 0.7:
            return (int(self.crop_size[0] * 1.1), int(self.crop_size[1] * 1.1))
        if r < 0.3:
            return (int(self.crop_size[0] * 0.8), int(self.crop_size[1] * 0.8))
        return self.crop_size

    def __getitem__(self, index):
        datafiles = self.files[index]
        cropsize = self.__scale__()
        try:
            pil_img = Image.open(datafiles["img"]).convert('RGB')
            name = datafiles["name"]
            # resize to the (possibly jittered) crop size
            pil_img = pil_img.resize(cropsize, Image.BICUBIC)
            image = np.asarray(pil_img, np.float32)
            size = image.shape
            image_rgb = image
            image = image[:, :, ::-1]  # RGB -> BGR (a view of the same buffer)
            # In-place subtraction: because `image` is a view, this also
            # mean-subtracts the shared buffer that image_rgb refers to.
            image -= self.mean
            image_rgb = image_rgb.transpose((2, 0, 1))  # HWC -> CHW
            image = image.transpose((2, 0, 1))
        except Exception:
            # Fall back to a neighbouring index on unreadable files.
            index = index - 1 if index > 0 else index + 1
            print('Exception')
            print(index)
            print(datafiles["name"])
            return self.__getitem__(index)
        return image.copy(), image_rgb.copy(), np.array(size)
class GTA5DataSet(data.Dataset):
    """GTA5 image dataset listed by a text file of image names.

    NOTE(review): id_to_trainid is defined but never used, and no label
    files are loaded -- presumably label handling was planned but never
    implemented; confirm against callers before adding it.
    """

    def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321), mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.crop_size = crop_size
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.is_mirror = mirror
        self.img_ids = [i_id.strip() for i_id in open(list_path)]
        if not max_iters==None:
            # Repeat the file list until it covers max_iters samples.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        # Cityscapes-style label-id -> train-id mapping (currently unused).
        self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5,
                              19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12,
                              26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18}
        for name in self.img_ids:
            img_file = osp.join(self.root, "images/%s" % name)
            self.files.append({
                "img": img_file,
                "name": name
            })

    def __len__(self):
        return len(self.files)

    def __scale__(self):
        """Randomly grow (10%) or shrink (20%) the crop when scaling is on."""
        cropsize = self.crop_size
        if self.scale:
            r = random.random()
            if r > 0.7:
                cropsize = (int(self.crop_size[0] * 1.1), int(self.crop_size[1] * 1.1))
            elif r < 0.3:
                cropsize = (int(self.crop_size[0] * 0.8), int(self.crop_size[1] * 0.8))
        return cropsize

    def __getitem__(self, index):
        """Load, resize, BGR-convert, mean-subtract and (maybe) mirror an image.

        Returns (image_chw, size, size, name); size is returned twice to
        keep the original tuple arity.
        """
        datafiles = self.files[index]
        cropsize = self.__scale__()
        try:
            image = Image.open(datafiles["img"]).convert('RGB')
            name = datafiles["name"]
            image = image.resize(cropsize, Image.BICUBIC)
            image = np.asarray(image, np.float32)
            size = image.shape
            image = image[:, :, ::-1]  # RGB -> BGR
            image -= self.mean
            image = image.transpose((2, 0, 1))  # HWC -> CHW
            # Bug fix: the original read an undefined variable `label` here
            # (size_l = label.shape), which always raised NameError; the
            # broad except then recursed forever.  The broken references
            # are removed, keeping the returned tuple shape unchanged.
            if self.is_mirror and random.random() < 0.5:
                # Horizontal flip along the width axis.
                idx = [i for i in range(size[1] - 1, -1, -1)]
                image = np.take(image, idx, axis = 2)
        except Exception as e:
            # Fall back to a neighbouring sample on unreadable files.
            index = index - 1 if index > 0 else index + 1
            return self.__getitem__(index)
        return image.copy(), np.array(size), np.array(size), name
if __name__ == '__main__':
    # Smoke test: visualize the first batch of the dataset.
    # NOTE(review): imagenetDataset.__init__ takes no 'is_transform'
    # argument -- this call raises TypeError as written; confirm the
    # intended constructor signature.
    dst = imagenetDataset("./data", is_transform=True)
    trainloader = data.DataLoader(dst, batch_size=4)
    for i, data in enumerate(trainloader):
        # NOTE(review): __getitem__ yields 3 items (image, image_rgb, size);
        # this 2-way unpack will fail -- verify the expected batch structure.
        imgs, labels = data
        if i == 0:
            img = torchvision.utils.make_grid(imgs).numpy()
            img = np.transpose(img, (1, 2, 0))  # CHW -> HWC for matplotlib
            img = img[:, :, ::-1]  # BGR -> RGB
            plt.imshow(img)
            plt.show()
| [
"matplotlib.pyplot.show",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.imshow",
"numpy.asarray",
"numpy.transpose",
"PIL.Image.open",
"torchvision.utils.make_grid",
"random.random",
"numpy.array",
"numpy.take",
"os.path.join",
"os.listdir"
] | [((5734, 5768), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dst'], {'batch_size': '(4)'}), '(dst, batch_size=4)\n', (5749, 5768), False, 'from torch.utils import data\n'), ((974, 999), 'os.path.join', 'osp.join', (['self.root', 'name'], {}), '(self.root, name)\n', (982, 999), True, 'import os.path as osp\n'), ((1356, 1371), 'random.random', 'random.random', ([], {}), '()\n', (1369, 1371), False, 'import random\n'), ((1990, 2019), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (2000, 2019), True, 'import numpy as np\n'), ((2860, 2874), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (2868, 2874), True, 'import numpy as np\n'), ((3910, 3949), 'os.path.join', 'osp.join', (['self.root', "('images/%s' % name)"], {}), "(self.root, 'images/%s' % name)\n", (3918, 3949), True, 'import os.path as osp\n'), ((4305, 4320), 'random.random', 'random.random', ([], {}), '()\n', (4318, 4320), False, 'import random\n'), ((4929, 4958), 'numpy.asarray', 'np.asarray', (['image', 'np.float32'], {}), '(image, np.float32)\n', (4939, 4958), True, 'import numpy as np\n'), ((5595, 5609), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (5603, 5609), True, 'import numpy as np\n'), ((5611, 5625), 'numpy.array', 'np.array', (['size'], {}), '(size)\n', (5619, 5625), True, 'import numpy as np\n'), ((5937, 5965), 'numpy.transpose', 'np.transpose', (['img', '(1, 2, 0)'], {}), '(img, (1, 2, 0))\n', (5949, 5965), True, 'import numpy as np\n'), ((6012, 6027), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (6022, 6027), True, 'import matplotlib.pyplot as plt\n'), ((6040, 6050), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6048, 6050), True, 'import matplotlib.pyplot as plt\n'), ((653, 669), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (663, 669), False, 'import os\n'), ((5383, 5410), 'numpy.take', 'np.take', (['image', 'idx'], {'axis': '(2)'}), '(image, idx, axis=2)\n', (5390, 5410), True, 
'import numpy as np\n'), ((1771, 1799), 'PIL.Image.open', 'Image.open', (["datafiles['img']"], {}), "(datafiles['img'])\n", (1781, 1799), False, 'from PIL import Image\n'), ((4723, 4751), 'PIL.Image.open', 'Image.open', (["datafiles['img']"], {}), "(datafiles['img'])\n", (4733, 4751), False, 'from PIL import Image\n'), ((5208, 5223), 'random.random', 'random.random', ([], {}), '()\n', (5221, 5223), False, 'import random\n'), ((5877, 5910), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['imgs'], {}), '(imgs)\n', (5904, 5910), False, 'import torchvision\n')] |
#!/usr/bin/env python
"""
Example of running nessai with bilby on a gravitational wave likelihood. This
examples includes all 15 parameters for CBC and should take around 2 hours to
run.
Based on the Bilby example: https://git.ligo.org/lscsoft/bilby
"""
import bilby
import numpy as np
# All sampler output (logs, plots, result files) goes under this directory.
outdir = './outdir/'
label = 'full_gw_example'
bilby.core.utils.setup_logger(outdir=outdir, label=label, log_level='WARNING')
# Analysis segment: 4 s of data sampled at 2048 Hz.
duration = 4.
sampling_frequency = 2048.
# Fix the numpy RNG so the injected noise realisation is reproducible.
np.random.seed(151226)
# Use an injection that is similar to GW150914
injection_parameters = dict(
    total_mass=66., mass_ratio=0.9, a_1=0.4, a_2=0.3, tilt_1=0.5, tilt_2=1.0,
    phi_12=1.7, phi_jl=0.3, luminosity_distance=2000, theta_jn=0.4, psi=2.659,
    phase=1.3, geocent_time=1126259642.413, ra=1.375, dec=-1.2108
)
waveform_arguments = dict(
    waveform_approximant='IMRPhenomPv2',
    reference_frequency=50.
)
# Create the waveform_generator
waveform_generator = bilby.gw.waveform_generator.WaveformGenerator(
    sampling_frequency=sampling_frequency,
    duration=duration,
    frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
    parameter_conversion=(bilby.gw.conversion
                          .convert_to_lal_binary_black_hole_parameters),
    waveform_arguments=waveform_arguments
)
# Set up interferometers: Hanford, Livingston and Virgo with design PSDs.
ifos = bilby.gw.detector.InterferometerList(['H1', 'L1', 'V1'])
# Data segment starts 3 s before the injected coalescence time.
ifos.set_strain_data_from_power_spectral_densities(
    sampling_frequency=sampling_frequency, duration=duration,
    start_time=injection_parameters['geocent_time'] - 3
)
ifos.inject_signal(
    waveform_generator=waveform_generator, parameters=injection_parameters
)
# Set up prior: default BBH priors with a narrow (+/- 0.1 s) window on
# coalescence time around the injected value.
priors = bilby.gw.prior.BBHPriorDict()
priors['geocent_time'] = bilby.core.prior.Uniform(
    minimum=injection_parameters['geocent_time'] - 0.1,
    maximum=injection_parameters['geocent_time'] + 0.1,
    name='geocent_time', latex_label='$t_c$', unit='$s$'
)
# Initialise the likelihood
# nessai supports the marginalisation included in bilby
likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
    interferometers=ifos, waveform_generator=waveform_generator,
    priors=priors, phase_marginalization=True, distance_marginalization=True,
)
# Run sampler
# The `flow_class` should be set to `GWFlowProposal` for GW PE. This includes
# specific default reparameterisations for certain parameters. For example,
# it knows that theta_jn is angle with a sine prior.
result = bilby.core.sampler.run_sampler(
    likelihood=likelihood,
    priors=priors,
    outdir=outdir,
    injection_parameters=injection_parameters,
    label=label,
    conversion_function=bilby.gw.conversion.generate_all_bbh_parameters,
    flow_class='GWFlowProposal',
    sampler='nessai',
    resume=False,
    plot=True,
    nlive=2000,
    maximum_uninformed=4000,
    seed=150914,
    analytic_priors=True,  # Bilby priors can be sampled from directly
    flow_config=dict(model_config=dict(n_transforms=6)),
    max_threads=3,
    n_pool=2,
)
# Produce corner plots
result.plot_corner()
| [
"bilby.gw.likelihood.GravitationalWaveTransient",
"numpy.random.seed",
"bilby.core.utils.setup_logger",
"bilby.core.prior.Uniform",
"bilby.gw.waveform_generator.WaveformGenerator",
"bilby.gw.prior.BBHPriorDict",
"bilby.gw.detector.InterferometerList"
] | [((337, 415), 'bilby.core.utils.setup_logger', 'bilby.core.utils.setup_logger', ([], {'outdir': 'outdir', 'label': 'label', 'log_level': '"""WARNING"""'}), "(outdir=outdir, label=label, log_level='WARNING')\n", (366, 415), False, 'import bilby\n'), ((459, 481), 'numpy.random.seed', 'np.random.seed', (['(151226)'], {}), '(151226)\n', (473, 481), True, 'import numpy as np\n'), ((937, 1253), 'bilby.gw.waveform_generator.WaveformGenerator', 'bilby.gw.waveform_generator.WaveformGenerator', ([], {'sampling_frequency': 'sampling_frequency', 'duration': 'duration', 'frequency_domain_source_model': 'bilby.gw.source.lal_binary_black_hole', 'parameter_conversion': 'bilby.gw.conversion.convert_to_lal_binary_black_hole_parameters', 'waveform_arguments': 'waveform_arguments'}), '(sampling_frequency=\n sampling_frequency, duration=duration, frequency_domain_source_model=\n bilby.gw.source.lal_binary_black_hole, parameter_conversion=bilby.gw.\n conversion.convert_to_lal_binary_black_hole_parameters,\n waveform_arguments=waveform_arguments)\n', (982, 1253), False, 'import bilby\n'), ((1319, 1375), 'bilby.gw.detector.InterferometerList', 'bilby.gw.detector.InterferometerList', (["['H1', 'L1', 'V1']"], {}), "(['H1', 'L1', 'V1'])\n", (1355, 1375), False, 'import bilby\n'), ((1670, 1699), 'bilby.gw.prior.BBHPriorDict', 'bilby.gw.prior.BBHPriorDict', ([], {}), '()\n', (1697, 1699), False, 'import bilby\n'), ((1725, 1915), 'bilby.core.prior.Uniform', 'bilby.core.prior.Uniform', ([], {'minimum': "(injection_parameters['geocent_time'] - 0.1)", 'maximum': "(injection_parameters['geocent_time'] + 0.1)", 'name': '"""geocent_time"""', 'latex_label': '"""$t_c$"""', 'unit': '"""$s$"""'}), "(minimum=injection_parameters['geocent_time'] - 0.1,\n maximum=injection_parameters['geocent_time'] + 0.1, name='geocent_time',\n latex_label='$t_c$', unit='$s$')\n", (1749, 1915), False, 'import bilby\n'), ((2020, 2209), 'bilby.gw.likelihood.GravitationalWaveTransient', 
'bilby.gw.likelihood.GravitationalWaveTransient', ([], {'interferometers': 'ifos', 'waveform_generator': 'waveform_generator', 'priors': 'priors', 'phase_marginalization': '(True)', 'distance_marginalization': '(True)'}), '(interferometers=ifos,\n waveform_generator=waveform_generator, priors=priors,\n phase_marginalization=True, distance_marginalization=True)\n', (2066, 2209), False, 'import bilby\n')] |
import numpy as np
def crop_and_pad(img, r_offset, c_offset):
    """Crop `r_offset` rows off the bottom and `c_offset` columns off the
    right of `img`, then zero-pad back to the original shape (rows padded at
    the bottom, columns padded on the left).

    NOTE(review): the net effect is asymmetric — the bottom rows are zeroed
    in place while the columns are shifted right by `c_offset`; confirm this
    asymmetry is intended by the caller.
    """
    rows_kept = img.shape[0] - r_offset
    cols_kept = img.shape[1] - c_offset
    cropped = img[:rows_kept, :cols_kept]
    return np.pad(cropped, ((0, r_offset), (c_offset, 0)), 'constant',
                  constant_values=0)
def get_cross_correlation(img1, img2, r_offset, c_offset):
    """Return the (unnormalised) cross-correlation of `img1` with `img2`
    shifted by (`r_offset`, `c_offset`) via `crop_and_pad`, i.e. the sum of
    the element-wise product. Zero offsets skip the shift entirely.
    """
    shifted = img2
    if r_offset != 0 or c_offset != 0:
        shifted = crop_and_pad(img2, r_offset, c_offset)
    return np.sum(img1 * shifted)
| [
"numpy.pad",
"numpy.sum"
] | [((141, 215), 'numpy.pad', 'np.pad', (['img', '((0, r_offset), (c_offset, 0))', '"""constant"""'], {'constant_values': '(0)'}), "(img, ((0, r_offset), (c_offset, 0)), 'constant', constant_values=0)\n", (147, 215), True, 'import numpy as np\n'), ((383, 402), 'numpy.sum', 'np.sum', (['(img1 * img2)'], {}), '(img1 * img2)\n', (389, 402), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import glob as gb
import gzip as gz
import numpy as np
import scipy as sp
import scipy.ndimage as snd
import scipy.interpolate as si
from sys import stdout
from tqdm import tqdm
def replacement_gradient(x):
    """First-order numerical gradient of a 1-D array.

    Interior points get the central difference (x[i+1] - x[i-1]) / 2; the
    endpoints get the one-sided forward/backward difference. Equivalent to
    the pre-1.9.0 numpy.gradient edge behaviour (first order at boundaries).
    """
    steps = np.diff(x)
    grad = np.zeros(x.shape)
    grad[:-1] = steps          # forward difference contribution
    grad[1:] += steps          # backward difference contribution
    grad[1:-1] *= 0.5          # interior: average of the two
    return grad
def iso_interp(filenames, metallicity, metal_weight, output_obj,
               bands_dict, bands_ordered, verbose=False):
    """ iso_interp(filenames, metallicity, metal_weight,
                   output_obj, bands_dict, bands_ordered,
                   verbose=False)
    Interpolate isochrones defined in (initial Mass, log(age))
    space onto a grid of points in (log(T_eff), log(g)).
    Parameters
    ----------
    filenames : list(string)
        the files that contain the isochrones
    metallicity : float
        the metallicity of the isochrone
    metal_weight : float
        a probability weight assigned to the metallicity
    output_obj :
        a file output object to save the interpolated
        isochrones to.
    bands_dict : dict
        A dictionary containing the photometric bands
        to be used.
    bands_ordered : list
        The bands in their desired order (can be arbitrary)
    verbose : bool, optional
        Controls how much output to stdout this function produces
    Notes
    -----
    Set up for use with Padova isochrones. May or may not
    work with other isochrone libraries.
    """
    print("[M/H]={0:.3f} , weight = {1:.3f}".format(metallicity,
                                                    metal_weight))
    # NOTE(review): `basestring` exists only in Python 2; this function will
    # raise NameError on Python 3 — confirm the target interpreter.
    if isinstance(filenames, basestring):
        filenames = [filenames]
    # Output grid bounds and resolution in (logT_eff, logg) space.
    logT_min = 3.3
    logT_max = 4.6
    logT_step = 0.025
    logg_min = -1.5
    logg_max = 5.5
    logg_step = 0.025
    # Bin edges are offset by half a step so grid points sit at bin centres.
    logT_edges = np.arange(logT_min-logT_step/2., logT_max+logT_step,
                           logT_step)
    logg_edges = np.arange(logg_min-logg_step/2., logg_max+logg_step,
                           logg_step)
    interp_points_grid = np.mgrid[logT_min:logT_max+logT_step/2:logT_step,
                                  logg_min:logg_max+logg_step/2:logg_step]
    # 2 x (n_T * n_g) array of target (logT, logg) coordinates.
    interp_points = np.array([interp_points_grid[0].flatten(),
                              interp_points_grid[1].flatten()])
    # Find columns required
    iso_data = []
    photom_data = {}
    for filename in filenames:
        # Scan (at most) the first 100 lines for the column-header row,
        # identified by the presence of "M_ini".
        with open(filename, 'r') as f:
            for x in range(100):
                header_line = f.readline().split()
                if "M_ini" in header_line:
                    break
        # Column indices are shifted by -1 because the header row starts
        # with a comment character that does not appear in the data rows.
        Mi_col = header_line.index("M_ini")-1
        logage_col = header_line.index("log(age/yr)")-1
        logTe_col = header_line.index("logTe")-1
        logg_col = header_line.index("logG")-1
        photom_cols = {}
        for band in bands_dict.keys():
            if band in header_line:
                photom_cols[bands_dict[band]] = header_line.index(band) - 1
        # Read in data - need Z, age, Mi, logT, logg, r, i, Ha
        iso_data.append(np.loadtxt(filename, usecols=(Mi_col, logage_col,
                                                      logTe_col, logg_col)))
        print(iso_data[-1].size, iso_data[0].size, filename)
        # All input files must carry the same set of model points.
        assert iso_data[-1].size == iso_data[0].size
        for band in photom_cols.keys():
            photom_data[band] = np.loadtxt(filename,
                                           usecols=[photom_cols[band]])
    # Only the structural columns of the LAST file are kept; photometry is
    # accumulated per band across all files.
    iso_data = iso_data[-1]
    # Set metallicity
    # work out point weight
    # Each model point is weighted by its initial-mass interval times the
    # linear age (10**log_age); mass-interval boundaries are detected by
    # where the mass sequence stops increasing (a new age track begins).
    weights = np.zeros([iso_data.shape[0], 1])
    for i in range(iso_data.shape[0]):
        if i == 0:
            weights[i] = ((iso_data[i+1, 0]-iso_data[i, 0])
                          * np.power(10, iso_data[i, 1]))
        elif i == iso_data.shape[0]-1:
            weights[i] = ((iso_data[i, 0]-iso_data[i-1, 0])
                          * np.power(10, iso_data[i, 1]))
        elif iso_data[i, 0] > iso_data[i+1, 0]:
            weights[i] = ((iso_data[i, 0]-iso_data[i-1, 0])
                          * np.power(10, iso_data[i, 1]))
        elif iso_data[i-1, 0] > iso_data[i, 0]:
            weights[i] = ((iso_data[i+1, 0]-iso_data[i, 0])
                          * np.power(10, iso_data[i, 1]))
        else:
            weights[i] = ((iso_data[i+1, 0]-iso_data[i-1, 0])
                          * np.power(10, iso_data[i, 1]))
    # Index data
    # NOTE(review): `binned_data` is filled but never read afterwards, and
    # it appends the whole `iso_data[:]` array rather than row i — this
    # looks like dead (and buggy) code; candidate for removal.
    binned_data = [[[] for i in range(14)] for i in range(14)]
    for i in range(iso_data.shape[0]):
        try:
            (binned_data[int(np.floor((iso_data[i, 2]-3.3)/0.1))]
             [int(np.floor((iso_data[i, 3]+1.5)/0.5))]
             .append(iso_data[:]))
        except IndexError:
            if verbose:
                print("Error:", iso_data[i, 2], iso_data[i, 3])
    # Re grid in hi-res
    metals = np.zeros(interp_points[0].shape)+metallicity
    logage = np.zeros(interp_points.T.shape[0])
    Mi = np.zeros(interp_points.T.shape[0])
    interp_photom = {}
    for band in photom_data.keys():
        interp_photom[band] = np.zeros(interp_points.T.shape[0])
    for it, point in enumerate(tqdm(interp_points.T)):
        # Select model points within an ellipse around the grid point
        # (logT differences count 4x more heavily than logg before the
        # 0.025 radius cut).
        selection_array = (np.power(iso_data[:, 2]-point[0], 2)*4.
                           + np.power(iso_data[:, 3]-point[1], 2)) < 0.025
        shortlist_iso = iso_data[selection_array]
        shortlist_weights = weights[selection_array]
        shortlist_photom = {}
        for band in photom_data.keys():
            shortlist_photom[band] = photom_data[band][selection_array]
        if shortlist_iso.size == 0:
            # No models near this grid point: mark everything as NaN.
            logage[it] = np.nan
            Mi[it] = np.nan
            for band in bands_dict.values():
                interp_photom[band][it] = np.nan
        else:
            # Inverse-square-distance weights (1e-9 avoids divide-by-zero);
            # weights1 also folds in the mass/age point weight, weights2
            # is purely distance based and used for the photometry.
            short_weights1 = (shortlist_weights[:, -1]/(1E-9
                              + np.power(shortlist_iso[:, 2]-point[0], 2.)*36.
                              + np.power(shortlist_iso[:, 3]-point[1], 2.)))
            short_weights_sum1 = np.sum(short_weights1)
            short_weights2 = (1./(1E-9
                              + np.power(shortlist_iso[:, 2]-point[0], 2.)*36.
                              + np.power(shortlist_iso[:, 3]-point[1], 2.)))
            short_weights_sum2 = np.sum(short_weights2)
            logage[it] = (np.sum(shortlist_iso[:, 1]*short_weights1)
                          / short_weights_sum1)
            Mi[it] = (np.sum(shortlist_iso[:, 0]*short_weights1)
                      / short_weights_sum1)
            for band in bands_dict.values():
                interp_photom[band][it] = (np.sum(shortlist_photom[band]
                                           * short_weights2)
                                           / short_weights_sum2)
    # Per-cell model counts and Jacobian, plus smoothed ("outer") versions
    # computed with a (2, 5) uniform filter scaled by its window size.
    inner_counts, xe, ye = np.histogram2d(iso_data[:, 2], iso_data[:, 3],
                                          bins=[logT_edges, logg_edges])
    outer_counts = (snd.filters.uniform_filter(inner_counts,
                    size=(2, 5))*25+0.1).astype(int)
    inner_Jac, xe, ye = np.histogram2d(iso_data[:, 2], iso_data[:, 3],
                                       bins=[logT_edges, logg_edges],
                                       weights=weights[:].flatten())
    outer_Jac = snd.filters.uniform_filter(inner_Jac, size=(2, 5))*25
    outer_Jac[outer_Jac < 0.01] = 0.
    # Save to file
    output_array = [metals, Mi, logage, interp_points[0],
                    interp_points[1], outer_Jac.flatten()]
    fmt_list = ['%.3f', '%.3f', '%.3f', '%.3f', '%.3f', '%.3e']
    for band in bands_ordered:
        output_array.append(interp_photom[band])
        fmt_list.append('%.3f')
    output_array.extend([inner_counts.flatten(),
                         outer_counts.flatten()])
    fmt_list.extend(['%i', '%i'])
    np.savetxt(output_obj, np.array(output_array).T, fmt=fmt_list)
def padova_interpolated_isomake(directories, bands_dict, output_filename,
                                bands_ordered=None):
    """ padova_interpolated_isomake(directories, bands_dict,
                                    output_filename,
                                    bands_ordered=None)
    Interpolate several isochrones defined in
    (initial Mass, log(age)) space onto a grid of points
    in (log(T_eff), log(g)).
    This has been designed for use with Padova isochrones.
    Parameters
    ----------
    directories : string, list(string)
        the directories that contain the isochrone
        files
    bands_dict : dict
        A dictionary containing the photometric bands
        to be used.
    output_filename :
        a filename object to save the interpolated
        isochrones to.
    bands_ordered : list, optional
        The bands in their desired order. If not given
        is made from the values of bands_dict.
    """
    # NOTE(review): `basestring`, `dict.keys()[0]` and `keys.sort()` on dict
    # keys below are Python 2-only idioms — this function will not run on
    # Python 3 as written.
    if isinstance(directories, basestring):
        directories = [directories]
    if bands_ordered is None:
        bands_ordered = bands_dict.values()
    output_obj = open(output_filename, "w")
    # Tab-separated header line listing every output column.
    header_string = "#\t[M/H]\tMi\tlogAge\tlogTe\tlogg\tJacobian"
    for band in bands_ordered:
        header_string += "\t{}".format(band)
    header_string += "\tinner_count\touter_count\n"
    output_obj.write(header_string)
    # iso_metal_dict: metallicity -> list of files covering all bands.
    # bands_metal_dicts: band -> {metallicity -> file providing that band}.
    iso_metal_dict = {}
    bands_metal_dicts = {}
    for band in bands_dict.keys():
        bands_metal_dicts[band] = {}
    # instead do this on band-by-band basis? *******************
    for direc in directories:
        iso_files_gz = gb.glob("{}/*.dat.gz".format(direc.rstrip("/")))
        iso_files = gb.glob("{}/*.dat".format(direc.rstrip("/")))
        # check for metallicity of each file
        # and check which bands it has
        # NOTE(review): the gzip/plain file handles opened below are never
        # explicitly closed.
        for iso_file1 in iso_files_gz:
            metal = None
            iso_data = gz.open("{0}".format(iso_file1))
            for line in iso_data:
                split_line = line.split()
                if "[M/H]" in split_line:
                    metal = float(split_line[split_line.index("[M/H]")+2])
                if "M_ini" in split_line:
                    for band in bands_metal_dicts.keys():
                        if band in split_line:
                            bands_metal_dicts[band][metal] = iso_file1
        for iso_file1 in iso_files:
            metal = None
            iso_data = open("{0}".format(iso_file1), "r")
            for line in iso_data:
                split_line = line.split()
                if "[M/H]" in split_line:
                    metal = float(split_line[split_line.index("[M/H]")+2])
                if "M_ini" in split_line:
                    for band in bands_metal_dicts.keys():
                        if band in split_line:
                            bands_metal_dicts[band][metal] = iso_file1
    # Keep only metallicities for which every requested band is available
    # (the for/else keeps the entry only when the loop never breaks).
    for metal in bands_metal_dicts[bands_metal_dicts.keys()[0]]:
        filenames = []
        for band in bands_metal_dicts:
            if metal in bands_metal_dicts[band]:
                if bands_metal_dicts[band][metal] not in filenames:
                    filenames.append(bands_metal_dicts[band][metal])
            else:
                break
        else:
            iso_metal_dict[metal] = filenames
    print(iso_metal_dict)
    keys = iso_metal_dict.keys()
    keys.sort()
    if len(keys) > 2:
        # iso_metal_weights=dict(zip(keys, np.gradient(np.array(keys)) ) )
        # in numpy 1.9.0 gradient has changed to use second order behaviour
        # at boundaries which gives wrong results in this context
        iso_metal_weights = dict(zip(keys,
                                     replacement_gradient(np.array(keys))))
    else:
        iso_metal_weights = dict(zip(keys, np.ones(len(keys))))
    print("metals and weights: ", iso_metal_weights)
    # interp in metallicity order
    for key in keys:
        iso_interp(iso_metal_dict[key], key, iso_metal_weights[key],
                   output_obj, bands_dict, bands_ordered)
    output_obj.close()
| [
"tqdm.tqdm",
"numpy.sum",
"numpy.histogram2d",
"numpy.power",
"numpy.floor",
"numpy.zeros",
"scipy.ndimage.filters.uniform_filter",
"numpy.diff",
"numpy.arange",
"numpy.loadtxt",
"numpy.array"
] | [((269, 279), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (276, 279), True, 'import numpy as np\n'), ((290, 307), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (298, 307), True, 'import numpy as np\n'), ((1975, 2045), 'numpy.arange', 'np.arange', (['(logT_min - logT_step / 2.0)', '(logT_max + logT_step)', 'logT_step'], {}), '(logT_min - logT_step / 2.0, logT_max + logT_step, logT_step)\n', (1984, 2045), True, 'import numpy as np\n'), ((2083, 2153), 'numpy.arange', 'np.arange', (['(logg_min - logg_step / 2.0)', '(logg_max + logg_step)', 'logg_step'], {}), '(logg_min - logg_step / 2.0, logg_max + logg_step, logg_step)\n', (2092, 2153), True, 'import numpy as np\n'), ((3715, 3747), 'numpy.zeros', 'np.zeros', (['[iso_data.shape[0], 1]'], {}), '([iso_data.shape[0], 1])\n', (3723, 3747), True, 'import numpy as np\n'), ((5052, 5086), 'numpy.zeros', 'np.zeros', (['interp_points.T.shape[0]'], {}), '(interp_points.T.shape[0])\n', (5060, 5086), True, 'import numpy as np\n'), ((5096, 5130), 'numpy.zeros', 'np.zeros', (['interp_points.T.shape[0]'], {}), '(interp_points.T.shape[0])\n', (5104, 5130), True, 'import numpy as np\n'), ((6940, 7017), 'numpy.histogram2d', 'np.histogram2d', (['iso_data[:, 2]', 'iso_data[:, 3]'], {'bins': '[logT_edges, logg_edges]'}), '(iso_data[:, 2], iso_data[:, 3], bins=[logT_edges, logg_edges])\n', (6954, 7017), True, 'import numpy as np\n'), ((4993, 5025), 'numpy.zeros', 'np.zeros', (['interp_points[0].shape'], {}), '(interp_points[0].shape)\n', (5001, 5025), True, 'import numpy as np\n'), ((5221, 5255), 'numpy.zeros', 'np.zeros', (['interp_points.T.shape[0]'], {}), '(interp_points.T.shape[0])\n', (5229, 5255), True, 'import numpy as np\n'), ((5288, 5309), 'tqdm.tqdm', 'tqdm', (['interp_points.T'], {}), '(interp_points.T)\n', (5292, 5309), False, 'from tqdm import tqdm\n'), ((7430, 7480), 'scipy.ndimage.filters.uniform_filter', 'snd.filters.uniform_filter', (['inner_Jac'], {'size': '(2, 5)'}), '(inner_Jac, size=(2, 5))\n', 
(7456, 7480), True, 'import scipy.ndimage as snd\n'), ((3212, 3283), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'usecols': '(Mi_col, logage_col, logTe_col, logg_col)'}), '(filename, usecols=(Mi_col, logage_col, logTe_col, logg_col))\n', (3222, 3283), True, 'import numpy as np\n'), ((3527, 3576), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'usecols': '[photom_cols[band]]'}), '(filename, usecols=[photom_cols[band]])\n', (3537, 3576), True, 'import numpy as np\n'), ((6158, 6180), 'numpy.sum', 'np.sum', (['short_weights1'], {}), '(short_weights1)\n', (6164, 6180), True, 'import numpy as np\n'), ((6410, 6432), 'numpy.sum', 'np.sum', (['short_weights2'], {}), '(short_weights2)\n', (6416, 6432), True, 'import numpy as np\n'), ((7998, 8020), 'numpy.array', 'np.array', (['output_array'], {}), '(output_array)\n', (8006, 8020), True, 'import numpy as np\n'), ((3894, 3922), 'numpy.power', 'np.power', (['(10)', 'iso_data[i, 1]'], {}), '(10, iso_data[i, 1])\n', (3902, 3922), True, 'import numpy as np\n'), ((5409, 5447), 'numpy.power', 'np.power', (['(iso_data[:, 3] - point[1])', '(2)'], {}), '(iso_data[:, 3] - point[1], 2)\n', (5417, 5447), True, 'import numpy as np\n'), ((6460, 6504), 'numpy.sum', 'np.sum', (['(shortlist_iso[:, 1] * short_weights1)'], {}), '(shortlist_iso[:, 1] * short_weights1)\n', (6466, 6504), True, 'import numpy as np\n'), ((6573, 6617), 'numpy.sum', 'np.sum', (['(shortlist_iso[:, 0] * short_weights1)'], {}), '(shortlist_iso[:, 0] * short_weights1)\n', (6579, 6617), True, 'import numpy as np\n'), ((4051, 4079), 'numpy.power', 'np.power', (['(10)', 'iso_data[i, 1]'], {}), '(10, iso_data[i, 1])\n', (4059, 4079), True, 'import numpy as np\n'), ((5340, 5378), 'numpy.power', 'np.power', (['(iso_data[:, 2] - point[0])', '(2)'], {}), '(iso_data[:, 2] - point[0], 2)\n', (5348, 5378), True, 'import numpy as np\n'), ((6080, 6125), 'numpy.power', 'np.power', (['(shortlist_iso[:, 3] - point[1])', '(2.0)'], {}), '(shortlist_iso[:, 3] - point[1], 2.0)\n', (6088, 
6125), True, 'import numpy as np\n'), ((6332, 6377), 'numpy.power', 'np.power', (['(shortlist_iso[:, 3] - point[1])', '(2.0)'], {}), '(shortlist_iso[:, 3] - point[1], 2.0)\n', (6340, 6377), True, 'import numpy as np\n'), ((6749, 6796), 'numpy.sum', 'np.sum', (['(shortlist_photom[band] * short_weights2)'], {}), '(shortlist_photom[band] * short_weights2)\n', (6755, 6796), True, 'import numpy as np\n'), ((7081, 7134), 'scipy.ndimage.filters.uniform_filter', 'snd.filters.uniform_filter', (['inner_counts'], {'size': '(2, 5)'}), '(inner_counts, size=(2, 5))\n', (7107, 7134), True, 'import scipy.ndimage as snd\n'), ((11844, 11858), 'numpy.array', 'np.array', (['keys'], {}), '(keys)\n', (11852, 11858), True, 'import numpy as np\n'), ((4217, 4245), 'numpy.power', 'np.power', (['(10)', 'iso_data[i, 1]'], {}), '(10, iso_data[i, 1])\n', (4225, 4245), True, 'import numpy as np\n'), ((4383, 4411), 'numpy.power', 'np.power', (['(10)', 'iso_data[i, 1]'], {}), '(10, iso_data[i, 1])\n', (4391, 4411), True, 'import numpy as np\n'), ((4517, 4545), 'numpy.power', 'np.power', (['(10)', 'iso_data[i, 1]'], {}), '(10, iso_data[i, 1])\n', (4525, 4545), True, 'import numpy as np\n'), ((4767, 4805), 'numpy.floor', 'np.floor', (['((iso_data[i, 3] + 1.5) / 0.5)'], {}), '((iso_data[i, 3] + 1.5) / 0.5)\n', (4775, 4805), True, 'import numpy as np\n'), ((6001, 6046), 'numpy.power', 'np.power', (['(shortlist_iso[:, 2] - point[0])', '(2.0)'], {}), '(shortlist_iso[:, 2] - point[0], 2.0)\n', (6009, 6046), True, 'import numpy as np\n'), ((6253, 6298), 'numpy.power', 'np.power', (['(shortlist_iso[:, 2] - point[0])', '(2.0)'], {}), '(shortlist_iso[:, 2] - point[0], 2.0)\n', (6261, 6298), True, 'import numpy as np\n'), ((4712, 4750), 'numpy.floor', 'np.floor', (['((iso_data[i, 2] - 3.3) / 0.1)'], {}), '((iso_data[i, 2] - 3.3) / 0.1)\n', (4720, 4750), True, 'import numpy as np\n')] |
"""
deepTorch.py: It implements different deep learning classifiers
Copyright 2016 Observational Health Data Sciences and Informatics
This file is part of PatientLevelPrediction
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
from collections import OrderedDict
import timeit
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import numpy as np
import warnings
# Silence all warnings module-wide (the deprecated torch/sklearn APIs used
# below would otherwise emit a stream of deprecation warnings).
warnings.filterwarnings("ignore")
class FocalLoss(nn.Module):
    """
    Method to handle data imbalance based on paper (arXiv:1708.02002) entitled
    Focal loss for dense object detection.
    Loss(x, class) = - (1-softmax(x)[class])^gamma \log(softmax(x)[class])
    """
    def __init__(self, gamma=5, eps=1e-7, size_average=False):
        # gamma: focusing parameter — larger values down-weight easy examples.
        # eps: clamp bound that keeps log() away from 0 and 1.
        # size_average: if True return the mean loss, otherwise the sum.
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.eps = eps
        self.size_average = size_average
    def forward(self, input, target):
        """Compute the focal loss of raw logits `input` against integer
        class labels `target`."""
        y = self.one_hot(target, input.size(-1))
        logit = F.softmax(input)
        # Clamp probabilities so torch.log never sees exactly 0 or 1.
        logit = logit.clamp(self.eps, 1. - self.eps)
        loss = -1 * y * torch.log(logit) # cross entropy
        loss = loss * (1 - logit) ** self.gamma # focal loss
        if self.size_average:
            loss = loss.mean()
        else:
            loss = loss.sum()
        return loss
    def one_hot(self, index, classes):
        """Return a one-hot encoding of `index` with `classes` columns.

        :param index: is the labels
        :param classes: number of classes
        :return: tensor of shape index.size() + (classes,) with a 1 at
            each label position and 0 elsewhere
        """
        size = index.size() + (classes,)
        view = index.size() + (1,)
        mask = torch.Tensor(*size).fill_(0)
        index = index.view(*view)
        ones = 1.
        if isinstance(index, Variable):
            ones = Variable(torch.Tensor(index.size()).fill_(1))
            # NOTE(review): `volatile` was removed from torch.autograd in
            # PyTorch >= 0.4 — this line only works on old torch versions.
            mask = Variable(mask, volatile=index.volatile)
        if torch.cuda.is_available():
            ones = ones.cuda()
            mask = mask.cuda()
        return mask.scatter_(1, index, ones)
def loss_function(recon_x, x, mu, logvar):
    """Loss function for variational autoencoder (VAE).

    Parameters
    ----------
    recon_x : reconstructed input (values in [0, 1])
    x : original input (values in [0, 1])
    mu : latent mean
    logvar : latent log-variance

    Returns
    -------
    Scalar tensor: summed reconstruction BCE plus the KL divergence of
    N(mu, sigma^2) from the standard normal prior.
    """
    # `reduction='sum'` replaces the deprecated `size_average=False` kwarg
    # (same behaviour: sum over all elements, no averaging).
    BCE = F.binary_cross_entropy(recon_x, x, reduction='sum')
    # KL(N(mu, sigma) || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return BCE + KLD
def mixup_data(x, y, alpha=1.0):
    '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda
    Data Augmentation method based on paper (arXiv:1710.09412) entitled
    mixup: Beyond empirical risk minimization.
    '''
    # Mixing coefficient: Beta(alpha, alpha) draw, or 1 (no mixing) when
    # alpha is non-positive.
    lam = np.random.beta(alpha, alpha) if alpha > 0. else 1.
    n_samples = x.size()[0]
    # Random pairing of samples within the batch.
    perm = torch.randperm(n_samples)
    if torch.cuda.is_available():
        perm = perm.cuda()
    mixed_x = lam * x + (1 - lam) * x[perm, :]
    return mixed_x, y, y[perm], lam
def mixup_criterion(y_a, y_b, lam):
    """Return a closure computing the mixup loss: the lam-weighted blend of
    `criterion(pred, y_a)` and `criterion(pred, y_b)`."""
    def mixed_loss(criterion, pred):
        return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
    return mixed_loss
def early_stop(metrics_hist, patience=3):
    """Return True when the best (minimum) metric happened within the last
    `patience` entries of `metrics_hist`, i.e. no improvement since then."""
    if np.all(np.isnan(metrics_hist)):
        # keep training if criterion results have all been nan so far
        return False
    return np.nanargmin(metrics_hist) > len(metrics_hist) - patience
class Estimator(object):
    """
    It is used for training different deep models in the same interface.
    Wraps a torch model and exposes Keras-like compile/fit/evaluate/predict.
    """
    def __init__(self, model):
        # model: a torch nn.Module to be trained/evaluated.
        self.model = model
    def compile(self, optimizer, loss):
        """Attach the optimizer and loss function used by fit/evaluate."""
        self.optimizer = optimizer
        self.loss_f = loss
    def _fit(self, train_loader, l1regularization=False, autoencoder=False, mixup=False, vae = False):
        """
        train one epoch
        :param train_loader: The data loaded using DataLoader
        :param l1regularization: if True add L1 weight penalty (factor 0.0005)
        :param autoencoder: if True the target is the input itself
        :param mixup: if True apply mixup data augmentation
        :param vae: if True (with autoencoder) use the VAE loss
        :return: the mean fitted loss and accuracy over the epoch
        """
        loss_list = []
        acc_list = []
        for idx, (X, y) in enumerate(train_loader):
            # NOTE(review): Variable is a no-op wrapper on torch >= 0.4.
            X_v = Variable(X)
            y_v = Variable(y)
            if torch.cuda.is_available():
                X_v = X_v.cuda()
                y_v = y_v.cuda()
            if mixup:
                X_v, y_v_a, y_v_b, lam = mixup_data(X_v, y_v)
                X_v, y_v_a, y_v_b = Variable(X_v), Variable(y_v_a), Variable(y_v_b)
            # print 'GPU id', torch.cuda.current_device()
            self.optimizer.zero_grad()
            # the below comemnted lines are used for multiple GPU training
            # if torch.cuda.device_count() > 1:
            #    net = torch.nn.DataParallel(self.model, device_ids = range(torch.cuda.device_count()))
            #    if cuda:
            #        net = net.cuda()
            # y_pred = net(X_v)
            if autoencoder:
                if vae:
                    # VAE returns (reconstruction, mu, logvar).
                    y_pred, mu, logvar = self.model(X_v)
                    loss = loss_function(y_pred, X_v, mu, logvar)
                else:
                    y_pred = self.model(X_v)
                    loss = self.loss_f(y_pred, X_v)
            else:
                y_pred = self.model(X_v)
                loss = self.loss_f(y_pred, y_v)
                if mixup:
                    # Replace the plain loss with the mixup-blended loss.
                    loss_func = mixup_criterion(y_v_a, y_v_b, lam)
                    loss = loss_func(self.loss_f, y_pred)
            if l1regularization:
                # L1 penalty towards zero on every parameter tensor.
                l1_crit = nn.L1Loss(size_average=False)
                reg_loss = 0
                for param in self.model.parameters():
                    target = Variable(torch.from_numpy(np.zeros(param.size()).astype(np.float32)))
                    if torch.cuda.is_available():
                        target = target.cuda()
                    reg_loss += l1_crit(param, target)
                factor = 0.0005
                loss += factor * reg_loss
            loss.backward()
            self.optimizer.step()
            loss_list.append(loss.item())
            if autoencoder:
                acc_list.append(0)
            else:
                # Predicted class = index of the top logit per sample.
                classes = torch.topk(y_pred, 1)[1].data.cpu().numpy().flatten()
                acc = self._accuracy(classes, y_v.data.cpu().numpy().flatten())
                acc_list.append(acc)
            del loss
            del y_pred
        return sum(loss_list) / len(loss_list), sum(acc_list) / len(acc_list)
    def fit(self, X, y, batch_size=32, nb_epoch=10, validation_data=(), l1regularization=False, autoencoder =False, vae = False):
        """Train for `nb_epoch` epochs on numpy arrays X (features) and y
        (integer labels); optionally evaluate on `validation_data`."""
        train_set = TensorDataset(torch.from_numpy(X.astype(np.float32)),
                                  torch.from_numpy(y.astype(np.float32)).long().view(-1))
        train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
        self.model.train()
        for t in range(nb_epoch):
            loss, acc = self._fit(train_loader, l1regularization=l1regularization, autoencoder = autoencoder, vae = vae)
            #print loss
            val_log = ''
            if validation_data and not autoencoder:
                val_loss, auc = self.evaluate(validation_data[0], validation_data[1], batch_size)
                val_log = "- val_loss: %06.4f - auc: %6.4f" % (val_loss, auc)
                print(val_log)
            # print("Epoch %s/%s loss: %06.4f - acc: %06.4f %s" % (t, nb_epoch, loss, acc, val_log))
    def evaluate(self, X, y, batch_size=32):
        """Return (loss, ROC AUC) of the model on numpy arrays X and y.
        AUC is computed from the probability of the positive class
        (column 1 of the model output)."""
        y_pred = self.predict(X)
        y_v = Variable(torch.from_numpy(y).long(), requires_grad=False)
        if torch.cuda.is_available():
            y_v = y_v.cuda()
        loss = self.loss_f(y_pred, y_v)
        predict = y_pred.data.cpu().numpy()[:, 1].flatten()
        auc = roc_auc_score(y, predict)
        return loss.item(), auc
    def _accuracy(self, y_pred, y):
        """Fraction of predictions equal to the true labels."""
        return float(sum(y_pred == y)) / y.shape[0]
    def predict(self, X):
        """Forward the numpy array X through the model; returns raw output."""
        X = Variable(torch.from_numpy(X.astype(np.float32)))
        if torch.cuda.is_available():
            X = X.cuda()
        y_pred = self.model(X)
        return y_pred
    def predict_proba(self, X):
        """Put the model in eval mode and delegate to its predict_proba."""
        self.model.eval()
        return self.model.predict_proba(X)
class EarlyStopping(object): # pylint: disable=R0902
    """
    Gives a criterion to stop training when a given metric is not
    improving anymore
    Args:
        mode (str): One of `min`, `max`. In `min` mode, training will
            be stopped when the quantity monitored has stopped
            decreasing; in `max` mode it will be stopped when the
            quantity monitored has stopped increasing. Default: 'min'.
        patience (int): Number of epochs with no improvement after
            which training is stopped. For example, if
            `patience = 2`, then we will ignore the first 2 epochs
            with no improvement, and will only stop learning after the
            3rd epoch if the loss still hasn't improved then.
            Default: 10.
        threshold (float): Threshold for measuring the new optimum,
            to only focus on significant changes. Default: 1e-4.
        threshold_mode (str): One of `rel`, `abs`. In `rel` mode,
            dynamic_threshold = best * ( 1 + threshold ) in 'max'
            mode or best * ( 1 - threshold ) in `min` mode.
            In `abs` mode, dynamic_threshold = best + threshold in
            `max` mode or best - threshold in `min` mode. Default: 'rel'.
    """
    def __init__(self, mode='min', patience=3, threshold=1e-4, threshold_mode='rel'):
        self.patience = patience
        self.mode = mode
        self.threshold = threshold
        self.threshold_mode = threshold_mode
        self.best = None
        self.num_bad_epochs = None
        self.mode_worse = None  # the worse value for the chosen mode
        self.is_better = None
        self.last_epoch = -1
        self._init_is_better(mode=mode, threshold=threshold,
                             threshold_mode=threshold_mode)
        self._reset()
    def _reset(self):
        """Resets num_bad_epochs counter and cooldown counter."""
        self.best = self.mode_worse
        self.num_bad_epochs = 0
    def step(self, metrics, epoch=None):
        """ Updates early stopping state """
        current = metrics
        if epoch is None:
            epoch = self.last_epoch = self.last_epoch + 1
        self.last_epoch = epoch
        if self.is_better(current, self.best):
            self.best = current
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
    @property
    def stop(self):
        """ Should we stop learning? """
        return self.num_bad_epochs > self.patience
    def _cmp(self, mode, threshold_mode, threshold, a, best):  # pylint: disable=R0913, R0201
        """Return True if `a` is a significant improvement over `best`."""
        if mode == 'min' and threshold_mode == 'rel':
            rel_epsilon = 1. - threshold
            return a < best * rel_epsilon
        elif mode == 'min' and threshold_mode == 'abs':
            return a < best - threshold
        elif mode == 'max' and threshold_mode == 'rel':
            rel_epsilon = threshold + 1.
            return a > best * rel_epsilon
        return a > best + threshold
    def _init_is_better(self, mode, threshold, threshold_mode):
        """Validate mode/threshold_mode and bind `is_better` comparator.

        Raises:
            ValueError: if `mode` or `threshold_mode` is not recognised.
        """
        # BUGFIX: `partial` was used without `functools` ever being imported
        # at module level, so constructing an EarlyStopping raised NameError.
        # A function-scope import keeps the fix local to this class.
        from functools import partial
        if mode not in {'min', 'max'}:
            raise ValueError('mode ' + mode + ' is unknown!')
        if threshold_mode not in {'rel', 'abs'}:
            raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')
        if mode == 'min':
            self.mode_worse = float('inf')
        else:  # mode == 'max':
            self.mode_worse = (-float('inf'))
        self.is_better = partial(self._cmp, mode, threshold_mode, threshold)
    def state_dict(self):
        """ Returns early stopping state """
        # `is_better` (a partial holding a bound method) is excluded so the
        # state stays plain data; it is rebuilt by load_state_dict.
        return {key: value for key, value in self.__dict__.items() if key != 'is_better'}
    def load_state_dict(self, state_dict):
        """ Loads early stopping state """
        self.__dict__.update(state_dict)
        self._init_is_better(mode=self.mode, threshold=self.threshold,
                             threshold_mode=self.threshold_mode)
def adjust_learning_rate(learning_rate, optimizer, epoch):
    """Step-decay schedule: set every param group's lr to the initial
    `learning_rate` decayed by a factor of 10 every 10 epochs, and return
    the new value."""
    decayed = learning_rate * 0.1 ** (epoch // 10)
    for group in optimizer.param_groups:
        group['lr'] = decayed
    return decayed
def batch(tensor, batch_size = 50):
    """Split `tensor` along its first axis into consecutive chunks of
    `batch_size` samples; the final chunk holds whatever remains."""
    chunks = []
    n = tensor.shape[0]
    start = 0
    while True:
        stop = start + batch_size
        if stop >= n:
            # Last (possibly short) chunk — return the full list.
            chunks.append(tensor[start:n])
            return chunks
        chunks.append(tensor[start:stop])
        start = stop
class selu(nn.Module):
    """SELU activation (arXiv:1706.02515) assembled from ReLU/ELU primitives."""
    def __init__(self):
        super(selu, self).__init__()
        # Fixed-point constants from the self-normalizing-networks paper.
        self.alpha = 1.6732632423543772848170429916717
        self.scale = 1.0507009873554804934193349852946

    def forward(self, x):
        pos = F.relu(x)                     # scale * x  for x > 0
        neg = F.elu(-1 * F.relu(-1 * x))    # alpha * (exp(x) - 1)  for x <= 0
        return self.scale * pos + self.scale * self.alpha * neg
class alpha_drop(nn.Module):
    """Alpha-dropout for SELU nets (arXiv:1706.02515): drops to `alpha` and then
    applies an affine correction so the activation mean/variance stay fixed."""
    def __init__(self, p=0.05, alpha=-1.7580993408473766, fixedPointMean=0, fixedPointVar=1):
        super(alpha_drop, self).__init__()
        keep = 1 - p
        # Affine parameters (a, b) restoring the fixed-point statistics after dropping.
        self.a = np.sqrt(
            fixedPointVar / (keep * ((1 - keep) * pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        self.b = fixedPointMean - self.a * (keep * fixedPointMean + (1 - keep) * alpha)
        self.alpha = alpha
        self.keep_prob = keep
        self.drop_prob = p

    def forward(self, x):
        # Identity at eval time, or when nothing would ever be dropped.
        if not self.training or self.keep_prob == 1:
            return x
        # Bernoulli(keep_prob) mask: floor(U + keep_prob) is 1 w.p. keep_prob.
        mask = Variable(torch.floor(self.keep_prob + torch.rand(x.size())))
        if torch.cuda.is_available():
            mask = mask.cuda()
        dropped = x.mul(mask) + self.alpha * (1 - mask)
        return dropped.mul_(self.a).add_(self.b)
def convert_to_3d_matrix(covariate_ids, patient_dict, y_dict = None, timeid_len = 31, cov_mean_dict = None):
    """
    create matrix for temporal models.
    :param covariate_ids: the covariate ids in the whole data
    :param patient_dict: the dictionary contains the data for each patient
    :param y_dict: if the output labels is known, it contains the labels for patients
    :param timeid_len: the total number time window gaps when extracting temporal data
    :param cov_mean_dict: optional mapping cov_id -> (mean, std) used to standardize values
    :return: return the raw data in 3-D format, patients x covariates x number of windows, and the patients ids
    """
    D = len(covariate_ids)
    N = len(patient_dict)
    T = timeid_len
    concept_list = sorted(covariate_ids)
    x_raw = np.zeros((N, D, T), dtype=float)
    patient_ind = 0
    p_ids = []
    patient_keys = patient_dict.keys()
    for kk in patient_keys:
        vals = patient_dict[kk]
        p_ids.append(int(kk))
        # FIX: dict.iteritems() does not exist on Python 3 -- use items().
        for timeid, meas in vals.items():
            int_time = int(timeid) - 1  # time ids are 1-based in the input
            for val in meas:
                if not len(val):
                    continue
                cov_id, cov_val = val
                if cov_id not in covariate_ids:
                    continue
                lab_ind = concept_list.index(cov_id)
                if cov_mean_dict is None:
                    x_raw[patient_ind][lab_ind][int_time] = float(cov_val)
                else:
                    # Standardize with the stored (mean, std); fall back to the
                    # raw value when std is zero to avoid dividing by zero.
                    mean_std = cov_mean_dict[cov_id]
                    if mean_std[1]:
                        x_raw[patient_ind][lab_ind][int_time] = (float(cov_val) - mean_std[0]) / mean_std[1]
                    else:
                        x_raw[patient_ind][lab_ind][int_time] = float(cov_val)
        patient_ind = patient_ind + 1
    # Impute gaps by carrying the previous observed value forward in time.
    # FIX: xrange() does not exist on Python 3 -- use range().
    for i in range(N):
        for j in range(D):
            temp = x_raw[i][j]
            nonzero_inds = np.nonzero(temp)[0]
            count_nonzeros = len(nonzero_inds)
            if count_nonzeros == 1:
                ind = nonzero_inds[0]
                for k in range(ind + 1, T):
                    x_raw[i][j][k] = x_raw[i][j][ind]
            elif count_nonzeros > 1:
                for ind in range(1, count_nonzeros):
                    for k in range(nonzero_inds[ind - 1] + 1, nonzero_inds[ind]):
                        x_raw[i][j][k] = x_raw[i][j][nonzero_inds[ind - 1]]
                # For last nonzeros.
                for k in range(nonzero_inds[-1] + 1, T):
                    x_raw[i][j][k] = x_raw[i][j][nonzero_inds[-1]]
    return x_raw, patient_keys
def forward_impute_missing_value(x_raw):
    """Forward-fill zero gaps along the time axis of a (patients, covariates, time) array.

    Mutates `x_raw` in place: after each observed (non-zero) value, zeros up to
    the next observation (or the end) are replaced with that value.
    """
    N = x_raw.shape[0]
    D = x_raw.shape[1]
    T = x_raw.shape[2]
    # FIX: xrange() does not exist on Python 3 -- use range().
    for i in range(N):
        for j in range(D):
            temp = x_raw[i][j]
            nonzero_inds = np.nonzero(temp)[0]
            count_nonzeros = len(nonzero_inds)
            if count_nonzeros == 1:
                ind = nonzero_inds[0]
                for k in range(ind + 1, T):
                    x_raw[i][j][k] = x_raw[i][j][ind]
            elif count_nonzeros > 1:
                for ind in range(1, count_nonzeros):
                    for k in range(nonzero_inds[ind - 1] + 1, nonzero_inds[ind]):
                        x_raw[i][j][k] = x_raw[i][j][nonzero_inds[ind - 1]]
                # For last nonzeros.
                for k in range(nonzero_inds[-1] + 1, T):
                    x_raw[i][j][k] = x_raw[i][j][nonzero_inds[-1]]
def convert_to_temporal_format(covariates, timeid_len= 31, normalize = True, predict = False):
    """
    It reads the data from covariates extracted by FeatureExtraction package and convert it to temporal data matrix
    :param covariates: covariates extracted by FeatureExtraction package
    :param timeid_len: the total number of window gaps when extracting temporal data
    :param normalize: standardize values with the per-covariate (mean, std) table
    :param predict: when True, read 'covariate_mean_std.csv' written by a training run
        instead of recomputing the statistics
    :return: return the raw data in 3-D format, patients x covariates x number of windows, and the patients ids
    """
    patient_dict = OrderedDict()
    print('Loading temporal data')
    cov_vals_dict = {}
    for row in covariates:
        p_id, cov_id, time_id, cov_val = row[0], row[1], row[2], row[3]
        cov_id = np.int64(cov_id)
        cov_vals_dict.setdefault(cov_id, []).append(float(cov_val))
        # Group records as patient -> time window -> [(cov_id, cov_val), ...].
        patient_dict.setdefault(p_id, {}).setdefault(time_id, []).append((cov_id, cov_val))
    covariate_ids = set()
    cov_mean_dict = {}
    if not predict:
        # FIX: dict.iteritems() does not exist on Python 3; also close the file
        # deterministically with a context manager.
        with open('covariate_mean_std.csv', 'w') as fw:
            for key, val in cov_vals_dict.items():
                mean_val = np.mean(val)
                std_val = np.std(val)
                # Remove those covariates with few occurrence (<5)
                if len(val) >= 5:
                    covariate_ids.add(key)
                    cov_mean_dict[key] = (mean_val, std_val)
                    fw.write(str(key) + ',' + str(mean_val) + ',' + str(std_val) + '\n')
    else:
        with open('covariate_mean_std.csv', 'r') as fp:
            for line in fp:
                values = line.rstrip().split(',')
                key = np.int64(values[0])
                covariate_ids.add(key)
                cov_mean_dict[key] = (float(values[1]), float(values[2]))
    if normalize:
        x, patient_keys = convert_to_3d_matrix(covariate_ids, patient_dict, timeid_len = timeid_len, cov_mean_dict = cov_mean_dict)
    else:
        x, patient_keys = convert_to_3d_matrix(covariate_ids, patient_dict, timeid_len=timeid_len)
    return x, patient_keys
def read_covariates(covariate_file):
    """Read a covariate CSV and return, per patient, its list of covariate ids.

    The file is expected to have a header row; column index 1 is the patient id
    and column index 2 the covariate id.
    """
    patient_dict = {}
    head = True
    with open(covariate_file, 'r') as fp:
        for line in fp:
            if head:
                head = False  # skip the header row
                continue
            values = line.rstrip().split(',')
            patient_id = values[1]
            cov_id = values[2]
            #time_id = int(values[-1])
            # covariates in one patient has time order
            patient_dict.setdefault(patient_id, []).append((cov_id))
    new_patient = []
    for key in patient_dict.keys():
        #patient_dict[key].sort()
        sort_vals = []
        for val in patient_dict[key]:
            # NOTE(review): `val` is a covariate-id string, so `val[1]` is its
            # *second character*; comparing that character against the list of
            # full id strings means this dedup condition almost never fires.
            # Looks like each entry was once a (cov_id, time) tuple -- confirm
            # intent before changing.
            if val[1] not in sort_vals:
                sort_vals.append(val)
        new_patient.append(sort_vals)
    return new_patient
def word_embeddings(covariate_file, embedding_size=50):
    """Train a skip-gram word2vec model over per-patient covariate sequences.

    :param covariate_file: CSV consumed by read_covariates()
    :param embedding_size: dimensionality of the learned embedding vectors
    :return: the filename the trained model was saved to
    """
    # NOTE(review): requires the third-party `gensim` package. The `size` and
    # `iter` keyword arguments follow the pre-4.0 gensim API (renamed to
    # `vector_size`/`epochs` in gensim 4) -- confirm the pinned version.
    import gensim.models.word2vec as w2v
    modelname = "processed_%s.w2v" % ('heartfailure')
    sentences = read_covariates(covariate_file)
    # sg=1 selects skip-gram; words occurring fewer than 3 times are dropped.
    model = w2v.Word2Vec(size=embedding_size, min_count=3, workers=4, iter=10, sg=1)
    print("building word2vec vocab on %s..." % (covariate_file))
    model.build_vocab(sentences)
    print("training...")
    model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
    out_file = modelname
    print("writing embeddings to %s" % (out_file))
    model.save(out_file)
    return out_file
def read_data(filename):
    """Read a covariate CSV into (covariate_ids, patient_dict).

    :param filename: CSV with a header row; columns 1-4 are patient id,
        covariate id, time id and covariate value.
    :return: (set of covariate ids,
              dict patient_id -> {time_id: [(cov_id, cov_val), ...]})
    """
    covariate_ids = set()
    patient_dict = {}
    head = True
    with open(filename) as fp:
        for lines in fp:
            if head:
                head = False  # skip the header row
                continue
            lines = lines.strip('\n').strip('\r').split(',')
            try:
                p_id, cov_id, time_id, cov_val = lines[1], lines[2], lines[3], lines[4]
            except IndexError:
                pdb.set_trace()
                print(p_id, cov_id, time_id)
            # FIX: the original if/else ladder silently discarded the first
            # record of every new (patient, time) pair; nest with setdefault
            # so every record is kept (matches convert_to_temporal_format).
            patient_dict.setdefault(p_id, {}).setdefault(time_id, []).append((cov_id, cov_val))
            covariate_ids.add(cov_id)
    # FIX: dict.iteritems() does not exist on Python 3 -- use items().
    patient_dict = {k: v for k, v in patient_dict.items() if v}  # remove empty patients
    return covariate_ids, patient_dict
def split_training_validation(classes, validation_size=0.2, shuffle=False):
    """Stratified split of sample indices into training and validation parts.

    For each class, the first `validation_size` fraction of its indices goes to
    validation and the rest to training (optionally shuffling within the class
    first); both resulting index/label arrays are then shuffled.
    """
    labels = np.array(classes)
    all_indices = np.arange(len(labels))
    train_idx, train_lab = [], []
    valid_idx, valid_lab = [], []
    for cl in np.unique(labels):
        cl_indices = all_indices[labels == cl]
        if shuffle:
            random.shuffle(cl_indices)  # in-place shuffle within the class
        n_valid = int(len(cl_indices) * validation_size)
        n_train = len(cl_indices) - n_valid
        valid_idx.extend(cl_indices[:n_valid])
        valid_lab.extend([cl] * n_valid)
        train_idx.extend(cl_indices[n_valid:])
        train_lab.extend([cl] * n_train)
    order = np.arange(len(train_lab))
    random.shuffle(order)
    training_indice = np.array(train_idx)[order]
    training_label = np.array(train_lab)[order]
    order = np.arange(len(valid_lab))
    random.shuffle(order)
    validation_indice = np.array(valid_idx)[order]
    validation_label = np.array(valid_lab)[order]
    return training_indice, training_label, validation_indice, validation_label
class LogisticRegression(nn.Module):
    """
    Logistic regression classifier: a single linear layer followed by a
    sigmoid, trained with pytorch.
    """
    def __init__(self, input_size, num_classes = 2):
        super(LogisticRegression, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        # Linear projection followed by an element-wise sigmoid.
        return torch.sigmoid(self.linear(x))

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            probs = self.forward(x)
        return probs.data.cpu().numpy()
class MLP(nn.Module):
    """
    Multi-layer perceptron with a single hidden layer, ReLU activation,
    dropout and a sigmoid output.
    """
    def __init__(self, input_dim, hidden_size, num_classes = 2):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        hidden = F.dropout(F.relu(self.fc1(x)), p=0.5, training=self.training)
        return torch.sigmoid(self.fc2(hidden))

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            probs = self.forward(x)
        return probs.data.cpu().numpy()
class SNN(nn.Module):
    """
    Self-normalizing neural network (ref arXiv:1706.02515): linear layer,
    dropout, selu activation and alpha-dropout ahead of the read-out layer.
    """
    def __init__(self, input_dim, hidden_size, num_classes=2):
        super(SNN, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_size)
        self.fc2 = selu()
        self.ad1 = alpha_drop()
        self.fc4 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        hidden = F.dropout(F.relu(self.fc1(x)), p=0.5, training=self.training)
        hidden = self.ad1(self.fc2(hidden))
        return torch.sigmoid(self.fc4(hidden))

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            probs = self.forward(x)
        return probs.data.cpu().numpy()
class AutoEncoder(nn.Module):
    """
    A stacked autoencoder with 2 hidden layers; needs to be adapted for EHR data.
    Encoder halves the width twice before projecting to `encoding_size`;
    the decoder mirrors it.
    """
    def __init__(self, input_size, encoding_size):
        super(AutoEncoder, self).__init__()
        # FIX: use integer (floor) division -- `input_size/2` is a float on
        # Python 3 and nn.Linear raises TypeError for float sizes.
        self.encoder = nn.Sequential(
            nn.Linear(input_size, input_size // 2),
            nn.ReLU(True),
            nn.Linear(input_size // 2, input_size // 4),
            nn.ReLU(True),
            nn.Linear(input_size // 4, encoding_size),
            nn.ReLU(True)
        )
        self.decoder = nn.Sequential(
            nn.Linear(encoding_size, input_size // 4),
            nn.ReLU(True),
            nn.Linear(input_size // 4, input_size // 2),
            nn.ReLU(True),
            nn.Linear(input_size // 2, input_size)
        )

    def forward(self, x):
        """Encode then decode; returns the reconstruction of `x`."""
        if torch.cuda.is_available():
            x = x.cuda()
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

    def get_encode_features(self, x):
        """Return the bottleneck features (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            encoded = self.encoder(x)
            encoded = encoded.data.cpu().numpy()
        return encoded
class VAE(nn.Module):
    """
    A stacked variational autoencoder with 2 hidden layers; needs to be adapted
    for EHR data. fc21/fc22 produce the posterior mean and log-variance.
    """
    def __init__(self, input_size, encoding_size):
        super(VAE, self).__init__()
        # FIX: integer (floor) division -- `input_size/2` is a float on
        # Python 3 and nn.Linear raises TypeError for float sizes.
        self.fc1 = nn.Linear(input_size, input_size // 2)
        self.fc21 = nn.Linear(input_size // 2, encoding_size)
        self.fc22 = nn.Linear(input_size // 2, encoding_size)
        self.fc3 = nn.Linear(encoding_size, input_size // 2)
        self.fc4 = nn.Linear(input_size // 2, input_size)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def encode(self, x):
        """Return (mu, logvar) of the approximate posterior."""
        h1 = self.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) during training; return mu at eval time."""
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z):
        h3 = self.relu(self.fc3(z))
        return self.sigmoid(self.fc4(h3))

    def forward(self, x):
        if torch.cuda.is_available():
            x = x.cuda()
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar

    def get_encode_features(self, x):
        """Return the latent code (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            mu, logvar = self.encode(x)
            encoded = self.reparameterize(mu, logvar)
            encoded = encoded.data.cpu().numpy()
        return encoded
class Decoder(nn.Module):
    """ VAE decoder input_size = original inputsize/16*256"""
    def __init__(self, latent_size, input_size, img_channels = 1, kernel_size=(1, 4), stride=(1, 2), padding=(0, 1)):
        super(Decoder, self).__init__()
        self.latent_size = latent_size
        self.img_channels = img_channels
        # Project the latent code up to `input_size` channels, then upsample the
        # width back with four transposed convolutions (each doubles the width).
        self.fc1 = nn.Linear(latent_size, input_size)
        self.deconv1 = nn.ConvTranspose2d(input_size, 128, kernel_size, stride=stride, padding=padding)
        self.deconv2 = nn.ConvTranspose2d(128, 64, kernel_size, stride=stride, padding=padding)
        self.deconv3 = nn.ConvTranspose2d(64, 32, kernel_size, stride=stride, padding=padding)
        self.deconv4 = nn.ConvTranspose2d(32, img_channels, kernel_size, stride=stride, padding=padding)

    def forward(self, x):  # pylint: disable=arguments-differ
        hidden = F.relu(self.fc1(x))
        hidden = hidden.unsqueeze(-1).unsqueeze(-1)  # (B, input_size, 1, 1)
        hidden = F.relu(self.deconv1(hidden))
        hidden = F.relu(self.deconv2(hidden))
        hidden = F.relu(self.deconv3(hidden))
        return torch.sigmoid(self.deconv4(hidden))
class Encoder(nn.Module):  # pylint: disable=too-many-instance-attributes
    """ VAE encoder: four strided convolutions (each halves the width) followed
    by two linear heads producing the posterior mean and log-sigma. `input_size`
    is the flattened feature count BEFORE the /16 width reduction. """
    def __init__(self, latent_size, input_size, img_channels = 1, kernel_size=(1, 4), stride=(1, 2), padding=(0, 1)):
        super(Encoder, self).__init__()
        self.latent_size = latent_size
        self.img_channels = img_channels
        self.conv1 = nn.Conv2d(img_channels, 32, kernel_size, stride=stride, padding = padding)
        self.conv2 = nn.Conv2d(32, 64, kernel_size, stride=stride, padding = padding)
        self.conv3 = nn.Conv2d(64, 128, kernel_size, stride=stride, padding = padding)
        self.conv4 = nn.Conv2d(128, 256, kernel_size, stride=stride, padding = padding)
        # FIX: integer (floor) division -- `input_size / 16` is a float on
        # Python 3 and nn.Linear raises TypeError for float sizes.
        out_size = input_size // 16
        self.fc_mu = nn.Linear(out_size, latent_size)
        self.fc_logsigma = nn.Linear(out_size, latent_size)

    def forward(self, x):  # pylint: disable=arguments-differ
        """Return (mu, logsigma) for a batch of (B, C, H, W) inputs."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = x.view(x.size(0), -1)
        mu = self.fc_mu(x)
        logsigma = self.fc_logsigma(x)
        return mu, logsigma
class VAE_CNN(nn.Module):
    """ Variational Autoencoder built from the convolutional Encoder/Decoder pair. """
    def __init__(self, latent_size, input_size):
        # FIX: super() referenced the wrong class (VAE), breaking initialization;
        # it must name this class.
        super(VAE_CNN, self).__init__()
        self.encoder = Encoder(latent_size, input_size)
        # FIX: integer (floor) division -- the decoder expects an int size.
        input_size = input_size // 16
        self.decoder = Decoder(latent_size, input_size)

    def forward(self, x):  # pylint: disable=arguments-differ
        """Return (reconstruction, mu, logsigma) using the reparameterization trick."""
        mu, logsigma = self.encoder(x)
        sigma = logsigma.exp()
        eps = torch.randn_like(sigma)
        z = eps.mul(sigma).add_(mu)
        recon_x = self.decoder(z)
        return recon_x, mu, logsigma

    def get_encode_features(self, x):
        """Return the posterior mean (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            mu, logvar = self.encoder(x)
            encoded = mu.data.cpu().numpy()
        return encoded
class CNN(nn.Module):
    """
    Two-block 2D CNN classifier: two conv/batch-norm/ReLU/max-pool blocks
    followed by two fully connected layers with dropout; sigmoid output.
    """
    def __init__(self, nb_filter, num_classes = 2, kernel_size = (1, 5), pool_size = (1, 3), labcounts = 32, window_size = 12, hidden_size = 200, stride = (1, 1), padding = 0):
        super(CNN, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size, stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU(),
            nn.MaxPool2d(pool_size, stride = stride))
        # FIX: use integer (floor) division for the size bookkeeping -- true
        # division yields floats, and the int() wrapper below only papered over it.
        out1_size = (window_size + 2*padding - (kernel_size[1] - 1) - 1)//stride[1] + 1
        maxpool_size = (out1_size + 2*padding - (pool_size[1] - 1) - 1)//stride[1] + 1
        self.layer2 = nn.Sequential(
            nn.Conv2d(nb_filter, nb_filter, kernel_size, stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU(),
            nn.MaxPool2d(pool_size, stride = stride))
        out2_size = (maxpool_size + 2*padding - (kernel_size[1] - 1) - 1)//stride[1] + 1
        maxpool_size = (out2_size + 2*padding - (pool_size[1] - 1) - 1)//stride[1] + 1
        self.drop1 = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(maxpool_size*labcounts*nb_filter, hidden_size)
        self.bn = nn.BatchNorm1d(hidden_size)
        self.drop2 = nn.Dropout(p=0.5)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """x: (batch, labcounts, window_size) -> (batch, num_classes) probabilities."""
        x = x.view(x.size(0), 1, x.size(1), x.size(2))
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = self.drop1(out)
        out = self.fc1(out)
        out = self.drop2(out)
        out = self.bn(out)
        out = self.relu1(out)
        out = self.fc2(out)
        out = torch.sigmoid(out)
        return out

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            y = self.forward(x)
            temp = y.data.cpu().numpy()
        return temp
#allow multiple kernel with differnt kernel size
class CNN_MLF(nn.Module):
    """
    It is a deep CNNs with three different kernel size, the outputs from the three CNNs are concatenated to fed into two fully connected layers.
    """
    def __init__(self, nb_filter, num_classes = 2, kernel_size = (1, 5), pool_size = (1, 3), labcounts = 32, window_size = 12, hidden_size = 200, stride = (1, 1), padding = 0):
        super(CNN_MLF, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size = (1, 3), stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU(),
            nn.MaxPool2d(pool_size, stride = stride))
        # FIX: integer (floor) division throughout -- true division yields
        # floats and nn.Linear(float, ...) raises TypeError on Python 3.
        out1_size = (window_size + 2*padding - (3 - 1) - 1)//stride[1] + 1
        maxpool1_size = (out1_size + 2*padding - (pool_size[1] - 1) - 1)//stride[1] + 1
        self.layer2 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size = (1, 4), stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU(),
            nn.MaxPool2d(pool_size, stride = stride))
        out2_size = (window_size + 2*padding - (4 - 1) - 1)//stride[1] + 1  # 4 is the convolve filter size
        maxpool2_size = (out2_size + 2*padding - (pool_size[1] - 1) - 1)//stride[1] + 1
        self.layer3 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size = (1, 5), stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU(),
            nn.MaxPool2d(pool_size, stride = stride))
        out3_size = (window_size + 2*padding - (5 - 1) - 1)//stride[1] + 1
        maxpool3_size = (out3_size + 2*padding - (pool_size[1] - 1) - 1)//stride[1] + 1
        conv_outsize = maxpool1_size + maxpool2_size + maxpool3_size
        self.drop1 = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(conv_outsize*labcounts*nb_filter, hidden_size)
        self.drop2 = nn.Dropout(p=0.5)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """x: (batch, labcounts, window_size) -> (batch, num_classes) probabilities."""
        x = x.view(x.size(0), 1, x.size(1), x.size(2))
        out1 = self.layer1(x)
        out2 = self.layer2(x)
        out3 = self.layer3(x)
        # Concatenate the flattened outputs of the three kernel branches.
        out = torch.cat((out1.view(out1.size(0), -1), out2.view(out2.size(0), -1), out3.view(out2.size(0), -1)), 1)
        out = self.drop1(out)
        out = self.fc1(out)
        out = self.drop2(out)
        out = self.relu1(out)
        out = self.fc2(out)
        out = torch.sigmoid(out)
        return out

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            y = self.forward(x)
            temp = y.data.cpu().numpy()
        return temp
class CNN_LSTM(nn.Module):
    """
    It is a deep network with two layer CNN, followed by LSTM layer, which further fed into two fully connected layers.
    """
    def __init__(self, nb_filter, num_classes = 2, kernel_size = (1, 5), pool_size = (1, 3), labcounts = 32, window_size = 12, hidden_size = 100, stride = (1, 1), padding = 0, num_layers = 2):
        super(CNN_LSTM, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size, stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU(),
            nn.MaxPool2d(pool_size, stride = stride))
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        # FIX: integer (floor) division -- true division yields floats and
        # nn.LSTM(input_size=float) raises TypeError on Python 3.
        out1_size = (window_size + 2*padding - (kernel_size[1] - 1) - 1)//stride[1] + 1
        maxpool_size = (out1_size + 2*padding - (pool_size[1] - 1) - 1)//stride[1] + 1
        # Collapse the filter channels to 1 so the map can feed the LSTM.
        self.downsample = nn.Conv2d(nb_filter, 1, kernel_size, stride = stride, padding = padding)
        input_size = (maxpool_size + 2*padding - (kernel_size[1] - 1) - 1)//stride[1] + 1
        self.layer2 = nn.LSTM(input_size, hidden_size, num_layers, batch_first = True)
        self.drop1 = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(hidden_size, hidden_size)
        self.drop2 = nn.Dropout(p=0.5)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """x: (batch, labcounts, window_size) -> (batch, num_classes) probabilities."""
        x = x.view(x.size(0), 1, x.size(1), x.size(2))
        out = self.layer1(x)
        out = self.downsample(out)
        out = torch.squeeze(out, 1)
        if torch.cuda.is_available():
            x = x.cuda()
            h0 = Variable(torch.zeros(self.num_layers, out.size(0), self.hidden_size)).cuda()
            c0 = Variable(torch.zeros(self.num_layers, out.size(0), self.hidden_size)).cuda()
        else:
            h0 = Variable(torch.zeros(self.num_layers, out.size(0), self.hidden_size))
            c0 = Variable(torch.zeros(self.num_layers, out.size(0), self.hidden_size))
        out, hn = self.layer2(out, (h0, c0))
        # hn = (h_n, c_n); take the last layer's final hidden state.
        out = hn[0][-1]
        out = self.drop1(out)
        out = self.fc1(out)
        out = self.drop2(out)
        out = self.relu1(out)
        out = self.fc2(out)
        out = torch.sigmoid(out)
        return out

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            y = self.forward(x)
            temp = y.data.cpu().numpy()
        return temp
class CNN_MIX(nn.Module):
    """
    It is a deep network with 2 layers CNN, which works on input and time dimension, respectively, more details refer to deepDianosis in github.
    """
    def __init__(self, nb_filter, num_classes = 2, kernel_size = (1, 5), pool_size = (1, 3), labcounts = 32, window_size = 12, hidden_size = 100, stride = (1, 1), padding = 0):
        super(CNN_MIX, self).__init__()
        # layer1 convolves across the full input (lab) dimension.
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size = (labcounts, 1), stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU())
        self.layer2 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size = (nb_filter, 1), stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU(),
            nn.MaxPool2d(pool_size))
        out1_size = int(np.ceil(float(window_size)/pool_size[1]))
        self.layer3 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size = kernel_size, stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU())
        # FIX: integer (floor) division -- true division yields a float and
        # nn.Linear(float, ...) raises TypeError on Python 3.
        out2_size = (out1_size + 2*padding - (kernel_size[1] - 1) - 1)//stride[1] + 1
        self.drop1 = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(out2_size*nb_filter*nb_filter, hidden_size)
        self.drop2 = nn.Dropout(p=0.5)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """x: (batch, labcounts, window_size) -> (batch, num_classes) probabilities."""
        x = x.view(x.size(0), 1, x.size(1), x.size(2))
        out = self.layer1(x)
        # Swap channel and height so the next conv mixes across the filters.
        out = out.view(out.size(0), out.size(2), out.size(1), out.size(3))
        out = self.layer2(out)
        out = out.view(out.size(0), out.size(2), out.size(1), out.size(3))
        out = self.layer3(out)
        out = out.view(out.size(0), -1)
        out = self.drop1(out)
        out = self.fc1(out)
        out = self.drop2(out)
        out = self.relu1(out)
        out = self.fc2(out)
        out = torch.sigmoid(out)
        return out

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            y = self.forward(x)
            temp = y.data.cpu().numpy()
        return temp
class CNN_MULTI(nn.Module):
    """
    It is a deep network with multiple resolution, more details refer to multiresconvnet of deepDianosis in github.
    """
    def __init__(self, nb_filter, num_classes = 2, kernel_size = (1, 5), pool_size = (1, 2), labcounts = 32, window_size = 12, hidden_size = 100, stride = (1, 1), padding = 0):
        super(CNN_MULTI, self).__init__()
        # FIX: integer (floor) division throughout -- true division yields
        # floats and nn.Linear(float, ...) raises TypeError on Python 3.
        # resolution 1: pool twice, then convolve.
        self.pool1_1 = nn.MaxPool2d(pool_size, stride = pool_size)
        maxpool_size = (window_size + 2*padding - (pool_size[1] - 1) - 1)//pool_size[1] + 1
        self.pool1_2 = nn.MaxPool2d(pool_size, stride = pool_size)
        maxpool1_2_size = (maxpool_size + 2*padding - (pool_size[1] - 1) - 1)//pool_size[1] + 1
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size = kernel_size, stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU())
        cnn1_size = (maxpool1_2_size + 2*padding - (kernel_size[1] - 1) - 1)//stride[1] + 1
        # resolution 2: pool once, then convolve.
        self.pool2_1 = nn.MaxPool2d(pool_size, stride = pool_size)
        maxpool2_1_size = (window_size + 2*padding - (pool_size[1] - 1) - 1)//pool_size[1] + 1
        self.layer2 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size = kernel_size, stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU())
        cnn2_size = (maxpool2_1_size + 2*padding - (kernel_size[1] - 1) - 1)//stride[1] + 1
        # full resolution: convolve, pool, convolve again.
        self.layer3 = nn.Sequential(
            nn.Conv2d(1, nb_filter, kernel_size = kernel_size, stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU(),
            nn.MaxPool2d(pool_size))
        cnn3_size = (window_size + 2*padding - (kernel_size[1] - 1) - 1)//stride[1] + 1
        maxpool3_size = (cnn3_size + 2*padding - (pool_size[1] - 1) - 1)//pool_size[1] + 1
        self.layer4 = nn.Sequential(
            nn.Conv2d(nb_filter, nb_filter, kernel_size = kernel_size, stride = stride, padding = padding),
            nn.BatchNorm2d(nb_filter),
            nn.ReLU())
        cnn4_size = (maxpool3_size + 2*padding - (kernel_size[1] - 1) - 1)//stride[1] + 1
        merge_size = cnn1_size + cnn2_size + cnn4_size
        self.drop1 = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(labcounts*nb_filter*merge_size, hidden_size)
        self.drop2 = nn.Dropout(p=0.5)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """x: (batch, labcounts, window_size) -> (batch, num_classes) probabilities."""
        x = x.view(x.size(0), 1, x.size(1), x.size(2))
        out = self.pool1_1(x)
        out = self.pool1_2(out)
        out1 = self.layer1(out)
        out = self.pool2_1(x)
        out2 = self.layer2(out)
        out = self.layer3(x)
        out3 = self.layer4(out)
        # Concatenate the flattened outputs of the three resolution branches.
        out = torch.cat((out1.view(out1.size(0), -1), out2.view(out2.size(0), -1), out3.view(out3.size(0), -1)), 1)
        out = self.drop1(out)
        out = self.fc1(out)
        out = self.drop2(out)
        out = self.relu1(out)
        out = self.fc2(out)
        out = torch.sigmoid(out)
        return out

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            y = self.forward(x)
            temp = y.data.cpu().numpy()
        return temp
# 1x3 Convolution
def convR(in_channels, out_channels, kernel_size, stride=1, padding=(0, 1)):
    """Bias-free 2D convolution; default padding preserves width for (1, 3) kernels."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        bias=False,
    )
# Residual Block
class ResidualBlock(nn.Module):
    """Two-convolution residual block with an optional downsample shortcut."""
    def __init__(self, in_channel, nb_filter = 16, kernel_size = (1, 3), stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = convR(in_channel, nb_filter, kernel_size=kernel_size, stride=stride)
        self.bn1 = nn.BatchNorm2d(nb_filter)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = convR(nb_filter, nb_filter, kernel_size=kernel_size, stride=stride)
        self.bn2 = nn.BatchNorm2d(nb_filter)
        self.downsample = downsample

    def forward(self, x):
        # Shortcut path: identity, or the downsample projection when given.
        shortcut = self.downsample(x) if self.downsample else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = self.relu(out + shortcut)
        return out
# ResNet Module
class ResNet(nn.Module):
    """Small 1xN ResNet: stem conv + three residual stages, average pooling and
    two fully connected layers with a sigmoid output."""
    def __init__(self, block, layers, nb_filter = 16, labcounts = 12, window_size = 36, kernel_size = (1, 3), pool_size = (1, 3), num_classes=2, hidden_size = 100):
        super(ResNet, self).__init__()
        self.in_channels = 1
        self.conv = convR(self.in_channels, nb_filter, kernel_size = kernel_size)
        self.bn = nn.BatchNorm2d(nb_filter)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self.make_layer(block, nb_filter, layers[0], kernel_size = kernel_size)
        self.layer2 = self.make_layer(block, nb_filter*2, layers[1], 1, kernel_size = kernel_size, in_channels = nb_filter)
        self.layer3 = self.make_layer(block, nb_filter*4, layers[2], 1, kernel_size = kernel_size, in_channels = 2*nb_filter)
        self.avg_pool = nn.AvgPool2d(pool_size)
        # FIX: integer (floor) division -- true division yields a float and
        # nn.Linear(float, ...) raises TypeError on Python 3.
        avgpool2_1_size = (window_size - (pool_size[1] - 1) - 1)//pool_size[1] + 1
        last_layer_size = nb_filter*4*labcounts*avgpool2_1_size
        self.fc = nn.Linear(last_layer_size, hidden_size)
        self.drop2 = nn.Dropout(p=0.5)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def make_layer(self, block, out_channels, blocks, stride=1, kernel_size = (1, 3), in_channels = 16):
        """Build one residual stage of `blocks` blocks; a projection shortcut is
        added when stride or channel count changes.
        NOTE(review): the first block uses the `in_channels` argument (default 16)
        rather than self.in_channels -- callers must pass it for nb_filter != 16.
        """
        downsample = None
        if (stride != 1) or (self.in_channels != out_channels):
            downsample = nn.Sequential(
                convR(in_channels, out_channels, kernel_size = kernel_size, stride=stride),
                nn.BatchNorm2d(out_channels))
        layers = []
        layers.append(block(in_channels, out_channels, kernel_size = kernel_size, stride = stride, downsample = downsample))
        self.in_channels = out_channels
        for i in range(1, blocks):
            layers.append(block(out_channels, out_channels, kernel_size = kernel_size))
        return nn.Sequential(*layers)

    def forward(self, x):
        """x: (batch, labcounts, window_size) -> (batch, num_classes) probabilities."""
        x = x.view(x.size(0), 1, x.size(1), x.size(2))
        out = self.conv(x)
        out = self.bn(out)
        out = self.relu(out)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        out = self.drop2(out)
        out = self.relu1(out)
        out = self.fc2(out)
        out = torch.sigmoid(out)
        return out

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            y = self.forward(x)
            temp = y.data.cpu().numpy()
        return temp
class GRU(nn.Module):
    """
    GRU classifier: a (possibly multi-layer) GRU encoder whose final hidden
    state feeds one fully connected layer with a sigmoid output.
    """
    def __init__(self, input_size, hidden_size, num_layers, num_classes = 2, dropout = 0.5):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True, dropout=dropout)
        self.linear = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        zeros = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        if torch.cuda.is_available():
            x = x.cuda()
            h0 = Variable(zeros).cuda()
        else:
            h0 = Variable(zeros)
        self.gru.flatten_parameters()
        _, hn = self.gru(x, h0)
        # Read out from the last layer's final hidden state.
        return torch.sigmoid(self.linear(hn[-1]))

    def initHidden(self, N):
        """Random initial hidden state for a batch of N sequences."""
        return Variable(torch.randn(1, N, self.hidden_size))

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            probs = self.forward(x)
        return probs.data.cpu().numpy()
class RNN(nn.Module):
    """
    LSTM classifier: a (possibly multi-layer) LSTM encoder whose final hidden
    state feeds one fully connected layer with a sigmoid output.
    """
    def __init__(self, input_size, hidden_size, num_layers, num_classes = 2, dropout = 0.5):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, dropout=dropout)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        zeros = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        if torch.cuda.is_available():
            x = x.cuda()
            h0 = Variable(zeros).cuda()
            c0 = Variable(zeros.clone()).cuda()
        else:
            h0 = Variable(zeros)
            c0 = Variable(zeros.clone())
        self.lstm.flatten_parameters()
        _, hn = self.lstm(x, (h0, c0))
        # hn = (h_n, c_n); decode from the last layer's final hidden state.
        return torch.sigmoid(self.fc(hn[0][-1]))

    def predict_proba(self, x):
        """Return class probabilities (numpy array) for numpy or tensor input."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            probs = self.forward(x)
        return probs.data.cpu().numpy()
class BiRNN(nn.Module):
    """
    Bidirectional LSTM whose final forward and backward hidden states are
    concatenated per sample and fed into one fully connected layer.
    """
    def __init__(self, input_size, hidden_size, num_layers, num_classes = 2, dropout = 0.5):
        super(BiRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first = True, dropout = dropout, bidirectional=True)
        self.fc = nn.Linear(hidden_size*2, num_classes)  # 2 for bidirection
    def forward(self, x):
        if torch.cuda.is_available():
            x = x.cuda()
            h0 = Variable(torch.zeros(self.num_layers*2, x.size(0), self.hidden_size)).cuda()  # 2 for bidirection
            c0 = Variable(torch.zeros(self.num_layers*2, x.size(0), self.hidden_size)).cuda()
        else:
            h0 = Variable(torch.zeros(self.num_layers*2, x.size(0), self.hidden_size))  # 2 for bidirection
            c0 = Variable(torch.zeros(self.num_layers*2, x.size(0), self.hidden_size))
        self.lstm.flatten_parameters()
        out, hn = self.lstm(x, (h0, c0))
        h_n = hn[0]  # shape: (num_layers * 2, batch, hidden_size)
        # BUG FIX: the previous code did h_n[-2:].view(x.size(0), -1), which
        # flattens in (direction, batch) order and therefore mixed hidden
        # states of DIFFERENT samples whenever the batch size was > 1.
        # Concatenating the last forward (h_n[-2]) and backward (h_n[-1])
        # states along the feature dimension keeps each row per-sample.
        # Output shape is unchanged: (batch, 2 * hidden_size).
        rearranged = torch.cat((h_n[-2], h_n[-1]), dim=1)
        # Decode hidden state of last time step
        out = self.fc(rearranged)
        out = torch.sigmoid(out)
        return out
    def predict_proba(self, x):
        """Forward pass without gradients; returns scores as a numpy array."""
        if type(x) is np.ndarray:
            x = torch.from_numpy(x.astype(np.float32))
        with torch.no_grad():
            x = Variable(x)
            if torch.cuda.is_available():
                x = x.cuda()
            y = self.forward(x)
            temp = y.data.cpu().numpy()
            return temp
# select model
def train_deeptorch(population, plpData, train = True, model_type = 'LogisticRegression', class_weight =0, autoencoder = True, w_decay =0.9, epochs = 1, vae = False, size = 100, loss = 'LogSoftmax', nbfilters = 4, learning_rate = 0.0001, hidden_size = 100, modelOutput = 'C:/deeptorch', seed = 1, quiet = False):
    """Train a deep model on patient-level prediction data.

    With ``train=True`` the function performs leave-one-fold-out cross
    validation (fold indices are read from the LAST column of ``population``)
    and returns the out-of-fold predictions appended to the population
    matrix.  With ``train=False`` it fits a final model on all training
    rows, persists it under ``modelOutput`` via joblib, and returns the
    in-sample predictions.

    NOTE(review): depends on helpers defined elsewhere in this module
    (FocalLoss, Estimator, batch, LogisticRegression, MLP, SNN, CNN,
    CNN_LSTM, CNN_MLF, CNN_MIX, CNN_MULTI, ResNet, ResidualBlock, RNN, GRU,
    BiRNN, AutoEncoder, VAE) and on module-level imports of os, joblib and
    timeit; their behaviour is not visible here.
    """
    # ---- non-temporal (2-D) models: plpData indexes like a sparse matrix ----
    if model_type in ['LogisticRegression', 'MLP', 'SNN']:
        y = population[:, 1]  # outcome label column
        X = plpData[population[:, 0], :]  # covariate rows for the selected people
        # Rows with a positive value in the last column belong to the train set.
        trainInds = population[:, population.shape[1] - 1] > 0
        if class_weight == -1:
            # Sentinel -1 selects focal loss instead of weighted cross entropy.
            loss = FocalLoss(gamma = 5)
        else:
            if class_weight == 0:
                # 0 means: derive class weights from the observed outcome frequency.
                weights = float(np.count_nonzero(y))/y.shape[0]
                class_weight = [1 - weights, weights]
            else:
                class_weight = [class_weight, 1]
            # Inverse weighting: the rarer class receives the larger weight.
            class_weight = 1/torch.Tensor(class_weight)
            if torch.cuda.is_available():
                class_weight = class_weight.cuda()
            loss=nn.CrossEntropyLoss(weight = class_weight)
        print("Dataset has %s rows and %s columns" % (X.shape[0], X.shape[1]))
        print("population loaded- %s rows and %s columns" % (np.shape(population)[0], np.shape(population)[1]))
        ###########################################################################
        l1regularization = False
        if train:
            # ------------- cross-validated training over the folds -------------
            pred_size = int(np.sum(population[:, population.shape[1] - 1] > 0))
            print("Calculating prediction for train set of size %s" % (pred_size))
            test_pred = np.zeros(pred_size)  # zeros length sum(population[:,population.size[1]] ==i)
            for i in range(1, int(np.max(population[:, population.shape[1] - 1]) + 1), 1):
                # Fold i is held out; all other folds are used for training.
                testInd = population[population[:, population.shape[1] - 1] > 0, population.shape[1] - 1] == i
                trainInd = (population[population[:, population.shape[1] - 1] > 0, population.shape[1] - 1] != i)
                train_x = X[trainInds, :][trainInd, :]
                train_y = y[trainInds][trainInd]
                test_x = X[trainInds, :][testInd, :]
                print("Fold %s split %s in train set and %s in test set" % (i, train_x.shape[0], test_x.shape[0]))
                print("Train set contains %s outcomes " % (np.sum(train_y)))
                train_x = train_x.toarray()
                test_x = test_x.toarray()
                if autoencoder:
                    # Optional pre-training: compress covariates with a (V)AE and
                    # train the classifier on the 256-d encodings instead.
                    print('first train stakced autoencoder')
                    encoding_size = 256
                    if vae:
                        auto_model = VAE(input_size=train_x.shape[1], encoding_size=encoding_size)
                    else:
                        auto_model = AutoEncoder(input_size=train_x.shape[1], encoding_size=encoding_size)
                    if torch.cuda.is_available():
                        auto_model = auto_model.cuda()
                    clf = Estimator(auto_model)
                    clf.compile(optimizer=torch.optim.Adam(auto_model.parameters(), lr=1e-3, weight_decay = w_decay),
                                loss=nn.MSELoss())
                    clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs, autoencoder = autoencoder, vae = vae)
                    #split to batch for large dataset
                    # NOTE(review): the loop variable below shadows the `train`
                    # parameter; the flag is not read again inside this branch.
                    train_batch = batch(train_x, batch_size=32)
                    train_x = np.array([]).reshape(0, encoding_size)
                    for train in train_batch:
                        encode_train = auto_model.get_encode_features(train)
                        train_x = np.concatenate((train_x, encode_train), axis=0)
                    test_batch = batch(test_x, batch_size=32)
                    test_x = np.array([]).reshape(0, encoding_size)
                    for test in test_batch:
                        encode_Test = auto_model.get_encode_features(test)
                        test_x = np.concatenate((test_x, encode_Test), axis=0)
                    del auto_model
                    del clf
                # train on fold
                print("Training fold %s" % (i))
                start_time = timeit.default_timer()
                if model_type == 'LogisticRegression':
                    model = LogisticRegression(train_x.shape[1])
                    l1regularization = True
                elif model_type == 'SNN':
                    model = SNN(train_x.shape[1], size)
                else:
                    model = MLP(train_x.shape[1], size)
                if torch.cuda.is_available():
                    model = model.cuda()
                clf = Estimator(model)
                clf.compile(optimizer=torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay = w_decay),
                            loss=loss)
                clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs, l1regularization = l1regularization)
                # Locate this fold's slots in the pooled prediction vector.
                ind = (population[:, population.shape[1] - 1] > 0)
                ind = population[ind, population.shape[1] - 1] == i
                # NOTE(review): test_input_var appears unused below.
                test_input_var = torch.from_numpy(test_x.astype(np.float32))
                test_batch = batch(test_x, batch_size = 32)
                temp = []
                for test in test_batch:
                    pred_test1 = model.predict_proba(test)[:, 1]  # positive-class probability
                    temp = np.concatenate((temp, pred_test1), axis = 0)
                test_pred[ind] = temp
                print("Prediction complete: %s rows " % (np.shape(test_pred[ind])[0]))
                print("Mean: %s prediction value" % (np.mean(test_pred[ind])))
            # RETURN CV PREDICTION WHEN TRAIN == T
            test_pred.shape = (population[population[:, population.shape[1] - 1] > 0, :].shape[0], 1)
            prediction = np.append(population[population[:, population.shape[1] - 1] > 0, :], test_pred, axis=1)
            return prediction;
        # train final:
        else:
            print("Training final neural network model on all train data...")
            print("X- %s rows and Y %s length" % (X[trainInds, :].shape[0], y[trainInds].shape[0]))
            start_time = timeit.default_timer()
            train_x = X[trainInds, :]
            train_x = train_x.toarray()
            train_y = y[trainInds]
            if not os.path.exists(modelOutput):
                os.makedirs(modelOutput)
            if autoencoder:
                encoding_size = 256
                if vae:
                    auto_model = VAE(input_size=train_x.shape[1], encoding_size=encoding_size)
                else:
                    auto_model = AutoEncoder(input_size=train_x.shape[1], encoding_size=encoding_size)
                if torch.cuda.is_available():
                    auto_model = auto_model.cuda()
                clf = Estimator(auto_model)
                clf.compile(optimizer=torch.optim.Adam(auto_model.parameters(), lr=1e-3, weight_decay=w_decay),
                            loss=nn.MSELoss())
                clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs, autoencoder=autoencoder, vae = vae)
                train_batch = batch(train_x, batch_size=32)
                train_x = np.array([]).reshape(0, encoding_size)
                for train in train_batch:
                    encode_train = auto_model.get_encode_features(train)
                    train_x = np.concatenate((train_x, encode_train), axis=0)
                # Persist the autoencoder so prediction can reproduce encodings.
                joblib.dump(auto_model, os.path.join(modelOutput, 'autoencoder_model.pkl'))
                del auto_model
                del clf
            print('the final parameter epochs %.2f weight_decay %.2f' %(epochs,w_decay))
            if model_type == 'LogisticRegression':
                model = LogisticRegression(train_x.shape[1])
                l1regularization = True
            elif model_type == 'SNN':
                model = SNN(train_x.shape[1], size)
            else:
                model = MLP(train_x.shape[1], size)
            if torch.cuda.is_available():
                model = model.cuda()
            clf = Estimator(model)
            clf.compile(optimizer=torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay = w_decay),
                        loss=loss)
            clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs, l1regularization = l1regularization)
            end_time = timeit.default_timer()
            print("Training final took: %.2f s" % (end_time - start_time))
            print("Model saved to: %s" % (modelOutput))
            joblib.dump(model, os.path.join(modelOutput,'model.pkl'))
            # DO PREDICTION ON TRAIN:
            train_batch = batch(train_x, batch_size = 32)
            train_pred = []
            for train in train_batch:
                preds = model.predict_proba(train)[:, 1]
                train_pred = np.concatenate((train_pred, preds), axis = 0)
            train_pred.shape = (population[population[:, population.shape[1] - 1] > 0, :].shape[0], 1)
            prediction = np.append(population[population[:, population.shape[1] - 1] > 0, :], train_pred, axis=1)
            # RETURN TRAIN PREDICTION WHEN TRAIN == F
            return prediction;
    # ---- temporal (3-D) models: plpData supports .to_dense() (torch sparse?) ----
    elif model_type in ['CNN', 'RNN', 'CNN_LSTM', 'CNN_MLF', 'CNN_MIX', 'GRU', 'BiRNN', 'CNN_MULTI', 'ResNet']:
        y = population[:, 1]
        X = plpData.to_dense().numpy()
        X = X[np.int64(population[:, 0]), :]
        trainInds = population[:, population.shape[1] - 1] > 0
        if class_weight == -1:
            # NOTE(review): gamma differs from the non-temporal branch (3 vs 5).
            loss = FocalLoss(gamma = 3)
        else:
            if class_weight == 0:
                weights = float(np.count_nonzero(y))/y.shape[0]
                class_weight = [1 - weights, weights]
            else:
                class_weight = [class_weight, 1]
            class_weight = 1/torch.Tensor(class_weight)
            if torch.cuda.is_available():
                class_weight = class_weight.cuda()
            loss=nn.CrossEntropyLoss(weight = class_weight)
        if train:
            # ------------- cross-validated training over the folds -------------
            test_pred = np.zeros(population[population[:, population.shape[1] - 1] > 0, :].shape[0])  # zeros length sum(population[:,population.size[1]] ==i)
            for i in range(1, int(np.max(population[:, population.shape[1] - 1]) + 1), 1):
                testInd = population[population[:, population.shape[1] - 1] > 0, population.shape[1] - 1] == i
                trainInd = (population[population[:, population.shape[1] - 1] > 0, population.shape[1] - 1] != i)
                train_x = X[trainInds, :][trainInd, :]
                train_y = y[trainInds][trainInd]
                test_x = X[trainInds, :][testInd, :]
                print("Fold %s split %s in train set and %s in test set" % (i, train_x.shape[0], test_x.shape[0]))
                print("Train set contains %s outcomes " % (np.sum(train_y)))
                # train on fold
                # NOTE(review): overrides the learning_rate parameter.
                learning_rate = 0.001
                print("Training fold %s" % (i))
                start_time = timeit.default_timer()
                # Dispatch on the requested temporal architecture; the data is
                # 3-D here (samples x labcounts x window) - see shape[2] use.
                if model_type == 'CNN':
                    model = CNN(nb_filter = nbfilters, labcounts = train_x.shape[1], window_size = train_x.shape[2])
                elif model_type == 'CNN_LSTM':
                    model = CNN_LSTM(nb_filter = nbfilters, labcounts=train_x.shape[1], window_size=train_x.shape[2])
                elif model_type == 'CNN_MLF': # multiple kernels with different size
                    model = CNN_MLF(nb_filter = nbfilters, labcounts = train_x.shape[1], window_size = train_x.shape[2])
                elif model_type == 'CNN_MIX': # mixed model from deepDiagnosis
                    model = CNN_MIX(nb_filter = nbfilters, labcounts = train_x.shape[1], window_size = train_x.shape[2])
                elif model_type == 'CNN_MULTI': # multiple resolution model from deepDiagnosis
                    model = CNN_MULTI(nb_filter = nbfilters, labcounts = train_x.shape[1], window_size = train_x.shape[2])
                elif model_type == 'ResNet':
                    print('train ResNet')
                    model = ResNet(ResidualBlock, [3, 3, 3], nb_filter=nbfilters, labcounts=train_x.shape[1], window_size=train_x.shape[2])
                elif model_type == 'RNN':
                    model = RNN(train_x.shape[2], hidden_size, 2, 2)
                elif model_type == 'BiRNN':
                    model = BiRNN(train_x.shape[2], hidden_size, 2, 2)
                elif model_type == 'GRU':
                    model = GRU(train_x.shape[2], hidden_size, 2, 2)
                else:
                    # NOTE(review): falls through with `model` undefined.
                    print('temproal data not supported by this model')
                if torch.cuda.is_available():
                    model = model.cuda()
                clf = Estimator(model)
                clf.compile(optimizer=torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay = 0.0001),
                            loss=loss)
                clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs)
                # Locate this fold's slots in the pooled prediction vector.
                ind = (population[:, population.shape[1] - 1] > 0)
                ind = population[ind, population.shape[1] - 1] == i
                test_batch = batch(test_x, batch_size = 32)
                temp = []
                for test in test_batch:
                    pred_test1 = model.predict_proba(test)[:, 1]  # positive-class probability
                    temp = np.concatenate((temp, pred_test1), axis = 0)
                test_pred[ind] = temp
                del model
                print("Prediction complete: %s rows " % (np.shape(test_pred[ind])[0]))
                print("Mean: %s prediction value" % (np.mean(test_pred[ind])))
            # RETURN CV PREDICTION
            test_pred.shape = (population[population[:, population.shape[1] - 1] > 0, :].shape[0], 1)
            prediction = np.append(population[population[:, population.shape[1] - 1] > 0, :], test_pred, axis=1)
            return prediction;
        # train final:
        else:
            print("Training final neural network model on all train data...")
            print("X- %s rows and Y %s length" % (X[trainInds, :].shape[0], y[trainInds].shape[0]))
            start_time = timeit.default_timer()
            train_x = X[trainInds, :]
            train_y = y[trainInds]
            # NOTE(review): overrides the learning_rate parameter.
            learning_rate = 0.001
            if model_type == 'CNN':
                model = CNN(nb_filter = nbfilters, labcounts = train_x.shape[1], window_size = train_x.shape[2])
            elif model_type == 'CNN_LSTM':
                model = CNN_LSTM(nb_filter=nbfilters, labcounts=train_x.shape[1], window_size=train_x.shape[2])
            elif model_type == 'CNN_MLF': # multiple kernels with different size
                model = CNN_MLF(nb_filter = nbfilters, labcounts = train_x.shape[1], window_size = train_x.shape[2])
            elif model_type == 'CNN_MIX': #mixed model from deepDiagnosis
                model = CNN_MIX(nb_filter = nbfilters, labcounts = train_x.shape[1], window_size = train_x.shape[2])
            elif model_type == 'CNN_MULTI': # multi resolution model from deepDiagnosis
                model = CNN_MULTI(nb_filter = nbfilters, labcounts = train_x.shape[1], window_size = train_x.shape[2])
            elif model_type == 'ResNet':
                model = ResNet(ResidualBlock, [3, 3, 3], nb_filter=nbfilters, labcounts=train_x.shape[1], window_size=train_x.shape[2])
            elif model_type == 'RNN':
                model = RNN(train_x.shape[2], hidden_size, 2, 2)
            elif model_type == 'BiRNN':
                model = BiRNN(train_x.shape[2], hidden_size, 2, 2)
            elif model_type == 'GRU':
                model = GRU(train_x.shape[2], hidden_size, 2, 2)
            else:
                # NOTE(review): falls through with `model` undefined.
                print('temproal data not supported by this model')
            if torch.cuda.is_available():
                model = model.cuda()
            clf = Estimator(model)
            clf.compile(optimizer=torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay = 0.0001),
                        loss=loss)
            clf.fit(train_x, train_y, batch_size=32, nb_epoch=epochs)
            end_time = timeit.default_timer()
            print("Training final took: %.2f s" % (end_time - start_time))
            # save the model:
            if not os.path.exists(modelOutput):
                os.makedirs(modelOutput)
            print("Model saved to: %s" % (modelOutput))
            joblib.dump(model, os.path.join(modelOutput,'model.pkl'))
            # prediction on train:
            test_batch = batch(train_x, batch_size = 32)
            test_pred = []
            for test in test_batch:
                pred_test1 = model.predict_proba(test)[:, 1]
                test_pred = np.concatenate((test_pred, pred_test1), axis = 0)
            test_pred.shape = (population[population[:, population.shape[1] - 1] > 0, :].shape[0], 1)
            prediction = np.append(population[population[:, population.shape[1] - 1] > 0, :], test_pred, axis=1)
            return prediction;
| [
"torch.nn.Dropout",
"torch.nn.functional.binary_cross_entropy",
"numpy.sum",
"torch.nn.functional.dropout",
"torch.randn",
"numpy.isnan",
"numpy.shape",
"numpy.mean",
"numpy.arange",
"torch.no_grad",
"os.path.join",
"gensim.models.word2vec.Word2Vec",
"numpy.unique",
"torch.nn.MSELoss",
"... | [((1233, 1266), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1256, 1266), False, 'import warnings\n'), ((2893, 2947), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['recon_x', 'x'], {'size_average': '(False)'}), '(recon_x, x, size_average=False)\n', (2915, 2947), True, 'import torch.nn.functional as F\n'), ((3457, 3482), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3480, 3482), False, 'import torch\n'), ((15699, 15731), 'numpy.zeros', 'np.zeros', (['(N, D, T)'], {'dtype': 'float'}), '((N, D, T), dtype=float)\n', (15707, 15731), True, 'import numpy as np\n'), ((19243, 19256), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19254, 19256), False, 'from collections import OrderedDict\n'), ((22042, 22114), 'gensim.models.word2vec.Word2Vec', 'w2v.Word2Vec', ([], {'size': 'embedding_size', 'min_count': '(3)', 'workers': '(4)', 'iter': '(10)', 'sg': '(1)'}), '(size=embedding_size, min_count=3, workers=4, iter=10, sg=1)\n', (22054, 22114), True, 'import gensim.models.word2vec as w2v\n'), ((23621, 23638), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (23629, 23638), True, 'import numpy as np\n'), ((23660, 23678), 'numpy.unique', 'np.unique', (['classes'], {}), '(classes)\n', (23669, 23678), True, 'import numpy as np\n'), ((23731, 23753), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (23740, 23753), True, 'import numpy as np\n'), ((47539, 47649), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'padding': 'padding', 'stride': 'stride', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=kernel_size, padding=\n padding, stride=stride, bias=False)\n', (47548, 47649), True, 'import torch.nn as nn\n'), ((1811, 1827), 'torch.nn.functional.softmax', 'F.softmax', (['input'], {}), '(input)\n', (1820, 1827), True, 'import torch.nn.functional as F\n'), ((3365, 3393), 
'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (3379, 3393), True, 'import numpy as np\n'), ((3560, 3586), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (3574, 3586), False, 'import torch\n'), ((7447, 7513), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'batch_size': 'batch_size', 'shuffle': '(True)'}), '(dataset=train_set, batch_size=batch_size, shuffle=True)\n', (7457, 7513), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((8272, 8297), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8295, 8297), False, 'import torch\n'), ((8442, 8467), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'predict'], {}), '(y, predict)\n', (8455, 8467), False, 'from sklearn.metrics import roc_auc_score\n'), ((8688, 8713), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8711, 8713), False, 'import torch\n'), ((19431, 19447), 'numpy.int64', 'np.int64', (['cov_id'], {}), '(cov_id)\n', (19439, 19447), True, 'import numpy as np\n'), ((24740, 24765), 'numpy.array', 'np.array', (['training_indice'], {}), '(training_indice)\n', (24748, 24765), True, 'import numpy as np\n'), ((24803, 24827), 'numpy.array', 'np.array', (['training_label'], {}), '(training_label)\n', (24811, 24827), True, 'import numpy as np\n'), ((24962, 24989), 'numpy.array', 'np.array', (['validation_indice'], {}), '(validation_indice)\n', (24970, 24989), True, 'import numpy as np\n'), ((25031, 25057), 'numpy.array', 'np.array', (['validation_label'], {}), '(validation_label)\n', (25039, 25057), True, 'import numpy as np\n'), ((25390, 25424), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'num_classes'], {}), '(input_size, num_classes)\n', (25399, 25424), True, 'import torch.nn as nn\n'), ((25495, 25513), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (25508, 25513), False, 'import torch\n'), ((26104, 26137), 'torch.nn.Linear', 
'nn.Linear', (['input_dim', 'hidden_size'], {}), '(input_dim, hidden_size)\n', (26113, 26137), True, 'import torch.nn as nn\n'), ((26157, 26192), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (26166, 26192), True, 'import torch.nn as nn\n'), ((26264, 26307), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.5)', 'training': 'self.training'}), '(x, p=0.5, training=self.training)\n', (26273, 26307), True, 'import torch.nn.functional as F\n'), ((26345, 26361), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (26358, 26361), False, 'import torch\n'), ((26966, 26999), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'hidden_size'], {}), '(input_dim, hidden_size)\n', (26975, 26999), True, 'import torch.nn as nn\n'), ((27077, 27112), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (27086, 27112), True, 'import torch.nn as nn\n'), ((27184, 27227), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.5)', 'training': 'self.training'}), '(x, p=0.5, training=self.training)\n', (27193, 27227), True, 'import torch.nn.functional as F\n'), ((27312, 27328), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (27325, 27328), False, 'import torch\n'), ((28489, 28514), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (28512, 28514), False, 'import torch\n'), ((29249, 29286), 'torch.nn.Linear', 'nn.Linear', (['input_size', '(input_size / 2)'], {}), '(input_size, input_size / 2)\n', (29258, 29286), True, 'import torch.nn as nn\n'), ((29305, 29345), 'torch.nn.Linear', 'nn.Linear', (['(input_size / 2)', 'encoding_size'], {}), '(input_size / 2, encoding_size)\n', (29314, 29345), True, 'import torch.nn as nn\n'), ((29364, 29404), 'torch.nn.Linear', 'nn.Linear', (['(input_size / 2)', 'encoding_size'], {}), '(input_size / 2, encoding_size)\n', (29373, 29404), True, 'import torch.nn as nn\n'), ((29422, 29462), 'torch.nn.Linear', 
'nn.Linear', (['encoding_size', '(input_size / 2)'], {}), '(encoding_size, input_size / 2)\n', (29431, 29462), True, 'import torch.nn as nn\n'), ((29480, 29517), 'torch.nn.Linear', 'nn.Linear', (['(input_size / 2)', 'input_size'], {}), '(input_size / 2, input_size)\n', (29489, 29517), True, 'import torch.nn as nn\n'), ((29537, 29546), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (29544, 29546), True, 'import torch.nn as nn\n'), ((29570, 29582), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (29580, 29582), True, 'import torch.nn as nn\n'), ((30081, 30106), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (30104, 30106), False, 'import torch\n'), ((31029, 31063), 'torch.nn.Linear', 'nn.Linear', (['latent_size', 'input_size'], {}), '(latent_size, input_size)\n', (31038, 31063), True, 'import torch.nn as nn\n'), ((31087, 31172), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['input_size', '(128)', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(input_size, 128, kernel_size, stride=stride, padding=padding\n )\n', (31105, 31172), True, 'import torch.nn as nn\n'), ((31193, 31265), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(128, 64, kernel_size, stride=stride, padding=padding)\n', (31211, 31265), True, 'import torch.nn as nn\n'), ((31291, 31362), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(64)', '(32)', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(64, 32, kernel_size, stride=stride, padding=padding)\n', (31309, 31362), True, 'import torch.nn as nn\n'), ((31388, 31474), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', 'img_channels', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(32, img_channels, kernel_size, stride=stride, padding=\n padding)\n', (31406, 31474), True, 'import torch.nn as nn\n'), ((32194, 32266), 'torch.nn.Conv2d', 'nn.Conv2d', (['img_channels', '(32)', 
'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(img_channels, 32, kernel_size, stride=stride, padding=padding)\n', (32203, 32266), True, 'import torch.nn as nn\n'), ((32290, 32352), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(32, 64, kernel_size, stride=stride, padding=padding)\n', (32299, 32352), True, 'import torch.nn as nn\n'), ((32376, 32439), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(64, 128, kernel_size, stride=stride, padding=padding)\n', (32385, 32439), True, 'import torch.nn as nn\n'), ((32463, 32527), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(128, 256, kernel_size, stride=stride, padding=padding)\n', (32472, 32527), True, 'import torch.nn as nn\n'), ((32586, 32618), 'torch.nn.Linear', 'nn.Linear', (['out_size', 'latent_size'], {}), '(out_size, latent_size)\n', (32595, 32618), True, 'import torch.nn as nn\n'), ((32646, 32678), 'torch.nn.Linear', 'nn.Linear', (['out_size', 'latent_size'], {}), '(out_size, latent_size)\n', (32655, 32678), True, 'import torch.nn as nn\n'), ((33449, 33472), 'torch.randn_like', 'torch.randn_like', (['sigma'], {}), '(sigma)\n', (33465, 33472), False, 'import torch\n'), ((35041, 35058), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (35051, 35058), True, 'import torch.nn as nn\n'), ((35158, 35185), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_size'], {}), '(hidden_size)\n', (35172, 35185), True, 'import torch.nn as nn\n'), ((35207, 35224), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (35217, 35224), True, 'import torch.nn as nn\n'), ((35246, 35255), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (35253, 35255), True, 'import torch.nn as nn\n'), ((35275, 35310), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, 
num_classes)\n', (35284, 35310), True, 'import torch.nn as nn\n'), ((35688, 35706), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (35701, 35706), False, 'import torch\n'), ((37880, 37897), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (37890, 37897), True, 'import torch.nn as nn\n'), ((37917, 37977), 'torch.nn.Linear', 'nn.Linear', (['(conv_outsize * labcounts * nb_filter)', 'hidden_size'], {}), '(conv_outsize * labcounts * nb_filter, hidden_size)\n', (37926, 37977), True, 'import torch.nn as nn\n'), ((37995, 38012), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (38005, 38012), True, 'import torch.nn as nn\n'), ((38034, 38043), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (38041, 38043), True, 'import torch.nn as nn\n'), ((38063, 38098), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (38072, 38098), True, 'import torch.nn as nn\n'), ((38555, 38573), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (38568, 38573), False, 'import torch\n'), ((39857, 39925), 'torch.nn.Conv2d', 'nn.Conv2d', (['nb_filter', '(1)', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(nb_filter, 1, kernel_size, stride=stride, padding=padding)\n', (39866, 39925), True, 'import torch.nn as nn\n'), ((40041, 40103), 'torch.nn.LSTM', 'nn.LSTM', (['input_size', 'hidden_size', 'num_layers'], {'batch_first': '(True)'}), '(input_size, hidden_size, num_layers, batch_first=True)\n', (40048, 40103), True, 'import torch.nn as nn\n'), ((40127, 40144), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (40137, 40144), True, 'import torch.nn as nn\n'), ((40164, 40199), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (40173, 40199), True, 'import torch.nn as nn\n'), ((40221, 40238), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (40231, 40238), True, 'import torch.nn as nn\n'), 
((40260, 40269), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (40267, 40269), True, 'import torch.nn as nn\n'), ((40289, 40324), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (40298, 40324), True, 'import torch.nn as nn\n'), ((40493, 40514), 'torch.squeeze', 'torch.squeeze', (['out', '(1)'], {}), '(out, 1)\n', (40506, 40514), False, 'import torch\n'), ((40526, 40551), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (40549, 40551), False, 'import torch\n'), ((41186, 41204), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (41199, 41204), False, 'import torch\n'), ((42802, 42819), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (42812, 42819), True, 'import torch.nn as nn\n'), ((42839, 42896), 'torch.nn.Linear', 'nn.Linear', (['(out2_size * nb_filter * nb_filter)', 'hidden_size'], {}), '(out2_size * nb_filter * nb_filter, hidden_size)\n', (42848, 42896), True, 'import torch.nn as nn\n'), ((42914, 42931), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (42924, 42931), True, 'import torch.nn as nn\n'), ((42953, 42962), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (42960, 42962), True, 'import torch.nn as nn\n'), ((42982, 43017), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (42991, 43017), True, 'import torch.nn as nn\n'), ((43549, 43567), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (43562, 43567), False, 'import torch\n'), ((44365, 44406), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {'stride': 'pool_size'}), '(pool_size, stride=pool_size)\n', (44377, 44406), True, 'import torch.nn as nn\n'), ((44523, 44564), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {'stride': 'pool_size'}), '(pool_size, stride=pool_size)\n', (44535, 44564), True, 'import torch.nn as nn\n'), ((45012, 45053), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {'stride': 
'pool_size'}), '(pool_size, stride=pool_size)\n', (45024, 45053), True, 'import torch.nn as nn\n'), ((46240, 46257), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (46250, 46257), True, 'import torch.nn as nn\n'), ((46277, 46335), 'torch.nn.Linear', 'nn.Linear', (['(labcounts * nb_filter * merge_size)', 'hidden_size'], {}), '(labcounts * nb_filter * merge_size, hidden_size)\n', (46286, 46335), True, 'import torch.nn as nn\n'), ((46353, 46370), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (46363, 46370), True, 'import torch.nn as nn\n'), ((46392, 46401), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (46399, 46401), True, 'import torch.nn as nn\n'), ((46421, 46456), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (46430, 46456), True, 'import torch.nn as nn\n'), ((47040, 47058), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (47053, 47058), False, 'import torch\n'), ((47978, 48003), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (47992, 48003), True, 'import torch.nn as nn\n'), ((48024, 48045), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (48031, 48045), True, 'import torch.nn as nn\n'), ((48158, 48183), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (48172, 48183), True, 'import torch.nn as nn\n'), ((48938, 48963), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (48952, 48963), True, 'import torch.nn as nn\n'), ((48984, 49005), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (48991, 49005), True, 'import torch.nn as nn\n'), ((49375, 49398), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['pool_size'], {}), '(pool_size)\n', (49387, 49398), True, 'import torch.nn as nn\n'), ((49563, 49602), 'torch.nn.Linear', 'nn.Linear', (['last_layer_size', 'hidden_size'], {}), '(last_layer_size, hidden_size)\n', (49572, 
49602), True, 'import torch.nn as nn\n'), ((49624, 49641), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (49634, 49641), True, 'import torch.nn as nn\n'), ((49663, 49672), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (49670, 49672), True, 'import torch.nn as nn\n'), ((49692, 49727), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (49701, 49727), True, 'import torch.nn as nn\n'), ((50434, 50456), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (50447, 50456), True, 'import torch.nn as nn\n'), ((50922, 50940), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (50935, 50940), False, 'import torch\n'), ((51684, 51762), 'torch.nn.GRU', 'nn.GRU', (['input_size', 'hidden_size', 'num_layers'], {'batch_first': '(True)', 'dropout': 'dropout'}), '(input_size, hidden_size, num_layers, batch_first=True, dropout=dropout)\n', (51690, 51762), True, 'import torch.nn as nn\n'), ((51789, 51824), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (51798, 51824), True, 'import torch.nn as nn\n'), ((51863, 51888), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (51886, 51888), False, 'import torch\n'), ((52299, 52317), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (52312, 52317), False, 'import torch\n'), ((53144, 53223), 'torch.nn.LSTM', 'nn.LSTM', (['input_size', 'hidden_size', 'num_layers'], {'batch_first': '(True)', 'dropout': 'dropout'}), '(input_size, hidden_size, num_layers, batch_first=True, dropout=dropout)\n', (53151, 53223), True, 'import torch.nn as nn\n'), ((53246, 53281), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'num_classes'], {}), '(hidden_size, num_classes)\n', (53255, 53281), True, 'import torch.nn as nn\n'), ((53324, 53349), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (53347, 53349), False, 'import torch\n'), ((53953, 53971), 
'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (53966, 53971), False, 'import torch\n'), ((54721, 54825), 'torch.nn.LSTM', 'nn.LSTM', (['input_size', 'hidden_size', 'num_layers'], {'batch_first': '(True)', 'dropout': 'dropout', 'bidirectional': '(True)'}), '(input_size, hidden_size, num_layers, batch_first=True, dropout=\n dropout, bidirectional=True)\n', (54728, 54825), True, 'import torch.nn as nn\n'), ((54872, 54911), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * 2)', 'num_classes'], {}), '(hidden_size * 2, num_classes)\n', (54881, 54911), True, 'import torch.nn as nn\n'), ((54974, 54999), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (54997, 54999), False, 'import torch\n'), ((55689, 55707), 'torch.sigmoid', 'torch.sigmoid', (['out'], {}), '(out)\n', (55702, 55707), False, 'import torch\n'), ((1906, 1922), 'torch.log', 'torch.log', (['logit'], {}), '(logit)\n', (1915, 1922), False, 'import torch\n'), ((2586, 2625), 'torch.autograd.Variable', 'Variable', (['mask'], {'volatile': 'index.volatile'}), '(mask, volatile=index.volatile)\n', (2594, 2625), False, 'from torch.autograd import Variable\n'), ((2641, 2666), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2664, 2666), False, 'import torch\n'), ((3893, 3915), 'numpy.isnan', 'np.isnan', (['metrics_hist'], {}), '(metrics_hist)\n', (3901, 3915), True, 'import numpy as np\n'), ((3933, 3959), 'numpy.nanargmin', 'np.nanargmin', (['metrics_hist'], {}), '(metrics_hist)\n', (3945, 3959), True, 'import numpy as np\n'), ((4799, 4810), 'torch.autograd.Variable', 'Variable', (['X'], {}), '(X)\n', (4807, 4810), False, 'from torch.autograd import Variable\n'), ((4829, 4840), 'torch.autograd.Variable', 'Variable', (['y'], {}), '(y)\n', (4837, 4840), False, 'from torch.autograd import Variable\n'), ((4856, 4881), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4879, 4881), False, 'import torch\n'), ((13787, 13796), 
'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (13793, 13796), True, 'import torch.nn.functional as F\n'), ((14724, 14749), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14747, 14749), False, 'import torch\n'), ((20119, 20131), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (20126, 20131), True, 'import numpy as np\n'), ((20154, 20165), 'numpy.std', 'np.std', (['val'], {}), '(val)\n', (20160, 20165), True, 'import numpy as np\n'), ((20607, 20626), 'numpy.int64', 'np.int64', (['values[0]'], {}), '(values[0])\n', (20615, 20626), True, 'import numpy as np\n'), ((25672, 25687), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25685, 25687), False, 'import torch\n'), ((25705, 25716), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (25713, 25716), False, 'from torch.autograd import Variable\n'), ((25732, 25757), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (25755, 25757), False, 'import torch\n'), ((26514, 26529), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26527, 26529), False, 'import torch\n'), ((26547, 26558), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (26555, 26558), False, 'from torch.autograd import Variable\n'), ((26574, 26599), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (26597, 26599), False, 'import torch\n'), ((27481, 27496), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27494, 27496), False, 'import torch\n'), ((27514, 27525), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (27522, 27525), False, 'from torch.autograd import Variable\n'), ((27541, 27566), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (27564, 27566), False, 'import torch\n'), ((27968, 28005), 'torch.nn.Linear', 'nn.Linear', (['input_size', '(input_size / 2)'], {}), '(input_size, input_size / 2)\n', (27977, 28005), True, 'import torch.nn as nn\n'), ((28017, 28030), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], 
{}), '(True)\n', (28024, 28030), True, 'import torch.nn as nn\n'), ((28044, 28085), 'torch.nn.Linear', 'nn.Linear', (['(input_size / 2)', '(input_size / 4)'], {}), '(input_size / 2, input_size / 4)\n', (28053, 28085), True, 'import torch.nn as nn\n'), ((28095, 28108), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (28102, 28108), True, 'import torch.nn as nn\n'), ((28122, 28162), 'torch.nn.Linear', 'nn.Linear', (['(input_size / 4)', 'encoding_size'], {}), '(input_size / 4, encoding_size)\n', (28131, 28162), True, 'import torch.nn as nn\n'), ((28174, 28187), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (28181, 28187), True, 'import torch.nn as nn\n'), ((28248, 28288), 'torch.nn.Linear', 'nn.Linear', (['encoding_size', '(input_size / 4)'], {}), '(encoding_size, input_size / 4)\n', (28257, 28288), True, 'import torch.nn as nn\n'), ((28300, 28313), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (28307, 28313), True, 'import torch.nn as nn\n'), ((28327, 28368), 'torch.nn.Linear', 'nn.Linear', (['(input_size / 4)', '(input_size / 2)'], {}), '(input_size / 4, input_size / 2)\n', (28336, 28368), True, 'import torch.nn as nn\n'), ((28378, 28391), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (28385, 28391), True, 'import torch.nn as nn\n'), ((28405, 28442), 'torch.nn.Linear', 'nn.Linear', (['(input_size / 2)', 'input_size'], {}), '(input_size / 2, input_size)\n', (28414, 28442), True, 'import torch.nn as nn\n'), ((28779, 28794), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (28792, 28794), False, 'import torch\n'), ((28812, 28823), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (28820, 28823), False, 'from torch.autograd import Variable\n'), ((28839, 28864), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (28862, 28864), False, 'import torch\n'), ((30396, 30411), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (30409, 30411), False, 'import torch\n'), ((30429, 30440), 
'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (30437, 30440), False, 'from torch.autograd import Variable\n'), ((30456, 30481), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (30479, 30481), False, 'import torch\n'), ((33722, 33737), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (33735, 33737), False, 'import torch\n'), ((33755, 33766), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (33763, 33766), False, 'from torch.autograd import Variable\n'), ((33782, 33807), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (33805, 33807), False, 'import torch\n'), ((34236, 34304), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size, stride=stride, padding=padding)\n', (34245, 34304), True, 'import torch.nn as nn\n'), ((34322, 34347), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (34336, 34347), True, 'import torch.nn as nn\n'), ((34361, 34370), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (34368, 34370), True, 'import torch.nn as nn\n'), ((34384, 34422), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {'stride': 'stride'}), '(pool_size, stride=stride)\n', (34396, 34422), True, 'import torch.nn as nn\n'), ((34648, 34724), 'torch.nn.Conv2d', 'nn.Conv2d', (['nb_filter', 'nb_filter', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(nb_filter, nb_filter, kernel_size, stride=stride, padding=padding)\n', (34657, 34724), True, 'import torch.nn as nn\n'), ((34742, 34767), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (34756, 34767), True, 'import torch.nn as nn\n'), ((34781, 34790), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (34788, 34790), True, 'import torch.nn as nn\n'), ((34804, 34842), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {'stride': 'stride'}), '(pool_size, stride=stride)\n', (34816, 34842), True, 
'import torch.nn as nn\n'), ((35865, 35880), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (35878, 35880), False, 'import torch\n'), ((35898, 35909), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (35906, 35909), False, 'from torch.autograd import Variable\n'), ((35925, 35950), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (35948, 35950), False, 'import torch\n'), ((36581, 36656), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter'], {'kernel_size': '(1, 3)', 'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size=(1, 3), stride=stride, padding=padding)\n', (36590, 36656), True, 'import torch.nn as nn\n'), ((36676, 36701), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (36690, 36701), True, 'import torch.nn as nn\n'), ((36715, 36724), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (36722, 36724), True, 'import torch.nn as nn\n'), ((36738, 36776), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {'stride': 'stride'}), '(pool_size, stride=stride)\n', (36750, 36776), True, 'import torch.nn as nn\n'), ((36990, 37065), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter'], {'kernel_size': '(1, 4)', 'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size=(1, 4), stride=stride, padding=padding)\n', (36999, 37065), True, 'import torch.nn as nn\n'), ((37085, 37110), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (37099, 37110), True, 'import torch.nn as nn\n'), ((37124, 37133), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (37131, 37133), True, 'import torch.nn as nn\n'), ((37147, 37185), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {'stride': 'stride'}), '(pool_size, stride=stride)\n', (37159, 37185), True, 'import torch.nn as nn\n'), ((37430, 37505), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter'], {'kernel_size': '(1, 5)', 'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size=(1, 5), 
stride=stride, padding=padding)\n', (37439, 37505), True, 'import torch.nn as nn\n'), ((37525, 37550), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (37539, 37550), True, 'import torch.nn as nn\n'), ((37564, 37573), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (37571, 37573), True, 'import torch.nn as nn\n'), ((37587, 37625), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {'stride': 'stride'}), '(pool_size, stride=stride)\n', (37599, 37625), True, 'import torch.nn as nn\n'), ((38732, 38747), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (38745, 38747), False, 'import torch\n'), ((38765, 38776), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (38773, 38776), False, 'from torch.autograd import Variable\n'), ((38792, 38817), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (38815, 38817), False, 'import torch\n'), ((39392, 39460), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter', 'kernel_size'], {'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size, stride=stride, padding=padding)\n', (39401, 39460), True, 'import torch.nn as nn\n'), ((39478, 39503), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (39492, 39503), True, 'import torch.nn as nn\n'), ((39517, 39526), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (39524, 39526), True, 'import torch.nn as nn\n'), ((39540, 39578), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {'stride': 'stride'}), '(pool_size, stride=stride)\n', (39552, 39578), True, 'import torch.nn as nn\n'), ((41363, 41378), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (41376, 41378), False, 'import torch\n'), ((41396, 41407), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (41404, 41407), False, 'from torch.autograd import Variable\n'), ((41423, 41448), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (41446, 41448), False, 'import torch\n'), ((42030, 42118), 
'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter'], {'kernel_size': '(labcounts, 1)', 'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size=(labcounts, 1), stride=stride, padding=\n padding)\n', (42039, 42118), True, 'import torch.nn as nn\n'), ((42133, 42158), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (42147, 42158), True, 'import torch.nn as nn\n'), ((42172, 42181), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (42179, 42181), True, 'import torch.nn as nn\n'), ((42232, 42320), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter'], {'kernel_size': '(nb_filter, 1)', 'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size=(nb_filter, 1), stride=stride, padding=\n padding)\n', (42241, 42320), True, 'import torch.nn as nn\n'), ((42335, 42360), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (42349, 42360), True, 'import torch.nn as nn\n'), ((42374, 42383), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (42381, 42383), True, 'import torch.nn as nn\n'), ((42397, 42420), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {}), '(pool_size)\n', (42409, 42420), True, 'import torch.nn as nn\n'), ((42537, 42622), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size=kernel_size, stride=stride, padding=padding\n )\n', (42546, 42622), True, 'import torch.nn as nn\n'), ((42637, 42662), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (42651, 42662), True, 'import torch.nn as nn\n'), ((42676, 42685), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (42683, 42685), True, 'import torch.nn as nn\n'), ((43726, 43741), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (43739, 43741), False, 'import torch\n'), ((43759, 43770), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (43767, 43770), False, 'from torch.autograd import 
Variable\n'), ((43786, 43811), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (43809, 43811), False, 'import torch\n'), ((44726, 44811), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size=kernel_size, stride=stride, padding=padding\n )\n', (44735, 44811), True, 'import torch.nn as nn\n'), ((44826, 44851), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (44840, 44851), True, 'import torch.nn as nn\n'), ((44865, 44874), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (44872, 44874), True, 'import torch.nn as nn\n'), ((45214, 45299), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size=kernel_size, stride=stride, padding=padding\n )\n', (45223, 45299), True, 'import torch.nn as nn\n'), ((45314, 45339), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (45328, 45339), True, 'import torch.nn as nn\n'), ((45353, 45362), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (45360, 45362), True, 'import torch.nn as nn\n'), ((45504, 45589), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nb_filter'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(1, nb_filter, kernel_size=kernel_size, stride=stride, padding=padding\n )\n', (45513, 45589), True, 'import torch.nn as nn\n'), ((45604, 45629), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (45618, 45629), True, 'import torch.nn as nn\n'), ((45643, 45652), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (45650, 45652), True, 'import torch.nn as nn\n'), ((45666, 45689), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['pool_size'], {}), '(pool_size)\n', (45678, 45689), True, 'import torch.nn as nn\n'), ((45917, 46009), 'torch.nn.Conv2d', 'nn.Conv2d', (['nb_filter', 'nb_filter'], {'kernel_size': 
'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(nb_filter, nb_filter, kernel_size=kernel_size, stride=stride,\n padding=padding)\n', (45926, 46009), True, 'import torch.nn as nn\n'), ((46025, 46050), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nb_filter'], {}), '(nb_filter)\n', (46039, 46050), True, 'import torch.nn as nn\n'), ((46064, 46073), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (46071, 46073), True, 'import torch.nn as nn\n'), ((47217, 47232), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (47230, 47232), False, 'import torch\n'), ((47250, 47261), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (47258, 47261), False, 'from torch.autograd import Variable\n'), ((47277, 47302), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (47300, 47302), False, 'import torch\n'), ((51099, 51114), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (51112, 51114), False, 'import torch\n'), ((51132, 51143), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (51140, 51143), False, 'from torch.autograd import Variable\n'), ((51159, 51184), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (51182, 51184), False, 'import torch\n'), ((52391, 52426), 'torch.randn', 'torch.randn', (['(1)', 'N', 'self.hidden_size'], {}), '(1, N, self.hidden_size)\n', (52402, 52426), False, 'import torch\n'), ((52567, 52582), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (52580, 52582), False, 'import torch\n'), ((52600, 52611), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (52608, 52611), False, 'from torch.autograd import Variable\n'), ((52627, 52652), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (52650, 52652), False, 'import torch\n'), ((54126, 54141), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (54139, 54141), False, 'import torch\n'), ((54159, 54170), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (54167, 54170), False, 'from 
torch.autograd import Variable\n'), ((54186, 54211), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (54209, 54211), False, 'import torch\n'), ((55862, 55877), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (55875, 55877), False, 'import torch\n'), ((55895, 55906), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (55903, 55906), False, 'from torch.autograd import Variable\n'), ((55922, 55947), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (55945, 55947), False, 'import torch\n'), ((56896, 56921), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (56919, 56921), False, 'import torch\n'), ((56977, 57017), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'class_weight'}), '(weight=class_weight)\n', (56996, 57017), True, 'import torch.nn as nn\n'), ((57500, 57519), 'numpy.zeros', 'np.zeros', (['pred_size'], {}), '(pred_size)\n', (57508, 57519), True, 'import numpy as np\n'), ((61096, 61187), 'numpy.append', 'np.append', (['population[population[:, population.shape[1] - 1] > 0, :]', 'test_pred'], {'axis': '(1)'}), '(population[population[:, population.shape[1] - 1] > 0, :],\n test_pred, axis=1)\n', (61105, 61187), True, 'import numpy as np\n'), ((61430, 61452), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (61450, 61452), False, 'import timeit\n'), ((63000, 63025), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (63023, 63025), False, 'import torch\n'), ((63337, 63359), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (63357, 63359), False, 'import timeit\n'), ((63913, 64005), 'numpy.append', 'np.append', (['population[population[:, population.shape[1] - 1] > 0, :]', 'train_pred'], {'axis': '(1)'}), '(population[population[:, population.shape[1] - 1] > 0, :],\n train_pred, axis=1)\n', (63922, 64005), True, 'import numpy as np\n'), ((2380, 2399), 'torch.Tensor', 'torch.Tensor', (['*size'], {}), 
'(*size)\n', (2392, 2399), False, 'import torch\n'), ((3500, 3526), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (3514, 3526), False, 'import torch\n'), ((14680, 14706), 'torch.floor', 'torch.floor', (['random_tensor'], {}), '(random_tensor)\n', (14691, 14706), False, 'import torch\n'), ((17018, 17034), 'numpy.nonzero', 'np.nonzero', (['temp'], {}), '(temp)\n', (17028, 17034), True, 'import numpy as np\n'), ((18006, 18022), 'numpy.nonzero', 'np.nonzero', (['temp'], {}), '(temp)\n', (18016, 18022), True, 'import numpy as np\n'), ((50081, 50109), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (50095, 50109), True, 'import torch.nn as nn\n'), ((56860, 56886), 'torch.Tensor', 'torch.Tensor', (['class_weight'], {}), '(class_weight)\n', (56872, 56886), False, 'import torch\n'), ((57353, 57403), 'numpy.sum', 'np.sum', (['(population[:, population.shape[1] - 1] > 0)'], {}), '(population[:, population.shape[1] - 1] > 0)\n', (57359, 57403), True, 'import numpy as np\n'), ((59688, 59710), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (59708, 59710), False, 'import timeit\n'), ((60007, 60032), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (60030, 60032), False, 'import torch\n'), ((61561, 61588), 'os.path.exists', 'os.path.exists', (['modelOutput'], {}), '(modelOutput)\n', (61575, 61588), False, 'import os\n'), ((61598, 61622), 'os.makedirs', 'os.makedirs', (['modelOutput'], {}), '(modelOutput)\n', (61609, 61622), False, 'import os\n'), ((61892, 61917), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (61915, 61917), False, 'import torch\n'), ((63504, 63542), 'os.path.join', 'os.path.join', (['modelOutput', '"""model.pkl"""'], {}), "(modelOutput, 'model.pkl')\n", (63516, 63542), False, 'import os\n'), ((63751, 63794), 'numpy.concatenate', 'np.concatenate', (['(train_pred, preds)'], {'axis': '(0)'}), '((train_pred, preds), axis=0)\n', 
(63765, 63794), True, 'import numpy as np\n'), ((64666, 64691), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (64689, 64691), False, 'import torch\n'), ((64747, 64787), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'class_weight'}), '(weight=class_weight)\n', (64766, 64787), True, 'import torch.nn as nn\n'), ((64827, 64903), 'numpy.zeros', 'np.zeros', (['population[population[:, population.shape[1] - 1] > 0, :].shape[0]'], {}), '(population[population[:, population.shape[1] - 1] > 0, :].shape[0])\n', (64835, 64903), True, 'import numpy as np\n'), ((68121, 68212), 'numpy.append', 'np.append', (['population[population[:, population.shape[1] - 1] > 0, :]', 'test_pred'], {'axis': '(1)'}), '(population[population[:, population.shape[1] - 1] > 0, :],\n test_pred, axis=1)\n', (68130, 68212), True, 'import numpy as np\n'), ((68455, 68477), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (68475, 68477), False, 'import timeit\n'), ((69918, 69943), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (69941, 69943), False, 'import torch\n'), ((70226, 70248), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (70246, 70248), False, 'import timeit\n'), ((70907, 70998), 'numpy.append', 'np.append', (['population[population[:, population.shape[1] - 1] > 0, :]', 'test_pred'], {'axis': '(1)'}), '(population[population[:, population.shape[1] - 1] > 0, :],\n test_pred, axis=1)\n', (70916, 70998), True, 'import numpy as np\n'), ((5070, 5083), 'torch.autograd.Variable', 'Variable', (['X_v'], {}), '(X_v)\n', (5078, 5083), False, 'from torch.autograd import Variable\n'), ((5085, 5100), 'torch.autograd.Variable', 'Variable', (['y_v_a'], {}), '(y_v_a)\n', (5093, 5100), False, 'from torch.autograd import Variable\n'), ((5102, 5117), 'torch.autograd.Variable', 'Variable', (['y_v_b'], {}), '(y_v_b)\n', (5110, 5117), False, 'from torch.autograd import Variable\n'), ((6152, 6181), 
'torch.nn.L1Loss', 'nn.L1Loss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (6161, 6181), True, 'import torch.nn as nn\n'), ((8212, 8231), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (8228, 8231), False, 'import torch\n'), ((13851, 13865), 'torch.nn.functional.relu', 'F.relu', (['(-1 * x)'], {}), '(-1 * x)\n', (13857, 13865), True, 'import torch.nn.functional as F\n'), ((22865, 22880), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (22878, 22880), False, 'import pdb\n'), ((58585, 58610), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (58608, 58610), False, 'import torch\n'), ((60710, 60752), 'numpy.concatenate', 'np.concatenate', (['(temp, pred_test1)'], {'axis': '(0)'}), '((temp, pred_test1), axis=0)\n', (60724, 60752), True, 'import numpy as np\n'), ((62475, 62522), 'numpy.concatenate', 'np.concatenate', (['(train_x, encode_train)'], {'axis': '(0)'}), '((train_x, encode_train), axis=0)\n', (62489, 62522), True, 'import numpy as np\n'), ((62555, 62605), 'os.path.join', 'os.path.join', (['modelOutput', '"""autoencoder_model.pkl"""'], {}), "(modelOutput, 'autoencoder_model.pkl')\n", (62567, 62605), False, 'import os\n'), ((64258, 64284), 'numpy.int64', 'np.int64', (['population[:, 0]'], {}), '(population[:, 0])\n', (64266, 64284), True, 'import numpy as np\n'), ((64630, 64656), 'torch.Tensor', 'torch.Tensor', (['class_weight'], {}), '(class_weight)\n', (64642, 64656), False, 'import torch\n'), ((65680, 65702), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (65700, 65702), False, 'import timeit\n'), ((67138, 67163), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (67161, 67163), False, 'import torch\n'), ((70355, 70382), 'os.path.exists', 'os.path.exists', (['modelOutput'], {}), '(modelOutput)\n', (70369, 70382), False, 'import os\n'), ((70392, 70416), 'os.makedirs', 'os.makedirs', (['modelOutput'], {}), '(modelOutput)\n', (70403, 70416), False, 'import os\n'), 
((70499, 70537), 'os.path.join', 'os.path.join', (['modelOutput', '"""model.pkl"""'], {}), "(modelOutput, 'model.pkl')\n", (70511, 70537), False, 'import os\n'), ((70742, 70789), 'numpy.concatenate', 'np.concatenate', (['(test_pred, pred_test1)'], {'axis': '(0)'}), '((test_pred, pred_test1), axis=0)\n', (70756, 70789), True, 'import numpy as np\n'), ((6403, 6428), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6426, 6428), False, 'import torch\n'), ((56706, 56725), 'numpy.count_nonzero', 'np.count_nonzero', (['y'], {}), '(y)\n', (56722, 56725), True, 'import numpy as np\n'), ((57157, 57177), 'numpy.shape', 'np.shape', (['population'], {}), '(population)\n', (57165, 57177), True, 'import numpy as np\n'), ((57182, 57202), 'numpy.shape', 'np.shape', (['population'], {}), '(population)\n', (57190, 57202), True, 'import numpy as np\n'), ((57606, 57652), 'numpy.max', 'np.max', (['population[:, population.shape[1] - 1]'], {}), '(population[:, population.shape[1] - 1])\n', (57612, 57652), True, 'import numpy as np\n'), ((58163, 58178), 'numpy.sum', 'np.sum', (['train_y'], {}), '(train_y)\n', (58169, 58178), True, 'import numpy as np\n'), ((59238, 59285), 'numpy.concatenate', 'np.concatenate', (['(train_x, encode_train)'], {'axis': '(0)'}), '((train_x, encode_train), axis=0)\n', (59252, 59285), True, 'import numpy as np\n'), ((59514, 59559), 'numpy.concatenate', 'np.concatenate', (['(test_x, encode_Test)'], {'axis': '(0)'}), '((test_x, encode_Test), axis=0)\n', (59528, 59559), True, 'import numpy as np\n'), ((60909, 60932), 'numpy.mean', 'np.mean', (['test_pred[ind]'], {}), '(test_pred[ind])\n', (60916, 60932), True, 'import numpy as np\n'), ((62133, 62145), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (62143, 62145), True, 'import torch.nn as nn\n'), ((62319, 62331), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (62327, 62331), True, 'import numpy as np\n'), ((67734, 67776), 'numpy.concatenate', 'np.concatenate', (['(temp, 
pred_test1)'], {'axis': '(0)'}), '((temp, pred_test1), axis=0)\n', (67748, 67776), True, 'import numpy as np\n'), ((58838, 58850), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (58848, 58850), True, 'import torch.nn as nn\n'), ((59076, 59088), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (59084, 59088), True, 'import numpy as np\n'), ((59357, 59369), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (59365, 59369), True, 'import numpy as np\n'), ((60834, 60858), 'numpy.shape', 'np.shape', (['test_pred[ind]'], {}), '(test_pred[ind])\n', (60842, 60858), True, 'import numpy as np\n'), ((64476, 64495), 'numpy.count_nonzero', 'np.count_nonzero', (['y'], {}), '(y)\n', (64492, 64495), True, 'import numpy as np\n'), ((64990, 65036), 'numpy.max', 'np.max', (['population[:, population.shape[1] - 1]'], {}), '(population[:, population.shape[1] - 1])\n', (64996, 65036), True, 'import numpy as np\n'), ((65547, 65562), 'numpy.sum', 'np.sum', (['train_y'], {}), '(train_y)\n', (65553, 65562), True, 'import numpy as np\n'), ((67951, 67974), 'numpy.mean', 'np.mean', (['test_pred[ind]'], {}), '(test_pred[ind])\n', (67958, 67974), True, 'import numpy as np\n'), ((67876, 67900), 'numpy.shape', 'np.shape', (['test_pred[ind]'], {}), '(test_pred[ind])\n', (67884, 67900), True, 'import numpy as np\n'), ((6835, 6856), 'torch.topk', 'torch.topk', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (6845, 6856), False, 'import torch\n')] |
from .dynamic_model import *
from functions.functions import *
from cost.neural_network_cost import NN_Cost
from constraints.constraints import *
from keras import layers
from keras.models import load_model
import keras
import numpy as np
from keras import backend as K
import tensorflow as tf
# Module-global TF session; InteractiveSession installs itself as the default
# session so subsequent Keras/TF ops in this module run against it.
sess = tf.InteractiveSession()
# NOTE(review): tf.initialize_all_variables() is deprecated (TF >= 0.12) in
# favor of tf.global_variables_initializer() -- confirm the pinned TF version
# before modernizing this call.
sess.run(tf.initialize_all_variables())
class NeuralNetworkPredictor(DynamicModel):
def __init__(self, model_file : str, N1 : int, \
N2 : int, Nu : int, ym : list, K : int, \
yn : list, lambd : list):
self.N1 = N1
self.N2 = N2
self.Nu = Nu
self.ym = ym
self.lambd = lambd
self.Hessian = np.zeros((self.Nu, self.Nu))
self.yn = yn
self.K = K
self.num_predicted_states = 3
self.constraints = Constraints()
self.model = load_model(model_file)
print(self.model.summary())
print(self.model.get_config())
self.output_size = self.model.layers[-1].output_shape[1]
self.input_size = self.model.layers[0].input_shape[1]
self.hd = len(self.model.layers) - 1
self.nd = 3
self.dd = 1
self.Hessian = np.zeros((self.output_size, self.output_size))
"""
These attributes will be part of the recursion:
"""
self.previous_first_der = 1
self.previoud_second_der = 1
super().__init__()
self.Cost = NN_Cost(self, self.lambd)
def __Phi_prime(self, x = 0):
"""
Linear output function
"""
return 1.0
def __Phi_prime_prime(self, x = 0):
"""
Linear output function
"""
return 0.0
"""
---------------------------------------------------------------------
<NAME>., and <NAME>, “Neural Generalized Predictive Control,”
Proceedings of the 1996 IEEE International Symposium on Intelligent
Control, 1996, pp. 277–281.
Calculating h'th element of the Jacobian
Calculating m'th and h'th element of the Hessian
---------------------------------------------------------------------
"""
def __partial_2_fnet_partial_nph_partial_npm(self, h, m, j):
"""
D2^2f_j(net)
------------
Du(n+h)Du(n+m)
"""
return self.__Phi_prime()*self.__partial_2_net_partial_u_nph_partial_npm(h, m, j)*\
+ self.__Phi_prime_prime() * self.__partial_net_partial_u(h, j) * \
self.__partial_net_partial_u(m, j)
def __partial_2_yn_partial_nph_partial_npm(self, h, m, j):
"""
D^2yn
---------------
Du(n+h) Du(n+m)
"""
weights = self.model.layers[j].get_weights()[0]
hid = weights.shape[1]
sum_output=0.0
for i in range(hid):
sum_output+= weights[j,i] * self.__partial_2_fnet_partial_nph_partial_npm(h, m, j)
self.previous_second_der = sum_output
return sum_output
def __partial_2_net_partial_u_nph_partial_npm(self, h, m, j):
"""
D^2 net_j
-------------
Du(n+h)Du(n+m)
"""
weights = self.model.layers[j].get_weights()[0]
sum_output=0.0
for i in range(1, min(self.K, self.dd)):
sum_output+= weights[j, i+self.nd+1] * self.previous_second_der * step(self.K-i-1)
return sum_output
def __partial_yn_partial_u(self, h, j):
"""
D yn
-----------
D u(n+h)
"""
weights = self.model.layers[j].get_weights()[0]
hid = self.model.layers[j].output_shape[1]
sum_output = 0.0
for i in range(hid):
sum_output += weights[j, i] * self.__partial_fnet_partial_u( h, j)
self.previous_first_der = sum_output
return sum_output
def __partial_fnet_partial_u(self, h, j):
"""
D f_j(net)
---------
D u(u+h)
"""
return self.__Phi_prime()*self.__partial_net_partial_u(h, j)
def __partial_net_partial_u(self, h, j):
"""
D net_j
---------
D u(n+h)
"""
weights = self.model.layers[j].get_weights()[0]
self.nd = weights.shape[1] - 1
sum_output = 0.0
for i in range(self.nd):
if (self.K - self.Nu) < i:
sum_output+= weights[j, i+1] * kronecker_delta(self.K - i, h)
else:
sum_output+=weights[j, i+1] * kronecker_delta(self.Nu, h)
for i in range(1, min(self.K, self.dd)):
sum_output+= weights[j, i+self.nd+1] * self.previous_first_der * \
step(self.K - i -1)
return sum_output
def __partial_delta_u_partial_u(self, j, h):
"""
D delta u
---------
D u(n+h)
"""
return kronecker_delta(h, j) - kronecker_delta(h, j-1)
def compute_hessian(self, u, del_u):
Hessian = np.zeros((self.Nu, self.Nu))
for h in range(self.Nu):
for m in range(self.Nu):
sum_output=0.0
for j in range(self.N1, self.N2):
sum_output += 2.*(self.__partial_yn_partial_u(h, j)*self.__partial_yn_partial_u(m, j) - \
self.__partial_2_yn_partial_nph_partial_npm(h, m, j)* \
(self.ym[j] - self.yn[j]))
for j in range(self.Nu):
sum_output += 2.*( self.lambd[j] * (self.__partial_delta_u_partial_u(j, h) * self.__partial_delta_u_partial_u(j, m) + del_u[j] * 0.0))
for j in range(self.Nu):
sum_output += kronecker_delta(h, j) * kronecker_delta(m, j) * \
( 2.0*self.constraints.s/( u[j] + self.constraints.r/2. - \
self.constraints.b)**3 + 2.*self.constraints.s/(self.constraints.r/2. + \
self.constraints.b - u[j])**3)
Hessian[m, h] = sum_output
return Hessian
def compute_jacobian(self, u, del_u):
# working on this now
dJ = []
for h in range(self.Nu):
sum_output=0.0
for j in range(self.N1, self.N2):
sum_output+=-2.*(self.ym[j]-self.yn[j])*self.__partial_yn_partial_u(h, j)
for j in range(self.Nu):
sum_output+=2.*self.lambd[j]*del_u[j]*self.__partial_delta_u_partial_u(j, h)
for j in range(self.Nu):
sum_output+=kronecker_delta(h, j) * ( -self.constraints.s/(u[j] + self.constraints.r/2. - self.constraints.b)**2 + \
self.constraints.s / (self.constraints.r/2. + self.constraints.b - u[j])**2 )
dJ+=[sum_output]
return dJ
def Fu(self, u, del_u):
jacobian = self.compute_jacobian(u, del_u)
return jacobian
def Ju(self, u, del_u):
self.Hessian = self.compute_hessian(u, del_u)
return self.Hessian
def compute_cost(self, del_u, u):
return self.Cost.compute_cost(del_u, u)
def measure(self, u):
if (u.ndim == 1):
u = np.array([u])
model_signal = load_model('../model_data/neural_network_1.hdf5')
measure = model_signal.predict(u, batch_size=1)
return measure
def predict(self, x):
return self.model.predict(x, batch_size=1)
| [
"keras.models.load_model",
"numpy.zeros",
"numpy.array",
"cost.neural_network_cost.NN_Cost",
"tensorflow.initialize_all_variables",
"tensorflow.InteractiveSession"
] | [((303, 326), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (324, 326), True, 'import tensorflow as tf\n'), ((336, 365), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (363, 365), True, 'import tensorflow as tf\n'), ((767, 795), 'numpy.zeros', 'np.zeros', (['(self.Nu, self.Nu)'], {}), '((self.Nu, self.Nu))\n', (775, 795), True, 'import numpy as np\n'), ((937, 959), 'keras.models.load_model', 'load_model', (['model_file'], {}), '(model_file)\n', (947, 959), False, 'from keras.models import load_model\n'), ((1272, 1318), 'numpy.zeros', 'np.zeros', (['(self.output_size, self.output_size)'], {}), '((self.output_size, self.output_size))\n', (1280, 1318), True, 'import numpy as np\n'), ((1521, 1546), 'cost.neural_network_cost.NN_Cost', 'NN_Cost', (['self', 'self.lambd'], {}), '(self, self.lambd)\n', (1528, 1546), False, 'from cost.neural_network_cost import NN_Cost\n'), ((5079, 5107), 'numpy.zeros', 'np.zeros', (['(self.Nu, self.Nu)'], {}), '((self.Nu, self.Nu))\n', (5087, 5107), True, 'import numpy as np\n'), ((7415, 7464), 'keras.models.load_model', 'load_model', (['"""../model_data/neural_network_1.hdf5"""'], {}), "('../model_data/neural_network_1.hdf5')\n", (7425, 7464), False, 'from keras.models import load_model\n'), ((7378, 7391), 'numpy.array', 'np.array', (['[u]'], {}), '([u])\n', (7386, 7391), True, 'import numpy as np\n')] |
#!/usr/bin/env python #
# ------------------------------------------------------------------------------------------------------#
# Created by "<NAME>" at 02:54, 06/12/2019 #
# #
# Email: <EMAIL> #
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 #
# Github: https://github.com/thieu1995 #
#-------------------------------------------------------------------------------------------------------#
import numpy as np
class Functions:
    """
    Classic benchmark functions defined on a 2-dimensional search space.

    Every method takes ``solution`` -- a numpy array holding exactly two
    values ``[x, y]`` -- and returns the scalar objective value.  The
    formulas follow the definitions published at http://benchmarkfcns.xyz
    (see the per-method links).

    Fixes relative to the previous revision:
      * all dimension checks were written ``assert (cond, msg)`` -- a
        non-empty tuple, which is always truthy -- so they never fired;
        they are real assertions now
      * Goldstein-Price, Drop-Wave, Holder-Table, Cross-in-Tray,
        Levi N.13 and Schaffer N.3/N.4 contained typos that made them
        disagree with their documented global minima; corrected below
    """
    def _ackley_n2__(self, solution=None):
        """
        Class: unimodal, convex, differentiable, non-separable
        Global: one global minimum fx = -200, at [0, 0]
        @param solution: A numpy array include 2 items like: [10, 22]
        """
        assert len(solution) == 2, 'Ackley N. 2 function is only defined on a 2D space.'
        return -200 * np.exp(-0.2 * np.sqrt(np.sum(solution**2)))
    def _ackley_n3__(self, solution=None):
        """
        Class: multimodal, non-convex, differentiable, non-separable
        Global: one global minimum fx = -195.629028238419, at [±0.682584587365898, -0.36075325513719]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/ackleyn3fcn.html
        @param solution: A numpy array include 2 items like: [10, 22]
        """
        assert len(solution) == 2, 'Ackley N. 3 function is only defined on a 2D space.'
        return -200 * np.exp(-0.2 * np.sqrt(np.sum(solution**2))) + \
            5 * np.exp(np.cos(3 * solution[0]) + np.sin(3 * solution[1]))
    def _adjiman__(self, solution=None):
        """
        Class: multimodal, non-convex, differentiable, non-separable
        Global: if x in [-1, 2], y in [-1, 1] cube => global min fx = -2.02181, at [0, 0]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/adjimanfcn.html
        @param solution: A numpy array include 2 items like: [10, 22]
        """
        assert len(solution) == 2, 'Adjiman function is only defined on a 2D space.'
        return np.cos(solution[0]) * np.sin(solution[1]) - solution[0] / (solution[1]**2 + 1)
    def _bartels_conn__(self, solution=None):
        """
        Class: multimodal, non-convex, non-differentiable, non-separable
        Global: one global minimum fx = 1, at [0, 0]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/bartelsconnfcn.html
        @param solution: A numpy array include 2 items like: [10, 22]
        @return: fx
        """
        assert len(solution) == 2, 'Bartels conn function is only defined on a 2D space.'
        return np.abs(solution[0]**2 + solution[1]**2 + solution[0] * solution[1]) + \
            np.abs(np.sin(solution[0])) + np.abs(np.cos(solution[1]))
    def _beale__(self, solution=None):
        """
        Class: multimodal, non-convex, continuous
        Global: one global minimum fx = 0, at [3, 0.5]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/bealefcn.html
        @param solution: A numpy array include 2 items in range: [-4.5, 4.5], [-4.5, 4.5]
        @return: fx
        """
        assert len(solution) == 2, 'Beale function is only defined on a 2D space.'
        return (1.5 - solution[0] + solution[0] * solution[1])**2 + \
            (2.25 - solution[0] + solution[0] * solution[1]**2)**2 + \
            (2.625 - solution[0] + solution[0] * solution[1]**3)**2
    def _bird__(self, solution=None):
        """
        Class: multimodal, non-convex, non-separable, differentiable
        Global: 2 global minimum fx = -106.764537, at (4.70104, 3.15294) and (-1.58214, -3.13024).
        Link: http://benchmarkfcns.xyz/benchmarkfcns/birdfcn.html
        @param solution: A numpy array include 2 items in range: [-2pi, 2pi], [-2pi, 2pi]
        @return: fx
        """
        assert len(solution) == 2, 'Bird function is only defined on a 2D space.'
        return np.sin(solution[0]) * np.exp((1 - np.cos(solution[1]))**2) + \
            np.cos(solution[1]) * np.exp((1 - np.sin(solution[0]))**2) + (solution[0] - solution[1])**2
    def _bohachevskyn_n1__(self, solution=None):
        """
        Class: unimodal, convex, continuous
        Global: global minimum fx = 0, at (0, 0)
        Link: http://benchmarkfcns.xyz/benchmarkfcns/bohachevskyn1fcn.html
        @param solution: A numpy array include 2 items in range: [-100, 100], [-100, 100]
        @return: fx
        """
        assert len(solution) == 2, 'Bohachevskyn N.1 function is only defined on a 2D space.'
        return solution[0]**2 + 2 * solution[1]**2 - 0.3 * np.cos(3 * solution[0] * np.pi) - \
            0.4 * np.cos(4 * solution[1] * np.pi) + 0.7
    def _bohachevskyn_n2__(self, solution=None):
        """
        Class: multi-modal, non-convex, non-separable, differentiable
        Global: global minimum fx = 0, at (0, 0)
        Link: http://benchmarkfcns.xyz/benchmarkfcns/bohachevskyn2fcn.html
        @param solution: A numpy array include 2 items in range: [-100, 100], [-100, 100]
        @return: fx
        """
        assert len(solution) == 2, 'Bohachevskyn N.2 function is only defined on a 2D space.'
        return solution[0]**2 + 2 * solution[1]**2 - \
            0.3 * np.cos(3 * solution[0] * np.pi) * np.cos(4 * solution[1] * np.pi) + 0.3
    def _booth__(self, solution=None):
        """
        Class: unimodal, convex, non-separable, differentiable, continuous
        Global: one global minimum fx = 0, at (1, 3)
        Link: http://benchmarkfcns.xyz/benchmarkfcns/boothfcn.html
        @param solution: A numpy array include 2 items in range: [-10, 10], [-10, 10]
        @return: fx
        """
        assert len(solution) == 2, 'Booth function is only defined on a 2D space.'
        return (solution[0] + 2 * solution[1] - 7)**2 + (2 * solution[0] + solution[1] - 5)**2
    def _brent__(self, solution=None):
        """
        Class: unimodal, convex, non-separable, differentiable
        Global: one global minimum fx = e^(-200), at (-10, -10)
        Link: http://benchmarkfcns.xyz/benchmarkfcns/brentfcn.html
        @param solution: A numpy array include 2 items in range: [-20, 0], [-20, 0]
        @return: fx
        """
        assert len(solution) == 2, 'Brent function is only defined on a 2D space.'
        return (solution[0] + 10)**2 + (solution[1] + 10)**2 + np.exp(-solution[0]**2 - solution[1]**2)
    def _bukin_n6__(self, solution=None):
        """
        Class: multimodal, convex, non-separable, non-differentiable, continuous
        Global: one global minimum fx = 0, at (-10, 1)
        Link: http://benchmarkfcns.xyz/benchmarkfcns/bukinn6fcn.html
        @param solution: A numpy array include 2 items in range: [-15, -5], [-3, 3]
        @return: fx
        """
        assert len(solution) == 2, 'Bukin N.6 function is only defined on a 2D space.'
        return 100 * np.sqrt(np.abs(solution[1] - 0.01 * solution[0]**2)) + 0.01 * np.abs(solution[0] + 10)
    def _cross_in_tray__(self, solution=None):
        """
        Class: multimodal, non-convex, non-separable, non-differentiable, continuous
        Global: 4 global minimum fx = -2.06261218, at (±1.349406685353340, ±1.349406608602084)
        Link: http://benchmarkfcns.xyz/benchmarkfcns/crossintrayfcn.html
        @param solution: A numpy array include 2 items in range: [-10, 10], [-10, 10]
        @return: fx
        """
        assert len(solution) == 2, 'Cross-in-tray function is only defined on a 2D space.'
        t1 = np.exp(np.abs(100 - np.sqrt(np.sum(solution**2)) / np.pi))
        # sin(x)*sin(y) per the reference formula (was cos(y), which did not
        # reproduce the documented minimum).
        t2 = np.sin(solution[0]) * np.sin(solution[1])
        return -0.0001 * (np.abs(t1 * t2) + 1)**0.1
    def _deckkers_aarts__(self, solution=None):
        """
        Class: multimodal, non-convex, non-separable, differentiable, continuous
        Global: 1 global minimum fx = -24771.09375, at (0, ±15).
        Link: http://benchmarkfcns.xyz/benchmarkfcns/deckkersaartsfcn.html
        @param solution: A numpy array include 2 items in range: [-20, 20], [-20, 20]
        @return: fx
        """
        assert len(solution) == 2, 'Deckkers Aarts function is only defined on a 2D space.'
        t1 = solution[0]**2
        t2 = solution[1]**2
        return 10**5 * t1 + t2 - (t1 + t2)**2 + 10**(-5) * (t1 + t2)**4
    def _drop_wave__(self, solution=None):
        """
        Class: uni-modal, non-convex, continuous
        Global: 1 global minimum fx = -1 at (0, 0).
        Link: http://benchmarkfcns.xyz/benchmarkfcns/dropwavefcn.html
        @param solution: A numpy array include 2 items in range: [-5.2, 5.2], [-5.2, 5.2]
        @return: fx
        """
        assert len(solution) == 2, 'Drop wave function is only defined on a 2D space.'
        # numerator argument is 12*sqrt(x^2 + y^2); the previous code summed
        # solution*2 (elementwise doubling) instead of solution**2.
        return -(1 + np.cos(12 * np.sqrt(np.sum(solution**2)))) / (0.5 * np.sum(solution**2) + 2)
    def _easom__(self, solution=None):
        """
        Class: multi-modal, non-convex, continuous, differentiable, separable
        Global: 1 global minimum fx = -1 at (pi, pi).
        Link: http://benchmarkfcns.xyz/benchmarkfcns/easomfcn.html
        @param solution: A numpy array include 2 items in range: [-100, 100], [-100, 100]
        @return: fx
        """
        assert len(solution) == 2, 'Easom function is only defined on a 2D space.'
        return -np.cos(solution[0]) * np.cos(solution[1]) * \
            np.exp(-(solution[0] - np.pi)**2 - (solution[1] - np.pi)**2)
    def _egg_crate__(self, solution=None):
        """
        Class: multi-modal, non-convex, continuous, differentiable, separable
        Global: global minimum fx = 0 at (0, 0).
        Link: http://benchmarkfcns.xyz/benchmarkfcns/eggcratefcn.html
        @param solution: A numpy array include 2 items in range: [-5, 5]
        @return: fx
        """
        assert len(solution) == 2, 'Egg Crate function is only defined on a 2D space.'
        return np.sum(solution**2) + 25 * (np.sin(solution[0])**2 + np.sin(solution[1])**2)
    def _goldstein_price__(self, solution=None):
        """
        Class: multi-modal, non-convex, continuous, differentiable, non-separable
        Global: global minimum fx = 3 at (0, -1).
        Link: http://benchmarkfcns.xyz/benchmarkfcns/goldsteinpricefcn.html
        @param solution: A numpy array include 2 items in range: [-2, 2]
        @return: fx
        """
        assert len(solution) == 2, 'Goldstein price function is only defined on a 2D space.'
        # Second polynomial factor: 18 - 32x + 12x^2 + 48y - 36xy + 27y^2
        # (the 48y term was previously mistyped as 4y, breaking the
        # documented minimum of 3 at (0, -1)).
        t1 = 18 - 32 * solution[0] + 12 * solution[0]**2 + 48 * solution[1] - \
            36 * solution[0] * solution[1] + 27 * solution[1]**2
        t2 = 19 - 14 * solution[0] + 3 * solution[0]**2 - 14 * solution[1] + \
            6 * solution[0] * solution[1] + 3 * solution[1]**2
        t3 = (np.sum(solution) + 1)**2
        return (1 + t3 * t2) * (30 + (2 * solution[0] - 3 * solution[1])**2 * t1)
    def _himmelblau__(self, solution=None):
        """
        Class: multi-modal, non-convex, continuous
        Global: 4 global optima, fx = 0 at (3, 2), (-2.85118, 3.283186), (-3.779310, -3.283186), (3.584458, -1.848126)
        Link: http://benchmarkfcns.xyz/benchmarkfcns/himmelblaufcn.html
        @param solution: A numpy array include 2 items in range: [-6, 6]
        @return: fx
        """
        assert len(solution) == 2, 'Himmelblau function is only defined on a 2D space.'
        return (solution[0]**2 + solution[1] - 11)**2 + (solution[0] + solution[1]**2 - 7)**2
    def _holder_table__(self, solution=None):
        """
        Class: multi-modal, non-convex, continuous, non-differentiable, non-separable
        Global: 4 global optima, fx = -19.2085 at (±8.05502, ±9.66459)
        Link: http://benchmarkfcns.xyz/benchmarkfcns/holdertablefcn.html
        @param solution: A numpy array include 2 items in range: [-10, 10]
        @return: fx
        """
        assert len(solution) == 2, 'Holder Table function is only defined on a 2D space.'
        # The reference formula is -|sin(x)cos(y) * exp(|1 - sqrt(x^2+y^2)/pi|)|;
        # the exp() factor was previously missing.
        return -np.abs(np.sin(solution[0]) * np.cos(solution[1]) *
                       np.exp(np.abs(1 - np.sqrt(np.sum(solution**2)) / np.pi)))
    def _keane__(self, solution=None):
        """
        Class: multi-modal, non-convex, continuous, differentiable, non-separable
        Global: 2 global optima, fx = 0.673667521146855 at (1.393249070031784, 0), (0, 1.393249070031784)
        Link: http://benchmarkfcns.xyz/benchmarkfcns/kealefcn.html
        @param solution: A numpy array include 2 items in range: [0, 10]
        @return: fx
        """
        assert len(solution) == 2, 'Keane function is only defined on a 2D space.'
        return -np.sin(solution[0] - solution[1])**2 * np.sin(solution[0] + solution[1])**2 / \
            np.sqrt(np.sum(solution**2))
    def _leon__(self, solution=None):
        """
        Class: uni-modal, non-convex, continuous, differentiable, non-separable
        Global: 1 global optima, fx = 0 at [0, 10]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/leonfcn.html
        @param solution: A numpy array include 2 items in range: [0, 10]
        @return: fx
        """
        assert len(solution) == 2, 'Leon function is only defined on a 2D space.'
        return 100 * (solution[1] - solution[0]**3)**2 + (1 - solution[0])**2
    def _levi_n13__(self, solution=None):
        """
        Class: multi-modal, non-convex, continuous, differentiable, non-separable
        Global: 1 global optima, fx = 0 at [1, 1]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/levin13fcn.html
        @param solution: A numpy array include 2 items in range: [-10, 10]
        @return: fx
        """
        assert len(solution) == 2, 'Levi N.13 function is only defined on a 2D space.'
        # Middle term uses sin(3*pi*y); the previous code raised y to the
        # power pi (y**pi) instead of multiplying by pi.
        return np.sin(3 * solution[0] * np.pi)**2 + \
            (solution[0] - 1)**2 * (1 + np.sin(3 * solution[1] * np.pi)**2) + \
            (solution[1] - 1)**2 * (1 + np.sin(2 * solution[1] * np.pi)**2)
    def _matyas__(self, solution=None):
        """
        Class: uni-modal, convex, continuous, differentiable, non-separable
        Global: 1 global optima, fx = 0 at [0, 0]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/matyasfcn.html
        @param solution: A numpy array include 2 items in range: [-10, 10]
        @return: fx
        """
        assert len(solution) == 2, 'Matyas function is only defined on a 2D space.'
        return 0.26 * np.sum(solution**2) - 0.48 * solution[0] * solution[1]
    def _mc_cormick__(self, solution=None):
        """
        Class: multi-modal, convex, continuous, differentiable, non-scalable
        Global: 1 global optima, fx = -1.9133 at [-0.547, -1.547]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/mccormickfcn.html
        @param solution: A numpy array include 2 items in range: [-1.5, 4], [-3, 3]
        @return: fx
        """
        assert len(solution) == 2, 'Mc Cormick function is only defined on a 2D space.'
        return np.sin(solution[0] + solution[1]) + (solution[0] - solution[1])**2 - \
            1.5 * solution[0] + 2.5 * solution[1] + 1
    def _schaffer_n1__(self, solution=None):
        """
        Class: uni-modal, non-convex, continuous, differentiable, non-separable
        Global: 1 global optima, fx = 0 at [0, 0]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/schaffern1fcn.html
        @param solution: A numpy array include 2 items in range: [-100, 100]
        @return: fx
        """
        assert len(solution) == 2, 'Schaffer N.1 function is only defined on a 2D space.'
        return 0.5 + (np.sin(np.sum(solution**2)**2)**2 - 0.5) / (1 + 0.001 * np.sum(solution**2))**2
    def _schaffer_n2__(self, solution=None):
        """
        Class: uni-modal, non-convex, continuous, differentiable, non-separable
        Global: 1 global optima, fx = 0 at [0, 0]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/schaffern2fcn.html
        @param solution: A numpy array include 2 items in range: [-100, 100]
        @return: fx
        """
        assert len(solution) == 2, 'Schaffer N.2 function is only defined on a 2D space.'
        return 0.5 + (np.sin(solution[0]**2 - solution[1]**2)**2 - 0.5) / \
            (1 + 0.001 * np.sum(solution**2))**2
    def _schaffer_n3__(self, solution=None):
        """
        Class: uni-modal, non-convex, continuous, differentiable, non-separable
        Global: 1 global optima, fx = 0.00156685 at [0, 1.253115]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/schaffern3fcn.html
        @param solution: A numpy array include 2 items in range: [-100, 100]
        @return: fx
        """
        assert len(solution) == 2, 'Schaffer N.3 function is only defined on a 2D space.'
        # The reference numerator is sin^2(cos|x^2 - y^2|) - 0.5; the square
        # on sin was previously missing (minimum did not match 0.00156685).
        return 0.5 + (np.sin(np.cos(np.abs(solution[0]**2 - solution[1]**2)))**2 - 0.5) / \
            (1 + 0.001 * np.sum(solution**2))**2
    def _schaffer_n4__(self, solution=None):
        """
        Class: uni-modal, non-convex, continuous, differentiable, non-separable
        Global: 1 global optima, fx = 0.292579 at [0, 1.253115]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/schaffern4fcn.html
        @param solution: A numpy array include 2 items in range: [-100, 100]
        @return: fx
        """
        assert len(solution) == 2, 'Schaffer N.4 function is only defined on a 2D space.'
        # The reference numerator is cos^2(sin|x^2 - y^2|) - 0.5; the square
        # on cos was previously missing (minimum did not match 0.292579).
        return 0.5 + (np.cos(np.sin(np.abs(solution[0]**2 - solution[1]**2)))**2 - 0.5) / \
            (1 + 0.001 * np.sum(solution**2))**2
    def _three_hump_camel__(self, solution=None):
        """
        Class: multi-modal, non-convex, continuous, differentiable, non-separable
        Global: 1 global optima, fx = 0 at [0, 0]
        Link: http://benchmarkfcns.xyz/benchmarkfcns/threehumpcamelfcn.html
        @param solution: A numpy array include 2 items in range: [-5, 5]
        @return: fx
        """
        assert len(solution) == 2, 'Three-hump camel function is only defined on a 2D space.'
        return 2 * solution[0]**2 - 1.05 * solution[0]**4 + solution[0]**6 / 6 + \
            solution[0] * solution[1] + solution[1]**2
| [
"numpy.abs",
"numpy.sum",
"numpy.sin",
"numpy.exp",
"numpy.cos"
] | [((6767, 6811), 'numpy.exp', 'np.exp', (['(-solution[0] ** 2 - solution[1] ** 2)'], {}), '(-solution[0] ** 2 - solution[1] ** 2)\n', (6773, 6811), True, 'import numpy as np\n'), ((8007, 8026), 'numpy.sin', 'np.sin', (['solution[0]'], {}), '(solution[0])\n', (8013, 8026), True, 'import numpy as np\n'), ((8029, 8048), 'numpy.cos', 'np.cos', (['solution[1]'], {}), '(solution[1])\n', (8035, 8048), True, 'import numpy as np\n'), ((9822, 9886), 'numpy.exp', 'np.exp', (['(-(solution[0] - np.pi) ** 2 - (solution[1] - np.pi) ** 2)'], {}), '(-(solution[0] - np.pi) ** 2 - (solution[1] - np.pi) ** 2)\n', (9828, 9886), True, 'import numpy as np\n'), ((10364, 10385), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (10370, 10385), True, 'import numpy as np\n'), ((2450, 2469), 'numpy.cos', 'np.cos', (['solution[0]'], {}), '(solution[0])\n', (2456, 2469), True, 'import numpy as np\n'), ((2472, 2491), 'numpy.sin', 'np.sin', (['solution[1]'], {}), '(solution[1])\n', (2478, 2491), True, 'import numpy as np\n'), ((3016, 3087), 'numpy.abs', 'np.abs', (['(solution[0] ** 2 + solution[1] ** 2 + solution[0] * solution[1])'], {}), '(solution[0] ** 2 + solution[1] ** 2 + solution[0] * solution[1])\n', (3022, 3087), True, 'import numpy as np\n'), ((3140, 3159), 'numpy.cos', 'np.cos', (['solution[1]'], {}), '(solution[1])\n', (3146, 3159), True, 'import numpy as np\n'), ((7364, 7388), 'numpy.abs', 'np.abs', (['(solution[0] + 10)'], {}), '(solution[0] + 10)\n', (7370, 7388), True, 'import numpy as np\n'), ((9802, 9821), 'numpy.cos', 'np.cos', (['solution[1]'], {}), '(solution[1])\n', (9808, 9821), True, 'import numpy as np\n'), ((11176, 11192), 'numpy.sum', 'np.sum', (['solution'], {}), '(solution)\n', (11182, 11192), True, 'import numpy as np\n'), ((13099, 13120), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (13105, 13120), True, 'import numpy as np\n'), ((14768, 14789), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', 
(14774, 14789), True, 'import numpy as np\n'), ((3093, 3112), 'numpy.sin', 'np.sin', (['solution[0]'], {}), '(solution[0])\n', (3099, 3112), True, 'import numpy as np\n'), ((4315, 4334), 'numpy.sin', 'np.sin', (['solution[0]'], {}), '(solution[0])\n', (4321, 4334), True, 'import numpy as np\n'), ((4388, 4407), 'numpy.cos', 'np.cos', (['solution[1]'], {}), '(solution[1])\n', (4394, 4407), True, 'import numpy as np\n'), ((5033, 5064), 'numpy.cos', 'np.cos', (['(4 * solution[1] * np.pi)'], {}), '(4 * solution[1] * np.pi)\n', (5039, 5064), True, 'import numpy as np\n'), ((5655, 5686), 'numpy.cos', 'np.cos', (['(4 * solution[1] * np.pi)'], {}), '(4 * solution[1] * np.pi)\n', (5661, 5686), True, 'import numpy as np\n'), ((7314, 7359), 'numpy.abs', 'np.abs', (['(solution[1] - 0.01 * solution[0] ** 2)'], {}), '(solution[1] - 0.01 * solution[0] ** 2)\n', (7320, 7359), True, 'import numpy as np\n'), ((8073, 8088), 'numpy.abs', 'np.abs', (['(t1 * t2)'], {}), '(t1 * t2)\n', (8079, 8088), True, 'import numpy as np\n'), ((9264, 9285), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (9270, 9285), True, 'import numpy as np\n'), ((9782, 9801), 'numpy.cos', 'np.cos', (['solution[0]'], {}), '(solution[0])\n', (9788, 9801), True, 'import numpy as np\n'), ((13052, 13085), 'numpy.sin', 'np.sin', (['(solution[0] + solution[1])'], {}), '(solution[0] + solution[1])\n', (13058, 13085), True, 'import numpy as np\n'), ((14130, 14161), 'numpy.sin', 'np.sin', (['(3 * solution[0] * np.pi)'], {}), '(3 * solution[0] * np.pi)\n', (14136, 14161), True, 'import numpy as np\n'), ((1341, 1362), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (1347, 1362), True, 'import numpy as np\n'), ((1921, 1944), 'numpy.cos', 'np.cos', (['(3 * solution[0])'], {}), '(3 * solution[0])\n', (1927, 1944), True, 'import numpy as np\n'), ((1945, 1968), 'numpy.sin', 'np.sin', (['(3 * solution[1])'], {}), '(3 * solution[1])\n', (1951, 1968), True, 'import numpy as np\n'), 
((4999, 5030), 'numpy.cos', 'np.cos', (['(3 * solution[0] * np.pi)'], {}), '(3 * solution[0] * np.pi)\n', (5005, 5030), True, 'import numpy as np\n'), ((5621, 5652), 'numpy.cos', 'np.cos', (['(3 * solution[0] * np.pi)'], {}), '(3 * solution[0] * np.pi)\n', (5627, 5652), True, 'import numpy as np\n'), ((10392, 10411), 'numpy.sin', 'np.sin', (['solution[0]'], {}), '(solution[0])\n', (10398, 10411), True, 'import numpy as np\n'), ((10417, 10436), 'numpy.sin', 'np.sin', (['solution[1]'], {}), '(solution[1])\n', (10423, 10436), True, 'import numpy as np\n'), ((12399, 12418), 'numpy.sin', 'np.sin', (['solution[0]'], {}), '(solution[0])\n', (12405, 12418), True, 'import numpy as np\n'), ((12419, 12438), 'numpy.cos', 'np.cos', (['solution[1]'], {}), '(solution[1])\n', (12425, 12438), True, 'import numpy as np\n'), ((13015, 13048), 'numpy.sin', 'np.sin', (['(solution[0] - solution[1])'], {}), '(solution[0] - solution[1])\n', (13021, 13048), True, 'import numpy as np\n'), ((14260, 14291), 'numpy.sin', 'np.sin', (['(2 * solution[1] * np.pi)'], {}), '(2 * solution[1] * np.pi)\n', (14266, 14291), True, 'import numpy as np\n'), ((15327, 15360), 'numpy.sin', 'np.sin', (['(solution[0] + solution[1])'], {}), '(solution[0] + solution[1])\n', (15333, 15360), True, 'import numpy as np\n'), ((16508, 16551), 'numpy.sin', 'np.sin', (['(solution[0] ** 2 - solution[1] ** 2)'], {}), '(solution[0] ** 2 - solution[1] ** 2)\n', (16514, 16551), True, 'import numpy as np\n'), ((1888, 1909), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (1894, 1909), True, 'import numpy as np\n'), ((7963, 7984), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (7969, 7984), True, 'import numpy as np\n'), ((14185, 14217), 'numpy.sin', 'np.sin', (['(3 * solution[1] ** np.pi)'], {}), '(3 * solution[1] ** np.pi)\n', (14191, 14217), True, 'import numpy as np\n'), ((15985, 16006), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (15991, 16006), True, 
'import numpy as np\n'), ((16571, 16592), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (16577, 16592), True, 'import numpy as np\n'), ((17124, 17167), 'numpy.abs', 'np.abs', (['(solution[0] ** 2 - solution[1] ** 2)'], {}), '(solution[0] ** 2 - solution[1] ** 2)\n', (17130, 17167), True, 'import numpy as np\n'), ((17188, 17209), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (17194, 17209), True, 'import numpy as np\n'), ((17739, 17782), 'numpy.abs', 'np.abs', (['(solution[0] ** 2 - solution[1] ** 2)'], {}), '(solution[0] ** 2 - solution[1] ** 2)\n', (17745, 17782), True, 'import numpy as np\n'), ((17803, 17824), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (17809, 17824), True, 'import numpy as np\n'), ((4347, 4366), 'numpy.cos', 'np.cos', (['solution[1]'], {}), '(solution[1])\n', (4353, 4366), True, 'import numpy as np\n'), ((4421, 4440), 'numpy.sin', 'np.sin', (['solution[0]'], {}), '(solution[0])\n', (4427, 4440), True, 'import numpy as np\n'), ((9233, 9253), 'numpy.sum', 'np.sum', (['(solution * 2)'], {}), '(solution * 2)\n', (9239, 9253), True, 'import numpy as np\n'), ((15938, 15959), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (15944, 15959), True, 'import numpy as np\n'), ((12458, 12479), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (12464, 12479), True, 'import numpy as np\n')] |
from functools import partial
from pathlib import Path
import jax.numpy as jnp
import numpy as np
from jax import jit as jjit
from numba import jit as njit
def load_data():
    """Read the comma-separated fish timers from ``input.txt`` next to this script.

    :return: 1-D integer array of initial lanternfish timer values
    """
    input_path = Path(__file__).parent / "input.txt"
    with open(input_path) as handle:
        return np.fromstring(handle.read(), sep=",", dtype=int)
def proliferate(initial_state, days):
    """Advance the lanternfish population ``days`` steps.

    Fish are tracked as a 9-slot histogram of internal timers (0-8).
    Each day every timer decreases by one (``np.roll`` shifts the
    histogram left); fish that were at 0 wrap around to slot 8 as their
    spawn, and the parents are re-added at timer 6.

    :param initial_state: array of individual fish timers (values 0-8)
    :param days: number of days to simulate
    :return: length-9 array of fish counts per timer value
    """
    histogram = np.bincount(initial_state, minlength=9)
    for _day in range(days):
        histogram = np.roll(histogram, -1)
        histogram[6] = histogram[6] + histogram[8]
    return histogram
numba_proliferate = njit(proliferate)
@partial(jjit, static_argnums=1)
def jax_proliferate(initial_state, days):
    """JAX implementation of the lanternfish simulation.

    Mirrors :func:`proliferate`, but uses immutable JAX arrays so the
    whole loop can be JIT-compiled.  ``days`` is a static argument, so
    the loop unrolls at trace time.

    :param initial_state: array of individual fish timers (values 0-8)
    :param days: number of days to simulate (static for the JIT)
    :return: length-9 JAX array of fish counts per timer value
    """
    timers = jnp.bincount(jnp.asarray(initial_state, dtype=jnp.int64), length=9)
    for _day in range(days):
        timers = jnp.roll(timers, -1)
        timers = timers.at[6].add(timers[8])
    return timers
def task1():
    """Part 1: print the total fish count after 80 days."""
    print(np.sum(proliferate(load_data(), 80)))
def task2():
    """Part 2: print the total fish count after 256 days."""
    print(np.sum(proliferate(load_data(), 256)))
def main():
    """Run both puzzle parts, each preceded by a banner header."""
    for label, task in (("Task 1", task1), ("Task 2", task2)):
        print(f"----- {label} -----")
        task()


if __name__ == "__main__":
    main()
| [
"functools.partial",
"numpy.roll",
"jax.numpy.roll",
"jax.numpy.arange",
"jax.numpy.bincount",
"jax.numpy.asarray",
"pathlib.Path",
"numba.jit",
"numpy.bincount"
] | [((558, 575), 'numba.jit', 'njit', (['proliferate'], {}), '(proliferate)\n', (562, 575), True, 'from numba import jit as njit\n'), ((579, 610), 'functools.partial', 'partial', (['jjit'], {'static_argnums': '(1)'}), '(jjit, static_argnums=1)\n', (586, 610), False, 'from functools import partial\n'), ((389, 428), 'numpy.bincount', 'np.bincount', (['initial_state'], {'minlength': '(9)'}), '(initial_state, minlength=9)\n', (400, 428), True, 'import numpy as np\n'), ((663, 706), 'jax.numpy.asarray', 'jnp.asarray', (['initial_state'], {'dtype': 'jnp.int64'}), '(initial_state, dtype=jnp.int64)\n', (674, 706), True, 'import jax.numpy as jnp\n'), ((719, 746), 'jax.numpy.bincount', 'jnp.bincount', (['arr'], {'length': '(9)'}), '(arr, length=9)\n', (731, 746), True, 'import jax.numpy as jnp\n'), ((760, 779), 'jax.numpy.arange', 'jnp.arange', (['(0)', 'days'], {}), '(0, days)\n', (770, 779), True, 'import jax.numpy as jnp\n'), ((187, 201), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (191, 201), False, 'from pathlib import Path\n'), ((471, 489), 'numpy.roll', 'np.roll', (['state', '(-1)'], {}), '(state, -1)\n', (478, 489), True, 'import numpy as np\n'), ((797, 816), 'jax.numpy.roll', 'jnp.roll', (['state', '(-1)'], {}), '(state, -1)\n', (805, 816), True, 'import jax.numpy as jnp\n')] |
#!/usr/bin/env python3
import dummy
import numpy
import mysql.connector
from io import BytesIO
# Start a MySQL database via Docker
# docker run -ti --rm --name ohmysql -e MYSQL_ROOT_PASSWORD=mikolov -e MYSQL_DATABASE=embeddings -p 3306:3306 mysql:5.7
def adapt_array(array):
    """
    Serialize a NumPy array into a byte string suitable for a BLOB column.

    ``numpy.save`` writes the array in the portable ``.npy`` format into
    an in-memory buffer, whose raw bytes are returned.

    :param numpy.array array: NumPy array to turn into a BLOB
    :return: NumPy array as BLOB
    :rtype: BLOB
    """
    buffer = BytesIO()
    numpy.save(buffer, array)
    return buffer.getvalue()
def convert_array(blob):
    """
    Turn the binary ``.npy`` payload stored in a BLOB back into a NumPy array.

    :param BLOB blob: BLOB containing a NumPy array
    :return: One steaming hot NumPy array
    :rtype: numpy.array
    """
    # A fresh BytesIO starts at position 0, so no explicit seek is needed.
    return numpy.load(BytesIO(blob))
# Connect to the MySQL instance started with the docker command above;
# the credentials and database name match the container's environment
# variables (MYSQL_ROOT_PASSWORD, MYSQL_DATABASE).
connection = mysql.connector.connect(user='root',
                                     password='<PASSWORD>',
                                     host='127.0.0.1',
                                     database='embeddings')
cursor = connection.cursor()
# `key` holds the token text, `embedding` the .npy-serialized vector BLOB.
cursor.execute('CREATE TABLE IF NOT EXISTS `embeddings` (`key` TEXT, `embedding` BLOB);')
#########
# Write #
#########
# Serialize every dummy embedding to bytes and insert it keyed by its token.
for key, emb in dummy.embeddings():
    arr = adapt_array(emb)
    cursor.execute('INSERT INTO `embeddings` (`key`, `embedding`) VALUES (%s, %s);', (key, arr))
connection.commit()
########
# Read #
########
# Fetch each embedding back and verify the BLOB round-trips to an ndarray.
for key, _ in dummy.embeddings():
    cursor.execute('SELECT embedding FROM `embeddings` WHERE `key`=%s;', (key,))
    data = cursor.fetchone()
    emb = convert_array(data[0])
    assert(type(emb) is numpy.ndarray)
# Drop the demo table and close the connection.
cursor.execute('DROP TABLE `embeddings`;')
connection.close()
| [
"io.BytesIO",
"numpy.save",
"dummy.embeddings",
"numpy.load"
] | [((1352, 1370), 'dummy.embeddings', 'dummy.embeddings', ([], {}), '()\n', (1368, 1370), False, 'import dummy\n'), ((1562, 1580), 'dummy.embeddings', 'dummy.embeddings', ([], {}), '()\n', (1578, 1580), False, 'import dummy\n'), ((565, 574), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (572, 574), False, 'from io import BytesIO\n'), ((579, 601), 'numpy.save', 'numpy.save', (['out', 'array'], {}), '(out, array)\n', (589, 601), False, 'import numpy\n'), ((899, 912), 'io.BytesIO', 'BytesIO', (['blob'], {}), '(blob)\n', (906, 912), False, 'from io import BytesIO\n'), ((941, 956), 'numpy.load', 'numpy.load', (['out'], {}), '(out)\n', (951, 956), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. <NAME> (<EMAIL>) and the
# RMG Team (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This script contains unit tests of the :mod:`rmgpy.quantity` module.
"""
import unittest
import numpy
import os
from rmgpy.cantherm import CanTherm
import rmgpy.constants as constants
################################################################################
class CommonTest(unittest.TestCase):
"""
Contains unit tests of the Cantherm common functions.
"""
def test_checkConformerEnergy(self):
"""
test the checkConformerEnergy function with an list of energies.
"""
Vlist = [-272.2779012225, -272.2774933703, -272.2768397635, -272.2778432059, -272.278645477, -272.2789602654, -272.2788749196, -272.278496709, -272.2779350675, -272.2777008843, -272.2777167286, -272.2780937643, -272.2784838846, -272.2788050464, -272.2787865352, -272.2785091607, -272.2779977452, -272.2777957743, -272.2779134906, -272.2781827547, -272.278443339, -272.2788244214, -272.2787748749]
Vlist = numpy.array(Vlist, numpy.float64)
Vdiff = (Vlist[0] - numpy.min(Vlist)) * constants.E_h * constants.Na / 1000
self.assertAlmostEqual(Vdiff / 2.7805169838282797, 1, 5)
class testCanthermJob(unittest.TestCase):
"""
Contains unit tests of the Cantherm module and its interactions with other RMG modules.
"""
def setUp(self):
cantherm = CanTherm()
jobList = cantherm.loadInputFile(os.path.join(os.path.dirname(os.path.abspath(__file__)),r'files/methoxy.py'))
pdepjob = jobList[-1]
self.kineticsjob = jobList[0]
pdepjob.activeJRotor = True
network = pdepjob.network
self.Nisom = len(network.isomers)
self.Nreac = len(network.reactants)
self.Nprod = len(network.products)
self.Npath = len(network.pathReactions)
self.PathReaction2 = network.pathReactions[2]
self.TminValue = pdepjob.Tmin.value
self.Tmaxvalue = pdepjob.Tmax.value
self.TmaxUnits = pdepjob.Tmax.units
self.TlistValue = pdepjob.Tlist.value
self.PminValue = pdepjob.Pmin.value
self.Pcount = pdepjob.Pcount
self.Tcount = pdepjob.Tcount
self.GenTlist = pdepjob.generateTemperatureList()
self.PlistValue = pdepjob.Plist.value
self.maximumGrainSizeValue = pdepjob.maximumGrainSize.value
self.method = pdepjob.method
self.rmgmode = pdepjob.rmgmode
# test Cantherm's interactions with the network module
def testNisom(self):
"""
Test the number of isomers identified.
"""
self.assertEqual(self.Nisom, 2, msg=None)
def testNreac(self):
"""
Test the number of reactants identified.
"""
self.assertEqual(self.Nreac, 1, msg=None)
def testNprod(self):
"""
Test the number of products identified.
"""
self.assertEqual(self.Nprod, 1, msg=None)
def testNpathReactions(self):
"""
Test the whether or not RMG mode is turned on.
"""
self.assertEqual(self.Npath, 3, msg=None)
def testPathReactions(self):
"""
Test a path reaction label
"""
self.assertEqual(str(self.PathReaction2), 'CH2OH <=> methoxy', msg=None)
# test Cantherm's interactions with the pdep module
def testTemperaturesUnits(self):
"""
Test the Temperature Units.
"""
self.assertEqual(str(self.TmaxUnits), 'K', msg=None)
def testTemperaturesValue(self):
"""
Test the temperature value.
"""
self.assertEqual(self.TminValue, 450.0, msg=None)
def testTemperaturesList(self):
"""
Test the temperature list.
"""
self.assertEqual(numpy.array_equal(self.TlistValue, numpy.array([450, 500, 678, 700])), True, msg=None)
def testPminValue(self):
"""
Test the minimum pressure value.
"""
self.assertEqual("%0.7f" % self.PminValue, str(0.0101325), msg=None)
def testPcount(self):
"""
Test the number pressures specified.
"""
self.assertEqual(self.Pcount, 7, msg=None)
def testTcount(self):
"""
Test the number temperatures specified.
"""
self.assertEqual(self.Tcount, 4, msg=None)
def testPressureList(self):
"""
Test the pressure list.
"""
self.assertEqual(numpy.array_equal(self.PlistValue, numpy.array([0.01, 0.1, 1, 3, 10, 100, 1000])), True, msg=None)
def testGenerateTemperatureList(self):
"""
Test the generated temperature list.
"""
self.assertEqual(list(self.GenTlist), [450.0, 500.0, 678.0, 700.0], msg=None)
def testmaximumGrainSizeValue(self):
"""
Test the max grain size value.
"""
self.assertEqual(self.maximumGrainSizeValue, 0.5, msg=None)
def testMethod(self):
"""
Test the master equation solution method chosen.
"""
self.assertEqual(self.method, 'modified strong collision', msg=None)
def testRmgmode(self):
"""
Test the whether or not RMG mode is turned on.
"""
self.assertEqual(self.rmgmode, False, msg=None)
# Test cantherms interactions with the kinetics module
def testCalculateTSTRateCoefficient(self):
"""
Test the calculation of the high-pressure limit rate coef for one of the kinetics jobs at Tmin and Tmax.
"""
self.assertEqual("%0.7f" % self.kineticsjob.reaction.calculateTSTRateCoefficient(self.TminValue), str(46608.5904933), msg=None)
self.assertEqual("%0.5f" % self.kineticsjob.reaction.calculateTSTRateCoefficient(self.Tmaxvalue), str(498796.64535), msg=None)
def testTunneling(self):
"""
Test the whether or not tunneling has been included in a specific kinetics job.
"""
self.assertEqual(self.kineticsjob.reaction.transitionState.tunneling, None, msg=None)
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| [
"os.path.abspath",
"unittest.TextTestRunner",
"numpy.min",
"numpy.array",
"rmgpy.cantherm.CanTherm"
] | [((2365, 2398), 'numpy.array', 'numpy.array', (['Vlist', 'numpy.float64'], {}), '(Vlist, numpy.float64)\n', (2376, 2398), False, 'import numpy\n'), ((2741, 2751), 'rmgpy.cantherm.CanTherm', 'CanTherm', ([], {}), '()\n', (2749, 2751), False, 'from rmgpy.cantherm import CanTherm\n'), ((7411, 7447), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (7434, 7447), False, 'import unittest\n'), ((5153, 5186), 'numpy.array', 'numpy.array', (['[450, 500, 678, 700]'], {}), '([450, 500, 678, 700])\n', (5164, 5186), False, 'import numpy\n'), ((5823, 5868), 'numpy.array', 'numpy.array', (['[0.01, 0.1, 1, 3, 10, 100, 1000]'], {}), '([0.01, 0.1, 1, 3, 10, 100, 1000])\n', (5834, 5868), False, 'import numpy\n'), ((2831, 2856), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2846, 2856), False, 'import os\n'), ((2427, 2443), 'numpy.min', 'numpy.min', (['Vlist'], {}), '(Vlist)\n', (2436, 2443), False, 'import numpy\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy as sp
from scipy import stats
from scipy.stats import t, ttest_ind
from pandas import Series, DataFrame
from statsmodels.stats import weightstats as stests
pd.options.display.float_format = '{:.1f}'.format
df = pd.read_csv('movies.csv')
# In[2]:
#keeping only the necessary columns
df = df[['Title', 'Worldwide Gross', 'Production Budget', 'Release Date', 'Major Genre', 'Rotten Tomatoes Rating', 'IMDB Rating', 'IMDB Votes']]
#renaming some columns
df.rename(columns = {'Worldwide Gross' : 'Gross', 'Production Budget' : 'Budget', 'Release Date' : 'Date', 'Major Genre' : 'Genre', 'Rotten Tomatoes Rating' : 'RTRating', 'IMDB Rating' : 'IMDBRating', 'IMDB Votes' : 'IMDBVotes'}, inplace = True)
# In[3]:
#cropping unnecessary info from Date
df.Date = df['Date'].str.rstrip()
df.Date = df['Date'].str[-2:]
# In[4]:
#pruning unwanted rows from Date
df = df[df['Date'].str.isdecimal() == True]
#pruning unwanted rows from Gross
df = df[df['Gross'] != 'Unknown']
# In[5]:
#fixing indices after pruning (starting from 1 instead of 0)
df.index = np.arange(1, len(df) + 1)
# In[6]:
#fixing Date format
df.Date = pd.to_numeric(df['Date'], errors = 'coerce')
df.Date = df['Date'].map("{:02}".format)
df.Date = df['Date'].apply(lambda x:'20'+x if 0 <= int(x) <= 19 else '19'+x)
df.Date = df['Date'].astype(int)
# In[7]:
#converting Gross from str to float
df.Gross = df['Gross'].astype(float)
# In[8]:
#fixing scale climax on RTRating
df.RTRating = df['RTRating'].apply(lambda x: x/10)
# In[9]:
#to csv - to see how clean data looks like
df.to_csv('cleandata_movies.csv', index_label = 'ID')
# In[10]:
#making Production Budget and Gross Dataframe ---for my data mining problem ---
budget_gross_df = df[['Gross', 'Budget']]
budget_gross_df = budget_gross_df.dropna()
# In[11]:
#making a genres Dataframe splitting the one column to two (Columns: ID, First, Second)----helpful----
genres_df = df['Genre'].dropna()
genres_df = genres_df.str.split('/', expand = True)
genres_df.columns = ['First', 'Second']
second_genre = genres_df['Second'].dropna()
# In[12]:
#constructing a dictionary for Genre (key = genre : value = number of movies)
genres_hash = {}
for i in genres_df['First']:
if i not in genres_hash:
genres_hash[i] = 1
else:
genres_hash[i] += 1
for i in second_genre:
if i not in genres_hash:
genres_hash[i] = 1
else:
genres_hash[i] += 1
# In[13]:
#making the Genre - Number of Movies Dataframe so as to make the bar plot later
genre_numbers_df = pd.DataFrame.from_dict(genres_hash, orient = 'index')
genre_numbers_df.columns = ['Number of Movies']
genre_numbers_df.sort_values('Number of Movies', ascending = False, inplace = True)
genre_numbers_df.reset_index(level = 0, inplace = True)
genre_numbers_df = genre_numbers_df.rename(columns = {'index' : 'Genre'})
# In[14]:
#making Worldwide Gross histogram
sns.set_context('paper')
sns.set_style('white')
gross = df['Gross']
plt.figure(figsize=(10, 5))
sns.distplot( gross, kde = False, color = 'darkgreen', bins = 30)
plt.title('Worldwide Gross', color = 'darkgreen', fontsize = 18)
plt.xlabel('Dollars', fontsize = 14)
plt.ylabel('Number of Movies', fontsize = 14)
plt.savefig('WorldwideGross_Histogram.png')
# Skewed Non Symmetric (Non-Normal) Right-long-tailed Histogram ----Follows Geometric Distribution----
#
# - Data are located in the left side.
#
# - Scale of data: interval [0.0 – 2767891499.0] dollars.
# The spread of data is not really wide. Note that our data points, range from $0 - $2767891499
#
# - The histogram is “skewed” , there is no-mirroring of the data.
#
# - This distribution is non-symmetric. It has a long tail (on the right) relative to the other tail (left
# side has no tail at all). This phenomenon occurs, beacause the lower bounds of the data are
# significantly more frequent than the upper bounds.
# In[15]:
#making Rotten Tomatoes Rating histogram
rtr = df['RTRating'].dropna()
plt.figure(figsize=(10, 5))
sns.distplot(rtr, kde = False, color='darkred', bins = 10)
plt.title('Rotten Tomatoes Rating',color = 'darkred', fontsize = 18)
plt.xlabel('Rating', fontsize = 14)
plt.ylabel('Number of Movies', fontsize = 14)
mean = rtr.mean()
plt.axvline(mean, color='r', linestyle='--')
plt.legend({'Mean':mean})
plt.savefig('RottenTomatoesRating_Histogram.png')
# Symmetric 1 peak Unimodal short–tailed Histogram ----1 peak Unimodal Distribution ----
#
# - Data, cluster around a single mode (1 peak unimodal). Tails here, approach zero very fast. We could say
# that Rotten Tomatoes ratings are well-distributed, ratings vary a lot and overspread through whole data
# scale
#
# - Scale of data : interval [0.1,10.0] points.
#
# - No skewness.
# In[16]:
#making IMDB Rating histogram
imdbr = df['IMDBRating'].dropna()
plt.figure(figsize=(10, 5))
sns.distplot(imdbr, kde = False, color='goldenrod', bins = 8)
plt.xlim(0, 10)
plt.title('IMDB Rating',color = 'goldenrod', fontsize = 18)
plt.xlabel('Rating', fontsize = 14)
plt.ylabel('Number of Movies', fontsize=14)
mean = imdbr.mean()
plt.axvline(mean, color='r', linestyle='--')
plt.legend({'Mean':mean})
plt.savefig('IMDBRating_Histogram.png')
# Skewed Non-Symmetric Normal moderate-tailed Histogram ----follows Normal Distribution----
#
# - Data are normally distributed with a litle skewness to the right. IMDB Ratings follow the normal
# distribution, centrality is around 6 and 7 points and as we can see there is a tendency for higher
# voting than lower.
#
# - Scale of data : interval [1.4, 9.2] points**. We managed to contain the whole scale.
#
# - There is a litle right skewness and we conclude that most ratings tend to be higher than the middle rating
# point of the scale (that is, number 5).
# In[17]:
#making IMDB Votes histogram
imdbv = df['IMDBVotes'].dropna()
plt.figure(figsize=(10, 5))
sns.distplot(imdbv, kde = False, color = 'black', bins = 25)
plt.title('IMDB Votes',color = 'black', fontsize = 18)
plt.xlabel('Number of Votes', fontsize = 14)
plt.ylabel('Number of Movies', fontsize = 14)
mean = imdbv.mean()
plt.axvline(mean, color='r', linestyle='--')
plt.legend({'Mean':mean})
plt.savefig('IMDBVotes_Histogram.png')
# Skewed Non Symmetric (Non-Normal) Right-long-tailed Histogram ----follows Geometric Distribution----
#
# - Most of the data are located in the left side and with a first glance we could say that the centrality
# of the number of votes is in the first two bins.
#
# - Scale of data : interval [18, 519541] votes. We managed to contain the whole scale. Data are not
# normally spread all along the interval.
#
# - Again, data are skewed to the left and this distribution is non-symmetric. It has a long tail (on the
# right) relative to the other tail (left side has no tail at all). This phenomenon occurs, due to huge
# dispersion of our data points, huge difference between lower and upper bounds (as we explained above).
#
# In[18]:
#making Genres Bar Plot
plt.figure(figsize=(10, 5))
sns.barplot(x = 'Number of Movies', y = 'Genre', saturation = 1, data = genre_numbers_df)
plt.title('Major Genre', color = 'black', fontsize = 18)
plt.savefig('num_movie_genre_barplot.png')
# Number of Movies per Genre Barplot “Oh! Too much DRAMA”
#
# The barplot shows some interesting analytics for what kind of movie, Production Companies, prefer to
# release.
# There is a tendency to fund and therefore, produce more “Drama” and “Comedy” movies.
# The bronze metal goes to “Action” films and “Performance” and “Concert” genres come last.
# In[19]:
#merging Gross with IMDBVotes into one DataFrame
gross_votes_df = pd.merge(pd.DataFrame(gross), pd.DataFrame(imdbv), left_index = True, right_index = True)
# In[20]:
#making exp. Worldwide Gross Histogram
plt.figure(figsize=(10, 5))
sns.distplot(gross_votes_df['Gross'], kde = False, color = 'darkgreen', label = 'Gross', bins = 50)
plt.xscale('log')
plt.yscale('log')
plt.title('Worldwide Gross exp.',color = 'black', fontsize = 18)
plt.xlabel('Dollars ', fontsize = 14)
plt.ylabel('Number of Movies', fontsize = 14)
plt.savefig('Grosslog_Histogram.png')
# Worldwide Gross Histogram (exponential bins)
#
# - Data points are gathered in the left side.
#
# - The bins get “thinner” as they grow exponentially.
#
# - It seems that Worldwide Gross follow exponential regression with two outliers in the right side that managed to form small bins. Outliers should be seriously considered.
#
# - There are huge deviations from the mean and the climax of data is large enough (high upper bounds) to
# increase the mean number a lot.
#
# - They show us the great dispersion that movies can have on their revenue. Big money is for the minority!
# In[21]:
#making exp. IMDB Votes Histogram
plt.figure(figsize=(10, 5))
sns.distplot(gross_votes_df['IMDBVotes'] , kde = False, color = 'black', label = 'IMDBVotes', bins = 50)
plt.xscale('log')
plt.yscale('log')
plt.title('IMDB Votes exp.',color = 'black', fontsize = 18)
plt.xlabel('Votes ', fontsize = 14)
plt.ylabel('Number of Movies', fontsize = 14)
plt.savefig('Voteslog_Histogram.png')
# IMDB Votes Histogram (exponential bins)
#
# - IMDB votes follow exponential regression.
#
# - There are a few movies that reach up to 10^5 votes .
#
# - As the votes increase, bins contain less and less quantity of data.
#
# - Data are gathered in the left side.
#
# - The tail drops significantly and the bins get “thinner”.
#
# - There are some tiny outliers in the right side which means that there are huge deviations
# from the mean and the climax of data is large enough (high upper bounds) to increase the mean number a lot.
# In[22]:
#making exp. Worldwide Gross and IMDB Votes Scatterplot
plt.figure(figsize=(10, 5))
sns.scatterplot(x = 'IMDBVotes', y = 'Gross', data = gross_votes_df, facecolor = 'goldenrod' )
plt.xscale('log')
plt.yscale('log')
plt.ylim(20)
plt.title('IMDB Votes - Worldwide Gross',color = 'black', fontsize = 18)
plt.xlabel('Votes', fontsize = 14)
plt.ylabel('Gross ($)', fontsize = 14)
plt.savefig('VotesGrosslog_Scatterplot.png')
# Worldwide Gross and IMDB Votes Scatterplot (exponential bins)
#
# In this scatterplot, it is easy to realize that there is a high positive linear correlation between the revenues and IMDB votes. Most data points are clustered together as x and y axis increases.
# In[23]:
#making Worldwide Gross and IMDB Votes Scatterplot
plt.figure(figsize=(10, 5))
sns.scatterplot(x = 'IMDBVotes', y = 'Gross', data = gross_votes_df, facecolor = 'goldenrod' )
plt.title('IMDB Votes - Worldwide Gross',color = 'black', fontsize = 18)
plt.xlabel('Votes', fontsize = 14)
plt.ylabel('Gross ($)', fontsize = 14)
plt.savefig('VotesGross_Scatterplot.png')
# Worldwide Gross and IMDB Votes Scatterplot
#
# As we can see from the IMDB Votes – Worldwide Gross Scatterplot, there is a strong clustering in the left corner of the axis. A positive correlation.
# As vote axis increases, gross axis looks to stay around the same values.
# Therefore, we could make a hypothesis that says: “ Movies are more popular online “.
# In[24]:
#Pearson Correlation Coefficient: Worldwide Gross and IMDB Votes
pd.options.display.float_format = '{:.6f}'.format
gross_votes_df.corr(method = 'pearson')
# In[25]:
#Spearman Correlation Coefficient: Worldwide Gross and IMDB Votes
gross_votes_df.corr(method = 'spearman')
# In[26]:
#2 sample z-test: Worldwide Gross and IMDB Votes
print('H0: Movies are more popular online, than in the cinema.\n ')
ztest, pval = stests.ztest(gross_votes_df['IMDBVotes'], x2 = gross_votes_df['Gross'], value = 0, alternative = 'larger')
print('p-value: ', pval)
if pval<0.05:
print('\nReject Null Hypothesis (H0)')
else:
print('\nAccept Null Hypothesis (H0)')
pd.options.display.float_format = '{:.1f}'.format
# In[27]:
#concatenating RTRating with IMDBRating into one DataFrame
rtr_imdbr_df = pd.concat([pd.DataFrame(rtr), pd.DataFrame(imdbr)], axis =1)
rtr_imdbr_df.dropna(inplace=True)
# In[28]:
#making RTRating and IMDBRating Scatterplot
plt.figure(figsize=(10, 5))
sns.scatterplot(x = 'RTRating', y='IMDBRating', data = rtr_imdbr_df, facecolor = 'm', edgecolor = 'black')
plt.title('Rotten Tomatoes Rating - IMDB Rating',color = 'black', fontsize = 18)
plt.xlim(0,10)
plt.ylim(0,10)
plt.xlabel('Rotten Tomatoes Rating', fontsize = 14)
plt.ylabel('IMDB Rating', fontsize = 14)
plt.savefig('RTR_IMDB_Ratings_Scatterplot.png')
# Rotten Tomatoes Rating and IMDB Rating Scatterplot
#
# As we can see from the Rotten Tomatoes – IMDB Rating Scatterplot there is a strong linear positive correlation between them. Small values of Rotten Tomatoes Rating correspond to small values of IMDB Rating. Large values of Rotten Tomatoes Rating correspond to large values of IMDB Rating.
# If we take a careful look, we see that when people vote movies under 5 in Rotten Tomatoes, people in IMDB vote (for the same movies) a litle higher. However, when people vote above 6 in Rotten Tomatoes, people in IMDB vote lower than those in Rotten, still above 6.
# In[29]:
#Pearson Correlation Coefficient: RTRating and IMDBRating
pd.options.display.float_format = '{:.6f}'.format
rtr_imdbr_df.corr(method = 'pearson')
# In[30]:
#Spearman Correlation Coefficient: RTRating and IMDBRating
rtr_imdbr_df.corr(method = 'spearman')
# In[31]:
#2 sample z-test1: RTRating and IMDBRating
print('H0: People in Rotten Tomatoes and IMDB vote similarly.\n ')
ztest, pval = stests.ztest(rtr_imdbr_df['RTRating'], x2 = rtr_imdbr_df['IMDBRating'], value = 0 , alternative = 'two-sided')
print('p-value: ', pval)
if pval<0.05:
print('\nReject Null Hypothesis (H0)')
else:
print('\nAccept Null Hypothesis (H0)')
# In[32]:
#2 sample z-test2: RTRating and IMDBRating
print('H1: People in Rotten Tomatoes vote higher than in IMDB.\n ')
ztest, pval = stests.ztest(rtr_imdbr_df['RTRating'], x2 = rtr_imdbr_df['IMDBRating'], value = 0 , alternative = 'larger')
print('p-value: ', pval)
if pval<0.05:
print('\nReject Null Hypothesis (H1)')
else:
print('\nAccept Null Hypothesis (H1)')
pd.options.display.float_format = '{:.1f}'.format
# In[33]:
#making one DataFrame with Genres (1 and 2 ) and Gross
gross_genre_df = pd.DataFrame(df['Gross'])
gross_genre_df['Genre1'] = genres_df[['First']]
gross_genre_df.dropna(inplace = True)
gross_genre_df['Genre2'] = genres_df[['Second']]
#print(gross_genre_df)
# In[34]:
#Grouping by genres and finding mean gross (in both genre dfs), then putting them all in one DataFrame
m_gross_genre1_df = gross_genre_df.groupby('Genre1').mean().reset_index()
m_gross_genre2_df = gross_genre_df.groupby('Genre2').mean().reset_index()
m_gross_genre2_df = m_gross_genre2_df.rename(columns = {'Genre2' : 'Genre1'})
m_gross_genre1_df = m_gross_genre1_df.append(m_gross_genre2_df, ignore_index = True)
m_gross_genre1_df = m_gross_genre1_df.rename(columns = {'Genre1' : 'Genre', 'Gross' : 'Mean Gross'})
m_gross_genre1_df.sort_values('Mean Gross', ascending = False, inplace = True)
# In[35]:
#Grouping by genres and finding std gross (in both genre dfs), then putting them all in one DataFrame
std_gross_genre1_df = gross_genre_df.groupby('Genre1').std().reset_index()
std_gross_genre2_df = gross_genre_df.groupby('Genre2').std().reset_index()
std_gross_genre2_df = std_gross_genre2_df.rename(columns = {'Genre2' : 'Genre1'})
std_gross_genre1_df = std_gross_genre1_df.append(std_gross_genre2_df, ignore_index = True)
std_gross_genre1_df = std_gross_genre1_df.rename(columns = {'Genre1' : 'Genre', 'Gross' : 'Std Gross'})
# In[36]:
#calculating the confidence intervals ci lower and ci upper
conf_int = stats.norm.interval(0.95, loc = m_gross_genre1_df['Mean Gross'], scale = std_gross_genre1_df['Std Gross']/np.sqrt(std_gross_genre1_df['Std Gross'].count()))
conf_int = np.array(conf_int)
# In[37]:
# putting ci lower and ci upper in our m_gross_genre1_df DataFrame
m_gross_genre1_df['Bottom Error Ci'] = conf_int[0]
m_gross_genre1_df['Top Error Ci'] = conf_int[1]
m_gross_genre1_df.reset_index(drop = True, inplace = True)
# In[38]:
# fixing the negative bottom error CIs and give them the Mean Gross number
# so as to be 0 when the errorbar calculates xerror (it calculates: mean - bottom error ci)
m_gross_genre1_df['Bottom Error Ci'] = m_gross_genre1_df['Bottom Error Ci'].mask(m_gross_genre1_df['Bottom Error Ci'] < 0, m_gross_genre1_df['Mean Gross'])
# In[39]:
#making the barplot of Mean Worldwide Gross per Genre
plt.figure(figsize=(10, 5))
a = sns.barplot(x = 'Mean Gross', y = 'Genre', data = m_gross_genre1_df)
plt.errorbar(x = 'Mean Gross', y = 'Genre', xerr =[m_gross_genre1_df['Bottom Error Ci'], m_gross_genre1_df['Top Error Ci']], data = m_gross_genre1_df, fmt = 'o', c = 'navy')
plt.title('Mean Worldwide Gross per Genre', color = 'black', fontsize = 18)
plt.xlabel('Mean Worldwide Gross', fontsize = 14)
plt.ylabel('Genre', fontsize = 14)
plt.savefig('mean_gross_genre_barplot.png')
# Mean Worldwide Gross per Genre Barplot
#
# Firstly, it is clear that adventure movies are far more profitable than the rest.
# Interesting is, that drama movies, the most preferable among production companies, are not that prosperous at all and that is also for comedies. Secondly, the confidence interval for adventure movies is enormous! As it comes 4rth in the production ranking (checking back on the previous barplot about popularity of genres), the reason seems to be that it has wide dispersion.
# In[40]:
#2 sample T-test :"Adventure - Action" Genre Mean Worldwide Gross
pd.options.display.float_format = '{:.6f}'.format
adv = df[df['Genre']=='Adventure']
adv = adv[['Genre', 'Gross']]
action = df[df['Genre']=='Action']
action = action[['Genre', 'Gross']]
print('H0: There is no significant difference between adventure movies mean gross and action movies mean gross.\n ')
ttest, pval = stats.ttest_ind(adv['Gross'], action['Gross'], equal_var = False)
print('p-value: ', pval)
if pval<0.05:
print('\nReject Null Hypothesis (H0)')
else:
print('\nAccept Null Hypothesis (H0)')
# In[41]:
#2 sample T-test : "Comedy - Drama" Genre Mean Worldwide Gross
comedy = df[df['Genre']=='Comedy']
comedy = comedy[['Genre', 'Gross']]
drama = df[df['Genre']=='Drama']
drama = drama[['Genre', 'Gross']]
print('H0: There is no significant difference between comedy movies mean gross and drama movies mean gross.\n ')
ttest, pval = stats.ttest_ind(comedy['Gross'], drama['Gross'], equal_var = False)
print('p-value: ', pval)
if pval<0.05:
print('\nReject Null Hypothesis (H0)')
else:
print('\nAccept Null Hypothesis (H0)')
# In[42]:
#2 sample T-test : "Drama - Comedy" Genre Mean Worldwide Gross
drama = df[df['Genre']=='Drama']
drama = drama[['Genre', 'Gross']]
western = df[df['Genre']=='Western']
western = western[['Genre', 'Gross']]
print('H0: There is no significant difference between drama movies mean gross and western movies mean gross.\n ')
ttest, pval = stats.ttest_ind(drama['Gross'], western['Gross'], equal_var = False)
print('p-value: ', pval)
if pval<0.05:
print('\nReject Null Hypothesis (H0)')
else:
print('\nAccept Null Hypothesis (H0)')
pd.options.display.float_format = '{:.1f}'.format
# In[43]:
#--------my data mining problem--------making Production Budget and Worldwide Gross Scatterplot
plt.figure(figsize=(10, 5))
sns.scatterplot(x = 'Budget', y='Gross', data = budget_gross_df, facecolor = 'skyblue', edgecolor ='darkblue')
plt.title('Production Budget - Worldwide Gross',color = 'black', fontsize = 18)
plt.xlabel('Production Budget', fontsize = 14)
plt.ylabel('Worldwide Gross', fontsize = 14)
plt.savefig('Budget_Gross_Scatterplot.png')
# Production Budget and Worldwide Gross Scatterplot
#
# Scatterplot gives us a first thought that the bigger the budget the better the gross ( le8 vs le9).
# It follows linear positive correlation. However, data points are less as budget goes up.
# Most of the data points are clustered in the left down corner.
#
# In[44]:
#Pearson Correlation Coefficient: Production Budget and Worldwide Gross
pd.options.display.float_format = '{:.6f}'.format
budget_gross_df.corr(method = 'pearson')
# In[45]:
#Spearman Correlation Coefficient: Production Budget and Worldwide Gross
budget_gross_df.corr(method = 'spearman')
# In[46]:
#2 sample z-test1: Production Budget and Worldwide Gross
print('H0: Grosses are larger than budgets invested.\n ')
ztest, pval = stests.ztest(budget_gross_df['Gross'], x2 = budget_gross_df['Budget'], value = 0 , alternative = 'larger')
print('p-value: ', pval)
if pval<0.05:
print('\nReject Null Hypothesis (H0)')
else:
print('\nAccept Null Hypothesis (H0)')
# In[47]:
# making the DataFrame for the mean() RTRating and mean() IMDBRating per decade
ratings_dates_df = df[['RTRating', 'IMDBRating', 'Date']]
ratings_dates_df = ratings_dates_df.rename(columns = {'RTRating' : 'Rotten Tomatoes', 'IMDBRating': 'IMDB'})
ratings_dates_df = ratings_dates_df.groupby((ratings_dates_df.Date//10)*10).mean()
ratings_dates_df = ratings_dates_df[['Rotten Tomatoes', 'IMDB']]
ratings_dates_df.reset_index(level = 0, inplace = True)
ratings_dates_df = ratings_dates_df.rename(columns = {'Date' : 'Decade'})
ratings_dates_df = pd.melt(ratings_dates_df, id_vars = 'Decade', var_name = 'Website', value_name = 'Rating')
# In[48]:
#creating catplot for Mean Ratings per Decade
colors = ['darkred', 'black']
palette = sns.color_palette(colors)
sns.catplot(x = 'Decade', y = 'Rating', hue = 'Website', data = ratings_dates_df, palette = palette, kind = 'bar')
plt.title('Mean Ratings per Decade', color = 'black', fontsize = 18)
plt.savefig('ratings_decade_catplot.png')
# Mean Ratings per Decade 2-Barplot
#
# From this plot, we realize that ratings are higher back in the years that what they are today.
# This catplot gives us more info about the votes as it categorizes them with different colors.
# Another inference is that IMDB Votes are more symmetric than Rotten Tomatoes votes, which indicates that
# users think movies are not that bad nowadays, whereas, critics in Rotten Tomatoes seem to be dissapointed
# by recent movies.
#
# So yes, ratings are worse than what they used to be.
# In[49]:
#creating pointplot for Mean Ratings per Decade
plt.figure(figsize=(10, 5))
colors = ['darkred', 'black']
palette = sns.color_palette(colors)
sns.pointplot(x = 'Decade', y = 'Rating', hue = 'Website', data = ratings_dates_df, palette = palette, kind = 'point')
plt.title('Mean Ratings per Decade', color = 'black', fontsize = 18)
plt.savefig('ratings_decade_pointplot.png')
# Mean Ratings per Decade Pointplot
#
# Pointplot shows exactly the “line drop” we mentioned above. The red line used to be really high, even higher than the black, and nearby 1985 dropped rappidly under the black one.
#
# In[50]:
#creating Heatmap for Mean Ratings per Decade
plt.figure(figsize=(10, 5))
sns.heatmap(ratings_dates_df.pivot('Website', 'Decade', 'Rating'), cmap='Reds')
plt.title('Mean Ratings per Decade', color = 'black', fontsize = 18)
plt.savefig('ratings_decade_heatmap.png')
# Mean Ratings per Decade Heatmap
#
# The color-coding here helps us understand when users voted higher.
# Rotten Tomatoes critics voted higher from 1930 to 1960 movies and color turns whitey the last two
# decades.
# IMDB users stay in orange-beige palette throughout most of the decades, with a more whiter tone from 1980 to 2000.
#
# To sum up, recent movies are indeed low rated, the decline is evident.
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"matplotlib.pyplot.axvline",
"seaborn.pointplot",
"seaborn.set_context",
"matplotlib.pyplot.errorbar",
"seaborn.set_style",
"pandas.DataFrame.from_dict",
"seaborn.scatterpl... | [((368, 393), 'pandas.read_csv', 'pd.read_csv', (['"""movies.csv"""'], {}), "('movies.csv')\n", (379, 393), True, 'import pandas as pd\n'), ((1284, 1326), 'pandas.to_numeric', 'pd.to_numeric', (["df['Date']"], {'errors': '"""coerce"""'}), "(df['Date'], errors='coerce')\n", (1297, 1326), True, 'import pandas as pd\n'), ((2713, 2764), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['genres_hash'], {'orient': '"""index"""'}), "(genres_hash, orient='index')\n", (2735, 2764), True, 'import pandas as pd\n'), ((3077, 3101), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (3092, 3101), True, 'import seaborn as sns\n'), ((3102, 3124), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (3115, 3124), True, 'import seaborn as sns\n'), ((3147, 3174), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (3157, 3174), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3233), 'seaborn.distplot', 'sns.distplot', (['gross'], {'kde': '(False)', 'color': '"""darkgreen"""', 'bins': '(30)'}), "(gross, kde=False, color='darkgreen', bins=30)\n", (3187, 3233), True, 'import seaborn as sns\n'), ((3242, 3302), 'matplotlib.pyplot.title', 'plt.title', (['"""Worldwide Gross"""'], {'color': '"""darkgreen"""', 'fontsize': '(18)'}), "('Worldwide Gross', color='darkgreen', fontsize=18)\n", (3251, 3302), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3341), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dollars"""'], {'fontsize': '(14)'}), "('Dollars', fontsize=14)\n", (3317, 3341), True, 'import matplotlib.pyplot as plt\n'), ((3344, 3387), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Movies"""'], {'fontsize': '(14)'}), "('Number of Movies', fontsize=14)\n", (3354, 3387), True, 'import matplotlib.pyplot as plt\n'), ((3391, 3434), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""WorldwideGross_Histogram.png"""'], {}), 
"('WorldwideGross_Histogram.png')\n", (3402, 3434), True, 'import matplotlib.pyplot as plt\n'), ((4173, 4200), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (4183, 4200), True, 'import matplotlib.pyplot as plt\n'), ((4201, 4255), 'seaborn.distplot', 'sns.distplot', (['rtr'], {'kde': '(False)', 'color': '"""darkred"""', 'bins': '(10)'}), "(rtr, kde=False, color='darkred', bins=10)\n", (4213, 4255), True, 'import seaborn as sns\n'), ((4261, 4326), 'matplotlib.pyplot.title', 'plt.title', (['"""Rotten Tomatoes Rating"""'], {'color': '"""darkred"""', 'fontsize': '(18)'}), "('Rotten Tomatoes Rating', color='darkred', fontsize=18)\n", (4270, 4326), True, 'import matplotlib.pyplot as plt\n'), ((4330, 4363), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rating"""'], {'fontsize': '(14)'}), "('Rating', fontsize=14)\n", (4340, 4363), True, 'import matplotlib.pyplot as plt\n'), ((4366, 4409), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Movies"""'], {'fontsize': '(14)'}), "('Number of Movies', fontsize=14)\n", (4376, 4409), True, 'import matplotlib.pyplot as plt\n'), ((4431, 4475), 'matplotlib.pyplot.axvline', 'plt.axvline', (['mean'], {'color': '"""r"""', 'linestyle': '"""--"""'}), "(mean, color='r', linestyle='--')\n", (4442, 4475), True, 'import matplotlib.pyplot as plt\n'), ((4476, 4502), 'matplotlib.pyplot.legend', 'plt.legend', (["{'Mean': mean}"], {}), "({'Mean': mean})\n", (4486, 4502), True, 'import matplotlib.pyplot as plt\n'), ((4503, 4552), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""RottenTomatoesRating_Histogram.png"""'], {}), "('RottenTomatoesRating_Histogram.png')\n", (4514, 4552), True, 'import matplotlib.pyplot as plt\n'), ((5034, 5061), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (5044, 5061), True, 'import matplotlib.pyplot as plt\n'), ((5062, 5119), 'seaborn.distplot', 'sns.distplot', (['imdbr'], {'kde': '(False)', 'color': 
'"""goldenrod"""', 'bins': '(8)'}), "(imdbr, kde=False, color='goldenrod', bins=8)\n", (5074, 5119), True, 'import seaborn as sns\n'), ((5125, 5140), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(10)'], {}), '(0, 10)\n', (5133, 5140), True, 'import matplotlib.pyplot as plt\n'), ((5141, 5197), 'matplotlib.pyplot.title', 'plt.title', (['"""IMDB Rating"""'], {'color': '"""goldenrod"""', 'fontsize': '(18)'}), "('IMDB Rating', color='goldenrod', fontsize=18)\n", (5150, 5197), True, 'import matplotlib.pyplot as plt\n'), ((5201, 5234), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rating"""'], {'fontsize': '(14)'}), "('Rating', fontsize=14)\n", (5211, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5237, 5280), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Movies"""'], {'fontsize': '(14)'}), "('Number of Movies', fontsize=14)\n", (5247, 5280), True, 'import matplotlib.pyplot as plt\n'), ((5302, 5346), 'matplotlib.pyplot.axvline', 'plt.axvline', (['mean'], {'color': '"""r"""', 'linestyle': '"""--"""'}), "(mean, color='r', linestyle='--')\n", (5313, 5346), True, 'import matplotlib.pyplot as plt\n'), ((5347, 5373), 'matplotlib.pyplot.legend', 'plt.legend', (["{'Mean': mean}"], {}), "({'Mean': mean})\n", (5357, 5373), True, 'import matplotlib.pyplot as plt\n'), ((5374, 5413), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""IMDBRating_Histogram.png"""'], {}), "('IMDBRating_Histogram.png')\n", (5385, 5413), True, 'import matplotlib.pyplot as plt\n'), ((6079, 6106), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (6089, 6106), True, 'import matplotlib.pyplot as plt\n'), ((6107, 6161), 'seaborn.distplot', 'sns.distplot', (['imdbv'], {'kde': '(False)', 'color': '"""black"""', 'bins': '(25)'}), "(imdbv, kde=False, color='black', bins=25)\n", (6119, 6161), True, 'import seaborn as sns\n'), ((6169, 6220), 'matplotlib.pyplot.title', 'plt.title', (['"""IMDB Votes"""'], {'color': '"""black"""', 'fontsize': '(18)'}), 
"('IMDB Votes', color='black', fontsize=18)\n", (6178, 6220), True, 'import matplotlib.pyplot as plt\n'), ((6224, 6266), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Votes"""'], {'fontsize': '(14)'}), "('Number of Votes', fontsize=14)\n", (6234, 6266), True, 'import matplotlib.pyplot as plt\n'), ((6269, 6312), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Movies"""'], {'fontsize': '(14)'}), "('Number of Movies', fontsize=14)\n", (6279, 6312), True, 'import matplotlib.pyplot as plt\n'), ((6336, 6380), 'matplotlib.pyplot.axvline', 'plt.axvline', (['mean'], {'color': '"""r"""', 'linestyle': '"""--"""'}), "(mean, color='r', linestyle='--')\n", (6347, 6380), True, 'import matplotlib.pyplot as plt\n'), ((6381, 6407), 'matplotlib.pyplot.legend', 'plt.legend', (["{'Mean': mean}"], {}), "({'Mean': mean})\n", (6391, 6407), True, 'import matplotlib.pyplot as plt\n'), ((6408, 6446), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""IMDBVotes_Histogram.png"""'], {}), "('IMDBVotes_Histogram.png')\n", (6419, 6446), True, 'import matplotlib.pyplot as plt\n'), ((7250, 7277), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (7260, 7277), True, 'import matplotlib.pyplot as plt\n'), ((7279, 7365), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Number of Movies"""', 'y': '"""Genre"""', 'saturation': '(1)', 'data': 'genre_numbers_df'}), "(x='Number of Movies', y='Genre', saturation=1, data=\n genre_numbers_df)\n", (7290, 7365), True, 'import seaborn as sns\n'), ((7370, 7422), 'matplotlib.pyplot.title', 'plt.title', (['"""Major Genre"""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('Major Genre', color='black', fontsize=18)\n", (7379, 7422), True, 'import matplotlib.pyplot as plt\n'), ((7428, 7470), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""num_movie_genre_barplot.png"""'], {}), "('num_movie_genre_barplot.png')\n", (7439, 7470), True, 'import matplotlib.pyplot as plt\n'), ((8054, 8081), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (8064, 8081), True, 'import matplotlib.pyplot as plt\n'), ((8082, 8178), 'seaborn.distplot', 'sns.distplot', (["gross_votes_df['Gross']"], {'kde': '(False)', 'color': '"""darkgreen"""', 'label': '"""Gross"""', 'bins': '(50)'}), "(gross_votes_df['Gross'], kde=False, color='darkgreen', label=\n 'Gross', bins=50)\n", (8094, 8178), True, 'import seaborn as sns\n'), ((8183, 8200), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (8193, 8200), True, 'import matplotlib.pyplot as plt\n'), ((8201, 8218), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (8211, 8218), True, 'import matplotlib.pyplot as plt\n'), ((8220, 8281), 'matplotlib.pyplot.title', 'plt.title', (['"""Worldwide Gross exp."""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('Worldwide Gross exp.', color='black', fontsize=18)\n", (8229, 8281), True, 'import matplotlib.pyplot as plt\n'), ((8285, 8320), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dollars """'], {'fontsize': '(14)'}), "('Dollars ', fontsize=14)\n", (8295, 8320), True, 'import matplotlib.pyplot as plt\n'), ((8323, 8366), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Movies"""'], {'fontsize': '(14)'}), "('Number of Movies', fontsize=14)\n", (8333, 8366), True, 'import matplotlib.pyplot as plt\n'), ((8370, 8407), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Grosslog_Histogram.png"""'], {}), "('Grosslog_Histogram.png')\n", (8381, 8407), True, 'import matplotlib.pyplot as plt\n'), ((9053, 9080), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (9063, 9080), True, 'import matplotlib.pyplot as plt\n'), ((9081, 9181), 'seaborn.distplot', 'sns.distplot', (["gross_votes_df['IMDBVotes']"], {'kde': '(False)', 'color': '"""black"""', 'label': '"""IMDBVotes"""', 'bins': '(50)'}), "(gross_votes_df['IMDBVotes'], kde=False, color='black', 
label=\n 'IMDBVotes', bins=50)\n", (9093, 9181), True, 'import seaborn as sns\n'), ((9187, 9204), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (9197, 9204), True, 'import matplotlib.pyplot as plt\n'), ((9205, 9222), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (9215, 9222), True, 'import matplotlib.pyplot as plt\n'), ((9224, 9280), 'matplotlib.pyplot.title', 'plt.title', (['"""IMDB Votes exp."""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('IMDB Votes exp.', color='black', fontsize=18)\n", (9233, 9280), True, 'import matplotlib.pyplot as plt\n'), ((9284, 9317), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Votes """'], {'fontsize': '(14)'}), "('Votes ', fontsize=14)\n", (9294, 9317), True, 'import matplotlib.pyplot as plt\n'), ((9320, 9363), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Movies"""'], {'fontsize': '(14)'}), "('Number of Movies', fontsize=14)\n", (9330, 9363), True, 'import matplotlib.pyplot as plt\n'), ((9367, 9404), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Voteslog_Histogram.png"""'], {}), "('Voteslog_Histogram.png')\n", (9378, 9404), True, 'import matplotlib.pyplot as plt\n'), ((10023, 10050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (10033, 10050), True, 'import matplotlib.pyplot as plt\n'), ((10051, 10141), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""IMDBVotes"""', 'y': '"""Gross"""', 'data': 'gross_votes_df', 'facecolor': '"""goldenrod"""'}), "(x='IMDBVotes', y='Gross', data=gross_votes_df, facecolor=\n 'goldenrod')\n", (10066, 10141), True, 'import seaborn as sns\n'), ((10147, 10164), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (10157, 10164), True, 'import matplotlib.pyplot as plt\n'), ((10165, 10182), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (10175, 10182), True, 'import matplotlib.pyplot as plt\n'), ((10183, 10195), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['(20)'], {}), '(20)\n', (10191, 10195), True, 'import matplotlib.pyplot as plt\n'), ((10197, 10266), 'matplotlib.pyplot.title', 'plt.title', (['"""IMDB Votes - Worldwide Gross"""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('IMDB Votes - Worldwide Gross', color='black', fontsize=18)\n", (10206, 10266), True, 'import matplotlib.pyplot as plt\n'), ((10270, 10302), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Votes"""'], {'fontsize': '(14)'}), "('Votes', fontsize=14)\n", (10280, 10302), True, 'import matplotlib.pyplot as plt\n'), ((10305, 10341), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gross ($)"""'], {'fontsize': '(14)'}), "('Gross ($)', fontsize=14)\n", (10315, 10341), True, 'import matplotlib.pyplot as plt\n'), ((10345, 10389), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""VotesGrosslog_Scatterplot.png"""'], {}), "('VotesGrosslog_Scatterplot.png')\n", (10356, 10389), True, 'import matplotlib.pyplot as plt\n'), ((10723, 10750), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (10733, 10750), True, 'import matplotlib.pyplot as plt\n'), ((10751, 10841), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""IMDBVotes"""', 'y': '"""Gross"""', 'data': 'gross_votes_df', 'facecolor': '"""goldenrod"""'}), "(x='IMDBVotes', y='Gross', data=gross_votes_df, facecolor=\n 'goldenrod')\n", (10766, 10841), True, 'import seaborn as sns\n'), ((10847, 10916), 'matplotlib.pyplot.title', 'plt.title', (['"""IMDB Votes - Worldwide Gross"""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('IMDB Votes - Worldwide Gross', color='black', fontsize=18)\n", (10856, 10916), True, 'import matplotlib.pyplot as plt\n'), ((10920, 10952), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Votes"""'], {'fontsize': '(14)'}), "('Votes', fontsize=14)\n", (10930, 10952), True, 'import matplotlib.pyplot as plt\n'), ((10955, 10991), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gross ($)"""'], 
{'fontsize': '(14)'}), "('Gross ($)', fontsize=14)\n", (10965, 10991), True, 'import matplotlib.pyplot as plt\n'), ((10995, 11036), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""VotesGross_Scatterplot.png"""'], {}), "('VotesGross_Scatterplot.png')\n", (11006, 11036), True, 'import matplotlib.pyplot as plt\n'), ((11845, 11950), 'statsmodels.stats.weightstats.ztest', 'stests.ztest', (["gross_votes_df['IMDBVotes']"], {'x2': "gross_votes_df['Gross']", 'value': '(0)', 'alternative': '"""larger"""'}), "(gross_votes_df['IMDBVotes'], x2=gross_votes_df['Gross'], value\n =0, alternative='larger')\n", (11857, 11950), True, 'from statsmodels.stats import weightstats as stests\n'), ((12391, 12418), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (12401, 12418), True, 'import matplotlib.pyplot as plt\n'), ((12419, 12522), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""RTRating"""', 'y': '"""IMDBRating"""', 'data': 'rtr_imdbr_df', 'facecolor': '"""m"""', 'edgecolor': '"""black"""'}), "(x='RTRating', y='IMDBRating', data=rtr_imdbr_df, facecolor=\n 'm', edgecolor='black')\n", (12434, 12522), True, 'import seaborn as sns\n'), ((12527, 12604), 'matplotlib.pyplot.title', 'plt.title', (['"""Rotten Tomatoes Rating - IMDB Rating"""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('Rotten Tomatoes Rating - IMDB Rating', color='black', fontsize=18)\n", (12536, 12604), True, 'import matplotlib.pyplot as plt\n'), ((12608, 12623), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(10)'], {}), '(0, 10)\n', (12616, 12623), True, 'import matplotlib.pyplot as plt\n'), ((12623, 12638), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(10)'], {}), '(0, 10)\n', (12631, 12638), True, 'import matplotlib.pyplot as plt\n'), ((12638, 12687), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rotten Tomatoes Rating"""'], {'fontsize': '(14)'}), "('Rotten Tomatoes Rating', fontsize=14)\n", (12648, 12687), True, 'import matplotlib.pyplot as plt\n'), 
((12690, 12728), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""IMDB Rating"""'], {'fontsize': '(14)'}), "('IMDB Rating', fontsize=14)\n", (12700, 12728), True, 'import matplotlib.pyplot as plt\n'), ((12732, 12779), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""RTR_IMDB_Ratings_Scatterplot.png"""'], {}), "('RTR_IMDB_Ratings_Scatterplot.png')\n", (12743, 12779), True, 'import matplotlib.pyplot as plt\n'), ((13809, 13917), 'statsmodels.stats.weightstats.ztest', 'stests.ztest', (["rtr_imdbr_df['RTRating']"], {'x2': "rtr_imdbr_df['IMDBRating']", 'value': '(0)', 'alternative': '"""two-sided"""'}), "(rtr_imdbr_df['RTRating'], x2=rtr_imdbr_df['IMDBRating'], value\n =0, alternative='two-sided')\n", (13821, 13917), True, 'from statsmodels.stats import weightstats as stests\n'), ((14204, 14309), 'statsmodels.stats.weightstats.ztest', 'stests.ztest', (["rtr_imdbr_df['RTRating']"], {'x2': "rtr_imdbr_df['IMDBRating']", 'value': '(0)', 'alternative': '"""larger"""'}), "(rtr_imdbr_df['RTRating'], x2=rtr_imdbr_df['IMDBRating'], value\n =0, alternative='larger')\n", (14216, 14309), True, 'from statsmodels.stats import weightstats as stests\n'), ((14596, 14621), 'pandas.DataFrame', 'pd.DataFrame', (["df['Gross']"], {}), "(df['Gross'])\n", (14608, 14621), True, 'import pandas as pd\n'), ((16188, 16206), 'numpy.array', 'np.array', (['conf_int'], {}), '(conf_int)\n', (16196, 16206), True, 'import numpy as np\n'), ((16851, 16878), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (16861, 16878), True, 'import matplotlib.pyplot as plt\n'), ((16884, 16946), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Mean Gross"""', 'y': '"""Genre"""', 'data': 'm_gross_genre1_df'}), "(x='Mean Gross', y='Genre', data=m_gross_genre1_df)\n", (16895, 16946), True, 'import seaborn as sns\n'), ((16954, 17126), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': '"""Mean Gross"""', 'y': '"""Genre"""', 'xerr': "[m_gross_genre1_df['Bottom Error Ci'], 
m_gross_genre1_df['Top Error Ci']]", 'data': 'm_gross_genre1_df', 'fmt': '"""o"""', 'c': '"""navy"""'}), "(x='Mean Gross', y='Genre', xerr=[m_gross_genre1_df[\n 'Bottom Error Ci'], m_gross_genre1_df['Top Error Ci']], data=\n m_gross_genre1_df, fmt='o', c='navy')\n", (16966, 17126), True, 'import matplotlib.pyplot as plt\n'), ((17130, 17201), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Worldwide Gross per Genre"""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('Mean Worldwide Gross per Genre', color='black', fontsize=18)\n", (17139, 17201), True, 'import matplotlib.pyplot as plt\n'), ((17206, 17253), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mean Worldwide Gross"""'], {'fontsize': '(14)'}), "('Mean Worldwide Gross', fontsize=14)\n", (17216, 17253), True, 'import matplotlib.pyplot as plt\n'), ((17256, 17288), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Genre"""'], {'fontsize': '(14)'}), "('Genre', fontsize=14)\n", (17266, 17288), True, 'import matplotlib.pyplot as plt\n'), ((17292, 17335), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""mean_gross_genre_barplot.png"""'], {}), "('mean_gross_genre_barplot.png')\n", (17303, 17335), True, 'import matplotlib.pyplot as plt\n'), ((18245, 18308), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["adv['Gross']", "action['Gross']"], {'equal_var': '(False)'}), "(adv['Gross'], action['Gross'], equal_var=False)\n", (18260, 18308), False, 'from scipy import stats\n'), ((18799, 18864), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["comedy['Gross']", "drama['Gross']"], {'equal_var': '(False)'}), "(comedy['Gross'], drama['Gross'], equal_var=False)\n", (18814, 18864), False, 'from scipy import stats\n'), ((19360, 19426), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (["drama['Gross']", "western['Gross']"], {'equal_var': '(False)'}), "(drama['Gross'], western['Gross'], equal_var=False)\n", (19375, 19426), False, 'from scipy import stats\n'), ((19733, 19760), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 
'(10, 5)'}), '(figsize=(10, 5))\n', (19743, 19760), True, 'import matplotlib.pyplot as plt\n'), ((19761, 19869), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""Budget"""', 'y': '"""Gross"""', 'data': 'budget_gross_df', 'facecolor': '"""skyblue"""', 'edgecolor': '"""darkblue"""'}), "(x='Budget', y='Gross', data=budget_gross_df, facecolor=\n 'skyblue', edgecolor='darkblue')\n", (19776, 19869), True, 'import seaborn as sns\n'), ((19873, 19949), 'matplotlib.pyplot.title', 'plt.title', (['"""Production Budget - Worldwide Gross"""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('Production Budget - Worldwide Gross', color='black', fontsize=18)\n", (19882, 19949), True, 'import matplotlib.pyplot as plt\n'), ((19954, 19998), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Production Budget"""'], {'fontsize': '(14)'}), "('Production Budget', fontsize=14)\n", (19964, 19998), True, 'import matplotlib.pyplot as plt\n'), ((20001, 20043), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Worldwide Gross"""'], {'fontsize': '(14)'}), "('Worldwide Gross', fontsize=14)\n", (20011, 20043), True, 'import matplotlib.pyplot as plt\n'), ((20047, 20090), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Budget_Gross_Scatterplot.png"""'], {}), "('Budget_Gross_Scatterplot.png')\n", (20058, 20090), True, 'import matplotlib.pyplot as plt\n'), ((20860, 20964), 'statsmodels.stats.weightstats.ztest', 'stests.ztest', (["budget_gross_df['Gross']"], {'x2': "budget_gross_df['Budget']", 'value': '(0)', 'alternative': '"""larger"""'}), "(budget_gross_df['Gross'], x2=budget_gross_df['Budget'], value=\n 0, alternative='larger')\n", (20872, 20964), True, 'from statsmodels.stats import weightstats as stests\n'), ((21670, 21759), 'pandas.melt', 'pd.melt', (['ratings_dates_df'], {'id_vars': '"""Decade"""', 'var_name': '"""Website"""', 'value_name': '"""Rating"""'}), "(ratings_dates_df, id_vars='Decade', var_name='Website', value_name=\n 'Rating')\n", (21677, 21759), True, 'import pandas as 
pd\n'), ((21861, 21886), 'seaborn.color_palette', 'sns.color_palette', (['colors'], {}), '(colors)\n', (21878, 21886), True, 'import seaborn as sns\n'), ((21887, 21993), 'seaborn.catplot', 'sns.catplot', ([], {'x': '"""Decade"""', 'y': '"""Rating"""', 'hue': '"""Website"""', 'data': 'ratings_dates_df', 'palette': 'palette', 'kind': '"""bar"""'}), "(x='Decade', y='Rating', hue='Website', data=ratings_dates_df,\n palette=palette, kind='bar')\n", (21898, 21993), True, 'import seaborn as sns\n'), ((22003, 22067), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Ratings per Decade"""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('Mean Ratings per Decade', color='black', fontsize=18)\n", (22012, 22067), True, 'import matplotlib.pyplot as plt\n'), ((22073, 22114), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ratings_decade_catplot.png"""'], {}), "('ratings_decade_catplot.png')\n", (22084, 22114), True, 'import matplotlib.pyplot as plt\n'), ((22707, 22734), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (22717, 22734), True, 'import matplotlib.pyplot as plt\n'), ((22775, 22800), 'seaborn.color_palette', 'sns.color_palette', (['colors'], {}), '(colors)\n', (22792, 22800), True, 'import seaborn as sns\n'), ((22801, 22911), 'seaborn.pointplot', 'sns.pointplot', ([], {'x': '"""Decade"""', 'y': '"""Rating"""', 'hue': '"""Website"""', 'data': 'ratings_dates_df', 'palette': 'palette', 'kind': '"""point"""'}), "(x='Decade', y='Rating', hue='Website', data=ratings_dates_df,\n palette=palette, kind='point')\n", (22814, 22911), True, 'import seaborn as sns\n'), ((22921, 22985), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Ratings per Decade"""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('Mean Ratings per Decade', color='black', fontsize=18)\n", (22930, 22985), True, 'import matplotlib.pyplot as plt\n'), ((22991, 23034), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ratings_decade_pointplot.png"""'], {}), 
"('ratings_decade_pointplot.png')\n", (23002, 23034), True, 'import matplotlib.pyplot as plt\n'), ((23319, 23346), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (23329, 23346), True, 'import matplotlib.pyplot as plt\n'), ((23428, 23492), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Ratings per Decade"""'], {'color': '"""black"""', 'fontsize': '(18)'}), "('Mean Ratings per Decade', color='black', fontsize=18)\n", (23437, 23492), True, 'import matplotlib.pyplot as plt\n'), ((23498, 23539), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ratings_decade_heatmap.png"""'], {}), "('ratings_decade_heatmap.png')\n", (23509, 23539), True, 'import matplotlib.pyplot as plt\n'), ((7920, 7939), 'pandas.DataFrame', 'pd.DataFrame', (['gross'], {}), '(gross)\n', (7932, 7939), True, 'import pandas as pd\n'), ((7941, 7960), 'pandas.DataFrame', 'pd.DataFrame', (['imdbv'], {}), '(imdbv)\n', (7953, 7960), True, 'import pandas as pd\n'), ((12249, 12266), 'pandas.DataFrame', 'pd.DataFrame', (['rtr'], {}), '(rtr)\n', (12261, 12266), True, 'import pandas as pd\n'), ((12268, 12287), 'pandas.DataFrame', 'pd.DataFrame', (['imdbr'], {}), '(imdbr)\n', (12280, 12287), True, 'import pandas as pd\n')] |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties
# Single-letter matplotlib color codes; presumably intended for per-plot styling.
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
# Shared module-level figure/axes used by every helper and by EvaluationPlotter.
fig, ax = plt.subplots(figsize=(8, 8))
# Width of one bar in the grouped bar chart (two bars per evaluation method).
bar_width = 0.35
def create_figure(title):
    """Label the shared figure with the classification title and show it non-blocking."""
    fig.suptitle(f'Classification: {title}')
    plt.show(block=False)
def create_plots(xlabels):
    """Create two zero-height bar series (one per accuracy metric) on the shared axes.

    Returns the pair of bar containers so callers can update bar heights later.
    """
    count = len(xlabels)
    positions = np.arange(count)
    bars_primary = ax.bar(positions, [0] * count, bar_width, color='b')
    bars_overall = ax.bar(positions + bar_width, [0] * count, bar_width, color='g')

    small_font = FontProperties()
    small_font.set_size('small')

    # Center each tick between the two bars of its group.
    ax.set_xticks(positions + bar_width / 2)
    ax.set_xticklabels(xlabels)
    ax.set_xlabel('Evaluation methods')
    ax.set_ylim([0, 1])
    ax.set_ylabel('Average accuracy')
    ax.legend(
        (bars_primary[0], bars_overall[0]),
        ('Accuracy if classifiable', 'Overall accuracy'),
        prop=small_font,
        loc='upper center',
        bbox_to_anchor=(0.5, -0.1),
        ncol=2,
    )
    return bars_primary, bars_overall
def update_figure(num_processed, num_total, num_classified):
    """Refresh the progress title and redraw the shared figure without blocking."""
    update_title(num_processed, num_total, num_classified)
    fig.canvas.draw_idle()
    try:
        fig.canvas.flush_events()
    except NotImplementedError:
        # Non-interactive backends may not implement event flushing; ignore.
        pass
def update_title(num_processed, num_total, num_classified):
    """Render the evaluation progress counters into the axes title.

    Shows processed/total counts and the fraction of processed items that
    received a classification.
    """
    # Percentage of processed items that were classifiable at all.
    percent_classified = 100 * num_classified / num_processed
    title = """
        after {num_processed}/{num_total} processed items
        {num_classified}/{num_processed} ({percent_classified}) items classified
        """.format(num_classified=num_classified,
                   num_processed=num_processed,
                   num_total=num_total,
                   percent_classified="{:1.2f}%".format(percent_classified)
                   )
    ax.set_title(title)
class EvaluationPlotter(object):
    """Live bar-chart display of per-scorer accuracies during an evaluation run.

    Maintains two bars per scorer (accuracy-if-classifiable and overall
    accuracy) plus a numeric text label above each bar.
    """

    def __init__(self, label, scorers):
        self.scorers = scorers
        xlabels = [scorer.label for scorer in scorers]
        create_figure(label)
        num_plots = len(xlabels)
        self.plots1, self.plots2 = create_plots(xlabels)
        # Two label slots per scorer: slot 2*k -> plots1[k], slot 2*k+1 -> plots2[k].
        self.labels = [0] * num_plots * 2

    def update_plots(self, num_total, num_processed, num_classified):
        """Refresh bar heights and their text labels from the scorers' current values."""
        update_figure(num_processed, num_total, num_classified)
        for i, scorer in enumerate(self.scorers):
            acc = scorer.accuracy
            overall_acc = scorer.overall_accuracy
            self.plots1[i].set_height(acc)
            self.plots2[i].set_height(overall_acc)
            # BUGFIX: use disjoint label slots 2*i and 2*i+1.  The previous
            # i / i+1 indexing collided between consecutive scorers, so label
            # text objects were overwritten without being removed, leaving
            # stale numbers on the figure.
            self.update_label(2 * i, acc)
            self.update_label(2 * i + 1, overall_acc)

    def update_label(self, i, height):
        """Place (or replace) the numeric label stored in label slot *i*.

        Slot 2*k belongs to scorer k's first bar, slot 2*k+1 to its second.
        """
        if self.labels[i]:
            self.labels[i].remove()
        # Bars are drawn centered at x=k (first series) and x=k+bar_width
        # (second series) in create_plots, so derive the bar center from the
        # slot index.  The old formula (i + (i % 2) * 0.5 * bar_width) did not
        # match the actual bar positions.
        x = i // 2 + (i % 2) * bar_width
        y = 1.01 * height
        text = str("{:1.4f}".format(height))
        self.labels[i] = ax.text(x, y, text, ha='center', va='bottom', color='black', fontsize=10)
| [
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.show"
] | [((163, 191), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (175, 191), True, 'import matplotlib.pyplot as plt\n'), ((286, 307), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (294, 307), True, 'import matplotlib.pyplot as plt\n'), ((376, 396), 'numpy.arange', 'np.arange', (['num_plots'], {}), '(num_plots)\n', (385, 396), True, 'import numpy as np\n'), ((550, 566), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (564, 566), False, 'from matplotlib.font_manager import FontProperties\n')] |
"""
Testing parallel evaluations.
"""
import sys
import numpy as np
import pytest
import qibo
from qibo import gates
from qibo.models import Circuit, QFT
from qibo.parallel import parallel_parametrized_execution, parallel_execution
def is_parallel_supported(backend_name):  # pragma: no cover
    """Return True when the current device, backend and platform allow parallel runs."""
    on_gpu = "GPU" in qibo.get_device()
    unsupported_backend = backend_name in ("tensorflow", "qibojit")
    unsupported_platform = sys.platform in ("darwin", "win32")
    return not (on_gpu or unsupported_backend or unsupported_platform)
def test_parallel_circuit_evaluation(backend, skip_parallel):  # pragma: no cover
    """Check that parallel execution of a QFT over several states matches serial runs."""
    device = qibo.get_device()
    backend_name = qibo.get_backend()
    if skip_parallel:
        pytest.skip("Skipping parallel test.")
    if not is_parallel_supported(backend_name):
        pytest.skip("Skipping parallel test due to unsupported configuration.")
    original_threads = qibo.get_threads()
    qibo.set_threads(1)

    nqubits = 10
    np.random.seed(0)
    circuit = QFT(nqubits)
    states = [np.random.random(2**nqubits) for _ in range(5)]

    # Serial reference results, one circuit evaluation per input state.
    sequential = [circuit(state) for state in states]
    parallel = parallel_execution(circuit, states=states, processes=2)
    np.testing.assert_allclose(sequential, parallel)

    qibo.set_threads(original_threads)
def test_parallel_parametrized_circuit(backend, skip_parallel):  # pragma: no cover
    """Check parallel parametrized execution against serial set_parameters runs."""
    device = qibo.get_device()
    backend_name = qibo.get_backend()
    if skip_parallel:
        pytest.skip("Skipping parallel test.")
    if not is_parallel_supported(backend_name):
        pytest.skip("Skipping parallel test due to unsupported configuration.")
    original_threads = qibo.get_threads()
    qibo.set_threads(1)

    nqubits = 5
    nlayers = 10
    circuit = Circuit(nqubits)
    # Hardware-efficient ansatz: alternating RY rotations and CZ entanglers.
    for _ in range(nlayers):
        circuit.add(gates.RY(q, theta=0) for q in range(nqubits))
        circuit.add(gates.CZ(q, q + 1) for q in range(0, nqubits - 1, 2))
        circuit.add(gates.RY(q, theta=0) for q in range(nqubits))
        circuit.add(gates.CZ(q, q + 1) for q in range(1, nqubits - 2, 2))
    circuit.add(gates.CZ(0, nqubits - 1))
    circuit.add(gates.RY(q, theta=0) for q in range(nqubits))

    size = len(circuit.get_parameters())
    np.random.seed(0)
    parameters = [np.random.uniform(0, 2 * np.pi, size) for _ in range(10)]
    state = None

    # Serial reference: reset the parameters and evaluate once per parameter set.
    sequential = []
    for params in parameters:
        circuit.set_parameters(params)
        sequential.append(circuit(state))
    parallel = parallel_parametrized_execution(
        circuit, parameters=parameters, initial_state=state, processes=2)
    np.testing.assert_allclose(sequential, parallel)

    qibo.set_threads(original_threads)
| [
"numpy.random.uniform",
"numpy.random.seed",
"qibo.get_threads",
"qibo.gates.CZ",
"qibo.set_threads",
"qibo.get_device",
"qibo.get_backend",
"qibo.models.Circuit",
"qibo.parallel.parallel_parametrized_execution",
"pytest.skip",
"qibo.models.QFT",
"numpy.random.random",
"qibo.gates.RY",
"nu... | [((654, 671), 'qibo.get_device', 'qibo.get_device', ([], {}), '()\n', (669, 671), False, 'import qibo\n'), ((691, 709), 'qibo.get_backend', 'qibo.get_backend', ([], {}), '()\n', (707, 709), False, 'import qibo\n'), ((930, 948), 'qibo.get_threads', 'qibo.get_threads', ([], {}), '()\n', (946, 948), False, 'import qibo\n'), ((953, 972), 'qibo.set_threads', 'qibo.set_threads', (['(1)'], {}), '(1)\n', (969, 972), False, 'import qibo\n'), ((995, 1012), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1009, 1012), True, 'import numpy as np\n'), ((1021, 1033), 'qibo.models.QFT', 'QFT', (['nqubits'], {}), '(nqubits)\n', (1024, 1033), False, 'from qibo.models import Circuit, QFT\n'), ((1173, 1222), 'qibo.parallel.parallel_execution', 'parallel_execution', (['c'], {'states': 'states', 'processes': '(2)'}), '(c, states=states, processes=2)\n', (1191, 1222), False, 'from qibo.parallel import parallel_parametrized_execution, parallel_execution\n'), ((1227, 1261), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r1', 'r2'], {}), '(r1, r2)\n', (1253, 1261), True, 'import numpy as np\n'), ((1266, 1300), 'qibo.set_threads', 'qibo.set_threads', (['original_threads'], {}), '(original_threads)\n', (1282, 1300), False, 'import qibo\n'), ((1452, 1469), 'qibo.get_device', 'qibo.get_device', ([], {}), '()\n', (1467, 1469), False, 'import qibo\n'), ((1489, 1507), 'qibo.get_backend', 'qibo.get_backend', ([], {}), '()\n', (1505, 1507), False, 'import qibo\n'), ((1728, 1746), 'qibo.get_threads', 'qibo.get_threads', ([], {}), '()\n', (1744, 1746), False, 'import qibo\n'), ((1751, 1770), 'qibo.set_threads', 'qibo.set_threads', (['(1)'], {}), '(1)\n', (1767, 1770), False, 'import qibo\n'), ((1814, 1830), 'qibo.models.Circuit', 'Circuit', (['nqubits'], {}), '(nqubits)\n', (1821, 1830), False, 'from qibo.models import Circuit, QFT\n'), ((2252, 2269), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2266, 2269), True, 'import numpy as np\n'), 
((2475, 2571), 'qibo.parallel.parallel_parametrized_execution', 'parallel_parametrized_execution', (['c'], {'parameters': 'parameters', 'initial_state': 'state', 'processes': '(2)'}), '(c, parameters=parameters, initial_state=\n state, processes=2)\n', (2506, 2571), False, 'from qibo.parallel import parallel_parametrized_execution, parallel_execution\n'), ((2571, 2605), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r1', 'r2'], {}), '(r1, r2)\n', (2597, 2605), True, 'import numpy as np\n'), ((2610, 2644), 'qibo.set_threads', 'qibo.set_threads', (['original_threads'], {}), '(original_threads)\n', (2626, 2644), False, 'import qibo\n'), ((311, 328), 'qibo.get_device', 'qibo.get_device', ([], {}), '()\n', (326, 328), False, 'import qibo\n'), ((740, 778), 'pytest.skip', 'pytest.skip', (['"""Skipping parallel test."""'], {}), "('Skipping parallel test.')\n", (751, 778), False, 'import pytest\n'), ((835, 906), 'pytest.skip', 'pytest.skip', (['"""Skipping parallel test due to unsupported configuration."""'], {}), "('Skipping parallel test due to unsupported configuration.')\n", (846, 906), False, 'import pytest\n'), ((1049, 1079), 'numpy.random.random', 'np.random.random', (['(2 ** nqubits)'], {}), '(2 ** nqubits)\n', (1065, 1079), True, 'import numpy as np\n'), ((1538, 1576), 'pytest.skip', 'pytest.skip', (['"""Skipping parallel test."""'], {}), "('Skipping parallel test.')\n", (1549, 1576), False, 'import pytest\n'), ((1633, 1704), 'pytest.skip', 'pytest.skip', (['"""Skipping parallel test due to unsupported configuration."""'], {}), "('Skipping parallel test due to unsupported configuration.')\n", (1644, 1704), False, 'import pytest\n'), ((2288, 2325), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', 'size'], {}), '(0, 2 * np.pi, size)\n', (2305, 2325), True, 'import numpy as np\n'), ((2130, 2154), 'qibo.gates.CZ', 'gates.CZ', (['(0)', '(nqubits - 1)'], {}), '(0, nqubits - 1)\n', (2138, 2154), False, 'from qibo import gates\n'), 
((2165, 2185), 'qibo.gates.RY', 'gates.RY', (['q'], {'theta': '(0)'}), '(q, theta=0)\n', (2173, 2185), False, 'from qibo import gates\n'), ((1875, 1895), 'qibo.gates.RY', 'gates.RY', (['q'], {'theta': '(0)'}), '(q, theta=0)\n', (1883, 1895), False, 'from qibo import gates\n'), ((1937, 1955), 'qibo.gates.CZ', 'gates.CZ', (['q', '(q + 1)'], {}), '(q, q + 1)\n', (1945, 1955), False, 'from qibo import gates\n'), ((2003, 2023), 'qibo.gates.RY', 'gates.RY', (['q'], {'theta': '(0)'}), '(q, theta=0)\n', (2011, 2023), False, 'from qibo import gates\n'), ((2065, 2083), 'qibo.gates.CZ', 'gates.CZ', (['q', '(q + 1)'], {}), '(q, q + 1)\n', (2073, 2083), False, 'from qibo import gates\n')] |
from PIL import Image, ImageOps
import tensorflow as tf
import cv2
import numpy as np
import os
import sys
# Module-level state shared with the capture loop below.
label = ''   # NOTE(review): never assigned again below — appears unused; confirm before removing.
frame = None  # most recent resized camera frame
def import_and_predict(image_data, model):
    """Resize and normalize a PIL image, then run it through *model*.

    Args:
        image_data: PIL.Image to classify.
        model: Keras-style model exposing ``predict``.

    Returns:
        The raw prediction array for a batch of one image.
    """
    size = (75, 75)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    # and is reachable on both old (module attribute) and new
    # (Image.Resampling enum) Pillow versions.
    resample = getattr(Image, "Resampling", Image).LANCZOS
    image = ImageOps.fit(image_data, size, resample)
    image = image.convert('RGB')
    image = np.asarray(image)
    image = image.astype(np.float32) / 255.0   # scale pixels to [0, 1]
    img_reshape = image[np.newaxis, ...]       # add batch dimension
    prediction = model.predict(img_reshape)
    return prediction
# --- Live webcam classification loop --------------------------------------
model = tf.keras.models.load_model('CVA1210.hdf5')
cap = cv2.VideoCapture(0)
if cap.isOpened():
    print("Camera OK")
    print("No wife, friend only. ~ 白上フブキ")
else:
    # The original called cap.open() with no arguments, which raises a
    # TypeError; the device index is required.
    cap.open(0)
while True:
    ret, original = cap.read()
    frame = cv2.resize(original, (224, 224))
    # Round-trip the frame through a JPEG on disk so the PIL-based
    # predictor sees the file exactly as the model pipeline expects.
    cv2.imwrite(filename='img.jpg', img=original)
    # Display the predictions
    # print("ImageNet ID: {}, Label: {}".format(inID, label))
    with Image.open('img.jpg') as image:
        prediction = import_and_predict(image, model)
    # print(prediction)
    if np.argmax(prediction) == 0:
        predict = "Chinese Number 3"
    elif np.argmax(prediction) == 1:
        predict = "Chinese Number 4"
    else:
        predict = "Chinese Number 5"
    cv2.putText(original, predict, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    cv2.imshow("Classification", original)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
frame = None
cv2.destroyAllWindows()
sys.exit()
| [
"cv2.resize",
"tensorflow.keras.models.load_model",
"cv2.putText",
"PIL.ImageOps.fit",
"numpy.argmax",
"cv2.imwrite",
"numpy.asarray",
"cv2.waitKey",
"cv2.imshow",
"PIL.Image.open",
"cv2.VideoCapture",
"cv2.destroyAllWindows",
"sys.exit"
] | [((536, 578), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""CVA1210.hdf5"""'], {}), "('CVA1210.hdf5')\n", (562, 578), True, 'import tensorflow as tf\n'), ((591, 610), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (607, 610), False, 'import cv2\n'), ((1478, 1501), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1499, 1501), False, 'import cv2\n'), ((1502, 1512), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1510, 1512), False, 'import sys\n'), ((228, 275), 'PIL.ImageOps.fit', 'ImageOps.fit', (['image_data', 'size', 'Image.ANTIALIAS'], {}), '(image_data, size, Image.ANTIALIAS)\n', (240, 275), False, 'from PIL import Image, ImageOps\n'), ((329, 346), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (339, 346), True, 'import numpy as np\n'), ((779, 811), 'cv2.resize', 'cv2.resize', (['original', '(224, 224)'], {}), '(original, (224, 224))\n', (789, 811), False, 'import cv2\n'), ((816, 861), 'cv2.imwrite', 'cv2.imwrite', ([], {'filename': '"""img.jpg"""', 'img': 'original'}), "(filename='img.jpg', img=original)\n", (827, 861), False, 'import cv2\n'), ((874, 895), 'PIL.Image.open', 'Image.open', (['"""img.jpg"""'], {}), "('img.jpg')\n", (884, 895), False, 'from PIL import Image, ImageOps\n'), ((1259, 1350), 'cv2.putText', 'cv2.putText', (['original', 'predict', '(10, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', '(0, 255, 0)', '(2)'], {}), '(original, predict, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0,\n 255, 0), 2)\n', (1270, 1350), False, 'import cv2\n'), ((1351, 1389), 'cv2.imshow', 'cv2.imshow', (['"""Classification"""', 'original'], {}), "('Classification', original)\n", (1361, 1389), False, 'import cv2\n'), ((1070, 1091), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1079, 1091), True, 'import numpy as np\n'), ((1142, 1163), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1151, 1163), True, 'import numpy as np\n'), ((1399, 1413), 
'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1410, 1413), False, 'import cv2\n')] |
import glob
import numpy as np
from scipy.interpolate import PchipInterpolator
from matplotlib import pyplot as plt
# Apply the shared matplotlib template used across these figure scripts.
plt.style.use("../template.mplstyle")
# purple - green - darkgoldenrod - blue - red
colors = ['purple', '#306B37', 'darkgoldenrod', '#3F7BB6', '#BF4145']
# Dash patterns: (offset, (on, off, ...)) tuples as accepted by matplotlib's
# `ls`/`linestyle` keyword.
linestyles = [(0, (1,1.05)), (0, (3, 1, 1, 1)), (0, (1,3)), (0, (3,3.65)), (0, (3,2.772)), (0, (3, 1, 1, 1, 1, 1))]
#########################################################################################
def get_hist(data, num_bins=40, weights=None):
    """Histogram *data* and return (counts, bin_edges, bin_centres).

    Args:
        data: 1-D array-like of samples.
        num_bins: number of histogram bins.
        weights: optional per-sample weights; defaults to uniform weights.

    Returns:
        Tuple of the (possibly weighted) counts, the bin edges, and the
        midpoint of each bin.
    """
    # The old default of [None] combined with `if not any(weights)` also
    # clobbered an explicitly passed all-zero weight vector; testing
    # identity with None means only the missing-argument case gets the
    # uniform weights.
    if weights is None:
        weights = np.ones(len(data))
    hist, bin_edges = np.histogram(data, bins=num_bins, weights=weights)
    bin_centres = 0.5 * (bin_edges[1:] + bin_edges[:-1])
    return hist, bin_edges, bin_centres
###
# Load the IllustrisTNG scatter relations: each file is a (M_UV, sigma)
# table for one redshift, transposed by unpack=True.
TNG_z4 = np.loadtxt("IllustrisTNG_z4.txt", unpack=True)
TNG_z5 = np.loadtxt("IllustrisTNG_z5.txt", unpack=True)
TNG_z6 = np.loadtxt("IllustrisTNG_z6.txt", unpack=True)
# Stack every MCMC chain file into a single (samples, params) array.
chains = []
for filepath in glob.iglob('../../Data/UVLF_HST_ST_model2/*__*.txt'):
    data = np.loadtxt(filepath)
    chains.append(data)
chains = np.vstack(np.array(chains))
# Column 12 holds the parameter whose credible bounds we plot; column 0
# holds the per-sample weights.
data_for_lims = chains[:,12]
hist, bin_edges, bin_centres = get_hist(data_for_lims, num_bins=20, weights=chains[:,0])
# Build a smooth, monotone (PCHIP) density, normalise its cumulative sum
# into an empirical CDF, and read off the 68% / 95% quantiles.
xarray = np.linspace(min(bin_centres), max(bin_centres), 1000)
interpolator = PchipInterpolator(bin_centres, hist)(xarray)
A = np.cumsum(interpolator)/np.sum(interpolator)
# NOTE(review): the trailing * 0.4 rescales the quantile locations before
# plotting — presumably a unit/parameterisation conversion; confirm against
# the model definition.
bound68 = xarray[np.argmin(np.abs(A-0.68))] * 0.4
bound95 = xarray[np.argmin(np.abs(A-0.95))] * 0.4
# --- Figure: sigma_log10(M*) vs M_UV with HST credible-level lines -------
plt.figure(figsize=(8.,6.5))
ax = plt.subplot(111)
ax.tick_params(axis='x', which='major', pad=6)
plt.tick_params(axis='both', which='major', labelsize=25)
plt.tick_params(axis='both', which='minor', labelsize=25)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(2.2)
# Horizontal lines marking the 68% and 95% confidence bounds.
ax.axhline(bound68, color="black", lw=2.5, alpha=0.7)
ax.axhline(bound95, color="black", lw=2.5, alpha=0.7)
# IllustrisTNG relations at z = 4, 5, 6 with distinct colors/dash styles.
ax.plot(TNG_z4[0], TNG_z4[1], color=colors[1], ls=(0,(3,2,1.3,2)), lw=2.5, label=r"$z = 4$")
ax.plot(TNG_z5[0], TNG_z5[1], color=colors[-1], ls=(0,(3,1)), lw=2.5, label=r"$z = 5$")
ax.plot(TNG_z6[0], TNG_z6[1], color=colors[3], ls=(0,(1,1)), lw=2.5, label=r"$z = 6$")
# Annotate the two confidence-level lines.
ax.text(-17.95, 0.307, r'$\mathrm{HST\ 68\%\ CL}$', weight='bold', fontsize=20, color="black", alpha=0.75)
ax.text(-19.45, 0.391, r'$\mathrm{HST\ 95\%\ CL\ (This\ Work)}$', weight='bold', fontsize=20, color="black", alpha=0.75)
plt.xlabel(r"$M_\mathrm{UV}\ [\mathrm{mag}]$", labelpad=9, fontsize=27)
plt.ylabel(r"$\sigma_{\log_{10}(M_*)}$", labelpad=12, fontsize=29)
plt.axis(xmin=-22.8, xmax=-16.2, ymin=0., ymax=0.5)
leg = plt.legend(loc="lower left", frameon=False, markerfirst=True, prop={'size': 22}, handlelength=1.58, handletextpad=0.5, numpoints=1)
plt.savefig("Mstar_scatter.pdf")
"matplotlib.pyplot.subplot",
"scipy.interpolate.PchipInterpolator",
"numpy.sum",
"numpy.abs",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axis",
"numpy.cumsum",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.histogram",
"numpy.loadtxt",
"nump... | [((116, 153), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""../template.mplstyle"""'], {}), "('../template.mplstyle')\n", (129, 153), True, 'from matplotlib import pyplot as plt\n'), ((771, 817), 'numpy.loadtxt', 'np.loadtxt', (['"""IllustrisTNG_z4.txt"""'], {'unpack': '(True)'}), "('IllustrisTNG_z4.txt', unpack=True)\n", (781, 817), True, 'import numpy as np\n'), ((827, 873), 'numpy.loadtxt', 'np.loadtxt', (['"""IllustrisTNG_z5.txt"""'], {'unpack': '(True)'}), "('IllustrisTNG_z5.txt', unpack=True)\n", (837, 873), True, 'import numpy as np\n'), ((883, 929), 'numpy.loadtxt', 'np.loadtxt', (['"""IllustrisTNG_z6.txt"""'], {'unpack': '(True)'}), "('IllustrisTNG_z6.txt', unpack=True)\n", (893, 929), True, 'import numpy as np\n'), ((960, 1012), 'glob.iglob', 'glob.iglob', (['"""../../Data/UVLF_HST_ST_model2/*__*.txt"""'], {}), "('../../Data/UVLF_HST_ST_model2/*__*.txt')\n", (970, 1012), False, 'import glob\n'), ((1501, 1531), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8.0, 6.5)'}), '(figsize=(8.0, 6.5))\n', (1511, 1531), True, 'from matplotlib import pyplot as plt\n'), ((1535, 1551), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1546, 1551), True, 'from matplotlib import pyplot as plt\n'), ((1599, 1656), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(25)'}), "(axis='both', which='major', labelsize=25)\n", (1614, 1656), True, 'from matplotlib import pyplot as plt\n'), ((1657, 1714), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""minor"""', 'labelsize': '(25)'}), "(axis='both', which='minor', labelsize=25)\n", (1672, 1714), True, 'from matplotlib import pyplot as plt\n'), ((2407, 2480), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$M_\\\\mathrm{UV}\\\\ [\\\\mathrm{mag}]$"""'], {'labelpad': '(9)', 'fontsize': '(27)'}), "('$M_\\\\mathrm{UV}\\\\ [\\\\mathrm{mag}]$', labelpad=9, fontsize=27)\n", 
(2417, 2480), True, 'from matplotlib import pyplot as plt\n'), ((2479, 2546), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma_{\\\\log_{10}(M_*)}$"""'], {'labelpad': '(12)', 'fontsize': '(29)'}), "('$\\\\sigma_{\\\\log_{10}(M_*)}$', labelpad=12, fontsize=29)\n", (2489, 2546), True, 'from matplotlib import pyplot as plt\n'), ((2547, 2599), 'matplotlib.pyplot.axis', 'plt.axis', ([], {'xmin': '(-22.8)', 'xmax': '(-16.2)', 'ymin': '(0.0)', 'ymax': '(0.5)'}), '(xmin=-22.8, xmax=-16.2, ymin=0.0, ymax=0.5)\n', (2555, 2599), True, 'from matplotlib import pyplot as plt\n'), ((2606, 2741), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""', 'frameon': '(False)', 'markerfirst': '(True)', 'prop': "{'size': 22}", 'handlelength': '(1.58)', 'handletextpad': '(0.5)', 'numpoints': '(1)'}), "(loc='lower left', frameon=False, markerfirst=True, prop={'size':\n 22}, handlelength=1.58, handletextpad=0.5, numpoints=1)\n", (2616, 2741), True, 'from matplotlib import pyplot as plt\n'), ((2739, 2771), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Mstar_scatter.pdf"""'], {}), "('Mstar_scatter.pdf')\n", (2750, 2771), True, 'from matplotlib import pyplot as plt\n'), ((612, 662), 'numpy.histogram', 'np.histogram', (['data'], {'bins': 'num_bins', 'weights': 'weights'}), '(data, bins=num_bins, weights=weights)\n', (624, 662), True, 'import numpy as np\n'), ((1025, 1045), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {}), '(filepath)\n', (1035, 1045), True, 'import numpy as np\n'), ((1090, 1106), 'numpy.array', 'np.array', (['chains'], {}), '(chains)\n', (1098, 1106), True, 'import numpy as np\n'), ((1305, 1341), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['bin_centres', 'hist'], {}), '(bin_centres, hist)\n', (1322, 1341), False, 'from scipy.interpolate import PchipInterpolator\n'), ((1355, 1378), 'numpy.cumsum', 'np.cumsum', (['interpolator'], {}), '(interpolator)\n', (1364, 1378), True, 'import numpy as np\n'), ((1379, 1399), 'numpy.sum', 
'np.sum', (['interpolator'], {}), '(interpolator)\n', (1385, 1399), True, 'import numpy as np\n'), ((1427, 1443), 'numpy.abs', 'np.abs', (['(A - 0.68)'], {}), '(A - 0.68)\n', (1433, 1443), True, 'import numpy as np\n'), ((1477, 1493), 'numpy.abs', 'np.abs', (['(A - 0.95)'], {}), '(A - 0.95)\n', (1483, 1493), True, 'import numpy as np\n')] |
import itertools
import numpy as np
import pandas as pd
import os
import time
import random
import pickle
from ELM import ELMClassifier
import pickle
from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression
from NeuralNetwork import NeuralNetwork
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import log_loss
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import KFold
#######################
# Per-feature-set configuration: which CSV columns are features, the ELM
# hyper-parameters, the label column index, and the source CSV file.
features_check = {
    "base": {
        "features" : [1,2,3,4,5,6,7,8,9],
        "C" : 0.001,
        "n_hidden" : 50,
        "y_column_idx" : 10,
        "feature_file" : "../Datasets/features_extractions/base_(all).csv"
    },
    "base_robust": {
        "features" : [2,6,8,9],
        "C" : 0.001,
        "n_hidden" : 10,
        "y_column_idx" : 10,
        "feature_file" : "../Datasets/features_extractions/base_(all).csv"
    },
    "all": {
        "features" : [1,2,3,4,5,6,7,8,9,10,11,13,15],
        "C" : 50,
        "n_hidden" : 150,
        "y_column_idx" : 17,
        "feature_file" : "../Datasets/features_extractions/median_9_2_(25-75)_vt_include.csv"
    },
    "novel": {
        "features" : [10,11,13,15],
        "C" : 0.004,
        "n_hidden" : 50,
        "y_column_idx" : 17,
        "feature_file" : "../Datasets/features_extractions/median_9_2_(25-75)_vt_include.csv"
    },
    "hybrid_robust": {
        "features" : [2,6,8,9,10,11,13,15],
        "C" : 0.01,
        "n_hidden" : 100,
        "y_column_idx" : 17,
        "feature_file" : "../Datasets/features_extractions/median_9_2_(25-75)_vt_include.csv"
    }
}
############################################
features_to_check = ["base","base_robust","all","novel","hybrid_robust"]
threshold = 0.5
learning_rate = 0.001
n_splits = 10
test_size = 0.25
path = os.path.dirname(os.path.abspath(__file__))
features_file_name = "../Datasets/features_extractions/median_9_2_(75-25)_vt_include.csv"
features_file = os.path.join(path, features_file_name)
for features_set in features_to_check:
    print("\n\nChecking features - %s" % (features_set))
    features_file = os.path.join(path, features_check[features_set]["feature_file"])
    y_column_idx = features_check[features_set]["y_column_idx"]
    n_hidden = features_check[features_set]["n_hidden"]
    train = pd.read_csv(features_file)
    ######## Append artificial data by number of consecutive characters feature ########
    if 2 in features_check[features_set]["features"]:
        mal = train[train[train.columns[y_column_idx]]==1].sample(500).copy()
        mal["2"] = mal["2"].apply(lambda x:x*random.randint(3,9))
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # drop-in equivalent.
        train = pd.concat([train, mal], ignore_index=True)
    ######################################## END #######################################
    # Copy the configured list: the original appended y_column_idx to the
    # list stored inside features_check, silently corrupting the shared
    # configuration for any later use of the same feature set.
    use_columns = list(features_check[features_set]["features"])
    use_columns.append(y_column_idx)
    train = train[train.columns[use_columns]]
    use_dataset = train.copy()
    use_dataset = np.asfarray(use_dataset.values,np.dtype('Float64'))
    # Normalize the features (all columns except the trailing label).
    scaler = MinMaxScaler().fit(use_dataset[:, :-1])
    dataset_norm = scaler.transform(use_dataset[:, :-1])
    # Split features and labels.
    # NOTE(review): X still contains the label column (use_dataset includes
    # y) — confirm whether features should exclude it / use dataset_norm.
    X, y = use_dataset, np.transpose([use_dataset[:, -1]])
    indices = np.arange(y.shape[0])
    X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(X, y, indices, stratify=y, test_size=test_size, random_state=42)
    kf = KFold(n_splits=n_splits, random_state=None, shuffle=False)
    kf.get_n_splits(X_train)
    for train_index, test_index in kf.split(idx_train):
        X_train_fold, X_test_fold = X_train[train_index], X_train[test_index]
        y_train_fold, y_test_fold = y_train[train_index], y_train[test_index]
    ########################################################
    elm = ELMClassifier(n_hidden=n_hidden, C= features_check[features_set]["C"], activation='relu')
    ann = NeuralNetwork()
    # StackingClassifier expects a list of (name, estimator) tuples, and
    # fit() requires the training data; the original passed bare estimators
    # and called fit() with no arguments, both of which raise immediately.
    models = [("ann", ann), ("elm", elm)]
    stacking = StackingClassifier(estimators=models, final_estimator=LogisticRegression())
    stacking.fit(X_train, y_train.ravel())
"os.path.abspath",
"random.randint",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.dtype",
"numpy.transpose",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.model_selection.KFold",
"sklearn.linear_model.LogisticRegression",
"numpy.arange",
"ELM.ELMClassifier",
"NeuralNe... | [((2334, 2372), 'os.path.join', 'os.path.join', (['path', 'features_file_name'], {}), '(path, features_file_name)\n', (2346, 2372), False, 'import os\n'), ((3761, 3819), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'random_state': 'None', 'shuffle': '(False)'}), '(n_splits=n_splits, random_state=None, shuffle=False)\n', (3766, 3819), False, 'from sklearn.model_selection import KFold\n'), ((4108, 4200), 'ELM.ELMClassifier', 'ELMClassifier', ([], {'n_hidden': 'n_hidden', 'C': "features_check[features_set]['C']", 'activation': '"""relu"""'}), "(n_hidden=n_hidden, C=features_check[features_set]['C'],\n activation='relu')\n", (4121, 4200), False, 'from ELM import ELMClassifier\n'), ((4202, 4217), 'NeuralNetwork.NeuralNetwork', 'NeuralNetwork', ([], {}), '()\n', (4215, 4217), False, 'from NeuralNetwork import NeuralNetwork\n'), ((2196, 2221), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2211, 2221), False, 'import os\n'), ((2483, 2547), 'os.path.join', 'os.path.join', (['path', "features_check[features_set]['feature_file']"], {}), "(path, features_check[features_set]['feature_file'])\n", (2495, 2547), False, 'import os\n'), ((2685, 2711), 'pandas.read_csv', 'pd.read_csv', (['features_file'], {}), '(features_file)\n', (2696, 2711), True, 'import pandas as pd\n'), ((3595, 3616), 'numpy.arange', 'np.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (3604, 3616), True, 'import numpy as np\n'), ((3674, 3759), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y', 'indices'], {'stratify': 'y', 'test_size': 'test_size', 'random_state': '(42)'}), '(X, y, indices, stratify=y, test_size=test_size,\n random_state=42)\n', (3690, 3759), False, 'from sklearn.model_selection import train_test_split\n'), ((3348, 3367), 'numpy.dtype', 'np.dtype', (['"""Float64"""'], {}), "('Float64')\n", (3356, 3367), True, 'import numpy as np\n'), ((3548, 3582), 'numpy.transpose', 'np.transpose', 
(['[use_dataset[:, -1]]'], {}), '([use_dataset[:, -1]])\n', (3560, 3582), True, 'import numpy as np\n'), ((4301, 4321), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (4319, 4321), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3403, 3417), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3415, 3417), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2971, 2991), 'random.randint', 'random.randint', (['(3)', '(9)'], {}), '(3, 9)\n', (2985, 2991), False, 'import random\n')] |
import os
import glob
import copy
import random
import numpy as np
from scipy.stats import binned_statistic
import matplotlib.pyplot as plt
import scipy.io as scio
#######################################
#######################################
import sys
sys.path.append('../../../')
from affpose.YCB import cfg as config
from affpose.YCB.utils.dataset import ycb_dataset_utils
#######################################
#######################################
def main():
    """Plot per-class prediction-confidence histograms from MATLAB .mat results."""
    # MATLAB RESULTS DIR.
    file_path = config.EVAL_FOLDER_DF_ITERATIVE + '/*.mat'
    files = sorted(glob.glob(file_path))

    # Gather every (class_id, confidence) prediction across all files.
    # The original pre-allocated a fixed (num_files, 10) array, which
    # raised an IndexError as soon as a single file held more than 10
    # detections; plain lists have no such cap.
    pred_class_ids = []
    pred_c = []
    for file in files:
        meta = scio.loadmat(file)
        _class_ids = meta['class_ids'].reshape(-1)
        _confidence = meta['confidence'].reshape(-1)
        pred_class_ids.extend(_class_ids.tolist())
        pred_c.extend(_confidence.tolist())

    pred_class_ids = np.asarray(pred_class_ids, dtype=float).reshape(-1)
    pred_c = np.asarray(pred_c, dtype=float).reshape(-1)
    # Drop class-id 0 entries, matching the original zero-padding filter.
    non_zero_idx = np.nonzero(pred_class_ids)
    pred_class_ids = pred_class_ids[non_zero_idx]
    pred_c = pred_c[non_zero_idx]

    # now we want to plot predictions
    for obj_id in range(1, config.NUM_OBJECTS+1):
        # get pred c for class.
        row_idxs = np.argwhere(pred_class_ids == obj_id).reshape(-1)
        _pred_c = np.sort(pred_c[row_idxs])

        fig = plt.figure(obj_id)
        plt.title(f'Object Id: {obj_id}, {ycb_dataset_utils.map_obj_id_to_name(obj_id)}', fontsize=10)
        plt.xlabel('$Confidence$', fontsize=10)
        plt.ylabel('$Frequency$', fontsize=10)
        plt.xlim(0, 1.15)

        # plotting configs: dataset colors come as 0-255 RGB; matplotlib
        # wants 0-1 RGBA.
        _color = ycb_dataset_utils.obj_color_map(idx=obj_id)
        color = [_color[0]/255, _color[1]/255, _color[2]/255, 0.75]

        # summary statistics per class.
        mean = np.mean(_pred_c)
        std_dev = np.std(_pred_c)
        print(f'Object Id: {obj_id},\t\t Num: {len(row_idxs)}'
              f'\t\t mean:{mean:.5f},\t\t std_dev:{std_dev:.5f},'
              f'\t\t Name: {ycb_dataset_utils.map_obj_id_to_name(obj_id)}')

        # plot data.
        # plt.plot(range(len(row_idxs)), _pred_c, color=color, label=f'{arl_affpose_dataset_utils.map_obj_id_to_name(obj_id)}')
        plt.hist(_pred_c, bins=10, color=color, label=f'{ycb_dataset_utils.map_obj_id_to_name(obj_id)}')
    # plt.show()

if __name__ == '__main__':
    main()
"sys.path.append",
"matplotlib.pyplot.xlim",
"affpose.YCB.utils.dataset.ycb_dataset_utils.map_obj_id_to_name",
"scipy.io.loadmat",
"numpy.std",
"numpy.nonzero",
"numpy.sort",
"matplotlib.pyplot.figure",
"numpy.mean",
"glob.glob",
"numpy.argwhere",
"matplotlib.pyplot.ylabel",
"affpose.YCB.uti... | [((261, 289), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (276, 289), False, 'import sys\n'), ((1255, 1281), 'numpy.nonzero', 'np.nonzero', (['pred_class_ids'], {}), '(pred_class_ids)\n', (1265, 1281), True, 'import numpy as np\n'), ((584, 604), 'glob.glob', 'glob.glob', (['file_path'], {}), '(file_path)\n', (593, 604), False, 'import glob\n'), ((802, 820), 'scipy.io.loadmat', 'scio.loadmat', (['file'], {}), '(file)\n', (814, 820), True, 'import scipy.io as scio\n'), ((1574, 1599), 'numpy.sort', 'np.sort', (['pred_c[row_idxs]'], {}), '(pred_c[row_idxs])\n', (1581, 1599), True, 'import numpy as np\n'), ((1615, 1633), 'matplotlib.pyplot.figure', 'plt.figure', (['obj_id'], {}), '(obj_id)\n', (1625, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$Confidence$"""'], {'fontsize': '(10)'}), "('$Confidence$', fontsize=10)\n", (1755, 1784), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1831), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Frequency$"""'], {'fontsize': '(10)'}), "('$Frequency$', fontsize=10)\n", (1803, 1831), True, 'import matplotlib.pyplot as plt\n'), ((1840, 1857), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.15)'], {}), '(0, 1.15)\n', (1848, 1857), True, 'import matplotlib.pyplot as plt\n'), ((1903, 1946), 'affpose.YCB.utils.dataset.ycb_dataset_utils.obj_color_map', 'ycb_dataset_utils.obj_color_map', ([], {'idx': 'obj_id'}), '(idx=obj_id)\n', (1934, 1946), False, 'from affpose.YCB.utils.dataset import ycb_dataset_utils\n'), ((2056, 2072), 'numpy.mean', 'np.mean', (['_pred_c'], {}), '(_pred_c)\n', (2063, 2072), True, 'import numpy as np\n'), ((2091, 2106), 'numpy.std', 'np.std', (['_pred_c'], {}), '(_pred_c)\n', (2097, 2106), True, 'import numpy as np\n'), ((1506, 1543), 'numpy.argwhere', 'np.argwhere', (['(pred_class_ids == obj_id)'], {}), '(pred_class_ids == obj_id)\n', (1517, 1543), True, 'import numpy as np\n'), 
((1676, 1720), 'affpose.YCB.utils.dataset.ycb_dataset_utils.map_obj_id_to_name', 'ycb_dataset_utils.map_obj_id_to_name', (['obj_id'], {}), '(obj_id)\n', (1712, 1720), False, 'from affpose.YCB.utils.dataset import ycb_dataset_utils\n'), ((2264, 2308), 'affpose.YCB.utils.dataset.ycb_dataset_utils.map_obj_id_to_name', 'ycb_dataset_utils.map_obj_id_to_name', (['obj_id'], {}), '(obj_id)\n', (2300, 2308), False, 'from affpose.YCB.utils.dataset import ycb_dataset_utils\n'), ((2519, 2563), 'affpose.YCB.utils.dataset.ycb_dataset_utils.map_obj_id_to_name', 'ycb_dataset_utils.map_obj_id_to_name', (['obj_id'], {}), '(obj_id)\n', (2555, 2563), False, 'from affpose.YCB.utils.dataset import ycb_dataset_utils\n')] |
import base64
import cv2 as cv
import cv2
import numpy as np
def byte_to_image(string_byte):
    """Decode a base64-encoded JPEG payload into an OpenCV (BGR) image."""
    raw_bytes = base64.b64decode(string_byte)
    pixel_buffer = np.frombuffer(raw_bytes, dtype=np.uint8)
    return cv2.imdecode(pixel_buffer, flags=1)
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=1):
    """Sharpen *image* via unsharp masking.

    Args:
        image: uint8 image (as produced by cv2.imdecode).
        kernel_size: Gaussian blur kernel size.
        sigma: Gaussian blur standard deviation.
        amount: sharpening strength.
        threshold: minimum |image - blurred| difference for a pixel to be
            sharpened; lower-contrast pixels keep their original value.

    Returns:
        The sharpened uint8 image.
    """
    blurred = cv.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        # Subtract in a signed dtype: uint8 - uint8 wraps around modulo 256,
        # which made the original low-contrast mask wrong wherever
        # blurred > image.
        diff = np.absolute(image.astype(np.int16) - blurred.astype(np.int16))
        low_contrast_mask = diff < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened
# image = cv.imread('D:\Python\Procesamiento imagenes\Laplaciano\BlurDetectionOpenvc\images\\0.jpg')
encoded = base64.b64encode(
    open('D:\Python\Procesamiento imagenes\Laplaciano\BlurDetectionOpenvc\images\\0.jpg', "rb").read())
print("Input: " + str(encoded))
image = byte_to_image(encoded)
sharpened_image = unsharp_mask(image)
sharpened_image2 = unsharp_mask(sharpened_image)
# Encode the doubly-sharpened frame to JPEG bytes in memory.
# (.tostring() is the deprecated alias of .tobytes().)
img_as_bytes = cv2.imencode('.jpg', sharpened_image2)[1].tobytes()
# decode the array into an image (round-trip sanity check).  The original
# called np.fromstring on the ndarray itself instead of the JPEG bytes.
x = np.frombuffer(img_as_bytes, dtype='uint8')
img = cv2.imdecode(x, cv2.IMREAD_UNCHANGED)
# base64-encode the JPEG bytes; the original tried to open() the ndarray
# as if it were a filename, which raises TypeError.
imagen_as_text = base64.b64encode(img_as_bytes)
print("output: " + str(imagen_as_text))
imagen_result = byte_to_image(imagen_as_text)
cv2.imwrite('D:\Python\Procesamiento imagenes\Laplaciano\BlurDetectionOpenvc\images\sharp\sharpened-tv.jpg',
            imagen_result)
#
# cv.imwrite('D:\Python\Procesamiento imagenes\Laplaciano\BlurDetectionOpenvc\images\sharp\sharpened-tv.jpg',
#            sharpened_image2)
| [
"cv2.GaussianBlur",
"numpy.absolute",
"cv2.imwrite",
"numpy.frombuffer",
"cv2.imdecode",
"numpy.zeros",
"numpy.ones",
"base64.b64decode",
"cv2.imencode",
"numpy.copyto",
"numpy.fromstring"
] | [((1289, 1335), 'numpy.fromstring', 'np.fromstring', (['sharpened_image2'], {'dtype': '"""uint8"""'}), "(sharpened_image2, dtype='uint8')\n", (1302, 1335), True, 'import numpy as np\n'), ((1375, 1412), 'cv2.imdecode', 'cv2.imdecode', (['x', 'cv2.IMREAD_UNCHANGED'], {}), '(x, cv2.IMREAD_UNCHANGED)\n', (1387, 1412), False, 'import cv2\n'), ((1570, 1710), 'cv2.imwrite', 'cv2.imwrite', (['"""D:\\\\Python\\\\Procesamiento imagenes\\\\Laplaciano\\\\BlurDetectionOpenvc\\\\images\\\\sharp\\\\sharpened-tv.jpg"""', 'imagen_result'], {}), "(\n 'D:\\\\Python\\\\Procesamiento imagenes\\\\Laplaciano\\\\BlurDetectionOpenvc\\\\images\\\\sharp\\\\sharpened-tv.jpg'\n , imagen_result)\n", (1581, 1710), False, 'import cv2\n'), ((115, 144), 'base64.b64decode', 'base64.b64decode', (['string_byte'], {}), '(string_byte)\n', (131, 144), False, 'import base64\n'), ((161, 204), 'numpy.frombuffer', 'np.frombuffer', (['jpg_original'], {'dtype': 'np.uint8'}), '(jpg_original, dtype=np.uint8)\n', (174, 204), True, 'import numpy as np\n'), ((215, 247), 'cv2.imdecode', 'cv2.imdecode', (['jpg_as_np'], {'flags': '(1)'}), '(jpg_as_np, flags=1)\n', (227, 247), False, 'import cv2\n'), ((360, 402), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['image', 'kernel_size', 'sigma'], {}), '(image, kernel_size, sigma)\n', (375, 402), True, 'import cv2 as cv\n'), ((509, 534), 'numpy.zeros', 'np.zeros', (['sharpened.shape'], {}), '(sharpened.shape)\n', (517, 534), True, 'import numpy as np\n'), ((756, 808), 'numpy.copyto', 'np.copyto', (['sharpened', 'image'], {'where': 'low_contrast_mask'}), '(sharpened, image, where=low_contrast_mask)\n', (765, 808), True, 'import numpy as np\n'), ((580, 604), 'numpy.ones', 'np.ones', (['sharpened.shape'], {}), '(sharpened.shape)\n', (587, 604), True, 'import numpy as np\n'), ((707, 735), 'numpy.absolute', 'np.absolute', (['(image - blurred)'], {}), '(image - blurred)\n', (718, 735), True, 'import numpy as np\n'), ((1231, 1269), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 
'sharpened_image2'], {}), "('.jpg', sharpened_image2)\n", (1243, 1269), False, 'import cv2\n')] |
import numpy as np
class Threshold:
    """A single decision-tree split: column ``col`` of ``x`` against ``unit``.

    Continuous splits test ``column < unit``; categorical splits test
    ``column == unit`` (with a NaN sentinel handled via ``np.isnan``).
    """

    def __init__(self, unit, axis, col, is_continue):
        # unit: split value; axis: permutation handed to np.transpose so the
        # target column can be indexed by row; col: column index;
        # is_continue: True for continuous (<) splits, False for categorical
        # (==) splits.
        self.unit_ = unit
        self.axis_ = axis
        self.is_continue_ = is_continue
        self.col_ = col

    def get_cost(self, y, condition):
        """Weighted Gini impurity of splitting label vector ``y`` by ``condition``.

        Returns 2 — worse than any real Gini value (max 1) — as a sentinel
        when either side of the split is empty.
        """
        cost_true = 1
        cost_false = 1
        y_true = y[condition]
        y_false = y[~condition]
        # From this point on, y is assumed to be a flat label vector.
        tot_true = y_true.shape[0]
        tot_false = y_false.shape[0]
        if tot_true * tot_false == 0:
            return 2
        tot = tot_true + tot_false
        for uni in np.unique(y):
            count_true = np.sum(y_true == uni)
            count_false = np.sum(y_false == uni)
            cost_true -= np.power(count_true/tot_true, 2)
            cost_false -= np.power(count_false/tot_false, 2)
        cost = tot_true/tot * cost_true + tot_false/tot * cost_false
        return cost

    def divide(self, x, y = None):
        """Split ``x`` (and optionally ``y``) into the (true, false) sides.

        Returns (x_true, x_false) or (x_true, x_false, y_true, y_false)
        when ``y`` is given.
        """
        column = np.transpose(x, self.axis_)[self.col_]
        if self.is_continue_:
            condition = column < self.unit_
        elif isinstance(self.unit_, float) and np.isnan(self.unit_):
            # Guard with isinstance: np.isnan raises TypeError for
            # non-numeric (e.g. string) categorical split values, which
            # crashed the original code.
            condition = np.isnan(column)
        else:
            condition = column == self.unit_
        if y is not None:
            return x[condition], x[~condition], y[condition], y[~condition]
        return x[condition], x[~condition]
"numpy.sum",
"numpy.power",
"numpy.transpose",
"numpy.isnan",
"numpy.unique"
] | [((516, 528), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (525, 528), True, 'import numpy as np\n'), ((549, 570), 'numpy.sum', 'np.sum', (['(y_true == uni)'], {}), '(y_true == uni)\n', (555, 570), True, 'import numpy as np\n'), ((591, 613), 'numpy.sum', 'np.sum', (['(y_false == uni)'], {}), '(y_false == uni)\n', (597, 613), True, 'import numpy as np\n'), ((633, 667), 'numpy.power', 'np.power', (['(count_true / tot_true)', '(2)'], {}), '(count_true / tot_true, 2)\n', (641, 667), True, 'import numpy as np\n'), ((686, 722), 'numpy.power', 'np.power', (['(count_false / tot_false)', '(2)'], {}), '(count_false / tot_false, 2)\n', (694, 722), True, 'import numpy as np\n'), ((850, 877), 'numpy.transpose', 'np.transpose', (['x', 'self.axis_'], {}), '(x, self.axis_)\n', (862, 877), True, 'import numpy as np\n'), ((1063, 1083), 'numpy.isnan', 'np.isnan', (['self.unit_'], {}), '(self.unit_)\n', (1071, 1083), True, 'import numpy as np\n'), ((1464, 1484), 'numpy.isnan', 'np.isnan', (['self.unit_'], {}), '(self.unit_)\n', (1472, 1484), True, 'import numpy as np\n'), ((1105, 1121), 'numpy.isnan', 'np.isnan', (['column'], {}), '(column)\n', (1113, 1121), True, 'import numpy as np\n'), ((1506, 1522), 'numpy.isnan', 'np.isnan', (['column'], {}), '(column)\n', (1514, 1522), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Common filters used in signal processing
These filters can be useful to smooth signal trajectories.
"""
import numpy as np
from scipy.signal import butter, lfilter
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
# Butterworth Filter: "The Butterworth filter is a type of signal processing filter designed to have a frequency
# response as flat as possible in the passband." (Wikipedia: https://en.wikipedia.org/wiki/Butterworth_filter)
# Example from http://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter.

    Returns the (b, a) transfer-function coefficients of a filter passing
    frequencies between ``lowcut`` and ``highcut`` Hz at sample rate ``fs``.
    """
    nyquist = 0.5 * fs
    normalized_band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, normalized_band, btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Apply a Butterworth band-pass filter of the given order to ``data``."""
    coefficients = butter_bandpass(lowcut, highcut, fs, order=order)
    filtered = lfilter(coefficients[0], coefficients[1], data)
    return filtered
# Taken from https://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
def smooth(x, window_len=11, window='hanning'):
    """smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the begining and end part of the output signal.

    Args:
        x: the input signal (1-D numpy array)
        window_len: the dimension of the smoothing window; should be an odd integer (>=3)
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
            a flat window will produce a moving average smoothing.

    Returns:
        the smoothed signal (length len(x) + window_len - 1)

    Raises:
        ValueError: if x is not 1-D, is shorter than the window, or the
            window name is unknown.

    example:
    t = linspace(-2, 2, 0.1)
    x = sin(t) + randn(len(t)) * 0.1
    y = smooth(x)

    see also:
    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve, scipy.signal.lfilter

    NOTE: length(output) != length(input); to correct this, return
    y[(window_len/2-1):-(window_len/2)] instead of just y.

    References:
        - https://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Too small to smooth anything; return the input untouched.
        return x
    # Map window names to their numpy constructors instead of using eval()
    # on a user-supplied string (safer, and the lookup doubles as validation).
    window_funcs = {'hanning': np.hanning, 'hamming': np.hamming,
                    'bartlett': np.bartlett, 'blackman': np.blackman}
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    elif window in window_funcs:
        w = window_funcs[window](window_len)
    else:
        # BUGFIX: error message was garbled ("Window is on of ...").
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Pad both ends with reflected copies of the signal to limit edge transients.
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    # Normalize the window so the smoothing preserves the signal's scale.
    y = np.convolve(w / w.sum(), s, mode='valid')
    return y
| [
"scipy.signal.lfilter",
"numpy.ones",
"scipy.signal.butter"
] | [((871, 911), 'scipy.signal.butter', 'butter', (['order', '[low, high]'], {'btype': '"""band"""'}), "(order, [low, high], btype='band')\n", (877, 911), False, 'from scipy.signal import butter, lfilter\n'), ((1063, 1082), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (1070, 1082), False, 'from scipy.signal import butter, lfilter\n'), ((2969, 2993), 'numpy.ones', 'np.ones', (['window_len', '"""d"""'], {}), "(window_len, 'd')\n", (2976, 2993), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import warnings
class Eda():
    """Lightweight exploratory-data-analysis helper for a pandas DataFrame.

    Stores the data under analysis together with default thresholds for
    the various EDA heuristics, and builds summary statistics on demand
    via describe().
    """

    def __init__(self, data):
        # DataFrame under analysis (may be None until describe() is called).
        self.data = data
        # Default thresholds for the EDA heuristics (counts / percentages).
        self.config = {'threshold': {
            'skewness': 20,
            'cardinality': 50,
            'correlation': 0.9,
            'missing': 0,
            'zeros': 10,
            'constant_basic': 100,
            'constant_large': 0.9
        }
        }
        self.features = None
        self.description = None

    def _check(self, data, **kwargs):
        """Run optional sanity checks on *data*.

        Supported keyword flags:
            empty: if truthy, raise ValueError when data is empty.
            instance: expected type; warn when data is not an instance of it.
        """
        # BUGFIX: the default key was misspelled 'isntance', so reading
        # options["instance"] below always raised KeyError.
        kwargs_default = {"empty": None,
                          "instance": None}
        options = {key: kwargs.get(key, kwargs_default[key]) for key in kwargs_default}
        if options["empty"]:
            self._check_empty(data)
        if options["instance"]:
            self._check_instance(data, inst=options["instance"])

    @staticmethod
    def _check_empty(data):
        """Raise ValueError when *data* is empty; return True otherwise."""
        if data.empty:
            raise ValueError("data can not be empty")
        return True

    @staticmethod
    def _check_instance(data, inst=pd.DataFrame):
        """Warn when *data* is not an instance of *inst*; return True."""
        if not isinstance(data, inst):
            # BUGFIX: was 'warning.warn' (undefined name — the imported module
            # is 'warnings'); also fixed the 'DatFrame' typo in the message.
            warnings.warn("data is not of type pandas.DataFrame")
        return True

    def describe(self):
        """Build a pandas Series summarising the dataset.

        Returns:
            pd.Series with feature/observation counts, missing / zero /
            duplicate statistics, memory usage, and per-dtype feature counts.

        Raises:
            ValueError: if no data has been assigned.
        """
        if self.data is None:
            raise ValueError("there is not any data")
        summary = {"Number of features": self.data.shape[1],
                   "Number of observations": self.data.shape[0],
                   "Missing Values (Num)": self.get_missingValues(self.data),
                   "Missing Values (%)": round((self.get_missingValues(self.data)
                                                / self.data.size) * 100, 2),
                   "Duplicate rows (Num)": self.get_duplicates(self.data).shape[0],
                   "Duplicate rows (%)": round((self.get_duplicates(self.data)['count'].sum()
                                                / self.data.size) * 100, 2),
                   "Zeros values (Num)": self.get_zerosValues(self.data),
                   "Zeros values (%)": round((self.get_zerosValues(self.data) / self.data.size) * 100, 2),
                   "Total size in memory (Kb)": self.data.memory_usage().sum() / 1000,
                   "Average observations size in memory (B)": self.data.memory_usage().sum() / self.data.shape[0],
                   "Features numerica": self.data.select_dtypes(include=['number']).shape[1],
                   "Features Categorical": self.data.select_dtypes(include=["object", "category"]).shape[1],
                   "Features datetimes": self.data.select_dtypes(include=["datetime"]).shape[1]}
        self.description = pd.Series(summary)
        return self.description

    @staticmethod
    def get_duplicates(df: pd.DataFrame, columns=None):
        """Return fully-duplicated rows grouped with their occurrence count.

        Args:
            df: the DataFrame to scan.
            columns: optional subset of columns used to define duplication.

        Returns:
            A DataFrame of duplicated row values plus a 'count' column,
            sorted by count in descending order.
        """
        duplicates = df[df.duplicated(subset=columns, keep=False)].groupby(
            df.columns.values.tolist()).size().reset_index(name="count"). \
            sort_values("count", ascending=False)
        return duplicates

    @staticmethod
    def get_missingValues(df):
        """Count NaN/None cells in a DataFrame or Series.

        Returns None for any other input type (preserved behaviour).
        """
        if isinstance(df, pd.DataFrame):
            return df.isnull().sum().sum()
        elif isinstance(df, pd.Series):
            return df.isnull().sum()

    @staticmethod
    def get_zerosValues(df):
        """Count cells equal to zero (note: NaN cells count as non-zero)."""
        return df.size - np.count_nonzero(df.values)
| [
"pandas.DataFrame",
"numpy.count_nonzero",
"pandas.Series"
] | [((3076, 3094), 'pandas.Series', 'pd.Series', (['summary'], {}), '(summary)\n', (3085, 3094), True, 'import pandas as pd\n'), ((3932, 3959), 'numpy.count_nonzero', 'np.count_nonzero', (['df.values'], {}), '(df.values)\n', (3948, 3959), True, 'import numpy as np\n'), ((3654, 3668), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3666, 3668), True, 'import pandas as pd\n'), ((3753, 3764), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (3762, 3764), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 10:03:33 2018
Test operations on grid data objects (upward continuation demo).
@author: chens
"""
import numpy as np
import matplotlib.pyplot as plt
from geoist.pfm import grdio
from geoist.pfm import pftrans
# Load a Surfer-format grid from disk.
grd1=grdio.grddata()
grd1.load_surfer(r'D:\demo\demogrid.grd')
# If the grid contains masked (null) cells, fill them before processing
# and display the filled grid; otherwise display the grid as-is.
if np.ma.is_masked(grd1.data):
    grd1.fill_nulls()
    plt.imshow(grd1.data0)
else:
    print('not null region in dataset')
    plt.imshow(grd1.data)
shape = (grd1.rows, grd1.cols)
height = 0.5  # upward-continuation height — presumably in the grid's coordinate units; TODO confirm
# Flatten the grid to (x, y, value) columns, upward-continue the field,
# then reshape the result back to the original grid layout.
x, y, gz = grd1.grd2xyz()
gzcontf = pftrans.upcontinue(x, y, gz, shape, height)
gz2d = gzcontf.reshape(grd1.rows, grd1.cols)
# Re-mask null cells and export the continued field as a Surfer grid.
grd1.data = np.ma.masked_equal(gz2d, grd1.nullvalue)
grd1.export_surfer(r'D:\demo\demogrid-up1000.grd')
#grd1.data=d1*d1
#v = d1.reshape(grd1.rows*grd1.cols)
# #gridder.interpolation.fill_nans(x, y, v, xp, yp, vp):
# plt.imshow(grd1.data)  # show the plot result
#grd1.export_surfer(r'D:\demo\demogrid3-blk.grd', flag = False)
#np.ma.getdata(gz)
| [
"matplotlib.pyplot.imshow",
"numpy.ma.masked_equal",
"geoist.pfm.pftrans.upcontinue",
"numpy.ma.is_masked",
"geoist.pfm.grdio.grddata"
] | [((217, 232), 'geoist.pfm.grdio.grddata', 'grdio.grddata', ([], {}), '()\n', (230, 232), False, 'from geoist.pfm import grdio\n'), ((278, 304), 'numpy.ma.is_masked', 'np.ma.is_masked', (['grd1.data'], {}), '(grd1.data)\n', (293, 304), True, 'import numpy as np\n'), ((505, 548), 'geoist.pfm.pftrans.upcontinue', 'pftrans.upcontinue', (['x', 'y', 'gz', 'shape', 'height'], {}), '(x, y, gz, shape, height)\n', (523, 548), False, 'from geoist.pfm import pftrans\n'), ((606, 646), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['gz2d', 'grd1.nullvalue'], {}), '(gz2d, grd1.nullvalue)\n', (624, 646), True, 'import numpy as np\n'), ((330, 352), 'matplotlib.pyplot.imshow', 'plt.imshow', (['grd1.data0'], {}), '(grd1.data0)\n', (340, 352), True, 'import matplotlib.pyplot as plt\n'), ((401, 422), 'matplotlib.pyplot.imshow', 'plt.imshow', (['grd1.data'], {}), '(grd1.data)\n', (411, 422), True, 'import matplotlib.pyplot as plt\n')] |
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
from slicer.util import setSliceViewerLayers
import numpy as np
import SimpleITK as sitk
import sitkUtils
#
# SyntheticCTEvaluation
#
class SyntheticCTEvaluation(ScriptedLoadableModule):
  """Slicer module entry point: registers module metadata with the application.

  Uses ScriptedLoadableModule base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def __init__(self, parent):
    # Standard Slicer scripted-module registration: fill in the metadata
    # shown in the module selector and the Help & Acknowledgement panel.
    ScriptedLoadableModule.__init__(self, parent)
    self.parent.title = "Synthetic CT evaluation" # TODO make this more human readable by adding spaces
    self.parent.categories = ["Quantification"]
    self.parent.dependencies = []
    self.parent.contributors = ["<NAME> (Magna Graecia University of Catanzaro, Italy)", "<NAME> (Magna Graecia University of Catanzaro, Italy)"] # replace with "Firstname Lastname (Organization)"
    self.parent.helpText = '''
This module quatifies conversion accuracy of a synthetic CT algorithm. 
The full validation workflow is described in Spadea, <NAME>, et al. "Deep Convolution Neural Network (DCNN) Multiplane Approach to Synthetic CT Generation From MR images—Application in Brain Proton Therapy." International Journal of Radiation Oncology* Biology* Physics 105.3 (2019): 495-503.
'''
    self.parent.helpText += self.getDefaultModuleDocumentationLink()
    self.parent.acknowledgementText = """ """ # replace with organization, grant and thanks.
#
# SyntheticCTEvaluationWidget
#
class SyntheticCTEvaluationWidget(ScriptedLoadableModuleWidget):
  """GUI for the module: four node selectors, MAE/ME result labels, Apply button.

  Uses ScriptedLoadableModuleWidget base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def setup(self):
    """Build the module panel and wire widget signals to their handlers."""
    ScriptedLoadableModuleWidget.setup(self)
    # Instantiate and connect widgets ...
    #
    # Parameters Area
    #
    parametersCollapsibleButton = ctk.ctkCollapsibleButton()
    parametersCollapsibleButton.text = "Parameters"
    self.layout.addWidget(parametersCollapsibleButton)
    # Layout within the dummy collapsible button
    parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
    #
    # GT CT volume selector: the ground-truth (planning) CT scalar volume.
    #
    self.gtCTSelector = slicer.qMRMLNodeComboBox()
    self.gtCTSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
    self.gtCTSelector.selectNodeUponCreation = True
    self.gtCTSelector.addEnabled = False
    self.gtCTSelector.removeEnabled = False
    self.gtCTSelector.noneEnabled = False
    self.gtCTSelector.showHidden = False
    self.gtCTSelector.showChildNodeTypes = False
    self.gtCTSelector.setMRMLScene( slicer.mrmlScene )
    self.gtCTSelector.setToolTip( "Select the ground truth CT" )
    parametersFormLayout.addRow("Ground truth CT volume: ", self.gtCTSelector)
    #
    # synthetic CT volume selector: the MR-derived synthetic CT to evaluate.
    #
    self.sCTSelector = slicer.qMRMLNodeComboBox()
    self.sCTSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
    self.sCTSelector.selectNodeUponCreation = True
    self.sCTSelector.addEnabled = False
    self.sCTSelector.removeEnabled = False
    self.sCTSelector.noneEnabled = False
    self.sCTSelector.showHidden = False
    self.sCTSelector.showChildNodeTypes = False
    self.sCTSelector.setMRMLScene( slicer.mrmlScene )
    self.sCTSelector.setToolTip( "Select the synthetic CT" )
    parametersFormLayout.addRow("Synthetic CT volume: ", self.sCTSelector)
    #
    # mask label selector: labelmap restricting where metrics are computed.
    #
    self.maskSelector = slicer.qMRMLNodeComboBox()
    self.maskSelector.nodeTypes = ["vtkMRMLLabelMapVolumeNode"]
    self.maskSelector.selectNodeUponCreation = True
    self.maskSelector.addEnabled = False
    self.maskSelector.removeEnabled = False
    self.maskSelector.noneEnabled = False
    self.maskSelector.showHidden = False
    self.maskSelector.showChildNodeTypes = False
    self.maskSelector.setMRMLScene( slicer.mrmlScene )
    self.maskSelector.setToolTip( "Select the labelmap within evaluation will be ran" )
    parametersFormLayout.addRow("Mask label: ", self.maskSelector)
    #
    # output volume selector: creatable volume that will receive the error map.
    #
    self.outputSelector = slicer.qMRMLNodeComboBox()
    self.outputSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
    self.outputSelector.selectNodeUponCreation = True
    self.outputSelector.addEnabled = True
    self.outputSelector.removeEnabled = True
    self.outputSelector.noneEnabled = True
    self.outputSelector.showHidden = False
    self.outputSelector.showChildNodeTypes = False
    self.outputSelector.setMRMLScene( slicer.mrmlScene )
    self.outputSelector.setToolTip( "Select or create a volume where the error map will be stored" )
    parametersFormLayout.addRow("Output error map volume: ", self.outputSelector)
    # MAE and ME QLabel: read-only labels updated after each run.
    self.QLabelMAE = qt.QLabel("")
    parametersFormLayout.addRow("Mean Absolute Error (MAE) [HU] = ", self.QLabelMAE)
    self.QLabelME = qt.QLabel("")
    parametersFormLayout.addRow("Mean Error (ME) [HU] = ", self.QLabelME)
    #
    # Apply Button
    #
    self.applyButton = qt.QPushButton("Apply")
    self.applyButton.toolTip = "Run the algorithm."
    self.applyButton.enabled = False
    parametersFormLayout.addRow(self.applyButton)
    # connections
    self.applyButton.connect('clicked(bool)', self.onApplyButton)
    self.gtCTSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
    self.sCTSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
    self.maskSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
    self.outputSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onSelect)
    # Add vertical spacer
    self.layout.addStretch(1)
    # Refresh Apply button state
    self.onSelect()
    # Create logic object
    self.logic = SyntheticCTEvaluationLogic()

  def cleanup(self):
    """Clear the MAE/ME result labels when the module is torn down."""
    self.QLabelMAE.setText("")
    self.QLabelME.setText("")

  def onSelect(self):
    """Enable Apply only when all four node selectors have a selection."""
    self.applyButton.enabled = self.gtCTSelector.currentNode() and self.sCTSelector.currentNode() and self.maskSelector.currentNode() and self.outputSelector.currentNode()

  def onApplyButton(self):
    """Run the evaluation logic and display MAE/ME (1 decimal, in HU)."""
    mae, me = self.logic.run(self.gtCTSelector.currentNode().GetName(), self.sCTSelector.currentNode().GetName(), self.maskSelector.currentNode().GetName(), self.outputSelector.currentNode())
    self.QLabelMAE.setText("%.1f" % mae)
    self.QLabelME.setText("%.1f" % me)
#
# SyntheticCTEvaluationLogic
#
class SyntheticCTEvaluationLogic(ScriptedLoadableModuleLogic):
  """Computes MAE/ME and bone-threshold DSC between a ground-truth and a synthetic CT.

  This class should implement all the actual
  computation done by your module.  The interface
  should be such that other python code can import
  this class and make use of the functionality without
  requiring an instance of the Widget.
  Uses ScriptedLoadableModuleLogic base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def __init__(self):
    # Lazily-created results table/chart: built on first run() call only.
    self.hasTable = False

  def binarizeNumpyMask(self, img_np):
    """Min-max normalise *img_np* and threshold at 0.5, returning a uint8 mask.

    NOTE(review): assumes the input is not constant (max > min after the
    shift), otherwise the division below is by zero — confirm with callers.
    """
    img_np = img_np.astype(np.float32)
    img_np -= img_np.min()
    img_np /= img_np.max()
    img_np[img_np>0.5]=1
    img_np[img_np<=0.5]=0
    return img_np.astype(np.uint8)

  def run(self, gtCTVolumeName, sCTVolumeName, maskVolumeName, outputVolume):
    """
    Run accuracy assessment.

    Pulls the named volumes from the Slicer scene, computes the masked
    difference map (gtCT - sCT), the Mean Absolute Error and Mean Error in HU,
    plots the Dice coefficient of bone segmentations across HU thresholds,
    and pushes the error map into outputVolume.

    Returns:
      (mae, me) as floats in Hounsfield units.
    """
    # Get sitk/numpy images from Slicer
    gtCT_sitk = sitk.Cast(sitkUtils.PullVolumeFromSlicer(gtCTVolumeName), sitk.sitkFloat32)
    sCT_sitk = sitk.Cast(sitkUtils.PullVolumeFromSlicer(sCTVolumeName), sitk.sitkFloat32)
    mask_sitk = sitk.Cast(sitkUtils.PullVolumeFromSlicer(maskVolumeName), sitk.sitkLabelUInt8)
    mask_sitk = sitk.LabelMapToBinary(mask_sitk)
    #TODO: investigate better if mask is binary or not here
    gtCT = sitk.GetArrayFromImage(gtCT_sitk).astype(np.float32)
    sCT = sitk.GetArrayFromImage(sCT_sitk).astype(np.float32)
    mask = self.binarizeNumpyMask(sitk.GetArrayFromImage(mask_sitk))
    # Compute MAE and ME
    img_difference = gtCT - sCT
    # Outside the mask: -1000 HU (air) for the displayed map, NaN for the
    # statistics so nanmean ignores those voxels.
    img_difference[mask==0]=-1000
    img_difference_sitk = sitk.GetImageFromArray(img_difference)
    img_difference_sitk.CopyInformation(gtCT_sitk)
    img_difference[mask==0]=np.nan
    mae = np.nanmean(np.abs(img_difference).flatten())
    me = np.nanmean(img_difference.flatten())
    # If the table does not exist, create it
    if self.hasTable == False:
      self.hasTable = True
      # Create table
      self.tableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode")
      self.table = self.tableNode.GetTable()
      arrX = vtk.vtkFloatArray()
      arrX.SetName("HU")
      self.table.AddColumn(arrX)
      arrY = vtk.vtkFloatArray()
      arrY.SetName("DSC")
      self.table.AddColumn(arrY)
      # Create plot node
      plotSeriesNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotSeriesNode", "Bone threshold segmentation")
      plotSeriesNode.SetAndObserveTableNodeID(self.tableNode.GetID())
      plotSeriesNode.SetXColumnName("HU")
      plotSeriesNode.SetYColumnName("DSC")
      plotSeriesNode.SetPlotType(slicer.vtkMRMLPlotSeriesNode.PlotTypeScatter)
      plotSeriesNode.SetMarkerStyle(slicer.vtkMRMLPlotSeriesNode.MarkerStyleSquare)
      plotSeriesNode.SetUniqueColor()
      # Create plot chart node
      self.plotChartNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotChartNode")
      self.plotChartNode.AddAndObservePlotSeriesNodeID(plotSeriesNode.GetID())
      self.plotChartNode.SetTitle('Bone threshold assessment')
      self.plotChartNode.SetXAxisTitle('[HU]')
      self.plotChartNode.SetYAxisTitle('DSC')
      self.plotChartNode.LegendVisibilityOff()
    # Fill table with DSC value for bone: threshold both CTs at 100..1000 HU
    # (upper bound 1500 HU) and compare the binary segmentations.
    thrs = np.arange(100.0, 1100.0, 100.0)
    self.table.SetNumberOfRows(len(thrs))
    overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()
    for i, thr in enumerate(thrs):
      gtCT_bin = sitk.BinaryThreshold(sitk.Mask(gtCT_sitk, mask_sitk, outsideValue=-1000), lowerThreshold=thr, upperThreshold=1500.0, insideValue=1, outsideValue=0)
      sCT_bin = sitk.BinaryThreshold(sitk.Mask(sCT_sitk, mask_sitk, outsideValue=-1000), lowerThreshold=thr, upperThreshold=1500.0, insideValue=1, outsideValue=0)
      overlap_measures_filter.Execute(gtCT_bin, sCT_bin)
      dsc = overlap_measures_filter.GetDiceCoefficient()
      # TODO: empty table before each run?
      self.table.SetValue(i, 0, thr)
      self.table.SetValue(i, 1, dsc)
    # Switch to a layout that contains a plot view to create a plot widget
    layoutManager = slicer.app.layoutManager()
    layoutWithPlot = slicer.modules.plots.logic().GetLayoutWithPlot(layoutManager.layout)
    layoutManager.setLayout(layoutWithPlot)
    # Select chart in plot view
    plotWidget = layoutManager.plotWidget(0)
    plotViewNode = plotWidget.mrmlPlotViewNode()
    plotViewNode.SetPlotChartNodeID(self.plotChartNode.GetID())
    # Show diff image with a rainbow colour table in the slice viewers.
    outputVolume = sitkUtils.PushVolumeToSlicer(img_difference_sitk, outputVolume)
    setSliceViewerLayers(background=outputVolume)
    displayNode = outputVolume.GetDisplayNode()
    displayNode.SetAndObserveColorNodeID('vtkMRMLColorTableNodeRainbow')
    return mae, me
class SyntheticCTEvaluationTest(ScriptedLoadableModuleTest):
  """
  This is the test case for your scripted module.
  Uses ScriptedLoadableModuleTest base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def setUp(self):
    """ Do whatever is needed to reset the state - typically a scene clear will be enough.
    """
    slicer.mrmlScene.Clear(0)

  def runTest(self):
    """Run as few or as many tests as needed here.
    """
    self.setUp()
    self.test_SyntheticCTEvaluation1()

  def test_SyntheticCTEvaluation1(self):
    """ Ideally you should have several levels of tests.  At the lowest level
    tests should exercise the functionality of the logic with different inputs
    (both valid and invalid).  At higher levels your tests should emulate the
    way the user would interact with your code and confirm that it still works
    the way you intended.
    One of the most important features of the tests is that it should alert other
    developers when their changes will have an impact on the behavior of your
    module.  For example, if a developer removes a feature that you depend on,
    your test should break so they know that the feature is needed.
    """
    self.delayDisplay("Starting the test")
    #
    # first, get some data
    #
    import SampleData
    SampleData.downloadFromURL(
      nodeNames='FA',
      fileNames='FA.nrrd',
      uris='http://slicer.kitware.com/midas3/download?items=5767',
      checksums='SHA256:12d17fba4f2e1f1a843f0757366f28c3f3e1a8bb38836f0de2a32bb1cd476560')
    self.delayDisplay('Finished with download and loading')

    volumeNode = slicer.util.getNode(pattern="FA")
    logic = SyntheticCTEvaluationLogic()
    # BUGFIX: the original called logic.hasImageData(volumeNode), but
    # SyntheticCTEvaluationLogic defines no such method, so this test always
    # failed with AttributeError. Verify the loaded volume directly and
    # exercise an actual logic method instead.
    self.assertIsNotNone(volumeNode)
    self.assertIsNotNone(volumeNode.GetImageData())
    # binarizeNumpyMask: min-max normalise then threshold at 0.5.
    mask = logic.binarizeNumpyMask(np.array([0.0, 1.0, 2.0]))
    self.assertEqual(mask.tolist(), [0, 0, 1])
    self.delayDisplay('Test passed!')
| [
"numpy.abs",
"qt.QPushButton",
"sitkUtils.PushVolumeToSlicer",
"slicer.mrmlScene.AddNewNodeByClass",
"numpy.arange",
"slicer.modules.plots.logic",
"slicer.util.setSliceViewerLayers",
"SimpleITK.LabelMapToBinary",
"slicer.app.layoutManager",
"slicer.util.getNode",
"SimpleITK.GetArrayFromImage",
... | [((1927, 1953), 'ctk.ctkCollapsibleButton', 'ctk.ctkCollapsibleButton', ([], {}), '()\n', (1951, 1953), False, 'import vtk, qt, ctk, slicer\n'), ((2138, 2181), 'qt.QFormLayout', 'qt.QFormLayout', (['parametersCollapsibleButton'], {}), '(parametersCollapsibleButton)\n', (2152, 2181), False, 'import vtk, qt, ctk, slicer\n'), ((2247, 2273), 'slicer.qMRMLNodeComboBox', 'slicer.qMRMLNodeComboBox', ([], {}), '()\n', (2271, 2273), False, 'import vtk, qt, ctk, slicer\n'), ((2875, 2901), 'slicer.qMRMLNodeComboBox', 'slicer.qMRMLNodeComboBox', ([], {}), '()\n', (2899, 2901), False, 'import vtk, qt, ctk, slicer\n'), ((3479, 3505), 'slicer.qMRMLNodeComboBox', 'slicer.qMRMLNodeComboBox', ([], {}), '()\n', (3503, 3505), False, 'import vtk, qt, ctk, slicer\n'), ((4118, 4144), 'slicer.qMRMLNodeComboBox', 'slicer.qMRMLNodeComboBox', ([], {}), '()\n', (4142, 4144), False, 'import vtk, qt, ctk, slicer\n'), ((4773, 4786), 'qt.QLabel', 'qt.QLabel', (['""""""'], {}), "('')\n", (4782, 4786), False, 'import vtk, qt, ctk, slicer\n'), ((4892, 4905), 'qt.QLabel', 'qt.QLabel', (['""""""'], {}), "('')\n", (4901, 4905), False, 'import vtk, qt, ctk, slicer\n'), ((5035, 5058), 'qt.QPushButton', 'qt.QPushButton', (['"""Apply"""'], {}), "('Apply')\n", (5049, 5058), False, 'import vtk, qt, ctk, slicer\n'), ((7590, 7622), 'SimpleITK.LabelMapToBinary', 'sitk.LabelMapToBinary', (['mask_sitk'], {}), '(mask_sitk)\n', (7611, 7622), True, 'import SimpleITK as sitk\n'), ((7998, 8036), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['img_difference'], {}), '(img_difference)\n', (8020, 8036), True, 'import SimpleITK as sitk\n'), ((9608, 9639), 'numpy.arange', 'np.arange', (['(100.0)', '(1100.0)', '(100.0)'], {}), '(100.0, 1100.0, 100.0)\n', (9617, 9639), True, 'import numpy as np\n'), ((9713, 9751), 'SimpleITK.LabelOverlapMeasuresImageFilter', 'sitk.LabelOverlapMeasuresImageFilter', ([], {}), '()\n', (9749, 9751), True, 'import SimpleITK as sitk\n'), ((10445, 10471), 
'slicer.app.layoutManager', 'slicer.app.layoutManager', ([], {}), '()\n', (10469, 10471), False, 'import vtk, qt, ctk, slicer\n'), ((10839, 10902), 'sitkUtils.PushVolumeToSlicer', 'sitkUtils.PushVolumeToSlicer', (['img_difference_sitk', 'outputVolume'], {}), '(img_difference_sitk, outputVolume)\n', (10867, 10902), False, 'import sitkUtils\n'), ((10907, 10952), 'slicer.util.setSliceViewerLayers', 'setSliceViewerLayers', ([], {'background': 'outputVolume'}), '(background=outputVolume)\n', (10927, 10952), False, 'from slicer.util import setSliceViewerLayers\n'), ((11493, 11518), 'slicer.mrmlScene.Clear', 'slicer.mrmlScene.Clear', (['(0)'], {}), '(0)\n', (11515, 11518), False, 'import vtk, qt, ctk, slicer\n'), ((12462, 12681), 'SampleData.downloadFromURL', 'SampleData.downloadFromURL', ([], {'nodeNames': '"""FA"""', 'fileNames': '"""FA.nrrd"""', 'uris': '"""http://slicer.kitware.com/midas3/download?items=5767"""', 'checksums': '"""SHA256:12d17fba4f2e1f1a843f0757366f28c3f3e1a8bb38836f0de2a32bb1cd476560"""'}), "(nodeNames='FA', fileNames='FA.nrrd', uris=\n 'http://slicer.kitware.com/midas3/download?items=5767', checksums=\n 'SHA256:12d17fba4f2e1f1a843f0757366f28c3f3e1a8bb38836f0de2a32bb1cd476560')\n", (12488, 12681), False, 'import SampleData\n'), ((12775, 12808), 'slicer.util.getNode', 'slicer.util.getNode', ([], {'pattern': '"""FA"""'}), "(pattern='FA')\n", (12794, 12808), False, 'import vtk, qt, ctk, slicer\n'), ((7323, 7369), 'sitkUtils.PullVolumeFromSlicer', 'sitkUtils.PullVolumeFromSlicer', (['gtCTVolumeName'], {}), '(gtCTVolumeName)\n', (7353, 7369), False, 'import sitkUtils\n'), ((7414, 7459), 'sitkUtils.PullVolumeFromSlicer', 'sitkUtils.PullVolumeFromSlicer', (['sCTVolumeName'], {}), '(sCTVolumeName)\n', (7444, 7459), False, 'import sitkUtils\n'), ((7505, 7551), 'sitkUtils.PullVolumeFromSlicer', 'sitkUtils.PullVolumeFromSlicer', (['maskVolumeName'], {}), '(maskVolumeName)\n', (7535, 7551), False, 'import sitkUtils\n'), ((7844, 7877), 
'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['mask_sitk'], {}), '(mask_sitk)\n', (7866, 7877), True, 'import SimpleITK as sitk\n'), ((8375, 8429), 'slicer.mrmlScene.AddNewNodeByClass', 'slicer.mrmlScene.AddNewNodeByClass', (['"""vtkMRMLTableNode"""'], {}), "('vtkMRMLTableNode')\n", (8409, 8429), False, 'import vtk, qt, ctk, slicer\n'), ((8488, 8507), 'vtk.vtkFloatArray', 'vtk.vtkFloatArray', ([], {}), '()\n', (8505, 8507), False, 'import vtk, qt, ctk, slicer\n'), ((8580, 8599), 'vtk.vtkFloatArray', 'vtk.vtkFloatArray', ([], {}), '()\n', (8597, 8599), False, 'import vtk, qt, ctk, slicer\n'), ((8708, 8802), 'slicer.mrmlScene.AddNewNodeByClass', 'slicer.mrmlScene.AddNewNodeByClass', (['"""vtkMRMLPlotSeriesNode"""', '"""Bone threshold segmentation"""'], {}), "('vtkMRMLPlotSeriesNode',\n 'Bone threshold segmentation')\n", (8742, 8802), False, 'import vtk, qt, ctk, slicer\n'), ((9214, 9272), 'slicer.mrmlScene.AddNewNodeByClass', 'slicer.mrmlScene.AddNewNodeByClass', (['"""vtkMRMLPlotChartNode"""'], {}), "('vtkMRMLPlotChartNode')\n", (9248, 9272), False, 'import vtk, qt, ctk, slicer\n'), ((7695, 7728), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['gtCT_sitk'], {}), '(gtCT_sitk)\n', (7717, 7728), True, 'import SimpleITK as sitk\n'), ((7758, 7790), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['sCT_sitk'], {}), '(sCT_sitk)\n', (7780, 7790), True, 'import SimpleITK as sitk\n'), ((9826, 9877), 'SimpleITK.Mask', 'sitk.Mask', (['gtCT_sitk', 'mask_sitk'], {'outsideValue': '(-1000)'}), '(gtCT_sitk, mask_sitk, outsideValue=-1000)\n', (9835, 9877), True, 'import SimpleITK as sitk\n'), ((9990, 10040), 'SimpleITK.Mask', 'sitk.Mask', (['sCT_sitk', 'mask_sitk'], {'outsideValue': '(-1000)'}), '(sCT_sitk, mask_sitk, outsideValue=-1000)\n', (9999, 10040), True, 'import SimpleITK as sitk\n'), ((10493, 10521), 'slicer.modules.plots.logic', 'slicer.modules.plots.logic', ([], {}), '()\n', (10519, 10521), False, 'import vtk, qt, ctk, slicer\n'), ((8145, 
8167), 'numpy.abs', 'np.abs', (['img_difference'], {}), '(img_difference)\n', (8151, 8167), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
import torch as torch
def make_data_gap(seed, data_count=100):
    """Sample a noisy function from a GP prior over x in [-5,-2] U [2,5].

    A gap is left in (-2, 2) so models can be probed for uncertainty where
    there is no data. The sampled function and noisy targets are also plotted.

    Args:
        seed: random seed controlling the GP draw and the noise.
        data_count: requested number of samples, split evenly over the two
            intervals (odd counts yield 2*floor(data_count/2) samples).

    Returns:
        (x, y, x, y) as torch.FloatTensors; train and test sets coincide.
    """
    import GPy
    # BUGFIX: the seed argument was previously ignored (npr.seed(0) was hard-coded).
    npr.seed(seed)
    x = np.hstack([np.linspace(-5, -2, int(data_count/2)), np.linspace(2, 5, int(data_count/2))])
    x = x[:, np.newaxis]
    # BUGFIX: use the actual number of samples (2*floor(data_count/2)) so the
    # jitter matrix and noise draws match K's shape for odd data_count.
    n = x.shape[0]
    k = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.)
    K = k.K(x)
    # Add jitter to the kernel matrix for numerical stability before Cholesky.
    L = np.linalg.cholesky(K + 1e-5 * np.eye(n))
    # draw a noise free random function from a GP
    eps = np.random.randn(n)
    f = L @ eps
    # use a homoskedastic Gaussian noise model N(f(x)_i, \sigma^2). \sigma^2 = 0.1
    eps_noise = np.sqrt(0.1) * np.random.randn(n)
    y = f + eps_noise
    y = y[:, np.newaxis]
    plt.plot(x, f, 'ko', ms=2)
    plt.plot(x, y, 'ro')
    plt.title("GP generated Data")
    plt.pause(1)
    return torch.FloatTensor(x), torch.FloatTensor(y), torch.FloatTensor(x), torch.FloatTensor(y)
def make_data_sine(seed, data_count=450):
    """Generate a standardised noisy sine-wave regression dataset.

    Draws data_count points of sin(x) on [-4, 4] corrupted by Gaussian noise
    of variance 0.1, randomly assigns 20% of them to the training split, and
    standardises inputs and targets using training-set statistics.

    Args:
        seed: random seed for reproducibility.
        data_count: total number of generated points.

    Returns:
        (X_train, y_train, X_test, y_test, train_stats): FloatTensors plus a
        dict with the target mean ('mu') and std ('sigma') as FloatTensors.
    """
    # fix the random seed
    np.random.seed(seed)
    noise_var = 0.1

    xs = np.linspace(-4, 4, data_count)
    ys = 1 * np.sin(xs) + np.sqrt(noise_var) * npr.randn(data_count)

    # Random 20/80 train/test split.
    n_train = int(0.2 * data_count)
    order = npr.permutation(range(data_count))
    train_idx, test_idx = order[:n_train], order[n_train:]
    X_train = xs[train_idx, np.newaxis]
    X_test = xs[test_idx, np.newaxis]
    y_train = ys[train_idx]
    y_test = ys[test_idx]

    # Standardise the inputs with training-set statistics.
    x_mu, x_sigma = np.mean(X_train, 0), np.std(X_train, 0)
    X_train = (X_train - x_mu) / x_sigma
    X_test = (X_test - x_mu) / x_sigma

    # Standardise the targets with training-set statistics.
    y_mu, y_sigma = np.mean(y_train, 0), np.std(y_train, 0)
    y_train = (y_train - y_mu) / y_sigma
    y_test = (y_test - y_mu) / y_sigma

    train_stats = {'mu': torch.FloatTensor([y_mu]),
                   'sigma': torch.FloatTensor([y_sigma])}
    return (torch.FloatTensor(X_train), torch.FloatTensor(y_train),
            torch.FloatTensor(X_test), torch.FloatTensor(y_test),
            train_stats)
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.std",
"torch.FloatTensor",
"numpy.mean",
"numpy.sin",
"GPy.kern.RBF",
"numpy.linspace",
"numpy.eye",
"matplotlib.pyplot.pause",
"numpy.sqrt"
] | [((162, 173), 'numpy.random.seed', 'npr.seed', (['(0)'], {}), '(0)\n', (170, 173), True, 'import numpy.random as npr\n'), ((305, 361), 'GPy.kern.RBF', 'GPy.kern.RBF', ([], {'input_dim': '(1)', 'variance': '(1.0)', 'lengthscale': '(1.0)'}), '(input_dim=1, variance=1.0, lengthscale=1.0)\n', (317, 361), False, 'import GPy\n'), ((494, 521), 'numpy.random.randn', 'np.random.randn', (['data_count'], {}), '(data_count)\n', (509, 521), True, 'import numpy as np\n'), ((735, 761), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'f', '"""ko"""'], {'ms': '(2)'}), "(x, f, 'ko', ms=2)\n", (743, 761), True, 'import matplotlib.pyplot as plt\n'), ((766, 786), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ro"""'], {}), "(x, y, 'ro')\n", (774, 786), True, 'import matplotlib.pyplot as plt\n'), ((791, 821), 'matplotlib.pyplot.title', 'plt.title', (['"""GP generated Data"""'], {}), "('GP generated Data')\n", (800, 821), True, 'import matplotlib.pyplot as plt\n'), ((826, 838), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (835, 838), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1031), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1025, 1031), True, 'import numpy as np\n'), ((1061, 1091), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', 'data_count'], {}), '(-4, 4, data_count)\n', (1072, 1091), True, 'import numpy as np\n'), ((1421, 1440), 'numpy.mean', 'np.mean', (['X_train', '(0)'], {}), '(X_train, 0)\n', (1428, 1440), True, 'import numpy as np\n'), ((1451, 1469), 'numpy.std', 'np.std', (['X_train', '(0)'], {}), '(X_train, 0)\n', (1457, 1469), True, 'import numpy as np\n'), ((1547, 1566), 'numpy.mean', 'np.mean', (['y_train', '(0)'], {}), '(y_train, 0)\n', (1554, 1566), True, 'import numpy as np\n'), ((1577, 1595), 'numpy.std', 'np.std', (['y_train', '(0)'], {}), '(y_train, 0)\n', (1583, 1595), True, 'import numpy as np\n'), ((1739, 1762), 'torch.FloatTensor', 'torch.FloatTensor', (['[mu]'], {}), '([mu])\n', (1756, 1762), True, 
'import torch as torch\n'), ((1790, 1814), 'torch.FloatTensor', 'torch.FloatTensor', (['[std]'], {}), '([std])\n', (1807, 1814), True, 'import torch as torch\n'), ((640, 652), 'numpy.sqrt', 'np.sqrt', (['(0.1)'], {}), '(0.1)\n', (647, 652), True, 'import numpy as np\n'), ((655, 682), 'numpy.random.randn', 'np.random.randn', (['data_count'], {}), '(data_count)\n', (670, 682), True, 'import numpy as np\n'), ((850, 870), 'torch.FloatTensor', 'torch.FloatTensor', (['x'], {}), '(x)\n', (867, 870), True, 'import torch as torch\n'), ((872, 892), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (889, 892), True, 'import torch as torch\n'), ((894, 914), 'torch.FloatTensor', 'torch.FloatTensor', (['x'], {}), '(x)\n', (911, 914), True, 'import torch as torch\n'), ((916, 936), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (933, 936), True, 'import torch as torch\n'), ((1826, 1852), 'torch.FloatTensor', 'torch.FloatTensor', (['X_train'], {}), '(X_train)\n', (1843, 1852), True, 'import torch as torch\n'), ((1854, 1880), 'torch.FloatTensor', 'torch.FloatTensor', (['y_train'], {}), '(y_train)\n', (1871, 1880), True, 'import torch as torch\n'), ((1882, 1907), 'torch.FloatTensor', 'torch.FloatTensor', (['X_test'], {}), '(X_test)\n', (1899, 1907), True, 'import torch as torch\n'), ((1909, 1934), 'torch.FloatTensor', 'torch.FloatTensor', (['y_test'], {}), '(y_test)\n', (1926, 1934), True, 'import torch as torch\n'), ((1102, 1111), 'numpy.sin', 'np.sin', (['X'], {}), '(X)\n', (1108, 1111), True, 'import numpy as np\n'), ((1114, 1132), 'numpy.sqrt', 'np.sqrt', (['noise_var'], {}), '(noise_var)\n', (1121, 1132), True, 'import numpy as np\n'), ((1133, 1154), 'numpy.random.randn', 'npr.randn', (['data_count'], {}), '(data_count)\n', (1142, 1154), True, 'import numpy.random as npr\n'), ((413, 431), 'numpy.eye', 'np.eye', (['data_count'], {}), '(data_count)\n', (419, 431), True, 'import numpy as np\n')] |
import numpy as np
from environment import Environment
import sys
def get_epsilon_greedy_policy(Q, epsilon, num_actions):
    """Build an epsilon-greedy behaviour policy over a Q-table.

    Args:
        Q: action-value table indexed as Q[state][action].
        epsilon: exploration probability mass spread over all actions.
        num_actions: number of available actions.

    Returns:
        A function mapping a state index to a length-num_actions probability
        vector: every action receives epsilon / num_actions, and the greedy
        action additionally receives the remaining (1 - epsilon) mass.
    """
    def policy_fn(state):
        probs = np.ones(num_actions, dtype=float) * epsilon / num_actions
        probs[np.argmax(Q[state])] += 1.0 - epsilon
        return probs
    return policy_fn
def q_learning(env, num_episodes, max_episode_length, learning_rate, discount_factor, epsilon):
Q = np.zeros((env.num_states, env.num_actions))
policy = get_epsilon_greedy_policy(Q, epsilon, env.num_actions)
for _ in range(0, num_episodes):
state = env.reset()
for _ in range(0, max_episode_length):
action_probs = policy(state)
action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
next_state, reward, is_terminal = env.step(action)
best_next_action = np.argmax(Q[next_state])
td_target = reward + discount_factor * Q[next_state][best_next_action]
Q[state][action] += learning_rate * (td_target - Q[state][action])
if is_terminal == 1:
break
state = env.curr_state
V = np.zeros(env.num_states)
for s in range(0, env.num_states):
V[s] = np.max(Q[s])
policy = np.zeros((env.num_states, env.num_actions))
for s in range(0, env.num_states):
best_action = np.argmax(Q[s])
policy[s][best_action] = 1.0
return policy, V, Q
if __name__ == "__main__":
maze_input = sys.argv[1]
value_file = sys.argv[2]
q_value_file = sys.argv[3]
policy_file = sys.argv[4]
num_episodes = int(sys.argv[5])
max_episode_length = int(sys.argv[6])
learning_rate = float(sys.argv[7])
discount_factor = float(sys.argv[8])
epsilon = float(sys.argv[9])
env = Environment(maze_input)
policy, V, Q = q_learning(env, num_episodes, max_episode_length, learning_rate, discount_factor, epsilon)
new_policy = np.array(np.argmax(policy, axis=1), np.float32)
value_string = ""
policy_string = ""
Q_string = ""
for k, v in env.state_graph.items():
k = int(k)
value_string += str(v[0]) + " " + str(v[1]) + " " + str(V[k]) + "\n"
policy_string += str(v[0]) + " " + str(v[1]) + " " + str(new_policy[k]) + "\n"
for a in range(0, env.num_actions):
Q_string += str(v[0]) + " " + str(v[1]) + " " + str(a) + " " + str(Q[k][a]) + "\n"
value_string = value_string.strip()
policy_string = policy_string.strip()
Q_string = Q_string.strip()
with open(value_file, 'w') as outfile:
outfile.writelines(value_string)
with open(policy_file, 'w') as outfile:
outfile.writelines(policy_string)
with open(q_value_file, 'w') as outfile:
outfile.writelines(Q_string) | [
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"numpy.max",
"environment.Environment"
] | [((473, 516), 'numpy.zeros', 'np.zeros', (['(env.num_states, env.num_actions)'], {}), '((env.num_states, env.num_actions))\n', (481, 516), True, 'import numpy as np\n'), ((1208, 1232), 'numpy.zeros', 'np.zeros', (['env.num_states'], {}), '(env.num_states)\n', (1216, 1232), True, 'import numpy as np\n'), ((1314, 1357), 'numpy.zeros', 'np.zeros', (['(env.num_states, env.num_actions)'], {}), '((env.num_states, env.num_actions))\n', (1322, 1357), True, 'import numpy as np\n'), ((1846, 1869), 'environment.Environment', 'Environment', (['maze_input'], {}), '(maze_input)\n', (1857, 1869), False, 'from environment import Environment\n'), ((257, 282), 'numpy.argmax', 'np.argmax', (['Q[observation]'], {}), '(Q[observation])\n', (266, 282), True, 'import numpy as np\n'), ((1287, 1299), 'numpy.max', 'np.max', (['Q[s]'], {}), '(Q[s])\n', (1293, 1299), True, 'import numpy as np\n'), ((1419, 1434), 'numpy.argmax', 'np.argmax', (['Q[s]'], {}), '(Q[s])\n', (1428, 1434), True, 'import numpy as np\n'), ((2007, 2032), 'numpy.argmax', 'np.argmax', (['policy'], {'axis': '(1)'}), '(policy, axis=1)\n', (2016, 2032), True, 'import numpy as np\n'), ((920, 944), 'numpy.argmax', 'np.argmax', (['Q[next_state]'], {}), '(Q[next_state])\n', (929, 944), True, 'import numpy as np\n'), ((177, 210), 'numpy.ones', 'np.ones', (['num_actions'], {'dtype': 'float'}), '(num_actions, dtype=float)\n', (184, 210), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
data = np.load("./cifar-10-test-data.npz")
labels = data.f.labels
data = data.f.data
w = tf.io.TFRecordWriter("./cifar-10-test-data.tfrecords")
for i in range(10000):
example = tf.train.Example(
features=tf.train.Features(
feature={
"data": tf.train.Feature(bytes_list=tf.train.BytesList(value=[data[i].tobytes()])),
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[labels[i]])),
}
)
)
w.write(example.SerializeToString())
w.close()
def map_func(example):
feature_map = {
'data': tf.FixedLenFeature((), tf.string),
'label': tf.FixedLenFeature((), tf.int64)
}
parsed_example = tf.parse_single_example(example, features=feature_map)
data = tf.decode_raw(parsed_example["data"], out_type=tf.uint8)
data = tf.reshape(data, [32, 32, 3])
label = parsed_example["label"]
return data, label
| [
"numpy.load",
"tensorflow.train.Int64List",
"tensorflow.reshape",
"tensorflow.decode_raw",
"tensorflow.parse_single_example",
"tensorflow.FixedLenFeature",
"tensorflow.io.TFRecordWriter"
] | [((52, 87), 'numpy.load', 'np.load', (['"""./cifar-10-test-data.npz"""'], {}), "('./cifar-10-test-data.npz')\n", (59, 87), True, 'import numpy as np\n'), ((136, 190), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['"""./cifar-10-test-data.tfrecords"""'], {}), "('./cifar-10-test-data.tfrecords')\n", (156, 190), True, 'import tensorflow as tf\n'), ((752, 806), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['example'], {'features': 'feature_map'}), '(example, features=feature_map)\n', (775, 806), True, 'import tensorflow as tf\n'), ((818, 874), 'tensorflow.decode_raw', 'tf.decode_raw', (["parsed_example['data']"], {'out_type': 'tf.uint8'}), "(parsed_example['data'], out_type=tf.uint8)\n", (831, 874), True, 'import tensorflow as tf\n'), ((886, 915), 'tensorflow.reshape', 'tf.reshape', (['data', '[32, 32, 3]'], {}), '(data, [32, 32, 3])\n', (896, 915), True, 'import tensorflow as tf\n'), ((640, 673), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {}), '((), tf.string)\n', (658, 673), True, 'import tensorflow as tf\n'), ((692, 724), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64'], {}), '((), tf.int64)\n', (710, 724), True, 'import tensorflow as tf\n'), ((457, 494), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[labels[i]]'}), '(value=[labels[i]])\n', (475, 494), True, 'import tensorflow as tf\n')] |
import sys
import petsc4py
petsc4py.init(sys.argv)
# from scipy.io import savemat, loadmat
# from src.ref_solution import *
# import warnings
# from memory_profiler import profile
# from time import time
import pickle
import numpy as np
from src import stokes_flow as sf
from src.stokes_flow import problem_dic, obj_dic, StokesFlowObj
from petsc4py import PETSc
from src.geo import *
from src.myio import *
from src.objComposite import *
from src.myvtk import *
from src.StokesFlowMethod import *
from src.stokesletsInPipe import *
def print_case_info(**problem_kwargs):
fileHandle = problem_kwargs['fileHandle']
print_solver_info(**problem_kwargs)
print_sphere_info(fileHandle, **problem_kwargs)
return True
def get_problem_kwargs(**main_kwargs):
problem_kwargs = get_solver_kwargs()
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'try_dual')
OptDB.setValue('f', fileHandle)
problem_kwargs['fileHandle'] = fileHandle
kwargs_list = (main_kwargs, get_vtk_tetra_kwargs(), get_sphere_kwargs(),)
for t_kwargs in kwargs_list:
for key in t_kwargs:
problem_kwargs[key] = t_kwargs[key]
return problem_kwargs
def main_fun(**main_kwargs):
OptDB = PETSc.Options()
# main_kwargs['matrix_method'] = 'pf_dualPotential'
main_kwargs['matrix_method'] = 'pf'
problem_kwargs = get_problem_kwargs(**main_kwargs)
matrix_method = problem_kwargs['matrix_method']
fileHandle = problem_kwargs['fileHandle']
print_case_info(**problem_kwargs)
GreenFunThreshold = 50
# place a force in the tunnel to solve boundary condition
b = OptDB.getReal('b', 0.1)
stokeslets_post = np.array((b, 0, 0)).reshape(1, 3)
stokeslets_f = np.array((1, 0, 0))
stokeslets_f_petsc = PETSc.Vec().create(comm=PETSc.COMM_WORLD)
stokeslets_f_petsc.setSizes(3)
stokeslets_f_petsc.setFromOptions()
stokeslets_f_petsc.setUp()
stokeslets_f_petsc[:] = stokeslets_f[:]
stokeslets_f_petsc.assemble()
img_stokeslets_post = np.array((2 - b, 0, 0)).reshape(1, 3)
img_stokeslets_f = np.array((0, 0, 0))
img_stokeslets_f_petsc = PETSc.Vec().create(comm=PETSc.COMM_WORLD)
img_stokeslets_f_petsc.setSizes(3)
img_stokeslets_f_petsc.setFromOptions()
img_stokeslets_f_petsc.setUp()
img_stokeslets_f_petsc[:] = img_stokeslets_f[:]
img_stokeslets_f_petsc.assemble()
# Tunnel geo
nt = OptDB.getReal('nt', 3)
tfct = OptDB.getReal('tfct', 1)
dth = 2 * np.pi / nt
tunnel_length = 2
tunnel_u_geo = tunnel_geo() # pf, force geo
if 'dualPotential' in matrix_method:
tunnel_u_geo.set_dof(4)
epsilon = OptDB.getReal('et', 1)
tunnel_f_geo = tunnel_u_geo.create_deltatheta(dth=dth, radius=1, length=tunnel_length, epsilon=epsilon,
with_cover=2, factor=tfct)
# 1). offset stokeslets velocity
m0_petsc = light_stokeslets_matrix_3d_petsc(tunnel_u_geo.get_nodes(), stokeslets_post)
u0_petsc = m0_petsc.createVecLeft()
m0_petsc.mult(stokeslets_f_petsc, u0_petsc)
scatter, temp = PETSc.Scatter().toAll(u0_petsc)
scatter.scatterBegin(u0_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
scatter.scatterEnd(u0_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
u0 = temp.getArray()
u0_petsc.destroy()
img_m0_petsc = light_stokeslets_matrix_3d_petsc(tunnel_u_geo.get_nodes(), img_stokeslets_post)
img_u0_petsc = img_m0_petsc.createVecLeft()
img_m0_petsc.mult(img_stokeslets_f_petsc, img_u0_petsc)
scatter, temp = PETSc.Scatter().toAll(img_u0_petsc)
scatter.scatterBegin(img_u0_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
scatter.scatterEnd(img_u0_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
img_u0 = temp.getArray()
img_u0_petsc.destroy()
# 2). stokeslets in pipe velocity
greenFun = detail(threshold=GreenFunThreshold, b=np.sqrt(np.sum(stokeslets_post ** 2)))
greenFun.solve_prepare()
u1, u2, u3 = greenFun.solve_uxyz(tunnel_u_geo.get_nodes())
tunnel_u_geo.set_velocity((u1 * stokeslets_f[0] + u2 * stokeslets_f[1] + u3 * stokeslets_f[2]).flatten() - u0 - img_u0)
# tunnel_u_geo.set_velocity(u0 + img_u0)
obj_tunnel = obj_dic[matrix_method]()
obj_tunnel.set_data(tunnel_f_geo, tunnel_u_geo, name='tunnel')
problem = problem_dic[matrix_method](**problem_kwargs)
problem.add_obj(obj_tunnel)
problem.show_velocity(length_factor=1, show_nodes=False)
problem.print_info()
problem.create_matrix()
problem.solve()
tunnel_geo_check = tunnel_geo() # pf, force geo
dth = 2 * np.pi / 30
tunnel_geo_check.create_deltatheta(dth=dth, radius=1, length=tunnel_length, epsilon=0, with_cover=1)
# 1). offset stokeslets velocity
m0_petsc = light_stokeslets_matrix_3d_petsc(tunnel_geo_check.get_nodes(), stokeslets_post)
u0_petsc = m0_petsc.createVecLeft()
m0_petsc.mult(stokeslets_f_petsc, u0_petsc)
scatter, temp = PETSc.Scatter().toAll(u0_petsc)
scatter.scatterBegin(u0_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
scatter.scatterEnd(u0_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
u0 = temp.getArray()
u0_petsc.destroy()
img_m0_petsc = light_stokeslets_matrix_3d_petsc(tunnel_geo_check.get_nodes(), img_stokeslets_post)
img_u0_petsc = img_m0_petsc.createVecLeft()
img_m0_petsc.mult(img_stokeslets_f_petsc, img_u0_petsc)
scatter, temp = PETSc.Scatter().toAll(img_u0_petsc)
scatter.scatterBegin(img_u0_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
scatter.scatterEnd(img_u0_petsc, temp, False, PETSc.Scatter.Mode.FORWARD)
img_u0 = temp.getArray()
img_u0_petsc.destroy()
# 2). stokeslets in pipe velocity
greenFun = detail(threshold=GreenFunThreshold, b=np.sqrt(np.sum(stokeslets_post ** 2)))
greenFun.solve_prepare()
u1, u2, u3 = greenFun.solve_uxyz(tunnel_geo_check.get_nodes())
tunnel_geo_check.set_velocity((u1 * stokeslets_f[0] + u2 * stokeslets_f[1] + u3 * stokeslets_f[2]).flatten() - u0 - img_u0)
obj_check = obj_dic[matrix_method]()
obj_check.set_data(tunnel_geo_check, tunnel_geo_check, name='full')
tunnel_err = problem.vtk_check(fileHandle + '_Check_tunnel', obj_check)
PETSc.Sys.Print('velocity error of tunnel (total, x, y, z): ', next(tunnel_err))
return True
if __name__ == '__main__':
main_fun()
| [
"numpy.sum",
"petsc4py.init",
"petsc4py.PETSc.Vec",
"petsc4py.PETSc.Scatter",
"petsc4py.PETSc.Options",
"numpy.array"
] | [((28, 51), 'petsc4py.init', 'petsc4py.init', (['sys.argv'], {}), '(sys.argv)\n', (41, 51), False, 'import petsc4py\n'), ((823, 838), 'petsc4py.PETSc.Options', 'PETSc.Options', ([], {}), '()\n', (836, 838), False, 'from petsc4py import PETSc\n'), ((1229, 1244), 'petsc4py.PETSc.Options', 'PETSc.Options', ([], {}), '()\n', (1242, 1244), False, 'from petsc4py import PETSc\n'), ((1729, 1748), 'numpy.array', 'np.array', (['(1, 0, 0)'], {}), '((1, 0, 0))\n', (1737, 1748), True, 'import numpy as np\n'), ((2087, 2106), 'numpy.array', 'np.array', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (2095, 2106), True, 'import numpy as np\n'), ((1676, 1695), 'numpy.array', 'np.array', (['(b, 0, 0)'], {}), '((b, 0, 0))\n', (1684, 1695), True, 'import numpy as np\n'), ((1774, 1785), 'petsc4py.PETSc.Vec', 'PETSc.Vec', ([], {}), '()\n', (1783, 1785), False, 'from petsc4py import PETSc\n'), ((2026, 2049), 'numpy.array', 'np.array', (['(2 - b, 0, 0)'], {}), '((2 - b, 0, 0))\n', (2034, 2049), True, 'import numpy as np\n'), ((2136, 2147), 'petsc4py.PETSc.Vec', 'PETSc.Vec', ([], {}), '()\n', (2145, 2147), False, 'from petsc4py import PETSc\n'), ((3099, 3114), 'petsc4py.PETSc.Scatter', 'PETSc.Scatter', ([], {}), '()\n', (3112, 3114), False, 'from petsc4py import PETSc\n'), ((3556, 3571), 'petsc4py.PETSc.Scatter', 'PETSc.Scatter', ([], {}), '()\n', (3569, 3571), False, 'from petsc4py import PETSc\n'), ((4956, 4971), 'petsc4py.PETSc.Scatter', 'PETSc.Scatter', ([], {}), '()\n', (4969, 4971), False, 'from petsc4py import PETSc\n'), ((5417, 5432), 'petsc4py.PETSc.Scatter', 'PETSc.Scatter', ([], {}), '()\n', (5430, 5432), False, 'from petsc4py import PETSc\n'), ((3905, 3933), 'numpy.sum', 'np.sum', (['(stokeslets_post ** 2)'], {}), '(stokeslets_post ** 2)\n', (3911, 3933), True, 'import numpy as np\n'), ((5766, 5794), 'numpy.sum', 'np.sum', (['(stokeslets_post ** 2)'], {}), '(stokeslets_post ** 2)\n', (5772, 5794), True, 'import numpy as np\n')] |
import os
import numpy as np
import basicIO as bio
import random
def writeRTFile(filename, R, T, S=None):
with open(filename, 'w') as fp:
fp.write('%f %f %f %f %f %f %f %f %f\n' % (
R[0, 0], R[0, 1], R[0, 2],
R[1, 0], R[1, 1], R[1, 2],
R[2, 0], R[2, 1], R[2, 2]
))
fp.write('%f %f %f\n' % (T[0], T[1], T[2]))
if not (S==None):
fp.write('%f\n' % S)
def isInlier(srcPoint, tarPoint, R, T, thresh=0.01):
flag = False
diffPoint = R.dot(srcPoint) + T - tarPoint
diffDis = np.linalg.norm(diffPoint)
if diffDis < thresh:
flag = True
return flag, diffDis
def sim3AlignRansac(X, Y, sampleDim=0, goalInlierRatio=0.5, sampleRatio=0.3, maxIterations=100, inlierThresh=0.01, randomSeed=None):
bestCnt = 0
bestAlignDis = -1
bestR = np.identity(3)
bestT = np.zeros(3, )
bestS = 1
if not sampleDim==0:
X = X.T
Y = Y.T
if not (X.shape[0]==3 and Y.shape[0]==3):
print('X.shape[0] !=3 || Y.shape[0] != 3\n')
return bestR, bestT, bestS, bestAlignDis
if not (X.shape[1] == Y.shape[1]):
print('unmatched sample numbers of X and Y.\n')
return bestR, bestT, bestS, bestAlignDis
numTotal = X.shape[1]
goalInliers = int(numTotal * goalInlierRatio)
numSamples = int(numTotal * sampleRatio)
if numSamples < 3:
print('numSamples < 3\n')
return bestR, bestT, bestS, bestAlignDis
bestCnt = 0
bestAlignDis = -1
random.seed(randomSeed)
totalIdx = list(range(numTotal))
for i in range(maxIterations):
sampledIdx = random.sample(totalIdx, numSamples)
R, t, s = sim3Align(X[:, sampledIdx], Y[:, sampledIdx])
cnt = 0
alignDisSum = 0
for j in range(numTotal):
inlierFlag, alignDis = isInlier(X[:, j], Y[:, j], s*R, t, thresh=inlierThresh)
if inlierFlag:
cnt += 1
alignDisSum += alignDis
if cnt > bestCnt:
bestCnt = cnt
bestR = R
bestT = t
bestS = s
bestAlignDis = alignDisSum/cnt
if cnt > goalInliers:
break
print('took iterations:', i + 1, 'bestAlignDis:', bestAlignDis, 'bestCnt:', bestCnt)
return bestR, bestT, bestS, bestAlignDis
def sim3Align(X, Y, sampleDim = 0):
if not sampleDim==0:
X = X.T
Y = Y.T
if not (X.shape[0]==3 and Y.shape[0]==3):
raise Exception('X.shape[0] !=3 || Y.shape[0] != 3\n')
if not (X.shape[1] == Y.shape[1]):
raise Exception('unmatched sample numbers of X and Y.\n')
# some basic quantity
numSamples = X.shape[1]
muX = X.sum(axis=1) / numSamples
muY = Y.sum(axis=1) / numSamples
muXTiled = np.tile(muX.reshape(3,1), numSamples)
sigma2X = np.power(X-muXTiled, 2).sum()/numSamples
covXY = np.zeros((3, 3))
for i in range(numSamples):
cov = np.matmul((Y[:, i] - muY).reshape(3, 1), (X[:, i] - muX).reshape(1, 3))
covXY += cov
covXY /= numSamples
# svd
U, D, V = np.linalg.svd(covXY)
r = np.linalg.matrix_rank(covXY)
S = np.identity(3)
if np.linalg.det(covXY)<0: S[2,2] = -1
R = np.matmul(U, np.matmul(S, V))
diagD = np.diag(D)
s = np.trace(np.matmul(diagD, S)) / sigma2X
t = muY - s*R.dot(muX)
return R, t, s
def rotmat2qvec(R):
Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
K = np.array([
[Rxx - Ryy - Rzz, 0, 0, 0],
[Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
[Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
[Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
eigvals, eigvecs = np.linalg.eigh(K)
qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
if qvec[0] < 0:
qvec *= -1
return qvec
def qvec2rotmat(qvec):
return np.array([
[1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
[2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
def readRT(filename):
floatLines = bio.readFloatLines(filename)
RLine = floatLines[0]
R = np.array([[RLine[0], RLine[1], RLine[2]],
[RLine[3], RLine[4], RLine[5]],
[RLine[6], RLine[7], RLine[8]]])
T = np.array(floatLines[1])
return R, T
def transformPointsList(pointsList, R, T):
transformedPoints = []
for point in pointsList:
newPoint = R.dot(point) + T
transformedPoints.append(newPoint)
return transformedPoints
def readTrajectoryTUM(filename):
lines = []
with open(filename) as fp:
lines = fp.readlines()
trajLines = []
for line in lines:
line = line.strip()
if len(line) < 1:
continue
line_parts = line.split(' ')
traj = []
traj.append(line_parts[0])
for i in range(1, len(line_parts)):
traj.append(float(line_parts[i]))
trajLines.append(traj)
return trajLines
def getMatchedFramePositon(alignTrajList, refTrajList):
# loca traj name List
refTrajNameList = []
for traj in refTrajList:
refTrajNameList.append(traj[0])
# match global traj with local traj
matchedFrameName = []
matchedAlignFramePos = []
matchedRefFramePos = []
for traj in alignTrajList:
name = traj[0]
refIdx = -1
if name in refTrajNameList:
refIdx = refTrajNameList.index(name)
if refIdx >= 0:
matchedFrameName.append(name)
# align pos WC
alignTWC = [traj[1], traj[2], traj[3]]
matchedAlignFramePos.append(alignTWC)
# ref pose WC
refTraj = refTrajList[refIdx]
refTWC = [refTraj[1], refTraj[2], refTraj[3]]
matchedRefFramePos.append(refTWC)
return matchedFrameName, matchedAlignFramePos, matchedRefFramePos
def accuAdjTrans2Global(RList, TList):
numParts = len(RList)
globalRList = []
globalTList = []
for i in range(numParts):
if i == 0:
globalRList.append(RList[i])
globalTList.append(TList[i])
else:
globalT = (globalRList[i-1].dot(TList[i]) + globalTList[i-1]).copy()
globalR = (globalRList[i-1].dot(RList[i])).copy()
globalRList.append(globalR)
globalTList.append(globalT)
return globalRList, globalTList
if __name__ == "__main__":
workDir = "E:/lzx-data/TUM/rgbd_dataset_freiburg1_room"
trajFileName = "CameraTrajectory"
trajFile = os.path.join(workDir, trajFileName+".txt")
trajRotMatFile = os.path.join(workDir, trajFileName + "_rotmat.txt")
| [
"numpy.argmax",
"random.sample",
"numpy.power",
"numpy.zeros",
"numpy.identity",
"numpy.linalg.eigh",
"basicIO.readFloatLines",
"numpy.linalg.svd",
"random.seed",
"numpy.linalg.norm",
"numpy.linalg.matrix_rank",
"numpy.array",
"numpy.linalg.det",
"numpy.diag",
"numpy.matmul",
"os.path.... | [((588, 613), 'numpy.linalg.norm', 'np.linalg.norm', (['diffPoint'], {}), '(diffPoint)\n', (602, 613), True, 'import numpy as np\n'), ((880, 894), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (891, 894), True, 'import numpy as np\n'), ((908, 919), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (916, 919), True, 'import numpy as np\n'), ((1585, 1608), 'random.seed', 'random.seed', (['randomSeed'], {}), '(randomSeed)\n', (1596, 1608), False, 'import random\n'), ((3017, 3033), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3025, 3033), True, 'import numpy as np\n'), ((3233, 3253), 'numpy.linalg.svd', 'np.linalg.svd', (['covXY'], {}), '(covXY)\n', (3246, 3253), True, 'import numpy as np\n'), ((3263, 3291), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['covXY'], {}), '(covXY)\n', (3284, 3291), True, 'import numpy as np\n'), ((3301, 3315), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (3312, 3315), True, 'import numpy as np\n'), ((3416, 3426), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (3423, 3426), True, 'import numpy as np\n'), ((3854, 3871), 'numpy.linalg.eigh', 'np.linalg.eigh', (['K'], {}), '(K)\n', (3868, 3871), True, 'import numpy as np\n'), ((4022, 4464), 'numpy.array', 'np.array', (['[[1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, 2 * qvec[1] * qvec[2] - 2 * qvec\n [0] * qvec[3], 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]], [2 *\n qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], 1 - 2 * qvec[1] ** 2 - 2 * \n qvec[3] ** 2, 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], [2 * qvec\n [3] * qvec[1] - 2 * qvec[0] * qvec[2], 2 * qvec[2] * qvec[3] + 2 * qvec\n [0] * qvec[1], 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2]]'], {}), '([[1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2, 2 * qvec[1] * qvec[2] -\n 2 * qvec[0] * qvec[3], 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], 1 - 2 * qvec[1] ** 2 - \n 2 * qvec[3] ** 2, 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], [2 *\n 
qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], 2 * qvec[2] * qvec[3] + 2 *\n qvec[0] * qvec[1], 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2]])\n', (4030, 4464), True, 'import numpy as np\n'), ((4563, 4591), 'basicIO.readFloatLines', 'bio.readFloatLines', (['filename'], {}), '(filename)\n', (4581, 4591), True, 'import basicIO as bio\n'), ((4632, 4743), 'numpy.array', 'np.array', (['[[RLine[0], RLine[1], RLine[2]], [RLine[3], RLine[4], RLine[5]], [RLine[6],\n RLine[7], RLine[8]]]'], {}), '([[RLine[0], RLine[1], RLine[2]], [RLine[3], RLine[4], RLine[5]], [\n RLine[6], RLine[7], RLine[8]]])\n', (4640, 4743), True, 'import numpy as np\n'), ((4788, 4811), 'numpy.array', 'np.array', (['floatLines[1]'], {}), '(floatLines[1])\n', (4796, 4811), True, 'import numpy as np\n'), ((7155, 7199), 'os.path.join', 'os.path.join', (['workDir', "(trajFileName + '.txt')"], {}), "(workDir, trajFileName + '.txt')\n", (7167, 7199), False, 'import os\n'), ((7220, 7271), 'os.path.join', 'os.path.join', (['workDir', "(trajFileName + '_rotmat.txt')"], {}), "(workDir, trajFileName + '_rotmat.txt')\n", (7232, 7271), False, 'import os\n'), ((1707, 1742), 'random.sample', 'random.sample', (['totalIdx', 'numSamples'], {}), '(totalIdx, numSamples)\n', (1720, 1742), False, 'import random\n'), ((3326, 3346), 'numpy.linalg.det', 'np.linalg.det', (['covXY'], {}), '(covXY)\n', (3339, 3346), True, 'import numpy as np\n'), ((3386, 3401), 'numpy.matmul', 'np.matmul', (['S', 'V'], {}), '(S, V)\n', (3395, 3401), True, 'import numpy as np\n'), ((3616, 3795), 'numpy.array', 'np.array', (['[[Rxx - Ryy - Rzz, 0, 0, 0], [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], [Rzx + Rxz,\n Rzy + Ryz, Rzz - Rxx - Ryy, 0], [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx +\n Ryy + Rzz]]'], {}), '([[Rxx - Ryy - Rzz, 0, 0, 0], [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], [\n Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], [Ryz - Rzy, Rzx - Rxz, Rxy -\n Ryx, Rxx + Ryy + Rzz]])\n', (3624, 3795), True, 'import numpy as np\n'), ((3445, 3464), 'numpy.matmul', 'np.matmul', 
(['diagD', 'S'], {}), '(diagD, S)\n', (3454, 3464), True, 'import numpy as np\n'), ((3906, 3924), 'numpy.argmax', 'np.argmax', (['eigvals'], {}), '(eigvals)\n', (3915, 3924), True, 'import numpy as np\n'), ((2963, 2988), 'numpy.power', 'np.power', (['(X - muXTiled)', '(2)'], {}), '(X - muXTiled, 2)\n', (2971, 2988), True, 'import numpy as np\n')] |
"""
Author: <NAME>
Date: October 1, 2020
Email: <EMAIL>
Scope: App for Tensorflow Image classifier
"""
import numpy as np
from loguru import logger
from PIL import Image
from io import BytesIO
import tensorflow as tf
from tensorflow.keras.applications.imagenet_utils import decode_predictions
class DoggoModel(object):
def __init__(self):
self._load_local_model()
def _load_local_model(self):
self.model = tf.keras.applications.MobileNetV2(weights="imagenet")
def _pre_process(self, image):
logger.debug('Pre-processing image')
# Resize our image
image = np.asarray(image.resize((224, 224)))[..., :3]
image = np.expand_dims(image, 0)
image = image / 127.5 - 1.0
return image
def _predict(self, image):
logger.debug('Predicting...')
prediction = self.model.predict(image)
predictions = decode_predictions(prediction, 2)[0]
return predictions
def _post_process(self, predictions) -> list:
logger.debug('Post-processing')
response = []
for i, res in enumerate(predictions):
resp = {}
resp['class'] = res[1]
resp['confidence'] = f"{res[2]*100:0.2f} %"
response.append(resp)
return response
def predict(self, payload):
if payload is None:
raise ValueError(NO_VALID_PAYLOAD.format(payload))
pre_processed_payload = self._pre_process(payload)
prediction = self._predict(pre_processed_payload)
logger.info(prediction)
post_processed_result = self._post_process(prediction)
return post_processed_result
| [
"tensorflow.keras.applications.imagenet_utils.decode_predictions",
"numpy.expand_dims",
"loguru.logger.info",
"loguru.logger.debug",
"tensorflow.keras.applications.MobileNetV2"
] | [((438, 491), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (471, 491), True, 'import tensorflow as tf\n'), ((540, 576), 'loguru.logger.debug', 'logger.debug', (['"""Pre-processing image"""'], {}), "('Pre-processing image')\n", (552, 576), False, 'from loguru import logger\n'), ((682, 706), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (696, 706), True, 'import numpy as np\n'), ((804, 833), 'loguru.logger.debug', 'logger.debug', (['"""Predicting..."""'], {}), "('Predicting...')\n", (816, 833), False, 'from loguru import logger\n'), ((1026, 1057), 'loguru.logger.debug', 'logger.debug', (['"""Post-processing"""'], {}), "('Post-processing')\n", (1038, 1057), False, 'from loguru import logger\n'), ((1546, 1569), 'loguru.logger.info', 'logger.info', (['prediction'], {}), '(prediction)\n', (1557, 1569), False, 'from loguru import logger\n'), ((903, 936), 'tensorflow.keras.applications.imagenet_utils.decode_predictions', 'decode_predictions', (['prediction', '(2)'], {}), '(prediction, 2)\n', (921, 936), False, 'from tensorflow.keras.applications.imagenet_utils import decode_predictions\n')] |
from typing import List
import random
import pyrosetta
import rosetta
import yaml
from pyrosetta.rosetta.core.scoring.dssp import Dssp
from pyrosetta.rosetta.core.fragment import *
from pyrosetta import *
from pyrosetta.rosetta.protocols import *
from scipy.spatial import distance
import Levenshtein as levenshtein
import numpy as np
import pandas as pd
import math
from jmetal.core.problem import FloatProblem
from jmetal.core.solution import FloatSolution
from pyrosetta.rosetta.protocols.simple_moves import ClassicFragmentMover
def get_num_side_chain_angles(residue):
if (residue == 'GLY' or residue == 'G') or (residue == 'ALA' or residue == 'A'):
return 0
elif (residue == 'SER' or residue == 'S') or (residue == 'CYS' or residue == 'C') or (
residue == 'PRO' or residue == 'P') or (residue == 'THR' or residue == 'T') or (
residue == 'VAL' or residue == 'V'):
return 1
elif (residue == 'ILE' or residue == 'I') or (residue == 'LEU' or residue == 'L') or (
residue == 'ASP' or residue == 'D') or (residue == 'ASN' or residue == 'N') or (
residue == 'PHE' or residue == 'F') or (residue == 'TYR' or residue == 'Y') or (
residue == 'HIS' or residue == 'H') or (residue == 'TRP' or residue == 'W'):
return 2
elif (residue == 'MET' or residue == 'M') or (residue == 'GLU' or residue == 'E') or (
residue == 'GLN' or residue == 'Q'):
return 3
elif (residue == 'LYS' or residue == 'K') or (residue == 'ARG' or residue == 'R'):
return 4
else:
return 0
class ProteinStructurePrediction(FloatProblem):
def __init__(self, protein_config_file: str = None, energy_type: str = 'centroid', use_contact_maps: bool = False):
""" jMetal common structure """
super(ProteinStructurePrediction, self).__init__()
self.number_of_objectives = 1
self.number_of_constraints = 0
self.obj_directions = [self.MINIMIZE]
self.obj_labels = ['Rosetta Energy Unit']
FloatSolution.lower_bound = self.lower_bound
FloatSolution.upper_bound = self.upper_bound
""" Specific configurations """
self.rosetta_energy_type = energy_type
self.pose = None
self.temp_pose = None
self.scorefxn = None
self.fasta = None
self.secondary_structure = None
self.protein_name = None
self.contact_map_file = None
self.contact_map_df = None
self.cm_l2 = None
self.mc_l2 = {}
self.aa_pair_l2 = []
self.read_parameters(protein_config_file)
self.use_contact_maps = use_contact_maps
if self.use_contact_maps:
self.read_contact_map()
self.start_rosetta_energy_function()
self.lower_bound = [-180 for _ in range(self.number_of_variables)]
self.upper_bound = [180 for _ in range(self.number_of_variables)]
self.centroid_switch = pyrosetta.SwitchResidueTypeSetMover("centroid")
self.fa_switch = pyrosetta.SwitchResidueTypeSetMover("fa_standard")
def read_parameters(self, protein_config_file: str):
if protein_config_file is None:
raise FileNotFoundError('Protein information can not be None')
with open(protein_config_file, 'r') as stream:
try:
config = yaml.load(stream, Loader=yaml.FullLoader)
self.protein_name = config['protein_name']
self.fasta = config['fasta']
self.secondary_structure = config['secondary_structure']
self.contact_map_file = config['contact_map']
except yaml.YAMLError as error:
raise error
def read_contact_map(self):
self.contact_map_df = pd.read_csv(self.contact_map_file, sep=' ', usecols=[0, 1, 2], names=['i', 'j', 'prob'])
l_size = int(len(self.contact_map_df) / 2)
self.cm_l2 = self.contact_map_df.iloc[:l_size]
self.cm_l2 = self.cm_l2.to_numpy()
def calculate_SASA(self, pose):
self.temp_pose.assign(pose)
if self.rosetta_energy_type == "centroid":
self.fa_switch.apply(self.temp_pose)
sasa_score = rosetta.core.scoring.calc_total_sasa(self.temp_pose, 1.5)
else:
sasa_score = rosetta.core.scoring.calc_total_sasa(pose, 1.5)
return sasa_score
def evaluate(self, solution: FloatSolution) -> FloatSolution:
if self.rosetta_energy_type == 'centroid':
self.evaluate_by_centroid(solution)
elif self.rosetta_energy_type == 'ref2015':
self.evaluate_by_full_atom(solution)
return solution
def get_name(self) -> str:
return 'PSP' + self.protein_name
def start_rosetta_energy_function(self):
pyrosetta.init()
if self.rosetta_energy_type == 'centroid':
self.pose = pyrosetta.pose_from_sequence(self.fasta, 'centroid')
self.temp_pose = pyrosetta.pose_from_sequence(self.fasta, 'centroid')
self.scorefxn = pyrosetta.create_score_function('score3')
self.number_of_variables = len(self.fasta) * 2
return 'centroid'
elif self.rosetta_energy_type == 'ref2015':
self.pose = pyrosetta.pose_from_sequence(self.fasta, 'fa_standard')
self.scorefxn = pyrosetta.create_score_function('ref2015')
self.number_of_variables = 0
for residue in self.fasta:
self.number_of_variables += get_num_side_chain_angles(residue) + 2
return 'ref2015'
else:
raise pyrosetta.PyRosettaException
def evaluate_by_centroid(self, solution: FloatSolution) -> FloatSolution:
pose = self.create_pose_centroid(solution.variables)
secondary_structure_score = self.get_ss_reinforcement(pose)
contact_score = 0.0
if self.use_contact_maps:
contact_score = self.get_CM_score(pose)
rosetta_score = float(format(self.scorefxn(pose), '.4f'))
final_score = rosetta_score + secondary_structure_score + contact_score # + sasa_score
solution.objectives[0] = final_score
solution.attributes["ss2_score"] = secondary_structure_score
solution.attributes["contact"] = contact_score
return solution
def create_pose_centroid(self, variables: []) -> object:
pose = self.pose
index = 0
for i, res in enumerate(self.fasta):
pose.set_phi(i + 1, variables[index])
pose.set_psi(i + 1, variables[index + 1])
pose.set_omega(i + 1, 180)
index += 2
return pose
    def evaluate_by_full_atom(self, solution: FloatSolution) -> FloatSolution:
        """Score *solution* with the full-atom (ref2015) pipeline, recording
        per-term diagnostics in ``solution.attributes``.

        NOTE(review): the active ``get_ss_reinforcement`` in this class returns
        a single float, so the two-value unpack below would fail at runtime;
        only the commented-out variant returns ``(score, ss2)`` — confirm
        which implementation is intended.
        NOTE(review): ``self.weights_CM`` is not defined in the visible code
        (did this mean ``get_CM_score``?) — verify.
        """
        pose = self.create_pose_full_atom(solution.variables)
        secondary_structure_score, secondary_structure_description = self.get_ss_reinforcement(pose)
        score = float(format(self.scorefxn(pose), '.4f')) + secondary_structure_score
        score_sasa = self.calculate_SASA(pose)
        contact_score = self.weights_CM(pose)
        solution.objectives[0] = score
        solution.attributes["ss2"] = secondary_structure_description
        solution.attributes["ss2_score"] = secondary_structure_score
        solution.attributes["sasa"] = score_sasa
        solution.attributes["contact"] = contact_score
        return solution
    def create_pose_full_atom(self, variable: []):
        """Write backbone (phi/psi/omega) and side-chain (chi) angles from the
        flat *variable* vector into the shared full-atom pose.

        Each residue consumes ``2 + get_num_side_chain_angles(res)`` entries:
        phi, psi, then the chi angles; omega is fixed at 180.
        """
        pose = self.pose
        index = 0  # position of this residue's first angle in *variable*
        for i, res in enumerate(self.fasta):
            size = 2 + get_num_side_chain_angles(res)
            try:
                # NOTE(review): the slice itself never raises IndexError; only
                # aux[0]/aux[1] can, when the vector is shorter than expected.
                aux = variable[index:index + size]
                pose.set_phi(i + 1, aux[0])
                pose.set_psi(i + 1, aux[1])
                pose.set_omega(i + 1, 180)
            except IndexError:
                # Best-effort: log the failing residue and keep going; the chi
                # loop below then runs on whatever (possibly empty) aux holds.
                print(res + ' : ' + str(size) + ' : ' + str(index))
            for c in range(0, len(aux[2:])):
                pose.set_chi(c + 1, i + 1, aux[2 + c])
            index += size
        return pose
'''
def get_ss_reinforcement(self, pose):
secondary_structure = Dssp(pose)
secondary_structure.insert_ss_into_pose(pose)
ss2 = pose.secstruct()
ss2_diff = levenshtein.distance(ss2, self.secondary_structure)
total_possible = len(ss2) * 10
total_diff = ss2_diff * -10
ss2_score = total_possible + total_diff
return ss2_score, ss2
'''
def get_ss_reinforcement(self, pose):
reinforcement = 0.0
secondary_structure = Dssp(pose)
secondary_structure.insert_ss_into_pose(pose)
ss2 = pose.secstruct()
for i in range(0, len(self.secondary_structure)):
if self.secondary_structure[i] in ['H', 'G', 'I']:
if ss2[i] == 'H':
reinforcement += -10.0
else:
reinforcement += 10.0
elif self.secondary_structure[i] in ['E', 'B', 'b']:
if ss2[i] == 'E':
reinforcement += -10.0
else:
reinforcement += 10.0
elif self.secondary_structure[i] in ['C', 'T'] and ss2[i] != 'L':
reinforcement += 10.0
return reinforcement
def get_CM_score(self, pose):
d_clash = 3.8
d_con = 8
total = 0
for idx, row in enumerate(self.cm_l2):
i = pose.residue(int(row[0]))
j = pose.residue(int(row[1]))
if 'GLY' in i.name():
x = i.xyz("CA")
else:
x = i.xyz("CB")
if 'GLY' in j.name():
y = j.xyz("CA")
else:
y = j.xyz("CB")
dist = np.linalg.norm(x - y)
if dist <= d_clash:
val = pow(8, float(row[2]))
total += val * (d_clash - dist)
elif d_clash < dist <= d_con:
total += -pow(8, float(row[2]))
else:
total += pow(8, float(row[2])) * math.log(dist - d_con + 1)
return total
def get_custom_information(self):
custom_info = {"protein": self.protein_name, "fasta": self.fasta,
"secondary_structure": self.secondary_structure, "energy_type": self.rosetta_energy_type}
return custom_info
class ProteinStructurePredictionFragment(ProteinStructurePrediction):
    """PSP problem variant that perturbs candidate solutions with Rosetta
    fragment insertions drawn from 3-mer and 9-mer fragment libraries."""
    def __init__(self, protein_config_file: str = None, energy_type: str = 'centroid'):
        # Fragment file paths are populated by the read_parameters override,
        # which super().__init__ invokes.
        self.fragment_3mer_file = None
        self.fragment_9mer_file = None
        self.fragment_3mer = None
        self.fragment_9mer = None
        super().__init__(protein_config_file, energy_type)
        # self.set_3mer_frags()
        # self.set_9mer_frags()
        ### Fragment Loading ###
        self.fragment_3mer = ConstantLengthFragSet(3)
        self.fragment_3mer.read_fragment_file(self.fragment_3mer_file)
        self.fragment_9mer = ConstantLengthFragSet(9)
        self.fragment_9mer.read_fragment_file(self.fragment_9mer_file)
        # Allow backbone moves along the whole chain; side chains stay fixed.
        self.movemap = pyrosetta.MoveMap()
        self.movemap.set_bb(True)
        self.movemap.set_chi(False)
        self.movemap.set_bb_true_range(1, len(self.fasta))
        self.mover_3mer = ClassicFragmentMover(self.fragment_3mer, self.movemap)
        self.mover_9mer = ClassicFragmentMover(self.fragment_9mer, self.movemap)
    # Perturbs the solutions using Rosetta fragment insertions.
    def generate_diversity(self, solution: FloatSolution) -> FloatSolution:
        """Apply a random number of fragment insertions to *solution* and copy
        the perturbed backbone angles back into its variables."""
        pose = self.create_pose_centroid(variables=solution.variables)
        pose = self.rosetta_fragment_mover(pose=pose, number_of_modifications=random.randint(1, len(self.fasta)))
        modified = self.extract_back_bone_angles(pose)
        solution.variables = modified
        return solution
    def read_parameters(self, protein_config_file: str):
        """Load protein metadata plus the 3-mer/9-mer fragment file paths from
        the YAML configuration file."""
        if protein_config_file is None:
            raise FileNotFoundError('Protein information can not be None')
        with open(protein_config_file, 'r') as stream:
            try:
                config = yaml.load(stream, Loader=yaml.FullLoader)
                self.protein_name = config['protein_name']
                self.fasta = config['fasta']
                self.secondary_structure = config['secondary_structure']
                self.contact_map_file = config['contact_map']
                self.fragment_3mer_file = config['fragment_3mer']
                self.fragment_9mer_file = config['fragment_9mer']
            except yaml.YAMLError as error:
                raise error
    def rosetta_fragment_mover(self, pose: pyrosetta.Pose = None, number_of_modifications: int = 0) -> pyrosetta.Pose:
        """Apply *number_of_modifications* fragment insertions to *pose*.

        NOTE(review): the random draw happens once, outside the loop, so all
        insertions in a single call use the same mover (all 3-mer or all
        9-mer) — confirm whether a per-iteration draw was intended.
        """
        _r = random.random()
        for i in range(number_of_modifications):
            if _r <= 0.5:
                self.mover_3mer.apply(pose)
            else:
                self.mover_9mer.apply(pose)
        return pose
    def extract_back_bone_angles(self, pose) -> List:
        """Return the pose's backbone dihedrals as a flat
        [phi1, psi1, phi2, psi2, ...] list."""
        angles = []
        for i in range(0, len(self.fasta)):
            angles.append(pose.phi(i + 1))
            angles.append(pose.psi(i + 1))
        return angles
    def set_9mer_frags(self):
        # Alternative pandas-based loader (currently disabled in __init__).
        arr = self.fragment_parser(self.fragment_9mer_file)
        self.fragment_9mer = self.pandas_fragment_parser(arr, 9)
    def set_3mer_frags(self):
        # Alternative pandas-based loader (currently disabled in __init__).
        arr = self.fragment_parser(self.fragment_3mer_file)
        self.fragment_3mer = self.pandas_fragment_parser(arr, 3)
    def fragment_parser(self, file_path):
        """Read a fragment file, keeping only 93-character lines (presumably
        the fragment data rows — confirm against the file format), each split
        into whitespace-separated fields.

        NOTE(review): the handle is not closed if an exception occurs
        mid-read; a ``with`` block would be safer.
        """
        fragment_file = open(file_path)
        arr = []
        for idx, line in enumerate(fragment_file.readlines()):
            strip_line = line.strip()
            if len(strip_line) != 93:
                continue
            arr.append(strip_line.split())
        fragment_file.close()
        return arr
    def pandas_fragment_parser(self, list_of_fragments: list, fragment_size: int) -> pd.DataFrame:
        """Group parsed fragment rows into DataFrames of *fragment_size* rows,
        keeping only the first 8 columns.

        NOTE(review): ``fragment_groups`` is filled but never returned — the
        method returns only the *last* group's DataFrame; confirm whether the
        array was meant to be the return value.
        """
        size = int(len(list_of_fragments) / fragment_size)
        fragment_groups = np.empty(size, dtype=pd.DataFrame)
        arr_idx = 0
        df = None
        for i in range(0, len(list_of_fragments), fragment_size):
            df = pd.DataFrame(list_of_fragments[i:i + fragment_size])
            df = df.drop([_ for _ in range(8, len(df.columns))], axis=1)
            df.columns = ["PDB_ID", "CHAIN", "RES", "AA", "SS", "PHI", "PSI", "OMEGA"]
            fragment_groups[arr_idx] = df.copy(deep=True)
            arr_idx += 1
        return df
class ProteinStructurePredictionMultiObjective(ProteinStructurePrediction):
    """Two-objective PSP formulation: the van-der-Waals clash term versus the
    sum of the remaining centroid terms plus the secondary-structure bonus."""

    def __init__(self, protein_config_file: str = None, energy_type: str = 'centroid'):
        super().__init__(protein_config_file, energy_type)
        self.number_of_objectives = 2
        self.obj_labels = ['<NAME>', 'Bonded-Energy']
        self.obj_directions = [self.MINIMIZE, self.MINIMIZE]

    def evaluate(self, solution: FloatSolution) -> FloatSolution:
        """Dispatch to the evaluator matching the configured energy flavour."""
        if self.rosetta_energy_type == 'centroid':
            self.evaluate_by_centroid(solution)
        elif self.rosetta_energy_type == 'ref2015':
            self.evaluate_by_full_atom(solution)
        return solution

    def evaluate_by_centroid(self, solution: FloatSolution) -> FloatSolution:
        """Split the centroid score into two objectives: vdw alone, and the
        sum of every other term (rg scaled by 3) plus the ss reinforcement."""
        pose = self.create_pose_centroid(solution.variables)
        self.scorefxn(pose)  # populates the pose's energy object
        # Term order in the array: vdw, cenpack, pair, env, cbeta, rg,
        # hs_pair, ss_pair, rsigma, sheet.
        terms = pose.energies().total_energies_array()[0]
        vdw = terms[0]
        rest = (terms[1] + terms[2] + terms[3] + terms[4] + terms[5] * 3
                + terms[6] + terms[7] + terms[8] + terms[9])
        solution.objectives[0] = vdw
        solution.objectives[1] = rest + self.get_ss_reinforcement(pose)
        return solution

    def get_name(self) -> str:
        return 'PSP_MO.' + self.protein_name
class AggregatedProteinStructurePredictionProblem(ProteinStructurePrediction):
    def __init__(self, protein_config_file: str = None, energy_type: str = 'centroid',
                 score_weights: List[float] = None):
        # score_weights linearly combine the two centroid objectives into the
        # single aggregated score (see evaluate_by_centroid in this class).
        super().__init__(protein_config_file=protein_config_file, energy_type=energy_type)
        self.score_weights = score_weights
def evaluate_by_centroid(self, solution: FloatSolution) -> FloatSolution:
pose = self.create_pose_centroid(solution.variables)
self.scorefxn(pose)
energy_vals = pose.energies().total_energies_array()
vdw = energy_vals[0][0]
cenpack = energy_vals[0][1]
pair = energy_vals[0][2]
env = energy_vals[0][3]
cbeta = energy_vals[0][4]
rg = energy_vals[0][5] * 3
hs_pair = energy_vals[0][6]
ss_pair = energy_vals[0][7]
rsigma = energy_vals[0][8]
sheet = energy_vals[0][9]
other_sum = cenpack + pair + env + cbeta + rg + hs_pair + ss_pair + rsigma + sheet
objective2 = other_sum + self.get_ss_reinforcement(pose)
solution.attributes['obj_0'] = vdw
solution.attributes['obj_1'] = objective2
solution.objectives[0] = self.score_weights[0] * vdw + self.score_weights[1] * objective2
return solution | [
"pyrosetta.init",
"pandas.DataFrame",
"yaml.load",
"pyrosetta.create_score_function",
"pandas.read_csv",
"numpy.empty",
"pyrosetta.SwitchResidueTypeSetMover",
"pyrosetta.MoveMap",
"pyrosetta.rosetta.protocols.simple_moves.ClassicFragmentMover",
"random.random",
"numpy.linalg.norm",
"pyrosetta.... | [((2974, 3021), 'pyrosetta.SwitchResidueTypeSetMover', 'pyrosetta.SwitchResidueTypeSetMover', (['"""centroid"""'], {}), "('centroid')\n", (3009, 3021), False, 'import pyrosetta\n'), ((3047, 3097), 'pyrosetta.SwitchResidueTypeSetMover', 'pyrosetta.SwitchResidueTypeSetMover', (['"""fa_standard"""'], {}), "('fa_standard')\n", (3082, 3097), False, 'import pyrosetta\n'), ((3786, 3878), 'pandas.read_csv', 'pd.read_csv', (['self.contact_map_file'], {'sep': '""" """', 'usecols': '[0, 1, 2]', 'names': "['i', 'j', 'prob']"}), "(self.contact_map_file, sep=' ', usecols=[0, 1, 2], names=['i',\n 'j', 'prob'])\n", (3797, 3878), True, 'import pandas as pd\n'), ((4814, 4830), 'pyrosetta.init', 'pyrosetta.init', ([], {}), '()\n', (4828, 4830), False, 'import pyrosetta\n'), ((8604, 8614), 'pyrosetta.rosetta.core.scoring.dssp.Dssp', 'Dssp', (['pose'], {}), '(pose)\n', (8608, 8614), False, 'from pyrosetta.rosetta.core.scoring.dssp import Dssp\n'), ((11141, 11160), 'pyrosetta.MoveMap', 'pyrosetta.MoveMap', ([], {}), '()\n', (11158, 11160), False, 'import pyrosetta\n'), ((11317, 11371), 'pyrosetta.rosetta.protocols.simple_moves.ClassicFragmentMover', 'ClassicFragmentMover', (['self.fragment_3mer', 'self.movemap'], {}), '(self.fragment_3mer, self.movemap)\n', (11337, 11371), False, 'from pyrosetta.rosetta.protocols.simple_moves import ClassicFragmentMover\n'), ((11398, 11452), 'pyrosetta.rosetta.protocols.simple_moves.ClassicFragmentMover', 'ClassicFragmentMover', (['self.fragment_9mer', 'self.movemap'], {}), '(self.fragment_9mer, self.movemap)\n', (11418, 11452), False, 'from pyrosetta.rosetta.protocols.simple_moves import ClassicFragmentMover\n'), ((12804, 12819), 'random.random', 'random.random', ([], {}), '()\n', (12817, 12819), False, 'import random\n'), ((14108, 14142), 'numpy.empty', 'np.empty', (['size'], {'dtype': 'pd.DataFrame'}), '(size, dtype=pd.DataFrame)\n', (14116, 14142), True, 'import numpy as np\n'), ((4222, 4279), 'rosetta.core.scoring.calc_total_sasa', 
'rosetta.core.scoring.calc_total_sasa', (['self.temp_pose', '(1.5)'], {}), '(self.temp_pose, 1.5)\n', (4258, 4279), False, 'import rosetta\n'), ((4319, 4366), 'rosetta.core.scoring.calc_total_sasa', 'rosetta.core.scoring.calc_total_sasa', (['pose', '(1.5)'], {}), '(pose, 1.5)\n', (4355, 4366), False, 'import rosetta\n'), ((4907, 4959), 'pyrosetta.pose_from_sequence', 'pyrosetta.pose_from_sequence', (['self.fasta', '"""centroid"""'], {}), "(self.fasta, 'centroid')\n", (4935, 4959), False, 'import pyrosetta\n'), ((4989, 5041), 'pyrosetta.pose_from_sequence', 'pyrosetta.pose_from_sequence', (['self.fasta', '"""centroid"""'], {}), "(self.fasta, 'centroid')\n", (5017, 5041), False, 'import pyrosetta\n'), ((5070, 5111), 'pyrosetta.create_score_function', 'pyrosetta.create_score_function', (['"""score3"""'], {}), "('score3')\n", (5101, 5111), False, 'import pyrosetta\n'), ((9794, 9815), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - y)'], {}), '(x - y)\n', (9808, 9815), True, 'import numpy as np\n'), ((14265, 14317), 'pandas.DataFrame', 'pd.DataFrame', (['list_of_fragments[i:i + fragment_size]'], {}), '(list_of_fragments[i:i + fragment_size])\n', (14277, 14317), True, 'import pandas as pd\n'), ((3370, 3411), 'yaml.load', 'yaml.load', (['stream'], {'Loader': 'yaml.FullLoader'}), '(stream, Loader=yaml.FullLoader)\n', (3379, 3411), False, 'import yaml\n'), ((5279, 5334), 'pyrosetta.pose_from_sequence', 'pyrosetta.pose_from_sequence', (['self.fasta', '"""fa_standard"""'], {}), "(self.fasta, 'fa_standard')\n", (5307, 5334), False, 'import pyrosetta\n'), ((5363, 5405), 'pyrosetta.create_score_function', 'pyrosetta.create_score_function', (['"""ref2015"""'], {}), "('ref2015')\n", (5394, 5405), False, 'import pyrosetta\n'), ((12185, 12226), 'yaml.load', 'yaml.load', (['stream'], {'Loader': 'yaml.FullLoader'}), '(stream, Loader=yaml.FullLoader)\n', (12194, 12226), False, 'import yaml\n'), ((10098, 10124), 'math.log', 'math.log', (['(dist - d_con + 1)'], {}), '(dist - d_con + 
1)\n', (10106, 10124), False, 'import math\n')] |
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
import seaborn as sns
import torch
import numpy as np
import sklearn
from sklearn.manifold import TSNE
from sklearn.datasets import load_digits
from sklearn.preprocessing import scale
# We'll hack a bit with the t-SNE code in sklearn 0.15.2.
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold.t_sne import (_joint_probabilities,
_kl_divergence)
import matplotlib.cm as cm
from matplotlib.lines import Line2D
def visualize_TSNE(source_feat, target_feat, source_label, target_label, path, class_names):
    """Project source- and target-domain features to 2-D with t-SNE and save
    a scatter plot to *path*.

    Source samples are drawn as circles and target samples as triangles,
    coloured by class label with the 'Set3' colormap.

    Args:
        source_feat: 2-D torch tensor of source-domain features.
        target_feat: 2-D torch tensor of target-domain features.
        source_label: 1-D tensor of source class indices.
        target_label: 1-D tensor of target class indices.
        path: output image file path.
        class_names: class names — currently unused (legend drawing is
            disabled); kept for interface compatibility.
    """
    sns.set_style('darkgrid')
    sns.set_palette('muted')
    sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})
    num_source = source_feat.size()[0]
    X = np.concatenate([source_feat.cpu().numpy(), target_feat.cpu().numpy()])
    y = np.concatenate([source_label.numpy(), target_label.numpy()])
    digits_proj = TSNE(random_state=1).fit_transform(X)
    plt.figure(figsize=(6, 6))
    ax = plt.subplot(aspect='equal')
    # NOTE: the original also computed y[:num_source].astype(np.int) into an
    # unused variable; `np.int` was removed in NumPy 1.24, so that dead line
    # (and other unused locals) are dropped here.
    # Source domain: circles; target domain: triangles.
    ax.scatter(digits_proj[:num_source, 0], digits_proj[:num_source, 1], lw=0, s=40, marker='o',
               c=y[:num_source], cmap='Set3', alpha=0.8)
    ax.scatter(digits_proj[num_source:, 0], digits_proj[num_source:, 1], lw=0, s=40, marker='^',
               c=y[num_source:], cmap='Set3', alpha=0.8)
    ax.axis('off')
    ax.axis('tight')
    plt.savefig(path)
| [
"seaborn.set_style",
"matplotlib.pyplot.subplot",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.figure",
"numpy.array",
"seaborn.set_palette",
"seaborn.set_context",
"matplotlib.pyplot.savefig"
] | [((650, 675), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (663, 675), True, 'import seaborn as sns\n'), ((680, 704), 'seaborn.set_palette', 'sns.set_palette', (['"""muted"""'], {}), "('muted')\n", (695, 704), True, 'import seaborn as sns\n'), ((709, 781), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.5)', 'rc': "{'lines.linewidth': 2.5}"}), "('notebook', font_scale=1.5, rc={'lines.linewidth': 2.5})\n", (724, 781), True, 'import seaborn as sns\n'), ((1382, 1403), 'numpy.array', 'np.array', (['class_names'], {}), '(class_names)\n', (1390, 1403), True, 'import numpy as np\n'), ((1445, 1471), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1455, 1471), True, 'import matplotlib.pyplot as plt\n'), ((1481, 1508), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'aspect': '"""equal"""'}), "(aspect='equal')\n", (1492, 1508), True, 'import matplotlib.pyplot as plt\n'), ((2532, 2549), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (2543, 2549), True, 'import matplotlib.pyplot as plt\n'), ((1188, 1208), 'sklearn.manifold.TSNE', 'TSNE', ([], {'random_state': '(1)'}), '(random_state=1)\n', (1192, 1208), False, 'from sklearn.manifold import TSNE\n')] |
#!/usr/bin/env python3
"""
Multi-compartmental OLM cell example
File: olm-example.py
Copyright 2021 NeuroML contributors
Authors: <NAME>, <NAME>
"""
from neuroml import (NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location)
from CellBuilder import (create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id)
from pyneuroml import pynml
from pyneuroml.lems import LEMSSimulation
import numpy as np
def main():
    """Build the LEMS simulation around the OLM network, run it with the
    NEURON backend, and plot the recorded membrane potentials.
    """
    sim_id = "olm_example_sim"
    simulation = LEMSSimulation(sim_id=sim_id, duration=600, dt=0.01, simulation_seed=123)
    # Include the NeuroML model and point the simulation at its network.
    simulation.include_neuroml2_file(create_olm_network())
    simulation.assign_simulation_target("single_olm_cell_network")
    # Record the cell-level voltage plus one column per segment.
    simulation.create_output_file(id="output0", file_name=sim_id + ".dat")
    simulation.add_column_to_output_file("output0", column_id="pop0_0_v", quantity="pop0[0]/v")
    # (column_id, segment id inside the cell); note the dendrite segment ids
    # are deliberately not in file order.
    per_segment_columns = [
        ("pop0_0_v_Seg0_soma_0", 0),
        ("pop0_0_v_Seg1_soma_0", 1),
        ("pop0_0_v_Seg0_axon_0", 2),
        ("pop0_0_v_Seg1_axon_0", 3),
        ("pop0_0_v_Seg0_dend_0", 4),
        ("pop0_0_v_Seg1_dend_0", 6),
        ("pop0_0_v_Seg0_dend_1", 5),
        ("pop0_0_v_Seg1_dend_1", 7),
    ]
    for column_id, seg_id in per_segment_columns:
        simulation.add_column_to_output_file("output0", column_id=column_id,
                                             quantity="pop0/0/olm/{}/v".format(seg_id))
    sim_file = simulation.save_to_file()
    # Run via jNeuroML's NEURON backend.
    pynml.run_lems_with_jneuroml_neuron(sim_file, max_memory="2G", nogui=True,
                                        plot=False, skip_run=False)
    plot_data(sim_id)
def plot_data(sim_id):
    """Plot membrane potentials recorded by the simulation.

    Loads "<sim_id>.dat" and writes one PNG per recorded soma/axon segment.

    :param sim_id: ID of the simulation (used to derive the file names)
    """
    data_array = np.loadtxt(sim_id + ".dat")
    # (data column, human-readable label, output file suffix)
    traces = [
        (1, "soma seg 0", "_seg0_soma0-v.png"),
        (2, "soma seg 1", "_seg1_soma0-v.png"),
        (3, "axon seg 0", "_seg0_axon0-v.png"),
        (4, "axon seg 1", "_seg1_axon0-v.png"),
    ]
    for column, label, suffix in traces:
        pynml.generate_plot([data_array[:, 0]], [data_array[:, column]],
                            "Membrane potential ({})".format(label),
                            show_plot_already=False,
                            save_figure_to=sim_id + suffix,
                            xaxis="time (s)", yaxis="membrane potential (V)")
def create_olm_network():
    """Assemble a single-cell OLM network document and write it to disk.

    :returns: name of the written network NeuroML file
    """
    net_doc = NeuroMLDocument(id="network",
                          notes="OLM cell network")
    net_doc_fn = "olm_example_net.nml"
    net_doc.includes.append(IncludeType(href=create_olm_cell()))
    # A one-cell population, placed at the origin.
    pop = Population(id="pop0", notes="A population for our cell",
                     component="olm", size=1, type="populationList")
    pop.instances.append(Instance(id=1, location=Location(0., 0., 0.)))
    # Current-clamp stimulus delivered to that cell.
    pulsegen = PulseGenerator(id="pg_olm", notes="Simple pulse generator", delay="100ms", duration="100ms", amplitude="0.08nA")
    exp_input = ExplicitInput(target="pop0[0]", input="pg_olm")
    net = Network(id="single_olm_cell_network", note="A network with a single population")
    net.populations.append(pop)
    net.explicit_inputs.append(exp_input)
    net_doc.pulse_generators.append(pulsegen)
    net_doc.networks.append(net)
    pynml.write_neuroml2_file(nml2_doc=net_doc, nml2_file_name=net_doc_fn, validate=True)
    return net_doc_fn
def create_olm_cell():
    """Build the multi-compartment OLM cell (two soma segments, two axon
    segments, two two-segment dendrites), attach its channel densities, and
    write it to a NeuroML cell file.

    :returns: name of the written cell file
    """
    nml_cell_doc = NeuroMLDocument(id="oml_cell")
    cell = create_cell("olm")
    nml_cell_file = cell.id + ".cell.nml"
    # --- Morphology: two soma segments stacked along +y ---
    soma_diam = 10.0
    seg0_soma = add_segment(cell,
                            prox=[0.0, 0.0, 0.0, soma_diam],
                            dist=[0.0, 10.0, 0.0, soma_diam],
                            name="Seg0_soma_0",
                            group="soma_0")
    seg1_soma = add_segment(cell,
                            prox=None,
                            dist=[0.0, 20.0, 0.0, soma_diam],
                            name="Seg1_soma_0",
                            parent=seg0_soma,
                            group="soma_0")
    # --- Two axon segments, attached at the bottom of the soma ---
    axon_diam = 1.5
    seg0_axon = add_segment(cell,
                            prox=[0.0, 0.0, 0.0, axon_diam],
                            dist=[0.0, -75, 0.0, axon_diam],
                            name="Seg0_axon_0",
                            parent=seg0_soma,
                            fraction_along=0.0,
                            group="axon_0")
    add_segment(cell,
                prox=None,
                dist=[0.0, -150, 0.0, axon_diam],
                name="Seg1_axon_0",
                parent=seg0_axon,
                group="axon_0")
    # --- Two dendrites (two segments each) branching from the soma top ---
    dend_diam = 3.0
    seg0_dend0 = add_segment(cell,
                             prox=[0.0, 20, 0.0, dend_diam],
                             dist=[100, 120, 0.0, dend_diam],
                             name="Seg0_dend_0",
                             parent=seg1_soma,
                             fraction_along=1,
                             group="dend_0")
    add_segment(cell,
                prox=None,
                dist=[177, 197, 0.0, dend_diam],
                name="Seg1_dend_0",
                parent=seg0_dend0,
                fraction_along=1,
                group="dend_0")
    seg0_dend1 = add_segment(cell,
                             prox=[0.0, 20, 0.0, dend_diam],
                             dist=[-100, 120, 0.0, dend_diam],
                             name="Seg0_dend_1",
                             parent=seg1_soma,
                             fraction_along=1,
                             group="dend_1")
    add_segment(cell,
                prox=None,
                dist=[-177, 197, 0.0, dend_diam],
                name="Seg1_dend_1",
                parent=seg0_dend1,
                fraction_along=1,
                group="dend_1")
    # XXX: For segment groups to be correctly mapped to sections in NEURON,
    # they must include the correct neurolex ID.
    for section_name in ["soma_0", "axon_0", "dend_0", "dend_1"]:
        get_seg_group_by_id(section_name, cell).neuro_lex_id = 'sao864921383'
    # Wire the sections into the standard soma/axon/dendrite groups and
    # assign each group a display colour.
    group_layout = [
        ("dendrite_group", ["dend_0", "dend_1"], "0.8 0 0"),
        ("axon_group", ["axon_0"], "0 0.8 0"),
        ("soma_group", ["soma_0"], "0 0 0.8"),
    ]
    for group_id, members, color in group_layout:
        seg_group = get_seg_group_by_id(group_id, cell)
        for member in members:
            seg_group.includes.append(Include(segment_groups=member))
        seg_group.properties.append(Property(tag="color", value=color))
    # --- Passive membrane properties ---
    set_init_memb_potential(cell, "-67mV")
    set_resistivity(cell, "0.15 kohm_cm")
    set_specific_capacitance(cell, "1.3 uF_per_cm2")
    # --- Channel densities: leak everywhere, plus HCN / Kdrfast / KvAolm /
    # Nav with region-specific conductance densities ---
    channel_table = [
        dict(cd_id="leak_all", cond_density="0.01 mS_per_cm2", ion_channel="leak_chan",
             ion_chan_def_file="olm-example/leak_chan.channel.nml",
             erev="-67mV", ion="non_specific"),
        dict(cd_id="HCNolm_soma", cond_density="0.5 mS_per_cm2", ion_channel="HCNolm",
             ion_chan_def_file="olm-example/HCNolm.channel.nml",
             erev="-32.9mV", ion="h", group="soma_group"),
        dict(cd_id="Kdrfast_soma", cond_density="73.37 mS_per_cm2", ion_channel="Kdrfast",
             ion_chan_def_file="olm-example/Kdrfast.channel.nml",
             erev="-77mV", ion="k", group="soma_group"),
        dict(cd_id="Kdrfast_dendrite", cond_density="105.8 mS_per_cm2", ion_channel="Kdrfast",
             ion_chan_def_file="olm-example/Kdrfast.channel.nml",
             erev="-77mV", ion="k", group="dendrite_group"),
        dict(cd_id="Kdrfast_axon", cond_density="117.392 mS_per_cm2", ion_channel="Kdrfast",
             ion_chan_def_file="olm-example/Kdrfast.channel.nml",
             erev="-77mV", ion="k", group="axon_group"),
        dict(cd_id="KvAolm_soma", cond_density="4.95 mS_per_cm2", ion_channel="KvAolm",
             ion_chan_def_file="olm-example/KvAolm.channel.nml",
             erev="-77mV", ion="k", group="soma_group"),
        dict(cd_id="KvAolm_dendrite", cond_density="2.8 mS_per_cm2", ion_channel="KvAolm",
             ion_chan_def_file="olm-example/KvAolm.channel.nml",
             erev="-77mV", ion="k", group="dendrite_group"),
        dict(cd_id="Nav_soma", cond_density="10.7 mS_per_cm2", ion_channel="Nav",
             ion_chan_def_file="olm-example/Nav.channel.nml",
             erev="50mV", ion="na", group="soma_group"),
        dict(cd_id="Nav_dendrite", cond_density="23.4 mS_per_cm2", ion_channel="Nav",
             ion_chan_def_file="olm-example/Nav.channel.nml",
             erev="50mV", ion="na", group="dendrite_group"),
        dict(cd_id="Nav_axon", cond_density="17.12 mS_per_cm2", ion_channel="Nav",
             ion_chan_def_file="olm-example/Nav.channel.nml",
             erev="50mV", ion="na", group="axon_group"),
    ]
    for channel in channel_table:
        add_channel_density(cell, nml_cell_doc, **channel)
    nml_cell_doc.cells.append(cell)
    pynml.write_neuroml2_file(nml_cell_doc, nml_cell_file, True, True)
    return nml_cell_file
# Build the cell and network, run the LEMS simulation, and plot the results.
if __name__ == "__main__":
    main()
| [
"CellBuilder.create_cell",
"CellBuilder.set_init_memb_potential",
"CellBuilder.set_resistivity",
"neuroml.ExplicitInput",
"neuroml.PulseGenerator",
"numpy.loadtxt",
"CellBuilder.set_specific_capacitance",
"neuroml.Property",
"neuroml.Network",
"pyneuroml.pynml.run_lems_with_jneuroml_neuron",
"py... | [((767, 840), 'pyneuroml.lems.LEMSSimulation', 'LEMSSimulation', ([], {'sim_id': 'sim_id', 'duration': '(600)', 'dt': '(0.01)', 'simulation_seed': '(123)'}), '(sim_id=sim_id, duration=600, dt=0.01, simulation_seed=123)\n', (781, 840), False, 'from pyneuroml.lems import LEMSSimulation\n'), ((2956, 3062), 'pyneuroml.pynml.run_lems_with_jneuroml_neuron', 'pynml.run_lems_with_jneuroml_neuron', (['sim_file'], {'max_memory': '"""2G"""', 'nogui': '(True)', 'plot': '(False)', 'skip_run': '(False)'}), "(sim_file, max_memory='2G', nogui=True,\n plot=False, skip_run=False)\n", (2991, 3062), False, 'from pyneuroml import pynml\n'), ((3379, 3406), 'numpy.loadtxt', 'np.loadtxt', (["(sim_id + '.dat')"], {}), "(sim_id + '.dat')\n", (3389, 3406), True, 'import numpy as np\n'), ((3411, 3638), 'pyneuroml.pynml.generate_plot', 'pynml.generate_plot', (['[data_array[:, 0]]', '[data_array[:, 1]]', '"""Membrane potential (soma seg 0)"""'], {'show_plot_already': '(False)', 'save_figure_to': "(sim_id + '_seg0_soma0-v.png')", 'xaxis': '"""time (s)"""', 'yaxis': '"""membrane potential (V)"""'}), "([data_array[:, 0]], [data_array[:, 1]],\n 'Membrane potential (soma seg 0)', show_plot_already=False,\n save_figure_to=sim_id + '_seg0_soma0-v.png', xaxis='time (s)', yaxis=\n 'membrane potential (V)')\n", (3430, 3638), False, 'from pyneuroml import pynml\n'), ((3630, 3857), 'pyneuroml.pynml.generate_plot', 'pynml.generate_plot', (['[data_array[:, 0]]', '[data_array[:, 2]]', '"""Membrane potential (soma seg 1)"""'], {'show_plot_already': '(False)', 'save_figure_to': "(sim_id + '_seg1_soma0-v.png')", 'xaxis': '"""time (s)"""', 'yaxis': '"""membrane potential (V)"""'}), "([data_array[:, 0]], [data_array[:, 2]],\n 'Membrane potential (soma seg 1)', show_plot_already=False,\n save_figure_to=sim_id + '_seg1_soma0-v.png', xaxis='time (s)', yaxis=\n 'membrane potential (V)')\n", (3649, 3857), False, 'from pyneuroml import pynml\n'), ((3849, 4076), 'pyneuroml.pynml.generate_plot', 
'pynml.generate_plot', (['[data_array[:, 0]]', '[data_array[:, 3]]', '"""Membrane potential (axon seg 0)"""'], {'show_plot_already': '(False)', 'save_figure_to': "(sim_id + '_seg0_axon0-v.png')", 'xaxis': '"""time (s)"""', 'yaxis': '"""membrane potential (V)"""'}), "([data_array[:, 0]], [data_array[:, 3]],\n 'Membrane potential (axon seg 0)', show_plot_already=False,\n save_figure_to=sim_id + '_seg0_axon0-v.png', xaxis='time (s)', yaxis=\n 'membrane potential (V)')\n", (3868, 4076), False, 'from pyneuroml import pynml\n'), ((4068, 4295), 'pyneuroml.pynml.generate_plot', 'pynml.generate_plot', (['[data_array[:, 0]]', '[data_array[:, 4]]', '"""Membrane potential (axon seg 1)"""'], {'show_plot_already': '(False)', 'save_figure_to': "(sim_id + '_seg1_axon0-v.png')", 'xaxis': '"""time (s)"""', 'yaxis': '"""membrane potential (V)"""'}), "([data_array[:, 0]], [data_array[:, 4]],\n 'Membrane potential (axon seg 1)', show_plot_already=False,\n save_figure_to=sim_id + '_seg1_axon0-v.png', xaxis='time (s)', yaxis=\n 'membrane potential (V)')\n", (4087, 4295), False, 'from pyneuroml import pynml\n'), ((4399, 4454), 'neuroml.NeuroMLDocument', 'NeuroMLDocument', ([], {'id': '"""network"""', 'notes': '"""OLM cell network"""'}), "(id='network', notes='OLM cell network')\n", (4414, 4454), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((4675, 4783), 'neuroml.Population', 'Population', ([], {'id': '"""pop0"""', 'notes': '"""A population for our cell"""', 'component': '"""olm"""', 'size': '(1)', 'type': '"""populationList"""'}), "(id='pop0', notes='A population for our cell', component='olm',\n size=1, type='populationList')\n", (4685, 4783), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((4900, 5016), 'neuroml.PulseGenerator', 
'PulseGenerator', ([], {'id': '"""pg_olm"""', 'notes': '"""Simple pulse generator"""', 'delay': '"""100ms"""', 'duration': '"""100ms"""', 'amplitude': '"""0.08nA"""'}), "(id='pg_olm', notes='Simple pulse generator', delay='100ms',\n duration='100ms', amplitude='0.08nA')\n", (4914, 5016), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((5030, 5077), 'neuroml.ExplicitInput', 'ExplicitInput', ([], {'target': '"""pop0[0]"""', 'input': '"""pg_olm"""'}), "(target='pop0[0]', input='pg_olm')\n", (5043, 5077), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((5089, 5174), 'neuroml.Network', 'Network', ([], {'id': '"""single_olm_cell_network"""', 'note': '"""A network with a single population"""'}), "(id='single_olm_cell_network', note='A network with a single population'\n )\n", (5096, 5174), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((5328, 5417), 'pyneuroml.pynml.write_neuroml2_file', 'pynml.write_neuroml2_file', ([], {'nml2_doc': 'net_doc', 'nml2_file_name': 'net_doc_fn', 'validate': '(True)'}), '(nml2_doc=net_doc, nml2_file_name=net_doc_fn,\n validate=True)\n', (5353, 5417), False, 'from pyneuroml import pynml\n'), ((5548, 5578), 'neuroml.NeuroMLDocument', 'NeuroMLDocument', ([], {'id': '"""oml_cell"""'}), "(id='oml_cell')\n", (5563, 5578), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((5590, 5608), 'CellBuilder.create_cell', 'create_cell', (['"""olm"""'], {}), "('olm')\n", (5601, 5608), False, 'from CellBuilder import create_cell, add_segment, 
add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((5709, 5823), 'CellBuilder.add_segment', 'add_segment', (['cell'], {'prox': '[0.0, 0.0, 0.0, diam]', 'dist': '[0.0, 10.0, 0.0, diam]', 'name': '"""Seg0_soma_0"""', 'group': '"""soma_0"""'}), "(cell, prox=[0.0, 0.0, 0.0, diam], dist=[0.0, 10.0, 0.0, diam],\n name='Seg0_soma_0', group='soma_0')\n", (5720, 5823), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((5933, 6053), 'CellBuilder.add_segment', 'add_segment', (['cell'], {'prox': 'None', 'dist': '[0.0, 10.0 + 10.0, 0.0, diam]', 'name': '"""Seg1_soma_0"""', 'parent': 'soma_0', 'group': '"""soma_0"""'}), "(cell, prox=None, dist=[0.0, 10.0 + 10.0, 0.0, diam], name=\n 'Seg1_soma_0', parent=soma_0, group='soma_0')\n", (5944, 6053), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((6225, 6373), 'CellBuilder.add_segment', 'add_segment', (['cell'], {'prox': '[0.0, 0.0, 0.0, diam]', 'dist': '[0.0, -75, 0.0, diam]', 'name': '"""Seg0_axon_0"""', 'parent': 'soma_0', 'fraction_along': '(0.0)', 'group': '"""axon_0"""'}), "(cell, prox=[0.0, 0.0, 0.0, diam], dist=[0.0, -75, 0.0, diam],\n name='Seg0_axon_0', parent=soma_0, fraction_along=0.0, group='axon_0')\n", (6236, 6373), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((6533, 6646), 'CellBuilder.add_segment', 'add_segment', (['cell'], {'prox': 'None', 'dist': '[0.0, -150, 0.0, diam]', 'name': '"""Seg1_axon_0"""', 'parent': 'axon_0', 'group': '"""axon_0"""'}), "(cell, prox=None, dist=[0.0, -150, 0.0, diam], name=\n 'Seg1_axon_0', parent=axon_0, group='axon_0')\n", (6544, 6646), False, 'from 
CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((6829, 6974), 'CellBuilder.add_segment', 'add_segment', (['cell'], {'prox': '[0.0, 20, 0.0, diam]', 'dist': '[100, 120, 0.0, diam]', 'name': '"""Seg0_dend_0"""', 'parent': 'soma_1', 'fraction_along': '(1)', 'group': '"""dend_0"""'}), "(cell, prox=[0.0, 20, 0.0, diam], dist=[100, 120, 0.0, diam],\n name='Seg0_dend_0', parent=soma_1, fraction_along=1, group='dend_0')\n", (6840, 6974), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((7149, 7280), 'CellBuilder.add_segment', 'add_segment', (['cell'], {'prox': 'None', 'dist': '[177, 197, 0.0, diam]', 'name': '"""Seg1_dend_0"""', 'parent': 'dend_0_0', 'fraction_along': '(1)', 'group': '"""dend_0"""'}), "(cell, prox=None, dist=[177, 197, 0.0, diam], name='Seg1_dend_0',\n parent=dend_0_0, fraction_along=1, group='dend_0')\n", (7160, 7280), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((7455, 7601), 'CellBuilder.add_segment', 'add_segment', (['cell'], {'prox': '[0.0, 20, 0.0, diam]', 'dist': '[-100, 120, 0.0, diam]', 'name': '"""Seg0_dend_1"""', 'parent': 'soma_1', 'fraction_along': '(1)', 'group': '"""dend_1"""'}), "(cell, prox=[0.0, 20, 0.0, diam], dist=[-100, 120, 0.0, diam],\n name='Seg0_dend_1', parent=soma_1, fraction_along=1, group='dend_1')\n", (7466, 7601), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((7775, 7908), 'CellBuilder.add_segment', 'add_segment', (['cell'], {'prox': 'None', 'dist': '[-177, 197, 0.0, diam]', 'name': '"""Seg1_dend_1"""', 'parent': 'dend_0_1', 'fraction_along': 
'(1)', 'group': '"""dend_1"""'}), "(cell, prox=None, dist=[-177, 197, 0.0, diam], name=\n 'Seg1_dend_1', parent=dend_0_1, fraction_along=1, group='dend_1')\n", (7786, 7908), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((8394, 8437), 'CellBuilder.get_seg_group_by_id', 'get_seg_group_by_id', (['"""dendrite_group"""', 'cell'], {}), "('dendrite_group', cell)\n", (8413, 8437), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((8670, 8709), 'CellBuilder.get_seg_group_by_id', 'get_seg_group_by_id', (['"""axon_group"""', 'cell'], {}), "('axon_group', cell)\n", (8689, 8709), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((8874, 8913), 'CellBuilder.get_seg_group_by_id', 'get_seg_group_by_id', (['"""soma_group"""', 'cell'], {}), "('soma_group', cell)\n", (8893, 8913), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((9094, 9132), 'CellBuilder.set_init_memb_potential', 'set_init_memb_potential', (['cell', '"""-67mV"""'], {}), "(cell, '-67mV')\n", (9117, 9132), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((9137, 9174), 'CellBuilder.set_resistivity', 'set_resistivity', (['cell', '"""0.15 kohm_cm"""'], {}), "(cell, '0.15 kohm_cm')\n", (9152, 9174), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((9179, 9227), 
'CellBuilder.set_specific_capacitance', 'set_specific_capacitance', (['cell', '"""1.3 uF_per_cm2"""'], {}), "(cell, '1.3 uF_per_cm2')\n", (9203, 9227), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((9259, 9472), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""leak_all"""', 'cond_density': '"""0.01 mS_per_cm2"""', 'ion_channel': '"""leak_chan"""', 'ion_chan_def_file': '"""olm-example/leak_chan.channel.nml"""', 'erev': '"""-67mV"""', 'ion': '"""non_specific"""'}), "(cell, nml_cell_doc, cd_id='leak_all', cond_density=\n '0.01 mS_per_cm2', ion_channel='leak_chan', ion_chan_def_file=\n 'olm-example/leak_chan.channel.nml', erev='-67mV', ion='non_specific')\n", (9278, 9472), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((9629, 9854), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""HCNolm_soma"""', 'cond_density': '"""0.5 mS_per_cm2"""', 'ion_channel': '"""HCNolm"""', 'ion_chan_def_file': '"""olm-example/HCNolm.channel.nml"""', 'erev': '"""-32.9mV"""', 'ion': '"""h"""', 'group': '"""soma_group"""'}), "(cell, nml_cell_doc, cd_id='HCNolm_soma', cond_density=\n '0.5 mS_per_cm2', ion_channel='HCNolm', ion_chan_def_file=\n 'olm-example/HCNolm.channel.nml', erev='-32.9mV', ion='h', group=\n 'soma_group')\n", (9648, 9854), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((10031, 10259), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""Kdrfast_soma"""', 'cond_density': '"""73.37 mS_per_cm2"""', 'ion_channel': '"""Kdrfast"""', 'ion_chan_def_file': 
'"""olm-example/Kdrfast.channel.nml"""', 'erev': '"""-77mV"""', 'ion': '"""k"""', 'group': '"""soma_group"""'}), "(cell, nml_cell_doc, cd_id='Kdrfast_soma', cond_density=\n '73.37 mS_per_cm2', ion_channel='Kdrfast', ion_chan_def_file=\n 'olm-example/Kdrfast.channel.nml', erev='-77mV', ion='k', group=\n 'soma_group')\n", (10050, 10259), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((10440, 10674), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""Kdrfast_dendrite"""', 'cond_density': '"""105.8 mS_per_cm2"""', 'ion_channel': '"""Kdrfast"""', 'ion_chan_def_file': '"""olm-example/Kdrfast.channel.nml"""', 'erev': '"""-77mV"""', 'ion': '"""k"""', 'group': '"""dendrite_group"""'}), "(cell, nml_cell_doc, cd_id='Kdrfast_dendrite',\n cond_density='105.8 mS_per_cm2', ion_channel='Kdrfast',\n ion_chan_def_file='olm-example/Kdrfast.channel.nml', erev='-77mV', ion=\n 'k', group='dendrite_group')\n", (10459, 10674), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((10853, 11083), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""Kdrfast_axon"""', 'cond_density': '"""117.392 mS_per_cm2"""', 'ion_channel': '"""Kdrfast"""', 'ion_chan_def_file': '"""olm-example/Kdrfast.channel.nml"""', 'erev': '"""-77mV"""', 'ion': '"""k"""', 'group': '"""axon_group"""'}), "(cell, nml_cell_doc, cd_id='Kdrfast_axon', cond_density=\n '117.392 mS_per_cm2', ion_channel='Kdrfast', ion_chan_def_file=\n 'olm-example/Kdrfast.channel.nml', erev='-77mV', ion='k', group=\n 'axon_group')\n", (10872, 11083), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, 
get_seg_group_by_id\n'), ((11259, 11483), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""KvAolm_soma"""', 'cond_density': '"""4.95 mS_per_cm2"""', 'ion_channel': '"""KvAolm"""', 'ion_chan_def_file': '"""olm-example/KvAolm.channel.nml"""', 'erev': '"""-77mV"""', 'ion': '"""k"""', 'group': '"""soma_group"""'}), "(cell, nml_cell_doc, cd_id='KvAolm_soma', cond_density=\n '4.95 mS_per_cm2', ion_channel='KvAolm', ion_chan_def_file=\n 'olm-example/KvAolm.channel.nml', erev='-77mV', ion='k', group='soma_group'\n )\n", (11278, 11483), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((11663, 11893), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""KvAolm_dendrite"""', 'cond_density': '"""2.8 mS_per_cm2"""', 'ion_channel': '"""KvAolm"""', 'ion_chan_def_file': '"""olm-example/KvAolm.channel.nml"""', 'erev': '"""-77mV"""', 'ion': '"""k"""', 'group': '"""dendrite_group"""'}), "(cell, nml_cell_doc, cd_id='KvAolm_dendrite',\n cond_density='2.8 mS_per_cm2', ion_channel='KvAolm', ion_chan_def_file=\n 'olm-example/KvAolm.channel.nml', erev='-77mV', ion='k', group=\n 'dendrite_group')\n", (11682, 11893), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((12067, 12277), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""Nav_soma"""', 'cond_density': '"""10.7 mS_per_cm2"""', 'ion_channel': '"""Nav"""', 'ion_chan_def_file': '"""olm-example/Nav.channel.nml"""', 'erev': '"""50mV"""', 'ion': '"""na"""', 'group': '"""soma_group"""'}), "(cell, nml_cell_doc, cd_id='Nav_soma', cond_density=\n '10.7 mS_per_cm2', ion_channel='Nav', ion_chan_def_file=\n 'olm-example/Nav.channel.nml', erev='50mV', ion='na', 
group='soma_group')\n", (12086, 12277), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((12459, 12682), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""Nav_dendrite"""', 'cond_density': '"""23.4 mS_per_cm2"""', 'ion_channel': '"""Nav"""', 'ion_chan_def_file': '"""olm-example/Nav.channel.nml"""', 'erev': '"""50mV"""', 'ion': '"""na"""', 'group': '"""dendrite_group"""'}), "(cell, nml_cell_doc, cd_id='Nav_dendrite', cond_density=\n '23.4 mS_per_cm2', ion_channel='Nav', ion_chan_def_file=\n 'olm-example/Nav.channel.nml', erev='50mV', ion='na', group=\n 'dendrite_group')\n", (12478, 12682), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((12855, 13066), 'CellBuilder.add_channel_density', 'add_channel_density', (['cell', 'nml_cell_doc'], {'cd_id': '"""Nav_axon"""', 'cond_density': '"""17.12 mS_per_cm2"""', 'ion_channel': '"""Nav"""', 'ion_chan_def_file': '"""olm-example/Nav.channel.nml"""', 'erev': '"""50mV"""', 'ion': '"""na"""', 'group': '"""axon_group"""'}), "(cell, nml_cell_doc, cd_id='Nav_axon', cond_density=\n '17.12 mS_per_cm2', ion_channel='Nav', ion_chan_def_file=\n 'olm-example/Nav.channel.nml', erev='50mV', ion='na', group='axon_group')\n", (12874, 13066), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((13266, 13332), 'pyneuroml.pynml.write_neuroml2_file', 'pynml.write_neuroml2_file', (['nml_cell_doc', 'nml_cell_file', '(True)', '(True)'], {}), '(nml_cell_doc, nml_cell_file, True, True)\n', (13291, 13332), False, 'from pyneuroml import pynml\n'), ((8281, 8320), 'CellBuilder.get_seg_group_by_id', 'get_seg_group_by_id', (['section_name', 
'cell'], {}), '(section_name, cell)\n', (8300, 8320), False, 'from CellBuilder import create_cell, add_segment, add_channel_density, set_init_memb_potential, set_resistivity, set_specific_capacitance, get_seg_group_by_id\n'), ((8472, 8504), 'neuroml.Include', 'Include', ([], {'segment_groups': '"""dend_0"""'}), "(segment_groups='dend_0')\n", (8479, 8504), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((8540, 8572), 'neuroml.Include', 'Include', ([], {'segment_groups': '"""dend_1"""'}), "(segment_groups='dend_1')\n", (8547, 8572), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((8610, 8648), 'neuroml.Property', 'Property', ([], {'tag': '"""color"""', 'value': '"""0.8 0 0"""'}), "(tag='color', value='0.8 0 0')\n", (8618, 8648), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((8743, 8775), 'neuroml.Include', 'Include', ([], {'segment_groups': '"""axon_0"""'}), "(segment_groups='axon_0')\n", (8750, 8775), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((8812, 8850), 'neuroml.Property', 'Property', ([], {'tag': '"""color"""', 'value': '"""0 0.8 0"""'}), "(tag='color', value='0 0.8 0')\n", (8820, 8850), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((8949, 8981), 'neuroml.Include', 'Include', ([], {'segment_groups': '"""soma_0"""'}), "(segment_groups='soma_0')\n", (8956, 8981), False, 'from neuroml import NeuroMLDocument, IncludeType, 
Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((9021, 9059), 'neuroml.Property', 'Property', ([], {'tag': '"""color"""', 'value': '"""0 0 0.8"""'}), "(tag='color', value='0 0 0.8')\n", (9029, 9059), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n'), ((4850, 4873), 'neuroml.Location', 'Location', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (4858, 4873), False, 'from neuroml import NeuroMLDocument, IncludeType, Population, PulseGenerator, ExplicitInput, Network, SegmentGroup, Member, Property, Include, Instance, Location\n')] |
import sys
import os
import numpy
import soundfile
import librosa
from PIL import Image
def main():
    """Convert every .png/.jpg image in the directory named on the
    command line into a .wav file saved alongside it (22050 Hz)."""
    directory = sys.argv[1]
    for name in os.listdir(directory):
        if os.path.splitext(name)[1] not in (".png", ".jpg"):
            continue
        path = os.path.join(directory, name)
        print(path)
        audio = img_to_audio(path)
        wav_path = os.path.splitext(path)[0] + ".wav"
        soundfile.write(wav_path, audio, 22050)
def img_to_audio(x):
    """Reconstruct a mono audio signal from a grayscale spectrogram image.

    Parameters
    ----------
    x : str
        Path to an image file. The image is converted to grayscale and its
        pixel values (scaled to [0, 1]) are fed to ``librosa.istft`` as
        spectrogram coefficients (no phase information is available).

    Returns
    -------
    numpy.ndarray
        The reconstructed signal, peak-normalized to [-1, 1] when non-silent.
    """
    # Context manager ensures the image file handle is closed promptly
    # (the original left it open until garbage collection).
    with Image.open(x) as img:
        spec = numpy.array(img.convert("L")).astype(numpy.float32) / 255
    y = librosa.istft(spec)
    # BUG FIX: guard against an all-silent result (e.g. a fully black
    # image), which previously caused a division by zero / NaN output.
    peak = numpy.max(numpy.abs(y))
    if peak > 0:
        y /= peak
    return y
# Allow the module to be executed directly as a command-line tool.
if __name__ == "__main__":
    main()
"numpy.abs",
"PIL.Image.open",
"librosa.istft",
"numpy.array",
"os.path.splitext",
"soundfile.write",
"os.path.join",
"os.listdir"
] | [((500, 519), 'librosa.istft', 'librosa.istft', (['spec'], {}), '(spec)\n', (513, 519), False, 'import librosa\n'), ((135, 153), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (147, 153), False, 'import os\n'), ((345, 377), 'soundfile.write', 'soundfile.write', (['out_f', 'y', '(22050)'], {}), '(out_f, y, 22050)\n', (360, 377), False, 'import soundfile\n'), ((539, 551), 'numpy.abs', 'numpy.abs', (['y'], {}), '(y)\n', (548, 551), False, 'import numpy\n'), ((163, 176), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (173, 176), False, 'import os\n'), ((411, 424), 'PIL.Image.open', 'Image.open', (['x'], {}), '(x)\n', (421, 424), False, 'from PIL import Image\n'), ((305, 324), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (321, 324), False, 'import os\n'), ((449, 465), 'numpy.array', 'numpy.array', (['img'], {}), '(img)\n', (460, 465), False, 'import numpy\n'), ((180, 199), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (196, 199), False, 'import os\n')] |
import numpy as np
from scipy.spatial.distance import cosine as cosine
class Evaluator(object):
    """Top-k retrieval evaluator based on cosine distance.

    Ranks gallery embeddings by cosine distance to each query embedding and
    accumulates recall@k, where ground truth is defined by queries and
    gallery items that share the same integer id.
    """

    def __init__(self,
                 query_dict_fn,
                 gallery_dict_fn,
                 topks=(3, 5, 10),
                 extract_feature=False):
        """Create the empty accumulators.

        Args:
            query_dict_fn(str): file with one integer id per line; line
                index i gives the id of query_embeds[i].
            gallery_dict_fn(str): file with one integer id per line; line
                index i gives the id of gallery_embeds[i].
            topks(sequence of int): the k values to evaluate
                (default retrieves top3, top5, top10).
            extract_feature(bool): whether to save extracted garment
                feature or not.
        """
        self.topks = topks
        # recall@k is computed here as true_positive / num_relevant.
        self.recall = {k: [] for k in topks}
        self.query_dict, self.query_id2idx = self.get_id_dict(query_dict_fn)
        self.gallery_dict, self.gallery_id2idx = self.get_id_dict(
            gallery_dict_fn)
        self.extract_feature = extract_feature

    def load_dict(self, fn):
        """Read one integer id per line; return {line_index: id}."""
        dic = dict()
        # Context manager so the file handle is always released
        # (the original left it open).
        with open(fn) as rf:
            for i, line in enumerate(rf):
                dic[i] = int(line.strip('\n'))
        return dic

    def inverse_dict(self, idx2id):
        """Invert an {idx: id} mapping into {id: [idx, ...]}."""
        id2idx = dict()
        for k, v in idx2id.items():  # k: idx, v: id
            id2idx.setdefault(v, []).append(k)
        return id2idx

    def single_query(self, query_id, query_feat, gallery_embeds, query_idx):
        """Return {k: recall@k} for one query feature.

        Args:
            query_id(int): ground-truth id of the query.
            query_feat(np.ndarray): embedding of the query.
            gallery_embeds(iterable of np.ndarray): gallery embeddings,
                indexed consistently with ``self.gallery_dict``.
            query_idx(int): index of the query (kept for interface
                compatibility; unused).
        """
        # Cosine distance from the query to every gallery embedding.
        # BUG FIX: flatten to 1-D with reshape(-1); recent scipy versions
        # reject the 2-D (1, n) inputs the original code passed.
        query_dist = np.array([cosine(feat.reshape(-1),
                                        query_feat.reshape(-1))
                               for feat in gallery_embeds])
        order = np.argsort(query_dist)
        # (A leftover debug print of query_id2idx was removed here.)
        single_recall = dict()
        for k in self.topks:
            relevant_num = len(self.gallery_id2idx[query_id])
            tp = sum(1 for idx in order[:k]
                     if self.gallery_dict[idx] == query_id)
            single_recall[k] = float(tp) / relevant_num
        return single_recall

    def show_results(self):
        """Print recall@k (percent) averaged over all queries seen so far."""
        print('--------------- Retrieval Evaluation ------------')
        for k in self.topks:
            recall = 100 * float(sum(self.recall[k])) / len(self.recall[k])
            print('Recall@%d = %.2f' % (k, recall))

    def evaluate(self, query_embeds, gallery_embeds):
        """Accumulate recall@k over all queries, then print the summary."""
        for i, query_feat in enumerate(query_embeds):
            query_id = self.query_dict[i]
            single_recall = self.single_query(query_id, query_feat,
                                              gallery_embeds, i)
            for k in self.topks:
                self.recall[k].append(single_recall[k])
        # BUG FIX: the summary was printed twice in the original.
        self.show_results()

    def show_retrieved_images(self, query_feat, gallery_embeds):
        """Print the gallery ids retrieved in the top-k for one query."""
        # Same 1-D flattening fix as in single_query.
        query_dist = np.array([cosine(feat.reshape(-1),
                                        query_feat.reshape(-1))
                               for feat in gallery_embeds])
        order = np.argsort(query_dist)
        for k in self.topks:
            for idx in order[:k]:
                print('retrieved id', self.gallery_dict[idx])

    def get_id_dict(self, id_file):
        """Parse an id file into (idx2id, id2idx) mappings."""
        idx2id, id2idx = {}, {}
        with open(id_file) as id_fn:
            for idx, line in enumerate(id_fn):
                img_id = int(line.strip('\n'))
                idx2id[idx] = img_id
                id2idx.setdefault(img_id, []).append(idx)
        return idx2id, id2idx
| [
"numpy.argsort",
"numpy.array"
] | [((1888, 1908), 'numpy.array', 'np.array', (['query_dist'], {}), '(query_dist)\n', (1896, 1908), True, 'import numpy as np\n'), ((1926, 1948), 'numpy.argsort', 'np.argsort', (['query_dist'], {}), '(query_dist)\n', (1936, 1948), True, 'import numpy as np\n'), ((3419, 3439), 'numpy.array', 'np.array', (['query_dist'], {}), '(query_dist)\n', (3427, 3439), True, 'import numpy as np\n'), ((3456, 3478), 'numpy.argsort', 'np.argsort', (['query_dist'], {}), '(query_dist)\n', (3466, 3478), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""
Module with post-processing related functions called from within the NFC
algorithm.
"""
__author__ = '<NAME>'
__all__ = ['cube_planet_free']
import numpy as np
from ..metrics import cube_inject_companions
import math
from matplotlib.pyplot import plot, xlim, ylim, axes, gca, show
def cube_planet_free(planet_parameter, cube, angs, psfn, plsc, imlib='opencv',
                     interpolation='lanczos4', transmission=None):
    """
    Return a cube in which negative fake companions have been injected at
    the positions/fluxes given by ``planet_parameter``.

    Parameters
    ----------
    planet_parameter: numpy.array or list
        The (r, theta, flux) for all known companions. For a 4d cube r,
        theta and flux must all be 1d arrays with length equal to
        cube.shape[0]; i.e. planet_parameter should have shape:
        (n_pl, 3, n_ch).
    cube: numpy.array
        The cube of fits images expressed as a numpy.array.
    angs: numpy.array
        The parallactic angle fits image expressed as a numpy.array.
    psfn: numpy.array
        The scaled psf expressed as a numpy.array.
    plsc: float
        The platescale, in arcsec per pixel.
    imlib : str, optional
        See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
    interpolation : str, optional
        See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
    transmission : optional
        Radial transmission profile forwarded to ``cube_inject_companions``.

    Returns
    -------
    cpf : numpy.array
        The cube with negative companions injected at the position given in
        planet_parameter.
    """
    cpf = np.zeros_like(cube)
    planet_parameter = np.array(planet_parameter)

    if cube.ndim == 4:
        # planet_parameter must provide one (r, theta, flux) triplet per
        # spectral channel: shape (n_pl, 3, n_ch).
        # BUG FIX: the channel axis is index 2 (the array is 3-D); the
        # original indexed shape[3], which raises IndexError.
        if planet_parameter.shape[2] != cube.shape[0]:
            raise TypeError("Input planet parameter with wrong dimensions.")

    for i in range(planet_parameter.shape[0]):
        # The first companion is subtracted from the original cube; each
        # subsequent one from the partially-cleaned result.
        cube_temp = cube if i == 0 else cpf

        if cube.ndim == 4:
            # BUG FIX: iterate over channel *indices*; the original wrote
            # ``for j in cube.shape[0]``, attempting to iterate an integer.
            for j in range(cube.shape[0]):
                cpf[j] = cube_inject_companions(cube_temp[j], psfn[j], angs,
                                                flevel=-planet_parameter[i, 2, j],
                                                plsc=plsc,
                                                rad_dists=[planet_parameter[i, 0, j]],
                                                n_branches=1,
                                                theta=planet_parameter[i, 1, j],
                                                imlib=imlib,
                                                interpolation=interpolation,
                                                verbose=False,
                                                transmission=transmission)
        else:
            cpf = cube_inject_companions(cube_temp, psfn, angs,
                                         flevel=-planet_parameter[i, 2],
                                         plsc=plsc,
                                         rad_dists=[planet_parameter[i, 0]],
                                         n_branches=1,
                                         theta=planet_parameter[i, 1],
                                         imlib=imlib,
                                         interpolation=interpolation,
                                         verbose=False,
                                         transmission=transmission)
    return cpf
def radial_to_eq(r=1, t=0, rError=0, tError=0, display=False):
    r"""
    Convert the position given in (r,t) into $\delta$ RA and $\delta$ DEC,
    as well as the corresponding uncertainties.
    t = 0 deg (resp. 90 deg) points toward North (resp. East).

    Parameters
    ----------
    r: float
        The radial coordinate.
    t: float
        The angular coordinate (position angle, in degrees).
    rError: float
        The error bar related to r.
    tError: float
        The error bar related to t (in degrees).
    display: boolean, optional
        If True, a figure illustrating the error ellipse is displayed.

    Returns
    -------
    out : tuple
        ((RA, RA error), (DEC, DEC error))
    """
    # Project (r, t) onto the equatorial axes: t is measured from North,
    # hence sin(t) along RA (East) and cos(t) along DEC (North).
    ra = (r * np.sin(math.radians(t)))
    dec = (r * np.cos(math.radians(t)))
    u, v = (ra, dec)

    # Same direction expressed as a standard counter-clockwise angle from
    # the RA axis, wrapped to [0, 2*pi).
    nu = np.mod(np.pi/2-math.radians(t), 2*np.pi)
    # Semi-axes of the error ellipse: radial error and tangential arc.
    a, b = (rError,r*np.sin(math.radians(tError)))
    # Parametric sampling of the ellipse, rotated by nu, centered at (u, v).
    beta = np.linspace(0, 2*np.pi, 5000)
    x, y = (u + (a * np.cos(beta) * np.cos(nu) - b * np.sin(beta) * np.sin(nu)),
            v + (b * np.sin(beta) * np.cos(nu) + a * np.cos(beta) * np.sin(nu)))

    # Extremes of the ellipse along each equatorial axis give the
    # (possibly asymmetric) error bars.
    raErrorInf = u - np.amin(x)
    raErrorSup = np.amax(x) - u
    decErrorInf = v - np.amin(y)
    decErrorSup = np.amax(y) - v

    if display:
        # Position marker and the full error ellipse.
        plot(u,v,'ks',x,y,'r')
        # Radial error bounds (r +/- rError along the position direction).
        plot((r+rError) * np.cos(nu), (r+rError) * np.sin(nu),'ob',
             (r-rError) * np.cos(nu), (r-rError) * np.sin(nu),'ob')
        # Angular error bounds (t +/- tError at radius r).
        plot(r * np.cos(nu+math.radians(tError)),
             r*np.sin(nu+math.radians(tError)),'ok')
        plot(r * np.cos(nu-math.radians(tError)),
             r*np.sin(nu-math.radians(tError)),'ok')
        # Circle of radius r centered on the star (origin).
        plot(0,0,'og',np.cos(np.linspace(0,2*np.pi,10000)) * r,
             np.sin(np.linspace(0,2*np.pi,10000)) * r,'y')
        # Line from the origin to the companion position.
        plot([0,r*np.cos(nu+math.radians(tError*0))],
             [0,r*np.sin(nu+math.radians(tError*0))],'k')
        axes().set_aspect('equal')
        lim = np.amax([a,b]) * 2.
        xlim([ra-lim,ra+lim])
        ylim([dec-lim,dec+lim])
        # Astronomical convention: RA customarily increases leftward.
        gca().invert_xaxis()
        show()

    # Symmetrize the error bars by averaging the two extremes.
    return ((ra,np.mean([raErrorInf,raErrorSup])),
            (dec,np.mean([decErrorInf,decErrorSup])))
def cart_to_polar(y, x, ceny=0, cenx=0):
    """Convert cartesian coordinates into polar ones.

    The polar coordinates (r, theta) are taken with respect to the given
    center (cenx, ceny).

    Parameters
    ----------
    x,y: float
        The cartesian coordinates.

    Returns
    -------
    out : tuple
        The polar coordinates (r, theta) with respect to (cenx, ceny);
        theta is expressed in degrees within [0, 360).
    """
    dy = y - ceny
    dx = x - cenx
    radius = np.sqrt(dy**2 + dx**2)
    angle = np.mod(np.degrees(np.arctan2(dy, dx)), 360)
    return radius, angle
def polar_to_cart(r, theta, ceny=0, cenx=0):
    """Convert polar coordinates into cartesian ones.

    The polar coordinates are taken with respect to the center
    (cenx, ceny); the returned cartesian coordinates (x, y) are relative
    to the bottom-left corner of the image.

    Parameters
    ----------
    r,theta: float
        The polar coordinates (theta in degrees).

    Returns
    -------
    out : tuple
        The cartesian coordinates (x, y).
    """
    angle = np.deg2rad(theta)
    return r * np.cos(angle) + cenx, r * np.sin(angle) + ceny
def ds9index_to_polar(y, x, ceny=0, cenx=0):
    """Convert a pixel index read on an image displayed with DS9 into polar
    coordinates (r, theta) with respect to a given center (cenx, ceny).

    DS9 index (x, y) corresponds to Python matrix index (y, x), and DS9
    places the center of pixel M[0,0] at coordinate (1, 1); hence a
    (0.5, 0.5) shift is subtracted from the DS9 reading before conversion.

    Parameters
    ----------
    x,y: float
        The pixel index in DS9.

    Returns
    -------
    out : tuple
        The polar coordinates (r, theta) with respect to (cenx, ceny);
        theta is expressed in degrees within [0, 360).
    """
    dy = y - 0.5 - ceny
    dx = x - 0.5 - cenx
    radius = np.sqrt(dy**2 + dx**2)
    angle = np.mod(np.degrees(np.arctan2(dy, dx)), 360)
    return radius, angle
def polar_to_ds9index(r, theta, ceny=0, cenx=0):
    """Convert a position (r, theta) relative to the center (cenx, ceny)
    into a pixel index as displayed by DS9.

    DS9 index (x, y) corresponds to Python matrix index (y, x), and DS9
    places the center of pixel M[0,0] at coordinate (1, 1); hence a
    (0.5, 0.5) shift is added to the cartesian projection.

    Parameters
    ----------
    r,theta: float
        The polar coordinates (theta in degrees).

    Returns
    -------
    out : tuple
        The DS9 pixel index (x_ds9, y_ds9).
    """
    angle = np.deg2rad(theta)
    x_ds9 = r * np.cos(angle) + 0.5 + cenx
    y_ds9 = r * np.sin(angle) + 0.5 + ceny
    return x_ds9, y_ds9
"numpy.arctan2",
"numpy.amin",
"matplotlib.pyplot.axes",
"numpy.mean",
"numpy.sin",
"matplotlib.pyplot.gca",
"numpy.zeros_like",
"math.radians",
"numpy.linspace",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.mod",
"numpy.cos",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.pl... | [((1573, 1592), 'numpy.zeros_like', 'np.zeros_like', (['cube'], {}), '(cube)\n', (1586, 1592), True, 'import numpy as np\n'), ((1617, 1643), 'numpy.array', 'np.array', (['planet_parameter'], {}), '(planet_parameter)\n', (1625, 1643), True, 'import numpy as np\n'), ((4188, 4219), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(5000)'], {}), '(0, 2 * np.pi, 5000)\n', (4199, 4219), True, 'import numpy as np\n'), ((5898, 5940), 'numpy.sqrt', 'np.sqrt', (['((y - ceny) ** 2 + (x - cenx) ** 2)'], {}), '((y - ceny) ** 2 + (x - cenx) ** 2)\n', (5905, 5940), True, 'import numpy as np\n'), ((7565, 7619), 'numpy.sqrt', 'np.sqrt', (['((y - 0.5 - ceny) ** 2 + (x - 0.5 - cenx) ** 2)'], {}), '((y - 0.5 - ceny) ** 2 + (x - 0.5 - cenx) ** 2)\n', (7572, 7619), True, 'import numpy as np\n'), ((4406, 4416), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (4413, 4416), True, 'import numpy as np\n'), ((4434, 4444), 'numpy.amax', 'np.amax', (['x'], {}), '(x)\n', (4441, 4444), True, 'import numpy as np\n'), ((4471, 4481), 'numpy.amin', 'np.amin', (['y'], {}), '(y)\n', (4478, 4481), True, 'import numpy as np\n'), ((4500, 4510), 'numpy.amax', 'np.amax', (['y'], {}), '(y)\n', (4507, 4510), True, 'import numpy as np\n'), ((4556, 4583), 'matplotlib.pyplot.plot', 'plot', (['u', 'v', '"""ks"""', 'x', 'y', '"""r"""'], {}), "(u, v, 'ks', x, y, 'r')\n", (4560, 4583), False, 'from matplotlib.pyplot import plot, xlim, ylim, axes, gca, show\n'), ((5236, 5262), 'matplotlib.pyplot.xlim', 'xlim', (['[ra - lim, ra + lim]'], {}), '([ra - lim, ra + lim])\n', (5240, 5262), False, 'from matplotlib.pyplot import plot, xlim, ylim, axes, gca, show\n'), ((5266, 5294), 'matplotlib.pyplot.ylim', 'ylim', (['[dec - lim, dec + lim]'], {}), '([dec - lim, dec + lim])\n', (5270, 5294), False, 'from matplotlib.pyplot import plot, xlim, ylim, axes, gca, show\n'), ((5327, 5333), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (5331, 5333), False, 'from matplotlib.pyplot import plot, 
xlim, ylim, axes, gca, show\n'), ((5956, 5986), 'numpy.arctan2', 'np.arctan2', (['(y - ceny)', '(x - cenx)'], {}), '(y - ceny, x - cenx)\n', (5966, 5986), True, 'import numpy as np\n'), ((6004, 6022), 'numpy.mod', 'np.mod', (['theta', '(360)'], {}), '(theta, 360)\n', (6010, 6022), True, 'import numpy as np\n'), ((7631, 7673), 'numpy.arctan2', 'np.arctan2', (['(y - 0.5 - ceny)', '(x - 0.5 - cenx)'], {}), '(y - 0.5 - ceny, x - 0.5 - cenx)\n', (7641, 7673), True, 'import numpy as np\n'), ((7686, 7704), 'numpy.mod', 'np.mod', (['theta', '(360)'], {}), '(theta, 360)\n', (7692, 7704), True, 'import numpy as np\n'), ((3988, 4003), 'math.radians', 'math.radians', (['t'], {}), '(t)\n', (4000, 4003), False, 'import math\n'), ((4028, 4043), 'math.radians', 'math.radians', (['t'], {}), '(t)\n', (4040, 4043), False, 'import math\n'), ((4099, 4114), 'math.radians', 'math.radians', (['t'], {}), '(t)\n', (4111, 4114), False, 'import math\n'), ((5208, 5223), 'numpy.amax', 'np.amax', (['[a, b]'], {}), '([a, b])\n', (5215, 5223), True, 'import numpy as np\n'), ((5359, 5392), 'numpy.mean', 'np.mean', (['[raErrorInf, raErrorSup]'], {}), '([raErrorInf, raErrorSup])\n', (5366, 5392), True, 'import numpy as np\n'), ((5411, 5446), 'numpy.mean', 'np.mean', (['[decErrorInf, decErrorSup]'], {}), '([decErrorInf, decErrorSup])\n', (5418, 5446), True, 'import numpy as np\n'), ((4153, 4173), 'math.radians', 'math.radians', (['tError'], {}), '(tError)\n', (4165, 4173), False, 'import math\n'), ((4605, 4615), 'numpy.cos', 'np.cos', (['nu'], {}), '(nu)\n', (4611, 4615), True, 'import numpy as np\n'), ((4630, 4640), 'numpy.sin', 'np.sin', (['nu'], {}), '(nu)\n', (4636, 4640), True, 'import numpy as np\n'), ((4673, 4683), 'numpy.cos', 'np.cos', (['nu'], {}), '(nu)\n', (4679, 4683), True, 'import numpy as np\n'), ((4698, 4708), 'numpy.sin', 'np.sin', (['nu'], {}), '(nu)\n', (4704, 4708), True, 'import numpy as np\n'), ((5167, 5173), 'matplotlib.pyplot.axes', 'axes', ([], {}), '()\n', (5171, 5173), 
False, 'from matplotlib.pyplot import plot, xlim, ylim, axes, gca, show\n'), ((5298, 5303), 'matplotlib.pyplot.gca', 'gca', ([], {}), '()\n', (5301, 5303), False, 'from matplotlib.pyplot import plot, xlim, ylim, axes, gca, show\n'), ((6519, 6536), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (6529, 6536), True, 'import numpy as np\n'), ((6562, 6579), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (6572, 6579), True, 'import numpy as np\n'), ((4254, 4264), 'numpy.cos', 'np.cos', (['nu'], {}), '(nu)\n', (4260, 4264), True, 'import numpy as np\n'), ((4286, 4296), 'numpy.sin', 'np.sin', (['nu'], {}), '(nu)\n', (4292, 4296), True, 'import numpy as np\n'), ((4335, 4345), 'numpy.cos', 'np.cos', (['nu'], {}), '(nu)\n', (4341, 4345), True, 'import numpy as np\n'), ((4367, 4377), 'numpy.sin', 'np.sin', (['nu'], {}), '(nu)\n', (4373, 4377), True, 'import numpy as np\n'), ((4952, 4984), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(10000)'], {}), '(0, 2 * np.pi, 10000)\n', (4963, 4984), True, 'import numpy as np\n'), ((5008, 5040), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(10000)'], {}), '(0, 2 * np.pi, 10000)\n', (5019, 5040), True, 'import numpy as np\n'), ((8685, 8702), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (8695, 8702), True, 'import numpy as np\n'), ((8738, 8755), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (8748, 8755), True, 'import numpy as np\n'), ((4239, 4251), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (4245, 4251), True, 'import numpy as np\n'), ((4271, 4283), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (4277, 4283), True, 'import numpy as np\n'), ((4320, 4332), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (4326, 4332), True, 'import numpy as np\n'), ((4352, 4364), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (4358, 4364), True, 'import numpy as np\n'), ((4742, 4762), 'math.radians', 'math.radians', (['tError'], {}), '(tError)\n', (4754, 
4762), False, 'import math\n'), ((4791, 4811), 'math.radians', 'math.radians', (['tError'], {}), '(tError)\n', (4803, 4811), False, 'import math\n'), ((4846, 4866), 'math.radians', 'math.radians', (['tError'], {}), '(tError)\n', (4858, 4866), False, 'import math\n'), ((4895, 4915), 'math.radians', 'math.radians', (['tError'], {}), '(tError)\n', (4907, 4915), False, 'import math\n'), ((5075, 5099), 'math.radians', 'math.radians', (['(tError * 0)'], {}), '(tError * 0)\n', (5087, 5099), False, 'import math\n'), ((5129, 5153), 'math.radians', 'math.radians', (['(tError * 0)'], {}), '(tError * 0)\n', (5141, 5153), False, 'import math\n')] |
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import settings
import datetime
import random
import time
import gym
import os
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate
from keras.models import Model, load_model, Sequential
from keras.initializers import RandomUniform
from keras.callbacks import TensorBoard
from keras.utils import plot_model
from keras.optimizers import Adam
from collections import deque
from matplotlib import style
from keras import backend
class CustomTensorBoard(TensorBoard):
    """TensorBoard callback that keeps a single log stream across many
    short ``.fit()`` calls.

    The stock callback opens a fresh writer and restarts the step counter
    on every ``.fit()``; here one writer is created up front and a manual
    ``self.step`` counter is advanced on each epoch-end flush.
    """

    # Overriding init to set the initial step and writer
    # (we want one log file for all .fit() calls).
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 0
        self._log_write_dir = self.log_dir
        # Single writer owned for the callback's whole lifetime.
        self.writer = tf.summary.create_file_writer(self.log_dir)

    # Overriding this method to stop creating the default log writer.
    def set_model(self, model):
        pass

    # Overridden: saves logs with our own step number
    # (otherwise every .fit() would start writing from the 0th step).
    def on_epoch_end(self, epoch, logs=None):
        # Bug fix: Keras may invoke this with logs=None; ``**None`` raised
        # TypeError before. Fall back to an empty dict.
        self.update_stats(**(logs or {}))

    # Overridden: we train for one batch only, no need to save anything
    # at batch end.
    def on_batch_end(self, batch, logs=None):
        pass

    # Overridden so the shared writer is not closed after a fit.
    def on_train_end(self, _):
        pass

    def on_train_batch_end(self, batch, logs=None):
        pass

    # Custom method for saving our own metrics:
    # writes the metrics at the current step, then advances it.
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)

    def _write_logs(self, logs, index):
        """Write each metric in ``logs`` as a scalar summary at ``index``."""
        with self.writer.as_default():
            for name, value in logs.items():
                tf.summary.scalar(name, value, step=index)
            self.step += 1
        self.writer.flush()
class Agent:
    """Actor-critic agent for discrete actions.

    Three Keras models share layers: ``actor`` (trained with a custom loss
    that takes the advantage ``delta`` as an extra input), ``critic``
    (state-value regression), and ``policy`` (the actor's layers without
    the delta input, used for action sampling at inference time).
    """
    def __init__(self,
                 input_shape,
                 action_space,
                 alpha,
                 beta,
                 gamma=0.99,
                 dense1=256,
                 dense2=256,
                 dropout_actor=0.2,
                 dropout_critic=0.2,
                 episode_offset=0,
                 record_game=False):
        """Build (or reload) the actor/critic networks.

        Args:
            input_shape: observation shape fed to the networks.
            action_space: number of discrete actions.
            alpha: actor learning rate.
            beta: critic learning rate.
            gamma: discount factor for bootstrapped returns.
            dense1, dense2: actor hidden-layer sizes (the critic uses
                hard-coded 500-unit layers).
            dropout_actor, dropout_critic: stored and checkpointed, but no
                Dropout layers are added in the current network layout.
            episode_offset: starting episode number, used to tag log dirs.
            record_game: when True, skip creating TensorBoard writers.
        """
        # Timestamp used to tag output files (e.g. the score plot).
        dt = datetime.datetime.timetuple(datetime.datetime.now())
        self.runtime_name = f"{dt.tm_mon:>02}-{dt.tm_mday:>02}--" \
                            f"{dt.tm_hour:>02}-{dt.tm_min:>02}-{dt.tm_sec:>02}"
        self.input_shape = input_shape
        self.action_space = action_space
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        # One TensorBoard writer per network; skipped for replay runs.
        if settings.ALLOW_TRAIN and not record_game:
            self.actor_tb = CustomTensorBoard(log_dir=f"tensorlogs/{settings.MODEL_NAME}-{episode_offset}-Actor")
            self.critic_tb = CustomTensorBoard(log_dir=f"tensorlogs/{settings.MODEL_NAME}-{episode_offset}-Critic")
        # Replay buffer of (state, action, reward, new_state, done) tuples.
        self.memory = deque(maxlen=settings.MAX_BATCH_SIZE)
        if settings.LOAD_MODEL:
            try:
                # Restore the layer sizes saved next to the weights so the
                # rebuilt networks match the checkpoint architecture.
                layers = np.load(f"{settings.MODEL_NAME}/model/layers.npy", allow_pickle=True)
                self.dense1, self.dense2, self.dropout_actor, self.dropout_critic = layers
                print(f"Loaded layers shapes: {settings.MODEL_NAME}: {layers}")
            except FileNotFoundError:
                self.dense1, self.dense2 = dense1, dense2
                self.dropout_actor, self.dropout_critic = dropout_actor, dropout_critic
            # np.load yields numpy scalars; cast back to builtin types.
            self.dense1, self.dense2 = int(self.dense1), int(self.dense2)
            self.dropout_actor, self.dropout_critic = float(self.dropout_actor), float(self.dropout_critic)
            self.actor, self.critic, self.policy = self.create_actor_critic_network()
            loaded = self.load_model()
            if loaded:
                print(f"Loading weights: {settings.MODEL_NAME}")
            else:
                print(f"Not loaded weights: {settings.MODEL_NAME}")
        else:
            self.dense1, self.dense2 = dense1, dense2
            self.dropout_actor, self.dropout_critic = dropout_actor, dropout_critic
            print(f"New model: {settings.MODEL_NAME}")
            self.actor, self.critic, self.policy = self.create_actor_critic_network()
    def create_actor_critic_network(self):
        """Build and compile the three models; returns (actor, critic, policy).

        Also writes architecture diagrams and text summaries under the
        model directory.
        """
        "Inputs"
        input_layer = Input(shape=self.input_shape)
        delta = Input(shape=[1, ])
        "Actor"
        act_dense1 = Dense(self.dense1, activation='relu')(input_layer)
        act_dense2 = Dense(self.dense2, activation='relu')(act_dense1)
        "Critic"
        crit_dense1 = Dense(500, activation='relu')(input_layer)
        crit_dense2 = Dense(500, activation='relu')(crit_dense1)
        'Outputs'
        probs = Dense(self.action_space, activation='softmax')(act_dense2)
        values = Dense(1, activation='linear')(crit_dense2)
        "Backend"
        def custom_loss(y_true, y_pred):
            """Policy-gradient loss: -log pi(a|s) scaled by the advantage (delta input)."""
            # Clip to avoid log(0); closes over the ``delta`` input tensor.
            out = backend.clip(y_pred, 1e-8, 1 - 1e-8)
            loglike = y_true * backend.log(out)
            loss = backend.sum(-loglike * delta)
            return loss
        actor = Model(inputs=[input_layer, delta], outputs=[probs])
        policy = Model(inputs=[input_layer], outputs=[probs])
        critic = Model(inputs=[input_layer], outputs=[values])
        actor.compile(optimizer=Adam(self.alpha), loss=custom_loss, metrics=['accuracy'])
        critic.compile(optimizer=Adam(self.beta), loss='mse', metrics=['accuracy'])
        os.makedirs(f"{settings.MODEL_NAME}/model", exist_ok=True)
        plot_model(actor, f"{settings.MODEL_NAME}/model/actor.png")
        plot_model(critic, f"{settings.MODEL_NAME}/model/critic.png")
        plot_model(policy, f"{settings.MODEL_NAME}/model/policy.png")
        with open(f"{settings.MODEL_NAME}/model/actor-summary.txt", 'w') as file:
            actor.summary(print_fn=lambda x: file.write(x + '\n'))
        with open(f"{settings.MODEL_NAME}/model/critic-summary.txt", 'w') as file:
            critic.summary(print_fn=lambda x: file.write(x + '\n'))
        with open(f"{settings.MODEL_NAME}/model/policy-summary.txt", 'w') as file:
            policy.summary(print_fn=lambda x: file.write(x + '\n'))
        return actor, critic, policy
    def actor_critic_train(self, train_data):
        """One actor-critic update from (state, action, reward, new_state, done) tuples."""
        Old_states = []
        New_states = []
        Rewards = []
        Dones = []
        Actions = []
        for old_state, action, reward, new_state, done in train_data:
            Old_states.append(old_state)
            New_states.append(new_state)
            Actions.append(action)
            Rewards.append(reward)
            # 1 for non-terminal steps so terminal states drop the bootstrap term.
            Dones.append(int(not done))
        Old_states = np.array(Old_states)
        Rewards = np.array(Rewards)
        Actions = np.array(Actions)
        Dones = np.array(Dones)
        current_critic_value = self.critic.predict([Old_states]).ravel()
        # future_actions = self.choose_action_list([New_states])
        future_critic_values = self.critic.predict([New_states]).ravel()
        # TD(0) target; delta is the advantage used by the actor's custom loss.
        target = Rewards + self.gamma * future_critic_values * Dones
        delta = target - current_critic_value
        # One-hot encode the taken actions as actor training targets.
        target_actions = np.zeros((len(Actions), self.action_space))
        for ind, act in enumerate(Actions):
            target_actions[ind][act] = 1
        self.critic.fit([Old_states], target, verbose=0, callbacks=[self.critic_tb])
        self.actor.fit([Old_states, delta], target_actions, verbose=0, callbacks=[self.actor_tb])
    def save_model(self):
        """Persist all three weight sets plus the layer-shape metadata.

        Retries on OSError (e.g. the file being read by another process).
        Returns True when saved, False when saving is disabled.
        """
        if not settings.SAVE_MODEL:
            return False
        while True:
            try:
                self.actor.save_weights(f"{settings.MODEL_NAME}/model/actor-weights")
                break
            except OSError:
                time.sleep(0.2)
        while True:
            try:
                self.critic.save_weights(f"{settings.MODEL_NAME}/model/critic-weights")
                break
            except OSError:
                time.sleep(0.2)
        while True:
            try:
                self.policy.save_weights(f"{settings.MODEL_NAME}/model/policy-weights")
                break
            except OSError:
                time.sleep(0.2)
        # Store layer sizes so a later run can rebuild a matching network.
        np.save(f"{settings.MODEL_NAME}/model/layers.npy",
                (self.dense1, self.dense2, self.dropout_actor, self.dropout_critic)
                )
        return True
    def choose_action_list(self, States):
        """Sample one action per state from the policy's softmax output."""
        probs = self.policy.predict([States])
        actions = np.array([np.random.choice(settings.ACTION_SPACE, p=p) for p in probs])
        return actions
    def load_model(self):
        """Load all three weight sets if every checkpoint file exists.

        Returns True on success, False when any file is missing.
        Retries each load on OSError, mirroring save_model().
        """
        if os.path.isfile(f"{settings.MODEL_NAME}/model/actor-weights") and \
                os.path.isfile(f"{settings.MODEL_NAME}/model/critic-weights") and \
                os.path.isfile(f"{settings.MODEL_NAME}/model/policy-weights"):
            while True:
                try:
                    self.actor.load_weights(f"{settings.MODEL_NAME}/model/actor-weights")
                    break
                except OSError:
                    time.sleep(0.2)
            while True:
                try:
                    self.critic.load_weights(f"{settings.MODEL_NAME}/model/critic-weights")
                    break
                except OSError:
                    time.sleep(0.2)
            while True:
                try:
                    self.policy.load_weights(f"{settings.MODEL_NAME}/model/policy-weights")
                    break
                except OSError:
                    time.sleep(0.2)
            return True
        else:
            return False
    def train(self):
        """Train model if memory is at minimum size.

        In STEP_TRAIN mode requires MIN_BATCH_SIZE samples and caps each
        update at MAX_BATCH_SIZE (random subsample); otherwise trains on
        the whole buffer. Optionally clears the buffer afterwards.
        """
        if not settings.STEP_TRAIN:
            self.actor_critic_train(list(self.memory))
        elif len(self.memory) < settings.MIN_BATCH_SIZE:
            return None
        elif len(self.memory) > settings.MAX_BATCH_SIZE:
            data = random.sample(self.memory, settings.MAX_BATCH_SIZE)
            self.actor_critic_train(data)
        else:
            self.actor_critic_train(list(self.memory))
        if settings.CLEAR_MEMORY_AFTER_TRAIN:
            self.memory.clear()
    def add_memmory(self, data):
        """Append one (state, action, reward, new_state, done) tuple to the buffer."""
        self.memory.append(data)
def training():
    """Run the full training loop over SIM_COUNT parallel environments.

    Uses the module-level ``agent``, ``stats`` and ``episode_offset``.
    Epsilon is annealed from a linspace schedule; episodes render
    periodically; Ctrl-C or exceeding TRAIN_MAX_MIN_DURATION triggers one
    final greedy episode before stopping. Returns the ``stats`` dict.
    """
    eps_iter = iter(np.linspace(settings.RAMP_EPS, settings.END_EPS, settings.EPS_INTERVAL))
    time_start = time.time()
    draw_timer = time.time()
    emergency_break = False
    for episode in range(0, settings.EPOCHS):
        try:
            # Render one episode every SHOW_INTERVAL minutes.
            if time.time() > draw_timer + settings.SHOW_INTERVAL * 60:
                render = True
                draw_timer = time.time()
            else:
                render = False
            # Epsilon schedule: greedy for the last/first shown episodes,
            # otherwise annealed (re-seeding the schedule when exhausted).
            if episode == settings.EPOCHS - 1 or emergency_break:
                eps = 0
                render = True
                if settings.SHOW_LAST:
                    input("Last agent is waiting...")
            elif episode == 0 and settings.SHOW_FIRST or not settings.ALLOW_TRAIN:
                eps = 0
                render = True
            elif settings.ENABLE_EPS:
                if episode < settings.EPS_INTERVAL / 4:
                    eps = settings.FIRST_EPS
                else:
                    try:
                        eps = next(eps_iter)
                    except StopIteration:
                        eps_iter = iter(
                            np.linspace(settings.INITIAL_SMALL_EPS, settings.END_EPS, settings.EPS_INTERVAL))
                        eps = next(eps_iter)
            else:
                eps = 0
            if render and settings.RENDER_WITH_ZERO_EPS:
                eps = 0
            # Spawn SIM_COUNT parallel environments for this episode.
            Games = []  # Close screen
            States = []
            for loop_ind in range(settings.SIM_COUNT):
                game = gym.make('LunarLander-v2')
                state = game.reset()
                Games.append(game)
                States.append(state)
            Scores = [0] * len(Games)
            step = 0
            All_score = []
            All_steps = []
            episode_time = time.time()
            # Step all live environments until each is done (or times out).
            while len(Games):
                if time.time() - episode_time > settings.TIMEOUT_AGENT:
                    print(f"Timeout episode {episode}!")
                    stop_loop = True
                else:
                    stop_loop = False
                step += 1
                Old_states = np.array(States)
                # Epsilon-greedy: random actions vs. policy sampling.
                if eps > np.random.random():
                    Actions = np.random.randint(0, settings.ACTION_SPACE, size=len(Old_states))
                else:
                    Actions = agent.choose_action_list(Old_states)
                Dones = []
                Rewards = []
                States = []
                for g_index, game in enumerate(Games):
                    # print(Actions[g_index])
                    state, reward, done, info = game.step(action=Actions[g_index])
                    Rewards.append(reward)
                    Scores[g_index] += reward
                    Dones.append(done)
                    States.append(state)
                if render:
                    # Only the first environment is drawn on screen.
                    Games[0].render()
                    # print(Actions[0])
                    time.sleep(settings.RENDER_DELAY)
                if settings.ALLOW_TRAIN:
                    for old_s, act, rew, st, don in zip(Old_states, Actions, Rewards, States, Dones):
                        agent.add_memmory((old_s, act, rew, st, don))
                    if settings.STEP_TRAIN:
                        agent.train()
                # Retire finished environments; iterate backwards so pops
                # don't shift the remaining indices.
                for ind_d in range(len(Games) - 1, -1, -1):
                    if Dones[ind_d] or stop_loop:
                        if ind_d == 0 and render:
                            render = False
                            Games[0].close()
                        All_score.append(Scores[ind_d])
                        All_steps.append(step)
                        stats['episode'].append(episode + episode_offset)
                        stats['eps'].append(eps)
                        stats['score'].append(Scores[ind_d])
                        stats['flighttime'].append(step)
                        Scores.pop(ind_d)
                        Games.pop(ind_d)
                        States.pop(ind_d)
            # Episode-level training when per-step training is off.
            if not settings.STEP_TRAIN and settings.ALLOW_TRAIN:
                agent.train()
            # Checkpoint every 5 episodes.
            if not (episode + episode_offset) % 5 and episode > 0 and settings.ALLOW_TRAIN:
                agent.save_model()
                np.save(f"{settings.MODEL_NAME}/last-episode-num.npy", episode + episode_offset)
        except KeyboardInterrupt:
            # Finish gracefully: one last greedy episode, then stop.
            emergency_break = True
        print(f"Step-Ep[{episode + episode_offset:^7} of {settings.EPOCHS + episode_offset}], "
              f"Eps: {eps:>1.3f} "
              f"avg-score: {np.mean(All_score):^8.1f}, "
              f"avg-steps: {np.mean(All_steps):^7.1f}, "
              f"time-left: {(settings.TRAIN_MAX_MIN_DURATION * 60 - (time.time() - time_start)) / 60:>04.1f} min"
              )
        time_end = time.time()
        if emergency_break:
            break
        elif settings.TRAIN_MAX_MIN_DURATION and (time_end - time_start) / 60 > settings.TRAIN_MAX_MIN_DURATION:
            # Wall-clock budget exceeded: flag so the next episode is the last.
            emergency_break = True
    print(f"Run ended: {settings.MODEL_NAME}-{episode_offset}")
    print(f"Time elapsed: {(time_end - time_start) / 60:3.1f}m, "
          f"{(time_end - time_start) / (episode + 1) * 1000 / 60:3.1f} min per 1k epochs")
    if settings.ALLOW_TRAIN:
        agent.save_model()
        np.save(f"{settings.MODEL_NAME}/last-episode-num.npy", episode + 1 + episode_offset)
    return stats
def moving_average(array, window_size=None, multi_agents=1):
    """Smooth *array* with a trailing mean, sampling every *multi_agents*-th point.

    When *window_size* is missing or larger than the data, a quarter-length
    window is used; a too-short array is returned unchanged. The window is
    then shrunk until it divides both the data length and the agent count.
    """
    n = len(array)
    if not window_size or window_size and n < window_size:
        window_size = n // 4
    if window_size < 1:
        # Not enough data to smooth at all.
        return array
    while n % window_size or window_size % multi_agents:
        window_size -= 1
        if window_size < 1:
            window_size = 1
            break
    smoothed = []
    for idx in range(multi_agents - 1, n, multi_agents):
        # Trailing window, clipped at the start of the array.
        start = 0 if idx < window_size else idx - window_size
        smoothed.append(np.mean(array[start:idx + 1]))
    if n % window_size:
        # Leftover tail that the stride skipped.
        smoothed.append(np.mean(array[-window_size:]))
    return smoothed
def validate_stats_len(stat_dict):
    """Trim every stats list so its length is a multiple of SIM_COUNT.

    Keeps the per-agent series aligned for plotting; mutates and returns
    *stat_dict*.
    """
    surplus = len(stat_dict['episode']) % settings.SIM_COUNT
    if surplus > 0:
        for key in ('episode', 'eps', 'score', 'flighttime'):
            stat_dict[key] = stat_dict[key][:-surplus]
    return stat_dict
def plot_results(stats):
    """Plot score, flight-time and effectiveness charts for a finished run.

    Saves the figure under the model directory when SAVE_PICS is set and
    optionally plays an audible alert when SOUND_ALERT is set.
    """
    print("Plotting data now...")
    stats = validate_stats_len(stats)
    style.use('ggplot')
    plt.figure(figsize=(20, 11))
    episode_axis = range(stats['episode'][0], stats['episode'][-1] + 1)
    # Row 1: raw scores with their moving average.
    plt.subplot(311)
    plt.suptitle(f"{settings.MODEL_NAME}\nStats - {stats['episode'][0]}")
    plt.scatter(np.array(stats['episode']), stats['score'],
                alpha=0.2, marker='s', c='b', s=10, label="Score")
    plt.plot(episode_axis, moving_average(stats['score'], multi_agents=settings.SIM_COUNT),
             label='Average', linewidth=3)
    plt.legend(loc=2)
    # Row 2: episode lengths.
    plt.subplot(312)
    plt.scatter(stats['episode'], stats['flighttime'], label='Flight-time',
                color='b', marker='o', s=10, alpha=0.5)
    plt.plot(episode_axis, moving_average(stats['flighttime'], multi_agents=settings.SIM_COUNT),
             label='Average', linewidth=3)
    plt.legend(loc=2)
    # Row 3: score earned per step.
    plt.subplot(313)
    effectiveness = [score / moves for score, moves in zip(stats['score'], stats['flighttime'])]
    plt.scatter(stats['episode'], effectiveness, label='Effectiveness',
                color='b', marker='o', s=10, alpha=0.5)
    plt.plot(episode_axis, moving_average(effectiveness, multi_agents=settings.SIM_COUNT),
             label='Average', linewidth=3)
    plt.xlabel("Epoch")
    plt.subplots_adjust(hspace=0.3)
    plt.legend(loc=2)
    if settings.SAVE_PICS:
        plt.savefig(f"{settings.MODEL_NAME}/scores-{agent.runtime_name}.png")
    if settings.SOUND_ALERT:
        os.system("play -nq -t alsa synth 0.2 sine 550")
if __name__ == "__main__":
    # Cap TensorFlow's GPU memory use so several runs can share one card.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = False
    config.gpu_options.per_process_gpu_memory_fraction = 0.2
    sess = tf.compat.v1.Session(config=config)
    # Resume episode numbering from the last checkpoint when loading.
    try:
        if settings.LOAD_MODEL:
            episode_offset = np.load(f"{settings.MODEL_NAME}/last-episode-num.npy", allow_pickle=True)
        else:
            episode_offset = 0
    except FileNotFoundError:
        episode_offset = 0
    os.makedirs(f"{settings.MODEL_NAME}", exist_ok=True)
    # Per-environment-episode series filled in by training().
    stats = {
        "episode": [],
        "eps": [],
        "score": [],
        "flighttime": []}
    agent = Agent(alpha=settings.ALPHA, beta=settings.BETA, gamma=settings.GAMMA,
                  input_shape=settings.INPUT_SHAPE,
                  action_space=settings.ACTION_SPACE,
                  dense1=settings.DENSE1,
                  dense2=settings.DENSE2,
                  dropout_actor=settings.DROPOUT1,
                  dropout_critic=settings.DROPOUT2,
                  episode_offset=episode_offset)
    stats = training()
    plot_results(stats)
| [
"numpy.load",
"matplotlib.style.use",
"matplotlib.pyplot.suptitle",
"random.sample",
"keras.models.Model",
"matplotlib.pyplot.figure",
"os.path.isfile",
"numpy.mean",
"keras.layers.Input",
"collections.deque",
"tensorflow.compat.v1.Session",
"keras.utils.plot_model",
"numpy.linspace",
"num... | [((10462, 10473), 'time.time', 'time.time', ([], {}), '()\n', (10471, 10473), False, 'import time\n'), ((10491, 10502), 'time.time', 'time.time', ([], {}), '()\n', (10500, 10502), False, 'import time\n'), ((16979, 16998), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (16988, 16998), False, 'from matplotlib import style\n'), ((17003, 17031), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 11)'}), '(figsize=(20, 11))\n', (17013, 17031), True, 'import matplotlib.pyplot as plt\n'), ((17098, 17114), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (17109, 17114), True, 'import matplotlib.pyplot as plt\n'), ((17119, 17191), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""{settings.MODEL_NAME}\nStats - {stats[\'episode\'][0]}"""'], {}), '(f"""{settings.MODEL_NAME}\nStats - {stats[\'episode\'][0]}""")\n', (17131, 17191), True, 'import matplotlib.pyplot as plt\n'), ((17458, 17475), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (17468, 17475), True, 'import matplotlib.pyplot as plt\n'), ((17481, 17497), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (17492, 17497), True, 'import matplotlib.pyplot as plt\n'), ((17502, 17617), 'matplotlib.pyplot.scatter', 'plt.scatter', (["stats['episode']", "stats['flighttime']"], {'label': '"""Flight-time"""', 'color': '"""b"""', 'marker': '"""o"""', 's': '(10)', 'alpha': '(0.5)'}), "(stats['episode'], stats['flighttime'], label='Flight-time',\n color='b', marker='o', s=10, alpha=0.5)\n", (17513, 17617), True, 'import matplotlib.pyplot as plt\n'), ((17747, 17764), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (17757, 17764), True, 'import matplotlib.pyplot as plt\n'), ((17770, 17786), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (17781, 17786), True, 'import matplotlib.pyplot as plt\n'), ((17888, 18000), 'matplotlib.pyplot.scatter', 
'plt.scatter', (["stats['episode']", 'effectiveness'], {'label': '"""Effectiveness"""', 'color': '"""b"""', 'marker': '"""o"""', 's': '(10)', 'alpha': '(0.5)'}), "(stats['episode'], effectiveness, label='Effectiveness', color=\n 'b', marker='o', s=10, alpha=0.5)\n", (17899, 18000), True, 'import matplotlib.pyplot as plt\n'), ((18110, 18129), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (18120, 18129), True, 'import matplotlib.pyplot as plt\n'), ((18134, 18165), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)'}), '(hspace=0.3)\n', (18153, 18165), True, 'import matplotlib.pyplot as plt\n'), ((18170, 18187), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (18180, 18187), True, 'import matplotlib.pyplot as plt\n'), ((18423, 18449), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (18447, 18449), True, 'import tensorflow as tf\n'), ((18566, 18601), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (18586, 18601), True, 'import tensorflow as tf\n'), ((18854, 18906), 'os.makedirs', 'os.makedirs', (['f"""{settings.MODEL_NAME}"""'], {'exist_ok': '(True)'}), "(f'{settings.MODEL_NAME}', exist_ok=True)\n", (18865, 18906), False, 'import os\n'), ((833, 876), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['self.log_dir'], {}), '(self.log_dir)\n', (862, 876), True, 'import tensorflow as tf\n'), ((2976, 3013), 'collections.deque', 'deque', ([], {'maxlen': 'settings.MAX_BATCH_SIZE'}), '(maxlen=settings.MAX_BATCH_SIZE)\n', (2981, 3013), False, 'from collections import deque\n'), ((4408, 4437), 'keras.layers.Input', 'Input', ([], {'shape': 'self.input_shape'}), '(shape=self.input_shape)\n', (4413, 4437), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate\n'), ((4454, 4470), 'keras.layers.Input', 'Input', ([], {'shape': 
'[1]'}), '(shape=[1])\n', (4459, 4470), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate\n'), ((5233, 5284), 'keras.models.Model', 'Model', ([], {'inputs': '[input_layer, delta]', 'outputs': '[probs]'}), '(inputs=[input_layer, delta], outputs=[probs])\n', (5238, 5284), False, 'from keras.models import Model, load_model, Sequential\n'), ((5302, 5346), 'keras.models.Model', 'Model', ([], {'inputs': '[input_layer]', 'outputs': '[probs]'}), '(inputs=[input_layer], outputs=[probs])\n', (5307, 5346), False, 'from keras.models import Model, load_model, Sequential\n'), ((5364, 5409), 'keras.models.Model', 'Model', ([], {'inputs': '[input_layer]', 'outputs': '[values]'}), '(inputs=[input_layer], outputs=[values])\n', (5369, 5409), False, 'from keras.models import Model, load_model, Sequential\n'), ((5594, 5652), 'os.makedirs', 'os.makedirs', (['f"""{settings.MODEL_NAME}/model"""'], {'exist_ok': '(True)'}), "(f'{settings.MODEL_NAME}/model', exist_ok=True)\n", (5605, 5652), False, 'import os\n'), ((5661, 5720), 'keras.utils.plot_model', 'plot_model', (['actor', 'f"""{settings.MODEL_NAME}/model/actor.png"""'], {}), "(actor, f'{settings.MODEL_NAME}/model/actor.png')\n", (5671, 5720), False, 'from keras.utils import plot_model\n'), ((5729, 5790), 'keras.utils.plot_model', 'plot_model', (['critic', 'f"""{settings.MODEL_NAME}/model/critic.png"""'], {}), "(critic, f'{settings.MODEL_NAME}/model/critic.png')\n", (5739, 5790), False, 'from keras.utils import plot_model\n'), ((5799, 5860), 'keras.utils.plot_model', 'plot_model', (['policy', 'f"""{settings.MODEL_NAME}/model/policy.png"""'], {}), "(policy, f'{settings.MODEL_NAME}/model/policy.png')\n", (5809, 5860), False, 'from keras.utils import plot_model\n'), ((6792, 6812), 'numpy.array', 'np.array', (['Old_states'], {}), '(Old_states)\n', (6800, 6812), True, 'import numpy as np\n'), ((6831, 6848), 'numpy.array', 'np.array', (['Rewards'], {}), '(Rewards)\n', (6839, 6848), True, 
'import numpy as np\n'), ((6867, 6884), 'numpy.array', 'np.array', (['Actions'], {}), '(Actions)\n', (6875, 6884), True, 'import numpy as np\n'), ((6901, 6916), 'numpy.array', 'np.array', (['Dones'], {}), '(Dones)\n', (6909, 6916), True, 'import numpy as np\n'), ((8300, 8424), 'numpy.save', 'np.save', (['f"""{settings.MODEL_NAME}/model/layers.npy"""', '(self.dense1, self.dense2, self.dropout_actor, self.dropout_critic)'], {}), "(f'{settings.MODEL_NAME}/model/layers.npy', (self.dense1, self.\n dense2, self.dropout_actor, self.dropout_critic))\n", (8307, 8424), True, 'import numpy as np\n'), ((10372, 10443), 'numpy.linspace', 'np.linspace', (['settings.RAMP_EPS', 'settings.END_EPS', 'settings.EPS_INTERVAL'], {}), '(settings.RAMP_EPS, settings.END_EPS, settings.EPS_INTERVAL)\n', (10383, 10443), True, 'import numpy as np\n'), ((15120, 15131), 'time.time', 'time.time', ([], {}), '()\n', (15129, 15131), False, 'import time\n'), ((15613, 15701), 'numpy.save', 'np.save', (['f"""{settings.MODEL_NAME}/last-episode-num.npy"""', '(episode + 1 + episode_offset)'], {}), "(f'{settings.MODEL_NAME}/last-episode-num.npy', episode + 1 +\n episode_offset)\n", (15620, 15701), True, 'import numpy as np\n'), ((17218, 17244), 'numpy.array', 'np.array', (["stats['episode']"], {}), "(stats['episode'])\n", (17226, 17244), True, 'import numpy as np\n'), ((18224, 18293), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{settings.MODEL_NAME}/scores-{agent.runtime_name}.png"""'], {}), "(f'{settings.MODEL_NAME}/scores-{agent.runtime_name}.png')\n", (18235, 18293), True, 'import matplotlib.pyplot as plt\n'), ((18332, 18380), 'os.system', 'os.system', (['"""play -nq -t alsa synth 0.2 sine 550"""'], {}), "('play -nq -t alsa synth 0.2 sine 550')\n", (18341, 18380), False, 'import os\n'), ((2338, 2361), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2359, 2361), False, 'import datetime\n'), ((4511, 4548), 'keras.layers.Dense', 'Dense', (['self.dense1'], {'activation': 
'"""relu"""'}), "(self.dense1, activation='relu')\n", (4516, 4548), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate\n'), ((4583, 4620), 'keras.layers.Dense', 'Dense', (['self.dense2'], {'activation': '"""relu"""'}), "(self.dense2, activation='relu')\n", (4588, 4620), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate\n'), ((4673, 4702), 'keras.layers.Dense', 'Dense', (['(500)'], {'activation': '"""relu"""'}), "(500, activation='relu')\n", (4678, 4702), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate\n'), ((4738, 4767), 'keras.layers.Dense', 'Dense', (['(500)'], {'activation': '"""relu"""'}), "(500, activation='relu')\n", (4743, 4767), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate\n'), ((4816, 4862), 'keras.layers.Dense', 'Dense', (['self.action_space'], {'activation': '"""softmax"""'}), "(self.action_space, activation='softmax')\n", (4821, 4862), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate\n'), ((4892, 4921), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (4897, 4921), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Input, concatenate\n'), ((5058, 5096), 'keras.backend.clip', 'backend.clip', (['y_pred', '(1e-08)', '(1 - 1e-08)'], {}), '(y_pred, 1e-08, 1 - 1e-08)\n', (5070, 5096), False, 'from keras import backend\n'), ((5162, 5191), 'keras.backend.sum', 'backend.sum', (['(-loglike * delta)'], {}), '(-loglike * delta)\n', (5173, 5191), False, 'from keras import backend\n'), ((8713, 8773), 'os.path.isfile', 'os.path.isfile', (['f"""{settings.MODEL_NAME}/model/actor-weights"""'], {}), "(f'{settings.MODEL_NAME}/model/actor-weights')\n", (8727, 8773), False, 'import os\n'), ((8796, 8857), 'os.path.isfile', 'os.path.isfile', 
(['f"""{settings.MODEL_NAME}/model/critic-weights"""'], {}), "(f'{settings.MODEL_NAME}/model/critic-weights')\n", (8810, 8857), False, 'import os\n'), ((8880, 8941), 'os.path.isfile', 'os.path.isfile', (['f"""{settings.MODEL_NAME}/model/policy-weights"""'], {}), "(f'{settings.MODEL_NAME}/model/policy-weights')\n", (8894, 8941), False, 'import os\n'), ((12149, 12160), 'time.time', 'time.time', ([], {}), '()\n', (12158, 12160), False, 'import time\n'), ((16449, 16478), 'numpy.mean', 'np.mean', (['array[-window_size:]'], {}), '(array[-window_size:])\n', (16456, 16478), True, 'import numpy as np\n'), ((18673, 18746), 'numpy.load', 'np.load', (['f"""{settings.MODEL_NAME}/last-episode-num.npy"""'], {'allow_pickle': '(True)'}), "(f'{settings.MODEL_NAME}/last-episode-num.npy', allow_pickle=True)\n", (18680, 18746), True, 'import numpy as np\n'), ((1808, 1850), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['name', 'value'], {'step': 'index'}), '(name, value, step=index)\n', (1825, 1850), True, 'import tensorflow as tf\n'), ((3089, 3158), 'numpy.load', 'np.load', (['f"""{settings.MODEL_NAME}/model/layers.npy"""'], {'allow_pickle': '(True)'}), "(f'{settings.MODEL_NAME}/model/layers.npy', allow_pickle=True)\n", (3096, 3158), True, 'import numpy as np\n'), ((5126, 5142), 'keras.backend.log', 'backend.log', (['out'], {}), '(out)\n', (5137, 5142), False, 'from keras import backend\n'), ((5443, 5459), 'keras.optimizers.Adam', 'Adam', (['self.alpha'], {}), '(self.alpha)\n', (5447, 5459), False, 'from keras.optimizers import Adam\n'), ((5534, 5549), 'keras.optimizers.Adam', 'Adam', (['self.beta'], {}), '(self.beta)\n', (5538, 5549), False, 'from keras.optimizers import Adam\n'), ((8590, 8634), 'numpy.random.choice', 'np.random.choice', (['settings.ACTION_SPACE'], {'p': 'p'}), '(settings.ACTION_SPACE, p=p)\n', (8606, 8634), True, 'import numpy as np\n'), ((10606, 10617), 'time.time', 'time.time', ([], {}), '()\n', (10615, 10617), False, 'import time\n'), ((10721, 10732), 
'time.time', 'time.time', ([], {}), '()\n', (10730, 10732), False, 'import time\n'), ((11872, 11898), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (11880, 11898), False, 'import gym\n'), ((12472, 12488), 'numpy.array', 'np.array', (['States'], {}), '(States)\n', (12480, 12488), True, 'import numpy as np\n'), ((14574, 14659), 'numpy.save', 'np.save', (['f"""{settings.MODEL_NAME}/last-episode-num.npy"""', '(episode + episode_offset)'], {}), "(f'{settings.MODEL_NAME}/last-episode-num.npy', episode + episode_offset\n )\n", (14581, 14659), True, 'import numpy as np\n'), ((16262, 16293), 'numpy.mean', 'np.mean', (['array[:sample_num + 1]'], {}), '(array[:sample_num + 1])\n', (16269, 16293), True, 'import numpy as np\n'), ((16335, 16390), 'numpy.mean', 'np.mean', (['array[sample_num - window_size:sample_num + 1]'], {}), '(array[sample_num - window_size:sample_num + 1])\n', (16342, 16390), True, 'import numpy as np\n'), ((7861, 7876), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (7871, 7876), False, 'import time\n'), ((8068, 8083), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (8078, 8083), False, 'import time\n'), ((8276, 8291), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (8286, 8291), False, 'import time\n'), ((10025, 10076), 'random.sample', 'random.sample', (['self.memory', 'settings.MAX_BATCH_SIZE'], {}), '(self.memory, settings.MAX_BATCH_SIZE)\n', (10038, 10076), False, 'import random\n'), ((12514, 12532), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (12530, 12532), True, 'import numpy as np\n'), ((13283, 13316), 'time.sleep', 'time.sleep', (['settings.RENDER_DELAY'], {}), '(settings.RENDER_DELAY)\n', (13293, 13316), False, 'import time\n'), ((14885, 14903), 'numpy.mean', 'np.mean', (['All_score'], {}), '(All_score)\n', (14892, 14903), True, 'import numpy as np\n'), ((14942, 14960), 'numpy.mean', 'np.mean', (['All_steps'], {}), '(All_steps)\n', (14949, 14960), True, 'import numpy as 
np\n'), ((9156, 9171), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (9166, 9171), False, 'import time\n'), ((9388, 9403), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (9398, 9403), False, 'import time\n'), ((9620, 9635), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (9630, 9635), False, 'import time\n'), ((12210, 12221), 'time.time', 'time.time', ([], {}), '()\n', (12219, 12221), False, 'import time\n'), ((15040, 15051), 'time.time', 'time.time', ([], {}), '()\n', (15049, 15051), False, 'import time\n'), ((11479, 11564), 'numpy.linspace', 'np.linspace', (['settings.INITIAL_SMALL_EPS', 'settings.END_EPS', 'settings.EPS_INTERVAL'], {}), '(settings.INITIAL_SMALL_EPS, settings.END_EPS, settings.EPS_INTERVAL\n )\n', (11490, 11564), True, 'import numpy as np\n')] |
import os
import platform
import sys
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from imio.load import load_any
from brainreg.cli import main as brainreg_run
# Test fixtures live under tests/data relative to the working directory.
test_data_dir = Path(os.getcwd()) / "tests" / "data"
brain_data_dir = test_data_dir / "brain data"
# Reference outputs differ per OS, so they are stored per platform.
expected_niftyreg_output_dir = (
    test_data_dir / "registration_output" / platform.system()
)
# Voxel sizes passed to brainreg as CLI strings (z, y, x in microns).
x_pix = "40"
y_pix = "40"
z_pix = "50"
# Image-comparison tolerances used by are_images_equal().
relative_tolerance = 0.01
absolute_tolerance = 10
check_less_precise_pd = 1
# This will do a single run of brainreg when pytest is run
# The outputs are then tested in a separate test below
@pytest.fixture(scope="session")
def niftyreg_output_path(tmp_path_factory):
    """Run brainreg once per test session and yield its output directory.

    The CLI is driven through ``sys.argv`` exactly as a user would invoke
    it; the registered outputs are then inspected by the tests below.
    """
    test_output_dir = tmp_path_factory.mktemp("output_dir")
    sys.argv = [
        "brainreg",
        str(brain_data_dir),
        str(test_output_dir),
        "-v", z_pix, y_pix, x_pix,
        "--orientation", "psl",
        "--n-free-cpus", "0",
        "--atlas", "allen_mouse_100um",
        "-d", str(brain_data_dir),
    ]
    brainreg_run()
    return test_output_dir
@pytest.mark.parametrize(
    "image",
    [
        "boundaries.tiff",
        "deformation_field_0.tiff",
        "deformation_field_1.tiff",
        "deformation_field_2.tiff",
        "downsampled.tiff",
        "downsampled_brain data.tiff",
        "downsampled_standard.tiff",
        "downsampled_standard_brain data.tiff",
        "registered_atlas.tiff",
        "registered_hemispheres.tiff",
    ],
)
def test_images_output(niftyreg_output_path, image):
    """Each registered image must match the stored per-platform reference."""
    are_images_equal(image, niftyreg_output_path, expected_niftyreg_output_dir)
def test_volumes_output(niftyreg_output_path):
    """The region-volumes CSV must match the stored reference table."""
    produced = pd.read_csv(os.path.join(niftyreg_output_path, "volumes.csv"))
    reference = pd.read_csv(os.path.join(expected_niftyreg_output_dir, "volumes.csv"))
    pd.testing.assert_frame_equal(produced, reference)
def are_images_equal(image_name, output_directory, test_output_directory):
    """Assert that the produced and reference copies of *image_name* agree.

    Both images are loaded from disk and compared element-wise within the
    module-level relative/absolute tolerances.
    """
    produced = load_any(os.path.join(output_directory, image_name))
    reference = load_any(os.path.join(test_output_directory, image_name))
    np.testing.assert_allclose(
        produced, reference, rtol=relative_tolerance, atol=absolute_tolerance
    )
| [
"os.getcwd",
"numpy.testing.assert_allclose",
"pytest.fixture",
"platform.system",
"pytest.mark.parametrize",
"brainreg.cli.main",
"os.path.join"
] | [((626, 657), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (640, 657), False, 'import pytest\n'), ((1176, 1509), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""image"""', "['boundaries.tiff', 'deformation_field_0.tiff', 'deformation_field_1.tiff',\n 'deformation_field_2.tiff', 'downsampled.tiff',\n 'downsampled_brain data.tiff', 'downsampled_standard.tiff',\n 'downsampled_standard_brain data.tiff', 'registered_atlas.tiff',\n 'registered_hemispheres.tiff']"], {}), "('image', ['boundaries.tiff',\n 'deformation_field_0.tiff', 'deformation_field_1.tiff',\n 'deformation_field_2.tiff', 'downsampled.tiff',\n 'downsampled_brain data.tiff', 'downsampled_standard.tiff',\n 'downsampled_standard_brain data.tiff', 'registered_atlas.tiff',\n 'registered_hemispheres.tiff'])\n", (1199, 1509), False, 'import pytest\n'), ((372, 389), 'platform.system', 'platform.system', ([], {}), '()\n', (387, 389), False, 'import platform\n'), ((1131, 1145), 'brainreg.cli.main', 'brainreg_run', ([], {}), '()\n', (1143, 1145), True, 'from brainreg.cli import main as brainreg_run\n'), ((2214, 2314), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['image', 'test_image'], {'rtol': 'relative_tolerance', 'atol': 'absolute_tolerance'}), '(image, test_image, rtol=relative_tolerance, atol\n =absolute_tolerance)\n', (2240, 2314), True, 'import numpy as np\n'), ((2070, 2112), 'os.path.join', 'os.path.join', (['output_directory', 'image_name'], {}), '(output_directory, image_name)\n', (2082, 2112), False, 'import os\n'), ((2155, 2202), 'os.path.join', 'os.path.join', (['test_output_directory', 'image_name'], {}), '(test_output_directory, image_name)\n', (2167, 2202), False, 'import os\n'), ((216, 227), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (225, 227), False, 'import os\n'), ((1825, 1874), 'os.path.join', 'os.path.join', (['niftyreg_output_path', '"""volumes.csv"""'], {}), "(niftyreg_output_path, 'volumes.csv')\n", (1837, 
1874), False, 'import os\n'), ((1897, 1954), 'os.path.join', 'os.path.join', (['expected_niftyreg_output_dir', '"""volumes.csv"""'], {}), "(expected_niftyreg_output_dir, 'volumes.csv')\n", (1909, 1954), False, 'import os\n')] |
from __future__ import annotations
from typing import *
import logging
import random
from itertools import product
from collections import defaultdict
import torch
from torch import nn
import numpy as np
from ..components import *
from ..lm import LanguageModel
from ..util import batch
from .. import util
logger = logging.getLogger(__name__)
class PatternModel:
    """Mixture-of-soft-prompts model over a bank of relaxed patterns.

    Each pattern in ``pattern_bank`` carries trainable soft-prompt vectors fed
    through a frozen language model ``lm``; ``self.weights`` holds the
    (non-trainable) mixture weights over patterns, initialised uniform.
    """

    def __init__(
        self,
        pattern_bank: PatternBank,
        device: str,
        lm: LanguageModel,
        max_layer: int,
        gen_emb: bool = True,
        force_single_token: bool = False,
        vocab_file: str = None,
        conditional_prompt: bool = False,
    ) -> None:
        self.device = device
        self.pattern_bank = pattern_bank.hollow()
        self.lm = lm
        # The LM itself is frozen; only pattern vectors / biases are trained.
        self.lm.fix()
        if gen_emb:
            self.pattern_bank.gen_relaxed_emb(self.emb, min(lm.n_layer, max_layer), lm.dim, device)
        # Not for back-prop. Shape: [#pat] -- uniform mixture weights.
        self.weights = torch.ones(size=[len(self)], requires_grad=False, device=self.device) / len(self)
        # Don't train word_embedding
        self.emb.weight.requires_grad_(False)
        # Empirical distribution over blank (entity) token lengths; filled by
        # ``collect_blank_length`` and sampled by ``sample_lengths``.
        self.blank_length_voc = list()
        self.blank_length_prob: Optional[np.ndarray] = None
        self.force_single_token = force_single_token
        self.conditional_prompt = conditional_prompt
        # Restrict vocab: boolean mask over the tokenizer vocabulary.
        if vocab_file is not None:
            limited_vocab = open(vocab_file).read().split('\n')
            mask_ids = util.tokenizer.convert_tokens_to_ids(limited_vocab)
            assert len(limited_vocab) == len(mask_ids)
            self.vocab_mask = torch.zeros([util.tokenizer.vocab_size], device=self.device, dtype=torch.bool)
            self.vocab_mask[mask_ids] = True
        else:
            self.vocab_mask = torch.ones([util.tokenizer.vocab_size], device=self.device, dtype=torch.bool)

    def randomize_prompt(self):
        """Re-initialise every pattern vector from a Gaussian matched to the
        word-embedding table's per-dimension mean and std."""
        mean, std = self.emb.weight.mean(0), self.emb.weight.std(0)
        for pat in self.pattern_bank:
            for idx in range(len(pat.vector)):
                pat.vector[idx, :] = torch.normal(mean, std)
            pat.vector = pat.vector.detach().clone()
            pat.vector.requires_grad_(True)

    def __len__(self) -> int:
        return len(self.pattern_bank)

    @property
    def emb(self) -> nn.Embedding:
        # Shared word-embedding table of the underlying LM.
        return self.lm.emb

    @property
    def emb_dim(self) -> int:
        return self.emb.embedding_dim

    def parameters(self):
        """Yield the trainable parameters (per-pattern biases only)."""
        for pat in self.pattern_bank:
            # yield pat.vector
            yield pat.bias

    def iter_pattern_relation(
        self,
        rel: Union[Iterable[RelationInstance], RelationInstance, List[List[str]]],
        batch_size: int,
        shuffle: bool,
        iter_method: str,
        responsibility: Optional[torch.Tensor] = None,
        tqdm_desc: Optional[str] = None,
        slot_mask: Optional[List[bool]] = None,
    ) -> Iterable[Dict[str, Union[torch.Tensor, int]]]:
        """Run the LM over (relation, pattern) pairs in batches.

        :param rel: relation instance(s) or raw entity lists to fill in.
        :param batch_size: number of (rel, pat) pairs per LM forward pass.
        :param shuffle: whether to shuffle the pair order before batching.
        :param iter_method: ``'zip'`` pairs rel[i] with pattern[i];
            ``'product'`` crosses every relation with every pattern.
        :param slot_mask: which entity slots to mask, forwarded to
            ``fill_relaxed_pattern``.
        :param responsibility: optional per-pair weights, shape [#rel, #pat].
        :param tqdm_desc: progress-bar description; ``None`` disables tqdm.
        :return: yields the dict returned by ``lm.relax_forward`` per batch.
        """
        if isinstance(rel, RelationInstance):
            rel = [rel]
        if responsibility is not None:
            assert responsibility.shape == (len(rel), len(self.pattern_bank))
        iter_func = {'zip': zip, 'product': product}[iter_method]
        if iter_method == 'zip':
            assert len(rel) == len(self.pattern_bank)
        rel_indices, pat_indices = list(range(len(rel))), list(range(len(self.pattern_bank)))
        to_iter = list(iter_func(rel_indices, pat_indices))
        if shuffle:
            random.shuffle(to_iter)
        for rel_pat_indices in batch(to_iter, batch_size, use_tqdm=tqdm_desc is not None, tqdm_desc=tqdm_desc):
            weights, word_vector_list, label_list, label_mask_list, bias_list = list(), list(), list(), list(), list()
            for rel_idx, pat_idx in rel_pat_indices:
                ri = rel[rel_idx]
                if isinstance(ri, RelationInstance):
                    entities = ri.entities
                else:
                    entities = ri
                pat = self.pattern_bank.bank[pat_idx]
                word_vector, labels, label_mask, bias_vector = fill_relaxed_pattern(
                    pat, entities, pat.bias, self.emb, self.device, slot_mask
                )
                word_vector_list.append(word_vector)
                bias_list.append(bias_vector)
                label_list.append(labels)
                label_mask_list.append(label_mask)
                if responsibility is not None:
                    weights.append(float(responsibility[rel_idx, pat_idx]))
            inputs = self.lm.prepare_inputs(word_vector_list, label_mask_list, label_list, bias_list)
            if responsibility is not None:
                weight_tensor = torch.tensor(weights, device=self.device)
            else:
                weight_tensor = None
            ret = self.lm.relax_forward(
                weights=weight_tensor,
                **inputs
            )
            yield ret

    def compute_responsibility(
        self,
        relation_bank: RelationBank,
        batch_size: int,
    ) -> torch.Tensor:
        """E-step: posterior responsibility of each pattern for each relation.

        :return: tensor of shape [#rel, #pat], rows summing to 1.
        """
        n_rel = len(relation_bank)
        ret_list = list()
        with torch.no_grad():
            for rst in self.iter_pattern_relation(
                relation_bank, batch_size, False, 'product', slot_mask=[False, True]
            ):
                ret_list.append(rst['log_target_dist'])
            ret = torch.cat(ret_list, dim=0)
        log_res = ret.reshape(n_rel, len(self.pattern_bank))
        # Bayes rule in log space: posterior ∝ likelihood * prior weight.
        log_res = log_res + self.weights.log()
        log_res = log_res - log_res.logsumexp(dim=1, keepdim=True)
        res = log_res.exp()
        # Shape: [#rel, #pat]
        return res

    def compute_precision(self, relation_bank: RelationBank, rank: int):
        """Fraction of relations whose gold answer appears in the top-``rank``
        predictions at the single masked slot."""
        entities = [x.entities for x in relation_bank.bank]
        predictions = []
        n_correct = 0
        for ret, (_, answer) in zip(
            self.iter_pattern_relation(entities, 1, False, 'product', None, slot_mask=[False, True]), entities
        ):
            topk = ret['word_dist'][0].topk(rank)[1].tolist()
            label_mask = ret['label_mask'][0]
            # Exactly one masked position is expected for this metric.
            assert int(label_mask.sum()) == 1
            label_idx = int(torch.arange(len(label_mask), device=label_mask.device)[label_mask])
            pred = topk[label_idx]
            predictions.append(pred)
            gold = util.tokenizer._convert_token_to_id(answer)
            if gold in pred:
                n_correct += 1
        return n_correct / len(entities)

    def compute_pattern_ppl(self) -> Tuple[torch.Tensor, float]:
        """Per-pattern perplexity on each pattern's source relation, plus the
        corpus-level average perplexity."""
        relation_entities = [pat.rel_in_corpus for pat in self.pattern_bank]
        pat_ppl = list()
        total_log_ppl = list()
        total_token = 0
        with torch.no_grad():
            for ret in self.iter_pattern_relation(relation_entities, 1, False, 'zip', None, 'Pattern PPL'):
                log_ppl = ret['log_target_dist'][0]
                log_ppl = log_ppl / ret['num_tokens']
                total_token += ret['num_tokens']
                pat_ppl.append(float(torch.exp(-log_ppl)))
                total_log_ppl.append(float(ret['log_target_dist'][0]))
        pat_ppl = torch.tensor(pat_ppl, device=self.device)
        avg_ppl = float(np.exp(-np.sum(total_log_ppl) / total_token))
        return pat_ppl, avg_ppl

    def fit_weight(
        self,
        responsibility: torch.Tensor
    ) -> NoReturn:
        """M-step: set mixture weights to the normalised column sums of the
        responsibility matrix (shape [#rel, #pat])."""
        weights = responsibility.sum(dim=0)
        self.weights = weights / weights.sum()

    def effective_pattern_num(self):
        """Effective number of patterns: exp of the entropy of the weights."""
        # return float(self.weights.sum()**2 / (self.weights**2).sum())
        return float(torch.exp(-torch.log(self.weights) @ self.weights))

    def dump(self):
        """Serialise mixture weights and the pattern bank to CPU tensors."""
        return {
            'weights': self.weights.cpu().detach(),
            'pattern_bank': self.pattern_bank.dump()
        }

    def load(self, states):
        """Restore state produced by :meth:`dump`, moving tensors to device."""
        self.weights: torch.Tensor = states['weights'].to(device=self.device)
        self.pattern_bank = states['pattern_bank']
        self.pattern_bank.to_device(self.device)

    def collect_blank_length(self, relation_bank: RelationBank):
        """Estimate the empirical distribution of entity token lengths.

        The -2 drops the tokenizer's special (BOS/EOS-style) tokens added by
        ``encode``.
        """
        cnt = defaultdict(int)
        for ri in relation_bank:
            lengths = tuple(len(util.tokenizer.encode(ent))-2 for ent in ri.entities)
            cnt[lengths] += 1
        cnt = list(cnt.items())
        cnt.sort(key=lambda x: x[1])
        prob = list()
        for lengths, c in cnt:
            self.blank_length_voc.append(lengths)
            prob.append(c / len(relation_bank))
        self.blank_length_prob = np.array(prob)

    def sample_lengths(self):
        """Draw one blank-length tuple from the collected length distribution."""
        sample_vec = np.random.multinomial(1, self.blank_length_prob)
        # Fix: ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` is
        # the correct dtype for a boolean index mask.
        select_idx = int(np.arange(len(self.blank_length_voc))[sample_vec.astype(bool)])
        select = self.blank_length_voc[select_idx]
        if self.force_single_token:
            select = [1 for _ in select]
        return select

    def top_by_weights(self, weights: torch.Tensor, top: int, verbose: int, additional_info=""):
        """Return indices of the ``top`` highest-weight patterns, optionally
        logging them at increasing verbosity levels."""
        top_k = torch.argsort(-weights)[:top].cpu().tolist()
        if verbose > 0:
            logger.info(f'Top {top} pattern list ({additional_info}): {top_k}')
            if verbose > 1:
                logger.info('They are:')
                for idx in top_k:
                    pat = self.pattern_bank[idx]
                    to_print = f'No. {idx}: {pat}.'
                    if verbose > 2:
                        to_print += f'Original entities: {pat.rel_in_corpus}'
                    logger.info(to_print)
        return top_k

    def conditional_generate_single_slot(
        self,
        batch_size: int,
        rel_bank: RelationBank,
        freq: Optional[np.ndarray] = None,
    ):
        """Rank vocabulary candidates for the single masked slot (entity[1]).

        :param freq: optional per-token prior frequencies multiplied into the
            mixture distribution.
        :return: (sorted predictions, rank of the gold answer, top-k counts).
        """
        single_option = self.force_single_token
        answers = torch.tensor(util.tokenizer.convert_tokens_to_ids([rel_ins.entities[1] for rel_ins in rel_bank]))
        self.force_single_token = True
        all_dist = list()
        with torch.no_grad():
            for ret in self.iter_pattern_relation(rel_bank, batch_size, False, 'product', None, None, [False, True]):
                lm = ret['label_mask']
                # Index of the masked position within each sequence.
                target_mask = torch.arange(lm.shape[1], device=lm.device).unsqueeze(0).expand_as(lm)[lm]
                target_dist = ret['word_dist'].gather(
                    1, target_mask.unsqueeze(1).unsqueeze(2).expand(-1, 1, ret['word_dist'].shape[2])
                ).squeeze(1)
                all_dist.append(target_dist.detach().clone().cpu())
        all_dist = torch.cat(all_dist, 0)
        # NOTE(review): all_dist lives on CPU while vocab_mask is on
        # self.device -- confirm this indexing works when device != 'cpu'.
        all_dist.T[~self.vocab_mask] = 0.0
        all_dist = all_dist.reshape([len(answers), -1, all_dist.shape[1]]).log()
        weights = self.weights
        if self.conditional_prompt:
            weights = weights * self.conditional_prompt_prob(batch_size, rel_bank)
        # Mix per-pattern distributions with the (possibly reweighted) prior.
        all_dist = (all_dist.permute(0, 2, 1) + weights.cpu().log()).permute(0, 2, 1)
        target_dist = all_dist.logsumexp(1)
        if freq is not None:
            freq_dist = torch.tensor(freq, dtype=torch.float32)
            target_dist = freq_dist.unsqueeze(0).log() + target_dist
        pred = (-target_dist).argsort(1)
        answer_ranks = (pred.T == answers).T
        answer_ranks = torch.arange(answer_ranks.shape[1]).unsqueeze(0).expand_as(answer_ranks)[answer_ranks]+1
        topk = (
            torch.arange(pred.shape[1]).unsqueeze(0).expand(answer_ranks.shape[0], -1) >=
            answer_ranks.unsqueeze(1).expand(-1, pred.shape[1])
        ).sum(0)
        self.force_single_token = single_option
        # The following line is super time consuming!
        # pred = [util.tokenizer.convert_ids_to_tokens(line) for line in pred]
        return pred, answer_ranks, topk

    def sample_entities(
        self,
        batch_size: int,
        output_format: str = 'JSON',  # Or tsv
    ):
        """Sample ``batch_size`` entity tuples by drawing a pattern from the
        mixture, sampling blank lengths, and letting the LM fill the masks."""
        word_vector_list, label_mask_list, label_list, length_list, token_id_list = \
            list(), list(), list(), list(), list()
        selected_pattern_idx = list()
        for i in range(batch_size):
            pat_idx = int(self.weights.multinomial(1)[0])
            selected_pattern_idx.append(pat_idx)
            lengths = self.sample_lengths()
            length_list.append(lengths)
            pat = self.pattern_bank[pat_idx]
            entities = [' '.join([util.tokenizer.mask_token]*le) for le in lengths]
            # NOTE(review): this call site passes 4 args and unpacks 3 values,
            # while iter_pattern_relation passes 6 and unpacks 4 -- confirm
            # fill_relaxed_pattern supports both signatures.
            word_vector, labels, label_mask = fill_relaxed_pattern(pat, entities, self.emb, self.device)
            word_vector_list.append(word_vector)
            label_mask_list.append(label_mask)
            label_list.append(labels)
            token_id_list.append(pat.token_ids)
        inputs = self.lm.prepare_inputs(word_vector_list, label_mask_list, label_list, token_id_list)
        with torch.no_grad():
            samples = self.lm.sample(**inputs)
        rst = list()
        for pat_idx, lengths, sample in zip(selected_pattern_idx, length_list, samples):
            cur = list()
            i = 0
            for length_ent in lengths:
                entity = util.tokenizer.convert_tokens_to_string(sample[i:length_ent+i]).strip()
                cur.append(entity)
                i += length_ent
            cur.append(str(self.pattern_bank[pat_idx]))
            rst.append(cur)
        if output_format == 'JSON':
            return rst
        return '\n'.join(['\t'.join(line) for line in rst])

    def conditional_prompt_prob(
        self,
        batch_size: int,
        rel_bank: RelationBank,
    ):
        """Probability of each pattern given the (masked) relations, obtained
        by scoring every pattern against every relation with entity[1] masked."""
        masked_rel_bank = rel_bank.clone()
        for rel_ins in masked_rel_bank:
            rel_ins.entities[1] = util.tokenizer.mask_token
        all_log_dist = []
        with torch.no_grad():
            for ret in self.iter_pattern_relation(masked_rel_bank, batch_size, False, 'product', None, None, [True, False]):
                all_log_dist.append(ret['log_target_dist'])
            log_dist = torch.cat(all_log_dist)
        log_dist: torch.Tensor = log_dist.reshape([len(rel_bank), -1])
        log_dist = log_dist.sum(dim=0)
        prob = torch.softmax(log_dist, 0)
        return prob
| [
"torch.ones",
"numpy.sum",
"random.shuffle",
"numpy.random.multinomial",
"torch.argsort",
"torch.cat",
"logging.getLogger",
"torch.softmax",
"collections.defaultdict",
"torch.normal",
"torch.exp",
"numpy.array",
"torch.arange",
"torch.zeros",
"torch.no_grad",
"torch.log",
"torch.tens... | [((320, 347), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (337, 347), False, 'import logging\n'), ((5722, 5748), 'torch.cat', 'torch.cat', (['ret_list'], {'dim': '(0)'}), '(ret_list, dim=0)\n', (5731, 5748), False, 'import torch\n'), ((7496, 7537), 'torch.tensor', 'torch.tensor', (['pat_ppl'], {'device': 'self.device'}), '(pat_ppl, device=self.device)\n', (7508, 7537), False, 'import torch\n'), ((8445, 8461), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8456, 8461), False, 'from collections import defaultdict\n'), ((8864, 8878), 'numpy.array', 'np.array', (['prob'], {}), '(prob)\n', (8872, 8878), True, 'import numpy as np\n'), ((8931, 8979), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'self.blank_length_prob'], {}), '(1, self.blank_length_prob)\n', (8952, 8979), True, 'import numpy as np\n'), ((10809, 10831), 'torch.cat', 'torch.cat', (['all_dist', '(0)'], {}), '(all_dist, 0)\n', (10818, 10831), False, 'import torch\n'), ((14230, 14253), 'torch.cat', 'torch.cat', (['all_log_dist'], {}), '(all_log_dist)\n', (14239, 14253), False, 'import torch\n'), ((14379, 14405), 'torch.softmax', 'torch.softmax', (['log_dist', '(0)'], {}), '(log_dist, 0)\n', (14392, 14405), False, 'import torch\n'), ((1705, 1783), 'torch.zeros', 'torch.zeros', (['[util.tokenizer.vocab_size]'], {'device': 'self.device', 'dtype': 'torch.bool'}), '([util.tokenizer.vocab_size], device=self.device, dtype=torch.bool)\n', (1716, 1783), False, 'import torch\n'), ((1873, 1950), 'torch.ones', 'torch.ones', (['[util.tokenizer.vocab_size]'], {'device': 'self.device', 'dtype': 'torch.bool'}), '([util.tokenizer.vocab_size], device=self.device, dtype=torch.bool)\n', (1883, 1950), False, 'import torch\n'), ((3819, 3842), 'random.shuffle', 'random.shuffle', (['to_iter'], {}), '(to_iter)\n', (3833, 3842), False, 'import random\n'), ((5484, 5499), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5497, 5499), False, 
'import torch\n'), ((7068, 7083), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7081, 7083), False, 'import torch\n'), ((10257, 10272), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10270, 10272), False, 'import torch\n'), ((11289, 11328), 'torch.tensor', 'torch.tensor', (['freq'], {'dtype': 'torch.float32'}), '(freq, dtype=torch.float32)\n', (11301, 11328), False, 'import torch\n'), ((13077, 13092), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13090, 13092), False, 'import torch\n'), ((14009, 14024), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14022, 14024), False, 'import torch\n'), ((2174, 2197), 'torch.normal', 'torch.normal', (['mean', 'std'], {}), '(mean, std)\n', (2186, 2197), False, 'import torch\n'), ((5040, 5081), 'torch.tensor', 'torch.tensor', (['weights'], {'device': 'self.device'}), '(weights, device=self.device)\n', (5052, 5081), False, 'import torch\n'), ((7385, 7404), 'torch.exp', 'torch.exp', (['(-log_ppl)'], {}), '(-log_ppl)\n', (7394, 7404), False, 'import torch\n'), ((7570, 7591), 'numpy.sum', 'np.sum', (['total_log_ppl'], {}), '(total_log_ppl)\n', (7576, 7591), True, 'import numpy as np\n'), ((7964, 7987), 'torch.log', 'torch.log', (['self.weights'], {}), '(self.weights)\n', (7973, 7987), False, 'import torch\n'), ((9336, 9359), 'torch.argsort', 'torch.argsort', (['(-weights)'], {}), '(-weights)\n', (9349, 9359), False, 'import torch\n'), ((11507, 11542), 'torch.arange', 'torch.arange', (['answer_ranks.shape[1]'], {}), '(answer_ranks.shape[1])\n', (11519, 11542), False, 'import torch\n'), ((10461, 10504), 'torch.arange', 'torch.arange', (['lm.shape[1]'], {'device': 'lm.device'}), '(lm.shape[1], device=lm.device)\n', (10473, 10504), False, 'import torch\n'), ((11629, 11656), 'torch.arange', 'torch.arange', (['pred.shape[1]'], {}), '(pred.shape[1])\n', (11641, 11656), False, 'import torch\n')] |
import numpy as np
def mutation(exits, locations, mutpb):
    """Mutation that alters the routes of the guides of a single chromosome.

    Each gene (guide) mutates with probability ``mutpb``; a mutated gene has
    either its starting cell or its exit re-drawn uniformly from the feasible
    alternatives (excluding its current value). Arrays are mutated in place
    and also returned.
    """
    # Feasible 3m x 3m starting cells (pre-checked for feasibility).
    feasible_cells = np.array(
        [3, 4, 5, 6, 12, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 61, 62, 63, 64, 65, 66, 67, 68, 71, 72, 73, 74, 75, 76, 77, 78, 82, 83, 84, 85, 86, 87, 93, 94, 95, 96])
    # Feasible exit indices
    feasible_exits = np.array([0, 1, 2, 3, 4, 5])
    for gene in range(len(locations)):
        # Skip this gene unless the mutation coin comes up (prob mutpb).
        if np.random.rand(1)[0] > mutpb:
            continue
        # Second coin: > 0.5 relocates the guide, otherwise re-routes its exit.
        if np.random.rand(1)[0] > 0.5:
            # Draw a new start cell from the feasible cells minus the current one.
            candidates = feasible_cells[feasible_cells != locations[gene]]
            locations[gene] = candidates[np.random.randint(len(candidates))]
        else:
            # Draw a new exit from the feasible exits minus the current one.
            candidates = feasible_exits[feasible_exits != exits[gene]]
            exits[gene] = candidates[np.random.randint(len(candidates))]
    return exits, locations
| [
"numpy.random.rand",
"numpy.where",
"numpy.array",
"numpy.delete"
] | [((316, 628), 'numpy.array', 'np.array', (['[3, 4, 5, 6, 12, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32,\n 33, 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 61, 62, 63, 64, 65, 66, 67, 68, 71, 72,\n 73, 74, 75, 76, 77, 78, 82, 83, 84, 85, 86, 87, 93, 94, 95, 96]'], {}), '([3, 4, 5, 6, 12, 13, 14, 15, 16, 17, 21, 22, 23, 24, 25, 26, 27, \n 28, 31, 32, 33, 34, 35, 36, 37, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48,\n 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 61, 62, 63, 64, 65, 66, 67,\n 68, 71, 72, 73, 74, 75, 76, 77, 78, 82, 83, 84, 85, 86, 87, 93, 94, 95, 96]\n )\n', (324, 628), True, 'import numpy as np\n'), ((663, 691), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (671, 691), True, 'import numpy as np\n'), ((916, 933), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (930, 933), True, 'import numpy as np\n'), ((1069, 1086), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1083, 1086), True, 'import numpy as np\n'), ((1213, 1244), 'numpy.where', 'np.where', (['(cells == locations[i])'], {}), '(cells == locations[i])\n', (1221, 1244), True, 'import numpy as np\n'), ((1279, 1317), 'numpy.delete', 'np.delete', (['cells', 'delete_element', 'None'], {}), '(cells, delete_element, None)\n', (1288, 1317), True, 'import numpy as np\n'), ((1625, 1661), 'numpy.where', 'np.where', (['(feasible_exits == exits[i])'], {}), '(feasible_exits == exits[i])\n', (1633, 1661), True, 'import numpy as np\n'), ((1696, 1743), 'numpy.delete', 'np.delete', (['feasible_exits', 'delete_element', 'None'], {}), '(feasible_exits, delete_element, None)\n', (1705, 1743), True, 'import numpy as np\n')] |
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt
def normcdf(x_min=-4, x_max=4, mean=0, std=1, y_max=0.45, xlabel='x', ylabel='pdf(x)', legend_size=12,
            lb=-10, ub=10, font_size=20, alpha=1, fill_color='skyblue', bg_color='white',
            title='Normal Distribution ', fig_w=8, fig_l=8, grid=True, title_size=20, label_size=16,
            tick_size=12):
    """
    Normal Distribution
    parameters
    ----------
    x_min: The x-axis min value. The default value is -4.
    x_max: The x-axis max value. The default value is 4.
    mean: The Mean value. The default value is 0
    std: The Standard deviation value. The default value is 1.
    y_max: The y-axis max value. The default value is 0.45.
    xlabel: The x-axis label. The default value is 'x'.
    ylabel: The y-axis label. The default value is 'pdf(x)'.
    legend_size: The legend font size. The default value is 12.
    lb: The lower bound value. The default value is -10.
    ub: The upper bound value. The default value is 10.
    font_size: The title font size. The default value is 20.
    alpha: Alpha(transparency) value. The default value is 1.
    fill_color: The filling color. The default value is 'skyblue'.
    bg_color: The background color. If it is not white, it will show the probability. The default value is 'white'.
    title: The figure title. The default value is 'Normal Distribution '.
    fig_w: The Matplotlib `figsize` width. The default value is 8.
    fig_l: The Matplotlib `figsize` length. The default value is 8.
    grid: Use 'True' or 'False' to show the grid. The default value is 'True'.
    title_size: The x and y-axis title size. The default value is 20.
    label_size: The label font size. The default value is 16.
    tick_size: The x and y-axis tick size. The default value is 12.
    examples
    --------
    import statfig as sf
    sf.normcdf()
    sf.normcdf(mean=3, std=2, lb=4, ub=10, y_max=0.25, x_min=-4, x_max=10)
    """
    fig, ax = plt.subplots(1, 1, figsize=(fig_w, fig_l))
    # Grid for the distribution curve.
    x = np.arange(x_min, x_max, 0.1)
    ax.plot(x, norm.pdf(x, loc=mean, scale=std), label=None)
    # Append the distribution parameters to the title, e.g. 'X~N(0, 1²)'.
    # (The original `.format(mean, std, 2)` passed an unused third argument.)
    title = title + f' X~N({mean}, {std}\u00b2)'
    # P(lb < X < ub) under N(mean, std); build the frozen distribution once.
    dist = norm(mean, std)
    prob = round(dist.cdf(ub) - dist.cdf(lb), 2)
    # Background fill: a non-white background is labelled with the
    # complementary probability 1 - P.
    bg_prob = 'P(x)=%.2f' % (1 - prob)
    bg_label = None if bg_color in ('white', 'w', '#fff') else bg_prob
    ax.fill_between(x, norm.pdf(x, loc=mean, scale=std),
                    alpha=alpha, color=bg_color, label=bg_label)
    # Shaded probability region between the bounds.
    px = np.arange(lb, ub, 0.01)
    ax.set_ylim(0, y_max)
    ax.set_xlim(x_min, x_max)
    ax.fill_between(px, norm.pdf(px, loc=mean, scale=std),
                    alpha=alpha, color=fill_color, label='P(x)=%.2f' % prob)
    # (Removed a duplicated set_title/set pair -- the title and labels were
    # being set twice with identical effect.)
    ax.legend(fontsize=legend_size)
    ax.set_title(title, fontsize=font_size)
    ax.set(xlabel=xlabel, ylabel=ylabel)
    plt.rc('axes', titlesize=title_size)   # fontsize of the axes title
    plt.rc('axes', labelsize=label_size)   # fontsize of the x and y labels
    plt.rc('xtick', labelsize=tick_size)   # fontsize of the tick labels
    plt.rc('ytick', labelsize=tick_size)   # fontsize of the tick labels
    ax.grid(grid)
    plt.show()
def normpdf_std(val=[1, 2, 3, 4], x_min=-4, x_max=4, fig_w=8, fig_l=8, grid=True, xlabel='x', ylabel='pdf(x)',
                title='Normal Distribution', legend_size=12, font_size=20, label_size=16,
                tick_size=12, y_max=0.6, title_size=20):
    """
    Plot normal pdf curves for several standard deviations on one axis.
    parameters
    ----------
    val: The standard deviation values to display. The default value is [1,2,3,4].
    x_min: The x-axis min value. The default value is -4.
    x_max: The x-axis max value. The default value is 4.
    y_max: The y-axis max value. The default value is 0.6.
    xlabel: The x-axis label. The default value is 'x'.
    ylabel: The y-axis label. The default value is 'pdf(x)'.
    legend_size: The legend font size. The default value is 12.
    font_size: The title font size. The default value is 20.
    title: The figure title. The default value is 'Normal Distribution'.
    fig_w: The Matplotlib `figsize` width. The default value is 8.
    fig_l: The Matplotlib `figsize` length. The default value is 8.
    grid: Use 'True' or 'False' to show the grid. The default value is 'True'.
    title_size: The x and y-axis title size. The default value is 20.
    label_size: Label font size. The default value is 16.
    tick_size: The x and y-axis tick size. The default value is 12.
    examples
    --------
    import statfig as sf
    sf.normpdf_std()
    """
    fig, ax = plt.subplots(1, 1, figsize=(fig_w, fig_l))
    xs = np.linspace(x_min, x_max, 100)
    # One curve per standard deviation, all centred at mean 0.
    for sigma in val:
        ax.plot(xs, norm.pdf(xs, scale=sigma), label=f'std={sigma:.1f}')
    ax.set_ylim(0, y_max)
    ax.set_xlim(x_min, x_max)
    ax.legend(fontsize=legend_size)
    ax.set_title(title, fontsize=font_size)
    ax.set(xlabel=xlabel, ylabel=ylabel)
    plt.rc('axes', titlesize=title_size)   # fontsize of the axes title
    plt.rc('axes', labelsize=label_size)   # fontsize of the x and y labels
    plt.rc('xtick', labelsize=tick_size)   # fontsize of the tick labels
    plt.rc('ytick', labelsize=tick_size)   # fontsize of the tick labels
    ax.grid(grid)
    plt.show()
def normpdf_mean(val=[0, 1, 2, 3], x_min=-10, x_max=10, y_max=0.6, xlabel='x', ylabel='pdf(x)', legend_size=12,
                 font_size=20, title='Normal Distribution', fig_w=8, fig_l=8, grid=True,
                 title_size=20, label_size=16, tick_size=12):
    """
    Plot normal pdf curves for several means on one axis.
    parameters
    ----------
    val: The mean values to display. The default value is [0,1,2,3].
    x_min: The x-axis min value. The default value is -10.
    x_max: The x-axis max value. The default value is 10.
    y_max: The y-axis max value. The default value is 0.6.
    xlabel: The x-axis label. The default value is 'x'.
    ylabel: The y-axis label. The default value is 'pdf(x)'.
    legend_size: The legend font size. The default value is 12.
    font_size: The title font size. The default value is 20.
    title: The figure title. The default value is 'Normal Distribution'.
    fig_w: The Matplotlib `figsize` width. The default value is 8.
    fig_l: The Matplotlib `figsize` length. The default value is 8.
    grid: Use 'True' or 'False' to show the grid. The default value is 'True'.
    title_size: The x and y-axis title size. The default value is 20.
    label_size: Label font size. The default value is 16.
    tick_size: The x and y-axis tick size. The default value is 12.
    examples
    --------
    import statfig as sf
    sf.normpdf_mean()
    """
    fig, ax = plt.subplots(1, 1, figsize=(fig_w, fig_l))
    xs = np.linspace(x_min, x_max, 100)
    # One curve per mean, all with unit standard deviation.
    for mu in val:
        ax.plot(xs, norm.pdf(xs, loc=mu), label=f'mean={mu:.1f}')
    ax.set_ylim(0, y_max)
    ax.legend(fontsize=legend_size)
    ax.set_title(title, fontsize=font_size)
    ax.set(xlabel=xlabel, ylabel=ylabel)
    plt.rc('axes', titlesize=title_size)   # fontsize of the axes title
    plt.rc('axes', labelsize=label_size)   # fontsize of the x and y labels
    plt.rc('xtick', labelsize=tick_size)   # fontsize of the tick labels
    plt.rc('ytick', labelsize=tick_size)   # fontsize of the tick labels
    ax.grid(grid)
    plt.show()
| [
"scipy.stats.norm",
"matplotlib.pyplot.show",
"scipy.stats.norm.pdf",
"numpy.arange",
"matplotlib.pyplot.rc",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((2520, 2562), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(fig_w, fig_l)'}), '(1, 1, figsize=(fig_w, fig_l))\n', (2532, 2562), True, 'import matplotlib.pyplot as plt\n'), ((2600, 2628), 'numpy.arange', 'np.arange', (['x_min', 'x_max', '(0.1)'], {}), '(x_min, x_max, 0.1)\n', (2609, 2628), True, 'import numpy as np\n'), ((3349, 3372), 'numpy.arange', 'np.arange', (['lb', 'ub', '(0.01)'], {}), '(lb, ub, 0.01)\n', (3358, 3372), True, 'import numpy as np\n'), ((3693, 3729), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'title_size'}), "('axes', titlesize=title_size)\n", (3699, 3729), True, 'import matplotlib.pyplot as plt\n'), ((3766, 3802), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'label_size'}), "('axes', labelsize=label_size)\n", (3772, 3802), True, 'import matplotlib.pyplot as plt\n'), ((3843, 3879), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'tick_size'}), "('xtick', labelsize=tick_size)\n", (3849, 3879), True, 'import matplotlib.pyplot as plt\n'), ((3917, 3953), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'tick_size'}), "('ytick', labelsize=tick_size)\n", (3923, 3953), True, 'import matplotlib.pyplot as plt\n'), ((4010, 4020), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4018, 4020), True, 'import matplotlib.pyplot as plt\n'), ((5476, 5518), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(fig_w, fig_l)'}), '(1, 1, figsize=(fig_w, fig_l))\n', (5488, 5518), True, 'import matplotlib.pyplot as plt\n'), ((5527, 5557), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', '(100)'], {}), '(x_min, x_max, 100)\n', (5538, 5557), True, 'import numpy as np\n'), ((5825, 5861), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'title_size'}), "('axes', titlesize=title_size)\n", (5831, 5861), True, 'import matplotlib.pyplot as plt\n'), ((5898, 5934), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], 
{'labelsize': 'label_size'}), "('axes', labelsize=label_size)\n", (5904, 5934), True, 'import matplotlib.pyplot as plt\n'), ((5975, 6011), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'tick_size'}), "('xtick', labelsize=tick_size)\n", (5981, 6011), True, 'import matplotlib.pyplot as plt\n'), ((6049, 6085), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'tick_size'}), "('ytick', labelsize=tick_size)\n", (6055, 6085), True, 'import matplotlib.pyplot as plt\n'), ((6143, 6153), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6151, 6153), True, 'import matplotlib.pyplot as plt\n'), ((7805, 7847), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(fig_w, fig_l)'}), '(1, 1, figsize=(fig_w, fig_l))\n', (7817, 7847), True, 'import matplotlib.pyplot as plt\n'), ((7856, 7886), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', '(100)'], {}), '(x_min, x_max, 100)\n', (7867, 7886), True, 'import numpy as np\n'), ((8132, 8168), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'title_size'}), "('axes', titlesize=title_size)\n", (8138, 8168), True, 'import matplotlib.pyplot as plt\n'), ((8205, 8241), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'label_size'}), "('axes', labelsize=label_size)\n", (8211, 8241), True, 'import matplotlib.pyplot as plt\n'), ((8282, 8318), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'tick_size'}), "('xtick', labelsize=tick_size)\n", (8288, 8318), True, 'import matplotlib.pyplot as plt\n'), ((8356, 8392), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'tick_size'}), "('ytick', labelsize=tick_size)\n", (8362, 8392), True, 'import matplotlib.pyplot as plt\n'), ((8450, 8460), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8458, 8460), True, 'import matplotlib.pyplot as plt\n'), ((2644, 2676), 'scipy.stats.norm.pdf', 'norm.pdf', (['x'], {'loc': 'mean', 'scale': 'std'}), '(x, loc=mean, 
scale=std)\n', (2652, 2676), False, 'from scipy.stats import norm\n'), ((3217, 3249), 'scipy.stats.norm.pdf', 'norm.pdf', (['x'], {'loc': 'mean', 'scale': 'std'}), '(x, loc=mean, scale=std)\n', (3225, 3249), False, 'from scipy.stats import norm\n'), ((3453, 3486), 'scipy.stats.norm.pdf', 'norm.pdf', (['px'], {'loc': 'mean', 'scale': 'std'}), '(px, loc=mean, scale=std)\n', (3461, 3486), False, 'from scipy.stats import norm\n'), ((5596, 5616), 'scipy.stats.norm.pdf', 'norm.pdf', (['x'], {'scale': 's'}), '(x, scale=s)\n', (5604, 5616), False, 'from scipy.stats import norm\n'), ((7928, 7949), 'scipy.stats.norm.pdf', 'norm.pdf', (['x'], {'loc': 'mean'}), '(x, loc=mean)\n', (7936, 7949), False, 'from scipy.stats import norm\n'), ((2886, 2901), 'scipy.stats.norm', 'norm', (['mean', 'std'], {}), '(mean, std)\n', (2890, 2901), False, 'from scipy.stats import norm\n'), ((2912, 2927), 'scipy.stats.norm', 'norm', (['mean', 'std'], {}), '(mean, std)\n', (2916, 2927), False, 'from scipy.stats import norm\n')] |
from skimage.io.collection import ImageCollection
from blending import blend_src2dst
import numpy as np
import cv2
# Build a (size x size) grid visualisation where cell (i, j) shows the result
# of blending source face 'man{i}' onto destination face 'man{j}', then add a
# header row of raw destination images and save everything as one image.
# NOTE(review): ImageCollection is imported but never used -- confirm it can
# be removed.
size = 7
for i in range(size):
    for j in range(size):
        src = f'man{i}'
        dst = f'man{j}'
        img_src, img_dst, _, img_dst_mix_grad, _ = blend_src2dst(src,dst)
        if j==0:
            # First column of a row: lead with the source image itself.
            img_array = np.hstack([img_src,img_dst_mix_grad])
        else:
            img_array = np.hstack([img_array,img_dst_mix_grad])
    if i==0:
        img_all_array = img_array
    else:
        img_all_array = np.vstack([img_all_array,img_array])
# Header row: a white placeholder followed by each raw destination image.
# NOTE(review): the white tile assumes blend_src2dst returns 256x256x3
# images -- confirm against blending.blend_src2dst.
i = 0
img_all_dst = np.ones((256,256,3))*255
for j in range(size):
    src = f'man{i}'
    dst = f'man{j}'
    _, img_dst, _, _, _ = blend_src2dst(src,dst)
    img_all_dst = np.hstack([img_all_dst,img_dst])
img_all_array = np.vstack([img_all_dst,img_all_array])
cv2.imwrite('I2G/vis.jpg',img_all_array)
#cv2.imshow('I',img_all_array)
#cv2.waitKey(0) | [
"cv2.imwrite",
"numpy.ones",
"numpy.hstack",
"numpy.vstack",
"blending.blend_src2dst"
] | [((797, 836), 'numpy.vstack', 'np.vstack', (['[img_all_dst, img_all_array]'], {}), '([img_all_dst, img_all_array])\n', (806, 836), True, 'import numpy as np\n'), ((839, 880), 'cv2.imwrite', 'cv2.imwrite', (['"""I2G/vis.jpg"""', 'img_all_array'], {}), "('I2G/vis.jpg', img_all_array)\n", (850, 880), False, 'import cv2\n'), ((592, 614), 'numpy.ones', 'np.ones', (['(256, 256, 3)'], {}), '((256, 256, 3))\n', (599, 614), True, 'import numpy as np\n'), ((705, 728), 'blending.blend_src2dst', 'blend_src2dst', (['src', 'dst'], {}), '(src, dst)\n', (718, 728), False, 'from blending import blend_src2dst\n'), ((747, 780), 'numpy.hstack', 'np.hstack', (['[img_all_dst, img_dst]'], {}), '([img_all_dst, img_dst])\n', (756, 780), True, 'import numpy as np\n'), ((273, 296), 'blending.blend_src2dst', 'blend_src2dst', (['src', 'dst'], {}), '(src, dst)\n', (286, 296), False, 'from blending import blend_src2dst\n'), ((534, 571), 'numpy.vstack', 'np.vstack', (['[img_all_array, img_array]'], {}), '([img_all_array, img_array])\n', (543, 571), True, 'import numpy as np\n'), ((337, 375), 'numpy.hstack', 'np.hstack', (['[img_src, img_dst_mix_grad]'], {}), '([img_src, img_dst_mix_grad])\n', (346, 375), True, 'import numpy as np\n'), ((413, 453), 'numpy.hstack', 'np.hstack', (['[img_array, img_dst_mix_grad]'], {}), '([img_array, img_dst_mix_grad])\n', (422, 453), True, 'import numpy as np\n')] |
import os, glob, functools
import librosa
import torch
from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler
import pytorch_lightning as pl
import numpy as np
from diffsynth.f0 import process_f0
def mix_iterable(dl_a, dl_b):
for i, j in zip(dl_a, dl_b):
yield i
yield j
class ReiteratableWrapper():
def __init__(self, f, length):
self._f = f
self.length = length
def __iter__(self):
# make generator
return self._f()
def __len__(self):
return self.length
class WaveParamDataset(Dataset):
def __init__(self, base_dir, sample_rate=16000, length=4.0, params=True, f0=False):
self.base_dir = base_dir
self.audio_dir = os.path.join(base_dir, 'audio')
self.raw_files = sorted(glob.glob(os.path.join(self.audio_dir, '*.wav')))
print('loaded {0} files'.format(len(self.raw_files)))
self.length = length
self.sample_rate = sample_rate
self.params = params
self.f0 = f0
if f0:
self.f0_dir = os.path.join(base_dir, 'f0')
assert os.path.exists(self.f0_dir)
# all the f0 files should already be written
# with the same name as the audio
self.f0_files = sorted(glob.glob(os.path.join(self.f0_dir, '*.pt')))
if params:
self.param_dir = os.path.join(base_dir, 'param')
assert os.path.exists(self.param_dir)
# all the files should already be written
self.param_files = sorted(glob.glob(os.path.join(self.param_dir, '*.pt')))
def __getitem__(self, idx):
raw_path = self.raw_files[idx]
audio, _sr = librosa.load(raw_path, sr=self.sample_rate, duration=self.length)
assert audio.shape[0] == self.length * self.sample_rate
data = {'audio': audio}
if self.f0:
f0, periodicity = torch.load(self.f0_files[idx])
f0_hz = process_f0(f0, periodicity)
data['BFRQ'] = f0_hz.unsqueeze(-1)
if self.params:
params = torch.load(self.param_files[idx])
data['params'] = params
return data
def __len__(self):
return len(self.raw_files)
class IdOodDataModule(pl.LightningDataModule):
def __init__(self, id_dir, ood_dir, train_type, batch_size, sample_rate=16000, length=4.0, num_workers=8, splits=[.8, .1, .1], f0=False):
super().__init__()
self.id_dir = id_dir
self.ood_dir = ood_dir
assert train_type in ['id', 'ood', 'mixed']
self.train_type = train_type
self.splits = splits
self.sr = sample_rate
self.l = length
self.batch_size = batch_size
self.num_workers = num_workers
self.f0 = f0
def create_split(self, dataset):
dset_l = len(dataset)
split_sizes = [int(dset_l*self.splits[0]), int(dset_l*self.splits[1])]
split_sizes.append(dset_l - split_sizes[0] - split_sizes[1])
# should be seeded fine but probably better to split test set in some other way
dset_train, dset_valid, dset_test = random_split(dataset, lengths=split_sizes)
return {'train': dset_train, 'valid': dset_valid, 'test': dset_test}
def setup(self, stage):
id_dat = WaveParamDataset(self.id_dir, self.sr, self.l, True, self.f0)
id_datasets = self.create_split(id_dat)
# ood should be the same size as in-domain
ood_dat = WaveParamDataset(self.ood_dir, self.sr, self.l, False, self.f0)
indices = np.random.choice(len(ood_dat), len(id_dat), replace=False)
ood_dat = Subset(ood_dat, indices)
ood_datasets = self.create_split(ood_dat)
self.id_datasets = id_datasets
self.ood_datasets = ood_datasets
assert len(id_datasets['train']) == len(ood_datasets['train'])
if self.train_type == 'mixed':
dat_len = len(id_datasets['train'])
indices = np.random.choice(dat_len, dat_len//2, replace=False)
self.train_set = ConcatDataset([Subset(id_datasets['train'], indices), Subset(ood_datasets['train'], indices)])
def train_dataloader(self):
if self.train_type=='id':
return DataLoader(self.id_datasets['train'], batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=True)
elif self.train_type=='ood':
return DataLoader(self.ood_datasets['train'], batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=True)
elif self.train_type=='mixed':
id_indices = list(range(len(self.train_set)//2))
ood_indices = list(range(len(self.train_set)//2, len(self.train_set)))
id_samp = SubsetRandomSampler(id_indices)
ood_samp = SubsetRandomSampler(ood_indices)
id_batch_samp = BatchSampler(id_samp, batch_size=self.batch_size, drop_last=False)
ood_batch_samp = BatchSampler(ood_samp, batch_size=self.batch_size, drop_last=False)
generator = functools.partial(mix_iterable, id_batch_samp, ood_batch_samp)
b_sampler = ReiteratableWrapper(generator, len(id_batch_samp)+len(ood_batch_samp))
return DataLoader(self.train_set, batch_sampler=b_sampler, num_workers=self.num_workers)
def val_dataloader(self):
return [DataLoader(self.id_datasets["valid"], batch_size=self.batch_size, num_workers=self.num_workers),
DataLoader(self.ood_datasets["valid"], batch_size=self.batch_size, num_workers=self.num_workers)]
def test_dataloader(self):
return [DataLoader(self.id_datasets["test"], batch_size=self.batch_size, num_workers=self.num_workers),
DataLoader(self.ood_datasets["test"], batch_size=self.batch_size, num_workers=self.num_workers)] | [
"torch.utils.data.Subset",
"diffsynth.f0.process_f0",
"numpy.random.choice",
"torch.utils.data.BatchSampler",
"functools.partial",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"librosa.load",
"torch.utils.data.random_split",
"torch.utils.data.SubsetRandomSampler",
"os.path.jo... | [((778, 809), 'os.path.join', 'os.path.join', (['base_dir', '"""audio"""'], {}), "(base_dir, 'audio')\n", (790, 809), False, 'import os, glob, functools\n'), ((1741, 1806), 'librosa.load', 'librosa.load', (['raw_path'], {'sr': 'self.sample_rate', 'duration': 'self.length'}), '(raw_path, sr=self.sample_rate, duration=self.length)\n', (1753, 1806), False, 'import librosa\n'), ((3171, 3213), 'torch.utils.data.random_split', 'random_split', (['dataset'], {'lengths': 'split_sizes'}), '(dataset, lengths=split_sizes)\n', (3183, 3213), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((3675, 3699), 'torch.utils.data.Subset', 'Subset', (['ood_dat', 'indices'], {}), '(ood_dat, indices)\n', (3681, 3699), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((1113, 1141), 'os.path.join', 'os.path.join', (['base_dir', '"""f0"""'], {}), "(base_dir, 'f0')\n", (1125, 1141), False, 'import os, glob, functools\n'), ((1161, 1188), 'os.path.exists', 'os.path.exists', (['self.f0_dir'], {}), '(self.f0_dir)\n', (1175, 1188), False, 'import os, glob, functools\n'), ((1421, 1452), 'os.path.join', 'os.path.join', (['base_dir', '"""param"""'], {}), "(base_dir, 'param')\n", (1433, 1452), False, 'import os, glob, functools\n'), ((1472, 1502), 'os.path.exists', 'os.path.exists', (['self.param_dir'], {}), '(self.param_dir)\n', (1486, 1502), False, 'import os, glob, functools\n'), ((1953, 1983), 'torch.load', 'torch.load', (['self.f0_files[idx]'], {}), '(self.f0_files[idx])\n', (1963, 1983), False, 'import torch\n'), ((2004, 2031), 'diffsynth.f0.process_f0', 'process_f0', (['f0', 'periodicity'], {}), '(f0, periodicity)\n', (2014, 2031), False, 'from diffsynth.f0 import process_f0\n'), ((2124, 2157), 'torch.load', 'torch.load', (['self.param_files[idx]'], {}), '(self.param_files[idx])\n', (2134, 2157), False, 'import 
torch\n'), ((4010, 4064), 'numpy.random.choice', 'np.random.choice', (['dat_len', '(dat_len // 2)'], {'replace': '(False)'}), '(dat_len, dat_len // 2, replace=False)\n', (4026, 4064), True, 'import numpy as np\n'), ((4273, 4386), 'torch.utils.data.DataLoader', 'DataLoader', (["self.id_datasets['train']"], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers', 'shuffle': '(True)'}), "(self.id_datasets['train'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=True)\n", (4283, 4386), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((5419, 5518), 'torch.utils.data.DataLoader', 'DataLoader', (["self.id_datasets['valid']"], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers'}), "(self.id_datasets['valid'], batch_size=self.batch_size,\n num_workers=self.num_workers)\n", (5429, 5518), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((5532, 5632), 'torch.utils.data.DataLoader', 'DataLoader', (["self.ood_datasets['valid']"], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers'}), "(self.ood_datasets['valid'], batch_size=self.batch_size,\n num_workers=self.num_workers)\n", (5542, 5632), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((5678, 5776), 'torch.utils.data.DataLoader', 'DataLoader', (["self.id_datasets['test']"], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers'}), "(self.id_datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers)\n", (5688, 5776), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((5790, 5889), 'torch.utils.data.DataLoader', 'DataLoader', (["self.ood_datasets['test']"], {'batch_size': 'self.batch_size', 
'num_workers': 'self.num_workers'}), "(self.ood_datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers)\n", (5800, 5889), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((852, 889), 'os.path.join', 'os.path.join', (['self.audio_dir', '"""*.wav"""'], {}), "(self.audio_dir, '*.wav')\n", (864, 889), False, 'import os, glob, functools\n'), ((4465, 4579), 'torch.utils.data.DataLoader', 'DataLoader', (["self.ood_datasets['train']"], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers', 'shuffle': '(True)'}), "(self.ood_datasets['train'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=True)\n", (4475, 4579), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((1337, 1370), 'os.path.join', 'os.path.join', (['self.f0_dir', '"""*.pt"""'], {}), "(self.f0_dir, '*.pt')\n", (1349, 1370), False, 'import os, glob, functools\n'), ((1605, 1641), 'os.path.join', 'os.path.join', (['self.param_dir', '"""*.pt"""'], {}), "(self.param_dir, '*.pt')\n", (1617, 1641), False, 'import os, glob, functools\n'), ((4107, 4144), 'torch.utils.data.Subset', 'Subset', (["id_datasets['train']", 'indices'], {}), "(id_datasets['train'], indices)\n", (4113, 4144), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((4146, 4184), 'torch.utils.data.Subset', 'Subset', (["ood_datasets['train']", 'indices'], {}), "(ood_datasets['train'], indices)\n", (4152, 4184), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((4809, 4840), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['id_indices'], {}), '(id_indices)\n', (4828, 4840), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, 
ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((4864, 4896), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['ood_indices'], {}), '(ood_indices)\n', (4883, 4896), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((4925, 4991), 'torch.utils.data.BatchSampler', 'BatchSampler', (['id_samp'], {'batch_size': 'self.batch_size', 'drop_last': '(False)'}), '(id_samp, batch_size=self.batch_size, drop_last=False)\n', (4937, 4991), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((5021, 5088), 'torch.utils.data.BatchSampler', 'BatchSampler', (['ood_samp'], {'batch_size': 'self.batch_size', 'drop_last': '(False)'}), '(ood_samp, batch_size=self.batch_size, drop_last=False)\n', (5033, 5088), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n'), ((5113, 5175), 'functools.partial', 'functools.partial', (['mix_iterable', 'id_batch_samp', 'ood_batch_samp'], {}), '(mix_iterable, id_batch_samp, ood_batch_samp)\n', (5130, 5175), False, 'import os, glob, functools\n'), ((5290, 5376), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_set'], {'batch_sampler': 'b_sampler', 'num_workers': 'self.num_workers'}), '(self.train_set, batch_sampler=b_sampler, num_workers=self.\n num_workers)\n', (5300, 5376), False, 'from torch.utils.data import Subset, Dataset, DataLoader, random_split, ConcatDataset, SubsetRandomSampler, BatchSampler\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 21 10:35:25 2014
@author: eegroopm
"""
import numpy as np
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Circle, Arrow
from PyQt4 import QtGui
from PyQt4.QtCore import pyqtSlot, pyqtSignal
class MplCanvas(FigureCanvas):
def __init__(self):
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
super(MplCanvas, self).__init__(self.fig)
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
class matplotlibWidget(QtGui.QWidget):
distances = pyqtSignal(str,str,str,str)
def __init__(self, common, Diffraction, parent = None):
QtGui.QWidget.__init__(self, parent)
self.update(EDXdataset) # necessary?
self.canvas = MplCanvas()
self.vbl = QtGui.QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
self.Plot_initialize()
self.canvas.mpl_connect('pick_event', self.on_pick)
self.x1 = None; self.y1 = None
def update(self, Diffraction):
self.DSpaces = self.common.DSpaces
self.Forbidden = self.common.Forbidden
self.u = self.common.u
self.v = self.common.v
self.w = self.common.w
self.E = self.common.beamenergy
self.L = self.common.camlength
self.const = self.common.camconst
self.lam = self.common.wavelength
self.ZoneAxis = self.common.ZoneAxis
self.Diffraction = Diffraction
#self.canvas.mpl_connect('button_press_event', self.on_pick)
#
# def setupToolbar(self,canvas,frame):
# """Setup a custom toolbar"""
# # Create the navigation toolbar, tied to the canvas
# self.mpl_toolbar = NavigationToolbar(canvas, frame)
# #add widgets to toolbar
# self.comboBox_rotate = QtGui.QComboBox()
# self.checkBox_labels = QtGui.QCheckBox()
# self.mpl_toolbar.addWidget(self.comboBox_rotate)
# self.mpl_toolbar.addWidget(self.checkBox_labels)
# #add toolbar to tabs
# self.verticalLayout.addWidget(self.mpl_toolbar)
def Plot_initialize(self):
"""Initialize parameters of Matplotlib widget such as axes labels"""
#label = u'Distance (\u212B\u207B\u00B9)'
label = r'Distance ($\AA^{-1}$)' #use matplotlib's mathtex rendering: Å⁻¹
self.canvas.ax.set_xlabel(label,fontsize=14)
self.canvas.ax.set_ylabel(label,fontsize=14)
self.canvas.ax.tick_params(axis='both', which='major', labelsize=14, length=6)
#self.Plot.xaxis.set_units(u'Å⁻¹')
#self.Plot.yaxis.set_units(u'Å⁻¹')
self.canvas.fig.tight_layout()
def calc(self,ind1,ind2):
"""Calculates angles from picks"""
p1 = list(self.DSpaces.loc[ind1,['x','y']])
p2 = list(self.DSpaces.loc[ind2,['x','y']])
recip_d = round(np.sqrt((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2),3) #calc distance
real_d = 1.0/recip_d
film_d = self.lam*self.L/real_d*self.const
#angle = round(np.degrees(self.Diffraction.AngleAmbiguity(p2[0]-p1[0],p2[1]-p1[1])),1)
angle = round(np.degrees(np.arctan2((p2[1]-p1[1]),(p2[0]-p1[0]))),2)
return recip_d, real_d,film_d, angle, p1, p2
@pyqtSlot()
def on_done_pick(self,recip_d, real_d,film_d, angle):
self.distances.emit(recip_d, real_d,film_d, angle)
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#self.DSpaces = self.common.DSpaces
if isinstance(event.artist, Line2D):
thisline = event.artist
if self.x1 == None:
if self.common._x2:
#Remove recently done circles
self.arr.remove()
del self.arr
l = self.canvas.ax.lines.pop(-1)
del l
l = self.canvas.ax.lines.pop(-1)
del l
self.canvas.draw()
self.common._x2=False
self.x1 = thisline.get_xdata()
self.y1 = thisline.get_ydata()
self.ind1 = event.ind[0]
self.canvas.ax.plot(self.x1[self.ind1],self.y1[self.ind1], linestyle = '', marker='o', markersize = 10,color='r')
self.canvas.draw()
elif self.x1 != None:
self.update(self.common,self.Diffraction)
self.common._x2 = True
self.ind2 = event.ind[0]
#make names shorter
#x1 = self.x1[self.ind1]; x2 = self._x2[self.ind2]; y1 = self.y1[self.ind1]; y2 = self.y2[self.ind2]
recip_d, real_d,film_d, angle, p1, p2 = self.calc(self.ind1,self.ind2)
#reset x1 and y1
self.x1 = None; self.y1 = None
#plot colored circle
self.canvas.ax.plot(p2[0],p2[1], linestyle = '', marker='o', markersize = 10,color='r')
#plot arrow between selected points
self.arr = Arrow(p1[0],p1[1],p2[0]-p1[0],p2[1]-p1[1],facecolor='r',width = 1/(5*self.common.a)) #maybe include factor of miller indices. higher miller = larger x,yrange
self.canvas.ax.add_patch(self.arr)
self.canvas.draw()
self.on_done_pick(str(recip_d), str(round(real_d,2)),str(round(film_d,2)), str(angle))
import sys
from PyQt4 import QtGui, QtCore
# simple test of spinbox and connections
class spindemo(QtGui.QWidget):
def __init__(self, parent = None):
super(spindemo, self).__init__(parent)
layout = QtGui.QVBoxLayout()
self.l1 = QtGui.QLabel("current value:")
self.l1.setAlignment(QtCore.Qt.AlignCenter)
layout.addWidget(self.l1)
self.sp = QtGui.QSpinBox()
layout.addWidget(self.sp)
self.sp.valueChanged.connect(self.valuechange)
self.setLayout(layout)
self.setWindowTitle("SpinBox demo")
def valuechange(self):
self.l1.setText("current value:"+str(self.sp.value()))
print('New value is', str(self.sp.value()))
def main():
app = QtGui.QApplication(sys.argv)
ex = spindemo()
ex.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
if QtCore.QCoreApplication.instance() != None:
app = QtCore.QCoreApplication.instance()
else:
app = QtGui.QApplication(sys.argv)
mygui = spindemo()
mygui.show()
app.exec_() | [
"matplotlib.patches.Arrow",
"matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg.setSizePolicy",
"PyQt4.QtGui.QSpinBox",
"PyQt4.QtCore.QCoreApplication.instance",
"matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg.__init__",
"PyQt4.QtGui.QLabel",
"numpy.arctan2",
"PyQt4.QtGui.QVBoxLayout",
"PyQt... | [((824, 854), 'PyQt4.QtCore.pyqtSignal', 'pyqtSignal', (['str', 'str', 'str', 'str'], {}), '(str, str, str, str)\n', (834, 854), False, 'from PyQt4.QtCore import pyqtSlot, pyqtSignal\n'), ((3591, 3601), 'PyQt4.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (3599, 3601), False, 'from PyQt4.QtCore import pyqtSlot, pyqtSignal\n'), ((6577, 6605), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (6595, 6605), False, 'from PyQt4 import QtGui, QtCore\n'), ((6710, 6744), 'PyQt4.QtCore.QCoreApplication.instance', 'QtCore.QCoreApplication.instance', ([], {}), '()\n', (6742, 6744), False, 'from PyQt4 import QtGui, QtCore\n'), ((6764, 6798), 'PyQt4.QtCore.QCoreApplication.instance', 'QtCore.QCoreApplication.instance', ([], {}), '()\n', (6796, 6798), False, 'from PyQt4 import QtGui, QtCore\n'), ((6815, 6843), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (6833, 6843), False, 'from PyQt4 import QtGui, QtCore\n'), ((471, 479), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (477, 479), False, 'from matplotlib.figure import Figure\n'), ((582, 619), 'matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg.__init__', 'FigureCanvas.__init__', (['self', 'self.fig'], {}), '(self, self.fig)\n', (603, 619), True, 'from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n'), ((628, 723), 'matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg.setSizePolicy', 'FigureCanvas.setSizePolicy', (['self', 'QtGui.QSizePolicy.Expanding', 'QtGui.QSizePolicy.Expanding'], {}), '(self, QtGui.QSizePolicy.Expanding, QtGui.\n QSizePolicy.Expanding)\n', (654, 723), True, 'from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\n'), ((726, 759), 'matplotlib.backends.backend_qt4agg.FigureCanvasQTAgg.updateGeometry', 'FigureCanvas.updateGeometry', (['self'], {}), '(self)\n', (753, 759), True, 'from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as 
FigureCanvas\n'), ((920, 956), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (942, 956), False, 'from PyQt4 import QtGui, QtCore\n'), ((1064, 1083), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (1081, 1083), False, 'from PyQt4 import QtGui, QtCore\n'), ((6074, 6093), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (6091, 6093), False, 'from PyQt4 import QtGui, QtCore\n'), ((6110, 6140), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""current value:"""'], {}), "('current value:')\n", (6122, 6140), False, 'from PyQt4 import QtGui, QtCore\n'), ((6239, 6255), 'PyQt4.QtGui.QSpinBox', 'QtGui.QSpinBox', ([], {}), '()\n', (6253, 6255), False, 'from PyQt4 import QtGui, QtCore\n'), ((3191, 3243), 'numpy.sqrt', 'np.sqrt', (['((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)'], {}), '((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)\n', (3198, 3243), True, 'import numpy as np\n'), ((3471, 3511), 'numpy.arctan2', 'np.arctan2', (['(p2[1] - p1[1])', '(p2[0] - p1[0])'], {}), '(p2[1] - p1[1], p2[0] - p1[0])\n', (3481, 3511), True, 'import numpy as np\n'), ((5487, 5586), 'matplotlib.patches.Arrow', 'Arrow', (['p1[0]', 'p1[1]', '(p2[0] - p1[0])', '(p2[1] - p1[1])'], {'facecolor': '"""r"""', 'width': '(1 / (5 * self.common.a))'}), "(p1[0], p1[1], p2[0] - p1[0], p2[1] - p1[1], facecolor='r', width=1 /\n (5 * self.common.a))\n", (5492, 5586), False, 'from matplotlib.patches import Patch, Rectangle, Circle, Arrow\n')] |
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../Data/tmp/mnist/', one_hot=True)
LR = 0.05
numSteps = 30000
batchSize = 256
displaySteps = 1000
examplesToShow = 10
imgSize = 28
numH1 = 256
numH2 = 128
numInputs = 28 * 28
X = tf.placeholder(tf.float32, [None, numInputs])
weights = {
'eH1': tf.Variable(tf.random_normal([numInputs, numH1])),
'eH2': tf.Variable(tf.random_normal([numH1, numH2])),
'dH1': tf.Variable(tf.random_normal([numH2, numH1])),
'dH2': tf.Variable(tf.random_normal([numH1, numInputs]))
}
biases = {
'eH1': tf.Variable(tf.zeros([numH1])),
'eH2': tf.Variable(tf.zeros([numH2])),
'dH1': tf.Variable(tf.zeros([numH1])),
'dH2': tf.Variable(tf.zeros([numInputs]))
}
def encoder(x):
en = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['eH1']), biases['eH1']))
en = tf.nn.sigmoid(tf.add(tf.matmul(en, weights['eH2']), biases['eH2']))
return en
def decoder(x):
de = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['dH1']), biases['dH1']))
de = tf.nn.sigmoid(tf.add(tf.matmul(de, weights['dH2']), biases['dH2']))
return de
encoder = encoder(X)
decoder = decoder(encoder)
yPred = decoder
yTrue = X
loss = tf.reduce_mean(tf.pow(yTrue - yPred, 2))
optimizer = tf.train.RMSPropOptimizer(LR).minimize(loss)
print('Number of trainable parameters: {}'.format(
np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(1, numSteps + 1):
batchX, _ = mnist.train.next_batch(batchSize)
_, l = sess.run([optimizer, loss], feed_dict={X: batchX})
if i % displaySteps == 0 or i == 1:
print('Step {} Loss: {}'.format(i, l))
n = 4
canvasOrig = np.empty((28 * n, 28 * n))
canvasRecon = np.empty((28 * n, 28 * n))
for i in range(n):
batchX, _ = mnist.test.next_batch(n)
g = sess.run(decoder, feed_dict={X: batchX})
for j in range(n):
canvasOrig[i * 28:(i + 1) * 28, j * 28:(j + 1)
* 28] = batchX[j].reshape([28, 28])
for j in range(n):
canvasRecon[i * 28:(i + 1) * 28, j * 28:(j + 1)
* 28] = g[j].reshape([28, 28])
print("Original Images")
plt.figure(figsize=(n, n))
plt.imshow(canvasOrig, origin="upper", cmap="gray")
plt.show()
print("Reconstructed Images")
plt.figure(figsize=(n, n))
plt.imshow(canvasRecon, origin="upper", cmap="gray")
plt.show()
| [
"matplotlib.pyplot.show",
"tensorflow.trainable_variables",
"tensorflow.global_variables_initializer",
"numpy.empty",
"matplotlib.pyplot.imshow",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.Session",
"tensorflow.pow",
"tensorflow.placeholder",
"matplotlib.pyplot.figure",
"tensorflow.zeros",... | [((143, 204), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""../Data/tmp/mnist/"""'], {'one_hot': '(True)'}), "('../Data/tmp/mnist/', one_hot=True)\n", (168, 204), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((352, 397), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, numInputs]'], {}), '(tf.float32, [None, numInputs])\n', (366, 397), True, 'import tensorflow as tf\n'), ((1311, 1335), 'tensorflow.pow', 'tf.pow', (['(yTrue - yPred)', '(2)'], {}), '(yTrue - yPred, 2)\n', (1317, 1335), True, 'import tensorflow as tf\n'), ((1535, 1547), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1545, 1547), True, 'import tensorflow as tf\n'), ((1887, 1913), 'numpy.empty', 'np.empty', (['(28 * n, 28 * n)'], {}), '((28 * n, 28 * n))\n', (1895, 1913), True, 'import numpy as np\n'), ((1932, 1958), 'numpy.empty', 'np.empty', (['(28 * n, 28 * n)'], {}), '((28 * n, 28 * n))\n', (1940, 1958), True, 'import numpy as np\n'), ((2459, 2485), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(n, n)'}), '(figsize=(n, n))\n', (2469, 2485), True, 'import matplotlib.pyplot as plt\n'), ((2490, 2541), 'matplotlib.pyplot.imshow', 'plt.imshow', (['canvasOrig'], {'origin': '"""upper"""', 'cmap': '"""gray"""'}), "(canvasOrig, origin='upper', cmap='gray')\n", (2500, 2541), True, 'import matplotlib.pyplot as plt\n'), ((2546, 2556), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2554, 2556), True, 'import matplotlib.pyplot as plt\n'), ((2596, 2622), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(n, n)'}), '(figsize=(n, n))\n', (2606, 2622), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2679), 'matplotlib.pyplot.imshow', 'plt.imshow', (['canvasRecon'], {'origin': '"""upper"""', 'cmap': '"""gray"""'}), "(canvasRecon, origin='upper', cmap='gray')\n", (2637, 2679), True, 'import matplotlib.pyplot as plt\n'), ((2684, 2694), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2692, 2694), True, 'import matplotlib.pyplot as plt\n'), ((434, 470), 'tensorflow.random_normal', 'tf.random_normal', (['[numInputs, numH1]'], {}), '([numInputs, numH1])\n', (450, 470), True, 'import tensorflow as tf\n'), ((496, 528), 'tensorflow.random_normal', 'tf.random_normal', (['[numH1, numH2]'], {}), '([numH1, numH2])\n', (512, 528), True, 'import tensorflow as tf\n'), ((554, 586), 'tensorflow.random_normal', 'tf.random_normal', (['[numH2, numH1]'], {}), '([numH2, numH1])\n', (570, 586), True, 'import tensorflow as tf\n'), ((612, 648), 'tensorflow.random_normal', 'tf.random_normal', (['[numH1, numInputs]'], {}), '([numH1, numInputs])\n', (628, 648), True, 'import tensorflow as tf\n'), ((687, 704), 'tensorflow.zeros', 'tf.zeros', (['[numH1]'], {}), '([numH1])\n', (695, 704), True, 'import tensorflow as tf\n'), ((730, 747), 'tensorflow.zeros', 'tf.zeros', (['[numH2]'], {}), '([numH2])\n', (738, 747), True, 'import tensorflow as tf\n'), ((773, 790), 'tensorflow.zeros', 'tf.zeros', (['[numH1]'], {}), '([numH1])\n', (781, 790), True, 'import tensorflow as tf\n'), ((816, 837), 'tensorflow.zeros', 'tf.zeros', (['[numInputs]'], {}), '([numInputs])\n', (824, 837), True, 'import tensorflow as tf\n'), ((1349, 1378), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['LR'], {}), '(LR)\n', (1374, 1378), True, 'import tensorflow as tf\n'), ((1570, 1603), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1601, 1603), True, 'import tensorflow as tf\n'), ((889, 917), 'tensorflow.matmul', 'tf.matmul', (['x', "weights['eH1']"], {}), "(x, weights['eH1'])\n", (898, 917), True, 'import tensorflow as tf\n'), ((965, 994), 'tensorflow.matmul', 'tf.matmul', (['en', "weights['eH2']"], {}), "(en, weights['eH2'])\n", (974, 994), True, 'import tensorflow as tf\n'), ((1074, 1102), 'tensorflow.matmul', 'tf.matmul', (['x', "weights['dH1']"], {}), "(x, weights['dH1'])\n", 
(1083, 1102), True, 'import tensorflow as tf\n'), ((1150, 1179), 'tensorflow.matmul', 'tf.matmul', (['de', "weights['dH2']"], {}), "(de, weights['dH2'])\n", (1159, 1179), True, 'import tensorflow as tf\n'), ((1500, 1524), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1522, 1524), True, 'import tensorflow as tf\n')] |
import argparse
import pathlib
import mbrl
import mbrl.models
import mbrl.planning
import mbrl.util.common
import mbrl.util.mujoco
import matplotlib.pyplot as plt
import numpy as np
import torch
class Agent_Renderer():
    """Reloads a finished mbrl-lib experiment and replays the trained agent
    in the real environment, rendering every step."""
    def __init__(self, exp_dir, num_steps):
        """
        Parameters
        ----------
        exp_dir : str
            Directory of the original experiment run; must contain the saved
            dynamics model and the hydra configs.
        num_steps : int
            Maximum number of environment steps to render.
        """
        self.max_steps = num_steps
        exp_path = pathlib.Path(exp_dir)
        self.cfg = mbrl.util.common.load_hydra_cfg(exp_path)
        # Recreate the environment together with its termination and reward functions.
        self.env, self.term_fn, self.reward_fn = mbrl.util.mujoco.make_env(self.cfg)
        # Restore the learned dynamics model from the experiment directory.
        self.dynamics_model = mbrl.util.common.create_one_dim_tr_model(
            self.cfg,
            self.env.observation_space.shape,
            self.env.action_space.shape,
            model_dir=exp_path,
        )
        # Wrap the dynamics model as a gym-like model environment for planning.
        self.model_env = mbrl.models.ModelEnv(
            self.env,
            self.dynamics_model,
            self.term_fn,
            self.reward_fn,
            generator=torch.Generator(self.dynamics_model.device),
        )
        agent_cfg = self.cfg
        if (
            agent_cfg.algorithm.agent._target_
            == "mbrl.planning.TrajectoryOptimizerAgent"
        ):
            self.agent = mbrl.planning.create_trajectory_optim_agent_for_model(
                self.model_env,
                agent_cfg.algorithm.agent,
                num_particles=agent_cfg.algorithm.num_particles,
            )
        else:
            # NOTE(review): `agent_dir` and `lookahead` are not defined anywhere
            # in this file, so this branch raises NameError if it is ever taken.
            # Presumably leftovers from the script this was adapted from — confirm.
            agent_cfg = mbrl.util.common.load_hydra_cfg(agent_dir)
            if (
                agent_cfg.algorithm.agent._target_
                == "mbrl.planning.TrajectoryOptimizerAgent"
            ):
                agent_cfg.algorithm.agent.planning_horizon = lookahead
                self.agent = mbrl.planning.create_trajectory_optim_agent_for_model(
                    self.model_env,
                    agent_cfg.algorithm.agent,
                    num_particles=agent_cfg.algorithm.num_particles,
                )
        # Set up recording
        #self.vis_path = self.exp_path / "render"
    def run_exp(self):
        """Roll out the agent in the real environment, rendering every step.

        Returns
        -------
        numpy.float32
            Cumulative reward collected over the episode (capped at
            ``self.max_steps`` steps or the first ``done``).
        """
        steps = 0
        obs = self.env.reset()
        self.agent.reset()
        done = False
        total_reward = 0.0
        while not done and steps < self.max_steps:
            # --- Doing env step using the agent and adding to model dataset ---
            action = self.agent.act(obs)
            next_obs, reward, done, info = self.env.step(action)
            obs = next_obs
            total_reward += reward
            steps += 1
            self.env.render(mode="rgb_array")
        return np.float32(total_reward)
if __name__ == "__main__":
    # Command-line entry point: replay a stored experiment and report the reward.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--experiments_dir",
        type=str,
        default=None,
        help="The directory where the original experiment was run. Should contain both the model and the configs.",
    )
    parser.add_argument(
        "--num_steps",
        type=int,
        default=200,
        help="The number of steps to render.",
    )
    cli_args = parser.parse_args()
    renderer = Agent_Renderer(cli_args.experiments_dir, cli_args.num_steps)
    reward = renderer.run_exp()
    print(f"This experiment generated a cumulative reward of {reward}")
| [
"argparse.ArgumentParser",
"numpy.float32",
"mbrl.planning.create_trajectory_optim_agent_for_model",
"pathlib.Path",
"mbrl.util.mujoco.make_env",
"mbrl.util.common.create_one_dim_tr_model",
"torch.Generator",
"mbrl.util.common.load_hydra_cfg"
] | [((2583, 2608), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2606, 2608), False, 'import argparse\n'), ((320, 341), 'pathlib.Path', 'pathlib.Path', (['exp_dir'], {}), '(exp_dir)\n', (332, 341), False, 'import pathlib\n'), ((361, 402), 'mbrl.util.common.load_hydra_cfg', 'mbrl.util.common.load_hydra_cfg', (['exp_path'], {}), '(exp_path)\n', (392, 402), False, 'import mbrl\n'), ((452, 487), 'mbrl.util.mujoco.make_env', 'mbrl.util.mujoco.make_env', (['self.cfg'], {}), '(self.cfg)\n', (477, 487), False, 'import mbrl\n'), ((519, 657), 'mbrl.util.common.create_one_dim_tr_model', 'mbrl.util.common.create_one_dim_tr_model', (['self.cfg', 'self.env.observation_space.shape', 'self.env.action_space.shape'], {'model_dir': 'exp_path'}), '(self.cfg, self.env.\n observation_space.shape, self.env.action_space.shape, model_dir=exp_path)\n', (559, 657), False, 'import mbrl\n'), ((2514, 2538), 'numpy.float32', 'np.float32', (['total_reward'], {}), '(total_reward)\n', (2524, 2538), True, 'import numpy as np\n'), ((1136, 1285), 'mbrl.planning.create_trajectory_optim_agent_for_model', 'mbrl.planning.create_trajectory_optim_agent_for_model', (['self.model_env', 'agent_cfg.algorithm.agent'], {'num_particles': 'agent_cfg.algorithm.num_particles'}), '(self.model_env,\n agent_cfg.algorithm.agent, num_particles=agent_cfg.algorithm.num_particles)\n', (1189, 1285), False, 'import mbrl\n'), ((1383, 1425), 'mbrl.util.common.load_hydra_cfg', 'mbrl.util.common.load_hydra_cfg', (['agent_dir'], {}), '(agent_dir)\n', (1414, 1425), False, 'import mbrl\n'), ((891, 934), 'torch.Generator', 'torch.Generator', (['self.dynamics_model.device'], {}), '(self.dynamics_model.device)\n', (906, 934), False, 'import torch\n'), ((1669, 1818), 'mbrl.planning.create_trajectory_optim_agent_for_model', 'mbrl.planning.create_trajectory_optim_agent_for_model', (['self.model_env', 'agent_cfg.algorithm.agent'], {'num_particles': 'agent_cfg.algorithm.num_particles'}), '(self.model_env,\n 
agent_cfg.algorithm.agent, num_particles=agent_cfg.algorithm.num_particles)\n', (1722, 1818), False, 'import mbrl\n')] |
""" Implements a waypoint controller, which controls lateral and longitudinal position of glider. Also adds a
controller wrapper, which wraps control parameters, environment and controller into one object
"""
import numpy as np
from parameters import params_triangle_soaring, params_environment
from parameters.params_triangle_soaring import TaskParameters
from hierarchical_policy.vertex_tracker import params_vertex_tracker
from hierarchical_policy.vertex_tracker.params_vertex_tracker import ControlParameters
class ControllerWrapper:
    """Bundles the waypoint controller, the training environment and the
    agent parameters behind a single action-selection interface.

    Attributes
    ----------
    waypoint_controller : WaypointController
        Longitudinal and lateral control laws.
    env : object
        OpenAI gym training environment.
    _params_agent : AgentParameters
        Waypoint controller parameters (e.g. the admissible action space).
    """
    def __init__(self, environment):
        self.env = environment
        self.waypoint_controller = WaypointController()
        self._params_agent = params_vertex_tracker.AgentParameters()

    def select_action(self):
        """Compute the normalized action for the current environment state.

        Returns
        -------
        action : ndarray
            Roll-angle and angle-of-attack commands, each mapped from the
            agent's action space (given in degrees) onto [-1, 1].
        """
        roll_cmd, aoa_cmd = self.waypoint_controller.get_control(self.env.state, self.env.active_vertex)
        bounds = self._params_agent.ACTION_SPACE
        normalized = [self.wrap_to_interval(roll_cmd, bounds[0, :] * np.pi / 180),
                      self.wrap_to_interval(aoa_cmd, bounds[1, :] * np.pi / 180)]
        return np.array(normalized)

    @staticmethod
    def wrap_to_interval(value, source_interval, target_interval=np.array([-1, 1])):
        """Linearly map ``value`` from ``source_interval`` onto ``target_interval``.

        Parameters
        ----------
        value : float
            Control value to be remapped.
        source_interval : ndarray
            Interval the value currently lives in.
        target_interval : ndarray
            Interval the value is mapped onto (defaults to [-1, 1]).

        Returns
        -------
        wrapped_value : float
            The remapped control value.
        """
        src = (source_interval.min(), source_interval.max())
        tgt = (target_interval.min(), target_interval.max())
        return np.interp(value, src, tgt)
class WaypointController:
""" Controller wrapper for control parameters, environment and controller.
Attributes
----------
_params_task : TaskParameters
Parameters which describe soaring task
_params_glider : object
Parameters which describe physical properties of glider
_params_physics : PhysicsParameters
Physical constants like gravity and air density
_params_control : ControlParameters
Parameters for controller like maximum control parameters
"""
def __init__(self):
self._params_task = params_triangle_soaring.TaskParameters()
self._params_glider = params_environment.GliderParameters()
self._params_physics = params_environment.PhysicsParameters()
self._params_control = params_vertex_tracker.ControlParameters()
def get_control(self, state, active_vertex_id):
""" Calculates roll angle command with lateral controller. Calculates angle of attack with longitudinal
controller
Parameters
----------
state : ndarray
State of glider which contains position and velocity
active_vertex_id : int
ID of current target vertex
Returns
-------
phi_cmd : float
Commanded roll angle
alpha_cmd : float
Commanded angle-of-attack
"""
phi_cmd = self.controller_lat(state, active_vertex_id)
alpha_cmd = self.controller_lon(phi_cmd)
return phi_cmd, alpha_cmd
def controller_lat(self, state, active_vertex_id):
""" Controls lateral movement of glider with roll angle to hit target vertex sector
Parameters
----------
state : ndarray
State of glider which contains position and velocity
active_vertex_id : int
ID of current target vertex
Returns
-------
phi_cmd : float
Commanded roll angle
"""
position = state[0:3]
velocity = state[3:6]
chi = np.arctan2(velocity[1], velocity[0])
chi_cmd = self.guidance(position, active_vertex_id)
chi_error = chi_cmd - chi
if chi_error > np.pi:
chi_error -= (2 * np.pi)
elif chi_error < -np.pi:
chi_error += (2 * np.pi)
speed = np.linalg.norm(velocity)
if speed < 10:
phi_cmd = chi_error * self._params_control.K_CHI / 10
else:
phi_cmd = chi_error * self._params_control.K_CHI / speed
phi_cmd = np.clip(phi_cmd, -self._params_control.PHI_MAX, self._params_control.PHI_MAX)
return phi_cmd
def guidance(self, position, active_vertex_id):
""" Calculates chi command for lateral controller
Parameters
----------
position : ndarray
Position of glider
active_vertex_id : int
ID of current target vertex
Returns
-------
chi_cmd : float
Commanded azimuth to hit target vertex
"""
# stretching triangle slightly ensures hitting the sectors (especially vertex #2)
waypoint = self._params_control.STRETCH * self._params_task.TRIANGLE[:, (active_vertex_id - 1)]
g_los = waypoint - position[0:2]
chi_cmd = np.arctan2(g_los[1], g_los[0])
return chi_cmd
def controller_lon(self, phi):
""" Controls lateral movement of glider with angle-of-attack to fly at point of best glide
Parameters
----------
phi : float
Commanded roll angle
Returns
-------
alpha_cmd : float
Commanded angle-of-attack
"""
alpha_bestGlide = ((self._params_glider.ST + 2)
* np.sqrt(self._params_glider.CD0 * self._params_glider.OE / self._params_glider.ST)) \
/ (2 * np.sqrt(np.pi))
# add turn compensation
alpha_turn = -(self._params_physics.G / self._params_glider.Z_ALPHA) * (1 / np.cos(phi) - 1)
alpha_cmd = alpha_bestGlide + alpha_turn
# limit control command
alpha_cmd = np.clip(alpha_cmd, -self._params_control.AOA_MIN, self._params_control.AOA_MAX)
return alpha_cmd
| [
"hierarchical_policy.vertex_tracker.params_vertex_tracker.AgentParameters",
"numpy.arctan2",
"parameters.params_triangle_soaring.TaskParameters",
"numpy.clip",
"parameters.params_environment.PhysicsParameters",
"numpy.array",
"hierarchical_policy.vertex_tracker.params_vertex_tracker.ControlParameters",
... | [((1082, 1121), 'hierarchical_policy.vertex_tracker.params_vertex_tracker.AgentParameters', 'params_vertex_tracker.AgentParameters', ([], {}), '()\n', (1119, 1121), False, 'from hierarchical_policy.vertex_tracker import params_vertex_tracker\n'), ((1728, 1745), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (1736, 1745), True, 'import numpy as np\n'), ((2955, 2995), 'parameters.params_triangle_soaring.TaskParameters', 'params_triangle_soaring.TaskParameters', ([], {}), '()\n', (2993, 2995), False, 'from parameters import params_triangle_soaring, params_environment\n'), ((3026, 3063), 'parameters.params_environment.GliderParameters', 'params_environment.GliderParameters', ([], {}), '()\n', (3061, 3063), False, 'from parameters import params_triangle_soaring, params_environment\n'), ((3095, 3133), 'parameters.params_environment.PhysicsParameters', 'params_environment.PhysicsParameters', ([], {}), '()\n', (3131, 3133), False, 'from parameters import params_triangle_soaring, params_environment\n'), ((3165, 3206), 'hierarchical_policy.vertex_tracker.params_vertex_tracker.ControlParameters', 'params_vertex_tracker.ControlParameters', ([], {}), '()\n', (3204, 3206), False, 'from hierarchical_policy.vertex_tracker import params_vertex_tracker\n'), ((4436, 4472), 'numpy.arctan2', 'np.arctan2', (['velocity[1]', 'velocity[0]'], {}), '(velocity[1], velocity[0])\n', (4446, 4472), True, 'import numpy as np\n'), ((4723, 4747), 'numpy.linalg.norm', 'np.linalg.norm', (['velocity'], {}), '(velocity)\n', (4737, 4747), True, 'import numpy as np\n'), ((4940, 5017), 'numpy.clip', 'np.clip', (['phi_cmd', '(-self._params_control.PHI_MAX)', 'self._params_control.PHI_MAX'], {}), '(phi_cmd, -self._params_control.PHI_MAX, self._params_control.PHI_MAX)\n', (4947, 5017), True, 'import numpy as np\n'), ((5696, 5726), 'numpy.arctan2', 'np.arctan2', (['g_los[1]', 'g_los[0]'], {}), '(g_los[1], g_los[0])\n', (5706, 5726), True, 'import numpy as np\n'), ((6544, 6623), 
'numpy.clip', 'np.clip', (['alpha_cmd', '(-self._params_control.AOA_MIN)', 'self._params_control.AOA_MAX'], {}), '(alpha_cmd, -self._params_control.AOA_MIN, self._params_control.AOA_MAX)\n', (6551, 6623), True, 'import numpy as np\n'), ((6173, 6260), 'numpy.sqrt', 'np.sqrt', (['(self._params_glider.CD0 * self._params_glider.OE / self._params_glider.ST)'], {}), '(self._params_glider.CD0 * self._params_glider.OE / self.\n _params_glider.ST)\n', (6180, 6260), True, 'import numpy as np\n'), ((6292, 6306), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (6299, 6306), True, 'import numpy as np\n'), ((6425, 6436), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (6431, 6436), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Kendall's tau from SciPy. """
import absl # Here to have a nice missing dependency error message early on
import datasets
import nltk # Here to have a nice missing dependency error message early on
import numpy as np
import six # Here to have a nice missing dependency error message early on
from scipy.stats import kendalltau
_CITATION = """
"""
_DESCRIPTION = """\
Calculate Kendall’s tau, a correlation measure for ordinal data.
Kendall’s tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate strong disagreement.
This is the 1945 “tau-b” version of Kendall’s tau [2], which can account for ties and which reduces to the 1938 “tau-a” version [1] in absence of ties.
This metrics is a wrapper around SciPy implementation:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kendalltau.html
"""
_KWARGS_DESCRIPTION = """
Calculate Kendall’s tau, a correlation measure for ordinal data.
Args:
predictions: list of predictions to score. Each predictions
should be a list of list of rankings.
references: list of reference for each prediction. Each predictions
should be a list of list of rankings.
Returns:
tau: The tau statistic,
pvalue: The two-sided p-value for a hypothesis test whose null hypothesis is an absence of association, tau = 0.
"""
class KendallTau(datasets.Metric):
    """Kendall's tau metric: wraps `scipy.stats.kendalltau` and averages the
    statistic and p-value over all (prediction, reference) ranking pairs."""

    def _info(self):
        """Declare the metric metadata and the expected input features."""
        def ranking_feature():
            return datasets.Sequence(datasets.Value("int8"))

        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {"predictions": ranking_feature(), "references": ranking_feature()}
            ),
            codebase_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kendalltau.html"],
            reference_urls=["https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient"],
        )

    def _compute(self, predictions, references, initial_lexsort=None, nan_policy="propagate", method="auto"):
        """Return the mean tau and mean p-value over all ranking pairs."""
        taus = []
        pvalues = []
        for prediction, reference in zip(predictions, references):
            tau, pvalue = kendalltau(
                x=prediction, y=reference, initial_lexsort=initial_lexsort, nan_policy=nan_policy, method=method
            )
            taus.append(tau)
            pvalues.append(pvalue)
        return {"tau": np.asarray(taus).mean(), "pvalue": np.asarray(pvalues).mean()}
| [
"datasets.Value",
"scipy.stats.kendalltau",
"numpy.append",
"numpy.array"
] | [((2812, 2824), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2820, 2824), True, 'import numpy as np\n'), ((2836, 2848), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2844, 2848), True, 'import numpy as np\n'), ((2944, 3056), 'scipy.stats.kendalltau', 'kendalltau', ([], {'x': 'prediction', 'y': 'reference', 'initial_lexsort': 'initial_lexsort', 'nan_policy': 'nan_policy', 'method': 'method'}), '(x=prediction, y=reference, initial_lexsort=initial_lexsort,\n nan_policy=nan_policy, method=method)\n', (2954, 3056), False, 'from scipy.stats import kendalltau\n'), ((3111, 3140), 'numpy.append', 'np.append', (["result['tau']", 'tau'], {}), "(result['tau'], tau)\n", (3120, 3140), True, 'import numpy as np\n'), ((3172, 3207), 'numpy.append', 'np.append', (["result['pvalue']", 'pvalue'], {}), "(result['pvalue'], pvalue)\n", (3181, 3207), True, 'import numpy as np\n'), ((2320, 2342), 'datasets.Value', 'datasets.Value', (['"""int8"""'], {}), "('int8')\n", (2334, 2342), False, 'import datasets\n'), ((2397, 2419), 'datasets.Value', 'datasets.Value', (['"""int8"""'], {}), "('int8')\n", (2411, 2419), False, 'import datasets\n')] |
"""
=======================
Twinkling star movie
=======================
An animation of stars twinkling in the Kepler field
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.io import fits
import kepler_data as kd
import glob
# Matplotlib styling shared by all frames; note that rcParams are mutated
# globally at import time.
plotpar = {'axes.labelsize': 18,
           'font.size': 10,
           'legend.fontsize': 18,
           'xtick.labelsize': 18,
           'ytick.labelsize': 18,
           'text.usetex': True}
plt.rcParams.update(plotpar)
plt.rcParams['axes.facecolor'] = 'black'
def radec_to_lb(ra, dec):
    """
    Convert ICRS ra/dec (degrees) to galactic coordinates (l, b) in degrees.
    """
    galactic = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs').galactic
    return galactic.l.deg, galactic.b.deg
def get_ra_and_dec_from_headers(filenames):
    """
    Read RA_OBJ/DEC_OBJ from the primary header of the first *llc.fits file
    found in each directory of `filenames` and return them as two arrays.
    """
    ras, decs = [], []
    for path in filenames:
        lc_file = glob.glob("{0}/*llc.fits".format(path))[0]
        with fits.open(lc_file) as hdul:
            header = hdul[0].header
            ras.append(header["RA_OBJ"])
            decs.append(header["DEC_OBJ"])
    return np.array(ras), np.array(decs)
def get_flux_array(nstars, ntimes, kepids, filenames):
    """
    Load the Kepler light curve of each of the first `nstars` targets,
    subsample it to `ntimes` epochs and rescale each flux series so it is
    centred on 0.5 (for use as point transparencies).
    """
    flux_grid = np.zeros((nstars, ntimes))
    selected = kepids.kepid.values[:nstars]
    for idx, _kic in enumerate(selected):
        print(idx, "of", len(selected))
        _, flux, _ = kd.load_kepler_data(filenames[idx])
        segment = flux[1000::10][:ntimes]
        # Scale the fluxes so they are between 0 and 1.
        segment = segment/(max(np.abs(segment))*2) + .5
        flux_grid[idx, :] = segment
    return flux_grid
def plot_l_b(l, b, alphas, times):
    """
    Render one frame per epoch into twinkle_movie/frame_XXXX.png.

    Parameters
    ----------
    l, b : ndarray
        1d arrays containing the galactic positions of the stars.
    alphas : ndarray
        2d array of fluxes with shape (nstars, times); each row is a star
        and each column an epoch. Values are used as point transparencies,
        so they are assumed to lie in [0, 1].
    times : int
        Number of epochs (frames) to render.
    """
    for t in range(times):
        fig = plt.figure(figsize=(20, 20))
        for i in range(len(l)):
            plt.plot(l[i], b[i], "w.", ms=10, alpha=alphas[i, t])
        plt.gca().set_aspect('equal', adjustable='box')
        plt.savefig("twinkle_movie/frame_{}".format(str(t).zfill(4)))
        # Fix: the original opened a fresh figure on every frame without ever
        # closing it, so hundreds of figures accumulated in memory over a run.
        plt.close(fig)
def save_as_movie(framerate=10, quality=25):
    """
    Assemble the rendered frames into twinkle_movie.mp4 with ffmpeg.

    Parameters
    ----------
    framerate : int
        Frames per second of the output movie.
    quality : int
        ffmpeg CRF value (lower means higher quality).
    """
    command = ("/Applications/ffmpeg -r {0} -f image2 -s 1920x1080 -i "
               "twinkle_movie/frame_%04d.png -vcodec libx264 -crf {1} -pix_fmt "
               "yuv420p twinkle_movie.mp4").format(framerate, quality)
    os.system(command)
if __name__ == "__main__":
# df = pd.read_csv("planets.csv", skiprows=69)
"""
Exoplanet host targets.
"""
# m = df.pl_kepflag.values == 1
# kepler = df.iloc[m]
# ra, dec = kepler.ra.values, kepler.dec.values
"""
Asteroseismology targets.
"""
# kepids = pd.read_csv("kepids.csv")
# nstars = len(kepids.kepid.values)
# filenames = ["/Users/ruthangus/.kplr/data/lightcurves/{0}"
# .format(str(kepids.kepid.values[i]).zfill(9))
# for i in range(nstars)]
# ntimes = 100
# nstars = 525
# ra, dec = get_ra_and_dec_from_headers(filenames)
# fluxes = get_flux_array(nstars, ntimes, kepids, filenames)
# l, b = radec_to_lb(ra, dec)
# plot_l_b(l[:nstars], b[:nstars], fluxes, ntimes)
save_as_movie(framerate=10, quality=25)
| [
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"astropy.io.fits.open",
"matplotlib.pyplot.gca",
"kepler_data.load_kepler_data",
"astropy.coordinates.SkyCoord"
] | [((540, 568), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['plotpar'], {}), '(plotpar)\n', (559, 568), True, 'import matplotlib.pyplot as plt\n'), ((707, 767), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': '(ra * u.degree)', 'dec': '(dec * u.degree)', 'frame': '"""icrs"""'}), "(ra=ra * u.degree, dec=dec * u.degree, frame='icrs')\n", (715, 767), False, 'from astropy.coordinates import SkyCoord\n'), ((1242, 1268), 'numpy.zeros', 'np.zeros', (['(nstars, ntimes)'], {}), '((nstars, ntimes))\n', (1250, 1268), True, 'import numpy as np\n'), ((1144, 1156), 'numpy.array', 'np.array', (['ra'], {}), '(ra)\n', (1152, 1156), True, 'import numpy as np\n'), ((1158, 1171), 'numpy.array', 'np.array', (['dec'], {}), '(dec)\n', (1166, 1171), True, 'import numpy as np\n'), ((1408, 1441), 'kepler_data.load_kepler_data', 'kd.load_kepler_data', (['filenames[i]'], {}), '(filenames[i])\n', (1427, 1441), True, 'import kepler_data as kd\n'), ((2000, 2009), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2007, 2009), True, 'import matplotlib.pyplot as plt\n'), ((2018, 2046), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (2028, 2046), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1022), 'astropy.io.fits.open', 'fits.open', (['fname'], {}), '(fname)\n', (1015, 1022), False, 'from astropy.io import fits\n'), ((2096, 2149), 'matplotlib.pyplot.plot', 'plt.plot', (['l[i]', 'b[i]', '"""w."""'], {'ms': '(10)', 'alpha': 'alphas[i, t]'}), "(l[i], b[i], 'w.', ms=10, alpha=alphas[i, t])\n", (2104, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2158, 2167), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2165, 2167), True, 'import matplotlib.pyplot as plt\n'), ((1581, 1599), 'numpy.abs', 'np.abs', (['short_flux'], {}), '(short_flux)\n', (1587, 1599), True, 'import numpy as np\n')] |
import os
import numpy as np
from easydict import EasyDict as edict
# Global config object; sub-namespaces: TRAIN, MODEL and DATA.
config = edict()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin training to GPU 0
config.TRAIN = edict()
#### below are params for the data iterator
config.TRAIN.process_num = 5  # number of data-loading worker processes
config.TRAIN.prefetch_size = 20  # number of prefetched batches
############
config.TRAIN.num_gpu = 1
config.TRAIN.batch_size = 128
config.TRAIN.log_interval = 10 ##log a message every 10 iters
config.TRAIN.epoch = 1000
config.TRAIN.lr_value_every_epoch = [0.00001,0.0001,0.001,0.0001,0.00001,0.000001,0.0000001] ####lr policy: value used in each decay stage
config.TRAIN.lr_decay_every_epoch = [1,2,100,150,200,250]  # epochs at which the lr moves to the next value above
config.TRAIN.weight_decay_factor = 5.e-4 ####l2 regularization strength
config.TRAIN.vis=False #### if True, visually inspect the training data
config.TRAIN.mix_precision=False ##use mixed precision to speed up; requires tf 1.14 at least
config.TRAIN.opt='Adam' ##'Adam' or 'SGD'
config.MODEL = edict()
config.MODEL.model_path = './model/' ## save directory
config.MODEL.hin = 64 # input height during training (e.g. 64/128/160)
config.MODEL.win = 64 # input width during training
# NOTE(review): the original comment mentioned 68 points + 4 cls params, which
# does not match 2+3; presumably 2 values + 3 headpose here — confirm.
config.MODEL.out_channel=2+3 # output vector size
#### 'ShuffleNetV2_1.0' 'ShuffleNetV2_0.5' or MobileNetv2,
config.MODEL.net_structure='ShuffleNetV2_0.75'
config.MODEL.pretrained_model=None
config.DATA = edict()
config.DATA.root_path=''
config.DATA.train_txt_path='train.json'
config.DATA.val_txt_path='val.json'
############the model is trained with RGB mode
config.DATA.PIXEL_MEAN = [127., 127., 127.] ###per-channel means (rgb) for normalization
config.DATA.PIXEL_STD = [127., 127., 127.]
config.DATA.base_extend_range=[0.2,0.3] ###crop extension range
config.DATA.scale_factor=[0.7,1.35] ###augmentation scale range
config.DATA.symmetry = [(0, 0)]
weights=[1.]
weights_xy=[[x,x] for x in weights]
# per-coordinate loss weights, flattened to one weight per x/y value
config.DATA.weights = np.array(weights_xy,dtype=np.float32).reshape([-1])
config.MODEL.pruning=False ## pruning flag: adds l1 reg to bn/beta, currently unused
config.MODEL.pruning_bn_reg=0.00005
| [
"numpy.array",
"easydict.EasyDict"
] | [((80, 87), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (85, 87), True, 'from easydict import EasyDict as edict\n'), ((145, 152), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (150, 152), True, 'from easydict import EasyDict as edict\n'), ((1016, 1023), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (1021, 1023), True, 'from easydict import EasyDict as edict\n'), ((1557, 1564), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (1562, 1564), True, 'from easydict import EasyDict as edict\n'), ((2075, 2113), 'numpy.array', 'np.array', (['weights_xy'], {'dtype': 'np.float32'}), '(weights_xy, dtype=np.float32)\n', (2083, 2113), True, 'import numpy as np\n')] |
import gc
from .Query import QueryMethod, get_unlabeled_idx
import numpy as np
from scipy.spatial import distance_matrix
from keras.models import Model
from keras import backend as K
class CoreSetSampling(QueryMethod):
    """
    An implementation of the greedy core set query strategy.
    """
    def __init__(self, model, input_shape, num_labels, gpu):
        super().__init__(model, input_shape, num_labels, gpu)
    def greedy_k_center(self, labeled, unlabeled, amount):
        """Greedy k-center selection: repeatedly pick the unlabeled point
        farthest from the already covered set (labeled + selected points).

        Parameters
        ----------
        labeled : ndarray
            2d array (n_labeled, d) of representations of labeled examples.
        unlabeled : ndarray
            2d array (n_unlabeled, d) of representations of unlabeled examples.
        amount : int
            Number of points to select.

        Returns
        -------
        ndarray
            Indices into `unlabeled` of the selected points.
        """
        greedy_indices = []
        # get the minimum distances between the labeled and unlabeled examples (iteratively, to avoid memory issues):
        # min_dist holds, for each unlabeled point, the distance to its nearest
        # covered point; it is kept as a (1, n_unlabeled) row throughout.
        min_dist = np.min(distance_matrix(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled), axis=0)
        min_dist = min_dist.reshape((1, min_dist.shape[0]))
        for j in range(1, labeled.shape[0], 100):
            # process the labeled set in chunks of 100 rows to bound memory
            if j + 100 < labeled.shape[0]:
                dist = distance_matrix(labeled[j:j+100, :], unlabeled)
            else:
                dist = distance_matrix(labeled[j:, :], unlabeled)
            min_dist = np.vstack((min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1]))))
            min_dist = np.min(min_dist, axis=0)
            min_dist = min_dist.reshape((1, min_dist.shape[0]))
        # iteratively insert the farthest index and recalculate the minimum distances:
        farthest = np.argmax(min_dist)
        greedy_indices.append(farthest)
        for i in range(amount-1):
            dist = distance_matrix(unlabeled[greedy_indices[-1], :].reshape((1,unlabeled.shape[1])), unlabeled)
            min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))
            min_dist = np.min(min_dist, axis=0)
            min_dist = min_dist.reshape((1, min_dist.shape[0]))
            farthest = np.argmax(min_dist)
            greedy_indices.append(farthest)
        return np.array(greedy_indices)
    def query(self, x_train, y_train, labeled_idx, amount):
        """Return the labeled index set extended with `amount` newly selected
        examples (y_train is unused; kept for interface compatibility)."""
        unlabeled_idx = get_unlabeled_idx(x_train, labeled_idx)
        # get the input to the final layer, this will be our 'learned representation' which we fit k-center greedy to
        representation_model = Model(inputs=self.model.input, outputs=self.model.layers[-1].input)
        representation = representation_model.predict(x_train, verbose=0)
        new_indices = self.greedy_k_center(representation[labeled_idx, :], representation[unlabeled_idx, :], amount)
        return np.hstack((labeled_idx, unlabeled_idx[new_indices]))
class CoreSetMIPSampling(QueryMethod):
"""
An implementation of the core set query strategy with the MIP formulation using gurobi as our optimization solver.
"""
    def __init__(self, model, input_shape, num_labels, gpu):
        super().__init__(model, input_shape, num_labels, gpu)
        # whether to solve the MIP on a subsample of the data
        # (see mip_model_subsample); disabled by default
        self.subsample = False
    def greedy_k_center(self, labeled, unlabeled, amount):
        """Greedy k-center selection over the unlabeled representations.

        Same greedy farthest-point scheme as CoreSetSampling, but also returns
        the final covering radius, which seeds the MIP's delta.

        Returns
        -------
        (ndarray, float)
            Indices into `unlabeled` of the selected points, and the maximum
            over all points of the distance to their nearest covered point.
        """
        greedy_indices = []
        # get the minimum distances between the labeled and unlabeled examples (iteratively, to avoid memory issues):
        # min_dist holds, for each unlabeled point, the distance to its nearest
        # covered point; it is kept as a (1, n_unlabeled) row throughout.
        min_dist = np.min(distance_matrix(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled), axis=0)
        min_dist = min_dist.reshape((1, min_dist.shape[0]))
        for j in range(1, labeled.shape[0], 100):
            # process the labeled set in chunks of 100 rows to bound memory
            if j + 100 < labeled.shape[0]:
                dist = distance_matrix(labeled[j:j+100, :], unlabeled)
            else:
                dist = distance_matrix(labeled[j:, :], unlabeled)
            min_dist = np.vstack((min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1]))))
            min_dist = np.min(min_dist, axis=0)
            min_dist = min_dist.reshape((1, min_dist.shape[0]))
        # iteratively insert the farthest index and recalculate the minimum distances:
        farthest = np.argmax(min_dist)
        greedy_indices.append(farthest)
        for i in range(amount-1):
            if i%1000==0:
                print("At Point " + str(i))
            dist = distance_matrix(unlabeled[greedy_indices[-1], :].reshape((1,unlabeled.shape[1])), unlabeled)
            min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))
            min_dist = np.min(min_dist, axis=0)
            min_dist = min_dist.reshape((1, min_dist.shape[0]))
            farthest = np.argmax(min_dist)
            greedy_indices.append(farthest)
        return np.array(greedy_indices, dtype=int), np.max(min_dist)
    def get_distance_matrix(self, X, Y):
        """Pairwise Euclidean distance matrix between the rows of X and Y.

        Builds a (TF1-style) keras backend graph per call and evaluates it,
        using the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y.
        Returns an array of shape (X.shape[0], Y.shape[0]).
        """
        x_input = K.placeholder((X.shape))
        y_input = K.placeholder(Y.shape)
        dot = K.dot(x_input, K.transpose(y_input))
        x_norm = K.reshape(K.sum(K.pow(x_input, 2), axis=1), (-1, 1))
        y_norm = K.reshape(K.sum(K.pow(y_input, 2), axis=1), (1, -1))
        dist_mat = x_norm + y_norm - 2.0*dot
        # clip before sqrt: the lower bound guards against small negative
        # values from floating-point cancellation; NOTE(review): the upper
        # bound also caps squared distances at 10000 — confirm this is intended.
        sqrt_dist_mat = K.sqrt(K.clip(dist_mat, min_value=0, max_value=10000))
        dist_func = K.function([x_input, y_input], [sqrt_dist_mat])
        return dist_func([X, Y])[0]
    def get_neighborhood_graph(self, representation, delta):
        """Build the delta-neighborhood graph of the representations.

        Returns a dict mapping every point index to the list of
        (neighbor_index, distance) pairs with distance <= delta.
        """
        graph = {}
        print(representation.shape)
        for i in range(0, representation.shape[0], 1000):
            # compute distances in chunks of 1000 rows to bound memory
            if i+1000 > representation.shape[0]:
                distances = self.get_distance_matrix(representation[i:], representation)
                amount = representation.shape[0] - i
            else:
                distances = self.get_distance_matrix(representation[i:i+1000], representation)
                amount = 1000
            distances = np.reshape(distances, (amount, -1))
            for j in range(i, i+amount):
                graph[j] = [(idx, distances[j-i, idx]) for idx in np.reshape(np.where(distances[j-i, :] <= delta),(-1))]
        print("Finished Building Graph!")
        return graph
def get_graph_max(self, representation, delta):
print("Getting Graph Maximum...")
maximum = 0
for i in range(0, representation.shape[0], 1000):
print("At Point " + str(i))
if i+1000 > representation.shape[0]:
distances = self.get_distance_matrix(representation[i:], representation)
else:
distances = self.get_distance_matrix(representation[i:i+1000], representation)
distances = np.reshape(distances, (-1))
distances[distances > delta] = 0
maximum = max(maximum, np.max(distances))
return maximum
def get_graph_min(self, representation, delta):
print("Getting Graph Minimum...")
minimum = 10000
for i in range(0, representation.shape[0], 1000):
print("At Point " + str(i))
if i+1000 > representation.shape[0]:
distances = self.get_distance_matrix(representation[i:], representation)
else:
distances = self.get_distance_matrix(representation[i:i+1000], representation)
distances = np.reshape(distances, (-1))
distances[distances < delta] = 10000
minimum = min(minimum, np.min(distances))
return minimum
    def mip_model(self, representation, labeled_idx, budget, delta, outlier_count, greedy_indices=None):
        """Build the gurobi MIP for core-set selection with outliers.

        Variables: one binary per point marking it as selected (points in
        `labeled_idx` are fixed to 1 via ub=lb=1) and one binary per point
        marking it as an outlier. Constraints: exactly `budget` points
        selected, at most `outlier_count` outliers, and every point must
        either have a selected point within distance `delta` or be an
        outlier. `greedy_indices`, when given, warm-start the solver.

        Returns
        -------
        (gurobi.Model, dict)
            The MIP model and the delta-neighborhood graph
            (point index -> list of (neighbor index, distance)).
        """
        import gurobipy as gurobi
        model = gurobi.Model("Core Set Selection")
        # set up the variables:
        points = {}
        outliers = {}
        for i in range(representation.shape[0]):
            if i in labeled_idx:
                points[i] = model.addVar(ub=1.0, lb=1.0, vtype="B", name="points_{}".format(i))
            else:
                points[i] = model.addVar(vtype="B", name="points_{}".format(i))
        for i in range(representation.shape[0]):
            outliers[i] = model.addVar(vtype="B", name="outliers_{}".format(i))
            outliers[i].start = 0
        # initialize the solution to be the greedy solution:
        if greedy_indices is not None:
            for i in greedy_indices:
                points[i].start = 1.0
        # set the outlier budget:
        # NOTE(review): both constraints below share the name "budget" — confirm
        # this is intended, as it makes them hard to distinguish in the model.
        model.addConstr(sum(outliers[i] for i in outliers) <= outlier_count, "budget")
        # build the graph and set the constraints:
        model.addConstr(sum(points[i] for i in range(representation.shape[0])) == budget, "budget")
        neighbors = {}
        graph = {}
        print("Updating Neighborhoods In MIP Model...")
        for i in range(0, representation.shape[0], 1000):
            print("At Point " + str(i))
            # distances are computed in chunks of 1000 rows to bound memory
            if i+1000 > representation.shape[0]:
                distances = self.get_distance_matrix(representation[i:], representation)
                amount = representation.shape[0] - i
            else:
                distances = self.get_distance_matrix(representation[i:i+1000], representation)
                amount = 1000
            distances = np.reshape(distances, (amount, -1))
            for j in range(i, i+amount):
                graph[j] = [(idx, distances[j-i, idx]) for idx in np.reshape(np.where(distances[j-i, :] <= delta),(-1))]
                neighbors[j] = [points[idx] for idx in np.reshape(np.where(distances[j-i, :] <= delta),(-1))]
                neighbors[j].append(outliers[j])
                # coverage: each point needs a selected neighbor or outlier status
                model.addConstr(sum(neighbors[j]) >= 1, "coverage+outliers")
        # NOTE: inside this class, `model.__data` is name-mangled to
        # `model._CoreSetMIPSampling__data` by Python.
        model.__data = points, outliers
        model.Params.MIPFocus = 1
        model.params.TIME_LIMIT = 180
        return model, graph
def mip_model_subsample(self, data, subsample_num, budget, dist, delta, outlier_count, greedy_indices=None):
    """
    Build a Gurobi MIP over a precomputed distance matrix (subsampled data).

    Unlike mip_model, the coverage constraints come from a dense pairwise
    distance matrix `dist`, and instead of a pure feasibility problem the
    objective explicitly minimizes the number of outliers.

    data           : 2D array of points (only data.shape[0] is used here).
    subsample_num  : points with index >= subsample_num are forced into the
                     solution (treated as already labeled) - TODO confirm
                     this matches the caller's stacking order.
    budget         : total number of selected points.
    dist           : pairwise distance matrix.
    delta          : coverage radius.
    outlier_count  : maximum number of uncovered points.
    greedy_indices : optional warm start - these points start at 1.

    Returns the configured gurobi model; the variable dicts are stashed on
    model.__data (name-mangled within this class).
    """
    import gurobipy as gurobi
    model = gurobi.Model("Core Set Selection")
    # calculate neighborhoods: all (i, j) index pairs within delta.
    data_1, data_2 = np.where(dist <= delta)
    # set up the variables:
    points = {}
    outliers = {}
    for i in range(data.shape[0]):
        if i >= subsample_num:
            points[i] = model.addVar(ub=1.0, lb=1.0, vtype="B", name="points_{}".format(i))
        else:
            points[i] = model.addVar(vtype="B", name="points_{}".format(i))
    for i in range(data.shape[0]):
        outliers[i] = model.addVar(vtype="B", name="outliers_{}".format(i))
        outliers[i].start = 0
    # initialize the solution to be the greedy solution:
    if greedy_indices is not None:
        for i in greedy_indices:
            points[i].start = 1.0
    # set up the constraints:
    model.addConstr(sum(points[i] for i in range(data.shape[0])) == budget, "budget")
    neighbors = {}
    for i in range(data.shape[0]):
        neighbors[i] = []
        neighbors[i].append(outliers[i])
    for i in range(len(data_1)):
        neighbors[data_1[i]].append(points[data_2[i]])
    for i in range(data.shape[0]):
        # Every point: covered by a selected neighbor or flagged outlier.
        model.addConstr(sum(neighbors[i]) >= 1, "coverage+outliers")
    model.addConstr(sum(outliers[i] for i in outliers) <= outlier_count, "budget")
    model.setObjective(sum(outliers[i] for i in outliers), gurobi.GRB.MINIMIZE)
    model.__data = points, outliers
    model.Params.MIPFocus = 1
    return model
def query_regular(self, X_train, Y_train, labeled_idx, amount):
    """
    Select `amount` new points to label via the robust k-center MIP
    (core-set approach).

    A greedy k-center solution warm-starts the search, then the coverage
    radius delta is bisected between max_delta/2 and max_delta, re-solving
    the MIP feasibility problem at each step until the bracket is tighter
    than eps.

    X_train     : full training pool.
    Y_train     : unused (kept for interface symmetry with other queries).
    labeled_idx : indices of already-labeled points.
    amount      : number of new points to pick.

    Returns an array of selected indices into X_train.
    """
    import gurobipy as gurobi
    unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)
    # use the learned representation for the k-greedy-center algorithm:
    representation_model = Model(inputs=self.model.input, outputs=self.model.get_layer('softmax').input)
    representation = representation_model.predict(X_train, batch_size=128, verbose=0)
    print("Calculating Greedy K-Center Solution...")
    new_indices, max_delta = self.greedy_k_center(representation[labeled_idx], representation[unlabeled_idx], amount)
    new_indices = unlabeled_idx[new_indices]
    # Allow roughly 0.01% of the pool to remain uncovered.
    outlier_count = int(X_train.shape[0] / 10000)
    # outlier_count = 250
    submipnodes = 20000
    # iteratively solve the MIP optimization problem:
    eps = 0.01
    upper_bound = max_delta
    lower_bound = max_delta / 2.0
    print("Building MIP Model...")
    model, graph = self.mip_model(representation, labeled_idx, len(labeled_idx) + amount, upper_bound, outlier_count, greedy_indices=new_indices)
    model.Params.SubMIPNodes = submipnodes
    points, outliers = model.__data
    model.optimize()
    indices = [i for i in graph if points[i].X == 1]
    current_delta = upper_bound
    # Bisection on delta: infeasible -> raise the lower bound; feasible ->
    # lower the upper bound and keep the incumbent selection.
    while upper_bound - lower_bound > eps:
        print("upper bound is {ub}, lower bound is {lb}".format(ub=upper_bound, lb=lower_bound))
        if model.getAttr(gurobi.GRB.Attr.Status) in [gurobi.GRB.INFEASIBLE, gurobi.GRB.TIME_LIMIT]:
            print("Optimization Failed - Infeasible!")
            # Snap the bound to an actually-occurring pairwise distance
            # (presumably what get_graph_min provides - confirm).
            lower_bound = max(current_delta, self.get_graph_min(representation, current_delta))
            current_delta = (upper_bound + lower_bound) / 2.0
            # Free the old model before building the next one.
            del model
            gc.collect()
            model, graph = self.mip_model(representation, labeled_idx, len(labeled_idx) + amount, current_delta, outlier_count, greedy_indices=indices)
            points, outliers = model.__data
            model.Params.SubMIPNodes = submipnodes
        else:
            print("Optimization Succeeded!")
            upper_bound = min(current_delta, self.get_graph_max(representation, current_delta))
            current_delta = (upper_bound + lower_bound) / 2.0
            indices = [i for i in graph if points[i].X == 1]
            del model
            gc.collect()
            model, graph = self.mip_model(representation, labeled_idx, len(labeled_idx) + amount, current_delta, outlier_count, greedy_indices=indices)
            points, outliers = model.__data
            model.Params.SubMIPNodes = submipnodes
        # Only re-solve if the bracket is still open.
        if upper_bound - lower_bound > eps:
            model.optimize()
    return np.array(indices)
def query_subsample(self, X_train, Y_train, labeled_idx, amount):
    """
    Core-set query on a random subsample of the unlabeled pool.

    The MIP in query_regular does not scale to very large pools, so draw
    30000 unlabeled points at random, stack them after the labeled points,
    run the regular query on that reduced set, and translate the chosen
    indices back into X_train coordinates.

    Returns an array of selected indices into X_train.
    """
    # Fix: dropped the unused `import gurobipy` and unused `submipnodes`
    # local - neither was referenced in this method.
    unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)
    subsample_num = 30000
    # Random subset of the unlabeled pool, without replacement.
    subsample_idx = np.random.choice(unlabeled_idx, subsample_num, replace=False)
    # Labeled points go first, so their indices in the stacked array are
    # simply 0 .. len(labeled_idx)-1.
    subsample = np.vstack((X_train[labeled_idx], X_train[subsample_idx]))
    new_labeled_idx = np.arange(len(labeled_idx))
    new_indices = self.query_regular(subsample, Y_train, new_labeled_idx, amount)
    # Map positions in the stacked subsample back to original pool indices.
    return np.array(subsample_idx[new_indices - len(labeled_idx)])
def query(self, x_train, y_train, labeled_idx, amount):
    """Dispatch to the subsampled or full core-set query per self.subsample."""
    handler = self.query_subsample if self.subsample else self.query_regular
    return handler(x_train, y_train, labeled_idx, amount)
| [
"keras.backend.placeholder",
"numpy.argmax",
"gc.collect",
"keras.backend.function",
"keras.models.Model",
"numpy.hstack",
"gurobipy.Model",
"scipy.spatial.distance_matrix",
"numpy.min",
"numpy.where",
"numpy.array",
"numpy.max",
"keras.backend.transpose",
"numpy.random.choice",
"keras.b... | [((1365, 1384), 'numpy.argmax', 'np.argmax', (['min_dist'], {}), '(min_dist)\n', (1374, 1384), True, 'import numpy as np\n'), ((1869, 1893), 'numpy.array', 'np.array', (['greedy_indices'], {}), '(greedy_indices)\n', (1877, 1893), True, 'import numpy as np\n'), ((2170, 2237), 'keras.models.Model', 'Model', ([], {'inputs': 'self.model.input', 'outputs': 'self.model.layers[-1].input'}), '(inputs=self.model.input, outputs=self.model.layers[-1].input)\n', (2175, 2237), False, 'from keras.models import Model\n'), ((2444, 2496), 'numpy.hstack', 'np.hstack', (['(labeled_idx, unlabeled_idx[new_indices])'], {}), '((labeled_idx, unlabeled_idx[new_indices]))\n', (2453, 2496), True, 'import numpy as np\n'), ((3770, 3789), 'numpy.argmax', 'np.argmax', (['min_dist'], {}), '(min_dist)\n', (3779, 3789), True, 'import numpy as np\n'), ((4459, 4481), 'keras.backend.placeholder', 'K.placeholder', (['X.shape'], {}), '(X.shape)\n', (4472, 4481), True, 'from keras import backend as K\n'), ((4502, 4524), 'keras.backend.placeholder', 'K.placeholder', (['Y.shape'], {}), '(Y.shape)\n', (4515, 4524), True, 'from keras import backend as K\n'), ((4860, 4907), 'keras.backend.function', 'K.function', (['[x_input, y_input]', '[sqrt_dist_mat]'], {}), '([x_input, y_input], [sqrt_dist_mat])\n', (4870, 4907), True, 'from keras import backend as K\n'), ((7195, 7229), 'gurobipy.Model', 'gurobi.Model', (['"""Core Set Selection"""'], {}), "('Core Set Selection')\n", (7207, 7229), True, 'import gurobipy as gurobi\n'), ((9492, 9526), 'gurobipy.Model', 'gurobi.Model', (['"""Core Set Selection"""'], {}), "('Core Set Selection')\n", (9504, 9526), True, 'import gurobipy as gurobi\n'), ((9588, 9611), 'numpy.where', 'np.where', (['(dist <= delta)'], {}), '(dist <= delta)\n', (9596, 9611), True, 'import numpy as np\n'), ((13855, 13872), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (13863, 13872), True, 'import numpy as np\n'), ((14127, 14188), 'numpy.random.choice', 
'np.random.choice', (['unlabeled_idx', 'subsample_num'], {'replace': '(False)'}), '(unlabeled_idx, subsample_num, replace=False)\n', (14143, 14188), True, 'import numpy as np\n'), ((14209, 14266), 'numpy.vstack', 'np.vstack', (['(X_train[labeled_idx], X_train[subsample_idx])'], {}), '((X_train[labeled_idx], X_train[subsample_idx]))\n', (14218, 14266), True, 'import numpy as np\n'), ((1169, 1193), 'numpy.min', 'np.min', (['min_dist'], {'axis': '(0)'}), '(min_dist, axis=0)\n', (1175, 1193), True, 'import numpy as np\n'), ((1677, 1701), 'numpy.min', 'np.min', (['min_dist'], {'axis': '(0)'}), '(min_dist, axis=0)\n', (1683, 1701), True, 'import numpy as np\n'), ((1789, 1808), 'numpy.argmax', 'np.argmax', (['min_dist'], {}), '(min_dist)\n', (1798, 1808), True, 'import numpy as np\n'), ((3574, 3598), 'numpy.min', 'np.min', (['min_dist'], {'axis': '(0)'}), '(min_dist, axis=0)\n', (3580, 3598), True, 'import numpy as np\n'), ((4152, 4176), 'numpy.min', 'np.min', (['min_dist'], {'axis': '(0)'}), '(min_dist, axis=0)\n', (4158, 4176), True, 'import numpy as np\n'), ((4264, 4283), 'numpy.argmax', 'np.argmax', (['min_dist'], {}), '(min_dist)\n', (4273, 4283), True, 'import numpy as np\n'), ((4344, 4379), 'numpy.array', 'np.array', (['greedy_indices'], {'dtype': 'int'}), '(greedy_indices, dtype=int)\n', (4352, 4379), True, 'import numpy as np\n'), ((4381, 4397), 'numpy.max', 'np.max', (['min_dist'], {}), '(min_dist)\n', (4387, 4397), True, 'import numpy as np\n'), ((4554, 4574), 'keras.backend.transpose', 'K.transpose', (['y_input'], {}), '(y_input)\n', (4565, 4574), True, 'from keras import backend as K\n'), ((4792, 4838), 'keras.backend.clip', 'K.clip', (['dist_mat'], {'min_value': '(0)', 'max_value': '(10000)'}), '(dist_mat, min_value=0, max_value=10000)\n', (4798, 4838), True, 'from keras import backend as K\n'), ((5481, 5516), 'numpy.reshape', 'np.reshape', (['distances', '(amount, -1)'], {}), '(distances, (amount, -1))\n', (5491, 5516), True, 'import numpy as np\n'), 
((6235, 6260), 'numpy.reshape', 'np.reshape', (['distances', '(-1)'], {}), '(distances, -1)\n', (6245, 6260), True, 'import numpy as np\n'), ((6882, 6907), 'numpy.reshape', 'np.reshape', (['distances', '(-1)'], {}), '(distances, -1)\n', (6892, 6907), True, 'import numpy as np\n'), ((8750, 8785), 'numpy.reshape', 'np.reshape', (['distances', '(amount, -1)'], {}), '(distances, (amount, -1))\n', (8760, 8785), True, 'import numpy as np\n'), ((915, 964), 'scipy.spatial.distance_matrix', 'distance_matrix', (['labeled[j:j + 100, :]', 'unlabeled'], {}), '(labeled[j:j + 100, :], unlabeled)\n', (930, 964), False, 'from scipy.spatial import distance_matrix\n'), ((1004, 1046), 'scipy.spatial.distance_matrix', 'distance_matrix', (['labeled[j:, :]', 'unlabeled'], {}), '(labeled[j:, :], unlabeled)\n', (1019, 1046), False, 'from scipy.spatial import distance_matrix\n'), ((3320, 3369), 'scipy.spatial.distance_matrix', 'distance_matrix', (['labeled[j:j + 100, :]', 'unlabeled'], {}), '(labeled[j:j + 100, :], unlabeled)\n', (3335, 3369), False, 'from scipy.spatial import distance_matrix\n'), ((3409, 3451), 'scipy.spatial.distance_matrix', 'distance_matrix', (['labeled[j:, :]', 'unlabeled'], {}), '(labeled[j:, :], unlabeled)\n', (3424, 3451), False, 'from scipy.spatial import distance_matrix\n'), ((4609, 4626), 'keras.backend.pow', 'K.pow', (['x_input', '(2)'], {}), '(x_input, 2)\n', (4614, 4626), True, 'from keras import backend as K\n'), ((4679, 4696), 'keras.backend.pow', 'K.pow', (['y_input', '(2)'], {}), '(y_input, 2)\n', (4684, 4696), True, 'from keras import backend as K\n'), ((6343, 6360), 'numpy.max', 'np.max', (['distances'], {}), '(distances)\n', (6349, 6360), True, 'import numpy as np\n'), ((6994, 7011), 'numpy.min', 'np.min', (['distances'], {}), '(distances)\n', (7000, 7011), True, 'import numpy as np\n'), ((12871, 12883), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12881, 12883), False, 'import gc\n'), ((13485, 13497), 'gc.collect', 'gc.collect', ([], {}), '()\n', 
(13495, 13497), False, 'import gc\n'), ((1091, 1111), 'numpy.min', 'np.min', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (1097, 1111), True, 'import numpy as np\n'), ((3496, 3516), 'numpy.min', 'np.min', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (3502, 3516), True, 'import numpy as np\n'), ((5635, 5673), 'numpy.where', 'np.where', (['(distances[j - i, :] <= delta)'], {}), '(distances[j - i, :] <= delta)\n', (5643, 5673), True, 'import numpy as np\n'), ((8904, 8942), 'numpy.where', 'np.where', (['(distances[j - i, :] <= delta)'], {}), '(distances[j - i, :] <= delta)\n', (8912, 8942), True, 'import numpy as np\n'), ((9014, 9052), 'numpy.where', 'np.where', (['(distances[j - i, :] <= delta)'], {}), '(distances[j - i, :] <= delta)\n', (9022, 9052), True, 'import numpy as np\n')] |
"""
This module is a plugin to read images from .mat files in napari
"""
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import dask.array as da
import h5py
import numpy as np
import scipy.io as sio
from pluggy import HookimplMarker
LayerData = Union[Tuple[Any], Tuple[Any, Dict], Tuple[Any, Dict, str]]
PathLike = Union[str, List[str]]
ReaderFunction = Callable[[PathLike], List[LayerData]]
napari_hook_implementation = HookimplMarker("napari")
MAT_EXTENSIONS = '.mat'
@napari_hook_implementation
def napari_get_reader(path: PathLike) -> Optional[ReaderFunction]:
"""A basic implementation of the napari_get_reader hook specification."""
if isinstance(path, list):
# reader plugins may be handed single path, or a list of paths.
# if it is a list, it is assumed to be an image stack...
# so we are only going to look at the first file.
path = path[0]
if not path.endswith(MAT_EXTENSIONS):
# if we know we cannot read the file, we immediately return None.
return None
# otherwise we return the *function* that can read ``path``.
return reader_function
def load_mat_vars(file_path: str) -> Dict:
"""Load image variables from .mat file into dictionary
Args:
file_path (str): path to .mat file
Returns:
Dict: dictionary of image variables. Empty if file contains no images
"""
try:
# Load variable details before loading
mat_vars = sio.whosmat(file_path)
# Check if each variable is image based on shape
is_image_list = [shape_is_image(var[1]) for var in mat_vars]
var_list = [var[0] for var in mat_vars]
# Filter list of variables whether they are images
var_list = [i for (i, v) in zip(var_list, is_image_list) if v]
if len(var_list) > 0:
mat_dict = sio.loadmat(
file_path, variable_names=var_list, squeeze_me=True
)
for var in list(mat_dict.keys()):
if not hasattr(mat_dict[var], 'shape'):
del mat_dict[var]
else:
mat_dict = {}
except NotImplementedError:
mat_file = h5py.File(file_path, mode='r')
var_list = list(mat_file.keys())
# discard #refs# entry
try:
var_list.remove("#refs#")
except ValueError:
pass
is_image_list = [
shape_is_image(mat_file[var].shape) for var in var_list
]
# Filter list of variables whether they are images
var_list = [i for (i, v) in zip(var_list, is_image_list) if v]
mat_dict = {}
for var in var_list:
array_size = mat_file[var].size
chunk_size = mat_file[var].chunks
chunk_size = update_chunk_size(array_size, chunk_size)
array = da.from_array(mat_file[var], chunks=chunk_size).squeeze()
# .mat are saved in reverse order
array = rearrange_da_dims(array)
mat_dict[var] = array
return mat_dict
def reader_function(path: PathLike) -> List[LayerData]:
"""Take a path or list of paths and return a list of LayerData tuples."""
paths = [path] if isinstance(path, str) else path
# Generate list to hold potential images from each path provided
data_list = [None for __ in range(len(paths))]
for i, _path in enumerate(paths):
mat_dict = load_mat_vars(_path)
if not mat_dict:
continue
var_list = list(mat_dict.keys())
data = [None for __ in var_list]
for j, var in enumerate(var_list):
array = mat_dict[var]
# optional kwargs for the corresponding viewer.add_* method
meta = {"name": var}
if len(array.shape) == 3 or len(array.shape) == 2:
meta["contrast_limits"] = array_contrast_limits(array)
elif len(array.shape) == 4:
meta["channel_axis"] = 3
# Set contrast min/max for each channel
num_channels = array.shape[3]
contrast_limits = [None for __ in range(num_channels)]
for chann_index in range(num_channels):
contrast_limits[chann_index] = array_contrast_limits(
array[:, :, :, chann_index]
)
meta["contrast_limits"] = contrast_limits
if isinstance(array, da.Array):
meta["is_pyramid"] = False
data[j] = (prep_array(array), meta)
data_list[i] = data
# Return None if no .mat files could be read
if all(value is None for value in data_list):
return None
# Flatten potential list of lists
data_list = [item for sublist in data_list for item in sublist]
return data_list
def shape_is_image(shape: Sequence, min_size: int = 20) -> bool:
"""Checks if shape of array provided is at least 2D
Args:
shape (Sequence): shape of array to check
Returns:
bool : Whether shape belongs to at least 2D image
"""
dims = np.sum(np.array(shape) > min_size)
return dims >= 2
def prep_array(array: np.ndarray) -> np.ndarray:
"""Correct images after loading to match Python
Args:
array (np.ndarray): array of at least two dimensions
Returns:
np.ndarray: Corrected array
"""
# Boolean/logical arrays from Matlab are read as uint8
if array.dtype == "uint8":
if array.max() == 1:
array = array.astype("bool")
# Rearrange dimensions if 3D or higher
array = rearrange_dims(array)
return array
def rearrange_dims(array: np.ndarray) -> np.ndarray:
"""If image is more than 2D, move third dimension to first
Args:
array (np.ndarray): Multidimensional array
Returns:
np.ndarray: Array with rearranged axes
"""
if len(array.shape) > 2:
# If third dimension is longer than first two, move to first position
if np.all(array.shape[2] > np.array(array.shape[0:2])):
array = np.moveaxis(array, 2, 0)
return array
def rearrange_da_dims(array: da.Array) -> da.Array:
"""Flip dask array dims from HDF5 .mat files & move slices dim to 0th pos.
Args:
array (da.Array): 3-dimensional or more dask array
Returns:
da.Array: array with rearranged dimensions
"""
array_shape = array.shape
# Current dimension order
dims = np.arange(len(array_shape))
# Flip dims as if array dims were flipped to recover orig. saved dimensions
dims_flipped = np.flip(dims)
# breakpoint()
if len(array_shape) > 2:
# Flip array shape to recover original saved shape
array_shape_flipped = np.flip(array_shape)
# Find largest dimension (slices of stack) in flipped array shape
slices_flipped_ind = np.argmax(array_shape_flipped)
# Get slices dimension
slices_dim = dims_flipped[slices_flipped_ind]
# Remove slices dimensions from dims_flipped
dims_flipped = np.delete(dims_flipped, slices_flipped_ind)
# Insert slices dimensions to first dimension of dims_flipped
dims_flipped = np.insert(dims_flipped, 0, slices_dim)
# Determine which dimensions are no longer in agreement
move_positions = dims != dims_flipped
# If any dimensions need to be rearranged, move them
if np.any(move_positions):
array = da.moveaxis(
array,
source=dims_flipped[move_positions],
destination=dims[move_positions],
)
elif len(array_shape) == 2:
array = da.moveaxis(array, dims, dims_flipped)
return array
def array_contrast_limits(array, axis=0, num_samples=100) -> List[float]:
"""Determine min/max of numpy/dask arrays along axis if n-dimensional
Args:
dask_array (Union[np.ndarray, dask.array]): n-dimensional array
axis (int): Axis along n-dimensional array to sample min/max
num_samples (int): Number of slices to sample from if large array.
Returns:
List[float]: min/max of array
"""
if not isinstance(array, da.Array) and not isinstance(array, np.ndarray):
raise TypeError("dask/numpy array expected")
if len(array.shape) > 2:
if num_samples is None:
num_samples = array.shape[axis]
num_samples = min(num_samples, array.shape[axis])
random_samples = np.random.choice(
array.shape[axis], num_samples, replace=False
)
# Sort random samples for dask slicing efficiency
random_samples = np.sort(random_samples)
# If unsigned int, use 0 as lower bound
if np.issubdtype(array.dtype, np.unsignedinteger):
contrast_min = 0
else:
contrast_min = array[random_samples].min()
if isinstance(array, da.Array):
contrast_min = contrast_min.compute()
contrast_max = array[random_samples].max()
if isinstance(array, da.Array):
contrast_max = contrast_max.compute()
elif len(array.shape) == 2:
if num_samples is None:
num_samples = array.size
num_samples = min(num_samples, array.size)
row_ind = np.random.randint(0, array.shape[0], num_samples)
col_ind = np.random.randint(0, array.shape[1], num_samples)
if isinstance(array, da.Array):
contrast_min = array.vindex[row_ind, col_ind].min().compute()
contrast_max = array.vindex[row_ind, col_ind].max().compute()
else:
contrast_min = array[row_ind, col_ind].min()
contrast_max = array[row_ind, col_ind].max()
else:
raise ValueError("Array of dimensions >= 2 required.")
return [contrast_min, contrast_max]
def update_chunk_size(array_size: Sequence, chunk_size: Sequence) -> List:
"""Determines new chunk size when loading dask array.
Potentially increases slice axis chunk size to 10 if 1.
This makes loading array faster for user.
Args:
array_size (Sequence): array size of dask array
chunk_size (Sequence): original chunk size of dask array
Returns:
List: Updated (or original) chunk size
"""
chunk_size = list(chunk_size)
slice_index = np.argmax(array_size)
if chunk_size[slice_index] == 1:
chunk_size[slice_index] = 10
return chunk_size
| [
"h5py.File",
"numpy.moveaxis",
"numpy.flip",
"scipy.io.whosmat",
"numpy.argmax",
"pluggy.HookimplMarker",
"scipy.io.loadmat",
"dask.array.moveaxis",
"numpy.insert",
"numpy.any",
"numpy.sort",
"numpy.random.randint",
"numpy.array",
"numpy.random.choice",
"dask.array.from_array",
"numpy.... | [((470, 494), 'pluggy.HookimplMarker', 'HookimplMarker', (['"""napari"""'], {}), "('napari')\n", (484, 494), False, 'from pluggy import HookimplMarker\n'), ((6783, 6796), 'numpy.flip', 'np.flip', (['dims'], {}), '(dims)\n', (6790, 6796), True, 'import numpy as np\n'), ((10612, 10633), 'numpy.argmax', 'np.argmax', (['array_size'], {}), '(array_size)\n', (10621, 10633), True, 'import numpy as np\n'), ((1534, 1556), 'scipy.io.whosmat', 'sio.whosmat', (['file_path'], {}), '(file_path)\n', (1545, 1556), True, 'import scipy.io as sio\n'), ((6938, 6958), 'numpy.flip', 'np.flip', (['array_shape'], {}), '(array_shape)\n', (6945, 6958), True, 'import numpy as np\n'), ((7064, 7094), 'numpy.argmax', 'np.argmax', (['array_shape_flipped'], {}), '(array_shape_flipped)\n', (7073, 7094), True, 'import numpy as np\n'), ((7260, 7303), 'numpy.delete', 'np.delete', (['dims_flipped', 'slices_flipped_ind'], {}), '(dims_flipped, slices_flipped_ind)\n', (7269, 7303), True, 'import numpy as np\n'), ((7399, 7437), 'numpy.insert', 'np.insert', (['dims_flipped', '(0)', 'slices_dim'], {}), '(dims_flipped, 0, slices_dim)\n', (7408, 7437), True, 'import numpy as np\n'), ((7624, 7646), 'numpy.any', 'np.any', (['move_positions'], {}), '(move_positions)\n', (7630, 7646), True, 'import numpy as np\n'), ((8709, 8772), 'numpy.random.choice', 'np.random.choice', (['array.shape[axis]', 'num_samples'], {'replace': '(False)'}), '(array.shape[axis], num_samples, replace=False)\n', (8725, 8772), True, 'import numpy as np\n'), ((8882, 8905), 'numpy.sort', 'np.sort', (['random_samples'], {}), '(random_samples)\n', (8889, 8905), True, 'import numpy as np\n'), ((8967, 9013), 'numpy.issubdtype', 'np.issubdtype', (['array.dtype', 'np.unsignedinteger'], {}), '(array.dtype, np.unsignedinteger)\n', (8980, 9013), True, 'import numpy as np\n'), ((1921, 1985), 'scipy.io.loadmat', 'sio.loadmat', (['file_path'], {'variable_names': 'var_list', 'squeeze_me': '(True)'}), '(file_path, variable_names=var_list, 
squeeze_me=True)\n', (1932, 1985), True, 'import scipy.io as sio\n'), ((2256, 2286), 'h5py.File', 'h5py.File', (['file_path'], {'mode': '"""r"""'}), "(file_path, mode='r')\n", (2265, 2286), False, 'import h5py\n'), ((5238, 5253), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (5246, 5253), True, 'import numpy as np\n'), ((6250, 6274), 'numpy.moveaxis', 'np.moveaxis', (['array', '(2)', '(0)'], {}), '(array, 2, 0)\n', (6261, 6274), True, 'import numpy as np\n'), ((7669, 7763), 'dask.array.moveaxis', 'da.moveaxis', (['array'], {'source': 'dims_flipped[move_positions]', 'destination': 'dims[move_positions]'}), '(array, source=dims_flipped[move_positions], destination=dims[\n move_positions])\n', (7680, 7763), True, 'import dask.array as da\n'), ((7876, 7914), 'dask.array.moveaxis', 'da.moveaxis', (['array', 'dims', 'dims_flipped'], {}), '(array, dims, dims_flipped)\n', (7887, 7914), True, 'import dask.array as da\n'), ((9535, 9584), 'numpy.random.randint', 'np.random.randint', (['(0)', 'array.shape[0]', 'num_samples'], {}), '(0, array.shape[0], num_samples)\n', (9552, 9584), True, 'import numpy as np\n'), ((9604, 9653), 'numpy.random.randint', 'np.random.randint', (['(0)', 'array.shape[1]', 'num_samples'], {}), '(0, array.shape[1], num_samples)\n', (9621, 9653), True, 'import numpy as np\n'), ((6200, 6226), 'numpy.array', 'np.array', (['array.shape[0:2]'], {}), '(array.shape[0:2])\n', (6208, 6226), True, 'import numpy as np\n'), ((2933, 2980), 'dask.array.from_array', 'da.from_array', (['mat_file[var]'], {'chunks': 'chunk_size'}), '(mat_file[var], chunks=chunk_size)\n', (2946, 2980), True, 'import dask.array as da\n')] |
#!/usr/bin/env python
"""
.. module:: shapes
:synopsis: Python functions to create simple shapes from LDraw primitives.
.. moduleauthor:: <NAME>
"""
import math
import numbers
import numpy
import opensdraw.lcad_language.curveFunctions as curveFunctions
import opensdraw.lcad_language.geometry as geometry
import opensdraw.lcad_language.interpreter as interpreter
import opensdraw.lcad_language.parts as parts
import opensdraw.lcad_language.lcadTypes as lcadTypes
# Registry mapping opensdraw language names to shape-function instances;
# populated below as each class is defined.
lcad_functions = {}
#
# Helper functions.
#
def createVectors(matrices, vector):
    """Return [vector] followed by each matrix in matrices applied to it."""
    return [vector] + [numpy.dot(m, vector) for m in matrices]
def matrixXVectors(matrix, vectors, truncate = True):
    """
    Apply matrix to every vector in vectors.

    When truncate is True the homogeneous (4th) coordinate is dropped from
    each result, leaving 3-element vectors.
    """
    if truncate:
        return [numpy.dot(matrix, v)[:3] for v in vectors]
    return [numpy.dot(matrix, v) for v in vectors]
def renderShape(curve, group, matrix, vectors, stepper, stop):
    """
    Sweep a closed 2D cross-section along a curve, emitting LDraw triangles.

    curve   : object exposing call(model, pos) -> 4x4 transform at pos.
    group   : the part group receiving the generated Triangle parts.
    matrix  : transform of the enclosing group.
    vectors : cross-section outline in homogeneous coordinates; the first
              and last entries are expected to coincide (closed outline).
    stepper : Stepper instance that walks the curve adaptively.
    stop    : end position on the curve.

    Fix: the triangle-emission loop was duplicated (once in the while loop,
    once after it for the final ring) and contained commented-out dead
    code; both copies are now a single local helper.
    """
    def addBand(ring_a, ring_b):
        # Connect two consecutive cross-sections with a band of triangle
        # pairs, one quad (two triangles) per outline segment.
        for i in range(len(ring_a) - 1):
            group.addPart(parts.Triangle(None, numpy.append(ring_a[i], [ring_a[i+1], ring_b[i]]), 16), True)
            group.addPart(parts.Triangle(None, numpy.append(ring_a[i+1], [ring_b[i+1], ring_b[i]]), 16), True)

    last_v = matrixXVectors(numpy.dot(matrix, stepper.getMatrix()), vectors)
    pos = stepper.nextPos()
    while (pos < stop):
        cur_v = matrixXVectors(numpy.dot(matrix, stepper.getMatrix()), vectors)
        addBand(last_v, cur_v)
        pos = stepper.nextPos()
        last_v = cur_v
    # Close out with the exact end-of-curve transform so the shape ends
    # precisely at 'stop'.
    cur_v = matrixXVectors(numpy.dot(matrix, curve.call(None, stop)), vectors)
    addBand(last_v, cur_v)
def rotationMatrices():
    """Return Z-axis rotation matrices at 22.5 degree steps, 22.5 to 360."""
    step = math.radians(22.5)
    limit = 2.0 * math.pi + 0.1 * step
    matrices = []
    theta = step
    while (theta < limit):
        matrices.append(geometry.rotationMatrixZ(theta))
        theta += step
    return matrices
#
# Helper classes.
#
class Stepper(object):
    """
    Adaptive walker along a curve.

    Advances the current position in unit probe steps and only reports a
    new position where the curve's orientation has measurably changed (or
    the end is reached), minimizing the number of rendered sub-sections.
    """

    def __init__(self, curve, start, stop):
        self.curve = curve                  # exposes call(model, pos) -> 4x4 transform
        self.pos = start                    # current position along the curve
        self.step = 1.0                     # probe increment
        self.stop = stop                    # end position
        self.mm = curve.call(None, start)   # transform at the current position

    def anglesDiffer(self, new_mm):
        # Compare orientations via the dot products of the three rotation
        # columns; identical frames sum to exactly 3.0.
        similarity = 0.0
        for axis in range(3):
            similarity += numpy.dot(self.mm[:3, axis], new_mm[:3, axis])
        return (3.0 - similarity) > 1.0e-4

    def getMatrix(self):
        return self.mm

    def nextPos(self):
        probe = self.pos
        probe_mm = self.mm
        # March forward until the orientation changes or we pass the end.
        while (probe < self.stop) and not self.anglesDiffer(probe_mm):
            probe += self.step
            probe_mm = self.curve.call(None, probe)
        if probe > self.stop:
            self.pos = self.stop
        elif (probe - self.step) > self.pos:
            # Back up one step so the detected change is bracketed.
            self.pos = probe - self.step
        else:
            self.pos += self.step
        self.mm = self.curve.call(None, self.pos)
        return self.pos
#
# Opensdraw functions.
#
class Axle(interpreter.LCadFunction):
    """
    **axle** - Draw an axle using LDraw primitives.

    :param curve: The curve that the axle should follow.
    :param start: The starting point on the curve.
    :param stop: The stopping point on the curve.
    :param orientation: (optional) Angle in degrees in the XY plane.

    The axle will have the color 16.

    Usage::

     (axle curve 0 10) ; Draw an axle along curve from 0 to 10 (LDU).

    """
    def __init__(self):
        interpreter.LCadFunction.__init__(self, "axle")
        # Closed cross-section outline of a LEGO axle ("+" profile) in
        # homogeneous coordinates; the first and last vertices coincide.
        self.vectors = [numpy.array([6, 0, 0, 1]),
                        numpy.array([5.602, 2, 0, 1]),
                        numpy.array([2, 2, 0, 1]),
                        numpy.array([2, 5.602, 0, 1]),
                        numpy.array([0, 6, 0, 1]),
                        numpy.array([-2, 5.602, 0, 1]),
                        numpy.array([-2, 2, 0, 1]),
                        numpy.array([-5.602, 2, 0, 1]),
                        numpy.array([-6, 0, 0, 1]),
                        numpy.array([-5.602, -2, 0, 1]),
                        numpy.array([-2, -2, 0, 1]),
                        numpy.array([-2, -5.602, 0, 1]),
                        numpy.array([0, -6, 0, 1]),
                        numpy.array([2, -5.602, 0, 1]),
                        numpy.array([2, -2, 0, 1]),
                        numpy.array([5.602, -2, 0, 1]),
                        numpy.array([6, 0, 0, 1])]
        self.setSignature([[interpreter.LCadFunction],
                           [numbers.Number],
                           [numbers.Number],
                           ["optional", [numbers.Number]]])

    def _addBand(self, group, prev_ring, ring):
        # Connect two consecutive cross-sections with edge lines and a
        # band of triangle pairs (one quad per outline segment).
        # Fix: this block was duplicated verbatim in call() - once inside
        # the while loop and once after it.
        for i in range(len(prev_ring) - 1):
            group.addPart(parts.Line(None, numpy.append(prev_ring[i], ring[i]), 16), True)
            group.addPart(parts.Triangle(None, numpy.append(prev_ring[i], [prev_ring[i+1], ring[i]]), 16), True)
            group.addPart(parts.Triangle(None, numpy.append(prev_ring[i+1], [ring[i+1], ring[i]]), 16), True)

    def call(self, model, curve, start, stop, orientation = 0.0):
        # Rotate the cross-section in the XY plane if requested.
        if (orientation != 0.0):
            vectors = matrixXVectors(geometry.rotationMatrixZ(math.radians(orientation)),
                                     self.vectors,
                                     truncate = False)
        else:
            vectors = self.vectors

        group = model.curGroup()
        matrix = group.matrix()
        stepper = Stepper(curve, start, stop)

        # Sweep the profile along the curve, connecting successive rings.
        lastv = matrixXVectors(numpy.dot(matrix, stepper.getMatrix()), vectors)
        pos = stepper.nextPos()
        while (pos < stop):
            curv = matrixXVectors(numpy.dot(matrix, stepper.getMatrix()), vectors)
            self._addBand(group, lastv, curv)
            pos = stepper.nextPos()
            lastv = curv

        # Close out at exactly 'stop' so the axle ends where requested.
        curv = matrixXVectors(numpy.dot(matrix, curve.call(None, stop)), vectors)
        self._addBand(group, lastv, curv)
# Register the axle shape with the opensdraw language.
lcad_functions["axle"] = Axle()
class FlatCable(interpreter.LCadFunction):
    """
    **flat-cable** - Draw a flat cable (i.e. EV3 or NXT style) using LDraw primitives.

    :param curve: The curve that the cable should follow.
    :param start: The starting point on the curve.
    :param stop: The stopping point on the curve.
    :param width: The width of the cable.
    :param radius: The edge radius of the cable.
    :param orientation: (optional) Angle in degrees in the XY plane, default is 0 (the long axis of the cable is along the X axis).

    The flat cable will have the color 16.

    Usage::

     (flat-cable curve 0 10 4 1) ; Draw a 4 LDU wide flat cable with 1 LDU radius edges.

    """
    def __init__(self):
        interpreter.LCadFunction.__init__(self, "flat-cable")
        self.setSignature([[interpreter.LCadFunction],
                           [numbers.Number],
                           [numbers.Number],
                           [numbers.Number],
                           [numbers.Number],
                           ["optional", [numbers.Number]]])

    def call(self, model, curve, start, stop, width, radius, orientation = 0.0):
        group = model.curGroup()
        matrix = group.matrix()
        stepper = Stepper(curve, start, stop)

        # Cross-section: a stadium shape - two 180 degree arcs sampled at
        # 22.5 degree steps (9 points each), centered at +/- width/2 on X.
        half_width = 0.5 * width
        profile = []
        for k in range(9):
            theta = math.radians(270 - 22.5 * k)
            profile.append(numpy.array([radius * math.cos(theta) - half_width,
                                        radius * math.sin(theta),
                                        0,
                                        1.0]))
        for k in range(9):
            theta = math.radians(90 - 22.5 * k)
            profile.append(numpy.array([radius * math.cos(theta) + half_width,
                                        radius * math.sin(theta),
                                        0,
                                        1.0]))
        # Close the outline and flip the winding order.
        profile.append(profile[0])
        profile.reverse()

        # Rotate the cross-section in the XY plane if requested.
        if (orientation != 0):
            profile = matrixXVectors(geometry.rotationMatrixZ(math.radians(orientation)),
                                     profile,
                                     truncate = False)

        # Sweep the cross-section along the curve.
        renderShape(curve, group, matrix, profile, stepper, stop)
# Register the flat cable shape with the opensdraw language.
lcad_functions["flat-cable"] = FlatCable()
class RibbonCable(interpreter.LCadFunction):
    """
    **ribbon-cable** - Draw a ribbon cable using LDraw primitives.

    :param curve: The curve that the cable should follow.
    :param start: The starting point on the curve.
    :param stop: The stopping point on the curve.
    :param strands: The number of strands in the cable.
    :param radius: The radius of a single strand in the cable.
    :param orientation: (optional) Angle in degrees in the XY plane, default is 0 (the long axis of the cable is along the X axis).

    The ribbon cable will have the color 16.

    Usage::

     (ribbon-cable curve 0 10 4 1) ; Draw a 4 stranded ribbon cable with each strand
                                   ; having a radius of 1 LDU.

    """
    def __init__(self):
        interpreter.LCadFunction.__init__(self, "ribbon-cable")
        self.setSignature([[interpreter.LCadFunction],
                           [numbers.Number],
                           [numbers.Number],
                           [numbers.Number],
                           [numbers.Number],
                           ["optional", [numbers.Number]]])

    def _strandArc(self, center_x, radius, angles_deg):
        # One arc of a strand's outline: points at angles_deg (degrees) on a
        # circle of 'radius' centered at (center_x, 0), in homogeneous coords.
        # Fix: this construction was duplicated in six near-identical blocks
        # in call(); only the angle lists differed.
        arc = []
        for a in angles_deg:
            theta = math.radians(a)
            arc.append(numpy.array([center_x + radius * math.cos(theta),
                                     radius * math.sin(theta),
                                     0,
                                     1.0]))
        return arc

    def call(self, model, curve, start, stop, strands, radius, orientation = 0.0):
        group = model.curGroup()
        matrix = group.matrix()
        stepper = Stepper(curve, start, stop)

        # Strand centers are radius*sqrt(2) apart, centered around x = 0.
        cable_width = radius * (strands - 1) * math.sqrt(2)
        x_inc = cable_width/(strands - 1)
        x_start = -0.5 * cable_width

        # Build the closed outline of the whole cable: walk the top of each
        # strand left-to-right, then the bottom right-to-left.  Edge strands
        # get longer arcs so the outline wraps around the cable ends.
        cable_vecs = []
        cur_x = x_start
        i = 0
        # Up one side.
        while (i < strands):
            if (i == 0):
                cable_vecs.extend(self._strandArc(cur_x, radius, [180.0 - 22.5 * j for j in range(6)]))
            elif (i == (strands - 1)):
                cable_vecs.extend(self._strandArc(cur_x, radius, [135 - 22.5 * j for j in range(6)]))
            else:
                cable_vecs.extend(self._strandArc(cur_x, radius, [135 - 22.5 * j for j in range(4)]))
            cur_x += x_inc
            i += 1
        # Down the other.
        while (i > 0):
            cur_x -= x_inc
            i -= 1
            if (i == 0):
                cable_vecs.extend(self._strandArc(cur_x, radius, [-22.5 * j - 45.0 for j in range(7)]))
            elif (i == (strands - 1)):
                cable_vecs.extend(self._strandArc(cur_x, radius, [-22.5 * j for j in range(6)]))
            else:
                cable_vecs.extend(self._strandArc(cur_x, radius, [-22.5 * j - 45.0 for j in range(4)]))
        cable_vecs.reverse()

        # Rotate the cable if necessary.
        if (orientation != 0):
            cable_vecs = matrixXVectors(geometry.rotationMatrixZ(math.radians(orientation)),
                                        cable_vecs,
                                        truncate = False)

        # Draw the cable.
        renderShape(curve, group, matrix, cable_vecs, stepper, stop)
lcad_functions["ribbon-cable"] = RibbonCable()
class Ring(interpreter.LCadFunction):
"""
**ring** - Draw a ring using LDraw primitives.
:param m1: Transform matrix for the first edge of the ring.
:param v1: Vector for the first edge of the ring.
:param m2: Transform matrix for the second edge of the ring.
:param v2: Vector for the second edge of the ring.
:param ccw: Counterclockwise winding (t/nil).
The ring will have the color 16.
Usage::
(ring m1 v1 m2 v2 t) ; Draw a ring with edge 1 defined by m1, v1
; and edge 2 defined by m2, v2, with ccw winding.
"""
def __init__(self):
interpreter.LCadFunction.__init__(self, "ring")
self.matrices = rotationMatrices()
self.setSignature([[lcadTypes.LCadMatrix],
[lcadTypes.LCadVector],
[lcadTypes.LCadMatrix],
[lcadTypes.LCadVector],
[lcadTypes.LCadBoolean]])
def call(self, model, m1, v1, m2, v2, ccw):
group = model.curGroup()
matrix = group.matrix()
m1 = numpy.dot(matrix, m1)
m2 = numpy.dot(matrix, m2)
p1 = numpy.dot(m1, v1)
p2 = numpy.dot(m2, v2)
if interpreter.isTrue(ccw):
for mz in self.matrices:
p3 = numpy.dot(m1, numpy.dot(mz, v1))
p4 = numpy.dot(m2, numpy.dot(mz, v2))
group.addPart(parts.Triangle(None, numpy.append(p1[0:3], [p2[0:3], p3[:3]]), 16), True)
group.addPart(parts.Triangle(None, numpy.append(p3[0:3], [p2[0:3], p4[:3]]), 16), True)
p1 = p3
p2 = p4
else:
for mz in self.matrices:
p3 = numpy.dot(m1, numpy.dot(mz, v1))
p4 = numpy.dot(m2, numpy.dot(mz, v2))
group.addPart(parts.Triangle(None, numpy.append(p2[:3], [p1[:3], p3[:3]]), 16), True)
group.addPart(parts.Triangle(None, numpy.append(p2[:3], [p3[:3], p4[:3]]), 16), True)
p1 = p3
p2 = p4
lcad_functions["ring"] = Ring()
class Rod(interpreter.LCadFunction):
"""
**rod** - Draw a rod using LDraw primitives.
:param curve: The curve that the rod should follow.
:param start: The starting point on the curve.
:param stop: The stopping point on the curve.
:param radius: The radius of the rod.
The rod will have the color 16.
Usage::
(rod curve 0 10 2) ; Draw a 2 LDU diameter rod from 0 to 10 along curve.
"""
def __init__(self):
interpreter.LCadFunction.__init__(self, "rod")
self.matrices = rotationMatrices()
self.setSignature([[interpreter.LCadFunction],
[numbers.Number],
[numbers.Number],
[numbers.Number]])
def call(self, model, curve, start, stop, radius):
group = model.curGroup()
matrix = group.matrix()
stepper = Stepper(curve, start, stop)
# Create vectors.
vectors = createVectors(self.matrices, numpy.array([radius, 0, 0, 1]))
# Draw.
renderShape(curve, group, matrix, vectors, stepper, stop)
lcad_functions["rod"] = Rod()
class Tube(interpreter.LCadFunction):
"""
**tube** - Draw a tube using LDraw primitives.
:param curve: The curve that the tube should follow.
:param start: The starting point on the curve.
:param stop: The stopping point on the curve.
:param inner_radius: The inner radius of the tube.
:param outer_radius: The outer radius of the tube.
The tube will have the color 16.
Usage::
(tube curve 0 10 2 3) ; Draw a 2 LDU inner diameter, 3 LDU outer diameter
; tube from 0 to 10 along curve.
"""
def __init__(self):
interpreter.LCadFunction.__init__(self, "tube")
self.matrices = rotationMatrices()
self.setSignature([[interpreter.LCadFunction],
[numbers.Number],
[numbers.Number],
[numbers.Number],
[numbers.Number]])
def call(self, model, curve, start, stop, inner_radius, outer_radius):
group = model.curGroup()
matrix = group.matrix()
stepper = Stepper(curve, start, stop)
# Create vectors.
inner_vecs = createVectors(self.matrices, numpy.array([inner_radius, 0, 0, 1]))
outer_vecs = createVectors(self.matrices, numpy.array([outer_radius, 0, 0, 1]))
# Starting ring.
cm = numpy.dot(matrix, stepper.getMatrix())
last_inner = matrixXVectors(cm, inner_vecs)
last_outer = matrixXVectors(cm, outer_vecs)
n_vert = len(last_inner) - 1
pos = stepper.nextPos()
while (pos < stop):
cm = numpy.dot(matrix, stepper.getMatrix())
cur_inner = matrixXVectors(cm, inner_vecs)
cur_outer = matrixXVectors(cm, outer_vecs)
for i in range(n_vert):
# Inner wall.
group.addPart(parts.Triangle(None, numpy.append(last_inner[i+1], [last_inner[i], cur_inner[i]]), 16), True)
group.addPart(parts.Triangle(None, numpy.append(last_inner[i+1], [cur_inner[i], cur_inner[i+1]]), 16), True)
# Outer wall.
group.addPart(parts.Line(None, numpy.append(last_outer[i], cur_outer[i]), 16), True)
group.addPart(parts.Triangle(None, numpy.append(last_outer[i], [last_outer[i+1], cur_outer[i]]), 16), True)
group.addPart(parts.Triangle(None, numpy.append(last_outer[i+1], [cur_outer[i+1], cur_outer[i]]), 16), True)
pos = stepper.nextPos()
last_inner = cur_inner
last_outer = cur_outer
cm = numpy.dot(matrix, curve.call(None, stop))
cur_inner = matrixXVectors(cm, inner_vecs)
cur_outer = matrixXVectors(cm, outer_vecs)
for i in range(n_vert):
# Inner wall.
group.addPart(parts.Triangle(None, numpy.append(last_inner[i+1], [last_inner[i], cur_inner[i]]), 16), True)
group.addPart(parts.Triangle(None, numpy.append(last_inner[i+1], [cur_inner[i], cur_inner[i+1]]), 16), True)
# Outer wall.
group.addPart(parts.Line(None, numpy.append(last_outer[i], cur_outer[i]), 16), True)
group.addPart(parts.Triangle(None, numpy.append(last_outer[i], [last_outer[i+1], cur_outer[i]]), 16), True)
group.addPart(parts.Triangle(None, numpy.append(last_outer[i+1], [cur_outer[i+1], cur_outer[i]]), 16), True)
lcad_functions["tube"] = Tube()
| [
"math.sqrt",
"math.radians",
"opensdraw.lcad_language.interpreter.LCadFunction.__init__",
"math.sin",
"opensdraw.lcad_language.interpreter.isTrue",
"numpy.append",
"numpy.array",
"math.cos",
"opensdraw.lcad_language.geometry.rotationMatrixZ",
"numpy.dot"
] | [((2166, 2184), 'math.radians', 'math.radians', (['(22.5)'], {}), '(22.5)\n', (2178, 2184), False, 'import math\n'), ((4050, 4097), 'opensdraw.lcad_language.interpreter.LCadFunction.__init__', 'interpreter.LCadFunction.__init__', (['self', '"""axle"""'], {}), "(self, 'axle')\n", (4083, 4097), True, 'import opensdraw.lcad_language.interpreter as interpreter\n'), ((7558, 7611), 'opensdraw.lcad_language.interpreter.LCadFunction.__init__', 'interpreter.LCadFunction.__init__', (['self', '"""flat-cable"""'], {}), "(self, 'flat-cable')\n", (7591, 7611), True, 'import opensdraw.lcad_language.interpreter as interpreter\n'), ((10252, 10307), 'opensdraw.lcad_language.interpreter.LCadFunction.__init__', 'interpreter.LCadFunction.__init__', (['self', '"""ribbon-cable"""'], {}), "(self, 'ribbon-cable')\n", (10285, 10307), True, 'import opensdraw.lcad_language.interpreter as interpreter\n'), ((14972, 15019), 'opensdraw.lcad_language.interpreter.LCadFunction.__init__', 'interpreter.LCadFunction.__init__', (['self', '"""ring"""'], {}), "(self, 'ring')\n", (15005, 15019), True, 'import opensdraw.lcad_language.interpreter as interpreter\n'), ((15449, 15470), 'numpy.dot', 'numpy.dot', (['matrix', 'm1'], {}), '(matrix, m1)\n', (15458, 15470), False, 'import numpy\n'), ((15484, 15505), 'numpy.dot', 'numpy.dot', (['matrix', 'm2'], {}), '(matrix, m2)\n', (15493, 15505), False, 'import numpy\n'), ((15520, 15537), 'numpy.dot', 'numpy.dot', (['m1', 'v1'], {}), '(m1, v1)\n', (15529, 15537), False, 'import numpy\n'), ((15551, 15568), 'numpy.dot', 'numpy.dot', (['m2', 'v2'], {}), '(m2, v2)\n', (15560, 15568), False, 'import numpy\n'), ((15580, 15603), 'opensdraw.lcad_language.interpreter.isTrue', 'interpreter.isTrue', (['ccw'], {}), '(ccw)\n', (15598, 15603), True, 'import opensdraw.lcad_language.interpreter as interpreter\n'), ((16940, 16986), 'opensdraw.lcad_language.interpreter.LCadFunction.__init__', 'interpreter.LCadFunction.__init__', (['self', '"""rod"""'], {}), "(self, 'rod')\n", 
(16973, 16986), True, 'import opensdraw.lcad_language.interpreter as interpreter\n'), ((18218, 18265), 'opensdraw.lcad_language.interpreter.LCadFunction.__init__', 'interpreter.LCadFunction.__init__', (['self', '"""tube"""'], {}), "(self, 'tube')\n", (18251, 18265), True, 'import opensdraw.lcad_language.interpreter as interpreter\n'), ((624, 645), 'numpy.dot', 'numpy.dot', (['mm', 'vector'], {}), '(mm, vector)\n', (633, 645), False, 'import numpy\n'), ((2282, 2313), 'opensdraw.lcad_language.geometry.rotationMatrixZ', 'geometry.rotationMatrixZ', (['angle'], {}), '(angle)\n', (2306, 2313), True, 'import opensdraw.lcad_language.geometry as geometry\n'), ((2808, 2848), 'numpy.dot', 'numpy.dot', (['self.mm[:3, i]', 'new_mm[:3, i]'], {}), '(self.mm[:3, i], new_mm[:3, i])\n', (2817, 2848), False, 'import numpy\n'), ((4123, 4148), 'numpy.array', 'numpy.array', (['[6, 0, 0, 1]'], {}), '([6, 0, 0, 1])\n', (4134, 4148), False, 'import numpy\n'), ((4174, 4203), 'numpy.array', 'numpy.array', (['[5.602, 2, 0, 1]'], {}), '([5.602, 2, 0, 1])\n', (4185, 4203), False, 'import numpy\n'), ((4229, 4254), 'numpy.array', 'numpy.array', (['[2, 2, 0, 1]'], {}), '([2, 2, 0, 1])\n', (4240, 4254), False, 'import numpy\n'), ((4280, 4309), 'numpy.array', 'numpy.array', (['[2, 5.602, 0, 1]'], {}), '([2, 5.602, 0, 1])\n', (4291, 4309), False, 'import numpy\n'), ((4335, 4360), 'numpy.array', 'numpy.array', (['[0, 6, 0, 1]'], {}), '([0, 6, 0, 1])\n', (4346, 4360), False, 'import numpy\n'), ((4386, 4416), 'numpy.array', 'numpy.array', (['[-2, 5.602, 0, 1]'], {}), '([-2, 5.602, 0, 1])\n', (4397, 4416), False, 'import numpy\n'), ((4442, 4468), 'numpy.array', 'numpy.array', (['[-2, 2, 0, 1]'], {}), '([-2, 2, 0, 1])\n', (4453, 4468), False, 'import numpy\n'), ((4494, 4524), 'numpy.array', 'numpy.array', (['[-5.602, 2, 0, 1]'], {}), '([-5.602, 2, 0, 1])\n', (4505, 4524), False, 'import numpy\n'), ((4550, 4576), 'numpy.array', 'numpy.array', (['[-6, 0, 0, 1]'], {}), '([-6, 0, 0, 1])\n', (4561, 4576), 
False, 'import numpy\n'), ((4602, 4633), 'numpy.array', 'numpy.array', (['[-5.602, -2, 0, 1]'], {}), '([-5.602, -2, 0, 1])\n', (4613, 4633), False, 'import numpy\n'), ((4659, 4686), 'numpy.array', 'numpy.array', (['[-2, -2, 0, 1]'], {}), '([-2, -2, 0, 1])\n', (4670, 4686), False, 'import numpy\n'), ((4712, 4743), 'numpy.array', 'numpy.array', (['[-2, -5.602, 0, 1]'], {}), '([-2, -5.602, 0, 1])\n', (4723, 4743), False, 'import numpy\n'), ((4769, 4795), 'numpy.array', 'numpy.array', (['[0, -6, 0, 1]'], {}), '([0, -6, 0, 1])\n', (4780, 4795), False, 'import numpy\n'), ((4821, 4851), 'numpy.array', 'numpy.array', (['[2, -5.602, 0, 1]'], {}), '([2, -5.602, 0, 1])\n', (4832, 4851), False, 'import numpy\n'), ((4877, 4903), 'numpy.array', 'numpy.array', (['[2, -2, 0, 1]'], {}), '([2, -2, 0, 1])\n', (4888, 4903), False, 'import numpy\n'), ((4929, 4959), 'numpy.array', 'numpy.array', (['[5.602, -2, 0, 1]'], {}), '([5.602, -2, 0, 1])\n', (4940, 4959), False, 'import numpy\n'), ((4985, 5010), 'numpy.array', 'numpy.array', (['[6, 0, 0, 1]'], {}), '([6, 0, 0, 1])\n', (4996, 5010), False, 'import numpy\n'), ((8303, 8331), 'math.radians', 'math.radians', (['(270 - 22.5 * i)'], {}), '(270 - 22.5 * i)\n', (8315, 8331), False, 'import math\n'), ((8647, 8674), 'math.radians', 'math.radians', (['(90 - 22.5 * i)'], {}), '(90 - 22.5 * i)\n', (8659, 8674), False, 'import math\n'), ((10915, 10927), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (10924, 10927), False, 'import math\n'), ((17463, 17493), 'numpy.array', 'numpy.array', (['[radius, 0, 0, 1]'], {}), '([radius, 0, 0, 1])\n', (17474, 17493), False, 'import numpy\n'), ((18810, 18846), 'numpy.array', 'numpy.array', (['[inner_radius, 0, 0, 1]'], {}), '([inner_radius, 0, 0, 1])\n', (18821, 18846), False, 'import numpy\n'), ((18898, 18934), 'numpy.array', 'numpy.array', (['[outer_radius, 0, 0, 1]'], {}), '([outer_radius, 0, 0, 1])\n', (18909, 18934), False, 'import numpy\n'), ((918, 940), 'numpy.dot', 'numpy.dot', (['matrix', 'vec'], 
{}), '(matrix, vec)\n', (927, 940), False, 'import numpy\n'), ((1943, 1993), 'numpy.append', 'numpy.append', (['last_v[i]', '[last_v[i + 1], cur_v[i]]'], {}), '(last_v[i], [last_v[i + 1], cur_v[i]])\n', (1955, 1993), False, 'import numpy\n'), ((2047, 2100), 'numpy.append', 'numpy.append', (['last_v[i + 1]', '[cur_v[i + 1], cur_v[i]]'], {}), '(last_v[i + 1], [cur_v[i + 1], cur_v[i]])\n', (2059, 2100), False, 'import numpy\n'), ((820, 842), 'numpy.dot', 'numpy.dot', (['matrix', 'vec'], {}), '(matrix, vec)\n', (829, 842), False, 'import numpy\n'), ((1467, 1517), 'numpy.append', 'numpy.append', (['last_v[i]', '[last_v[i + 1], cur_v[i]]'], {}), '(last_v[i], [last_v[i + 1], cur_v[i]])\n', (1479, 1517), False, 'import numpy\n'), ((1575, 1628), 'numpy.append', 'numpy.append', (['last_v[i + 1]', '[cur_v[i + 1], cur_v[i]]'], {}), '(last_v[i + 1], [cur_v[i + 1], cur_v[i]])\n', (1587, 1628), False, 'import numpy\n'), ((5380, 5405), 'math.radians', 'math.radians', (['orientation'], {}), '(orientation)\n', (5392, 5405), False, 'import math\n'), ((6558, 6589), 'numpy.append', 'numpy.append', (['lastv[i]', 'curv[i]'], {}), '(lastv[i], curv[i])\n', (6570, 6589), False, 'import numpy\n'), ((6649, 6696), 'numpy.append', 'numpy.append', (['lastv[i]', '[lastv[i + 1], curv[i]]'], {}), '(lastv[i], [lastv[i + 1], curv[i]])\n', (6661, 6696), False, 'import numpy\n'), ((6754, 6804), 'numpy.append', 'numpy.append', (['lastv[i + 1]', '[curv[i + 1], curv[i]]'], {}), '(lastv[i + 1], [curv[i + 1], curv[i]])\n', (6766, 6804), False, 'import numpy\n'), ((9179, 9204), 'math.radians', 'math.radians', (['orientation'], {}), '(orientation)\n', (9191, 9204), False, 'import math\n'), ((11258, 11288), 'math.radians', 'math.radians', (['(180.0 - 22.5 * j)'], {}), '(180.0 - 22.5 * j)\n', (11270, 11288), False, 'import math\n'), ((12696, 12726), 'math.radians', 'math.radians', (['(-22.5 * j - 45.0)'], {}), '(-22.5 * j - 45.0)\n', (12708, 12726), False, 'import math\n'), ((14029, 14054), 'math.radians', 
'math.radians', (['orientation'], {}), '(orientation)\n', (14041, 14054), False, 'import math\n'), ((15689, 15706), 'numpy.dot', 'numpy.dot', (['mz', 'v1'], {}), '(mz, v1)\n', (15698, 15706), False, 'import numpy\n'), ((15743, 15760), 'numpy.dot', 'numpy.dot', (['mz', 'v2'], {}), '(mz, v2)\n', (15752, 15760), False, 'import numpy\n'), ((16116, 16133), 'numpy.dot', 'numpy.dot', (['mz', 'v1'], {}), '(mz, v1)\n', (16125, 16133), False, 'import numpy\n'), ((16170, 16187), 'numpy.dot', 'numpy.dot', (['mz', 'v2'], {}), '(mz, v2)\n', (16179, 16187), False, 'import numpy\n'), ((20460, 20522), 'numpy.append', 'numpy.append', (['last_inner[i + 1]', '[last_inner[i], cur_inner[i]]'], {}), '(last_inner[i + 1], [last_inner[i], cur_inner[i]])\n', (20472, 20522), False, 'import numpy\n'), ((20580, 20645), 'numpy.append', 'numpy.append', (['last_inner[i + 1]', '[cur_inner[i], cur_inner[i + 1]]'], {}), '(last_inner[i + 1], [cur_inner[i], cur_inner[i + 1]])\n', (20592, 20645), False, 'import numpy\n'), ((20724, 20765), 'numpy.append', 'numpy.append', (['last_outer[i]', 'cur_outer[i]'], {}), '(last_outer[i], cur_outer[i])\n', (20736, 20765), False, 'import numpy\n'), ((20825, 20887), 'numpy.append', 'numpy.append', (['last_outer[i]', '[last_outer[i + 1], cur_outer[i]]'], {}), '(last_outer[i], [last_outer[i + 1], cur_outer[i]])\n', (20837, 20887), False, 'import numpy\n'), ((20945, 21010), 'numpy.append', 'numpy.append', (['last_outer[i + 1]', '[cur_outer[i + 1], cur_outer[i]]'], {}), '(last_outer[i + 1], [cur_outer[i + 1], cur_outer[i]])\n', (20957, 21010), False, 'import numpy\n'), ((6059, 6090), 'numpy.append', 'numpy.append', (['lastv[i]', 'curv[i]'], {}), '(lastv[i], curv[i])\n', (6071, 6090), False, 'import numpy\n'), ((6154, 6201), 'numpy.append', 'numpy.append', (['lastv[i]', '[lastv[i + 1], curv[i]]'], {}), '(lastv[i], [lastv[i + 1], curv[i]])\n', (6166, 6201), False, 'import numpy\n'), ((6263, 6313), 'numpy.append', 'numpy.append', (['lastv[i + 1]', '[curv[i + 1], curv[i]]'], 
{}), '(lastv[i + 1], [curv[i + 1], curv[i]])\n', (6275, 6313), False, 'import numpy\n'), ((11665, 11693), 'math.radians', 'math.radians', (['(135 - 22.5 * j)'], {}), '(135 - 22.5 * j)\n', (11677, 11693), False, 'import math\n'), ((12115, 12143), 'math.radians', 'math.radians', (['(135 - 22.5 * j)'], {}), '(135 - 22.5 * j)\n', (12127, 12143), False, 'import math\n'), ((13103, 13126), 'math.radians', 'math.radians', (['(-22.5 * j)'], {}), '(-22.5 * j)\n', (13115, 13126), False, 'import math\n'), ((13548, 13578), 'math.radians', 'math.radians', (['(-22.5 * j - 45.0)'], {}), '(-22.5 * j - 45.0)\n', (13560, 13578), False, 'import math\n'), ((15813, 15853), 'numpy.append', 'numpy.append', (['p1[0:3]', '[p2[0:3], p3[:3]]'], {}), '(p1[0:3], [p2[0:3], p3[:3]])\n', (15825, 15853), False, 'import numpy\n'), ((15917, 15957), 'numpy.append', 'numpy.append', (['p3[0:3]', '[p2[0:3], p4[:3]]'], {}), '(p3[0:3], [p2[0:3], p4[:3]])\n', (15929, 15957), False, 'import numpy\n'), ((16240, 16278), 'numpy.append', 'numpy.append', (['p2[:3]', '[p1[:3], p3[:3]]'], {}), '(p2[:3], [p1[:3], p3[:3]])\n', (16252, 16278), False, 'import numpy\n'), ((16342, 16380), 'numpy.append', 'numpy.append', (['p2[:3]', '[p3[:3], p4[:3]]'], {}), '(p2[:3], [p3[:3], p4[:3]])\n', (16354, 16380), False, 'import numpy\n'), ((19509, 19571), 'numpy.append', 'numpy.append', (['last_inner[i + 1]', '[last_inner[i], cur_inner[i]]'], {}), '(last_inner[i + 1], [last_inner[i], cur_inner[i]])\n', (19521, 19571), False, 'import numpy\n'), ((19633, 19698), 'numpy.append', 'numpy.append', (['last_inner[i + 1]', '[cur_inner[i], cur_inner[i + 1]]'], {}), '(last_inner[i + 1], [cur_inner[i], cur_inner[i + 1]])\n', (19645, 19698), False, 'import numpy\n'), ((19785, 19826), 'numpy.append', 'numpy.append', (['last_outer[i]', 'cur_outer[i]'], {}), '(last_outer[i], cur_outer[i])\n', (19797, 19826), False, 'import numpy\n'), ((19890, 19952), 'numpy.append', 'numpy.append', (['last_outer[i]', '[last_outer[i + 1], cur_outer[i]]'], {}), 
'(last_outer[i], [last_outer[i + 1], cur_outer[i]])\n', (19902, 19952), False, 'import numpy\n'), ((20014, 20079), 'numpy.append', 'numpy.append', (['last_outer[i + 1]', '[cur_outer[i + 1], cur_outer[i]]'], {}), '(last_outer[i + 1], [cur_outer[i + 1], cur_outer[i]])\n', (20026, 20079), False, 'import numpy\n'), ((8463, 8478), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (8471, 8478), False, 'import math\n'), ((8806, 8821), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (8814, 8821), False, 'import math\n'), ((8384, 8399), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (8392, 8399), False, 'import math\n'), ((8727, 8742), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (8735, 8742), False, 'import math\n'), ((11434, 11449), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (11442, 11449), False, 'import math\n'), ((12872, 12887), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (12880, 12887), False, 'import math\n'), ((11357, 11372), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (11365, 11372), False, 'import math\n'), ((11839, 11854), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (11847, 11854), False, 'import math\n'), ((12289, 12304), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (12297, 12304), False, 'import math\n'), ((12795, 12810), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (12803, 12810), False, 'import math\n'), ((13272, 13287), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (13280, 13287), False, 'import math\n'), ((13724, 13739), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (13732, 13739), False, 'import math\n'), ((11762, 11777), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (11770, 11777), False, 'import math\n'), ((12212, 12227), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (12220, 12227), False, 'import math\n'), ((13195, 13210), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (13203, 13210), False, 'import math\n'), ((13647, 13662), 
'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (13655, 13662), False, 'import math\n')] |
import numpy as np
from NeuralNetwork import NeuralNetwork
import scipy.misc
import time
data_file_train = open(name="../../Downloads/mnist_train.csv", mode="r")
data_list_train = data_file_train.readlines()
data_file_train.close()
# self created image (png) with a number in it
img_array = scipy.misc.imread(name="../../Documents/two.png", flatten=True)
img_data = 255.0 - img_array.reshape(784)
img_data = (img_data / 255.0 * 0.99) + 0.01
# number of input, hidden and output nodes
INPUT_NODES = 784
HIDDEN_NODES = 200
OUTPUT_NODES = 10
for lr in range(2, 9, 1):
# learning rate
LEARNING_RATE = lr * 0.05
print("setting learning rate to {0}".format(LEARNING_RATE))
# create instance of neural networkr
nn = NeuralNetwork(INPUT_NODES, HIDDEN_NODES, OUTPUT_NODES, LEARNING_RATE)
# epochs is the number of times the training data set is used for training
EPOCHS = 5
for e in range(EPOCHS):
start_time = time.time()
for record in data_list_train:
all_values = record.split(",")
# adjust greyscale 0-255 to value from 0.01 to 1 and add 0.01 to prevent zero values
inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
targets = np.zeros(OUTPUT_NODES) + 0.01
# set target number
targets[int(all_values[0])] = 0.99
# and train
nn.train(inputs, targets)
duration = time.time() - start_time
print("run {0} is complete, duration was {1:0.2f} seconds".format(e, duration))
# query with user picture
outputs = nn.query(img_data)
# get the output = result with max
label = np.argmax(outputs)
print(label)
| [
"numpy.argmax",
"numpy.asfarray",
"numpy.zeros",
"time.time",
"NeuralNetwork.NeuralNetwork"
] | [((734, 803), 'NeuralNetwork.NeuralNetwork', 'NeuralNetwork', (['INPUT_NODES', 'HIDDEN_NODES', 'OUTPUT_NODES', 'LEARNING_RATE'], {}), '(INPUT_NODES, HIDDEN_NODES, OUTPUT_NODES, LEARNING_RATE)\n', (747, 803), False, 'from NeuralNetwork import NeuralNetwork\n'), ((1654, 1672), 'numpy.argmax', 'np.argmax', (['outputs'], {}), '(outputs)\n', (1663, 1672), True, 'import numpy as np\n'), ((949, 960), 'time.time', 'time.time', ([], {}), '()\n', (958, 960), False, 'import time\n'), ((1426, 1437), 'time.time', 'time.time', ([], {}), '()\n', (1435, 1437), False, 'import time\n'), ((1235, 1257), 'numpy.zeros', 'np.zeros', (['OUTPUT_NODES'], {}), '(OUTPUT_NODES)\n', (1243, 1257), True, 'import numpy as np\n'), ((1162, 1189), 'numpy.asfarray', 'np.asfarray', (['all_values[1:]'], {}), '(all_values[1:])\n', (1173, 1189), True, 'import numpy as np\n')] |
"""
Created on Mon Mar 27 21:02:14 2017
@author: Robert
"""
"""
This code creates a number of classification plots with different
penalty parameters, so you can visualise how the algorithm punishes
overfitting. I compiled some images I created into a gif, which can be
found elsewhere in this repo.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-deep')
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
# Reading in data
ds = pd.read_csv("Social_Network_Ads.csv")
X = ds.iloc[:, 2:4].values
y = ds.iloc[:,4].values
# Splitting and scaling
X_train, X_test, y_train, y_test = train_test_split(X,y)
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.fit_transform(X_test)
# Plot
from sklearn.svm import SVC
from matplotlib.colors import ListedColormap
numplots = 30
increment = 10
for c in range(increment, increment*numplots + 1,increment):
clf = SVC(kernel='rbf', C = c/10)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print((cm[0][0]+cm[1][1])/sum(sum(cm)))
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(X_set[:,0].min() - 1,
X_set[:,0].max() + 1,
step = 0.01),
np.arange(X_set[:,1].min() - 1,
X_set[:,1].max() + 1,
step = 0.01))
boundary = clf.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape)
fig = plt.figure(int(c/10))
plt.contourf(X1, X2, boundary, alpha = 0.75,
cmap = ListedColormap(('#fc7a74', '#6ff785')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i,j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i),
label = j, s = 8)
title = "Penalty parameter = " + str(c/10) + ", error = " + str((cm[0][0]+cm[1][1])/sum(sum(cm)))
plt.title(title)
plt.xlabel('Age')
plt.ylabel('Salary')
plt.legend()
plt.figure()
fname = str(c/10) + '.png'
fig.savefig(fname)
plt.cla()
| [
"matplotlib.pyplot.title",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.cla",
"sklearn.svm.SVC",
"sklear... | [((383, 412), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-deep"""'], {}), "('seaborn-deep')\n", (396, 412), True, 'import matplotlib.pyplot as plt\n'), ((584, 621), 'pandas.read_csv', 'pd.read_csv', (['"""Social_Network_Ads.csv"""'], {}), "('Social_Network_Ads.csv')\n", (595, 621), True, 'import pandas as pd\n'), ((734, 756), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {}), '(X, y)\n', (750, 756), False, 'from sklearn.model_selection import train_test_split\n'), ((764, 780), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (778, 780), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1038, 1065), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'C': '(c / 10)'}), "(kernel='rbf', C=c / 10)\n", (1041, 1065), False, 'from sklearn.svm import SVC\n'), ((1138, 1170), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1154, 1170), False, 'from sklearn.metrics import confusion_matrix\n'), ((2205, 2221), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2214, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2243), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Age"""'], {}), "('Age')\n", (2236, 2243), True, 'import matplotlib.pyplot as plt\n'), ((2248, 2268), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Salary"""'], {}), "('Salary')\n", (2258, 2268), True, 'import matplotlib.pyplot as plt\n'), ((2273, 2285), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2283, 2285), True, 'import matplotlib.pyplot as plt\n'), ((2290, 2302), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2300, 2302), True, 'import matplotlib.pyplot as plt\n'), ((2361, 2370), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (2368, 2370), True, 'import matplotlib.pyplot as plt\n'), ((1907, 1923), 'numpy.unique', 'np.unique', (['y_set'], {}), '(y_set)\n', (1916, 1923), True, 
'import numpy as np\n'), ((1776, 1814), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('#fc7a74', '#6ff785')"], {}), "(('#fc7a74', '#6ff785'))\n", (1790, 1814), False, 'from matplotlib.colors import ListedColormap\n'), ((2014, 2046), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('red', 'green')"], {}), "(('red', 'green'))\n", (2028, 2046), False, 'from matplotlib.colors import ListedColormap\n')] |
import numpy as np
def block_augment(sample):
"""Performs contrast/brightness augmentation on img.
Args:
sample: Dictionary of (np array: <ch,z,x,y>) image and mask
"""
l = sample["image"].shape[1]
if np.random.uniform() < 0.2:
r = np.random.rand()
xloc = np.random.randint(l//2-50,l//2+50)
yloc = np.random.randint(l//2-50,l//2+50)
sample["image"][:,:xloc,:yloc] = sample["image"][:,:xloc,:yloc] - (np.random.rand() - 0.5)*0.5
sample["image"][:,:xloc,yloc:] = sample["image"][:,:xloc,yloc:] - (np.random.rand() - 0.5)*0.5
sample["image"][:,xloc:,yloc:] = sample["image"][:,xloc:,yloc:] - (np.random.rand() - 0.5)*0.5
sample["image"][:,xloc:,:yloc] = sample["image"][:,xloc:,:yloc] - (np.random.rand() - 0.5)*0.5
sample["image"] = np.clip(sample["image"], 0, 1)
return sample | [
"numpy.random.rand",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.clip"
] | [((213, 232), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (230, 232), True, 'import numpy as np\n'), ((247, 263), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (261, 263), True, 'import numpy as np\n'), ((273, 316), 'numpy.random.randint', 'np.random.randint', (['(l // 2 - 50)', '(l // 2 + 50)'], {}), '(l // 2 - 50, l // 2 + 50)\n', (290, 316), True, 'import numpy as np\n'), ((317, 360), 'numpy.random.randint', 'np.random.randint', (['(l // 2 - 50)', '(l // 2 + 50)'], {}), '(l // 2 - 50, l // 2 + 50)\n', (334, 360), True, 'import numpy as np\n'), ((762, 792), 'numpy.clip', 'np.clip', (["sample['image']", '(0)', '(1)'], {}), "(sample['image'], 0, 1)\n", (769, 792), True, 'import numpy as np\n'), ((422, 438), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (436, 438), True, 'import numpy as np\n'), ((519, 535), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (533, 535), True, 'import numpy as np\n'), ((616, 632), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (630, 632), True, 'import numpy as np\n'), ((713, 729), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (727, 729), True, 'import numpy as np\n')] |
import heapq
import numpy as np
from .matrix_serializer import load_vocabulary
class Embedding:
"""
Base class for all embeddings. SGNS can be directly instantiated with it.
"""
def __init__(self, path, normalize=True):
self.m = np.load(path + '.npy')
if normalize:
self.normalize()
self.dim = self.m.shape[1]
self.wi, self.iw = load_vocabulary(path + '.vocab')
def normalize(self):
norm = np.sqrt(np.sum(self.m * self.m, axis=1))
self.m = self.m / norm[:, np.newaxis]
self.norm = norm
def normalize_columns(self):
norm = np.sqrt(np.sum(self.m * self.m, axis=1))
self.m = self.m / norm[:, np.newaxis]
self.norm = norm
def represent(self, w):
if w in self.wi:
return self.m[self.wi[w], :]
else:
return np.zeros(self.dim)
def similarity(self, w1, w2):
"""
Assumes the vectors have been normalized.
"""
return self.represent(w1).dot(self.represent(w2))
def closest(self, w, n=10):
"""
Assumes the vectors have been normalized.
"""
scores = self.m.dot(self.represent(w))
return heapq.nlargest(n, zip(scores, self.iw))
class SVDEmbedding(Embedding):
    """
    Embeddings obtained from a truncated SVD.

    The exponent ``eig`` controls how strongly the singular values weight
    the singular vectors (0.0 = not at all, 1.0 = fully).  With
    ``transpose=True`` the V^T factor and the context vocabulary are used,
    which yields context rather than word embeddings.
    """
    def __init__(self, path, normalize=True, eig=0.0, transpose=False):
        # Pick the factor / vocabulary pair for word vs. context embeddings.
        if transpose:
            ut = np.load(path + '.vt.npy')
            self.wi, self.iw = load_vocabulary(path + '.contexts.vocab')
        else:
            ut = np.load(path + '.ut.npy')
            self.wi, self.iw = load_vocabulary(path + '.words.vocab')
        self.s = np.load(path + '.s.npy')
        # Weight the vectors by the singular values raised to ``eig``; the
        # two common exponents avoid a needless np.power call.
        if eig == 1.0:
            self.m = self.s * ut.T
        elif eig != 0.0:
            self.m = np.power(self.s, eig) * ut.T
        else:
            self.m = ut.T
        self.dim = self.m.shape[1]
        if normalize:
            self.normalize()
class EnsembleEmbedding(Embedding):
    """
    Adds the vectors of two distinct embeddings (of the same dimensionality) to create a new representation.
    Commonly used by adding the context embeddings to the word embeddings.
    """
    def __init__(self, emb1, emb2, normalize=False):
        """
        Sum two embeddings into one.

        Words present in both vocabularies get the element-wise sum of
        their two vectors; words present in only one vocabulary keep that
        single vector.  Assumes emb1.dim == emb2.dim.
        """
        self.dim = emb1.dim
        # set(d) iterates a dict's keys; this replaces the Python-2-only
        # dict.viewkeys() so the class also runs under Python 3.
        vocab1 = set(emb1.wi)
        vocab2 = set(emb2.wi)
        joint_vocab = list(vocab1 & vocab2)
        only_vocab1 = list(vocab1 - vocab2)
        only_vocab2 = list(vocab2 - vocab1)
        self.iw = joint_vocab + only_vocab1 + only_vocab2
        self.wi = dict([(w, i) for i, w in enumerate(self.iw)])
        # Shared words: sum the corresponding rows of both matrices.
        m_joint = emb1.m[[emb1.wi[w] for w in joint_vocab]] + \
                  emb2.m[[emb2.wi[w] for w in joint_vocab]]
        m_only1 = emb1.m[[emb1.wi[w] for w in only_vocab1]]
        m_only2 = emb2.m[[emb2.wi[w] for w in only_vocab2]]
        self.m = np.vstack([m_joint, m_only1, m_only2])
        if normalize:
            self.normalize()
class DualEmbeddingWrapper:
    """
    Wraps word and context embeddings to allow investigation of first-order similarity.
    """
    def __init__(self, ew, ec):
        # ew: word embedding, ec: context embedding (same interface as Embedding).
        self.ew = ew
        self.ec = ec

    def closest_contexts(self, w, n=10):
        """Return the n (score, context) pairs most associated with word w."""
        scores = self.ec.m.dot(self.ew.represent(w))
        # list() is required on Python 3, where zip() returns an iterator
        # that does not support slicing; [1:] drops the first pair, as before.
        pairs = list(zip(scores, self.ec.iw))[1:]
        return heapq.nlargest(n, pairs)

    def similarity_first_order(self, w, c):
        """Dot product between word w's word vector and context c's context vector."""
        return self.ew.represent(w).dot(self.ec.represent(c))
| [
"numpy.load",
"numpy.sum",
"numpy.power",
"numpy.zeros",
"heapq.nlargest",
"numpy.vstack"
] | [((271, 293), 'numpy.load', 'np.load', (["(path + '.npy')"], {}), "(path + '.npy')\n", (278, 293), True, 'import numpy as np\n'), ((1885, 1909), 'numpy.load', 'np.load', (["(path + '.s.npy')"], {}), "(path + '.s.npy')\n", (1892, 1909), True, 'import numpy as np\n'), ((3174, 3212), 'numpy.vstack', 'np.vstack', (['[m_joint, m_only1, m_only2]'], {}), '([m_joint, m_only1, m_only2])\n', (3183, 3212), True, 'import numpy as np\n'), ((3646, 3670), 'heapq.nlargest', 'heapq.nlargest', (['n', 'pairs'], {}), '(n, pairs)\n', (3660, 3670), False, 'import heapq\n'), ((496, 527), 'numpy.sum', 'np.sum', (['(self.m * self.m)'], {'axis': '(1)'}), '(self.m * self.m, axis=1)\n', (502, 527), True, 'import numpy as np\n'), ((662, 693), 'numpy.sum', 'np.sum', (['(self.m * self.m)'], {'axis': '(1)'}), '(self.m * self.m, axis=1)\n', (668, 693), True, 'import numpy as np\n'), ((902, 920), 'numpy.zeros', 'np.zeros', (['self.dim'], {}), '(self.dim)\n', (910, 920), True, 'import numpy as np\n'), ((1637, 1662), 'numpy.load', 'np.load', (["(path + '.vt.npy')"], {}), "(path + '.vt.npy')\n", (1644, 1662), True, 'import numpy as np\n'), ((1770, 1795), 'numpy.load', 'np.load', (["(path + '.ut.npy')"], {}), "(path + '.ut.npy')\n", (1777, 1795), True, 'import numpy as np\n'), ((2062, 2083), 'numpy.power', 'np.power', (['self.s', 'eig'], {}), '(self.s, eig)\n', (2070, 2083), True, 'import numpy as np\n')] |
from sklearn.cluster import KMeans
import numpy as np
# Toy 2-D dataset: two clusters, one near x=1 and one near x=4.
X = np.array(
    [[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]]
)
# Fit a 2-cluster k-means model; the fixed random_state makes it reproducible.
kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
kmeans.labels_                    # cluster index assigned to each training sample
kmeans.predict([[0, 0], [4, 4]])  # classify two unseen points
kmeans.cluster_centers_           # coordinates of the two centroids
| [
"sklearn.cluster.KMeans",
"numpy.array"
] | [((58, 116), 'numpy.array', 'np.array', (['[[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]]'], {}), '([[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]])\n', (66, 116), True, 'import numpy as np\n'), ((140, 176), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)', 'random_state': '(0)'}), '(n_clusters=2, random_state=0)\n', (146, 176), False, 'from sklearn.cluster import KMeans\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 1 23:32:25 2018
@author: <NAME>
"""
from sympy import *
import numpy as np
from scipy import integrate
import matplotlib
matplotlib.use('TkAgg')
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
import tkFont
else:
import tkinter as Tk
import tkinter.font as tkFont
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import pickle
from Tkinter import *
import os
import compiler
from compiler import *
def entry(S0_entry,I0_entry,R0_entry,infection_entry,removed_entry, Start_entry, Stop_entry, Steps_entry):
    """
    Read the SIR initial conditions, time grid and rate parameters from the
    eight Tk entry widgets, integrate the basic SIR model with odeint, and
    display the three trajectories (S, I, R) stacked in a new Tk window
    with a matplotlib navigation toolbar.

    NOTE(review): Tk, TOP and BOTH come from the module-level wildcard
    'from Tkinter import *' -- confirm when porting to Python 3.
    """
    # Parse initial conditions [S0, I0, R0] and time grid [start, stop, steps]
    init, time = initial_conditions_entry(S0_entry,I0_entry,R0_entry, Start_entry, Stop_entry, Steps_entry)
    t = np.linspace(time[0],time[1],time[2])
    # The entry widgets themselves are forwarded to solvr, which re-reads
    # the rates on every evaluation of the right-hand side.
    args = (infection_entry, removed_entry)
    X = integrate.odeint(solvr, init, t, args)
    fig = Figure()
    # Top panel: susceptible population over time
    a = fig.add_subplot(3,1,1)
    ax=a.plot(t,X[:,0])
    a.legend(ax,['Susceptible'],loc=0)
    a.set_ylabel('population')
    # Compose the title from the parameters and the initial conditions
    P=parameters_entry(infection_entry,removed_entry)
    _a=str(P[0])
    _b=str(P[1])
    alpha=r'$\alpha=%s$'%(_a)
    beta=r'$\beta=%s$'%(_b)
    _S0=str(init[0])
    _I0=str(init[1])
    _R0=str(init[2])
    S0='S(0) = %s' %(_S0)
    I0='I(0) = %s' %(_I0)
    R0='R(0) = %s' %(_R0)
    a.set_title("Initial conditions: "+S0+", "+I0+", "+R0+"\n"+"Parameters: "+alpha+", "+beta)
    # Middle panel: infected population
    b = fig.add_subplot(3,1,2)
    bx=b.plot(t,X[:,1])
    b.legend(bx,['Infected'],loc=0)
    b.set_ylabel('population')
    # Bottom panel: removed population
    c = fig.add_subplot(3,1,3)
    cx=c.plot(t,X[:,2])
    c.legend(cx,['Removed'],loc=0)
    c.set_ylabel('population')
    c.set_xlabel('time')
    # Embed the figure in a fresh Tk window with a navigation toolbar
    root=Tk()
    canvas = FigureCanvasTkAgg(fig, master=root)
    canvas.show()
    canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
    toolbar = NavigationToolbar2TkAgg(canvas, root)
    toolbar.update()
    canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
#basic model
def parameters_entry(infection_entry, removed_entry):
    """
    Read the infection and removal rates from the two Tk entry widgets.

    Returns a two-element list [infection_rate, removal_rate]; each value
    is parsed as int when its text has no decimal point, float otherwise.

    NOTE(review): if either field contains a character outside
    '+-.0123456789' this loop spins until the widget text changes --
    presumably tolerable inside the Tk event loop; confirm.
    """
    allowed = '+-.0123456789'
    while True:
        raw_infection = infection_entry.get()
        raw_removed = removed_entry.get()
        if all(ch in allowed for ch in raw_infection) and all(ch in allowed for ch in raw_removed):
            break
    infection = float(raw_infection) if '.' in raw_infection else int(raw_infection)
    removed = float(raw_removed) if '.' in raw_removed else int(raw_removed)
    return [infection, removed]
def initial_conditions_entry(a, b, c, d, e, f):
    """
    Read the SIR initial conditions and the time grid from six Tk entries.

    Parameters
    ----------
    a, b, c : entry widgets holding S(0), I(0) and R(0).
    d, e, f : entry widgets holding start time, stop time and the number
        of integration steps.

    Returns
    -------
    (C, T) : C = [S0, I0, R0] and T = [start, stop, steps].  Each value is
        parsed as int unless its text contains a decimal point.
    """
    def _to_number(text):
        # Texts without a decimal point are kept as ints so that e.g. the
        # 'steps' count stays integral for np.linspace.
        return float(text) if '.' in text else int(text)

    allowed = '+-.0123456789'
    while True:
        s0_text, i0_text, r0_text = a.get(), b.get(), c.get()
        start_text, stop_text, steps_text = d.get(), e.get(), f.get()
        # Spin until the three population fields contain only numeric
        # characters.  NOTE(review): the time fields are not validated here,
        # matching the original behaviour -- bad input in d/e/f raises
        # ValueError in _to_number below.
        if (all(ch in allowed for ch in s0_text)
                and all(ch in allowed for ch in i0_text)
                and all(ch in allowed for ch in r0_text)):
            break

    C = [_to_number(s0_text), _to_number(i0_text), _to_number(r0_text)]
    T = [_to_number(start_text), _to_number(stop_text), _to_number(steps_text)]
    return C, T
def solvr(X, t, infection_entry, removed_entry):
    """
    Right-hand side of the basic SIR model for scipy's odeint.

    The rates are re-read from the two Tk entry widgets on every call, and
    the total population is printed each evaluation (original behaviour).
    """
    susceptible, infected, removed = X
    total = susceptible + infected + removed
    print(total)
    alpha, beta = parameters_entry(infection_entry, removed_entry)
    new_infections = alpha / total * susceptible * infected
    recoveries = beta * infected
    return [-new_infections, new_infections - recoveries, recoveries]
#return [0.5*S-S*I*0.01,-0.5*I+S*I*0.01,0] y0=[80,100,0] a=0.5 b=0.01 t0=0 tf=50
#############################################################################################################
#extension model
#def old_ext_entry(paras_list, eqn_ic_list): #do time steps later
# num_init = []
# for v1, v2 in eqn_ic_list.values():
# while true:
# if all(c in '+-.0123456789' for c in v2):
# break
# # a float contains a period (US)
# if '.' in v2:
# v2= float(v2)
# else:
# v2= int(v2)
# num_init.append(v2)
#
# num_args = []
# for v in paras_list.values():
# while true:
# if all(c in '+-.0123456789' for c in v):
# break
# # a float contains a period (US)
# if '.' in v:
# v= float(v)
# else:
# v= int(v)
# num_args.append(v)
#
# t = np.linspace(0,50,5000)
# print("inputs into odeint in ext")
# print(num_init)
# print(num_args)
#
# X = integrate.odeint(ext_solvr, num_init, t)
#
# fig = Figure()
# a = fig.add_subplot(111)
# ax=a.plot(t,X)
# a.set_xlabel('time')
# a.set_ylabel('population')
# root=Tk()
# canvas = FigureCanvasTkAgg(fig, master=root)
# canvas.show()
# canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
# toolbar = NavigationToolbar2TkAgg(canvas, root)
# toolbar.update()
# canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
def ext_entry():
    """
    Demo entry point for the extension model: integrate a five-variable
    system with hard-coded initial conditions and rates, then show all
    trajectories in a single plot embedded in a new Tk window.

    NOTE(review): Tk, TOP and BOTH come from the module-level wildcard
    'from Tkinter import *'.
    """
    # Hard-coded initial conditions and growth rates for variables A..E
    num_init = [1,2,3,4,5]
    args = (0.1, 0.2, 0.3, 0.4, 0.5)
    t = np.linspace(0,50,5000)
    X = integrate.odeint(ext_solvr, num_init, t, args)
    fig = Figure()
    a = fig.add_subplot(111)
    ax=a.plot(t,X)
    a.set_xlabel('time')
    a.set_ylabel('population')
    a.legend(ax,['A','B','C','D','E'],loc=0)
    # Embed the figure in a fresh Tk window with a navigation toolbar
    root=Tk()
    canvas = FigureCanvasTkAgg(fig, master=root)
    canvas.show()
    canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
    toolbar = NavigationToolbar2TkAgg(canvas, root)
    toolbar.update()
    canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
def extract_eqn_ic(var, eqn, ic, eqn_ic_list):
    """
    Record the equation string and initial condition for variable ``var``
    in ``eqn_ic_list`` (mutated in place) and return the updated mapping.
    """
    eqn_ic_list[var] = [eqn, ic]
    return eqn_ic_list
def extract_paras(name, num, paras_list):
    """Store parameter ``name`` -> ``num`` in ``paras_list`` (in place) and return it."""
    paras_list[name] = num
    return paras_list
def ext_solvr(X, t, *args):
    """
    Right-hand side of the extension model: dX_i/dt = rate_i * X_i.

    Parameters
    ----------
    X : sequence of the five state values (A, B, C, D, E).
    t : current time (unused; required by scipy.integrate.odeint).
    *args : optional five growth rates.  When supplied (as ext_entry does)
        they are used as the coefficients; previously the passed rates were
        silently ignored in favour of a hard-coded duplicate list, which is
        kept as the default for backward compatibility.
    """
    A, B, C, D, E = X
    rates = list(args) if args else [0.1, 0.2, 0.3, 0.4, 0.5]
    # Per-step debug print removed: odeint evaluates this thousands of times.
    return [rates[0] * A, rates[1] * B, rates[2] * C, rates[3] * D, rates[4] * E]
| [
"scipy.integrate.odeint",
"matplotlib.figure.Figure",
"matplotlib.use",
"numpy.linspace",
"matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg",
"tkinter",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
] | [((193, 216), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (207, 216), False, 'import matplotlib\n'), ((802, 840), 'numpy.linspace', 'np.linspace', (['time[0]', 'time[1]', 'time[2]'], {}), '(time[0], time[1], time[2])\n', (813, 840), True, 'import numpy as np\n'), ((891, 929), 'scipy.integrate.odeint', 'integrate.odeint', (['solvr', 'init', 't', 'args'], {}), '(solvr, init, t, args)\n', (907, 929), False, 'from scipy import integrate\n'), ((940, 948), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (946, 948), False, 'from matplotlib.figure import Figure\n'), ((1746, 1750), 'tkinter', 'Tk', ([], {}), '()\n', (1748, 1750), True, 'import tkinter as Tk\n'), ((1764, 1799), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig'], {'master': 'root'}), '(fig, master=root)\n', (1781, 1799), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\n'), ((1895, 1932), 'matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg', 'NavigationToolbar2TkAgg', (['canvas', 'root'], {}), '(canvas, root)\n', (1918, 1932), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\n'), ((5782, 5806), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(5000)'], {}), '(0, 50, 5000)\n', (5793, 5806), True, 'import numpy as np\n'), ((5814, 5860), 'scipy.integrate.odeint', 'integrate.odeint', (['ext_solvr', 'num_init', 't', 'args'], {}), '(ext_solvr, num_init, t, args)\n', (5830, 5860), False, 'from scipy import integrate\n'), ((5876, 5884), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (5882, 5884), False, 'from matplotlib.figure import Figure\n'), ((6043, 6047), 'tkinter', 'Tk', ([], {}), '()\n', (6045, 6047), True, 'import tkinter as Tk\n'), ((6061, 6096), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig'], {'master': 'root'}), '(fig, master=root)\n', (6078, 6096), False, 'from 
matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\n'), ((6192, 6229), 'matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg', 'NavigationToolbar2TkAgg', (['canvas', 'root'], {}), '(canvas, root)\n', (6215, 6229), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\n')] |
# Andrei, 2018
"""
Interactive script to view multiple cameras.
sudo python -m pip install --upgrade pip setuptools wheel
sudo /usr/local/bin/pip install opencv-python
"""
from argparse import ArgumentParser
import cv2
import numpy as np
import os
import time
# Overlay styling: CROSS_COLOR is BGR red (OpenCV channel order),
# menu text is drawn in green.
CROSS_COLOR = np.array([0, 0, 255], dtype=np.uint8)
FONT_SCALE = 1.5
FONT = cv2.FONT_HERSHEY_PLAIN
FONT_COLOR = (0, 255, 0)
if __name__ == "__main__":
    # --- Command-line interface ---
    arg_parser = ArgumentParser()
    arg_parser.add_argument('cameras', help='Camera ids', type=int, nargs="+")
    arg_parser.add_argument('--scale', default=1., type=float, help='View resize factor.')
    arg_parser.add_argument('--fps', default=30, type=int, help='What FPS to read from video at.')
    arg_parser.add_argument('--res', default=[1920, 1080], type=int,
                            nargs=2, help='Video resolution.')
    arg_parser.add_argument('--cross-size', default=2, type=int, help='Cross pixel size.')
    arg_parser.add_argument('--save', default="data/", help='Save folder.')
    args = arg_parser.parse_args()
    cameras = args.cameras
    scale = args.scale
    fps = args.fps
    res = args.res
    cross_size = args.cross_size
    save_folder = args.save
    # --- Open every requested camera; skip the ones that fail a test read ---
    caps = dict({})
    for camera in cameras:
        cap = cv2.VideoCapture("/dev/video{}".format(camera))
        ret, frame = cap.read()
        if not ret:
            print("[ERROR] Camera {} not working!".format(camera))
            continue
        print("Camera {} working!".format(camera))
        # Configure camera
        cap.set(cv2.CAP_PROP_FPS, fps)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, res[0])
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, res[1])
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 0) # Set camera buffer size to 0 so reads return the freshest frame
        caps[camera] = cap
    # Screenshot filename template: <timestamp>_camera_<id>_off_<ms offset>.jpg
    save_path = os.path.join(save_folder, "{}_camera_{}_off_{}ms.jpg")
    # Global UI state flipped by the keyboard handlers below
    save_screen_shot = False
    show_cross = True
    show_menu = True
    def screen_shot():
        # Request a one-shot save of the current frame from every camera
        global save_screen_shot
        save_screen_shot = True
    def toggle_cross():
        global show_cross
        show_cross = not show_cross
    def toggle_menu():
        global show_menu
        show_menu = not show_menu
    # Key code -> (handler, help text).  27 is the ESC key; `quit` is the
    # builtin, so ESC exits the program.
    menu = dict({
        27: (quit, "Key [ESC]: Exit"), # if the 'ESC' key is pressed, Quit
        ord('s'): (screen_shot, "Key [s]: Save screen shots"),
        ord('c'): (toggle_cross, "Key [c]: Toggle cross"),
        ord('m'): (toggle_menu, "Key [m]: Toggle menu"),
    })
    menu_text = "\n".join([x[1] for x in menu.values()])
    # --- Main display loop ---
    while True:
        # waitKey returns -1 (masked to 255) when no key was pressed
        key = cv2.waitKey(1) & 0xFF
        # if the 'ESC' key is pressed, Quit
        if key in menu.keys():
            menu[key][0]()
        elif key != 255:
            print("Unknown key: {}".format(key))
        # Common timestamp so all cameras in this pass share a capture id
        tp_common = time.time()
        for camera, cap in caps.items():
            ret, frame = cap.read()
            view_frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
            h, w = view_frame.shape[:2]
            if show_cross:
                # Draw a centred crosshair on the (scaled) view only
                view_frame[h//2-cross_size: h//2+cross_size, :] = CROSS_COLOR
                view_frame[:, w//2-cross_size: w//2+cross_size] = CROSS_COLOR
                # view_frame[:, (w//4)*1-cross_size: (w//4)*1+cross_size] = CROSS_COLOR
                # view_frame[:, (w//4)*3-cross_size: (w//4)*3+cross_size] = CROSS_COLOR
            if save_screen_shot:
                # Tint the view red as a visual "saved" indicator.
                # NOTE(review): uint8 addition wraps around above 255 --
                # presumably acceptable for a transient indicator; confirm.
                view_frame[:, :, 2] += 100
                # Per-camera offset (ms) from the common timestamp; the
                # unscaled frame is what gets written to disk.
                off = int((time.time() - tp_common) * 1000)
                cv2.imwrite(save_path.format(tp_common, camera, off), frame)
            if show_menu:
                # Render the key-binding help, one line per entry
                y0, dy = 50, 50
                for i, line in enumerate(menu_text.split('\n')):
                    y = y0 + i * dy
                    cv2.putText(view_frame, line, (50, y), FONT, FONT_SCALE, FONT_COLOR)
            cv2.imshow("Camera: {}".format(camera), view_frame)
        # Reset save screen shot
        save_screen_shot = False
| [
"cv2.putText",
"argparse.ArgumentParser",
"cv2.waitKey",
"time.time",
"numpy.array",
"os.path.join",
"cv2.resize"
] | [((290, 327), 'numpy.array', 'np.array', (['[0, 0, 255]'], {'dtype': 'np.uint8'}), '([0, 0, 255], dtype=np.uint8)\n', (298, 327), True, 'import numpy as np\n'), ((445, 461), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (459, 461), False, 'from argparse import ArgumentParser\n'), ((1806, 1860), 'os.path.join', 'os.path.join', (['save_folder', '"""{}_camera_{}_off_{}ms.jpg"""'], {}), "(save_folder, '{}_camera_{}_off_{}ms.jpg')\n", (1818, 1860), False, 'import os\n'), ((2782, 2793), 'time.time', 'time.time', ([], {}), '()\n', (2791, 2793), False, 'import time\n'), ((2562, 2576), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2573, 2576), False, 'import cv2\n'), ((2898, 2943), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': 'scale', 'fy': 'scale'}), '(frame, (0, 0), fx=scale, fy=scale)\n', (2908, 2943), False, 'import cv2\n'), ((3739, 3807), 'cv2.putText', 'cv2.putText', (['view_frame', 'line', '(50, y)', 'FONT', 'FONT_SCALE', 'FONT_COLOR'], {}), '(view_frame, line, (50, y), FONT, FONT_SCALE, FONT_COLOR)\n', (3750, 3807), False, 'import cv2\n'), ((3449, 3460), 'time.time', 'time.time', ([], {}), '()\n', (3458, 3460), False, 'import time\n')] |
#!/usr/bin/env python
"""
Info: This script uses the pretrained ResNet50 model as a feature extractor to predict sign language letters. It takes the pretrained ResNet50 model and adds new classification layers and trains this model on the ASL corpus consisting of images of English letters depicted in the American Sign Language.
Parameters:
(optional) train_data: str <name-of-training-data>, default = "asl_alphabet_train_subset"
(optional) test_data: str <name-of-test-data>, default = "asl_alphabet_test_subset"
(optional) augment_data: str <perform-data-augmentation-true-false>, default = "False"
(optional) batch_size: int <size-of-batches>, default = 32
(optional) n_epochs: int <number-of-epochs>, default = 15
(optional) output_filename: str <name-of-classification-report>, default = "classification_report.txt"
Usage:
$ python cnn-asl.py
Output:
- model_summary.txt: a summary of the model architecture.
- model_architecture.png: a visual representation of the model architecture.
- model_loss_accuracy_history.png: a plot showing the loss and accuracy learning curves of the model during training.
- classification_report.txt: classification metrics of the model performance.
- saved_model.json: the model saved as a JSON-file.
- model_weights.h5: the model weights saved in the HDF5 format.
"""
### DEPENDENCIES ###
# Core libraries
import os
import sys
sys.path.append(os.path.join(".."))
# Matplotlib, numpy, OpenCV, pandas, glob, contextlib
import matplotlib.pyplot as plt
import numpy as np
import cv2
import pandas as pd
import glob
from contextlib import redirect_stdout
# Scikit-learn
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelBinarizer
# TensorFlow
import tensorflow as tf
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier # a wrapper for sci-kit learn that imports the KerasClassifier
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import (load_img,
img_to_array,
ImageDataGenerator)
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from tensorflow.keras.layers import (Flatten,
Dense,
Dropout)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D,
MaxPooling2D,
GlobalAveragePooling2D,
MaxPool2D,
Activation,
Flatten,
Dense)
from tensorflow.keras.utils import plot_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import backend as K
from tensorflow.keras.models import model_from_json
# argparse
import argparse
### MAIN FUNCTION ###
def main():
    """
    Command-line entry point: parse the arguments, prepare the ASL image
    data, build and train the ResNet50-based classifier, and write all
    artefacts (model summary, learning curves, classification report and
    the saved model) to the ../output directory.
    """
    ### ARGPARSE ###
    # Initialize ArgumentParser class
    ap = argparse.ArgumentParser()
    # Argument 1: Training data
    ap.add_argument("-t", "--train_data",
                    type = str,
                    required = False, # the argument is not required
                    help = "Name of training data folder",
                    default = "asl_alphabet_train_subset") # default is a subset of the training dataset
    # Argument 2: Test data
    ap.add_argument("-te", "--test_data",
                    type = str,
                    required = False, # the argument is not required
                    help = "Name of test data folder", # fixed: previously said "training data"
                    default = "asl_alphabet_test_subset") # default is a subset of the test dataset
    # Argument 3: Augment data
    ap.add_argument("-a", "--augment_data",
                    type = str,
                    required = False, # the argument is not required
                    help = "Specify whether you want to perform data augmentation: True/False.",
                    default = "False") # data augmentation is not performed by default
    # Argument 4: Batch size
    ap.add_argument("-b", "--batch_size",
                    type = int,
                    required = False, # the argument is not required
                    help = "Define the size of the batches",
                    default = 32) # default batch size
    # Argument 5: Number of epochs
    ap.add_argument("-n", "--n_epochs",
                    type = int,
                    required = False, # the argument is not required
                    help = "Define the number of epochs",
                    default = 15) # default number of epochs
    # Argument 6: Output filename (classification report)
    ap.add_argument("-o", "--output_filename",
                    type = str,
                    required = False, # the argument is not required
                    help = "Define the name of the output file (the model classification report)",
                    default = "classification_report.txt") # default output filename
    # Parse arguments
    args = vars(ap.parse_args())
    # Save input parameters
    train_data = os.path.join("..", "data", "subset_asl_sign_language", args["train_data"])
    test_data = os.path.join("..", "data", "subset_asl_sign_language", args["test_data"])
    augment_data = args["augment_data"]
    batch_size = args["batch_size"]
    n_epochs = args["n_epochs"]
    output_filename = args["output_filename"]
    # Create the output directory if it does not already exist.
    # makedirs(..., exist_ok=True) replaces the exists()/mkdir() pair: it is
    # race-free and also creates missing parent directories.
    os.makedirs(os.path.join("..", "output"), exist_ok=True)
    # Start message
    print("\n[INFO] Initializing...")
    # Instantiate the CNN_classifier class
    classifier = CNN_classifier(train_data, test_data)
    # Create list of label names from the directory names in the training data folder
    labels = classifier.list_labels()
    # Create training and test data (X) and labels (y)
    print("\n[INFO] Preparing training and validation data...")
    X_train, y_train = classifier.create_XY(train_data, labels, dimension=224)
    X_test, y_test = classifier.create_XY(test_data, labels, dimension=224)
    # Normalize images and binarize labels
    print("\n[INFO] Normalizing images and binarizing labels...")
    X_train_scaled, X_test_scaled, y_train_binarized, y_test_binarized = classifier.normalize_binarize(X_train, y_train, X_test, y_test)
    # Define ResNet50 model and add new classification layers
    print("\n[INFO] Loading the pretrained ResNet50 model and adding new classification layers...")
    model = classifier.build_ResNet50()
    # Save model summary to output directory
    print("\n[INFO] Saving model summary to 'output' directory...")
    classifier.save_model_summary(model)
    # Create new, artificial data with data augmentation if the user has specified so
    datagen = classifier.perform_data_augmentation(X_train_scaled, augment_data)
    # Train model
    print("\n[INFO] Training model...")
    model_history = classifier.train_model(model, batch_size, n_epochs, datagen, augment_data, X_train_scaled, X_test_scaled, y_train_binarized, y_test_binarized)
    # Plot loss/accuracy during training and saving to output
    print("\n[INFO] Plotting loss/accuracy history of model during training and saving plot to 'output' directory...")
    classifier.plot_training_history(model_history, n_epochs)
    # Evaluate model
    print(f"\n[INFO] Evaluating model and saving classification metrics as {output_filename} to 'output' directory...")
    classifier.evaluate_model(y_test_binarized, labels, output_filename, model, batch_size, X_test_scaled)
    # Save model as json-file to output directory
    print("\n[INFO] Saving model as JSON-file to 'output' directory...")
    classifier.save_model(model)
    # User message
    print("\n[INFO] Done! Results can be found in the 'output' directory. \n")
# Creating Neural network classifier class
class CNN_classifier:
    """Transfer-learning classifier: pretrained ResNet50 features + new dense head."""
    def __init__(self, train_data, test_data):
        # Paths to the training and test image directories (one
        # sub-directory per class label)
        self.train_data = train_data
        self.test_data = test_data
    def list_labels(self):
        """
        This method defines the label names by listing the names of the folders within training directory without listing hidden files.
        """
        # Create empty list
        labels = []
        # For every name in training directory
        for name in os.listdir(self.train_data):
            # If it does not start with . (which hidden files do)
            if not name.startswith('.'):
                labels.append(name)
        # Sort labels alphabetically
        labels = sorted(labels)
        return labels
    def create_XY(self, data, labels, dimension=224):
        """
        This method creates trainX, trainY as well as testX and testY. It creates X, which is an array of images (corresponding to trainX and testX) and Y which is a list of the image labels (corresponding to trainY and testY). Hence, with this we can create the training and validation datasets.
        """
        # Create empty array, X, for the images, and an empty list, y, for the image labels
        X = np.empty((0, dimension, dimension, 3))
        y = []
        # For each class name listed in labels
        for name in labels:
            # Get all images for this class
            images = glob.glob(os.path.join(data, name, "*.jpg"))
            # For each image in images
            for image in images:
                # Load image (cv2.imread yields BGR channel order)
                loaded_img = cv2.imread(image)
                # Resize image to the specified dimension
                resized_img = cv2.resize(loaded_img, (dimension, dimension), interpolation = cv2.INTER_AREA) # INTER_AREA means that it is resizing using pixel-area relation which was a suggested method by Ross
                # Create array of image with a leading batch axis of 1
                image_array = np.array([np.array(resized_img)])
                # Append to the X array and y list.
                # NOTE(review): np.vstack inside the loop copies X every
                # iteration (quadratic); fine for small subsets.
                X = np.vstack((X, image_array))
                y.append(name)
        return X, y
    def normalize_binarize(self, X_train, y_train, X_test, y_test):
        """
        This method min-max scales the training and validation images so the pixel intensities lie between 0 and 1, and binarizes the training and test labels. Binarizing is performed using the LabelBinarizer function from sklearn. We binarize the labels to convert them into one-hot vectors.
        """
        # Min-max scale training and test data.
        # NOTE(review): .astype("float") binds only to the denominator; the
        # result is float anyway because X comes from np.empty (float64).
        X_train_scaled = (X_train - X_train.min())/(X_train.max() - X_train.min()).astype("float")
        X_test_scaled = (X_test - X_test.min())/(X_test.max() - X_test.min()).astype("float")
        # Binarize training and test labels
        label_binarizer = LabelBinarizer() # initialize binarizer
        y_train_binarized = label_binarizer.fit_transform(y_train) # binarizing training image labels
        y_test_binarized = label_binarizer.fit_transform(y_test) # binarizing validation image labels
        return X_train_scaled, X_test_scaled, y_train_binarized, y_test_binarized
    def build_ResNet50(self):
        """
        This method loads the pretrained ResNet50 model and adds new classifier layers.
        Returns the compiled model (Adam optimizer, categorical cross-entropy loss).
        """
        # Clear existing session
        tf.keras.backend.clear_session()
        # Load pretrained ResNet50 model without classifier layers
        model = ResNet50(include_top=False, # this means that we are not including the fully-connected layer which is at the top of the network.
                         pooling='avg', # average pooling
                         input_shape=(224, 224, 3))
        # Mark loaded layers as not trainable, because we do not want to retrain the network and updating the weights. We just want to use the weights that have already been trained.
        for layer in model.layers:
            layer.trainable = False
        # Add new classifier layers to replace the fully-connected layer that we have not included
        # First we take the output of the final layer in the pretrained ResNet50 and use as input for the flattening layer
        flat1 = Flatten()(model.layers[-1].output) # -1 means that we are taking the output of the previous layer
        # Add class1 layer with 256 nodes and relu as activation function
        class1 = Dense(256,
                       activation='relu')(flat1) # the flattening layer is the input layer
        drop1 = Dropout(0.2)(class1) # adding dropout layer to reduce overfitting
        output = Dense(26, # we have 26 classes, i.e. 26 letters to predict
                       activation='softmax')(drop1)
        # Define new model
        model = Model(inputs=model.inputs,
                      outputs=output)
        # Compile new model
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model
    def save_model_summary(self, model):
        """
        This method simply takes the model, creates a summary and saves it to the output directory.
        """
        # Save model summary to output directory (redirect stdout of model.summary())
        output_path_summary = os.path.join("..", "output", "model_summary.txt")
        with open(output_path_summary, 'w') as f:
            with redirect_stdout(f):
                model.summary()
        # Visualize model architecture and save to output directory
        output_path_model = os.path.join("..", "output", "model_architecture.png")
        plot = plot_model(model,
                          to_file = output_path_model,
                          show_shapes=True,
                          show_layer_names=True)
    def perform_data_augmentation(self, X_train_scaled, augment_data):
        """
        This method uses the TensorFlow DataGenerator to create new, artificial images based on the original data to increase the amount of data and prevent the model form overfitting the trainig data. I have tried to make the augmentation as "realistic" as possible to prevent generating data that the model would never encounter.

        Returns the fitted generator, or None when augment_data is "False".
        """
        if augment_data == "True":
            # User message
            print("\n[INFO] Performing data augmentation to create new data...")
            # Initialize the data augmentation object
            datagen = ImageDataGenerator(zoom_range = 0.15, # zooming
                                         width_shift_range = 0.2, # horizontal shift
                                         height_shift_range = 0.2, # vertical shift
                                         horizontal_flip=True) # mirroring image
            # Perform the data augmentation
            datagen.fit(X_train_scaled)
            return datagen
        # If the user has not specified that they want to perform data augmentation
        # (any value other than "True" also falls through to an implicit None)
        if augment_data == "False":
            return None
    def train_model(self, model, batch_size, n_epochs, datagen, augment_data, X_train_scaled, X_test_scaled, y_train_binarized, y_test_binarized):
        """
        This method trains the ResNet50 model with the new classifier layers on the scaled training images and validates the model on the scaled validation data.
        """
        # If the user has chosen to perform data augmentation
        if augment_data == "True":
            # Train model on the original and augmented data
            model_history = model.fit(datagen.flow(X_train_scaled, y_train_binarized, batch_size = batch_size),
                                      validation_data=(X_test_scaled, y_test_binarized),
                                      epochs=n_epochs,
                                      verbose=1) # show progress bars to allow the user to follow along
            return model_history
        # If the user has not performed data augmentation
        if augment_data == "False":
            # Train model on the original data only
            model_history = model.fit(X_train_scaled, y_train_binarized,
                                      validation_data=(X_test_scaled, y_test_binarized),
                                      batch_size = batch_size,
                                      epochs=n_epochs,
                                      verbose=1) # show progress bars to allow the user to follow along
            return model_history
    def plot_training_history(self, model_history, n_epochs):
        """
        This method plots the loss/accuracy curves of the model during training and saves the plot to the output directory. The code was developed for use in class and has been modified for this project.
        """
        # Visualize performance using matplotlib
        plt.style.use("fivethirtyeight")
        plt.figure()
        plt.plot(np.arange(0, n_epochs), model_history.history["loss"], label="train_loss")
        plt.plot(np.arange(0, n_epochs), model_history.history["val_loss"], label="val_loss")
        plt.plot(np.arange(0, n_epochs), model_history.history["accuracy"], label="train_acc")
        plt.plot(np.arange(0, n_epochs), model_history.history["val_accuracy"], label="val_acc")
        plt.title("Training Loss and Accuracy")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend()
        plt.tight_layout()
        plt.savefig(os.path.join("..", "output", "model_loss_accuracy_history.png"))
    def evaluate_model(self, y_test_binarized, labels, output_filename, model, batch_size, X_test_scaled):
        """
        This method evaluates the model performance, prints the classification report to the terminal, and saves it as a txt-file to the output directory.
        """
        # Compute predictions
        y_predictions = model.predict(X_test_scaled, batch_size=batch_size)
        # Classification report (argmax turns one-hot rows back into class indices)
        classification_metrics = classification_report(y_test_binarized.argmax(axis=1),
                                                       y_predictions.argmax(axis=1),
                                                       target_names=labels)
        # Print classification report to the terminal
        print(classification_metrics)
        # Save classification report to output directory
        out_path = os.path.join("..", "output", output_filename) # Define output path
        # Save classification metrics to output directory
        with open(out_path, "w") as f:
            f.write(f"Below are the classification metrics for the classifier:\n\n{classification_metrics}")
    def save_model(self, model):
        """
        This method saves the model as a json-file as well as the model weights in the HDF5 format in the output directory. The HDF5 format contains the model weights grouped by layer names. This means that once the model has been trained with the new classification layer, it can be loaded from the saved files, which saves a lot of time.
        """
        # Convert model to json
        model_json = model.to_json()
        # Save model as json-file
        out_model_path = os.path.join("..", "output", "saved_model.json")
        with open(out_model_path, "w") as json_file:
            json_file.write(model_json)
        # The weights of the model also need to be saved to the HDF5 format
        out_weights_path = os.path.join("..", "output", "model_weights.h5")
        model.save_weights(out_weights_path)
# Define behaviour when called from command line
if __name__=="__main__":
main() | [
"matplotlib.pyplot.title",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"sklearn.preprocessing.LabelBinarizer",
"argparse.ArgumentParser",
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.keras.layers.Dense",
"numpy.empty",
"matplotlib.pyplot.style.use",
"matplotlib.pyplo... | [((1447, 1465), 'os.path.join', 'os.path.join', (['""".."""'], {}), "('..')\n", (1459, 1465), False, 'import os\n'), ((3418, 3443), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3441, 3443), False, 'import argparse\n'), ((5600, 5674), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""subset_asl_sign_language"""', "args['train_data']"], {}), "('..', 'data', 'subset_asl_sign_language', args['train_data'])\n", (5612, 5674), False, 'import os\n'), ((5691, 5764), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""subset_asl_sign_language"""', "args['test_data']"], {}), "('..', 'data', 'subset_asl_sign_language', args['test_data'])\n", (5703, 5764), False, 'import os\n'), ((9005, 9032), 'os.listdir', 'os.listdir', (['self.train_data'], {}), '(self.train_data)\n', (9015, 9032), False, 'import os\n'), ((9795, 9833), 'numpy.empty', 'np.empty', (['(0, dimension, dimension, 3)'], {}), '((0, dimension, dimension, 3))\n', (9803, 9833), True, 'import numpy as np\n'), ((11558, 11574), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (11572, 11574), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((12085, 12117), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (12115, 12117), True, 'import tensorflow as tf\n'), ((12202, 12271), 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'include_top': '(False)', 'pooling': '"""avg"""', 'input_shape': '(224, 224, 3)'}), "(include_top=False, pooling='avg', input_shape=(224, 224, 3))\n", (12210, 12271), False, 'from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions\n'), ((13538, 13580), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.inputs', 'outputs': 'output'}), '(inputs=model.inputs, outputs=output)\n', (13543, 13580), False, 'from tensorflow.keras.models import Model\n'), ((14064, 14113), 
'os.path.join', 'os.path.join', (['""".."""', '"""output"""', '"""model_summary.txt"""'], {}), "('..', 'output', 'model_summary.txt')\n", (14076, 14113), False, 'import os\n'), ((14359, 14413), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', '"""model_architecture.png"""'], {}), "('..', 'output', 'model_architecture.png')\n", (14371, 14413), False, 'import os\n'), ((14431, 14520), 'tensorflow.keras.utils.plot_model', 'plot_model', (['model'], {'to_file': 'output_path_model', 'show_shapes': '(True)', 'show_layer_names': '(True)'}), '(model, to_file=output_path_model, show_shapes=True,\n show_layer_names=True)\n', (14441, 14520), False, 'from tensorflow.keras.utils import plot_model\n'), ((17786, 17818), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (17799, 17818), True, 'import matplotlib.pyplot as plt\n'), ((17827, 17839), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17837, 17839), True, 'import matplotlib.pyplot as plt\n'), ((18226, 18265), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy"""'], {}), "('Training Loss and Accuracy')\n", (18235, 18265), True, 'import matplotlib.pyplot as plt\n'), ((18274, 18295), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (18284, 18295), True, 'import matplotlib.pyplot as plt\n'), ((18304, 18331), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), "('Loss/Accuracy')\n", (18314, 18331), True, 'import matplotlib.pyplot as plt\n'), ((18340, 18352), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18350, 18352), True, 'import matplotlib.pyplot as plt\n'), ((18361, 18379), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18377, 18379), True, 'import matplotlib.pyplot as plt\n'), ((19354, 19399), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', 'output_filename'], {}), "('..', 'output', output_filename)\n", (19366, 
19399), False, 'import os\n'), ((20197, 20245), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', '"""saved_model.json"""'], {}), "('..', 'output', 'saved_model.json')\n", (20209, 20245), False, 'import os\n'), ((20455, 20503), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', '"""model_weights.h5"""'], {}), "('..', 'output', 'model_weights.h5')\n", (20467, 20503), False, 'import os\n'), ((6009, 6037), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""'], {}), "('..', 'output')\n", (6021, 6037), False, 'import os\n'), ((6057, 6085), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""'], {}), "('..', 'output')\n", (6069, 6085), False, 'import os\n'), ((12955, 12964), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (12962, 12964), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, MaxPool2D, Activation, Flatten, Dense\n'), ((13154, 13183), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (13159, 13183), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, MaxPool2D, Activation, Flatten, Dense\n'), ((13281, 13293), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (13288, 13293), False, 'from tensorflow.keras.layers import Flatten, Dense, Dropout\n'), ((13374, 13405), 'tensorflow.keras.layers.Dense', 'Dense', (['(26)'], {'activation': '"""softmax"""'}), "(26, activation='softmax')\n", (13379, 13405), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, MaxPool2D, Activation, Flatten, Dense\n'), ((15280, 15388), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'zoom_range': '(0.15)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'horizontal_flip': '(True)'}), '(zoom_range=0.15, width_shift_range=0.2,\n height_shift_range=0.2, horizontal_flip=True)\n', (15298, 
15388), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator\n'), ((17857, 17879), 'numpy.arange', 'np.arange', (['(0)', 'n_epochs'], {}), '(0, n_epochs)\n', (17866, 17879), True, 'import numpy as np\n'), ((17949, 17971), 'numpy.arange', 'np.arange', (['(0)', 'n_epochs'], {}), '(0, n_epochs)\n', (17958, 17971), True, 'import numpy as np\n'), ((18043, 18065), 'numpy.arange', 'np.arange', (['(0)', 'n_epochs'], {}), '(0, n_epochs)\n', (18052, 18065), True, 'import numpy as np\n'), ((18138, 18160), 'numpy.arange', 'np.arange', (['(0)', 'n_epochs'], {}), '(0, n_epochs)\n', (18147, 18160), True, 'import numpy as np\n'), ((18400, 18463), 'os.path.join', 'os.path.join', (['""".."""', '"""output"""', '"""model_loss_accuracy_history.png"""'], {}), "('..', 'output', 'model_loss_accuracy_history.png')\n", (18412, 18463), False, 'import os\n'), ((10019, 10052), 'os.path.join', 'os.path.join', (['data', 'name', '"""*.jpg"""'], {}), "(data, name, '*.jpg')\n", (10031, 10052), False, 'import os\n'), ((10194, 10211), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (10204, 10211), False, 'import cv2\n'), ((10309, 10385), 'cv2.resize', 'cv2.resize', (['loaded_img', '(dimension, dimension)'], {'interpolation': 'cv2.INTER_AREA'}), '(loaded_img, (dimension, dimension), interpolation=cv2.INTER_AREA)\n', (10319, 10385), False, 'import cv2\n'), ((10689, 10716), 'numpy.vstack', 'np.vstack', (['(X, image_array)'], {}), '((X, image_array))\n', (10698, 10716), True, 'import numpy as np\n'), ((14190, 14208), 'contextlib.redirect_stdout', 'redirect_stdout', (['f'], {}), '(f)\n', (14205, 14208), False, 'from contextlib import redirect_stdout\n'), ((10579, 10600), 'numpy.array', 'np.array', (['resized_img'], {}), '(resized_img)\n', (10587, 10600), True, 'import numpy as np\n')] |
"""
How to convert a duration table to a survival table, then compute the Kaplan-Meier and
Nelson-Aalen estimates. Accompanies the article at
https://crosstab.io/articles/durations-to-survivals.
"""
import altair as alt
import numpy as np
import pandas as pd
## Load data
durations = pd.read_parquet("data/retailrocket_durations.parquet")
print(durations.head())
## Round days up for a meaningful grouping
durations["duration_days"] = np.ceil(durations["duration_days"]).astype(int)
grp = durations.groupby("duration_days")
survival = pd.DataFrame(
{"num_obs": grp.size(), "events": grp["endpoint_observed"].sum()}
)
print(survival.head())
# Get the total number of observations that had happened *before* each row. The
# complement of that is the number still at-risk of the event *in* each row.
num_subjects = len(durations)
prior_count = survival["num_obs"].cumsum().shift(1, fill_value=0)
survival.insert(0, "at_risk", num_subjects - prior_count)
print(survival.head())
# Keep only the rows with at least one event, and insert an initial row of 0's.
survival = survival.loc[survival["events"] > 0]
# The number of censored includes units censored at any duration between an event
# duration and the next event duration. This has to be backed out of the number at-risk:
# if the number at-risk falls from 15 to 11, but only one event was observed, then the
# number of censored in that interval must be 3.
survival["censored"] = (
survival["at_risk"]
- survival["at_risk"].shift(-1, fill_value=0)
- survival["events"]
)
print(survival)
## Kaplan-Meier survival function estimate
inverse_hazard = 1 - survival["events"] / survival["at_risk"]
survival["survival_proba"] = inverse_hazard.cumprod()
print(survival)
survival["conversion_pct"] = 100 * (1 - survival["survival_proba"])
fig = (
alt.Chart(survival.reset_index())
.mark_line(interpolate="step-after")
.encode(
x=alt.X("duration_days", axis=alt.Axis(title="Duration (days)")),
y=alt.Y(
"survival_proba",
axis=alt.Axis(title="Survival probability"),
scale=alt.Scale(zero=False),
),
)
)
fig.save("rocketretail_survival.svg")
fig = (
alt.Chart(survival.reset_index())
.mark_line(interpolate="step-after")
.encode(
x=alt.X("duration_days", axis=alt.Axis(title="Duration (days)")),
y=alt.Y("conversion_pct", axis=alt.Axis(title="Conversion rate (%)")),
)
)
fig.save("rocketretail_conversion.svg")
## Aalen-Nelson cumulative hazard estimate
survival["cumulative_hazard"] = (survival["events"] / survival["at_risk"]).cumsum()
fig = (
alt.Chart(survival.reset_index())
.mark_line(interpolate="step-after")
.encode(
x=alt.X("duration_days", axis=alt.Axis(title="Duration (days)")),
y=alt.Y("cumulative_hazard", axis=alt.Axis(title="Cumulative hazard")),
)
)
fig.save("rocketretail_hazard.svg")
| [
"numpy.ceil",
"altair.Scale",
"altair.Axis",
"pandas.read_parquet"
] | [((287, 341), 'pandas.read_parquet', 'pd.read_parquet', (['"""data/retailrocket_durations.parquet"""'], {}), "('data/retailrocket_durations.parquet')\n", (302, 341), True, 'import pandas as pd\n'), ((440, 475), 'numpy.ceil', 'np.ceil', (["durations['duration_days']"], {}), "(durations['duration_days'])\n", (447, 475), True, 'import numpy as np\n'), ((1954, 1987), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Duration (days)"""'}), "(title='Duration (days)')\n", (1962, 1987), True, 'import altair as alt\n'), ((2054, 2092), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Survival probability"""'}), "(title='Survival probability')\n", (2062, 2092), True, 'import altair as alt\n'), ((2112, 2133), 'altair.Scale', 'alt.Scale', ([], {'zero': '(False)'}), '(zero=False)\n', (2121, 2133), True, 'import altair as alt\n'), ((2332, 2365), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Duration (days)"""'}), "(title='Duration (days)')\n", (2340, 2365), True, 'import altair as alt\n'), ((2407, 2444), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Conversion rate (%)"""'}), "(title='Conversion rate (%)')\n", (2415, 2444), True, 'import altair as alt\n'), ((2764, 2797), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Duration (days)"""'}), "(title='Duration (days)')\n", (2772, 2797), True, 'import altair as alt\n'), ((2842, 2877), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Cumulative hazard"""'}), "(title='Cumulative hazard')\n", (2850, 2877), True, 'import altair as alt\n')] |
import logging
import math
import random
from itertools import count
from typing import Type
import numpy as np
import torch
from collections import namedtuple
from gym import Env, Space, spaces
from torch import nn
from torch import optim
from tqdm import tqdm
from utilities import *
# Module-level logger named after this module
logger = logging.getLogger(__name__)

# One experience tuple; next_state is None for terminal transitions
# (ReplayMemory.get_batch masks on `s is not None`).
Transition = namedtuple('Transition',
                        ('state', 'action', 'next_state', 'reward'))
class DQNHyperparameters(object):
    """Container for the hyperparameters of a DQN run."""

    def __init__(self, batch_size=128, gamma=0.999, eps_start=0.9, eps_end=0.05, eps_decay=200, target_update=10,
                 memory_size=10000, data_type=torch.float32):
        """
        Args:
            batch_size: how many transitions to train a batch on
            gamma: discount applied to the q value of future states
            eps_start: initial probability of selecting a random action
            eps_end: final probability of selecting a random action
            eps_decay: rate of decay of epsilon
            target_update: update the target net after this many episodes
            memory_size: capacity of the replay memory
            data_type: torch dtype used for reward tensors
        """
        self.BATCH_SIZE = batch_size
        self.GAMMA = gamma
        self.EPS_START = eps_start
        self.EPS_END = eps_end
        self.EPS_DECAY = eps_decay
        self.TARGET_UPDATE = target_update
        self.memory_size = memory_size
        self.data_type = data_type

    def calc_eps(self, steps):
        """Return epsilon (random-action probability) after `steps` steps.

        Decays exponentially from EPS_START towards EPS_END with time
        constant EPS_DECAY.
        """
        decay = math.exp(-1. * steps / self.EPS_DECAY)
        return self.EPS_END + (self.EPS_START - self.EPS_END) * decay
class BatchData(object):
    """Bundles the tensors of one training batch.

    non_final_next_states may be None when every sampled transition was
    terminal; all other attributes are tensors.
    """

    def __init__(self, non_final_next_states, non_final_mask, state_batch, action_batch, reward_batch):
        self.non_final_next_states = non_final_next_states
        self.non_final_mask = non_final_mask
        self.state_batch = state_batch
        self.action_batch = action_batch
        self.reward_batch = reward_batch

    def send_to(self, device):
        """Move every tensor onto `device`; actions are cast to torch.long."""
        if self.non_final_next_states is not None:
            self.non_final_next_states = self.non_final_next_states.to(device)
        for name in ("non_final_mask", "state_batch", "reward_batch"):
            setattr(self, name, getattr(self, name).to(device))
        self.action_batch = self.action_batch.to(device, torch.long)
class ReplayMemory(object):
    """Fixed-size circular buffer of transitions for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity   # maximum number of stored transitions
        self.memory = []           # backing store; grows up to `capacity`
        self.position = 0          # index of the next slot to overwrite

    def push(self, transition):
        """Saves a transition, overwriting the oldest entry once full."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.position] = transition
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    # noinspection PyCallingNonCallable,PyUnresolvedReferences
    def get_batch(self, batch_size):
        """Sample a batch and collate it into a BatchData of torch tensors.

        BatchData.non_final_next_states is None when every sampled
        transition was terminal (next_state is None).
        """
        transitions = self.sample(batch_size)
        # Transpose the batch: list of Transitions -> Transition of lists
        # (see http://stackoverflow.com/a/19343/3343043 for an explanation).
        batch = Transition(*zip(*transitions))
        # Mask of entries whose next_state is non-terminal. torch.bool is the
        # supported dtype for mask indexing (uint8 masks are deprecated).
        non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                                batch.next_state)), dtype=torch.bool)
        # Fix: the original wrapped this in a bare `except:` that silently
        # turned *any* failure into None; only the legitimately-empty case
        # should yield None, so test for it explicitly instead.
        non_final_states = [s for s in batch.next_state if s is not None]
        if non_final_states:
            # TODO move device placement/format conversion into the nets
            non_final_next_states = torch.from_numpy(np.concatenate(non_final_states))
        else:
            non_final_next_states = None
        # concatenate all the training data and convert to torch
        state_batch = torch.from_numpy(np.concatenate(batch.state))
        action_batch = torch.from_numpy(np.concatenate(batch.action))
        reward_batch = torch.from_numpy(np.concatenate(batch.reward, axis=None))
        return BatchData(non_final_next_states, non_final_mask, state_batch, action_batch, reward_batch)

    def __len__(self):
        return len(self.memory)
def calc_channel_swap(input_space: Space):
    """Work out the observation shape in channels-first order.

    Returns (shape, swap_function), where swap_function converts a raw
    observation to that layout and adds the batch dimension via
    to_batch_shape. Non-image spaces are passed through unchanged.
    """
    if isinstance(input_space, spaces.Discrete):
        shape = (input_space.n,)
    else:
        shape = input_space.shape
    if len(shape) == 3:
        # Assume the smallest of the three dimensions is the channel axis.
        channel_index = np.argmin(shape).item()
        logger.debug("Shape: %s Channel index: %s" % (shape, channel_index))
        if channel_index != 0:
            # Channel axis is not first: report the reordered shape and swap
            # each observation to match.
            def swap_function(x):
                return to_batch_shape(np.moveaxis(x, channel_index, 0))
            shape = (shape[channel_index],) + shape[:channel_index] + shape[channel_index + 1:]
            return shape, swap_function

    def swap_function(x):
        # No reordering required; just add the batch dimension.
        return to_batch_shape(x)
    return shape, swap_function
class DQNNet(nn.Module):
    """Abstract base class for DQN function approximators.

    Records the observation and action shapes so subclasses can size their
    layers; subclasses must implement forward().
    """

    def __init__(self, input_shape, action_shape, **kwargs):
        super(DQNNet, self).__init__()
        # kept for subclasses; not used by the base class itself
        self.action_shape = action_shape
        self.input_shape = input_shape

    def forward(self, *x):
        # Q-value computation is subclass responsibility.
        raise NotImplementedError
class DQNTrainingState(object):
    """Owns everything needed to train a DQN on a gym-style env: the policy
    and target networks, the replay memory, the optimizer, and the
    epsilon-greedy exploration schedule from `hyper`."""

    def __init__(self, model_class: Type[DQNNet], env: Env, device,
                 hyper: DQNHyperparameters, optimizer_type=optim.RMSprop, frameskip=4, verbose=False):
        self.env = env
        self.device = device
        self.hyper = hyper
        self.memory = ReplayMemory(hyper.memory_size)
        self.training_steps = 0   # total optimization steps; drives eps decay
        self.verbose = verbose
        # channels-first shape plus the function that reorders observations
        input_shape, swap_function = calc_channel_swap(env.observation_space)
        action_space = env.action_space
        self.swap_function = swap_function
        if isinstance(action_space, spaces.Discrete):
            action_shape = (action_space.n,)
        else:
            action_shape = action_space.shape
        # one network output per action
        num_actions = np.prod(action_shape)
        # NOTE(review): frameskip is stored but never used in this class --
        # confirm whether it is consumed elsewhere or dead.
        self.frameskip = frameskip
        self.model_class = model_class
        # policy_net is trained; target_net is a periodically-synced copy
        self.policy_net = model_class(input_shape, num_actions)
        self.target_net = model_class(input_shape, num_actions)
        self.policy_net.to(device)
        self.target_net.to(device)
        self.optimizer = optimizer_type(self.policy_net.parameters(), lr=0.001)

    def update_target(self):
        """Copy the policy network's weights into the target network."""
        self.target_net.load_state_dict(self.policy_net.state_dict())

    # noinspection PyCallingNonCallable
    def optimize_model(self) -> float:
        """Run one optimization step on a sampled batch.

        Returns the mean absolute TD error of the batch (0.0 if the memory
        does not yet hold a full batch).
        """
        # if we haven't sampled enough to make a full batch, skip optimization for now
        if len(self.memory) < self.hyper.BATCH_SIZE:
            return 0.0
        batch_data = self.memory.get_batch(self.hyper.BATCH_SIZE)
        batch_data.send_to(self.device)
        # Compute V(s_{t+1}) for all next states; terminal states keep value 0.
        next_state_values = torch.zeros(self.hyper.BATCH_SIZE, device=self.device)
        if batch_data.non_final_next_states is not None:
            with torch.no_grad():
                next_state_values[batch_data.non_final_mask] = \
                    self.target_net(batch_data.non_final_next_states).max(1)[0].detach()
        # Expected Q values: r + gamma * V(s')
        adjusted_next_values = next_state_values * self.hyper.GAMMA
        expected_state_action_values = adjusted_next_values + batch_data.reward_batch
        # zero the gradient before doing the current state
        self.optimizer.zero_grad()
        # Compute Q(s_t, a): the model computes Q(s_t), then gather() selects
        # the column of the action actually taken (backprop flows through it).
        state_values = self.policy_net(batch_data.state_batch)
        state_action_values = state_values.gather(1, batch_data.action_batch)
        esav = expected_state_action_values.view(self.hyper.BATCH_SIZE, 1)
        # Compute Huber loss
        # loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
        # TD error, fed directly to backward() as the output gradient rather
        # than backpropagating a scalar loss
        loss = state_action_values - esav
        # loss = loss.data.unsqueeze(1)
        # Optimize the model
        # loss.backward()
        state_action_values.backward(loss)
        # for param in self.policy_net.parameters():
        #     param.grad.data.clamp_(-1, 1)
        self.optimizer.step()
        abs_loss = loss.abs()
        mean_loss = abs_loss.mean().detach()
        if math.isnan(mean_loss):
            logger.error("Loss is NaN, something terrible happened")
        return mean_loss

    def _reset_env(self):
        """Reset the env and return the (channel-swapped) first observation."""
        observation = self.env.reset()
        if observation is not None:
            observation = self.swap_function(observation)
        return observation

    # noinspection PyCallingNonCallable
    def _take_action(self, action):
        """Step the env with `action`; returns
        (observation, reward, done, misc, action) with the observation
        channel-swapped and the reward wrapped as a (1, 1) tensor."""
        observation, reward, done, misc = self.env.step(action.item())
        if observation is not None:
            observation = self.swap_function(observation)
        reward = torch.tensor([[reward]], dtype=self.hyper.data_type)
        return (observation, reward, done, misc, action)

    def _take_random_action(self):
        """Sample a uniformly random action from the action space."""
        action = self.env.action_space.sample()
        action = np.array([[action]])
        return self._take_action(action)

    def _take_net_action(self, state):
        """Take the greedy action according to the policy network."""
        with torch.no_grad():
            formatted_screen = torch.from_numpy(state).to(self.device)
            actions = self.policy_net(formatted_screen)
            # argmax over the action dimension, shaped (1, 1) on the CPU
            action = actions.max(1)[1].view(1, 1).cpu()
            return self._take_action(action)

    # noinspection PyCallingNonCallable
    def run_episode(self, test=False) -> (float, int):
        """Play one episode; returns (mean loss per step, total reward).

        With test=True the policy acts greedily and nothing is stored or
        trained.
        """
        # Initialize the environment and state
        observation = self._reset_env()
        total_loss = 0
        total_reward = 0
        # do each step
        for t in count(1):
            last_observation = observation
            # Epsilon-greedy action selection (always greedy in test mode)
            sample = random.random()
            eps = self.hyper.calc_eps(self.training_steps)
            if sample > eps or test:
                observation, reward, done, misc, action = self._take_net_action(last_observation)
            else:
                observation, reward, done, misc, action = self._take_random_action()
            self.env.render("human")
            total_reward += reward
            # NOTE(review): `reward` is already a (1, 1) tensor from
            # _take_action, so this re-wraps a tensor in a nested list --
            # confirm the intended shape of stored rewards.
            reward = torch.tensor([[reward]], dtype=self.hyper.data_type)
            # if this is not testing the network, store the data and train
            if not test:
                # Store the transition in memory
                self.memory.push((last_observation, action, observation, reward))
                # Perform one step of the optimization (on the target network)
                total_loss += self.optimize_model()
                self.training_steps += 1
            if done:
                # calculate average loss and return it
                return (total_loss / t, total_reward)

    def train_for_episodes(self, episodes):
        """Train for `episodes` episodes, syncing the target network every
        hyper.TARGET_UPDATE episodes and reporting progress via tqdm."""
        with tqdm(range(episodes), total=episodes, unit="episode") as t:
            for episode in t:
                episode_loss, total_reward = self.run_episode()
                string = 'loss: %f, %f' % (episode_loss, total_reward)
                t.set_postfix_str(string)
                # Update the target network
                if episode % self.hyper.TARGET_UPDATE == 0:
                    logger.debug("Updating target weights")
                    self.update_target()
                # log_tensors(logger)

    def save_model(self, path):
        """Write the policy network's weights to `path`."""
        logger.info("Saving Model to %s" % path)
        torch.save(self.policy_net.state_dict(), path)

    def load_model(self, path):
        """Load weights from `path` into the policy net and sync the target."""
        logger.info("Loading Model from %s" % path)
        self.policy_net.load_state_dict(torch.load(path))
        self.update_target()
| [
"math.isnan",
"math.exp",
"numpy.moveaxis",
"random.sample",
"torch.load",
"logging.getLogger",
"numpy.prod",
"itertools.count",
"random.random",
"numpy.argmin",
"numpy.array",
"collections.namedtuple",
"torch.zeros",
"torch.tensor",
"torch.no_grad",
"numpy.concatenate",
"torch.from_... | [((299, 326), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (316, 326), False, 'import logging\n'), ((341, 410), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('state', 'action', 'next_state', 'reward')"], {}), "('Transition', ('state', 'action', 'next_state', 'reward'))\n", (351, 410), False, 'from collections import namedtuple\n'), ((3020, 3058), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (3033, 3058), False, 'import random\n'), ((4280, 4307), 'numpy.concatenate', 'np.concatenate', (['batch.state'], {}), '(batch.state)\n', (4294, 4307), True, 'import numpy as np\n'), ((4330, 4362), 'torch.from_numpy', 'torch.from_numpy', (['state_batch_np'], {}), '(state_batch_np)\n', (4346, 4362), False, 'import torch\n'), ((4559, 4598), 'numpy.concatenate', 'np.concatenate', (['batch.reward'], {'axis': 'None'}), '(batch.reward, axis=None)\n', (4573, 4598), True, 'import numpy as np\n'), ((4622, 4655), 'torch.from_numpy', 'torch.from_numpy', (['reward_batch_np'], {}), '(reward_batch_np)\n', (4638, 4655), False, 'import torch\n'), ((6660, 6681), 'numpy.prod', 'np.prod', (['action_shape'], {}), '(action_shape)\n', (6667, 6681), True, 'import numpy as np\n'), ((7609, 7663), 'torch.zeros', 'torch.zeros', (['self.hyper.BATCH_SIZE'], {'device': 'self.device'}), '(self.hyper.BATCH_SIZE, device=self.device)\n', (7620, 7663), False, 'import torch\n'), ((9254, 9275), 'math.isnan', 'math.isnan', (['mean_loss'], {}), '(mean_loss)\n', (9264, 9275), False, 'import math\n'), ((9818, 9870), 'torch.tensor', 'torch.tensor', (['[[reward]]'], {'dtype': 'self.hyper.data_type'}), '([[reward]], dtype=self.hyper.data_type)\n', (9830, 9870), False, 'import torch\n'), ((10029, 10049), 'numpy.array', 'np.array', (['[[action]]'], {}), '([[action]])\n', (10037, 10049), True, 'import numpy as np\n'), ((10658, 10666), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (10663, 10666), 
False, 'from itertools import count\n'), ((3837, 3899), 'numpy.concatenate', 'np.concatenate', (['[s for s in batch.next_state if s is not None]'], {}), '([s for s in batch.next_state if s is not None])\n', (3851, 3899), True, 'import numpy as np\n'), ((4089, 4131), 'torch.from_numpy', 'torch.from_numpy', (['non_final_next_states_np'], {}), '(non_final_next_states_np)\n', (4105, 4131), False, 'import torch\n'), ((4503, 4531), 'numpy.concatenate', 'np.concatenate', (['batch.action'], {}), '(batch.action)\n', (4517, 4531), True, 'import numpy as np\n'), ((10144, 10159), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10157, 10159), False, 'import torch\n'), ((10776, 10791), 'random.random', 'random.random', ([], {}), '()\n', (10789, 10791), False, 'import random\n'), ((11183, 11235), 'torch.tensor', 'torch.tensor', (['[[reward]]'], {'dtype': 'self.hyper.data_type'}), '([[reward]], dtype=self.hyper.data_type)\n', (11195, 11235), False, 'import torch\n'), ((12606, 12622), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (12616, 12622), False, 'import torch\n'), ((1736, 1775), 'math.exp', 'math.exp', (['(-1.0 * steps / self.EPS_DECAY)'], {}), '(-1.0 * steps / self.EPS_DECAY)\n', (1744, 1775), False, 'import math\n'), ((5167, 5192), 'numpy.argmin', 'np.argmin', (['in_space_shape'], {}), '(in_space_shape)\n', (5176, 5192), True, 'import numpy as np\n'), ((7738, 7753), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7751, 7753), False, 'import torch\n'), ((5580, 5612), 'numpy.moveaxis', 'np.moveaxis', (['x', 'channel_index', '(0)'], {}), '(x, channel_index, 0)\n', (5591, 5612), True, 'import numpy as np\n'), ((10192, 10215), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (10208, 10215), False, 'import torch\n')] |
from os.path import abspath
from seisflows.tools.array import count_zeros
import numpy as np
class Base(object):
    """Abstract base class for line search algorithms.

    Bookkeeping variables
        x - step lengths tried in the current line search
        f - corresponding function values
        m - number of step lengths in the current line search
        n - number of model updates in the optimization problem
        gtg - dot product of the gradient with itself
        gtp - dot product of the gradient and the search direction

    Status codes (returned by calculate_step)
        status > 0  : finished
        status == 0 : not finished
        status < 0  : failed
    """

    def __init__(self,
                 step_count_max=10,
                 step_len_max=np.inf,
                 path=abspath('.')):
        # maximum number of trial steps per line search
        self.step_count_max = step_count_max
        # optional safeguard on the largest allowed step length
        self.step_len_max = step_len_max
        # log file for (step length, function value) pairs
        self.writer = Writer(path)
        self.clear_history()

    def clear_history(self):
        """Reset all stored line-search history."""
        self.func_vals = []
        self.step_lens = []
        self.gtg = []
        self.gtp = []

    def search_history(self, sort=True):
        """Collect the information needed to determine the search status and
        to calculate the next step length.

        Returns (x, f, gtg, gtp, i, j): the current search's step lengths and
        function values (optionally ordered by |step length|), the stored dot
        products, the trial count i, and the model-update counter j.
        """
        i = self.step_count
        j = count_zeros(self.step_lens) - 1
        k = len(self.step_lens)
        x = np.array(self.step_lens[k-i-1:k])
        f = np.array(self.func_vals[k-i-1:k])
        if sort:
            # order both arrays by the magnitude of the step length
            order = abs(x).argsort()
            f = f[order]
            x = x[order]
        return x, f, self.gtg, self.gtp, i, j

    def initialize(self, step_len, func_val, gtg, gtp):
        """Record the starting point of a new line search and return the
        first trial step."""
        self.step_count = 0
        self._record(step_len, func_val)
        self.gtg.append(gtg)
        self.gtp.append(gtp)
        return self.calculate_step()

    def update(self, step_len, func_val):
        """Record one trial point and return the next trial step."""
        self.step_count += 1
        self._record(step_len, func_val)
        return self.calculate_step()

    def _record(self, step_len, func_val):
        # append the trial to the history and echo it to the log file
        self.step_lens.append(step_len)
        self.func_vals.append(func_val)
        self.writer(step_len, func_val)

    def calculate_step(self):
        """Compute the next trial step length; implemented by subclasses."""
        raise NotImplementedError('Must be implemented by subclass')
class Writer(object):
    """Utility for writing one or more columns to a text file.

    Each call appends one row. Rows that begin a new iteration (the very
    first call, or any call with steplen == 0.) carry the iteration number;
    other rows are written as indented continuation rows.
    """

    def __init__(self, path='./output.optim'):
        self.iter = 0                 # number of iteration rows written so far
        self.filename = abspath(path)
        self.write_header()

    def __call__(self, steplen=None, funcval=None):
        """Append one (steplen, funcval) row to the log file."""
        with open(self.filename, 'a') as fileobj:
            # The two original branches were identical; a new iteration row is
            # written on the first call ever or whenever steplen == 0.
            if self.iter == 0 or steplen == 0.:
                self.iter += 1
                fmt = '%10d %10.3e %10.3e\n'
                fileobj.write(fmt % (self.iter, steplen, funcval))
            else:
                # continuation row, indented past the ITER column
                fmt = 12*' ' + '%10.3e %10.3e\n'
                fileobj.write(fmt % (steplen, funcval))

    def write_header(self):
        """Write the column headers and the underline row (called once from
        __init__)."""
        headers = ['ITER', 'STEPLEN', 'MISFIT']
        with open(self.filename, 'a') as fileobj:
            for header in headers:
                # (a dead `fmt = '%%%ds ' % 10` assignment was removed here)
                fileobj.write('%10s  ' % header)
            fileobj.write('\n')
            for _ in range(len(headers)):
                fileobj.write('%10s  ' % (10*'='))
            fileobj.write('\n')

    def newline(self):
        """Append a blank line to the log file."""
        with open(self.filename, 'a') as fileobj:
            fileobj.write('\n')
| [
"os.path.abspath",
"seisflows.tools.array.count_zeros",
"numpy.array"
] | [((790, 802), 'os.path.abspath', 'abspath', (['"""."""'], {}), "('.')\n", (797, 802), False, 'from os.path import abspath\n'), ((1626, 1663), 'numpy.array', 'np.array', (['self.step_lens[k - i - 1:k]'], {}), '(self.step_lens[k - i - 1:k])\n', (1634, 1663), True, 'import numpy as np\n'), ((1672, 1709), 'numpy.array', 'np.array', (['self.func_vals[k - i - 1:k]'], {}), '(self.func_vals[k - i - 1:k])\n', (1680, 1709), True, 'import numpy as np\n'), ((2712, 2725), 'os.path.abspath', 'abspath', (['path'], {}), '(path)\n', (2719, 2725), False, 'from os.path import abspath\n'), ((1552, 1579), 'seisflows.tools.array.count_zeros', 'count_zeros', (['self.step_lens'], {}), '(self.step_lens)\n', (1563, 1579), False, 'from seisflows.tools.array import count_zeros\n')] |
# External Dependencies
import numpy as np
import numba
@numba.jit(nopython = True)
def rolling_window(a, window):
    # Build a (len(a) - window + 1, window) view of `a` in which each row is
    # one sliding window, via stride tricks -- no data is copied, so the
    # result aliases `a` and must be treated as read-only.
    # NOTE(review): np.lib.stride_tricks.as_strided does not appear to be
    # supported by numba's nopython mode -- confirm this actually compiles,
    # or drop the @numba.jit decorator.
    shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
# Rolling standard deviation
def stdev(arr, period, ddof = 1):
    """Rolling standard deviation over windows of `period` samples,
    left-padded with NaN so the output matches the input length.
    ddof=1 gives the corrected sample standard deviation."""
    windows = rolling_window(arr, period)
    deviations = windows.std(axis = 1, ddof = ddof)
    pad = np.array([np.nan] * (period - 1))
    return np.concatenate((pad, deviations))
# Simple Moving Average
def SMA(arr, period):
    """Simple moving average of `arr` over `period` samples, left-padded
    with NaN so the output matches the input length."""
    means = rolling_window(arr, period).mean(axis = 1)
    pad = np.array([np.nan] * (period - 1))
    return np.concatenate((pad, means))
# Bollinger Bands
def Boll(dfHigh, dfLow, dfClose, period = 20, stdev_dist = 2):
    """Bollinger Bands on the typical price (H+L+C)/3.

    Returns (upper, middle, lower): middle is the `period` SMA, upper/lower
    sit `stdev_dist` rolling standard deviations (ddof=0) above/below it.

    Bug fix: the rolling std result was previously assigned to the name
    `stdev`, which makes `stdev` function-local and causes the call
    `stdev(typicalPrice, ...)` to raise UnboundLocalError.  The local is
    now named `sd` so the module-level stdev() is reachable again.
    """
    typicalPrice = (dfHigh + dfClose + dfLow) / 3
    middle = SMA(typicalPrice, period)
    sd = stdev(typicalPrice, period, ddof = 0)
    upper = middle + (stdev_dist * sd)
    lower = middle - (stdev_dist * sd)
    return ((upper, middle, lower))
# Simple Average - With numba optimization
@numba.jit(nopython = True)
def SMA_numba(arr, period):
    """Simple moving average via an explicit loop (numba-friendly), NaN-padded."""
    n = len(arr)
    window_sums = np.zeros(n)
    for j in range(period - 1, n):
        window_sums[j] = np.sum(arr[j - period + 1 : j + 1])
    window_sums[:period - 1] = np.nan
    return window_sums / period
# Exponential Moving Average
@numba.jit(nopython = True)
def EMA(arr, period, alpha = False):
    """Exponential moving average of `arr`.

    `alpha` semantics (kept for backward compatibility):
      * alpha == True  -> smoothing factor 1 / period (Wilder-style)
      * alpha == False -> smoothing factor 2 / (period + 1)
      * numeric alpha  -> used directly as the smoothing factor
    The first `period - 1` entries are NaN; entry period-1 seeds the EMA
    with the plain mean of the first `period` values.

    Bug fix: the recursion previously looped over
    range(period, len(exp_weights) + 1), indexing one element past the end
    of both exp_weights and arr on the final iteration (an IndexError in
    pure Python, silent out-of-bounds access under numba).  The upper
    bound is now len(exp_weights).
    """
    if (alpha == True):
        alpha = 1 / period
    elif (alpha == False):
        alpha = 2 / (period + 1)
    exp_weights = np.zeros(len(arr))
    exp_weights[period - 1] = np.mean(arr[:period])
    for i in range(period, len(exp_weights)):
        exp_weights[i] = exp_weights[i - 1] * (1 - alpha) + alpha * arr[i]
    exp_weights[:period - 1] = np.nan
    return exp_weights
# Average True Range
@numba.jit(nopython = True)
def ATR(dfHigh, dfLow, dfClose, period = 14):
    """Average True Range: Wilder-smoothed EMA (alpha=1/period) of the true range.

    The result is one element shorter than the inputs because the true range
    needs the previous bar's close.
    """
    range_hl = (dfHigh - dfLow)[1:]
    range_hc = np.abs(dfHigh[1:] - dfClose[:-1])
    range_lc = np.abs(dfLow[1:] - dfClose[:-1])
    true_range = np.zeros(len(range_hl))
    for j in range(len(range_hl)):
        true_range[j] = max(range_hl[j], range_hc[j], range_lc[j])
    return EMA(true_range, period, alpha = True)
# Moving Average Convergence Divergence
@numba.jit(nopython = True)
def MACD(arr, fast_period=12, slow_period=26, signal_period=9, percent = True):
    """Return (macd, signal, histogram).

    percent=True rescales the MACD line as a percentage of the slow EMA.
    The signal line keeps the MACD's leading NaN padding.
    """
    slow_ema = EMA(arr, slow_period)
    macd = EMA(arr, fast_period) - slow_ema
    if (percent):
        macd *= 100 / slow_ema
    signal = np.concatenate((macd[:slow_period - 1],
                            EMA(macd[slow_period - 1:], signal_period)))
    hist = macd - signal
    return ((macd, signal, hist))
# Relative Strength Index
@numba.jit(nopython = True)
def RSI(arr, period = 21):
    """RSI with Wilder smoothing (alpha = 1/period); one leading NaN restores
    the element dropped by np.diff."""
    moves = np.diff(arr)
    gains = np.copy(moves)
    losses = np.copy(moves)
    gains[gains < 0] = 0
    losses[losses > 0] = 0
    # Exponentially weighted mean with centre of mass = period - 1,
    # i.e. smoothing factor alpha = 1 / period
    smooth = 1 / (period)
    avg_gain = EMA(gains, period, alpha = smooth)
    avg_loss = np.abs(EMA(losses, period, alpha = smooth))
    rsi = 100 - (100 / (1 + avg_gain / avg_loss))
    return np.concatenate((np.array([np.nan]), rsi))
# Commodity Channel Index
@numba.jit()
def CCI(dfHigh, dfLow, dfClose, period = 20, scaling = 0.015):
    '''
    Similar to TTR package in R: the central tendency measure is the mean.
    '''
    typicalPrice = (dfHigh + dfClose + dfLow) / 3
    windows = rolling_window(typicalPrice, period)
    centre = SMA_numba(typicalPrice, period)[period - 1:]
    abs_dev = np.abs((windows.T - centre).T)
    mad = np.zeros(len(abs_dev))
    # once numba can reduce along an axis, this loop can become a one-liner
    for j in range(len(windows)):
        mad[j] = np.mean(abs_dev[j])
    cci = (typicalPrice[period - 1:] - centre) / (mad * scaling)
    return np.concatenate((np.array([np.nan] * (period - 1)), cci))
# Stochastic Momentum Indicator
@numba.jit(nopython = True)
def SMI(dfHigh, dfLow, dfClose, period = 13, fast_period = 2, slow_period = 25, signal_period = 9):
    """Stochastic Momentum Index and its signal line; returns (SMI, signal).

    For each `period`-bar window: centre is the midpoint of the window's
    high/low range and HL_diff is the range itself.  Both are then double
    EMA-smoothed (slow then fast) and combined into 100 * num / (range/2).
    The leading `slow_period - 1` SMI entries (and `slow_period` signal
    entries) are NaN-padded to keep output length equal to input length.
    """
    rolling_high = rolling_window(dfHigh,period)
    rolling_low = rolling_window(dfLow,period)
    centre = np.zeros(len(rolling_high))
    HL_diff = np.copy(centre)
    for i in range(len(centre)):
        # midpoint and width of the high/low channel over the window
        centre[i] = (np.max(rolling_high[i]) + np.min(rolling_low[i])) / 2
        HL_diff[i] = np.max(rolling_high[i]) - np.min(rolling_low[i])
    # pad the first period-1 slots with single-bar midpoints/ranges so the
    # arrays line up with the full-length close series
    centre = np.concatenate((((dfHigh[:period-1] + dfLow[:period-1]) / 2),centre))
    HL_diff = np.concatenate(((dfHigh[:period-1] - dfLow[:period-1]),HL_diff))
    c_diff = dfClose - centre
    # double smoothing: slow EMA first, then a fast EMA of the non-NaN tail
    num1 = EMA(c_diff,slow_period)
    den1 = EMA(HL_diff,slow_period)
    num2 = EMA(num1[slow_period-1:], fast_period)
    den2 = EMA(den1[slow_period-1:], fast_period) / 2
    SMI = 100 * (num2 / den2)
    signal = EMA(SMI[1:], signal_period)
    # restore leading NaN padding lost to the tail slicing above
    SMI = np.concatenate((np.array([np.nan] * (slow_period-1)), SMI))
    signal = np.concatenate((np.array([np.nan] * (slow_period)), signal))
    return((SMI,signal))
# Rate of Change
@numba.jit(nopython = True)
def ROC(arr, period = 1, continuous_compound = True):
    """Percentage rate of change over `period` steps, NaN-padded at the front.

    continuous_compound=True (identity check, matching the original) gives
    the log return * 100; anything else gives the simple percentage change.
    """
    if (continuous_compound is True):
        changes = np.log(arr[period:] / arr[:-period]) * 100
    else:
        changes = (arr[period:] - arr[:-period]) / arr[:-period] * 100
    return np.concatenate((np.array([np.nan] * period), changes))
| [
"numpy.abs",
"numpy.sum",
"numpy.copy",
"numpy.log",
"numpy.max",
"numpy.mean",
"numpy.lib.stride_tricks.as_strided",
"numba.jit",
"numpy.diff",
"numpy.array",
"numpy.min",
"numpy.concatenate"
] | [((58, 82), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (67, 82), False, 'import numba\n'), ((1158, 1182), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1167, 1182), False, 'import numba\n'), ((1465, 1489), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1474, 1489), False, 'import numba\n'), ((1934, 1958), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1943, 1958), False, 'import numba\n'), ((2514, 2538), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2523, 2538), False, 'import numba\n'), ((2952, 2976), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2961, 2976), False, 'import numba\n'), ((3527, 3538), 'numba.jit', 'numba.jit', ([], {}), '()\n', (3536, 3538), False, 'import numba\n'), ((4398, 4422), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4407, 4422), False, 'import numba\n'), ((5499, 5523), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5508, 5523), False, 'import numba\n'), ((232, 296), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['a'], {'shape': 'shape', 'strides': 'strides'}), '(a, shape=shape, strides=strides)\n', (263, 296), True, 'import numpy as np\n'), ((1707, 1728), 'numpy.mean', 'np.mean', (['arr[:period]'], {}), '(arr[:period])\n', (1714, 1728), True, 'import numpy as np\n'), ((2077, 2110), 'numpy.abs', 'np.abs', (['(dfHigh[1:] - dfClose[:-1])'], {}), '(dfHigh[1:] - dfClose[:-1])\n', (2083, 2110), True, 'import numpy as np\n'), ((2138, 2170), 'numpy.abs', 'np.abs', (['(dfLow[1:] - dfClose[:-1])'], {}), '(dfLow[1:] - dfClose[:-1])\n', (2144, 2170), True, 'import numpy as np\n'), ((3018, 3030), 'numpy.diff', 'np.diff', (['arr'], {}), '(arr)\n', (3025, 3030), True, 'import numpy as np\n'), ((3890, 3942), 'numpy.abs', 'np.abs', (['(rolling_windows.T - 
central_tendency_arr).T'], {}), '((rolling_windows.T - central_tendency_arr).T)\n', (3896, 3942), True, 'import numpy as np\n'), ((4677, 4692), 'numpy.copy', 'np.copy', (['centre'], {}), '(centre)\n', (4684, 4692), True, 'import numpy as np\n'), ((4885, 4957), 'numpy.concatenate', 'np.concatenate', (['((dfHigh[:period - 1] + dfLow[:period - 1]) / 2, centre)'], {}), '(((dfHigh[:period - 1] + dfLow[:period - 1]) / 2, centre))\n', (4899, 4957), True, 'import numpy as np\n'), ((4969, 5036), 'numpy.concatenate', 'np.concatenate', (['(dfHigh[:period - 1] - dfLow[:period - 1], HL_diff)'], {}), '((dfHigh[:period - 1] - dfLow[:period - 1], HL_diff))\n', (4983, 5036), True, 'import numpy as np\n'), ((1358, 1372), 'numpy.sum', 'np.sum', (['window'], {}), '(window)\n', (1364, 1372), True, 'import numpy as np\n'), ((3046, 3060), 'numpy.copy', 'np.copy', (['delta'], {}), '(delta)\n', (3053, 3060), True, 'import numpy as np\n'), ((3062, 3076), 'numpy.copy', 'np.copy', (['delta'], {}), '(delta)\n', (3069, 3076), True, 'import numpy as np\n'), ((4148, 4177), 'numpy.mean', 'np.mean', (['abs_deviation_arr[i]'], {}), '(abs_deviation_arr[i])\n', (4155, 4177), True, 'import numpy as np\n'), ((505, 538), 'numpy.array', 'np.array', (['([np.nan] * (period - 1))'], {}), '([np.nan] * (period - 1))\n', (513, 538), True, 'import numpy as np\n'), ((709, 742), 'numpy.array', 'np.array', (['([np.nan] * (period - 1))'], {}), '([np.nan] * (period - 1))\n', (717, 742), True, 'import numpy as np\n'), ((3451, 3469), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (3459, 3469), True, 'import numpy as np\n'), ((4303, 4336), 'numpy.array', 'np.array', (['([np.nan] * (period - 1))'], {}), '([np.nan] * (period - 1))\n', (4311, 4336), True, 'import numpy as np\n'), ((4822, 4845), 'numpy.max', 'np.max', (['rolling_high[i]'], {}), '(rolling_high[i])\n', (4828, 4845), True, 'import numpy as np\n'), ((4848, 4870), 'numpy.min', 'np.min', (['rolling_low[i]'], {}), '(rolling_low[i])\n', (4854, 
4870), True, 'import numpy as np\n'), ((5337, 5375), 'numpy.array', 'np.array', (['([np.nan] * (slow_period - 1))'], {}), '([np.nan] * (slow_period - 1))\n', (5345, 5375), True, 'import numpy as np\n'), ((5410, 5442), 'numpy.array', 'np.array', (['([np.nan] * slow_period)'], {}), '([np.nan] * slow_period)\n', (5418, 5442), True, 'import numpy as np\n'), ((5635, 5671), 'numpy.log', 'np.log', (['(arr[period:] / arr[:-period])'], {}), '(arr[period:] / arr[:-period])\n', (5641, 5671), True, 'import numpy as np\n'), ((5786, 5813), 'numpy.array', 'np.array', (['([np.nan] * period)'], {}), '([np.nan] * period)\n', (5794, 5813), True, 'import numpy as np\n'), ((4747, 4770), 'numpy.max', 'np.max', (['rolling_high[i]'], {}), '(rolling_high[i])\n', (4753, 4770), True, 'import numpy as np\n'), ((4773, 4795), 'numpy.min', 'np.min', (['rolling_low[i]'], {}), '(rolling_low[i])\n', (4779, 4795), True, 'import numpy as np\n')] |
import os
import csv
import cv2
import keras
import sklearn
import numpy as np
from scipy import ndimage
from random import shuffle
import matplotlib.pyplot as plt
from keras.models import Sequential, Model
from sklearn.model_selection import train_test_split
from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout
rate = 0.25        # dropout rate shared by all Dropout layers
batch_size = 64
# load every CSV data point (camera image paths + steering angle)
with open('../CarND-Behavioral-Cloning-P3/data/driving_log.csv') as csvfile:
    samples = [row for row in csv.reader(csvfile)]
shuffle(samples)  # randomize the loaded data points
# split: 80% training set, 20% validation set
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
# Generator that feeds the model batches of data points on demand, keeping
# memory usage low.  Each CSV row yields six samples: centre, left and right
# camera images plus a horizontally flipped copy of each.  The left/right
# images carry a +/-0.4 steering correction so the model learns to steer
# back toward the centre of the track.
def generator(samples, batch_size):
    """Yield (images, angles) batches forever, reshuffling samples each pass."""
    num_samples = len(samples)
    corrections = [0.0, 0.4, -0.4]  # centre, left, right camera adjustments
    while True:  # keras fit_generator expects a never-ending generator
        shuffle(samples)
        for start in range(0, num_samples, batch_size):
            images = []
            angles = []
            for row in samples[start:start + batch_size]:
                for cam in range(3):
                    fname = '../CarND-Behavioral-Cloning-P3/data/IMG/' + row[cam].split('/')[-1]
                    image = ndimage.imread(fname)  # read in image as RGB
                    angle = float(row[3]) + corrections[cam]
                    images.append(image)
                    angles.append(angle)
                    images.append(cv2.flip(image, 1))  # mirror about the vertical axis
                    angles.append(angle * -1.0)        # mirrored image needs the opposite steer
            yield sklearn.utils.shuffle(np.array(images), np.array(angles))
# Build the training and validation batch streams.
train_generator = generator(train_samples, batch_size)
validation_generator = generator(validation_samples, batch_size)

# Convolutional network: per-pixel normalization, cropping, three strided
# 5x5 conv+dropout stages, two 3x3 conv layers, then dense layers narrowing
# to a single steering-angle output.
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160,320,3)))  # centre pixels around zero
model.add(Cropping2D(cropping=((70,25), (0,0))))  # trim image to the road section
for n_filters in (24, 36, 48):
    model.add(Conv2D(n_filters,(5,5), strides=(2,2), activation='relu'))
    model.add(Dropout(rate))  # dropout reduces overfitting to the data
model.add(Conv2D(64,(3,3), activation='relu'))
model.add(Conv2D(64,(3,3), activation='relu'))
model.add(Dropout(rate))
model.add(Flatten())
for n_units in (100, 50, 10, 1):
    model.add(Dense(n_units))  # final Dense(1) emits the steering angle
model.compile(loss='mse', optimizer='adam')

# Train from the generators; history_object records per-epoch train/val loss.
history_object = model.fit_generator(
    train_generator,
    steps_per_epoch=len(train_samples)//batch_size,
    validation_data=validation_generator,
    validation_steps=len(validation_samples)//batch_size,
    epochs=5, verbose=1)
model.save('model.h5')

### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
exit()
| [
"matplotlib.pyplot.title",
"csv.reader",
"keras.layers.Cropping2D",
"random.shuffle",
"sklearn.model_selection.train_test_split",
"keras.layers.Flatten",
"matplotlib.pyplot.show",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"keras.layers.Conv2D",
"matplotlib.pyplot.ylabel",
"cv2.flip",... | [((599, 615), 'random.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (606, 615), False, 'from random import shuffle\n'), ((748, 788), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (764, 788), False, 'from sklearn.model_selection import train_test_split\n'), ((2713, 2725), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2723, 2725), False, 'from keras.models import Sequential, Model\n'), ((4200, 4240), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (4208, 4240), True, 'import matplotlib.pyplot as plt\n'), ((4241, 4285), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (4249, 4285), True, 'import matplotlib.pyplot as plt\n'), ((4286, 4328), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (4295, 4328), True, 'import matplotlib.pyplot as plt\n'), ((4329, 4366), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (4339, 4366), True, 'import matplotlib.pyplot as plt\n'), ((4367, 4386), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4377, 4386), True, 'import matplotlib.pyplot as plt\n'), ((4387, 4452), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (4397, 4452), True, 'import matplotlib.pyplot as plt\n'), ((4453, 4463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4461, 4463), True, 'import matplotlib.pyplot as plt\n'), ((534, 553), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (544, 553), False, 'import csv\n'), ((2816, 2876), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 
255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (2822, 2876), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((2929, 2968), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (2939, 2968), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3053, 3106), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5, 5)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(24, (5, 5), strides=(2, 2), activation='relu')\n", (3059, 3106), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3207, 3220), 'keras.layers.Dropout', 'Dropout', (['rate'], {}), '(rate)\n', (3214, 3220), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3233, 3286), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5, 5)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(36, (5, 5), strides=(2, 2), activation='relu')\n", (3239, 3286), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3295, 3308), 'keras.layers.Dropout', 'Dropout', (['rate'], {}), '(rate)\n', (3302, 3308), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3320, 3373), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5, 5)'], {'strides': '(2, 2)', 'activation': '"""relu"""'}), "(48, (5, 5), strides=(2, 2), activation='relu')\n", (3326, 3373), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3382, 3395), 'keras.layers.Dropout', 'Dropout', (['rate'], {}), '(rate)\n', (3389, 3395), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3407, 3444), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (3413, 3444), False, 'from 
keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3454, 3491), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (3460, 3491), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3501, 3514), 'keras.layers.Dropout', 'Dropout', (['rate'], {}), '(rate)\n', (3508, 3514), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3526, 3535), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3533, 3535), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3547, 3557), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (3552, 3557), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3570, 3579), 'keras.layers.Dense', 'Dense', (['(50)'], {}), '(50)\n', (3575, 3579), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3591, 3600), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (3596, 3600), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((3612, 3620), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3617, 3620), False, 'from keras.layers import Flatten, Dense, Lambda, Conv2D, Cropping2D, Dropout\n'), ((1324, 1340), 'random.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (1331, 1340), False, 'from random import shuffle\n'), ((2318, 2338), 'numpy.array', 'np.array', (['clr_images'], {}), '(clr_images)\n', (2326, 2338), True, 'import numpy as np\n'), ((2361, 2383), 'numpy.array', 'np.array', (['steer_angles'], {}), '(steer_angles)\n', (2369, 2383), True, 'import numpy as np\n'), ((2402, 2441), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (2423, 2441), False, 'import sklearn\n'), ((1850, 1871), 'scipy.ndimage.imread', 'ndimage.imread', (['fname'], 
{}), '(fname)\n', (1864, 1871), False, 'from scipy import ndimage\n'), ((2103, 2121), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2111, 2121), False, 'import cv2\n')] |
from __future__ import division, print_function
import numpy as np
import struct
import time
from . import minnow
from . import minh
from . import bit
def create_int_record(fname, text, xs):
    """Write the ragged int64 arrays in `xs`, plus `text`, to a minnow file.

    Layout: (magic, block count) header, text header, one fixed-size int64
    group per array, then a trailing header with each array's length.
    """
    out = minnow.create(fname)
    out.header(struct.pack("<qq", 0xdeadbeef, len(xs)))
    out.header(text)
    for group in xs:
        out.fixed_size_group(np.int64, len(group))
        out.data(group)
    out.header(np.array([len(group) for group in xs], dtype=np.int64))
    out.close()
def create_group_record(fname, ix, fx, text):
    """Write `ix` as four equal int32 groups and `fx` as two equal float64
    groups, followed by `text`, to a minnow file."""
    out = minnow.create(fname)
    ni = len(ix) // 4
    nf = len(fx) // 2
    out.header(struct.pack("<qq", 4, ni))
    out.fixed_size_group(np.int32, ni)
    for b in range(4):
        out.data(ix[b * ni: (b + 1) * ni])
    out.header(struct.pack("<qq", 2, nf))
    out.fixed_size_group(np.float64, nf)
    for b in range(2):
        out.data(fx[b * nf: (b + 1) * nf])
    out.header(text)
    out.close()
def read_int_record(fname):
    """Read back a file written by create_int_record; returns (text, xs)."""
    rec = minnow.open(fname)
    magic, blocks = rec.header(0, "qq")
    text = rec.header(1, "s")
    lengths = rec.header(2, np.int64)  # per-block lengths; part of the format
    xs = [rec.data(i) for i in range(blocks)]
    return text, xs
def read_group_record(fname):
    """Read back a file written by create_group_record; returns (xi, xf, text)."""
    rec = minnow.open(fname)
    bi, ni = rec.header(0, "qq")
    bf, nf = rec.header(1, "qq")
    text = rec.header(2, "s")
    xi = np.zeros(ni * bi, dtype=np.int64)
    xf = np.zeros(nf * bf, dtype=np.float32)
    for b in range(bi):
        xi[b * ni: (b + 1) * ni] = rec.data(b)
    for b in range(bf):
        xf[b * nf: (b + 1) * nf] = rec.data(b + bi)
    return xi, xf, text
def test_int_record():
    """Round-trip a ragged list of int64 arrays through an int record file."""
    fname = "../test_files/int_record.test"
    text = b"I am a cat and I like to meow."
    xs = [np.array([1, 2, 3, 4], dtype=np.int64),
          np.array([5], dtype=np.int64),
          np.array([6, 7, 8, 9], dtype=np.int64),
          np.array([10, 11, 12], dtype=np.int64)]
    create_int_record(fname, text, xs)
    rd_text, rd_xs = read_int_record(fname)
    assert (rd_text == text.decode("ascii"))
    for expected, got in zip(xs, rd_xs):
        assert (np.all(expected == got))
def test_group_record():
    """Round-trip int32 and float64 groups through a group record file."""
    path = "../test_files/group_files.test"
    int_data = np.arange(20, dtype=np.int32)
    float_data = np.array(np.arange(10) / 10.0, dtype=np.float64)
    note = b"I'm a caaaat"
    create_group_record(path, int_data, float_data, note)
    rd_ints, rd_floats, rd_note = read_group_record(path)
    assert (note.decode("ascii") == rd_note)
    assert (np.all(rd_ints == int_data))
    assert (np.all(np.abs(float_data - rd_floats) < 1e-6))
def test_bit_array():
    """bit.array / bit.from_array must round-trip at every width from 7 to 63 bits.

    Fix: the original used the np.int alias, which was deprecated in
    NumPy 1.20 and removed in NumPy 1.24, making this test crash on modern
    NumPy.  np.int64 is used instead.
    """
    bits = np.arange(7, 64, dtype=np.int64)
    x = np.arange(100, dtype=np.int64)
    for b in bits:
        arr = bit.array(b, x)
        y = bit.from_array(arr, b, len(x))
        assert (np.all(x == y))
def bench_bit_array():
    """Print bit.array packing throughput (MB/s) for several bit widths."""
    x = np.arange(100000, dtype=np.uint64) % 100
    reps = 1000
    for bits in [8, 11, 16, 23, 32, 45, 64]:
        start = time.time()
        for _ in range(reps):
            bit.array(bits, x)
        per_call = (time.time() - start) / reps
        print("%d bits: %g MB/s" % (bits, (8 * len(x) / per_call) / 1e6))
def write_bit_int_record(fname, x1, x2, x3):
    """Write three int groups: x1, every row of x2 (equal lengths), then x3."""
    out = minnow.create(fname)
    out.int_group(len(x1))
    out.data(x1)
    out.header(struct.pack("<q", len(x2)))
    out.int_group(len(x2[0]))
    for row in x2:
        out.data(row)
    out.int_group(len(x3))
    out.data(x3)
    out.close()
def read_bit_int_record(fname):
    """Read back a file written by write_bit_int_record; returns (x1, x2, x3)."""
    rec = minnow.open(fname)
    n_rows = rec.header(0, np.int64)
    x1 = rec.data(0)
    x2 = [rec.data(1 + i) for i in range(n_rows)]
    x3 = rec.data(n_rows + 1)
    rec.close()
    return x1, x2, x3
def test_bit_int_record():
    """Round-trip variable-width int groups through a bit-int record file."""
    fname = "../test_files/bit_int_record.test"
    x1 = np.array([100, 101, 102, 104], dtype=int)
    x2 = [np.array([1024, 1024, 1024]), np.array([0, 1023, 500])]
    x3 = np.array([-1000000, -500000])
    write_bit_int_record(fname, x1, x2, x3)
    rd_x1, rd_x2, rd_x3 = read_bit_int_record(fname)
    assert (np.all(x1 == rd_x1))
    for row, rd_row in zip(x2, rd_x2):
        assert (np.all(rd_row == row))
    assert (np.all(rd_x3 == x3))
def create_q_float_record(fname, limit, dx1, dx2, x1, x2):
    """Write two sets of quantized float groups (quanta dx1 and dx2)."""
    out = minnow.create(fname)
    out.header(struct.pack("<ffffqq", dx1, dx2, limit[0], limit[1],
                           len(x1), len(x2)))
    out.float_group(len(x1[0]), limit, dx1)
    for group in x1:
        out.data(group)
    out.float_group(len(x2[0]), limit, dx2)
    for group in x2:
        out.data(group)
    out.close()
def open_q_float_record(fname):
    """Read back a file written by create_q_float_record; returns (x1, x2)."""
    rec = minnow.open(fname)
    dx1, dx2, low, high, n1, n2 = rec.header(0, "ffffqq")
    x1 = [rec.data(i) for i in range(n1)]
    x2 = [rec.data(n1 + i) for i in range(n2)]
    rec.close()
    return x1, x2
def test_q_float_record():
    """Round-trip quantized floats; values must agree within their quantum."""
    fname = "../test_files/q_float_record.test"
    limit = (-50, 100)
    dx1, dx2 = 1.0, 10.0
    x1 = [
        np.array([-50, 0, 50, 49]),
        np.array([25, 25, 25, 25])
    ]
    x2 = [
        np.array([-50, 0, 50, 49, 0]),
        np.array([1, 2, 3, 4, 5]),
        np.array([0, 20, 0, 20, 0])
    ]
    create_q_float_record(fname, limit, dx1, dx2, x1, x2)
    rd_x1, rd_x2 = open_q_float_record(fname)
    # each set must match element-wise to within its own quantization step
    for expected, got, quantum in ((x1, rd_x1, dx1), (x2, rd_x2, dx2)):
        assert (len(expected) == len(got))
        for e_row, g_row in zip(expected, got):
            assert (len(e_row) == len(g_row))
            assert (np.all(eps_eq(e_row, g_row, quantum)))
def eps_eq(x, y, eps): return (x + eps > y) & (x - eps < y)
def test_periodic_min():
    """bit.periodic_min must find the minimum on a periodic [0, pixels) axis.

    Fix: the result was previously bound to the name `min`, shadowing the
    Python builtin inside this function; renamed to `found`.
    """
    pixels = 20
    data = [
        [0, 1, 2, 3],
        [10, 11, 12, 13],
        [18, 19, 0, 1],
        [1, 0, 19, 18],
        [1, 19, 18, 0],
    ]
    mins = [0, 10, 18, 18, 18]
    for i in range(len(data)):
        found = bit.periodic_min(data[i], pixels)
        assert (found == mins[i])
def test_minh_reader_writer():
    """End-to-end round trip: write two blocks through minh, read them back
    per-block and joined, and check metadata, geometry, columns and data."""
    fname = "../test_files/reader_writer_minh.test"
    names = ["int64", "float32", "int", "float", "log"]
    text = ("Cats are the best. Don't we love them?!@#$%^&*(),.." +
            "..[]{};':\"|\\/-=_+`~meow meow meow")
    cells, boundary, L = 4, 10.0, 100.0
    # one column per name; the last two are quantized (range/precision args),
    # and the final one stores values logarithmically
    columns = [
        minh.Column(minnow.int64_group),
        minh.Column(minnow.float32_group),
        minh.Column(minnow.int_group),
        minh.Column(minnow.float_group, 0, 100, 200, 1),
        minh.Column(minnow.float_group, 1, 10, 14, 0.01)
    ]
    block1 = [
        np.array([100, 200, 300, 400, 500], dtype=np.int64),
        np.array([150, 250, 350, 450, 550], dtype=np.float32),
        np.array([-30, -35, -25, -10, -20], dtype=np.int64),
        np.array([100, 200, 125, 150, 100], dtype=np.float32),
        np.array([1e10, 1e11, 1e11, 1e14, 3e13], dtype=np.float32)
    ]
    block2 = [
        np.array([125, 225, 325], dtype=np.int64),
        np.array([1750, 2750, 3750], dtype=np.float32),
        np.array([1000, 1000, 1000], dtype=np.int64),
        np.array([100, 100, 100], dtype=np.float32),
        np.array([1e14, 1e14, 1e14], dtype=np.float32)
    ]
    # column-wise concatenation of both blocks, for testing rd.read()
    joined_blocks = [np.hstack([block1[i], block2[i]]) for i in range(5)]
    blocks = [block1, block2]
    wr = minh.create(fname)
    wr.header(names, text, columns)
    wr.geometry(L, boundary, cells)
    for block in blocks: wr.block(block)
    wr.close()
    # third entry exercises the whole-file read path below
    blocks += [joined_blocks]
    rd = minh.open(fname)
    assert (rd.names == names)
    assert (rd.text == text)
    assert (rd.blocks == 2)
    assert (rd.length == 8)
    assert (rd.cells == 4)
    assert (rd.boundary == 10.0)
    assert (rd.L == 100.0)
    for i in range(rd.blocks):
        assert (rd.block_lengths[i] == [5, 3][i])
    for i in range(len(columns)):
        assert (column_eq(columns[i], rd.columns[i]))
    for b in range(len(blocks)):
        block = blocks[b]
        if b < 2:
            # per-block read
            rd_int64, rd_float32, rd_int, rd_float, rd_log = rd.block(b, names)
        else:
            # whole-file read must equal the joined blocks
            rd_int64, rd_float32, rd_int, rd_float, rd_log = rd.read(names)
        # exact columns compare exactly; quantized columns only to their quantum
        assert (len(rd_int64) == len(block[0]))
        assert (np.all(rd_int64 == block[0]))
        assert (len(rd_float32) == len(block[1]))
        assert (np.all(eps_eq(rd_float32, block[1], 1e-3)))
        assert (len(rd_int) == len(block[2]))
        assert (np.all(rd_int == block[2]))
        assert (len(rd_float) == len(block[3]))
        assert (np.all(eps_eq(rd_float, block[3], 1)))
        assert (len(rd_log) == len(block[4]))
        assert (np.all(eps_eq(np.log10(rd_log), np.log10(block[4]), 0.01)))
def column_eq(c1, c2):
    """True when two minh columns agree on type, log flag, and (to within
    1e-5) their dx, low and high fields."""
    if c1.type != c2.type or c1.log != c2.log:
        return False
    return (eps_eq(c1.dx, c2.dx, 1e-5) and
            eps_eq(c1.low, c2.low, 1e-5) and
            eps_eq(c1.high, c2.high, 1e-5))
def test_origin():
    """Cell/block widths and origins, including periodic wrap at the box edge."""
    fname = "../test_files/reader_writer_minh.test"
    rd = minh.open(fname)
    rd.boundary, rd.cells, rd.L = 10.0, 5, 100.0
    assert (rd.cell_width() == 20.0)
    assert (rd.block_width() == 40.0)
    checks = [
        (rd.cell_origin, 25 + 5 + 1, [20, 20, 20]),
        (rd.block_origin, 25 + 5 + 1, [10, 10, 10]),
        (rd.cell_origin, 0, [0, 0, 0]),
        (rd.block_origin, 0, [90, 90, 90]),  # wraps below zero, periodic box
    ]
    for origin_of, idx, expected in checks:
        assert (np.all(origin_of(idx) == np.array(expected)))
def test_normalize():
    """normalize_coords must shift coordinates to [0, width] relative to the
    given origin, wrapping periodically across the box boundary."""
    L = 100.0
    # Case 1: mixed per-axis origins; all three axes normalize identically.
    origin = np.array([0, 50, 90], dtype=float)
    coord = np.array([[-1, 99, 5, 15, 21],
                      [49, 49, 55, 65, 71],
                      [89, 89, 95, 5, 11]], dtype=float)
    norm = minh.normalize_coords(coord, L, origin, 20.0)
    expected = np.array([0, 0, 5, 15, 20])
    for axis in range(3):
        assert (np.all(expected == norm[axis]))
    # Cases 2 and 3: uniform origin on all axes, so checking axis 0 suffices.
    cases = [
        (70.0, 90.0, [90, 0, 20, 30, 50, 60], [0, 10, 30, 40, 60, 70]),
        (70.0, 40.0, [40, 50, 60, 70, 80, 90, 0, 10],
         [0, 10, 20, 30, 40, 50, 60, 70]),
    ]
    for width, origin_val, xs, want in cases:
        origin = np.array([origin_val] * 3, dtype=float)
        coord = np.array([xs, xs, xs], dtype=float)
        norm = minh.normalize_coords(coord, L, origin, width)
        assert (np.all(norm[0] == np.array(want, dtype=float)))
if __name__ == "__main__":
    # run the whole suite in order; bench_bit_array stays opt-in (slow)
    for case in (test_int_record, test_group_record, test_bit_array,
                 test_periodic_min, test_bit_int_record, test_q_float_record,
                 test_minh_reader_writer, test_origin, test_normalize):
        case()
    # bench_bit_array()
| [
"numpy.abs",
"numpy.zeros",
"struct.pack",
"time.time",
"numpy.hstack",
"numpy.array",
"numpy.arange",
"numpy.log10",
"numpy.all"
] | [((1298, 1331), 'numpy.zeros', 'np.zeros', (['(ni * bi)'], {'dtype': 'np.int64'}), '(ni * bi, dtype=np.int64)\n', (1306, 1331), True, 'import numpy as np\n'), ((1341, 1376), 'numpy.zeros', 'np.zeros', (['(nf * bf)'], {'dtype': 'np.float32'}), '(nf * bf, dtype=np.float32)\n', (1349, 1376), True, 'import numpy as np\n'), ((2133, 2162), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'np.int32'}), '(20, dtype=np.int32)\n', (2142, 2162), True, 'import numpy as np\n'), ((2405, 2424), 'numpy.all', 'np.all', (['(rd_ix == ix)'], {}), '(rd_ix == ix)\n', (2411, 2424), True, 'import numpy as np\n'), ((2508, 2538), 'numpy.arange', 'np.arange', (['(7)', '(64)'], {'dtype': 'np.int'}), '(7, 64, dtype=np.int)\n', (2517, 2538), True, 'import numpy as np\n'), ((2548, 2576), 'numpy.arange', 'np.arange', (['(100)'], {'dtype': 'np.int'}), '(100, dtype=np.int)\n', (2557, 2576), True, 'import numpy as np\n'), ((3667, 3708), 'numpy.array', 'np.array', (['[100, 101, 102, 104]'], {'dtype': 'int'}), '([100, 101, 102, 104], dtype=int)\n', (3675, 3708), True, 'import numpy as np\n'), ((3784, 3813), 'numpy.array', 'np.array', (['[-1000000, -500000]'], {}), '([-1000000, -500000])\n', (3792, 3813), True, 'import numpy as np\n'), ((3925, 3944), 'numpy.all', 'np.all', (['(x1 == rd_x1)'], {}), '(x1 == rd_x1)\n', (3931, 3944), True, 'import numpy as np\n'), ((3958, 3983), 'numpy.all', 'np.all', (['(rd_x2[0] == x2[0])'], {}), '(rd_x2[0] == x2[0])\n', (3964, 3983), True, 'import numpy as np\n'), ((3997, 4022), 'numpy.all', 'np.all', (['(rd_x2[1] == x2[1])'], {}), '(rd_x2[1] == x2[1])\n', (4003, 4022), True, 'import numpy as np\n'), ((4036, 4055), 'numpy.all', 'np.all', (['(rd_x3 == x3)'], {}), '(rd_x3 == x3)\n', (4042, 4055), True, 'import numpy as np\n'), ((9401, 9443), 'numpy.array', 'np.array', (['[-1, 99, 5, 15, 21]'], {'dtype': 'float'}), '([-1, 99, 5, 15, 21], dtype=float)\n', (9409, 9443), True, 'import numpy as np\n'), ((9452, 9495), 'numpy.array', 'np.array', (['[49, 49, 55, 65, 71]'], 
{'dtype': 'float'}), '([49, 49, 55, 65, 71], dtype=float)\n', (9460, 9495), True, 'import numpy as np\n'), ((9504, 9546), 'numpy.array', 'np.array', (['[89, 89, 95, 5, 11]'], {'dtype': 'float'}), '([89, 89, 95, 5, 11], dtype=float)\n', (9512, 9546), True, 'import numpy as np\n'), ((9559, 9591), 'numpy.array', 'np.array', (['[x, y, z]'], {'dtype': 'float'}), '([x, y, z], dtype=float)\n', (9567, 9591), True, 'import numpy as np\n'), ((9661, 9688), 'numpy.array', 'np.array', (['[0, 0, 5, 15, 20]'], {}), '([0, 0, 5, 15, 20])\n', (9669, 9688), True, 'import numpy as np\n'), ((9834, 9880), 'numpy.array', 'np.array', (['[90, 0, 20, 30, 50, 60]'], {'dtype': 'float'}), '([90, 0, 20, 30, 50, 60], dtype=float)\n', (9842, 9880), True, 'import numpy as np\n'), ((9889, 9935), 'numpy.array', 'np.array', (['[90, 0, 20, 30, 50, 60]'], {'dtype': 'float'}), '([90, 0, 20, 30, 50, 60], dtype=float)\n', (9897, 9935), True, 'import numpy as np\n'), ((9944, 9990), 'numpy.array', 'np.array', (['[90, 0, 20, 30, 50, 60]'], {'dtype': 'float'}), '([90, 0, 20, 30, 50, 60], dtype=float)\n', (9952, 9990), True, 'import numpy as np\n'), ((10004, 10023), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (10012, 10023), True, 'import numpy as np\n'), ((10242, 10296), 'numpy.array', 'np.array', (['[40, 50, 60, 70, 80, 90, 0, 10]'], {'dtype': 'float'}), '([40, 50, 60, 70, 80, 90, 0, 10], dtype=float)\n', (10250, 10296), True, 'import numpy as np\n'), ((10305, 10359), 'numpy.array', 'np.array', (['[40, 50, 60, 70, 80, 90, 0, 10]'], {'dtype': 'float'}), '([40, 50, 60, 70, 80, 90, 0, 10], dtype=float)\n', (10313, 10359), True, 'import numpy as np\n'), ((10368, 10422), 'numpy.array', 'np.array', (['[40, 50, 60, 70, 80, 90, 0, 10]'], {'dtype': 'float'}), '([40, 50, 60, 70, 80, 90, 0, 10], dtype=float)\n', (10376, 10422), True, 'import numpy as np\n'), ((10436, 10455), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (10444, 10455), True, 'import numpy as np\n'), ((605, 630), 
'struct.pack', 'struct.pack', (['"""<qq"""', '(4)', 'ni'], {}), "('<qq', 4, ni)\n", (616, 630), False, 'import struct\n'), ((747, 772), 'struct.pack', 'struct.pack', (['"""<qq"""', '(2)', 'nf'], {}), "('<qq', 2, nf)\n", (758, 772), False, 'import struct\n'), ((1624, 1662), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {'dtype': 'np.int64'}), '([1, 2, 3, 4], dtype=np.int64)\n', (1632, 1662), True, 'import numpy as np\n'), ((1674, 1703), 'numpy.array', 'np.array', (['[5]'], {'dtype': 'np.int64'}), '([5], dtype=np.int64)\n', (1682, 1703), True, 'import numpy as np\n'), ((1715, 1753), 'numpy.array', 'np.array', (['[6, 7, 8, 9]'], {'dtype': 'np.int64'}), '([6, 7, 8, 9], dtype=np.int64)\n', (1723, 1753), True, 'import numpy as np\n'), ((1765, 1803), 'numpy.array', 'np.array', (['[10, 11, 12]'], {'dtype': 'np.int64'}), '([10, 11, 12], dtype=np.int64)\n', (1773, 1803), True, 'import numpy as np\n'), ((2025, 2050), 'numpy.all', 'np.all', (['(xs[i] == rd_xs[i])'], {}), '(xs[i] == rd_xs[i])\n', (2031, 2050), True, 'import numpy as np\n'), ((2686, 2700), 'numpy.all', 'np.all', (['(x == y)'], {}), '(x == y)\n', (2692, 2700), True, 'import numpy as np\n'), ((2735, 2769), 'numpy.arange', 'np.arange', (['(100000)'], {'dtype': 'np.uint64'}), '(100000, dtype=np.uint64)\n', (2744, 2769), True, 'import numpy as np\n'), ((2848, 2859), 'time.time', 'time.time', ([], {}), '()\n', (2857, 2859), False, 'import time\n'), ((2931, 2942), 'time.time', 'time.time', ([], {}), '()\n', (2940, 2942), False, 'import time\n'), ((3719, 3747), 'numpy.array', 'np.array', (['[1024, 1024, 1024]'], {}), '([1024, 1024, 1024])\n', (3727, 3747), True, 'import numpy as np\n'), ((3749, 3773), 'numpy.array', 'np.array', (['[0, 1023, 500]'], {}), '([0, 1023, 500])\n', (3757, 3773), True, 'import numpy as np\n'), ((4916, 4942), 'numpy.array', 'np.array', (['[-50, 0, 50, 49]'], {}), '([-50, 0, 50, 49])\n', (4924, 4942), True, 'import numpy as np\n'), ((4952, 4978), 'numpy.array', 'np.array', (['[25, 25, 25, 25]'], 
{}), '([25, 25, 25, 25])\n', (4960, 4978), True, 'import numpy as np\n'), ((5004, 5033), 'numpy.array', 'np.array', (['[-50, 0, 50, 49, 0]'], {}), '([-50, 0, 50, 49, 0])\n', (5012, 5033), True, 'import numpy as np\n'), ((5043, 5068), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (5051, 5068), True, 'import numpy as np\n'), ((5078, 5105), 'numpy.array', 'np.array', (['[0, 20, 0, 20, 0]'], {}), '([0, 20, 0, 20, 0])\n', (5086, 5105), True, 'import numpy as np\n'), ((6515, 6566), 'numpy.array', 'np.array', (['[100, 200, 300, 400, 500]'], {'dtype': 'np.int64'}), '([100, 200, 300, 400, 500], dtype=np.int64)\n', (6523, 6566), True, 'import numpy as np\n'), ((6576, 6629), 'numpy.array', 'np.array', (['[150, 250, 350, 450, 550]'], {'dtype': 'np.float32'}), '([150, 250, 350, 450, 550], dtype=np.float32)\n', (6584, 6629), True, 'import numpy as np\n'), ((6639, 6690), 'numpy.array', 'np.array', (['[-30, -35, -25, -10, -20]'], {'dtype': 'np.int64'}), '([-30, -35, -25, -10, -20], dtype=np.int64)\n', (6647, 6690), True, 'import numpy as np\n'), ((6700, 6753), 'numpy.array', 'np.array', (['[100, 200, 125, 150, 100]'], {'dtype': 'np.float32'}), '([100, 200, 125, 150, 100], dtype=np.float32)\n', (6708, 6753), True, 'import numpy as np\n'), ((6763, 6879), 'numpy.array', 'np.array', (['[10000000000.0, 100000000000.0, 100000000000.0, 100000000000000.0, \n 30000000000000.0]'], {'dtype': 'np.float32'}), '([10000000000.0, 100000000000.0, 100000000000.0, 100000000000000.0,\n 30000000000000.0], dtype=np.float32)\n', (6771, 6879), True, 'import numpy as np\n'), ((6852, 6893), 'numpy.array', 'np.array', (['[125, 225, 325]'], {'dtype': 'np.int64'}), '([125, 225, 325], dtype=np.int64)\n', (6860, 6893), True, 'import numpy as np\n'), ((6903, 6949), 'numpy.array', 'np.array', (['[1750, 2750, 3750]'], {'dtype': 'np.float32'}), '([1750, 2750, 3750], dtype=np.float32)\n', (6911, 6949), True, 'import numpy as np\n'), ((6959, 7003), 'numpy.array', 'np.array', (['[1000, 
1000, 1000]'], {'dtype': 'np.int64'}), '([1000, 1000, 1000], dtype=np.int64)\n', (6967, 7003), True, 'import numpy as np\n'), ((7013, 7056), 'numpy.array', 'np.array', (['[100, 100, 100]'], {'dtype': 'np.float32'}), '([100, 100, 100], dtype=np.float32)\n', (7021, 7056), True, 'import numpy as np\n'), ((7066, 7156), 'numpy.array', 'np.array', (['[100000000000000.0, 100000000000000.0, 100000000000000.0]'], {'dtype': 'np.float32'}), '([100000000000000.0, 100000000000000.0, 100000000000000.0], dtype=\n np.float32)\n', (7074, 7156), True, 'import numpy as np\n'), ((7141, 7174), 'numpy.hstack', 'np.hstack', (['[block1[i], block2[i]]'], {}), '([block1[i], block2[i]])\n', (7150, 7174), True, 'import numpy as np\n'), ((8125, 8153), 'numpy.all', 'np.all', (['(rd_int64 == block[0])'], {}), '(rd_int64 == block[0])\n', (8131, 8153), True, 'import numpy as np\n'), ((8329, 8355), 'numpy.all', 'np.all', (['(rd_int == block[2])'], {}), '(rd_int == block[2])\n', (8335, 8355), True, 'import numpy as np\n'), ((9358, 9392), 'numpy.array', 'np.array', (['[0, 50, 90]'], {'dtype': 'float'}), '([0, 50, 90], dtype=float)\n', (9366, 9392), True, 'import numpy as np\n'), ((9729, 9751), 'numpy.all', 'np.all', (['(out == norm[k])'], {}), '(out == norm[k])\n', (9735, 9751), True, 'import numpy as np\n'), ((9790, 9825), 'numpy.array', 'np.array', (['[90, 90, 90]'], {'dtype': 'float'}), '([90, 90, 90], dtype=float)\n', (9798, 9825), True, 'import numpy as np\n'), ((10198, 10233), 'numpy.array', 'np.array', (['[40, 40, 40]'], {'dtype': 'float'}), '([40, 40, 40], dtype=float)\n', (10206, 10233), True, 'import numpy as np\n'), ((2181, 2194), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2190, 2194), True, 'import numpy as np\n'), ((2445, 2463), 'numpy.abs', 'np.abs', (['(fx - rd_fx)'], {}), '(fx - rd_fx)\n', (2451, 2463), True, 'import numpy as np\n'), ((9070, 9092), 'numpy.array', 'np.array', (['[20, 20, 20]'], {}), '([20, 20, 20])\n', (9078, 9092), True, 'import numpy as np\n'), ((9145, 
9167), 'numpy.array', 'np.array', (['[10, 10, 10]'], {}), '([10, 10, 10])\n', (9153, 9167), True, 'import numpy as np\n'), ((9210, 9229), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (9218, 9229), True, 'import numpy as np\n'), ((9273, 9295), 'numpy.array', 'np.array', (['[90, 90, 90]'], {}), '([90, 90, 90])\n', (9281, 9295), True, 'import numpy as np\n'), ((10112, 10158), 'numpy.array', 'np.array', (['[0, 10, 30, 40, 60, 70]'], {'dtype': 'float'}), '([0, 10, 30, 40, 60, 70], dtype=float)\n', (10120, 10158), True, 'import numpy as np\n'), ((10544, 10598), 'numpy.array', 'np.array', (['[0, 10, 20, 30, 40, 50, 60, 70]'], {'dtype': 'float'}), '([0, 10, 20, 30, 40, 50, 60, 70], dtype=float)\n', (10552, 10598), True, 'import numpy as np\n'), ((8538, 8554), 'numpy.log10', 'np.log10', (['rd_log'], {}), '(rd_log)\n', (8546, 8554), True, 'import numpy as np\n'), ((8556, 8574), 'numpy.log10', 'np.log10', (['block[4]'], {}), '(block[4])\n', (8564, 8574), True, 'import numpy as np\n')] |
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()) + "/../")
import os
import errno
import subprocess
import urllib.request
import pandas as pd
import numpy as np
import random
import general_utils
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
from general_utils.workspace_utils import is_work_in_cluster
from csv_modules.csv_writer import write_in_file
from general_utils.download_utils import download_pdb
from general_utils.pdb_utils import get_chains_pdb
from pdb_to_mrc.pdb_2_mrc import pdb_to_mrc_chains
from process_mrc.generate import get_mrc_synthetic_segments_pdb
from general_utils.database_utils import get_chains_pdb_db
from general_utils.temp_utils import clean_work_dir, gen_dir, free_dir
def test_pdb(pdb_name):
  """Fetch chain data for one PDB entry and record it as OK (1) in pdb_list.csv.

  Any exception raised by the database fetch propagates to the caller, which
  uses it to record the entry as failed instead.
  """
  name = pdb_name.lower()
  print("\n\n\n Enter:" + name + "\n\n\n", flush=True)
  # Populating the chain database is the actual "test"; it raises on failure.
  get_chains_pdb_db(name)
  csv_path = os.path.dirname(__file__) + '/../files/pdb_list.csv'
  write_in_file(csv_path, ["Name", "OK"], [[name, 1]])
  print("\n\n\n Finish:" + name + "\n\n\n", flush=True)
def gen_update_pdb_list():
  """Refresh the local list of known PDB entries using MPI workers.

  Downloads the official PDB author index, extracts all 4-character PDB ids
  not yet present in files/pdb_list.csv, shuffles them, and processes each
  new id with test_pdb on worker processes. Ids that fail with a real error
  are recorded with OK=0; resource-exhaustion errors (disk full, quota
  exceeded, out of memory) are ignored so those ids can be retried later.
  """
  # MPI setup: only the root rank gets a non-None executor below.
  comm = MPI.COMM_WORLD
  size = comm.Get_size()

  with MPICommExecutor(comm, root=0, worker_size=size) as executor:
    if executor is not None:
      path_dir = gen_dir()
      # Pre-create the target file; failures are ignored because
      # urlretrieve recreates it anyway.
      try:
        with open(path_dir + "/data.txt", 'w') as fp:
          pass
      except OSError:
        pass
      urllib.request.urlretrieve("ftp://ftp.wwpdb.org/pub/pdb/derived_data/index/author.idx", path_dir + "/data.txt")
      with open(path_dir + "/data.txt") as f:
        content = f.readlines()

      # Index lines look like "<pdb_id> ; <authors>"; keep only 4-char ids.
      real_pdb_name = []
      for i in content:
        if i.find(" ;") != -1:
          split_data = i.split(" ;")
          if len(split_data[0]) == 4:
            real_pdb_name.append(split_data[0].lower())
      free_dir(path_dir)

      # Skip ids already processed in a previous run.
      know_pdb_path = os.path.dirname(__file__) + '/../files/pdb_list.csv'
      if os.path.exists(know_pdb_path):
        pd_data_frame = pd.read_csv(know_pdb_path)
        actual_pdb_list = pd_data_frame["Name"].tolist()
      else:
        actual_pdb_list = []

      real_pdb_name = np.unique(real_pdb_name, axis=0).tolist()
      print(len(actual_pdb_list))
      print(len(real_pdb_name))
      real_pdb_name = np.setdiff1d(real_pdb_name, actual_pdb_list).tolist()
      print("Todo do:", len(real_pdb_name), flush=True)
      # Shuffle so repeated partial runs do not always start on the same ids.
      random.shuffle(real_pdb_name)

      parallel_jobs = []
      for pdb_name in real_pdb_name:
        parallel_jobs.append([pdb_name, executor.submit(test_pdb, pdb_name)])

      for job_pdb, future in parallel_jobs:
        try:
          future.result()
        except (subprocess.CalledProcessError, ValueError) as e:
          print("Error agregado: ", job_pdb, e, flush=True)
          write_in_file(know_pdb_path, ["Name", "OK"], [[job_pdb, 0]])
        except Exception as e:
          print("Check", job_pdb, type(e), e, flush=True)
          # BUGFIX: the original condition ended in "... or errno.ENOMEM",
          # a truthy constant, so EVERY exception was ignored and failing ids
          # were never recorded. Only resource-exhaustion errors (retryable
          # on a later run) should be ignored. getattr guards exceptions
          # that carry no errno attribute at all.
          if getattr(e, "errno", None) in (errno.ENOSPC, errno.EDQUOT, errno.ENOMEM):
            print("Error ignorado: ", job_pdb, e, flush=True)
          else:
            print("Error agregado: ", job_pdb, e, flush=True)
            write_in_file(know_pdb_path, ["Name", "OK"], [[job_pdb, 0]])
if __name__ == '__main__':
  # On the cluster, route temp files to a dedicated scratch directory;
  # everywhere else fall back to the library default.
  if is_work_in_cluster():
    general_utils.temp_utils.global_temp_dir = "/work/lcastillo/temp_gen_create_list"
  else:
    general_utils.temp_utils.global_temp_dir = None
  clean_work_dir()
  gen_update_pdb_list()
| [
"mpi4py.futures.MPICommExecutor",
"csv_modules.csv_writer.write_in_file",
"pandas.read_csv",
"random.shuffle",
"os.path.dirname",
"os.path.exists",
"numpy.setdiff1d",
"pathlib.Path",
"general_utils.temp_utils.gen_dir",
"general_utils.database_utils.get_chains_pdb_db",
"general_utils.temp_utils.f... | [((881, 908), 'general_utils.database_utils.get_chains_pdb_db', 'get_chains_pdb_db', (['pdb_name'], {}), '(pdb_name)\n', (898, 908), False, 'from general_utils.database_utils import get_chains_pdb_db\n'), ((982, 1043), 'csv_modules.csv_writer.write_in_file', 'write_in_file', (['know_pdb_path', "['Name', 'OK']", '[[pdb_name, 1]]'], {}), "(know_pdb_path, ['Name', 'OK'], [[pdb_name, 1]])\n", (995, 1043), False, 'from csv_modules.csv_writer import write_in_file\n'), ((3469, 3489), 'general_utils.workspace_utils.is_work_in_cluster', 'is_work_in_cluster', ([], {}), '()\n', (3487, 3489), False, 'from general_utils.workspace_utils import is_work_in_cluster\n'), ((3751, 3767), 'general_utils.temp_utils.clean_work_dir', 'clean_work_dir', ([], {}), '()\n', (3765, 3767), False, 'from general_utils.temp_utils import clean_work_dir, gen_dir, free_dir\n'), ((927, 952), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (942, 952), False, 'import os\n'), ((1201, 1248), 'mpi4py.futures.MPICommExecutor', 'MPICommExecutor', (['comm'], {'root': '(0)', 'worker_size': 'size'}), '(comm, root=0, worker_size=size)\n', (1216, 1248), False, 'from mpi4py.futures import MPICommExecutor\n'), ((1309, 1318), 'general_utils.temp_utils.gen_dir', 'gen_dir', ([], {}), '()\n', (1316, 1318), False, 'from general_utils.temp_utils import clean_work_dir, gen_dir, free_dir\n'), ((1839, 1857), 'general_utils.temp_utils.free_dir', 'free_dir', (['path_dir'], {}), '(path_dir)\n', (1847, 1857), False, 'from general_utils.temp_utils import clean_work_dir, gen_dir, free_dir\n'), ((1977, 2006), 'os.path.exists', 'os.path.exists', (['know_pdb_path'], {}), '(know_pdb_path)\n', (1991, 2006), False, 'import os\n'), ((2459, 2488), 'random.shuffle', 'random.shuffle', (['real_pdb_name'], {}), '(real_pdb_name)\n', (2473, 2488), False, 'import random\n'), ((1914, 1939), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1929, 1939), False, 
'import os\n'), ((2032, 2058), 'pandas.read_csv', 'pd.read_csv', (['know_pdb_path'], {}), '(know_pdb_path)\n', (2043, 2058), True, 'import pandas as pd\n'), ((2180, 2212), 'numpy.unique', 'np.unique', (['real_pdb_name'], {'axis': '(0)'}), '(real_pdb_name, axis=0)\n', (2189, 2212), True, 'import numpy as np\n'), ((2310, 2354), 'numpy.setdiff1d', 'np.setdiff1d', (['real_pdb_name', 'actual_pdb_list'], {}), '(real_pdb_name, actual_pdb_list)\n', (2322, 2354), True, 'import numpy as np\n'), ((47, 69), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (59, 69), False, 'import pathlib\n'), ((2849, 2906), 'csv_modules.csv_writer.write_in_file', 'write_in_file', (['know_pdb_path', "['Name', 'OK']", '[[f[0], 0]]'], {}), "(know_pdb_path, ['Name', 'OK'], [[f[0], 0]])\n", (2862, 2906), False, 'from csv_modules.csv_writer import write_in_file\n'), ((3006, 3063), 'csv_modules.csv_writer.write_in_file', 'write_in_file', (['know_pdb_path', "['Name', 'OK']", '[[f[0], 0]]'], {}), "(know_pdb_path, ['Name', 'OK'], [[f[0], 0]])\n", (3019, 3063), False, 'from csv_modules.csv_writer import write_in_file\n'), ((3377, 3434), 'csv_modules.csv_writer.write_in_file', 'write_in_file', (['know_pdb_path', "['Name', 'OK']", '[[f[0], 0]]'], {}), "(know_pdb_path, ['Name', 'OK'], [[f[0], 0]])\n", (3390, 3434), False, 'from csv_modules.csv_writer import write_in_file\n')] |
import sys
import numpy as np
# Read all whitespace-separated integers from stdin in one shot.
I = np.array(sys.stdin.read().split(), dtype=np.int64)
# First two values: number of positions n and number of interval queries q.
n, q = I[:2]
# Remaining values are q (l, r) pairs; shift to 0-based indices.
l, r = I[2:].reshape(-1, 2).T - 1
def main(n=None, l=None, r=None):
    """Return, for each of ``n`` positions, the parity (0/1) of how many
    intervals ``[l[i], r[i]]`` (0-based, inclusive) cover it.

    Args:
        n: number of positions. When omitted, falls back to the
            module-level globals ``n``, ``l``, ``r`` parsed from stdin,
            preserving the original zero-argument call style.
        l: np.int64 array of interval start indices.
        r: np.int64 array of interval end indices.

    Returns:
        np.int64 array of length ``n`` with values in {0, 1}.
    """
    if n is None:
        g = globals()
        n, l, r = g["n"], g["l"], g["r"]
    # Difference array: +1 at each interval start, -1 one past each end;
    # np.add.at / np.subtract.at handle duplicate indices correctly.
    res = np.zeros(n + 1, dtype=np.int64)
    np.add.at(res, l, 1)
    np.subtract.at(res, r + 1, 1)
    # Prefix sums give per-position coverage counts; keep only the parity.
    res = np.cumsum(res)
    return res[:-1] % 2
if __name__ == "__main__":
    # Emit the parities as one contiguous string of 0/1 digits.
    print(*main(), sep="")
| [
"numpy.subtract.at",
"sys.stdin.read",
"numpy.zeros",
"numpy.cumsum",
"numpy.add.at"
] | [((168, 199), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {'dtype': 'np.int64'}), '(n + 1, dtype=np.int64)\n', (176, 199), True, 'import numpy as np\n'), ((205, 225), 'numpy.add.at', 'np.add.at', (['res', 'l', '(1)'], {}), '(res, l, 1)\n', (214, 225), True, 'import numpy as np\n'), ((231, 260), 'numpy.subtract.at', 'np.subtract.at', (['res', '(r + 1)', '(1)'], {}), '(res, r + 1, 1)\n', (245, 260), True, 'import numpy as np\n'), ((272, 286), 'numpy.cumsum', 'np.cumsum', (['res'], {}), '(res)\n', (281, 286), True, 'import numpy as np\n'), ((49, 65), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (63, 65), False, 'import sys\n')] |
import numpy as np
from torch import nn
import copy
from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict
from neuralpredictors.layers.readouts import (
MultipleFullGaussian2d,
MultiplePointPooled2d,
MultipleSpatialXFeatureLinear,
MultipleFullSXF,
)
from ..utility.data_helpers import unpack_data_info
from neuralpredictors.layers.cores import TransferLearningCore, SE2dCore
class Encoder(nn.Module):
    """Wrap a core and a readout into one model with a shifted ELU output.

    The final non-linearity is ``elu(x + offset) + 1``, mapping any real
    activation to a strictly positive response prediction.
    """

    def __init__(self, core, readout, elu_offset):
        super().__init__()
        self.core = core
        self.readout = readout
        self.offset = elu_offset

    def forward(self, *args, data_key=None, detach_core=False, **kwargs):
        features = self.core(args[0])
        if detach_core:
            # Stop gradients from flowing back into the core.
            features = features.detach()
        if "sample" in kwargs:
            out = self.readout(features, data_key=data_key, sample=kwargs["sample"])
        else:
            out = self.readout(features, data_key=data_key)
        return nn.functional.elu(out + self.offset) + 1

    def regularizer(self, data_key, detach_core=False):
        # A detached core contributes with weight 0 (its term is still
        # evaluated, matching the original multiply-by-flag semantics).
        core_weight = 0 if detach_core else 1
        return core_weight * self.core.regularizer() + self.readout.regularizer(data_key)
def se2d_fullgaussian2d(
    dataloaders,
    seed,
    elu_offset=0,
    data_info=None,
    transfer_state_dict=None,
    # core args
    hidden_channels=64,
    input_kern=9,
    hidden_kern=7,
    layers=4,
    gamma_input=6.3831,
    skip=0,
    bias=False,
    final_nonlinearity=True,
    momentum=0.9,
    pad_input=False,
    batch_norm=True,
    hidden_dilation=1,
    laplace_padding=None,
    input_regularizer="LaplaceL2norm",
    stack=-1,
    se_reduction=32,
    n_se_blocks=0,
    depth_separable=True,
    linear=False,
    # readout args
    init_mu_range=0.3,
    init_sigma=0.1,
    readout_bias=True,
    gamma_readout=0.0076,
    gauss_type="full",
    grid_mean_predictor={
        "type": "cortex",
        "input_dimensions": 2,
        "hidden_layers": 0,
        "hidden_features": 30,
        "final_tanh": True,
    },
    share_features=False,
    share_grid=False,
    share_transform=False,
    init_noise=1e-3,
    init_transform_scale=0.2,
):
    """
    Model class of a SE2dCore and a Gaussian readout (MultipleFullGaussian2d).
    Args:
        dataloaders: a dictionary of dataloaders, one loader per session
            in the format {'data_key': dataloader object, .. }
        seed: random seed
        elu_offset: Offset for the output non-linearity [F.elu(x + self.offset)]
        grid_mean_predictor: if not None, needs to be a dictionary of the form
            {
            'type': 'cortex',
            'input_dimensions': 2,
            'hidden_layers':0,
            'hidden_features':20,
            'final_tanh': False,
            }
            In that case the datasets need to have the property `neurons.cell_motor_coordinates`
        share_features: whether to share features between readouts. This requires that the datasets
            have the properties `neurons.multi_match_id` which are used for matching. Every dataset
            has to have all these ids and cannot have any more.
        share_grid: whether to share the grid between neurons. This requires that the datasets
            have the properties `neurons.multi_match_id` which are used for matching. Every dataset
            has to have all these ids and cannot have any more.
        share_transform: whether to share the transform from the grid_mean_predictor between neurons. This requires that the datasets
            have the properties `neurons.multi_match_id` which are used for matching. Every dataset
            has to have all these ids and cannot have any more.
        init_noise: noise for initialization of weights
        init_transform_scale: scale of the weights of the randomly initialized grid_mean_predictor network
        all other args: See Documentation of SE2dCore in neuralpredictors.layers.cores and
            FullGaussian2d in neuralpredictors.layers.readouts
    Returns: An initialized model which consists of model.core and model.readout
    """

    # transfer_state_dict is only consumed by the bayesian hypersearch
    # pipeline; nothing is loaded here.
    if transfer_state_dict is not None:
        print(
            "Transfer state_dict given. This will only have an effect in the bayesian hypersearch. See: TrainedModelBayesianTransfer "
        )
    # Either take pre-computed shapes from data_info, or derive neuron counts
    # and input shapes by inspecting one batch per session.
    if data_info is not None:
        n_neurons_dict, in_shapes_dict, input_channels = unpack_data_info(data_info)
    else:
        if "train" in dataloaders.keys():
            dataloaders = dataloaders["train"]

        # Obtain the named tuple fields from the first entry of the first dataloader in the dictionary
        in_name, out_name = next(iter(list(dataloaders.values())[0]))._fields

        session_shape_dict = get_dims_for_loader_dict(dataloaders)
        n_neurons_dict = {k: v[out_name][1] for k, v in session_shape_dict.items()}
        in_shapes_dict = {k: v[in_name] for k, v in session_shape_dict.items()}
        input_channels = [v[in_name][1] for v in session_shape_dict.values()]

    # All sessions are assumed to share the same number of input channels;
    # only the first entry is used.
    core_input_channels = (
        list(input_channels.values())[0]
        if isinstance(input_channels, dict)
        else input_channels[0]
    )

    source_grids = None
    grid_mean_predictor_type = None
    if grid_mean_predictor is not None:
        # Deepcopy first: the dict (possibly the mutable default argument)
        # is mutated via pop() below.
        grid_mean_predictor = copy.deepcopy(grid_mean_predictor)
        grid_mean_predictor_type = grid_mean_predictor.pop("type")
        if grid_mean_predictor_type == "cortex":
            input_dim = grid_mean_predictor.pop("input_dimensions", 2)
            source_grids = {}
            for k, v in dataloaders.items():
                # real data
                try:
                    if v.dataset.neurons.animal_ids[0] != 0:
                        source_grids[k] = v.dataset.neurons.cell_motor_coordinates[
                            :, :input_dim
                        ]
                    # simulated data -> get random linear non-degenerate transform of true positions
                    else:
                        source_grid_true = v.dataset.neurons.center[:, :input_dim]
                        det = 0.0
                        loops = 0
                        # NOTE(review): these np.random draws happen BEFORE
                        # set_random_seed(seed) below, so the transform depends
                        # on the ambient RNG state — confirm this is intended.
                        grid_bias = np.random.rand(2) * 3
                        while det < 5.0 and loops < 100:
                            matrix = np.random.rand(2, 2) * 3
                            det = np.linalg.det(matrix)
                            loops += 1
                        # NOTE(review): loop exits at det >= 5.0 but the assert
                        # requires strictly > 5.0 (det == 5.0 would trip it).
                        assert det > 5.0, "Did not find a non-degenerate matrix"
                        source_grids[k] = np.add(
                            (matrix @ source_grid_true.T).T, grid_bias
                        )
                except FileNotFoundError:
                    print(
                        "Dataset type is not recognized to be from Baylor College of Medicine."
                    )
                    source_grids[k] = v.dataset.neurons.cell_motor_coordinates[
                        :, :input_dim
                    ]
        elif grid_mean_predictor_type == "shared":
            # "shared" needs no per-session source grids; handled by the readout.
            pass
        else:
            raise ValueError(
                "Grid mean predictor type {} not understood.".format(
                    grid_mean_predictor_type
                )
            )
    shared_match_ids = None
    if share_features or share_grid:
        shared_match_ids = {
            k: v.dataset.neurons.multi_match_id for k, v in dataloaders.items()
        }
        all_multi_unit_ids = set(np.hstack(shared_match_ids.values()))

        # Sharing requires the exact same set of multi-match ids in every session.
        for match_id in shared_match_ids.values():
            assert len(set(match_id) & all_multi_unit_ids) == len(
                all_multi_unit_ids
            ), "All multi unit IDs must be present in all datasets"

    # Fix the seed immediately before weight initialization.
    set_random_seed(seed)

    core = SE2dCore(
        input_channels=core_input_channels,
        hidden_channels=hidden_channels,
        input_kern=input_kern,
        hidden_kern=hidden_kern,
        layers=layers,
        gamma_input=gamma_input,
        skip=skip,
        final_nonlinearity=final_nonlinearity,
        bias=bias,
        momentum=momentum,
        pad_input=pad_input,
        batch_norm=batch_norm,
        hidden_dilation=hidden_dilation,
        laplace_padding=laplace_padding,
        input_regularizer=input_regularizer,
        stack=stack,
        se_reduction=se_reduction,
        n_se_blocks=n_se_blocks,
        depth_separable=depth_separable,
        linear=linear,
    )

    readout = MultipleFullGaussian2d(
        core,
        in_shape_dict=in_shapes_dict,
        n_neurons_dict=n_neurons_dict,
        init_mu_range=init_mu_range,
        bias=readout_bias,
        init_sigma=init_sigma,
        gamma_readout=gamma_readout,
        gauss_type=gauss_type,
        grid_mean_predictor=grid_mean_predictor,
        grid_mean_predictor_type=grid_mean_predictor_type,
        source_grids=source_grids,
        share_features=share_features,
        share_grid=share_grid,
        share_transform=share_transform,
        shared_match_ids=shared_match_ids,
        init_noise=init_noise,
        init_transform_scale=init_transform_scale,
    )

    # initializing readout bias to mean response (of a single batch per session)
    if readout_bias and data_info is None:
        for key, value in dataloaders.items():
            _, targets = next(iter(value))
            readout[key].bias.data = targets.mean(0)

    model = Encoder(core, readout, elu_offset)

    return model
def se2d_pointpooled(
    dataloaders,
    seed,
    elu_offset=0,
    data_info=None,
    # core args
    hidden_channels=64,
    input_kern=9,
    hidden_kern=7,
    layers=4,
    gamma_input=46.402,
    bias=False,
    skip=0,
    final_nonlinearity=True,
    momentum=0.9,
    pad_input=False,
    batch_norm=True,
    hidden_dilation=1,
    laplace_padding=None,
    input_regularizer="LaplaceL2norm",
    stack=-1,
    se_reduction=32,
    n_se_blocks=0,
    depth_separable=True,
    linear=False,
    # readout args
    pool_steps=2,
    pool_kern=3,
    readout_bias=True,
    gamma_readout=0.0207,
    init_range=0.2,
):
    """Build an Encoder from an SE2dCore and a point-pooled (spatial transformer) readout.

    Args:
        dataloaders: dictionary of dataloaders, one per session,
            in the form {'data_key': dataloader object, ...}
        seed: random seed used for weight initialization
        elu_offset: offset inside the output non-linearity, F.elu(x + offset)
        data_info: optional pre-computed dataset shape info; when given, the
            dataloaders are not inspected for shapes
        all other args: see SE2dCore in neuralpredictors.layers.cores and
            PointPooled2d in neuralpredictors.layers.readouts

    Returns:
        An initialized model exposing model.core and model.readout.
    """
    if data_info is not None:
        n_neurons_dict, in_shapes_dict, input_channels = unpack_data_info(data_info)
    else:
        if "train" in dataloaders.keys():
            dataloaders = dataloaders["train"]

        # Input/output field names come from the first batch of the first loader.
        in_name, out_name = next(iter(list(dataloaders.values())[0]))._fields

        shapes_per_session = get_dims_for_loader_dict(dataloaders)
        n_neurons_dict = {key: dims[out_name][1] for key, dims in shapes_per_session.items()}
        in_shapes_dict = {key: dims[in_name] for key, dims in shapes_per_session.items()}
        input_channels = [dims[in_name][1] for dims in shapes_per_session.values()]

    # All sessions share the input channel count; take the first entry.
    if isinstance(input_channels, dict):
        core_input_channels = list(input_channels.values())[0]
    else:
        core_input_channels = input_channels[0]

    set_random_seed(seed)

    core = SE2dCore(
        input_channels=core_input_channels,
        hidden_channels=hidden_channels,
        input_kern=input_kern,
        hidden_kern=hidden_kern,
        layers=layers,
        gamma_input=gamma_input,
        bias=bias,
        skip=skip,
        final_nonlinearity=final_nonlinearity,
        momentum=momentum,
        pad_input=pad_input,
        batch_norm=batch_norm,
        hidden_dilation=hidden_dilation,
        laplace_padding=laplace_padding,
        input_regularizer=input_regularizer,
        stack=stack,
        se_reduction=se_reduction,
        n_se_blocks=n_se_blocks,
        depth_separable=depth_separable,
        linear=linear,
    )

    readout = MultiplePointPooled2d(
        core,
        in_shape_dict=in_shapes_dict,
        n_neurons_dict=n_neurons_dict,
        pool_steps=pool_steps,
        pool_kern=pool_kern,
        bias=readout_bias,
        gamma_readout=gamma_readout,
        init_range=init_range,
    )

    if readout_bias and data_info is None:
        # Initialize each session's readout bias to the mean response of one batch.
        for session_key, loader in dataloaders.items():
            _, targets = next(iter(loader))
            readout[session_key].bias.data = targets.mean(0)

    return Encoder(core, readout, elu_offset)
def se2d_spatialxfeaturelinear(
    dataloaders,
    seed,
    elu_offset=0,
    data_info=None,
    # core args
    hidden_channels=64,
    input_kern=9,
    hidden_kern=7,
    layers=4,
    gamma_input=20.0,
    skip=0,
    final_nonlinearity=True,
    momentum=0.9,
    pad_input=False,
    batch_norm=True,
    hidden_dilation=1,
    laplace_padding=None,
    input_regularizer="LaplaceL2norm",
    stack=-1,
    se_reduction=32,
    n_se_blocks=0,
    depth_separable=True,
    linear=False,
    # readout args,
    init_noise=4.1232e-05,
    readout_bias=True,
    gamma_readout=0.0019,
    normalize=False,
):
    """Build an Encoder from an SE2dCore and a spatialXfeature (factorized) readout.

    Args:
        dataloaders: dictionary of dataloaders, one per session,
            in the form {'data_key': dataloader object, ...}
        seed: random seed used for weight initialization
        elu_offset: offset inside the output non-linearity, F.elu(x + offset)
        data_info: optional pre-computed dataset shape info; when given, the
            dataloaders are not inspected for shapes
        all other args: see SE2dCore in neuralpredictors.layers.cores and
            SpatialXFeatureLinear in neuralpredictors.layers.readouts

    Returns:
        An initialized model exposing model.core and model.readout.
    """
    if data_info is not None:
        n_neurons_dict, in_shapes_dict, input_channels = unpack_data_info(data_info)
    else:
        if "train" in dataloaders.keys():
            dataloaders = dataloaders["train"]

        # Input/output field names come from the first batch of the first loader.
        in_name, out_name = next(iter(list(dataloaders.values())[0]))._fields

        shapes_per_session = get_dims_for_loader_dict(dataloaders)
        n_neurons_dict = {key: dims[out_name][1] for key, dims in shapes_per_session.items()}
        in_shapes_dict = {key: dims[in_name] for key, dims in shapes_per_session.items()}
        input_channels = [dims[in_name][1] for dims in shapes_per_session.values()]

    # All sessions share the input channel count; take the first entry.
    if isinstance(input_channels, dict):
        core_input_channels = list(input_channels.values())[0]
    else:
        core_input_channels = input_channels[0]

    set_random_seed(seed)

    core = SE2dCore(
        input_channels=core_input_channels,
        hidden_channels=hidden_channels,
        input_kern=input_kern,
        hidden_kern=hidden_kern,
        layers=layers,
        gamma_input=gamma_input,
        skip=skip,
        final_nonlinearity=final_nonlinearity,
        bias=False,
        momentum=momentum,
        pad_input=pad_input,
        batch_norm=batch_norm,
        hidden_dilation=hidden_dilation,
        laplace_padding=laplace_padding,
        input_regularizer=input_regularizer,
        stack=stack,
        se_reduction=se_reduction,
        n_se_blocks=n_se_blocks,
        depth_separable=depth_separable,
        linear=linear,
    )

    readout = MultipleSpatialXFeatureLinear(
        core,
        in_shape_dict=in_shapes_dict,
        n_neurons_dict=n_neurons_dict,
        init_noise=init_noise,
        bias=readout_bias,
        gamma_readout=gamma_readout,
        normalize=normalize,
    )

    if readout_bias and data_info is None:
        # Initialize each session's readout bias to the mean response of one batch.
        for session_key, loader in dataloaders.items():
            _, targets = next(iter(loader))
            readout[session_key].bias.data = targets.mean(0)

    return Encoder(core, readout, elu_offset)
def se2d_fullSXF(
    dataloaders,
    seed,
    elu_offset=0,
    data_info=None,
    transfer_state_dict=None,
    # core args
    hidden_channels=64,
    input_kern=9,
    hidden_kern=7,
    layers=4,
    gamma_input=6.3831,
    skip=0,
    bias=False,
    final_nonlinearity=True,
    momentum=0.9,
    pad_input=False,
    batch_norm=True,
    hidden_dilation=1,
    laplace_padding=None,
    input_regularizer="LaplaceL2norm",
    stack=-1,
    se_reduction=32,
    n_se_blocks=0,
    depth_separable=True,
    linear=False,
    init_noise=4.1232e-05,
    normalize=False,
    readout_bias=True,
    gamma_readout=0.0076,
    share_features=False,
):
    """
    Model class of a SE2dCore and a factorized (sxf) readout
    Args:
        dataloaders: a dictionary of dataloaders, one loader per session
            in the format {'data_key': dataloader object, .. }
        seed: random seed
        elu_offset: Offset for the output non-linearity [F.elu(x + self.offset)]
        share_features: whether to share features between readouts; requires the
            `neurons.multi_match_id` property on every dataset (same id set in all)
        all other args: See Documentation of SE2dCore in neuralpredictors.layers.cores and
            fullSXF in neuralpredictors.layers.readouts
    Returns: An initialized model which consists of model.core and model.readout
    """

    # transfer_state_dict is only consumed by the bayesian hypersearch
    # pipeline; nothing is loaded here.
    if transfer_state_dict is not None:
        print(
            "Transfer state_dict given. This will only have an effect in the bayesian hypersearch. See: TrainedModelBayesianTransfer "
        )
    # Either take pre-computed shapes from data_info, or derive neuron counts
    # and input shapes by inspecting one batch per session.
    if data_info is not None:
        n_neurons_dict, in_shapes_dict, input_channels = unpack_data_info(data_info)
    else:
        if "train" in dataloaders.keys():
            dataloaders = dataloaders["train"]

        # Obtain the named tuple fields from the first entry of the first dataloader in the dictionary
        in_name, out_name = next(iter(list(dataloaders.values())[0]))._fields

        session_shape_dict = get_dims_for_loader_dict(dataloaders)
        n_neurons_dict = {k: v[out_name][1] for k, v in session_shape_dict.items()}
        in_shapes_dict = {k: v[in_name] for k, v in session_shape_dict.items()}
        input_channels = [v[in_name][1] for v in session_shape_dict.values()]

    # All sessions are assumed to share the same number of input channels;
    # only the first entry is used.
    core_input_channels = (
        list(input_channels.values())[0]
        if isinstance(input_channels, dict)
        else input_channels[0]
    )

    shared_match_ids = None
    if share_features:
        shared_match_ids = {
            k: v.dataset.neurons.multi_match_id for k, v in dataloaders.items()
        }
        all_multi_unit_ids = set(np.hstack(shared_match_ids.values()))

        # Feature sharing requires the exact same id set in every session.
        for match_id in shared_match_ids.values():
            assert len(set(match_id) & all_multi_unit_ids) == len(
                all_multi_unit_ids
            ), "All multi unit IDs must be present in all datasets"

    # Fix the seed immediately before weight initialization.
    set_random_seed(seed)

    core = SE2dCore(
        input_channels=core_input_channels,
        hidden_channels=hidden_channels,
        input_kern=input_kern,
        hidden_kern=hidden_kern,
        layers=layers,
        gamma_input=gamma_input,
        skip=skip,
        final_nonlinearity=final_nonlinearity,
        bias=bias,
        momentum=momentum,
        pad_input=pad_input,
        batch_norm=batch_norm,
        hidden_dilation=hidden_dilation,
        laplace_padding=laplace_padding,
        input_regularizer=input_regularizer,
        stack=stack,
        se_reduction=se_reduction,
        n_se_blocks=n_se_blocks,
        depth_separable=depth_separable,
        linear=linear,
    )

    readout = MultipleFullSXF(
        core,
        in_shape_dict=in_shapes_dict,
        n_neurons_dict=n_neurons_dict,
        init_noise=init_noise,
        bias=readout_bias,
        gamma_readout=gamma_readout,
        normalize=normalize,
        share_features=share_features,
        shared_match_ids=shared_match_ids,
    )

    # initializing readout bias to mean response (of a single batch per session)
    if readout_bias and data_info is None:
        for key, value in dataloaders.items():
            _, targets = next(iter(value))
            readout[key].bias.data = targets.mean(0)

    model = Encoder(core, readout, elu_offset)

    return model
def taskdriven_fullgaussian2d(
    dataloaders,
    seed,
    elu_offset=0,
    data_info=None,
    # core args
    tl_model_name="vgg16",
    layers=4,
    pretrained=True,
    final_batchnorm=True,
    final_nonlinearity=True,
    momentum=0.1,
    fine_tune=False,
    # readout args
    init_mu_range=0.3,
    init_sigma=0.1,
    readout_bias=True,
    gamma_readout=0.0076,
    gauss_type="full",
    # NOTE: mutable default dict is safe here — it is deep-copied below before
    # being mutated with .pop(), so the default object itself is never altered.
    grid_mean_predictor={
        "type": "cortex",
        "input_dimensions": 2,
        "hidden_layers": 0,
        "hidden_features": 30,
        "final_tanh": True,
    },
    share_features=False,
    share_grid=False,
    share_transform=False,
    init_noise=1e-3,
    init_transform_scale=0.2,
):
    """
    Model class of a task-driven transfer core and a Gaussian readout
    Args:
        dataloaders: a dictionary of dataloaders, one loader per session
            in the format {'data_key': dataloader object, .. }
        seed: random seed
        elu_offset: Offset for the output non-linearity [F.elu(x + self.offset)]
        grid_mean_predictor: if not None, needs to be a dictionary of the form
            {
            'type': 'cortex',
            'input_dimensions': 2,
            'hidden_layers':0,
            'hidden_features':20,
            'final_tanh': False,
            }
            In that case the datasets need to have the property `neurons.cell_motor_coordinates`
        share_features: whether to share features between readouts. This requires that the datasets
            have the properties `neurons.multi_match_id` which are used for matching. Every dataset
            has to have all these ids and cannot have any more.
        share_grid: whether to share the grid between neurons. This requires that the datasets
            have the properties `neurons.multi_match_id` which are used for matching. Every dataset
            has to have all these ids and cannot have any more.
        share_transform: whether to share the transform from the grid_mean_predictor between neurons. This requires that the datasets
            have the properties `neurons.multi_match_id` which are used for matching. Every dataset
            has to have all these ids and cannot have any more.
        init_noise: noise for initialization of weights
        init_transform_scale: scale of the weights of the randomly intialized grid_mean_predictor network
        all other args: See Documentation of TransferLearningCore in neuralpredictors.layers.cores and
            FullGaussian2d in neuralpredictors.layers.readouts
    Returns: An initialized model which consists of model.core and model.readout
    """
    # Session shapes are either supplied precomputed via `data_info`, or
    # inferred by peeking at one batch of each dataloader.
    if data_info is not None:
        n_neurons_dict, in_shapes_dict, input_channels = unpack_data_info(data_info)
    else:
        if "train" in dataloaders.keys():
            dataloaders = dataloaders["train"]
        # Obtain the named tuple fields from the first entry of the first dataloader in the dictionary
        in_name, out_name = next(iter(list(dataloaders.values())[0]))._fields
        session_shape_dict = get_dims_for_loader_dict(dataloaders)
        n_neurons_dict = {k: v[out_name][1] for k, v in session_shape_dict.items()}
        in_shapes_dict = {k: v[in_name] for k, v in session_shape_dict.items()}
        input_channels = [v[in_name][1] for v in session_shape_dict.values()]
    # `input_channels` may be a dict (from unpack_data_info) or a list (from the
    # loaders); all sessions are assumed to share the same channel count, so the
    # first entry is used for the core.
    core_input_channels = (
        list(input_channels.values())[0]
        if isinstance(input_channels, dict)
        else input_channels[0]
    )
    # Configure the grid-mean predictor: per-session source grids of neuron
    # positions that the readout uses to predict receptive-field locations.
    source_grids = None
    grid_mean_predictor_type = None
    if grid_mean_predictor is not None:
        # Deep copy before .pop() so the caller's (or the default) dict is untouched.
        grid_mean_predictor = copy.deepcopy(grid_mean_predictor)
        grid_mean_predictor_type = grid_mean_predictor.pop("type")
        if grid_mean_predictor_type == "cortex":
            input_dim = grid_mean_predictor.pop("input_dimensions", 2)
            source_grids = {}
            for k, v in dataloaders.items():
                # real data
                try:
                    # animal_ids[0] != 0 marks real recordings; 0 marks simulated
                    # data (see the else-branch comment below).
                    if v.dataset.neurons.animal_ids[0] != 0:
                        source_grids[k] = v.dataset.neurons.cell_motor_coordinates[
                            :, :input_dim
                        ]
                    # simulated data -> get random linear non-degenerate transform of true positions
                    else:
                        source_grid_true = v.dataset.neurons.center[:, :input_dim]
                        det = 0.0
                        loops = 0
                        grid_bias = np.random.rand(2) * 3
                        # Rejection-sample a random 2x2 matrix until its
                        # determinant exceeds 5 (clearly non-degenerate),
                        # giving up after 100 attempts.
                        while det < 5.0 and loops < 100:
                            matrix = np.random.rand(2, 2) * 3
                            det = np.linalg.det(matrix)
                            loops += 1
                        assert det > 5.0, "Did not find a non-degenerate matrix"
                        source_grids[k] = np.add(
                            (matrix @ source_grid_true.T).T, grid_bias
                        )
                except FileNotFoundError:
                    # Dataset lacks the expected attributes; fall back to raw
                    # cell motor coordinates.
                    print(
                        "Dataset type is not recognized to be from Baylor College of Medicine."
                    )
                    source_grids[k] = v.dataset.neurons.cell_motor_coordinates[
                        :, :input_dim
                    ]
        elif grid_mean_predictor_type == "shared":
            pass
        else:
            raise ValueError(
                "Grid mean predictor type {} not understood.".format(
                    grid_mean_predictor_type
                )
            )
    # Sharing features or grids across sessions requires a common set of
    # multi-match ids; verify every session carries the full id set.
    shared_match_ids = None
    if share_features or share_grid:
        shared_match_ids = {
            k: v.dataset.neurons.multi_match_id for k, v in dataloaders.items()
        }
        all_multi_unit_ids = set(np.hstack(shared_match_ids.values()))
        for match_id in shared_match_ids.values():
            assert len(set(match_id) & all_multi_unit_ids) == len(
                all_multi_unit_ids
            ), "All multi unit IDs must be present in all datasets"
    # Seed RNGs before constructing modules so weight init is reproducible.
    set_random_seed(seed)
    core = TransferLearningCore(
        input_channels=core_input_channels,
        tl_model_name=tl_model_name,
        layers=layers,
        pretrained=pretrained,
        final_batchnorm=final_batchnorm,
        final_nonlinearity=final_nonlinearity,
        momentum=momentum,
        fine_tune=fine_tune,
    )
    readout = MultipleFullGaussian2d(
        core,
        in_shape_dict=in_shapes_dict,
        n_neurons_dict=n_neurons_dict,
        init_mu_range=init_mu_range,
        bias=readout_bias,
        init_sigma=init_sigma,
        gamma_readout=gamma_readout,
        gauss_type=gauss_type,
        grid_mean_predictor=grid_mean_predictor,
        grid_mean_predictor_type=grid_mean_predictor_type,
        source_grids=source_grids,
        share_features=share_features,
        share_grid=share_grid,
        shared_match_ids=shared_match_ids,
        share_transform=share_transform,
        init_noise=init_noise,
        init_transform_scale=init_transform_scale,
    )
    # initializing readout bias to mean response
    if readout_bias and data_info is None:
        for key, value in dataloaders.items():
            _, targets = next(iter(value))
            readout[key].bias.data = targets.mean(0)
    model = Encoder(core, readout, elu_offset)
    return model
def taskdriven_fullSXF(
    dataloaders,
    seed,
    elu_offset=0,
    data_info=None,
    # core args
    tl_model_name="vgg16",
    layers=4,
    pretrained=True,
    final_batchnorm=True,
    final_nonlinearity=True,
    momentum=0.1,
    fine_tune=False,
    # readout args
    init_noise=4.1232e-05,
    normalize=False,
    readout_bias=True,
    gamma_readout=0.0076,
    share_features=False,
):
    """Build an encoder with a task-driven transfer core and a factorized (sxf) readout.

    Args:
        dataloaders: a dictionary of dataloaders, one loader per session
            in the format {'data_key': dataloader object, .. }
        seed: random seed used for weight initialization
        elu_offset: Offset for the output non-linearity [F.elu(x + self.offset)]
        data_info: optional precomputed session statistics; when given, the
            dataloaders are not inspected for input/output shapes
        all other args: See Documentation of TransferLearningCore in
            neuralpredictors.layers.cores and fullSXF in
            neuralpredictors.layers.readouts
    Returns: An initialized model which consists of model.core and model.readout
    """
    if data_info is not None:
        n_neurons_dict, in_shapes_dict, input_channels = unpack_data_info(data_info)
    else:
        if "train" in dataloaders:
            dataloaders = dataloaders["train"]
        # Field names of the (input, response) named tuple yielded by the loaders.
        first_loader = list(dataloaders.values())[0]
        in_name, out_name = next(iter(first_loader))._fields
        shape_dict = get_dims_for_loader_dict(dataloaders)
        n_neurons_dict = {key: shape_dict[key][out_name][1] for key in shape_dict}
        in_shapes_dict = {key: shape_dict[key][in_name] for key in shape_dict}
        input_channels = [dims[in_name][1] for dims in shape_dict.values()]
    # All sessions share the same channel count; take it from the first entry,
    # whether input_channels arrived as a dict or a list.
    if isinstance(input_channels, dict):
        core_input_channels = list(input_channels.values())[0]
    else:
        core_input_channels = input_channels[0]
    shared_match_ids = None
    if share_features:
        # Feature sharing needs every session to carry the complete set of
        # multi-match ids; verify that before building the readout.
        shared_match_ids = {
            key: loader.dataset.neurons.multi_match_id
            for key, loader in dataloaders.items()
        }
        all_multi_unit_ids = set(np.hstack(shared_match_ids.values()))
        for match_id in shared_match_ids.values():
            assert len(set(match_id) & all_multi_unit_ids) == len(
                all_multi_unit_ids
            ), "All multi unit IDs must be present in all datasets"
    # Seed before constructing modules so initialization is reproducible.
    set_random_seed(seed)
    core = TransferLearningCore(
        input_channels=core_input_channels,
        tl_model_name=tl_model_name,
        layers=layers,
        pretrained=pretrained,
        final_batchnorm=final_batchnorm,
        final_nonlinearity=final_nonlinearity,
        momentum=momentum,
        fine_tune=fine_tune,
    )
    readout = MultipleFullSXF(
        core,
        in_shape_dict=in_shapes_dict,
        n_neurons_dict=n_neurons_dict,
        init_noise=init_noise,
        bias=readout_bias,
        gamma_readout=gamma_readout,
        normalize=normalize,
        share_features=share_features,
        shared_match_ids=shared_match_ids,
    )
    if readout_bias and data_info is None:
        # Initialize each session's readout bias to its mean response.
        for data_key, loader in dataloaders.items():
            _, targets = next(iter(loader))
            readout[data_key].bias.data = targets.mean(0)
    return Encoder(core, readout, elu_offset)
| [
"copy.deepcopy",
"neuralpredictors.layers.readouts.MultipleFullGaussian2d",
"nnfabrik.utility.nn_helpers.set_random_seed",
"neuralpredictors.layers.readouts.MultiplePointPooled2d",
"numpy.random.rand",
"neuralpredictors.layers.readouts.MultipleSpatialXFeatureLinear",
"neuralpredictors.layers.cores.SE2dC... | [((7673, 7694), 'nnfabrik.utility.nn_helpers.set_random_seed', 'set_random_seed', (['seed'], {}), '(seed)\n', (7688, 7694), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((7707, 8245), 'neuralpredictors.layers.cores.SE2dCore', 'SE2dCore', ([], {'input_channels': 'core_input_channels', 'hidden_channels': 'hidden_channels', 'input_kern': 'input_kern', 'hidden_kern': 'hidden_kern', 'layers': 'layers', 'gamma_input': 'gamma_input', 'skip': 'skip', 'final_nonlinearity': 'final_nonlinearity', 'bias': 'bias', 'momentum': 'momentum', 'pad_input': 'pad_input', 'batch_norm': 'batch_norm', 'hidden_dilation': 'hidden_dilation', 'laplace_padding': 'laplace_padding', 'input_regularizer': 'input_regularizer', 'stack': 'stack', 'se_reduction': 'se_reduction', 'n_se_blocks': 'n_se_blocks', 'depth_separable': 'depth_separable', 'linear': 'linear'}), '(input_channels=core_input_channels, hidden_channels=\n hidden_channels, input_kern=input_kern, hidden_kern=hidden_kern, layers\n =layers, gamma_input=gamma_input, skip=skip, final_nonlinearity=\n final_nonlinearity, bias=bias, momentum=momentum, pad_input=pad_input,\n batch_norm=batch_norm, hidden_dilation=hidden_dilation, laplace_padding\n =laplace_padding, input_regularizer=input_regularizer, stack=stack,\n se_reduction=se_reduction, n_se_blocks=n_se_blocks, depth_separable=\n depth_separable, linear=linear)\n', (7715, 8245), False, 'from neuralpredictors.layers.cores import TransferLearningCore, SE2dCore\n'), ((8395, 8945), 'neuralpredictors.layers.readouts.MultipleFullGaussian2d', 'MultipleFullGaussian2d', (['core'], {'in_shape_dict': 'in_shapes_dict', 'n_neurons_dict': 'n_neurons_dict', 'init_mu_range': 'init_mu_range', 'bias': 'readout_bias', 'init_sigma': 'init_sigma', 'gamma_readout': 'gamma_readout', 'gauss_type': 'gauss_type', 'grid_mean_predictor': 'grid_mean_predictor', 'grid_mean_predictor_type': 'grid_mean_predictor_type', 
'source_grids': 'source_grids', 'share_features': 'share_features', 'share_grid': 'share_grid', 'share_transform': 'share_transform', 'shared_match_ids': 'shared_match_ids', 'init_noise': 'init_noise', 'init_transform_scale': 'init_transform_scale'}), '(core, in_shape_dict=in_shapes_dict, n_neurons_dict=\n n_neurons_dict, init_mu_range=init_mu_range, bias=readout_bias,\n init_sigma=init_sigma, gamma_readout=gamma_readout, gauss_type=\n gauss_type, grid_mean_predictor=grid_mean_predictor,\n grid_mean_predictor_type=grid_mean_predictor_type, source_grids=\n source_grids, share_features=share_features, share_grid=share_grid,\n share_transform=share_transform, shared_match_ids=shared_match_ids,\n init_noise=init_noise, init_transform_scale=init_transform_scale)\n', (8417, 8945), False, 'from neuralpredictors.layers.readouts import MultipleFullGaussian2d, MultiplePointPooled2d, MultipleSpatialXFeatureLinear, MultipleFullSXF\n'), ((11450, 11471), 'nnfabrik.utility.nn_helpers.set_random_seed', 'set_random_seed', (['seed'], {}), '(seed)\n', (11465, 11471), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((11484, 12020), 'neuralpredictors.layers.cores.SE2dCore', 'SE2dCore', ([], {'input_channels': 'core_input_channels', 'hidden_channels': 'hidden_channels', 'input_kern': 'input_kern', 'hidden_kern': 'hidden_kern', 'layers': 'layers', 'gamma_input': 'gamma_input', 'bias': 'bias', 'skip': 'skip', 'final_nonlinearity': 'final_nonlinearity', 'momentum': 'momentum', 'pad_input': 'pad_input', 'batch_norm': 'batch_norm', 'hidden_dilation': 'hidden_dilation', 'laplace_padding': 'laplace_padding', 'input_regularizer': 'input_regularizer', 'stack': 'stack', 'se_reduction': 'se_reduction', 'n_se_blocks': 'n_se_blocks', 'depth_separable': 'depth_separable', 'linear': 'linear'}), '(input_channels=core_input_channels, hidden_channels=\n hidden_channels, input_kern=input_kern, hidden_kern=hidden_kern, layers\n =layers, 
gamma_input=gamma_input, bias=bias, skip=skip,\n final_nonlinearity=final_nonlinearity, momentum=momentum, pad_input=\n pad_input, batch_norm=batch_norm, hidden_dilation=hidden_dilation,\n laplace_padding=laplace_padding, input_regularizer=input_regularizer,\n stack=stack, se_reduction=se_reduction, n_se_blocks=n_se_blocks,\n depth_separable=depth_separable, linear=linear)\n', (11492, 12020), False, 'from neuralpredictors.layers.cores import TransferLearningCore, SE2dCore\n'), ((12172, 12385), 'neuralpredictors.layers.readouts.MultiplePointPooled2d', 'MultiplePointPooled2d', (['core'], {'in_shape_dict': 'in_shapes_dict', 'n_neurons_dict': 'n_neurons_dict', 'pool_steps': 'pool_steps', 'pool_kern': 'pool_kern', 'bias': 'readout_bias', 'gamma_readout': 'gamma_readout', 'init_range': 'init_range'}), '(core, in_shape_dict=in_shapes_dict, n_neurons_dict=\n n_neurons_dict, pool_steps=pool_steps, pool_kern=pool_kern, bias=\n readout_bias, gamma_readout=gamma_readout, init_range=init_range)\n', (12193, 12385), False, 'from neuralpredictors.layers.readouts import MultipleFullGaussian2d, MultiplePointPooled2d, MultipleSpatialXFeatureLinear, MultipleFullSXF\n'), ((14412, 14433), 'nnfabrik.utility.nn_helpers.set_random_seed', 'set_random_seed', (['seed'], {}), '(seed)\n', (14427, 14433), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((14446, 14985), 'neuralpredictors.layers.cores.SE2dCore', 'SE2dCore', ([], {'input_channels': 'core_input_channels', 'hidden_channels': 'hidden_channels', 'input_kern': 'input_kern', 'hidden_kern': 'hidden_kern', 'layers': 'layers', 'gamma_input': 'gamma_input', 'skip': 'skip', 'final_nonlinearity': 'final_nonlinearity', 'bias': '(False)', 'momentum': 'momentum', 'pad_input': 'pad_input', 'batch_norm': 'batch_norm', 'hidden_dilation': 'hidden_dilation', 'laplace_padding': 'laplace_padding', 'input_regularizer': 'input_regularizer', 'stack': 'stack', 'se_reduction': 'se_reduction', 'n_se_blocks': 
'n_se_blocks', 'depth_separable': 'depth_separable', 'linear': 'linear'}), '(input_channels=core_input_channels, hidden_channels=\n hidden_channels, input_kern=input_kern, hidden_kern=hidden_kern, layers\n =layers, gamma_input=gamma_input, skip=skip, final_nonlinearity=\n final_nonlinearity, bias=False, momentum=momentum, pad_input=pad_input,\n batch_norm=batch_norm, hidden_dilation=hidden_dilation, laplace_padding\n =laplace_padding, input_regularizer=input_regularizer, stack=stack,\n se_reduction=se_reduction, n_se_blocks=n_se_blocks, depth_separable=\n depth_separable, linear=linear)\n', (14454, 14985), False, 'from neuralpredictors.layers.cores import TransferLearningCore, SE2dCore\n'), ((15135, 15331), 'neuralpredictors.layers.readouts.MultipleSpatialXFeatureLinear', 'MultipleSpatialXFeatureLinear', (['core'], {'in_shape_dict': 'in_shapes_dict', 'n_neurons_dict': 'n_neurons_dict', 'init_noise': 'init_noise', 'bias': 'readout_bias', 'gamma_readout': 'gamma_readout', 'normalize': 'normalize'}), '(core, in_shape_dict=in_shapes_dict,\n n_neurons_dict=n_neurons_dict, init_noise=init_noise, bias=readout_bias,\n gamma_readout=gamma_readout, normalize=normalize)\n', (15164, 15331), False, 'from neuralpredictors.layers.readouts import MultipleFullGaussian2d, MultiplePointPooled2d, MultipleSpatialXFeatureLinear, MultipleFullSXF\n'), ((18433, 18454), 'nnfabrik.utility.nn_helpers.set_random_seed', 'set_random_seed', (['seed'], {}), '(seed)\n', (18448, 18454), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((18467, 19005), 'neuralpredictors.layers.cores.SE2dCore', 'SE2dCore', ([], {'input_channels': 'core_input_channels', 'hidden_channels': 'hidden_channels', 'input_kern': 'input_kern', 'hidden_kern': 'hidden_kern', 'layers': 'layers', 'gamma_input': 'gamma_input', 'skip': 'skip', 'final_nonlinearity': 'final_nonlinearity', 'bias': 'bias', 'momentum': 'momentum', 'pad_input': 'pad_input', 'batch_norm': 'batch_norm', 
'hidden_dilation': 'hidden_dilation', 'laplace_padding': 'laplace_padding', 'input_regularizer': 'input_regularizer', 'stack': 'stack', 'se_reduction': 'se_reduction', 'n_se_blocks': 'n_se_blocks', 'depth_separable': 'depth_separable', 'linear': 'linear'}), '(input_channels=core_input_channels, hidden_channels=\n hidden_channels, input_kern=input_kern, hidden_kern=hidden_kern, layers\n =layers, gamma_input=gamma_input, skip=skip, final_nonlinearity=\n final_nonlinearity, bias=bias, momentum=momentum, pad_input=pad_input,\n batch_norm=batch_norm, hidden_dilation=hidden_dilation, laplace_padding\n =laplace_padding, input_regularizer=input_regularizer, stack=stack,\n se_reduction=se_reduction, n_se_blocks=n_se_blocks, depth_separable=\n depth_separable, linear=linear)\n', (18475, 19005), False, 'from neuralpredictors.layers.cores import TransferLearningCore, SE2dCore\n'), ((19155, 19409), 'neuralpredictors.layers.readouts.MultipleFullSXF', 'MultipleFullSXF', (['core'], {'in_shape_dict': 'in_shapes_dict', 'n_neurons_dict': 'n_neurons_dict', 'init_noise': 'init_noise', 'bias': 'readout_bias', 'gamma_readout': 'gamma_readout', 'normalize': 'normalize', 'share_features': 'share_features', 'shared_match_ids': 'shared_match_ids'}), '(core, in_shape_dict=in_shapes_dict, n_neurons_dict=\n n_neurons_dict, init_noise=init_noise, bias=readout_bias, gamma_readout\n =gamma_readout, normalize=normalize, share_features=share_features,\n shared_match_ids=shared_match_ids)\n', (19170, 19409), False, 'from neuralpredictors.layers.readouts import MultipleFullGaussian2d, MultiplePointPooled2d, MultipleSpatialXFeatureLinear, MultipleFullSXF\n'), ((25818, 25839), 'nnfabrik.utility.nn_helpers.set_random_seed', 'set_random_seed', (['seed'], {}), '(seed)\n', (25833, 25839), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((25852, 26102), 'neuralpredictors.layers.cores.TransferLearningCore', 'TransferLearningCore', ([], {'input_channels': 
'core_input_channels', 'tl_model_name': 'tl_model_name', 'layers': 'layers', 'pretrained': 'pretrained', 'final_batchnorm': 'final_batchnorm', 'final_nonlinearity': 'final_nonlinearity', 'momentum': 'momentum', 'fine_tune': 'fine_tune'}), '(input_channels=core_input_channels, tl_model_name=\n tl_model_name, layers=layers, pretrained=pretrained, final_batchnorm=\n final_batchnorm, final_nonlinearity=final_nonlinearity, momentum=\n momentum, fine_tune=fine_tune)\n', (25872, 26102), False, 'from neuralpredictors.layers.cores import TransferLearningCore, SE2dCore\n'), ((26174, 26724), 'neuralpredictors.layers.readouts.MultipleFullGaussian2d', 'MultipleFullGaussian2d', (['core'], {'in_shape_dict': 'in_shapes_dict', 'n_neurons_dict': 'n_neurons_dict', 'init_mu_range': 'init_mu_range', 'bias': 'readout_bias', 'init_sigma': 'init_sigma', 'gamma_readout': 'gamma_readout', 'gauss_type': 'gauss_type', 'grid_mean_predictor': 'grid_mean_predictor', 'grid_mean_predictor_type': 'grid_mean_predictor_type', 'source_grids': 'source_grids', 'share_features': 'share_features', 'share_grid': 'share_grid', 'shared_match_ids': 'shared_match_ids', 'share_transform': 'share_transform', 'init_noise': 'init_noise', 'init_transform_scale': 'init_transform_scale'}), '(core, in_shape_dict=in_shapes_dict, n_neurons_dict=\n n_neurons_dict, init_mu_range=init_mu_range, bias=readout_bias,\n init_sigma=init_sigma, gamma_readout=gamma_readout, gauss_type=\n gauss_type, grid_mean_predictor=grid_mean_predictor,\n grid_mean_predictor_type=grid_mean_predictor_type, source_grids=\n source_grids, share_features=share_features, share_grid=share_grid,\n shared_match_ids=shared_match_ids, share_transform=share_transform,\n init_noise=init_noise, init_transform_scale=init_transform_scale)\n', (26196, 26724), False, 'from neuralpredictors.layers.readouts import MultipleFullGaussian2d, MultiplePointPooled2d, MultipleSpatialXFeatureLinear, MultipleFullSXF\n'), ((29462, 29483), 
'nnfabrik.utility.nn_helpers.set_random_seed', 'set_random_seed', (['seed'], {}), '(seed)\n', (29477, 29483), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((29496, 29746), 'neuralpredictors.layers.cores.TransferLearningCore', 'TransferLearningCore', ([], {'input_channels': 'core_input_channels', 'tl_model_name': 'tl_model_name', 'layers': 'layers', 'pretrained': 'pretrained', 'final_batchnorm': 'final_batchnorm', 'final_nonlinearity': 'final_nonlinearity', 'momentum': 'momentum', 'fine_tune': 'fine_tune'}), '(input_channels=core_input_channels, tl_model_name=\n tl_model_name, layers=layers, pretrained=pretrained, final_batchnorm=\n final_batchnorm, final_nonlinearity=final_nonlinearity, momentum=\n momentum, fine_tune=fine_tune)\n', (29516, 29746), False, 'from neuralpredictors.layers.cores import TransferLearningCore, SE2dCore\n'), ((29818, 30072), 'neuralpredictors.layers.readouts.MultipleFullSXF', 'MultipleFullSXF', (['core'], {'in_shape_dict': 'in_shapes_dict', 'n_neurons_dict': 'n_neurons_dict', 'init_noise': 'init_noise', 'bias': 'readout_bias', 'gamma_readout': 'gamma_readout', 'normalize': 'normalize', 'share_features': 'share_features', 'shared_match_ids': 'shared_match_ids'}), '(core, in_shape_dict=in_shapes_dict, n_neurons_dict=\n n_neurons_dict, init_noise=init_noise, bias=readout_bias, gamma_readout\n =gamma_readout, normalize=normalize, share_features=share_features,\n shared_match_ids=shared_match_ids)\n', (29833, 30072), False, 'from neuralpredictors.layers.readouts import MultipleFullGaussian2d, MultiplePointPooled2d, MultipleSpatialXFeatureLinear, MultipleFullSXF\n'), ((4705, 4742), 'nnfabrik.utility.nn_helpers.get_dims_for_loader_dict', 'get_dims_for_loader_dict', (['dataloaders'], {}), '(dataloaders)\n', (4729, 4742), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((5267, 5301), 'copy.deepcopy', 'copy.deepcopy', (['grid_mean_predictor'], {}), 
'(grid_mean_predictor)\n', (5280, 5301), False, 'import copy\n'), ((11014, 11051), 'nnfabrik.utility.nn_helpers.get_dims_for_loader_dict', 'get_dims_for_loader_dict', (['dataloaders'], {}), '(dataloaders)\n', (11038, 11051), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((13976, 14013), 'nnfabrik.utility.nn_helpers.get_dims_for_loader_dict', 'get_dims_for_loader_dict', (['dataloaders'], {}), '(dataloaders)\n', (14000, 14013), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((17533, 17570), 'nnfabrik.utility.nn_helpers.get_dims_for_loader_dict', 'get_dims_for_loader_dict', (['dataloaders'], {}), '(dataloaders)\n', (17557, 17570), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((22850, 22887), 'nnfabrik.utility.nn_helpers.get_dims_for_loader_dict', 'get_dims_for_loader_dict', (['dataloaders'], {}), '(dataloaders)\n', (22874, 22887), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((23412, 23446), 'copy.deepcopy', 'copy.deepcopy', (['grid_mean_predictor'], {}), '(grid_mean_predictor)\n', (23425, 23446), False, 'import copy\n'), ((28562, 28599), 'nnfabrik.utility.nn_helpers.get_dims_for_loader_dict', 'get_dims_for_loader_dict', (['dataloaders'], {}), '(dataloaders)\n', (28586, 28599), False, 'from nnfabrik.utility.nn_helpers import set_random_seed, get_dims_for_loader_dict\n'), ((973, 1007), 'torch.nn.functional.elu', 'nn.functional.elu', (['(x + self.offset)'], {}), '(x + self.offset)\n', (990, 1007), False, 'from torch import nn\n'), ((6499, 6549), 'numpy.add', 'np.add', (['(matrix @ source_grid_true.T).T', 'grid_bias'], {}), '((matrix @ source_grid_true.T).T, grid_bias)\n', (6505, 6549), True, 'import numpy as np\n'), ((24644, 24694), 'numpy.add', 'np.add', (['(matrix @ source_grid_true.T).T', 'grid_bias'], {}), '((matrix @ source_grid_true.T).T, grid_bias)\n', (24650, 24694), 
True, 'import numpy as np\n'), ((6140, 6157), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (6154, 6157), True, 'import numpy as np\n'), ((6315, 6336), 'numpy.linalg.det', 'np.linalg.det', (['matrix'], {}), '(matrix)\n', (6328, 6336), True, 'import numpy as np\n'), ((24285, 24302), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (24299, 24302), True, 'import numpy as np\n'), ((24460, 24481), 'numpy.linalg.det', 'np.linalg.det', (['matrix'], {}), '(matrix)\n', (24473, 24481), True, 'import numpy as np\n'), ((6256, 6276), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (6270, 6276), True, 'import numpy as np\n'), ((24401, 24421), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (24415, 24421), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from annotlib.standard import StandardAnnot
from annotlib.utils import check_positive_integer
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import column_or_1d, check_X_y
from sklearn.model_selection import RepeatedKFold
from sklearn.base import is_classifier
from sklearn.svm import SVC
from scipy.special import entr
class DifficultyBasedAnnot(StandardAnnot):
"""DifficultyBasedAnnot
This class implements a simulation technique aiming at quantifying the difficulty of a sample. The estimated
difficulty is used in combination with an annotator labelling performance to compute the probability that the
corresponding annotator labels the sample correctly.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Samples of the whole data set.
y_true: array-like, shape (n_samples)
True class labels of the given samples X.
n_annotators: int
Number of annotators who are simulated.
classifiers: sklearn.base.ClassifierMixin | list of ClassifierMixin, shape (n_classifiers)
The classifiers parameter is either a single sklearn classifier supporting :py:method::predict_proba` or a
list of such classifiers. If the parameter is not a list, the simplicity scores are estimate by a single
classifier, whereas if it is a list, the simplicity scores can be estimated by different classifier types or
different parametrisations. The default classifiers parameter is a single SVM
alphas: array-like, shape (n_annotators)
The entry alphas[a_idx] indicates the annotator labelling performance, which is in the interval (-inf, inf).
The following properties are valid:
- alphas[a_idx] = 0: annotator with index a_idx makes random guesses,
- alphas[a_idx] = inf: annotator with index a_idx is almost always right,
- alphas[a_idx] = -inf: annotator with index a_idx is almost always wrong (adversarial).
n_splits: int
Number of folds of the cross-validation.
n_repeats: int
Number of repeats of the cross-validation.
confidence_noise: array-like, shape (n_annotators)
An entry of confidence_noise defines the interval from which the noise is uniformly drawn, e.g.
confidence_noise[a] = 0.2 results in sampling n_samples times from U(-0.2, 0.2) and adding this noise
to the confidence scores. Zero noise is the default value for each annotator.
random_state: None | int | instance of :py:class:`numpy.random.RandomState`
The random state used for generating class labels of the annotators.
Attributes
----------
X_: numpy.ndarray, shape (n_samples, n_features)
Samples of the whole data set.
Y_: numpy.ndarray, shape (n_samples, n_annotators)
Class labels of the given samples X.
C_: numpy.ndarray, shape (n_samples, n_annotators)
confidence score for labelling the given samples x.
C_noise_: numpy.ndarray, shape (n_samples, n_annotators)
The uniformly noise for each annotator and each sample, e.g. C[x_idx, a_idx] indicates the noise for the
confidence score of annotator with id a_idx in labelling sample with id x_idx.
n_annotators_: int
Number of annotators.
n_queries_: numpy.ndarray, shape (n_annotators)
An entry n_queries_[a] indicates how many queries annotator a has processed.
queried_flags_: numpy.ndarray, shape (n_samples, n_annotators)
An entry queried_flags_[i, j] is a boolean indicating whether annotator a_i has provided a
class label for sample x_j.
y_true_: numpy.ndarray, shape (n_samples)
The true class labels of the given samples.
alphas_: array-like, shape (n_annotators)
The entry alphas_[a_idx] indicates the annotator labelling performance, which is in the
interval (-inf, inf). The following properties are valid:
- alphas_[a_idx] = 0: annotator with index a_idx makes random guesses,
- alphas_[a_idx] = inf: annotator with index a_idx is almost always right,
- alphas_[a_idx] = -inf: annotator with index a_idx is almost always wrong (adversarial).
    betas_: array-like, shape (n_samples)
The entry betas_[x_idx] represents the simplicity score of sample X_[x_idx], where betas_[x_idx] is in the
interval [0, inf):
- betas_[x_idx] = 0: annotator with index a_idx makes random guesses,
- betas_[x_idx] = inf: annotator with index a_idx is always right, if alphas_[a_idx] > 0
n_splits_: int
Number of folds of the cross-validation.
n_repeats: int
Number of repeats of the cross-validation.
random_state_: None | int | numpy.random.RandomState
The random state used for generating class labels of the annotators.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.svm import SVC
>>> # load iris data set
>>> X, y_true = load_iris(return_X_y=True)
>>> # create list of SVM and Gaussian Process classifier
>>> classifiers = [SVC(C=1, probability=True, gamma='auto'), SVC(C=3, probability=True), GaussianProcessClassifier()]
>>> # set labelling performances of annotators
>>> alphas = [-3, 0, 3]
>>> # simulate annotators on the iris data set
>>> annotators = DifficultyBasedAnnot(X=X, y_true=y_true, classifiers=classifiers, n_annotators=3, alphas=alphas)
>>> # the number of annotators must be equal to the number of classifiers
>>> annotators.n_annotators()
3
>>> # query class labels of 100 samples from annotators a_0, a_2
>>> annotators.class_labels(X=X[0:100], y_true=y_true[0:100], annotator_ids=[0, 2], query_value=100).shape
(100, 3)
>>> # check query values
>>> annotators.n_queries()
array([100, 0, 100])
>>> # query confidence scores of these 100 samples from annotators a_0, a_2
>>> annotators.confidence_scores(X=X[0:100], y_true=y_true[0:100], annotator_ids=[0, 2]).shape
(100, 3)
>>> # query values are not affected by calling the confidence score method
>>> annotators.n_queries()
array([100, 0, 100])
>>> # labelling performance of annotator a_0 is adversarial (worse than guessing)
>>> annotators.labelling_performance(X=X, y_true=y_true)[0] < 1/len(np.unique(y_true))
True
"""
def __init__(self, X, y_true, classifiers=None, n_annotators=None, alphas=None, n_splits=5, n_repeats=10,
confidence_noise=None, random_state=None):
# check shape of samples and labels
self.X_, self.y_true_ = check_X_y(X, y_true)
n_samples = len(self.X_)
# check and set number of annotators, query number and queried samples
n_annotators = 5 if n_annotators is None else n_annotators
self._check_parameters(n_annotators, n_samples, confidence_noise, random_state)
# check alpha scores
self.alphas_ = np.linspace(0, 2, self.n_annotators()) if alphas is None else column_or_1d(alphas)
if len(self.alphas_) != self.n_annotators():
raise ValueError('The parameter `alphas` must contain a single labelling performance value for each'
'annotator.')
# create class labels and confidence scores container
self.Y_ = np.empty((n_samples, self.n_annotators()))
self.C_ = np.empty((n_samples, self.n_annotators()))
# transform class labels to interval [0, n_classes-1]
le = LabelEncoder().fit(self.y_true_)
n_classes = len(le.classes_)
y_transformed = le.transform(self.y_true_)
# check classifier models
if not isinstance(classifiers, list):
clf = SVC(random_state=self.random_state_, probability=True,
gamma='auto') if classifiers is None else classifiers
classifiers = [clf]
for clf in classifiers:
if not is_classifier(clf) or getattr(clf, 'predict_proba', None) is None:
raise TypeError('The parameter `classifiers` must be a single sklearn classifier or a list of sklearn '
'classifiers supporting the method :py:method::`predict_proba`.')
# check n_splits and n_repeats
self.n_splits_, self.n_repeats_ = check_positive_integer(n_splits), check_positive_integer(n_repeats)
# estimate simplicity scores (proxies of difficulties) of samples
entropy_corr = np.zeros(n_samples)
test_per_sample = np.zeros(n_samples)
for classifier in classifiers:
rkf = RepeatedKFold(n_splits=self.n_splits_, n_repeats=self.n_repeats_, random_state=random_state)
for train_index, test_index in rkf.split(self.X_):
classifier = classifier.fit(self.X_[train_index], self.y_true_[train_index])
P = classifier.predict_proba(self.X_[test_index])
E = np.sum(entr(P) / np.log(n_classes), axis=1)
y_pred = classifier.predict(self.X_[test_index])
entropy_corr[test_index] += (y_pred == self.y_true_[test_index]) * E
entropy_corr[test_index] += (y_pred != self.y_true_[test_index])
test_per_sample[test_index] += 1
entropy_corr /= test_per_sample
self.betas_ = np.divide(1, entropy_corr) - 1
# compute confidence scores
self.C_ = 1 / (1 + (n_classes - 1) * np.exp(-self.betas_.reshape(-1, 1) @ self.alphas_.reshape(1, -1)))
# generate class labels
for a in range(self.n_annotators_):
for x in range(len(self.X_)):
acc = self.C_[x, a]
p = [(1 - acc) / (n_classes - 1)] * n_classes
p[y_transformed[x]] = acc
self.Y_[x, a] = le.inverse_transform([self.random_state_.choice(range(n_classes), p=p)])
# add confidence noise
self._add_confidence_noise(probabilistic=True)
def plot_annotators_labelling_probabilities(self, figsize=(5, 3), dpi=150, fontsize=7):
"""
Creates a plot of the correct labelling probabilities for given labelling performances and estimated
sample simplicity scores.
Returns
-------
fig : matplotlib.figure.Figure object
ax : matplotlib.axes.Axes.
"""
colors = cm.rainbow(np.linspace(0, 1, self.n_annotators() + 1))
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
for a_idx in range(self.n_annotators()):
ax.scatter(self.betas_, self.C_[:, a_idx], color=colors[a_idx].reshape(1, -1),
label=r'annotator $a_' + str(a_idx) + r'$: $\alpha_' + str(a_idx) + '=' + str(
self.alphas_[a_idx]) + '$', s=np.full(len(self.betas_), 5))
ax.legend(loc='best', fancybox=False, framealpha=0.5, fontsize=fontsize)
ax.set_xlabel(r'inverse difficulty scores of samples: $\beta_\mathbf{x}$', fontsize=fontsize)
ax.set_ylabel(r'correct labelling probability: $p(y_\mathbf{x} | \alpha_i, \beta_\mathbf{x})$',
fontsize=fontsize)
return fig, ax
| [
"numpy.divide",
"numpy.log",
"numpy.zeros",
"sklearn.utils.check_X_y",
"sklearn.preprocessing.LabelEncoder",
"scipy.special.entr",
"annotlib.utils.check_positive_integer",
"sklearn.utils.column_or_1d",
"sklearn.svm.SVC",
"sklearn.base.is_classifier",
"matplotlib.pyplot.subplots",
"sklearn.mode... | [((6744, 6764), 'sklearn.utils.check_X_y', 'check_X_y', (['X', 'y_true'], {}), '(X, y_true)\n', (6753, 6764), False, 'from sklearn.utils import column_or_1d, check_X_y\n'), ((8606, 8625), 'numpy.zeros', 'np.zeros', (['n_samples'], {}), '(n_samples)\n', (8614, 8625), True, 'import numpy as np\n'), ((8652, 8671), 'numpy.zeros', 'np.zeros', (['n_samples'], {}), '(n_samples)\n', (8660, 8671), True, 'import numpy as np\n'), ((10547, 10585), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (10559, 10585), True, 'import matplotlib.pyplot as plt\n'), ((7148, 7168), 'sklearn.utils.column_or_1d', 'column_or_1d', (['alphas'], {}), '(alphas)\n', (7160, 7168), False, 'from sklearn.utils import column_or_1d, check_X_y\n'), ((8440, 8472), 'annotlib.utils.check_positive_integer', 'check_positive_integer', (['n_splits'], {}), '(n_splits)\n', (8462, 8472), False, 'from annotlib.utils import check_positive_integer\n'), ((8474, 8507), 'annotlib.utils.check_positive_integer', 'check_positive_integer', (['n_repeats'], {}), '(n_repeats)\n', (8496, 8507), False, 'from annotlib.utils import check_positive_integer\n'), ((8729, 8825), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': 'self.n_splits_', 'n_repeats': 'self.n_repeats_', 'random_state': 'random_state'}), '(n_splits=self.n_splits_, n_repeats=self.n_repeats_,\n random_state=random_state)\n', (8742, 8825), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((9450, 9476), 'numpy.divide', 'np.divide', (['(1)', 'entropy_corr'], {}), '(1, entropy_corr)\n', (9459, 9476), True, 'import numpy as np\n'), ((7639, 7653), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (7651, 7653), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((7859, 7927), 'sklearn.svm.SVC', 'SVC', ([], {'random_state': 'self.random_state_', 'probability': '(True)', 'gamma': '"""auto"""'}), 
"(random_state=self.random_state_, probability=True, gamma='auto')\n", (7862, 7927), False, 'from sklearn.svm import SVC\n'), ((8073, 8091), 'sklearn.base.is_classifier', 'is_classifier', (['clf'], {}), '(clf)\n', (8086, 8091), False, 'from sklearn.base import is_classifier\n'), ((9071, 9078), 'scipy.special.entr', 'entr', (['P'], {}), '(P)\n', (9075, 9078), False, 'from scipy.special import entr\n'), ((9081, 9098), 'numpy.log', 'np.log', (['n_classes'], {}), '(n_classes)\n', (9087, 9098), True, 'import numpy as np\n')] |
from BDFunction1D.Interpolation import InterpolateFunction
import numpy as np
from matplotlib import pyplot as plt
x = np.linspace(0.0, 2*np.pi, num=10, endpoint=True)
y = np.sin(x)
err = np.ones_like(x) * 0.1
# err = np.arange(10) * 0.1
f = InterpolateFunction(x, y, err)
x1 = np.linspace(0.0, 2*np.pi, num=1000, endpoint=True)
y1 = np.asarray(f.evaluate(x1))
err1 = f.error(x1)
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(x, y, 'o-r')
ax1.plot(x1, y1, '-')
ax1.plot(x1, y1+err1, '-b')
ax1.plot(x1, y1-err1, '-b')
ax2.plot(x, err, 'o-r')
ax2.plot(x1, err1, '-')
plt.show()
| [
"BDFunction1D.Interpolation.InterpolateFunction",
"matplotlib.pyplot.show",
"numpy.ones_like",
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((122, 172), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)'], {'num': '(10)', 'endpoint': '(True)'}), '(0.0, 2 * np.pi, num=10, endpoint=True)\n', (133, 172), True, 'import numpy as np\n'), ((175, 184), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (181, 184), True, 'import numpy as np\n'), ((246, 276), 'BDFunction1D.Interpolation.InterpolateFunction', 'InterpolateFunction', (['x', 'y', 'err'], {}), '(x, y, err)\n', (265, 276), False, 'from BDFunction1D.Interpolation import InterpolateFunction\n'), ((283, 335), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)'], {'num': '(1000)', 'endpoint': '(True)'}), '(0.0, 2 * np.pi, num=1000, endpoint=True)\n', (294, 335), True, 'import numpy as np\n'), ((404, 432), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)'}), '(2, sharex=True)\n', (416, 432), True, 'from matplotlib import pyplot as plt\n'), ((581, 591), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (589, 591), True, 'from matplotlib import pyplot as plt\n'), ((191, 206), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (203, 206), True, 'import numpy as np\n')] |
class DCMotor:
"""
Simple DC motor model.
"""
def __init__(self, nominal_voltage, no_load_speed, stall_torque):
"""
Create a motor with parameters:
:param nominal_voltage: [V]
:param no_load_speed: [rad/s]
:param stall_torque: [Nm]
"""
self.constant, self.resistance = self.get_motor_parameters(nominal_voltage, no_load_speed, stall_torque)
def get_torque(self, supply_voltage, w):
"""
Calculate instant torque from supply voltage and rotation speed.
:param supply_voltage: [V]
:param w: [rad/s
:return: shaft torque [Nm]
"""
return (supply_voltage - w * self.constant) / self.resistance * self.constant
@staticmethod
def get_motor_parameters(nominal_voltage, no_load_speed, stall_torque):
"""
Calculate motor constant and resistance from parameters:
:param nominal_voltage: [V]
:param no_load_speed: [rad/s]
:param stall_torque: [Nm]
:return: tuple of: motor constant, resistance
"""
motor_constant = nominal_voltage / no_load_speed
resistance = nominal_voltage / stall_torque * motor_constant
return motor_constant, resistance
if __name__ == '__main__':
import matplotlib.pyplot as plt
import numpy as np
v = np.linspace(0, 6, 100)
motor = DCMotor(6., 105., 0.057)
T = [motor.get_torque(vs, 105.) for vs in v]
plt.plot(v, T)
plt.grid()
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.show"
] | [((1349, 1371), 'numpy.linspace', 'np.linspace', (['(0)', '(6)', '(100)'], {}), '(0, 6, 100)\n', (1360, 1371), True, 'import numpy as np\n'), ((1462, 1476), 'matplotlib.pyplot.plot', 'plt.plot', (['v', 'T'], {}), '(v, T)\n', (1470, 1476), True, 'import matplotlib.pyplot as plt\n'), ((1481, 1491), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1489, 1491), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1506), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1504, 1506), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from sklearn.preprocessing import FunctionTransformer
X = [[4, 1, 2, 2], [1, 3, 9, 3], [5, 7, 5, 1]]
def function1(z):
return np.sqrt(z)
FT = FunctionTransformer(func = function1)
FT.fit(X)
newdata = FT.transform(X)
newdata
| [
"sklearn.preprocessing.FunctionTransformer",
"numpy.sqrt"
] | [((168, 203), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', ([], {'func': 'function1'}), '(func=function1)\n', (187, 203), False, 'from sklearn.preprocessing import FunctionTransformer\n'), ((151, 161), 'numpy.sqrt', 'np.sqrt', (['z'], {}), '(z)\n', (158, 161), True, 'import numpy as np\n')] |
"""
Class that can figure out which DES tile given RA, dec coords are in.
"""
import os
import sys
import numpy as np
import astropy.table as atpy
import astropy.io.fits as pyfits
from astLib import astWCS
import urllib
import time
import IPython
import wget
from bs4 import BeautifulSoup
class DESTiler:
"""A class for relating RA, dec coords to DES tiled survey geometry.
"""
def __init__(self, tileInfoCSVPath="DES_DR1_TILE_INFO.csv"):
"""
This calculates the time it takes to get the WCS set up.
Args:
tileInfoCSVPath(string): This is path of where the DES DR1 tile information is.
This is set to "DES_DR1_TILE_INFO.csv".
Returns:
This outputs the amount of time it took to set up the WCS.
"""
self.WCSTabPath = tileInfoCSVPath
t0 = time.time()
self.setUpWCSDict()
t1 = time.time()
print("... WCS set-up took %.3f sec ..." % (t1 - t0))
def setUpWCSDict(self):
"""
Sets-up WCS info, needed for fetching images. This is slow (~30 sec) if the survey is large,
so don't do this lightly.
"""
# Add some extra columns to speed up searching
self.tileTab = atpy.Table().read(self.WCSTabPath)
self.tileTab.add_column(atpy.Column(np.zeros(len(self.tileTab)), 'RAMin'))
self.tileTab.add_column(atpy.Column(np.zeros(len(self.tileTab)), 'RAMax'))
self.tileTab.add_column(atpy.Column(np.zeros(len(self.tileTab)), 'decMin'))
self.tileTab.add_column(atpy.Column(np.zeros(len(self.tileTab)), 'decMax'))
self.WCSDict = {}
keyWordsToGet = ['NAXIS', 'NAXIS1', 'NAXIS2', 'CTYPE1', 'CTYPE2', 'CRVAL1', 'CRVAL2', 'CRPIX1', 'CRPIX2',
'CD1_1', 'CD1_2', 'CD2_1', 'CD2_2', 'CDELT1', 'CDELT2', 'CUNIT1', 'CUNIT2']
for row in self.tileTab:
newHead = pyfits.Header()
for key in keyWordsToGet:
if key in self.tileTab.keys():
newHead[key] = row[key]
# Defaults if missing (needed for e.g. DES)
if 'NAXIS' not in newHead.keys():
newHead['NAXIS'] = 2
if 'CUNIT1' not in newHead.keys():
newHead['CUNIT1'] = 'DEG'
if 'CUNIT2' not in newHead.keys():
newHead['CUNIT2'] = 'DEG'
self.WCSDict[row['TILENAME']] = astWCS.WCS(newHead.copy(), mode='pyfits')
ra0, dec0 = self.WCSDict[row['TILENAME']].pix2wcs(0, 0)
ra1, dec1 = self.WCSDict[row['TILENAME']].pix2wcs(row['NAXIS1'], row['NAXIS2'])
if ra1 > ra0:
ra1 = -(360 - ra1)
row['RAMin'] = min([ra0, ra1])
row['RAMax'] = max([ra0, ra1])
row['decMin'] = min([dec0, dec1])
row['decMax'] = max([dec0, dec1])
def getTileName(self, RADeg, decDeg):
"""
Gets the tilename from DES in which the given ra and dec coordinates.
Args:
RADeg(float): This is the given right ascension of the object.
decDeg(float): This is the given declination of the object.
Returns:
Returns the tilename from DES in which the given ra and dec coordinates are found.
If the coordinates arent in the DES footprint (the DES DR1 tile information), then 'None'
will be returned.
"""
raMask = np.logical_and(np.greater_equal(RADeg, self.tileTab['RAMin']),
np.less(RADeg, self.tileTab['RAMax']))
decMask = np.logical_and(np.greater_equal(decDeg, self.tileTab['decMin']),
np.less(decDeg, self.tileTab['decMax']))
# print("RAMin: " + str(self.tileTab['RAMin']) + " TYPE: " + str(type(self.tileTab['RAMin'])))
# print("RAMax: " + str(self.tileTab['RAMax']) + " TYPE: " + str(type(self.tileTab['RAMax'])))
# print("decMin: " + str(self.tileTab['decMin']) + " TYPE: " + str(type(self.tileTab['decMin'])))
# print("decMax: " + str(self.tileTab['decMax']) + " TYPE: " + str(type(self.tileTab['decMax'])))
# print("raMask: " + str(raMask) + " TYPE: " + str(type(raMask)))
# print("decMask: " + str(decMask) + "TYPE: " +str(type(decMask)))
tileMask = np.logical_and(raMask, decMask)
if tileMask.sum() == 0:
return None
else:
return self.tileTab[tileMask]['TILENAME'][0]
def fetchTileImages(self, RADeg, decDeg, tileName, base_dir='DES/DES_Original'):
"""
If the ra, and dec are found in an DES info table that tile name is retrieved.
The g, r, and i .fits images from the DES DR1, are downloaded according to the tile name.
Args:
RADeg(float): This is the given right ascension of the object.
decDeg(float): This is the given declination of the object.
tileName(string): This is the tile name of the source from the DES info table.
base_dir(string): This is the root directory in which the original DES images are downloaded.
Returns:
Downloads the images from DES for g, r, and i .fits files of each source which contain the
given ra and dec coordinates. These images are downloaded to 'DES/DES_Original'.
"""
# Inside footprint check
raMask = np.logical_and(np.greater_equal(RADeg, self.tileTab['RAMin']),
np.less(RADeg, self.tileTab['RAMax']))
decMask = np.logical_and(np.greater_equal(decDeg, self.tileTab['decMin']),
np.less(decDeg, self.tileTab['decMax']))
tileMask = np.logical_and(raMask, decMask)
if tileMask.sum() == 0:
return None
if not os.path.exists('%s/%s' % (base_dir, tileName)):
os.makedirs('%s/%s' % (base_dir, tileName))
if os.path.exists('%s/%s/%s.html' % (base_dir, tileName, tileName)):
os.remove('%s/%s/%s.html' % (base_dir, tileName, tileName))
url = 'http://desdr-server.ncsa.illinois.edu/despublic/dr1_tiles/' + tileName + '/'
wget.download(url, '%s/%s/%s.html' % (base_dir, tileName, tileName))
with open('%s/%s/%s.html' % (base_dir, tileName, tileName), 'r') as content_file:
content = content_file.read()
print()
soup = BeautifulSoup(content, 'html.parser')
for row in soup.find_all('tr'):
for col in row.find_all('td'):
if col.text.find("r.fits.fz") != -1 or col.text.find("i.fits.fz") != -1 or col.text.find(
"g.fits.fz") != -1:
if not os.path.exists('%s/%s/%s' % (base_dir, tileName, col.text)):
print('Downloading: ' + url + col.text)
wget.download(url + col.text, '%s/%s/%s' % (base_dir, tileName, col.text))
print()
else:
print('%s/%s/%s already downloaded...' % (base_dir, tileName, col.text))
print()
print()
| [
"os.remove",
"astropy.table.Table",
"os.makedirs",
"numpy.logical_and",
"os.path.exists",
"time.time",
"wget.download",
"astropy.io.fits.Header",
"numpy.less",
"numpy.greater_equal",
"bs4.BeautifulSoup"
] | [((888, 899), 'time.time', 'time.time', ([], {}), '()\n', (897, 899), False, 'import time\n'), ((941, 952), 'time.time', 'time.time', ([], {}), '()\n', (950, 952), False, 'import time\n'), ((4346, 4377), 'numpy.logical_and', 'np.logical_and', (['raMask', 'decMask'], {}), '(raMask, decMask)\n', (4360, 4377), True, 'import numpy as np\n'), ((5743, 5774), 'numpy.logical_and', 'np.logical_and', (['raMask', 'decMask'], {}), '(raMask, decMask)\n', (5757, 5774), True, 'import numpy as np\n'), ((5963, 6027), 'os.path.exists', 'os.path.exists', (["('%s/%s/%s.html' % (base_dir, tileName, tileName))"], {}), "('%s/%s/%s.html' % (base_dir, tileName, tileName))\n", (5977, 6027), False, 'import os\n'), ((6203, 6271), 'wget.download', 'wget.download', (['url', "('%s/%s/%s.html' % (base_dir, tileName, tileName))"], {}), "(url, '%s/%s/%s.html' % (base_dir, tileName, tileName))\n", (6216, 6271), False, 'import wget\n'), ((1947, 1962), 'astropy.io.fits.Header', 'pyfits.Header', ([], {}), '()\n', (1960, 1962), True, 'import astropy.io.fits as pyfits\n'), ((3482, 3528), 'numpy.greater_equal', 'np.greater_equal', (['RADeg', "self.tileTab['RAMin']"], {}), "(RADeg, self.tileTab['RAMin'])\n", (3498, 3528), True, 'import numpy as np\n'), ((3562, 3599), 'numpy.less', 'np.less', (['RADeg', "self.tileTab['RAMax']"], {}), "(RADeg, self.tileTab['RAMax'])\n", (3569, 3599), True, 'import numpy as np\n'), ((3634, 3682), 'numpy.greater_equal', 'np.greater_equal', (['decDeg', "self.tileTab['decMin']"], {}), "(decDeg, self.tileTab['decMin'])\n", (3650, 3682), True, 'import numpy as np\n'), ((3717, 3756), 'numpy.less', 'np.less', (['decDeg', "self.tileTab['decMax']"], {}), "(decDeg, self.tileTab['decMax'])\n", (3724, 3756), True, 'import numpy as np\n'), ((5448, 5494), 'numpy.greater_equal', 'np.greater_equal', (['RADeg', "self.tileTab['RAMin']"], {}), "(RADeg, self.tileTab['RAMin'])\n", (5464, 5494), True, 'import numpy as np\n'), ((5528, 5565), 'numpy.less', 'np.less', (['RADeg', 
"self.tileTab['RAMax']"], {}), "(RADeg, self.tileTab['RAMax'])\n", (5535, 5565), True, 'import numpy as np\n'), ((5600, 5648), 'numpy.greater_equal', 'np.greater_equal', (['decDeg', "self.tileTab['decMin']"], {}), "(decDeg, self.tileTab['decMin'])\n", (5616, 5648), True, 'import numpy as np\n'), ((5683, 5722), 'numpy.less', 'np.less', (['decDeg', "self.tileTab['decMax']"], {}), "(decDeg, self.tileTab['decMax'])\n", (5690, 5722), True, 'import numpy as np\n'), ((5847, 5893), 'os.path.exists', 'os.path.exists', (["('%s/%s' % (base_dir, tileName))"], {}), "('%s/%s' % (base_dir, tileName))\n", (5861, 5893), False, 'import os\n'), ((5907, 5950), 'os.makedirs', 'os.makedirs', (["('%s/%s' % (base_dir, tileName))"], {}), "('%s/%s' % (base_dir, tileName))\n", (5918, 5950), False, 'import os\n'), ((6041, 6100), 'os.remove', 'os.remove', (["('%s/%s/%s.html' % (base_dir, tileName, tileName))"], {}), "('%s/%s/%s.html' % (base_dir, tileName, tileName))\n", (6050, 6100), False, 'import os\n'), ((6444, 6481), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (6457, 6481), False, 'from bs4 import BeautifulSoup\n'), ((1282, 1294), 'astropy.table.Table', 'atpy.Table', ([], {}), '()\n', (1292, 1294), True, 'import astropy.table as atpy\n'), ((6762, 6821), 'os.path.exists', 'os.path.exists', (["('%s/%s/%s' % (base_dir, tileName, col.text))"], {}), "('%s/%s/%s' % (base_dir, tileName, col.text))\n", (6776, 6821), False, 'import os\n'), ((6919, 6993), 'wget.download', 'wget.download', (['(url + col.text)', "('%s/%s/%s' % (base_dir, tileName, col.text))"], {}), "(url + col.text, '%s/%s/%s' % (base_dir, tileName, col.text))\n", (6932, 6993), False, 'import wget\n')] |
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import roc_auc_score as roc_auc
from fedot.core.composer.metrics import RMSE, ROCAUC, Silhouette
from fedot.core.data.data import InputData, OutputData
from fedot.core.repository.tasks import TaskTypesEnum
class MetricByTask:
__metric_by_task = {TaskTypesEnum.regression: RMSE,
TaskTypesEnum.classification: ROCAUC,
TaskTypesEnum.clustering: Silhouette,
TaskTypesEnum.ts_forecasting: RMSE,
}
def __init__(self, task_type):
self.metric_cls = self.__metric_by_task.get(task_type)
def get_value(self, true: InputData, predicted: OutputData, round_up_to: int = 6):
"""Returns the value of metric defined by task"""
try:
return round(self.metric_cls.metric(reference=true, predicted=predicted), round_up_to)
# TODO or raise ValueError? What to return in case of failure
except ValueError:
return 0.0
class TunerMetricByTask:
def __init__(self, task_type):
self.task_type = task_type
def get_metric_and_params(self, input_data):
""" Method return appropriate loss function for tuning
:param input_data: InputData which will be used for training
:return loss_function: function, which will calculate metric
:return loss_params: parameters for loss function
"""
if self.task_type == TaskTypesEnum.regression:
# Default metric for regression
loss_function = mean_squared_error
loss_params = {'squared': False}
elif self.task_type == TaskTypesEnum.ts_forecasting:
# Default metric for time series forecasting
loss_function = mean_squared_error
loss_params = {'squared': False}
elif self.task_type == TaskTypesEnum.classification:
# Default metric for time classification
amount_of_classes = len(np.unique(np.array(input_data.target)))
if amount_of_classes == 2:
# Binary classification
loss_function = roc_auc
loss_params = None
else:
# Metric for multiclass classification
loss_function = roc_auc
loss_params = {'multi_class': 'ovr', 'average': 'macro'}
else:
raise NotImplementedError(f'Metric for "{self.task_type}" is not supported')
return loss_function, loss_params
| [
"numpy.array"
] | [((2040, 2067), 'numpy.array', 'np.array', (['input_data.target'], {}), '(input_data.target)\n', (2048, 2067), True, 'import numpy as np\n')] |
import joblib
import features_extraction
import sys
import numpy as np
from features_extraction import LOCALHOST_PATH, DIRECTORY_NAME
def get_prediction_from_url(test_url,html):
features_test = features_extraction.main(test_url,html)
features_test = np.array(features_test).reshape((1, -1))
clf = joblib.load(LOCALHOST_PATH + DIRECTORY_NAME + '/classifier/random_forest1.pkl')
pred = clf.predict(features_test)
return int(pred[0])
if __name__ == "__main__":
main()
| [
"features_extraction.main",
"joblib.load",
"numpy.array"
] | [((201, 241), 'features_extraction.main', 'features_extraction.main', (['test_url', 'html'], {}), '(test_url, html)\n', (225, 241), False, 'import features_extraction\n'), ((313, 392), 'joblib.load', 'joblib.load', (["(LOCALHOST_PATH + DIRECTORY_NAME + '/classifier/random_forest1.pkl')"], {}), "(LOCALHOST_PATH + DIRECTORY_NAME + '/classifier/random_forest1.pkl')\n", (324, 392), False, 'import joblib\n'), ((261, 284), 'numpy.array', 'np.array', (['features_test'], {}), '(features_test)\n', (269, 284), True, 'import numpy as np\n')] |
import numpy as np
def index_limit(*args):
return min([len(i) for i in args])
def accuracy(y, yhat):
m = np.mean(np.abs((yhat-y)/y)) * 100
r = np.sqrt(np.mean((yhat - y)**2))
print('MAPE: \t\t%d%%' % m)
print('RMSE: \t\t%.1f' % r)
return m, r
def service_level(y, unmet_demand):
index = index_limit(y, unmet_demand)
r = 1 - (np.sum(unmet_demand[:index]) / np.sum(y[:index]))
print('Service level: \t%.2f' % r)
return r
def cycle_rate(y, inv):
index = index_limit(y, inv)
r = np.sum(inv[:index]) / np.sum(y[:index])
print('Cycle rate: \t%.2f' % r)
return r
def bullwhip_rate(y, order_list):
order_quantity = [i['quantity'] for i in order_list]
r = (np.std(order_quantity) / np.mean(order_quantity)) / (np.std(y)/np.mean(y))
print('Bullwhip rate: \t%.2f' % r)
return r
def get_all_metrics(result):
print('\nMetrics')
print('-'*25)
m, rmse = accuracy(result['y'], result['yhat'])
sl = service_level(result['y'], result['unmet_demand'])
cr = cycle_rate(result['y'], result['inv'])
br = bullwhip_rate(result['y'], result['order_list'])
return m, rmse, sl, cr, br | [
"numpy.std",
"numpy.abs",
"numpy.mean",
"numpy.sum"
] | [((161, 185), 'numpy.mean', 'np.mean', (['((yhat - y) ** 2)'], {}), '((yhat - y) ** 2)\n', (168, 185), True, 'import numpy as np\n'), ((507, 526), 'numpy.sum', 'np.sum', (['inv[:index]'], {}), '(inv[:index])\n', (513, 526), True, 'import numpy as np\n'), ((529, 546), 'numpy.sum', 'np.sum', (['y[:index]'], {}), '(y[:index])\n', (535, 546), True, 'import numpy as np\n'), ((121, 143), 'numpy.abs', 'np.abs', (['((yhat - y) / y)'], {}), '((yhat - y) / y)\n', (127, 143), True, 'import numpy as np\n'), ((347, 375), 'numpy.sum', 'np.sum', (['unmet_demand[:index]'], {}), '(unmet_demand[:index])\n', (353, 375), True, 'import numpy as np\n'), ((378, 395), 'numpy.sum', 'np.sum', (['y[:index]'], {}), '(y[:index])\n', (384, 395), True, 'import numpy as np\n'), ((690, 712), 'numpy.std', 'np.std', (['order_quantity'], {}), '(order_quantity)\n', (696, 712), True, 'import numpy as np\n'), ((715, 738), 'numpy.mean', 'np.mean', (['order_quantity'], {}), '(order_quantity)\n', (722, 738), True, 'import numpy as np\n'), ((743, 752), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (749, 752), True, 'import numpy as np\n'), ((753, 763), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (760, 763), True, 'import numpy as np\n')] |
import xarray as xr
import atmPy.precipitation.radar as _radar
import pandas as _pd
import numpy as np
import atmPy.general.timeseries as _timeseries
# def read_ceilometer_nc(fname):
# ds = xr.open_dataset(fname)
# ceil = _ceilometry.Ceilometer()
# ceil.backscatter = _ceilometry.Backscatter(ds.backscatter.to_pandas())
# return ceil
def read_kazr_nc(fname, timezone = None, keep_xr_dataset = False):
kazr = _radar.Kazr()
if type(fname) == str:
fname = [fname]
reflects = []
stds = []
for fn in fname:
ds = xr.open_dataset(fn)
stds.append(ds.snr_copol.to_pandas())
reflects.append(ds.reflectivity.to_pandas())
stds = _pd.concat(stds).sort_index()
reflects =_pd.concat(reflects).sort_index()
if not isinstance(timezone, type(None)):
stds.index += np.timedelta64(timezone, 'h')
reflects.index += np.timedelta64(timezone, 'h')
kazr.signal2noise_ratio = stds
kazr.reflectivity = reflects
if keep_xr_dataset:
kazr.xr_dataset = ds
return kazr | [
"xarray.open_dataset",
"pandas.concat",
"atmPy.precipitation.radar.Kazr",
"numpy.timedelta64"
] | [((429, 442), 'atmPy.precipitation.radar.Kazr', '_radar.Kazr', ([], {}), '()\n', (440, 442), True, 'import atmPy.precipitation.radar as _radar\n'), ((561, 580), 'xarray.open_dataset', 'xr.open_dataset', (['fn'], {}), '(fn)\n', (576, 580), True, 'import xarray as xr\n'), ((837, 866), 'numpy.timedelta64', 'np.timedelta64', (['timezone', '"""h"""'], {}), "(timezone, 'h')\n", (851, 866), True, 'import numpy as np\n'), ((893, 922), 'numpy.timedelta64', 'np.timedelta64', (['timezone', '"""h"""'], {}), "(timezone, 'h')\n", (907, 922), True, 'import numpy as np\n'), ((692, 708), 'pandas.concat', '_pd.concat', (['stds'], {}), '(stds)\n', (702, 708), True, 'import pandas as _pd\n'), ((736, 756), 'pandas.concat', '_pd.concat', (['reflects'], {}), '(reflects)\n', (746, 756), True, 'import pandas as _pd\n')] |
import numpy as np
import cv2
import argparse
from collections import deque
cap = cv2.VideoCapture(0)
pts = deque(maxlen=128)
lower_red = np.array([0, 80, 50])
upper_red = np.array([8, 255, 220])
lower_green = np.array([50, 120, 50])
upper_green = np.array([77, 255, 255])
while True:
ret, img = cap.read()
img = img[:, :-50]
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # 将捕获的视频帧由RBG转HSV
kernel = np.ones((3, 3), np.uint8)
# 将处于lower_green 和upper_green 区间外的值全部置为0,区间内的值置为255
mask = cv2.inRange(hsv, lower_green, upper_green)
# mask_pencil = cv2.inRange()
# 对mask图像进行腐蚀,将一些小的白色区域消除,将图像“变瘦”, iterations代表使用erode的次数
# erode就是让图像中白色部分变小
mask = cv2.erode(mask, kernel, iterations=2)
# 开运算 (MORPH_OPEN):先腐蚀再膨胀
# 删除不能包含结构元素的对象区域,平滑图像的轮廓,使拐点的地方更加连贯,断开一些狭窄的链接,去掉细小的突出部分。
# 在这里使用开运算就是为了使除笔头外的噪声区域尽量的消除
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
# dilate膨胀就是将白色区域变大,黑色的区域减小
mask = cv2.dilate(mask, kernel, iterations=5)
# bitwise_and对二进制数据进行“与”操作,即对图像(灰度图像或彩色图像均可)每个像素值进行二进制“与”操作
# 与操作后,白色区域的部分就会保存下来,黑色区域(为0)与后就还是为0
res = cv2.bitwise_and(img, img, mask=mask)
'''
findContours() 查找检测物体的轮廓。
第一个参数是寻找轮廓的图像;
第二个参数表示轮廓的检索模式,有四种(本文介绍的都是新的cv2接口):
cv2.RETR_EXTERNAL表示只检测外轮廓
cv2.RETR_LIST检测的轮廓不建立等级关系
cv2.RETR_CCOMP建立两个等级的轮廓,上面的一层为外边界,里面的一层为内孔的边界信息。
如果内孔内还有一个连通物体,这个物体的边界也在顶层。
cv2.RETR_TREE建立一个等级树结构的轮廓。
第三个参数method为轮廓的近似办法
cv2.CHAIN_APPROX_NONE存储所有的轮廓点,相邻的两个点的像素位置差不超过1,
即max(abs(x1-x2),abs(y2-y1))==1
cv2.CHAIN_APPROX_SIMPLE压缩水平方向,垂直方向,对角线方向的元素,只保留该方向的终点坐标,
例如一个矩形轮廓只需4个点来rs:h保存轮廓信息
cv2.findContours()函数返回两个值:contouierarchy,一个是轮廓本身,还有一个是每条轮廓对应的属性。
findContours函数首先返回一个list(即contours),list中每个元素都是图像中的一个轮廓,用numpy中的ndarray表示
'''
cnts, heir = cv2.findContours(mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)[-2:]
center = None
if len(cnts) > 0: # 如果检测出了轮廓
c = max(cnts, key=cv2.contourArea) # 以轮廓的面积为条件,找出最大的面积
((x, y), radius) = cv2.minEnclosingCircle(c) # 找出最小的圆
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) # 用图像的矩求质心
if radius > 5:
# cv2.circle(image, center_coordinates, radius, color, thickness)
cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
cv2.circle(img, center, 5, (0, 0, 255), -1)
# cv2.circle(res, (int(x), int(y)), int(radius), (0, 255, 255), 2)
# cv2.circle(res, center, 5, (0, 0, 255), -1)
pts.appendleft(center)
for i in range(1, len(pts)):
if pts[i - 1] is None or pts[i] is None:
# if pts[i - 1] == pts[i]:
continue
thick = int(np.sqrt(len(pts) / float(i + 1)) * 2.5)
cv2.line(img, pts[i - 1], pts[i], (0, 0, 225), thick) # 画线
cv2.imshow("img", img)
cv2.imshow("mask", mask)
cv2.imshow("res", res)
k = cv2.waitKey(30) & 0xFF
if k == 32:
break
# cleanup the camera and close any open windows
cap.release()
cv2.destroyAllWindows()
| [
"cv2.line",
"cv2.circle",
"cv2.minEnclosingCircle",
"cv2.bitwise_and",
"cv2.dilate",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.waitKey",
"cv2.moments",
"cv2.imshow",
"numpy.ones",
"cv2.VideoCapture",
"numpy.array",
"cv2.erode",
"cv2.destroyAllWindows",
"cv2.inRange",
"collections.dequ... | [((83, 102), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (99, 102), False, 'import cv2\n'), ((110, 127), 'collections.deque', 'deque', ([], {'maxlen': '(128)'}), '(maxlen=128)\n', (115, 127), False, 'from collections import deque\n'), ((141, 162), 'numpy.array', 'np.array', (['[0, 80, 50]'], {}), '([0, 80, 50])\n', (149, 162), True, 'import numpy as np\n'), ((175, 198), 'numpy.array', 'np.array', (['[8, 255, 220]'], {}), '([8, 255, 220])\n', (183, 198), True, 'import numpy as np\n'), ((214, 237), 'numpy.array', 'np.array', (['[50, 120, 50]'], {}), '([50, 120, 50])\n', (222, 237), True, 'import numpy as np\n'), ((252, 276), 'numpy.array', 'np.array', (['[77, 255, 255]'], {}), '([77, 255, 255])\n', (260, 276), True, 'import numpy as np\n'), ((3092, 3115), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3113, 3115), False, 'import cv2\n'), ((349, 385), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (361, 385), False, 'import cv2\n'), ((419, 444), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (426, 444), True, 'import numpy as np\n'), ((512, 554), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_green', 'upper_green'], {}), '(hsv, lower_green, upper_green)\n', (523, 554), False, 'import cv2\n'), ((687, 724), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {'iterations': '(2)'}), '(mask, kernel, iterations=2)\n', (696, 724), False, 'import cv2\n'), ((862, 922), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(1)'}), '(mask, cv2.MORPH_OPEN, kernel, iterations=1)\n', (878, 922), False, 'import cv2\n'), ((967, 1005), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel'], {'iterations': '(5)'}), '(mask, kernel, iterations=5)\n', (977, 1005), False, 'import cv2\n'), ((1121, 1157), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (1136, 1157), 
False, 'import cv2\n'), ((2888, 2910), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (2898, 2910), False, 'import cv2\n'), ((2915, 2939), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (2925, 2939), False, 'import cv2\n'), ((2944, 2966), 'cv2.imshow', 'cv2.imshow', (['"""res"""', 'res'], {}), "('res', res)\n", (2954, 2966), False, 'import cv2\n'), ((2068, 2093), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (2090, 2093), False, 'import cv2\n'), ((2117, 2131), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (2128, 2131), False, 'import cv2\n'), ((2823, 2876), 'cv2.line', 'cv2.line', (['img', 'pts[i - 1]', 'pts[i]', '(0, 0, 225)', 'thick'], {}), '(img, pts[i - 1], pts[i], (0, 0, 225), thick)\n', (2831, 2876), False, 'import cv2\n'), ((2976, 2991), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (2987, 2991), False, 'import cv2\n'), ((2404, 2447), 'cv2.circle', 'cv2.circle', (['img', 'center', '(5)', '(0, 0, 255)', '(-1)'], {}), '(img, center, 5, (0, 0, 255), -1)\n', (2414, 2447), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
@site: e-smartdata.org
"""
import numpy as np
import pandas as pd
# Demo frames: three 10x4 frames of uniform randoms sharing columns a-d,
# plus a named Series used later for Series-to-DataFrame concatenation.
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
df3 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
s = pd.Series(np.random.rand(10), name='x')
# %% concat
# Vertical stack; ignore_index=True renumbers the rows 0..29.
df = pd.concat([df1, df2, df3], ignore_index=True)
# %%
# Without ignore_index the original row labels repeat (0..9 three times).
df = pd.concat([df1, df2, df3])
df.reset_index()  # returns a relabelled copy; df itself is left unchanged
# %%
# Frames with disjoint column sets.
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('efgh'))
df = pd.concat([df1, df2])  # row-wise: missing columns are filled with NaN
df = pd.concat([df1, df2], axis=1)  # column-wise: rows aligned on the index
# %%
# Drop every other row of df1 so the two indexes no longer match.
df1 = df1[::2]
df = pd.concat([df1, df2], axis=1, join='outer')  # union of indexes (NaN gaps)
df = pd.concat([df1, df2], axis=1, join='inner')  # intersection of indexes
# %%
# append Series to DataFrame
pd.concat([df1, s])  # row-wise: the named Series becomes a new 'x' column block
pd.concat([df1, s], axis=1)  # column-wise: adds 'x' as an extra column
# %%
df1.columns
df2.columns = ['e', 'f', 'g', 'h']  # rename in place (df2 already uses e-h here)
df = pd.concat([df1, df2], axis=1)
| [
"numpy.random.rand",
"pandas.concat"
] | [((369, 414), 'pandas.concat', 'pd.concat', (['[df1, df2, df3]'], {'ignore_index': '(True)'}), '([df1, df2, df3], ignore_index=True)\n', (378, 414), True, 'import pandas as pd\n'), ((426, 452), 'pandas.concat', 'pd.concat', (['[df1, df2, df3]'], {}), '([df1, df2, df3])\n', (435, 452), True, 'import pandas as pd\n'), ((610, 631), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {}), '([df1, df2])\n', (619, 631), True, 'import pandas as pd\n'), ((637, 666), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)'}), '([df1, df2], axis=1)\n', (646, 666), True, 'import pandas as pd\n'), ((694, 737), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""outer"""'}), "([df1, df2], axis=1, join='outer')\n", (703, 737), True, 'import pandas as pd\n'), ((744, 787), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)', 'join': '"""inner"""'}), "([df1, df2], axis=1, join='inner')\n", (753, 787), True, 'import pandas as pd\n'), ((823, 842), 'pandas.concat', 'pd.concat', (['[df1, s]'], {}), '([df1, s])\n', (832, 842), True, 'import pandas as pd\n'), ((843, 870), 'pandas.concat', 'pd.concat', (['[df1, s]'], {'axis': '(1)'}), '([df1, s], axis=1)\n', (852, 870), True, 'import pandas as pd\n'), ((929, 958), 'pandas.concat', 'pd.concat', (['[df1, df2]'], {'axis': '(1)'}), '([df1, df2], axis=1)\n', (938, 958), True, 'import pandas as pd\n'), ((134, 155), 'numpy.random.rand', 'np.random.rand', (['(10)', '(4)'], {}), '(10, 4)\n', (148, 155), True, 'import numpy as np\n'), ((198, 219), 'numpy.random.rand', 'np.random.rand', (['(10)', '(4)'], {}), '(10, 4)\n', (212, 219), True, 'import numpy as np\n'), ((262, 283), 'numpy.random.rand', 'np.random.rand', (['(10)', '(4)'], {}), '(10, 4)\n', (276, 283), True, 'import numpy as np\n'), ((321, 339), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (335, 339), True, 'import numpy as np\n'), ((495, 516), 'numpy.random.rand', 'np.random.rand', (['(10)', '(4)'], {}), '(10, 4)\n', (509, 516), 
True, 'import numpy as np\n'), ((559, 580), 'numpy.random.rand', 'np.random.rand', (['(10)', '(4)'], {}), '(10, 4)\n', (573, 580), True, 'import numpy as np\n')] |
import numpy as np
import cv2 as cv
from numba import jit
DEBUG = False
def remove_object(image, mask, patch_size=21, alpha=0.8):
    """Remove the masked region from `image` via exemplar-based inpainting.

    Criminisi-style algorithm: repeatedly pick the highest-priority patch on
    the fill front, paste the best-matching source patch over it, then update
    the confidence map, until the whole mask has been filled.

    Args:
        image: BGR source image, shape (H, W, 3).
        mask: single-channel mask of matching H x W; pixels > 245 mark the
            region to remove.
        patch_size: odd side length of the square working patches.
        alpha: blend weight between the confidence and data terms when
            computing front priorities.

    Returns:
        A copy of `image` with the masked region filled in.
    """
    # time step (only used to label debug windows)
    t = 0
    # mask post-processing: median blur smooths ragged mask borders
    mask = cv.medianBlur(mask, 5)
    mask_indx = np.where(mask > 245)
    mask_boolean = np.ones_like(mask)  # 1 = known pixel, 0 = to be filled
    mask_boolean[mask_indx] = 0
    if DEBUG:
        cv.imshow(f'mask t={t}', mask)
        cv.waitKey()
    frame_shape = mask.shape
    # source (phi): the known region, with the hole whited out
    source = np.copy(image)
    source[mask_indx] = 255.0  # to speed up best-matching
    # target (omega): the image being filled, with the hole blacked out
    target = np.copy(source)
    target[mask_indx] = 0.0
    if DEBUG:
        cv.imshow(f'target t={t}', target)
        cv.waitKey()
    # Confidence term C(p): 1 on known pixels, 0 inside the hole.
    # NOTE: `np.float` was removed in NumPy 1.24; the builtin `float`
    # (i.e. float64) is the drop-in replacement.
    C = np.ones_like(mask, dtype=float)
    C[mask_indx] = 0.0
    # Data term D(p): edge strength, favors continuing strong structures
    # TODO improve data algorithm D(p)
    D = _find_data_property(image, mask_indx, t)
    # Main Loop
    print('Started main loop.')
    not_filled = True
    while not_filled:
        # find front contour (boundary between filled and unfilled pixels)
        front = _find_border(mask, t)
        # TODO check that always holds
        if np.max(front) == 0:
            break
        # priority = front * (alpha*C + (1-alpha)*D), patch-averaged via box blur
        front_c = cv.blur(C, (patch_size, patch_size))
        front_d = cv.blur(D, (patch_size, patch_size))
        # front_priority = front * front_c * front_d
        front_priority = front * (alpha * front_c + (1 - alpha) * front_d)
        # find max priority index
        prio_indx = np.unravel_index(np.argmax(front_priority, axis=None), front_priority.shape)
        assert front_priority[prio_indx] > 0
        # get patch window, clamped to the frame bounds
        top, bottom, left, right = _get_patch_coords(prio_indx, patch_size, frame_shape)
        patch = target[top:bottom + 1, left:right + 1, :]
        if DEBUG:
            h, w, _ = patch.shape
            pt = (left, top)
            temp = np.copy(target)
            cv.imshow(f'patch t={t}', cv.rectangle(temp, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2))
            cv.waitKey()
        # find best template
        patch_c = C[top:bottom + 1, left:right + 1]
        best_match = _find_best_match(source, patch, patch_c, mask_boolean, t)
        # increment time
        t += 1
        # update target (Raw copy of the matched patch)
        target[top:bottom + 1, left:right + 1, :] = best_match
        # TODO update target (Seamless)
        # hp, wp, _ = best_match.shape
        # center = (left + wp // 2, top + hp // 2)
        # bm_mask = np.full_like(best_match, fill_value=255, dtype=np.float)
        # target = cv.seamlessClone(best_match, target, bm_mask, center, cv.NORMAL_CLONE)
        if DEBUG:
            cv.imshow(f'target t={t}', target)
            cv.waitKey()
        # update mask: the pasted patch is now considered filled
        mask[top:bottom + 1, left:right + 1] = 0
        if DEBUG:
            cv.imshow(f'mask t={t}', mask)
            cv.waitKey()
        # break if filled
        if np.sum(mask) == 0:
            not_filled = False
        # update confidence: newly-filled pixels inherit the patch-averaged C
        prev_c = C[top:bottom + 1, left:right + 1]
        not_sure_ind = np.where(prev_c < 1)
        new_c = front_c[top:bottom + 1, left:right + 1]
        prev_c[not_sure_ind] = new_c[not_sure_ind]
        C[top:bottom + 1, left:right + 1] = prev_c
        # logging
        if t % 20 == 0:
            _log_mask_percentage(mask)
    return target
def _find_border(mask, t):
    """Return the fill-front contour of `mask` as a float map normalized to [0, 1]."""
    contour = cv.Canny(mask, 100, 200)
    if DEBUG:
        cv.imshow(f'contour t={t}', contour)
        cv.waitKey()
    return contour / 255.
def _find_data_property(image, mask_indx, t):
    """Approximate the data term D(p): Canny edge strength of the grayscale
    image, zeroed inside the masked region and normalized to [0, 1]."""
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    strength = cv.Canny(gray, 20, 100)
    strength[mask_indx] = 0
    if DEBUG:
        cv.imshow(f'data t={t}', strength)
        cv.waitKey()
    return strength / 255.
def _get_patch_coords(prio_indx, patch_size, frame_shape):
i_center, j_center = prio_indx
h, w = frame_shape
half_patch = (patch_size - 1) // 2
top = np.maximum(i_center - half_patch, 0)
bottom = np.minimum(i_center + half_patch, h - 1)
left = np.maximum(j_center - half_patch, 0)
right = np.minimum(j_center + half_patch, w - 1)
return top, bottom, left, right
def _find_best_match(source, patch, confidence, mask_boolean, t):
    """Exhaustively search `source` for the window that best matches `patch`.

    Distance is a confidence-weighted sum of squared differences; candidate
    windows touching the target region (mask_boolean == 0 anywhere inside)
    are skipped by the inner loop.

    Returns:
        The best-matching source window, same shape as `patch`.
    """
    h, w, _ = source.shape
    hp, wp, _ = patch.shape
    # broadcast the per-pixel confidence over the 3 color channels
    confidence3d = np.repeat(confidence[:, :, np.newaxis], 3, axis=2)
    # NOTE: `np.float` was removed in NumPy 1.24; builtin `float` (float64)
    # is the drop-in replacement.
    source_float = source.astype(float)
    # Main loop (numba-compiled exhaustive scan)
    best_left, best_top = _find_best_match_loop(confidence3d, h, hp, mask_boolean, patch, source_float, w, wp)
    best_bottom, best_right = best_top + hp, best_left + wp
    if DEBUG:
        pt = (best_left, best_top)
        temp = np.copy(source)
        cv.imshow(f'best_match t={t}', cv.rectangle(temp, pt, (best_right, best_bottom), (0, 0, 255), 2))
        cv.waitKey()
    return source[best_top:best_bottom, best_left:best_right, :]
@jit(nopython=True)
def _find_best_match_loop(confidence3d, h, hp, mask_boolean, patch, source, w, wp):
    """Scan every candidate window of `source` and return the (left, top)
    corner of the one with the smallest confidence-weighted SSD to `patch`.

    Compiled with numba in nopython mode, so only numba-supported
    constructs are used in the body.
    """
    min_distance = np.inf
    best_top = None
    best_left = None
    # exhaustive search over every fully-inside window position
    for top in range(h - hp + 1):
        for left in range(w - wp + 1):
            bottom, right = top + hp, left + wp
            if np.prod(mask_boolean[top:bottom, left:right]) == 0.:  # touches target region
                continue
            s = source[top:bottom, left:right]
            # confidence-weighted sum of squared differences
            d = np.sum(np.power(s - patch, 2) * confidence3d)
            if d < min_distance:
                min_distance = d
                best_top = top
                best_left = left
    return best_left, best_top
def _log_mask_percentage(mask):
mask_indx = np.where(mask > 245)
mask_boolean = np.ones_like(mask)
mask_boolean[mask_indx] = 0
n = np.count_nonzero(mask_boolean)
perc = n / mask_boolean.size * 100
print(f'Covered {perc:.2f}% of the image.')
| [
"numpy.maximum",
"numpy.sum",
"cv2.medianBlur",
"numpy.argmax",
"cv2.rectangle",
"cv2.imshow",
"numpy.prod",
"numpy.copy",
"cv2.cvtColor",
"numpy.power",
"numpy.max",
"numpy.repeat",
"cv2.Canny",
"numpy.minimum",
"numpy.ones_like",
"cv2.waitKey",
"numpy.count_nonzero",
"cv2.blur",
... | [((5011, 5029), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5014, 5029), False, 'from numba import jit\n'), ((205, 227), 'cv2.medianBlur', 'cv.medianBlur', (['mask', '(5)'], {}), '(mask, 5)\n', (218, 227), True, 'import cv2 as cv\n'), ((245, 265), 'numpy.where', 'np.where', (['(mask > 245)'], {}), '(mask > 245)\n', (253, 265), True, 'import numpy as np\n'), ((286, 304), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (298, 304), True, 'import numpy as np\n'), ((485, 499), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (492, 499), True, 'import numpy as np\n'), ((598, 613), 'numpy.copy', 'np.copy', (['source'], {}), '(source)\n', (605, 613), True, 'import numpy as np\n'), ((755, 789), 'numpy.ones_like', 'np.ones_like', (['mask'], {'dtype': 'np.float'}), '(mask, dtype=np.float)\n', (767, 789), True, 'import numpy as np\n'), ((3453, 3477), 'cv2.Canny', 'cv.Canny', (['mask', '(100)', '(200)'], {}), '(mask, 100, 200)\n', (3461, 3477), True, 'import cv2 as cv\n'), ((3655, 3692), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (3666, 3692), True, 'import cv2 as cv\n'), ((3708, 3735), 'cv2.Canny', 'cv.Canny', (['image_bw', '(20)', '(100)'], {}), '(image_bw, 20, 100)\n', (3716, 3735), True, 'import cv2 as cv\n'), ((4046, 4082), 'numpy.maximum', 'np.maximum', (['(i_center - half_patch)', '(0)'], {}), '(i_center - half_patch, 0)\n', (4056, 4082), True, 'import numpy as np\n'), ((4097, 4137), 'numpy.minimum', 'np.minimum', (['(i_center + half_patch)', '(h - 1)'], {}), '(i_center + half_patch, h - 1)\n', (4107, 4137), True, 'import numpy as np\n'), ((4152, 4188), 'numpy.maximum', 'np.maximum', (['(j_center - half_patch)', '(0)'], {}), '(j_center - half_patch, 0)\n', (4162, 4188), True, 'import numpy as np\n'), ((4202, 4242), 'numpy.minimum', 'np.minimum', (['(j_center + half_patch)', '(w - 1)'], {}), '(j_center + half_patch, w - 1)\n', (4212, 4242), True, 'import numpy 
as np\n'), ((4432, 4482), 'numpy.repeat', 'np.repeat', (['confidence[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(confidence[:, :, np.newaxis], 3, axis=2)\n', (4441, 4482), True, 'import numpy as np\n'), ((5768, 5788), 'numpy.where', 'np.where', (['(mask > 245)'], {}), '(mask > 245)\n', (5776, 5788), True, 'import numpy as np\n'), ((5809, 5827), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (5821, 5827), True, 'import numpy as np\n'), ((5872, 5902), 'numpy.count_nonzero', 'np.count_nonzero', (['mask_boolean'], {}), '(mask_boolean)\n', (5888, 5902), True, 'import numpy as np\n'), ((364, 394), 'cv2.imshow', 'cv.imshow', (['f"""mask t={t}"""', 'mask'], {}), "(f'mask t={t}', mask)\n", (373, 394), True, 'import cv2 as cv\n'), ((404, 416), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (414, 416), True, 'import cv2 as cv\n'), ((669, 703), 'cv2.imshow', 'cv.imshow', (['f"""target t={t}"""', 'target'], {}), "(f'target t={t}', target)\n", (678, 703), True, 'import cv2 as cv\n'), ((713, 725), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (723, 725), True, 'import cv2 as cv\n'), ((1224, 1260), 'cv2.blur', 'cv.blur', (['C', '(patch_size, patch_size)'], {}), '(C, (patch_size, patch_size))\n', (1231, 1260), True, 'import cv2 as cv\n'), ((1280, 1316), 'cv2.blur', 'cv.blur', (['D', '(patch_size, patch_size)'], {}), '(D, (patch_size, patch_size))\n', (1287, 1316), True, 'import cv2 as cv\n'), ((3119, 3139), 'numpy.where', 'np.where', (['(prev_c < 1)'], {}), '(prev_c < 1)\n', (3127, 3139), True, 'import numpy as np\n'), ((3504, 3538), 'cv2.imshow', 'cv.imshow', (['f"""contour t={t}"""', 'edges'], {}), "(f'contour t={t}', edges)\n", (3513, 3538), True, 'import cv2 as cv\n'), ((3548, 3560), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (3558, 3560), True, 'import cv2 as cv\n'), ((3788, 3819), 'cv2.imshow', 'cv.imshow', (['f"""data t={t}"""', 'edges'], {}), "(f'data t={t}', edges)\n", (3797, 3819), True, 'import cv2 as cv\n'), ((3829, 3841), 'cv2.waitKey', 
'cv.waitKey', ([], {}), '()\n', (3839, 3841), True, 'import cv2 as cv\n'), ((4792, 4807), 'numpy.copy', 'np.copy', (['source'], {}), '(source)\n', (4799, 4807), True, 'import numpy as np\n'), ((4924, 4936), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (4934, 4936), True, 'import cv2 as cv\n'), ((1139, 1152), 'numpy.max', 'np.max', (['front'], {}), '(front)\n', (1145, 1152), True, 'import numpy as np\n'), ((1522, 1558), 'numpy.argmax', 'np.argmax', (['front_priority'], {'axis': 'None'}), '(front_priority, axis=None)\n', (1531, 1558), True, 'import numpy as np\n'), ((1906, 1921), 'numpy.copy', 'np.copy', (['target'], {}), '(target)\n', (1913, 1921), True, 'import numpy as np\n'), ((2038, 2050), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (2048, 2050), True, 'import cv2 as cv\n'), ((2695, 2729), 'cv2.imshow', 'cv.imshow', (['f"""target t={t}"""', 'target'], {}), "(f'target t={t}', target)\n", (2704, 2729), True, 'import cv2 as cv\n'), ((2743, 2755), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (2753, 2755), True, 'import cv2 as cv\n'), ((2863, 2893), 'cv2.imshow', 'cv.imshow', (['f"""mask t={t}"""', 'mask'], {}), "(f'mask t={t}', mask)\n", (2872, 2893), True, 'import cv2 as cv\n'), ((2907, 2919), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (2917, 2919), True, 'import cv2 as cv\n'), ((2961, 2973), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (2967, 2973), True, 'import numpy as np\n'), ((4848, 4913), 'cv2.rectangle', 'cv.rectangle', (['temp', 'pt', '(best_right, best_bottom)', '(0, 0, 255)', '(2)'], {}), '(temp, pt, (best_right, best_bottom), (0, 0, 255), 2)\n', (4860, 4913), True, 'import cv2 as cv\n'), ((1961, 2023), 'cv2.rectangle', 'cv.rectangle', (['temp', 'pt', '(pt[0] + w, pt[1] + h)', '(0, 0, 255)', '(2)'], {}), '(temp, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)\n', (1973, 2023), True, 'import cv2 as cv\n'), ((5325, 5370), 'numpy.prod', 'np.prod', (['mask_boolean[top:bottom, left:right]'], {}), '(mask_boolean[top:bottom, left:right])\n', (5332, 
5370), True, 'import numpy as np\n'), ((5505, 5527), 'numpy.power', 'np.power', (['(s - patch)', '(2)'], {}), '(s - patch, 2)\n', (5513, 5527), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.