code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import gym_mupen64plus
# %%
import gym, torch, cv2, time
import numpy as np
import torch.nn as nn
from torch.distributions import Categorical
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from collections import deque
import pickle
from os import path
import itertools
# %%
# Select the GPU when available; all tensors and models are moved to this device.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Run tag appended to checkpoint/video filenames so experiments don't collide.
version = input("Version: ")
# %%
# Game environment wrapper
# Size of the discrete action space exposed to the agent (see map_action_space).
_ACTION_DIM = 15
def process_obs(image):
    """Downscale a raw frame to 84x84 pixels and convert it to grayscale."""
    shrunk = cv2.resize(image, (84, 84))
    return cv2.cvtColor(shrunk, cv2.COLOR_BGR2GRAY)
def map_action_space(action_i):
    """Translate a discrete action index into the 8-element controller vector.

    Index 0 is a no-op; indices 1-8 set the two joystick axes (slots 0 and 1)
    to combinations of full-right/full-left (127 / -128) and full-up/full-down;
    any larger index presses a single button (slots 2-7).
    """
    action = [0] * 8
    # (x-axis, y-axis) joystick values for action indices 1..8.
    joystick = {
        1: (127, 0),
        2: (-128, 0),
        3: (0, 127),
        4: (0, -128),
        5: (127, 127),
        6: (127, -128),
        7: (-128, 127),
        8: (-128, -128),
    }
    if action_i == 0:
        pass
    elif action_i in joystick:
        action[0], action[1] = joystick[action_i]
    else:
        # Button press: indices 9..14 map onto controller slots 2..7.
        action[action_i - 7] = 1
    return action
class FrameStack:
    """Maintains the most recent ``stack_size`` frames as one stacked array.

    The deque automatically evicts the oldest frame once full, so each call
    returns an array of shape ``(stack_size,) + frame.shape``.
    """
    def __init__(self, stack_size):
        self.stack_size = stack_size
        self.frames = deque(maxlen=stack_size)
    def reset(self, frame):
        """Fill the stack with copies of ``frame`` and return the stacked array."""
        # Bug fix: the fill count was hard-coded to 4, which broke any
        # stack_size != 4 (e.g. stack_size=5 yielded only 4 frames).
        self.frames.extend([frame] * self.stack_size)
        stack = np.stack(self.frames, axis=0)
        return stack
    def __call__(self, frame):
        """Append ``frame`` (bootstrapping an empty stack) and return the stack."""
        if len(self.frames) == 0:
            self.frames.extend([frame] * self.stack_size)
        else:
            self.frames.append(frame)
        stack = np.stack(self.frames, axis=0)
        return stack
class SmashEnv:
    """Gym wrapper adding discrete-action translation and frame preprocessing.

    Observations are grayscaled/resized via ``process_obs`` and stacked with
    ``args.framestack``; for Smash environments the discrete action index is
    expanded to a controller vector via ``map_action_space``.
    """
    def __init__(self, args):
        self.env = gym.make(args.env_id)
        self.args = args
    def step(self, action):
        # Bug fix: consult this wrapper's own config rather than the
        # module-level ``args`` global (previously only worked by accident
        # because the global happened to be the same object).
        if "Smash" in self.args.env_id:
            action = map_action_space(action)
        obs, reward, done, info = self.env.step(action)
        state = self.args.framestack(process_obs(obs))
        return state, reward, done, info
    def reset(self):
        """Reset the wrapped env and re-fill the frame stack with the first frame."""
        obs = self.env.reset()
        state = self.args.framestack.reset(process_obs(obs))
        return state
    def close(self): self.env.close()
# %%
# Recording functions
def save_video(args):
    """Render the last ``args.max_steps`` stored frames to an mp4 via ffmpeg.

    Walks the most recent ``max_steps`` transitions of the replay buffer;
    ``experience[0]`` is the stacked state, whose last channel is the newest
    frame.  Writes ``recorded_data/training<version>_<episode>.mp4``.
    """
    deep_frames = []
    n = len(args.memory.buffer)
    for experience in itertools.islice(args.memory.buffer,n-args.max_steps,n):
        f = experience[0]
        deep_frames += [f[-1]]
    # Figure sized so one frame pixel maps to one figure pixel at 72 dpi.
    plt.figure(figsize=(deep_frames[0].shape[1] / 72.0, deep_frames[0].shape[0] / 72.0), dpi = 72)
    patch = plt.imshow(deep_frames[0])
    plt.axis('off')
    animate = lambda i: patch.set_data(deep_frames[i])
    ani = animation.FuncAnimation(plt.gcf(), animate, frames=len
    (deep_frames), interval = 50)
    # Requires ffmpeg to be installed and discoverable by matplotlib.
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
    ani.save('recorded_data/training%s_%i.mp4' % (version, args.episode), writer=writer)
def log(args, update=False, episode = False):
    """Record training progress.

    With ``update=True``: append the latest loss and print it every
    ``args.print_period`` iterations.  With ``episode=True``: save a video
    every ``args.save_period`` episodes and print the reward accumulated
    over the last ``args.episode_length`` steps.
    """
    if update:
        args.losses.append(args.loss)
        if args.iteration % args.print_period == 0:
            print(args.loss)
    if episode:
        if args.episode % args.save_period == 0:
            save_video(args)
        recent_reward = sum(args.rewards[-args.episode_length:])
        print("%i Accumulated Reward: %f" % (args.episode, recent_reward))
# %%
# Experience replay buffer
class Memory():
    """Fixed-capacity FIFO experience replay buffer."""
    def __init__(self, max_size):
        # Oldest transitions are evicted automatically once full.
        self.buffer = deque(maxlen = max_size)
    def add(self, experience):
        """Store one transition tuple."""
        self.buffer.append(experience)
    def sample(self, batch_size):
        """Draw ``batch_size`` transitions uniformly at random, with replacement."""
        picks = np.random.choice(len(self.buffer),
                                 size = batch_size,
                                 replace = True)
        return [self.buffer[i] for i in picks]
# fill memory with random transitions
def fill_memory(args, n_episodes=20):
    """Seed the replay buffer with transitions from a uniform-random policy.

    Runs ``n_episodes`` warm-up episodes of ``args.max_steps`` steps each,
    storing ``(last_state, action, reward, state, done)`` tuples in
    ``args.memory`` so the first gradient updates have data to sample.
    """
    env = args.env
    for episode_idx in range(n_episodes):
        state = env.reset()
        for step_idx in range(args.max_steps):
            last_state = state
            # Bug fix: sample a discrete action index.  ``np.random.rand``
            # returned a float vector, which the Q-update could not later use
            # to index Q-values (and which env.step does not expect).
            action = np.random.randint(args.action_dim)
            state, reward, done, _ = env.step(action)
            args.memory.add((last_state, action, reward, state, done))
# %%
# Training functions
# linearly decays epsilon
def epsilon_scheduler(args):
    """Linearly anneal the exploration rate with the iteration count, floored at 0."""
    annealed = args.epsilon - args.iteration * args.epsilon_decay
    return annealed if annealed > 0 else 0
def get_loss(args):
    """Compute the DQN temporal-difference loss on one sampled minibatch.

    Samples ``args.batch_size`` transitions, evaluates the online network on
    the states and the (detached) target network on the successor states, and
    regresses the chosen-action Q-values towards the bootstrapped targets.
    Side effects: stores ``args.loss`` (float), ``args.targets`` and
    ``args.preds`` for logging/inspection.
    """
    batch = args.memory.sample(args.batch_size)
    states, actions, rewards, next_states, dones = list(zip(*batch))
    states_t = torch.Tensor(states).to(device)
    next_states_t = torch.Tensor(next_states).to(device)
    rewards_t = torch.Tensor(rewards).to(device)
    dones_t = torch.Tensor(dones).bool().to(device)
    Qs = args.model(states_t)
    next_Qs = args.target_model(next_states_t).detach()
    # Q-values of the actions actually taken.
    preds_t = Qs[np.arange(args.batch_size), actions]
    # Bug fix: the discount applies to the bootstrapped future value, not the
    # immediate reward: target = r + gamma * max_a' Q'(s', a') * (1 - done).
    targets_t = rewards_t + args.gamma * next_Qs.max(1)[0] * ~dones_t
    loss = args.loss_func(preds_t, targets_t)
    args.loss = loss.item(); args.targets = targets_t; args.preds = preds_t
    return loss
def update(args):
    """Perform one gradient step on the online network.

    Also syncs the target network every ``args.prop_steps`` iterations,
    logs the loss, and advances ``args.iteration``.
    """
    args.model.train()
    args.optimizer.zero_grad()
    loss = get_loss(args)
    loss.backward()
    # Bug fix: gradients were computed but never applied, so the network
    # could not learn.
    args.optimizer.step()
    if args.iteration % args.prop_steps == 0:
        args.target_model.load_state_dict(args.model.state_dict())
    log(args, update=True)
    args.iteration += 1
# %%
# Get action
def get_action(args, state):
    """Epsilon-greedy action selection from the online Q-network.

    With probability epsilon (linearly annealed) a uniform random action is
    returned; otherwise the argmax of the predicted Q-values.
    """
    epsilon = epsilon_scheduler(args)
    args.model.eval()
    if np.random.rand() >= epsilon:
        # Greedy branch: pick the highest-valued action.
        state_t = torch.Tensor(state).unsqueeze(0).to(device)
        Q = args.model(state_t)
        return torch.max(Q, 1)[1].item()
    return np.random.randint(args.action_dim)
# %%
# Initialize environment
def get_model(input_dim, output_dim):
return nn.Sequential(
# 84, 84
nn.Conv2d(input_dim, 32, 7, 2, 3), # 42, 42
nn.ReLU(),
nn.Conv2d(32, 64, 3, 2, 1), # 21, 21
nn.ReLU(),
nn.Conv2d(64, 128, 3, 2, 1), # 11, 11
nn.ReLU(),
nn.Flatten(), # 128 * 11 * 11
nn.Linear(128 * 11 * 11, 1000),
nn.ReLU(),
nn.Linear(1000, output_dim)
)
def save(args):
    """Persist the online network's weights, tagged with the run ``version``."""
    torch.save(args.model.state_dict(), './recorded_data/DQN%s.pth' % version)
    #args.model = None
    #with open("./recorded_data/args.pyobj", "w") as f:
    # pickle.dump(args, f)
def load(args):
    """Restore weights from a previous run with the same ``version``, if present."""
    if path.exists("./recorded_data/DQN%s.pth" % version):
        args.model.load_state_dict(torch.load("./recorded_data/DQN%s.pth" % version))
    #with open("buffer.pyobj", "r") as f:
    # buffer = pickle.load(f)
def init_env(args):
    """Build the environment, replay buffer, networks and optimizer onto ``args``."""
    args.env = SmashEnv(args)
    args.memory = Memory(args.memory_size)
    args.model = get_model(args.obs_dim, args.action_dim).to(device)
    args.target_model = get_model(args.obs_dim, args.action_dim).to(device)
    # Start the target network in sync with the online network.
    args.target_model.load_state_dict(args.model.state_dict())
    args.optimizer = torch.optim.Adam(args.model.parameters(), args.lr)
    load(args)
    # Pre-fill the replay buffer so the first updates have data to sample.
    fill_memory(args)
# %%
# Default args
class Args:
    # Bag of hyper-parameters plus mutable training state shared by all helpers.
    def __init__(self):
        """Populate default hyper-parameters and zeroed training state."""
        # --- environment / observation ---
        self.env_id = "Smash-dk-v0"
        self.nb_stacks = 4  # frames stacked per observation
        self.obs_dim = self.nb_stacks  # conv input channels = stacked frames
        self.action_dim = _ACTION_DIM
        self.loss_func = nn.MSELoss()
        self.framestack = FrameStack(self.nb_stacks)
        # --- exploration / optimisation hyper-parameters ---
        self.epsilon = 0.2
        self.epsilon_decay = 0.000002  # linear decay per iteration
        self.gamma = 0.99  # discount factor
        self.lr = 0.003
        self.batch_size = 128
        self.memory_size = 100000
        # --- schedule ---
        self.nb_episodes = 100
        self.max_steps = 2000
        self.prop_steps = 50  # target-network sync period (iterations)
        self.update_steps = 30  # NOTE(review): appears unused by train() — confirm
        self.save_period = 10  # episodes between checkpoints/videos
        self.print_period = 10  # iterations between loss prints
        # --- mutable bookkeeping updated during training ---
        self.iteration = 0
        self.episode = 0
        self.episode_length = 0
        self.losses = []
        self.actions = []
        self.rewards = []
        self.targets = None
        self.preds = None
# %%
# Start
# Build the module-level ``args`` object and wire up env/model/memory/optimizer.
args = Args()
init_env(args)
# %%
# Training loop
def train ():
    """Main DQN training loop over the module-level ``args``.

    Each step: epsilon-greedy action, environment step, store transition,
    one gradient update.  Logs per episode and checkpoints every
    ``args.save_period`` episodes.
    """
    env = args.env
    for episode_idx in range(args.nb_episodes):
        state = env.reset()
        for step_idx in range(args.max_steps):
            last_state = state
            action = get_action(args, last_state)
            # NOTE(review): ``done`` is stored but never ends the episode
            # early — confirm that fixed-length episodes are intended.
            state, reward, done, _ = env.step(action)
            state = state
            args.memory.add((last_state, action, reward, state, done))
            # Gradient update every step (``args.update_steps`` is not consulted).
            update(args)
            args.rewards.append(reward); args.actions.append(action)
        args.episode_length = step_idx; args.episode += 1
        log(args, episode = True)
        if episode_idx % args.save_period == 0:
            save(args)
save(args) | [
"torch.nn.ReLU",
"numpy.random.rand",
"torch.max",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"gym.make",
"numpy.arange",
"matplotlib.pyplot.imshow",
"os.path.exists",
"collections.deque",
"torch.nn.Flatten",
"numpy.stack",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.gcf",
"torch.Te... | [((524, 551), 'cv2.resize', 'cv2.resize', (['image', '(84, 84)'], {}), '(image, (84, 84))\n', (534, 551), False, 'import gym, torch, cv2, time\n'), ((563, 602), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (575, 602), False, 'import gym, torch, cv2, time\n'), ((2415, 2474), 'itertools.islice', 'itertools.islice', (['args.memory.buffer', '(n - args.max_steps)', 'n'], {}), '(args.memory.buffer, n - args.max_steps, n)\n', (2431, 2474), False, 'import itertools\n'), ((2523, 2619), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(deep_frames[0].shape[1] / 72.0, deep_frames[0].shape[0] / 72.0)', 'dpi': '(72)'}), '(figsize=(deep_frames[0].shape[1] / 72.0, deep_frames[0].shape[0] /\n 72.0), dpi=72)\n', (2533, 2619), True, 'import matplotlib.pyplot as plt\n'), ((2670, 2696), 'matplotlib.pyplot.imshow', 'plt.imshow', (['deep_frames[0]'], {}), '(deep_frames[0])\n', (2680, 2696), True, 'import matplotlib.pyplot as plt\n'), ((2699, 2714), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2707, 2714), True, 'import matplotlib.pyplot as plt\n'), ((6561, 6611), 'os.path.exists', 'path.exists', (["('./recorded_data/DQN%s.pth' % version)"], {}), "('./recorded_data/DQN%s.pth' % version)\n", (6572, 6611), False, 'from os import path\n'), ((370, 395), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (393, 395), False, 'import gym, torch, cv2, time\n'), ((1333, 1357), 'collections.deque', 'deque', ([], {'maxlen': 'stack_size'}), '(maxlen=stack_size)\n', (1338, 1357), False, 'from collections import deque\n'), ((1443, 1472), 'numpy.stack', 'np.stack', (['self.frames'], {'axis': '(0)'}), '(self.frames, axis=0)\n', (1451, 1472), True, 'import numpy as np\n'), ((1672, 1701), 'numpy.stack', 'np.stack', (['self.frames'], {'axis': '(0)'}), '(self.frames, axis=0)\n', (1680, 1701), True, 'import numpy as np\n'), ((1789, 1810), 'gym.make', 'gym.make', 
(['args.env_id'], {}), '(args.env_id)\n', (1797, 1810), False, 'import gym, torch, cv2, time\n'), ((2800, 2809), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2807, 2809), True, 'import matplotlib.pyplot as plt\n'), ((3540, 3562), 'collections.deque', 'deque', ([], {'maxlen': 'max_size'}), '(maxlen=max_size)\n', (3545, 3562), False, 'from collections import deque\n'), ((5636, 5652), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5650, 5652), True, 'import numpy as np\n'), ((5681, 5715), 'numpy.random.randint', 'np.random.randint', (['args.action_dim'], {}), '(args.action_dim)\n', (5698, 5715), True, 'import numpy as np\n'), ((6001, 6034), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_dim', '(32)', '(7)', '(2)', '(3)'], {}), '(input_dim, 32, 7, 2, 3)\n', (6010, 6034), True, 'import torch.nn as nn\n'), ((6053, 6062), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6060, 6062), True, 'import torch.nn as nn\n'), ((6072, 6098), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(3)', '(2)', '(1)'], {}), '(32, 64, 3, 2, 1)\n', (6081, 6098), True, 'import torch.nn as nn\n'), ((6117, 6126), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6124, 6126), True, 'import torch.nn as nn\n'), ((6136, 6163), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)', '(2)', '(1)'], {}), '(64, 128, 3, 2, 1)\n', (6145, 6163), True, 'import torch.nn as nn\n'), ((6182, 6191), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6189, 6191), True, 'import torch.nn as nn\n'), ((6201, 6213), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (6211, 6213), True, 'import torch.nn as nn\n'), ((6239, 6269), 'torch.nn.Linear', 'nn.Linear', (['(128 * 11 * 11)', '(1000)'], {}), '(128 * 11 * 11, 1000)\n', (6248, 6269), True, 'import torch.nn as nn\n'), ((6279, 6288), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6286, 6288), True, 'import torch.nn as nn\n'), ((6298, 6325), 'torch.nn.Linear', 'nn.Linear', (['(1000)', 'output_dim'], {}), '(1000, output_dim)\n', (6307, 6325), True, 'import 
torch.nn as nn\n'), ((7408, 7420), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (7418, 7420), True, 'import torch.nn as nn\n'), ((3751, 3773), 'numpy.arange', 'np.arange', (['buffer_size'], {}), '(buffer_size)\n', (3760, 3773), True, 'import numpy as np\n'), ((4172, 4203), 'numpy.random.rand', 'np.random.rand', (['args.action_dim'], {}), '(args.action_dim)\n', (4186, 4203), True, 'import numpy as np\n'), ((4678, 4698), 'torch.Tensor', 'torch.Tensor', (['states'], {}), '(states)\n', (4690, 4698), False, 'import gym, torch, cv2, time\n'), ((4730, 4755), 'torch.Tensor', 'torch.Tensor', (['next_states'], {}), '(next_states)\n', (4742, 4755), False, 'import gym, torch, cv2, time\n'), ((4783, 4804), 'torch.Tensor', 'torch.Tensor', (['rewards'], {}), '(rewards)\n', (4795, 4804), False, 'import gym, torch, cv2, time\n'), ((4973, 4999), 'numpy.arange', 'np.arange', (['args.batch_size'], {}), '(args.batch_size)\n', (4982, 4999), True, 'import numpy as np\n'), ((6646, 6695), 'torch.load', 'torch.load', (["('./recorded_data/DQN%s.pth' % version)"], {}), "('./recorded_data/DQN%s.pth' % version)\n", (6656, 6695), False, 'import gym, torch, cv2, time\n'), ((4830, 4849), 'torch.Tensor', 'torch.Tensor', (['dones'], {}), '(dones)\n', (4842, 4849), False, 'import gym, torch, cv2, time\n'), ((5837, 5852), 'torch.max', 'torch.max', (['Q', '(1)'], {}), '(Q, 1)\n', (5846, 5852), False, 'import gym, torch, cv2, time\n'), ((5744, 5763), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (5756, 5763), False, 'import gym, torch, cv2, time\n')] |
from manimlib.imports import *
import numpy as np
from math import erf
class PlotGraphs(GraphScene):
    """Animated comparison of three related curves: A(x) = -2x*exp(-x^2),
    B(x) = exp(-x^2), and C, a shifted/scaled erf acting as an antiderivative
    of B.  Highlights extrema, inflection behaviour, and traces dots along
    the curves to relate derivative, function, and integral."""
    CONFIG = {
        "x_min" : -6,
        "x_max" : 6,
        "y_min" : -2.5,
        "y_max" : 2.5,
        "graph_origin" : ORIGIN ,
        "function_color" : BLUE ,
        "axes_color" : GREEN,
        "center_point" : 0
    }
    def construct(self):
        # C: erf shifted/scaled so it sits nicely on these axes.
        erfInt = lambda x: erf(x)+0.5*PI**0.5+0.11
        self.setup_axes(animate=True)
        derivative = lambda x: -2*x*np.exp(-x**2)
        exponential = lambda x: np.exp(-x**2)
        derivativeObj = self.get_graph(derivative,self.function_color,x_min=-6,x_max=6)
        erfObj = self.get_graph(erfInt,YELLOW,x_min=-6,x_max=6)
        expObj = self.get_graph(exponential,WHITE,x_min=-6,x_max=6)
        labelExp = self.get_graph_label(expObj, label = "B", direction= UP*0.55+LEFT*0.15)
        labelDer = self.get_graph_label(derivativeObj, label = "A", direction= DOWN*0.55+LEFT*0.15)
        labelErf = self.get_graph_label(erfObj, label = "C")
        # Group each curve with its label so they animate as a unit.
        expComp = Mobject().add(expObj,labelExp)
        erfComp = Mobject().add(erfObj,labelErf)
        derComp = Mobject().add(derivativeObj,labelDer)
        self.wait(2)
        self.play(ShowCreation(derComp),ShowCreation(expComp),ShowCreation(erfComp),
            run_time=2
        )
        self.wait(12)
        # Keep faint copies of all three curves visible for the rest of the scene.
        opacity=0.3
        derivativeFaded = self.get_graph(derivative,self.function_color,stroke_opacity = opacity)
        erfFaded = self.get_graph(erfInt,YELLOW,stroke_opacity = opacity)
        expFaded = self.get_graph(exponential,WHITE,stroke_opacity = opacity)
        self.add(derivativeFaded, erfFaded, expFaded) #add faded underneath
        def getDot(x, y):
            # Small orange marker at graph coordinates (x, y).
            return Dot(self.coords_to_point(x,y),color=ORANGE)
        def getPath(func,minX,maxX):
            # Sub-graph of ``func`` over [minX, maxX], used as a motion path.
            return self.get_graph(func,x_min=minX,x_max=maxX)
        self.play(
            FadeOut(expComp),
            FadeOut(derComp)
        )
        self.wait()
        # Maximum of B at (0, 1).
        maximumExp = Dot(self.coords_to_point(0,1),color=ORANGE)
        self.play(
            FadeIn(expComp),
            FadeIn(maximumExp),
            FadeOut(erfComp)
        )
        self.wait()
        # Extrema of A at x = +/- 1/sqrt(2).
        derMaxVal = 0.5**0.5
        maxDer = Dot(self.coords_to_point(-derMaxVal,derivative(-derMaxVal)),color=ORANGE)
        minDer = Dot(self.coords_to_point(derMaxVal,derivative(derMaxVal)),color=ORANGE)
        self.play(
            FadeOut(maximumExp),
            FadeIn(derComp),
            FadeOut(expComp),
            FadeIn(maxDer),
            FadeIn(minDer)
        )
        self.wait(2)
        origin = getDot(0,0)
        self.play(
            FadeOut(maxDer),
            FadeOut(minDer),
            FadeIn(origin)
        )
        self.wait(3)
        self.play(
            FadeOut(origin),
            FadeOut(derComp),
            FadeIn(expComp),
            FadeIn(maximumExp)
        )
        self.wait(6)
        self.play(
            FadeOut(maximumExp),
            FadeIn(derComp)
        )
        # Trace dots along A and B in lockstep: far-left tails first...
        travDotDer = getDot(0,0)
        travDotExp = getDot(0,0)
        smallValuesDer = getPath(derivative,-6,-1.5)
        smallValuesExp = getPath(exponential,-6,-1.5)
        self.play(
            MoveAlongPath(travDotDer,smallValuesDer),
            MoveAlongPath(travDotExp,smallValuesExp),
            run_time=2
        )
        self.wait()
        # ...then slowly through the interesting middle region.
        midValuesDer = getPath(derivative,-1.5,1.5)
        midValuesExp = getPath(exponential,-1.5,1.5)
        self.play(
            MoveAlongPath(travDotDer,midValuesDer),
            MoveAlongPath(travDotExp,midValuesExp),
            run_time=6
        )
        self.play(
            FadeOut(travDotDer),
            FadeOut(travDotExp)
        )
        self.wait(2)
        self.play(
            FadeOut(derComp),
            FadeIn(erfComp)
        )
        self.wait(4)
        # Repeat the trace pairing C (the integral) with B.
        travDotErf = getDot(0,0)
        midValuesErf = getPath(erfInt,-1.5,1.5)
        self.play(
            MoveAlongPath(travDotErf,midValuesErf),
            MoveAlongPath(travDotExp,midValuesExp),
            run_time=2
        )
        self.wait(9)
        BIGValuesErf = getPath(erfInt,1.5,6)
        BIGValuesExp = getPath(exponential,1.5,6)
        self.play(
            MoveAlongPath(travDotErf,BIGValuesErf),
            MoveAlongPath(travDotExp,BIGValuesExp),
            run_time=8
        )
        self.wait(15)
class PlotDer(GraphScene):
    """Static plot of the curve A(x) = -2x * exp(-x^2) with a label."""
    CONFIG = {
        "x_min": -6,
        "x_max": 6,
        "y_min": -1.5,
        "y_max": 1.5,
        "graph_origin": ORIGIN,
        "function_color": BLUE,
        "axes_color": GREEN,
        "x_labeled_nums": range(-5, 5, 1),
        "center_point": 0,
    }
    def construct(self):
        # Axes appear instantly; curve and label are drawn together.
        self.setup_axes(animate=False)
        curve = self.get_graph(lambda x: -2 * x * np.exp(-x ** 2), BLUE)
        curve_label = self.get_graph_label(curve, label="A", color=WHITE)
        self.play(ShowCreation(curve), ShowCreation(curve_label))
        self.wait(4)
class PlotExp(GraphScene):
    """Static plot of the Gaussian B(x) = exp(-x^2) with a label."""
    CONFIG = {
        "x_min": -6,
        "x_max": 6,
        "y_min": -1.5,
        "y_max": 1.5,
        "graph_origin": ORIGIN,
        "function_color": BLUE,
        "axes_color": GREEN,
        "x_labeled_nums": range(-5, 5, 1),
        "center_point": 0,
    }
    def construct(self):
        # Axes appear instantly; curve and label are drawn together.
        self.setup_axes(animate=False)
        curve = self.get_graph(lambda x: np.exp(-x ** 2), BLUE)
        curve_label = self.get_graph_label(curve, label="B", color=WHITE)
        self.play(ShowCreation(curve), ShowCreation(curve_label))
        self.wait(4)
class PlotInt(GraphScene):
    """Static plot of the error function C(x) = erf(x) with a label."""
    CONFIG = {
        "x_min": -6,
        "x_max": 6,
        "y_min": -1.5,
        "y_max": 1.5,
        "graph_origin": ORIGIN,
        "function_color": BLUE,
        "axes_color": GREEN,
        "x_labeled_nums": range(-5, 5, 1),
        "center_point": 0,
    }
    def construct(self):
        # Axes appear instantly; erf is passed directly as the plotted function.
        self.setup_axes(animate=False)
        curve = self.get_graph(erf, BLUE)
        curve_label = self.get_graph_label(curve, label="C", color=WHITE)
        self.play(ShowCreation(curve), ShowCreation(curve_label))
        self.wait(4)
class PlotExercise(GraphScene):
    """Exercise variant: simultaneously draws A(x) = d/dx[exp(0.2x)cos(0.3x)],
    B(x) = exp(0.2x)cos(0.3x), and C, an antiderivative of B (up to a
    vertical shift)."""
    CONFIG = {
        "x_min" : -6,
        "x_max" : 6,
        "y_min" : -2.5,
        "y_max" : 2.5,
        "graph_origin" : ORIGIN ,
        "function_color" : BLUE ,
        "axes_color" : GREEN,
        "center_point" : 0
    }
    def construct(self):
        # C: closed-form antiderivative of B, scaled and shifted down by 2.
        erfInt = lambda x: 1.53846* np.exp(0.2*x)* (1.5*np.sin(0.3*x) + np.cos(0.3*x))-2
        self.setup_axes(animate=False)
        # A: product-rule derivative of B.
        derivative = lambda x: np.exp(0.2*x)*(0.2*np.cos(0.3*x) - 0.3*np.sin(0.3*x))
        exponential = lambda x: np.exp(0.2*x)*np.cos(0.3*x)
        derivativeObj = self.get_graph(derivative,self.function_color,x_min=-6,x_max=6)
        erfObj = self.get_graph(erfInt,YELLOW,x_min=-6,x_max=6)
        expObj = self.get_graph(exponential,WHITE,x_min=-6,x_max=6)
        labelExp = self.get_graph_label(expObj, label = "B")
        labelDer = self.get_graph_label(derivativeObj, label = "A")
        labelErf = self.get_graph_label(erfObj, label = "C")
        # Group each curve with its label so they animate as a unit.
        expComp = Mobject().add(expObj,labelExp)
        erfComp = Mobject().add(erfObj,labelErf)
        derComp = Mobject().add(derivativeObj,labelDer)
        self.play(ShowCreation(derComp),ShowCreation(expComp),ShowCreation(erfComp),
            run_time=2
        )
self.wait(12) | [
"numpy.exp",
"numpy.sin",
"numpy.cos",
"math.erf"
] | [((537, 552), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (543, 552), True, 'import numpy as np\n'), ((5436, 5451), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (5442, 5451), True, 'import numpy as np\n'), ((491, 506), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (497, 506), True, 'import numpy as np\n'), ((4814, 4829), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (4820, 4829), True, 'import numpy as np\n'), ((6719, 6734), 'numpy.exp', 'np.exp', (['(0.2 * x)'], {}), '(0.2 * x)\n', (6725, 6734), True, 'import numpy as np\n'), ((6805, 6820), 'numpy.exp', 'np.exp', (['(0.2 * x)'], {}), '(0.2 * x)\n', (6811, 6820), True, 'import numpy as np\n'), ((6819, 6834), 'numpy.cos', 'np.cos', (['(0.3 * x)'], {}), '(0.3 * x)\n', (6825, 6834), True, 'import numpy as np\n'), ((393, 399), 'math.erf', 'erf', (['x'], {}), '(x)\n', (396, 399), False, 'from math import erf\n'), ((6596, 6611), 'numpy.exp', 'np.exp', (['(0.2 * x)'], {}), '(0.2 * x)\n', (6602, 6611), True, 'import numpy as np\n'), ((6632, 6647), 'numpy.cos', 'np.cos', (['(0.3 * x)'], {}), '(0.3 * x)\n', (6638, 6647), True, 'import numpy as np\n'), ((6738, 6753), 'numpy.cos', 'np.cos', (['(0.3 * x)'], {}), '(0.3 * x)\n', (6744, 6753), True, 'import numpy as np\n'), ((6758, 6773), 'numpy.sin', 'np.sin', (['(0.3 * x)'], {}), '(0.3 * x)\n', (6764, 6773), True, 'import numpy as np\n'), ((6616, 6631), 'numpy.sin', 'np.sin', (['(0.3 * x)'], {}), '(0.3 * x)\n', (6622, 6631), True, 'import numpy as np\n')] |
"""
"""
import numpy as np
import nashpy as nash
# # question 1:
# player_1 = np.array([[1, 2, 0], [3, 1, 5]]) # the row player
# player_2 = np.array([[1, -2, 6], [1, 4, 0]]) # the column player
# game_1 = nash.Game(player_1, player_2)
# print(game_1)
#
# # find the Nash equilibrium with support enumeration
# equilibria = game_1.support_enumeration()
# for eq in equilibria:
# print(f"the equilibria for q1 are: {eq}")
# # question 2:
# player_1 = np.array([[1, 2, 0], [3, 1, 5]]) # the row player
# player_2 = np.array([[1, -2, 6], [1, 4, 0]]) # the column player
# game_2 = nash.Game(player_1, player_2)
# print(game_1)
#
# # find the Nash equilibrium with support enumeration, which returns a generator of all the equilibria
# equilibria = game_2.support_enumeration()
# for eq in equilibria:
# print(f"the equilibria for q2 are: {eq}")
# question 3:
# player_1 = np.array([[1, 2, 0], [3, 1, 5]]) # the row player
# player_2 = np.array([[1, -2, 6], [1, 4, 0]]) # the column player
# game_3 = nash.Game(player_1, player_2)
# print(game_3)
#
# equilibria = game_3.vertex_enumeration()
# for eq in equilibria:
# print(eq)
#
# # the is_best_response method returns a pair of booleans that indicate whether the specified strategy is best response
# sigma_r = np.array([0, 1]) # as a strategy for player 1 is not specified we should test for both. It indicates that strategy b is player 1's best response to the strategy for player 2 stated in the q
# sigma_c = np.array([0, 0, 1]) # the questions states a probability 0.5 for strategy c and 0.5 for strategy e
# print(game_3.is_best_response(sigma_r, sigma_c))
# question 4:
# Bimatrix game: payoff matrices for the row and column player.
player_1 = np.array([[1, 2, 0], [3, 1, 5]]) # the row player
player_2 = np.array([[1, -2, 6], [1, 4, 0]]) # the column player
game_3 = nash.Game(player_1, player_2)
print(game_3)
# Enumerate all Nash equilibria via vertex enumeration of the
# best-response polytopes; each eq is a (row strategy, column strategy) pair.
equilibria = game_3.vertex_enumeration()
for eq in equilibria:
    print(eq)
# question 5:
# question 6:
# question 7:
| [
"nashpy.Game",
"numpy.array"
] | [((1663, 1695), 'numpy.array', 'np.array', (['[[1, 2, 0], [3, 1, 5]]'], {}), '([[1, 2, 0], [3, 1, 5]])\n', (1671, 1695), True, 'import numpy as np\n'), ((1725, 1758), 'numpy.array', 'np.array', (['[[1, -2, 6], [1, 4, 0]]'], {}), '([[1, -2, 6], [1, 4, 0]])\n', (1733, 1758), True, 'import numpy as np\n'), ((1789, 1818), 'nashpy.Game', 'nash.Game', (['player_1', 'player_2'], {}), '(player_1, player_2)\n', (1798, 1818), True, 'import nashpy as nash\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
# Plot iteration-normalized cumulative reward against two baselines,
# one subplot per experiment configuration (2x2 grid).
fig = plt.figure();
basel_exp = 3;
num_of_reps = 5;
for wexp in range(4):
    #origin = [50,10]#[50,10]#[10,50]#[40,20]#[10,15]
    #h_and_w = [15,70]#[15,70]#[60,20]#[20,50]#[20,42]
    #exps = ["[10, 30],[20, 50]","[25, 10],[15, 70]","[42, 10],[20, 42]","[10, 10],[60, 20]"];
    exps = ["[50, 10],[15, 70]","[10, 50],[60, 20]","[40, 20],[20, 50]","[10, 15],[20, 42]"];
    data_1 = [];
    data_2 = [];
    #data_3 = [];
    # Load each repetition and normalize the running sum by the timestep index.
    for i in range(1,num_of_reps+1):
        data = pickle.load(open("data/plot_two_data/data_"+str(exps[wexp])+"up3_"+str(i)+".pkl", "rb" ))
        data_1.append(np.array(data[0][:-1]));
        t = np.array([t for t in range(1,len(data_1[-1])+1)]);
        data_1[-1] = data_1[-1]/t
        data_1[-1] = data_1[-1][None]
    # Baseline 1: individual expert performances (random-observation baseline).
    for i in range(basel_exp):
        data_2.append(np.array(pickle.load(open("data/plot_three_data/experts_100avg_performance_exp"+str(i+4)+"_"+str(wexp+1)+"_.pkl", "rb" ))))
    # Baseline 2: three "no observation" runs, concatenated below.
    data_3 = np.array(pickle.load(open("data/plot_three_data/experts_100avg_performance_exp_b_"+str(wexp+1)+"_.pkl", "rb" )))
    data_4 = np.array(pickle.load(open("data/plot_three_data/experts_100avg_performance_exp_b2_"+str(wexp+1)+"_.pkl", "rb" )))
    data_5 = np.array(pickle.load(open("data/plot_three_data/experts_100avg_performance_exp_b3_"+str(wexp+1)+"_.pkl", "rb" )))
    hund_step_rew = np.concatenate(data_1,axis=0)
    data_2 = np.concatenate(data_2,axis=0)
    data_3 = np.concatenate((data_3,data_4,data_5),axis=0)
    ax = plt.subplot(2,2,wexp+1)
    sns.tsplot(data=hund_step_rew, color='k');
    sns.tsplot(data=data_2,condition=["Random O."], color='b')
    sns.tsplot(data=data_3,condition=["No O.  "], color='g')
    #sns.tsplot(data=fixed_cum_rew, condition=["Fixed Exp. "+str(wexp+1)+" (100-S Avg. Rew)"], legend=True, color='b');
    # Thin out tick labels; limits re-applied because set_*ticks can expand them.
    plt.xlim(0,1500); ax.set_xticks(ax.get_xticks()[::2]); plt.xlim(0,1500);
    plt.ylim(0,0.08); ax.set_yticks(ax.get_yticks()[::2]); plt.ylim(0,0.08);
    #plt.title("100-Step Reward");
    #plt.xlabel("Bandit Timestep");
    #plt.ylabel("100-Step Cum. Reward/100N");
# Shared axis labels for the whole grid.
fig.text(0.5, 0.04, 'Iteration Step', ha='center', va='center')
fig.text(0.03, 0.5, 'Iteration-Normalized', ha='center', va='center', rotation='vertical')
fig.text(0.06, 0.5, 'Cumulative Reward', ha='center', va='center', rotation='vertical')
plt.show(); | [
"seaborn.tsplot",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((118, 130), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (128, 130), True, 'import matplotlib.pyplot as plt\n'), ((2507, 2517), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2515, 2517), True, 'import matplotlib.pyplot as plt\n'), ((1497, 1527), 'numpy.concatenate', 'np.concatenate', (['data_1'], {'axis': '(0)'}), '(data_1, axis=0)\n', (1511, 1527), True, 'import numpy as np\n'), ((1540, 1570), 'numpy.concatenate', 'np.concatenate', (['data_2'], {'axis': '(0)'}), '(data_2, axis=0)\n', (1554, 1570), True, 'import numpy as np\n'), ((1583, 1631), 'numpy.concatenate', 'np.concatenate', (['(data_3, data_4, data_5)'], {'axis': '(0)'}), '((data_3, data_4, data_5), axis=0)\n', (1597, 1631), True, 'import numpy as np\n'), ((1643, 1670), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(wexp + 1)'], {}), '(2, 2, wexp + 1)\n', (1654, 1670), True, 'import matplotlib.pyplot as plt\n'), ((1671, 1712), 'seaborn.tsplot', 'sns.tsplot', ([], {'data': 'hund_step_rew', 'color': '"""k"""'}), "(data=hund_step_rew, color='k')\n", (1681, 1712), True, 'import seaborn as sns\n'), ((1718, 1777), 'seaborn.tsplot', 'sns.tsplot', ([], {'data': 'data_2', 'condition': "['Random O.']", 'color': '"""b"""'}), "(data=data_2, condition=['Random O.'], color='b')\n", (1728, 1777), True, 'import seaborn as sns\n'), ((1781, 1837), 'seaborn.tsplot', 'sns.tsplot', ([], {'data': 'data_3', 'condition': "['No O. ']", 'color': '"""g"""'}), "(data=data_3, condition=['No O. 
'], color='g')\n", (1791, 1837), True, 'import seaborn as sns\n'), ((1968, 1985), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1500)'], {}), '(0, 1500)\n', (1976, 1985), True, 'import matplotlib.pyplot as plt\n'), ((2023, 2040), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1500)'], {}), '(0, 1500)\n', (2031, 2040), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2062), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.08)'], {}), '(0, 0.08)\n', (2053, 2062), True, 'import matplotlib.pyplot as plt\n'), ((2100, 2117), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.08)'], {}), '(0, 0.08)\n', (2108, 2117), True, 'import matplotlib.pyplot as plt\n'), ((711, 733), 'numpy.array', 'np.array', (['data[0][:-1]'], {}), '(data[0][:-1])\n', (719, 733), True, 'import numpy as np\n')] |
from __future__ import division
from collections import OrderedDict
from functools import partial
import gzip
import io
import os
import logging
import os.path
import h5py
import numpy
from picklable_itertools.extras import equizip
from progressbar import ProgressBar
from PIL import Image
from scipy.io.matlab import loadmat
from six.moves import zip, xrange
import zmq
from fuel.converters.base import check_exists
from fuel.datasets import H5PYDataset
from fuel.utils.formats import tar_open
from fuel.utils.parallel import producer_consumer
from fuel import config
log = logging.getLogger(__name__)

# Filenames of the raw ILSVRC2010 distribution expected in the input directory.
DEVKIT_ARCHIVE = 'ILSVRC2010_devkit-1.0.tar.gz'
# Paths *inside* the devkit archive.
DEVKIT_META_PATH = 'devkit-1.0/data/meta.mat'
DEVKIT_VALID_GROUNDTRUTH_PATH = ('devkit-1.0/data/'
                                 'ILSVRC2010_validation_ground_truth.txt')
PATCH_IMAGES_TAR = 'patch_images.tar'
TEST_GROUNDTRUTH = 'ILSVRC2010_test_ground_truth.txt'
TRAIN_IMAGES_TAR = 'ILSVRC2010_images_train.tar'
VALID_IMAGES_TAR = 'ILSVRC2010_images_val.tar'
TEST_IMAGES_TAR = 'ILSVRC2010_images_test.tar'
# Image tarballs: train/valid/test plus replacement "patch" images.
IMAGE_TARS = (TRAIN_IMAGES_TAR, VALID_IMAGES_TAR, TEST_IMAGES_TAR,
              PATCH_IMAGES_TAR)
# Files obtainable without registration.
PUBLIC_FILES = TEST_GROUNDTRUTH, DEVKIT_ARCHIVE
ALL_FILES = PUBLIC_FILES + IMAGE_TARS
@check_exists(required_files=ALL_FILES)
def convert_ilsvrc2010(directory, output_directory,
                       output_filename='ilsvrc2010.hdf5',
                       shuffle_seed=config.default_seed):
    """Converter for data from the ILSVRC 2010 competition.

    Source files for this dataset can be obtained by registering at
    [ILSVRC2010WEB].

    Parameters
    ----------
    input_directory : str
        Path from which to read raw data files.
    output_directory : str
        Path to which to save the HDF5 file.
    output_filename : str, optional
        The output filename for the HDF5 file. Default: 'ilsvrc2010.hdf5'.
    shuffle_seed : int or sequence, optional
        Seed for a random number generator used to shuffle the order
        of the training set on disk, so that sequential reads will not
        be ordered by class.

    Returns
    -------
    tuple of str
        One-element tuple containing the path of the written HDF5 file.

    .. [ILSVRC2010WEB] http://image-net.org/challenges/LSVRC/2010/index

    """
    devkit_path = os.path.join(directory, DEVKIT_ARCHIVE)
    test_groundtruth_path = os.path.join(directory, TEST_GROUNDTRUTH)
    train, valid, test, patch = [os.path.join(directory, fn)
                                 for fn in IMAGE_TARS]
    # Derive example counts and label mappings from the devkit metadata.
    n_train, valid_groundtruth, test_groundtruth, wnid_map = \
        prepare_metadata(devkit_path, test_groundtruth_path)
    n_valid, n_test = len(valid_groundtruth), len(test_groundtruth)
    output_path = os.path.join(output_directory, output_filename)
    with h5py.File(output_path, 'w') as f:
        log.info('Creating HDF5 datasets...')
        prepare_hdf5_file(f, n_train, n_valid, n_test)
        log.info('Processing training set...')
        process_train_set(f, train, patch, n_train, wnid_map, shuffle_seed)
        log.info('Processing validation set...')
        process_other_set(f, 'valid', valid, patch, valid_groundtruth, n_train)
        log.info('Processing test set...')
        # valid/test examples are written after the training block, hence
        # the offsets below.
        process_other_set(f, 'test', test, patch, test_groundtruth,
                          n_train + n_valid)
    log.info('Done.')
    return (output_path,)
def fill_subparser(subparser):
    """Sets up a subparser to convert the ILSVRC2010 dataset files.

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `ilsvrc2010` command.

    """
    # Only one optional knob: the on-disk shuffle seed for the training set.
    subparser.add_argument(
        "--shuffle-seed",
        help="Seed to use for randomizing order of the "
             "training set on disk.",
        type=int, required=False, default=config.default_seed)
    return convert_ilsvrc2010
def prepare_metadata(devkit_archive, test_groundtruth_path):
    """Extract dataset metadata required for HDF5 file setup.

    Parameters
    ----------
    devkit_archive : str or file-like object
        The filename or file-handle for the gzipped TAR archive
        containing the ILSVRC2010 development kit.
    test_groundtruth_path : str or file-like object
        The filename or file-handle for the text file containing
        the ILSVRC2010 test set ground truth.

    Returns
    -------
    n_train : int
        The number of examples in the training set.
    valid_groundtruth : ndarray, 1-dimensional
        An ndarray containing the validation set groundtruth in terms of
        0-based class indices.
    test_groundtruth : ndarray, 1-dimensional
        An ndarray containing the test groundtruth in terms of 0-based
        class indices.
    wnid_map : dict
        A dictionary that maps WordNet IDs to 0-based class indices.

    """
    # Read what's necessary from the development kit.
    synsets, cost_matrix, raw_valid_groundtruth = read_devkit(devkit_archive)
    # Mapping to take WordNet IDs to our internal 0-999 encoding.
    wnid_map = dict(zip((s.decode('utf8') for s in synsets['WNID']),
                        xrange(1000)))
    # Map the 'ILSVRC2010 ID' to our zero-based ID.
    ilsvrc_id_to_zero_based = dict(zip(synsets['ILSVRC2010_ID'],
                                       xrange(len(synsets))))
    # Map the validation set groundtruth to 0-999 labels.
    valid_groundtruth = [ilsvrc_id_to_zero_based[id_]
                         for id_ in raw_valid_groundtruth]
    # Raw test data groundtruth, ILSVRC2010 IDs.
    raw_test_groundtruth = numpy.loadtxt(test_groundtruth_path,
                                         dtype=numpy.int16)
    # Map the test set groundtruth to 0-999 labels.
    test_groundtruth = [ilsvrc_id_to_zero_based[id_]
                        for id_ in raw_test_groundtruth]
    # Ascertain the number of filenames to prepare appropriate sized
    # arrays.
    n_train = int(synsets['num_train_images'].sum())
    log.info('Training set: {} images'.format(n_train))
    log.info('Validation set: {} images'.format(len(valid_groundtruth)))
    log.info('Test set: {} images'.format(len(test_groundtruth)))
    n_total = n_train + len(valid_groundtruth) + len(test_groundtruth)
    log.info('Total (train/valid/test): {} images'.format(n_total))
    return n_train, valid_groundtruth, test_groundtruth, wnid_map
def create_splits(n_train, n_valid, n_test):
    """Build the Fuel split mapping for the three consecutive subsets.

    Each split ('train', 'valid', 'test') maps every source to the same
    half-open interval of row indices in the HDF5 datasets.
    """
    boundaries = {
        'train': (0, n_train),
        'valid': (n_train, n_train + n_valid),
        'test': (n_train + n_valid, n_train + n_valid + n_test),
    }
    sources = ['encoded_images', 'targets', 'filenames']
    return OrderedDict(
        (split, OrderedDict((source, boundaries[split])
                            for source in sources))
        for split in ('train', 'valid', 'test')
    )
def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):
    """Create the empty datasets inside a given HDF5 file.

    Parameters
    ----------
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write.
    n_train : int
        The number of training set examples.
    n_valid : int
        The number of validation set examples.
    n_test : int
        The number of test set examples.
    """
    total = n_train + n_valid + n_test
    # Record the split boundaries as a Fuel split-array attribute.
    hdf5_file.attrs['split'] = H5PYDataset.create_split_array(
        create_splits(n_train, n_valid, n_test))
    # Variable-length uint8 cells hold the raw JPEG bytes of each image.
    encoded_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))
    hdf5_file.create_dataset('encoded_images', shape=(total,),
                             dtype=encoded_dtype)
    hdf5_file.create_dataset('targets', shape=(total, 1), dtype=numpy.int16)
    hdf5_file.create_dataset('filenames', shape=(total, 1), dtype='S32')
def process_train_set(hdf5_file, train_archive, patch_archive, n_train,
                      wnid_map, shuffle_seed=None):
    """Convert the ILSVRC2010 training images into the HDF5 file.

    Parameters
    ----------
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write; `encoded_images`, `targets`
        and `filenames` must exist with first dimension >= `n_train`.
    train_archive : str or file-like object
        Filename or file handle for the TAR archive of training images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    n_train : int
        The number of items in the training set.
    wnid_map : dict
        A dictionary mapping WordNet IDs to class indices.
    shuffle_seed : int or sequence, optional
        Seed for the NumPy RNG that permutes the training set on disk;
        with `None` (the default) archive order is kept.
    """
    # Decouple reading/patching (producer) from HDF5 writing (consumer).
    producer = partial(train_set_producer,
                       train_archive=train_archive,
                       patch_archive=patch_archive,
                       wnid_map=wnid_map)
    consumer = partial(image_consumer,
                       hdf5_file=hdf5_file,
                       num_expected=n_train,
                       shuffle_seed=shuffle_seed)
    producer_consumer(producer, consumer)
def _write_to_hdf5(hdf5_file, index, image_filename, image_data,
                   class_index):
    """Write one example (filename, JPEG bytes, label) at `index`."""
    record = {
        'filenames': image_filename.encode('ascii'),
        'encoded_images': image_data,
        'targets': class_index,
    }
    for source, value in record.items():
        hdf5_file[source][index] = value
def train_set_producer(socket, train_archive, patch_archive, wnid_map):
    """Load/send images from the training set TAR file or patch images.

    Parameters
    ----------
    socket : :class:`zmq.Socket`
        PUSH socket on which to send loaded images.
    train_archive : str or file-like object
        Filename or file handle for the TAR archive of training images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    wnid_map : dict
        A dictionary that maps WordNet IDs to 0-based class indices.
        Used to decode the filenames of the inner TAR files.

    Raises
    ------
    ValueError
        If not every training patch image was consumed.
    """
    patch_images = extract_patch_images(patch_archive, 'train')
    num_patched = 0
    with tar_open(train_archive) as tar:
        # The training archive is a TAR of per-class TARs; the inner TAR's
        # filename encodes the WordNet ID (e.g. 'n01440764.tar').
        for inner_tar_info in tar:
            with tar_open(tar.extractfile(inner_tar_info.name)) as inner:
                wnid = inner_tar_info.name.split('.')[0]
                class_index = wnid_map[wnid]
                filenames = sorted(info.name for info in inner
                                   if info.isfile())
                # Lazy generator: each image is loaded (or substituted by
                # its patched version) only when the stream reaches it.
                images_gen = (load_from_tar_or_patch(inner, filename,
                                                     patch_images)
                              for filename in filenames)
                pathless_filenames = (os.path.split(fn)[-1]
                                      for fn in filenames)
                stream = equizip(pathless_filenames, images_gen)
                for image_fn, (image_data, patched) in stream:
                    if patched:
                        num_patched += 1
                    # Two-part zmq message: (filename, label) header,
                    # then the raw image bytes.
                    socket.send_pyobj((image_fn, class_index), zmq.SNDMORE)
                    socket.send(image_data)
    # Sanity check: every patch image should replace exactly one original.
    if num_patched != len(patch_images):
        raise ValueError('not all patch images were used')
def image_consumer(socket, hdf5_file, num_expected, shuffle_seed=None,
                   offset=0):
    """Fill an HDF5 file with incoming images from a socket.

    Parameters
    ----------
    socket : :class:`zmq.Socket`
        PULL socket on which to receive images.
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write. Assumes `features`, `targets`
        and `filenames` already exist and have first dimension larger than
        `sum(images_per_class)`.
    num_expected : int
        The number of items we expect to be sent over the socket.
    shuffle_seed : int or sequence, optional
        Seed for a NumPy random number generator that permutes the
        images on disk.
    offset : int, optional
        The offset in the HDF5 datasets at which to start writing
        received examples. Defaults to 0.
    """
    with ProgressBar(maxval=num_expected) as pb:
        # Either write sequentially or in a seeded random permutation so
        # the training set ends up shuffled on disk.
        if shuffle_seed is None:
            index_gen = iter(xrange(num_expected))
        else:
            rng = numpy.random.RandomState(shuffle_seed)
            index_gen = iter(rng.permutation(num_expected))
        for i, num in enumerate(index_gen):
            # NOTE(review): flag zmq.SNDMORE passed to a recv call looks
            # odd (it is a send flag) -- confirm against pyzmq semantics.
            image_filename, class_index = socket.recv_pyobj(zmq.SNDMORE)
            # numpy.fromstring is deprecated for binary data; frombuffer
            # produces the same uint8 view without the deprecation.
            image_data = numpy.frombuffer(socket.recv(), dtype='uint8')
            _write_to_hdf5(hdf5_file, num + offset, image_filename,
                           image_data, class_index)
            pb.update(i + 1)
def process_other_set(hdf5_file, which_set, image_archive, patch_archive,
                      groundtruth, offset):
    """Convert the validation or test images into the HDF5 file.

    Parameters
    ----------
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write; `features`, `targets` and
        `filenames` must exist and be large enough.
    which_set : str
        Which set is being processed ('train', 'valid' or 'test'); used
        to select the right patch images.
    image_archive : str or file-like object
        The filename or file-handle for the TAR archive containing images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    groundtruth : iterable
        Scalar 0-based class index for each image, sorted by filename.
    offset : int
        The offset in the HDF5 datasets at which to start writing.
    """
    # Producer streams (patched) images; consumer writes them starting at
    # `offset`, so valid and test data land in their own regions.
    producer = partial(other_set_producer,
                       which_set=which_set,
                       image_archive=image_archive,
                       patch_archive=patch_archive,
                       groundtruth=groundtruth)
    consumer = partial(image_consumer,
                       hdf5_file=hdf5_file,
                       num_expected=len(groundtruth),
                       offset=offset)
    producer_consumer(producer, consumer)
def other_set_producer(socket, which_set, image_archive, patch_archive,
                       groundtruth):
    """Push image files read from the valid/test set TAR to a socket.

    Parameters
    ----------
    socket : :class:`zmq.Socket`
        PUSH socket on which to send images.
    which_set : str
        Which set of images is being processed. One of 'train', 'valid',
        'test'. Used for extracting the appropriate images from the patch
        archive.
    image_archive : str or file-like object
        The filename or file-handle for the TAR archive containing images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    groundtruth : iterable
        Iterable container containing scalar 0-based class index for each
        image, sorted by filename.

    Raises
    ------
    ValueError
        If not every patch image for `which_set` was used.
    """
    patch_images = extract_patch_images(patch_archive, which_set)
    num_patched = 0
    with tar_open(image_archive) as tar:
        filenames = sorted(info.name for info in tar if info.isfile())
        # Lazily load each image, substituting its patched version if any.
        images = (load_from_tar_or_patch(tar, filename, patch_images)
                  for filename in filenames)
        pathless_filenames = (os.path.split(fn)[-1] for fn in filenames)
        image_iterator = equizip(images, pathless_filenames, groundtruth)
        for (image_data, patched), filename, class_index in image_iterator:
            if patched:
                num_patched += 1
            # Two-part zmq message: (filename, label) header, then bytes.
            socket.send_pyobj((filename, class_index), zmq.SNDMORE)
            socket.send(image_data, copy=False)
    if num_patched != len(patch_images):
        # Was a bare `raise Exception`; use the same informative error as
        # train_set_producer for consistency (ValueError is a subclass of
        # Exception, so existing handlers still catch it).
        raise ValueError('not all patch images were used')
def load_from_tar_or_patch(tar, image_filename, patch_images):
    """Fetch an image's JPEG bytes, preferring the patch dictionary.

    Parameters
    ----------
    tar : `TarFile` instance
        The tar from which to read `image_filename`.
    image_filename : str
        Fully-qualified path inside of `tar` from which to read an
        image file.
    patch_images : dict
        Filenames (without path) mapped to replacement JPEG bytes.

    Returns
    -------
    image_data : bytes
        The JPEG bytes, either from the patch dictionary or the TAR.
    patched : bool
        True if the bytes came from the patch dictionary, False if they
        were read from the TAR file.
    """
    replacement = patch_images.get(os.path.basename(image_filename), None)
    if replacement is not None:
        return replacement, True
    # No patch available: read straight from the TAR. Some archive
    # members are actually gzip-compressed; detect that by attempting a
    # decode and falling back to transparent decompression.
    try:
        image_bytes = tar.extractfile(image_filename).read()
        numpy.array(Image.open(io.BytesIO(image_bytes)))
    except (IOError, OSError):
        with gzip.GzipFile(fileobj=tar.extractfile(image_filename)) as gz:
            image_bytes = gz.read()
        numpy.array(Image.open(io.BytesIO(image_bytes)))
    return image_bytes, False
def read_devkit(f):
    """Read relevant information from the development kit archive.

    Parameters
    ----------
    f : str or file-like object
        The filename or file-handle for the gzipped TAR archive
        containing the ILSVRC2010 development kit.

    Returns
    -------
    synsets : ndarray, 1-dimensional, compound dtype
        See :func:`read_metadata_mat_file` for details.
    cost_matrix : ndarray, 2-dimensional, uint8
        See :func:`read_metadata_mat_file` for details.
    raw_valid_groundtruth : ndarray, 1-dimensional, int16
        The labels for the ILSVRC2010 validation set,
        distributed with the development kit code.
    """
    with tar_open(f) as tar:
        # MAT file with the class hierarchy, textual descriptions, etc.
        synsets, cost_matrix = read_metadata_mat_file(
            tar.extractfile(DEVKIT_META_PATH))
        # Validation labels (ILSVRC2010 IDs) ship inside the devkit too.
        raw_valid_groundtruth = numpy.loadtxt(
            tar.extractfile(DEVKIT_VALID_GROUNDTRUTH_PATH),
            dtype=numpy.int16)
    return synsets, cost_matrix, raw_valid_groundtruth
def read_metadata_mat_file(meta_mat):
    """Read ILSVRC2010 metadata from the distributed MAT file.

    Parameters
    ----------
    meta_mat : str or file-like object
        The filename or file-handle for `meta.mat` from the
        ILSVRC2010 development kit.

    Returns
    -------
    synsets : ndarray, 1-dimensional, compound dtype
        A table of ILSVRC2010 synset metadata with fields
        `ILSVRC2010_ID`, `WNID`, `wordnet_height`, `gloss`,
        `num_children`, `words`, `children` (a vector of
        `ILSVRC2010_ID`s padded with -1) and `num_train_images`.
    cost_matrix : ndarray, 2-dimensional, uint8
        A 1000x1000 matrix containing the precomputed pairwise
        cost (based on distance in the hierarchy) for all
        low-level synsets.
    """
    mat = loadmat(meta_mat, squeeze_me=True)
    synsets = mat['synsets']
    cost_matrix = mat['cost_matrix']
    # Re-pack loadmat's object arrays into one flat structured array with
    # fixed-width string fields sized to the longest entry.
    # FIX: `children` holds ILSVRC2010 IDs (values well above 127), so it
    # must be int16 -- the previous int8 silently overflowed; the -1
    # padding below already used int16.
    new_dtype = numpy.dtype([
        ('ILSVRC2010_ID', numpy.int16),
        ('WNID', ('S', max(map(len, synsets['WNID'])))),
        ('wordnet_height', numpy.int8),
        ('gloss', ('S', max(map(len, synsets['gloss'])))),
        ('num_children', numpy.int8),
        ('words', ('S', max(map(len, synsets['words'])))),
        ('children', (numpy.int16, max(synsets['num_children']))),
        ('num_train_images', numpy.uint16)
    ])
    new_synsets = numpy.empty(synsets.shape, dtype=new_dtype)
    for attr in ['ILSVRC2010_ID', 'WNID', 'wordnet_height', 'gloss',
                 'num_children', 'words', 'num_train_images']:
        new_synsets[attr] = synsets[attr]
    # Pad each variable-length children vector with -1 up to the widest.
    children = [numpy.atleast_1d(ch) for ch in synsets['children']]
    padded_children = [
        numpy.concatenate((c,
                           -numpy.ones(new_dtype['children'].shape[0] - len(c),
                                       dtype=numpy.int16)))
        for c in children
    ]
    new_synsets['children'] = padded_children
    return new_synsets, cost_matrix
def extract_patch_images(f, which_set):
    """Extract a dict of the "patch images" for ILSVRC2010.

    Parameters
    ----------
    f : str or file-like object
        The filename or file-handle to the patch images TAR file.
    which_set : str
        Which set of images to extract. One of 'train', 'valid', 'test'.

    Returns
    -------
    dict
        Maps filenames (without path) to the bytes of the replacement
        image.

    Notes
    -----
    Certain images in the distributed archives are blank or display an
    "image not available" banner; a separate TAR of corrected "patch
    images" is distributed, and this function reads that archive.
    """
    if which_set not in ('train', 'valid', 'test'):
        raise ValueError('which_set must be one of train, valid, or test')
    # The patch archive uses 'val' where the rest of the code says 'valid'.
    subdir = 'val' if which_set == 'valid' else which_set
    patch_images = {}
    with tar_open(f) as tar:
        for member in tar:
            if not member.name.endswith('.JPEG'):
                continue
            # TAR member paths use '/' regardless of os.path.sep.
            tokens = member.name.split('/')
            if tokens[-2] != subdir:
                continue
            patch_images[tokens[-1]] = tar.extractfile(member.name).read()
    return patch_images
| [
"logging.getLogger",
"fuel.converters.base.check_exists",
"io.BytesIO",
"six.moves.xrange",
"fuel.utils.formats.tar_open",
"scipy.io.matlab.loadmat",
"numpy.random.RandomState",
"progressbar.ProgressBar",
"fuel.utils.parallel.producer_consumer",
"fuel.datasets.H5PYDataset.create_split_array",
"o... | [((578, 605), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (595, 605), False, 'import logging\n'), ((1251, 1289), 'fuel.converters.base.check_exists', 'check_exists', ([], {'required_files': 'ALL_FILES'}), '(required_files=ALL_FILES)\n', (1263, 1289), False, 'from fuel.converters.base import check_exists\n'), ((2210, 2249), 'os.path.join', 'os.path.join', (['directory', 'DEVKIT_ARCHIVE'], {}), '(directory, DEVKIT_ARCHIVE)\n', (2222, 2249), False, 'import os\n'), ((2278, 2319), 'os.path.join', 'os.path.join', (['directory', 'TEST_GROUNDTRUTH'], {}), '(directory, TEST_GROUNDTRUTH)\n', (2290, 2319), False, 'import os\n'), ((2646, 2693), 'os.path.join', 'os.path.join', (['output_directory', 'output_filename'], {}), '(output_directory, output_filename)\n', (2658, 2693), False, 'import os\n'), ((5490, 5545), 'numpy.loadtxt', 'numpy.loadtxt', (['test_groundtruth_path'], {'dtype': 'numpy.int16'}), '(test_groundtruth_path, dtype=numpy.int16)\n', (5503, 5545), False, 'import numpy\n'), ((7288, 7326), 'fuel.datasets.H5PYDataset.create_split_array', 'H5PYDataset.create_split_array', (['splits'], {}), '(splits)\n', (7318, 7326), False, 'from fuel.datasets import H5PYDataset\n'), ((8682, 8791), 'functools.partial', 'partial', (['train_set_producer'], {'train_archive': 'train_archive', 'patch_archive': 'patch_archive', 'wnid_map': 'wnid_map'}), '(train_set_producer, train_archive=train_archive, patch_archive=\n patch_archive, wnid_map=wnid_map)\n', (8689, 8791), False, 'from functools import partial\n'), ((8825, 8922), 'functools.partial', 'partial', (['image_consumer'], {'hdf5_file': 'hdf5_file', 'num_expected': 'n_train', 'shuffle_seed': 'shuffle_seed'}), '(image_consumer, hdf5_file=hdf5_file, num_expected=n_train,\n shuffle_seed=shuffle_seed)\n', (8832, 8922), False, 'from functools import partial\n'), ((8946, 8983), 'fuel.utils.parallel.producer_consumer', 'producer_consumer', (['producer', 'consumer'], {}), '(producer, consumer)\n', 
(8963, 8983), False, 'from fuel.utils.parallel import producer_consumer\n'), ((13641, 13777), 'functools.partial', 'partial', (['other_set_producer'], {'image_archive': 'image_archive', 'patch_archive': 'patch_archive', 'groundtruth': 'groundtruth', 'which_set': 'which_set'}), '(other_set_producer, image_archive=image_archive, patch_archive=\n patch_archive, groundtruth=groundtruth, which_set=which_set)\n', (13648, 13777), False, 'from functools import partial\n'), ((13952, 13989), 'fuel.utils.parallel.producer_consumer', 'producer_consumer', (['producer', 'consumer'], {}), '(producer, consumer)\n', (13969, 13989), False, 'from fuel.utils.parallel import producer_consumer\n'), ((20209, 20243), 'scipy.io.matlab.loadmat', 'loadmat', (['meta_mat'], {'squeeze_me': '(True)'}), '(meta_mat, squeeze_me=True)\n', (20216, 20243), False, 'from scipy.io.matlab import loadmat\n'), ((20767, 20810), 'numpy.empty', 'numpy.empty', (['synsets.shape'], {'dtype': 'new_dtype'}), '(synsets.shape, dtype=new_dtype)\n', (20778, 20810), False, 'import numpy\n'), ((2353, 2380), 'os.path.join', 'os.path.join', (['directory', 'fn'], {}), '(directory, fn)\n', (2365, 2380), False, 'import os\n'), ((2704, 2731), 'h5py.File', 'h5py.File', (['output_path', '"""w"""'], {}), "(output_path, 'w')\n", (2713, 2731), False, 'import h5py\n'), ((9994, 10017), 'fuel.utils.formats.tar_open', 'tar_open', (['train_archive'], {}), '(train_archive)\n', (10002, 10017), False, 'from fuel.utils.formats import tar_open\n'), ((11967, 11999), 'progressbar.ProgressBar', 'ProgressBar', ([], {'maxval': 'num_expected'}), '(maxval=num_expected)\n', (11978, 11999), False, 'from progressbar import ProgressBar\n'), ((14937, 14960), 'fuel.utils.formats.tar_open', 'tar_open', (['image_archive'], {}), '(image_archive)\n', (14945, 14960), False, 'from fuel.utils.formats import tar_open\n'), ((15253, 15301), 'picklable_itertools.extras.equizip', 'equizip', (['images', 'pathless_filenames', 'groundtruth'], {}), '(images, 
pathless_filenames, groundtruth)\n', (15260, 15301), False, 'from picklable_itertools.extras import equizip\n'), ((16537, 16569), 'os.path.basename', 'os.path.basename', (['image_filename'], {}), '(image_filename)\n', (16553, 16569), False, 'import os\n'), ((17705, 17716), 'fuel.utils.formats.tar_open', 'tar_open', (['f'], {}), '(f)\n', (17713, 17716), False, 'from fuel.utils.formats import tar_open\n'), ((21001, 21021), 'numpy.atleast_1d', 'numpy.atleast_1d', (['ch'], {}), '(ch)\n', (21017, 21021), False, 'import numpy\n'), ((22361, 22372), 'fuel.utils.formats.tar_open', 'tar_open', (['f'], {}), '(f)\n', (22369, 22372), False, 'from fuel.utils.formats import tar_open\n'), ((5050, 5062), 'six.moves.xrange', 'xrange', (['(1000)'], {}), '(1000)\n', (5056, 5062), False, 'from six.moves import zip, xrange\n'), ((7368, 7388), 'numpy.dtype', 'numpy.dtype', (['"""uint8"""'], {}), "('uint8')\n", (7379, 7388), False, 'import numpy\n'), ((12123, 12161), 'numpy.random.RandomState', 'numpy.random.RandomState', (['shuffle_seed'], {}), '(shuffle_seed)\n', (12147, 12161), False, 'import numpy\n'), ((6624, 6682), 'collections.OrderedDict', 'OrderedDict', (['((source, tuples[split]) for source in sources)'], {}), '((source, tuples[split]) for source in sources)\n', (6635, 6682), False, 'from collections import OrderedDict\n'), ((10691, 10730), 'picklable_itertools.extras.equizip', 'equizip', (['pathless_filenames', 'images_gen'], {}), '(pathless_filenames, images_gen)\n', (10698, 10730), False, 'from picklable_itertools.extras import equizip\n'), ((12069, 12089), 'six.moves.xrange', 'xrange', (['num_expected'], {}), '(num_expected)\n', (12075, 12089), False, 'from six.moves import zip, xrange\n'), ((15185, 15202), 'os.path.split', 'os.path.split', (['fn'], {}), '(fn)\n', (15198, 15202), False, 'import os\n'), ((16742, 16765), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (16752, 16765), False, 'import io\n'), ((10585, 10602), 'os.path.split', 
'os.path.split', (['fn'], {}), '(fn)\n', (10598, 10602), False, 'import os\n'), ((16961, 16984), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (16971, 16984), False, 'import io\n')] |
import pickle
from pprint import pprint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from functools import reduce
import sys
import time
from sklearn.decomposition import PCA
from sklearn import cluster as sklearn_clustering
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import data_extract as dext
from heimat import reco
import settings
import cache_manager as cmng
import portals_urls
import data_cleaning
# --- Module-level state shared by the pipeline functions below ---
pca_u, G = None, None  # PCA model and projected object matrix (from U)
pca_a, A_star = None, None  # PCA model and projected article matrix (from A)
A, U, MAP, txtclf = None, None, None, None  # article/object matrices, object->papers map, text classifier
M = None  # combined training matrix | ui | aj | target |
CLF_MLP = None  # trained MLP association classifier
CLF_DBSCAN = None  # DBSCAN model placeholder -- not set in this chunk
D, PAPERS_LIST = None, None  # recommendation data -- not set in this chunk
UVT = None  # factorization result placeholder -- not set in this chunk
# Cached work products: all papers, object->article ids, object dataframe.
papers_total, objects_articles_dict, objects_df = cmng.load_work()
# Hyperparameters pulled from settings.py.
pca_G_ncomponents = settings.pca_G_ncomponents
pca_A_ncomponents = settings.pca_A_ncomponents
mlp_iter = settings.mlp_iter
funksvd_iter = settings.funksvd_iter
funksvd_latent_features = settings.funksvd_latent_features
pd.set_option("max_rows", 50)  # limit pandas console output
np.random.seed()  # reseed from OS entropy, i.e. runs are non-deterministic
def dist_em(xs1, xs2):
    """Compute the Euclidean and Manhattan distances between two vectors.

    :param xs1: first vector (numpy array)
    :param xs2: second vector (numpy array)
    :return: tuple (euclidean, manhattan)
    """
    diff = xs1 - xs2
    euclidean = np.linalg.norm(diff)
    manhattan = sum(abs(d) for d in diff)
    return euclidean, manhattan
def show_articles_by_group(group=0):
    """Print every paper associated with the objects in one cluster group.

    :param group: group label to filter matrix U by
    :return: None
    """
    global U
    members = U[U.group == group]
    # Collect the papers of all member objects, de-duplicated via a set.
    collected = []
    for objid in members['OBJID'].values:
        collected.extend(MAP[objid])
    for paper_id in list(set(collected)):
        print("--------------------------------------------------")
        dext.get_paper(paper_id)
def show_first3_components(matrix, title="", start_at_index=0):
    """Scatter-plot the first three principal components of a matrix.

    :param matrix: G or A_star matrix
    :param title: optional plot title
    :param start_at_index: column offset of the first component (1 for G
        and A_star, whose column 0 carries the object/paper id)
    :return: None
    """
    plt.figure(figsize=(10, 8))
    ax = plt.axes(projection='3d')
    c1, c2, c3 = start_at_index, start_at_index + 1, start_at_index + 2
    ax.scatter3D(matrix[:, c1], matrix[:, c2], matrix[:, c3],
                 s=8, cmap='Greens', edgecolors='k')
    if title:
        plt.title(title)
    plt.show()
    plt.close()
    time.sleep(1)
def gen_matrix_G(ncomp=25):
    """Build matrix G: the PCA projection of object matrix U.

    - drops the `OBJID` and `group` columns, imputes NaNs with the mean
    - fits a PCA with `ncomp` components
    - prepends the `OBJID` column so each row keeps its identity
    - prints variance diagnostics and plots the first three components

    :param ncomp: number of principal components to keep
    :return: None (sets globals pca_u and G)
    """
    global pca_u, G, U
    print("\n[x] PCA for matrix G:")
    pca_u = PCA(n_components=ncomp)
    feature_cols = [c for c in U.columns if c not in ("OBJID", "group")]
    features = U[feature_cols]
    G = pca_u.fit_transform(features.fillna(features.mean()).values)
    G = np.append(U['OBJID'].values.reshape(U.shape[0], 1), G, axis=1)
    print("[x] Explained variance ratio:")
    print(pca_u.explained_variance_ratio_)
    print("[x] Singular values:")
    print(pca_u.singular_values_)
    print("[x] Sum of variance:")
    print(np.sum(pca_u.explained_variance_ratio_))
    show_first3_components(G, title="First 3 principal components for G", start_at_index=1)
def gen_matrix_A_star(ncomp=25):
    """Build matrix A_star: the PCA projection of article matrix A.

    - imputes NaNs with the column mean, excludes the `paper_id` column
    - fits a PCA with `ncomp` components
    - prepends the `paper_id` column so each row keeps its identity
    - prints variance diagnostics and plots the first three components

    :param ncomp: number of principal components to keep
    :return: None (sets globals pca_a and A_star)
    """
    global pca_a, A_star
    print("\n[x] PCA for matrix A:")
    pca_a = PCA(n_components=ncomp)
    filled = A.fillna(A.mean())
    A_star = pca_a.fit_transform(filled.values[:, 1:])
    A_star = np.append(A['paper_id'].values.reshape(A_star.shape[0], 1), A_star, axis=1)
    print("[x] Explained variance ratio:")
    print(pca_a.explained_variance_ratio_)
    print("[x] Singular values:")
    print(pca_a.singular_values_)
    print("[x] Sum of variance:")
    print(np.sum(pca_a.explained_variance_ratio_))
    show_first3_components(A_star, title="First 3 principal components for A_star", start_at_index=1)
def get_indexes_articles_in_df(objid):
    """Return the row indexes in A of the papers MAP links to `objid`.

    MAP holds the mapping between astronomical object ids and paper ids.

    :param objid: astronomical object id
    :return: list of integer row indexes into A
    """
    global A, MAP
    indexes = []
    for paper_id in MAP[objid]:
        matches = A[A.paper_id == paper_id].index.values.tolist()
        # Papers missing from A (e.g. a download failed earlier) are
        # silently skipped, as before.
        if matches:
            indexes.append(matches[0])
    return indexes
def gen_matrix_M(balance_factor=3):
    """Assemble the training matrix M = | ui | aj | plus its target vector.

    For every object row of G, the associated papers (via MAP) are added
    with target 1; additionally `balance_factor` times as many randomly
    sampled non-associated papers are added with target 0, which keeps
    the classes from being overly unbalanced.

    :param balance_factor: ratio of negative to positive samples per object
    :return: tuple (M, y) -- 2-d numpy array and list of 0/1 targets
    """
    global G, U, A_star, A
    M = []
    y = []
    print("Building matrix M, this will take a while .. ")
    # max(1, ...) guards against G having fewer than 10 rows, where
    # int(0.1 * n) == 0 would make the modulo raise ZeroDivisionError.
    progress_step = max(1, int(0.1 * G.shape[0]))
    for i in range(0, G.shape[0]):
        if i != 0 and i % progress_step == 0:
            print("%.2f" % (100 * i / G.shape[0]) + "% of objects")
        r1 = G[i, 1:].tolist()
        object_id = U.values[i, 0]
        indexes_associations = get_indexes_articles_in_df(object_id)
        # Set lookup turns the previous O(n*m) membership filter into O(n).
        assoc_set = set(indexes_associations)
        indexes_non_associations = [k for k in range(A.shape[0])
                                    if k not in assoc_set]
        indexes_non_associations = pd.Series(indexes_non_associations).sample(
            len(indexes_associations) * balance_factor).tolist()
        for j in indexes_associations + indexes_non_associations:
            r2 = A_star[j, 1:].tolist()
            M.append(r1 + r2)
            y.append(1 if j in assoc_set else 0)
    M = np.array(M)
    return M, y
def gen_matrix_Mi(i):
    """Build Mi, the slice of M pairing object i of G with every article.

    Combines the object's row from G with each row of A_star so that
    P(association | Gi, aj) can be scored for all papers aj at once; yi
    holds the known 0/1 association targets.

    :param i: row index of the object in G (and U)
    :return: tuple (Mi, yi)
    """
    global U, G, A, A_star
    Mi = []
    yi = []
    r1 = G[i, 1:].tolist()
    # Loop-invariant: the object id and its known article set do not
    # depend on j -- hoisted out (previously re-computed every iteration).
    object_id = U.values[i, 0].encode("utf-8")
    articles_found_related = dext.objects_articles_dict[object_id]
    for j in range(0, A_star.shape[0]):
        r2 = A_star[j, 1:].tolist()
        article_id = A.values[j, 0]
        Mi.append(r1 + r2)
        yi.append(int(article_id in articles_found_related))
    Mi = np.array(Mi)
    return Mi, yi
def get_confusion_matrix_stats(cm, i):
    """Compute precision, recall and F1 for class `i` of a confusion matrix.

    Assumes sklearn's convention: cm[r, c] counts samples with TRUE label
    r and PREDICTED label c. Hence false positives live in column i and
    false negatives in row i. The previous version had the two sums
    swapped, which silently exchanged precision and recall.

    :param cm: confusion matrix (rows = true labels, cols = predictions)
    :param i: class index to evaluate
    :return: tuple (precision, recall, f1_score)
    """
    tp = cm[i, i]
    fp = np.sum(cm[:, i]) - tp  # predicted i, but true label differs
    fn = np.sum(cm[i, :]) - tp  # true i, but predicted otherwise
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1_score = 2 * (precision * recall) / (precision + recall)
    return precision, recall, f1_score
def check_mlp(x, y):
    """Evaluate the global CLF_MLP classifier on a labeled sample.

    Prints the confusion matrix, per-label precision/recall/F1 and the
    overall accuracy. The final value was previously printed with the
    misleading label "precision:"; it is the mean accuracy.

    :param x: feature matrix
    :param y: true labels
    :return: None
    """
    global CLF_MLP
    print("+++++++++++++++++++++++++++++++++++++")
    labels_zuordnung_mlp = CLF_MLP.classes_
    y_true = np.array(y)
    # Predict by taking the argmax class probability per sample.
    y_pred = np.array([labels_zuordnung_mlp[np.argmax(t)]
                       for t in CLF_MLP.predict_proba(x)])
    accuracy = (y_pred == y_true).mean()
    cm = confusion_matrix(y_true, y_pred, labels=labels_zuordnung_mlp)
    print("Labels:", labels_zuordnung_mlp)
    print("Confusion Matrix:")
    print(cm)
    for i in range(0, len(cm)):
        precision, recall, f1_score = get_confusion_matrix_stats(cm, i)
        print("Label {} - precision {}, recall {}, f1_score {}: ".format(
            i, np.round(precision, 2), np.round(recall, 2), np.round(f1_score, 2)
        ))
    print("accuracy:", accuracy)
    print("+++++++++++++++++++++++++++++++++++++")
def show_object_details(object_id, article_indexes, pred_df=None, topk=10):
    """
    Shows associated papers for an object id according to predicted article_indexes
    # U expands categorical variables, so it has a dimension larger than dext.objects_df
    :param object_id: SDSS object identifier (str)
    :param article_indexes: row indexes into A of the predicted papers
    :param pred_df: optional prediction dataframe, printed alongside
    :param topk: maximum number of papers to display
    :return: None (interactive: waits for Enter between papers)
    """
    global A
    print("""
    \nObject with ID: {}
    """.format(object_id))
    if pred_df is not None:
        print("[x] Predicted articles in pred_df:")
        print(pred_df)
    # objects_df stores object ids as bytes, hence the encode.
    objid = object_id.encode("utf-8")
    url = "http://skyserver.sdss.org/dr16/en/tools/explore/Summary.aspx?id={}".format(
        object_id
    )
    print("[x] You can check the SkyServer Explore page at: ")
    print(url, "\n")
    print("[x] Compact form from original object pandas dataframe (objects_df as in data_extract.py):")
    print(dext.objects_df[dext.objects_df.OBJID == objid].transpose())
    print("\n[x] Showing maximum Top-{}:".format(topk))
    # Walk the predicted papers in ranked order, pausing after each one.
    for k in range(0, min(len(article_indexes), topk)):
        print("*************************************************************************************")
        if pred_df is not None:
            print(pred_df.iloc[k])
        j = article_indexes[k]
        dext.get_paper(paper_id=A.paper_id.iloc[j])
        input(".....")
def apply_mlp(object_id=None):
    """
    uses trained MLP classifier to calculate probability P(Bij | ui, aj) for one object_id ui and all aj
    - uses construction of matrix Mi to achieve that, that is the portion of general matrix M for the object
    :param object_id: SDSS object identifier; if None a random object is picked
    :return: tuple (object_id, articles_indexes, pred_df)
    """
    global U, G, CLF_MLP
    if object_id is None:
        # Pseudo-random pick: sample 10 row indices and take the 6th.
        i = pd.Series(range(0, G.shape[0])).sample(10).iloc[5]  # index of object id in matrices G, U
        object_id = U.OBJID.iloc[i]
    else:
        # Last matching row index of the requested object in U.
        i = U[U.OBJID == object_id].index.values.tolist()[-1]
    print("\n[x] Object ID:", object_id)
    Mi, yi = gen_matrix_Mi(i)
    Mi = pd.DataFrame(Mi)
    print("[x] The portion of M matrix, corresponding to | ui | aj |, with j in [0, A_star.shape[0]]: ")
    print(Mi)
    # Probability of class 1 ("associated") for each article row.
    preds = [np.round(t[1], 2) for t in CLF_MLP.predict_proba(Mi.values)]
    # print("\n[x] Predictions:")
    # print(preds)
    pred_df = pd.DataFrame(
        {
            "article_index": Mi.index.values.tolist(),
            "mlp_proba": preds,
            "associated": yi
        }
    )
    # Keep only articles predicted with > 50% probability, best first.
    pred_df = pred_df.sort_values(by="mlp_proba", ascending=False)
    pred_df = pred_df[pred_df.mlp_proba > 0.5]
    pred_df = pred_df.reset_index(drop=True)
    print("\n[x] Summarised with a threshold for probabilty of 50%, that is P(Bij | ui, aj) > 0.5:")
    print(pred_df)
    articles_indexes = pred_df.article_index.values.tolist()
    print("")
    return object_id, articles_indexes, pred_df
def data_extraction():
    """Run the extraction pipeline and load the working matrices.

    Via module dext the original data stays accessible: papers_total,
    objects_articles_dict, objects_df.

    :return: tuple (A, U, MAP, txtclf)
    """
    print("[x] Extracting data and creating matrices A, U and dictionary map MAP .. ")
    dext.run()
    A, U, MAP, txtclf = dext.load_matrices()
    return A, U, MAP, txtclf
####################### Constructing Matrix M and MLP model #######################
def construct_G_Astar_M_matrices():
"""
uses above methods to construct training data M by combining G and A_star matrices
:return:
"""
global G, A_star, M, pca_A_ncomponents, pca_G_ncomponents
print("[x] Generating PCA projections of:"
"\n- matrices U (matrix G of astronomical objects)"
"\n- and A (matrix A_star of related papers)")
gen_matrix_G(ncomp=pca_G_ncomponents)
# TODO: increase automatically pca_A_ncomponents if the explained variance drops to less than, for instance, 0.85
gen_matrix_A_star(ncomp=pca_A_ncomponents)
print("\n[x] Generating matrix M out of two parts "
"| ui | aj | target {1 if related, 0 otherwise} ")
M, y = gen_matrix_M()
M = pd.DataFrame(M)
target_col = M.shape[1]
M[target_col] = y
p = 100 * M[target_col].sum() / M.shape[0]
print("The percentage of articles that are related directly (found at NED) at about: {}%".format(
np.round(p, 2)
))
print("[x] Done. Head(10):")
print(M.head(10))
print("")
time.sleep(5)
def do_model_mlp():
"""
perform modeling using MLP on constructed matrix M
:return:
"""
global M, CLF_MLP, labels_mlp
print("\n[x] Performing MLP modeling with balancing by choosing combinations objects (matrix G) "
"to articles (matrix A_star)"
"\n(target == 1) and three times those not related")
X = M.copy()
indx = X.index.values.tolist()
np.random.shuffle(indx)
X = X.loc[indx]
X, Y = X.values[:, :-1], X.values[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)
mlp = MLPClassifier(max_iter=mlp_iter, verbose=True, early_stopping=False)
CLF_MLP = mlp.fit(X_train, y_train)
labels_mlp = CLF_MLP.classes_
print("[x] Validation of MLP classifier results on test data:")
check_mlp(x=X_test, y=y_test)
input("enter to continue ...")
print("")
def apply_clf_mlp(object_id="M 79", show_details=False):
"""
applies trained CLF_MLP model on one object
:param object_id:
:return:
"""
# example prediction using the MLP classifier to calculate probabability of association to any paper
print("\n[x] Applying model MLP to object: {}".format(object_id))
object_id, articles_indexes, pred_df = apply_mlp(object_id=object_id) # * sig Ori
if show_details:
print("\n[x] Example prediction:")
show_object_details(object_id, articles_indexes, pred_df)
####################### Constructing Matrix D, Clustering and FunkSVD models #######################
def perform_object_optimal_clustering():
"""
performs a search for clustering with DBSCAN to be able to construct a reduced form of a "user-item" matrix
:return:
"""
global CLF_DBSCAN, U, G
print("\n[x] Choosing an optimal parameter for DBSCAN object cluster classifier .. ")
list_dist = []
for i in range(0, G.shape[1] - 1):
for j in range(i + 1, G.shape[1]):
euclidean, _ = dist_em(G[i, 1:], G[j, 1:])
list_dist.append(euclidean)
number_of_clusters = []
data_noise_list = []
distribution_data_list = []
param = np.linspace(0.01, pd.Series(list_dist).quantile(0.5), 100)
eps_list = []
for eps in param:
clf = sklearn_clustering.DBSCAN(eps=eps, metric="euclidean")
clf.fit(G[:, 1:])
U["group"] = clf.labels_
res = U.groupby("group").count()['OBJID']
# don't consider any results under some lower threshold N clusters (no point in using later D)
if res.shape[0] <= 5:
continue
eps_list.append(eps)
distribution_data = res.loc[list(filter(lambda x: x != -1, res.index.values))]
distribution_data = (distribution_data / distribution_data.sum()).mean()
number_of_clusters.append(len(set(clf.labels_)))
if -1 in res.index.values:
data_noise = res.loc[-1]
data_noise_list.append(data_noise)
else:
data_noise_list.append(0)
distribution_data_list.append(distribution_data)
param_choose = pd.DataFrame(
{
"nclusters": number_of_clusters,
"eps": eps_list,
"noise": data_noise_list,
"distribution": distribution_data_list
}
)
param_choose['score'] = [t1 + t1 * t3 - np.log10(t2) for t1, t2, t3 in
param_choose[["nclusters", "noise", "distribution"]].values]
param_choose = param_choose.sort_values(by="score", ascending=False)
param_choose = param_choose.reset_index(drop=True)
param_choose = param_choose[param_choose.nclusters >= 3]
param_choose = param_choose[param_choose.nclusters <= 15]
q90 = param_choose.distribution.quantile(0.9)
q10 = param_choose.distribution.quantile(0.1)
q80 = param_choose.noise.quantile(0.8)
param_choose = param_choose[param_choose.distribution >= q10]
param_choose = param_choose[param_choose.distribution <= q90]
param_choose = param_choose[param_choose.noise <= q80]
eps_choice = param_choose.eps.iloc[0]
print(param_choose)
# visualization of choice for parameter epsilon
plt.scatter(x=eps_list, y=number_of_clusters, s=5)
plt.xlabel("optimal eps parameter: {}".format(eps_choice))
plt.ylabel("expected number of clusters")
plt.axvline(x=eps_choice, color='k', linestyle='--')
plt.title("Choice for optimal eps parameter for DBSCAN")
plt.show()
print("[x] (Re-)building the classifier for clusters of objects with parameter eps={}".format(eps_choice))
CLF_DBSCAN = sklearn_clustering.DBSCAN(eps=eps_choice, metric="euclidean")
CLF_DBSCAN.fit(G[:, 1:])
print("[x] Number of clusters:", len(set(CLF_DBSCAN.labels_)))
U["group"] = CLF_DBSCAN.labels_
print("Distribution of objects into groups:")
print(U.groupby("group").count()['OBJID'])
print("")
time.sleep(5)
def construct_D_matrix():
"""
Based on groupping with DBSCAN reduces data to centers of clusters and constructs D matrix such that:
- Dkj is 1 if any object in cluster k has an association to article j and None otherwise
- the value None is left for the method FunkSVD to be filled in
:return:
"""
global D, PAPERS_LIST, MAP, U
print("[x] Constructing 'user-item' matrix D out of the clustered data .. ")
PAPERS_LIST = list(set(A.paper_id.values))
PAPERS_LIST.sort(reverse=True)
D = []
list_object_groups = list(set(CLF_DBSCAN.labels_))
for cluster in list_object_groups:
objects_in_cluster = U[U.group == cluster].OBJID.values.tolist()
list_associated_articles = list(set(list(reduce(lambda a, b: a + b,
[MAP[objid] for objid in objects_in_cluster]))))
D.append(
[cluster]
+ list(map(lambda id: 1.0 if id in list_associated_articles else None, PAPERS_LIST))
)
D = pd.DataFrame(np.array(D), columns=(["cluster"] + PAPERS_LIST))
print("First 10 rows out of {} (clusters):".format(D.shape[0]))
D = D.fillna(value=np.nan)
print(D.head(10))
time.sleep(5)
def generate_UVT():
"""
performs FunkSVD method on D matrix, it allows finding of two matrices U, VT such that:
U @ VT ~ D
- this allows using SVD to find the latent features
:return:
"""
global UVT, funksvd_latent_features
print("\n[x] Generating U, VT matrices through FunkSVD for the final SVD model .. ")
if funksvd_latent_features > D.shape[0]:
funksvd_latent_features = D.shape[0]
U_clusters, V_articles, sse_means = reco.mat.FunkSVD(D.values[:, 1:],
latent_features=funksvd_latent_features,
learning_rate=0.0001,
iters=funksvd_iter)
D_approx = U_clusters @ V_articles
u, s, vt = reco.mat.svd_dekomposition(D_approx)
k = funksvd_latent_features
s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]
res = u_new @ s_new @ vt_new
res = pd.DataFrame(res)
print("[x] 'User-Item' matrix successfully build and ready for predictions:")
print(res)
UVT = res
time.sleep(5)
def apply_clf_svd(object_id):
"""
uses the newly obtained matrix UVT to make predictions on associations between object_id and all available papers
:param object_id:
:return:
"""
global UVT
print("[x] Applying SVD classifier .. ")
# object_indexes = U[U.OBJID == object_id].index.values.tolist()
object_cluster = U[U.OBJID == object_id].group.values.tolist()
k = 0
# corresponding to PAPERS_LIST ids
preds = UVT.iloc[object_cluster[k]].values
pred_dict = {}
for i in range(len(preds)):
pred_dict[PAPERS_LIST[i]] = preds[i]
print("[x] Done.")
return pred_dict
def apply_mlp_svd_voting(object_id="M 79", topk=10, return_df=False, ignore_articles=None): # "* sig Ori"
"""
combines both results from above (MLP and SVD) to deliver a final scoring on most probably intersting
papers related to a given object
:param object_id:
:param topk: how many articles to recommend
:param return_df: whether to return dataframe
:param ignore_articles: list of article ids to ignore
:return:
"""
_, articles_indexes, pred_df = apply_mlp(object_id=object_id)
articles_indexes_to_paper_ids = [A.paper_id.iloc[j] for j in articles_indexes]
pred_dict = apply_clf_svd(object_id)
pred_df['article_id'] = articles_indexes_to_paper_ids
pred_df['cf_score'] = [pred_dict[paper_id] for paper_id in articles_indexes_to_paper_ids]
pred_df['recommend'] = [(a + b) / 2.0 for a, b in pred_df[["mlp_proba", "cf_score"]].values]
pred_df = pred_df.sort_values("recommend", ascending=False)
pred_df = pred_df.reset_index(drop=True)
if ignore_articles is not None:
pred_df = pred_df[list(map(lambda id: id not in ignore_articles, pred_df["article_id"].values.tolist()))]
if return_df:
return pred_df.iloc[:topk]
else:
print(pred_df)
articles_indexes = pred_df.article_index.values.tolist()
show_object_details(object_id, articles_indexes, pred_df, topk=topk)
def check_voting_model():
"""
Randomly choosing objects and run the model on them
Checks typical precision, recall and f1_score for the expected associations
precision is mostly pessimistic, since the new connections to papers,
that are actually legitimate are not labeled as such.
:return:
"""
global A, MAP
print("+++++++++++++++++++++++++++++++++++++")
# reco.load_model(model_filepath)
# A = reco.A; MAP = reco.MAP; U = reco.U; apply_mlp_svd_voting = reco.apply_mlp_svd_voting
statistic = []
for object_id in U.OBJID.sample(100):
# object_id = "M 79"
pred_df = apply_mlp_svd_voting(object_id=object_id, topk=20, return_df=True)
expected = MAP[object_id]
predicted = list(map(lambda k: A.paper_id.iloc[k], pred_df.article_index.values))
actual = list(filter(lambda x: x in expected, predicted))
tp = len(actual)
fp = len(predicted) - tp # np.sum(cm[i, :]) - tp
fn = len(expected) - tp # np.sum(cm[:, i]) - tp
precision = (tp / (tp + fp)) if (tp + fp) != 0 else 0
recall = tp / (tp + fn) if (tp + fn) != 0 else 0
f1_score = (2 * (precision * recall) / (precision + recall)) if ((precision + recall) != 0) else 0.0
statistic.append([precision, recall, f1_score])
print("[x] Current statistic:")
statistics_df = pd.DataFrame(statistic, columns=["precision", "recall", "f1_score"])
print(statistics_df.describe())
print("[x] Progress: %.2f" % (100 * np.round(statistics_df.shape[0] / 100, 2)) + "%")
print("\n\n")
def clean_matrix_U_invalid_object_id(_U):
"""
in pipeline object_ids that couldn't be processed and end up having no OBJID ('')
should be removed
:param _U:
:return: invalid records from _U where OBJID missing
"""
return _U[_U.OBJID != ""]
def drop_dups(xdf):
xdf = xdf.drop_duplicates()
xdf = xdf.reset_index(drop=True)
return xdf
def update_mlp_svd_model(object_id="* sig Ori", model_filepath="./data/salamander_model.pckl"):
"""
- performs a complete pipeline to update the model based on current data
- currently added data by download either from SDSS or SIMBAD will be included in model
:param object_id:
:param model_filepath:
:return:
"""
global A, U, MAP, txtclf
data_cleaning.clean_object_names()
data_cleaning.handle_objects_nan_values()
A, U, MAP, txtclf = data_extraction()
U = clean_matrix_U_invalid_object_id(U)
U = drop_dups(U)
A = drop_dups(A)
construct_G_Astar_M_matrices()
do_model_mlp()
apply_clf_mlp(object_id=object_id, show_details=True)
perform_object_optimal_clustering()
construct_D_matrix()
generate_UVT()
apply_clf_svd(object_id=object_id)
apply_mlp_svd_voting(object_id=object_id)
save_model(model_filepath=model_filepath)
xy = input("\n\n[x] check now voting model on data? (takes a while, checks 100 objects) [y/n]: ")
if xy == "y" or xy == "":
check_voting_model()
def save_model(model_filepath):
global A, U, MAP, pca_u, G, pca_a, A_star, CLF_MLP, CLF_DBSCAN, D, PAPERS_LIST, UVT
salamander_model = {
"A": A,
"U": U,
"MAP": MAP,
"G": G, "pca_u": pca_u,
"A_star": A_star, "pca_a": pca_a,
"CLF_MLP": CLF_MLP,
"CLF_DBSCAN": CLF_DBSCAN,
"D": D,
"PAPERS_LIST": PAPERS_LIST,
"UVT": UVT
}
with open(model_filepath, 'wb') as f:
pickle.dump(salamander_model, f)
print("Model successfully saved under {}".format(model_filepath))
def load_model(model_filepath):
"""
Load model example:
import main_reco as mr
mr.load_model(mr.get_model_filepath_by_name('salamander'))
"""
global A, U, MAP, pca_u, G, pca_a, A_star, CLF_MLP, CLF_DBSCAN, D, PAPERS_LIST, UVT
global papers_total, objects_articles_dict, objects_df
import os
if not os.path.isfile(model_filepath):
print("[x] Model doesn't exist at specified path: {}".format(model_filepath))
print("[x] Need to first rebuild it.")
return
with open(model_filepath, 'rb') as f:
salamander_model = pickle.load(f)
A = salamander_model["A"]
U = salamander_model["U"]
MAP = salamander_model["MAP"]
G = salamander_model["G"]
pca_u = salamander_model["pca_u"]
A_star = salamander_model["A_star"]
pca_a = salamander_model["pca_a"]
CLF_MLP = salamander_model["CLF_MLP"]
CLF_DBSCAN = salamander_model["CLF_DBSCAN"]
D = salamander_model["D"]
PAPERS_LIST = salamander_model["PAPERS_LIST"]
UVT = salamander_model["UVT"]
print("Model loaded from {}".format(model_filepath))
def print_object_dict_as_df(object_dict_form):
"""
instead of using pprint, uses pandas dataframe to better display content of object dictionary form
:param object_dict_form:
:return:
"""
from copy import deepcopy
pd.set_option("max_rows", None)
xdict = deepcopy(object_dict_form)
for xkey in xdict.keys():
xdict[xkey] = [xdict[xkey]]
print(pd.DataFrame(xdict).transpose())
pd.set_option("max_rows", 50)
def update_matrices_for_new_object_id(object_U_dict_form):
"""
In case a new object was just downloaded, it is possible to get recommendations
without updating the model, but only for the articles that are already in A.
This needs following steps:
- adds object_id to matrix U, including the prediction of cluster (CLF_DBSCAN)
- calculates the PCA form of the object and append it to G
:param object_U_dict_form: dictionary for the object obtained by reading data from local cache with data_extraction()
:return:
"""
global U, pca_u, G
# create record to append to U
record = {}
for col in U.columns:
if col != "group":
record[col] = [object_U_dict_form[col]]
record = pd.DataFrame(record)
record['group'] = [None]
record = record[U.columns]
# update U with the new object
U = pd.concat([U, record])
U = U.reset_index(drop=True)
"""
At this point there is only one way to make a quess about the possible cluster a new object could be in,
by measuring the distance to the vectors G from Matrix M and guessing the possible cluster of closest record
- currently G matrix to execute PCA need to average based on previous data (as done in gen_matrix_G)
"""
print("[x] G matrix-form for object:")
record_averaged = U.fillna(U.mean()).values[:, 1:-1][-1:, :] # group feature is located on last position
record_G_form = pca_u.transform(record_averaged)
record_G_form = np.append(np.array([object_U_dict_form['OBJID']], dtype='object').reshape(1, -1),
record_G_form, axis=1)
print(record_G_form)
distances = [dist_em(g_record[1:], record_G_form[:, 1:])[0] for g_record in G]
position_closest_distance = np.argsort(distances)[0]
cluster_at_pos = U.group.iloc[position_closest_distance]
print("[x] Based on closest distance to current data, most probable cluster would be:", cluster_at_pos)
# update group in matrix U
U.at[U.shape[0] - 1, "group"] = cluster_at_pos
# update data in G
G = np.append(G, record_G_form, axis=0)
def apply(object_id, model_filepath, topk=10, return_value=False, ignore_articles=None):
"""
If the object is already in U, that is in the model, then the prediction get's done directly without
need for constructing the G vector
:param object_id:
:param model_filepath:
:return:
"""
global U, A, MAP, G, A_star, CLF_MLP, CLF_DBSCAN, D, PAPERS_LIST, UVT
_object_id = object_id
print("================================ Recommending papers for {} ================================".format(
object_id
))
print("")
load_model(model_filepath)
object_id = search_object_name(object_id, U)
if object_id is None:
print("[x] Object ID is not in modeled U matrix")
print("[x] Checking if the object is in cache .. ")
time.sleep(3)
# by accessing data_extraction() => matrices A, U and MAP are new, since all new objects are included.
# => the only matrix that is important is U with the record for the newly downloaded object.
_, U_new, _, _ = data_extraction()
object_id = search_object_name(_object_id, U_new) # has it been already downloaded but model not updated?
if object_id is None:
print("[x] Data for the object must be at least downloaded."
"\nUse either download path from SDSS or SIMBAD before continuing here.")
print_help()
return
object_U_dict_form = U_new[U_new.OBJID == object_id].iloc[0].to_dict()
print("\n[x] Object found: ")
print_object_dict_as_df(object_U_dict_form)
update_matrices_for_new_object_id(object_U_dict_form)
if not return_value:
apply_mlp_svd_voting(object_id=object_id, topk=topk, ignore_articles=ignore_articles)
else:
return apply_mlp_svd_voting(object_id=object_id, topk=topk, return_df=True, ignore_articles=ignore_articles)
def reformat_object_id(xstr):
return ' '.join(list(filter(lambda x: x.strip() != "", xstr.split(" "))))
def describe_data(model_filepath="./data/salamander_model.pckl"):
"""
- Prints out information about downloaded and processed data:
- how many objects, papers
- what are the most important papers
- plots figures to show what type of objects are available
:return:
"""
global U, A, MAP, G, A_star, CLF_MLP, CLF_DBSCAN, D, PAPERS_LIST, UVT
load_model(model_filepath)
def search_object_name(xname, input_df):
"""
SIMBAD gives names in specific format, so "M 1" and not "M1"
This function looks for the name without the spaces and returns the correct SIMBAD or SDSS expected format
:param xname:
:param U: matrix U as parameter so that it is possible to search either in U from saved model or in U_new
(see method apply())
:return:
"""
res = list(filter(lambda x: reformat_object_id(xname) in reformat_object_id(x), input_df['OBJID'].values.tolist()))
if len(res) != 0:
return res[0]
else:
return None
def get_coordinates(object_id):
global objects_df
res = objects_df[
list(map(lambda x: reformat_object_id(x.strip().lower().decode("utf-8")) ==
reformat_object_id(object_id.lower()),
objects_df.OBJID.values))]
if res.shape[0] == 0:
print("Nix gefunden .. ")
print("object_id:", object_id)
return res[["PLUG_RA", "PLUG_DEC"]]
def get_model_filepath_by_name(name):
return './data/{}_model.pckl'.format(name)
def print_help():
print("\n[x] Example usage:"
"\n Update current model with (newly) downloaded data: "
"\n\t python main_reco.py update 'salamander' "
"\n\n Apply model for a downloaded object "
"\n\t python main_reco.py papers 'salamander' '* sig Ori' 15 "
"\n")
print("""[x] Alternatives:
# Download data from SIMBAD, much less data available (most probably no spectra)
# - for instance CDS Portal can be used to get the coordinates
python read_simbad.py 05 34 31.940 +22 00 52.20
# Download data from SDSS (https://dr12.sdss.org/advancedSearch);
# as long as the object is in the database, then complete record is available
# - use DR12 Advanced Search to get the download.txt for the region of the sky with the object
# - replace download.txt under ./data/
python read_sdss.py
""")
"""
This script is the main component to be used for:
- rebuild the proposed voting model
- check recommendation for astronomical objects (that were included in the model)
- check recommendations for a set of astronomical objects
Object types, nomenclature, Abkürzungen:
http://simbad.u-strasbg.fr/simbad/sim-display?data=otypes
When adding new objects, might need to update clean_tags() method
import data_cleaning
keywords = data_cleaning.clean_tags(keywordlist=None, debug=True)
"""
if __name__ == "__main__":
print("\n")
try:
if sys.argv[1] not in ['update', 'papers', 'process']:
print_help()
# python main_reco.py update 'salamander'
elif sys.argv[1] == 'update':
model_path = get_model_filepath_by_name(sys.argv[2])
update_mlp_svd_model(model_filepath=model_path)
# python main_reco.py papers 'salamander' '* sig Ori' 15
elif sys.argv[1] == 'papers':
model_path = get_model_filepath_by_name(sys.argv[2])
topk = 10 if len(sys.argv) == 4 else int(sys.argv[4])
apply(object_id=sys.argv[3], model_filepath=model_path, topk=topk)
# python main_reco.py process 'salamander' 15 'NGC 2566' 'NGC 2207' 'NGC 2974' 'NGC 2559' 'NGC 2292' 'NGC 2613' 'NGC 3115'
elif sys.argv[1] == 'process':
model_path = get_model_filepath_by_name(sys.argv[2])
topk = int(sys.argv[3])
object_id_list = sys.argv[4:]
output_table = []
paper_ids = []
for object_id in object_id_list:
res = get_coordinates(object_id)
portals_urls.RA = res.PLUG_RA.iloc[0]
portals_urls.DEC = res.PLUG_DEC.iloc[0]
url_img, url_cas, url_simbad, url_cds, url_ned = portals_urls.show_urls()
urls = "\n\n{}" \
"\n\n{}" \
"\n\n{}" \
"\n\n{}" \
"\n\n{}".format(
url_img, url_cas, url_simbad, url_cds, url_ned
)
pred_df = apply(object_id=object_id, model_filepath=model_path, topk=topk,
return_value=True, ignore_articles=paper_ids)
for i in range(0, pred_df.shape[0]):
article_index = pred_df.article_index.iloc[i]
paper_id = A.paper_id.iloc[article_index]
associated = pred_df.associated.iloc[i]
scores = str([pred_df.mlp_proba.iloc[i], pred_df.cf_score.iloc[i]])
if paper_id not in paper_ids:
res = dext.get_paper(paper_id=paper_id)
output_table.append([object_id, associated, scores,
urls,
res['title'] + "\n\n" + res['link'], res['description']])
paper_ids.append(paper_id)
pd.DataFrame(output_table,
columns=["identifier", "association", "scores",
"url", "title", "description"]).to_csv("./data/output.csv", index=False)
print("[x] List saved under ./data/output.csv")
except:
import traceback
traceback.print_exc()
print_help()
| [
"numpy.log10",
"data_cleaning.clean_object_names",
"matplotlib.pyplot.ylabel",
"heimat.reco.mat.FunkSVD",
"time.sleep",
"numpy.argsort",
"numpy.array",
"numpy.linalg.norm",
"copy.deepcopy",
"data_extract.load_matrices",
"sklearn.cluster.DBSCAN",
"portals_urls.show_urls",
"sklearn.decompositi... | [((764, 780), 'cache_manager.load_work', 'cmng.load_work', ([], {}), '()\n', (778, 780), True, 'import cache_manager as cmng\n'), ((1002, 1031), 'pandas.set_option', 'pd.set_option', (['"""max_rows"""', '(50)'], {}), "('max_rows', 50)\n", (1015, 1031), True, 'import pandas as pd\n'), ((1033, 1049), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (1047, 1049), True, 'import numpy as np\n'), ((1088, 1113), 'numpy.linalg.norm', 'np.linalg.norm', (['(xs1 - xs2)'], {}), '(xs1 - xs2)\n', (1102, 1113), True, 'import numpy as np\n'), ((1910, 1937), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1920, 1937), True, 'import matplotlib.pyplot as plt\n'), ((1947, 1972), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1955, 1972), True, 'import matplotlib.pyplot as plt\n'), ((2167, 2177), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2175, 2177), True, 'import matplotlib.pyplot as plt\n'), ((2182, 2193), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2191, 2193), True, 'import matplotlib.pyplot as plt\n'), ((2198, 2211), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2208, 2211), False, 'import time\n'), ((2524, 2547), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'ncomp'}), '(n_components=ncomp)\n', (2527, 2547), False, 'from sklearn.decomposition import PCA\n'), ((3421, 3444), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'ncomp'}), '(n_components=ncomp)\n', (3424, 3444), False, 'from sklearn.decomposition import PCA\n'), ((6030, 6041), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (6038, 6041), True, 'import numpy as np\n'), ((6934, 6946), 'numpy.array', 'np.array', (['Mi'], {}), '(Mi)\n', (6942, 6946), True, 'import numpy as np\n'), ((7709, 7733), 'numpy.array', 'np.array', (['beispiel_mlp_y'], {}), '(beispiel_mlp_y)\n', (7717, 7733), True, 'import numpy as np\n'), ((7891, 7952), 
'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {'labels': 'labels_zuordnung_mlp'}), '(y_true, y_pred, labels=labels_zuordnung_mlp)\n', (7907, 7952), False, 'from sklearn.metrics import confusion_matrix\n'), ((10473, 10489), 'pandas.DataFrame', 'pd.DataFrame', (['Mi'], {}), '(Mi)\n', (10485, 10489), True, 'import pandas as pd\n'), ((11555, 11565), 'data_extract.run', 'dext.run', ([], {}), '()\n', (11563, 11565), True, 'import data_extract as dext\n'), ((11590, 11610), 'data_extract.load_matrices', 'dext.load_matrices', ([], {}), '()\n', (11608, 11610), True, 'import data_extract as dext\n'), ((12469, 12484), 'pandas.DataFrame', 'pd.DataFrame', (['M'], {}), '(M)\n', (12481, 12484), True, 'import pandas as pd\n'), ((12787, 12800), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (12797, 12800), False, 'import time\n'), ((13206, 13229), 'numpy.random.shuffle', 'np.random.shuffle', (['indx'], {}), '(indx)\n', (13223, 13229), True, 'import numpy as np\n'), ((13334, 13371), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.3)'}), '(X, Y, test_size=0.3)\n', (13350, 13371), False, 'from sklearn.model_selection import train_test_split\n'), ((13382, 13450), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'max_iter': 'mlp_iter', 'verbose': '(True)', 'early_stopping': '(False)'}), '(max_iter=mlp_iter, verbose=True, early_stopping=False)\n', (13395, 13450), False, 'from sklearn.neural_network import MLPClassifier\n'), ((15865, 15999), 'pandas.DataFrame', 'pd.DataFrame', (["{'nclusters': number_of_clusters, 'eps': eps_list, 'noise': data_noise_list,\n 'distribution': distribution_data_list}"], {}), "({'nclusters': number_of_clusters, 'eps': eps_list, 'noise':\n data_noise_list, 'distribution': distribution_data_list})\n", (15877, 15999), True, 'import pandas as pd\n'), ((16941, 16991), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'eps_list', 'y': 'number_of_clusters', 's': 
'(5)'}), '(x=eps_list, y=number_of_clusters, s=5)\n', (16952, 16991), True, 'import matplotlib.pyplot as plt\n'), ((17059, 17100), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""expected number of clusters"""'], {}), "('expected number of clusters')\n", (17069, 17100), True, 'import matplotlib.pyplot as plt\n'), ((17105, 17157), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'eps_choice', 'color': '"""k"""', 'linestyle': '"""--"""'}), "(x=eps_choice, color='k', linestyle='--')\n", (17116, 17157), True, 'import matplotlib.pyplot as plt\n'), ((17162, 17218), 'matplotlib.pyplot.title', 'plt.title', (['"""Choice for optimal eps parameter for DBSCAN"""'], {}), "('Choice for optimal eps parameter for DBSCAN')\n", (17171, 17218), True, 'import matplotlib.pyplot as plt\n'), ((17223, 17233), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17231, 17233), True, 'import matplotlib.pyplot as plt\n'), ((17363, 17424), 'sklearn.cluster.DBSCAN', 'sklearn_clustering.DBSCAN', ([], {'eps': 'eps_choice', 'metric': '"""euclidean"""'}), "(eps=eps_choice, metric='euclidean')\n", (17388, 17424), True, 'from sklearn import cluster as sklearn_clustering\n'), ((17672, 17685), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (17682, 17685), False, 'import time\n'), ((18930, 18943), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (18940, 18943), False, 'import time\n'), ((19429, 19549), 'heimat.reco.mat.FunkSVD', 'reco.mat.FunkSVD', (['D.values[:, 1:]'], {'latent_features': 'funksvd_latent_features', 'learning_rate': '(0.0001)', 'iters': 'funksvd_iter'}), '(D.values[:, 1:], latent_features=funksvd_latent_features,\n learning_rate=0.0001, iters=funksvd_iter)\n', (19445, 19549), False, 'from heimat import reco\n'), ((19772, 19808), 'heimat.reco.mat.svd_dekomposition', 'reco.mat.svd_dekomposition', (['D_approx'], {}), '(D_approx)\n', (19798, 19808), False, 'from heimat import reco\n'), ((19947, 19964), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (19959, 
19964), True, 'import pandas as pd\n'), ((20080, 20093), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (20090, 20093), False, 'import time\n'), ((24525, 24559), 'data_cleaning.clean_object_names', 'data_cleaning.clean_object_names', ([], {}), '()\n', (24557, 24559), False, 'import data_cleaning\n'), ((24564, 24605), 'data_cleaning.handle_objects_nan_values', 'data_cleaning.handle_objects_nan_values', ([], {}), '()\n', (24603, 24605), False, 'import data_cleaning\n'), ((27162, 27193), 'pandas.set_option', 'pd.set_option', (['"""max_rows"""', 'None'], {}), "('max_rows', None)\n", (27175, 27193), True, 'import pandas as pd\n'), ((27206, 27232), 'copy.deepcopy', 'deepcopy', (['object_dict_form'], {}), '(object_dict_form)\n', (27214, 27232), False, 'from copy import deepcopy\n'), ((27346, 27375), 'pandas.set_option', 'pd.set_option', (['"""max_rows"""', '(50)'], {}), "('max_rows', 50)\n", (27359, 27375), True, 'import pandas as pd\n'), ((28152, 28172), 'pandas.DataFrame', 'pd.DataFrame', (['record'], {}), '(record)\n', (28164, 28172), True, 'import pandas as pd\n'), ((28277, 28299), 'pandas.concat', 'pd.concat', (['[U, record]'], {}), '([U, record])\n', (28286, 28299), True, 'import pandas as pd\n'), ((29497, 29532), 'numpy.append', 'np.append', (['G', 'record_G_form'], {'axis': '(0)'}), '(G, record_G_form, axis=0)\n', (29506, 29532), True, 'import numpy as np\n'), ((1614, 1638), 'data_extract.get_paper', 'dext.get_paper', (['paper_id'], {}), '(paper_id)\n', (1628, 1638), True, 'import data_extract as dext\n'), ((2146, 2162), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2155, 2162), True, 'import matplotlib.pyplot as plt\n'), ((2967, 3006), 'numpy.sum', 'np.sum', (['pca_u.explained_variance_ratio_'], {}), '(pca_u.explained_variance_ratio_)\n', (2973, 3006), True, 'import numpy as np\n'), ((3799, 3838), 'numpy.sum', 'np.sum', (['pca_a.explained_variance_ratio_'], {}), '(pca_a.explained_variance_ratio_)\n', (3805, 3838), True, 'import numpy 
as np\n'), ((7299, 7315), 'numpy.sum', 'np.sum', (['cm[i, :]'], {}), '(cm[i, :])\n', (7305, 7315), True, 'import numpy as np\n'), ((7330, 7346), 'numpy.sum', 'np.sum', (['cm[:, i]'], {}), '(cm[:, i])\n', (7336, 7346), True, 'import numpy as np\n'), ((9759, 9802), 'data_extract.get_paper', 'dext.get_paper', ([], {'paper_id': 'A.paper_id.iloc[j]'}), '(paper_id=A.paper_id.iloc[j])\n', (9773, 9802), True, 'import data_extract as dext\n'), ((10622, 10639), 'numpy.round', 'np.round', (['t[1]', '(2)'], {}), '(t[1], 2)\n', (10630, 10639), True, 'import numpy as np\n'), ((15045, 15099), 'sklearn.cluster.DBSCAN', 'sklearn_clustering.DBSCAN', ([], {'eps': 'eps', 'metric': '"""euclidean"""'}), "(eps=eps, metric='euclidean')\n", (15070, 15099), True, 'from sklearn import cluster as sklearn_clustering\n'), ((18755, 18766), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (18763, 18766), True, 'import numpy as np\n'), ((19868, 19882), 'numpy.diag', 'np.diag', (['s[:k]'], {}), '(s[:k])\n', (19875, 19882), True, 'import numpy as np\n'), ((23529, 23597), 'pandas.DataFrame', 'pd.DataFrame', (['statistic'], {'columns': "['precision', 'recall', 'f1_score']"}), "(statistic, columns=['precision', 'recall', 'f1_score'])\n", (23541, 23597), True, 'import pandas as pd\n'), ((25686, 25718), 'pickle.dump', 'pickle.dump', (['salamander_model', 'f'], {}), '(salamander_model, f)\n', (25697, 25718), False, 'import pickle\n'), ((26146, 26176), 'os.path.isfile', 'os.path.isfile', (['model_filepath'], {}), '(model_filepath)\n', (26160, 26176), False, 'import os\n'), ((26396, 26410), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (26407, 26410), False, 'import pickle\n'), ((29188, 29209), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (29198, 29209), True, 'import numpy as np\n'), ((30343, 30356), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (30353, 30356), False, 'import time\n'), ((12692, 12706), 'numpy.round', 'np.round', (['p', '(2)'], {}), '(p, 2)\n', (12700, 
12706), True, 'import numpy as np\n'), ((16112, 16124), 'numpy.log10', 'np.log10', (['t2'], {}), '(t2)\n', (16120, 16124), True, 'import numpy as np\n'), ((37531, 37552), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (37550, 37552), False, 'import traceback\n'), ((7778, 7790), 'numpy.argmax', 'np.argmax', (['t'], {}), '(t)\n', (7787, 7790), True, 'import numpy as np\n'), ((14950, 14970), 'pandas.Series', 'pd.Series', (['list_dist'], {}), '(list_dist)\n', (14959, 14970), True, 'import pandas as pd\n'), ((27309, 27328), 'pandas.DataFrame', 'pd.DataFrame', (['xdict'], {}), '(xdict)\n', (27321, 27328), True, 'import pandas as pd\n'), ((28922, 28977), 'numpy.array', 'np.array', (["[object_U_dict_form['OBJID']]"], {'dtype': '"""object"""'}), "([object_U_dict_form['OBJID']], dtype='object')\n", (28930, 28977), True, 'import numpy as np\n'), ((8275, 8297), 'numpy.round', 'np.round', (['precision', '(2)'], {}), '(precision, 2)\n', (8283, 8297), True, 'import numpy as np\n'), ((8299, 8318), 'numpy.round', 'np.round', (['recall', '(2)'], {}), '(recall, 2)\n', (8307, 8318), True, 'import numpy as np\n'), ((8320, 8341), 'numpy.round', 'np.round', (['f1_score', '(2)'], {}), '(f1_score, 2)\n', (8328, 8341), True, 'import numpy as np\n'), ((18454, 18526), 'functools.reduce', 'reduce', (['(lambda a, b: a + b)', '[MAP[objid] for objid in objects_in_cluster]'], {}), '(lambda a, b: a + b, [MAP[objid] for objid in objects_in_cluster])\n', (18460, 18526), False, 'from functools import reduce\n'), ((5717, 5752), 'pandas.Series', 'pd.Series', (['indexes_non_associations'], {}), '(indexes_non_associations)\n', (5726, 5752), True, 'import pandas as pd\n'), ((23682, 23723), 'numpy.round', 'np.round', (['(statistics_df.shape[0] / 100)', '(2)'], {}), '(statistics_df.shape[0] / 100, 2)\n', (23690, 23723), True, 'import numpy as np\n'), ((36027, 36051), 'portals_urls.show_urls', 'portals_urls.show_urls', ([], {}), '()\n', (36049, 36051), False, 'import portals_urls\n'), 
((37218, 37328), 'pandas.DataFrame', 'pd.DataFrame', (['output_table'], {'columns': "['identifier', 'association', 'scores', 'url', 'title', 'description']"}), "(output_table, columns=['identifier', 'association', 'scores',\n 'url', 'title', 'description'])\n", (37230, 37328), True, 'import pandas as pd\n'), ((36891, 36924), 'data_extract.get_paper', 'dext.get_paper', ([], {'paper_id': 'paper_id'}), '(paper_id=paper_id)\n', (36905, 36924), True, 'import data_extract as dext\n')] |
"""
http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp
permalink: https://perma.cc/6Z2N-PFWC
"""
import math
import numpy as np
import random
import torch
import gym
from gym import spaces
from gym.utils import seeding
from ..mcts import Node
from ..game import Action, ActionHistory
from collections import deque
class MountainCarEnv(gym.Env):
    """MountainCar variant instrumented for MuZero-style tree search.

    Differences from the classic gym MountainCar visible in this code:
    - a second terminal region (an ellipse around x ~= -0.52 in (x, v) space)
      with its own reward, so two tasks can be defined by swapping rewards;
    - per-phase initial-state distributions ('train'/'transition'/'test'/'eval')
      taken from ``settings``;
    - per-episode recording of actions, rewards, states, MCTS visit counts and
      root values, from which ``make_target`` builds training targets.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 30
    }
    def __init__(self, settings, seed=1, goal_velocity=0):
        # MCTS statistics filled in by store_search_statistics().
        self.child_visits = []
        self.root_values = []
        self.discount = settings['gamma']
        self.G = 0
        # Number of consecutive (x, v) states stacked into one observation.
        self.k = 3
        self.frames = deque([], maxlen=self.k)
        # Physical ranges and constants of the mountain-car dynamics.
        self.x_range = [-1.2, 0.5]
        self.v_range = [-0.07, 0.07]
        self.goal_position = 0.05
        self.goal_velocity = goal_velocity
        self.force = 0.001
        self.gravity = 0.0025
        self.low = np.array([self.x_range[0], self.v_range[0]], dtype=np.float32)
        self.high = np.array([self.x_range[1], self.v_range[1]], dtype=np.float32)
        self.num_actions = 3
        self.gamma = settings['gamma']
        # Initial-state distributions per phase; init_state is the active one.
        self.init_state = settings['init_state_train']
        self.init_state_train = settings['init_state_train']
        self.init_state_transition = settings['init_state_transition']
        self.init_state_test = settings['init_state_test']
        self.init_state_eval = settings['init_state_eval']
        # Second terminal: an elliptical region around the valley bottom.
        self.reward_terminal2_x_range = [-0.6, -0.44]
        self.reward_terminal2_v_range = [-0.003, 0.003]
        self.terminal2_radius = 0.07
        self.current_state = {}
        self.init = {}
        self.task = 0
        # Rewards for terminal 1 (right hill top) and terminal 2 (valley ellipse).
        self.reward_terminal1 = 4
        self.reward_terminal2 = 2
        self.phase = settings['phase']
        self.flipped_terminal = settings['flipped_terminals']
        self.flipped_actions = settings['flipped_actions']
        # self.states = [self.reset()]
        self.action_space = spaces.Discrete(3)
        self.action_space_size = 3
        self.actions = list(map(lambda i: Action(i), range(self.action_space.n))) #range(3) #
        self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)
        self.seed(seed)
    def seed(self, seed=None):
        """Seed the gym RNG; return the seed wrapped in a list (gym convention)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def set_task(self, task):
        """Select task 0 (A) or any other value (B) by assigning the two
        terminal rewards, optionally with the terminal roles flipped."""
        self.task = task
        if self.flipped_terminal:
            if task == 0: # taskA
                self.reward_terminal1 = 2
                self.reward_terminal2 = 4
            else: # taskB
                self.reward_terminal1 = 2
                self.reward_terminal2 = 1
        else:
            if task == 0: # taskA
                self.reward_terminal1 = 4
                self.reward_terminal2 = 2
            else: # taskB
                self.reward_terminal1 = 1
                self.reward_terminal2 = 2
    def set_phase(self, phase):
        """Switch the active initial-state distribution to the given phase
        ('train' | 'transition' | 'test'); any other value is an error."""
        self.phase = phase
        if phase == 'train':
            self.init_state = self.init_state_train
        elif phase == 'transition':
            self.init_state = self.init_state_transition
        elif phase == 'test':
            self.init_state = self.init_state_test
        else:
            assert False, 'incorrect identifier'
    def set_eval_mode(self, eval):
        """Enter/leave evaluation mode.

        In eval mode the currently higher-paying terminal is normalized to
        reward 1 and the other to 0, and the eval initial-state distribution
        is used; leaving eval mode restores the task/phase settings.
        """
        if eval:
            if self.reward_terminal1 > self.reward_terminal2:
                self.reward_terminal1 = 1
                self.reward_terminal2 = 0
            else:
                self.reward_terminal1 = 0
                self.reward_terminal2 = 1
            self.init_state = self.init_state_eval
        else:
            self.set_task(self.task)
            self.set_phase(self.phase)
    def step(self, action):
        """Advance one physics step; return (obs, reward, term, info).

        ``term`` is 0 (not terminal), 1 (right hill top) or 2 (valley
        ellipse).  Depending on flipped_terminal, the chosen action may be
        overridden by a scripted push near one of the terminals.
        """
        # assert self.actions.contains(action), "%r (%s) invalid" % (action, type(action))
        if self.flipped_actions:
            action = (action + 1) % 3
        v = self.current_state['v']
        x = self.current_state['x']
        if not self.flipped_terminal:
            # Near the right hill top with non-negative speed: force "push right".
            if x >= 0.4 and v >= 0:
                action = 2
        else:
            # if ((x + 0.52) ** 2 + 100 * v ** 2) <= 0.0164:
            # Inside the transition region: push against the current velocity.
            if self.init_state_transition[0][0][0] <= x <= self.init_state_transition[0][0][1] \
                    and abs(v) <= self.init_state_transition[0][1][1]:
                action = 0 if v > 0 else 2
        # Standard mountain-car dynamics: action in {0,1,2} maps to force {-1,0,+1}.
        next_v = v + self.force * (action - 1) - self.gravity * math.cos(3 * x)
        if next_v < self.v_range[0]:
            next_v = self.v_range[0]
        elif next_v > self.v_range[1]:
            next_v = self.v_range[1]
        next_x = x + next_v
        term = 0
        if next_x <= self.x_range[0]:
            # Left wall: inelastic stop, not terminal.
            next_x = self.x_range[0]
            next_v = 0
        elif next_x >= self.x_range[1]:
            # Right hill top: terminal 1.
            next_x = self.x_range[1]
            next_v = 0
            term = 1
        # elif self.reward_terminal2_x_range[0] <= next_x <= self.reward_terminal2_x_range[1]\
        #             and abs(next_v) <= self.reward_terminal2_v_range[1]:
        elif ((next_x + 0.52) ** 2 + 100 * next_v ** 2) <= (self.terminal2_radius)**2:
            # Inside the valley ellipse: terminal 2.
            next_v = 0
            term = 2
        self.current_state['terminal'] = term
        self.current_state['x'] = next_x
        self.current_state['v'] = next_v
        self.states += [np.array([self.current_state['x'], self.current_state['v']])]
        reward = 0
        if self.current_state['terminal'] == 1:
            reward = self.reward_terminal1
        if self.current_state['terminal'] == 2:
            reward = self.reward_terminal2
        self.rewards.append(reward)
        self.history.append(action)
        return self.obs(len(self.rewards)), reward, term, {}
    def reset(self):
        """Start a new episode: sample an initial (x, v) outside the valley
        ellipse from the active distribution and return the first observation."""
        self.history = []
        self.rewards = []
        self.states = []
        reset = False
        x, v = 0, 0
        p = random.uniform(0, 1)
        while not reset:
            if len(self.init_state) == 1:
                x = random.uniform(self.init_state[0][0][0], self.init_state[0][0][1])
                v = random.uniform(self.init_state[0][1][0], self.init_state[0][1][1])
            elif len(self.init_state) == 2: # a mix distribution for initial states
                if p < 0.5:
                    x = random.uniform(self.init_state[0][0][0], self.init_state[0][0][1])
                    v = random.uniform(self.init_state[0][1][0], self.init_state[0][1][1])
                else:
                    x = random.uniform(self.init_state[1][0][0], self.init_state[1][0][1])
                    v = random.uniform(self.init_state[1][1][0], self.init_state[1][1][1])
            # Reject samples that start inside the terminal-2 ellipse.
            if ((x + 0.5234) ** 2 + 100 * v ** 2) >= (self.terminal2_radius)**2:
                # if (x <= self.reward_terminal2_x_range[0] or x >= self.reward_terminal2_x_range[1]) or \
                #         (v <= self.reward_terminal2_v_range[0] or v >= self.reward_terminal2_v_range[1]):
                reset = True
        self.init['x'] = x
        self.init['v'] = v
        self.current_state['x'] = x
        self.current_state['v'] = v
        self.current_state['terminal'] = 0
        # Seed the frame stack with k copies of the initial state.
        for _ in range(self.k):
            self.states.append(np.array([self.current_state['x'], self.current_state['v']]))
        return self.obs(0)
    def _height(self, xs):
        """Height profile of the mountain at position(s) xs."""
        return np.sin(3 * xs) * .45 + .55
    def get_keys_to_action(self):
        return {(): 1, (276,): 0, (275,): 2, (275, 276): 1} # control with left and right arrow keys
    def obs(self, i: int):
        """Compute the state of the game.

        Returns the k states starting at index i, flattened into one array.
        """
        # return self.states[state_index]
        frames = self.states[i:i + self.k]
        return np.array(frames).flatten()
    def legal_actions(self):
        """Return the legal actions available at this instant."""
        return self.actions
    def action_history(self):
        """Return the actions executed inside the search."""
        return ActionHistory(self.history, 3)
    def to_play(self):
        """Return the current player."""
        return 0
    def store_search_statistics(self, root: Node):
        """After each MCTS run, store the statistics generated by the search."""
        sum_visits = sum(child.visit_count for child in root.children.values())
        self.child_visits.append([
            root.children[a].visit_count / sum_visits if a in root.children else 0
            for a in self.actions
        ])
        # Root value is clipped at 0 before being recorded.
        self.root_values.append(np.maximum(0, root.value()))
    def make_target(self, state_index: int, num_unroll_steps: int, td_steps: int, model=None, config=None):
        """Build (values, rewards, policies) targets for unrolled training steps.

        If ``model`` is given, fresher bootstrapped values are computed with it
        (MuZero Reanalyze), and with probability revisit_policy_search_rate the
        stored policy is replaced by a re-run of MCTS under the current model.
        """
        # The value target is the discounted root value of the search tree N steps into the future, plus
        # the discounted sum of all rewards until then.
        target_values, target_rewards, target_policies = [], [], []
        for current_index in range(state_index, state_index + num_unroll_steps + 1):
            bootstrap_index = current_index + td_steps
            if bootstrap_index < len(self.root_values):
                if model is None:
                    value = self.root_values[bootstrap_index] * self.discount ** td_steps
                else:
                    # Reference : Appendix H => Reanalyze
                    # Note : a target network based on recent parameters is used to provide a fresher,
                    # stable n-step bootstrapped target for the value function
                    obs = self.obs(bootstrap_index)
                    obs = torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
                    network_output = model.initial_inference(obs)
                    value = network_output.value.data.cpu().item() * self.discount ** td_steps
            else:
                value = 0
            # Add the discounted rewards between current_index and the bootstrap point.
            for i, reward in enumerate(self.rewards[current_index:bootstrap_index]):
                value += reward * self.discount ** i
            if current_index < len(self.root_values):
                target_values.append(value)
                target_rewards.append(self.rewards[current_index])
                # Reference : Appendix H => Reanalyze
                # Note : MuZero Reanalyze revisits its past time-steps and re-executes its search using the
                # latest model parameters, potentially resulting in a better quality policy than the original search.
                # This fresh policy is used as the policy target for 80% of updates during MuZero training
                if model is not None and random.random() <= config.revisit_policy_search_rate:
                    from ..mcts import MCTS, Node
                    root = Node(0)
                    obs = self.obs(current_index)
                    obs = torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
                    network_output = model.initial_inference(obs)
                    root.expand(self.to_play(), self.legal_actions(), network_output)
                    MCTS(config).run(root, self.action_history(), model)
                    self.store_search_statistics(root)
                target_policies.append(self.child_visits[current_index])
            else:
                # States past the end of games are treated as absorbing states.
                target_values.append(0)
                target_rewards.append(0)
                # Note: Target policy is set to 0 so that no policy loss is calculated for them
                target_policies.append([0 for _ in range(len(self.child_visits[0]))])
        return target_values, target_rewards, target_policies
    def terminal(self):
        """Return the terminal flag of the current state (0, 1 or 2)."""
        return self.current_state['terminal']
    def __len__(self):
        # Episode length == number of actions taken so far.
        return len(self.history)
| [
"random.uniform",
"collections.deque",
"gym.spaces.Discrete",
"gym.spaces.Box",
"math.cos",
"numpy.array",
"torch.tensor",
"numpy.sin",
"random.random",
"gym.utils.seeding.np_random"
] | [((696, 720), 'collections.deque', 'deque', (['[]'], {'maxlen': 'self.k'}), '([], maxlen=self.k)\n', (701, 720), False, 'from collections import deque\n'), ((950, 1012), 'numpy.array', 'np.array', (['[self.x_range[0], self.v_range[0]]'], {'dtype': 'np.float32'}), '([self.x_range[0], self.v_range[0]], dtype=np.float32)\n', (958, 1012), True, 'import numpy as np\n'), ((1033, 1095), 'numpy.array', 'np.array', (['[self.x_range[1], self.v_range[1]]'], {'dtype': 'np.float32'}), '([self.x_range[1], self.v_range[1]], dtype=np.float32)\n', (1041, 1095), True, 'import numpy as np\n'), ((1993, 2011), 'gym.spaces.Discrete', 'spaces.Discrete', (['(3)'], {}), '(3)\n', (2008, 2011), False, 'from gym import spaces\n'), ((2174, 2223), 'gym.spaces.Box', 'spaces.Box', (['self.low', 'self.high'], {'dtype': 'np.float32'}), '(self.low, self.high, dtype=np.float32)\n', (2184, 2223), False, 'from gym import spaces\n'), ((2312, 2335), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (2329, 2335), False, 'from gym.utils import seeding\n'), ((5894, 5914), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (5908, 5914), False, 'import random\n'), ((5342, 5402), 'numpy.array', 'np.array', (["[self.current_state['x'], self.current_state['v']]"], {}), "([self.current_state['x'], self.current_state['v']])\n", (5350, 5402), True, 'import numpy as np\n'), ((4460, 4475), 'math.cos', 'math.cos', (['(3 * x)'], {}), '(3 * x)\n', (4468, 4475), False, 'import math\n'), ((6002, 6068), 'random.uniform', 'random.uniform', (['self.init_state[0][0][0]', 'self.init_state[0][0][1]'], {}), '(self.init_state[0][0][0], self.init_state[0][0][1])\n', (6016, 6068), False, 'import random\n'), ((6089, 6155), 'random.uniform', 'random.uniform', (['self.init_state[0][1][0]', 'self.init_state[0][1][1]'], {}), '(self.init_state[0][1][0], self.init_state[0][1][1])\n', (6103, 6155), False, 'import random\n'), ((7210, 7270), 'numpy.array', 'np.array', 
(["[self.current_state['x'], self.current_state['v']]"], {}), "([self.current_state['x'], self.current_state['v']])\n", (7218, 7270), True, 'import numpy as np\n'), ((7342, 7356), 'numpy.sin', 'np.sin', (['(3 * xs)'], {}), '(3 * xs)\n', (7348, 7356), True, 'import numpy as np\n'), ((7680, 7696), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (7688, 7696), True, 'import numpy as np\n'), ((6293, 6359), 'random.uniform', 'random.uniform', (['self.init_state[0][0][0]', 'self.init_state[0][0][1]'], {}), '(self.init_state[0][0][0], self.init_state[0][0][1])\n', (6307, 6359), False, 'import random\n'), ((6384, 6450), 'random.uniform', 'random.uniform', (['self.init_state[0][1][0]', 'self.init_state[0][1][1]'], {}), '(self.init_state[0][1][0], self.init_state[0][1][1])\n', (6398, 6450), False, 'import random\n'), ((6497, 6563), 'random.uniform', 'random.uniform', (['self.init_state[1][0][0]', 'self.init_state[1][0][1]'], {}), '(self.init_state[1][0][0], self.init_state[1][0][1])\n', (6511, 6563), False, 'import random\n'), ((6588, 6654), 'random.uniform', 'random.uniform', (['self.init_state[1][1][0]', 'self.init_state[1][1][1]'], {}), '(self.init_state[1][1][0], self.init_state[1][1][1])\n', (6602, 6654), False, 'import random\n'), ((10479, 10494), 'random.random', 'random.random', ([], {}), '()\n', (10492, 10494), False, 'import random\n'), ((9488, 9526), 'torch.tensor', 'torch.tensor', (['obs'], {'dtype': 'torch.float32'}), '(obs, dtype=torch.float32)\n', (9500, 9526), False, 'import torch\n'), ((10694, 10732), 'torch.tensor', 'torch.tensor', (['obs'], {'dtype': 'torch.float32'}), '(obs, dtype=torch.float32)\n', (10706, 10732), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from pathlib import Path
import numpy as np
import pytest
from pymatgen.electronic_structure.core import Spin
from vise.analyzer.band_edge_properties import (
BandEdge, BandEdgeProperties, is_band_gap)
from vise.defaults import defaults
from vise.tests.helpers.assertion import assert_msonable
# Directory containing this test module (handy for locating fixture files).
parent_dir = Path(__file__).parent
# Two dummy k-points shared by all BandEdgeProperties tests below.
actual_kpt = [[10.1, 10.2, 10.3], [10.4, 10.5, 10.6]]
# Reference triple (band-gap dict, vbm_info, cbm_info) for a metallic system.
# NOTE(review): not referenced by the visible tests — possibly kept for reference.
expected_metal = \
    {'energies': 0.0, 'direct': None, 'transition': None}, None, None
def test_band_edge_msonable():
    """A fully specified BandEdge must round-trip through the MSONable protocol."""
    edge = BandEdge(
        energy=0.0,
        spin=Spin.up,
        band_index=0,
        kpoint_index=1,
        kpoint_coords=[0.0, 0.0, 0.0],
    )
    assert_msonable(edge)
def test_band_edge_equal():
    """is_direct holds only for edges at the same k-point in the same spin channel."""
    reference = BandEdge(0.0, Spin.up, band_index=0, kpoint_coords=[0.0, 0.0, 0.0])
    same_kpoint = BandEdge(0.0, Spin.up, band_index=0, kpoint_coords=[0.0, 0.0, 0.0])
    shifted_kpoint = BandEdge(0.0, Spin.up, band_index=0, kpoint_coords=[0.1, 0.0, 0.0])
    opposite_spin = BandEdge(0.0, Spin.down, band_index=0, kpoint_coords=[0.0, 0.0, 0.0])
    assert reference.is_direct(same_kpoint) is True
    assert reference.is_direct(shifted_kpoint) is False
    assert reference.is_direct(opposite_spin) is False
def test_metal_judged_from_non_uniform_band_occupation():
    """Partially filled bands make the system a metal: no gap or edge info."""
    properties = BandEdgeProperties(
        eigenvalues={Spin.up: np.array([[0.0, 0.1, 0.2], [0.2, 0.3, 0.4]])},
        nelect=4.0,
        magnetization=0.0,
        kpoints=actual_kpt)
    for attr in ("band_gap", "is_direct", "vbm_info", "cbm_info"):
        assert getattr(properties, attr) is None
def test_metal_judged_from_fractional_nelect():
    """An electron count fractional beyond the criterion flags a metal."""
    criterion = 0.1
    properties = BandEdgeProperties(
        eigenvalues={Spin.up: np.array([[0.0, 1.0, 2.0], [0.1, 1.1, 2.1]])},
        nelect=4.0 + criterion + 1e-5,
        magnetization=0.0,
        kpoints=actual_kpt,
        integer_criterion=criterion)
    for attr in ("band_gap", "is_direct", "vbm_info", "cbm_info"):
        assert getattr(properties, attr) is None
def test_metal_judged_from_fractional_magnetization():
    """A non-integer total magnetization beyond the criterion flags a metal."""
    criterion = 0.1
    up_bands = np.array([[0.0, 1.0, 10.0], [0.0, 1.1, 10.0]])
    down_bands = np.array([[0.0, 1.4, 10.0], [0.0, 1.5, 10.0]])
    properties = BandEdgeProperties(
        eigenvalues={Spin.up: up_bands, Spin.down: down_bands},
        nelect=3.0,
        magnetization=1.0 + criterion + 1e-5,
        kpoints=actual_kpt,
        integer_criterion=criterion)
    for attr in ("band_gap", "is_direct", "vbm_info", "cbm_info"):
        assert getattr(properties, attr) is None
def test_nonmagnetic_insulator():
    """Near-integer nelect within the criterion yields a proper indirect gap."""
    criterion = 0.1
    # k-point indices run fast in the eigenvalue arrays.
    properties = BandEdgeProperties(
        eigenvalues={Spin.up: np.array([[0.0, 1.0, 2.0], [0.1, 1.1, 2.1]])},
        nelect=4.0 + criterion - 1e-5,
        magnetization=0.0,
        kpoints=actual_kpt,
        integer_criterion=criterion)
    assert pytest.approx(properties.band_gap) == 0.90
    assert properties.is_direct is False
    vbm, cbm = properties.vbm_info, properties.cbm_info
    assert pytest.approx(vbm.energy) == 1.1
    assert pytest.approx(cbm.energy) == 2.0
    assert vbm.spin == Spin.up
    assert cbm.spin == Spin.up
    assert vbm.band_index == 1
    assert cbm.band_index == 2
    assert vbm.kpoint_coords == [10.4, 10.5, 10.6]
    assert cbm.kpoint_coords == [10.1, 10.2, 10.3]
@pytest.fixture
def band_edge():
    """Spin-polarized edge data: VBM in the up channel, CBM in the down channel."""
    up_bands = np.array([[0.0, 1.0, 10.0], [0.0, 1.1, 10.0]])
    down_bands = np.array([[0.0, 1.4, 10.0], [0.0, 1.5, 10.0]])
    return BandEdgeProperties(
        eigenvalues={Spin.up: up_bands, Spin.down: down_bands},
        nelect=3.0,
        magnetization=1.0,
        kpoints=actual_kpt)
def test_magnetic_insulator(band_edge):
    """The gap of the fixture spans the up-channel VBM to the down-channel CBM."""
    assert pytest.approx(band_edge.band_gap) == 0.3
    assert band_edge.is_direct is False
    vbm, cbm = band_edge.vbm_info, band_edge.cbm_info
    assert pytest.approx(vbm.energy) == 1.1
    assert pytest.approx(cbm.energy) == 1.4
    assert vbm.spin == Spin.up
    assert cbm.spin == Spin.down
    assert vbm.band_index == 1
    assert cbm.band_index == 1
    assert vbm.kpoint_coords == [10.4, 10.5, 10.6]
    assert cbm.kpoint_coords == [10.1, 10.2, 10.3]
def test_is_metal():
    """A metallic system sets is_metal and clears every edge-related attribute."""
    metal = BandEdgeProperties(
        eigenvalues={Spin.up: np.array([[0, 1, 2], [0, 3, 4]])},
        nelect=4.0,
        magnetization=0.0,
        kpoints=actual_kpt)
    assert metal.is_metal is True
    for attr in ("vbm_info", "cbm_info", "vbm_cbm", "band_gap"):
        assert getattr(metal, attr) is None
    assert repr(metal) == "Metal"
def test_repr(band_edge):
    """__repr__ reports the gap and both edge positions in a fixed layout."""
    expected = """Band gap 0.300 eV
VBM energy position: 1.1, spin: up, band index 1, k-point index 1, k-point coords 10.400 10.500 10.600
CBM energy position: 1.4, spin: down, band index 1, k-point index 0, k-point coords 10.100 10.200 10.300"""
    actual = repr(band_edge)
    assert actual == expected
def test_is_band_gap():
    """Gaps are recognized only when they exceed the configured criterion."""
    criterion = defaults.band_gap_criterion
    assert is_band_gap(None, [1.0, 1.0 + criterion + 1e-5]) is True
    assert is_band_gap(None, [1.0, 1.0 + criterion - 1e-5]) is False
    assert is_band_gap(None, None) is False
| [
"pytest.approx",
"vise.analyzer.band_edge_properties.is_band_gap",
"vise.analyzer.band_edge_properties.BandEdgeProperties",
"pathlib.Path",
"vise.analyzer.band_edge_properties.BandEdge",
"numpy.array"
] | [((410, 424), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (414, 424), False, 'from pathlib import Path\n'), ((795, 862), 'vise.analyzer.band_edge_properties.BandEdge', 'BandEdge', (['(0.0)', 'Spin.up'], {'band_index': '(0)', 'kpoint_coords': '[0.0, 0.0, 0.0]'}), '(0.0, Spin.up, band_index=0, kpoint_coords=[0.0, 0.0, 0.0])\n', (803, 862), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((874, 941), 'vise.analyzer.band_edge_properties.BandEdge', 'BandEdge', (['(0.0)', 'Spin.up'], {'band_index': '(0)', 'kpoint_coords': '[0.0, 0.0, 0.0]'}), '(0.0, Spin.up, band_index=0, kpoint_coords=[0.0, 0.0, 0.0])\n', (882, 941), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((953, 1020), 'vise.analyzer.band_edge_properties.BandEdge', 'BandEdge', (['(0.0)', 'Spin.up'], {'band_index': '(0)', 'kpoint_coords': '[0.1, 0.0, 0.0]'}), '(0.0, Spin.up, band_index=0, kpoint_coords=[0.1, 0.0, 0.0])\n', (961, 1020), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((1032, 1101), 'vise.analyzer.band_edge_properties.BandEdge', 'BandEdge', (['(0.0)', 'Spin.down'], {'band_index': '(0)', 'kpoint_coords': '[0.0, 0.0, 0.0]'}), '(0.0, Spin.down, band_index=0, kpoint_coords=[0.0, 0.0, 0.0])\n', (1040, 1101), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((1362, 1460), 'vise.analyzer.band_edge_properties.BandEdgeProperties', 'BandEdgeProperties', ([], {'eigenvalues': 'eigenvalues', 'nelect': '(4.0)', 'magnetization': '(0.0)', 'kpoints': 'actual_kpt'}), '(eigenvalues=eigenvalues, nelect=4.0, magnetization=0.0,\n kpoints=actual_kpt)\n', (1380, 1460), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((1884, 2033), 'vise.analyzer.band_edge_properties.BandEdgeProperties', 'BandEdgeProperties', ([], {'eigenvalues': 
'eigenvalues', 'nelect': '(4.0 + integer_criterion + 1e-05)', 'magnetization': '(0.0)', 'kpoints': 'actual_kpt', 'integer_criterion': '(0.1)'}), '(eigenvalues=eigenvalues, nelect=4.0 + integer_criterion +\n 1e-05, magnetization=0.0, kpoints=actual_kpt, integer_criterion=0.1)\n', (1902, 2033), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((2580, 2729), 'vise.analyzer.band_edge_properties.BandEdgeProperties', 'BandEdgeProperties', ([], {'eigenvalues': 'eigenvalues', 'nelect': '(3.0)', 'magnetization': '(1.0 + integer_criterion + 1e-05)', 'kpoints': 'actual_kpt', 'integer_criterion': '(0.1)'}), '(eigenvalues=eigenvalues, nelect=3.0, magnetization=1.0 +\n integer_criterion + 1e-05, kpoints=actual_kpt, integer_criterion=0.1)\n', (2598, 2729), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((3205, 3354), 'vise.analyzer.band_edge_properties.BandEdgeProperties', 'BandEdgeProperties', ([], {'eigenvalues': 'eigenvalues', 'nelect': '(4.0 + integer_criterion - 1e-05)', 'magnetization': '(0.0)', 'kpoints': 'actual_kpt', 'integer_criterion': '(0.1)'}), '(eigenvalues=eigenvalues, nelect=4.0 + integer_criterion -\n 1e-05, magnetization=0.0, kpoints=actual_kpt, integer_criterion=0.1)\n', (3223, 3354), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((4225, 4323), 'vise.analyzer.band_edge_properties.BandEdgeProperties', 'BandEdgeProperties', ([], {'eigenvalues': 'eigenvalues', 'nelect': '(3.0)', 'magnetization': '(1.0)', 'kpoints': 'actual_kpt'}), '(eigenvalues=eigenvalues, nelect=3.0, magnetization=1.0,\n kpoints=actual_kpt)\n', (4243, 4323), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((630, 729), 'vise.analyzer.band_edge_properties.BandEdge', 'BandEdge', ([], {'energy': '(0.0)', 'spin': 'Spin.up', 'band_index': '(0)', 'kpoint_index': '(1)', 'kpoint_coords': 
'[0.0, 0.0, 0.0]'}), '(energy=0.0, spin=Spin.up, band_index=0, kpoint_index=1,\n kpoint_coords=[0.0, 0.0, 0.0])\n', (638, 729), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((1300, 1344), 'numpy.array', 'np.array', (['[[0.0, 0.1, 0.2], [0.2, 0.3, 0.4]]'], {}), '([[0.0, 0.1, 0.2], [0.2, 0.3, 0.4]])\n', (1308, 1344), True, 'import numpy as np\n'), ((1794, 1838), 'numpy.array', 'np.array', (['[[0.0, 1.0, 2.0], [0.1, 1.1, 2.1]]'], {}), '([[0.0, 1.0, 2.0], [0.1, 1.1, 2.1]])\n', (1802, 1838), True, 'import numpy as np\n'), ((2410, 2456), 'numpy.array', 'np.array', (['[[0.0, 1.0, 10.0], [0.0, 1.1, 10.0]]'], {}), '([[0.0, 1.0, 10.0], [0.0, 1.1, 10.0]])\n', (2418, 2456), True, 'import numpy as np\n'), ((2488, 2534), 'numpy.array', 'np.array', (['[[0.0, 1.4, 10.0], [0.0, 1.5, 10.0]]'], {}), '([[0.0, 1.4, 10.0], [0.0, 1.5, 10.0]])\n', (2496, 2534), True, 'import numpy as np\n'), ((3115, 3159), 'numpy.array', 'np.array', (['[[0.0, 1.0, 2.0], [0.1, 1.1, 2.1]]'], {}), '([[0.0, 1.0, 2.0], [0.1, 1.1, 2.1]])\n', (3123, 3159), True, 'import numpy as np\n'), ((3502, 3535), 'pytest.approx', 'pytest.approx', (['band_edge.band_gap'], {}), '(band_edge.band_gap)\n', (3515, 3535), False, 'import pytest\n'), ((3597, 3637), 'pytest.approx', 'pytest.approx', (['band_edge.vbm_info.energy'], {}), '(band_edge.vbm_info.energy)\n', (3610, 3637), False, 'import pytest\n'), ((3656, 3696), 'pytest.approx', 'pytest.approx', (['band_edge.cbm_info.energy'], {}), '(band_edge.cbm_info.energy)\n', (3669, 3696), False, 'import pytest\n'), ((4088, 4134), 'numpy.array', 'np.array', (['[[0.0, 1.0, 10.0], [0.0, 1.1, 10.0]]'], {}), '([[0.0, 1.0, 10.0], [0.0, 1.1, 10.0]])\n', (4096, 4134), True, 'import numpy as np\n'), ((4166, 4212), 'numpy.array', 'np.array', (['[[0.0, 1.4, 10.0], [0.0, 1.5, 10.0]]'], {}), '([[0.0, 1.4, 10.0], [0.0, 1.5, 10.0]])\n', (4174, 4212), True, 'import numpy as np\n'), ((4464, 4497), 'pytest.approx', 'pytest.approx', 
(['band_edge.band_gap'], {}), '(band_edge.band_gap)\n', (4477, 4497), False, 'import pytest\n'), ((4558, 4598), 'pytest.approx', 'pytest.approx', (['band_edge.vbm_info.energy'], {}), '(band_edge.vbm_info.energy)\n', (4571, 4598), False, 'import pytest\n'), ((4617, 4657), 'pytest.approx', 'pytest.approx', (['band_edge.cbm_info.energy'], {}), '(band_edge.cbm_info.energy)\n', (4630, 4657), False, 'import pytest\n'), ((5807, 5874), 'vise.analyzer.band_edge_properties.is_band_gap', 'is_band_gap', (['None', '[1.0, 1.0 + defaults.band_gap_criterion + 1e-05]'], {}), '(None, [1.0, 1.0 + defaults.band_gap_criterion + 1e-05])\n', (5818, 5874), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((5893, 5960), 'vise.analyzer.band_edge_properties.is_band_gap', 'is_band_gap', (['None', '[1.0, 1.0 + defaults.band_gap_criterion - 1e-05]'], {}), '(None, [1.0, 1.0 + defaults.band_gap_criterion - 1e-05])\n', (5904, 5960), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((5980, 6003), 'vise.analyzer.band_edge_properties.is_band_gap', 'is_band_gap', (['None', 'None'], {}), '(None, None)\n', (5991, 6003), False, 'from vise.analyzer.band_edge_properties import BandEdge, BandEdgeProperties, is_band_gap\n'), ((5081, 5113), 'numpy.array', 'np.array', (['[[0, 1, 2], [0, 3, 4]]'], {}), '([[0, 1, 2], [0, 3, 4]])\n', (5089, 5113), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from daproperties.stats import rv_discrete
class TestRVDiscrete(unittest.TestCase):
    """Unit tests for ``daproperties.stats.rv_discrete``."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_xk_and_pk_dimensions(self):
        """len(xk) != len(pk) is invalid and must raise ValueError."""
        xk = [(0, 1), (1, 0), (1, 1)]
        pk = [0.6, 0.4]
        with self.assertRaises(ValueError):
            rv_discrete(xk, pk)
    def test_prob_of_tuple(self):
        """_prob_of looks up the probability of a tuple-valued support point."""
        xk = [(0, 1), (1, 0)]
        pk = [0.6, 0.4]
        rv = rv_discrete(xk, pk)
        p = rv._prob_of((0, 1))
        self.assertEqual(p, 0.6)
    def test_prob_of_scalar(self):
        """_prob_of accepts a bare scalar for one-dimensional support points."""
        xk = [(0,), (1,)]
        pk = [0.6, 0.4]
        rv = rv_discrete(xk, pk)
        p = rv._prob_of(0)
        self.assertEqual(p, 0.6)
    def test_prob_of_list(self):
        """_prob_of accepts a one-element list for 1-d support points."""
        xk = [(0,), (1,)]
        pk = [0.6, 0.4]
        rv = rv_discrete(xk, pk)
        p = rv._prob_of([0])
        # Fixed: assertIs compared float *identity*; it only passed because
        # CPython deduplicates the 0.6 literals within one code object.
        self.assertEqual(p, 0.6)
    def test_prob_of_invalid(self):
        """Values outside the support map to the configured badvalue sentinel."""
        xk = [(0,), (1,)]
        pk = [0.6, 0.4]
        rv = rv_discrete(xk, pk)
        p = rv._prob_of(3)
        # Identity check is intentional: the sentinel object itself is returned.
        self.assertIs(p, rv.badvalue)
    def test_pmf_tuple(self):
        """pmf evaluates tuple samples; unknown points get badvalue (0 here)."""
        xk = [(0, 1), (1, 0)]
        pk = [0.6, 0.4]
        rv = rv_discrete(xk, pk, badvalue=0)
        x = [(0, 1), (0, 1), (1, 0), (0, 0)]
        P_is = rv.pmf(x)
        P_target = np.array([0.6, 0.6, 0.4, 0])
        self.assertTrue(np.array_equal(P_is, P_target), f"P_is: {P_is} is not P_target: {P_target}.")
    def test_pmf_array(self):
        """pmf also accepts list-valued (non-tuple) sample points."""
        xk = [(0, 1), (1, 0)]
        pk = [0.6, 0.4]
        rv = rv_discrete(xk, pk, badvalue=0)
        x = [[0, 1], [0, 1], [1, 0], [0, 0]]
        P_is = rv.pmf(x)
        P_target = np.array([0.6, 0.6, 0.4, 0])
        self.assertTrue(np.array_equal(P_is, P_target), f"P_is: {P_is} is not P_target: {P_target}.")
    def test_pmf_scalar(self):
        """pmf broadcasts over a sequence of scalars for 1-d support."""
        xk = [(0,), (1,)]
        pk = [0.6, 0.4]
        rv = rv_discrete(xk, pk, badvalue=0)
        x = [0, 0, 1, 1, 2]
        P_is = rv.pmf(x)
        P_target = np.array([0.6, 0.6, 0.4, 0.4, 0])
        self.assertTrue(np.array_equal(P_is, P_target), f"P_is: {P_is} is not P_target: {P_target}.")
    def test_score_samples(self):
        """score_samples returns the log of the pmf values."""
        xk = [(0,), (1,)]
        pk = [0.6, 0.4]
        rv = rv_discrete(xk, pk, badvalue=0)
        x = [0, 0, 1, 1, 2]
        score_is = rv.score_samples(x)
        score_target = np.log(np.array([0.6, 0.6, 0.4, 0.4, 0]))
        self.assertTrue(np.array_equal(score_is, score_target), f"P_is: {score_is} is not P_target: {score_target}.")
    def test_common_coverage(self):
        """_common_coverage yields the shared support and is symmetric."""
        rv1 = rv_discrete([(0,), (1,), (2,)], pk=[0.3, 0.3, 0.4])
        rv2 = rv_discrete([(0,), (1,), (4,)], pk=[0.2, 0.4, 0.4])
        coverage_is = rv1._common_coverage(rv2)
        coverage_reverse = rv2._common_coverage(rv1)
        coverage_target = np.array([(0,), (1,)]).reshape(-1)
        self.assertTrue(np.array_equal(coverage_is, coverage_target), f"covarage_is {coverage_is} not equal to coverage_target {coverage_target}.")
        self.assertTrue(np.array_equal(coverage_is, coverage_reverse), f"covarage_is {coverage_is} not equal to coverage_target {coverage_reverse}.")
    def test_divergence(self):
        """divergence(rv2) equals divergence_from_distribution on shared support."""
        rv1 = rv_discrete([(0,), (1,), (2,)], pk=[0.3, 0.3, 0.4])
        rv2 = rv_discrete([(0,), (1,), (4,)], pk=[0.2, 0.4, 0.4])
        pd_1 = rv1.score_samples([(0,), (1,)])
        pd_2 = rv2.score_samples([(0,), (1,)])
        divergence_target = rv1.divergence_from_distribution(pd_1, pd_2)
        divergence_is = rv1.divergence(rv2)
        self.assertEqual(divergence_is, divergence_target)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.array",
"daproperties.stats.rv_discrete",
"numpy.array_equal"
] | [((3686, 3701), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3699, 3701), False, 'import unittest\n'), ((507, 526), 'daproperties.stats.rv_discrete', 'rv_discrete', (['xk', 'pk'], {}), '(xk, pk)\n', (518, 526), False, 'from daproperties.stats import rv_discrete\n'), ((693, 712), 'daproperties.stats.rv_discrete', 'rv_discrete', (['xk', 'pk'], {}), '(xk, pk)\n', (704, 712), False, 'from daproperties.stats import rv_discrete\n'), ((873, 892), 'daproperties.stats.rv_discrete', 'rv_discrete', (['xk', 'pk'], {}), '(xk, pk)\n', (884, 892), False, 'from daproperties.stats import rv_discrete\n'), ((1055, 1074), 'daproperties.stats.rv_discrete', 'rv_discrete', (['xk', 'pk'], {}), '(xk, pk)\n', (1066, 1074), False, 'from daproperties.stats import rv_discrete\n'), ((1240, 1271), 'daproperties.stats.rv_discrete', 'rv_discrete', (['xk', 'pk'], {'badvalue': '(0)'}), '(xk, pk, badvalue=0)\n', (1251, 1271), False, 'from daproperties.stats import rv_discrete\n'), ((1354, 1382), 'numpy.array', 'np.array', (['[0.6, 0.6, 0.4, 0]'], {}), '([0.6, 0.6, 0.4, 0])\n', (1362, 1382), True, 'import numpy as np\n'), ((1594, 1625), 'daproperties.stats.rv_discrete', 'rv_discrete', (['xk', 'pk'], {'badvalue': '(0)'}), '(xk, pk, badvalue=0)\n', (1605, 1625), False, 'from daproperties.stats import rv_discrete\n'), ((1708, 1736), 'numpy.array', 'np.array', (['[0.6, 0.6, 0.4, 0]'], {}), '([0.6, 0.6, 0.4, 0])\n', (1716, 1736), True, 'import numpy as np\n'), ((1947, 1978), 'daproperties.stats.rv_discrete', 'rv_discrete', (['xk', 'pk'], {'badvalue': '(0)'}), '(xk, pk, badvalue=0)\n', (1958, 1978), False, 'from daproperties.stats import rv_discrete\n'), ((2047, 2080), 'numpy.array', 'np.array', (['[0.6, 0.6, 0.4, 0.4, 0]'], {}), '([0.6, 0.6, 0.4, 0.4, 0])\n', (2055, 2080), True, 'import numpy as np\n'), ((2293, 2324), 'daproperties.stats.rv_discrete', 'rv_discrete', (['xk', 'pk'], {'badvalue': '(0)'}), '(xk, pk, badvalue=0)\n', (2304, 2324), False, 'from daproperties.stats import 
rv_discrete\n'), ((2635, 2686), 'daproperties.stats.rv_discrete', 'rv_discrete', (['[(0,), (1,), (2,)]'], {'pk': '[0.3, 0.3, 0.4]'}), '([(0,), (1,), (2,)], pk=[0.3, 0.3, 0.4])\n', (2646, 2686), False, 'from daproperties.stats import rv_discrete\n'), ((2699, 2750), 'daproperties.stats.rv_discrete', 'rv_discrete', (['[(0,), (1,), (4,)]'], {'pk': '[0.2, 0.4, 0.4]'}), '([(0,), (1,), (4,)], pk=[0.2, 0.4, 0.4])\n', (2710, 2750), False, 'from daproperties.stats import rv_discrete\n'), ((3256, 3307), 'daproperties.stats.rv_discrete', 'rv_discrete', (['[(0,), (1,), (2,)]'], {'pk': '[0.3, 0.3, 0.4]'}), '([(0,), (1,), (2,)], pk=[0.3, 0.3, 0.4])\n', (3267, 3307), False, 'from daproperties.stats import rv_discrete\n'), ((3320, 3371), 'daproperties.stats.rv_discrete', 'rv_discrete', (['[(0,), (1,), (4,)]'], {'pk': '[0.2, 0.4, 0.4]'}), '([(0,), (1,), (4,)], pk=[0.2, 0.4, 0.4])\n', (3331, 3371), False, 'from daproperties.stats import rv_discrete\n'), ((383, 402), 'daproperties.stats.rv_discrete', 'rv_discrete', (['xk', 'pk'], {}), '(xk, pk)\n', (394, 402), False, 'from daproperties.stats import rv_discrete\n'), ((1416, 1446), 'numpy.array_equal', 'np.array_equal', (['P_is', 'P_target'], {}), '(P_is, P_target)\n', (1430, 1446), True, 'import numpy as np\n'), ((1770, 1800), 'numpy.array_equal', 'np.array_equal', (['P_is', 'P_target'], {}), '(P_is, P_target)\n', (1784, 1800), True, 'import numpy as np\n'), ((2114, 2144), 'numpy.array_equal', 'np.array_equal', (['P_is', 'P_target'], {}), '(P_is, P_target)\n', (2128, 2144), True, 'import numpy as np\n'), ((2418, 2451), 'numpy.array', 'np.array', (['[0.6, 0.6, 0.4, 0.4, 0]'], {}), '([0.6, 0.6, 0.4, 0.4, 0])\n', (2426, 2451), True, 'import numpy as np\n'), ((2486, 2524), 'numpy.array_equal', 'np.array_equal', (['score_is', 'score_target'], {}), '(score_is, score_target)\n', (2500, 2524), True, 'import numpy as np\n'), ((2936, 2980), 'numpy.array_equal', 'np.array_equal', (['coverage_is', 'coverage_target'], {}), '(coverage_is, 
coverage_target)\n', (2950, 2980), True, 'import numpy as np\n'), ((3084, 3129), 'numpy.array_equal', 'np.array_equal', (['coverage_is', 'coverage_reverse'], {}), '(coverage_is, coverage_reverse)\n', (3098, 3129), True, 'import numpy as np\n'), ((2877, 2899), 'numpy.array', 'np.array', (['[(0,), (1,)]'], {}), '([(0,), (1,)])\n', (2885, 2899), True, 'import numpy as np\n')] |
import numpy as np
from common.dataset.pre_process.norm_data import norm_to_pixel
from common.transformation.cam_utils import normalize_screen_coordinates
def load_mpi_test(file_path, seq, norm):
    """
    Load one MPI-INF-3DHP test sequence (or the combined unique set) from a .npz dump.

    Usage: Load a section once
    :param file_path: path to the .npz file holding 'pose3d_univ' and 'pose2d' entries
    :param seq: sequence selector. There are six sequences in this (seq=0,1,2,3,4,5). And 2935 poses in a unique set(seq==7).
    If you want to evaluate by scene setting, you can use the sequencewise evaluation
    to convert to these numbers by doing
    #1:Studio with Green Screen (TS1*603 + TS2 *540)/ (603+540)
    #2:Studio without Green Screen (TS3*505+TS4*553)/(505+553)
    #3:Outdoor (TS5*276+TS6*452)/(276+452)
    :param norm: 'base' selects plain screen-coordinate normalization; any other
        value is forwarded to norm_to_pixel for pixel-based normalization
    :return: Normalized 2d/3d pose, normalization params and camera intrinics. All types: List
    """
    # NOTE(review): seq values outside {0..5, 7} leave pose_3d/pose_2d/cam_intri
    # undefined and would raise NameError below -- confirm callers only pass these.
    info = np.load(file_path, allow_pickle=True)
    if seq in range(0,6):
        pose_3d = info['pose3d_univ'][seq]
        pose_2d = info['pose2d'][seq]
        if seq in [0, 1, 2, 3]:
            # Studio sequences, square 2048x2048 frames.
            img_w, img_h = 2048, 2048
            # Camera intrinsics; first four look like fx, fy, cx, cy -- TODO confirm.
            cam_intri = np.array([1500.0686135995716, 1500.6590966853348, 1017.3794860438494, 1043.062824876024, 1,1,1,1,1])
        elif seq in [4, 5]:
            # Outdoor sequences use a 1920x1080 camera.
            img_w, img_h = 1920, 1080
            cam_intri = np.array([1683.482559482185, 1671.927242063379, 939.9278168524228, 560.2072491988034, 1,1,1,1,1])
    elif seq == 7:
        # The unique 2935-pose set is stored at index 0.
        pose_3d = info['pose3d_univ'][0]
        pose_2d = info['pose2d'][0]
        img_w, img_h = 2048, 2048
        cam_intri = np.array([1504.1479043534127, 1556.86936732066, 991.7469587022122, 872.994958045596, 1, 1, 1, 1, 1])
    params = {}
    if norm == 'base':
        # Remove global offset, but keep trajectory in first position
        pose_3d[:, 1:] -= pose_3d[:, :1]
        # Millimetres -> metres.
        normed_pose_3d = pose_3d/1000
        normed_pose_2d = normalize_screen_coordinates(pose_2d[..., :2], w=img_w, h=img_h)
        params['intrinsic'] = cam_intri
    else:
        normed_pose_3d, normed_pose_2d, pixel_ratio, rescale_ratio, offset_2d, abs_root_Z = norm_to_pixel(pose_3d/1000, pose_2d, cam_intri, norm)
        norm_params=np.concatenate((pixel_ratio, rescale_ratio, offset_2d, abs_root_Z), axis=-1) # [T, 1, 5], len()==4
        params['intrinsic'] = cam_intri
        params['normalization_params'] = norm_params
    return normed_pose_3d, normed_pose_2d, params | [
"common.transformation.cam_utils.normalize_screen_coordinates",
"numpy.array",
"numpy.concatenate",
"numpy.load",
"common.dataset.pre_process.norm_data.norm_to_pixel"
] | [((793, 830), 'numpy.load', 'np.load', (['file_path'], {'allow_pickle': '(True)'}), '(file_path, allow_pickle=True)\n', (800, 830), True, 'import numpy as np\n'), ((1786, 1850), 'common.transformation.cam_utils.normalize_screen_coordinates', 'normalize_screen_coordinates', (['pose_2d[..., :2]'], {'w': 'img_w', 'h': 'img_h'}), '(pose_2d[..., :2], w=img_w, h=img_h)\n', (1814, 1850), False, 'from common.transformation.cam_utils import normalize_screen_coordinates\n'), ((1993, 2048), 'common.dataset.pre_process.norm_data.norm_to_pixel', 'norm_to_pixel', (['(pose_3d / 1000)', 'pose_2d', 'cam_intri', 'norm'], {}), '(pose_3d / 1000, pose_2d, cam_intri, norm)\n', (2006, 2048), False, 'from common.dataset.pre_process.norm_data import norm_to_pixel\n'), ((2067, 2143), 'numpy.concatenate', 'np.concatenate', (['(pixel_ratio, rescale_ratio, offset_2d, abs_root_Z)'], {'axis': '(-1)'}), '((pixel_ratio, rescale_ratio, offset_2d, abs_root_Z), axis=-1)\n', (2081, 2143), True, 'import numpy as np\n'), ((1032, 1141), 'numpy.array', 'np.array', (['[1500.0686135995716, 1500.6590966853348, 1017.3794860438494, \n 1043.062824876024, 1, 1, 1, 1, 1]'], {}), '([1500.0686135995716, 1500.6590966853348, 1017.3794860438494, \n 1043.062824876024, 1, 1, 1, 1, 1])\n', (1040, 1141), True, 'import numpy as np\n'), ((1472, 1577), 'numpy.array', 'np.array', (['[1504.1479043534127, 1556.86936732066, 991.7469587022122, 872.994958045596,\n 1, 1, 1, 1, 1]'], {}), '([1504.1479043534127, 1556.86936732066, 991.7469587022122, \n 872.994958045596, 1, 1, 1, 1, 1])\n', (1480, 1577), True, 'import numpy as np\n'), ((1223, 1329), 'numpy.array', 'np.array', (['[1683.482559482185, 1671.927242063379, 939.9278168524228, 560.2072491988034,\n 1, 1, 1, 1, 1]'], {}), '([1683.482559482185, 1671.927242063379, 939.9278168524228, \n 560.2072491988034, 1, 1, 1, 1, 1])\n', (1231, 1329), True, 'import numpy as np\n')] |
import os
import sys
import yaml
import json
import random
import argparse
import numpy as np
import torch
from tricolo.trainers.SimCLR import SimCLR
from tricolo.dataloader.dataloader import ClrDataLoader
# Command-line interface for the retrieval evaluation script.
parser = argparse.ArgumentParser()
# Experiment sub-directory under ./logs/retrieval/ to evaluate.
parser.add_argument("--exp", type=str, default="None", help="Exp to evaluate")
parser.add_argument("--split", type=str, help="Dataset split to evaluate on (valid or test)")
parser.add_argument('--clip', action='store_true', help='Use pretrained CLIP to evaluate')
args = parser.parse_args()
def main(load_dir):
    """Evaluate a trained retrieval model (or pretrained CLIP) stored under *load_dir*.

    Reads the module-level ``args`` (``--clip``, ``--split``) parsed above.

    :param load_dir: experiment directory containing ``checkpoints/config.json``
        (ignored when ``args.clip`` is set).
    :return: tuple ``(rr_1, rr_5, ndcg_5)`` -- recall rate at 1 and 5 and
        NDCG at 5, as fractions (not percentages).
    """
    if not args.clip:
        # Restore the configuration that was saved next to the checkpoint.
        with open(load_dir + '/checkpoints/config.json', 'r') as f:
            config = json.load(f)
        config['train'] = False
        config['log_dir'] = load_dir
    else:
        # Dummy config file: pretrained CLIP has no saved checkpoint config,
        # so fall back to the generic yaml config shipped with the repo.
        # (Was a stray no-op string literal; also close the file handle.)
        with open('./tricolo/configs/clip.yaml', "r") as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        config['train'] = False
        config['log_dir'] = './logs/retrieval/clip'
    dataset = ClrDataLoader(config['dset'], config['batch_size'], config['sparse_model'], **config['dataset'])
    simclr = SimCLR(dataset, config)
    pr_at_k = simclr.test(config['log_dir'], clip=args.clip, eval_loader=args.split)
    # pr_at_k also carries 'precision', 'recall' and 'r_rank'; only the headline
    # metrics are returned here.
    recall_rate = pr_at_k['recall_rate']
    ndcg = pr_at_k['ndcg']
    rr_1 = recall_rate[0]  # RR@1
    rr_5 = recall_rate[4]  # RR@5
    ndcg_5 = ndcg[4]       # NDCG@5
    return rr_1, rr_5, ndcg_5
if __name__ == "__main__":
    # Avoid "too many open files" errors from dataloader worker processes.
    torch.multiprocessing.set_sharing_strategy('file_system')
    path = './logs/retrieval/' + args.exp
    load_dirs = [path]
    print(load_dirs)
    # Accumulate the three headline metrics over every experiment directory.
    rr_1, rr_5, ndcg_5 = [], [], []
    for load_dir in load_dirs:
        scores = main(load_dir)
        torch.cuda.empty_cache()
        for bucket, value in zip((rr_1, rr_5, ndcg_5), scores):
            bucket.append(value)
    # Report back numbers as percentages
    rr_1, rr_5, ndcg_5 = (np.array(vals) * 100 for vals in (rr_1, rr_5, ndcg_5))
    print(np.mean(rr_1), np.mean(rr_5), np.mean(ndcg_5))
| [
"numpy.mean",
"argparse.ArgumentParser",
"tricolo.trainers.SimCLR.SimCLR",
"numpy.array",
"torch.multiprocessing.set_sharing_strategy",
"json.load",
"torch.cuda.empty_cache",
"tricolo.dataloader.dataloader.ClrDataLoader"
] | [((218, 243), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (241, 243), False, 'import argparse\n'), ((979, 1079), 'tricolo.dataloader.dataloader.ClrDataLoader', 'ClrDataLoader', (["config['dset']", "config['batch_size']", "config['sparse_model']"], {}), "(config['dset'], config['batch_size'], config['sparse_model'],\n **config['dataset'])\n", (992, 1079), False, 'from tricolo.dataloader.dataloader import ClrDataLoader\n'), ((1089, 1112), 'tricolo.trainers.SimCLR.SimCLR', 'SimCLR', (['dataset', 'config'], {}), '(dataset, config)\n', (1095, 1112), False, 'from tricolo.trainers.SimCLR import SimCLR\n'), ((1506, 1563), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (1548, 1563), False, 'import torch\n'), ((1781, 1805), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1803, 1805), False, 'import torch\n'), ((1945, 1959), 'numpy.array', 'np.array', (['rr_1'], {}), '(rr_1)\n', (1953, 1959), True, 'import numpy as np\n'), ((1977, 1991), 'numpy.array', 'np.array', (['rr_5'], {}), '(rr_5)\n', (1985, 1991), True, 'import numpy as np\n'), ((2011, 2027), 'numpy.array', 'np.array', (['ndcg_5'], {}), '(ndcg_5)\n', (2019, 2027), True, 'import numpy as np\n'), ((2044, 2057), 'numpy.mean', 'np.mean', (['rr_1'], {}), '(rr_1)\n', (2051, 2057), True, 'import numpy as np\n'), ((2059, 2072), 'numpy.mean', 'np.mean', (['rr_5'], {}), '(rr_5)\n', (2066, 2072), True, 'import numpy as np\n'), ((2074, 2089), 'numpy.mean', 'np.mean', (['ndcg_5'], {}), '(ndcg_5)\n', (2081, 2089), True, 'import numpy as np\n'), ((667, 679), 'json.load', 'json.load', (['f'], {}), '(f)\n', (676, 679), False, 'import json\n')] |
import time
import torch
import random
import itertools
import numpy as np
from argparse import ArgumentParser
from torch.utils.data import DataLoader
from .learning_approach import Learning_Appr
class Appr(Learning_Appr):
    """ Class implementing the Riemannian Walk approach described in
    http://openaccess.thecvf.com/content_ECCV_2018/papers/Arslan_Chaudhry__Riemannian_Walk_ECCV_2018_paper.pdf """

    def __init__(self, model, device, nepochs=100, lr=0.05, lr_min=1e-4, lr_factor=3, lr_patience=5, clipgrad=10000,
                 momentum=0, wd=0, multi_softmax=False, wu_nepochs=0, wu_lr_factor=1, logger=None, lamb=1, alpha=0.5,
                 damping=0.1, fim_sampling_type='max_pred', fim_num_samples=-1, num_exemplars=200,
                 exemplar_selection='herding'):
        super(Appr, self).__init__(model, device, nepochs, lr, lr_min, lr_factor, lr_patience, clipgrad, momentum, wd,
                                   multi_softmax, wu_nepochs, wu_lr_factor, logger)
        self.lamb = lamb          # weight of the regularization term in Eq. 9
        self.alpha = alpha        # EMA factor for the running Fisher estimate (Eq. 10)
        self.damping = damping    # avoids division by ~0 in the importance-score update
        self.sampling_type = fim_sampling_type
        self.num_samples = fim_num_samples
        self.num_exemplars = num_exemplars
        self.exemplar_selection = exemplar_selection
        # In all cases, we only keep importance weights for the model, but not for the heads.
        feat_ext = self.model.model
        # Page 7: "task-specific parameter importance over the entire training trajectory."
        self.w = {n: torch.zeros(p.shape).to(self.device) for n, p in feat_ext.named_parameters() if p.requires_grad}
        # Store current parameters as the initial parameters before first task starts
        self.older_params = {n: p.clone().detach() for n, p in feat_ext.named_parameters() if p.requires_grad}
        # Store scores and fisher information
        self.scores = {n: torch.zeros(p.shape).to(self.device) for n, p in feat_ext.named_parameters() if p.requires_grad}
        self.fisher = {n: torch.zeros(p.shape).to(self.device) for n, p in feat_ext.named_parameters() if p.requires_grad}

    # Returns a parser containing the approach specific parameters
    @staticmethod
    def extra_parser(args):
        """Parse the RWalk-specific command-line arguments (via parse_known_args)."""
        parser = ArgumentParser()
        parser.add_argument('--lamb', default=1, type=float, required=False, help='(default=%(default)s)')
        parser.add_argument('--alpha', default=0.5, type=float, required=False, help='(default=%(default)s)')  # in [0,1]
        parser.add_argument('--damping', default=0.1, type=float, required=False, help='(default=%(default)s)')
        parser.add_argument('--fim_num_samples', default=-1, type=int, required=False, help='(default=%(default)s)')
        parser.add_argument('--fim_sampling_type', default='max_pred', type=str, required=False,
                            choices=['true', 'max_pred', 'multinomial'], help='(default=%(default)s)')
        parser.add_argument('--num_exemplars', default=200, type=int, required=False, help='(default=%(default)s)')
        # TODO: implemented random uniform and herding, they also propose two more sampling strategies
        parser.add_argument('--exemplar_selection', default='random', type=str, choices=['herding', 'random'],
                            required=False, help='(default=%(default)s)')
        return parser.parse_known_args(args)

    # Returns the optimizer
    def _get_optimizer(self):
        """Build SGD over the feature extractor plus (only) the newest head."""
        if len(self.model.heads) > 1:
            return torch.optim.SGD(list(self.model.model.parameters()) + list(self.model.heads[-1].parameters()),
                                   lr=self.lr, weight_decay=self.wd, momentum=self.momentum)
        else:
            return torch.optim.SGD(self.model.parameters(),
                                   lr=self.lr, weight_decay=self.wd, momentum=self.momentum)

    def train(self, t, trn_loader, val_loader):
        """Train task *t*: merge stored exemplars into the loader, run the base
        training loop, then re-select the exemplar buffer from this task's data."""
        # number of classes and buffer samples per class
        num_cls = sum(self.model.task_cls)
        num_trn_ex_cls = int(np.ceil(self.num_exemplars / num_cls))
        # add exemplars to train_loader
        if self.num_exemplars > 0 and t > 0:
            # if dataset is in memory or files type
            if type(trn_loader.dataset.images) is np.ndarray:
                trn_loader.dataset.images = np.vstack([trn_loader.dataset.images, np.vstack(self.x_train_exemplars)])
                trn_loader.dataset.labels.extend(sum(self.y_train_exemplars, []))
            else:
                print('Adding exemplars in Base Dataset is not implemented yet.')
                exit()

        # RESUME DEFAULT TRAINING -- contains the epochs loop
        super().train(t, trn_loader, val_loader)

        # EXEMPLAR MANAGEMENT -- select training subset
        if self.num_exemplars > 0:
            print('Select training exemplars')
            clock0 = time.time()
            if self.exemplar_selection == 'random':
                # iterate through all existing classes
                self.x_train_exemplars = []
                self.y_train_exemplars = []
                for curr_cls in range(num_cls):
                    # get all indices from current class -- check if there are exemplars from previous task in loader
                    cls_ind = np.where(np.asarray(trn_loader.dataset.labels) == curr_cls)[0]
                    assert (len(cls_ind) > 0), "No samples to choose from for class {:d}".format(curr_cls)
                    assert (num_trn_ex_cls <= len(cls_ind)), "Not enough samples to store"
                    # select the exemplars randomly
                    selected = random.sample(list(cls_ind), num_trn_ex_cls)
                    # add the exemplars to the buffer
                    self.x_train_exemplars.append(trn_loader.dataset.images[selected])
                    self.y_train_exemplars.append([trn_loader.dataset.labels[idx] for idx in selected])
            elif self.exemplar_selection == 'herding':
                # change loader and fix to go sequentially (shuffle=False), keeps same order for later, eval transforms
                ex_sel_loader = DataLoader(trn_loader.dataset, batch_size=trn_loader.batch_size, shuffle=False,
                                           num_workers=trn_loader.num_workers, pin_memory=trn_loader.pin_memory)
                ex_sel_loader.dataset.transform = val_loader.dataset.transform
                # extract outputs from the model for all train samples
                extracted_features = []
                with torch.no_grad():
                    self.model.eval()
                    for images, targets in ex_sel_loader:
                        extracted_features.append(self.model(images.to(self.device))[0])
                extracted_features = (torch.cat(extracted_features)).cpu()
                # iterate through all existing classes
                self.x_train_exemplars = []
                self.y_train_exemplars = []
                for curr_cls in range(num_cls):
                    # get all indices from current class -- check if there are exemplars from previous task in loader
                    cls_ind = np.where(np.asarray(trn_loader.dataset.labels) == curr_cls)[0]
                    assert (len(cls_ind) > 0), "No samples to choose from for class {:d}".format(curr_cls)
                    assert (num_trn_ex_cls <= len(cls_ind)), "Not enough samples to store"
                    # get all extracted features for current class
                    cls_feats = extracted_features[cls_ind]
                    # calculate the mean
                    cls_mu = cls_feats.mean(0)
                    # select the exemplars closer to the mean of each class
                    selected = []
                    selected_feat = []
                    for k in range(num_trn_ex_cls):
                        # fix this to the dimension of the model features
                        sum_others = torch.zeros(cls_feats.shape[1])
                        for j in selected_feat:
                            sum_others += j / (k + 1)
                        dist_min = np.inf
                        # choose the closest to the mean of the current class
                        for item in cls_ind:
                            if item not in selected:
                                feat = extracted_features[item]
                                dist = torch.norm(cls_mu - feat / (k + 1) - sum_others)
                                if dist < dist_min:
                                    dist_min = dist
                                    newone = item
                                    newonefeat = feat
                        selected_feat.append(newonefeat)
                        selected.append(newone)
                    # add the exemplars to the buffer
                    self.x_train_exemplars.append(trn_loader.dataset.images[selected])
                    self.y_train_exemplars.append([trn_loader.dataset.labels[idx] for idx in selected])
            # Log
            clock1 = time.time()
            print(' | Selected {:d} train exemplars, time={:5.1f}s'.format(
                sum([len(elem) for elem in self.y_train_exemplars]), clock1 - clock0))

    # Runs a single epoch
    def train_epoch(self, t, trn_loader):
        """One epoch of training while accumulating the path-integral weights self.w."""
        self.model.train()
        for images, targets in trn_loader:
            # store current model
            curr_feat_ext = {n: p.clone().detach() for n, p in self.model.model.named_parameters() if p.requires_grad}
            # Forward current model
            outputs = self.model(images.to(self.device))
            # cross-entropy loss on current task
            loss = torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), targets.to(self.device))
            self.optimizer.zero_grad()
            loss.backward(retain_graph=True)
            # store gradients without regularization term
            unreg_grads = {n: p.grad.clone().detach() for n, p in self.model.model.named_parameters()
                           if p.grad is not None}
            # apply loss with path integral regularization
            loss = self.criterion(t, outputs, targets.to(self.device))
            # Backward
            self.optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clipgrad)
            self.optimizer.step()
            # Page 7: "accumulate task-specific parameter importance over the entire training trajectory"
            # "the parameter importance is defined as the ratio of the change in the loss function to the distance
            # between the conditional likelihod distributions per step in the parameter space."
            with torch.no_grad():
                for n, p in self.model.model.named_parameters():
                    if n in unreg_grads.keys():
                        self.w[n] -= unreg_grads[n] * (p.detach() - curr_feat_ext[n])

    def compute_fisher_matrix_diag(self, trn_loader):
        """Estimate the diagonal of the Fisher Information Matrix on (part of) the loader."""
        # Store Fisher Information
        fisher = {n: torch.zeros(p.shape).to(self.device) for n, p in self.model.model.named_parameters()
                  if p.requires_grad}
        # Compute fisher information for specified number of samples -- rounded to the batch size
        n_samples_batches = (self.num_samples // trn_loader.batch_size + 1) if self.num_samples > 0 \
            else (len(trn_loader.dataset) // trn_loader.batch_size)
        # Do forward and backward pass to compute the fisher information
        self.model.train()
        for images, targets in itertools.islice(trn_loader, n_samples_batches):
            outputs = self.model.forward(images.to(self.device))
            if self.sampling_type == 'true':
                # Use the labels to compute the gradients based on the CE-loss with the ground truth
                preds = targets.to(self.device)
            elif self.sampling_type == 'max_pred':
                # Not use labels and compute the gradients related to the prediction the model has learned
                preds = torch.cat(outputs, dim=1).argmax(1).flatten()
            elif self.sampling_type == 'multinomial':
                # Use a multinomial sampling to compute the gradients
                probs = torch.nn.functional.softmax(torch.cat(outputs, dim=1), dim=1)
                preds = torch.multinomial(probs, len(targets)).flatten()
            loss = torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), preds)
            self.optimizer.zero_grad()
            loss.backward()
            # Page 6: "the Fisher component [...] is the expected square of the loss gradient w.r.t the i-th parameter."
            for n, p in self.model.model.named_parameters():
                if p.grad is not None:
                    fisher[n] += p.grad.pow(2) * len(targets)
        # Apply mean across all samples
        n_samples = n_samples_batches * trn_loader.batch_size
        fisher = {n: (p / n_samples) for n, p in fisher.items()}
        return fisher

    # Runs after training all the epochs of the task (at the end of train function)
    def post_train_process(self, t, trn_loader):
        """Update the Fisher EMA and the averaged importance scores after task *t*."""
        # Store current parameters for the next task
        self.older_params = {n: p.clone().detach() for n, p in self.model.model.named_parameters() if p.requires_grad}
        # calculate Fisher Information Matrix
        curr_fisher = self.compute_fisher_matrix_diag(trn_loader)
        # Eq. 10: efficiently update Fisher Information Matrix
        for n in self.fisher.keys():
            self.fisher[n] = self.alpha * curr_fisher[n] + (1 - self.alpha) * self.fisher[n]
        # Page 7: Optimization Path-based Parameter Importance: importance scores computation
        curr_score = {n: torch.zeros(p.shape).to(self.device) for n, p in self.model.model.named_parameters()
                      if p.requires_grad}
        with torch.no_grad():
            curr_params = {n: p for n, p in self.model.model.named_parameters() if p.requires_grad}
            for n, p in self.scores.items():
                curr_score[n] = self.w[n] / (self.fisher[n] * ((curr_params[n] - self.older_params[n]) ** 2) + self.damping)
                self.w[n].zero_()
                # Page 7: "Since we care about positive influence of the parameters, negative scores are set to zero."
                curr_score[n] = torch.nn.functional.relu(curr_score[n])
        # Page 8: alleviating regularization getting increasingly rigid by averaging scores
        for n, p in self.scores.items():
            self.scores[n] = (self.scores[n] + curr_score[n]) / 2

    # Returns the loss value
    def criterion(self, t, outputs, targets):
        """Cross-entropy plus the RWalk quadratic penalty (Eq. 9) for t > 0."""
        loss_reg = 0
        if t > 0:
            # Eq. 9: final objective function
            for n, p in self.model.model.named_parameters():
                loss_reg += torch.sum((self.fisher[n] + self.scores[n]) * (p - self.older_params[n]).pow(2))
        # since there are no exemplars, the CE loss is only applied to the current training head
        return torch.nn.functional.cross_entropy(torch.cat(outputs, dim=1), targets) + self.lamb * loss_reg
| [
"numpy.ceil",
"itertools.islice",
"argparse.ArgumentParser",
"numpy.asarray",
"torch.norm",
"numpy.vstack",
"torch.nn.functional.relu",
"torch.utils.data.DataLoader",
"torch.no_grad",
"time.time",
"torch.zeros",
"torch.cat"
] | [((2230, 2246), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2244, 2246), False, 'from argparse import ArgumentParser\n'), ((11559, 11606), 'itertools.islice', 'itertools.islice', (['trn_loader', 'n_samples_batches'], {}), '(trn_loader, n_samples_batches)\n', (11575, 11606), False, 'import itertools\n'), ((4002, 4039), 'numpy.ceil', 'np.ceil', (['(self.num_exemplars / num_cls)'], {}), '(self.num_exemplars / num_cls)\n', (4009, 4039), True, 'import numpy as np\n'), ((4835, 4846), 'time.time', 'time.time', ([], {}), '()\n', (4844, 4846), False, 'import time\n'), ((8990, 9001), 'time.time', 'time.time', ([], {}), '()\n', (8999, 9001), False, 'import time\n'), ((13880, 13895), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13893, 13895), False, 'import torch\n'), ((9688, 9713), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (9697, 9713), False, 'import torch\n'), ((10710, 10725), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10723, 10725), False, 'import torch\n'), ((12434, 12459), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (12443, 12459), False, 'import torch\n'), ((14352, 14391), 'torch.nn.functional.relu', 'torch.nn.functional.relu', (['curr_score[n]'], {}), '(curr_score[n])\n', (14376, 14391), False, 'import torch\n'), ((15068, 15093), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (15077, 15093), False, 'import torch\n'), ((1513, 1533), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (1524, 1533), False, 'import torch\n'), ((1879, 1899), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (1890, 1899), False, 'import torch\n'), ((2002, 2022), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (2013, 2022), False, 'import torch\n'), ((6079, 6238), 'torch.utils.data.DataLoader', 'DataLoader', (['trn_loader.dataset'], {'batch_size': 'trn_loader.batch_size', 'shuffle': '(False)', 'num_workers': 
'trn_loader.num_workers', 'pin_memory': 'trn_loader.pin_memory'}), '(trn_loader.dataset, batch_size=trn_loader.batch_size, shuffle=\n False, num_workers=trn_loader.num_workers, pin_memory=trn_loader.pin_memory\n )\n', (6089, 6238), False, 'from torch.utils.data import DataLoader\n'), ((11037, 11057), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (11048, 11057), False, 'import torch\n'), ((13740, 13760), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (13751, 13760), False, 'import torch\n'), ((4323, 4356), 'numpy.vstack', 'np.vstack', (['self.x_train_exemplars'], {}), '(self.x_train_exemplars)\n', (4332, 4356), True, 'import numpy as np\n'), ((6484, 6499), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6497, 6499), False, 'import torch\n'), ((6724, 6753), 'torch.cat', 'torch.cat', (['extracted_features'], {}), '(extracted_features)\n', (6733, 6753), False, 'import torch\n'), ((7889, 7920), 'torch.zeros', 'torch.zeros', (['cls_feats.shape[1]'], {}), '(cls_feats.shape[1])\n', (7900, 7920), False, 'import torch\n'), ((12273, 12298), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (12282, 12298), False, 'import torch\n'), ((5247, 5284), 'numpy.asarray', 'np.asarray', (['trn_loader.dataset.labels'], {}), '(trn_loader.dataset.labels)\n', (5257, 5284), True, 'import numpy as np\n'), ((7110, 7147), 'numpy.asarray', 'np.asarray', (['trn_loader.dataset.labels'], {}), '(trn_loader.dataset.labels)\n', (7120, 7147), True, 'import numpy as np\n'), ((8344, 8392), 'torch.norm', 'torch.norm', (['(cls_mu - feat / (k + 1) - sum_others)'], {}), '(cls_mu - feat / (k + 1) - sum_others)\n', (8354, 8392), False, 'import torch\n'), ((12051, 12076), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (12060, 12076), False, 'import torch\n')] |
#!/usr/bin/env python3
"""
Reads experiment descriptions in the passed configuration file
and runs them sequentially, logging outputs to files called <experimentname>.log
and <experimentname>.err.log, and reporting on final perplexity metrics.
"""
import argparse
import sys
import os
import six
import random
import shutil
import numpy as np
# XNMT imports
import copy
import xnmt.xnmt_preproc, xnmt.xnmt_train, xnmt.xnmt_decode, xnmt.xnmt_evaluate
from xnmt.options import OptionParser, Option
from xnmt.tee import Tee
def main(overwrite_args=None):
  """Run every experiment (or the selected ones) described in the YAML config file.

  :param overwrite_args: optional list of CLI arguments used instead of
      sys.argv (useful for tests); None means parse sys.argv.
  """
  argparser = argparse.ArgumentParser()
  argparser.add_argument("--dynet-mem", type=int)
  argparser.add_argument("--dynet-seed", type=int)
  argparser.add_argument("--dynet-autobatch", type=int)
  argparser.add_argument("--dynet-devices", type=str)
  argparser.add_argument("--dynet-viz", action='store_true', help="use visualization")
  argparser.add_argument("--dynet-gpu", action='store_true', help="use GPU acceleration")
  argparser.add_argument("--dynet-gpu-ids", type=int)
  argparser.add_argument("--dynet-gpus", type=int)
  argparser.add_argument("--dynet-weight-decay", type=float)
  argparser.add_argument("--generate-doc", action='store_true', help="Do not run, output documentation instead")
  argparser.add_argument("experiments_file")
  argparser.add_argument("experiment_name", nargs='*', help="Run only the specified experiments")
  argparser.set_defaults(generate_doc=False)
  args = argparser.parse_args(overwrite_args)

  config_parser = OptionParser()
  config_parser.add_task("preproc", xnmt.xnmt_preproc.options)
  config_parser.add_task("train", xnmt.xnmt_train.options)
  config_parser.add_task("decode", xnmt.xnmt_decode.options)
  config_parser.add_task("evaluate", xnmt.xnmt_evaluate.options)

  # Tweak the options to make config files less repetitive:
  # - Delete evaluate:evaluator, replace with exp:eval_metrics
  # - Delete decode:hyp_file, evaluate:hyp_file, replace with exp:hyp_file
  # - Delete train:model, decode:model_file, replace with exp:model_file
  config_parser.remove_option("evaluate", "evaluator")
  config_parser.remove_option("decode", "trg_file")
  config_parser.remove_option("evaluate", "hyp_file")
  config_parser.remove_option("train", "model_file")
  config_parser.remove_option("decode", "model_file")

  experiment_options = [
    Option("model_file", default_value="<EXP>.mod", help_str="Location to write the model file"),
    Option("hyp_file", default_value="<EXP>.hyp", help_str="Location to write decoded output for evaluation"),
    Option("out_file", default_value="<EXP>.out", help_str="Location to write stdout messages"),
    Option("err_file", default_value="<EXP>.err", help_str="Location to write stderr messages"),
    Option("cfg_file", default_value=None, help_str="Location to write a copy of the YAML configuration file", required=False),
    Option("eval_only", bool, default_value=False, help_str="Skip training and evaluate only"),
    Option("eval_metrics", default_value="bleu", help_str="Comma-separated list of evaluation metrics (bleu/wer/cer)"),
    Option("run_for_epochs", int, help_str="How many epochs to run each test for"),
  ]
  config_parser.add_task("experiment", experiment_options)

  if args.generate_doc:
    print(config_parser.generate_options_table())
    exit(0)

  if args.dynet_seed:
    random.seed(args.dynet_seed)
    np.random.seed(args.dynet_seed)

  config = config_parser.args_from_config_file(args.experiments_file)

  results = []
  # Check ahead of time that all experiments exist, to avoid bad surprises
  experiment_names = args.experiment_name or config.keys()
  if args.experiment_name:
    nonexistent = set(experiment_names).difference(config.keys())
    if len(nonexistent) != 0:
      raise Exception("Experiments {} do not exist".format(",".join(list(nonexistent))))

  for experiment_name in sorted(experiment_names):
    exp_tasks = config[experiment_name]

    print("=> Running {}".format(experiment_name))

    exp_args = exp_tasks["experiment"]
    if exp_args.cfg_file is not None:  # fixed: compare with None by identity
      shutil.copyfile(args.experiments_file, exp_args.cfg_file)

    preproc_args = exp_tasks["preproc"]

    train_args = exp_tasks["train"]
    train_args.model_file = exp_args.model_file

    decode_args = exp_tasks["decode"]
    decode_args.trg_file = exp_args.hyp_file
    decode_args.model_file = None  # The model is passed to the decoder directly

    evaluate_args = exp_tasks["evaluate"]
    evaluate_args.hyp_file = exp_args.hyp_file
    # Materialize the metric list: a bare map object is always truthy (so the
    # "if evaluators:" guard below could never skip) and can be iterated once only.
    evaluators = [s.lower() for s in exp_args.eval_metrics.split(",")]

    output = Tee(exp_args.out_file, 3)
    err_output = Tee(exp_args.err_file, 3, error=True)

    # Do preprocessing
    print("> Preprocessing")
    xnmt.xnmt_preproc.xnmt_preproc(preproc_args)

    # Do training
    for task_name in exp_tasks:
      if hasattr(exp_tasks[task_name], "random_search_report"):
        print("> instantiated random parameter search: %s" % exp_tasks[task_name].random_search_report)

    print("> Training")
    xnmt_trainer = xnmt.xnmt_train.XnmtTrainer(train_args)
    xnmt_trainer.decode_args = copy.copy(decode_args)
    xnmt_trainer.evaluate_args = copy.copy(evaluate_args)

    eval_scores = "Not evaluated"
    for i_epoch in six.moves.range(exp_args.run_for_epochs):
      if not exp_args.eval_only:
        xnmt_trainer.run_epoch()
      if xnmt_trainer.early_stopping_reached:
        break

    if not exp_args.eval_only:
      print('reverting learned weights to best checkpoint..')
      xnmt_trainer.model_context.dynet_param_collection.revert_to_best_model()

    if evaluators:
      print("> Evaluating test set")
      output.indent += 2
      xnmt.xnmt_decode.xnmt_decode(decode_args,
                                   model_elements=(xnmt_trainer.corpus_parser, xnmt_trainer.model))
      eval_scores = []
      for evaluator in evaluators:
        evaluate_args.evaluator = evaluator
        eval_score = xnmt.xnmt_evaluate.xnmt_evaluate(evaluate_args)
        print(eval_score)
        eval_scores.append(eval_score)
      output.indent -= 2

    results.append((experiment_name, eval_scores))

    output.close()
    err_output.close()

  print("")
  print("{:<30}|{:<40}".format("Experiment", " Final Scores"))
  print("-" * (70 + 1))

  for line in results:
    experiment_name, eval_scores = line
    for i in range(len(eval_scores)):
      print("{:<30}| {:<40}".format((experiment_name if i==0 else ""), str(eval_scores[i])))
if __name__ == '__main__':
  import _dynet
  # Let DyNet consume its --dynet-* command-line flags before anything runs.
  dynet_params = _dynet.DynetParams()
  dynet_params.from_args()
  sys.exit(main())
| [
"xnmt.options.Option",
"six.moves.range",
"argparse.ArgumentParser",
"random.seed",
"_dynet.DynetParams",
"shutil.copyfile",
"xnmt.tee.Tee",
"numpy.random.seed",
"copy.copy",
"xnmt.options.OptionParser"
] | [((571, 596), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (594, 596), False, 'import argparse\n'), ((1517, 1531), 'xnmt.options.OptionParser', 'OptionParser', ([], {}), '()\n', (1529, 1531), False, 'from xnmt.options import OptionParser, Option\n'), ((6539, 6559), '_dynet.DynetParams', '_dynet.DynetParams', ([], {}), '()\n', (6557, 6559), False, 'import _dynet\n'), ((2350, 2447), 'xnmt.options.Option', 'Option', (['"""model_file"""'], {'default_value': '"""<EXP>.mod"""', 'help_str': '"""Location to write the model file"""'}), "('model_file', default_value='<EXP>.mod', help_str=\n 'Location to write the model file')\n", (2356, 2447), False, 'from xnmt.options import OptionParser, Option\n'), ((2448, 2558), 'xnmt.options.Option', 'Option', (['"""hyp_file"""'], {'default_value': '"""<EXP>.hyp"""', 'help_str': '"""Location to write decoded output for evaluation"""'}), "('hyp_file', default_value='<EXP>.hyp', help_str=\n 'Location to write decoded output for evaluation')\n", (2454, 2558), False, 'from xnmt.options import OptionParser, Option\n'), ((2559, 2655), 'xnmt.options.Option', 'Option', (['"""out_file"""'], {'default_value': '"""<EXP>.out"""', 'help_str': '"""Location to write stdout messages"""'}), "('out_file', default_value='<EXP>.out', help_str=\n 'Location to write stdout messages')\n", (2565, 2655), False, 'from xnmt.options import OptionParser, Option\n'), ((2656, 2752), 'xnmt.options.Option', 'Option', (['"""err_file"""'], {'default_value': '"""<EXP>.err"""', 'help_str': '"""Location to write stderr messages"""'}), "('err_file', default_value='<EXP>.err', help_str=\n 'Location to write stderr messages')\n", (2662, 2752), False, 'from xnmt.options import OptionParser, Option\n'), ((2753, 2880), 'xnmt.options.Option', 'Option', (['"""cfg_file"""'], {'default_value': 'None', 'help_str': '"""Location to write a copy of the YAML configuration file"""', 'required': '(False)'}), "('cfg_file', default_value=None, help_str=\n 
'Location to write a copy of the YAML configuration file', required=False)\n", (2759, 2880), False, 'from xnmt.options import OptionParser, Option\n'), ((2881, 2976), 'xnmt.options.Option', 'Option', (['"""eval_only"""', 'bool'], {'default_value': '(False)', 'help_str': '"""Skip training and evaluate only"""'}), "('eval_only', bool, default_value=False, help_str=\n 'Skip training and evaluate only')\n", (2887, 2976), False, 'from xnmt.options import OptionParser, Option\n'), ((2977, 3096), 'xnmt.options.Option', 'Option', (['"""eval_metrics"""'], {'default_value': '"""bleu"""', 'help_str': '"""Comma-separated list of evaluation metrics (bleu/wer/cer)"""'}), "('eval_metrics', default_value='bleu', help_str=\n 'Comma-separated list of evaluation metrics (bleu/wer/cer)')\n", (2983, 3096), False, 'from xnmt.options import OptionParser, Option\n'), ((3097, 3175), 'xnmt.options.Option', 'Option', (['"""run_for_epochs"""', 'int'], {'help_str': '"""How many epochs to run each test for"""'}), "('run_for_epochs', int, help_str='How many epochs to run each test for')\n", (3103, 3175), False, 'from xnmt.options import OptionParser, Option\n'), ((3355, 3383), 'random.seed', 'random.seed', (['args.dynet_seed'], {}), '(args.dynet_seed)\n', (3366, 3383), False, 'import random\n'), ((3388, 3419), 'numpy.random.seed', 'np.random.seed', (['args.dynet_seed'], {}), '(args.dynet_seed)\n', (3402, 3419), True, 'import numpy as np\n'), ((4608, 4633), 'xnmt.tee.Tee', 'Tee', (['exp_args.out_file', '(3)'], {}), '(exp_args.out_file, 3)\n', (4611, 4633), False, 'from xnmt.tee import Tee\n'), ((4651, 4688), 'xnmt.tee.Tee', 'Tee', (['exp_args.err_file', '(3)'], {'error': '(True)'}), '(exp_args.err_file, 3, error=True)\n', (4654, 4688), False, 'from xnmt.tee import Tee\n'), ((5125, 5147), 'copy.copy', 'copy.copy', (['decode_args'], {}), '(decode_args)\n', (5134, 5147), False, 'import copy\n'), ((5181, 5205), 'copy.copy', 'copy.copy', (['evaluate_args'], {}), '(evaluate_args)\n', (5190, 5205), 
False, 'import copy\n'), ((5260, 5300), 'six.moves.range', 'six.moves.range', (['exp_args.run_for_epochs'], {}), '(exp_args.run_for_epochs)\n', (5275, 5300), False, 'import six\n'), ((4079, 4136), 'shutil.copyfile', 'shutil.copyfile', (['args.experiments_file', 'exp_args.cfg_file'], {}), '(args.experiments_file, exp_args.cfg_file)\n', (4094, 4136), False, 'import shutil\n')] |
from __future__ import print_function, division
import scipy
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import datetime
import matplotlib.pyplot as plt
import sys
from data_loader import DataLoader
import numpy as np
import os
class model():
def __init__(self):
# Input shape
self.img_rows = 200
self.img_cols = 200
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
# Configure data loader
self.dataset_name = 'train'
self.data_loader = DataLoader(dataset_name=self.dataset_name,
img_res=(self.img_rows, self.img_cols))
# Calculate output shape of D (PatchGAN)
patch = int(self.img_rows / 2**4)
self.disc_patch = (patch, patch, 1)
# Number of filters in the first layer of G
self.gf = 32
# Loss weights
self.lambda_consist = 10.0 # Consistency loss
self.lambda_id = 0.1 * self.lambda_consist # Identity loss
optimizer = Adam(0.0002, 0.5)
#-------------------------
# Construct Computational
# Graph of Generators
#-------------------------
# Build the generators
self.g = self.build_generator()
# Input images from both domains
img_A = Input(shape=self.img_shape)
img_B = Input(shape=self.img_shape)
# Translate images to the other domain
deblur = self.g(img_B)
img_id = self.g(img_A)
# Combined model trains generators to fool discriminators
self.combined = Model(inputs=[img_A, img_B],
outputs=[ deblur, img_id ])
self.combined.compile(loss=['mse', 'mae'],
loss_weights=[ self.lambda_consist, self.lambda_id ],
optimizer=optimizer)
def build_generator(self):
"""U-Net Generator"""
def conv2d(layer_input, filters, f_size=4):
"""Layers used during downsampling"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
d = InstanceNormalization()(d)
return d
def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
"""Layers used during upsampling"""
u = UpSampling2D(size=2)(layer_input)
u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
if dropout_rate:
u = Dropout(dropout_rate)(u)
u = InstanceNormalization()(u)
u = Concatenate()([u, skip_input])
return u
# Image input
d0 = Input(shape=self.img_shape)
# Downsampling
d1 = conv2d(d0, self.gf)
d2 = conv2d(d1, self.gf*2)
d3 = conv2d(d2, self.gf*4)
# d4 = conv2d(d3, self.gf*8)
print(d0,d1,d2,d3)
# Upsampling
# u1 = deconv2d(d4, d3, self.gf*4)
# u2 = deconv2d(u1, d2, self.gf*2)
# u3 = deconv2d(u2, d1, self.gf)
u1 = deconv2d(d3, d2, self.gf*2)
u2 = deconv2d(u1, d1, self.gf)
#u3 = deconv2d(u2, d1, self.gf)
u3 = UpSampling2D(size=2)(u2)
output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u3)
return Model(d0, output_img)
def train(self, epochs, batch_size=1, sample_interval=50):
start_time = datetime.datetime.now()
# Adversarial loss ground truths
valid = np.ones((batch_size,) + self.disc_patch)
deblur = np.zeros((batch_size,) + self.disc_patch)
for epoch in range(epochs):
for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)):
deblurs = self.g.predict(imgs_B)
imgs_id = self.g.predict(imgs_A)
# ------------------
# Train Generators
# ------------------
# Train the generators
g_loss = self.combined.train_on_batch([imgs_A, imgs_B],
[imgs_id, deblurs])
elapsed_time = datetime.datetime.now() - start_time
# Plot the progress
print ("[Epoch %d/%d] [Batch %d/%d] [G loss: %05f, id: %05f] time: %s " \
% ( epoch, epochs,
batch_i, self.data_loader.n_batches,
g_loss[0],
np.mean(g_loss[1:3]),
elapsed_time))
# If at save interval => save generated image samples
if batch_i % sample_interval == 0:
self.sample_images(epoch, batch_i)
def sample_images(self, epoch, batch_i):
os.makedirs('images/%s' % self.dataset_name, exist_ok=True)
r, c = 2, 3
imgs_A = self.data_loader.load_data(domain="A", batch_size=1, is_testing=True)
imgs_B = self.data_loader.load_data(domain="B", batch_size=1, is_testing=True)
deblur = self.g.predict(imgs_B)
gen_imgs = np.concatenate([imgs_B, deblur, imgs_A])
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
titles = ['Blur', 'Deblur', 'Original']
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt])
axs[i, j].set_title(titles[j])
axs[i,j].axis('off')
cnt += 1
fig.savefig("images/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i))
plt.close()
if __name__ == '__main__':
gan = model()
gan.train(epochs=200, batch_size=1, sample_interval=200)
| [
"keras.optimizers.Adam",
"numpy.mean",
"keras_contrib.layers.normalization.instancenormalization.InstanceNormalization",
"numpy.ones",
"os.makedirs",
"data_loader.DataLoader",
"keras.layers.Concatenate",
"matplotlib.pyplot.close",
"datetime.datetime.now",
"keras.layers.Input",
"numpy.zeros",
"... | [((929, 1016), 'data_loader.DataLoader', 'DataLoader', ([], {'dataset_name': 'self.dataset_name', 'img_res': '(self.img_rows, self.img_cols)'}), '(dataset_name=self.dataset_name, img_res=(self.img_rows, self.\n img_cols))\n', (939, 1016), False, 'from data_loader import DataLoader\n'), ((1449, 1466), 'keras.optimizers.Adam', 'Adam', (['(0.0002)', '(0.5)'], {}), '(0.0002, 0.5)\n', (1453, 1466), False, 'from keras.optimizers import Adam\n'), ((1734, 1761), 'keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (1739, 1761), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((1778, 1805), 'keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (1783, 1805), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((2008, 2062), 'keras.models.Model', 'Model', ([], {'inputs': '[img_A, img_B]', 'outputs': '[deblur, img_id]'}), '(inputs=[img_A, img_B], outputs=[deblur, img_id])\n', (2013, 2062), False, 'from keras.models import Sequential, Model\n'), ((3141, 3168), 'keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (3146, 3168), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((3791, 3812), 'keras.models.Model', 'Model', (['d0', 'output_img'], {}), '(d0, output_img)\n', (3796, 3812), False, 'from keras.models import Sequential, Model\n'), ((3899, 3922), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3920, 3922), False, 'import datetime\n'), ((3981, 4021), 'numpy.ones', 'np.ones', (['((batch_size,) + self.disc_patch)'], {}), '((batch_size,) + self.disc_patch)\n', (3988, 4021), True, 'import numpy as np\n'), ((4039, 4080), 'numpy.zeros', 'np.zeros', (['((batch_size,) + self.disc_patch)'], {}), '((batch_size,) + self.disc_patch)\n', (4047, 4080), True, 'import numpy as np\n'), ((5520, 5579), 'os.makedirs', 
'os.makedirs', (["('images/%s' % self.dataset_name)"], {'exist_ok': '(True)'}), "('images/%s' % self.dataset_name, exist_ok=True)\n", (5531, 5579), False, 'import os\n'), ((5836, 5876), 'numpy.concatenate', 'np.concatenate', (['[imgs_B, deblur, imgs_A]'], {}), '([imgs_B, deblur, imgs_A])\n', (5850, 5876), True, 'import numpy as np\n'), ((6017, 6035), 'matplotlib.pyplot.subplots', 'plt.subplots', (['r', 'c'], {}), '(r, c)\n', (6029, 6035), True, 'import matplotlib.pyplot as plt\n'), ((6355, 6366), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6364, 6366), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3662), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2)'}), '(size=2)\n', (3654, 3662), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3688, 3775), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['self.channels'], {'kernel_size': '(4)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""tanh"""'}), "(self.channels, kernel_size=4, strides=1, padding='same', activation=\n 'tanh')\n", (3694, 3775), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((2458, 2520), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['filters'], {'kernel_size': 'f_size', 'strides': '(2)', 'padding': '"""same"""'}), "(filters, kernel_size=f_size, strides=2, padding='same')\n", (2464, 2520), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((2550, 2570), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2559, 2570), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2590, 2613), 'keras_contrib.layers.normalization.instancenormalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (2611, 2613), False, 'from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n'), ((2785, 2805), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], 
{'size': '(2)'}), '(size=2)\n', (2797, 2805), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((2835, 2921), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['filters'], {'kernel_size': 'f_size', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters, kernel_size=f_size, strides=1, padding='same', activation=\n 'relu')\n", (2841, 2921), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3010, 3033), 'keras_contrib.layers.normalization.instancenormalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (3031, 3033), False, 'from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n'), ((3053, 3066), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (3064, 3066), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((2969, 2990), 'keras.layers.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (2976, 2990), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n'), ((4645, 4668), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4666, 4668), False, 'import datetime\n'), ((5176, 5196), 'numpy.mean', 'np.mean', (['g_loss[1:3]'], {}), '(g_loss[1:3])\n', (5183, 5196), True, 'import numpy as np\n')] |
"""
Train VFL on ModelNet-10 dataset
"""
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
import argparse
import numpy as np
import time
import os
import copy
import random
import pickle
import math
import itertools
from scipy.optimize import minimize
from scipy.optimize import Bounds
from scipy.optimize import NonlinearConstraint
from scipy.optimize import BFGS
from models.resnet import *
from models.mvcnn import *
from models.mvcnn_top_small import *
from models.mvcnn_bottom_small import *
from models.resnet import *
from models.resnet_top import *
import util
from logger import Logger
from custom_dataset import MultiViewDataSet
import sys
from sklearn.cluster import KMeans
from sklearn import metrics as skmetrics
import latbin
from PIL import Image
MVCNN = 'mvcnn'
RESNET = 'resnet'
MODELS = [RESNET,MVCNN]
# Set up input arguments
num_clients = int(sys.argv[3])
parser = argparse.ArgumentParser(description='MVCNN-PyTorch')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--num_clients', type=int, help='Number of clients to split data between vertically',
default=2)
parser.add_argument('--depth', choices=[18, 34, 50, 101, 152], type=int, metavar='N', default=18, help='resnet depth (default: resnet18)')
parser.add_argument('--model', '-m', metavar='MODEL', default=RESNET, choices=MODELS,
help='pretrained model: ' + ' | '.join(MODELS) + ' (default: {})'.format(RESNET))
parser.add_argument('--epochs', default=100, type=int, metavar='N', help='number of total epochs to run (default: 100)')
parser.add_argument('-b', '--batch_size', default=8, type=int,
metavar='N', help='mini-batch size (default: 8)')
parser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,
metavar='LR', help='initial learning rate (default: 0.0001)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum (default: 0.9)')
parser.add_argument('--lr-decay-freq', default=30, type=float,
metavar='W', help='learning rate decay (default: 30)')
parser.add_argument('--lr-decay', default=0.1, type=float,
metavar='W', help='learning rate decay (default: 0.1)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
parser.add_argument('--local_epochs', type=int, help='Number of local epochs to run at each client before synchronizing',
default=1)
parser.add_argument('--labeled_frac', type=float, help='Fraction of full dataset that is labeled.',
default=0.25)
parser.add_argument('--weight_decay', type=float, help='Fraction of full dataset that is labeled.',
default=1)
parser.add_argument('--attack', type=str, help='Information available for the attack',
default="none")
parser.add_argument('--dataset', type=int, help='Dataset to use',
default=0)
parser.add_argument('--seed', type=int, help='Random seed to use', default=42)
# Parse input arguments
args = parser.parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Loading data')
if args.dataset == 0:
transform = transforms.Compose([
transforms.CenterCrop(500),
transforms.Resize(224),
transforms.ToTensor(),
])
# Load dataset
dset_train = MultiViewDataSet(args.data, 'train', transform=transform)
indices = torch.randperm(len(dset_train))
dset_train_sub = torch.utils.data.Subset(dset_train, indices[:int(len(dset_train)*args.labeled_frac)])
samples_weight = torch.tensor([0.1, 0.5, 0.1, 0.1, 0.7, 0.1, 0.1, 0.1, 0.1, 1.0])
num_iters = math.ceil(len(dset_train_sub)/args.batch_size)
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, num_iters*args.batch_size)
train_loader = DataLoader(dset_train_sub, batch_size=args.batch_size, shuffle=False, num_workers=1, sampler=sampler)
dset_aux = dset_train #torch.utils.data.Subset(dset_train, indices[int(len(dset_train)*args.labeled_frac):int(2*len(dset_train)*args.labeled_frac)])
dset_val = MultiViewDataSet(args.data, 'test', transform=transform)
test_loader = DataLoader(dset_val, batch_size=args.batch_size, shuffle=False, num_workers=1)
classes = dset_train.classes
else:
transform = transforms.Compose(
[transforms.ToTensor()])#,transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dset_train = torchvision.datasets.CIFAR100(root='./data', train=True,
download=True, transform=transform)
indices = torch.randperm(len(dset_train))
dset_train_sub = torch.utils.data.Subset(dset_train, indices[:int(len(dset_train)*0.05)])#*args.labeled_frac)])
samples_weight = torch.tensor([0.1, 0.5, 0.1, 0.1, 0.7, 0.1, 0.1, 0.1, 0.1, 1.0])
samples_weight = samples_weight.repeat(10)
num_iters = math.ceil(len(dset_train_sub)/args.batch_size)
sampler = torch.utils.data.WeightedRandomSampler(samples_weight, num_iters*args.batch_size)
train_loader = DataLoader(dset_train_sub, batch_size=args.batch_size, shuffle=False, num_workers=1, sampler=sampler)
indices = torch.randperm(len(dset_train))
dset_aux = dset_train #torch.utils.data.Subset(dset_train, indices[int(len(dset_train)*args.labeled_frac):int(2*len(dset_train)*args.labeled_frac)])
dset_val = torchvision.datasets.CIFAR100(root='./data', train=False,
download=True, transform=transform)
test_loader = DataLoader(dset_val, batch_size=args.batch_size, shuffle=False, num_workers=1)
classes = dset_train.classes
num_classes = len(classes)
print(len(classes), classes)
# Loss and Optimizer
n_epochs = args.epochs
criterion = nn.CrossEntropyLoss()
if args.dataset == 0:
coords_per = int(12/num_clients)
else:
coords_per = int(32*32/num_clients)
best_acc = 0.0
best_loss = 0.0
start_epoch = 0
losses = []
accs_train = []
accs_test = []
models = []
optimizers = []
# Make models for each client
for i in range(num_clients+1):
if i == num_clients:
if args.dataset == 0:
model = mvcnn_top(pretrained=args.pretrained,
num_classes=len(classes), num_clients=num_clients)
else:
model = resnet_top(pretrained=args.pretrained,
num_classes=len(classes), num_clients=num_clients)
else:
if args.dataset == 0:
model = mvcnn_bottom(pretrained=args.pretrained,num_classes=len(classes))
else:
#model = resnet18(pretrained=args.pretrained, num_classes=len(classes))
model = torch.hub.load('pytorch/vision:v0.5.0', 'resnet18', pretrained=False, num_classes=len(classes))
model.to(device)
cudnn.benchmark = True
#optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=0.1)
# Check if model .pt exists
#PATH = f"/gpfs/u/home/VFLA/VFLAcstg/scratch/checkpoint{i}_supervised_NC{args.num_clients}_frac{args.labeled_frac}_seed{args.seed}.pt"
#if os.path.exists(PATH):
# print("Loading from checkpoint")
# checkpoint = torch.load(PATH)
# model.load_state_dict(checkpoint['model_state_dict'])
# model.eval()
# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# start_epoch = checkpoint['epoch']
# losses = pickle.load(open(f'/gpfs/u/home/VFLA/VFLAcstg/scratch/loss_mvcnn_NC{args.num_clients}_LE{args.local_epochs}_frac{args.labeled_frac}_seed{args.seed}.pkl','rb'))
# accs_train = pickle.load(open(f'/gpfs/u/home/VFLA/VFLAcstg/scratch/accs_train_mvcnn_NC{args.num_clients}_LE{args.local_epochs}_frac{args.labeled_frac}_seed{args.seed}.pkl','rb'))
# accs_test = pickle.load(open(f'/gpfs/u/home/VFLA/VFLAcstg/scratch/accs_test_mvcnn_NC{args.num_clients}_LE{args.local_epochs}_frac{args.labeled_frac}_seed{args.seed}.pkl','rb'))
models.append(model)
optimizers.append(optimizer)
def save_eval(models, train_loader, test_loader, losses, accs_train, accs_test, step, train_size, leakages):
"""
Evaluate and save current loss and accuracy
"""
avg_train_acc, avg_loss = eval(models, train_loader)
avg_test_acc, _ = eval(models, test_loader)
losses.append(avg_loss)
accs_train.append(avg_train_acc)
accs_test.append(avg_test_acc)
pickle.dump(losses, open(f'loss_mvcnn_NC{args.num_clients}_LE{args.local_epochs}_frac{args.labeled_frac}_batch{args.batch_size}_dataset{args.dataset}_seed{args.seed}.pkl', 'wb'))
pickle.dump(accs_train, open(f'accs_train_mvcnn_NC{args.num_clients}_LE{args.local_epochs}_frac{args.labeled_frac}_batch{args.batch_size}_dataset{args.dataset}_seed{args.seed}.pkl', 'wb'))
pickle.dump(accs_test, open(f'accs_test_mvcnn_NC{args.num_clients}_LE{args.local_epochs}_frac{args.labeled_frac}_batch{args.batch_size}_dataset{args.dataset}_seed{args.seed}.pkl', 'wb'))
pickle.dump(leakages, open(f'leakage_mvcnn_NC{args.num_clients}_LE{args.local_epochs}_frac{args.labeled_frac}_batch{args.batch_size}_dataset{args.dataset}_seed{args.seed}.pkl', 'wb'))
print('Iter [%d/%d]: Test Acc: %.2f - Train Acc: %.2f - Loss: %.4f'
% (step + 1, train_size, avg_test_acc.item(), avg_train_acc.item(), avg_loss.item()))
def labels_estimate(models, optimizers):
E = []
S = np.zeros(num_classes)
G = np.zeros(num_classes)
m = 0
grads = []
for param in models[-1].parameters():
grads.append(param.grad)
for i in range(num_classes):
G[i] = torch.sum(grads[0][i,:])
if args.attack == "white-box" or args.attack == "aux-info":
# Load aux data
Gbar = np.zeros(num_classes)
for clas in range(num_classes):
if args.dataset == 0:
dset_aux_x = np.array(dset_aux.x)
dset_aux_y = dset_aux.y
else:
dset_aux_x = dset_aux.data
dset_aux_y = dset_aux.targets
index = np.where(np.array(dset_aux_y) == clas)
aux_x = (dset_aux_x[index])[:args.batch_size]
if args.dataset == 0:
aux_x_new = []
for original_views in aux_x:
views = []
for view in original_views:
im = Image.open(view)
im = im.convert('RGB')
if transform is not None:
im = transform(im)
views.append(im)
aux_x_new.append(torch.stack(views))
aux_x = np.array(torch.stack(aux_x_new))
aux_y = torch.tensor((np.array(dset_aux_y)[index])[:args.batch_size])
# Feed aux data into white-box model
forward(models, optimizers, aux_x, aux_y)
grads = []
for param in models[-1].parameters():
grads.append(param.grad)
Gbar[clas] = torch.sum(grads[0][clas,:])
# Estimate m
for j, gbar_i in enumerate(Gbar):
m += (1/(args.batch_size*num_classes))*gbar_i*(1+1/num_classes)
t = 10
Gbar = np.zeros(num_classes)
for i in range(num_classes):
for k in range(t):
ik = random.choice([num for num in range(0,9) if num != i])
if args.dataset == 0:
dset_aux_x = np.array(dset_aux.x)
dset_aux_y = dset_aux.y
else:
dset_aux_x = dset_aux.data
dset_aux_y = dset_aux.targets
index = np.where(np.array(dset_aux_y) == ik)
aux_x = (dset_aux_x[index])[:args.batch_size]
if args.dataset == 0:
aux_x_new = []
for original_views in aux_x:
views = []
for view in original_views:
im = Image.open(view)
im = im.convert('RGB')
if transform is not None:
im = transform(im)
views.append(im)
aux_x_new.append(torch.stack(views))
aux_x = np.array(torch.stack(aux_x_new))
aux_y = torch.tensor((np.array(dset_aux_y)[index])[:args.batch_size])
# Feed aux data into white-box model
forward(models, optimizers, aux_x, aux_y)
grads = []
for param in models[-1].parameters():
grads.append(param.grad)
Gbar[i] += torch.sum(grads[0][i,:])
# Estimate s
for j, gbar_i in enumerate(Gbar):
S[j] = (1/t)*gbar_i
elif args.attack != "shared-only":
for i, g_i in enumerate(G):
if g_i < 0:
m += (1/args.batch_size)*g_i*(1+1/num_classes)
if args.attack != "none":
for i, g_i in enumerate(G):
if g_i < 0:
E.append(i)
g_i -= m
G = G - S
while len(E) < args.batch_size:
i = np.argmin(G)
E.append(i)
G[i] -= m
return E
def leakage_percent(E, y):
num_correct = 0
y = copy.deepcopy(y.cpu().detach().numpy())
num_labels = len(y)
for guess in E:
if guess in y:
y = np.delete(y, np.argwhere(y == guess)[0])
num_correct += 1
return num_correct/num_labels
def forward(models, optimizers, inputs, targets):
"""
Train all clients on all batches
"""
server_model = models[-1]
server_optimizer = optimizers[-1]
#inputs = np.stack(inputs, axis=1)
inputs = torch.from_numpy(inputs).type(torch.FloatTensor)
inputs, targets = inputs.cuda(device), targets.cuda(device)
inputs, targets = Variable(inputs), Variable(targets)
# Exchange embeddings
H_orig = [None] * num_clients
for i in range(num_clients):
if args.dataset == 0:
x_local = inputs[:,coords_per*i:coords_per*(i+1),:,:,:]
else:
r = math.floor(i/2)
c = i % 2
section = int(math.sqrt(coords_per))
x_local = inputs[:,
section*r:section*(r+1),
section*c:section*(c+1),:]
x_local = torch.transpose(x_local,1,3)
H_orig[i] = models[i](x_local)
# Train clients
for i in range(num_clients):
if args.dataset == 0:
x_local = inputs[:,coords_per*i:coords_per*(i+1),:,:,:]
else:
r = math.floor(i/2)
c = i % 2
section = int(math.sqrt(coords_per))
x_local = inputs[:,
section*r:section*(r+1),
section*c:section*(c+1),:]
x_local = torch.transpose(x_local,1,3)
H = H_orig.copy()
model = models[i]
optimizer = optimizers[i]
# Calculate number of local iterations
client_epochs = args.local_epochs
# Train
# compute output
outputs = model(x_local)
H[i] = outputs
outputs = server_model(torch.cat(H,axis=1))
loss = criterion(outputs, targets)
# compute gradient and do gradient step
optimizer.zero_grad()
server_optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
# Train server
H = H_orig.copy()
# compute output
outputs = server_model(torch.cat(H,axis=1))
loss = criterion(outputs, targets)
# compute gradient and do SGD step
server_optimizer.zero_grad()
loss.backward(retain_graph=True)
def train(models, optimizers, epoch, leakages): #, centers):
"""
Train all clients on all batches
"""
train_size = len(train_loader)
server_model = models[-1]
server_optimizer = optimizers[-1]
Hs = np.empty((len(train_loader), num_clients), dtype=object)
Hs.fill([])
for step, (inputs, targets) in enumerate(train_loader):
# Convert from list of 3D to 4D
inputs = np.stack(inputs, axis=1)
inputs = torch.from_numpy(inputs)
inputs, targets = inputs.cuda(device), targets.cuda(device)
inputs, targets = Variable(inputs), Variable(targets)
# Exchange embeddings
H_orig = [None] * num_clients
for i in range(num_clients):
if args.dataset == 0:
x_local = inputs[:,coords_per*i:coords_per*(i+1),:,:,:]
else:
r = math.floor(i/2)
c = i % 2
section = int(math.sqrt(coords_per))
x_local = inputs[:,:,
section*r:section*(r+1),
section*c:section*(c+1)]
x_local = torch.transpose(x_local,0,1)
H_orig[i] = models[i](x_local)
Hs[step, i] = H_orig[i].cpu().detach().numpy()
# Train clients
for i in range(num_clients):
if args.dataset == 0:
x_local = inputs[:,coords_per*i:coords_per*(i+1),:,:,:]
else:
r = math.floor(i/2)
c = i % 2
section = int(math.sqrt(coords_per))
x_local = inputs[:,:,
section*r:section*(r+1),
section*c:section*(c+1)]
x_local = torch.transpose(x_local,0,1)
H = H_orig.copy()
model = models[i]
optimizer = optimizers[i]
# Calculate number of local iterations
client_epochs = args.local_epochs
# Train
for le in range(client_epochs):
# compute output
outputs = model(x_local)
H[i] = outputs
outputs = server_model(torch.cat(H,axis=1))
loss = criterion(outputs, targets)
# compute gradient and do gradient step
optimizer.zero_grad()
server_optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
# Train server
for le in range(args.local_epochs):
H = H_orig.copy()
# compute output
outputs = server_model(torch.cat(H,axis=1))
loss = criterion(outputs, targets)
# compute gradient and do SGD step
server_optimizer.zero_grad()
loss.backward(retain_graph=True)
server_optimizer.step()
# Estimate labels based on final layer gradients
E = labels_estimate(models, optimizers)
percent_correct = leakage_percent(E, targets)
leakages.append([])
leakages[-1].append(percent_correct)
percent_correct = leakage_percent(np.random.randint(0,9,size=targets.shape), targets)
leakages[-1].append(percent_correct)
if (step + 1) % args.print_freq == 0:
print("\tServer Iter [%d/%d] Loss: %.4f - Leakage: %.1f - Guess: %.1f" % (step + 1, train_size, loss.item(), leakages[-1][0]*100, leakages[-1][1]*100))
return leakages
# Validation and Testing
def eval(models, data_loader):
"""
Calculate loss and accuracy for a given data_loader
"""
total = 0.0
correct = 0.0
total_loss = 0.0
n = 0
for i, (inputs, targets) in enumerate(data_loader):
with torch.no_grad():
# Convert from list of 3D to 4D
inputs = np.stack(inputs, axis=1)
inputs = torch.from_numpy(inputs)
inputs, targets = inputs.cuda(device), targets.cuda(device)
inputs, targets = Variable(inputs), Variable(targets)
# Get current embeddings
H_new = [None] * num_clients
for i in range(num_clients):
if args.dataset == 0:
x_local = inputs[:,coords_per*i:coords_per*(i+1),:,:,:]
else:
r = math.floor(i/2)
c = i % 2
section = int(math.sqrt(coords_per))
x_local = inputs[:,:,
section*r:section*(r+1),
section*c:section*(c+1)]
x_local = torch.transpose(x_local,0,1)
H_new[i] = models[i](x_local)
# compute output
outputs = models[-1](torch.cat(H_new,axis=1))
loss = criterion(outputs, targets)
total_loss += loss
n += 1
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (predicted.cpu() == targets.cpu()).sum()
avg_test_acc = 100 * correct / total
avg_loss = total_loss / n
return avg_test_acc, avg_loss
# Get initial loss/accuracy
#if start_epoch == 0:
# save_eval(models, train_loader, test_loader, losses, accs_train, accs_test, 0, len(train_loader))
# Training / Eval loop
leakages = []
train_size = len(train_loader)
for epoch in range(start_epoch, n_epochs):
print('\n-----------------------------------')
print('Epoch: [%d/%d]' % (epoch+1, n_epochs))
start = time.time()
leakages = train(models, optimizers, epoch, leakages)
save_eval(models, train_loader, test_loader, losses, accs_train, accs_test, epoch, train_size, leakages)
#for i in range(num_clients+1):
# PATH = f"/gpfs/u/home/VFLA/VFLAcstg/scratch/checkpoint{i}_supervised_NC{args.num_clients}_frac{args.labeled_frac}_seed{args.seed}.pt"
# torch.save({
# 'epoch': epoch+1,
# 'model_state_dict': models[i].state_dict(),
# 'optimizer_state_dict': optimizers[i].state_dict(),
# 'loss': 0,
# }, PATH)
print('Time taken: %.2f sec.' % (time.time() - start))
| [
"torchvision.datasets.CIFAR100",
"torch.nn.CrossEntropyLoss",
"math.floor",
"torch.max",
"math.sqrt",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"torch.utils.data.WeightedRandomSampler",
"argparse.ArgumentParser",
"numpy.stack",
"numpy.random.seed",
"numpy.a... | [((1057, 1109), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MVCNN-PyTorch"""'}), "(description='MVCNN-PyTorch')\n", (1080, 1109), False, 'import argparse\n'), ((3610, 3635), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3624, 3635), True, 'import numpy as np\n'), ((3636, 3664), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3653, 3664), False, 'import torch\n'), ((3665, 3687), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (3676, 3687), False, 'import random\n'), ((6394, 6415), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6413, 6415), True, 'import torch.nn as nn\n'), ((3985, 4042), 'custom_dataset.MultiViewDataSet', 'MultiViewDataSet', (['args.data', '"""train"""'], {'transform': 'transform'}), "(args.data, 'train', transform=transform)\n", (4001, 4042), False, 'from custom_dataset import MultiViewDataSet\n'), ((4217, 4281), 'torch.tensor', 'torch.tensor', (['[0.1, 0.5, 0.1, 0.1, 0.7, 0.1, 0.1, 0.1, 0.1, 1.0]'], {}), '([0.1, 0.5, 0.1, 0.1, 0.7, 0.1, 0.1, 0.1, 0.1, 1.0])\n', (4229, 4281), False, 'import torch\n'), ((4359, 4447), 'torch.utils.data.WeightedRandomSampler', 'torch.utils.data.WeightedRandomSampler', (['samples_weight', '(num_iters * args.batch_size)'], {}), '(samples_weight, num_iters * args.\n batch_size)\n', (4397, 4447), False, 'import torch\n'), ((4460, 4565), 'torch.utils.data.DataLoader', 'DataLoader', (['dset_train_sub'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(1)', 'sampler': 'sampler'}), '(dset_train_sub, batch_size=args.batch_size, shuffle=False,\n num_workers=1, sampler=sampler)\n', (4470, 4565), False, 'from torch.utils.data import DataLoader\n'), ((4732, 4788), 'custom_dataset.MultiViewDataSet', 'MultiViewDataSet', (['args.data', '"""test"""'], {'transform': 'transform'}), "(args.data, 'test', transform=transform)\n", (4748, 4788), False, 'from 
custom_dataset import MultiViewDataSet\n'), ((4807, 4885), 'torch.utils.data.DataLoader', 'DataLoader', (['dset_val'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(1)'}), '(dset_val, batch_size=args.batch_size, shuffle=False, num_workers=1)\n', (4817, 4885), False, 'from torch.utils.data import DataLoader\n'), ((5066, 5162), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=True, download=True,\n transform=transform)\n", (5095, 5162), False, 'import torchvision\n'), ((5394, 5458), 'torch.tensor', 'torch.tensor', (['[0.1, 0.5, 0.1, 0.1, 0.7, 0.1, 0.1, 0.1, 0.1, 1.0]'], {}), '([0.1, 0.5, 0.1, 0.1, 0.7, 0.1, 0.1, 0.1, 0.1, 1.0])\n', (5406, 5458), False, 'import torch\n'), ((5583, 5671), 'torch.utils.data.WeightedRandomSampler', 'torch.utils.data.WeightedRandomSampler', (['samples_weight', '(num_iters * args.batch_size)'], {}), '(samples_weight, num_iters * args.\n batch_size)\n', (5621, 5671), False, 'import torch\n'), ((5684, 5789), 'torch.utils.data.DataLoader', 'DataLoader', (['dset_train_sub'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(1)', 'sampler': 'sampler'}), '(dset_train_sub, batch_size=args.batch_size, shuffle=False,\n num_workers=1, sampler=sampler)\n', (5694, 5789), False, 'from torch.utils.data import DataLoader\n'), ((6002, 6099), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "(root='./data', train=False, download=True,\n transform=transform)\n", (6031, 6099), False, 'import torchvision\n'), ((6166, 6244), 'torch.utils.data.DataLoader', 'DataLoader', (['dset_val'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(1)'}), '(dset_val, batch_size=args.batch_size, shuffle=False, num_workers=1)\n', (6176, 6244), False, 'from 
torch.utils.data import DataLoader\n'), ((10057, 10078), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (10065, 10078), True, 'import numpy as np\n'), ((10087, 10108), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (10095, 10108), True, 'import numpy as np\n'), ((21959, 21970), 'time.time', 'time.time', ([], {}), '()\n', (21968, 21970), False, 'import time\n'), ((3721, 3746), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3744, 3746), False, 'import torch\n'), ((10259, 10284), 'torch.sum', 'torch.sum', (['grads[0][i, :]'], {}), '(grads[0][i, :])\n', (10268, 10284), False, 'import torch\n'), ((10388, 10409), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (10396, 10409), True, 'import numpy as np\n'), ((11866, 11887), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (11874, 11887), True, 'import numpy as np\n'), ((14568, 14584), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (14576, 14584), False, 'from torch.autograd import Variable\n'), ((14586, 14603), 'torch.autograd.Variable', 'Variable', (['targets'], {}), '(targets)\n', (14594, 14603), False, 'from torch.autograd import Variable\n'), ((16243, 16263), 'torch.cat', 'torch.cat', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (16252, 16263), False, 'import torch\n'), ((16834, 16858), 'numpy.stack', 'np.stack', (['inputs'], {'axis': '(1)'}), '(inputs, axis=1)\n', (16842, 16858), True, 'import numpy as np\n'), ((16876, 16900), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (16892, 16900), False, 'import torch\n'), ((3850, 3876), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(500)'], {}), '(500)\n', (3871, 3876), True, 'import torchvision.transforms as transforms\n'), ((3886, 3908), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224)'], {}), '(224)\n', (3903, 3908), True, 'import torchvision.transforms as transforms\n'), ((3918, 
3939), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3937, 3939), True, 'import torchvision.transforms as transforms\n'), ((4966, 4987), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4985, 4987), True, 'import torchvision.transforms as transforms\n'), ((11666, 11694), 'torch.sum', 'torch.sum', (['grads[0][clas, :]'], {}), '(grads[0][clas, :])\n', (11675, 11694), False, 'import torch\n'), ((13848, 13860), 'numpy.argmin', 'np.argmin', (['G'], {}), '(G)\n', (13857, 13860), True, 'import numpy as np\n'), ((14433, 14457), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (14449, 14457), False, 'import torch\n'), ((14826, 14843), 'math.floor', 'math.floor', (['(i / 2)'], {}), '(i / 2)\n', (14836, 14843), False, 'import math\n'), ((15075, 15105), 'torch.transpose', 'torch.transpose', (['x_local', '(1)', '(3)'], {}), '(x_local, 1, 3)\n', (15090, 15105), False, 'import torch\n'), ((15325, 15342), 'math.floor', 'math.floor', (['(i / 2)'], {}), '(i / 2)\n', (15335, 15342), False, 'import math\n'), ((15574, 15604), 'torch.transpose', 'torch.transpose', (['x_local', '(1)', '(3)'], {}), '(x_local, 1, 3)\n', (15589, 15604), False, 'import torch\n'), ((15907, 15927), 'torch.cat', 'torch.cat', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (15916, 15927), False, 'import torch\n'), ((16996, 17012), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (17004, 17012), False, 'from torch.autograd import Variable\n'), ((17014, 17031), 'torch.autograd.Variable', 'Variable', (['targets'], {}), '(targets)\n', (17022, 17031), False, 'from torch.autograd import Variable\n'), ((20189, 20204), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20202, 20204), False, 'import torch\n'), ((20271, 20295), 'numpy.stack', 'np.stack', (['inputs'], {'axis': '(1)'}), '(inputs, axis=1)\n', (20279, 20295), True, 'import numpy as np\n'), ((20317, 20341), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], 
{}), '(inputs)\n', (20333, 20341), False, 'import torch\n'), ((21346, 21372), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (21355, 21372), False, 'import torch\n'), ((10514, 10534), 'numpy.array', 'np.array', (['dset_aux.x'], {}), '(dset_aux.x)\n', (10522, 10534), True, 'import numpy as np\n'), ((13345, 13370), 'torch.sum', 'torch.sum', (['grads[0][i, :]'], {}), '(grads[0][i, :])\n', (13354, 13370), False, 'import torch\n'), ((14890, 14911), 'math.sqrt', 'math.sqrt', (['coords_per'], {}), '(coords_per)\n', (14899, 14911), False, 'import math\n'), ((15389, 15410), 'math.sqrt', 'math.sqrt', (['coords_per'], {}), '(coords_per)\n', (15398, 15410), False, 'import math\n'), ((17281, 17298), 'math.floor', 'math.floor', (['(i / 2)'], {}), '(i / 2)\n', (17291, 17298), False, 'import math\n'), ((17554, 17584), 'torch.transpose', 'torch.transpose', (['x_local', '(0)', '(1)'], {}), '(x_local, 0, 1)\n', (17569, 17584), False, 'import torch\n'), ((17891, 17908), 'math.floor', 'math.floor', (['(i / 2)'], {}), '(i / 2)\n', (17901, 17908), False, 'import math\n'), ((18164, 18194), 'torch.transpose', 'torch.transpose', (['x_local', '(0)', '(1)'], {}), '(x_local, 0, 1)\n', (18179, 18194), False, 'import torch\n'), ((19053, 19073), 'torch.cat', 'torch.cat', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (19062, 19073), False, 'import torch\n'), ((19590, 19633), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9)'], {'size': 'targets.shape'}), '(0, 9, size=targets.shape)\n', (19607, 19633), True, 'import numpy as np\n'), ((20445, 20461), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (20453, 20461), False, 'from torch.autograd import Variable\n'), ((20463, 20480), 'torch.autograd.Variable', 'Variable', (['targets'], {}), '(targets)\n', (20471, 20480), False, 'from torch.autograd import Variable\n'), ((21195, 21219), 'torch.cat', 'torch.cat', (['H_new'], {'axis': '(1)'}), '(H_new, axis=1)\n', (21204, 21219), False, 'import 
torch\n'), ((22617, 22628), 'time.time', 'time.time', ([], {}), '()\n', (22626, 22628), False, 'import time\n'), ((10711, 10731), 'numpy.array', 'np.array', (['dset_aux_y'], {}), '(dset_aux_y)\n', (10719, 10731), True, 'import numpy as np\n'), ((11309, 11331), 'torch.stack', 'torch.stack', (['aux_x_new'], {}), '(aux_x_new)\n', (11320, 11331), False, 'import torch\n'), ((12105, 12125), 'numpy.array', 'np.array', (['dset_aux.x'], {}), '(dset_aux.x)\n', (12113, 12125), True, 'import numpy as np\n'), ((14114, 14137), 'numpy.argwhere', 'np.argwhere', (['(y == guess)'], {}), '(y == guess)\n', (14125, 14137), True, 'import numpy as np\n'), ((17353, 17374), 'math.sqrt', 'math.sqrt', (['coords_per'], {}), '(coords_per)\n', (17362, 17374), False, 'import math\n'), ((17963, 17984), 'math.sqrt', 'math.sqrt', (['coords_per'], {}), '(coords_per)\n', (17972, 17984), False, 'import math\n'), ((18597, 18617), 'torch.cat', 'torch.cat', (['H'], {'axis': '(1)'}), '(H, axis=1)\n', (18606, 18617), False, 'import torch\n'), ((20761, 20778), 'math.floor', 'math.floor', (['(i / 2)'], {}), '(i / 2)\n', (20771, 20778), False, 'import math\n'), ((21058, 21088), 'torch.transpose', 'torch.transpose', (['x_local', '(0)', '(1)'], {}), '(x_local, 0, 1)\n', (21073, 21088), False, 'import torch\n'), ((11017, 11033), 'PIL.Image.open', 'Image.open', (['view'], {}), '(view)\n', (11027, 11033), False, 'from PIL import Image\n'), ((11256, 11274), 'torch.stack', 'torch.stack', (['views'], {}), '(views)\n', (11267, 11274), False, 'import torch\n'), ((11367, 11387), 'numpy.array', 'np.array', (['dset_aux_y'], {}), '(dset_aux_y)\n', (11375, 11387), True, 'import numpy as np\n'), ((12322, 12342), 'numpy.array', 'np.array', (['dset_aux_y'], {}), '(dset_aux_y)\n', (12330, 12342), True, 'import numpy as np\n'), ((12970, 12992), 'torch.stack', 'torch.stack', (['aux_x_new'], {}), '(aux_x_new)\n', (12981, 12992), False, 'import torch\n'), ((20841, 20862), 'math.sqrt', 'math.sqrt', (['coords_per'], {}), 
'(coords_per)\n', (20850, 20862), False, 'import math\n'), ((12654, 12670), 'PIL.Image.open', 'Image.open', (['view'], {}), '(view)\n', (12664, 12670), False, 'from PIL import Image\n'), ((12913, 12931), 'torch.stack', 'torch.stack', (['views'], {}), '(views)\n', (12924, 12931), False, 'import torch\n'), ((13032, 13052), 'numpy.array', 'np.array', (['dset_aux_y'], {}), '(dset_aux_y)\n', (13040, 13052), True, 'import numpy as np\n')] |
import os
import sys
import random
import numpy as np
import pandas as pd
# Please modify to fit your environment
import tensorflow as tf
import tensorflow.contrib.keras.api.keras as keras
from tensorflow.contrib.keras.api.keras import backend, callbacks
from tensorflow.contrib.keras.api.keras.models import Model
from tensorflow.contrib.keras.api.keras.layers import Input
from tensorflow.contrib.keras.api.keras.utils import Progbar
from tensorflow.contrib.keras.api.keras.optimizers import Adam
from functools import partial
from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import macro as mc
import load_data as ld
import preprocess as pre
import models
import compute_relation_vectors as rv
# Command-line interface: the script expects exactly one argument selecting
# the transfer-learning method (see the usage text below for the codes).
if len(sys.argv) != 2:
    print("input error: main.py method_flag")
    print("method flag : nontransfer (=0), standard transfer learning (=1), count ver. all transfer deep learning (=2),\
        mean ver. all transfer deep learning (=3), mean modified ver. all transfer deep learning (=4)")
    sys.exit(1)
# method_flag is still a string here; it is converted to int in the __main__ guard.
_, method_flag = sys.argv
def Neighbors( labels, database, knum ):
    """Return (distances, indices) of the knum nearest rows of `database`
    for each row of `labels`, using a ball-tree nearest-neighbor search."""
    searcher = NearestNeighbors(n_neighbors=knum, algorithm='ball_tree')
    searcher.fit(database)
    return searcher.kneighbors(labels)
def main(method_flag):
    """Run K-fold cross-validated transfer learning and write results to mc._RESULT_FILE.

    Arguments
    ---------
    method_flag: int, one of the mc._* method codes selecting the transfer strategy.

    Side effects
    ------------
    Writes one CSV row per test sample (predicted label, correct label, error flag)
    and appends the overall accuracy in percent as the last line.
    """
    # load source/target domain data
    source_df, target_df = ld.load_file()
    predicts, corrects = [], []
    # fix seeds so fold shuffling and weight initialization are reproducible
    random.seed(123)
    np.random.seed(123)
    kf = KFold(shuffle=False, random_state=1, n_splits=mc._FOLD_NUM)
    fold_num = 1
    for train, test in kf.split(target_df):
        print('{0}/{1}'.format(fold_num, mc._FOLD_NUM))
        target_train = target_df.iloc[train]
        target_test = target_df.iloc[test]
        idx, labels = transfer_model(source_df, target_train, target_test, method_flag, fold_num)
        predicts.extend(idx.tolist())
        corrects.extend(labels[0].tolist())
        fold_num = fold_num + 1
    # save per-sample results: predicted label, correct label, error flag (1 = miss)
    predicts = np.array(predicts)
    corrects = np.array(corrects)
    err = [0 if p == c else 1 for p, c in zip(predicts, corrects)]
    table = np.concatenate((np.reshape(predicts, [len(predicts), 1]),
                            np.reshape(corrects, [len(corrects), 1]),
                            np.reshape(err, [len(err), 1])), axis=1)
    save_data = pd.DataFrame(table)
    save_data.to_csv('%s' % (mc._RESULT_FILE), index=False, header=False)
    #save_data.to_csv('../results/results.csv',index=False,header=False)
    # append the overall accuracy (%) as the last line; context manager
    # guarantees the handle is closed even if the write fails
    with open('%s' % (mc._RESULT_FILE), 'a') as fp:
        fp.write('%f\n' % ((1.0 - np.mean(err)) * 100.0))
def transfer_model(source_df, target_df, test_df, method_flag, fold_num):
    """Train on the source task, adapt the network to the target task, and classify test data.

    Arguments
    ---------
    source_df: array-like whose first column is the label and remaining columns features (source domain).
    target_df: same layout, target-domain training folds.
    test_df:   same layout, held-out target-domain fold.
    method_flag: int, selects the label-conversion strategy (mc._SCRATCH, mc._CONV_TRANSFER,
        mc._COUNT_ATDL, or one of the relation-vector methods handled in the else branches).
    fold_num: int, current CV fold index (forwarded to the relation-vector helpers).

    Returns
    -------
    (predicted_label_indices.T, test_labels.T)
    """
    # first column is the class label, the rest are features
    source_labels, source_data = np.split(np.array(source_df),[1],axis=1)
    target_labels, target_data = np.split(np.array(target_df),[1],axis=1)
    test_labels, test_data = np.split(np.array(test_df),[1],axis=1)
    # normalization (disabled: data is used as-is)
    #normalized_source_data = pre.normalize(source_data)
    #normalized_target_data = pre.normalize(target_data)
    #normalized_test_data = pre.normalize(test_data)
    normalized_source_data = source_data
    normalized_target_data = target_data
    normalized_test_data = test_data
    ### constuct model for source domain task ###
    # optimization
    opt = Adam()
    # network setting: shared latent extractor + per-domain last layers
    latent = models.latent(normalized_source_data.shape[1])
    sll = models.source_last_layer()
    tll = models.target_last_layer()
    source_inputs = Input(shape=normalized_source_data.shape[1:])
    latent_features = latent(source_inputs)
    source_predictors = sll(latent_features)
    # NOTE: constant name "_SORUCE_LATENT_TRAIN" (sic) is as defined in the macro module
    latent.trainable = mc._SORUCE_LATENT_TRAIN
    source_predictors.trainable = True
    source_nn = Model(inputs=[source_inputs], outputs=[source_predictors])
    source_nn.compile(loss=['mean_squared_error'],optimizer=opt)
    #source_nn.summary()
    # training using source domain data (skipped entirely for scratch training)
    if method_flag != mc._SCRATCH:
        source_max_loop = int(normalized_source_data.shape[0]/mc._BATCH_SIZE)
        source_progbar = Progbar(target=mc._SOURCE_EPOCH_NUM)
        for epoch in range(mc._SOURCE_EPOCH_NUM):
            shuffle_data, shuffle_labels, _ = pre.paired_shuffle(normalized_source_data,source_labels,1)
            for loop in range(source_max_loop):
                batch_train_data = shuffle_data[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
                batch_train_labels = shuffle_labels[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
                batch_train_labels = np.reshape(batch_train_labels, [len(batch_train_labels)])
                # one-hot encode integer labels against the source label space
                one_hots = np.identity(mc._SOURCE_DIM_NUM)[np.array(batch_train_labels, dtype=np.int32)]
                loss = source_nn.train_on_batch([batch_train_data],[one_hots])
                #source_progbar.add(1, values=[("source loss",loss)])
    # save
    #latent.save('../results/source_latent.h5')
    #sll.save('../results/source_last_layer.h5')
    # compute the regression targets for the target-domain training phase
    if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
        # plain one-hot vectors in the target label space
        target_vectors = np.identity(mc._TARGET_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
        target_vectors = np.reshape(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])
    elif method_flag == mc._COUNT_ATDL:
        # relabel target classes to source classes, then one-hot in the source space
        target_labels, relations = rv.compute_relation_labels(source_nn, normalized_target_data, target_labels, fold_num)
        target_vectors = np.identity(mc._SOURCE_DIM_NUM)[np.array(target_labels, dtype=np.int32)]
        target_vectors = np.reshape(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])
    else:
        # soft relation vectors: one row per target class in the source output space
        relation_vectors = rv.compute_relation_vectors(source_nn, normalized_target_data, target_labels, fold_num, method_flag)
        target_vectors = np.zeros((len(target_labels),mc._SOURCE_DIM_NUM), dtype=np.float32)
        for i in range(len(target_labels)):
            target_vectors[i] = relation_vectors[int(target_labels[i])]
    ### tuning model for target domain task ###
    latent.trainable = mc._TARGET_LATENT_TRAIN
    target_inputs = Input(shape=normalized_target_data.shape[1:])
    latent_features = latent(target_inputs)
    if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
        # fresh target last layer; outputs live in the target label space
        predictors = tll(latent_features)
        label_num = mc._TARGET_DIM_NUM
    else:
        # reuse the source last layer; outputs live in the source label space
        predictors= sll(latent_features)
        label_num = mc._SOURCE_DIM_NUM
    target_nn = Model(inputs=[target_inputs], outputs=[predictors])
    target_nn.compile(loss=['mean_squared_error'],optimizer=opt)
    #target_nn.summary()
    # training using target domain data
    target_max_loop = int(normalized_target_data.shape[0]/mc._BATCH_SIZE)
    target_progbar = Progbar(target=mc._TARGET_EPOCH_NUM)
    for epoch in range(mc._TARGET_EPOCH_NUM):
        shuffle_data, shuffle_labels, _ = \
            pre.paired_shuffle(normalized_target_data, target_vectors, label_num)
        for loop in range(target_max_loop):
            batch_train_data = shuffle_data[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
            batch_train_labels = shuffle_labels[loop*mc._BATCH_SIZE:(loop+1)*mc._BATCH_SIZE]
            loss = target_nn.train_on_batch([batch_train_data],[batch_train_labels])
            #target_progbar.add(1, values=[("target loss",loss)])
    # compute outputs of test data of target domain
    x = target_nn.predict([normalized_test_data])
    if method_flag == mc._SCRATCH or method_flag == mc._CONV_TRANSFER:
        idx = np.argmax(x, axis=1)
    elif method_flag == mc._COUNT_ATDL:
        idx = np.argmax(x,axis=1)
        # map ground-truth target labels through the learned relation so they are
        # comparable with the source-space predictions
        for j in range(len(test_labels)):
            for i in range(mc._TARGET_DIM_NUM):
                if test_labels[j] == i:
                    test_labels[j] = relations[i]
                    break
    else:
        # classify by nearest relation vector in the source output space
        distance, idx = Neighbors(x, relation_vectors, 1)
        idx = idx[:,0]
    # release Keras/TensorFlow graph memory between folds
    backend.clear_session()
    return idx.T, test_labels.T
if __name__ == '__main__':
    # method_flag was parsed from sys.argv (as a string) at module load time
    method_flag = int(method_flag)
    main(method_flag)
| [
"compute_relation_vectors.compute_relation_vectors",
"models.source_last_layer",
"numpy.array",
"tensorflow.contrib.keras.api.keras.utils.Progbar",
"sys.exit",
"sklearn.model_selection.KFold",
"numpy.mean",
"numpy.reshape",
"numpy.random.seed",
"sklearn.neighbors.NearestNeighbors",
"tensorflow.c... | [((1089, 1100), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1097, 1100), False, 'import sys\n'), ((1373, 1387), 'load_data.load_file', 'ld.load_file', ([], {}), '()\n', (1385, 1387), True, 'import load_data as ld\n'), ((1424, 1440), 'random.seed', 'random.seed', (['(123)'], {}), '(123)\n', (1435, 1440), False, 'import random\n'), ((1443, 1462), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (1457, 1462), True, 'import numpy as np\n'), ((1472, 1531), 'sklearn.model_selection.KFold', 'KFold', ([], {'shuffle': '(False)', 'random_state': '(1)', 'n_splits': 'mc._FOLD_NUM'}), '(shuffle=False, random_state=1, n_splits=mc._FOLD_NUM)\n', (1477, 1531), False, 'from sklearn.model_selection import KFold\n'), ((1950, 1968), 'numpy.array', 'np.array', (['predicts'], {}), '(predicts)\n', (1958, 1968), True, 'import numpy as np\n'), ((1982, 2000), 'numpy.array', 'np.array', (['corrects'], {}), '(corrects)\n', (1990, 2000), True, 'import numpy as np\n'), ((2284, 2302), 'pandas.DataFrame', 'pd.DataFrame', (['test'], {}), '(test)\n', (2296, 2302), True, 'import pandas as pd\n'), ((3249, 3255), 'tensorflow.contrib.keras.api.keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (3253, 3255), False, 'from tensorflow.contrib.keras.api.keras.optimizers import Adam\n'), ((3289, 3335), 'models.latent', 'models.latent', (['normalized_source_data.shape[1]'], {}), '(normalized_source_data.shape[1])\n', (3302, 3335), False, 'import models\n'), ((3344, 3370), 'models.source_last_layer', 'models.source_last_layer', ([], {}), '()\n', (3368, 3370), False, 'import models\n'), ((3379, 3405), 'models.target_last_layer', 'models.target_last_layer', ([], {}), '()\n', (3403, 3405), False, 'import models\n'), ((3426, 3471), 'tensorflow.contrib.keras.api.keras.layers.Input', 'Input', ([], {'shape': 'normalized_source_data.shape[1:]'}), '(shape=normalized_source_data.shape[1:])\n', (3431, 3471), False, 'from tensorflow.contrib.keras.api.keras.layers import Input\n'), 
((3657, 3715), 'tensorflow.contrib.keras.api.keras.models.Model', 'Model', ([], {'inputs': '[source_inputs]', 'outputs': '[source_predictors]'}), '(inputs=[source_inputs], outputs=[source_predictors])\n', (3662, 3715), False, 'from tensorflow.contrib.keras.api.keras.models import Model\n'), ((5845, 5890), 'tensorflow.contrib.keras.api.keras.layers.Input', 'Input', ([], {'shape': 'normalized_target_data.shape[1:]'}), '(shape=normalized_target_data.shape[1:])\n', (5850, 5890), False, 'from tensorflow.contrib.keras.api.keras.layers import Input\n'), ((6167, 6218), 'tensorflow.contrib.keras.api.keras.models.Model', 'Model', ([], {'inputs': '[target_inputs]', 'outputs': '[predictors]'}), '(inputs=[target_inputs], outputs=[predictors])\n', (6172, 6218), False, 'from tensorflow.contrib.keras.api.keras.models import Model\n'), ((6436, 6472), 'tensorflow.contrib.keras.api.keras.utils.Progbar', 'Progbar', ([], {'target': 'mc._TARGET_EPOCH_NUM'}), '(target=mc._TARGET_EPOCH_NUM)\n', (6443, 6472), False, 'from tensorflow.contrib.keras.api.keras.utils import Progbar\n'), ((7475, 7498), 'tensorflow.contrib.keras.api.keras.backend.clear_session', 'backend.clear_session', ([], {}), '()\n', (7496, 7498), False, 'from tensorflow.contrib.keras.api.keras import backend, callbacks\n'), ((2705, 2724), 'numpy.array', 'np.array', (['source_df'], {}), '(source_df)\n', (2713, 2724), True, 'import numpy as np\n'), ((2777, 2796), 'numpy.array', 'np.array', (['target_df'], {}), '(target_df)\n', (2785, 2796), True, 'import numpy as np\n'), ((2845, 2862), 'numpy.array', 'np.array', (['test_df'], {}), '(test_df)\n', (2853, 2862), True, 'import numpy as np\n'), ((3968, 4004), 'tensorflow.contrib.keras.api.keras.utils.Progbar', 'Progbar', ([], {'target': 'mc._SOURCE_EPOCH_NUM'}), '(target=mc._SOURCE_EPOCH_NUM)\n', (3975, 4004), False, 'from tensorflow.contrib.keras.api.keras.utils import Progbar\n'), ((4983, 5061), 'numpy.reshape', 'np.reshape', (['target_vectors', '[target_vectors.shape[0], 
target_vectors.shape[2]]'], {}), '(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])\n', (4993, 5061), True, 'import numpy as np\n'), ((6561, 6630), 'preprocess.paired_shuffle', 'pre.paired_shuffle', (['normalized_target_data', 'target_vectors', 'label_num'], {}), '(normalized_target_data, target_vectors, label_num)\n', (6579, 6630), True, 'import preprocess as pre\n'), ((7150, 7170), 'numpy.argmax', 'np.argmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (7159, 7170), True, 'import numpy as np\n'), ((1181, 1238), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'knum', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=knum, algorithm='ball_tree')\n", (1197, 1238), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((4088, 4148), 'preprocess.paired_shuffle', 'pre.paired_shuffle', (['normalized_source_data', 'source_labels', '(1)'], {}), '(normalized_source_data, source_labels, 1)\n', (4106, 4148), True, 'import preprocess as pre\n'), ((4890, 4921), 'numpy.identity', 'np.identity', (['mc._TARGET_DIM_NUM'], {}), '(mc._TARGET_DIM_NUM)\n', (4901, 4921), True, 'import numpy as np\n'), ((4922, 4961), 'numpy.array', 'np.array', (['target_labels'], {'dtype': 'np.int32'}), '(target_labels, dtype=np.int32)\n', (4930, 4961), True, 'import numpy as np\n'), ((5130, 5220), 'compute_relation_vectors.compute_relation_labels', 'rv.compute_relation_labels', (['source_nn', 'normalized_target_data', 'target_labels', 'fold_num'], {}), '(source_nn, normalized_target_data, target_labels,\n fold_num)\n', (5156, 5220), True, 'import compute_relation_vectors as rv\n'), ((5330, 5408), 'numpy.reshape', 'np.reshape', (['target_vectors', '[target_vectors.shape[0], target_vectors.shape[2]]'], {}), '(target_vectors, [target_vectors.shape[0], target_vectors.shape[2]])\n', (5340, 5408), True, 'import numpy as np\n'), ((5439, 5543), 'compute_relation_vectors.compute_relation_vectors', 'rv.compute_relation_vectors', (['source_nn', 
'normalized_target_data', 'target_labels', 'fold_num', 'method_flag'], {}), '(source_nn, normalized_target_data,\n target_labels, fold_num, method_flag)\n', (5466, 5543), True, 'import compute_relation_vectors as rv\n'), ((7218, 7238), 'numpy.argmax', 'np.argmax', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (7227, 7238), True, 'import numpy as np\n'), ((5237, 5268), 'numpy.identity', 'np.identity', (['mc._SOURCE_DIM_NUM'], {}), '(mc._SOURCE_DIM_NUM)\n', (5248, 5268), True, 'import numpy as np\n'), ((5269, 5308), 'numpy.array', 'np.array', (['target_labels'], {'dtype': 'np.int32'}), '(target_labels, dtype=np.int32)\n', (5277, 5308), True, 'import numpy as np\n'), ((2551, 2563), 'numpy.mean', 'np.mean', (['err'], {}), '(err)\n', (2558, 2563), True, 'import numpy as np\n'), ((4457, 4488), 'numpy.identity', 'np.identity', (['mc._SOURCE_DIM_NUM'], {}), '(mc._SOURCE_DIM_NUM)\n', (4468, 4488), True, 'import numpy as np\n'), ((4489, 4533), 'numpy.array', 'np.array', (['batch_train_labels'], {'dtype': 'np.int32'}), '(batch_train_labels, dtype=np.int32)\n', (4497, 4533), True, 'import numpy as np\n')] |
def plot():
    """Render z = x**2 - y**2 over the unit square as a gouraud-shaded
    pcolormesh and return the matplotlib figure."""
    import matplotlib.cm as cm
    import matplotlib.pyplot as plt
    import numpy as np

    axis = np.linspace(0, 1)
    xx, yy = np.meshgrid(axis, axis)
    zz = xx * xx - yy * yy
    fig = plt.figure()
    plt.pcolormesh(xx, yy, zz, cmap=cm.viridis, shading="gouraud")
    # plt.colorbar()
    return fig
def test():
    # assert_equality presumably renders plot() and compares the output against
    # the stored "<this file>_reference.tex" next to this test — verify in helpers.
    from .helpers import assert_equality

    # test relative data path
    assert_equality(
        plot,
        __file__[:-3] + "_reference.tex",
        # tex_relative_path_to_data="data/files"
    )
| [
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.pcolormesh"
] | [((195, 207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (205, 207), True, 'import matplotlib.pyplot as plt\n'), ((212, 271), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['x', 'y', 'z'], {'cmap': 'cm.viridis', 'shading': '"""gouraud"""'}), "(x, y, z, cmap=cm.viridis, shading='gouraud')\n", (226, 271), True, 'import matplotlib.pyplot as plt\n'), ((126, 143), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {}), '(0, 1)\n', (137, 143), True, 'import numpy as np\n'), ((145, 162), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {}), '(0, 1)\n', (156, 162), True, 'import numpy as np\n')] |
from numpy.polynomial import Polynomial
def legendres(n: int):
    """Return the Legendre polynomials P_0 .. P_n as numpy Polynomial objects.

    Built with Bonnet's recursion: n*P_n = (2n - 1)*x*P_{n-1} - (n - 1)*P_{n-2}.

    Args:
        n: highest degree to generate; must be >= 0.

    Returns:
        A list of n + 1 Polynomial instances, index i holding P_i.

    Raises:
        ValueError: if n is negative.  (ValueError is a subclass of Exception,
            so callers catching the previous generic Exception still work.)
    """
    if n < 0:
        raise ValueError("n must be >= 0")
    x = Polynomial([0, 1])
    # seed with P_0 = 1 and P_1 = x; the final slice trims P_1 when n == 0
    polys = [Polynomial([1]), x]
    for i in range(2, n + 1):
        polys.append((2 * i - 1) / i * x * polys[i - 1] - (i - 1) / i * polys[i - 2])
    return polys[:n + 1]
| [
"numpy.polynomial.Polynomial"
] | [((130, 148), 'numpy.polynomial.Polynomial', 'Polynomial', (['[0, 1]'], {}), '([0, 1])\n', (140, 148), False, 'from numpy.polynomial import Polynomial\n'), ((158, 173), 'numpy.polynomial.Polynomial', 'Polynomial', (['[1]'], {}), '([1])\n', (168, 173), False, 'from numpy.polynomial import Polynomial\n')] |
"""
The implementation of the LocalOutlierFactor for anomaly detection.
Authors:
<NAME>
Reference:
LOF: Identifying Density-Based Local Outliers, by <NAME>, <NAME>, <NAME>, <NAME>.
"""
import numpy as np
from sklearn.neighbors import LocalOutlierFactor as LOF
from ..utils import metrics
class LocalOutlierFactor(LOF):
    """scikit-learn LocalOutlierFactor adapted to this package's label convention:
    predict() returns 0 for normal instances and 1 for anomalies."""

    def __init__(self, n_neighbors=10, algorithm='auto', contamination='auto', leaf_size=30, metric='minkowski', p=2):
        """
        Arguments
        ---------
        n_neighbors: int, default=10. Number of neighbors for the local density estimate.
        algorithm: {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        contamination: 'auto' or float, default='auto'. Expected proportion of outliers.
        leaf_size: int, default=30. Leaf size passed to the tree-based neighbor search.
        metric: str or callable, default='minkowski'
        p: int, default=2. Minkowski metric parameter (p=2 is Euclidean distance).

        Reference
        ---------
        For more information, please visit https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.LocalOutlierFactor.html
        """
        super(LocalOutlierFactor, self).__init__(n_neighbors=n_neighbors, algorithm=algorithm, contamination=contamination, leaf_size=leaf_size, metric=metric, p=p)

    def fit(self, X, y=None):
        """Fit the LOF model on the training matrix.

        Arguments
        ---------
        X: ndarray, the event count matrix of shape num_instances-by-num_events
        y: ignored; present for scikit-learn API compatibility.
        """
        print('====== Model summary ======')
        super(LocalOutlierFactor, self).fit(X)

    def predict(self, X):
        """Predict anomaly labels for X.

        Arguments
        ---------
        X: the input event count matrix

        Returns
        -------
        y_pred: ndarray, the predicted label vector of shape (num_instances,)
        """
        # sklearn only exposes predict() in novelty mode, so the flag is flipped
        # on the already-fitted estimator to allow scoring new data.
        self.novelty = True
        y_pred = super(LocalOutlierFactor, self).predict(X)
        # sklearn returns +1 for inliers and -1 for outliers; remap to 0/1.
        y_pred = np.where(y_pred > 0, 0, 1)
        return y_pred

    def evaluate(self, X, y_true):
        """Predict on X and report precision / recall / F1 against y_true.

        Returns
        -------
        (precision, recall, f1) as floats.
        """
        print('====== Evaluation summary ======')
        y_pred = self.predict(X)
        precision, recall, f1 = metrics(y_pred, y_true)
        print('Precision: {:.3f}, recall: {:.3f}, F1-measure: {:.3f}\n'.format(precision, recall, f1))
        return precision, recall, f1
| [
"numpy.where"
] | [((2304, 2330), 'numpy.where', 'np.where', (['(y_pred > 0)', '(0)', '(1)'], {}), '(y_pred > 0, 0, 1)\n', (2312, 2330), True, 'import numpy as np\n')] |
# This is the new generation code. It creates 2 tables: one for all wordforms,
# the other for the noun-lemmas, and populates it with different information.
# Namely, 1. wordform, POS (the previous 2 uniquelly identify the entry),
# reference id to the noun-lemma it gets converted to.
# import timing
import xlrd
import nltk
from nltk.corpus import wordnet as wn
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tag.mapping import tagset_mapping, map_tag
from nltk.stem import WordNetLemmatizer
# Shared WordNet-based lemmatizer instance, created once at import time.
wnl = WordNetLemmatizer()
# Functions to generate wordfroms and noun-lemmas they convert to. from wordforms.py
def wordnet_word(word):
    """Return True if `word` has at least one synset in WordNet, else False."""
    from nltk.corpus import wordnet as wn
    # bool() collapses the old if/else that returned True/False explicitly
    return bool(wn.synsets(word))
def wordnet_tag(word):
    """Map `word` to a coarse POS tag when WordNet knows it under exactly one
    part of speech.

    Returns 'NN', 'VB', 'JJ' or 'RB' when only the corresponding WordNet POS
    ('n', 'v', 'a', 'r') has synsets for the word, and None when the word is
    unknown or ambiguous across parts of speech — identical to the original
    four-way exclusive condition chain.
    """
    from nltk.corpus import wordnet as wn
    # one probe per WordNet part of speech; collect the tags that matched
    hits = [tag for pos, tag in (('n', 'NN'), ('v', 'VB'), ('a', 'JJ'), ('r', 'RB'))
            if wn.synsets(word, pos=pos)]
    # unambiguous only: exactly one POS category may have synsets
    return hits[0] if len(hits) == 1 else None
def hyphenated_word(word):
    """Return True if `word` contains a hyphen that is not in leading position.

    The original `word[0] != '-'` check raised IndexError for the empty
    string; `startswith` is safe and otherwise equivalent.
    """
    return '-' in word and not word.startswith('-')
def wordent_hyphenation(words):
    """Resolve hyphenated words in `words` against WordNet, mutating and
    returning the same list.

    For each hyphenated entry: keep it if WordNet knows it as-is, otherwise
    try the de-hyphenated spelling, otherwise split it on '-' and splice the
    parts back into the list in place.
    """
    # This function takes a list of words and deals with hyphenated words in
    # this list, checking if each of the single words is in WordNet
    i = 0
    while i < len(words): #word in words:
        # NOTE(review): bare except only logs and then reuses the stale `word`
        # from the previous iteration — confirm this IndexError path is reachable
        try: word = words[i]
        except: print(i, words)
        if hyphenated_word(word):
            if wordnet_word(word):
                i = i + 1
            elif wordnet_word(word.replace('-', '')):
                words[i] = word.replace('-', '')
                i = i + 1
            else: # replace word by wordbeforehyphen, wordafterhyphen
                hyphen_words = word.split('-')
                word1 = hyphen_words[0]
                word2 = hyphen_words[1]
                # print(word, word1, word2)
                words[i] = word1
                words.insert(i+1, word2)
                if len(hyphen_words) > 2:
                    # splice in the remaining parts; i jumps past all of them
                    # (plus the final i += 1 below), so spliced parts are not re-checked
                    for k in range(2,len(hyphen_words)):
                        words.insert(i+k, hyphen_words[k])
                    i = i + len(hyphen_words)
        i = i + 1
    # Need to remove the instances of '' from the list which could appear
    # (splitting e.g. "a--b" or a trailing hyphen yields empty fragments)
    a = 0
    while a < 1:
        try:
            words.remove('')
        except:
            a = 1
    return words
def derivational_conversion(word, from_pos, to_pos):
synsets = wn.synsets(word, pos=from_pos)
# Word not found
if not synsets:
return []
# Get all lemmas of the word (consider 'a'and 's' equivalent)
lemmas = []
for s in synsets:
for l in s.lemmas():
if s.name().split('.')[1] == from_pos or from_pos in ('a', 's') and s.name().split('.')[1] in ('a', 's'):
lemmas += [l]
# Get related forms
derivationally_related_forms = [(l, l.derivationally_related_forms()) for l in lemmas]
# filter only the desired pos (consider 'a' and 's' equivalent)
related_noun_lemmas = []
for drf in derivationally_related_forms:
for l in drf[1]:
if l.synset().name().split('.')[1] == to_pos or to_pos in ('a', 's') and l.synset().name().split('.')[1] in ('a', 's'):
related_noun_lemmas += [l]
# Extract the words from the lemmas
words = [l.name() for l in related_noun_lemmas]
# print(words)
len_words = len(words)
# Build the result in the form of a list containing tuples (word, probability)
used = set()
unique_list = [x for x in words if x not in used and (used.add(x) or True)]
# unique_list = set(words) # added this line for testing and edited it in the follwoing line as well
# print(unique_list) # to check if the order stays the same
result = [(w, float(words.count(w)) / len_words) for w in unique_list]
result.sort(key=lambda w:-w[1]) # result = sorted(result, key=lambda w:-w[1])# Changed the original to keep the order intact over re-runs of the the code. The original version: result.sort(key=lambda w:-w[1])
return result
def nounalike_conversion(word, from_pos):
    """Check whether the head word of the first synset of `word` (looked up
    under `from_pos`) also exists in WordNet as a noun.

    Returns [(candidate, 1)] when such an identically spelled noun exists,
    otherwise [].
    """
    matches = wn.synsets(word, from_pos)
    if not matches:
        return []
    # head word of the first sense, e.g. 'run.v.01' -> 'run'
    candidate = matches[0].name().split('.')[0]
    if wn.synsets(candidate, pos='n'):
        return [(candidate, 1)]
    return []
def attribute_conversion(word, from_pos):
    """Convert `word` to a noun via WordNet's attribute() relation.

    Gathers the attribute synsets of every sense of `word` (looked up under
    `from_pos`, typically an adjective) and returns [(noun, 1)] for the first
    attribute whose head word exists as a noun in WordNet, or [] if none does.
    """
    senses = wn.synsets(word, from_pos)
    if not senses:
        return []
    # collect attribute synsets across all senses, warning on multi-attribute senses
    collected = []
    for sense in senses:
        attrs = sense.attributes()
        if len(attrs) > 1:
            print('There is more thant 1 attribute: ', sense, attrs)
        collected += attrs
    # first attribute with a noun reading wins
    for attr in collected:
        headword = attr.name().split('.')[0]
        if wn.synsets(headword, pos='n'):
            return [(headword, 1)]
    return []
def convert_similartos(word, from_pos):
    """ Transform a word into nouns via its similar synsets (similar_tos).

    Collects the synsets similar to each sense of *word*, keeps the
    lemmas whose POS matches *from_pos* (treating 'a' and 's' as
    equivalent), follows their derivationally related noun forms, and
    returns a list of (noun, relative-frequency) tuples sorted by
    frequency (highest first).
    """
    source_synsets = wn.synsets(word, from_pos)
    if not source_synsets:
        # Word not found in WordNet for this POS.
        return []
    # Gather every synset marked "similar to" one of the word's senses.
    similar_synsets = []
    for syn in source_synsets:
        similar_synsets += syn.similar_tos()
    # Keep lemmas of similar synsets whose POS matches ('a' == 's').
    matching_lemmas = []
    for syn in similar_synsets:
        syn_pos = syn.name().split('.')[1]
        pos_matches = syn_pos == from_pos or (from_pos in ('a', 's') and syn_pos in ('a', 's'))
        if pos_matches:
            matching_lemmas += syn.lemmas()
    # Follow derivationally related forms and keep only the nouns.
    related_noun_lemmas = []
    for lemma in matching_lemmas:
        for related in lemma.derivationally_related_forms():
            if related.synset().name().split('.')[1] == 'n':
                related_noun_lemmas.append(related)
    # Turn the lemma list into (word, probability) pairs, preserving
    # first-occurrence order so re-runs produce identical output.
    words = [lemma.name() for lemma in related_noun_lemmas]
    total = len(words)
    seen = set()
    ordered_unique = []
    for w in words:
        if w not in seen:
            seen.add(w)
            ordered_unique.append(w)
    result = [(w, float(words.count(w)) / total) for w in ordered_unique]
    # Stable sort by descending probability keeps ties in insertion order.
    result.sort(key=lambda pair: -pair[1])
    return result
def convert_pertainym(word):
    """ Transform an adverb into its pertainym adjective(s).

    Looks up *word* as an adverb, follows the pertainym relation of every
    lemma, keeps the adjective results ('a' or 's' synsets), and returns
    a list of (adjective, relative-frequency) tuples sorted by frequency.
    """
    synsets = wn.synsets(word, 'r')
    if not synsets:
        # Word not found in WordNet as an adverb.
        return []
    # All lemmas across every adverb sense.
    all_lemmas = []
    for syn in synsets:
        all_lemmas += syn.lemmas()
    # Follow pertainyms and keep only adjective lemmas.
    related_adj_lemmas = []
    for lemma in all_lemmas:
        for pert in lemma.pertainyms():
            pert_pos = pert.synset().name().split('.')[1]
            if pert_pos in ['a', 's']:
                related_adj_lemmas.append(pert)
            else:
                # Unexpected POS: report it but keep going.
                print('Pertainym for the word is not an adjectif: ', word, pert_pos)
    # Build (word, probability) pairs, preserving first-occurrence order
    # so repeated runs give identical output.
    words = [lemma.name() for lemma in related_adj_lemmas]
    total = len(words)
    seen = set()
    ordered_unique = []
    for w in words:
        if w not in seen:
            seen.add(w)
            ordered_unique.append(w)
    result = [(w, float(words.count(w)) / total) for w in ordered_unique]
    # Stable sort by descending probability keeps ties in insertion order.
    result.sort(key=lambda pair: -pair[1])
    return result
def convert_to_noun(word, from_pos):
    """ Transform words given from/to POS tags.

    Runs a cascade of WordNet conversion strategies and returns the first
    non-empty result: a list of (noun, probability) tuples sorted by
    probability, or [] when no strategy succeeds.
    """
    # Quantifiers have no useful derivations; substitute 'many'.
    if word.lower() in ['most', 'more'] and from_pos == 'a':
        word = 'many'
    synsets = wn.synsets(word, pos=from_pos)
    # Word not found
    if not synsets:
        return []
    # Strategy 1: derivationally related noun forms.
    result = derivational_conversion(word, from_pos, 'n')
    # Strategy 2: the WordNet attribute relation (adjective -> attribute noun).
    if len(result) == 0:
        result = attribute_conversion(word, from_pos)
    # Strategy 3: '-ed' words may be past participles; retry as a verb.
    if len(result) == 0 and word[-2:].lower() == 'ed' and from_pos != 'v':
        result = derivational_conversion(word, 'v', 'n')
    # Strategy 4: go through similar adjectives (similar_tos).
    if len(result) == 0:
        result = convert_similartos(word, from_pos)
    # Strategy 5 (adverbs only): map to candidate adjectives via pertainyms
    # and run the same cascade on each until one yields a noun.  The
    # 'else: break' lines stop at the first successful strategy.
    if len(result) == 0 and from_pos == 'r': # working with pertainyms
        adj_words = convert_pertainym(word)
        for adj in adj_words:
            word_a = adj[0]
            result = derivational_conversion(word_a, 'a', 'n')
            if len(result) == 0:
                result = attribute_conversion(word_a, 'a')
            else: break
            if len(result) == 0 and word_a[-2:].lower() == 'ed' and from_pos != 'v':
                result = derivational_conversion(word_a, 'v', 'n')
            else: break
            if len(result) == 0:
                result = convert_similartos(word_a, 'a')
            else: break
    # Strategy 6 (last resort): accept an identically spelled noun.
    if len(result) == 0:
        result = nounalike_conversion(word, from_pos)
    # return all the possibilities sorted by probability
    return result
def nounify(word, tag):
    """ Return the best noun for (word, tag), or None when none is found.

    First tries the WordNet conversion cascade; when that fails, falls
    back to a hand-curated table of words WordNet cannot convert, a few
    pattern rules, and (for adjectives) a retry as a verb.
    """
    candidates = convert_to_noun(word, tag)
    if candidates:
        return candidates[0][0]
    # Case-sensitive special case kept separate from the lowercase table.
    if word == 'visuals':
        return 'picture'
    # Hand-curated fallbacks for words the WordNet cascade cannot handle.
    special_nouns = {
        'gameplay': 'game',
        'personalization': 'individualization',
        'coworker': 'co-worker',
        'coworkers': 'co-workers',
        'sans': 'font',
        'microsoft': 'corporation',
        'ios': 'software',
        'powerpoint': 'programme',
        'youtube': 'website',
        'hodge': 'surname',
    }
    lowered = word.lower()
    if lowered in special_nouns:
        return special_nouns[lowered]
    # Compound nouns containing 'thing' collapse to 'thing'.
    if tag == 'n' and 'thing' in lowered:
        return 'thing'
    # Indefinite pronouns tagged as nouns map to 'person'.
    if tag[0] == 'n' and lowered in ['anyone', 'everyone', 'anybody', 'everybody']:
        return 'person'
    # Adjectives that failed may still convert when treated as verbs.
    if tag == 'a':
        candidates = convert_to_noun(word, 'v')
        if candidates:
            return candidates[0][0]
        return None
    return None
def wordformtion(text):
    """Tokenize *text* and count its wordforms.

    Returns a dict keyed by (lowercased word, POS tag) mapping to the
    number of occurrences in the whole text.  Sentences are tokenized
    with NLTK, tokenization artifacts (stray quotes/hyphens) are
    stripped, hyphenated words are re-split via wordent_hyphenation
    (defined elsewhere in this file), a large set of hand-made POS-tag
    corrections is applied on top of nltk.pos_tag, and auxiliary verbs
    are re-tagged as 'AU'.
    """
    import nltk
    from nltk.tokenize import sent_tokenize, word_tokenize
    import numpy as np
    from nltk.stem import PorterStemmer
    porter = PorterStemmer()
    sentences = sent_tokenize(text)
    wordforms = dict() # (word, POS) -> occurrence count over the whole text
    word_wordforms = dict() # The wordforms whithout punctuation, CC, DT, EX, IN
    for sentence in sentences:
        words = word_tokenize(sentence)
        indexes_todelete = [] # indices of tokenization artifacts to remove
        # Merge a trailing lone quote into the previous quoted token.
        for i in range(1, len(words)):
            if words[i-1][0] == "'" and words[i] == "'":
                words[i-1] = words[i-1][1:]
                indexes_todelete = indexes_todelete + [i]
        words = np.delete(words, indexes_todelete).tolist()
        # Strip leading quotes/hyphens left over from tokenization.
        for i in range(1, len(words)):
            word = words[i]
            # NOTE(review): the first branch slices word[1:-2], which drops
            # the character before the closing quote too -- looks like
            # word[1:-1] was intended; confirm before changing.
            if word[0] == "'" and word[-1] == "'": words[i] = word[1:-2]
            elif word[0] == "'" and word != "'": words[i] = word[1:]
            elif word[0] == '-' and word != '-': words[i] = word[1:]
        words = list(filter(None, words))
        # Re-split hyphenated words (helper defined elsewhere in this file).
        words = wordent_hyphenation(words)
        try:
            nltk_tagged = nltk.pos_tag(words)
        except:
            print('NLTK-tagging fails on the following sentence: ', words)
            continue
        a = 0 # counts consecutive verb-ish tokens to detect auxiliary verbs
        for word, tag in nltk_tagged:
            # Hand-made corrections to NLTK's POS tagging, collected from
            # inspecting this particular corpus.
            if word.lower() in ['sans', 'microsoft', 'powerpoint', 'youtube', 'ios']:
                tag = 'NNP'
            elif word.lower() in ['app', 'pt', 'neck', 'bottom', 'font', 'kind', 'stiff', 'collar']:
                tag = 'NN'
            elif word.lower() in ['apps', 'thumbs', 'drawbacks']:
                tag = 'NNS'
            elif word.lower() in ['wow', 'aw']:
                tag = 'UH'
            elif tag == 'NNP' and word.lower() in ['boy']:
                tag = 'UH'
            elif word.lower() in ['weird', 'overall', 'great', 'ok', 'stupid', 'okay', 'perfect', 'ok.', 'full']:
                tag = 'JJ'
            elif tag[:2] == 'VB' and word.lower() in ['potential', 'routine', 'ping']:
                tag = 'NN'
            elif tag[:2] == 'VB' and word.lower() in ['deep', 'straight', 'simple', 'stiff', 'groundbreaking', 'good', 'handy', 'specific', 'daily', 'glad', 'sore', 'quick', 'sobering', 'fun']:
                tag = 'JJ'
            elif tag[:2] == 'VB' and word.lower() in ['more', 'sideways']:
                tag = 'RB'
            elif tag[:2] == 'JJ' and word.lower() in ['web']:
                tag = 'NN'
            elif tag[:2] == 'JJ' and word.lower() in ['aside']:
                tag = 'RB'
            elif tag[:2] == 'RB' and word.lower() in ['silly', 'friendly', 'sore', 'nice']:
                tag = 'JJ'
            elif tag[:2] == 'RB' and word.lower() in ['neck', 'strain', 'winter', 'pain', 'flows']:
                tag = 'NN'
            elif tag[:2] == 'NN' and word.lower() in ['begin', 'do', 'clasp', 'say']:
                tag = 'VB'
            elif tag == 'NNS' and word.lower() in ['uses', 'teaches', 'eases']:
                tag = 'VBZ'
            elif tag[0] == 'N' and word.lower() in ['saved', 'developed']:
                tag = 'VBD'
            elif tag[0] == 'N' and word.lower() in ['described']:
                tag = 'VBN'
            elif tag[0] == 'N' and word.lower() in ['buzzing', 'matching', 'crashing', 'staring']:
                tag = 'VBG'
            elif tag[0] == 'N' and word.lower() in ['soothing', 'condescending', 'entertaining', 'amazing', 'relaxing', 'challenging', 'interesting', 'confusing', 'damaging', 'nagging', 'changing', 'decent', 'easy', 'slow', 'relaxed', 'sure', 'goofy', 'quick']:
                tag = 'JJ'
            elif tag[0] == 'N' and word.lower() in ['quicker']:
                tag = 'JJR'
            elif tag[0] == 'N' and word.lower() in ['pretty', 'anytime', 'forth', 'first']:
                tag = 'RB'
            elif tag[0] == 'N' and word.lower() in ['towards', 'about']:
                tag = 'IN'
            elif tag[0] in ['N', 'V'] and word.lower() in ['ourselves', 'myself']:
                tag = 'PRP'
            elif tag[0] == 'N' and word.lower() in ['yours']:
                tag = 'PRP$'
            elif tag[0] == 'V' and word.lower() in ['everything']:
                tag = 'NN'
            elif tag[0] == 'V' and word.lower() in ['easy', 'tight']:
                tag = 'JJ'
            elif tag[0] == 'V' and word.lower() in ['that']:
                tag = 'PR'
            elif word.lower() == 'alright':
                tag = 'RB'
            elif tag[:2] != 'NN' and word.lower() in ['neck']:# , 'workday', 'workplace', 'desk']:
                tag = 'NN'
            # Fall back to WordNet's own POS when it disagrees with NLTK;
            # NN disagreements are mostly skipped because many abbreviations
            # and function words are absent from WordNet (exception: 'font').
            elif len(word) > 2 and wordnet_tag(word) != None and (tag[:2] != wordnet_tag(word) or word == 'font') and (wordnet_tag(word) != 'NN' or 'font' in word):
                tag = wordnet_tag(word)
            elif len(word) > 2 and wordnet_tag(word) != None and (tag[:2] != wordnet_tag(word) or word.lower() == 'fun') and wordnet_tag(word) == 'NN' and word not in ['might']:
                # Retag as a noun unless the word is one of the known
                # WordNet false positives listed below.
                if word.lower() not in ['why', 'its', 'who', 'may', 'yes', 'tho', 'while', 'otter', 'upside', 'genius', 'despite', 'sceptic', 'lifesaving']:
                    tag = wordnet_tag(word)
            if a >= 1 and tag[0] == 'V': # handling auxiliary verbs
                # The previous verb turned out to be an auxiliary: undo its
                # count under the old tag and re-count it as 'AU'.
                wordforms[(word_prev, tag_prev)] -= 1
                wordforms[(word_prev, 'AU')] = wordforms.get((word_prev, 'AU'), 0) + 1
            if tag[0] == 'V' and word.lower() in ['am', "m", "'m", 'is', "s", "'s", 'are', "re", "'re", 'was', 'were', 'being', 'been', 'be', 'have', "ve", "'ve", 'has', 'had', 'd', "'d", 'do', 'does', 'did', 'will', 'll', "'ll", 'would', 'shall', 'should', 'may', 'might', 'must', 'can', 'could']:
                a += 1 # a marker to memorize that the word was a verb to handle auxiliary verbs
                if a > 3:
                    # Guard against runaway auxiliary chains.
                    a = 0
                    break
                tag_prev = tag
                word_prev = word.lower()
            elif a >= 1 and (tag[:2] == 'RB' or word.lower() in ['not' , "n't", 't', "'t"]):
                # Adverbs/negations between auxiliary and main verb keep
                # the auxiliary chain alive.
                a += 1
            else: a = 0
            wordforms[(word.lower(), tag)] = wordforms.get((word.lower(), tag), 0) + 1
    return wordforms
def noun_lemmas(wordforms):
    """This function receves as input a dictionary of wordforms and outputs the corresponding noun-lemmas as a dictionary with wordform(word, tag) as key and the noun-lemma as the value.

    Returns a tuple (all_nouns, wordforms_notinWordNet):
    all_nouns maps each convertible (word, tag) key to its noun lemma
    (a string, or a list of strings for compounds like 'sideways');
    wordforms_notinWordNet lists the (word, tag) keys that qualified for
    conversion but could not be converted.
    """
    all_nouns = dict()
    wordforms_notinWordNet = []
    for w in wordforms:
        word = w[0]
        tag = w[1]
        # Expand clitic verb forms before any lookup.
        if tag[:2] == 'VB' and word == 're':
            word = 'are'
        elif tag[:2] == 'VB' and word == 've':
            word = 'have'
        # Qualify the word: longer than 2 chars, or a number, or one of a
        # few short whitelisted words; short noun abbreviations pass too.
        if ((len(word) > 2 or tag == 'CD' or (tag != 'AU' and word in ['be', 'do', 'go', 'ad', 'is', 'am'])) and word != "n't") or (tag[:2] == 'NN' and word.lower() in ['pc', 'pt', 'ms']):
            # Hand-curated replacements for words whose direct WordNet
            # conversion fails or gives a poor lemma.
            if word in ['app', 'apps']:
                word_rep = 'application'
            elif tag[:2] == 'NN' and word in ['pt']: # ;wnl.lemmatize doesn't work on double words
                word_rep = 'therapist'
            elif tag == 'NNP' and word.lower() in ['ms']: # ;wnl.lemmatize doesn't work on double words
                word_rep = 'microsoft'
            elif tag[:2] == 'JJ' and word in ['ok', 'ok.']: # ;wnl.lemmatize doesn't work on double words
                word_rep = 'satisfactoriness'
            elif word in ['ios']: # ;wnl.lemmatize doesn't work on double words
                word_rep = 'software'
            elif 'smartphone' in word:
                word_rep = 'phone'
            elif tag == 'NNP' and word == 'kevin':
                word_rep = 'person'
            elif tag[0] == 'N' and word in ['others']:
                word_rep = 'people'
            elif 'redesign' in word:
                word_rep = 'design'
            elif 'restructure' in word:
                word_rep = 'structure'
            elif 'realign' in word:
                word_rep = 'align'
            elif tag[0] == 'N' and word == 'rhyming':
                word_rep = 'rhyme'
            elif 'download' in word:
                word_rep = 'transfer'
            elif 'customize' in word:
                word_rep = 'custom'
            elif 'thank' in word:
                word_rep = 'thanks'
            elif 'keyboarding' in word:
                word_rep = 'keyboard'
            elif 'multitasking' in word:
                word_rep = 'task'
            elif 'off-putting' in word:
                word_rep = 'appeal'
            elif 'inexcusable' in word:
                word_rep = 'excuse'
            elif tag[:2] == 'VB' and word == 'due':
                word_rep = 'do'
            elif tag[0] == 'V' and 'enable' in word:
                word_rep = 'ability'
            elif tag[0] == 'J' and word == 'unorganized':
                word_rep = 'organization'
            elif tag[0] == 'J' and word == 'hypermobile':
                word_rep = 'mobility'
            elif tag[0] == 'J' and word == 'memorable':
                word_rep = 'memory'
            elif tag[0] == 'J' and word == 'delightful':
                word_rep = 'delight'
            elif tag[0] == 'J' and word == 'optional':
                word_rep = 'option'
            elif tag[0] == 'J' and word == 'outdated':
                word_rep = 'date'
            elif tag[0] == 'J' and word == 'positional':
                word_rep = 'position'
            elif tag[0] == 'J' and word == 'unfocused':
                word_rep = 'focus'
            elif tag[0] == 'J' and word == 'descriptive':
                word_rep = 'description'
            # Temporal adverbs collapse to 'time'.
            elif word in ['never', 'once', 'already', 'full-time', 'ever', 'initially', 'again', 'sometimes', 'before', 'yet', 'soon', 'ahead', 'anytime', 'eventually', 'finally', 'ago', 'throughout']:
                word_rep = 'time'
            elif tag[:2] == 'RB' and word in ['prior']:
                word_rep = 'time'
            elif word in ['maybe', 'perhaps']:
                word_rep = 'possibility'
            elif tag == 'RB' and word in ['quite', 'bit', 'far']:
                word_rep = 'extent'
            elif tag == 'RB' and word in ['long']:
                word_rep = 'length'
            elif tag[0] == 'R' and word == 'simply':
                word_rep = 'simplicity'
            elif tag[0] == 'R' and word == 'professionally':
                word_rep = 'profession'
            elif tag[0] == 'R' and word == 'supposedly':
                word_rep = 'supposition'
            elif tag[0] == 'R' and word == 'undoubtedly':
                word_rep = 'doubt'
            elif tag[0] == 'R' and word == 'continually':
                word_rep = 'continuity'
            elif tag[0] == 'R' and word == 'safely':
                word_rep = 'safety'
            elif tag[0] == 'R' and word == 'routinely':
                word_rep = 'routine'
            elif tag[0] == 'R' and word == 'additionally':
                word_rep = 'addition'
            elif tag[0] == 'R' and word == 'namely':
                word_rep = 'name'
            elif tag[0] == 'R' and word == 'periodically':
                word_rep = 'period'
            elif tag[0] == 'R' and word == 'relaxed':
                word_rep = 'relaxation'
            elif word in ['another', 'every', 'both', 'either', 'together', 'anymore', 'almost', 'else']:
                word_rep = 'number'
            elif word in ['visually']:
                word_rep = 'vision'
            elif tag[0] == 'R' and word in ['most', 'more']:
                word_rep = 'group'
            elif tag[0] == 'R' and word in ['around', 'away', 'elsewhere', 'wherever', 'anywhere', 'between', 'sidewards', 'forth']:
                word_rep = 'place'
            elif tag[0] == 'R' and word in ['loose']:
                word_rep = 'looseness'
            elif tag[:2] == 'RB' and word in ['lighter']:
                word_rep = 'lightness'
            else:
                word_rep = word
            noun = None # pre-assign the variable noun to None
            # Nouns/numbers: accept the lemmatized word directly when
            # WordNet knows it as a noun.
            if (tag[0] == 'N' or tag == 'CD') and wn.synsets(wnl.lemmatize(word_rep,'n'), pos='n') != []:
                noun = wnl.lemmatize(word_rep,'n')
            elif 'sideway' in word_rep:
                # Compound: recorded as two lemmas.
                noun = ['side', 'way']
            elif tag[0] in ['N', 'V', 'J', 'R'] and tag != 'RP': # Added on 20200520 "and tag != 'RP'" to exclude Particles
                # Otherwise go through the WordNet conversion cascade.
                short_tag = tag[0].lower() # generate a short-tag from POS tag
                if short_tag == 'j': short_tag = 'a'
                noun = nounify(word_rep, short_tag) # prints out word and short_tag if not found in Wordnet
                if noun == None and word_rep not in ['also', 'not', 'just', 'too', 'instead', 'only', 'very', 'rather', 'however', 'esque', 'but', 'anyway', 'furthermore', 'about', 'though', 'regardless', 'alright', 'further', 'mostly', 'anyways', 'nonetheless', 'virtually', 'beyond', 'along', 'alongside', 'somehow']:
                    # Last resort: accept the lemmatized word if WordNet
                    # knows it as a noun (with two known false positives).
                    if wn.synsets(wnl.lemmatize(word_rep,'n'), pos='n') != [] and word not in ['tho', 'otter']:
                        noun = wnl.lemmatize(word_rep,'n')
            # Record failures (for reporting) and successes (the mapping).
            if tag[:2] in ['NN', 'VB', 'JJ', 'RB', 'CD'] and noun == None and word_rep not in ['also', 'not', 'just', 'too', 'instead', 'only', 'very', 'rather', 'however', 'esque', 'but', 'anyway', 'furthermore', 'about', 'though', 'regardless', 'alright', 'further', 'mostly', 'anyways', 'nonetheless', 'virtually', 'beyond', 'along', 'alongside', 'somehow', 'thus']:
                wordforms_notinWordNet = wordforms_notinWordNet + [w]
            elif noun != None:
                all_nouns[w] = noun
    return all_nouns, wordforms_notinWordNet
# Now lets define the fuctions to find both hypernym and hyponym depth.
def hypernym_depth(word, postag):
    """Minimum WordNet depth of the first synset of the lemmatized word.

    Note: the first synset is an arbitrary sense choice; a smarter
    sense-selection strategy could be substituted here.
    """
    lemma = wnl.lemmatize(word, postag)
    first_sense = wn.synsets(lemma, postag)[0]
    return first_sense.min_depth()
#wn.synset('car.n.01').min_depth()
# Now let's create a table with verbs inside our database and populate it with their respective depth values
import sqlite3
import shutil # used to duplicate the database file so we can read one copy while writing the other
# Open a read connection to the source database.
conn = sqlite3.connect('Liars7_clean_tr20200618.sqlite')
# Cursor used to traverse the source database, line by line.
cur = conn.cursor()
# Duplicate the database so one instance can be looped over while the
# other is edited, then open a write connection to the duplicate.
shutil.copyfile('Liars7_clean_tr20200618.sqlite', 'Liars7_w.sqlite')
conn_w = sqlite3.connect('Liars7_w.sqlite')
cur_w = conn_w.cursor()
# First, move the Brysbaert concreteness norms spreadsheet into SQL.
# Ask for the excel file name; fall back to the default on empty input.
name = input("Enter excel file name:")
if len(name) < 1 : name = "Concreteness_ratings_Brysbaert_et_al_BRM.xlsx"
# Open the workbook and define the worksheet
# (an input() could be added here to pick a sheet interactively).
wb = xlrd.open_workbook(name)
sheet = wb.sheet_by_index(0) #selecting the first sheet only
#sheet = book.sheet_by_name('Organized')
# Create a new table in the database:
# Note: The first line after 'try' statement deletes the table if it already exists.
# This is good at the stage of twicking your code. After the code for importing into
# the database from excel is finalized it is better to remove this: 'DROP TABLE IF EXISTS Reviews;'
# The 'try except else' that follows will generate a message if the table we are
# trying to create already exists.
# Create the three target tables, dropping stale versions first.  Each
# try/except reports whether creation succeeded.  (The DROP TABLE lines
# are convenient while iterating on this code; remove them once the
# import logic is final.)
try:
    cur_w.executescript('''DROP TABLE IF EXISTS BWK;
    CREATE TABLE BWK (
        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
        Wordform TEXT UNIQUE,
        Dom_Pos TEXT,
        Concreteness REAL,
        Concreteness_SD REAL,
        SUBTLEX INTEGER,
        Bigram INTEGER
    )''')
except sqlite3.OperationalError:
    print('Most probably the table we are trying to create already exists')
else:
    print('The table "BWK" has been successfully created')
try:
    cur_w.executescript('''DROP TABLE IF EXISTS [Noun-lemmas];
    CREATE TABLE [Noun-lemmas] (
        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
        Noun_lemma TEXT UNIQUE,
        RF INTEGER DEFAULT 0,
        WordNet_depth INTEGER
    )''')
except sqlite3.OperationalError:
    print('Most probably the table we are trying to create already exists')
else:
    print('The table "Noun-lemmas" has been successfully created')
try:
    cur_w.executescript('''DROP TABLE IF EXISTS Wordforms;
    CREATE TABLE Wordforms (
        id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
        Wordform TEXT,
        POS TEXT,
        TF INTEGER DEFAULT 0,
        RF INTEGER DEFAULT 0,
        noun_lemma_id INTEGER,
        UNIQUE(Wordform, POS)
    )''') # NOTE: noun_lemma_id should eventually become INTEGER NOT NULL
except sqlite3.OperationalError:
    print('Most probably the table we are trying to create already exists')
else:
    print('The table "Wordforms" has been successfully created')
# Import the spreadsheet into the BWK table, iterating row by row and
# starting at row 1 to skip the header.
for r in range(1, sheet.nrows):
    # BUG FIX: the original compared the cell type to a *tuple* with '==',
    # which is never true (cell_type returns an int), so blank rows were
    # never skipped.  Membership testing is what was intended.
    if sheet.cell_type(r, 0) in (xlrd.XL_CELL_EMPTY, xlrd.XL_CELL_BLANK):
        continue
    try: # attempt to extract the content of the relevant cells
        wordform = sheet.cell(r,0).value
        dom_pos = sheet.cell(r,8).value
        concretness = float(sheet.cell(r,2).value)
        concreteness_sd = float(sheet.cell(r,3).value)
        subtlex = int(sheet.cell(r,7).value)
        bigram = int(sheet.cell(r,1).value)
    except IndexError: # handling the error: print a possible error source
        print('One or several of the cells selected may be out of the table range. Please doublecheck the location of the column from which to parse the data')
    else: # populating the table with data from the original excel
        cur_w.execute('''INSERT OR IGNORE INTO BWK (Wordform, Dom_Pos, Concreteness, Concreteness_SD, SUBTLEX, Bigram)
            VALUES (?, ?, ?, ?, ?, ?)''', (wordform, dom_pos, concretness, concreteness_sd, subtlex, bigram))
# Commit the whole import as a single transaction.
conn_w.commit()
# Next we loop through each review and build its dictionary of wordforms
# and dictionary of noun-lemmas.  The query below defines the column we
# iterate over.
sqlstr = 'SELECT id, Review_cleaned FROM Reviews'
# Per-POS accumulators: counts of conversion-eligible wordforms, and
# dicts of wordforms WordNet failed to convert (value = number of
# reviews containing the wordform).
number_convertable = 0
nouns_notinWordNet = dict()
number_convertable_nouns = 0
verbs_notinWordNet = dict()
number_convertable_verbs = 0
adj_notinWordNet = dict()
number_convertable_adj = 0
adv_notinWordNet = dict()
number_convertable_adv = 0
# Main loop: for each review, extract wordforms and noun lemmas, write
# them into the Wordforms / [Noun-lemmas] tables, and accumulate the
# per-POS conversion statistics.
for row in cur.execute(sqlstr):
    id = row[0]
    reviewtext = row[1]#.encode('ascii','ignore')
    word_forms_review = wordformtion(reviewtext)
    [nouns, wordforms_notinWordNet] = noun_lemmas(word_forms_review)
    # Walk every wordform: record its noun lemma (if any) and update the
    # term-frequency (TF) and review-frequency (RF) counters in SQL.
    convertables = list()
    for wordform in word_forms_review:
        word = wordform[0]
        pos = wordform[1]
        tf = word_forms_review[wordform]
        rf = 0
        if tf > 0:
            rf = 1
        try:
            noun = nouns[wordform]
            # Compound lemmas (e.g. ['side', 'way']) are stored joined.
            if type(noun) == list:
                separator = ', '
                noun = separator.join(noun)
            cur_w.execute('''INSERT OR IGNORE INTO [Noun-lemmas] (Noun_lemma) VALUES (?)''', (noun,))
            cur_w.execute('''UPDATE [Noun-lemmas] SET RF = RF + ? WHERE Noun_lemma = ?''', (rf, noun))
            cur_w.execute('SELECT id FROM [Noun-lemmas] WHERE Noun_lemma = ? ', (noun, ))
            noun_id = cur_w.fetchone()[0]
        except:
            # No lemma for this wordform (KeyError from nouns lookup).
            noun_id = None
        cur_w.execute('''INSERT OR IGNORE INTO Wordforms (Wordform, POS, noun_lemma_id)
            VALUES (?, ?, ?)''', (word, pos, noun_id))
        cur_w.execute('''UPDATE Wordforms SET TF = TF + ? WHERE (Wordform = ? AND POS = ?)''', (tf, word, pos))
        cur_w.execute('''UPDATE Wordforms SET RF = RF + ? WHERE (Wordform = ? AND POS = ?)''', (rf, word, pos))
        # Count the conversion-eligible wordforms.
        # NOTE(review): pos[0] != 'RP' is vacuously True (pos[0] is one
        # character); presumably pos[:2] != 'RP' was intended -- confirm.
        if len(word) > 2 and pos[0] in ['N', 'V', 'J', 'R'] and pos[0] != 'RP' and word not in ['esque', 'also', 'not', 'just', 'too', 'instead', 'only', 'very', 'rather', 'however', 'but', 'anyway', 'furthermore', 'about', 'though', 'regardless', 'alright', 'further', 'mostly', 'anyways', 'nonetheless', 'virtually', 'beyond', 'along', 'alongside', 'somehow', 'thus']: # added "and wordform[1][0] != 'RP'" on 20200520
            convertables = convertables + [wordform]
    number_convertable = number_convertable + len(convertables)
    # Per-POS eligibility counts (nouns/numbers, verbs, adjectives, adverbs).
    convertable_nouns = list()
    for wordform in word_forms_review:
        if len(wordform[0]) > 2 and wordform[1][:2] in ['NN', 'CD'] and wordform[1][0] != 'RP' and wordform[0].lower() not in ['esque', 'also', 'not', 'just', 'too', 'instead', 'only', 'very', 'rather', 'however']:
            convertable_nouns = convertable_nouns + [wordform]
            if wordform[0].lower() == 'full':
                print(reviewtext)
    number_convertable_nouns = number_convertable_nouns + len(convertable_nouns)
    convertable_verbs = list()
    for wordform in word_forms_review:
        if len(wordform[0]) > 2 and wordform[1][0] == 'V' and wordform[1][0] != 'RP' and wordform[0].lower() not in ['esque', 'also', 'not', 'just', 'too', 'instead', 'only', 'very', 'rather', 'however']:
            convertable_verbs = convertable_verbs + [wordform]
    number_convertable_verbs = number_convertable_verbs + len(convertable_verbs)
    convertable_adj = list()
    for wordform in word_forms_review:
        if len(wordform[0]) > 2 and wordform[1][0] == 'J' and wordform[1][0] != 'RP' and wordform[0].lower() not in ['esque', 'also', 'not', 'just', 'too', 'instead', 'only', 'very', 'rather', 'however']:
            convertable_adj = convertable_adj + [wordform]
    number_convertable_adj = number_convertable_adj + len(convertable_adj)
    convertable_adv = list()
    for wordform in word_forms_review:
        if len(wordform[0]) > 2 and wordform[1][:2] == 'RB' and wordform[0].lower() not in ['also', 'not', 'just', 'too', 'instead', 'only', 'very', 'rather', 'however', 'esque', 'but', 'anyway', 'furthermore', 'about', 'though', 'regardless', 'alright', 'further', 'mostly', 'anyways', 'nonetheless', 'virtually', 'beyond', 'along', 'alongside', 'somehow', 'thus']: # excludes WH adverbs
            convertable_adv = convertable_adv + [wordform]
    number_convertable_adv = number_convertable_adv + len(convertable_adv)
    # Tally the wordforms that could not be converted, split by POS class;
    # each dict value counts the number of reviews containing the wordform.
    if wordforms_notinWordNet != []:
        for wordform in wordforms_notinWordNet:
            if wordform[1][0] == 'N':
                nouns_notinWordNet[wordform[0]] = nouns_notinWordNet.get(wordform[0], 0) + 1
            if wordform[1][0] == 'V':
                verbs_notinWordNet[wordform[0]] = verbs_notinWordNet.get(wordform[0], 0) + 1
            if wordform[1][0] == 'J':
                adj_notinWordNet[wordform[0]] = adj_notinWordNet.get(wordform[0], 0) + 1
            if wordform[1][:2] == 'RB':
                adv_notinWordNet[wordform[0]] = adv_notinWordNet.get(wordform[0], 0) + 1
# Commit all inserts/updates from the review loop in one transaction.
conn_w.commit()
# Aggregate, per POS class, how many wordform occurrences could not be
# converted into a noun lemma, then print the conversion statistics.
number_nouns_notfound = sum(nouns_notinWordNet.values())
number_verbs_notfound = sum(verbs_notinWordNet.values())
number_adj_notfound = sum(adj_notinWordNet.values())
number_adv_notfound = sum(adv_notinWordNet.values())
print('Number of convertable wordforms: ', number_convertable)
print('Number of convertable nouns: ', number_convertable_nouns)
print('Number of nouns not converted: ', number_nouns_notfound)
print('Number of unique nouns which were not converted: ', len(nouns_notinWordNet))
print('Non-converted noun-wordforms: ', nouns_notinWordNet)
print('Number of convertable verbs: ', number_convertable_verbs)
print('Number of verbs not converted: ', number_verbs_notfound)
print('Number of unique verbs which were not converted: ', len(verbs_notinWordNet))
print('Non-converted verb-wordforms: ', verbs_notinWordNet)
print('Number of convertable adjectives: ', number_convertable_adj)
print('Number of adjectives not converted: ', number_adj_notfound)
print('Number of unique adjectives which were not converted: ', len(adj_notinWordNet))
print('Non-converted adjective-wordforms: ', adj_notinWordNet)
print('Number of convertable adverbs: ', number_convertable_adv)
print('Number of adverbs not converted: ', number_adv_notfound)
print('Number of unique adverbs which were not converted: ', len(adv_notinWordNet))
print('Non-converted adverb-wordforms: ', adv_notinWordNet)
#
# sqlstr = 'SELECT Words_cleaned FROM [Word Recall]'
#
# # Here we loop through the table, extract the verbs from the reviews and populate the new table with values of depth and frequency in the whole corpus for each verb
# for row in cur.execute(sqlstr):
# reviewtext = row[0]#.encode('ascii','ignore')
# # The following line tests if the loop works by printing out the contents of the column row by row up to row 9
# #if line < 10: print(reviewtext,type(reviewtext))
#
# allverbs_in_review = verbs(reviewtext)
# # Now we need to loop through verbs in the review and get the value of depth for each one of them (and also keep the total count for each verb)
# for verb in allverbs_in_review:
# depth = hypernym_depth(verb, 'v')
# #frequency = frequency + allverbs_in_review[verb]
# #frequency = 0
# if (len(verb)<2 or (depth == 0 and verb != 'entity')): continue # make sure the words which are not in word and give depth 0 are not included. usually those are incorrectly assigned parts of speech. there was 298 of them without this line of code
# else:
# #if len(verb)<2: print verb
# cur_w.execute('''INSERT OR IGNORE INTO Verbs (Verb, Depth)
# VALUES (?,?)''', (verb, depth))
# #conn_w.commit()
# conn_w.commit()
# Now lets populate the table with verb count in the whole corpus.
#shutil.copyfile('Liars4_w.sqlite', 'Liars4_s.sqlite') # we need to create an extra database to use it to generate another search query, because we will need a nested loop (a loop with a subloop)
# conn_s = sqlite3.connect('Liars4_s.sqlite')
# cur_s = conn_s.cursor()
# for row in cur_s.execute('SELECT verb FROM verbs'):
# verb = row[0]
# count = 0
# for line in cur:
# reviewtext = line[0]
# count = count + verbs(reviewtext)[verb]
# cur_w.execute('''INSERT OR IGNORE INTO verbs (TF)
# VALUES (?)''', (count))
#
# conn_w.commit()
# in the line just below we test if updating a column with set values of similarity works correctly
#similarity = 0.5
#cur_w.execute('UPDATE Reviews SET Similarity = ? WHERE review = ?', (similarity, reviewtext, ))
#conn_w.commit()
#line = line + 1
cur_w.close()
conn_w.close()
cur.close()
conn.close()
shutil.copyfile('Liars7_w.sqlite', 'Liars7_wordforms.sqlite')
| [
"nltk.pos_tag",
"sqlite3.connect",
"numpy.delete",
"xlrd.open_workbook",
"nltk.stem.WordNetLemmatizer",
"nltk.stem.PorterStemmer",
"nltk.tokenize.word_tokenize",
"shutil.copyfile",
"nltk.tokenize.sent_tokenize",
"nltk.corpus.wordnet.synsets"
] | [((520, 539), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (537, 539), False, 'from nltk.stem import WordNetLemmatizer\n'), ((30325, 30374), 'sqlite3.connect', 'sqlite3.connect', (['"""Liars7_clean_tr20200618.sqlite"""'], {}), "('Liars7_clean_tr20200618.sqlite')\n", (30340, 30374), False, 'import sqlite3\n'), ((30604, 30672), 'shutil.copyfile', 'shutil.copyfile', (['"""Liars7_clean_tr20200618.sqlite"""', '"""Liars7_w.sqlite"""'], {}), "('Liars7_clean_tr20200618.sqlite', 'Liars7_w.sqlite')\n", (30619, 30672), False, 'import shutil\n'), ((30682, 30716), 'sqlite3.connect', 'sqlite3.connect', (['"""Liars7_w.sqlite"""'], {}), "('Liars7_w.sqlite')\n", (30697, 30716), False, 'import sqlite3\n'), ((31017, 31041), 'xlrd.open_workbook', 'xlrd.open_workbook', (['name'], {}), '(name)\n', (31035, 31041), False, 'import xlrd\n'), ((46465, 46526), 'shutil.copyfile', 'shutil.copyfile', (['"""Liars7_w.sqlite"""', '"""Liars7_wordforms.sqlite"""'], {}), "('Liars7_w.sqlite', 'Liars7_wordforms.sqlite')\n", (46480, 46526), False, 'import shutil\n'), ((857, 882), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word'], {'pos': '"""n"""'}), "(word, pos='n')\n", (867, 882), True, 'from nltk.corpus import wordnet as wn\n'), ((902, 927), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word'], {'pos': '"""v"""'}), "(word, pos='v')\n", (912, 927), True, 'from nltk.corpus import wordnet as wn\n'), ((946, 971), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word'], {'pos': '"""a"""'}), "(word, pos='a')\n", (956, 971), True, 'from nltk.corpus import wordnet as wn\n'), ((990, 1015), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word'], {'pos': '"""r"""'}), "(word, pos='r')\n", (1000, 1015), True, 'from nltk.corpus import wordnet as wn\n'), ((2938, 2968), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word'], {'pos': 'from_pos'}), '(word, pos=from_pos)\n', (2948, 2968), True, 'from nltk.corpus import wordnet as wn\n'), ((4708, 4734), 'nltk.corpus.wordnet.synsets', 
'wn.synsets', (['word', 'from_pos'], {}), '(word, from_pos)\n', (4718, 4734), True, 'from nltk.corpus import wordnet as wn\n'), ((4904, 4931), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word_1'], {'pos': '"""n"""'}), "(word_1, pos='n')\n", (4914, 4931), True, 'from nltk.corpus import wordnet as wn\n'), ((5239, 5265), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word', 'from_pos'], {}), '(word, from_pos)\n', (5249, 5265), True, 'from nltk.corpus import wordnet as wn\n'), ((5962, 5988), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word', 'from_pos'], {}), '(word, from_pos)\n', (5972, 5988), True, 'from nltk.corpus import wordnet as wn\n'), ((7900, 7921), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word', '"""r"""'], {}), "(word, 'r')\n", (7910, 7921), True, 'from nltk.corpus import wordnet as wn\n'), ((9655, 9685), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word'], {'pos': 'from_pos'}), '(word, pos=from_pos)\n', (9665, 9685), True, 'from nltk.corpus import wordnet as wn\n'), ((12524, 12539), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (12537, 12539), False, 'from nltk.stem import PorterStemmer\n'), ((12556, 12575), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (12569, 12575), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((703, 719), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word'], {}), '(word)\n', (713, 719), True, 'from nltk.corpus import wordnet as wn\n'), ((5683, 5710), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['word_a'], {'pos': '"""n"""'}), "(word_a, pos='n')\n", (5693, 5710), True, 'from nltk.corpus import wordnet as wn\n'), ((12873, 12896), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['sentence'], {}), '(sentence)\n', (12886, 12896), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((13938, 13957), 'nltk.pos_tag', 'nltk.pos_tag', (['words'], {}), '(words)\n', (13950, 13957), False, 'import nltk\n'), ((13349, 13383), 'numpy.delete', 
'np.delete', (['words', 'indexes_todelete'], {}), '(words, indexes_todelete)\n', (13358, 13383), True, 'import numpy as np\n')] |
import numpy as np
from abc import ABC, abstractmethod
from .model import Model
from ..util.metrics import mse, mse_prime
class Layer(ABC):
    """Abstract base class for neural-network layers.

    Concrete layers implement ``forward`` and ``backward``.  The ``input``
    and ``output`` attributes cache the tensors seen/produced by the most
    recent forward pass, so ``backward`` can reuse them.
    """

    def __init__(self):
        self.input = None   # last input passed to forward()
        self.output = None  # last output produced by forward()

    @abstractmethod
    def forward(self, input):
        """Compute and return the layer output for ``input``."""
        raise NotImplementedError

    @abstractmethod
    def backward(self, output_errors, learning_rate):
        """Propagate ``output_errors`` (dE/dY) backwards; return dE/dX.

        Parameter names fixed from the original misspellings
        (``output_erros`` / ``learing_rate``) to match the concrete
        implementations such as ``Dense.backward``.
        """
        raise NotImplementedError
class Dense(Layer):
    """Fully connected layer: output = input @ weights + bias."""

    def __init__(self, input_size, output_size):
        super().__init__()  # initialise the input/output caches from Layer
        # Uniform init in [-0.5, 0.5); bias starts at zero.
        self.weights = np.random.rand(input_size, output_size) - 0.5
        self.bias = np.zeros((1, output_size))

    def set_weights(self, weights, bias):
        """Replace the parameters, validating that the shapes match."""
        if weights.shape != self.weights.shape:
            raise ValueError(f"Shapes mismatch {weights.shape} and {self.weights.shape}")
        if bias.shape != self.bias.shape:
            # Bug fix: the original message reported the *weight* shapes here,
            # which made bias-shape errors impossible to diagnose.
            raise ValueError(f"Shapes mismatch {bias.shape} and {self.bias.shape}")
        self.weights = weights
        self.bias = bias

    def forward(self, input_data):
        """Compute Y = X.W + B, caching input and output for backward()."""
        self.input = input_data
        self.output = np.dot(self.input, self.weights) + self.bias
        return self.output

    def backward(self, output_error, learning_rate):
        """
        Computes dE/dW, dE/dB for a given output_error=dE/dY.
        Returns input_errors=dE/dX to feed the previous layer.
        """
        # Compute the weights errors dE/dW = X.T * dE/dY
        weights_error = np.dot(self.input.T, output_error)
        # Compute the bias error dE/dB = dE/dY (summed over the batch axis)
        bias_error = np.sum(output_error, axis=0)
        # Error dE/dX to pass on to the previous layer
        input_error = np.dot(output_error, self.weights.T)
        # Plain gradient-descent parameter update
        self.weights -= learning_rate * weights_error
        self.bias -= learning_rate * bias_error
        return input_error
class Activation(Layer):
    """Element-wise activation layer.

    ``activation`` must be callable and expose a ``prime`` attribute
    holding its derivative (as used by ``backward``).
    """

    def __init__(self, activation):
        super().__init__()  # initialise the input/output caches from Layer
        self.activation = activation

    def forward(self, input_data):
        """Apply the activation element-wise and cache input/output.

        Bug fix: the original named this method ``foward``, which left the
        abstract ``Layer.forward`` unimplemented -- instantiating the class
        raised TypeError and ``NN.fit``'s ``layer.forward(...)`` call could
        never have worked.
        """
        self.input = input_data
        self.output = self.activation(self.input)
        return self.output

    # Backward-compatible alias for any caller that used the old misspelling.
    foward = forward

    def backward(self, output_error, learning_rate):
        # learning_rate is unused because there are no learnable parameters;
        # only the error is passed on to the previous layer.
        return np.multiply(self.activation.prime(self.input), output_error)
class NN(Model):
    """Sequential feed-forward neural network trained by full-batch
    gradient descent with the MSE loss.
    """

    def __init__(self, epochs=1000, lr=0.001, verbose=True):
        self.epochs = epochs    # number of full passes over the dataset
        self.lr = lr            # learning rate shared by every layer
        self.verbose = verbose  # print the error on every epoch when True
        self.layers = []
        self.loss = mse
        self.loss_prime = mse_prime
        # Track fitted state from the start so predict()/cost() can check it;
        # the original left is_fitted undefined until fit() was called.
        self.is_fitted = False

    def fit(self, dataset):
        """Train the network on ``dataset`` (must provide ``getXy()``);
        stores the per-epoch loss in ``self.history``."""
        X, y = dataset.getXy()
        self.dataset = dataset
        self.history = dict()
        for epoch in range(self.epochs):
            # forward propagation through every layer
            output = X
            for layer in self.layers:
                output = layer.forward(output)
            # backward propagation of the loss gradient
            error = self.loss_prime(y, output)
            for layer in reversed(self.layers):
                error = layer.backward(error, self.lr)
            # record the average error for this epoch
            err = self.loss(y, output)
            self.history[epoch] = err
            if self.verbose:
                print(f'epoch {epoch + 1}/{self.epochs} error={err}')
        if not self.verbose and self.history:
            # the history guard avoids a NameError on `err` when epochs == 0
            print(f'error={err}')
        self.is_fitted = True

    def add(self, layer):
        """Append ``layer`` to the network."""
        self.layers.append(layer)

    def predict(self, x):
        # Bug fix: the original *assigned* self.is_fitted = True here
        # (almost certainly a copy-paste slip from fit()); predict must
        # only check the flag, mirroring cost() below.
        assert self.is_fitted, 'Model must be fit before predict'
        output = x
        for layer in self.layers:
            output = layer.forward(output)
        return output

    def cost(self, X=None, y=None):
        """Return the loss on (X, y), defaulting to the training data."""
        assert self.is_fitted, 'Model must be fit before predict'
        X = X if X is not None else self.dataset.X
        y = y if y is not None else self.dataset.Y
        output = self.predict(X)
        return self.loss(y, output)
class Conv2D:
    """Placeholder for a 2-D convolution layer; not yet implemented."""
class Pooling2D(Layer):
    """Generic 2-D pooling layer.

    Subclasses are expected to supply real ``pool`` / ``dpool``
    implementations; this base class handles the im2col-style reshaping
    bookkeeping around them.
    """

    def __init__(self, size=2, stride=2):
        self.size = size      # pooling window side length
        self.stride = stride  # step between consecutive windows

    def pool(self):
        # Stub; forward() calls self.pool(self.X_col), so subclasses must
        # override this with a matching signature.
        pass

    def dpool(self):
        # Stub; backward() calls self.dpool(dX_col, dout_col, self.max_idx),
        # so subclasses must override this with a matching signature.
        pass

    def forward(self, input):
        self.X_shape = input.shape
        n, h, w, d = input.shape
        # Bug fix: the original computed (h.self.size) / (w.self.size), which
        # raises AttributeError at runtime.  The pooling output size is
        # (dimension - window) / stride + 1.
        h_out = (h - self.size) / self.stride + 1
        w_out = (w - self.size) / self.stride + 1
        if not w_out.is_integer() or not h_out.is_integer():
            # (typo "Invaid" fixed)
            raise Exception('Invalid output dimension!')
        h_out, w_out = int(h_out), int(w_out)
        X_reshaped = input.reshape(n * d, h, w, 1)
        # NOTE(review): im2col is not defined in this module's visible
        # imports -- confirm it is provided elsewhere in the package.
        self.X_col = im2col(X_reshaped, self.size, padding=0, stride=self.stride)
        out, self.max_idx = self.pool(self.X_col)
        out = out.reshape(h_out, w_out, n, d)
        out = out.transpose(3, 2, 0, 1)
        return out

    def backward(self, output_erros, learing_rate):
        # (parameter spellings kept from the original for compatibility)
        n, w, h, d = self.X_shape
        dX_col = np.zeros_like(self.X_col)
        # Bug fix: the original referenced an undefined name `output_error`.
        dout_col = output_erros.transpose(1, 2, 3, 0).ravel()
        dX = self.dpool(dX_col, dout_col, self.max_idx)
        # NOTE(review): `self.col2im` is not defined on this class; it is
        # presumably a module-level helper like im2col -- confirm.
        dX = self.col2im(dX, (n * d, h, w, 1),
                         self.size, self.size, padding=0, stride=self.stride)
        dX = dX.reshape(self.X_shape)
        return dX
class MaxPooling(Pooling2D):
    """Max-pooling layer; every operation is still an unimplemented stub."""

    def __init__(self, size=2, stride=2):
        # Mirror the base-class configuration without delegating to it,
        # exactly as the original did.
        self.size = size
        self.stride = stride

    def forward(self, input):
        """Stub: does nothing and returns None."""

    def backward(self, output_erros, learing_rate):
        """Stub: does nothing and returns None."""

    def pool(self):
        """Stub: does nothing and returns None."""

    def dpool(self):
        """Stub: does nothing and returns None."""
"numpy.random.rand",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.zeros_like"
] | [((607, 633), 'numpy.zeros', 'np.zeros', (['(1, output_size)'], {}), '((1, output_size))\n', (615, 633), True, 'import numpy as np\n'), ((1446, 1480), 'numpy.dot', 'np.dot', (['self.input.T', 'output_error'], {}), '(self.input.T, output_error)\n', (1452, 1480), True, 'import numpy as np\n'), ((1549, 1577), 'numpy.sum', 'np.sum', (['output_error'], {'axis': '(0)'}), '(output_error, axis=0)\n', (1555, 1577), True, 'import numpy as np\n'), ((1655, 1691), 'numpy.dot', 'np.dot', (['output_error', 'self.weights.T'], {}), '(output_error, self.weights.T)\n', (1661, 1691), True, 'import numpy as np\n'), ((4870, 4895), 'numpy.zeros_like', 'np.zeros_like', (['self.X_col'], {}), '(self.X_col)\n', (4883, 4895), True, 'import numpy as np\n'), ((541, 580), 'numpy.random.rand', 'np.random.rand', (['input_size', 'output_size'], {}), '(input_size, output_size)\n', (555, 580), True, 'import numpy as np\n'), ((1093, 1125), 'numpy.dot', 'np.dot', (['self.input', 'self.weights'], {}), '(self.input, self.weights)\n', (1099, 1125), True, 'import numpy as np\n')] |
# Offline sanity check for a trained behavioural-cloning model: load one
# recorded camera frame from the driving log and print the predicted
# steering angle.
from keras.models import load_model
import csv
import cv2
import sklearn
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Activation, Lambda, MaxPooling2D, Cropping2D, Flatten, Conv2D
from sklearn.model_selection import train_test_split
from scipy import ndimage
import warnings
# NOTE(review): catch_warnings() restores the previous filter state when the
# block exits, so this suppression only covers the (empty) body of the `with`
# statement and has no effect on the code below -- confirm intent.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
#keras.backend.set_image_data_format('channels_first')
path ='/home/workspace/CarND-Behavioral-Cloning-P3/data/'
## read the csv file
lines = []
with open('./data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        lines.append(line)
# Drop the CSV header row.
lines = lines[1:]
# Column 0 of each row is presumably the centre-camera image path relative
# to `path` -- TODO confirm against the recorded log format.
image = mpimg.imread(path+lines[4][0])
model = load_model('model.h5')
image_array = np.asarray(image)
# image_array[None, :, :, :] adds a batch axis of size 1 for model.predict().
steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))
print(steering_angle)
| [
"keras.models.load_model",
"matplotlib.image.imread",
"warnings.catch_warnings",
"numpy.asarray",
"csv.reader",
"warnings.filterwarnings"
] | [((781, 813), 'matplotlib.image.imread', 'mpimg.imread', (['(path + lines[4][0])'], {}), '(path + lines[4][0])\n', (793, 813), True, 'import matplotlib.image as mpimg\n'), ((821, 843), 'keras.models.load_model', 'load_model', (['"""model.h5"""'], {}), "('model.h5')\n", (831, 843), False, 'from keras.models import load_model\n'), ((858, 875), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (868, 875), True, 'import numpy as np\n'), ((387, 412), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (410, 412), False, 'import warnings\n'), ((418, 475), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (441, 475), False, 'import warnings\n'), ((683, 702), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (693, 702), False, 'import csv\n')] |
#!/usr/bin/env python3
from __future__ import print_function
import math
import sys
import numpy as np
"""
This is a version of BFGS specialized for the case where the function
is constrained to a particular convex region via a barrier function,
and where we can efficiently evaluate (via calling f_finite(x), which
returns bool) whether the function is finite at the given point.
x0 The value to start the optimization at.
f The function being minimized. f(x) returns a pair (value, gradient).
f_finite f_finite(x) returns true if f(x) would be finite, and false otherwise.
init_hessian This gives you a way to specify a "better guess" at the initial
Hessian.
return Returns a 4-tuple (x, f(x), f'(x), inverse-hessian-approximation).
"""
def Bfgs(x0, f, f_finite, init_inv_hessian=None,
         gradient_tolerance=0.0005, progress_tolerance=1.0e-06,
         verbose=False):
    """Minimise ``f`` with BFGS, staying inside the region where it is finite.

    ``f(x)`` must return a pair ``(value, gradient)`` and ``f_finite(x)``
    must report whether ``f`` would be finite at ``x``.  Returns a 4-tuple
    ``(x, f(x), f'(x), inverse-Hessian approximation)``.
    """
    options = dict(init_inv_hessian=init_inv_hessian,
                   gradient_tolerance=gradient_tolerance,
                   progress_tolerance=progress_tolerance,
                   verbose=verbose)
    return __bfgs(x0, f, f_finite, **options).Minimize()
class __bfgs:
    """Implementation class behind Bfgs(): BFGS with a strong-Wolfe line
    search (Nocedal & Wright, algorithms 3.5/3.6) that respects a convex
    barrier region via the user-supplied f_finite() predicate."""
    def __init__(self, x0, f, f_finite, init_inv_hessian=None,
                 gradient_tolerance=0.0005, progress_tolerance=1.0e-06,
                 progress_tolerance_num_iters=3, verbose=False):
        """Set up state and evaluate f at the starting point x0.

        Exits the process (sys.exit) if f is not finite at x0."""
        self.c1 = 1.0e-04 # constant used in line search (sufficient decrease)
        self.c2 = 0.9 # constant used in line search (curvature condition)
        assert len(x0.shape) == 1
        self.dim = x0.shape[0]
        self.f = f
        self.f_finite = f_finite
        self.gradient_tolerance = gradient_tolerance
        self.num_restarts = 0
        self.progress_tolerance = progress_tolerance
        assert progress_tolerance_num_iters >= 1
        self.progress_tolerance_num_iters = progress_tolerance_num_iters
        self.verbose = verbose
        if not self.f_finite(x0):
            self.LogMessage("Function is not finite at initial point {0}".format(x0))
            sys.exit(1)
        # evaluations will be a list of 3-tuples (x, function-value f(x),
        # function-derivative f'(x)).  it's written to and read from by the
        # function self.FunctionValueAndDerivative().
        self.cached_evaluations = []
        # self.x, self.value and self.deriv are parallel per-iteration
        # histories of the iterate, its objective value and its gradient.
        self.x = [x0]
        (value0, deriv0) = self.FunctionValueAndDerivative(x0)
        self.value = [value0]
        self.deriv = [deriv0]
        deriv_magnitude = math.sqrt(np.dot(deriv0, deriv0))
        self.LogMessage("On iteration 0, value is %.6f, deriv-magnitude %.6f" %
                        (value0, deriv_magnitude))
        # note: self.inv_hessian is referred to as H_k in the Nocedal
        # and Wright textbook.
        if init_inv_hessian is None:
            self.inv_hessian = np.identity(self.dim)
        else:
            self.inv_hessian = init_inv_hessian
    def Minimize(self):
        """Iterate until Converged(); return (x, f(x), f'(x), inv-Hessian)."""
        while not self.Converged():
            self.Iterate()
        self.FinalDebugOutput()
        return (self.x[-1], self.value[-1], self.deriv[-1], self.inv_hessian)
    def FinalDebugOutput(self):
        """Hook for end-of-run diagnostics; currently this does nothing."""
        pass
    # This does one iteration of update: line search along the quasi-Newton
    # direction, then the rank-two BFGS update of the inverse Hessian.
    def Iterate(self):
        self.p = - np.dot(self.inv_hessian, self.deriv[-1])
        alpha = self.LineSearch()
        if alpha is None:
            self.LogMessage("Restarting BFGS with unit Hessian since line search failed")
            self.inv_hessian = np.identity(self.dim)
            self.num_restarts += 1
            return
        cur_x = self.x[-1]
        next_x = cur_x + alpha * self.p
        (next_value, next_deriv) = self.FunctionValueAndDerivative(next_x)
        next_deriv_magnitude = math.sqrt(np.dot(next_deriv, next_deriv))
        self.LogMessage("On iteration %d, value is %.6f, deriv-magnitude %.6f" %
                        (len(self.x), next_value, next_deriv_magnitude))
        # obtain s_k = x_{k+1} - x_k, y_k = gradient_{k+1} - gradient_{k}
        # see eq. 6.5 in Nocedal and Wright.
        self.x.append(next_x)
        self.value.append(next_value)
        self.deriv.append(next_deriv)
        s_k = alpha * self.p
        y_k = self.deriv[-1] - self.deriv[-2]
        ysdot = np.dot(s_k, y_k)
        if not ysdot > 0:
            # A Wolfe-condition line search should guarantee ysdot > 0.
            self.LogMessage("Restarting BFGS with unit Hessian since curvature "
                            "condition failed [likely a bug in the optimization code]")
            self.inv_hessian = np.identity(self.dim)
            return
        rho_k = 1.0 / ysdot  # eq. 6.14 in Nocedal and Wright.
        # the next equation is eq. 6.17 in Nocedal and Wright.
        # the following comment is the simple but inefficient version:
        # I = np.identity(self.dim)
        # self.inv_hessian = ((I - np.outer(s_k, y_k) * rho_k) * self.inv_hessian *
        #                     (I - np.outer(y_k, s_k) * rho_k)) + np.outer(s_k, s_k) * rho_k
        z_k = np.dot(self.inv_hessian, y_k)
        self.inv_hessian += np.outer(s_k, s_k) * (ysdot + np.dot(y_k, z_k)) * rho_k**2 - \
                            (np.outer(z_k, s_k) + np.outer(s_k, z_k)) * rho_k
    # the function LineSearch is to be called after you have set self.x and
    # self.p.  It returns an alpha value satisfying the strong Wolfe conditions,
    # or None if the line search failed.  It is Algorithm 3.5 of Nocedal and
    # Wright.
    def LineSearch(self):
        alpha_max = 1.0e+10
        alpha1 = self.GetDefaultAlpha()
        # amount by which we increase alpha if needed... after the 1st time we make it 4.
        increase_factor = 2.0
        if alpha1 is None:
            self.LogMessage("Line search failed unexpectedly in making sure "
                            "f(x) is finite.")
            return None
        alpha = [0.0, alpha1]
        # phi(alpha) = f(x + alpha * p); phi_dash is its derivative in alpha.
        (phi_0, phi_dash_0) = self.FunctionValueAndDerivativeForAlpha(0.0)
        phi = [phi_0]
        phi_dash = [phi_dash_0]
        if self.verbose:
            self.LogMessage("Search direction is: {0}".format(self.p))
        if phi_dash_0 >= 0.0:
            self.LogMessage("{0}: line search failed unexpectedly: not a descent "
                            "direction")
            return None
        while True:
            i = len(phi)
            alpha_i = alpha[-1]
            (phi_i, phi_dash_i) = self.FunctionValueAndDerivativeForAlpha(alpha_i)
            phi.append(phi_i)
            phi_dash.append(phi_dash_i)
            # sufficient-decrease test: bracket found, zoom into it.
            if (phi_i > phi_0 + self.c1 * alpha_i * phi_dash_0 or
                (i > 1 and phi_i >= phi[-2])):
                return self.Zoom(alpha[-2], alpha_i)
            if abs(phi_dash_i) <= -self.c2 * phi_dash_0:
                self.LogMessage("Line search: accepting alpha = {0}".format(alpha_i))
                return alpha_i
            if phi_dash_i >= 0:
                return self.Zoom(alpha_i, alpha[-2])
            # the algorithm says "choose alpha_{i+1} \in (alpha_i, alpha_max).
            # the rest of this block is implementing that.
            next_alpha = alpha_i * increase_factor
            increase_factor = 4.0  # after we double once, we get more aggressive.
            if next_alpha > alpha_max:
                # something went wrong if alpha needed to get this large.
                # most likely we'll restart BFGS.
                self.LogMessage("Line search failed unexpectedly, went "
                                "past the max.")
                return None
            # make sure the function is finite at the next alpha, if possible.
            # we don't need to worry about efficiency too much, as this check
            # for finiteness is very fast.
            while next_alpha > alpha_i * 1.2 and not self.IsFiniteForAlpha(next_alpha):
                next_alpha *= 0.9
            while next_alpha > alpha_i * 1.02 and not self.IsFiniteForAlpha(next_alpha):
                next_alpha *= 0.99
            self.LogMessage("Increasing alpha from {0} to {1} in line search".format(alpha_i,
                                                                                    next_alpha))
            alpha.append(next_alpha)
    # This function, from Nocedal and Wright (alg. 3.6), is called from
    # LineSearch.  It returns the alpha value satisfying the strong Wolfe
    # conditions, or None if there was an error.
    def Zoom(self, alpha_lo, alpha_hi):
        # these function evaluations don't really happen, we use caching.
        (phi_0, phi_dash_0) = self.FunctionValueAndDerivativeForAlpha(0.0)
        (phi_lo, phi_dash_lo) = self.FunctionValueAndDerivativeForAlpha(alpha_lo)
        (phi_hi, phi_dash_hi) = self.FunctionValueAndDerivativeForAlpha(alpha_hi)
        # the minimum interval length [on alpha] that we allow is normally
        # 1.0e-10; but if the magnitude of the search direction is large, we make
        # it proportionally smaller.
        min_diff = 1.0e-10 / max(1.0, math.sqrt(np.dot(self.p, self.p)))
        while True:
            if abs(alpha_lo - alpha_hi) < min_diff:
                self.LogMessage("Line search failed, interval is too small: [{0},{1}]".format(
                    alpha_lo, alpha_hi))
                return None
            # the algorithm says "Interpolate (using quadratic, cubic or
            # bisection) to find a trial step length between alpha_lo and
            # alpha_hi.  We basically choose bisection, but because alpha_lo is
            # guaranteed to always have a "better" (lower) function value than
            # alpha_hi, we actually want to be a little bit closer to alpha_lo,
            # so we go one third of the distance between alpha_lo and alpha_hi.
            alpha_j = alpha_lo + 0.3333 * (alpha_hi - alpha_lo)
            if self.verbose:
                self.LogMessage("Trying alpha = {0}".format(alpha_j))
            (phi_j, phi_dash_j) = self.FunctionValueAndDerivativeForAlpha(alpha_j)
            if phi_j > phi_0 + self.c1 * alpha_j * phi_dash_0 or phi_j >= phi_lo:
                (alpha_hi, phi_hi, phi_dash_hi) = (alpha_j, phi_j, phi_dash_j)
            else:
                if abs(phi_dash_j) <= - self.c2 * phi_dash_0:
                    self.LogMessage("Acceptable alpha is {0}".format(alpha_j))
                    return alpha_j
                if phi_dash_j * (alpha_hi - alpha_lo) >= 0.0:
                    (alpha_hi, phi_hi, phi_dash_hi) = (alpha_lo, phi_lo, phi_dash_lo)
                (alpha_lo, phi_lo, phi_dash_lo) = (alpha_j, phi_j, phi_dash_j)
    # The function GetDefaultAlpha(), called from LineSearch(), is to be called
    # after you have set self.x and self.p.  It normally returns 1.0, but it
    # will reduce it by factors of 0.9 until the function evaluated at 1.5 * alpha
    # is finite.  This is because generally speaking, approaching the edge of
    # the barrier function too rapidly will lead to poor function values.  Note:
    # evaluating whether the function is finite is very efficient.
    # If the function was not finite even at very tiny alpha, then something
    # probably went wrong; we'll restart BFGS in this case.
    def GetDefaultAlpha(self):
        alpha_factor = 1.5  # this should be strictly > 1.
        min_alpha = 1.0e-10
        alpha = 1.0
        while alpha > min_alpha and not self.IsFiniteForAlpha(alpha * alpha_factor):
            alpha *= 0.9
        return alpha if alpha > min_alpha else None
    # this function, called from LineSearch(), returns true if the function is finite
    # at the given alpha value.
    def IsFiniteForAlpha(self, alpha):
        x = self.x[-1] + self.p * alpha
        return self.f_finite(x)
    def FunctionValueAndDerivativeForAlpha(self, alpha):
        """Return (phi(alpha), phi'(alpha)) for the 1-D restriction of f
        along the current search direction self.p (cached via
        FunctionValueAndDerivative)."""
        x = self.x[-1] + self.p * alpha
        (value, deriv) = self.FunctionValueAndDerivative(x)
        return (value, np.dot(self.p, deriv))
    def Converged(self):
        """Return True when any stopping criterion fires: small gradient,
        two restarts, or insufficient recent progress."""
        # we say that we're converged if the gradient magnitude is small...
        current_gradient = self.deriv[-1]
        gradient_magnitude = math.sqrt(np.dot(current_gradient, current_gradient))
        if gradient_magnitude < self.gradient_tolerance:
            self.LogMessage("BFGS converged on iteration {0} due to gradient magnitude {1} "
                            "less than gradient tolerance {2}".format(
                    len(self.x), "%.6f" % gradient_magnitude, self.gradient_tolerance))
            return True
        # ... or we had to restart more than once ...
        if self.num_restarts > 1:
            self.LogMessage("Restarted BFGS computation twice: declaring convergence to avoid a loop")
            return True
        # ... or the amortized objective change per iteration is tiny.
        n = self.progress_tolerance_num_iters
        if len(self.x) > n:
            cur_value = self.value[-1]
            prev_value = self.value[-1 - n]
            # the following will be nonnegative.
            change_per_iter_amortized = (prev_value - cur_value) / n
            if change_per_iter_amortized < self.progress_tolerance:
                self.LogMessage("BFGS converged on iteration {0} due to objf-change per "
                                "iteration amortized over {1} iterations = {2} < "
                                "threshold = {3}.".format(
                        len(self.x), n, change_per_iter_amortized, self.progress_tolerance))
                return True
        return False
    # this returns the function value and derivative for x, as a tuple; it
    # does caching (a linear scan of all previous evaluations).
    def FunctionValueAndDerivative(self, x):
        for i in range(len(self.cached_evaluations)):
            if np.array_equal(x, self.cached_evaluations[i][0]):
                return (self.cached_evaluations[i][1],
                        self.cached_evaluations[i][2])
        # we didn't find it cached, so we need to actually evaluate the
        # function.  this is where it gets slow.
        (value, deriv) = self.f(x)
        self.cached_evaluations.append((x, value, deriv))
        return (value, deriv)
    def LogMessage(self, message):
        """Write a progress/diagnostic message to stderr, prefixed with
        the program name."""
        print(sys.argv[0] + ": " + message, file=sys.stderr)
def __TestFunction(x):
    """Quadratic test objective f(x) = a.x + x^T B x with a = [1..15] and
    B = diag(5..19); returns (value, gradient) where gradient = a + 2 B x."""
    dim = 15
    a = np.arange(1, dim + 1)
    B = np.diag(np.arange(5, dim + 5))
    Bx = np.dot(B, x)
    value = np.dot(a, x) + np.dot(x, Bx)
    deriv = a + 2.0 * Bx
    return (value, deriv)
def __TestBfgs():
    """Smoke test: minimise the quadratic test objective from a fixed start
    (the function is finite everywhere, so f_finite always says True)."""
    dim = 15
    start = np.arange(10, dim + 10)
    (x_min, f_min, grad, inv_hess) = Bfgs(start, __TestFunction, lambda x: True)
# __TestBfgs()
| [
"numpy.identity",
"numpy.dot",
"numpy.outer",
"numpy.array_equal",
"sys.exit"
] | [((4256, 4272), 'numpy.dot', 'np.dot', (['s_k', 'y_k'], {}), '(s_k, y_k)\n', (4262, 4272), True, 'import numpy as np\n'), ((4964, 4993), 'numpy.dot', 'np.dot', (['self.inv_hessian', 'y_k'], {}), '(self.inv_hessian, y_k)\n', (4970, 4993), True, 'import numpy as np\n'), ((14196, 14208), 'numpy.dot', 'np.dot', (['x', 'a'], {}), '(x, a)\n', (14202, 14208), True, 'import numpy as np\n'), ((2059, 2070), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2067, 2070), False, 'import sys\n'), ((2496, 2518), 'numpy.dot', 'np.dot', (['deriv0', 'deriv0'], {}), '(deriv0, deriv0)\n', (2502, 2518), True, 'import numpy as np\n'), ((2821, 2842), 'numpy.identity', 'np.identity', (['self.dim'], {}), '(self.dim)\n', (2832, 2842), True, 'import numpy as np\n'), ((3272, 3312), 'numpy.dot', 'np.dot', (['self.inv_hessian', 'self.deriv[-1]'], {}), '(self.inv_hessian, self.deriv[-1])\n', (3278, 3312), True, 'import numpy as np\n'), ((3494, 3515), 'numpy.identity', 'np.identity', (['self.dim'], {}), '(self.dim)\n', (3505, 3515), True, 'import numpy as np\n'), ((3753, 3783), 'numpy.dot', 'np.dot', (['next_deriv', 'next_deriv'], {}), '(next_deriv, next_deriv)\n', (3759, 3783), True, 'import numpy as np\n'), ((4499, 4520), 'numpy.identity', 'np.identity', (['self.dim'], {}), '(self.dim)\n', (4510, 4520), True, 'import numpy as np\n'), ((11838, 11859), 'numpy.dot', 'np.dot', (['self.p', 'deriv'], {}), '(self.p, deriv)\n', (11844, 11859), True, 'import numpy as np\n'), ((12039, 12081), 'numpy.dot', 'np.dot', (['current_gradient', 'current_gradient'], {}), '(current_gradient, current_gradient)\n', (12045, 12081), True, 'import numpy as np\n'), ((13528, 13576), 'numpy.array_equal', 'np.array_equal', (['x', 'self.cached_evaluations[i][0]'], {}), '(x, self.cached_evaluations[i][0])\n', (13542, 13576), True, 'import numpy as np\n'), ((14221, 14233), 'numpy.dot', 'np.dot', (['B', 'x'], {}), '(B, x)\n', (14227, 14233), True, 'import numpy as np\n'), ((14283, 14295), 'numpy.dot', 'np.dot', (['B', 'x'], 
{}), '(B, x)\n', (14289, 14295), True, 'import numpy as np\n'), ((5022, 5040), 'numpy.outer', 'np.outer', (['s_k', 's_k'], {}), '(s_k, s_k)\n', (5030, 5040), True, 'import numpy as np\n'), ((5122, 5140), 'numpy.outer', 'np.outer', (['z_k', 's_k'], {}), '(z_k, s_k)\n', (5130, 5140), True, 'import numpy as np\n'), ((5143, 5161), 'numpy.outer', 'np.outer', (['s_k', 'z_k'], {}), '(s_k, z_k)\n', (5151, 5161), True, 'import numpy as np\n'), ((8963, 8985), 'numpy.dot', 'np.dot', (['self.p', 'self.p'], {}), '(self.p, self.p)\n', (8969, 8985), True, 'import numpy as np\n'), ((5052, 5068), 'numpy.dot', 'np.dot', (['y_k', 'z_k'], {}), '(y_k, z_k)\n', (5058, 5068), True, 'import numpy as np\n')] |
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""Perform GNM calculations and output the results in plain text NMD, and
graphical formats."""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2010-2012 <NAME>'
import os.path
from actions import *
from nmaoptions import *
def prody_gnm(opt):
    """Perform GNM calculations based on command line arguments.

    ``opt`` is an argparse-style namespace produced by addCommand().  The
    function validates the output directory, builds a GNM model for the
    selected atoms, writes the NMD/numerical output files and, when
    requested, saves matplotlib figures.
    """
    outdir = opt.outdir
    if not os.path.isdir(outdir):
        opt.subparser.error('{0:s} is not a valid path'.format(repr(outdir)))
    # Heavy imports are done lazily, presumably so the CLI starts quickly
    # even when this subcommand is not used.
    import numpy as np
    import prody
    LOGGER = prody.LOGGER
    pdb, prefix = opt.pdb, opt.prefix
    cutoff, gamma = opt.cutoff, opt.gamma,
    nmodes, selstr, model = opt.nmodes, opt.select, opt.model
    pdb = prody.parsePDB(pdb, model=model)
    if prefix == '_gnm':
        # '_gnm' is the placeholder default; derive the prefix from the title.
        prefix = pdb.getTitle() + '_gnm'
    select = pdb.select(selstr)
    if select is None:
        opt.subparser.error('Selection {0:s} do not match any atoms.'
                            .format(repr(selstr)))
    LOGGER.info('{0:d} atoms will be used for GNM calculations.'
                .format(len(select)))
    gnm = prody.GNM(pdb.getTitle())
    gnm.buildKirchhoff(select, cutoff, gamma)
    gnm.calcModes(nmodes)
    LOGGER.info('Writing numerical output.')
    if opt.npz:
        # NOTE(review): saveModel() is not passed outdir, so the .npz file
        # presumably lands in the current working directory -- confirm.
        prody.saveModel(gnm)
    prody.writeNMD(os.path.join(outdir, prefix + '.nmd'), gnm, select)
    outall = opt.all
    delim, ext, format = opt.delim, opt.ext, opt.numformat
    if outall or opt.eigen:
        prody.writeArray(os.path.join(outdir, prefix + '_evectors'+ext),
                         gnm.getArray(), delimiter=delim, format=format)
        prody.writeArray(os.path.join(outdir, prefix + '_evalues'+ext),
                         gnm.getEigvals(), delimiter=delim, format=format)
    if outall or opt.beta:
        # Plain-text table of experimental vs theoretical B-factors.
        fout = prody.openFile(prefix + '_beta.txt', 'w', folder=outdir)
        fout.write('{0[0]:1s} {0[1]:4s} {0[2]:4s} {0[3]:5s} {0[4]:5s}\n'
                       .format(['C', 'RES', '####', 'Exp.', 'The.']))
        for data in zip(select.getChids(), select.getResnames(),
                        select.getResnums(), select.getBetas(),
                        prody.calcTempFactors(gnm, select)):
            fout.write('{0[0]:1s} {0[1]:4s} {0[2]:4d} {0[3]:5.2f} {0[4]:5.2f}\n'
                       .format(data))
        fout.close()
    if outall or opt.covar:
        prody.writeArray(os.path.join(outdir, prefix + '_covariance'+ext),
                         gnm.getCovariance(), delimiter=delim, format=format)
    if outall or opt.ccorr:
        prody.writeArray(os.path.join(outdir, prefix + '_cross-correlations'
                                             + ext),
                         prody.calcCrossCorr(gnm), delimiter=delim,
                         format=format)
    if outall or opt.kirchhoff:
        prody.writeArray(os.path.join(outdir, prefix + '_kirchhoff'+ext),
                         gnm.getKirchhoff(), delimiter=delim, format=format)
    if outall or opt.sqflucts:
        prody.writeArray(os.path.join(outdir, prefix + '_sqfluct'+ext),
                         prody.calcSqFlucts(gnm), delimiter=delim,
                         format=format)
    figall, cc, sf, bf, cm, modes = \
        opt.figures, opt.cc, opt.sf, opt.bf, opt.cm, opt.modes
    if figall or cc or sf or bf or cm or modes:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            LOGGER.warning('Matplotlib could not be imported. '
                           'Figures are not saved.')
        else:
            LOGGER.info('Saving graphical output.')
            # NOTE: `format` is rebound here from the numeric output format
            # to the figure file format for the rest of this branch.
            format, width, height, dpi = \
                opt.figformat, opt.width, opt.height, opt.dpi
            format = format.lower()
            if figall or cc:
                plt.figure(figsize=(width, height))
                prody.showCrossCorr(gnm)
                plt.savefig(os.path.join(outdir, prefix + '_cc.'+format),
                    dpi=dpi, format=format)
                plt.close('all')
            if figall or cm:
                plt.figure(figsize=(width, height))
                prody.showContactMap(gnm)
                plt.savefig(os.path.join(outdir, prefix + '_cm.'+format),
                    dpi=dpi, format=format)
                plt.close('all')
            if figall or sf:
                plt.figure(figsize=(width, height))
                prody.showSqFlucts(gnm)
                plt.savefig(os.path.join(outdir, prefix + '_sf.'+format),
                    dpi=dpi, format=format)
                plt.close('all')
            if figall or bf:
                # Overlay experimental and theoretical B-factors with the
                # correlation coefficient in the legend.
                plt.figure(figsize=(width, height))
                bexp = select.getBetas()
                bcal = prody.calcTempFactors(gnm, select)
                plt.plot(bexp, label='Experimental')
                plt.plot(bcal, label=('Theoretical (corr coef = {0:.2f})'
                                      .format(np.corrcoef(bcal, bexp)[0,1])))
                plt.legend(prop={'size': 10})
                plt.xlabel('Node index')
                plt.ylabel('Experimental B-factors')
                plt.title(pdb.getTitle() + ' B-factors')
                plt.savefig(os.path.join(outdir, prefix + '_bf.'+format),
                    dpi=dpi, format=format)
                plt.close('all')
            if modes:
                # Parse the user mode list: whitespace/comma separated
                # 1-based indices, with "a-b" ranges allowed.
                indices = []
                items = modes.split()
                items = sum([item.split(',') for item in items], [])
                for item in items:
                    try:
                        item = item.split('-')
                        if len(item) == 1:
                            indices.append(int(item[0])-1)
                        elif len(item) == 2:
                            indices.extend(range(int(item[0])-1, int(item[1])))
                    except:
                        # malformed items are silently skipped (original behaviour)
                        pass
                for index in indices:
                    try:
                        mode = gnm[index]
                    except:
                        # out-of-range indices are silently skipped
                        pass
                    else:
                        plt.figure(figsize=(width, height))
                        prody.showMode(mode)
                        plt.grid()
                        plt.savefig(os.path.join(outdir, prefix + '_mode_' +
                            str(mode.getIndex()+1) + '.' + format),
                            dpi=dpi, format=format)
                        plt.close('all')
def addCommand(commands):
    """Register the 'gnm' subcommand on the given argparse subparsers object.

    Wires up GNM-specific options (cutoff, gamma, model, output and figure
    flags) and installs ``prody_gnm`` as the handler via ``set_defaults``.
    """
    subparser = commands.add_parser('gnm',
        help='perform Gaussian network model calculations')
    subparser.add_argument('--quiet', help="suppress info messages to stderr",
        action=Quiet, nargs=0)
    subparser.add_argument('--examples', action=UsageExample, nargs=0,
        help='show usage examples and exit')
    subparser.set_defaults(usage_example=
    """This command performs GNM calculations for given PDB structure and \
outputs results in NMD format. If an identifier is passed, structure file \
will be downloaded from the PDB FTP server.
Fetch PDB 1p38, run GNM calculations using default parameters, and results:
$ prody gnm 1p38
Fetch PDB 1aar, run GNM calculations with cutoff distance 7 angstrom for \
chain A carbon alpha atoms with residue numbers less than 70, and \
save all of the graphical output files:
$ prody gnm 1aar -c 7 -s "calpha and chain A and resnum < 70" -A"""
    )
    # NMA parameters shared with other NMA subcommands, plus GNM-specific ones
    group = addNMAParameters(subparser)
    group.add_argument('-c', '--cutoff', dest='cutoff', type=float,
        default=10.0, metavar='FLOAT',
        help='cutoff distance (A) (default: "%(default)s")')
    group.add_argument('-g', '--gamma', dest='gamma', type=float,
        default=1.0, metavar='FLOAT',
        help='spring constant (default: %(default)s)')
    group.add_argument('-m', '--model', dest='model', type=int,
        default=1, metavar='INT',
        help=('model that will be used in the calculations'))
    group = addNMAOutput(subparser)
    group.add_argument('-b', '--beta-factors', dest='beta', action='store_true',
        default=False, help='write B-factors')
    group.add_argument('-k', '--kirchhoff', dest='kirchhoff', action='store_true',
        default=False, help='write Kirchhoff matrix')
    group = addNMAOutputOptions(subparser, '_gnm')
    group = addNMAFigures(subparser)
    group.add_argument('-B', '--beta-factors-figure', dest='bf',
        action='store_true', default=False,
        help='save beta-factors')
    group.add_argument('-K', '--contact-map', dest='cm', action='store_true',
        default=False,
        help='save contact map (Kirchhoff matrix)')
    group.add_argument('-M', '--mode-shape-figure', dest='modes', type=str,
        default='', metavar='STR',
        help=('save mode shape figures for specified modes, '
              'e.g. "1-3 5" for modes 1, 2, 3 and 5'))
    group = addNMAFigureOptions(subparser)
    subparser.add_argument('pdb', help='PDB identifier or filename')
    # handler invoked by the top-level CLI dispatcher
    subparser.set_defaults(func=prody_gnm)
    subparser.set_defaults(subparser=subparser)
| [
"prody.showMode",
"matplotlib.pyplot.grid",
"prody.saveModel",
"prody.calcSqFlucts",
"prody.showContactMap",
"prody.openFile",
"prody.showSqFlucts",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel",
"numpy.corrcoef",
"prody.calcCrossCorr",
"matplotlib.pyplot.c... | [((1431, 1463), 'prody.parsePDB', 'prody.parsePDB', (['pdb'], {'model': 'model'}), '(pdb, model=model)\n', (1445, 1463), False, 'import prody\n'), ((1988, 2008), 'prody.saveModel', 'prody.saveModel', (['gnm'], {}), '(gnm)\n', (2003, 2008), False, 'import prody\n'), ((2535, 2591), 'prody.openFile', 'prody.openFile', (["(prefix + '_beta.txt')", '"""w"""'], {'folder': 'outdir'}), "(prefix + '_beta.txt', 'w', folder=outdir)\n", (2549, 2591), False, 'import prody\n'), ((2890, 2924), 'prody.calcTempFactors', 'prody.calcTempFactors', (['gnm', 'select'], {}), '(gnm, select)\n', (2911, 2924), False, 'import prody\n'), ((3442, 3466), 'prody.calcCrossCorr', 'prody.calcCrossCorr', (['gnm'], {}), '(gnm)\n', (3461, 3466), False, 'import prody\n'), ((3839, 3862), 'prody.calcSqFlucts', 'prody.calcSqFlucts', (['gnm'], {}), '(gnm)\n', (3857, 3862), False, 'import prody\n'), ((4537, 4572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (4547, 4572), True, 'import matplotlib.pyplot as plt\n'), ((4589, 4613), 'prody.showCrossCorr', 'prody.showCrossCorr', (['gnm'], {}), '(gnm)\n', (4608, 4613), False, 'import prody\n'), ((4749, 4765), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4758, 4765), True, 'import matplotlib.pyplot as plt\n'), ((4811, 4846), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (4821, 4846), True, 'import matplotlib.pyplot as plt\n'), ((4863, 4888), 'prody.showContactMap', 'prody.showContactMap', (['gnm'], {}), '(gnm)\n', (4883, 4888), False, 'import prody\n'), ((5024, 5040), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5033, 5040), True, 'import matplotlib.pyplot as plt\n'), ((5086, 5121), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (5096, 5121), True, 'import matplotlib.pyplot as plt\n'), 
((5138, 5161), 'prody.showSqFlucts', 'prody.showSqFlucts', (['gnm'], {}), '(gnm)\n', (5156, 5161), False, 'import prody\n'), ((5297, 5313), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5306, 5313), True, 'import matplotlib.pyplot as plt\n'), ((5359, 5394), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (5369, 5394), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5493), 'prody.calcTempFactors', 'prody.calcTempFactors', (['gnm', 'select'], {}), '(gnm, select)\n', (5480, 5493), False, 'import prody\n'), ((5510, 5546), 'matplotlib.pyplot.plot', 'plt.plot', (['bexp'], {'label': '"""Experimental"""'}), "(bexp, label='Experimental')\n", (5518, 5546), True, 'import matplotlib.pyplot as plt\n'), ((5717, 5746), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 10}"}), "(prop={'size': 10})\n", (5727, 5746), True, 'import matplotlib.pyplot as plt\n'), ((5763, 5787), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Node index"""'], {}), "('Node index')\n", (5773, 5787), True, 'import matplotlib.pyplot as plt\n'), ((5804, 5840), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Experimental B-factors"""'], {}), "('Experimental B-factors')\n", (5814, 5840), True, 'import matplotlib.pyplot as plt\n'), ((6033, 6049), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6042, 6049), True, 'import matplotlib.pyplot as plt\n'), ((6813, 6848), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (6823, 6848), True, 'import matplotlib.pyplot as plt\n'), ((6873, 6893), 'prody.showMode', 'prody.showMode', (['mode'], {}), '(mode)\n', (6887, 6893), False, 'import prody\n'), ((6918, 6928), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6926, 6928), True, 'import matplotlib.pyplot as plt\n'), ((7152, 7168), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7161, 7168), 
True, 'import matplotlib.pyplot as plt\n'), ((5669, 5692), 'numpy.corrcoef', 'np.corrcoef', (['bcal', 'bexp'], {}), '(bcal, bexp)\n', (5680, 5692), True, 'import numpy as np\n')] |
import numpy as np
from copy import deepcopy
import sys
# python huicv/evaluation/location_evaluation.py \
# '/home/ubuntu/dataset/visDrone/coco_fmt_annotations/VisDrone2018-DET-val-person.json' \
# exp//latest_result.json --matchThs 1.0
# part1: matcher, to get matched gt/ignored gt of each det result start ###############################
BACKGROUND = -1  # sentinel gt index meaning "this detection matched no ground truth"
class GTMatcher(object):
    """Greedy matcher assigning detections (dets) to ground truths (gts).

    Matching policy:
    1. if a det has multiple regular gts with V[det, gt] >= v_th, the gt with max V is matched;
    2. if multiple dets could match the same regular gt, the highest-score det wins,
       and lower-score dets try to match other gts;
    3. a det still unmatched after 1-2 is tried against ignored gts; if it matches one
       it is flagged as an "ignored det" (counted as neither TP nor FP).

    Subclasses implement ``cal_value`` to define the similarity V (e.g. IOU for
    boxes, inverse squared distance for points).
    """
    def __init__(self, eps=1e-8, LOG=None):
        # eps: tolerance for treating two detection scores as equal (grouping in v2 matcher)
        # LOG: optional file-like object; when set, intermediate matrices are printed to it
        self.eps = eps
        self.LOG = LOG
    def cal_value(self, D, G):
        """Return similarity matrix of shape (len(D), len(G)) with values in (0, 1].

        For bbox detection, cal_value would calculate IOU of dets and gts.
        """
        raise NotImplementedError()
    def _match_to_regluar_gt_no_repeat(self, V, v_th, M):
        """
        we assume the input det result have been descend sorted by score,
        this function matches each det to a gt which has not been matched before,
        has max value with the det, and satisfies V[det, gt] >= v_th.
        args:
            V: value of det box with gt box, for bbox detection it is IOU, shape = (len(D), len(G)), V range in [0, 1]
            v_th: the threshold for match
        return:
            M: matched gt id for each det, shape=len(D)
        """
        left_g = V.shape[1]
        keep = np.array([1] * V.shape[1])
        for i in range(V.shape[0]):
            if left_g <= 0: continue
            j = np.argmax(V[i, :] * keep)
            if V[i, j] >= v_th:
                M[i] = j
                keep[j] = 0  # remove gt j
                left_g -= 1
    def _match_to_regluar_gt_no_repeat_v2(self, V, v_th, scores, M):
        # Like _match_to_regluar_gt_no_repeat, but dets whose scores are equal
        # within self.eps form one group: inside a group the (det, gt) pair with
        # the globally largest value is matched first, so the result does not
        # depend on the arbitrary ordering of equal-score dets.
        # NOTE(review): assumes len(scores) > 0 — callers only invoke the matcher
        # when at least one detection exists; confirm if calling directly.
        equal_score_edge = [0]
        last_score = scores[0]
        for i in range(1, len(scores)):
            if abs(scores[i] - last_score) < self.eps:
                continue
            equal_score_edge.append(i)
            last_score = scores[i]
        equal_score_edge.append(len(scores))
        keep_det = np.array([1] * V.shape[0]).reshape((-1, 1))
        keep_gt = np.array([1] * V.shape[1]).reshape((1, -1))
        left_gt = V.shape[1]
        for i in range(len(equal_score_edge) - 1):
            if left_gt == 0: break
            s, e = equal_score_edge[i], equal_score_edge[i + 1]
            for _ in range(s, e):
                if left_gt == 0: break
                # best remaining (det, gt) pair inside the equal-score group [s, e)
                idx = np.argmax(((V[s:e] * keep_det[s:e]) * keep_gt).reshape((-1,)))
                det_id = s + idx // V.shape[1]
                gt_id = idx % V.shape[1]
                if V[det_id, gt_id] >= v_th:
                    M[det_id] = gt_id
                    keep_det[det_id] = 0
                    keep_gt[:, gt_id] = 0
                    left_gt -= 1
    def _match_as_ignore_det(self, V, v_th, start_gt_idx, M, ID):
        """
        args:
            V: value of det box with gt box, for bbox detection it is IOU, shape = (len(D), len(G))
            v_th: the threshold for match
            start_gt_idx: offset added to gt indices written into M (ignored gts are
                indexed after the regular gts)
        return:
            M: matched gt id for each det, shape=len(D)
            ID: whether a det is an ignored det, shape=len(D),
                ignored dets count as neither TP (true positive) nor FP (false positive)
        """
        for i in range(V.shape[0]):
            if M[i] != BACKGROUND: continue
            j = np.argmax(V[i, :])
            if V[i, j] >= v_th:
                M[i] = j + start_gt_idx
                ID[i] = True
    def __call__(self, D, det_scores, G, IG, v_th, multi_match_not_false_alarm, multi_match_v_th=None):
        """
        Run the full matching pipeline. D must be sorted descending by det_scores.
        Returns (M, ID, det_scores): matched gt index per det, ignored-det flags,
        and the (unchanged) scores.
        """
        if multi_match_v_th is None:
            multi_match_v_th = v_th
        M = np.array([BACKGROUND] * len(D))
        ID = np.array([False] * len(D))  # ignore det
        if len(G) > 0:
            V = self.cal_value(D, G)
            if self.LOG is not None: print('V(D, G):\n', V, file=self.LOG)
            # match det to regular gt with no repeats
            # self._match_to_regluar_gt_no_repeat(V, v_th, M)
            self._match_to_regluar_gt_no_repeat_v2(V, v_th, det_scores, M)
        if len(IG) > 0:
            IV = self.cal_value(D, IG)
            if self.LOG is not None: print('V(D, IG):\n', IV, file=self.LOG)
            # match det to ignored gt, repeats allowed
            self._match_as_ignore_det(IV, v_th, len(G), M, ID)
        if multi_match_not_false_alarm and len(G) > 0:
            # when multiple dets matching the same gt should not count as false
            # alarms, mark the extra dets as ignored dets instead
            self._match_as_ignore_det(V, multi_match_v_th, 0, M, ID)
        return M, ID, det_scores
# class BoxMatcher(GTMatcher):
# def cal_value(self, dets, gts):
# return IOU(dets, gts)
class PointMatcher(GTMatcher):
    """Matcher scoring (xc, yc) detections against (xc, yc, w, h) gts by normalized L2 distance."""

    def cal_value(self, dets, gts):
        """
        Similarity between point dets and gt boxes.

        For each pair, the offset is normalized by the gt box size:
            square_of_distance = dx*dx + dy*dy
        and mapped into (0, 1] (like IOU) by adding 1 to avoid division by zero:
            V = 1 / (square_of_distance + 1)
        """
        sq_dist = np.empty((len(dets), len(gts)))
        for row, det in enumerate(dets):
            offset = (det.reshape((1, -1)) - gts[:, :2]) / gts[:, 2:]
            sq_dist[row, :] = offset[:, 0] * offset[:, 0] + offset[:, 1] * offset[:, 1]
        return 1 / (1 + sq_dist)

    def __call__(self, dets, det_scores, gts, ignore_gts, dis_th, multi_match_not_false_alarm, multi_match_dis_th=None):
        # translate distance thresholds into similarity thresholds for GTMatcher
        v_th = 1 / (dis_th * dis_th + 1)
        if multi_match_dis_th is None:
            multi_match_v_th = v_th
        else:
            multi_match_v_th = 1 / (multi_match_dis_th * multi_match_dis_th + 1)
        if self.LOG: print('v_th:', v_th, file=self.LOG)
        return super(PointMatcher, self).__call__(
            dets, det_scores, gts, ignore_gts, v_th, multi_match_not_false_alarm, multi_match_v_th
        )
# part1: matcher, to get matched gt/ignored gt of each det result end ###############################
# part2: recall precision cal, to get recall and precision from match result start ###############################
def cal_recall_precision(match_gts, dets_score, len_pos):
    """Compute a monotone PR curve from per-det gt matches.

    match_gts: matched gt index per det (BACKGROUND for false positives).
    dets_score: detection scores (any order; sorted descending here).
    len_pos: number of non-ignored ground truths.
    Returns (recall, precision) with one point per distinct recall value;
    for a repeated recall only the first (i.e. highest) precision is kept.
    """
    order = np.argsort(-dets_score)
    match_gts = match_gts[order]
    dets_score = dets_score[order]
    is_pos = (match_gts != BACKGROUND)
    tp = np.cumsum(is_pos.astype(np.float32))
    raw_recall = tp / (len_pos + 1e-12)          # small eps guards len_pos == 0
    raw_precision = tp / np.arange(1, len(is_pos) + 1)
    kept_recall, kept_precision, chosen_idx = [], [], []
    prev_r = -1
    for pos, (r, p) in enumerate(zip(raw_recall, raw_precision)):
        # keep only the first point of each run of equal recall values
        if abs(prev_r - r) < 1e-10:
            continue
        kept_recall.append(r)
        kept_precision.append(p)
        chosen_idx.append(pos)
        prev_r = r
    recall = np.array(kept_recall)
    precision = np.array(kept_precision)
    if LocationEvaluator.SAVE_RECALL_PRECISION_PATH is not None:
        # optional dump for offline analysis (e.g. finding score at a target recall)
        np.savez(LocationEvaluator.SAVE_RECALL_PRECISION_PATH, recall=recall,
                 precision=precision, dets_score=dets_score[chosen_idx])
    return recall, precision
def match_and_cal_recall_precision(all_dets, all_dets_score, all_gts, all_gts_ignore, match_th, maxDets,
                                   matcher, matcher_kwargs={}):
    """
    Match detections to gts and compute recall/precision for a single condition,
    i.e. a single class, single size range and single match threshold.
    Called by evaluate_in_multi_condition.
    """
    assert (set(all_gts.keys()) | set(all_dets.keys())) == set(all_gts.keys()), "all det image must in gt"
    matched, sorted_scores, keep_masks = {}, {}, {}
    len_pos = 0
    for img_id in all_gts:
        gts, gts_ignore = all_gts[img_id], all_gts_ignore[img_id]
        dets, dets_score = all_dets[img_id], all_dets_score[img_id]
        # count non-ignored gts even for images without detections
        len_pos += len(gts_ignore) - np.sum(gts_ignore)
        if len(dets) == 0:
            continue
        regular_gts = gts[np.logical_not(gts_ignore)]
        ignored_gts = gts[gts_ignore]
        # sort dets by descending score, then truncate to maxDets
        order = np.argsort(-dets_score)
        dets = dets[order][:maxDets]
        dets_score = dets_score[order][:maxDets]
        match_gts, dets_ignore, dets_score = matcher(dets, dets_score, regular_gts, ignored_gts,
                                                     match_th, **matcher_kwargs)
        matched[img_id] = match_gts
        sorted_scores[img_id] = dets_score
        keep_masks[img_id] = np.logical_not(dets_ignore)
    # drop ignored detections before computing AP
    image_ids = list(matched.keys())
    match_gts_array = np.concatenate([matched[i][keep_masks[i]] for i in image_ids])
    dets_scores_array = np.concatenate([sorted_scores[i][keep_masks[i]] for i in image_ids])
    recall, precision = cal_recall_precision(match_gts_array, dets_scores_array, len_pos)
    return {"recall": recall, "precision": precision}
def match_and_cal_recall_precision_of_every_image(all_dets, all_dets_score, all_gts, all_gts_ignore,
                                                  match_th, maxDets, matcher,
                                                  matcher_kwargs={}):
    """
    debug function: compute recall and precision separately for every image.

    Sentinel values are emitted when a per-image PR curve is not defined:
      recall=[0.], precision=[-2]  -> image has gts but no kept dets (miss)
      recall=[0],  precision=[-3]  -> image has no gts but kept dets (false alarm)
      recall=[1.], precision=[2.]  -> image has no gts and no kept dets
      recall=[1.], precision=[1.]  -> fallback branch
    NOTE(review): images with len(dets) == 0 are silently skipped and get no
    entry in the result dicts even when they contain gts — confirm intended.
    NOTE(review): the first `if` (miss) and the later `elif len(G) > 0` branch
    set the same values; the standalone `if` makes the second redundant.
    """
    assert (set(all_gts.keys()) | set(all_dets.keys())) == set(all_gts.keys()), "all det image must in gt"
    all_recall, all_precision = {}, {}
    for i in all_gts:
        gts, gts_ignore = all_gts[i], all_gts_ignore[i]
        dets, dets_score = all_dets[i], all_dets_score[i]
        if len(dets) > 0:
            G, IG = gts[np.logical_not(gts_ignore)], gts[gts_ignore]
            # D = descend_sort_by_score(D)
            idx = np.argsort(-dets_score)
            dets, dets_score = dets[idx][:maxDets], dets_score[idx][:maxDets]
            match_gts, dets_ignore, dets_score = matcher(dets, dets_score, G, IG, match_th, **matcher_kwargs)
            dets_keep = np.logical_not(dets_ignore)
            if len(G) > 0 and (np.sum(dets_keep) == 0):
                # miss
                all_recall[i] = [0.]
                all_precision[i] = [-2]
            if len(G) == 0 and (np.sum(dets_keep) > 0):
                # false alarm
                all_recall[i] = [0]
                all_precision[i] = [-3]
            elif len(G) == 0 and (np.sum(dets_keep) == 0):
                all_recall[i] = [1.]
                all_precision[i] = [2.]
            elif len(G) > 0 and np.sum(dets_keep) > 0:
                recall, precision = cal_recall_precision(match_gts[dets_keep], dets_score[dets_keep], len(G))
                all_recall[i] = recall
                all_precision[i] = precision
            elif len(G) > 0:
                # miss gt
                all_recall[i] = [0.]
                all_precision[i] = [-2]
            else:
                all_recall[i] = [1.]
                all_precision[i] = [1.]
    return {"all_recall": all_recall, "all_precision": all_precision}
def evaluate_in_multi_condition(all_dets, all_dets_score, all_gts, all_gts_ignore,
                                match_th_list, size_ranges, maxDets_list, matcher,
                                matcher_kwargs={}, evaluate_img_separate=False):
    """
    Run matching + PR computation over every (size_range, match_th, maxDets) combination.

    evaluate_img_separate: if True, then for each image calculate recall and
    precision separately; only for analysis.
    """
    res = {'match_th_idx': [], 'size_range_idx': [], 'maxDets_idx': []}
    for size_idx, (min_size, max_size) in enumerate(size_ranges):
        # mark gts whose size (sqrt of area) falls outside [min_size, max_size) as ignored
        ignore_copy = deepcopy(all_gts_ignore)
        for img_id, gts_ignore in ignore_copy.items():
            gts = all_gts[img_id]
            if len(gts) <= 0:
                continue
            sizes = np.sqrt(gts[:, -1] * gts[:, -2])
            gts_ignore[np.logical_or(sizes >= max_size, sizes < min_size)] = True
        for th_idx, match_th in enumerate(match_th_list):
            for md_idx, maxDets in enumerate(maxDets_list):
                eval_fn = (match_and_cal_recall_precision_of_every_image
                           if evaluate_img_separate else match_and_cal_recall_precision)
                results = eval_fn(all_dets, all_dets_score, all_gts, ignore_copy,
                                  match_th, maxDets, matcher, matcher_kwargs)
                res['match_th_idx'].append(th_idx)
                res['size_range_idx'].append(size_idx)
                res['maxDets_idx'].append(md_idx)
                for key, value in results.items():
                    res.setdefault(key, []).append(value)
    return res
# part2: recall precision cal, to get recall and precision from match result end ###############################
# part3: transform input to call evaluate_in_multi_condition start ###############################
def group_by(dicts, key):
    """Group a sequence of dicts by the value found at ``key``.

    Returns {value: [dict, ...]}, preserving input order within each group.
    """
    # dict.setdefault replaces the manual membership check + branch
    res = {}
    for obj in dicts:
        res.setdefault(obj[key], []).append(obj)
    return res
def get_center_w_h(x, y, w, h):
    """Convert an (x, y, w, h) box to [center_x, center_y, w, h]."""
    center_x = x + (w - 1) / 2
    center_y = y + (h - 1) / 2
    return [center_x, center_y, w, h]
class LocationEvaluator(object):
    """
    Point-based location evaluator: AP/AR over distance match thresholds.

    example:
    --------------------------------------------------------------------
    MAX_SIZE = 1e5
    evaluator = LocationEvaluator(
        areaRng=[(1**2, 20**2), (20**2, MAX_SIZE**2), (1**2, MAX_SIZE**2)],
        matchThs=[0.5, 1.0, 2.0],
        matcher_kwargs=dict(multi_match_not_false_alarm=False)
    )
    # first call way
    from pycocotools.coco import COCO
    gt_jd = COCO(gt_file)
    det_jd = gt_jd.loadRes(det_file)
    LocationEvaluator.add_center_from_bbox_if_no_point(det_jd)
    res = evaluator(det_jd, gt_jd)
    # second call way
    gt_jd = json.load(open(gt_file))
    det_jd = json.load(open(det_file))
    LocationEvaluator.add_center_from_bbox_if_no_point(det_jd)
    res = evaluator(det_jd, gt_jd)
    --------------------------------------------------------------------
    return:
    --------------------------------------------------------------------
    res[cate_idx] = {
        'match_th_idx': [....],
        'size_range_idx': [....],
        'maxDets_idx': [....],
        'recall': [[...], ....],
        'precision': [[...], ....]
    }
    category: gt_jd['categories'][cate_idx]
    """
    # when set to a file path, cal_recall_precision dumps the raw PR arrays there (npz)
    SAVE_RECALL_PRECISION_PATH = None

    def __init__(self, evaluate_img_separate=False, class_wise=False,
                 location_param={}, matcher_kwargs=dict(multi_match_not_false_alarm=False), **kwargs):
        """
        evaluate_img_separate: if True, then for each image, calculate recall and precision;
            only set True for analysis.
        class_wise: per-class summaries in summarize() (not implemented yet).
        location_param: overrides for 'maxDets', 'recThrs', 'matchThs', 'areaRng', 'areaRngLbl'.
        matcher_kwargs: forwarded to the PointMatcher call.
        """
        self.matchThs = [0.5, 1.0, 2.0]
        self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
        self.maxDets = [200] if "maxDets" not in kwargs else kwargs["maxDets"]
        self.areaRng = [[1 ** 2, 1e5 ** 2], [1 ** 2, 20 ** 2], [1 ** 2, 8 ** 2], [8 ** 2, 12 ** 2],
                        [12 ** 2, 20 ** 2], [20 ** 2, 32 ** 2], [32 ** 2, 1e5 ** 2]] \
            if "areaRng" not in kwargs else kwargs["areaRng"]
        self.areaRngLbl = ['all', 'tiny', 'tiny1', 'tiny2', 'tiny3', 'small', 'reasonable'] \
            if "areaRngLbl" not in kwargs else kwargs["areaRngLbl"]
        for key, value in location_param.items():
            assert key in ['maxDets', 'recThrs', 'matchThs', 'areaRng', 'areaRngLbl'], f"{key} is not valid"
            self.__setattr__(key, value)
        if isinstance(self.recThrs, str):
            # NOTE: eval of a config-supplied expression; only pass trusted strings here
            self.recThrs = eval(self.recThrs)
        self.recThrs = np.array(self.recThrs)
        assert len(self.areaRng) == len(self.areaRngLbl)
        # evaluate_in_multi_condition filters gts by sqrt(area), i.e. object size
        self.size_ranges = np.array([[min_area**0.5, max_area**0.5] for min_area, max_area in self.areaRng])
        self.class_wise = class_wise
        self.evaluate_img_separate = evaluate_img_separate
        self.matcher = PointMatcher()
        self.matcher_kwargs = matcher_kwargs

    def __call__(self, det_jd, gt_jd):
        """Evaluate detections against gts; accepts COCO objects or plain json structures."""
        try:
            from pycocotools.coco import COCO
            if isinstance(det_jd, COCO):
                det_jd = list(det_jd.anns.values())
            if isinstance(gt_jd, COCO):
                gt_jd = gt_jd.dataset
        except ModuleNotFoundError:
            pass  # pycocotools is optional; plain dict/list inputs still work
        return self.evaluate_multi_class(det_jd, gt_jd)

    def evaluate_multi_class(self, det_jd, gt_jd):
        """Evaluate each category independently; returns one result dict per category."""
        res_set = []
        for cate in gt_jd['categories']:
            gt_annos = [anno for anno in gt_jd['annotations'] if anno['category_id'] == cate['id']]
            single_class_det_jd = [det for det in det_jd if det['category_id'] == cate['id']]
            single_class_gt_jd = {key: value for key, value in gt_jd.items() if key != 'annotations'}
            single_class_gt_jd['annotations'] = gt_annos
            res = self.evaluate_single_class(single_class_det_jd, single_class_gt_jd)
            res_set.append(res)
        return res_set

    def evaluate_single_class(self, det_jd, gt_jd):
        """Group dets/gts by image, convert to arrays, and run the multi-condition evaluation."""
        g_det_jd = {img['id']: [] for img in gt_jd['images']}
        g_det_jd.update(group_by(det_jd, "image_id"))
        g_gt_jd = {img['id']: [] for img in gt_jd['images']}
        g_gt_jd.update(group_by(gt_jd['annotations'], 'image_id'))
        all_dets_point = {img_id: np.array([det['point'] for det in dets], dtype=np.float32) for img_id, dets in
                          g_det_jd.items()}
        all_dets_score = {img_id: np.array([det['score'] for det in dets], dtype=np.float32) for img_id, dets in
                          g_det_jd.items()}
        all_gts_centerwh = {img_id: np.array([get_center_w_h(*gt['bbox']) for gt in gts], dtype=np.float32) for
                            img_id, gts in g_gt_jd.items()}
        # fix: np.bool was removed in NumPy >= 1.24; the builtin bool is equivalent
        all_gts_ignore = {img_id: np.array([gt['ignore'] for gt in gts], dtype=bool) for img_id, gts in
                          g_gt_jd.items()}
        res = evaluate_in_multi_condition(all_dets_point, all_dets_score, all_gts_centerwh, all_gts_ignore,
                                          self.matchThs, self.size_ranges, self.maxDets,
                                          self.matcher, self.matcher_kwargs, self.evaluate_img_separate)
        return res

    def summarize(self, res, gt_jd, print_func=None):
        """Print class-averaged AP/AR for every (matchTh, areaRng, maxDets) condition."""
        if print_func is None:
            print_func = print
        try:
            from pycocotools.coco import COCO
            if isinstance(gt_jd, COCO):
                gt_jd = gt_jd.dataset
        except ModuleNotFoundError:
            pass
        assert isinstance(gt_jd, dict)
        all_aps = []
        all_ars = []
        for cls_i, (single_class_res, category) in enumerate(zip(res, gt_jd['categories'])):
            recalls = single_class_res['recall']
            precisions = single_class_res['precision']
            aps, ars = [], []
            for recall, precision in zip(recalls, precisions):
                ap = LocationEvaluator.get_AP_of_recall(recall, precision, recall_th=self.recThrs)
                aps.append(ap)
                ars.append(max(recall))  # AR: best recall reached on the curve
            all_aps.append(aps)
            all_ars.append(ars)
        all_aps = np.array(all_aps)
        all_ars = np.array(all_ars)
        if len(all_aps) > 0:
            mi = res[0]['match_th_idx']
            si = res[0]['size_range_idx']
            mdi = res[0]['maxDets_idx']
            if self.class_wise:
                raise NotImplementedError()
            else:
                all_aps = all_aps.mean(axis=0)
                all_ars = all_ars.mean(axis=0)
                # (fix: removed stray debug `print(mi)` that polluted the summary output)
                for i, (ap, ar) in enumerate(zip(all_aps, all_ars)):
                    logs = "Location eval: (AP/AR) @[ dis={}\t| area={}\t| maxDets={}]\t= {}/{}".format(
                        self.matchThs[mi[i]], self.areaRngLbl[si[i]], self.maxDets[mdi[i]], '%.4f'% ap, '%.4f' % ar)
                    print_func(logs)

    @staticmethod
    def get_AP_of_recall(recall, precision, recall_th=None, DEBUG=False):
        """Average precision: mean of precision sampled at the given recall thresholds.

        recall_th: None -> 101 thresholds in [0, 1]; int -> that many steps per
        unit recall; otherwise used directly as an array of thresholds.
        Thresholds beyond the last reached recall contribute precision 0.
        """
        assert len(recall) == len(precision), ""
        if recall_th is None:
            # fix: np.linspace requires an integer num; np.round returns a float
            # (matches the int(...) wrapping already used in __init__)
            recall_th = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
        elif isinstance(recall_th, int):
            recall_th = np.linspace(.0, 1.00, int(np.round((1.00 - .0) * recall_th)) + 1, endpoint=True)
        inds = np.searchsorted(recall, recall_th, side='left')
        choose_precisions = [precision[pi] if pi < len(recall) else 0 for pi in inds]
        if DEBUG:
            print("choose_precisions", choose_precisions)
        return np.sum(choose_precisions) / len(recall_th)

    @staticmethod
    def add_center_from_bbox_if_no_point(det_jd):
        """Derive 'point' (bbox center) for every det lacking one; accepts COCO or list input."""
        try:
            from pycocotools.coco import COCO
            if isinstance(det_jd, COCO):
                for idx, det in det_jd.anns.items():
                    if 'point' not in det:
                        x, y, w, h = det['bbox']
                        det['point'] = [x + (w - 1) / 2, y + (h - 1) / 2]
                        det_jd.anns[idx] = det
                return
        except ModuleNotFoundError:
            pass
        assert isinstance(det_jd, list)
        for det in det_jd:
            if 'point' not in det:
                x, y, w, h = det['bbox']
                det['point'] = [x + (w - 1) / 2, y + (h - 1) / 2]
if __name__ == '__main__':
    # CLI: evaluate point-location detections against a COCO-format gt file.
    # task 1: print AP/AR summary; task 2: additionally find the score threshold
    # that reaches a given recall from the saved PR curve.
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('gt', help='gt file')
    parser.add_argument('det', help='det result file')
    parser.add_argument('--matchThs', default=[0.5, 1.0, 2.0], nargs='+', type=float)
    parser.add_argument('--maxDets', default=[300], nargs='+', type=int)
    parser.add_argument('--task', default=1, type=int)
    parser.add_argument('--given-recall', default=[0.9], nargs='+', type=float, help='arg for task==2')
    args = parser.parse_args()
    # nargs='+' normally yields a list; this guards against a scalar default
    if isinstance(args.matchThs, float):
        args.matchThs = [args.matchThs]
    # ############################### 1. normal evaluation
    if args.task == 1:
        location_kwargs = dict(
            matcher_kwargs=dict(multi_match_not_false_alarm=False),
            location_param=dict(
                matchThs=args.matchThs,  # [0.5, 1.0, 2.0],
                recThrs='np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)',
                maxDets=args.maxDets,  # [300],
                # recThrs='np.linspace(.90, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)',
                # maxDets=[1000],
            )
        )
        print(location_kwargs)
        # '/home/ubuntu/dataset/visDrone/coco_fmt_annotations/VisDrone2018-DET-val-person.json'
        # exp//latest_result.json
        gt_file = args.gt  # '/home/ubuntu/dataset/visDrone/coco_fmt_annotations/VisDrone2018-DET-val-person.json'
        det_file = args.det  # '/home/ubuntu/github/sparsercnn/outputs/locanet/visdroneperson_sparsercnn.res50.1000pro/' \
        # '640_stridein3_ADAMW_1x/inference/coco_instances_results.json'
        import json
        gt_jd = json.load(open(gt_file))
        det_jd = json.load(open(det_file))
        LocationEvaluator.add_center_from_bbox_if_no_point(det_jd)
        loc_evaluator = LocationEvaluator(**location_kwargs)
        res = loc_evaluator(det_jd, gt_jd)
        loc_evaluator.summarize(res, gt_jd)
    # ############################################# 2. find score with given recall
    elif args.task == 2:
        location_kwargs = dict(
            matcher_kwargs=dict(multi_match_not_false_alarm=False),
            location_param=dict(
                matchThs=args.matchThs,  # [0.5, 1.0, 2.0],
                recThrs='np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)',
                maxDets=args.maxDets,  # [300],
                # recThrs='np.linspace(.90, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)',
                # maxDets=[1000],
                areaRng=[[1 ** 2, 1e5 ** 2]],
                areaRngLbl=['all'],
            )
        )
        # enables the npz dump inside cal_recall_precision
        LocationEvaluator.SAVE_RECALL_PRECISION_PATH = "/tmp/evaluation.npz"
        print(location_kwargs)
        # '/home/ubuntu/dataset/visDrone/coco_fmt_annotations/VisDrone2018-DET-val-person.json'
        # exp//latest_result.json
        gt_file = args.gt  # '/home/ubuntu/dataset/visDrone/coco_fmt_annotations/VisDrone2018-DET-val-person.json'
        det_file = args.det  # '/home/ubuntu/github/sparsercnn/outputs/locanet/visdroneperson_sparsercnn.res50.1000pro/' \
        # '640_stridein3_ADAMW_1x/inference/coco_instances_results.json'
        import json
        gt_jd = json.load(open(gt_file))
        det_jd = json.load(open(det_file))
        LocationEvaluator.add_center_from_bbox_if_no_point(det_jd)
        loc_evaluator = LocationEvaluator(**location_kwargs)
        res = loc_evaluator(det_jd, gt_jd)
        loc_evaluator.summarize(res, gt_jd)
        #
        import matplotlib.pyplot as plt
        d = np.load(LocationEvaluator.SAVE_RECALL_PRECISION_PATH)
        dr = d['recall']
        for given_recall in args.given_recall:
            # first index where the saved recall curve reaches the requested level
            idx = np.arange(0, len(dr), 1)[dr >= given_recall][0]
            print('recall, precision, score:', dr[idx], d['precision'][idx], d['dets_score'][idx])
# import sys
# # ########## small test1 begin #########################################################################
# S, D = [0.9, 0.89, 0.7], [(0.79, 0.7), (0.1, 0.2), (0.498, 0.498)]
# S, D = np.array(S), np.array(D)
# gts = np.array([(0.5, 0.5, 0.1, 0.2), (0.7, 0.7, 0.2, 0.1)])
# gts_ignore = np.array([False, False])
#
# ignore_gts = gts[gts_ignore]
# gts = gts[np.logical_not(gts_ignore)]
#
# # D = descend_sort_by_score(D)
# idx = np.argsort(-S)
# D = D[idx]
# S = S[idx]
#
# matcher = PointMatcher(LOG=sys.stdout)
# print('[test]: dis_th = 1, multi_match_not_false_alarm=False')
# M, ID, det_scores = matcher(D, S, gts, ignore_gts, 1, False)
# print("match_gt_id and is_ignore_det", M, D)
#
# matcher = PointMatcher(LOG=sys.stdout)
# print('[test]: dis_th = 5, multi_match_not_false_alarm=False')
# print("match_gt_id and is_ignore_det", matcher(D, S, gts, ignore_gts, 5, False))
#
# matcher = PointMatcher(LOG=sys.stdout)
# print('[test]: dis_th = 5, multi_match_not_false_alarm=True')
# print("match_gt_id and is_ignore_det", matcher(D, S, gts, ignore_gts, 5, True))
# # ########## small test1 over ############################################################################
#
# # all full test2: apply point evaluation detection
# root_dir = "/home/data/github/tiny_benchmark/tiny_benchmark/outputs/tiny_set/"
# # maskrcnn_benchmark format output of bbox detection
# det_file = root_dir + "FPN/baseline3_R101_cocov3_DA_t_s2.5x_a8/inference/" \
# "tiny_set_corner_sw640_sh512_test_all_coco/bbox_merge_nms0.5.json"
#
# data_root_dir = "/home/data/github/TinyObject/Tiny/add_dataset/_final_dataset/"
# gt_file = data_root_dir + "annotations/task/tiny_set_test_all.json"
# import json
#
# # from pycocotools.coco import COCO
# # gt_jd = COCO(gt_file)
# # det_jd = gt_jd.loadRes(det_file)
#
# gt_jd = json.load(open(gt_file))
# det_jd = json.load(open(det_file))
# LocationEvaluator.add_center_from_bbox_if_no_point(det_jd)
#
# MAX_SIZE = 1e9
# evaluator = LocationEvaluator(
# size_ranges=[(1, 20), (20, MAX_SIZE), (1, MAX_SIZE)],
# match_th_list=[0.5, 1.0, 2.0],
# multi_match_not_false_alarm=False
# )
#
# res = evaluator(det_jd, gt_jd)
# res = res[0]
#
# print(res.keys())
#
# # precision, recall = res['precision'][2], res['recall'][2]
# import matplotlib.pyplot as plt
#
# for i in range(len(res['precision'])):
# plt.plot(res['recall'][i], res['precision'][i], label="{},{},{}".format(
# res['size_range'][i], res['match_th'][i], np.mean(res['precision'][i]).round(3)))
# plt.legend()
# plt.show()
# # print(np.mean(precision))
| [
"numpy.savez",
"numpy.sqrt",
"argparse.ArgumentParser",
"numpy.searchsorted",
"numpy.logical_not",
"numpy.argmax",
"numpy.logical_or",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.concatenate",
"copy.deepcopy",
"numpy.load",
"numpy.round"
] | [((6488, 6511), 'numpy.argsort', 'np.argsort', (['(-dets_score)'], {}), '(-dets_score)\n', (6498, 6511), True, 'import numpy as np\n'), ((7116, 7138), 'numpy.array', 'np.array', (['final_recall'], {}), '(final_recall)\n', (7124, 7138), True, 'import numpy as np\n'), ((7155, 7179), 'numpy.array', 'np.array', (['final_precison'], {}), '(final_precison)\n', (7163, 7179), True, 'import numpy as np\n'), ((8790, 8880), 'numpy.concatenate', 'np.concatenate', (['[all_match_gts[img_id][all_dets_keep[img_id]] for img_id in images_id]'], {}), '([all_match_gts[img_id][all_dets_keep[img_id]] for img_id in\n images_id])\n', (8804, 8880), True, 'import numpy as np\n'), ((8901, 9000), 'numpy.concatenate', 'np.concatenate', (['[all_sorted_dets_scores[img_id][all_dets_keep[img_id]] for img_id in images_id]'], {}), '([all_sorted_dets_scores[img_id][all_dets_keep[img_id]] for\n img_id in images_id])\n', (8915, 9000), True, 'import numpy as np\n'), ((22383, 22399), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (22397, 22399), False, 'from argparse import ArgumentParser\n'), ((1638, 1664), 'numpy.array', 'np.array', (['([1] * V.shape[1])'], {}), '([1] * V.shape[1])\n', (1646, 1664), True, 'import numpy as np\n'), ((7253, 7382), 'numpy.savez', 'np.savez', (['LocationEvaluator.SAVE_RECALL_PRECISION_PATH'], {'recall': 'recall', 'precision': 'precision', 'dets_score': 'dets_score[chosen_idx]'}), '(LocationEvaluator.SAVE_RECALL_PRECISION_PATH, recall=recall,\n precision=precision, dets_score=dets_score[chosen_idx])\n', (7261, 7382), True, 'import numpy as np\n'), ((11850, 11874), 'copy.deepcopy', 'deepcopy', (['all_gts_ignore'], {}), '(all_gts_ignore)\n', (11858, 11874), False, 'from copy import deepcopy\n'), ((16195, 16217), 'numpy.array', 'np.array', (['self.recThrs'], {}), '(self.recThrs)\n', (16203, 16217), True, 'import numpy as np\n'), ((16303, 16393), 'numpy.array', 'np.array', (['[[min_area ** 0.5, max_area ** 0.5] for min_area, max_area in self.areaRng]'], {}), 
'([[min_area ** 0.5, max_area ** 0.5] for min_area, max_area in self\n .areaRng])\n', (16311, 16393), True, 'import numpy as np\n'), ((20137, 20154), 'numpy.array', 'np.array', (['all_aps'], {}), '(all_aps)\n', (20145, 20154), True, 'import numpy as np\n'), ((20173, 20190), 'numpy.array', 'np.array', (['all_ars'], {}), '(all_ars)\n', (20181, 20190), True, 'import numpy as np\n'), ((21307, 21354), 'numpy.searchsorted', 'np.searchsorted', (['recall', 'recall_th'], {'side': '"""left"""'}), "(recall, recall_th, side='left')\n", (21322, 21354), True, 'import numpy as np\n'), ((1754, 1779), 'numpy.argmax', 'np.argmax', (['(V[i, :] * keep)'], {}), '(V[i, :] * keep)\n', (1763, 1779), True, 'import numpy as np\n'), ((3635, 3653), 'numpy.argmax', 'np.argmax', (['V[i, :]'], {}), '(V[i, :])\n', (3644, 3653), True, 'import numpy as np\n'), ((8140, 8158), 'numpy.sum', 'np.sum', (['gts_ignore'], {}), '(gts_ignore)\n', (8146, 8158), True, 'import numpy as np\n'), ((8315, 8338), 'numpy.argsort', 'np.argsort', (['(-dets_score)'], {}), '(-dets_score)\n', (8325, 8338), True, 'import numpy as np\n'), ((8651, 8678), 'numpy.logical_not', 'np.logical_not', (['dets_ignore'], {}), '(dets_ignore)\n', (8665, 8678), True, 'import numpy as np\n'), ((9950, 9973), 'numpy.argsort', 'np.argsort', (['(-dets_score)'], {}), '(-dets_score)\n', (9960, 9973), True, 'import numpy as np\n'), ((10187, 10214), 'numpy.logical_not', 'np.logical_not', (['dets_ignore'], {}), '(dets_ignore)\n', (10201, 10214), True, 'import numpy as np\n'), ((12049, 12081), 'numpy.sqrt', 'np.sqrt', (['(gts[:, -1] * gts[:, -2])'], {}), '(gts[:, -1] * gts[:, -2])\n', (12056, 12081), True, 'import numpy as np\n'), ((17994, 18052), 'numpy.array', 'np.array', (["[det['point'] for det in dets]"], {'dtype': 'np.float32'}), "([det['point'] for det in dets], dtype=np.float32)\n", (18002, 18052), True, 'import numpy as np\n'), ((18151, 18209), 'numpy.array', 'np.array', (["[det['score'] for det in dets]"], {'dtype': 'np.float32'}), 
"([det['score'] for det in dets], dtype=np.float32)\n", (18159, 18209), True, 'import numpy as np\n'), ((18763, 18816), 'numpy.array', 'np.array', (["[gt['ignore'] for gt in gts]"], {'dtype': 'np.bool'}), "([gt['ignore'] for gt in gts], dtype=np.bool)\n", (18771, 18816), True, 'import numpy as np\n'), ((21532, 21557), 'numpy.sum', 'np.sum', (['choose_precisions'], {}), '(choose_precisions)\n', (21538, 21557), True, 'import numpy as np\n'), ((25965, 26018), 'numpy.load', 'np.load', (['LocationEvaluator.SAVE_RECALL_PRECISION_PATH'], {}), '(LocationEvaluator.SAVE_RECALL_PRECISION_PATH)\n', (25972, 26018), True, 'import numpy as np\n'), ((2299, 2325), 'numpy.array', 'np.array', (['([1] * V.shape[0])'], {}), '([1] * V.shape[0])\n', (2307, 2325), True, 'import numpy as np\n'), ((2361, 2387), 'numpy.array', 'np.array', (['([1] * V.shape[1])'], {}), '([1] * V.shape[1])\n', (2369, 2387), True, 'import numpy as np\n'), ((12107, 12157), 'numpy.logical_or', 'np.logical_or', (['(sizes >= max_size)', '(sizes < min_size)'], {}), '(sizes >= max_size, sizes < min_size)\n', (12120, 12157), True, 'import numpy as np\n'), ((8209, 8235), 'numpy.logical_not', 'np.logical_not', (['gts_ignore'], {}), '(gts_ignore)\n', (8223, 8235), True, 'import numpy as np\n'), ((9844, 9870), 'numpy.logical_not', 'np.logical_not', (['gts_ignore'], {}), '(gts_ignore)\n', (9858, 9870), True, 'import numpy as np\n'), ((10247, 10264), 'numpy.sum', 'np.sum', (['dets_keep'], {}), '(dets_keep)\n', (10253, 10264), True, 'import numpy as np\n'), ((10404, 10421), 'numpy.sum', 'np.sum', (['dets_keep'], {}), '(dets_keep)\n', (10410, 10421), True, 'import numpy as np\n'), ((15344, 15372), 'numpy.round', 'np.round', (['((1.0 - 0.0) / 0.01)'], {}), '((1.0 - 0.0) / 0.01)\n', (15352, 15372), True, 'import numpy as np\n'), ((21103, 21131), 'numpy.round', 'np.round', (['((1.0 - 0.0) / 0.01)'], {}), '((1.0 - 0.0) / 0.01)\n', (21111, 21131), True, 'import numpy as np\n'), ((10568, 10585), 'numpy.sum', 'np.sum', 
(['dets_keep'], {}), '(dets_keep)\n', (10574, 10585), True, 'import numpy as np\n'), ((21238, 21271), 'numpy.round', 'np.round', (['((1.0 - 0.0) * recall_th)'], {}), '((1.0 - 0.0) * recall_th)\n', (21246, 21271), True, 'import numpy as np\n'), ((10702, 10719), 'numpy.sum', 'np.sum', (['dets_keep'], {}), '(dets_keep)\n', (10708, 10719), True, 'import numpy as np\n')] |
import backtrader as bt
import pandas as pd
import numpy as np
class NetTradeStrategy(bt.Strategy):
    """Grid-trading strategy built on a long-window price channel.

    The midpoint of the 650-bar high/low channel is multiplied by eleven
    percentage factors (from +2.5% down to -2.5% in 0.5% steps) to form a
    price grid.  The position is rebalanced one grid step at a time as the
    close crosses level boundaries: lighten on the way up, add on the way
    down.
    """
    params = (('p1', 12), ('p2', 26), ('p3', 9),)

    def __init__(self):
        self.order = None
        # MACD histogram driven by the three tunable periods.
        self.macdhist = bt.ind.MACDHisto(self.data,
                                         period_me1=self.p.p1,
                                         period_me2=self.p.p2,
                                         period_signal=self.p.p3)
        # Long-window channel whose midpoint anchors the grid.
        self.highest = bt.indicators.Highest(self.data.high, period=650, subplot=False)
        self.lowest = bt.indicators.Lowest(self.data.low, period=650, subplot=False)
        midline = (self.highest + self.lowest) / 2
        # Multipliers from 1 + 2.5% down to 1 - 2.5% in -0.5% steps; the
        # extra -0.005/2 on the stop guarantees the lowest level is included.
        multipliers = list(np.arange(
            1 + 0.005 * 5, 1 - 0.005 * 5 - 0.005 / 2, -0.005))
        self.price_levels = [midline * m for m in multipliers]
        self.last_price_index = None
        for idx, level in enumerate(self.price_levels):
            print(idx)
            print(level + 0)

    def next(self):
        if self.last_price_index is None:
            # First signal: locate the highest grid level below the close and
            # open the matching fraction of the portfolio.
            for idx in range(len(self.price_levels)):
                if self.data.close > self.price_levels[idx]:
                    self.last_price_index = idx
                    self.order_target_percent(
                        target=idx / (len(self.price_levels) - 1))
                    return
        else:
            moved = False
            # Step through the grid until the close sits between the two
            # levels adjacent to the current index again.
            while True:
                upper = None
                lower = None
                if self.last_price_index > 0:
                    upper = self.price_levels[self.last_price_index - 1]
                if self.last_price_index < len(self.price_levels) - 1:
                    lower = self.price_levels[self.last_price_index + 1]
                # Not at the lightest position yet and price keeps rising:
                # sell one more grid step.
                if upper is not None and self.data.close > upper:
                    self.last_price_index -= 1
                    moved = True
                    continue
                # Not at the heaviest position yet and price keeps falling:
                # buy one more grid step.
                if lower is not None and self.data.close < lower:
                    self.last_price_index += 1
                    moved = True
                    continue
                break
            if moved:
                self.long_short = None
                self.order_target_percent(
                    target=self.last_price_index / (len(self.price_levels) - 1))
"backtrader.indicators.Highest",
"backtrader.ind.MACDHisto",
"numpy.arange",
"backtrader.indicators.Lowest"
] | [((236, 336), 'backtrader.ind.MACDHisto', 'bt.ind.MACDHisto', (['self.data'], {'period_me1': 'self.p.p1', 'period_me2': 'self.p.p2', 'period_signal': 'self.p.p3'}), '(self.data, period_me1=self.p.p1, period_me2=self.p.p2,\n period_signal=self.p.p3)\n', (252, 336), True, 'import backtrader as bt\n'), ((578, 642), 'backtrader.indicators.Highest', 'bt.indicators.Highest', (['self.data.high'], {'period': '(650)', 'subplot': '(False)'}), '(self.data.high, period=650, subplot=False)\n', (599, 642), True, 'import backtrader as bt\n'), ((665, 727), 'backtrader.indicators.Lowest', 'bt.indicators.Lowest', (['self.data.low'], {'period': '(650)', 'subplot': '(False)'}), '(self.data.low, period=650, subplot=False)\n', (685, 727), True, 'import backtrader as bt\n'), ((807, 866), 'numpy.arange', 'np.arange', (['(1 + 0.005 * 5)', '(1 - 0.005 * 5 - 0.005 / 2)', '(-0.005)'], {}), '(1 + 0.005 * 5, 1 - 0.005 * 5 - 0.005 / 2, -0.005)\n', (816, 866), True, 'import numpy as np\n')] |
from rb.core.lang import Lang
from rb.core.document import Document
from rb.complexity.complexity_index import ComplexityIndex, compute_indices
from rb.complexity.cohesion.adj_cohesion import AdjCohesion
from rb.similarity.word2vec import Word2Vec
from rb.similarity.vector_model import VectorModelType, CorporaEnum, VectorModel
from rb.similarity.vector_model_factory import VECTOR_MODELS, create_vector_model
from rb.cna.cna_graph import CnaGraph
from typing import Tuple, List
from sklearn.svm import SVR
import pickle
import os
import csv
from rb.utils.rblogger import Logger
from typing import List, Tuple
import numpy as np
# Module-level logger shared by the classes in this module.
logger = Logger.get_logger()
class Fluctuations:
    """Computes fluctuations of text-complexity indices across a document.

    For a fixed set of indices, collects the per-sentence and per-paragraph
    values and attaches outlier thresholds (mean ± 2 standard deviations) so
    that unusually high or low units can be flagged.
    """

    def __init__(self):
        pass

    def get_vector_model(self, lang: Lang = Lang.RO) -> VectorModel:
        """Return the word2vec model for the given language.

        Romanian uses the "readme" corpus and English the "coca" corpus; any
        other language logs an error and returns None.
        """
        if lang is Lang.RO:
            return create_vector_model(Lang.RO, VectorModelType.from_str('word2vec'), "readme")
        if lang is Lang.EN:
            return create_vector_model(Lang.EN, VectorModelType.from_str("word2vec"), "coca")
        logger.error(f'Language {lang.value} is not supported for fluctuations task')
        return None

    def compute_thresholds(self, values: List[float]) -> Tuple[float, float]:
        """Return (upper, lower) outlier thresholds, each clamped at 0.

        Thresholds are mean ± 2 * std of *values*.  A single value is given
        an assumed std of 1; an empty list uses sentinel mean/std of -1,
        which yields the degenerate thresholds (1, 0).
        """
        if len(values) > 1:
            stdev = np.std(values)
            avg = np.mean(values)
        elif len(values) == 1:
            avg = values[0]
            stdev = 1
        else:
            avg = -1
            stdev = -1
        return (max(0, avg + 2.0 * stdev), max(0, avg - 2.0 * stdev))

    def _collect_fluctuations(self, descriptions, units, level: str, lang: Lang) -> List[dict]:
        """Gather per-unit index values and attach min/max thresholds.

        descriptions maps index repr -> {Lang: human-readable description};
        units is the sequence of sentences or blocks to scan; level labels
        the granularity ('sentence' or 'paragraph') in each output entry.
        Values and thresholds are stringified for serialization.
        """
        result = []
        for index_name, index_desc in descriptions.items():
            entry = {'index': index_name, 'index_description': index_desc[lang],
                     'level': level, 'values': [], 'text': []}
            for unit in units:
                for key, value in unit.indices.items():
                    if repr(key) == index_name:
                        entry['values'].append(value)
                entry['text'].append(unit.text)
            maxt, mint = self.compute_thresholds(entry['values'])
            entry['threshold'] = {
                'min': str(mint),
                'max': str(maxt)
            }
            entry['values'] = [str(v) for v in entry['values']]
            result.append(entry)
        return result

    def compute_indices(self, text: str, lang: Lang) -> List[dict]:
        """Compute sentence- and paragraph-level index fluctuations for *text*.

        Builds the document and its CNA graph, computes all complexity
        indices, then returns one entry per tracked index and granularity
        (sentence entries first, then paragraph entries).
        """
        doc = Document(lang=lang, text=text)
        vector_model = self.get_vector_model(lang=lang)
        cna_graph = CnaGraph(docs=doc, models=[vector_model])
        compute_indices(doc=doc, cna_graph=cna_graph)
        indices_sent = {
            'AvgSentUnqPOSMain_noun':
            {Lang.RO: 'Numărul de substantive unice per propoziție',
             Lang.EN: 'Number of unique nouns per sentence'},
            'AvgSentUnqPOSMain_verb':
            {Lang.RO: 'Numărul de verbe unice per propoziție',
             Lang.EN: 'Number of unique verbs per sentence'},
            'AvgSentUnqPOSMain_adj':
            {Lang.RO: 'Numărul de adjective unice per propoziție',
             Lang.EN: 'Number of unique adjectives per sentence'},
            'AvgSentUnqPOSMain_adv':
            {Lang.RO: 'Numărului de adverbe unice per propoziție',
             Lang.EN: 'Number of unique adverbs per sentence'},
            'AdjExtCoh_SENT':
            {Lang.RO: 'Coeziunea propoziției curente cu propozițiile vecine',
             Lang.EN: 'Cohesion of the current sentence with its neighbouring sentences'}
        }
        indices_block = {
            'AvgSentUnqPOSMain_noun':
            {Lang.RO: 'Media numărului de substantive unice per frază',
             Lang.EN: 'Average of the number of unique nouns per paragraph'},
            'AvgSentUnqPOSMain_verb':
            {Lang.RO: 'Media numărului de verbe unice per frază',
             Lang.EN: 'Average of the number of unique verbs per paragraph'},
            'AvgSentUnqPOSMain_adj':
            {Lang.RO: 'Media numărului de adjective unice per frază',
             Lang.EN: 'Average of the number of unique adjectives per paragraph'},
            'AvgSentUnqPOSMain_adv':
            {Lang.RO: 'Media numărului de adverbe unice per frază',
             Lang.EN: 'Average of the number of unique adverbs per paragraph'},
            'AdjExtCoh_BLOCK':
            {Lang.RO: 'Coeziunea paragrafului curent cu paragrafele vecine',
             Lang.EN: 'Cohesion of the current paragraph with its neighbouring paragraphs'}
        }
        result = self._collect_fluctuations(indices_sent, doc.get_sentences(), 'sentence', lang)
        result += self._collect_fluctuations(indices_block, doc.get_blocks(), 'paragraph', lang)
        return result
"numpy.mean",
"rb.utils.rblogger.Logger.get_logger",
"rb.similarity.vector_model.VectorModelType.from_str",
"rb.complexity.complexity_index.compute_indices",
"numpy.std",
"rb.core.document.Document",
"rb.cna.cna_graph.CnaGraph"
] | [((641, 660), 'rb.utils.rblogger.Logger.get_logger', 'Logger.get_logger', ([], {}), '()\n', (658, 660), False, 'from rb.utils.rblogger import Logger\n'), ((1710, 1740), 'rb.core.document.Document', 'Document', ([], {'lang': 'lang', 'text': 'text'}), '(lang=lang, text=text)\n', (1718, 1740), False, 'from rb.core.document import Document\n'), ((1817, 1858), 'rb.cna.cna_graph.CnaGraph', 'CnaGraph', ([], {'docs': 'doc', 'models': '[vector_model]'}), '(docs=doc, models=[vector_model])\n', (1825, 1858), False, 'from rb.cna.cna_graph import CnaGraph\n'), ((1867, 1912), 'rb.complexity.complexity_index.compute_indices', 'compute_indices', ([], {'doc': 'doc', 'cna_graph': 'cna_graph'}), '(doc=doc, cna_graph=cna_graph)\n', (1882, 1912), False, 'from rb.complexity.complexity_index import ComplexityIndex, compute_indices\n'), ((1368, 1382), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (1374, 1382), True, 'import numpy as np\n'), ((1401, 1416), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (1408, 1416), True, 'import numpy as np\n'), ((901, 937), 'rb.similarity.vector_model.VectorModelType.from_str', 'VectorModelType.from_str', (['"""word2vec"""'], {}), "('word2vec')\n", (925, 937), False, 'from rb.similarity.vector_model import VectorModelType, CorporaEnum, VectorModel\n'), ((1035, 1071), 'rb.similarity.vector_model.VectorModelType.from_str', 'VectorModelType.from_str', (['"""word2vec"""'], {}), "('word2vec')\n", (1059, 1071), False, 'from rb.similarity.vector_model import VectorModelType, CorporaEnum, VectorModel\n')] |
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
# Copyright 2021 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was modified by <NAME> in 2021
"""Tests for utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from disentanglement_lib.evaluation.metrics import utils
from disentanglement_lib.data.ground_truth import dummy_data
import numpy as np
class UtilsTest(absltest.TestCase):
    """Unit tests for the metric helper functions in utils.py."""

    def test_histogram_discretizer(self):
        """Equal-mass binning of 2D samples into three bins."""
        samples = np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
                            [0.6, 0.5, 0.4, 0.3, 0.2, 0.1]])
        expected = np.array([[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]])
        np.testing.assert_array_equal(
            utils._histogram_discretize(samples, num_bins=3), expected)

    def test_discrete_entropy(self):
        """Each row is uniform over 3 symbols, so entropy is log(3)."""
        codes = np.array([[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]])
        expected = np.log(3)
        np.testing.assert_allclose(utils.discrete_entropy(codes),
                                   [expected, expected])

    def test_discrete_mutual_info(self):
        """Identical rows share log(2) bits; independent rows share none."""
        xs = np.array([[1, 2, 1, 2], [1, 1, 2, 2]])
        ys = np.array([[1, 2, 1, 2], [2, 2, 1, 1]])
        expected = np.array([[np.log(2), 0.], [0., np.log(2)]])
        np.testing.assert_allclose(utils.discrete_mutual_info(xs, ys), expected)

    def test_split_train_test(self):
        """A 90/10 split of 100 columns yields 90 and 10 columns."""
        data = np.zeros([10, 100])
        train, test = utils.split_train_test(data, 0.9)
        np.testing.assert_allclose(train, np.zeros([10, 90]))
        np.testing.assert_allclose(test, np.zeros([10, 10]))

    def test_local_sample_factors(self):
        """Sampling radius scales with each factor's cardinality."""
        rng = np.random.RandomState(3)
        # Sample range is 10% of each factor's number of values.
        num_values = [1, 9, 10, 11, 100, 101]
        centroid = np.array([0, 4, 9, 3, 10, 10])
        samples = utils.local_sample_factors(1000, 0.1,
            num_values, centroid, 0, rng)
        np.testing.assert_equal(samples.shape, (1000, 6))
        self.assertTrue(np.all(samples[:, 0] == 0))
        # All draws share one value, since 0.1 * 9 < 1.
        self.assertEqual(np.max(samples[:, 1]), np.min(samples[:, 1]))
        # Radius 1 (diameter 2) for the 10- and 11-valued factors.
        for col in (2, 3):
            assert_correct_radius(self, samples[:, col], 1, 0, num_values[col] - 1)
        # Radius 10 (diameter 20) for the 100- and 101-valued factors.
        for col in (4, 5):
            assert_correct_radius(self, samples[:, col], 10, 0, num_values[col] - 1)
        # Same experiment, but factors with fewer than 11 values no longer
        # count as continuous, so the 10-valued factor also collapses to a
        # single value.  Sample range is now 15% of the cardinality.
        num_values = [1, 9, 10, 11, 100, 110]
        samples = utils.local_sample_factors(1000, 0.15,
            num_values, centroid, 11, rng)
        np.testing.assert_equal(samples.shape, (1000, 6))
        self.assertTrue(np.all(samples[:, 0] == 0))
        # These should all share one value.
        for col in (1, 2):
            self.assertEqual(np.max(samples[:, col]), np.min(samples[:, col]))
        # Radius 1 here, since floor(0.15 * 11) = 1.
        assert_correct_radius(self, samples[:, 3], 1, 0, num_values[3] - 1)
        # Radii 15 and 16 for the two largest factors.
        assert_correct_radius(self, samples[:, 4], 15, 0, num_values[4] - 1)
        assert_correct_radius(self, samples[:, 5], 16, 0, num_values[5] - 1)

    def test_sample_integers_around_center(self):
        """Integer draws stay inside bounds and within the radius."""
        rng = np.random.RandomState(3)
        for _ in range(20):
            draw = utils.sample_integers_around_center(5, 3, 0, 10, 100, rng)
            # Every draw lies in [2, 8] and both halves of the range are hit.
            self.assertTrue(np.all(draw <= 8))
            self.assertTrue(np.all(draw >= 2))
            self.assertTrue(np.any(draw > 6))
            self.assertTrue(np.any(draw < 4))
        for _ in range(20):
            draw = utils.sample_integers_around_center(5, 3, 4, 6, 100, rng)
            self.assertTrue(np.all(draw <= 6))
            self.assertTrue(np.all(draw >= 4))
        # Radius 0 degenerates to a constant sample at the center.
        draw = utils.sample_integers_around_center(5, 0, 4, 6, 100, rng)
        self.assertTrue(np.all(draw == 5))
        self.assertEqual(len(draw), 100)
        self.assertTrue(draw.dtype == np.int32)

    def test_generate_batch_factor_code(self):
        """With an identity representation, codes mirror the factors."""
        data = dummy_data.IdentityObservationsData()
        identity_fn = lambda x: np.array(x, dtype=np.float64)
        rng = np.random.RandomState(3)
        reprs, factors = utils.generate_batch_factor_code(data,
            identity_fn, 100, rng, 192)
        for batch in (reprs, factors):
            np.testing.assert_equal(batch.shape, [10, 100])
            for row in range(10):
                self.assertEqual(np.min(batch[row, :]), 0)
                self.assertEqual(np.max(batch[row, :]), 10 - 1)

    def test_generate_local_batch_factor_code(self):
        """Local sampling restricts factors to a radius around a center."""
        data = dummy_data.IdentityObservationsData()
        identity_fn = lambda x: np.array(x, dtype=np.float64)
        rng = np.random.RandomState(3)
        # Batch size deliberately smaller than the number of points.
        reprs, factors = utils.generate_local_batch_factor_code(data,
            identity_fn, 100, rng, 13,
            locality_proportion=1.0, continuity_cutoff=0.0)
        # Full locality: the whole factor range is reachable.
        for batch in (reprs, factors):
            np.testing.assert_equal(batch.shape, [10, 100])
            for row in range(10):
                self.assertEqual(np.min(batch[row, :]), 0)
                self.assertEqual(np.max(batch[row, :]), 10 - 1)
        reprs, factors = utils.generate_local_batch_factor_code(data,
            identity_fn, 100, rng, 13,
            locality_proportion=0.1, continuity_cutoff=0.0)
        # 10% locality: each factor stays within radius 1 of its center.
        for batch in (reprs, factors):
            np.testing.assert_equal(batch.shape, [10, 100])
            for row in range(10):
                assert_correct_radius(self, batch[row, :], 1, 0, 10 - 1)
# used in the sampling test
# samples should span the full 2 * radius unless they hit an upper/lower bound
def assert_correct_radius(tester, array, radius, lowbound, upbound):
    """Assert that the samples in *array* span the full diameter 2*radius.

    If the samples touch *lowbound* or *upbound*, the span may have been
    clipped, so only an upper bound on the diameter is enforced.
    """
    lo, hi = np.min(array), np.max(array)
    span = hi - lo
    if lo == lowbound or hi == upbound:
        tester.assertTrue(span <= 2 * radius)
    else:
        tester.assertEqual(span, 2 * radius)
# Entry point: run all test cases under absl's test runner.
if __name__ == '__main__':
    absltest.main()
| [
"numpy.testing.assert_equal",
"disentanglement_lib.evaluation.metrics.utils.local_sample_factors",
"numpy.log",
"disentanglement_lib.evaluation.metrics.utils.discrete_entropy",
"numpy.array",
"disentanglement_lib.evaluation.metrics.utils.sample_integers_around_center",
"numpy.random.RandomState",
"num... | [((7613, 7626), 'numpy.min', 'np.min', (['array'], {}), '(array)\n', (7619, 7626), True, 'import numpy as np\n'), ((7638, 7651), 'numpy.max', 'np.max', (['array'], {}), '(array)\n', (7644, 7651), True, 'import numpy as np\n'), ((7842, 7857), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (7855, 7857), False, 'from absl.testing import absltest\n'), ((1149, 1223), 'numpy.array', 'np.array', (['[[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [0.6, 0.5, 0.4, 0.3, 0.2, 0.1]]'], {}), '([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], [0.6, 0.5, 0.4, 0.3, 0.2, 0.1]])\n', (1157, 1223), True, 'import numpy as np\n'), ((1255, 1302), 'disentanglement_lib.evaluation.metrics.utils._histogram_discretize', 'utils._histogram_discretize', (['target'], {'num_bins': '(3)'}), '(target, num_bins=3)\n', (1282, 1302), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((1318, 1368), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]]'], {}), '([[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]])\n', (1326, 1368), True, 'import numpy as np\n'), ((1373, 1420), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'shouldbe'], {}), '(result, shouldbe)\n', (1402, 1420), True, 'import numpy as np\n'), ((1470, 1520), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]]'], {}), '([[1, 1, 2, 2, 3, 3], [3, 3, 2, 2, 1, 1]])\n', (1478, 1520), True, 'import numpy as np\n'), ((1534, 1564), 'disentanglement_lib.evaluation.metrics.utils.discrete_entropy', 'utils.discrete_entropy', (['target'], {}), '(target)\n', (1556, 1564), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((1580, 1589), 'numpy.log', 'np.log', (['(3)'], {}), '(3)\n', (1586, 1589), True, 'import numpy as np\n'), ((1594, 1650), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', '[shouldbe, shouldbe]'], {}), '(result, [shouldbe, shouldbe])\n', (1620, 1650), True, 'import numpy as np\n'), ((1700, 1738), 'numpy.array', 
'np.array', (['[[1, 2, 1, 2], [1, 1, 2, 2]]'], {}), '([[1, 2, 1, 2], [1, 1, 2, 2]])\n', (1708, 1738), True, 'import numpy as np\n'), ((1748, 1786), 'numpy.array', 'np.array', (['[[1, 2, 1, 2], [2, 2, 1, 1]]'], {}), '([[1, 2, 1, 2], [2, 2, 1, 1]])\n', (1756, 1786), True, 'import numpy as np\n'), ((1800, 1834), 'disentanglement_lib.evaluation.metrics.utils.discrete_mutual_info', 'utils.discrete_mutual_info', (['xs', 'ys'], {}), '(xs, ys)\n', (1826, 1834), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((1899, 1943), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'shouldbe'], {}), '(result, shouldbe)\n', (1925, 1943), True, 'import numpy as np\n'), ((1989, 2008), 'numpy.zeros', 'np.zeros', (['[10, 100]'], {}), '([10, 100])\n', (1997, 2008), True, 'import numpy as np\n'), ((2033, 2064), 'disentanglement_lib.evaluation.metrics.utils.split_train_test', 'utils.split_train_test', (['xs', '(0.9)'], {}), '(xs, 0.9)\n', (2055, 2064), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((2086, 2104), 'numpy.zeros', 'np.zeros', (['[10, 90]'], {}), '([10, 90])\n', (2094, 2104), True, 'import numpy as np\n'), ((2125, 2143), 'numpy.zeros', 'np.zeros', (['[10, 10]'], {}), '([10, 10])\n', (2133, 2143), True, 'import numpy as np\n'), ((2148, 2200), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['xs_train', 'shouldbe_train'], {}), '(xs_train, shouldbe_train)\n', (2174, 2200), True, 'import numpy as np\n'), ((2205, 2255), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['xs_test', 'shouldbe_test'], {}), '(xs_test, shouldbe_test)\n', (2231, 2255), True, 'import numpy as np\n'), ((2315, 2339), 'numpy.random.RandomState', 'np.random.RandomState', (['(3)'], {}), '(3)\n', (2336, 2339), True, 'import numpy as np\n'), ((2452, 2482), 'numpy.array', 'np.array', (['[0, 4, 9, 3, 10, 10]'], {}), '([0, 4, 9, 3, 10, 10])\n', (2460, 2482), True, 'import numpy as np\n'), ((2495, 2589), 
'disentanglement_lib.evaluation.metrics.utils.local_sample_factors', 'utils.local_sample_factors', (['(1000)', '(0.1)', 'factor_num_values', 'factor_centroid', '(0)', 'random_state'], {}), '(1000, 0.1, factor_num_values, factor_centroid, 0,\n random_state)\n', (2521, 2589), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((2599, 2646), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['samps.shape', '(1000, 6)'], {}), '(samps.shape, (1000, 6))\n', (2622, 2646), True, 'import numpy as np\n'), ((3388, 3485), 'disentanglement_lib.evaluation.metrics.utils.local_sample_factors', 'utils.local_sample_factors', (['(1000)', '(0.15)', 'factor_num_values', 'factor_centroid', '(11)', 'random_state'], {}), '(1000, 0.15, factor_num_values, factor_centroid, \n 11, random_state)\n', (3414, 3485), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((3494, 3541), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['samps.shape', '(1000, 6)'], {}), '(samps.shape, (1000, 6))\n', (3517, 3541), True, 'import numpy as np\n'), ((4200, 4224), 'numpy.random.RandomState', 'np.random.RandomState', (['(3)'], {}), '(3)\n', (4221, 4224), True, 'import numpy as np\n'), ((4707, 4773), 'disentanglement_lib.evaluation.metrics.utils.sample_integers_around_center', 'utils.sample_integers_around_center', (['(5)', '(0)', '(4)', '(6)', '(100)', 'random_state'], {}), '(5, 0, 4, 6, 100, random_state)\n', (4742, 4773), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((4971, 5008), 'disentanglement_lib.data.ground_truth.dummy_data.IdentityObservationsData', 'dummy_data.IdentityObservationsData', ([], {}), '()\n', (5006, 5008), False, 'from disentanglement_lib.data.ground_truth import dummy_data\n'), ((5119, 5143), 'numpy.random.RandomState', 'np.random.RandomState', (['(3)'], {}), '(3)\n', (5140, 5143), True, 'import numpy as np\n'), ((5191, 5309), 'disentanglement_lib.evaluation.metrics.utils.generate_batch_factor_code', 
'utils.generate_batch_factor_code', (['ground_truth_data', 'representation_function', 'num_points', 'random_state', 'batch_size'], {}), '(ground_truth_data, representation_function,\n num_points, random_state, batch_size)\n', (5223, 5309), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((6223, 6260), 'disentanglement_lib.data.ground_truth.dummy_data.IdentityObservationsData', 'dummy_data.IdentityObservationsData', ([], {}), '()\n', (6258, 6260), False, 'from disentanglement_lib.data.ground_truth import dummy_data\n'), ((6371, 6395), 'numpy.random.RandomState', 'np.random.RandomState', (['(3)'], {}), '(3)\n', (6392, 6395), True, 'import numpy as np\n'), ((6510, 6686), 'disentanglement_lib.evaluation.metrics.utils.generate_local_batch_factor_code', 'utils.generate_local_batch_factor_code', (['ground_truth_data', 'representation_function', 'num_points', 'random_state', 'batch_size'], {'locality_proportion': '(1.0)', 'continuity_cutoff': '(0.0)'}), '(ground_truth_data,\n representation_function, num_points, random_state, batch_size,\n locality_proportion=1.0, continuity_cutoff=0.0)\n', (6548, 6686), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((6987, 7163), 'disentanglement_lib.evaluation.metrics.utils.generate_local_batch_factor_code', 'utils.generate_local_batch_factor_code', (['ground_truth_data', 'representation_function', 'num_points', 'random_state', 'batch_size'], {'locality_proportion': '(0.1)', 'continuity_cutoff': '(0.0)'}), '(ground_truth_data,\n representation_function, num_points, random_state, batch_size,\n locality_proportion=0.1, continuity_cutoff=0.0)\n', (7025, 7163), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((2667, 2691), 'numpy.all', 'np.all', (['(samps[:, 0] == 0)'], {}), '(samps[:, 0] == 0)\n', (2673, 2691), True, 'import numpy as np\n'), ((3562, 3586), 'numpy.all', 'np.all', (['(samps[:, 0] == 0)'], {}), '(samps[:, 0] == 0)\n', (3568, 3586), True, 'import numpy as 
np\n'), ((4264, 4331), 'disentanglement_lib.evaluation.metrics.utils.sample_integers_around_center', 'utils.sample_integers_around_center', (['(5)', '(3)', '(0)', '(10)', '(100)', 'random_state'], {}), '(5, 3, 0, 10, 100, random_state)\n', (4299, 4331), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((4541, 4607), 'disentanglement_lib.evaluation.metrics.utils.sample_integers_around_center', 'utils.sample_integers_around_center', (['(5)', '(3)', '(4)', '(6)', '(100)', 'random_state'], {}), '(5, 3, 4, 6, 100, random_state)\n', (4576, 4607), False, 'from disentanglement_lib.evaluation.metrics import utils\n'), ((4794, 4813), 'numpy.all', 'np.all', (['(sample == 5)'], {}), '(sample == 5)\n', (4800, 4813), True, 'import numpy as np\n'), ((5049, 5078), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float64'}), '(x, dtype=np.float64)\n', (5057, 5078), True, 'import numpy as np\n'), ((5394, 5448), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['batch.shape', '[10, num_points]'], {}), '(batch.shape, [10, num_points])\n', (5417, 5448), True, 'import numpy as np\n'), ((6301, 6330), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float64'}), '(x, dtype=np.float64)\n', (6309, 6330), True, 'import numpy as np\n'), ((6752, 6812), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['local_batch.shape', '[10, num_points]'], {}), '(local_batch.shape, [10, num_points])\n', (6775, 6812), True, 'import numpy as np\n'), ((7262, 7322), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['local_batch.shape', '[10, num_points]'], {}), '(local_batch.shape, [10, num_points])\n', (7285, 7322), True, 'import numpy as np\n'), ((4354, 4373), 'numpy.all', 'np.all', (['(sample <= 8)'], {}), '(sample <= 8)\n', (4360, 4373), True, 'import numpy as np\n'), ((4397, 4416), 'numpy.all', 'np.all', (['(sample >= 2)'], {}), '(sample >= 2)\n', (4403, 4416), True, 'import numpy as np\n'), ((4440, 4458), 'numpy.any', 'np.any', (['(sample > 6)'], {}), 
'(sample > 6)\n', (4446, 4458), True, 'import numpy as np\n'), ((4482, 4500), 'numpy.any', 'np.any', (['(sample < 4)'], {}), '(sample < 4)\n', (4488, 4500), True, 'import numpy as np\n'), ((4630, 4649), 'numpy.all', 'np.all', (['(sample <= 6)'], {}), '(sample <= 6)\n', (4636, 4649), True, 'import numpy as np\n'), ((4673, 4692), 'numpy.all', 'np.all', (['(sample >= 4)'], {}), '(sample >= 4)\n', (4679, 4692), True, 'import numpy as np\n'), ((1861, 1870), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1867, 1870), True, 'import numpy as np\n'), ((1882, 1891), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1888, 1891), True, 'import numpy as np\n'), ((2768, 2787), 'numpy.max', 'np.max', (['samps[:, 1]'], {}), '(samps[:, 1])\n', (2774, 2787), True, 'import numpy as np\n'), ((2789, 2808), 'numpy.min', 'np.min', (['samps[:, 1]'], {}), '(samps[:, 1])\n', (2795, 2808), True, 'import numpy as np\n'), ((5502, 5523), 'numpy.min', 'np.min', (['batch[inx, :]'], {}), '(batch[inx, :])\n', (5508, 5523), True, 'import numpy as np\n'), ((5552, 5573), 'numpy.max', 'np.max', (['batch[inx, :]'], {}), '(batch[inx, :])\n', (5558, 5573), True, 'import numpy as np\n'), ((6865, 6892), 'numpy.min', 'np.min', (['local_batch[inx, :]'], {}), '(local_batch[inx, :])\n', (6871, 6892), True, 'import numpy as np\n'), ((6921, 6948), 'numpy.max', 'np.max', (['local_batch[inx, :]'], {}), '(local_batch[inx, :])\n', (6927, 6948), True, 'import numpy as np\n'), ((3668, 3689), 'numpy.max', 'np.max', (['samps[:, inx]'], {}), '(samps[:, inx])\n', (3674, 3689), True, 'import numpy as np\n'), ((3691, 3712), 'numpy.min', 'np.min', (['samps[:, inx]'], {}), '(samps[:, inx])\n', (3697, 3712), True, 'import numpy as np\n')] |
# Scatter plot and linear fit of (x, y) columns from a CSV given on the command line.
import pandas as pn
import matplotlib.pyplot as plt
import numpy as np
import sys

# Load the data set whose path is the first command-line argument.
frame = pn.read_csv(sys.argv[1])

# Scatter plot drawn by pandas, then saved to disk.
frame.plot.scatter(x='x', y='y', title="Scatter plot-python")
plt.savefig('scatter-python.png')

# Overlay the raw points and a first-degree (linear) least-squares fit.
xs = frame['x']
ys = frame['y']
plt.plot(xs, ys, 'o', color='blue')
slope, intercept = np.polyfit(xs, ys, 1)
plt.plot(xs, slope * xs + intercept)
plt.savefig('linearmodeling-py.png')
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.polyfit"
] | [((90, 114), 'pandas.read_csv', 'pn.read_csv', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (101, 114), True, 'import pandas as pn\n'), ((181, 214), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""scatter-python.png"""'], {}), "('scatter-python.png')\n", (192, 214), True, 'import matplotlib.pyplot as plt\n'), ((246, 287), 'matplotlib.pyplot.plot', 'plt.plot', (['xdata', 'ydata', '"""o"""'], {'color': '"""blue"""'}), "(xdata, ydata, 'o', color='blue')\n", (254, 287), True, 'import matplotlib.pyplot as plt\n'), ((294, 321), 'numpy.polyfit', 'np.polyfit', (['xdata', 'ydata', '(1)'], {}), '(xdata, ydata, 1)\n', (304, 321), True, 'import numpy as np\n'), ((322, 352), 'matplotlib.pyplot.plot', 'plt.plot', (['xdata', '(m * xdata + b)'], {}), '(xdata, m * xdata + b)\n', (330, 352), True, 'import matplotlib.pyplot as plt\n'), ((348, 384), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""linearmodeling-py.png"""'], {}), "('linearmodeling-py.png')\n", (359, 384), True, 'import matplotlib.pyplot as plt\n')] |
# Evaluation script for a pretrained GCN aspect-sentiment model: builds the
# vocabulary/embedding, loads a saved checkpoint, runs it over the test set,
# and reports loss, accuracy, and macro-F1.
import os
import random
import argparse
import numpy as np
import pickle
from sklearn import metrics
from loader import DataLoader
from trainer import GCNTrainer
from utils import helper
import torch
# --- command-line configuration (defaults mirror the training run) ---
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='Restaurants')
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--post_dim', type=int, default=30, help='Position embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='Pos embedding dimension.')
parser.add_argument('--hidden_dim', type=int, default=300, help='GCN mem dim.')
parser.add_argument('--rnn_hidden', type=int, default=300, help='RNN hidden state size.')
parser.add_argument('--num_layers', type=int, default=2, help='Num of GCN layers.')
parser.add_argument('--num_class', type=int, default=3, help='Num of sentiment class.')
parser.add_argument('--input_dropout', type=float, default=0, help='Input dropout rate.')
parser.add_argument('--gcn_dropout', type=float, default=0., help='GCN layer dropout rate.')
parser.add_argument('--lower', default=True, help='Lowercase all words.')
parser.add_argument('--direct', default=False)
parser.add_argument('--loop', default=True)
parser.add_argument('--lr', type=float, default=0.01, help='learning rate.')
parser.add_argument('--l2reg', type=float, default=1e-5, help='l2 .')
# parser.add_argument('--num_epoch', type=int, default=100, help='Number of total training epochs.')
parser.add_argument('--batch_size', type=int, default=32, help='Training batch size.')
parser.add_argument('--save_dir', type=str, default='./saved_models/best_model_rest_max_add_loss_83.98.pt', help='Root dir for saving models.')
parser.add_argument('--head_num', default=3, type=int, help='head_num must be a multiple of 3')
parser.add_argument('--top_k', default=2, type=int)
parser.add_argument('--beta', default=1.0, type=float)
parser.add_argument('--theta', default=1.0, type=float)
parser.add_argument('--second_layer', default='max', type=str)
parser.add_argument('--DEVICE', default='cuda:0', type=str)
args = parser.parse_args()
# load constants (post/pos index maps) for the chosen dataset
# NOTE(review): eval() executes the contents of constant.py — safe only for
# trusted, locally generated dataset files.
dicts = eval(open('./dataset/'+args.dataset+'/constant.py', 'r').read())
vocab_file = './dataset/'+args.dataset+'/vocab.pkl'
token_vocab = dict()
# 'i2w' is the pickled index->word list; 'w2i' is its inverse mapping
with open(vocab_file, 'rb') as infile:
    token_vocab['i2w'] = pickle.load(infile)
token_vocab['w2i'] = {token_vocab['i2w'][i]:i for i in range(len(token_vocab['i2w']))}
emb_file = './dataset/'+args.dataset+'/embedding.npy'
emb_matrix = np.load(emb_file)
# embedding matrix must be aligned with the vocabulary and requested dimension
assert emb_matrix.shape[0] == len(token_vocab['i2w'])
assert emb_matrix.shape[1] == args.emb_dim
args.token_vocab_size = len(token_vocab['i2w'])
args.post_vocab_size = len(dicts['post'])
args.pos_vocab_size = len(dicts['pos'])
dicts['token'] = token_vocab['w2i']
# load training set and test set
print("Loading data from {} with batch size {}...".format(args.dataset, args.batch_size))
test_batch = DataLoader('./dataset/'+args.dataset+'/test.json', args.batch_size, args, dicts)
# create the model
trainer = GCNTrainer(args, emb_matrix=emb_matrix)
print("Loading model from {}".format(args.save_dir))
DEVICE_ID = 0
DEVICE = torch.device(args.DEVICE if torch.cuda.is_available() else 'cpu')
mdict = torch.load(args.save_dir, map_location=DEVICE)
print(mdict['config'])
# copy only the checkpoint weights whose names still exist in the current model
model_dict = trainer.model.state_dict()
pretrained_dict = {k: v for k, v in mdict['model'].items() if k in model_dict}
model_dict.update(pretrained_dict)
trainer.model.load_state_dict(model_dict)
print("Evaluating...")
# accumulate per-batch loss/accuracy and per-example predictions over the test set
predictions, labels = [], []
test_loss, test_acc, test_step = 0., 0., 0
for i, batch in enumerate(test_batch):
    loss, acc, pred, label, _, _ = trainer.predict(batch)
    test_loss += loss
    test_acc += acc
    predictions += pred
    labels += label
    test_step += 1
f1_score = metrics.f1_score(labels, predictions, average='macro')
print("test_loss: {}, test_acc: {}, f1_score: {}".format( \
        test_loss/test_step, \
        test_acc/test_step, \
        f1_score))
| [
"sklearn.metrics.f1_score",
"argparse.ArgumentParser",
"torch.load",
"pickle.load",
"trainer.GCNTrainer",
"torch.cuda.is_available",
"loader.DataLoader",
"numpy.load"
] | [((210, 235), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (233, 235), False, 'import argparse\n'), ((2558, 2575), 'numpy.load', 'np.load', (['emb_file'], {}), '(emb_file)\n', (2565, 2575), True, 'import numpy as np\n'), ((2978, 3066), 'loader.DataLoader', 'DataLoader', (["('./dataset/' + args.dataset + '/test.json')", 'args.batch_size', 'args', 'dicts'], {}), "('./dataset/' + args.dataset + '/test.json', args.batch_size,\n args, dicts)\n", (2988, 3066), False, 'from loader import DataLoader\n'), ((3089, 3128), 'trainer.GCNTrainer', 'GCNTrainer', (['args'], {'emb_matrix': 'emb_matrix'}), '(args, emb_matrix=emb_matrix)\n', (3099, 3128), False, 'from trainer import GCNTrainer\n'), ((3280, 3326), 'torch.load', 'torch.load', (['args.save_dir'], {'map_location': 'DEVICE'}), '(args.save_dir, map_location=DEVICE)\n', (3290, 3326), False, 'import torch\n'), ((3855, 3909), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (3871, 3909), False, 'from sklearn import metrics\n'), ((2380, 2399), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (2391, 2399), False, 'import pickle\n'), ((3234, 3259), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3257, 3259), False, 'import torch\n')] |
# %% Load packages
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from kanga.plots import redblue_cmap
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.constants import output_path
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.mcmc.constants import pred_interval_x1, pred_interval_x2
# %% Load ground truth
# ground_truth.csv holds the grid of ground-truth values over (x1, x2)
pred_interval_y = np.loadtxt(output_path.joinpath('ground_truth.csv'), delimiter=',', skiprows=0)
# %% Plot heat map of ground truth
num_ticks = 8
# Pick num_ticks evenly spaced cell indices and label them with the rounded
# coordinate values. Use the builtin ``int`` as dtype: ``np.int`` was
# deprecated in NumPy 1.20 and removed in NumPy 1.24, so the old code raises
# AttributeError on current NumPy.
xticks = np.linspace(0, len(pred_interval_x1)-1, num=num_ticks, dtype=int)
xticklabels = [np.round(pred_interval_x1[idx], decimals=2) for idx in xticks]
yticks = np.linspace(0, len(pred_interval_x2)-1, num=num_ticks, dtype=int)
yticklabels = [np.round(pred_interval_x2[idx], decimals=2) for idx in yticks]
ax = sns.heatmap(
    pred_interval_y,
    cmap=redblue_cmap,
    linewidths=0.01,
    linecolor='white',
    cbar=True,
    square=True
)
plt.ylim(0, len(pred_interval_x2))
# Offset ticks by 0.5 so labels sit in the middle of heatmap cells.
ax.set_xticks(xticks+0.5)
ax.set_xticklabels(xticklabels, rotation=0, fontsize=8)
ax.set_yticks(yticks+0.5)
ax.set_yticklabels(yticklabels, rotation=0, fontsize=8)
ax.collections[0].colorbar.ax.tick_params(labelsize=8)
plt.savefig(
    output_path.joinpath('ground_truth.png'),
    pil_kwargs={'quality': 100},
    transparent=True,
    bbox_inches='tight',
    pad_inches=0.1
)
| [
"numpy.round",
"bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.constants.output_path.joinpath",
"seaborn.heatmap"
] | [((822, 934), 'seaborn.heatmap', 'sns.heatmap', (['pred_interval_y'], {'cmap': 'redblue_cmap', 'linewidths': '(0.01)', 'linecolor': '"""white"""', 'cbar': '(True)', 'square': '(True)'}), "(pred_interval_y, cmap=redblue_cmap, linewidths=0.01, linecolor=\n 'white', cbar=True, square=True)\n", (833, 934), True, 'import seaborn as sns\n'), ((382, 422), 'bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.constants.output_path.joinpath', 'output_path.joinpath', (['"""ground_truth.csv"""'], {}), "('ground_truth.csv')\n", (402, 422), False, 'from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.constants import output_path\n'), ((596, 639), 'numpy.round', 'np.round', (['pred_interval_x1[idx]'], {'decimals': '(2)'}), '(pred_interval_x1[idx], decimals=2)\n', (604, 639), True, 'import numpy as np\n'), ((753, 796), 'numpy.round', 'np.round', (['pred_interval_x2[idx]'], {'decimals': '(2)'}), '(pred_interval_x2[idx], decimals=2)\n', (761, 796), True, 'import numpy as np\n'), ((1232, 1272), 'bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.constants.output_path.joinpath', 'output_path.joinpath', (['"""ground_truth.png"""'], {}), "('ground_truth.png')\n", (1252, 1272), False, 'from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.constants import output_path\n')] |
# test for generate seismic data
# Exercises pysit's generate_seismic_data round-trip through every supported
# save backend (pickle / savemat / h5py) and compares the reloaded traces
# against the in-memory reference shots.
# Std import block
import time
import numpy as np
import matplotlib.pyplot as plt
from pysit import *
from pysit.gallery import horizontal_reflector
import os
import shutil
# Define Domain
pmlx = PML(0.1, 100)
pmlz = PML(0.1, 100)
os.environ["OMP_NUM_THREADS"] = "12"
x_config = (0.1, 1.0, pmlx, pmlx)
z_config = (0.1, 0.8, pmlz, pmlz)
d = RectangularDomain(x_config, z_config)
m = CartesianMesh(d, 91, 71)
# Generate true wave speed
C, C0, m, d = horizontal_reflector(m)
# Set up shots
# Four identical acquisitions: one reference kept in memory, one per backend
# that will be refilled from file.
zmin = d.z.lbound
zmax = d.z.rbound
zpos = zmin + (1./9.)*zmax
shots = equispaced_acquisition(m,
                               RickerWavelet(10.0),
                               sources=5,
                               source_depth=zpos,
                               source_kwargs={},
                               receivers='max',
                               receiver_depth=zpos,
                               receiver_kwargs={},
                               )
shots_pickle = equispaced_acquisition(m,
                               RickerWavelet(10.0),
                               sources=5,
                               source_depth=zpos,
                               source_kwargs={},
                               receivers='max',
                               receiver_depth=zpos,
                               receiver_kwargs={},
                               )
shots_savemat = equispaced_acquisition(m,
                               RickerWavelet(10.0),
                               sources=5,
                               source_depth=zpos,
                               source_kwargs={},
                               receivers='max',
                               receiver_depth=zpos,
                               receiver_kwargs={},
                               )
shots_h5py = equispaced_acquisition(m,
                               RickerWavelet(10.0),
                               sources=5,
                               source_depth=zpos,
                               source_kwargs={},
                               receivers='max',
                               receiver_depth=zpos,
                               receiver_kwargs={},
                               )
# Define and configure the wave solver
trange = (0.0,3.0)
solver_time = ConstantDensityAcousticWave(m,
                                          spatial_accuracy_order=6,
                                          kernel_implementation='cpp',
                                          trange=trange)
# Generate Seismic data/ Loading it
print("shot TIME generation...")
base_model = solver_time.ModelParameters(m,{'C':C})
generate_seismic_data(shots, solver_time, base_model)
print("DONE")
print("pickle TIME generation...")
generate_seismic_data(shots, solver_time, base_model,save_method='pickle')
generate_seismic_data_from_file(shots_pickle, solver_time, save_method='pickle')
print("DONE")
print("savemat TIME generation...")
generate_seismic_data(shots, solver_time, base_model,save_method='savemat')
generate_seismic_data_from_file(shots_savemat, solver_time, save_method='savemat')
print("DONE")
print("h5py TIME generation...")
generate_seismic_data(shots, solver_time, base_model,save_method='h5py')
generate_seismic_data_from_file(shots_h5py, solver_time, save_method='h5py')
print("DONE")
# Now compare the result to be sure of your code catches all the exceptions
# Each reloaded acquisition must match the in-memory reference bit-for-bit,
# both the receiver traces (.data) and the time axis (.ts).
for i in range(1,len(shots)+1):
    if (shots[i-1].receivers.data == shots_pickle[i-1].receivers.data).all():
        print("Test for receivers %d data of pickle : OK" % i)
    else:
        print(("Test for receivers %d data of pickle : fail") % i)
    if (shots[i-1].receivers.data == shots_savemat[i-1].receivers.data).all():
        print("Test for receivers %d data of savemat : OK" % i)
    else:
        print(("Test for receivers %d data of savemat : fail") % i)
    if (shots[i-1].receivers.data == shots_h5py[i-1].receivers.data).all():
        print("Test for receivers %d data of hdf5 : OK" % i)
    else:
        print(("Test for receivers %d data of hdf5 : fail") % i)
    if (shots[i-1].receivers.ts == shots_pickle[i-1].receivers.ts).all():
        print("Test for receivers %d ts of pickle : OK" % i)
    else:
        print(("Test for receivers %d ts of pickle : fail") % i)
    if (shots[i-1].receivers.ts == shots_savemat[i-1].receivers.ts).all():
        print("Test for receivers %d ts of savemat : OK" % i)
    else:
        print(("Test for receivers %d ts of savemat : fail") % i)
    if (shots[i-1].receivers.ts == shots_h5py[i-1].receivers.ts).all():
        print("Test for receivers %d ts of hdf5 : OK" % i)
    else:
        print(("Test for receivers %d ts of hdf5 : fail") % i)
#########################################################
# raise some error to verify that there are well caught #
#########################################################
# os.remove("./shots/shot_2.hdf5")
# generate_seismic_data_from_file(shots_h5py, save_method='h5py')
# generate_seismic_data_from_file(shots_h5py, save_method='pickle')
# generate_seismic_data_from_file(shots_h5py, save_method='petsc')
# generate_seismic_data_from_file(shots_h5py)
##############################################################
#     Now Frequencies we save direct fourier transform data  #
##############################################################
solver = ConstantDensityHelmholtz(m)
frequencies = [2.0, 3.5, 5.0, 6.5, 8.0, 9.5]
# Generate synthetic Seismic data
base_model = solver.ModelParameters(m,{'C': C})
tt = time.time()
print("shot FREQUENCY generation...")
generate_seismic_data(shots, solver, base_model, frequencies=frequencies)
print("DONE")
print("pickle FREQUENCY generation...")
generate_seismic_data(shots, solver, base_model,save_method='pickle', frequencies=frequencies)
generate_seismic_data_from_file(shots_pickle, solver, save_method='pickle')
print("DONE")
print("savemat FREQUENCY generation...")
generate_seismic_data(shots, solver, base_model,save_method='savemat', frequencies=frequencies)
generate_seismic_data_from_file(shots_savemat, solver, save_method='savemat', frequencies=frequencies)
print("DONE")
def compare_dict(a, b):
    """Return True when two dicts of numpy arrays are element-wise equal.

    Values are considered equal when every element differs by less than
    machine epsilon for float64.

    Fixes over the previous version:
    * the difference is compared by *absolute* value — ``(a - b) < eps``
      silently accepted arbitrarily large negative deviations;
    * key sets are compared order-independently — ``list(a.keys())`` made
      the result depend on dict insertion order.
    """
    if a.keys() != b.keys():
        return False
    eps = np.finfo(float).eps
    for key in a:
        if not (np.abs(a[key] - b[key]) < eps).all():
            return False
    return True
# Now compare the result to be sure of your code catches all the exceptions
# Frequency-domain check: the DFT dictionaries reloaded from each backend
# must agree with the in-memory reference (compare_dict handles the
# per-frequency numpy arrays).
for i in range(1,len(shots)+1):
    if compare_dict(shots[i-1].receivers.data_dft, shots_pickle[i-1].receivers.data_dft):
        print("Test for receivers %d data_dft of pickle : OK" % i)
    else:
        print(("Test for receivers %d data_dft of pickle : fail") % i)
    if compare_dict(shots[i-1].receivers.data_dft, shots_savemat[i-1].receivers.data_dft):
        print("Test for receivers %d data_dft of savemat : OK" % i)
    else:
        print(("Test for receivers %d data_dft of savemat : fail") % i)
| [
"numpy.finfo",
"pysit.gallery.horizontal_reflector",
"time.time"
] | [((491, 514), 'pysit.gallery.horizontal_reflector', 'horizontal_reflector', (['m'], {}), '(m)\n', (511, 514), False, 'from pysit.gallery import horizontal_reflector\n'), ((5676, 5687), 'time.time', 'time.time', ([], {}), '()\n', (5685, 5687), False, 'import time\n'), ((6435, 6450), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (6443, 6450), True, 'import numpy as np\n')] |
"""
Testshot script for getting GPI equipment ready while still at MIT.
Usage :
python testgpi_mit.py 1180227500
<NAME>, Feb 27, 2018
"""
from MDSplus import *
from MitDevices.acq132 import ACQ132
from MitDevices.acq196 import ACQ196
from MitDevices.acq196ao import ACQ196AO
import numpy as np
import sys
import time
import matplotlib.pyplot as plt
s=int(sys.argv[1])
myTree=Tree("spectroscopy",-1)
myTree.createPulse(s) #Copies the model tree
myTree=Tree("spectroscopy",s)
myDIO2=myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:DIO2")
HV_prog_i=4.0
for i in range (1,9):
myTree.getNode("GPI_TCV.APD_ARRAY.CONTROL.HV_PROG_"+str(i)).putData(myTree.tdiCompile(str(HV_prog_i)))
#Initialize DIO2 through TCL command, since there is no working python command for DIO2
#DIO2_ENCDEC does not work for this, neither does DIO4
myTree.tcl('do /meth '+myDIO2.getFullPath()+' init')
print("Initialized DIO2")
#Take node of each digitizer, and initialize them
myACQ132_1=myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ132_1")
inst_ACQ132_1=ACQ132(myACQ132_1)
inst_ACQ132_1.initftp()
print("Initialized ACQ132_1")
myACQ132_2=myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ132_2")
inst_ACQ132_2=ACQ132(myACQ132_2)
inst_ACQ132_2.initftp()
print("Initialized ACQ132_2")
myACQ132_3=myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ132_3")
inst_ACQ132_3=ACQ132(myACQ132_3)
inst_ACQ132_3.initftp()
print("Initialized ACQ132_3")
myACQ132_4=myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ132_4")
inst_ACQ132_4=ACQ132(myACQ132_4)
inst_ACQ132_4.initftp()
print("Initialized ACQ132_4")
myACQ196=myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ196")
inst_ACQ196=ACQ196(myACQ196)
inst_ACQ196.initftp()
print("Initialized ACQ196")
myACQ196AO=myTree.getNode("GPI_TCV.APD_ARRAY.HARDWARE:ACQ196AO")
inst_ACQ196AO=ACQ196AO(myACQ196AO)
inst_ACQ196AO.init()
print("Initialized ACQ196AO")
#Wait for the initialization
time.sleep(7)
#Trigger DIO2 in order to start the data acquisition
myTree.tcl('do /meth '+myDIO2.getFullPath()+' trigger')
#myTree.getNode('GPI.APD_ARRAY.HARDWARE:eng_encoder').doMethod("set_event","SPECTROSCOPY_START") #Should work with Trig.mode=event in the device setup of DIO2 - put a spectroscopy start MDSplus event on the CPCI network
print("Triggered DIO2")
#Wait for shot to end
time.sleep(7)
#Store data to the MDSplus tree
inst_ACQ132_1.store()
print("Stored data on ACQ132_1")
inst_ACQ132_2.store()
print("Stored data on ACQ132_2")
inst_ACQ132_3.store()
print("Stored data on ACQ132_3")
inst_ACQ132_4.store()
print("Stored data on ACQ132_4")
inst_ACQ196.store()
print("Stored data on ACQ196")
for i in range (1,5):
for j in range (1,33):
if j<10:
sig=myTree.getNode('gpi_tcv.apd_array.hardware.acq132_'+str(i)+'.input_0'+str(j)).getData().data()
# t=myTree.getNode('gpi_tcv.apd_array.hardware.dt132_'+str(i)+'.input_0'+str(j)).dim_of().data()
else:
sig=myTree.getNode('gpi_tcv.apd_array.hardware.acq132_'+str(i)+'.input_'+str(j)).getData().data()
# t=myTree.getNode('gpi_tcv.apd_array.hardware.dt132_'+str(i)+'.input_'+str(j)).dim_of().data()
print("ACQ132_"+str(i)+", Input "+str(j)+": "+str(np.mean(sig)))
"""
for i in range (1,17):
if i < 10:
node_HV_prog=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196AO.OUTPUT_0"+str(i))
node_HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_0"+str(i))
else:
node_HV_prog=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196AO.OUTPUT_"+str(i))
node_HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_"+str(i))
HV_prog=max(node_HV_prog.getData().data())
HV_meas=np.mean(node_HV_meas.getData().data())
print("HV_prog for output "+str(i)+" : "+str(HV_prog))
print("HV_meas for input "+str(i)+" : "+str(HV_meas))
for i in range (17,33):
node_HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_"+str(i))
HV_meas=np.mean(node_HV_meas.getData().data())
print("HV_meas for input "+str(i)+" : "+str(HV_meas))
"""
"""
for i in range (1,33):
if i<10:
HV_meas=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_4.INPUT_0"+str(i)).getData().data()
t=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_4.INPUT_0"+str(i)).dim_of().data()
else:
HV_meas=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_4.INPUT_"+str(i)).getData().data()
t=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_4.INPUT_"+str(i)).dim_of().data()
# plt.plot(t,HV_meas)
print('Input_'+str(i)+' : '+str(np.mean(HV_meas)))
"""
"""
for i in range (1,33):
if i<10:
HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_0"+str(i)).getData().data()
t=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_0"+str(i)).dim_of().data()
else:
HV_meas=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_"+str(i)).getData().data()
t=myTree.getNode("GPI.APD_ARRAY.HARDWARE:ACQ196.INPUT_"+str(i)).dim_of().data()
# plt.plot(t,HV_meas)
print('Input_'+str(i)+' : '+str(np.mean(HV_meas)))
"""
#plt.xlabel('Time (sec)')
#plt.ylabel('HV_meas (V)')
#plt.ylim(0.,5.)
#plt.show()
#for i in range (1,2):
# if i < 10:
# node_sig=myTree.getNode("GPI.APD_ARRAY.HARDWARE:DT132_3.INPUT_0"+str(i))
# else:
# node_sig=myTree.getNode("GPI.APD_ARRAY.HARDWARE:DT132_3.INPUT_"+str(i))
# sig=np.mean(node_sig.getData().data())
# print("Input "+str(i)+": "+str(sig))
# signal=node_sig.getData().data()
# t=node_sig.dim_of().data()
# plt.plot(t,signal)
# plt.xlabel('Time (sec)')
# plt.ylabel('Signal (V)')
"""
plt.xlabel('Time (sec)')
plt.ylabel('Signal (V)')
line=[]
for i in range (1,5):
if i<4:
node_sig=myTree.getNode("GPI.APD_ARRAY.HARDWARE:DT132_"+str(i)+".INPUT_01")
else:
node_sig=myTree.getNode("GPI.INNER_APD.HARDWARE:ACQ132_"+str(i)+".INPUT_01")
signal=node_sig.getData().data()
t=node_sig.dim_of().data()
line.append(plt.plot(t,signal,label="DT132_"+str(i)))
plt.legend(line,('DT132_1','DT132_2','DT132_3','DT132_4'))
plt.xlim([0.05,0.05003])
plt.show()
"""
| [
"numpy.mean",
"MitDevices.acq196ao.ACQ196AO",
"MitDevices.acq196.ACQ196",
"time.sleep",
"MitDevices.acq132.ACQ132"
] | [((1033, 1051), 'MitDevices.acq132.ACQ132', 'ACQ132', (['myACQ132_1'], {}), '(myACQ132_1)\n', (1039, 1051), False, 'from MitDevices.acq132 import ACQ132\n'), ((1186, 1204), 'MitDevices.acq132.ACQ132', 'ACQ132', (['myACQ132_2'], {}), '(myACQ132_2)\n', (1192, 1204), False, 'from MitDevices.acq132 import ACQ132\n'), ((1339, 1357), 'MitDevices.acq132.ACQ132', 'ACQ132', (['myACQ132_3'], {}), '(myACQ132_3)\n', (1345, 1357), False, 'from MitDevices.acq132 import ACQ132\n'), ((1492, 1510), 'MitDevices.acq132.ACQ132', 'ACQ132', (['myACQ132_4'], {}), '(myACQ132_4)\n', (1498, 1510), False, 'from MitDevices.acq132 import ACQ132\n'), ((1639, 1655), 'MitDevices.acq196.ACQ196', 'ACQ196', (['myACQ196'], {}), '(myACQ196)\n', (1645, 1655), False, 'from MitDevices.acq196 import ACQ196\n'), ((1786, 1806), 'MitDevices.acq196ao.ACQ196AO', 'ACQ196AO', (['myACQ196AO'], {}), '(myACQ196AO)\n', (1794, 1806), False, 'from MitDevices.acq196ao import ACQ196AO\n'), ((1888, 1901), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (1898, 1901), False, 'import time\n'), ((2280, 2293), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (2290, 2293), False, 'import time\n'), ((3177, 3189), 'numpy.mean', 'np.mean', (['sig'], {}), '(sig)\n', (3184, 3189), True, 'import numpy as np\n')] |
# Labels 4h BTC/USDT candles with buy/sell/hold actions derived from zigzag
# pivot points, then writes the one-hot-encoded actions next to the OHLCV data.
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import zigzag
from matplotlib.finance import candlestick2_ohlc
fpath = os.path.dirname(os.path.abspath(__file__))
fpath += '/data/ingest_data/'
load_file_name = 'binance_btc_usdt_4h.csv'
write_up_down_file_name = 'action_binance_btc_usdt_4h.csv'
chart_data = pd.read_csv(fpath + load_file_name, thousands=',', header=None)
chart_data.columns = ['date', 'open', 'high', 'low', 'close', 'volume']
chart_data['date'] = pd.to_datetime(chart_data['date'])
# restrict to a three-month window
chart_data = chart_data[(chart_data['date'] >= '2017-11-01') & (chart_data['date'] <= '2018-02-01')]
high_low = []
trend = 0
open_a = chart_data.open.values
close_a = chart_data.close.values
low_a = chart_data.low.values
high_a = chart_data.high.values
# per-candle OHLC average used as the smoothed price series
ohlcv4_a = (open_a + close_a + low_a + high_a) / 4
# build the series fed to the zigzag detector: first candle uses its extreme,
# later candles use the larger of the current/previous OHLC averages
for i in range(len(chart_data.date)):
    open = open_a[i]
    close = close_a[i]
    low = low_a[i]
    high = high_a[i]
    if i == 0:
        high_low.append(high if open < close else low)
        continue
    high_low.append(max(ohlcv4_a[i], ohlcv4_a[i - 1]))
X = np.array(high_low)
# 2% up / 1% down thresholds for pivot detection
pivots = zigzag.peak_valley_pivots(X, 0.02, -0.01)
"""
위 변곡점: 1
아래 변곡점: -1
나머지: 0
swsong
위 꼭지점은 -1
아래 꼭지점은 1
번갈아 나오면 0점.
"""
# --- labelling parameters and loop state ---
hold_count = 1
left_hold_range = 0.15
right_hold_range = 0.01
# action = 'B', 'S', 'H'
actions = []
last_action = None
last_action_price = None
last_action_index = 0
highest_price = 0
lowest_price = 0
prev_pivot_index = 0
tmp_pivot = None
current_hold_count = hold_count
def get_next_pivot_index(index, hold_count):
    """Return the position of the first nonzero pivot at or after
    ``index + hold_count``, or None when no further pivot exists.

    Relies on the module-level ``pivots`` array.
    """
    start = index + hold_count
    return next((pos for pos in range(start, len(pivots)) if pivots[pos] != 0), None)
# Walk every candle and assign one of 'B' (buy), 'S' (sell), 'H' (hold).
for index in range(len(pivots)):
    price = close_a[index]
    pivot = pivots[index]
    act = None
    if last_action is None:
        # No position yet, so open with a buy.
        act = 'B'
        tmp_pivot = 1
    elif pivot != 0 or current_hold_count > 0:
        # Still inside the post-action hold window,
        # or this bar is itself a pivot: force a hold.
        act = 'H'
        current_hold_count -= 1
        if pivot != 0:
            tmp_pivot = pivot
            prev_pivot_index = index
        if current_hold_count <= 0:
            current_hold_count = hold_count
    else:
        next_pivot_index = get_next_pivot_index(index, 1)
        if next_pivot_index is None:
            act = 'H'
        else:
            print('--------------------------------------------')
            print('date: {}'.format(chart_data['date'].values[index]))
            # position of this bar inside the current pivot-to-pivot leg
            total_count = next_pivot_index - prev_pivot_index
            current_count = index - prev_pivot_index
            act = 'H'
            if tmp_pivot == -1:
                # last pivot was a valley: candidate buy, unless too close to
                # the leg's end (left) or to the running low (right)
                is_left_hold_action = (total_count - current_count) / total_count < left_hold_range
                is_right_hold_action = abs(lowest_price - price) / price < right_hold_range
                print('매수')
                print('left: {}, right: {}'.format(is_left_hold_action, is_right_hold_action))
                if is_left_hold_action or is_right_hold_action:
                    act = 'H'
                else:
                    act = 'B'
            if tmp_pivot == 1:
                # last pivot was a peak: candidate sell, with symmetric guards
                is_left_hold_action = (total_count - current_count) / total_count < left_hold_range
                is_right_hold_action = (highest_price - price) / price < right_hold_range
                print('매도')
                print('left: {}, right: {}'.format(is_left_hold_action, is_right_hold_action))
                if is_left_hold_action or is_right_hold_action:
                    act = 'H'
                else:
                    act = 'S'
            print('act: {}, trends: {}'.format(act, tmp_pivot))
            # NOTE(review): this format string prints lowest_price twice;
            # 'highest' was probably intended for one of the slots — confirm.
            print('price: {}, lowest: {}, lowest: {}'.format(price, lowest_price, lowest_price))
            print('--------------------------------------------')
    # track running extremes since the last executed action
    if highest_price < price:
        highest_price = price
    if lowest_price > price:
        lowest_price = price
    if act != 'H':
        # an executed buy/sell resets the action state and the extremes
        last_action = act
        last_action_price = price
        last_action_index = index
        highest_price = price
        lowest_price = price
    actions.append(act)
actions = np.array(actions)
# one row per candle: (date, action), then one-hot encode the action column
fake_data = zip(range(len(actions)), chart_data.date, actions)
act_data = pd.DataFrame([data for num, *data in fake_data], columns=['date', 'action'])
act_one_hot = pd.get_dummies(act_data)
chart_data = pd.merge(chart_data, act_one_hot, on='date')
# B, H, S
chart_data.to_csv(fpath + write_up_down_file_name, mode='w', index=False, header=False)
print(chart_data.head(10))
print('저장 완료. {}'.format(fpath + write_up_down_file_name))
def ohlcv_plot(data):
    """Draw a candlestick chart of *data* with date-formatted x-axis ticks.

    NOTE: candlestick2_ohlc comes from matplotlib.finance, which was removed
    from matplotlib itself in 3.0 (moved to the external mplfinance package).
    """
    fig, axis = plt.subplots()
    dates = data.date.values
    opens = data.open.values
    highs = data.high.values
    lows = data.low.values
    closes = data.close.values
    volumes = data.volume.values
    candlestick2_ohlc(axis, opens, highs, lows, closes, width=0.6)
    axis.xaxis.set_major_locator(ticker.MaxNLocator(10))

    def format_date(x, pos):
        # map a tick position back to its timestamp; blank outside the range
        try:
            return pd.to_datetime(dates[int(x)]).strftime('%Y.%m.%d %H:%M')
        except IndexError:
            return ''

    axis.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
    fig.autofmt_xdate()
    fig.tight_layout()
def plot_pivots(X, pivots):
    """Connect the detected pivot points of X with a black line."""
    positions = np.arange(len(X))
    mask = pivots != 0
    plt.plot(positions[mask], X[mask], 'k-')
def plot_actions(X, actions):
    """Mark buy points in green and sell points in red on the price series."""
    positions = np.arange(len(X))
    buys = actions == 'B'
    sells = actions == 'S'
    plt.scatter(positions[buys], X[buys], color='g')
    plt.scatter(positions[sells], X[sells], color='r')
# Render the candlestick chart, the pivot line, and the action markers.
ohlcv_plot(chart_data)
plot_pivots(X, pivots)
plot_actions(X, actions)
plt.show()
| [
"zigzag.peak_valley_pivots",
"matplotlib.ticker.FuncFormatter",
"pandas.read_csv",
"pandas.merge",
"pandas.get_dummies",
"numpy.array",
"matplotlib.ticker.MaxNLocator",
"pandas.DataFrame",
"matplotlib.finance.candlestick2_ohlc",
"os.path.abspath",
"matplotlib.pyplot.subplots",
"pandas.to_datet... | [((377, 440), 'pandas.read_csv', 'pd.read_csv', (['(fpath + load_file_name)'], {'thousands': '""","""', 'header': 'None'}), "(fpath + load_file_name, thousands=',', header=None)\n", (388, 440), True, 'import pandas as pd\n'), ((535, 569), 'pandas.to_datetime', 'pd.to_datetime', (["chart_data['date']"], {}), "(chart_data['date'])\n", (549, 569), True, 'import pandas as pd\n'), ((1147, 1165), 'numpy.array', 'np.array', (['high_low'], {}), '(high_low)\n', (1155, 1165), True, 'import numpy as np\n'), ((1175, 1216), 'zigzag.peak_valley_pivots', 'zigzag.peak_valley_pivots', (['X', '(0.02)', '(-0.01)'], {}), '(X, 0.02, -0.01)\n', (1200, 1216), False, 'import zigzag\n'), ((4259, 4276), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (4267, 4276), True, 'import numpy as np\n'), ((4352, 4428), 'pandas.DataFrame', 'pd.DataFrame', (['[data for num, *data in fake_data]'], {'columns': "['date', 'action']"}), "([data for num, *data in fake_data], columns=['date', 'action'])\n", (4364, 4428), True, 'import pandas as pd\n'), ((4443, 4467), 'pandas.get_dummies', 'pd.get_dummies', (['act_data'], {}), '(act_data)\n', (4457, 4467), True, 'import pandas as pd\n'), ((4481, 4525), 'pandas.merge', 'pd.merge', (['chart_data', 'act_one_hot'], {'on': '"""date"""'}), "(chart_data, act_one_hot, on='date')\n", (4489, 4525), True, 'import pandas as pd\n'), ((5923, 5933), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5931, 5933), True, 'import matplotlib.pyplot as plt\n'), ((205, 230), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (220, 230), False, 'import os\n'), ((4747, 4761), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4759, 4761), True, 'import matplotlib.pyplot as plt\n'), ((4938, 4994), 'matplotlib.finance.candlestick2_ohlc', 'candlestick2_ohlc', (['ax', 'open', 'high', 'low', 'close'], {'width': '(0.6)'}), '(ax, open, high, low, close, width=0.6)\n', (4955, 4994), False, 'from matplotlib.finance 
import candlestick2_ohlc\n'), ((5026, 5048), 'matplotlib.ticker.MaxNLocator', 'ticker.MaxNLocator', (['(10)'], {}), '(10)\n', (5044, 5048), True, 'import matplotlib.ticker as ticker\n'), ((5244, 5272), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['mydate'], {}), '(mydate)\n', (5264, 5272), True, 'import matplotlib.ticker as ticker\n')] |
# 2-D linear convection (first-order upwind), plotted as 3-D surfaces before
# and after time stepping.
from mpl_toolkits.mplot3d import Axes3D ##New Library required for projected 3d plots
import numpy
from matplotlib import pyplot, cm
###variable declarations
nx = 81
ny = 81
nt = 100
c = 1
dx = 2 / (nx - 1)
dy = 2 / (ny - 1)
sigma = .2
dt = sigma * dx
x = numpy.linspace(0, 2, nx)
y = numpy.linspace(0, 2, ny)
u = numpy.ones((ny, nx)) ##create a 1xn vector of 1's
un = numpy.ones((ny, nx)) ##
###Assign initial conditions
##set hat function I.C. : u(.5<=x<=1 && .5<=y<=1 ) is 2
u[int(.5 / dy):int(1 / dy + 1),int(.5 / dx):int(1 / dx + 1)] = 2
###Plot Initial Condition
##the figsize parameter can be used to produce different sized images
fig = pyplot.figure(figsize=(11, 7), dpi=100)
# Figure.gca(projection='3d') was deprecated in matplotlib 3.4 and removed in
# 3.6; add_subplot(projection='3d') is the supported way to get 3-D axes.
ax = fig.add_subplot(projection='3d')
X, Y = numpy.meshgrid(x, y)
surf = ax.plot_surface(X, Y, u[:], cmap=cm.viridis)
# reset the field and re-apply the hat initial condition before time stepping
u = numpy.ones((ny, nx))
u[int(.5 / dy):int(1 / dy + 1), int(.5 / dx):int(1 / dx + 1)] = 2
for n in range(nt + 1): ##loop across number of time steps
    un = u.copy()
    row, col = u.shape
    # first-order upwind update in both x and y
    for j in range(1, row):
        for i in range(1, col):
            u[j, i] = (un[j, i] - (c * dt / dx * (un[j, i] - un[j, i - 1])) -
                                  (c * dt / dy * (un[j, i] - un[j - 1, i])))
    # Dirichlet boundary conditions: u = 1 on all four edges
    u[0, :] = 1
    u[-1, :] = 1
    u[:, 0] = 1
    u[:, -1] = 1
fig = pyplot.figure(figsize=(11, 7), dpi=100)
ax = fig.add_subplot(projection='3d')
surf2 = ax.plot_surface(X, Y, u[:], cmap=cm.viridis)
"""
u = numpy.ones((ny, nx))
u[int(.5 / dy):int(1 / dy + 1), int(.5 / dx):int(1 / dx + 1)] = 2
for n in range(nt + 1): ##loop across number of time steps
    un = u.copy()
    u[1:, 1:] = (un[1:, 1:] - (c * dt / dx * (un[1:, 1:] - un[1:, :-1])) -
                              (c * dt / dy * (un[1:, 1:] - un[:-1, 1:])))
    u[0, :] = 1
    u[-1, :] = 1
    u[:, 0] = 1
    u[:, -1] = 1
fig = pyplot.figure(figsize=(11, 7), dpi=100)
ax = fig.gca(projection='3d')
surf2 = ax.plot_surface(X, Y, u[:], cmap=cm.viridis)
"""
""" | [
"numpy.meshgrid",
"numpy.linspace",
"numpy.ones",
"matplotlib.pyplot.figure"
] | [((263, 287), 'numpy.linspace', 'numpy.linspace', (['(0)', '(2)', 'nx'], {}), '(0, 2, nx)\n', (277, 287), False, 'import numpy\n'), ((292, 316), 'numpy.linspace', 'numpy.linspace', (['(0)', '(2)', 'ny'], {}), '(0, 2, ny)\n', (306, 316), False, 'import numpy\n'), ((322, 342), 'numpy.ones', 'numpy.ones', (['(ny, nx)'], {}), '((ny, nx))\n', (332, 342), False, 'import numpy\n'), ((377, 397), 'numpy.ones', 'numpy.ones', (['(ny, nx)'], {}), '((ny, nx))\n', (387, 397), False, 'import numpy\n'), ((657, 696), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(11, 7)', 'dpi': '(100)'}), '(figsize=(11, 7), dpi=100)\n', (670, 696), False, 'from matplotlib import pyplot, cm\n'), ((756, 776), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (770, 776), False, 'import numpy\n'), ((862, 882), 'numpy.ones', 'numpy.ones', (['(ny, nx)'], {}), '((ny, nx))\n', (872, 882), False, 'import numpy\n'), ((1370, 1409), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(11, 7)', 'dpi': '(100)'}), '(figsize=(11, 7), dpi=100)\n', (1383, 1409), False, 'from matplotlib import pyplot, cm\n')] |
import numpy as np
from scipy.misc import imresize
import gym
from gym.core import ObservationWrapper, Wrapper
from gym.spaces.box import Box
from gym.wrappers import SkipWrapper, TimeLimit
from copy import copy
import collections
try:
import ppaquette_gym_doom
from ppaquette_gym_doom.wrappers.action_space import ToDiscrete
except ImportError:
print("no doom envs")
Transition = collections.namedtuple(
"Transition",
["state", "action", "reward", "next_state", "done"])
class PreprocessImage(ObservationWrapper):
def __init__(self, env, height=64, width=64, grayscale=True, crop=None):
"""
A gym wrapper that crops, scales image into the desired shapes and optionally grayscales it.
"""
super(PreprocessImage, self).__init__(env)
self.img_size = (height, width)
self.grayscale = grayscale
no_crop = lambda img: img
self.crop = crop or no_crop
n_colors = 1 if self.grayscale else 3
self.observation_space = Box(0.0, 1.0, [height, width, n_colors])
def _observation(self, img):
"""what happens to the observation"""
img = self.crop(img)
img = imresize(img, self.img_size)
if self.grayscale:
img = img.mean(-1, keepdims=True)
img = img.astype('float32') / 255.
return img
class FrameBuffer(Wrapper):
def __init__(self, env, n_frames=4, reshape_fn=None):
"""A gym wrapper that returns last n_frames observations as a single observation.
Useful for games like Atari and Doom with screen as input."""
super(FrameBuffer, self).__init__(env)
self.framebuffer = np.zeros([n_frames, ] + list(env.observation_space.shape))
# now, hacky auto-reshape fn
if reshape_fn is None:
shape_dims = list(range(len(self.framebuffer.shape)))
shape_dims = shape_dims[1:] + [shape_dims[0]]
result_shape = list(env.observation_space.shape)
if len(result_shape) == 1:
# so, its linear env
result_shape += [1]
result_shape[-1] = result_shape[-1] * n_frames
reshape_fn = lambda x: np.transpose(x, shape_dims).reshape(result_shape)
self.reshape_fn = reshape_fn
self.observation_space = Box(0.0, 1.0, self.reshape_fn(self.framebuffer).shape)
def reset(self):
"""resets breakout, returns initial frames"""
self.framebuffer = np.zeros_like(self.framebuffer)
self.update_buffer(self.env.reset())
return self.reshape_fn(self.framebuffer)
def step(self, action):
"""plays breakout for 1 step, returns 4-frame buffer"""
new_obs, r, done, info = self.env.step(action)
self.update_buffer(new_obs)
return self.reshape_fn(self.framebuffer), r, done, info
def update_buffer(self, obs):
"""push new observation to the buffer, remove the earliest one"""
self.framebuffer = np.vstack([obs[None], self.framebuffer[:-1]])
class EnvPool(Wrapper):
"""
Typical EnvPool, that does not care about done envs.
"""
def __init__(self, env, n_envs=16, autoreload_envs=False):
super(EnvPool, self).__init__(env)
self.initial_env = env
self.n_envs = n_envs
self.env_shape = env.observation_space.shape
self.envs = []
self.recreate_envs()
self.reset()
def recreate_envs(self):
self.close()
self.envs = np.array([copy(self.initial_env) for _ in range(self.n_envs)])
def reset(self):
self._states = np.zeros(shape=(self.n_envs,) + tuple(self.env_shape), dtype=np.float32)
self._rewards = np.zeros(shape=self.n_envs, dtype=np.float32)
self._dones = np.zeros(shape=self.n_envs, dtype=np.bool)
for i, env in enumerate(self.envs):
self._states[i] = env.reset()
return self._states.copy()
def step(self, actions):
for i, (action, env) in enumerate(zip(actions, self.envs)):
new_s, r, done, _ = env.step(action)
self._rewards[i] = r
self._dones[i] = done
if not done:
self._states[i] = new_s
else:
self._states[i] = env.reset()
return self._states.copy(), self._rewards.copy(), self._dones.copy(), None
def close(self):
for env in self.envs:
env.close()
def pool_states(self):
return self._states.copy()
def make_env(env_name, n_games=1, episode_limit=None, n_frames=1, autoreload_envs=False):
env = gym.make(env_name) if episode_limit is None else gym.make(env_name).env
env = FrameBuffer(env, n_frames=n_frames) if n_frames > 1 else env
if episode_limit is not None:
env = TimeLimit(env, max_episode_steps=episode_limit)
return EnvPool(env, n_games, autoreload_envs) if n_games > 0 else env
def make_image_env(
env_name, n_games=1, episode_limit=None,
n_frames=1, autoreload_envs=False,
width=64, height=64,
grayscale=True, crop=None):
env = gym.make(env_name) if episode_limit is None else gym.make(env_name).env
if "ppaquette" in env_name:
env = SkipWrapper(4)(ToDiscrete("minimal")(env))
env = PreprocessImage(env, width=width, height=height, grayscale=grayscale, crop=crop)
env = FrameBuffer(env, n_frames=n_frames) if n_frames > 1 else env
if episode_limit is not None:
env = TimeLimit(env, max_episode_steps=episode_limit)
return EnvPool(env, n_games, autoreload_envs) if n_games > 0 else env
def make_env_wrapper(make_env_fn, params):
def wrapper(env, n_games, episode_limit=None):
return make_env_fn(env, n_games, episode_limit=episode_limit, **params)
return wrapper
| [
"collections.namedtuple",
"gym.spaces.box.Box",
"gym.wrappers.TimeLimit",
"ppaquette_gym_doom.wrappers.action_space.ToDiscrete",
"numpy.zeros",
"numpy.vstack",
"scipy.misc.imresize",
"gym.wrappers.SkipWrapper",
"copy.copy",
"numpy.transpose",
"numpy.zeros_like",
"gym.make"
] | [((396, 489), 'collections.namedtuple', 'collections.namedtuple', (['"""Transition"""', "['state', 'action', 'reward', 'next_state', 'done']"], {}), "('Transition', ['state', 'action', 'reward',\n 'next_state', 'done'])\n", (418, 489), False, 'import collections\n'), ((1019, 1059), 'gym.spaces.box.Box', 'Box', (['(0.0)', '(1.0)', '[height, width, n_colors]'], {}), '(0.0, 1.0, [height, width, n_colors])\n', (1022, 1059), False, 'from gym.spaces.box import Box\n'), ((1183, 1211), 'scipy.misc.imresize', 'imresize', (['img', 'self.img_size'], {}), '(img, self.img_size)\n', (1191, 1211), False, 'from scipy.misc import imresize\n'), ((2469, 2500), 'numpy.zeros_like', 'np.zeros_like', (['self.framebuffer'], {}), '(self.framebuffer)\n', (2482, 2500), True, 'import numpy as np\n'), ((2979, 3024), 'numpy.vstack', 'np.vstack', (['[obs[None], self.framebuffer[:-1]]'], {}), '([obs[None], self.framebuffer[:-1]])\n', (2988, 3024), True, 'import numpy as np\n'), ((3697, 3742), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.n_envs', 'dtype': 'np.float32'}), '(shape=self.n_envs, dtype=np.float32)\n', (3705, 3742), True, 'import numpy as np\n'), ((3765, 3807), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.n_envs', 'dtype': 'np.bool'}), '(shape=self.n_envs, dtype=np.bool)\n', (3773, 3807), True, 'import numpy as np\n'), ((4597, 4615), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (4605, 4615), False, 'import gym\n'), ((4788, 4835), 'gym.wrappers.TimeLimit', 'TimeLimit', (['env'], {'max_episode_steps': 'episode_limit'}), '(env, max_episode_steps=episode_limit)\n', (4797, 4835), False, 'from gym.wrappers import SkipWrapper, TimeLimit\n'), ((5099, 5117), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (5107, 5117), False, 'import gym\n'), ((5470, 5517), 'gym.wrappers.TimeLimit', 'TimeLimit', (['env'], {'max_episode_steps': 'episode_limit'}), '(env, max_episode_steps=episode_limit)\n', (5479, 5517), False, 'from gym.wrappers import SkipWrapper, 
TimeLimit\n'), ((4646, 4664), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (4654, 4664), False, 'import gym\n'), ((5148, 5166), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (5156, 5166), False, 'import gym\n'), ((5217, 5231), 'gym.wrappers.SkipWrapper', 'SkipWrapper', (['(4)'], {}), '(4)\n', (5228, 5231), False, 'from gym.wrappers import SkipWrapper, TimeLimit\n'), ((3502, 3524), 'copy.copy', 'copy', (['self.initial_env'], {}), '(self.initial_env)\n', (3506, 3524), False, 'from copy import copy\n'), ((5232, 5253), 'ppaquette_gym_doom.wrappers.action_space.ToDiscrete', 'ToDiscrete', (['"""minimal"""'], {}), "('minimal')\n", (5242, 5253), False, 'from ppaquette_gym_doom.wrappers.action_space import ToDiscrete\n'), ((2190, 2217), 'numpy.transpose', 'np.transpose', (['x', 'shape_dims'], {}), '(x, shape_dims)\n', (2202, 2217), True, 'import numpy as np\n')] |
import scipy.io as io
import numpy as np
from typing import List, Tuple
def savenpz(filename:str, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
When saving dictionaries, the dictionary keys become filenames
inside the ZIP archive. Therefore, keys should be valid filenames.
E.g., avoid keys that begin with ``/`` or contain ``.``.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_0', 'arr_1']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> sorted(npzfile.files)
['x', 'y']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
# Remove other than '.npz' extension if filename has the other extention.
if filename[-4:] != '.npz':
if filename.find('.') != -1:
bad_ext = filename[filename.find('.'):]
print('Warning: Filename "'+filename+'" has a bad extension "'+bad_ext+'" .')
filename = filename[0:filename.find('.')]
print('Renamed to "'+filename+'.npz".')
# Add '.npz' force.
filename = filename + '.npz'
np.savez(filename, *args, **kwds)
| [
"numpy.savez"
] | [((3622, 3655), 'numpy.savez', 'np.savez', (['filename', '*args'], {}), '(filename, *args, **kwds)\n', (3630, 3655), True, 'import numpy as np\n')] |
import glob
import os
import random
from collections import deque
from itertools import zip_longest
from typing import Callable, Iterable, Optional, Tuple, Union
import gym
from gym import spaces
import numpy as np
import torch
import torch.nn.functional as F
from lightning_baselines3.common.type_aliases import GymEnv
from lightning_baselines3.common.vec_env import is_image_space
def get_obs_shape(observation_space: spaces.Space) -> Tuple[int, ...]:
"""
Get the shape of the observation (useful for the buffers).
:param observation_space: (spaces.Space)
:return: (Tuple[int, ...])
"""
if isinstance(observation_space, spaces.Box):
return observation_space.shape
elif isinstance(observation_space, spaces.Discrete):
# Observation is an int
return (1,)
elif isinstance(observation_space, spaces.MultiDiscrete):
# Number of discrete features
return (int(len(observation_space.nvec)),)
elif isinstance(observation_space, spaces.MultiBinary):
# Number of binary features
return (int(observation_space.n),)
else:
raise NotImplementedError()
def get_action_dim(action_space: spaces.Space) -> int:
"""
Get the dimension of the action space.
:param action_space: (spaces.Space)
:return: (int)
"""
if isinstance(action_space, spaces.Box):
return int(np.prod(action_space.shape))
elif isinstance(action_space, spaces.Discrete):
# Action is an int
return 1
elif isinstance(action_space, spaces.MultiDiscrete):
# Number of discrete actions
return int(len(action_space.nvec))
elif isinstance(action_space, spaces.MultiBinary):
# Number of binary actions
return int(action_space.n)
else:
raise NotImplementedError()
def set_random_seed(seed: int) -> None:
"""
Seed the different random generators
:param seed: (int)
"""
# Seed python RNG
random.seed(seed)
# Seed numpy RNG
np.random.seed(seed)
# seed the RNG for all devices (both CPU and CUDA)
torch.manual_seed(seed)
# From stable baselines
def explained_variance(y_pred: torch.tensor, y_true: torch.tensor) -> np.ndarray:
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
:param y_pred: (np.ndarray) the prediction
:param y_true: (np.ndarray) the expected value
:return: (float) explained variance of ypred and y
"""
assert y_true.ndim == 1 and y_pred.ndim == 1
var_y = torch.var(y_true)
return torch.nan if var_y == 0 else 1 - torch.var(y_true - y_pred) / var_y
def zip_strict(*iterables: Iterable) -> Iterable:
r"""
``zip()`` function but enforces that iterables are of equal length.
Raises ``ValueError`` if iterables not of equal length.
Code inspired by Stackoverflow answer for question #32954486.
:param \*iterables: iterables to ``zip()``
"""
# As in Stackoverflow #32954486, use
# new object for "empty" in case we have
# Nones in iterable.
sentinel = object()
for combo in zip_longest(*iterables, fillvalue=sentinel):
if sentinel in combo:
raise ValueError("Iterables have different lengths")
yield combo
def polyak_update(params: Iterable[torch.nn.Parameter], target_params: Iterable[torch.nn.Parameter], tau: float) -> None:
"""
Perform a Polyak average update on ``target_params`` using ``params``:
target parameters are slowly updated towards the main parameters.
``tau``, the soft update coefficient controls the interpolation:
``tau=1`` corresponds to copying the parameters to the target ones whereas nothing happens when ``tau=0``.
The Polyak update is done in place, with ``no_grad``, and therefore does not create intermediate tensors,
or a computation graph, reducing memory cost and improving performance. We scale the target params
by ``1-tau`` (in-place), add the new weights, scaled by ``tau`` and store the result of the sum in the target
params (in place).
See https://github.com/DLR-RM/stable-baselines3/issues/93
:param params: parameters to use to update the target params
:param target_params: parameters to update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1)
"""
with torch.no_grad():
# zip does not raise an exception if length of parameters does not match.
for param, target_param in zip(params, target_params):
target_param.data.mul_(1 - tau)
torch.add(target_param.data, param.data, alpha=tau, out=target_param.data)
| [
"torch.manual_seed",
"numpy.prod",
"itertools.zip_longest",
"random.seed",
"torch.add",
"numpy.random.seed",
"torch.no_grad",
"torch.var"
] | [((1971, 1988), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1982, 1988), False, 'import random\n'), ((2014, 2034), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2028, 2034), True, 'import numpy as np\n'), ((2094, 2117), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2111, 2117), False, 'import torch\n'), ((2718, 2735), 'torch.var', 'torch.var', (['y_true'], {}), '(y_true)\n', (2727, 2735), False, 'import torch\n'), ((3282, 3325), 'itertools.zip_longest', 'zip_longest', (['*iterables'], {'fillvalue': 'sentinel'}), '(*iterables, fillvalue=sentinel)\n', (3293, 3325), False, 'from itertools import zip_longest\n'), ((4521, 4536), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4534, 4536), False, 'import torch\n'), ((1390, 1417), 'numpy.prod', 'np.prod', (['action_space.shape'], {}), '(action_space.shape)\n', (1397, 1417), True, 'import numpy as np\n'), ((4739, 4813), 'torch.add', 'torch.add', (['target_param.data', 'param.data'], {'alpha': 'tau', 'out': 'target_param.data'}), '(target_param.data, param.data, alpha=tau, out=target_param.data)\n', (4748, 4813), False, 'import torch\n'), ((2780, 2806), 'torch.var', 'torch.var', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (2789, 2806), False, 'import torch\n')] |
#!/usr/bin/env python3
import sys
import pickle
import os
import gc
import json
from logging import getLogger
import numpy as np
import pandas as pd
import cv2
#import tensorflow as tf
from tqdm import tqdm
try:
APP_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
sys.path.append(APP_ROOT)
sys.path.append('/openpilot')
sys.path.append(APP_ROOT + '/LaneATT')
sys.path.append(APP_ROOT + '/Ultra-Fast-Lane-Detection')
except:
raise
from car_motion_attack.attack import CarMotionAttack
from car_motion_attack.replay_bicycle import ReplayBicycle
from car_motion_attack.load_sensor_data import load_sensor_data, load_transform_matrix, create_mock_driving
logger = getLogger(None)
model_type = sys.argv[1]
if model_type not in ('laneatt', 'ultrafast', 'scnn', 'polylanenet'):
raise Exception(f'unknown model: {model_type}')
def read_img(path):
img = np.zeros((874, 1164, 3), dtype=np.uint8)
img[200:-220, 106:-106] = cv2.resize(cv2.imread(path), (952, 454))
#img = cv2.resize(cv2.imread(path), (1164, 874))
return img
def main(data_path='',
n_epoch=10000,
n_frames=20,
scale=5,
base_color=0.38,
starting_meters=45,
patch_lateral_shift=0,
result_dir='./result/',
left_lane_pos=4,
right_lane_pos=36,
left_solid=False,
right_solid=False,
src_corners=None,
target_deviation=0.5,
is_attack_to_rigth=True,
patch_width=45,
patch_length=300,
frame_offset=0,
l2_weight=0.01
):
df_sensors = create_mock_driving(speed_ms=26.8224, n_frames=n_frames + 1) # 60 mph
roi_mat = None
list_bgr_img = [read_img(data_path + f'/{i + 1}.jpg') for i in range(frame_offset, frame_offset + n_frames + 1)]
global_bev_mask = np.random.random((patch_length * scale, patch_width * scale)) > 0
if not os.path.exists(result_dir + 'result.json'):
cma = CarMotionAttack(
list_bgr_img,
df_sensors,
global_bev_mask,
base_color,
roi_mat,
scale=scale,
n_epoch=n_epoch,
result_dir=result_dir,
left_lane_pos=left_lane_pos,
right_lane_pos=right_lane_pos,
src_corners=src_corners,
is_attack_to_rigth=is_attack_to_rigth,
target_deviation=target_deviation,
l2_weight=l2_weight,
target=model_type
)
cma.run(
starting_meters=starting_meters,
lateral_shift=patch_lateral_shift,
starting_steering_angle=0,#cm.list_desired_steering_angle[0],
# starting_patch_dir=START_DIR,
# starting_patch_epoch=START_DIR_EPOCH,
trajectory_update=False
)
last_epoch = cma.last_epoch
par = cma.perturbable_area_ratio
del cma, list_bgr_img
gc.collect()
result = {'data_path': data_path,
'n_epoch': n_epoch,
'n_frames': n_frames,
'scale': scale,
'base_color': base_color,
'starting_meters': starting_meters,
'patch_lateral_shift': patch_lateral_shift,
'result_dir': result_dir,
'left_lane_pos': left_lane_pos,
'right_lane_pos': right_lane_pos,
'src_corners': src_corners,
'target_deviation': target_deviation,
'is_attack_to_rigth': is_attack_to_rigth,
'perturbable_area_ratio': par,
'last_epoch': last_epoch,
'model_type': model_type,
}
with open(result_dir + 'result.json', 'w') as f:
f.write(json.dumps(result))
else:
with open(result_dir + 'result.json', 'r') as f:
last_epoch = json.loads(f.read())['last_epoch']
# include last
df_sensors = create_mock_driving(speed_ms=26.8224, n_frames=n_frames + 1) # 60 mph
roi_mat = None
list_bgr_img = [read_img(data_path + f'/{i + 1}.jpg') for i in range(frame_offset, frame_offset + n_frames + 1)]
rb = ReplayBicycle(
list_bgr_img, df_sensors, global_bev_mask, roi_mat, src_corners, scale=scale, target=model_type
)
cm = rb.run(start_steering_angle=None, trajectory_update=False)
#df_sensors['lateral_shift_openpilot'] = [0] + cm.list_total_lateral_shift[:-1]
#df_sensors['yaw_openpilot'] = [0] + cm.list_yaw[:-1]
with open(result_dir + '/replay/model_benign_lane_pred.pkl', 'wb') as f:
pickle.dump(rb.model_lane_pred, f, -1)
cma_rep = CarMotionAttack(
list_bgr_img,
df_sensors,
global_bev_mask,
base_color,
roi_mat,
scale=scale,
n_epoch=n_epoch,
result_dir=result_dir,
left_lane_pos=left_lane_pos,
right_lane_pos=right_lane_pos,
src_corners=src_corners,
is_attack_to_rigth=is_attack_to_rigth,
target_deviation=target_deviation,
l2_weight=l2_weight,
target=model_type
)
cma_rep.replay(
epoch=last_epoch,
starting_meters=starting_meters,
lateral_shift=patch_lateral_shift,
starting_steering_angle=0,#cm.list_desired_steering_angle[0],
trajectory_update=False
)
if __name__ == '__main__':
from logging import StreamHandler, Formatter, FileHandler
config_path = sys.argv[2]
with open(config_path, 'r') as f:
config = json.loads(f.read())
config['result_dir'] = f'logs/tusimple_attack/logs_{model_type}_drp_wb/' + config['result_dir']
config['l2_weight'] = 0.001
config['n_epoch'] = 200
#config['n_frames'] = 1
#config['frame_offset'] = 15
config['base_color'] = min(config['base_color'], -0.1)
os.makedirs(config['result_dir'] + '/replay/', exist_ok=True)
log_fmt = Formatter(
'%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '
)
handler = StreamHandler()
handler.setLevel('INFO')
handler.setFormatter(log_fmt)
logger.setLevel('INFO')
logger.addHandler(handler)
handler = FileHandler(
config['result_dir'] + os.path.basename(os.path.abspath(__file__)) + '.log', 'a'
)
handler.setLevel('DEBUG')
handler.setFormatter(log_fmt)
handler.setLevel('DEBUG')
logger.addHandler(handler)
logger.info(f'start: model={model_type}')
main(**config)
logger.info('end')
| [
"logging.getLogger",
"car_motion_attack.replay_bicycle.ReplayBicycle",
"os.path.exists",
"logging.StreamHandler",
"pickle.dump",
"os.makedirs",
"numpy.random.random",
"logging.Formatter",
"car_motion_attack.load_sensor_data.create_mock_driving",
"json.dumps",
"car_motion_attack.attack.CarMotionA... | [((711, 726), 'logging.getLogger', 'getLogger', (['None'], {}), '(None)\n', (720, 726), False, 'from logging import getLogger\n'), ((298, 323), 'sys.path.append', 'sys.path.append', (['APP_ROOT'], {}), '(APP_ROOT)\n', (313, 323), False, 'import sys\n'), ((328, 357), 'sys.path.append', 'sys.path.append', (['"""/openpilot"""'], {}), "('/openpilot')\n", (343, 357), False, 'import sys\n'), ((362, 400), 'sys.path.append', 'sys.path.append', (["(APP_ROOT + '/LaneATT')"], {}), "(APP_ROOT + '/LaneATT')\n", (377, 400), False, 'import sys\n'), ((405, 461), 'sys.path.append', 'sys.path.append', (["(APP_ROOT + '/Ultra-Fast-Lane-Detection')"], {}), "(APP_ROOT + '/Ultra-Fast-Lane-Detection')\n", (420, 461), False, 'import sys\n'), ((907, 947), 'numpy.zeros', 'np.zeros', (['(874, 1164, 3)'], {'dtype': 'np.uint8'}), '((874, 1164, 3), dtype=np.uint8)\n', (915, 947), True, 'import numpy as np\n'), ((1629, 1689), 'car_motion_attack.load_sensor_data.create_mock_driving', 'create_mock_driving', ([], {'speed_ms': '(26.8224)', 'n_frames': '(n_frames + 1)'}), '(speed_ms=26.8224, n_frames=n_frames + 1)\n', (1648, 1689), False, 'from car_motion_attack.load_sensor_data import load_sensor_data, load_transform_matrix, create_mock_driving\n'), ((4044, 4104), 'car_motion_attack.load_sensor_data.create_mock_driving', 'create_mock_driving', ([], {'speed_ms': '(26.8224)', 'n_frames': '(n_frames + 1)'}), '(speed_ms=26.8224, n_frames=n_frames + 1)\n', (4063, 4104), False, 'from car_motion_attack.load_sensor_data import load_sensor_data, load_transform_matrix, create_mock_driving\n'), ((4261, 4375), 'car_motion_attack.replay_bicycle.ReplayBicycle', 'ReplayBicycle', (['list_bgr_img', 'df_sensors', 'global_bev_mask', 'roi_mat', 'src_corners'], {'scale': 'scale', 'target': 'model_type'}), '(list_bgr_img, df_sensors, global_bev_mask, roi_mat,\n src_corners, scale=scale, target=model_type)\n', (4274, 4375), False, 'from car_motion_attack.replay_bicycle import 
ReplayBicycle\n'), ((4737, 5086), 'car_motion_attack.attack.CarMotionAttack', 'CarMotionAttack', (['list_bgr_img', 'df_sensors', 'global_bev_mask', 'base_color', 'roi_mat'], {'scale': 'scale', 'n_epoch': 'n_epoch', 'result_dir': 'result_dir', 'left_lane_pos': 'left_lane_pos', 'right_lane_pos': 'right_lane_pos', 'src_corners': 'src_corners', 'is_attack_to_rigth': 'is_attack_to_rigth', 'target_deviation': 'target_deviation', 'l2_weight': 'l2_weight', 'target': 'model_type'}), '(list_bgr_img, df_sensors, global_bev_mask, base_color,\n roi_mat, scale=scale, n_epoch=n_epoch, result_dir=result_dir,\n left_lane_pos=left_lane_pos, right_lane_pos=right_lane_pos, src_corners\n =src_corners, is_attack_to_rigth=is_attack_to_rigth, target_deviation=\n target_deviation, l2_weight=l2_weight, target=model_type)\n', (4752, 5086), False, 'from car_motion_attack.attack import CarMotionAttack\n'), ((6018, 6079), 'os.makedirs', 'os.makedirs', (["(config['result_dir'] + '/replay/')"], {'exist_ok': '(True)'}), "(config['result_dir'] + '/replay/', exist_ok=True)\n", (6029, 6079), False, 'import os\n'), ((6094, 6191), 'logging.Formatter', 'Formatter', (['"""%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s """'], {}), "(\n '%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s '\n )\n", (6103, 6191), False, 'from logging import StreamHandler, Formatter, FileHandler\n'), ((6211, 6226), 'logging.StreamHandler', 'StreamHandler', ([], {}), '()\n', (6224, 6226), False, 'from logging import StreamHandler, Formatter, FileHandler\n'), ((989, 1005), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (999, 1005), False, 'import cv2\n'), ((1858, 1919), 'numpy.random.random', 'np.random.random', (['(patch_length * scale, patch_width * scale)'], {}), '((patch_length * scale, patch_width * scale))\n', (1874, 1919), True, 'import numpy as np\n'), ((1937, 1979), 'os.path.exists', 'os.path.exists', (["(result_dir + 'result.json')"], {}), "(result_dir + 
'result.json')\n", (1951, 1979), False, 'import os\n'), ((1995, 2344), 'car_motion_attack.attack.CarMotionAttack', 'CarMotionAttack', (['list_bgr_img', 'df_sensors', 'global_bev_mask', 'base_color', 'roi_mat'], {'scale': 'scale', 'n_epoch': 'n_epoch', 'result_dir': 'result_dir', 'left_lane_pos': 'left_lane_pos', 'right_lane_pos': 'right_lane_pos', 'src_corners': 'src_corners', 'is_attack_to_rigth': 'is_attack_to_rigth', 'target_deviation': 'target_deviation', 'l2_weight': 'l2_weight', 'target': 'model_type'}), '(list_bgr_img, df_sensors, global_bev_mask, base_color,\n roi_mat, scale=scale, n_epoch=n_epoch, result_dir=result_dir,\n left_lane_pos=left_lane_pos, right_lane_pos=right_lane_pos, src_corners\n =src_corners, is_attack_to_rigth=is_attack_to_rigth, target_deviation=\n target_deviation, l2_weight=l2_weight, target=model_type)\n', (2010, 2344), False, 'from car_motion_attack.attack import CarMotionAttack\n'), ((3021, 3033), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3031, 3033), False, 'import gc\n'), ((4682, 4720), 'pickle.dump', 'pickle.dump', (['rb.model_lane_pred', 'f', '(-1)'], {}), '(rb.model_lane_pred, f, -1)\n', (4693, 4720), False, 'import pickle\n'), ((259, 284), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (274, 284), False, 'import os\n'), ((3860, 3878), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3870, 3878), False, 'import json\n'), ((6425, 6450), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (6440, 6450), False, 'import os\n')] |
"""Copyright (c) 2020 AIT Lab, ETH Zurich
Students and holders of copies of this code, accompanying datasets,
and documentation, are not allowed to copy, distribute or modify
any of the mentioned materials beyond the scope and duration of the
Machine Perception course projects.
That is, no partial/full copy nor modification of this code and
accompanying data should be made publicly or privately available to
current/future students or other parties.
"""
"""Manage saving and loading of model checkpoints."""
import os
import re
import numpy as np
import tensorflow as tf
import logging
logger = logging.getLogger(__name__)
class CheckpointManager(object):
"""Manager to coordinate saving and loading of trainable parameters."""
def __init__(self, model):
"""Initialize manager based on given model instance."""
self._tensorflow_session = model._tensorflow_session
self._model = model
def build_savers(self):
"""Create tf.train.Saver instances."""
all_saveable_vars = sorted(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) +
tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS) +
tf.get_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES) +
tf.get_collection_ref('batch_norm_non_trainable'),
key=lambda v: v.name)
# Grab all available prefixes
all_prefixes = []
for v in all_saveable_vars:
name = v.name
if '/' not in name:
continue
prefix = name.split('/')[0]
if prefix == 'test' or prefix == 'learning_params':
continue
if prefix not in all_prefixes:
all_prefixes.append(prefix)
# For each prefix, create saver
self._savers = {}
for prefix in all_prefixes:
vars_to_save = [v for v in all_saveable_vars if v.name.startswith(prefix + '/')]
if len(vars_to_save):
self._savers[prefix] = tf.train.Saver(vars_to_save, max_to_keep=2)
def load_all(self):
"""Load all available weights for each known prefix."""
iteration_number = 0
iteration_numbers = []
for prefix, saver in self._savers.items():
output_path = '%s/checkpoints/%s' % (self._model.output_path, prefix)
checkpoint = tf.train.get_checkpoint_state(output_path)
if checkpoint and checkpoint.model_checkpoint_path:
checkpoint_name = os.path.basename(checkpoint.model_checkpoint_path)
try: # Attempt to restore saveable variables
self._savers[prefix].restore(self._tensorflow_session,
'%s/%s' % (output_path, checkpoint_name))
iteration_numbers.append(
int(next(re.finditer("(\d+)(?!.*\d)", checkpoint_name)).group(0))
)
except Exception as e:
import traceback
traceback.print_exc()
if len(iteration_numbers) > 0:
iteration_number = np.amax(iteration_numbers)
return iteration_number
def save_all(self, iteration_number):
"""Save all prefixes."""
for prefix, saver in self._savers.items():
output_path = '%s/checkpoints/%s' % (self._model.output_path, prefix)
if not os.path.isdir(output_path):
os.makedirs(output_path)
saver.save(self._tensorflow_session, output_path + '/model',
global_step=iteration_number)
logger.debug('Saved %s' % output_path)
logger.info('CheckpointManager::save_all call done')
| [
"logging.getLogger",
"tensorflow.get_collection_ref",
"os.makedirs",
"tensorflow.train.Saver",
"tensorflow.train.get_checkpoint_state",
"os.path.isdir",
"os.path.basename",
"re.finditer",
"traceback.print_exc",
"numpy.amax",
"tensorflow.get_collection"
] | [((603, 630), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (620, 630), False, 'import logging\n'), ((2432, 2474), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['output_path'], {}), '(output_path)\n', (2461, 2474), True, 'import tensorflow as tf\n'), ((3198, 3224), 'numpy.amax', 'np.amax', (['iteration_numbers'], {}), '(iteration_numbers)\n', (3205, 3224), True, 'import numpy as np\n'), ((1304, 1353), 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['"""batch_norm_non_trainable"""'], {}), "('batch_norm_non_trainable')\n", (1325, 1353), True, 'import tensorflow as tf\n'), ((2081, 2124), 'tensorflow.train.Saver', 'tf.train.Saver', (['vars_to_save'], {'max_to_keep': '(2)'}), '(vars_to_save, max_to_keep=2)\n', (2095, 2124), True, 'import tensorflow as tf\n'), ((2573, 2623), 'os.path.basename', 'os.path.basename', (['checkpoint.model_checkpoint_path'], {}), '(checkpoint.model_checkpoint_path)\n', (2589, 2623), False, 'import os\n'), ((3485, 3511), 'os.path.isdir', 'os.path.isdir', (['output_path'], {}), '(output_path)\n', (3498, 3511), False, 'import os\n'), ((3529, 3553), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (3540, 3553), False, 'import os\n'), ((1210, 1266), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.MOVING_AVERAGE_VARIABLES'], {}), '(tf.GraphKeys.MOVING_AVERAGE_VARIABLES)\n', (1227, 1266), True, 'import tensorflow as tf\n'), ((1038, 1086), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (1055, 1086), True, 'import tensorflow as tf\n'), ((1124, 1172), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SAVEABLE_OBJECTS'], {}), '(tf.GraphKeys.SAVEABLE_OBJECTS)\n', (1141, 1172), True, 'import tensorflow as tf\n'), ((3106, 3127), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3125, 3127), False, 'import traceback\n'), ((2931, 
2978), 're.finditer', 're.finditer', (['"""(\\\\d+)(?!.*\\\\d)"""', 'checkpoint_name'], {}), "('(\\\\d+)(?!.*\\\\d)', checkpoint_name)\n", (2942, 2978), False, 'import re\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Adapted from the following repos:
- https://github.com/keon/policy-gradient/blob/master/actor.py
- https://gist.github.com/kkweon/c8d1caabaf7b43317bc8825c226045d2
- https://towardsdatascience.com/reinforcement-learning-w-keras-openai-actor-critic-models-f084612cfd69
- https://github.com/dennybritz/reinforcement-learning/blob/master/PolicyGradient/CliffWalk%20Actor%20Critic%20Solution.ipynb
"""
import mxnet as mx
import mxnet.ndarray as F
import mxnet.gluon as gluon
from mxnet import nd, autograd
from mxnet.gluon import nn
# TODO: use minpy?
import numpy as np
class Net(gluon.Block):
def __init__(self, n_dims, hidden_dims=(32, 32), **kwargs):
super(Net, self).__init__(**kwargs)
self.n_dims = n_dims
self.hidden_dims = hidden_dims
with self.name_scope():
self.embedding = nn.Embedding(256, output_dim=32) # suggestion: not greater than 16
self.bn = nn.BatchNorm() # TODO: is this necessary?
self.conv1 = nn.Conv1D(channels=32, kernel_size=3, activation='relu',
padding=0, strides=1)
#self.conv2 = nn.Conv1D(channels=32, kernel_size=3, activation='relu',
# padding=0, strides=1)
self.pool = nn.GlobalMaxPool1D()
self.h1 = nn.Dense(32, activation='relu')
#self.h2 = nn.Dense(32, activation='relu')
self.output = nn.Dense(1)
def forward(self, x):
x = self.embedding(nd.array(x))
x = self.bn(x)
x = self.pool(self.conv1(x))
x = self.h1(x)
return F.identity(self.output(x))
class ValueEstimator(object):
# TODO: use eligibility traces as it is explained here:
# http://www0.cs.ucl.ac.uk/staff/D.Silver/web/Teaching_files/pg.pdf
def __init__(self, n_dims, lr=1e-4, ctx=mx.cpu(0)):
self.n_dims = n_dims
self.lr = lr
self.ctx = ctx
self.model, self.trainer = self._build_network(n_dims=self.n_dims, ctx=self.ctx)
@staticmethod
def _build_network(n_dims, hidden_dims=(32, 32), ctx=mx.cpu(0)):
model = Net(n_dims=n_dims, hidden_dims=hidden_dims)
model.initialize(mx.init.Uniform(), ctx=ctx)
#self.trainer = gluon.Trainer(self.model.collect_params(), 'adadelta', {'rho': 0.9})
trainer = gluon.Trainer(model.collect_params(), 'adam', {'learning_rate': 1e-4})
return model, trainer
def train(self, x, v):
x = nd.array(x)
v = nd.array(v)
with autograd.record():
o = self.model(x)
L = gluon.loss.L2Loss()
loss = L(o, v)
autograd.backward(loss)
self.trainer.step(batch_size=x.shape[0])
def predict(self, x):
if isinstance(x, list) or isinstance(x, tuple):
x = np.asarray(x)
if len(x.shape) == 1:
x = np.expand_dims(x, axis=0)
return self.model.forward(x).squeeze().asnumpy()
def load(self, filename):
self.model.load_params(filename)
return self
def save(self, filename):
try:
self.model.save_params(filename)
success = True
except:
success = False
return success
| [
"mxnet.gluon.loss.L2Loss",
"mxnet.autograd.record",
"mxnet.gluon.nn.Dense",
"mxnet.gluon.nn.BatchNorm",
"mxnet.cpu",
"mxnet.gluon.nn.Embedding",
"numpy.asarray",
"mxnet.init.Uniform",
"mxnet.gluon.nn.GlobalMaxPool1D",
"mxnet.autograd.backward",
"numpy.expand_dims",
"mxnet.nd.array",
"mxnet.g... | [((1898, 1907), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (1904, 1907), True, 'import mxnet as mx\n'), ((2149, 2158), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (2155, 2158), True, 'import mxnet as mx\n'), ((2526, 2537), 'mxnet.nd.array', 'nd.array', (['x'], {}), '(x)\n', (2534, 2537), False, 'from mxnet import nd, autograd\n'), ((2550, 2561), 'mxnet.nd.array', 'nd.array', (['v'], {}), '(v)\n', (2558, 2561), False, 'from mxnet import nd, autograd\n'), ((886, 918), 'mxnet.gluon.nn.Embedding', 'nn.Embedding', (['(256)'], {'output_dim': '(32)'}), '(256, output_dim=32)\n', (898, 918), False, 'from mxnet.gluon import nn\n'), ((976, 990), 'mxnet.gluon.nn.BatchNorm', 'nn.BatchNorm', ([], {}), '()\n', (988, 990), False, 'from mxnet.gluon import nn\n'), ((1044, 1122), 'mxnet.gluon.nn.Conv1D', 'nn.Conv1D', ([], {'channels': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '(0)', 'strides': '(1)'}), "(channels=32, kernel_size=3, activation='relu', padding=0, strides=1)\n", (1053, 1122), False, 'from mxnet.gluon import nn\n'), ((1324, 1344), 'mxnet.gluon.nn.GlobalMaxPool1D', 'nn.GlobalMaxPool1D', ([], {}), '()\n', (1342, 1344), False, 'from mxnet.gluon import nn\n'), ((1368, 1399), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (1376, 1399), False, 'from mxnet.gluon import nn\n'), ((1482, 1493), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['(1)'], {}), '(1)\n', (1490, 1493), False, 'from mxnet.gluon import nn\n'), ((1548, 1559), 'mxnet.nd.array', 'nd.array', (['x'], {}), '(x)\n', (1556, 1559), False, 'from mxnet import nd, autograd\n'), ((2246, 2263), 'mxnet.init.Uniform', 'mx.init.Uniform', ([], {}), '()\n', (2261, 2263), True, 'import mxnet as mx\n'), ((2576, 2593), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (2591, 2593), False, 'from mxnet import nd, autograd\n'), ((2641, 2660), 'mxnet.gluon.loss.L2Loss', 'gluon.loss.L2Loss', ([], {}), '()\n', (2658, 2660), 
True, 'import mxnet.gluon as gluon\n'), ((2700, 2723), 'mxnet.autograd.backward', 'autograd.backward', (['loss'], {}), '(loss)\n', (2717, 2723), False, 'from mxnet import nd, autograd\n'), ((2873, 2886), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2883, 2886), True, 'import numpy as np\n'), ((2934, 2959), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2948, 2959), True, 'import numpy as np\n')] |
import pickle as pkl
import numpy as np
import time
t1 = time.time()
gsan_keep_data_list, gsan_right_data_list,gsan_left_data_list = [], [], []
for i in range(60):
with open(f"new_data/new_data_{i}.pkl","rb") as f:
_ = pkl.load(f)
data = _['data']
label = _['label']
gsan_left_number = gsan_right_number = gsan_keep_number = 0
if (label == 1).any():
gsan_left_data = data[label==1]
gsan_left_data_list.append(gsan_left_data)
gsan_left_number = gsan_left_data.shape[0]
if (label == -1).any():
gsan_right_data = data[label==-1]
gsan_right_data_list.append(gsan_right_data)
gsan_right_number = gsan_right_data.shape[0]
if (label == 0).any():
gsan_keep_number = max(gsan_left_number,gsan_right_number)*10
gsan_keep_data = data[label==0]
gsan_keep_data_list.append(gsan_keep_data[:gsan_keep_number])
print(i,gsan_left_data.shape,gsan_right_data.shape,gsan_keep_data.shape)
gsan_keep_data_array = np.vstack(gsan_keep_data_list)
gsan_right_data_array = np.vstack(gsan_right_data_list)
gsan_left_data_array = np.vstack(gsan_left_data_list)
print("Totally: ",gsan_left_data_array.shape,gsan_right_data_array.shape,gsan_keep_data_array.shape)
gsan_left_data_array = np.transpose(gsan_left_data_array,axes=(0,2,1,3))
gsan_right_data_array = np.transpose(gsan_right_data_array,axes=(0,2,1,3))
gsan_keep_data_array = np.transpose(gsan_keep_data_array,axes=(0,2,1,3))
print("Transposed:",gsan_left_data_array.shape,gsan_right_data_array.shape,gsan_keep_data_array.shape)
total = {
"right":gsan_right_data_array,
"left":gsan_left_data_array,
"keep":gsan_keep_data_array
}
with open("new_data/total.pkl","wb") as f:
pkl.dump(total,f)
# with open("new_data/left.pkl","wb") as f:
# pkl.dump(gsan_left_data_array,f)
# with open("new_data/right.pkl","wb") as f:
# pkl.dump(gsan_right_data_array,f)
# with open("new_data/keep.pkl","wb") as f:
# pkl.dump(gsan_keep_data_array,f)
t2 = time.time()
print(f"time : {t2-t1:.2f}") | [
"pickle.dump",
"pickle.load",
"numpy.vstack",
"numpy.transpose",
"time.time"
] | [((57, 68), 'time.time', 'time.time', ([], {}), '()\n', (66, 68), False, 'import time\n'), ((1003, 1033), 'numpy.vstack', 'np.vstack', (['gsan_keep_data_list'], {}), '(gsan_keep_data_list)\n', (1012, 1033), True, 'import numpy as np\n'), ((1058, 1089), 'numpy.vstack', 'np.vstack', (['gsan_right_data_list'], {}), '(gsan_right_data_list)\n', (1067, 1089), True, 'import numpy as np\n'), ((1113, 1143), 'numpy.vstack', 'np.vstack', (['gsan_left_data_list'], {}), '(gsan_left_data_list)\n', (1122, 1143), True, 'import numpy as np\n'), ((1269, 1322), 'numpy.transpose', 'np.transpose', (['gsan_left_data_array'], {'axes': '(0, 2, 1, 3)'}), '(gsan_left_data_array, axes=(0, 2, 1, 3))\n', (1281, 1322), True, 'import numpy as np\n'), ((1343, 1397), 'numpy.transpose', 'np.transpose', (['gsan_right_data_array'], {'axes': '(0, 2, 1, 3)'}), '(gsan_right_data_array, axes=(0, 2, 1, 3))\n', (1355, 1397), True, 'import numpy as np\n'), ((1417, 1470), 'numpy.transpose', 'np.transpose', (['gsan_keep_data_array'], {'axes': '(0, 2, 1, 3)'}), '(gsan_keep_data_array, axes=(0, 2, 1, 3))\n', (1429, 1470), True, 'import numpy as np\n'), ((2005, 2016), 'time.time', 'time.time', ([], {}), '()\n', (2014, 2016), False, 'import time\n'), ((1731, 1749), 'pickle.dump', 'pkl.dump', (['total', 'f'], {}), '(total, f)\n', (1739, 1749), True, 'import pickle as pkl\n'), ((231, 242), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (239, 242), True, 'import pickle as pkl\n')] |
# ###############################################################################################
# <NAME> ----------- 201313819
# ###############################################################################################
import matplotlib.pyplot as plotter
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
# ###############################################################################################
# Covid 19 Honduras
# Data founded at : https://covid19tracking.narrativa.com/es/panama/api.html
# Dataset was taken from 14-02-2020 to 11-11-2020
# ###############################################################################################
# ###############################################################################################
# Setting the data
# ###############################################################################################
cases = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, 7, 7, 10, 14, 15, 15, 22, 22, 22, 22, 23, 23, 24, 25, 25, 26, 31, 35, 41, 46, 46, 46, 46, 46, 47, 47, 55, 59, 61, 64, 66, 71, 75, 75, 76, 82, 83, 93, 99, 105, 107, 108, 108, 116, 121, 123, 133, 134, 138, 142, 146, 147, 151, 156, 167, 174, 180, 182, 188, 194, 196, 199, 201, 212, 217, 225, 234, 243, 248, 250, 258, 262, 271, 290, 294, 306, 310, 312, 322, 330, 336, 343, 349, 358, 363, 395, 405, 417, 426, 471, 479, 479, 485, 497, 542, 591, 605, 629, 639, 656, 677, 694, 704, 750, 771, 774, 789,
807, 825, 835, 857, 891, 900, 935, 988, 1006, 1011, 1061, 1098, 1116, 1166, 1214, 1259, 1312, 1337, 1368, 1377, 1384, 1400, 1423, 1446, 1465, 1476, 1495, 1506, 1515, 1533, 1542, 1548, 1567, 1575, 1583, 1593, 1608, 1619, 1632, 1643, 1654, 1683, 1703, 1747, 1803, 1827, 1842, 1858, 1873, 1888, 1924, 1954, 1984, 2006, 2007, 2023, 2034, 2044, 2049, 2058, 2065, 2079, 2087, 2087, 2102, 2122, 2146, 2166, 2184, 2204, 2206, 2222, 2249, 2271, 2288, 2289, 2301, 2323, 2380, 2386, 2399, 2422, 2433, 2447, 2466, 2477, 2492, 2504, 2512, 2521, 2528, 2533, 2552, 2556, 2563, 2568, 2576, 2582, 2596, 2604, 2604, 2617, 2623, 2633, 2639, 2652, 2661, 2669, 2669, 2675, 2688, 2706, 2730, 2736, 2741, 2745, 2745, 2765, 2765]
count_cases = []
val = 0
for i in cases:
val = val+i
count_cases.append(val)
print(len(count_cases))
days = []
for i in range(len(count_cases)):
days.append(i + 1)
count_cases = np.asarray(count_cases)
days = np.asarray(days)
count_cases = count_cases[:, np.newaxis]
days = days[:, np.newaxis]
# ###############################################################################################
# Prediction of day 350
# ###############################################################################################
sequence = np.linspace(days.min(), 350, 400).reshape(-1, 1)
# ###############################################################################################
# Create the model
# Grade 7
# ###############################################################################################
model = make_pipeline(PolynomialFeatures(7), LinearRegression())
model.fit(days, count_cases)
plotter.figure()
plotter.scatter(days, count_cases)
# ###############################################################################################
# Plotting the model
# ###############################################################################################
plotter.plot(sequence, model.predict(sequence), color="yellow")
plotter.title("Prediction of # of deaths for day 350 in Honduras - Covid-19")
plotter.show()
| [
"sklearn.preprocessing.PolynomialFeatures",
"numpy.asarray",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] | [((2744, 2767), 'numpy.asarray', 'np.asarray', (['count_cases'], {}), '(count_cases)\n', (2754, 2767), True, 'import numpy as np\n'), ((2775, 2791), 'numpy.asarray', 'np.asarray', (['days'], {}), '(days)\n', (2785, 2791), True, 'import numpy as np\n'), ((3577, 3593), 'matplotlib.pyplot.figure', 'plotter.figure', ([], {}), '()\n', (3591, 3593), True, 'import matplotlib.pyplot as plotter\n'), ((3594, 3628), 'matplotlib.pyplot.scatter', 'plotter.scatter', (['days', 'count_cases'], {}), '(days, count_cases)\n', (3609, 3628), True, 'import matplotlib.pyplot as plotter\n'), ((3947, 4024), 'matplotlib.pyplot.title', 'plotter.title', (['"""Prediction of # of deaths for day 350 in Honduras - Covid-19"""'], {}), "('Prediction of # of deaths for day 350 in Honduras - Covid-19')\n", (3960, 4024), True, 'import matplotlib.pyplot as plotter\n'), ((4025, 4039), 'matplotlib.pyplot.show', 'plotter.show', ([], {}), '()\n', (4037, 4039), True, 'import matplotlib.pyplot as plotter\n'), ((3505, 3526), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(7)'], {}), '(7)\n', (3523, 3526), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((3528, 3546), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3544, 3546), False, 'from sklearn.linear_model import LinearRegression\n')] |
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
from matplotlib.ticker import FormatStrFormatter
from plot_decision_regions import *
from rbf_kernel_pca import *
from numpy import cos, sin
from numpy import pi
# for sklearn 0.18's alternative syntax
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
if Version(sklearn_version) < '0.18':
from sklearn.grid_search import train_test_split
from sklearn.lda import LDA
else:
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
#############################################################################
# 2次元正規分布に従う乱数を生成
# 平均
mu = np.array([0, 0])
# 共分散
cov_d = np.array([[80, 0],
[0, 10]])
c = cos(pi/3)
s = sin(pi/3)
R = np.array([[c, -s],
[s, c]])
cov = np.dot(np.dot(R, cov_d), R.T)
print(cov)
N = 1000
np.random.seed(seed=1)
X = np.random.multivariate_normal(mu, cov, N)
print(X.shape)
plt.figure(1)
plt.axis('equal')
plt.scatter(X[:,0], X[:,1], color='r', marker='x')
# scikit-learn の KernelPCA で確認してみる
# 自前実装版と比べると、値のスケールが違う
# 線形カーネル
scikit_kpca = KernelPCA(n_components=2, kernel='linear')
# ガウスカーネル
#g = 1
#scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=g)
X_skernpca = scikit_kpca.fit_transform(X)
# 自前実装版
X_kpca = linear_kernel_pca(X, 2)
#X_kpca = rbf_kernel_pca(X, g, 2)
# 比を見てみる
print(X_skernpca / X_kpca)
plt.figure(2)
plt.axis('equal')
plt.scatter(X_skernpca[:, 0], X_skernpca[:, 1], color='b', marker='x')
plt.figure(3)
plt.axis('equal')
plt.scatter(X_kpca[:, 0], X_kpca[:, 1], color='g', marker='x')
plt.show()
| [
"numpy.random.multivariate_normal",
"distutils.version.LooseVersion",
"numpy.array",
"matplotlib.pyplot.figure",
"sklearn.decomposition.KernelPCA",
"numpy.dot",
"numpy.random.seed",
"numpy.cos",
"matplotlib.pyplot.scatter",
"numpy.sin",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((1245, 1261), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1253, 1261), True, 'import numpy as np\n'), ((1276, 1304), 'numpy.array', 'np.array', (['[[80, 0], [0, 10]]'], {}), '([[80, 0], [0, 10]])\n', (1284, 1304), True, 'import numpy as np\n'), ((1327, 1338), 'numpy.cos', 'cos', (['(pi / 3)'], {}), '(pi / 3)\n', (1330, 1338), False, 'from numpy import cos, sin\n'), ((1341, 1352), 'numpy.sin', 'sin', (['(pi / 3)'], {}), '(pi / 3)\n', (1344, 1352), False, 'from numpy import cos, sin\n'), ((1355, 1382), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (1363, 1382), True, 'import numpy as np\n'), ((1454, 1476), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(1)'}), '(seed=1)\n', (1468, 1476), True, 'import numpy as np\n'), ((1481, 1522), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'cov', 'N'], {}), '(mu, cov, N)\n', (1510, 1522), True, 'import numpy as np\n'), ((1539, 1552), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1549, 1552), True, 'import matplotlib.pyplot as plt\n'), ((1553, 1570), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1561, 1570), True, 'import matplotlib.pyplot as plt\n'), ((1571, 1623), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'color': '"""r"""', 'marker': '"""x"""'}), "(X[:, 0], X[:, 1], color='r', marker='x')\n", (1582, 1623), True, 'import matplotlib.pyplot as plt\n'), ((1705, 1747), 'sklearn.decomposition.KernelPCA', 'KernelPCA', ([], {'n_components': '(2)', 'kernel': '"""linear"""'}), "(n_components=2, kernel='linear')\n", (1714, 1747), False, 'from sklearn.decomposition import KernelPCA\n'), ((1986, 1999), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (1996, 1999), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2017), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2008, 2017), True, 'import matplotlib.pyplot as 
plt\n'), ((2018, 2088), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_skernpca[:, 0]', 'X_skernpca[:, 1]'], {'color': '"""b"""', 'marker': '"""x"""'}), "(X_skernpca[:, 0], X_skernpca[:, 1], color='b', marker='x')\n", (2029, 2088), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2103), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (2100, 2103), True, 'import matplotlib.pyplot as plt\n'), ((2104, 2121), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2112, 2121), True, 'import matplotlib.pyplot as plt\n'), ((2122, 2184), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_kpca[:, 0]', 'X_kpca[:, 1]'], {'color': '"""g"""', 'marker': '"""x"""'}), "(X_kpca[:, 0], X_kpca[:, 1], color='g', marker='x')\n", (2133, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2186, 2196), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2194, 2196), True, 'import matplotlib.pyplot as plt\n'), ((873, 897), 'distutils.version.LooseVersion', 'Version', (['sklearn_version'], {}), '(sklearn_version)\n', (880, 897), True, 'from distutils.version import LooseVersion as Version\n'), ((1410, 1426), 'numpy.dot', 'np.dot', (['R', 'cov_d'], {}), '(R, cov_d)\n', (1416, 1426), True, 'import numpy as np\n')] |
# pylint: skip-file
import pytest
import numpy as np
import pyomo.environ as pe
from galini.core import LinearExpression, QuadraticExpression, SumExpression
from galini.pyomo import problem_from_pyomo_model
from galini.branch_and_bound.relaxations import ConvexRelaxation, LinearRelaxation
from galini.special_structure import propagate_special_structure, perform_fbbt
from galini.util import print_problem, expr_to_str
def _convex_relaxation(problem):
bounds = perform_fbbt(
problem,
maxiter=10,
timelimit=60,
)
bounds, monotonicity, convexity = \
propagate_special_structure(problem, bounds)
return ConvexRelaxation(problem, bounds, monotonicity, convexity)
def _linear_relaxation(problem):
bounds = perform_fbbt(
problem,
maxiter=10,
timelimit=60,
)
bounds, monotonicity, convexity = \
propagate_special_structure(problem, bounds)
return LinearRelaxation(problem, bounds, monotonicity, convexity)
def test_convex_relaxation_of_linear_problem():
m = pe.ConcreteModel()
m.I = range(10)
m.x = pe.Var(m.I)
m.obj = pe.Objective(expr=pe.quicksum(m.x[i] for i in m.I) + 2.0)
m.c0 = pe.Constraint(expr=pe.quicksum(2 * m.x[i] for i in m.I) - 4.0 >= 0)
dag = problem_from_pyomo_model(m)
relaxation = _convex_relaxation(dag)
relaxed = relaxation.relax(dag)
assert relaxed.objective
assert len(relaxed.constraints) == 1
objective = relaxed.objective
constraint = relaxed.constraints[0]
assert isinstance(objective.root_expr, LinearExpression)
assert len(objective.root_expr.children) == 10
assert np.isclose(objective.root_expr.constant_term, 2.0)
assert isinstance(constraint.root_expr, LinearExpression)
assert len(constraint.root_expr.children) == 10
assert np.isclose(constraint.root_expr.constant_term, -4.0)
def test_convex_relaxation_with_quadratic_only():
m = pe.ConcreteModel()
m.I = range(10)
m.x = pe.Var(m.I)
m.obj = pe.Objective(expr=pe.quicksum(m.x[i]*m.x[i] for i in m.I))
m.c0 = pe.Constraint(expr=pe.quicksum(2 * m.x[i] * m.x[i] for i in m.I) >= 0)
dag = problem_from_pyomo_model(m)
relaxation = _convex_relaxation(dag)
relaxed = relaxation.relax(dag)
assert relaxed.objective
# original constraint + 10 for disaggregated squares
assert len(relaxed.constraints) == 1 + 10
objective = relaxed.objective
constraint = relaxed.constraints[0]
assert isinstance(objective.root_expr, LinearExpression)
assert len(objective.root_expr.children) == 10
for constraint in relaxed.constraints[:-1]:
assert isinstance(constraint.root_expr, SumExpression)
children = constraint.root_expr.children
assert len(children) == 2
assert isinstance(children[0], LinearExpression)
assert isinstance(children[1], QuadraticExpression)
constraint = relaxed.constraints[-1]
assert len(constraint.root_expr.children) == 10
assert isinstance(constraint.root_expr, LinearExpression)
def test_convex_relaxation_with_quadratic_and_linear():
m = pe.ConcreteModel()
m.I = range(10)
m.x = pe.Var(m.I)
m.obj = pe.Objective(
expr=pe.quicksum(m.x[i]*m.x[i] for i in m.I) + pe.quicksum(m.x[i] for i in m.I)
)
m.c0 = pe.Constraint(
expr=pe.quicksum(2 * m.x[i] * m.x[i] for i in m.I) + pe.quicksum(m.x[i] for i in m.I) >= 0
)
dag = problem_from_pyomo_model(m)
relaxation = _convex_relaxation(dag)
relaxed = relaxation.relax(dag)
print_problem(relaxed)
assert relaxed.objective
assert len(relaxed.constraints) == 1 + 10
objective = relaxed.objective
assert isinstance(objective.root_expr, SumExpression)
assert all(
isinstance(c, LinearExpression)
for c in objective.root_expr.children
)
for constraint in relaxed.constraints[:-1]:
assert isinstance(constraint.root_expr, SumExpression)
children = constraint.root_expr.children
assert len(children) == 2
assert isinstance(children[0], LinearExpression)
assert isinstance(children[1], QuadraticExpression)
constraint = relaxed.constraints[-1]
assert all(
isinstance(c, LinearExpression)
for c in constraint.root_expr.children
)
def test_linear_relaxation_with_quadratic_and_linear():
m = pe.ConcreteModel()
m.I = range(10)
m.x = pe.Var(m.I, bounds=(0, 1))
m.obj = pe.Objective(
expr=pe.quicksum(m.x[i]*m.x[i] for i in m.I) + pe.quicksum(m.x[i] for i in m.I)
)
m.c0 = pe.Constraint(
expr=pe.quicksum(2 * m.x[i] * m.x[i] for i in m.I) + pe.quicksum(m.x[i] for i in m.I) >= 0
)
dag = problem_from_pyomo_model(m)
relaxation = _linear_relaxation(dag)
relaxed = relaxation.relax(dag)
print_problem(relaxed)
assert relaxed.objective
# 1 objective, 1 c0, 4 * 10 x^2 (3 mccormick, 1 midpoint)
assert len(relaxed.constraints) == 1 + 1 + 4*10
objective = relaxed.objective
constraint = relaxed.constraint('c0')
assert isinstance(objective.root_expr, LinearExpression)
assert isinstance(constraint.root_expr, SumExpression)
# Root is only objvar
assert len(objective.root_expr.children) == 1
c0, c1 = constraint.root_expr.children
assert isinstance(c0, LinearExpression)
assert isinstance(c1, LinearExpression)
assert len(c0.children) == 10
assert len(c1.children) == 10
| [
"galini.special_structure.propagate_special_structure",
"numpy.isclose",
"galini.special_structure.perform_fbbt",
"pyomo.environ.quicksum",
"galini.util.print_problem",
"pyomo.environ.Var",
"galini.branch_and_bound.relaxations.ConvexRelaxation",
"galini.pyomo.problem_from_pyomo_model",
"galini.branc... | [((468, 515), 'galini.special_structure.perform_fbbt', 'perform_fbbt', (['problem'], {'maxiter': '(10)', 'timelimit': '(60)'}), '(problem, maxiter=10, timelimit=60)\n', (480, 515), False, 'from galini.special_structure import propagate_special_structure, perform_fbbt\n'), ((596, 640), 'galini.special_structure.propagate_special_structure', 'propagate_special_structure', (['problem', 'bounds'], {}), '(problem, bounds)\n', (623, 640), False, 'from galini.special_structure import propagate_special_structure, perform_fbbt\n'), ((652, 710), 'galini.branch_and_bound.relaxations.ConvexRelaxation', 'ConvexRelaxation', (['problem', 'bounds', 'monotonicity', 'convexity'], {}), '(problem, bounds, monotonicity, convexity)\n', (668, 710), False, 'from galini.branch_and_bound.relaxations import ConvexRelaxation, LinearRelaxation\n'), ((759, 806), 'galini.special_structure.perform_fbbt', 'perform_fbbt', (['problem'], {'maxiter': '(10)', 'timelimit': '(60)'}), '(problem, maxiter=10, timelimit=60)\n', (771, 806), False, 'from galini.special_structure import propagate_special_structure, perform_fbbt\n'), ((887, 931), 'galini.special_structure.propagate_special_structure', 'propagate_special_structure', (['problem', 'bounds'], {}), '(problem, bounds)\n', (914, 931), False, 'from galini.special_structure import propagate_special_structure, perform_fbbt\n'), ((943, 1001), 'galini.branch_and_bound.relaxations.LinearRelaxation', 'LinearRelaxation', (['problem', 'bounds', 'monotonicity', 'convexity'], {}), '(problem, bounds, monotonicity, convexity)\n', (959, 1001), False, 'from galini.branch_and_bound.relaxations import ConvexRelaxation, LinearRelaxation\n'), ((1060, 1078), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (1076, 1078), True, 'import pyomo.environ as pe\n'), ((1109, 1120), 'pyomo.environ.Var', 'pe.Var', (['m.I'], {}), '(m.I)\n', (1115, 1120), True, 'import pyomo.environ as pe\n'), ((1280, 1307), 
'galini.pyomo.problem_from_pyomo_model', 'problem_from_pyomo_model', (['m'], {}), '(m)\n', (1304, 1307), False, 'from galini.pyomo import problem_from_pyomo_model\n'), ((1656, 1706), 'numpy.isclose', 'np.isclose', (['objective.root_expr.constant_term', '(2.0)'], {}), '(objective.root_expr.constant_term, 2.0)\n', (1666, 1706), True, 'import numpy as np\n'), ((1833, 1885), 'numpy.isclose', 'np.isclose', (['constraint.root_expr.constant_term', '(-4.0)'], {}), '(constraint.root_expr.constant_term, -4.0)\n', (1843, 1885), True, 'import numpy as np\n'), ((1946, 1964), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (1962, 1964), True, 'import pyomo.environ as pe\n'), ((1995, 2006), 'pyomo.environ.Var', 'pe.Var', (['m.I'], {}), '(m.I)\n', (2001, 2006), True, 'import pyomo.environ as pe\n'), ((2170, 2197), 'galini.pyomo.problem_from_pyomo_model', 'problem_from_pyomo_model', (['m'], {}), '(m)\n', (2194, 2197), False, 'from galini.pyomo import problem_from_pyomo_model\n'), ((3130, 3148), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (3146, 3148), True, 'import pyomo.environ as pe\n'), ((3179, 3190), 'pyomo.environ.Var', 'pe.Var', (['m.I'], {}), '(m.I)\n', (3185, 3190), True, 'import pyomo.environ as pe\n'), ((3452, 3479), 'galini.pyomo.problem_from_pyomo_model', 'problem_from_pyomo_model', (['m'], {}), '(m)\n', (3476, 3479), False, 'from galini.pyomo import problem_from_pyomo_model\n'), ((3563, 3585), 'galini.util.print_problem', 'print_problem', (['relaxed'], {}), '(relaxed)\n', (3576, 3585), False, 'from galini.util import print_problem, expr_to_str\n'), ((4393, 4411), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (4409, 4411), True, 'import pyomo.environ as pe\n'), ((4442, 4468), 'pyomo.environ.Var', 'pe.Var', (['m.I'], {'bounds': '(0, 1)'}), '(m.I, bounds=(0, 1))\n', (4448, 4468), True, 'import pyomo.environ as pe\n'), ((4730, 4757), 'galini.pyomo.problem_from_pyomo_model', 'problem_from_pyomo_model', 
(['m'], {}), '(m)\n', (4754, 4757), False, 'from galini.pyomo import problem_from_pyomo_model\n'), ((4840, 4862), 'galini.util.print_problem', 'print_problem', (['relaxed'], {}), '(relaxed)\n', (4853, 4862), False, 'from galini.util import print_problem, expr_to_str\n'), ((2037, 2078), 'pyomo.environ.quicksum', 'pe.quicksum', (['(m.x[i] * m.x[i] for i in m.I)'], {}), '(m.x[i] * m.x[i] for i in m.I)\n', (2048, 2078), True, 'import pyomo.environ as pe\n'), ((1151, 1183), 'pyomo.environ.quicksum', 'pe.quicksum', (['(m.x[i] for i in m.I)'], {}), '(m.x[i] for i in m.I)\n', (1162, 1183), True, 'import pyomo.environ as pe\n'), ((2108, 2153), 'pyomo.environ.quicksum', 'pe.quicksum', (['(2 * m.x[i] * m.x[i] for i in m.I)'], {}), '(2 * m.x[i] * m.x[i] for i in m.I)\n', (2119, 2153), True, 'import pyomo.environ as pe\n'), ((3230, 3271), 'pyomo.environ.quicksum', 'pe.quicksum', (['(m.x[i] * m.x[i] for i in m.I)'], {}), '(m.x[i] * m.x[i] for i in m.I)\n', (3241, 3271), True, 'import pyomo.environ as pe\n'), ((3272, 3304), 'pyomo.environ.quicksum', 'pe.quicksum', (['(m.x[i] for i in m.I)'], {}), '(m.x[i] for i in m.I)\n', (3283, 3304), True, 'import pyomo.environ as pe\n'), ((4508, 4549), 'pyomo.environ.quicksum', 'pe.quicksum', (['(m.x[i] * m.x[i] for i in m.I)'], {}), '(m.x[i] * m.x[i] for i in m.I)\n', (4519, 4549), True, 'import pyomo.environ as pe\n'), ((4550, 4582), 'pyomo.environ.quicksum', 'pe.quicksum', (['(m.x[i] for i in m.I)'], {}), '(m.x[i] for i in m.I)\n', (4561, 4582), True, 'import pyomo.environ as pe\n'), ((1221, 1257), 'pyomo.environ.quicksum', 'pe.quicksum', (['(2 * m.x[i] for i in m.I)'], {}), '(2 * m.x[i] for i in m.I)\n', (1232, 1257), True, 'import pyomo.environ as pe\n'), ((3350, 3395), 'pyomo.environ.quicksum', 'pe.quicksum', (['(2 * m.x[i] * m.x[i] for i in m.I)'], {}), '(2 * m.x[i] * m.x[i] for i in m.I)\n', (3361, 3395), True, 'import pyomo.environ as pe\n'), ((3398, 3430), 'pyomo.environ.quicksum', 'pe.quicksum', (['(m.x[i] for i in m.I)'], {}), 
'(m.x[i] for i in m.I)\n', (3409, 3430), True, 'import pyomo.environ as pe\n'), ((4628, 4673), 'pyomo.environ.quicksum', 'pe.quicksum', (['(2 * m.x[i] * m.x[i] for i in m.I)'], {}), '(2 * m.x[i] * m.x[i] for i in m.I)\n', (4639, 4673), True, 'import pyomo.environ as pe\n'), ((4676, 4708), 'pyomo.environ.quicksum', 'pe.quicksum', (['(m.x[i] for i in m.I)'], {}), '(m.x[i] for i in m.I)\n', (4687, 4708), True, 'import pyomo.environ as pe\n')] |
import numpy as np
from numpy.testing import assert_allclose
from elastica.memory_block.memory_block_rigid_body import MemoryBlockRigidBody
import pytest
from elastica.utils import Tolerance
class MockRigidBody:
def __init__(self):
self.radius = np.random.randn()
self.length = np.random.randn()
self.density = np.random.randn()
self.volume = np.random.randn()
self.mass = np.random.randn()
self.position_collection = np.random.randn(3, 1)
self.velocity_collection = np.random.randn(3, 1)
self.acceleration_collection = np.random.randn(3, 1)
self.omega_collection = np.random.randn(3, 1)
self.alpha_collection = np.random.randn(3, 1)
self.director_collection = np.random.randn(3, 3, 1)
self.external_forces = np.random.randn(3, 1)
self.external_torques = np.random.randn(3, 1)
self.mass_second_moment_of_inertia = np.random.randn(3, 3, 1)
self.inv_mass_second_moment_of_inertia = np.random.randn(3, 3, 1)
@pytest.mark.parametrize("n_rods", [1, 2, 5, 6])
def test_block_structure_scalar_validity(n_rods):
    """
    Verify the scalar variables of the rigid-body block structure.

    For every scalar field it is checked that the block structure shares
    its memory with the individual rigid bodies and that the stored
    values agree with the originals.

    Parameters
    ----------
    n_rods

    Returns
    -------

    """
    world_bodies = [MockRigidBody() for _ in range(n_rods)]
    block_structure = MemoryBlockRigidBody(world_bodies)

    scalar_attrs = ("radius", "length", "density", "volume", "mass")
    for body_idx in range(n_rods):
        body = world_bodies[body_idx]
        for attr in scalar_attrs:
            block_view = getattr(block_structure, attr)
            body_value = getattr(body, attr)
            # Both the named view and the packed scalar buffer must alias
            # the rigid body's memory.
            assert np.shares_memory(block_view, body_value)
            assert np.shares_memory(
                block_structure.scalar_dofs_in_rigid_bodies, body_value
            )
            assert_allclose(
                block_view[body_idx : body_idx + 1],
                body_value,
                atol=Tolerance.atol(),
            )
@pytest.mark.parametrize("n_rods", [1, 2, 5, 6])
def test_block_structure_vectors_validity(n_rods):
    """
    Verify the vector variables of the rigid-body block structure.

    For every (3, 1) vector field it is checked that the block structure
    shares its memory with the individual rigid bodies and that the
    stored values agree with the originals.

    Parameters
    ----------
    n_rods

    Returns
    -------

    """
    world_bodies = [MockRigidBody() for _ in range(n_rods)]
    block_structure = MemoryBlockRigidBody(world_bodies)

    vector_attrs = (
        "position_collection",
        "external_forces",
        "external_torques",
    )
    for body_idx in range(n_rods):
        body = world_bodies[body_idx]
        for attr in vector_attrs:
            block_view = getattr(block_structure, attr)
            body_value = getattr(body, attr)
            # Both the named view and the packed vector buffer must alias
            # the rigid body's memory.
            assert np.shares_memory(block_view, body_value)
            assert np.shares_memory(
                block_structure.vector_dofs_in_rigid_bodies, body_value
            )
            assert_allclose(block_view[..., body_idx : body_idx + 1], body_value)
@pytest.mark.parametrize("n_rods", [1, 2, 5, 6])
def test_block_structure_matrix_validity(n_rods):
    """
    Verify the matrix variables of the rigid-body block structure.

    For every (3, 3, 1) matrix field it is checked that the block
    structure shares its memory with the individual rigid bodies and that
    the stored values agree with the originals.

    Parameters
    ----------
    n_rods

    Returns
    -------

    """
    world_bodies = [MockRigidBody() for _ in range(n_rods)]
    block_structure = MemoryBlockRigidBody(world_bodies)

    matrix_attrs = (
        "director_collection",
        "mass_second_moment_of_inertia",
        "inv_mass_second_moment_of_inertia",
    )
    for body_idx in range(n_rods):
        body = world_bodies[body_idx]
        for attr in matrix_attrs:
            block_view = getattr(block_structure, attr)
            body_value = getattr(body, attr)
            # Both the named view and the packed matrix buffer must alias
            # the rigid body's memory.
            assert np.shares_memory(block_view, body_value)
            assert np.shares_memory(
                block_structure.matrix_dofs_in_rigid_bodies, body_value
            )
            assert_allclose(block_view[..., body_idx : body_idx + 1], body_value)
@pytest.mark.parametrize("n_rods", [1, 2, 5, 6])
def test_block_structure_symplectic_stepper_variables_validity(n_rods):
    """
    Verify the symplectic-stepper variables of the rigid-body block
    structure.

    For the velocity, omega, acceleration and alpha collections it is
    checked that the block structure shares memory with the individual
    rigid bodies, that the stored values agree, and that the packed rate
    collection arrays have the expected layout.

    Parameters
    ----------
    n_rods

    Returns
    -------

    """
    world_bodies = [MockRigidBody() for _ in range(n_rods)]
    block_structure = MemoryBlockRigidBody(world_bodies)

    rate_attrs = (
        "velocity_collection",
        "omega_collection",
        "acceleration_collection",
        "alpha_collection",
    )
    for body_idx in range(n_rods):
        body = world_bodies[body_idx]
        for attr in rate_attrs:
            block_view = getattr(block_structure, attr)
            body_value = getattr(body, attr)
            # Both the named view and the packed rate buffer must alias
            # the rigid body's memory.
            assert np.shares_memory(block_view, body_value)
            assert np.shares_memory(block_structure.rate_collection, body_value)
            assert_allclose(block_view[..., body_idx : body_idx + 1], body_value)

    # Validity of the packed rate collection arrays.
    assert np.shares_memory(
        block_structure.rate_collection, block_structure.v_w_collection
    )
    assert np.shares_memory(
        block_structure.rate_collection, block_structure.dvdt_dwdt_collection
    )
    expected_shape = (2, 3 * block_structure.n_nodes)
    assert block_structure.v_w_collection.shape == expected_shape
    assert block_structure.dvdt_dwdt_collection.shape == expected_shape
if __name__ == "__main__":
    # Allow executing this test module directly as a script: hand the
    # file to pytest's programmatic entry point.
    from pytest import main

    main([__file__])
| [
"elastica.memory_block.memory_block_rigid_body.MemoryBlockRigidBody",
"numpy.testing.assert_allclose",
"pytest.main",
"pytest.mark.parametrize",
"numpy.shares_memory",
"numpy.random.randn",
"elastica.utils.Tolerance.atol"
] | [((1039, 1086), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rods"""', '[1, 2, 5, 6]'], {}), "('n_rods', [1, 2, 5, 6])\n", (1062, 1086), False, 'import pytest\n'), ((3478, 3525), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rods"""', '[1, 2, 5, 6]'], {}), "('n_rods', [1, 2, 5, 6])\n", (3501, 3525), False, 'import pytest\n'), ((5369, 5416), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rods"""', '[1, 2, 5, 6]'], {}), "('n_rods', [1, 2, 5, 6])\n", (5392, 5416), False, 'import pytest\n'), ((7484, 7531), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_rods"""', '[1, 2, 5, 6]'], {}), "('n_rods', [1, 2, 5, 6])\n", (7507, 7531), False, 'import pytest\n'), ((1547, 1581), 'elastica.memory_block.memory_block_rigid_body.MemoryBlockRigidBody', 'MemoryBlockRigidBody', (['world_bodies'], {}), '(world_bodies)\n', (1567, 1581), False, 'from elastica.memory_block.memory_block_rigid_body import MemoryBlockRigidBody\n'), ((3986, 4020), 'elastica.memory_block.memory_block_rigid_body.MemoryBlockRigidBody', 'MemoryBlockRigidBody', (['world_bodies'], {}), '(world_bodies)\n', (4006, 4020), False, 'from elastica.memory_block.memory_block_rigid_body import MemoryBlockRigidBody\n'), ((5877, 5911), 'elastica.memory_block.memory_block_rigid_body.MemoryBlockRigidBody', 'MemoryBlockRigidBody', (['world_bodies'], {}), '(world_bodies)\n', (5897, 5911), False, 'from elastica.memory_block.memory_block_rigid_body import MemoryBlockRigidBody\n'), ((8013, 8047), 'elastica.memory_block.memory_block_rigid_body.MemoryBlockRigidBody', 'MemoryBlockRigidBody', (['world_bodies'], {}), '(world_bodies)\n', (8033, 8047), False, 'from elastica.memory_block.memory_block_rigid_body import MemoryBlockRigidBody\n'), ((9862, 9948), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.rate_collection', 'block_structure.v_w_collection'], {}), '(block_structure.rate_collection, block_structure.\n v_w_collection)\n', (9878, 9948), True, 'import 
numpy as np\n'), ((9969, 10061), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.rate_collection', 'block_structure.dvdt_dwdt_collection'], {}), '(block_structure.rate_collection, block_structure.\n dvdt_dwdt_collection)\n', (9985, 10061), True, 'import numpy as np\n'), ((10330, 10346), 'pytest.main', 'main', (['[__file__]'], {}), '([__file__])\n', (10334, 10346), False, 'from pytest import main\n'), ((262, 279), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (277, 279), True, 'import numpy as np\n'), ((302, 319), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (317, 319), True, 'import numpy as np\n'), ((343, 360), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (358, 360), True, 'import numpy as np\n'), ((383, 400), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (398, 400), True, 'import numpy as np\n'), ((421, 438), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (436, 438), True, 'import numpy as np\n'), ((475, 496), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (490, 496), True, 'import numpy as np\n'), ((532, 553), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (547, 553), True, 'import numpy as np\n'), ((593, 614), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (608, 614), True, 'import numpy as np\n'), ((647, 668), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (662, 668), True, 'import numpy as np\n'), ((701, 722), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (716, 722), True, 'import numpy as np\n'), ((758, 782), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)', '(1)'], {}), '(3, 3, 1)\n', (773, 782), True, 'import numpy as np\n'), ((815, 836), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (830, 836), True, 'import numpy as np\n'), ((869, 890), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), 
'(3, 1)\n', (884, 890), True, 'import numpy as np\n'), ((937, 961), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)', '(1)'], {}), '(3, 3, 1)\n', (952, 961), True, 'import numpy as np\n'), ((1011, 1035), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)', '(1)'], {}), '(3, 3, 1)\n', (1026, 1035), True, 'import numpy as np\n'), ((1644, 1708), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.radius', 'world_bodies[i].radius'], {}), '(block_structure.radius, world_bodies[i].radius)\n', (1660, 1708), True, 'import numpy as np\n'), ((1724, 1814), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.scalar_dofs_in_rigid_bodies', 'world_bodies[i].radius'], {}), '(block_structure.scalar_dofs_in_rigid_bodies, world_bodies[\n i].radius)\n', (1740, 1814), True, 'import numpy as np\n'), ((2018, 2082), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.length', 'world_bodies[i].length'], {}), '(block_structure.length, world_bodies[i].length)\n', (2034, 2082), True, 'import numpy as np\n'), ((2098, 2188), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.scalar_dofs_in_rigid_bodies', 'world_bodies[i].length'], {}), '(block_structure.scalar_dofs_in_rigid_bodies, world_bodies[\n i].length)\n', (2114, 2188), True, 'import numpy as np\n'), ((2393, 2459), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.density', 'world_bodies[i].density'], {}), '(block_structure.density, world_bodies[i].density)\n', (2409, 2459), True, 'import numpy as np\n'), ((2475, 2566), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.scalar_dofs_in_rigid_bodies', 'world_bodies[i].density'], {}), '(block_structure.scalar_dofs_in_rigid_bodies, world_bodies[\n i].density)\n', (2491, 2566), True, 'import numpy as np\n'), ((2772, 2836), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.volume', 'world_bodies[i].volume'], {}), '(block_structure.volume, world_bodies[i].volume)\n', (2788, 2836), True, 'import numpy as 
np\n'), ((2852, 2942), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.scalar_dofs_in_rigid_bodies', 'world_bodies[i].volume'], {}), '(block_structure.scalar_dofs_in_rigid_bodies, world_bodies[\n i].volume)\n', (2868, 2942), True, 'import numpy as np\n'), ((3144, 3204), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.mass', 'world_bodies[i].mass'], {}), '(block_structure.mass, world_bodies[i].mass)\n', (3160, 3204), True, 'import numpy as np\n'), ((3220, 3308), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.scalar_dofs_in_rigid_bodies', 'world_bodies[i].mass'], {}), '(block_structure.scalar_dofs_in_rigid_bodies, world_bodies[\n i].mass)\n', (3236, 3308), True, 'import numpy as np\n'), ((4096, 4191), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.position_collection', 'world_bodies[i].position_collection'], {}), '(block_structure.position_collection, world_bodies[i].\n position_collection)\n', (4112, 4191), True, 'import numpy as np\n'), ((4224, 4327), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.vector_dofs_in_rigid_bodies', 'world_bodies[i].position_collection'], {}), '(block_structure.vector_dofs_in_rigid_bodies, world_bodies[\n i].position_collection)\n', (4240, 4327), True, 'import numpy as np\n'), ((4366, 4473), 'numpy.testing.assert_allclose', 'assert_allclose', (['block_structure.position_collection[..., i:i + 1]', 'world_bodies[i].position_collection'], {}), '(block_structure.position_collection[..., i:i + 1],\n world_bodies[i].position_collection)\n', (4381, 4473), False, 'from numpy.testing import assert_allclose\n'), ((4549, 4636), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.external_forces', 'world_bodies[i].external_forces'], {}), '(block_structure.external_forces, world_bodies[i].\n external_forces)\n', (4565, 4636), True, 'import numpy as np\n'), ((4669, 4768), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.vector_dofs_in_rigid_bodies', 
'world_bodies[i].external_forces'], {}), '(block_structure.vector_dofs_in_rigid_bodies, world_bodies[\n i].external_forces)\n', (4685, 4768), True, 'import numpy as np\n'), ((4794, 4894), 'numpy.testing.assert_allclose', 'assert_allclose', (['block_structure.external_forces[..., i:i + 1]', 'world_bodies[i].external_forces'], {}), '(block_structure.external_forces[..., i:i + 1], world_bodies\n [i].external_forces)\n', (4809, 4894), False, 'from numpy.testing import assert_allclose\n'), ((4970, 5059), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.external_torques', 'world_bodies[i].external_torques'], {}), '(block_structure.external_torques, world_bodies[i].\n external_torques)\n', (4986, 5059), True, 'import numpy as np\n'), ((5092, 5192), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.vector_dofs_in_rigid_bodies', 'world_bodies[i].external_torques'], {}), '(block_structure.vector_dofs_in_rigid_bodies, world_bodies[\n i].external_torques)\n', (5108, 5192), True, 'import numpy as np\n'), ((5231, 5332), 'numpy.testing.assert_allclose', 'assert_allclose', (['block_structure.external_torques[..., i:i + 1]', 'world_bodies[i].external_torques'], {}), '(block_structure.external_torques[..., i:i + 1],\n world_bodies[i].external_torques)\n', (5246, 5332), False, 'from numpy.testing import assert_allclose\n'), ((5986, 6081), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.director_collection', 'world_bodies[i].director_collection'], {}), '(block_structure.director_collection, world_bodies[i].\n director_collection)\n', (6002, 6081), True, 'import numpy as np\n'), ((6114, 6217), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.matrix_dofs_in_rigid_bodies', 'world_bodies[i].director_collection'], {}), '(block_structure.matrix_dofs_in_rigid_bodies, world_bodies[\n i].director_collection)\n', (6130, 6217), True, 'import numpy as np\n'), ((6256, 6363), 'numpy.testing.assert_allclose', 'assert_allclose', 
(['block_structure.director_collection[..., i:i + 1]', 'world_bodies[i].director_collection'], {}), '(block_structure.director_collection[..., i:i + 1],\n world_bodies[i].director_collection)\n', (6271, 6363), False, 'from numpy.testing import assert_allclose\n'), ((6453, 6567), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.mass_second_moment_of_inertia', 'world_bodies[i].mass_second_moment_of_inertia'], {}), '(block_structure.mass_second_moment_of_inertia,\n world_bodies[i].mass_second_moment_of_inertia)\n', (6469, 6567), True, 'import numpy as np\n'), ((6614, 6727), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.matrix_dofs_in_rigid_bodies', 'world_bodies[i].mass_second_moment_of_inertia'], {}), '(block_structure.matrix_dofs_in_rigid_bodies, world_bodies[\n i].mass_second_moment_of_inertia)\n', (6630, 6727), True, 'import numpy as np\n'), ((6766, 6893), 'numpy.testing.assert_allclose', 'assert_allclose', (['block_structure.mass_second_moment_of_inertia[..., i:i + 1]', 'world_bodies[i].mass_second_moment_of_inertia'], {}), '(block_structure.mass_second_moment_of_inertia[..., i:i + 1],\n world_bodies[i].mass_second_moment_of_inertia)\n', (6781, 6893), False, 'from numpy.testing import assert_allclose\n'), ((6987, 7109), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.inv_mass_second_moment_of_inertia', 'world_bodies[i].inv_mass_second_moment_of_inertia'], {}), '(block_structure.inv_mass_second_moment_of_inertia,\n world_bodies[i].inv_mass_second_moment_of_inertia)\n', (7003, 7109), True, 'import numpy as np\n'), ((7156, 7273), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.matrix_dofs_in_rigid_bodies', 'world_bodies[i].inv_mass_second_moment_of_inertia'], {}), '(block_structure.matrix_dofs_in_rigid_bodies, world_bodies[\n i].inv_mass_second_moment_of_inertia)\n', (7172, 7273), True, 'import numpy as np\n'), ((7312, 7447), 'numpy.testing.assert_allclose', 'assert_allclose', 
(['block_structure.inv_mass_second_moment_of_inertia[..., i:i + 1]', 'world_bodies[i].inv_mass_second_moment_of_inertia'], {}), '(block_structure.inv_mass_second_moment_of_inertia[..., i:i +\n 1], world_bodies[i].inv_mass_second_moment_of_inertia)\n', (7327, 7447), False, 'from numpy.testing import assert_allclose\n'), ((8123, 8218), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.velocity_collection', 'world_bodies[i].velocity_collection'], {}), '(block_structure.velocity_collection, world_bodies[i].\n velocity_collection)\n', (8139, 8218), True, 'import numpy as np\n'), ((8251, 8342), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.rate_collection', 'world_bodies[i].velocity_collection'], {}), '(block_structure.rate_collection, world_bodies[i].\n velocity_collection)\n', (8267, 8342), True, 'import numpy as np\n'), ((8368, 8475), 'numpy.testing.assert_allclose', 'assert_allclose', (['block_structure.velocity_collection[..., i:i + 1]', 'world_bodies[i].velocity_collection'], {}), '(block_structure.velocity_collection[..., i:i + 1],\n world_bodies[i].velocity_collection)\n', (8383, 8475), False, 'from numpy.testing import assert_allclose\n'), ((8552, 8641), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.omega_collection', 'world_bodies[i].omega_collection'], {}), '(block_structure.omega_collection, world_bodies[i].\n omega_collection)\n', (8568, 8641), True, 'import numpy as np\n'), ((8674, 8762), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.rate_collection', 'world_bodies[i].omega_collection'], {}), '(block_structure.rate_collection, world_bodies[i].\n omega_collection)\n', (8690, 8762), True, 'import numpy as np\n'), ((8788, 8889), 'numpy.testing.assert_allclose', 'assert_allclose', (['block_structure.omega_collection[..., i:i + 1]', 'world_bodies[i].omega_collection'], {}), '(block_structure.omega_collection[..., i:i + 1],\n world_bodies[i].omega_collection)\n', (8803, 8889), False, 'from numpy.testing 
import assert_allclose\n'), ((8973, 9076), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.acceleration_collection', 'world_bodies[i].acceleration_collection'], {}), '(block_structure.acceleration_collection, world_bodies[i].\n acceleration_collection)\n', (8989, 9076), True, 'import numpy as np\n'), ((9122, 9217), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.rate_collection', 'world_bodies[i].acceleration_collection'], {}), '(block_structure.rate_collection, world_bodies[i].\n acceleration_collection)\n', (9138, 9217), True, 'import numpy as np\n'), ((9243, 9358), 'numpy.testing.assert_allclose', 'assert_allclose', (['block_structure.acceleration_collection[..., i:i + 1]', 'world_bodies[i].acceleration_collection'], {}), '(block_structure.acceleration_collection[..., i:i + 1],\n world_bodies[i].acceleration_collection)\n', (9258, 9358), False, 'from numpy.testing import assert_allclose\n'), ((9435, 9524), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.alpha_collection', 'world_bodies[i].alpha_collection'], {}), '(block_structure.alpha_collection, world_bodies[i].\n alpha_collection)\n', (9451, 9524), True, 'import numpy as np\n'), ((9557, 9645), 'numpy.shares_memory', 'np.shares_memory', (['block_structure.rate_collection', 'world_bodies[i].alpha_collection'], {}), '(block_structure.rate_collection, world_bodies[i].\n alpha_collection)\n', (9573, 9645), True, 'import numpy as np\n'), ((9671, 9772), 'numpy.testing.assert_allclose', 'assert_allclose', (['block_structure.alpha_collection[..., i:i + 1]', 'world_bodies[i].alpha_collection'], {}), '(block_structure.alpha_collection[..., i:i + 1],\n world_bodies[i].alpha_collection)\n', (9686, 9772), False, 'from numpy.testing import assert_allclose\n'), ((1957, 1973), 'elastica.utils.Tolerance.atol', 'Tolerance.atol', ([], {}), '()\n', (1971, 1973), False, 'from elastica.utils import Tolerance\n'), ((2331, 2347), 'elastica.utils.Tolerance.atol', 'Tolerance.atol', ([], {}), 
'()\n', (2345, 2347), False, 'from elastica.utils import Tolerance\n'), ((2711, 2727), 'elastica.utils.Tolerance.atol', 'Tolerance.atol', ([], {}), '()\n', (2725, 2727), False, 'from elastica.utils import Tolerance\n'), ((3085, 3101), 'elastica.utils.Tolerance.atol', 'Tolerance.atol', ([], {}), '()\n', (3099, 3101), False, 'from elastica.utils import Tolerance\n'), ((3447, 3463), 'elastica.utils.Tolerance.atol', 'Tolerance.atol', ([], {}), '()\n', (3461, 3463), False, 'from elastica.utils import Tolerance\n')] |
import sys
import matplotlib.pyplot as plt
import csv
import numpy

# Message sizes in bytes used for the benchmark runs.
y = numpy.array([10, 50, 100, 500, 1000, 5000, 10000, 50000])


def _to_mb_per_s(messages_per_second):
    """Convert messages/s measurements into MB/s for the sizes in ``y``."""
    return numpy.array(messages_per_second) * y / 1024 / 1024


zmq = _to_mb_per_s([8300000, 4900000, 3750000, 1250000, 776000, 210000, 120000, 78000])
jeromq = _to_mb_per_s([6100000, 4600000, 3400000, 1200000, 670000, 160000, 75000, 26000])
solo5 = _to_mb_per_s([1450000, 810000, 500000, 125000, 82000, 22000, 12000, 2560])
exe = _to_mb_per_s([405000, 400000, 390000, 302000, 250000, 100000, 81000, 37000])

plt.xscale('log')
# '-' is matplotlib's default solid line style, so the first series is
# drawn exactly as an unstyled plot() call would draw it.
for series, style in ((zmq, '-'), (jeromq, '--'), (solo5, '-.'), (exe, ':')):
    plt.plot(y, series, linestyle=style)
plt.legend(['libzmq', 'JeroMQ', 'Mirage-zmq (unikernel)', 'Mirage-zmq (executable)'], loc='upper left')
plt.title('Throughput vs message size')
plt.xlabel('Message size [Byte]')
plt.ylabel('Throughput [MB/s]')
plt.savefig('throughput.pdf')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend"
] | [((72, 129), 'numpy.array', 'numpy.array', (['[10, 50, 100, 500, 1000, 5000, 10000, 50000]'], {}), '([10, 50, 100, 500, 1000, 5000, 10000, 50000])\n', (83, 129), False, 'import numpy\n'), ((544, 561), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (554, 561), True, 'import matplotlib.pyplot as plt\n'), ((562, 578), 'matplotlib.pyplot.plot', 'plt.plot', (['y', 'zmq'], {}), '(y, zmq)\n', (570, 578), True, 'import matplotlib.pyplot as plt\n'), ((579, 614), 'matplotlib.pyplot.plot', 'plt.plot', (['y', 'jeromq'], {'linestyle': '"""--"""'}), "(y, jeromq, linestyle='--')\n", (587, 614), True, 'import matplotlib.pyplot as plt\n'), ((615, 649), 'matplotlib.pyplot.plot', 'plt.plot', (['y', 'solo5'], {'linestyle': '"""-."""'}), "(y, solo5, linestyle='-.')\n", (623, 649), True, 'import matplotlib.pyplot as plt\n'), ((650, 681), 'matplotlib.pyplot.plot', 'plt.plot', (['y', 'exe'], {'linestyle': '""":"""'}), "(y, exe, linestyle=':')\n", (658, 681), True, 'import matplotlib.pyplot as plt\n'), ((684, 791), 'matplotlib.pyplot.legend', 'plt.legend', (["['libzmq', 'JeroMQ', 'Mirage-zmq (unikernel)', 'Mirage-zmq (executable)']"], {'loc': '"""upper left"""'}), "(['libzmq', 'JeroMQ', 'Mirage-zmq (unikernel)',\n 'Mirage-zmq (executable)'], loc='upper left')\n", (694, 791), True, 'import matplotlib.pyplot as plt\n'), ((788, 827), 'matplotlib.pyplot.title', 'plt.title', (['"""Throughput vs message size"""'], {}), "('Throughput vs message size')\n", (797, 827), True, 'import matplotlib.pyplot as plt\n'), ((828, 861), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Message size [Byte]"""'], {}), "('Message size [Byte]')\n", (838, 861), True, 'import matplotlib.pyplot as plt\n'), ((862, 893), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Throughput [MB/s]"""'], {}), "('Throughput [MB/s]')\n", (872, 893), True, 'import matplotlib.pyplot as plt\n'), ((894, 923), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""throughput.pdf"""'], {}), "('throughput.pdf')\n", 
(905, 923), True, 'import matplotlib.pyplot as plt\n'), ((137, 222), 'numpy.array', 'numpy.array', (['[8300000, 4900000, 3750000, 1250000, 776000, 210000, 120000, 78000]'], {}), '([8300000, 4900000, 3750000, 1250000, 776000, 210000, 120000, 78000]\n )\n', (148, 222), False, 'import numpy\n'), ((245, 324), 'numpy.array', 'numpy.array', (['[6100000, 4600000, 3400000, 1200000, 670000, 160000, 75000, 26000]'], {}), '([6100000, 4600000, 3400000, 1200000, 670000, 160000, 75000, 26000])\n', (256, 324), False, 'import numpy\n'), ((351, 424), 'numpy.array', 'numpy.array', (['[1450000, 810000, 500000, 125000, 82000, 22000, 12000, 2560]'], {}), '([1450000, 810000, 500000, 125000, 82000, 22000, 12000, 2560])\n', (362, 424), False, 'import numpy\n'), ((449, 524), 'numpy.array', 'numpy.array', (['[405000, 400000, 390000, 302000, 250000, 100000, 81000, 37000]'], {}), '([405000, 400000, 390000, 302000, 250000, 100000, 81000, 37000])\n', (460, 524), False, 'import numpy\n')] |
"""
Copyright (c) 2017 - <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# -*- coding: utf-8 -*-
# Modified from https://raw.githubusercontent.com/Newmu/dcgan_code/master/lib/inits.py
# MIT License
from math import sqrt
import numpy as np
from .utils import sharedX
from .rng import np_rng
import theano
def not_shared(X, dtype=theano.config.floatX, name=None, target=None):
    # Drop-in replacement for ``sharedX`` that does no wrapping: it mirrors
    # the same signature but returns ``X`` unchanged (``dtype``, ``name``
    # and ``target`` are ignored).  Used when an initializer is called with
    # ``shared=False``.
    return X
# Initializations
def uniform(shape, scale=0.05, name=None, shared=True, target=None):
    """Sample weights uniformly from the interval [-scale, scale].

    Returns a theano shared variable when ``shared`` is True, otherwise
    the raw numpy array.
    """
    samples = np_rng.uniform(low=-scale, high=scale, size=shape)
    if shared:
        return sharedX(samples, name=name, target=target)
    return not_shared(samples, name=name, target=target)
def normal(shape, mean=0., std_dev=1., name=None, shared=True, target=None):
    """Sample weights from a Gaussian with the given mean and std deviation.

    Returns a theano shared variable when ``shared`` is True, otherwise
    the raw numpy array.
    """
    samples = np_rng.normal(loc=mean, scale=std_dev, size=shape)
    if shared:
        return sharedX(samples, name=name, target=target)
    return not_shared(samples, name=name, target=target)
def orthogonal(shape, scale=1.1, name=None, shared=True, target=None):
    """Orthogonal initialization via SVD of a Gaussian matrix.

    The weight tensor is flattened to 2-D, orthogonalized and reshaped
    back to ``shape``; the result is scaled by ``scale``.
    """
    flat = (shape[0], np.prod(shape[1:]))
    gaussian = np_rng.normal(0.0, 1.0, flat)
    u, _, v = np.linalg.svd(gaussian, full_matrices=False)
    # Pick whichever SVD factor has the flattened shape.
    ortho = u if u.shape == flat else v
    ortho = ortho.reshape(shape)
    result = scale * ortho[:shape[0], :shape[1]]
    if shared:
        return sharedX(result, name=name, target=target)
    return not_shared(result, name=name, target=target)
def constant(shape, c=0., name=None, shared=True, target=None):
    """Initialize every entry of the weight tensor to the constant ``c``."""
    values = c * np.ones(shape)
    if shared:
        return sharedX(values, name=name, target=target)
    return not_shared(values, name=name, target=target)
def glorot(shape, fan_in, fan_out, name=None, shared=True, target=None):
    """Glorot/Xavier uniform initialization.

    Samples uniformly with limit sqrt(12 / (fan_in + fan_out)), i.e. a
    variance of 2 / (fan_in + fan_out).
    """
    limit = sqrt(12. / (fan_in + fan_out))
    return uniform(shape, scale=limit, name=name, shared=shared, target=target)
def he(shape, fan_in, factor=sqrt(2.), name=None, shared=True, target=None):
    """He initialization: Gaussian with std ``factor * sqrt(1 / fan_in)``."""
    std = factor * sqrt(1. / fan_in)
    return normal(shape, mean=0., std_dev=std, name=name, shared=shared, target=target)
| [
"numpy.linalg.svd",
"numpy.prod",
"math.sqrt",
"numpy.ones"
] | [((2059, 2096), 'numpy.linalg.svd', 'np.linalg.svd', (['a'], {'full_matrices': '(False)'}), '(a, full_matrices=False)\n', (2072, 2096), True, 'import numpy as np\n'), ((2668, 2677), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (2672, 2677), False, 'from math import sqrt\n'), ((1981, 1999), 'numpy.prod', 'np.prod', (['shape[1:]'], {}), '(shape[1:])\n', (1988, 1999), True, 'import numpy as np\n'), ((2413, 2427), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (2420, 2427), True, 'import numpy as np\n'), ((2565, 2596), 'math.sqrt', 'sqrt', (['(12.0 / (fan_in + fan_out))'], {}), '(12.0 / (fan_in + fan_out))\n', (2569, 2596), False, 'from math import sqrt\n'), ((2768, 2786), 'math.sqrt', 'sqrt', (['(1.0 / fan_in)'], {}), '(1.0 / fan_in)\n', (2772, 2786), False, 'from math import sqrt\n')] |
import polychrom.starting_conformations
import polychrom.forces, polychrom.forcekits, polychrom.polymerutils
from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI
from polychrom.simulation import Simulation
import numpy as np
def test_basic_simulation_and_hdf5(tmp_path):
    """Smoke-test a short polychrom run plus the HDF5 reporter round-trip.

    Runs 19 blocks of 20 steps on a 100-monomer random walk confined to a
    sphere, saving every block through HDF5Reporter, then checks that the
    saved positions and extra records can be read back (via load_URI and
    fetch_block) and that a fresh reporter can continue the trajectory.
    """
    data = polychrom.starting_conformations.create_random_walk(1,100)
    """
    Here we created a hdf5Reporter attached to a foler test, and we are saving 5 blocks per file
    (you should probalby use 50 here or 100. 5 is just for a showcase)
    """
    reporter = HDF5Reporter(folder=tmp_path, max_data_length=5)
    """
    Passing a reporter to the simulation object - many reporters are possible, and more will be added in a future
    """
    sim = Simulation(
        N=100,
        error_tol=0.001,
        collision_rate=0.1,
        integrator="variableLangevin",
        platform="reference",
        max_Ek=40,
        reporters=[reporter],
    )
    sim.set_data(data)
    # Standard polymer-chain connectivity plus a spherical confinement force.
    sim.add_force(polychrom.forcekits.polymer_chains(sim))
    sim.add_force(polychrom.forces.spherical_confinement(sim, r=4, k=3))
    sim._apply_forces()
    datas = []
    for i in range(19):
        """
        Here we pass two extra records: a string and an array-like object.
        First becomes an attr, and second becomes an HDF5 dataset
        """
        sim.do_block(
            20,
            save_extras={
                "eggs": "I don't eat green eggs and ham!!!",
                "spam": [1, 2, 3],
            },
        )
        datas.append(sim.get_data())
    """
    Here we are not forgetting to dump the last set of blocks that the reporter has.
    We have to do it at the end of every simulation.
    I tried adding it to the destructor to make it automatic,
    but some weird interactions with garbage collection made it not very useable.
    """
    reporter.dump_data()
    files = list_URIs(tmp_path)
    d1 = load_URI(files[1])
    d1_direct = datas[1]
    # Saved positions are stored at reduced precision, hence the loose bound.
    # NOTE(review): 0.0051 / 0.0054 look like empirically chosen tolerances -- confirm.
    assert np.abs(d1["pos"] - d1_direct).max() <= 0.0051
    d1_fetch = polychrom.polymerutils.fetch_block(tmp_path, 1)
    assert np.allclose(d1["pos"], d1_fetch)
    assert np.allclose(d1["spam"], [1, 2, 3])  # spam got saved correctly
    assert d1["eggs"] == "I don't eat green eggs and ham!!!"
    del sim
    del reporter
    # Re-open the same folder read-only-ish and resume from the last saved block.
    rep = HDF5Reporter(folder=tmp_path, overwrite=False, check_exists=False)
    ind, data = rep.continue_trajectory()
    # continuing from the last trajectory
    assert np.abs(data["pos"] - datas[-1]).max() <= 0.0054
def run():
    """Run the HDF5 round-trip test against a throwaway directory.

    Uses a context manager so the temporary directory is removed even when
    the test raises; the original relied on garbage collection of the
    TemporaryDirectory object for cleanup.
    """
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        test_basic_simulation_and_hdf5(tmp_dir)


if __name__ == "__main__":
    run()
| [
"tempfile.TemporaryDirectory",
"numpy.abs",
"polychrom.simulation.Simulation",
"numpy.allclose",
"polychrom.hdf5_format.list_URIs",
"polychrom.hdf5_format.load_URI",
"polychrom.hdf5_format.HDF5Reporter"
] | [((559, 607), 'polychrom.hdf5_format.HDF5Reporter', 'HDF5Reporter', ([], {'folder': 'tmp_path', 'max_data_length': '(5)'}), '(folder=tmp_path, max_data_length=5)\n', (571, 607), False, 'from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI\n'), ((749, 894), 'polychrom.simulation.Simulation', 'Simulation', ([], {'N': '(100)', 'error_tol': '(0.001)', 'collision_rate': '(0.1)', 'integrator': '"""variableLangevin"""', 'platform': '"""reference"""', 'max_Ek': '(40)', 'reporters': '[reporter]'}), "(N=100, error_tol=0.001, collision_rate=0.1, integrator=\n 'variableLangevin', platform='reference', max_Ek=40, reporters=[reporter])\n", (759, 894), False, 'from polychrom.simulation import Simulation\n'), ((1899, 1918), 'polychrom.hdf5_format.list_URIs', 'list_URIs', (['tmp_path'], {}), '(tmp_path)\n', (1908, 1918), False, 'from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI\n'), ((1928, 1946), 'polychrom.hdf5_format.load_URI', 'load_URI', (['files[1]'], {}), '(files[1])\n', (1936, 1946), False, 'from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI\n'), ((2105, 2137), 'numpy.allclose', 'np.allclose', (["d1['pos']", 'd1_fetch'], {}), "(d1['pos'], d1_fetch)\n", (2116, 2137), True, 'import numpy as np\n'), ((2150, 2184), 'numpy.allclose', 'np.allclose', (["d1['spam']", '[1, 2, 3]'], {}), "(d1['spam'], [1, 2, 3])\n", (2161, 2184), True, 'import numpy as np\n'), ((2315, 2381), 'polychrom.hdf5_format.HDF5Reporter', 'HDF5Reporter', ([], {'folder': 'tmp_path', 'overwrite': '(False)', 'check_exists': '(False)'}), '(folder=tmp_path, overwrite=False, check_exists=False)\n', (2327, 2381), False, 'from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI\n'), ((2574, 2603), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2601, 2603), False, 'import tempfile\n'), ((1984, 2013), 'numpy.abs', 'np.abs', (["(d1['pos'] - d1_direct)"], {}), "(d1['pos'] - d1_direct)\n", (1990, 2013), True, 'import numpy as 
np\n'), ((2478, 2509), 'numpy.abs', 'np.abs', (["(data['pos'] - datas[-1])"], {}), "(data['pos'] - datas[-1])\n", (2484, 2509), True, 'import numpy as np\n')] |
from .image import *
import random
import typing as t
import numpy as np
from moviepy.editor import VideoFileClip
# Signature of a per-frame transform: takes an Image and returns the processed Image.
FrameProcessingFunc = t.Callable[[Image], Image]
class Video:
    """Thin wrapper around a moviepy clip exposing frames as project Images.

    External type annotations (Image, FrameProcessingFunc) are written as
    strings so the class body does not require those names to exist at
    class-creation time.
    """

    @staticmethod
    def read(fpath):
        """Load the video file at *fpath* and wrap it in a Video.

        Declared @staticmethod (the original omitted it) so it also works
        when called on an instance, not just as Video.read(path).
        """
        clip = VideoFileClip(fpath)
        return Video(clip)

    def __init__(self, clip):
        # Underlying moviepy clip; all operations delegate to it.
        self._clip = clip

    @property
    def frame_count(self) -> int:
        """Total number of frames: fps * duration, truncated to int."""
        return int(self._clip.fps * self._clip.duration)

    def __getitem__(self, key: int) -> "Image":
        """Return the frame at index *key* as an Image."""
        time = float(key) / self._clip.fps
        frame = self._clip.get_frame(time)
        return Video.__image_from_frame(frame)

    def random_frame(self) -> "Image":
        """Return a uniformly random frame of the clip."""
        index = random.randint(0, self.frame_count - 1)
        return self[index]

    def subclip(self, start: float = 0, end: float = None):
        """Return a new Video spanning [start, end] seconds of this clip."""
        clip = self._clip.subclip(start, end)
        return Video(clip)

    def process(self, f: "FrameProcessingFunc"):
        """Return a new Video with *f* applied to every frame."""
        def raw_f(frame: np.ndarray) -> np.ndarray:
            in_image = Video.__image_from_frame(frame)
            out_image = f(in_image)
            return out_image._data
        clip = self._clip.fl_image(raw_f)
        return Video(clip)

    def write(self, fpath, audio=False, progress_bar=True, verbose=True):
        """Write the clip to a video file; returns moviepy's result."""
        return self._clip.write_videofile(fpath, audio=audio, progress_bar=progress_bar, verbose=verbose)

    def write_gif(self, fpath, fps=None, loop=True, verbose=True):
        """Write the clip to an animated GIF."""
        return self._clip.write_gif(fpath, fps=fps, verbose=verbose, loop=loop)

    @staticmethod
    def __image_from_frame(frame: np.ndarray) -> "Image":
        # moviepy frames are H x W x C; RGBImage expects channels first.
        frame = np.moveaxis(frame, -1, 0)
        return RGBImage.fromChannels(frame)
| [
"numpy.moveaxis",
"moviepy.editor.VideoFileClip",
"random.randint"
] | [((216, 236), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['fpath'], {}), '(fpath)\n', (229, 236), False, 'from moviepy.editor import VideoFileClip\n'), ((675, 714), 'random.randint', 'random.randint', (['(0)', '(self.frame_count - 1)'], {}), '(0, self.frame_count - 1)\n', (689, 714), False, 'import random\n'), ((1568, 1593), 'numpy.moveaxis', 'np.moveaxis', (['frame', '(-1)', '(0)'], {}), '(frame, -1, 0)\n', (1579, 1593), True, 'import numpy as np\n')] |
import numpy as np
from scipy.spatial import distance_matrix
def get_num_points(config):
    """Number of points to generate: the test size when a test model is set,
    otherwise the training size."""
    if config.test_model is not None:
        return config.num_test_points
    return config.num_training_points
def get_random_capacities(n):
    """Random customer capacities in [1, 9]; slot 0 holds the depot capacity.

    The depot capacity depends on the number of customers (n - 1) via a
    fixed lookup table, falling back to 50 for unknown problem sizes.
    """
    capacities = 1 + np.random.randint(9, size=n)
    depot_by_customers = {10: 20, 20: 30, 50: 40, 100: 50}
    capacities[0] = depot_by_customers.get(n - 1, 50)
    return capacities
class Problem:
    """A routing-problem instance plus running statistics over visited solutions."""

    def __init__(self, locations, capacities):
        self.locations = locations.copy()
        self.capacities = capacities.copy()
        self.distance_matrix = distance_matrix(self.locations, self.locations)
        self.total_customer_capacities = np.sum(capacities[1:])
        n = len(self.locations)
        # One extra slot so that the default index -1 resolves to an
        # always-unmarked sentinel entry.
        self.change_at = np.zeros([n + 1])
        self.no_improvement_at = {}
        self.num_solutions = 0
        self.num_traversed = np.zeros((n, n))
        self.distance_hashes = set()

    def record_solution(self, solution, distance):
        """Accumulate symmetric edge-traversal counts, weighted by 1/distance."""
        weight = 1.0 / distance
        self.num_solutions += weight
        for path in solution:
            if len(path) <= 2:
                continue
            for prev_node, node in zip(path, path[1:]):
                self.num_traversed[prev_node][node] += weight
                self.num_traversed[node][prev_node] += weight

    def add_distance_hash(self, distance_hash):
        self.distance_hashes.add(distance_hash)

    def get_location(self, index):
        return self.locations[index]

    def get_capacity(self, index):
        return self.capacities[index]

    def get_num_customers(self):
        # Location 0 is the depot.
        return len(self.locations) - 1

    def get_distance(self, from_index, to_index):
        return self.distance_matrix[from_index][to_index]

    def get_frequency(self, from_index, to_index):
        """Smoothed fraction of recorded solution weight traversing this edge."""
        return self.num_traversed[from_index][to_index] / (1.0 + self.num_solutions)

    def reset_change_at_and_no_improvement_at(self):
        self.change_at = np.zeros([len(self.locations) + 1])
        self.no_improvement_at = {}

    def mark_change_at(self, step, path_indices):
        for index in path_indices:
            self.change_at[index] = step

    def mark_no_improvement(self, step, action, index_first, index_second=-1, index_third=-1):
        key = self._improvement_key(action, index_first, index_second, index_third)
        self.no_improvement_at[key] = step

    def should_try(self, action, index_first, index_second=-1, index_third=-1):
        """True if any involved path changed since this move last failed to improve."""
        key = self._improvement_key(action, index_first, index_second, index_third)
        threshold = self.no_improvement_at.get(key, -1)
        return (self.change_at[index_first] >= threshold
                or self.change_at[index_second] >= threshold
                or self.change_at[index_third] >= threshold)

    @staticmethod
    def _improvement_key(action, index_first, index_second, index_third):
        # Canonical dictionary key for a (move, operands) combination.
        return '{}_{}_{}_{}'.format(action, index_first, index_second, index_third)
def generate_problem(config):
    """Build a random Problem instance, advancing ``config.problem_seed``.

    Seeds numpy's global RNG from the config, draws locations and
    capacities, then re-seeds with the (already incremented) seed * 10
    before returning -- preserving the original call order exactly.
    """
    np.random.seed(config.problem_seed)
    config.problem_seed += 1
    n_points = get_num_points(config) + 1
    locations = np.random.uniform(size=(n_points, 2))
    capacities = get_random_capacities(n_points)
    problem = Problem(locations, capacities)
    # Re-seed the global RNG for whatever sampling follows problem creation.
    np.random.seed(config.problem_seed * 10)
    return problem
| [
"scipy.spatial.distance_matrix",
"numpy.sum",
"numpy.random.randint",
"numpy.random.seed",
"numpy.random.uniform"
] | [((2948, 2983), 'numpy.random.seed', 'np.random.seed', (['config.problem_seed'], {}), '(config.problem_seed)\n', (2962, 2983), True, 'import numpy as np\n'), ((3081, 3127), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(num_sample_points, 2)'}), '(size=(num_sample_points, 2))\n', (3098, 3127), True, 'import numpy as np\n'), ((3235, 3275), 'numpy.random.seed', 'np.random.seed', (['(config.problem_seed * 10)'], {}), '(config.problem_seed * 10)\n', (3249, 3275), True, 'import numpy as np\n'), ((262, 290), 'numpy.random.randint', 'np.random.randint', (['(9)'], {'size': 'n'}), '(9, size=n)\n', (279, 290), True, 'import numpy as np\n'), ((648, 695), 'scipy.spatial.distance_matrix', 'distance_matrix', (['self.locations', 'self.locations'], {}), '(self.locations, self.locations)\n', (663, 695), False, 'from scipy.spatial import distance_matrix\n'), ((737, 759), 'numpy.sum', 'np.sum', (['capacities[1:]'], {}), '(capacities[1:])\n', (743, 759), True, 'import numpy as np\n')] |
"""
Udacity dataset general functions.
These are adopted in training, testing the models and also driving the
Udacity simulator autonomous car.
"""
import os
import random
import numpy as np
import pandas as pd
import csv
import cv2
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from constants import IMAGE_HEIGHT, IMAGE_CHANNELS, IMAGE_WIDTH, TRAIN_DATA_DIR, B_SIZE, NB_EPOCHS, LR
def load_data(labels_fl, test_size=None):
    """
    Load the driving log and optionally split it into training/testing sets.
    :param labels_fl: path of the labels (measurement logs) CSV file
    :param test_size: size of the testing set; when falsy, no split is made
    :return: (x_train, x_test, y_train, y_test) when test_size is given,
             otherwise the full (x, y) arrays
    """
    labels = pd.read_csv(labels_fl)
    x = labels[['center', 'left', 'right']].values
    y = labels['steering'].values
    if not test_size:
        return x, y
    return train_test_split(x, y, test_size=test_size, random_state=0)
def load_image(dt_dir, image_file):
    """
    Load an RGB image from the data directory.
    :param dt_dir: images directory
    :param image_file: image file name (surrounding whitespace is stripped)
    :return: RGB image as an array
    """
    full_path = os.path.join(dt_dir, image_file.strip())
    return mpimg.imread(full_path)
def preprocess(img):
    """
    Preprocess an input frame for the model: crop away sky and car hood,
    resize to the network input size and convert to the YUV colour space.
    :param img: input RGB image
    :return: preprocessed image (66x200, YUV)
    """
    # Crop: drop the top 60 rows (sky) and bottom 25 rows (car hood)
    img = img[60:-25, :, :]
    # Resize the image. BUGFIX: the original passed cv2.INTER_AREA as the
    # third positional argument, which is cv2.resize's `dst` parameter, not
    # `interpolation` -- it must be passed by keyword.
    img = cv2.resize(img, (200, 66), interpolation=cv2.INTER_AREA)
    # Convert the image to YUV
    img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
    return img
# Data augmentation functions
def random_adjust(data_dir, center, left, right, steering_angle):
    """
    Pick one of the three camera views at random and offset the steering
    angle to compensate for the side-camera positions.
    :param data_dir: images directory
    :param center: center view image
    :param left: left view image
    :param right: right view image
    :param steering_angle: the steering angle related to the input frame
    :return: chosen image and its adjusted steering angle
    """
    view = np.random.choice(3)
    if view == 0:
        # left camera: steer a bit more to the right
        return load_image(data_dir, left), steering_angle + 0.2
    if view == 1:
        # right camera: steer a bit more to the left
        return load_image(data_dir, right), steering_angle - 0.2
    return load_image(data_dir, center), steering_angle
def random_flip(image, steering_angle):
    """
    With probability 0.5, flip the input image horizontally and negate the
    steering angle.
    :param image: input frame
    :param steering_angle: steering angle related to the input frame
    :return: (possibly flipped) frame and its steering angle
    """
    # BUGFIX: the original tested `np.random.rand() < 1`, which is always
    # true and made the "random" flip unconditional; 0.5 restores the
    # coin-flip behaviour the docstring describes.
    if np.random.rand() < 0.5:
        image = cv2.flip(image, 1)
        steering_angle = -steering_angle
    return image, steering_angle
def random_shift(image, steering_angle, range_x, range_y):
    """
    Translate the input frame by a random offset and compensate the steering
    angle for the horizontal component.
    :param image: input frame
    :param steering_angle: steering angle related to the input frame
    :param range_x: horizontal shift range (pixels)
    :param range_y: vertical shift range (pixels)
    :return: shifted frame and adjusted steering angle
    """
    shift_x = range_x * (np.random.rand() - 0.5)
    shift_y = range_y * (np.random.rand() - 0.5)
    # Horizontal translation changes the apparent road position.
    steering_angle += shift_x * 0.002
    translation = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
    rows, cols = image.shape[:2]
    shifted = cv2.warpAffine(image, translation, (cols, rows))
    return shifted, steering_angle
def random_shadow(image):
    """
    Darken a rectangular region of the frame to simulate a shadow.
    :param image: input RGB frame
    :return: frame with a darkened rectangular region
    """
    bright_factor = 0.3  # V-channel multiplier applied inside the shaded region
    x = random.randint(0, image.shape[1])
    y = random.randint(0, image.shape[0])
    # NOTE(review): randint(a, a) always returns a, so width/height always
    # equal the full image dimensions -- the region size is not actually
    # random. Presumably randint(0, image.shape[1]) was intended; confirm
    # before changing.
    width = random.randint(image.shape[1], image.shape[1])
    if x + width > image.shape[1]:
        # NOTE(review): this reassigns x instead of clamping width; the
        # mirrored position looks suspicious -- verify the intended clamp.
        x = image.shape[1] - x
    height = random.randint(image.shape[0], image.shape[0])
    if y + height > image.shape[0]:
        y = image.shape[0] - y
    # Darken the value (brightness) channel of the region in HSV space.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    image[y:y + height, x:x + width, 2] = image[y:y + height, x:x + width, 2] * bright_factor
    return cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
def random_brightness(image):
    """
    Randomly scale the brightness of the input frame.
    :param image: input RGB frame
    :return: brightness-adjusted RGB frame
    """
    # HSV's V channel is the brightness ('B' in the HSB naming).
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    scale = 1.0 + (np.random.rand() - 0.5)
    hsv[:, :, 2] = hsv[:, :, 2] * scale
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def augment(data_dir, center, left, right, steering_angle, range_x=100, range_y=10):
    """
    Produce one augmented training frame and its adjusted steering angle by
    chaining view selection, flip, shift, shadow and brightness augmentation.
    :param data_dir: images directory
    :param center: center view image
    :param left: left view image
    :param right: right view image
    :param steering_angle: the steering angle related to the input frame
    :param range_x: horizontal translation range
    :param range_y: vertical translation range
    :return: modified frame and steering angle
    """
    image, angle = random_adjust(data_dir, center, left, right, steering_angle)
    image, angle = random_flip(image, angle)
    image, angle = random_shift(image, angle, range_x, range_y)
    image = random_brightness(random_shadow(image))
    return image, angle
def batcher(dt_dir, image_paths, steering_angles, batch_size, training_flag, to_csv=False):
    """
    Generate batches of training images from the given image paths and their
    associated steering angles.  Yields forever in training mode (Keras-style
    generator); in to_csv mode it stops after one full pass over the data.
    :param dt_dir: the directory where the images are
    :param image_paths: paths to the input images (center, left, right per row)
    :param steering_angles: the steering angles related to the input frames
    :param batch_size: the batch size used to train the model
    :param training_flag: a boolean flag to determine whether we are in training or validation mode
    :param to_csv: a boolean flag to decide if we are augmenting data and saving the angles in a CSV file
    :return: batches of (preprocessed images, steering angles)
    """
    images = np.empty([batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])
    steers = np.empty(batch_size)
    # One fixed random visit order for the generator's whole lifetime.
    permuted = np.random.permutation(image_paths.shape[0])  # Global seed set
    count = 0
    while True:
        batch = permuted[count:count + batch_size]
        curr_bs = batch.shape[0]
        # In CSV-export mode, stop once every sample has been visited.
        if image_paths.shape[0] <= count and to_csv:
            break
        # An empty batch would mean `count` ran past the data without reset.
        assert batch.size != 0
        for idx, index in enumerate(batch):
            center, left, right = image_paths[index]
            steering_angle = steering_angles[index]
            if training_flag and np.random.rand() < 0.6:  # 60% probability of augmenting the data
                image, steering_angle = augment(dt_dir, center, left, right, steering_angle)
            else:
                image = load_image(dt_dir, center)
            # Populate the image and steering angle arrays
            images[idx] = preprocess(image)
            steers[idx] = steering_angle
        count += batch_size
        if curr_bs < batch_size:
            count = 0  # Short final batch marks the end of the epoch: restart
        # Slice so a short final batch yields only the filled entries.
        yield images[:curr_bs], steers[:curr_bs]
def train_model(mdl, x_train, x_valid, y_train, y_valid, model_name, cps_path='checkpoints/model-{val_loss:03f}.h5'):
    """
    Train a steering-angle regression model with checkpointing, then plot the
    training/validation loss curves.
    :param mdl: Keras sequential or functional model
    :param x_train: training procedure input data
    :param x_valid: validation procedure input data
    :param y_train: training procedure output (label) data
    :param y_valid: validation procedure output (label) data
    :param model_name: name of the model (used in the plot title)
    :param cps_path: checkpoints path template where the trained models are stored
    :return: None
    """
    # Save the weights after every epoch in which the validation loss improves.
    checkpoint = ModelCheckpoint(cps_path,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='auto')
    # Mean squared error against the recorded steering angles.
    # NOTE(review): `lr` is the legacy Keras argument name; recent TF/Keras
    # versions only accept `learning_rate` -- confirm against the pinned
    # TensorFlow version before upgrading.
    mdl.compile(loss='mse',
                optimizer=Adam(lr=LR))
    # Both splits are streamed through `batcher`; only the training stream
    # applies augmentation (training_flag=True).
    history = mdl.fit(batcher(TRAIN_DATA_DIR, x_train, y_train, B_SIZE, True),
                      steps_per_epoch=np.ceil(len(x_train) / B_SIZE),
                      epochs=NB_EPOCHS,
                      validation_data=batcher(TRAIN_DATA_DIR, x_valid, y_valid, B_SIZE, False),
                      validation_steps=np.ceil(len(x_valid) / B_SIZE),
                      callbacks=[checkpoint],
                      verbose=1)
    # Plot the training and validation losses
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title(f'{model_name} Model Loss (learning rate: {LR})')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train_loss', 'val_loss'], loc='upper left')
    plt.show()
| [
"numpy.random.rand",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.empty",
"random.randint",
"numpy.random.permutation",
"cv2.warpAffine",
"numpy.random.choice",
"sklearn.model_selection.train_test_split",
"cv2.cvtColor",
"matplot... | [((944, 966), 'pandas.read_csv', 'pd.read_csv', (['labels_fl'], {}), '(labels_fl)\n', (955, 966), True, 'import pandas as pd\n'), ((1762, 1804), 'cv2.resize', 'cv2.resize', (['img', '(200, 66)', 'cv2.INTER_AREA'], {}), '(img, (200, 66), cv2.INTER_AREA)\n', (1772, 1804), False, 'import cv2\n'), ((1847, 1883), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2YUV'], {}), '(img, cv2.COLOR_RGB2YUV)\n', (1859, 1883), False, 'import cv2\n'), ((2375, 2394), 'numpy.random.choice', 'np.random.choice', (['(3)'], {}), '(3)\n', (2391, 2394), True, 'import numpy as np\n'), ((3627, 3673), 'numpy.float32', 'np.float32', (['[[1, 0, trans_x], [0, 1, trans_y]]'], {}), '([[1, 0, trans_x], [0, 1, trans_y]])\n', (3637, 3673), True, 'import numpy as np\n'), ((3722, 3769), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'trans_m', '(width, height)'], {}), '(image, trans_m, (width, height))\n', (3736, 3769), False, 'import cv2\n'), ((3978, 4011), 'random.randint', 'random.randint', (['(0)', 'image.shape[1]'], {}), '(0, image.shape[1])\n', (3992, 4011), False, 'import random\n'), ((4020, 4053), 'random.randint', 'random.randint', (['(0)', 'image.shape[0]'], {}), '(0, image.shape[0])\n', (4034, 4053), False, 'import random\n'), ((4066, 4112), 'random.randint', 'random.randint', (['image.shape[1]', 'image.shape[1]'], {}), '(image.shape[1], image.shape[1])\n', (4080, 4112), False, 'import random\n'), ((4192, 4238), 'random.randint', 'random.randint', (['image.shape[0]', 'image.shape[0]'], {}), '(image.shape[0], image.shape[0])\n', (4206, 4238), False, 'import random\n'), ((4318, 4356), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HSV'], {}), '(image, cv2.COLOR_RGB2HSV)\n', (4330, 4356), False, 'import cv2\n'), ((4462, 4500), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2RGB'], {}), '(image, cv2.COLOR_HSV2RGB)\n', (4474, 4500), False, 'import cv2\n'), ((4745, 4783), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2HSV'], {}), '(image, 
cv2.COLOR_RGB2HSV)\n', (4757, 4783), False, 'import cv2\n'), ((4878, 4914), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2RGB'], {}), '(hsv, cv2.COLOR_HSV2RGB)\n', (4890, 4914), False, 'import cv2\n'), ((6540, 6605), 'numpy.empty', 'np.empty', (['[batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS]'], {}), '([batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS])\n', (6548, 6605), True, 'import numpy as np\n'), ((6619, 6639), 'numpy.empty', 'np.empty', (['batch_size'], {}), '(batch_size)\n', (6627, 6639), True, 'import numpy as np\n'), ((6655, 6698), 'numpy.random.permutation', 'np.random.permutation', (['image_paths.shape[0]'], {}), '(image_paths.shape[0])\n', (6676, 6698), True, 'import numpy as np\n'), ((8334, 8429), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['cps_path'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""auto"""'}), "(cps_path, monitor='val_loss', verbose=1, save_best_only=\n True, mode='auto')\n", (8349, 8429), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((9158, 9191), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (9166, 9191), True, 'import matplotlib.pyplot as plt\n'), ((9196, 9233), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (9204, 9233), True, 'import matplotlib.pyplot as plt\n'), ((9238, 9297), 'matplotlib.pyplot.title', 'plt.title', (['f"""{model_name} Model Loss (learning rate: {LR})"""'], {}), "(f'{model_name} Model Loss (learning rate: {LR})')\n", (9247, 9297), True, 'import matplotlib.pyplot as plt\n'), ((9302, 9320), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (9312, 9320), True, 'import matplotlib.pyplot as plt\n'), ((9325, 9344), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (9335, 9344), True, 'import matplotlib.pyplot as plt\n'), ((9349, 
9405), 'matplotlib.pyplot.legend', 'plt.legend', (["['train_loss', 'val_loss']"], {'loc': '"""upper left"""'}), "(['train_loss', 'val_loss'], loc='upper left')\n", (9359, 9405), True, 'import matplotlib.pyplot as plt\n'), ((9410, 9420), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9418, 9420), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1181), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': 'test_size', 'random_state': '(0)'}), '(x, y, test_size=test_size, random_state=0)\n', (1138, 1181), False, 'from sklearn.model_selection import train_test_split\n'), ((2938, 2954), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2952, 2954), True, 'import numpy as np\n'), ((2976, 2994), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2984, 2994), False, 'import cv2\n'), ((3502, 3518), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3516, 3518), True, 'import numpy as np\n'), ((3551, 3567), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3565, 3567), True, 'import numpy as np\n'), ((4803, 4819), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4817, 4819), True, 'import numpy as np\n'), ((8636, 8647), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'LR'}), '(lr=LR)\n', (8640, 8647), False, 'from tensorflow.keras.optimizers import Adam\n'), ((7168, 7184), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7182, 7184), True, 'import numpy as np\n')] |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
XVG auxiliary reader --- :mod:`MDAnalysis.auxiliary.XVG`
========================================================
xvg files are produced by Gromacs during simulation or analysis, formatted
for plotting data with Grace.
Data is column-formatted; time/data selection is enabled by providing column
indices.
Note
----
By default, the time of each step is assumed to be stored in the first column,
in units of ps.
.. autoclass:: XVGStep
:members:
XVG Readers
-----------
The default :class:`XVGReader` reads and stores the full contents of the .xvg
file on initialisation, while a second reader (:class:`XVGFileReader`) that
reads steps one at a time as required is also provided for when a lower memory
footprint is desired.
Note
----
Data is assumed to be time-ordered.
Multiple datasets, separated in the .xvg file by '&', are currently not
supported (the readers will stop at the first line starting '&').
.. autoclass:: XVGReader
:members:
.. autoclass:: XVGFileReader
:members:
.. autofunction:: uncomment
"""
from __future__ import absolute_import
from six.moves import range
import numbers
import os
import numpy as np
from . import base
from ..lib.util import anyopen
def uncomment(lines):
    """ Remove comments from lines in an .xvg file

    Parameters
    ----------
    lines : list of str
        Lines as directly read from .xvg file

    Yields
    ------
    str
        The next non-comment line, with any trailing comments removed
    """
    for raw_line in lines:
        content = raw_line.strip()
        if not content:
            # blank line: nothing to yield
            continue
        if content.startswith('@'):
            # grace instruction ('@' only counts at the start of a line)
            continue
        hash_pos = content.find('#')
        if hash_pos < 0:
            # no comment marker at all
            yield content
        elif hash_pos > 0 and content[:hash_pos]:
            # trailing comment: keep everything before the '#'
            yield content[:hash_pos]
        # hash_pos == 0 means the whole line is a comment: skip it
class XVGStep(base.AuxStep):
    """ AuxStep class for .xvg file format.

    Extends the base AuxStep class to allow selection of time and
    data-of-interest fields (by column index) from the full set of data read
    each step.

    Parameters
    ----------
    time_selector : int | None, optional
        Index of column in .xvg file storing time, assumed to be in ps. Default
        value is 0 (i.e. first column).
    data_selector : list of int | None, optional
        List of indices of columns in .xvg file containing data of interest to
        be stored in ``data``. Default value is ``None``.
    **kwargs
        Other AuxStep options.

    See Also
    --------
    :class:`~MDAnalysis.auxiliary.base.AuxStep`
    """

    def __init__(self, time_selector=0, data_selector=None, **kwargs):
        super(XVGStep, self).__init__(time_selector=time_selector,
                                      data_selector=data_selector,
                                      **kwargs)

    def _select_time(self, key):
        # None keeps time selection disabled
        if key is None:
            return
        if not isinstance(key, numbers.Integral):
            raise ValueError('Time selector must be single index')
        return self._select_data(key)

    def _select_data(self, key):
        # None keeps data selection disabled
        if key is None:
            return
        if not isinstance(key, numbers.Integral):
            # a sequence of column indices: gather each selected column
            return np.array([self._select_data(i) for i in key])
        try:
            return self._data[key]
        except IndexError:
            raise ValueError('{} not a valid index for data with {} '
                             'columns'.format(key, len(self._data)))
class XVGReader(base.AuxReader):
    """ Auxiliary reader to read data from an .xvg file.

    Default reader for .xvg files. All data from the file will be read and
    stored on initialisation.

    Parameters
    ----------
    filename : str
        Location of the file containing the auxiliary data.
    **kwargs
        Other AuxReader options.

    See Also
    --------
    :class:`~MDAnalysis.auxiliary.base.AuxReader`

    Note
    ----
    The file is assumed to be of a size such that reading and storing the full
    contents is practical.
    """

    format = "XVG"
    _Auxstep = XVGStep

    def __init__(self, filename, **kwargs):
        self._auxdata = os.path.abspath(filename)
        with anyopen(filename) as xvg_file:
            lines = xvg_file.readlines()
        auxdata_values = []
        # remove comments before storing
        for i, line in enumerate(uncomment(lines)):
            if line.lstrip()[0] == '&':
                # multiple data sets not supported; stop at the end of the first
                break
            auxdata_values.append([float(l) for l in line.split()])
            # check the number of columns is consistent
            if len(auxdata_values[i]) != len(auxdata_values[0]):
                # BUGFIX: report the column *counts*, as the message promises,
                # instead of interpolating the raw rows themselves.
                raise ValueError('Step {0} has {1} columns instead of '
                                 '{2}'.format(i, len(auxdata_values[i]),
                                              len(auxdata_values[0])))
        self._auxdata_values = np.array(auxdata_values)
        self._n_steps = len(self._auxdata_values)
        super(XVGReader, self).__init__(**kwargs)

    def _read_next_step(self):
        """ Read next auxiliary step and update ``auxstep``.

        Returns
        -------
        AuxStep object
            Updated with the data for the new step.

        Raises
        ------
        StopIteration
            When end of auxiliary data set is reached.
        """
        auxstep = self.auxstep
        new_step = self.step + 1
        if new_step < self.n_steps:
            auxstep._data = self._auxdata_values[new_step]
            auxstep.step = new_step
            return auxstep
        # past the last step: rewind so the reader can be iterated again
        self.rewind()
        raise StopIteration

    def _go_to_step(self, i):
        """ Move to and read i-th auxiliary step.

        Parameters
        ----------
        i: int
            Step number (0-indexed) to move to

        Returns
        -------
        :class:`XVGStep`

        Raises
        ------
        ValueError
            If step index not in valid range.
        """
        if i >= self.n_steps or i < 0:
            raise ValueError("Step index {0} is not valid for auxiliary "
                             "(num. steps {1})".format(i, self.n_steps))
        # position just before i so that next() lands exactly on step i
        self.auxstep.step = i - 1
        self.next()
        return self.auxstep

    def read_all_times(self):
        """ Get list of time at each step.

        Returns
        -------
        list of float
            Time at each step.
        """
        return self._auxdata_values[:, self.time_selector]
class XVGFileReader(base.AuxFileReader):
    """ Auxiliary reader to read (step at a time) from an .xvg file.

    An alternative XVG reader which reads each step from the .xvg file as
    needed (rather than reading and storing all from the start), for a lower
    memory footprint.

    Parameters
    ----------
    filename : str
        Location of the file containing the auxiliary data.
    **kwargs
        Other AuxReader options.

    See Also
    --------
    :class:`~MDAnalysis.auxiliary.base.AuxFileReader`

    Note
    ----
    The default reader for .xvg files is :class:`XVGReader`.
    """

    format = 'XVG-F'
    _Auxstep = XVGStep

    def __init__(self, filename, **kwargs):
        super(XVGFileReader, self).__init__(filename, **kwargs)

    def _read_next_step(self):
        """ Read next recorded step in the xvg file and update ``auxstep``.

        Returns
        -------
        AuxStep object
            Updated with the data for the new step.

        Raises
        ------
        StopIteration
            When end of file or end of first data set is reached.
        """
        line = next(self.auxfile)
        while True:
            if not line or (line.strip() and line.strip()[0] == '&'):
                # at end of file or end of first set of data (multiple sets
                # currently not supported)
                self.rewind()
                raise StopIteration
            # uncomment the line
            for uncommented in uncomment([line]):
                # line has data in it; add to auxstep + return
                auxstep = self.auxstep
                auxstep.step = self.step + 1
                auxstep._data = [float(i) for i in uncommented.split()]
                # see if we've set n_cols yet...
                try:
                    auxstep._n_cols
                except AttributeError:
                    # haven't set n_cols yet; set now
                    auxstep._n_cols = len(auxstep._data)
                if len(auxstep._data) != auxstep._n_cols:
                    raise ValueError('Step {0} has {1} columns instead of '
                                     '{2}'.format(self.step, len(auxstep._data),
                                                  auxstep._n_cols))
                return auxstep
            # line is comment only - move to next
            line = next(self.auxfile)

    def _count_n_steps(self):
        """ Iterate through all steps to count total number.

        Returns
        -------
        int
            Total number of steps
        """
        if not self.constant_dt:
            # check if we've already iterated through to build _times list
            try:
                return len(self._times)
            except AttributeError:
                # might as well build _times now, since we'll need to iterate
                # through anyway
                self._times = self.read_all_times()
                # BUGFIX: use the cached list instead of calling
                # read_all_times() a second time (which re-read the file)
                return len(self._times)
        else:
            # don't need _times; iterate here instead
            self._restart()
            count = 0
            for step in self:
                count = count + 1
            return count

    def read_all_times(self):
        """ Iterate through all steps to build times list.

        Returns
        -------
        list of float
            Time of each step
        """
        self._restart()
        times = []
        for step in self:
            times.append(self.time)
        return np.array(times)
| [
"os.path.abspath",
"numpy.array"
] | [((5590, 5615), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (5605, 5615), False, 'import os\n'), ((6385, 6409), 'numpy.array', 'np.array', (['auxdata_values'], {}), '(auxdata_values)\n', (6393, 6409), True, 'import numpy as np\n'), ((11454, 11469), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (11462, 11469), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import collections
from PIL import Image
import tensorflow as tf
import matplotlib.pyplot as plt
class Pixel_weights:
    """Compute per-pixel loss weights for U-Net style segmentation.

    Combines class-frequency weights with a distance-transform term that
    emphasises pixels lying between neighbouring connected components.
    """

    def __init__(self):
        print("pixel weights init")

    def compute_label_weights(self, image):
        """Return a map where each pixel holds the relative frequency of its label.

        Parameters
        ----------
        image : ndarray
            2-D label image.

        Returns
        -------
        ndarray of float64, same shape as ``image``
            Each pixel's value is count(label) / image.size.
        """
        # Accumulate counts in a float64 array: writing counts back into a
        # copy of the input (as the original did) silently overflows for
        # small integer dtypes such as uint8 once a label covers more than
        # 255 pixels.
        unique, counts = np.unique(image, return_counts=True)
        weights = np.zeros(image.shape, dtype=np.float64)
        for label, count in zip(unique, counts):
            weights[image == label] = count
        return weights / image.size

    def compute_pixel_weights(self, image_tensor):
        """Evaluate ``image_tensor`` and return the combined weight map.

        The map is the label-frequency weight plus a border-emphasis term
        built from distance transforms of the connected components of the
        foreground (pixels equal to 1).
        """
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            image = sess.run(image_tensor)
            # assumes the tensor is (1, H, W[, 1]) -- TODO confirm with caller
            image = np.reshape(image, [image.shape[1], image.shape[2]])
        cnc = np.array(image == 1).astype(np.int8)
        (_, output, _, _) = cv2.connectedComponentsWithStats(cnc, 4, cv2.CV_32S)
        # One distance map per component label: distance of every pixel to
        # that component.
        maps = np.zeros([output.shape[0], output.shape[1], np.max(output)])
        for label in range(np.max(output)):
            maps[:, :, label] = cv2.distanceTransform(
                np.array(output != label).astype(np.uint8), 2, 5)
        d = np.zeros([output.shape[0], output.shape[1]], dtype=np.int32)
        for i in range(output.shape[0]):
            for j in range(output.shape[1]):
                # Sort once per pixel (the original sorted the same slice
                # twice) and combine the two nearest non-self distances.
                ranked = np.sort(maps[i, j, :])
                d[i, j] = 10 * np.exp(-np.square(ranked[1] + ranked[2]) / 200)
        weights = self.compute_label_weights(image) + d
        return weights
if __name__ == "__main__":
    # Smoke test: weight a sample ground-truth mask and report its shape.
    weighter = Pixel_weights()
    mask = np.asarray(
        Image.open('B:/Tensorflow/Segmentation/UNet/data/ground-truth.jpg').convert('L'))
    weights = weighter.compute_pixel_weights(mask)
    print(weights.shape)
| [
"PIL.Image.open",
"numpy.reshape",
"numpy.unique",
"numpy.where",
"tensorflow.Session",
"numpy.sort",
"numpy.asarray",
"numpy.max",
"tensorflow.global_variables_initializer",
"numpy.array",
"numpy.zeros",
"cv2.connectedComponentsWithStats"
] | [((2042, 2059), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2052, 2059), True, 'import numpy as np\n'), ((285, 300), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (293, 300), True, 'import numpy as np\n'), ((319, 335), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (328, 335), True, 'import numpy as np\n'), ((1082, 1134), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['cnc', '(4)', 'cv2.CV_32S'], {}), '(cnc, 4, cv2.CV_32S)\n', (1114, 1134), False, 'import cv2\n'), ((1465, 1525), 'numpy.zeros', 'np.zeros', (['[output.shape[0], output.shape[1]]'], {'dtype': 'np.int32'}), '([output.shape[0], output.shape[1]], dtype=np.int32)\n', (1473, 1525), True, 'import numpy as np\n'), ((621, 633), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (631, 633), True, 'import tensorflow as tf\n'), ((765, 816), 'numpy.reshape', 'np.reshape', (['image', '[image.shape[1], image.shape[2]]'], {}), '(image, [image.shape[1], image.shape[2]])\n', (775, 816), True, 'import numpy as np\n'), ((1302, 1316), 'numpy.max', 'np.max', (['output'], {}), '(output)\n', (1308, 1316), True, 'import numpy as np\n'), ((1948, 2015), 'PIL.Image.open', 'Image.open', (['"""B:/Tensorflow/Segmentation/UNet/data/ground-truth.jpg"""'], {}), "('B:/Tensorflow/Segmentation/UNet/data/ground-truth.jpg')\n", (1958, 2015), False, 'from PIL import Image\n'), ((466, 486), 'numpy.where', 'np.where', (['(image == i)'], {}), '(image == i)\n', (474, 486), True, 'import numpy as np\n'), ((665, 698), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (696, 698), True, 'import tensorflow as tf\n'), ((1016, 1036), 'numpy.array', 'np.array', (['(image == 1)'], {}), '(image == 1)\n', (1024, 1036), True, 'import numpy as np\n'), ((1257, 1271), 'numpy.max', 'np.max', (['output'], {}), '(output)\n', (1263, 1271), True, 'import numpy as np\n'), ((1374, 1399), 'numpy.array', 'np.array', (['(output != label)'], {}), 
'(output != label)\n', (1382, 1399), True, 'import numpy as np\n'), ((1734, 1756), 'numpy.sort', 'np.sort', (['maps[i, j, :]'], {}), '(maps[i, j, :])\n', (1741, 1756), True, 'import numpy as np\n'), ((1762, 1784), 'numpy.sort', 'np.sort', (['maps[i, j, :]'], {}), '(maps[i, j, :])\n', (1769, 1784), True, 'import numpy as np\n')] |
import logging
import os
import pickle
import random
from pathlib import Path
import itertools
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from src.data.make_dataset import DATE_COLUMNS, CAT_COLUMNS
import utm
# Repository root: two directory levels above this file.
project_dir = Path(__file__).resolve().parents[2]
def rule(row):
    """Project a row's surface coordinates into UTM zone 45K.

    NOTE(review): ``utm.from_latlon`` returns (easting, northing, ...);
    here easting is stored under the key "lat" and northing under "long",
    which looks swapped -- confirm downstream consumers expect this
    labelling before changing it.
    """
    easting, northing, _, _ = utm.from_latlon(
        row["Surf_Latitude"], row["Surf_Longitude"], 45, 'K')
    return pd.Series({"lat": easting, "long": northing})
def distance(s_lat, s_lng, e_lat, e_lng):
    """Great-circle (haversine) distance in kilometres.

    Parameters
    ----------
    s_lat, s_lng : float or array-like
        Start latitude / longitude in degrees.
    e_lat, e_lng : float or array-like
        End latitude / longitude in degrees (NumPy broadcasting applies).

    Returns
    -------
    float or ndarray
        Distance along the Earth's surface in km.
    """
    # approximate radius of earth in km
    R = 6373.0
    # Convert every angle with deg2rad: the original converted s_lat by hand
    # (`* np.pi / 180.0`), inconsistently with the other three arguments.
    s_lat = np.deg2rad(s_lat)
    s_lng = np.deg2rad(s_lng)
    e_lat = np.deg2rad(e_lat)
    e_lng = np.deg2rad(e_lng)
    d = (np.sin((e_lat - s_lat) / 2) ** 2
         + np.cos(s_lat) * np.cos(e_lat) * np.sin((e_lng - s_lng) / 2) ** 2)
    return 2 * R * np.arcsin(np.sqrt(d))
def build_features(input_file_path, output_file_path, suffix="Train"):
    """Read the pickled ``{suffix}_df.pck`` frame, derive model features and
    write the result to ``{suffix}_final.pck``.

    Parameters
    ----------
    input_file_path : str
        Directory containing the processed input pickle.
    output_file_path : str
        Directory the final feature pickle is written to.
    suffix : str
        Dataset split name ("Train", "Test" or "Validation").

    Returns
    -------
    pandas.DataFrame
        The frame with the derived feature columns added.
    """
    input_filename = os.path.join(input_file_path, f"{suffix}_df.pck")
    output_file_name = os.path.join(output_file_path, f"{suffix}_final.pck")
    df = pd.read_pickle(input_filename)
    #df.loc[df["Surf_Longitude"] > -70, "Surf_Longitude"] = np.nan
    # for col in ['RigReleaseDate','SpudDate']:
    #     df[f'{col}_month']=df[col].dt.month
    #     df[f'{col}_year'] = df[col].dt.year
    #     df[f'{col}_day'] = df[col].dt.day
    # df.loc[df["Surf_Longitude"] > -70, "Surf_Longitude"] = np.nan
    # NOTE(review): assumes a 31-day month -- off by up to 3 days for shorter
    # months; confirm this approximation is intended.
    df['RigReleaseDate_days_till_monthend'] = 31 - df['RigReleaseDate'].dt.day
    df['FinalDrillDate_days_till_monthend'] = 31 - df['FinalDrillDate'].dt.day
    #df['BOE_average'] = df['_Max`Prod`(BOE)'] / df['RigReleaseDate_days_till_monthend']
    #df['BOE_fd_av'] = df['_Max`Prod`(BOE)'] / df['FinalDrillDate_days_till_monthend']
    # Keep a raw datetime copy of SpudDate before all date columns are
    # converted to scalar seconds below.
    df['SpudDate_dt'] = df['SpudDate']
    # Convert every date column to seconds elapsed since a fixed 1950 epoch.
    for col in DATE_COLUMNS:
        df[col] = (df[col] - pd.to_datetime("1950-01-01")).dt.total_seconds()
    # All possible diff interactions of dates:
    # for i in range(len(DATE_COLUMNS)):
    #     for j in range(i + 1, len(DATE_COLUMNS)):
    #         l=DATE_COLUMNS[i]
    #         r=DATE_COLUMNS[j]
    #         df[f'{l}_m_{r}'] = df[l] - df[r]
    # Hand-picked pairwise gaps between well-lifecycle dates (in seconds).
    df["timediff"] = df["SpudDate"] - df["SurfAbandonDate"]
    df["st_timediff"] = df["SpudDate"] - df["StatusDate"]
    df["cf_timediff"] = df["ConfidentialReleaseDate"] - df["SpudDate"]
    df["lic_timediff"] = df["LicenceDate"] - df["SpudDate"]
    df["final_timediff"] = df["FinalDrillDate"] - df["SpudDate"]
    df["rrd_timediff"] = df["RigReleaseDate"] - df["SpudDate"]
    #df['is_na_completion_date'] = pd.to_datetime(df['CompletionDate']).isna()
    df["LengthDrill"] = df["DaysDrilling"] * df["DrillMetresPerDay"]
    #df["DepthDiff"] = df["ProjectedDepth"]/ df["TVD"]
    #df["DepthDiffLD"] = df["ProjectedDepth"] - df["LengthDrill"]
    #df["TDPD"] = df["ProjectedDepth"] - df["TotalDepth"]
    #df['LicenceNumber_nchar']=df['LicenceNumber'].astype(str).str.count("[a-zA-Z]")
    #df['LicenceNumber_ndig'] = df['LicenceNumber'].astype(str).str.count("[0-9]")
    #df['is_na_BH'] = df['BH_Latitude'].isna() | df['BH_Longitude'].isna()
    df.drop(['OSArea','OSDeposit'], axis=1, inplace=True)
    # # TODO Haversine, azimuth:
    # Surface-to-bottom-hole great-circle length as a lateral-length proxy.
    df['haversine_Length'] = distance(df['Surf_Latitude'], df['Surf_Longitude'], df['BH_Latitude'], df['BH_Longitude'])
    #df['azi_proxy'] = np.arctan((df['Surf_Latitude'] - df['BH_Latitude'])/(df['Surf_Longitude'] - df['BH_Longitude']))
    df.drop(['BH_Latitude', 'BH_Longitude','LicenceNumber'], axis=1, inplace=True)
    df.to_pickle(output_file_name)
    return df
if __name__ == "__main__":
    # Configure root logging before any feature building runs.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )
    # not used in this stub but often useful for finding various files
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    input_file_path = os.path.join(project_dir, "data", "processed")
    output_file_path = os.path.join(project_dir, "data", "final")
    for directory in (input_file_path, output_file_path):
        os.makedirs(directory, exist_ok=True)
    # Build features for every split, in the same order as before.
    df_train, df_test, df_val = (
        build_features(input_file_path, output_file_path, suffix=split)
        for split in ("Train", "Test", "Validation")
    )
| [
"pandas.Series",
"pandas.read_pickle",
"logging.basicConfig",
"utm.from_latlon",
"numpy.sqrt",
"os.makedirs",
"pathlib.Path",
"os.path.join",
"numpy.deg2rad",
"numpy.cos",
"numpy.sin",
"pandas.to_datetime"
] | [((338, 407), 'utm.from_latlon', 'utm.from_latlon', (["row['Surf_Latitude']", "row['Surf_Longitude']", '(45)', '"""K"""'], {}), "(row['Surf_Latitude'], row['Surf_Longitude'], 45, 'K')\n", (353, 407), False, 'import utm\n'), ((419, 456), 'pandas.Series', 'pd.Series', (["{'lat': lat, 'long': long}"], {}), "({'lat': lat, 'long': long})\n", (428, 456), True, 'import pandas as pd\n'), ((602, 619), 'numpy.deg2rad', 'np.deg2rad', (['s_lng'], {}), '(s_lng)\n', (612, 619), True, 'import numpy as np\n'), ((632, 649), 'numpy.deg2rad', 'np.deg2rad', (['e_lat'], {}), '(e_lat)\n', (642, 649), True, 'import numpy as np\n'), ((662, 679), 'numpy.deg2rad', 'np.deg2rad', (['e_lng'], {}), '(e_lng)\n', (672, 679), True, 'import numpy as np\n'), ((925, 974), 'os.path.join', 'os.path.join', (['input_file_path', 'f"""{suffix}_df.pck"""'], {}), "(input_file_path, f'{suffix}_df.pck')\n", (937, 974), False, 'import os\n'), ((998, 1051), 'os.path.join', 'os.path.join', (['output_file_path', 'f"""{suffix}_final.pck"""'], {}), "(output_file_path, f'{suffix}_final.pck')\n", (1010, 1051), False, 'import os\n'), ((1061, 1091), 'pandas.read_pickle', 'pd.read_pickle', (['input_filename'], {}), '(input_filename)\n', (1075, 1091), True, 'import pandas as pd\n'), ((3660, 3715), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (3679, 3715), False, 'import logging\n'), ((3946, 3992), 'os.path.join', 'os.path.join', (['project_dir', '"""data"""', '"""processed"""'], {}), "(project_dir, 'data', 'processed')\n", (3958, 3992), False, 'import os\n'), ((4016, 4058), 'os.path.join', 'os.path.join', (['project_dir', '"""data"""', '"""final"""'], {}), "(project_dir, 'data', 'final')\n", (4028, 4058), False, 'import os\n'), ((4063, 4106), 'os.makedirs', 'os.makedirs', (['input_file_path'], {'exist_ok': '(True)'}), '(input_file_path, exist_ok=True)\n', (4074, 4106), False, 'import os\n'), ((4111, 4155), 'os.makedirs', 
'os.makedirs', (['output_file_path'], {'exist_ok': '(True)'}), '(output_file_path, exist_ok=True)\n', (4122, 4155), False, 'import os\n'), ((689, 716), 'numpy.sin', 'np.sin', (['((e_lat - s_lat) / 2)'], {}), '((e_lat - s_lat) / 2)\n', (695, 716), True, 'import numpy as np\n'), ((819, 829), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (826, 829), True, 'import numpy as np\n'), ((266, 280), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (270, 280), False, 'from pathlib import Path\n'), ((724, 737), 'numpy.cos', 'np.cos', (['s_lat'], {}), '(s_lat)\n', (730, 737), True, 'import numpy as np\n'), ((740, 753), 'numpy.cos', 'np.cos', (['e_lat'], {}), '(e_lat)\n', (746, 753), True, 'import numpy as np\n'), ((756, 783), 'numpy.sin', 'np.sin', (['((e_lng - s_lng) / 2)'], {}), '((e_lng - s_lng) / 2)\n', (762, 783), True, 'import numpy as np\n'), ((1844, 1872), 'pandas.to_datetime', 'pd.to_datetime', (['"""1950-01-01"""'], {}), "('1950-01-01')\n", (1858, 1872), True, 'import pandas as pd\n')] |
from glob import glob
from os import chdir
from os.path import dirname
import pandas as pd
import numpy as np
from pandas.core.frame import DataFrame
import matplotlib.pyplot as plt
from numpy.fft import rfft, rfftfreq
from scipy import signal
from math import pi
# Filter configuration. The first WINDOW assignment is a quick toggle that is
# immediately overridden by the second -- only "hamming" currently takes
# effect; reorder the two lines to switch to the Savitzky-Golay filter.
WINDOW = "sg" # sg for savgol filter
WINDOW = "hamming"
WINDOW_SIZE = 41  # filter length in samples
CUTOFF_FREQ = 2.5 # Hz
SG_POLYORDER = 2  # polynomial order for the Savitzky-Golay variant
SG_PARAMS = (WINDOW_SIZE, SG_POLYORDER)
USING_SG = WINDOW.lower() == "sg"  # dispatch flag read by filter() below
def normalize(data):
    """Rescale *data* linearly onto [0, 1] (minimum -> 0, maximum -> 1)."""
    lo, hi = np.min(data), np.max(data)
    return (data - lo) / (hi - lo)
def freq_analysis(x, y):
    """Single-sided amplitude spectrum of *y* sampled at the rate implied by *x*.

    Applies a Hamming window before the real FFT and normalises magnitudes
    by the number of samples.

    Returns
    -------
    (freq, yf) : pair of ndarrays
        Frequency bins in Hz and the corresponding magnitudes.
    """
    n = len(y)
    rate = freq_sample(x)
    bins = rfftfreq(n, d=1 / rate)
    window = signal.get_window("hamming", n)
    magnitudes = np.abs(rfft(y * window)) / n
    return bins, magnitudes
def freq_sample(x):
    """Sampling frequency (Hz) inferred from the spacing of the first two samples."""
    step = x[1] - x[0]
    return 1 / step
def plot_freq_analysis(x, y):
    """Open a new figure showing the magnitude spectrum of *y* in dB."""
    plt.figure()
    freqs, magnitudes = freq_analysis(x, y)
    plt.plot(freqs, 20 * np.log10(magnitudes), "-o")
    plt.ylabel("Magnitude [dB]")
    plt.xlabel("Frequency [Hz]")
def plot_filter_response(x):
    """Open a new figure with the frequency response of the configured filter.

    The FIR taps come from the Savitzky-Golay coefficients when USING_SG is
    set, otherwise from a windowed-sinc design (firwin) using the
    module-level cutoff and window settings.
    """
    plt.figure()
    fs = freq_sample(x)
    nyquist = 0.5 * fs
    fir = (
        signal.savgol_coeffs(*SG_PARAMS)
        if USING_SG
        else signal.firwin(WINDOW_SIZE, CUTOFF_FREQ / nyquist, window=WINDOW)
    )
    w, h = signal.freqz(fir)
    plt.plot(w * fs / 2 / pi, 20 * np.log10(abs(h)), label=WINDOW)
    plt.title("FIR filter frequency response")
    plt.ylabel("Amplitude [dB]")
    plt.xlabel("Frequency [Hz]")
    plt.grid(True)
def filter(x, y):
    """Low-pass filter *y* (sampled at times *x*) with the configured FIR filter.

    NOTE(review): this shadows the builtin ``filter``; renaming would touch
    every caller, so the name is kept as-is.
    """
    fs = freq_sample(x)
    if USING_SG:
        return signal.savgol_filter(y, SG_PARAMS[0], SG_PARAMS[1])
    # Cutoff is normalized to the Nyquist frequency,
    # which is half the sampling rate.
    taps = signal.firwin(WINDOW_SIZE, CUTOFF_FREQ / (0.5 * fs), window=WINDOW)
    return signal.filtfilt(taps, 1, y)
def process_file(file_path):
    """Load one thermogravimetry export and add filtered TG/DTG/DDTG columns.

    Reads the instrument CSV (cp1250-encoded, 34 header rows skipped),
    renames the relevant columns, converts temperature to kelvin, then
    derives the smoothed mass signal and its first and second time
    derivatives (each smoothed again after differentiation).
    """
    print(f"Processing {file_path}")
    data = pd.read_csv(file_path, sep=",", encoding="cp1250", skiprows=34)
    renames = {
        data.columns[0]: "temperature",
        data.columns[1]: "time",
        data.columns[3]: "mass",
    }
    data.rename(columns=renames, inplace=True)
    data.temperature += 273.15  # degrees C -> K
    data["mass_filtred"] = filter(data.time, data.mass)
    data["mass_diff"] = np.gradient(data.mass_filtred, data.time)
    data["mass_diff_filtred"] = filter(data.time, data.mass_diff)
    data["mass_diff2"] = np.gradient(data.mass_diff_filtred, data.time)
    data["mass_diff2_filtred"] = filter(data.time, data.mass_diff2)
    return data
def plot(df: DataFrame):
    """Plot TG, DTG and DDTG curves (raw + filtered), their spectra, a
    normalized overlay of all three filtered signals, and the filter's
    frequency response."""
    plt.title("TG")
    plt.plot(df.time, df.mass, "o")
    plt.plot(df.time, df.mass_filtred, "-")
    plot_freq_analysis(df.time, df.mass)
    plt.figure()
    plt.title("DTG")
    plt.plot(df.time, df.mass_diff, "o")
    plt.plot(df.time, df.mass_diff_filtred, "-")
    plot_freq_analysis(df.time, df.mass_diff)
    plt.figure()
    plt.title("DDTG")
    plt.plot(df.time, df.mass_diff2, "o")
    plt.plot(df.time, df.mass_diff2_filtred, "-")
    # Bug fix: this panel previously analysed df.mass_diff (the first
    # derivative) again; the DDTG spectrum must use the second derivative.
    plot_freq_analysis(df.time, df.mass_diff2)
    plt.figure()
    plt.plot(df.time, normalize(df.mass_filtred), "-r", label="TG")
    plt.plot(df.time, normalize(df.mass_diff_filtred), "-g", label="DTG")
    plt.plot(df.time, normalize(df.mass_diff2_filtred), "-b", label="DDTG")
    plt.legend()
    plot_filter_response(df.time)
def main():
    """Process every .txt export next to this script and display the plots."""
    # Work relative to the script's own directory so the glob finds the data.
    chdir(dirname(__file__))
    for file_path in glob("*.txt"):
        plot(process_file(file_path))
        # TODO: remove when done
        break
    plt.show()
# Run the full pipeline only when executed as a script.
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.grid",
"numpy.log10",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"scipy.signal.filtfilt",
"scipy.signal.savgol_filter",
"numpy.gradient",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.fft.rfft",
"numpy.min",
"scipy.signal.savgol_coeffs",
"... | [((826, 838), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (836, 838), True, 'import matplotlib.pyplot as plt\n'), ((916, 944), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude [dB]"""'], {}), "('Magnitude [dB]')\n", (926, 944), True, 'import matplotlib.pyplot as plt\n'), ((949, 977), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (959, 977), True, 'import matplotlib.pyplot as plt\n'), ((1013, 1025), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1023, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1241, 1258), 'scipy.signal.freqz', 'signal.freqz', (['fir'], {}), '(fir)\n', (1253, 1258), False, 'from scipy import signal\n'), ((1331, 1373), 'matplotlib.pyplot.title', 'plt.title', (['"""FIR filter frequency response"""'], {}), "('FIR filter frequency response')\n", (1340, 1373), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1406), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude [dB]"""'], {}), "('Amplitude [dB]')\n", (1388, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1411, 1439), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (1421, 1439), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1458), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1452, 1458), True, 'import matplotlib.pyplot as plt\n'), ((1893, 1956), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'sep': '""","""', 'encoding': '"""cp1250"""', 'skiprows': '(34)'}), "(file_path, sep=',', encoding='cp1250', skiprows=34)\n", (1904, 1956), True, 'import pandas as pd\n'), ((2242, 2279), 'numpy.gradient', 'np.gradient', (['df.mass_filtred', 'df.time'], {}), '(df.mass_filtred, df.time)\n', (2253, 2279), True, 'import numpy as np\n'), ((2363, 2405), 'numpy.gradient', 'np.gradient', (['df.mass_diff_filtred', 'df.time'], {}), '(df.mass_diff_filtred, df.time)\n', (2374, 2405), True, 'import numpy as np\n'), ((2514, 
2529), 'matplotlib.pyplot.title', 'plt.title', (['"""TG"""'], {}), "('TG')\n", (2523, 2529), True, 'import matplotlib.pyplot as plt\n'), ((2534, 2565), 'matplotlib.pyplot.plot', 'plt.plot', (['df.time', 'df.mass', '"""o"""'], {}), "(df.time, df.mass, 'o')\n", (2542, 2565), True, 'import matplotlib.pyplot as plt\n'), ((2570, 2609), 'matplotlib.pyplot.plot', 'plt.plot', (['df.time', 'df.mass_filtred', '"""-"""'], {}), "(df.time, df.mass_filtred, '-')\n", (2578, 2609), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2668), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2666, 2668), True, 'import matplotlib.pyplot as plt\n'), ((2673, 2689), 'matplotlib.pyplot.title', 'plt.title', (['"""DTG"""'], {}), "('DTG')\n", (2682, 2689), True, 'import matplotlib.pyplot as plt\n'), ((2694, 2730), 'matplotlib.pyplot.plot', 'plt.plot', (['df.time', 'df.mass_diff', '"""o"""'], {}), "(df.time, df.mass_diff, 'o')\n", (2702, 2730), True, 'import matplotlib.pyplot as plt\n'), ((2735, 2779), 'matplotlib.pyplot.plot', 'plt.plot', (['df.time', 'df.mass_diff_filtred', '"""-"""'], {}), "(df.time, df.mass_diff_filtred, '-')\n", (2743, 2779), True, 'import matplotlib.pyplot as plt\n'), ((2831, 2843), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2848, 2865), 'matplotlib.pyplot.title', 'plt.title', (['"""DDTG"""'], {}), "('DDTG')\n", (2857, 2865), True, 'import matplotlib.pyplot as plt\n'), ((2870, 2907), 'matplotlib.pyplot.plot', 'plt.plot', (['df.time', 'df.mass_diff2', '"""o"""'], {}), "(df.time, df.mass_diff2, 'o')\n", (2878, 2907), True, 'import matplotlib.pyplot as plt\n'), ((2912, 2957), 'matplotlib.pyplot.plot', 'plt.plot', (['df.time', 'df.mass_diff2_filtred', '"""-"""'], {}), "(df.time, df.mass_diff2_filtred, '-')\n", (2920, 2957), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3021), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3019, 3021), True, 'import matplotlib.pyplot 
as plt\n'), ((3244, 3256), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3254, 3256), True, 'import matplotlib.pyplot as plt\n'), ((3409, 3422), 'glob.glob', 'glob', (['"""*.txt"""'], {}), "('*.txt')\n", (3413, 3422), False, 'from glob import glob\n'), ((3530, 3540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3538, 3540), True, 'import matplotlib.pyplot as plt\n'), ((1082, 1114), 'scipy.signal.savgol_coeffs', 'signal.savgol_coeffs', (['*SG_PARAMS'], {}), '(*SG_PARAMS)\n', (1102, 1114), False, 'from scipy import signal\n'), ((1139, 1206), 'scipy.signal.firwin', 'signal.firwin', (['WINDOW_SIZE', '(CUTOFF_FREQ / (0.5 * fs))'], {'window': 'WINDOW'}), '(WINDOW_SIZE, CUTOFF_FREQ / (0.5 * fs), window=WINDOW)\n', (1152, 1206), False, 'from scipy import signal\n'), ((1535, 1586), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['y', 'SG_PARAMS[0]', 'SG_PARAMS[1]'], {}), '(y, SG_PARAMS[0], SG_PARAMS[1])\n', (1555, 1586), False, 'from scipy import signal\n'), ((1708, 1775), 'scipy.signal.firwin', 'signal.firwin', (['WINDOW_SIZE', '(CUTOFF_FREQ / (0.5 * fs))'], {'window': 'WINDOW'}), '(WINDOW_SIZE, CUTOFF_FREQ / (0.5 * fs), window=WINDOW)\n', (1721, 1775), False, 'from scipy import signal\n'), ((1791, 1815), 'scipy.signal.filtfilt', 'signal.filtfilt', (['w', '(1)', 'y'], {}), '(w, 1, y)\n', (1806, 1815), False, 'from scipy import signal\n'), ((3368, 3385), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (3375, 3385), False, 'from os.path import dirname\n'), ((499, 511), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (505, 511), True, 'import numpy as np\n'), ((516, 528), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (522, 528), True, 'import numpy as np\n'), ((531, 543), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (537, 543), True, 'import numpy as np\n'), ((696, 707), 'numpy.fft.rfft', 'rfft', (['(y * w)'], {}), '(y * w)\n', (700, 707), False, 'from numpy.fft import rfft, rfftfreq\n'), ((892, 904), 
'numpy.log10', 'np.log10', (['yf'], {}), '(yf)\n', (900, 904), True, 'import numpy as np\n')] |
import unittest
import abc
import os
import torch
import numpy as np
from configs import BaseConfig, env
from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, \
all_subclasses_not_abstract, hasattrs
from utils.path import get_filename, get_path, comp_path
from utils.image import _2dto3d, sobel3d, laplace3d
from utils.medical import cbf
__all__ = ['TestLogger', 'TestSummary', 'TestCommon', 'TestPath', 'TestImage', 'TestMedical']
class TestCommon(unittest.TestCase):
    """Unit tests for the general-purpose helpers in ``utils.common``."""
    def test_deepcopy(self):
        # deepcopy(obj) copies attribute containers; mutating the copy must
        # not affect the original.
        class s:
            pass
        a = s()
        a.b = 1
        a.c = [1, 2]
        t1 = deepcopy(a)
        t1.c[0] = 3
        self.assertEqual(a.c, [1, 2])
        self.assertEqual(t1.c, [3, 2])
        # Attributes named in the second argument are shared, not copied:
        # mutating the copy is visible on the original.
        t2 = deepcopy(a, ['c'])
        t2.c[0] = 5
        self.assertEqual(a.c, [5, 2])
        self.assertEqual(t2.c, [5, 2])
    def test_merge_dict(self):
        # merge_dict accumulates values per key into lists; torch scalars
        # compare equal to plain numbers here.
        a = dict()
        merge_dict(a, dict(c=torch.tensor(1)))
        self.assertEqual(a, dict(c=[1]))
        merge_dict(a, dict(c=1.0))
        self.assertEqual(a, dict(c=[1, 1.0]))
    def test_all_subclasses_(self):
        # _all_subclasses returns the transitive subclasses (as a set-like
        # collection, order not asserted).
        A = type('A', (object,), dict())
        B = type('B', (A,), dict())
        C = type('C', (B,), dict())
        D = type('D', (A,), dict())
        subclasses = _all_subclasses(A)
        self.assertTrue(B in subclasses)
        self.assertTrue(C in subclasses)
        self.assertTrue(D in subclasses)
        self.assertEqual(len(subclasses), 3)
    def test_cmp_class(self):
        # Classes are compared by definition order: earlier-defined < later.
        class A:
            pass
        class B:
            pass
        self.assertEqual(cmp_class(A, B), -1)
        self.assertEqual(cmp_class(B, A), 1)
        self.assertEqual(cmp_class(A, A), 0)
    def test_all_subclasses(self):
        # all_subclasses returns the transitive subclasses in a
        # deterministic order.
        A = type('A', (object,), dict())
        B = type('B', (A,), dict())
        C = type('C', (B,), dict())
        D = type('D', (A,), dict())
        subclasses = all_subclasses(A)
        self.assertEqual(subclasses, [B, C, D])
    def test_is_abstract(self):
        # A class is abstract only if it still has unimplemented
        # abstractmethods; merely inheriting abc.ABC is not enough.
        class A(abc.ABC):
            @abc.abstractmethod
            def t(self): pass
        self.assertTrue(is_abstract(A))
        class B(A):
            def t(self): pass
        self.assertFalse(is_abstract(B))
        class C(abc.ABC):
            pass
        self.assertFalse(is_abstract(C))
    def test_all_subclasses_not_abstract(self):
        # Only concrete subclasses are returned; D (still abstract) is
        # excluded while its concrete descendant E is included.
        class A(abc.ABC):
            @abc.abstractmethod
            def t(self): pass
        class B(A):
            def t(self): pass
        class C(B):
            pass
        class D(A):
            pass
        class E(D):
            def t(self): pass
        subclasses = all_subclasses_not_abstract(A)
        self.assertTrue(B in subclasses)
        self.assertTrue(C in subclasses)
        self.assertTrue(E in subclasses)
        self.assertEqual(len(subclasses), 3)
    def test_hasattrs(self):
        # hasattrs is True only if *all* listed attributes exist.
        a = BaseConfig(dict(a=1, b=2, c=3))
        self.assertTrue(hasattrs(a, ['a', 'b', 'c']))
        self.assertFalse(hasattrs(a, ['c', 'd']))
class TestPath(unittest.TestCase):
    """Unit tests for the path helpers in ``utils.path``."""
    def test_get_filename(self):
        # get_filename strips the directory part and returns the base name.
        file_path = "/user/file"
        self.assertEqual(get_filename(file_path), 'file')
    def test_get_path(self):
        # get_path joins the save folder with a name composed from the
        # model/run/data config _path values and the cross-validation index.
        class p:
            _path: str
        m_cfg = p()
        m_cfg._path = 'm'
        d_cfg = p()
        d_cfg._path = 'd'
        d_cfg.index_cross = 1
        r_cfg = p()
        r_cfg._path = 'r'
        self.assertEqual(get_path(m_cfg, d_cfg, r_cfg),
                         os.path.join(env.getdir(env.paths.save_folder), 'm-r-d-1'))
    def test_comp_path(self):
        # comp_path substitutes zero-padded indexes into the '??' slots of
        # every path template; padding width follows the counts.
        paths = dict(a='/user/file_*??_??', b='*??_??')
        counts = [2, 10]
        indexes_1 = [1, 10]
        self.assertEqual(comp_path(paths, counts, indexes_1), dict(a='/user/file_*01_10', b='*01_10'))
        indexes_2 = [2, 1]
        self.assertEqual(comp_path(paths, counts, indexes_2), dict(a='/user/file_*02_01', b='*02_01'))
    def test_real_config_path(self):
        # TODO test
        pass
class TestLogger(unittest.TestCase):
    """Placeholder suite for the logging utilities (not yet implemented)."""
    # TODO test logger
    def testInit(self):
        pass
class TestSummary(unittest.TestCase):
    """Placeholder suite for the summary utilities (not yet implemented)."""
    # TODO test summary
    def testInit(self):
        pass
class TestImage(unittest.TestCase):
    """Unit tests for the image filters in ``utils.image``.

    The filters are checked only for shape preservation, on both torch
    tensors and NumPy arrays.
    """
    def test_2dto3d(self):
        # _2dto3d inserts a depth axis: batch dim kept, trailing dims kept.
        a = np.random.rand(1, 3, 6, 6)
        b = _2dto3d(a)
        self.assertEqual(b.shape[0], a.shape[0])
        self.assertEqual(b.shape[2:], a.shape[1:])
    def test_sobel3d(self):
        # TODO more test
        a = torch.randn(1, 3, 6, 6)
        b = np.random.rand(2, 3, 6, 6)
        sobel = sobel3d(a)
        self.assertEqual(a.shape, sobel.shape)
        sobel_np = sobel3d(b)
        self.assertEqual(b.shape, sobel_np.shape)
    def test_laplace3d(self):
        # TODO more test
        a = torch.randn(1, 3, 6, 6)
        b = np.random.rand(2, 3, 6, 6)
        laplace = laplace3d(a)
        self.assertEqual(a.shape, laplace.shape)
        laplace_np = laplace3d(b)
        self.assertEqual(b.shape, laplace_np.shape)
class TestMedical(unittest.TestCase):
    """Unit tests for the medical-imaging helpers in ``utils.medical``."""
    def test_cbf(self):
        # TODO more test
        # cbf preserves the input shape and keeps the autograd graph alive
        # for torch inputs; it also accepts plain NumPy arrays.
        a = torch.randn((2, 3, 6, 6), requires_grad=True)
        b = torch.randn((2, 3, 6, 6))
        c = cbf(a, b)
        self.assertEqual(c.shape, a.shape)
        self.assertTrue(c.requires_grad)
        a = np.random.randn(2, 3, 6, 6)
        b = np.random.randn(2, 3, 6, 6)
        c = cbf(a, b)
        self.assertEqual(c.shape, a.shape)
class TestDDP(unittest.TestCase):
    """Placeholder suite for distributed-data-parallel helpers (not yet implemented)."""
    def test_zero_first(self):
        # TODO test DDP
        pass
    def test_sequence(self):
        # TODO test DDP
        pass
# Run the whole suite verbosely when this module is executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| [
"utils.common.all_subclasses",
"numpy.random.rand",
"utils.common.cmp_class",
"unittest.main",
"utils.common.deepcopy",
"utils.image.sobel3d",
"utils.common.hasattrs",
"utils.image._2dto3d",
"torch.randn",
"utils.path.get_path",
"utils.common._all_subclasses",
"utils.path.get_filename",
"uti... | [((5718, 5744), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (5731, 5744), False, 'import unittest\n'), ((660, 671), 'utils.common.deepcopy', 'deepcopy', (['a'], {}), '(a)\n', (668, 671), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((783, 801), 'utils.common.deepcopy', 'deepcopy', (['a', "['c']"], {}), "(a, ['c'])\n", (791, 801), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((1329, 1347), 'utils.common._all_subclasses', '_all_subclasses', (['A'], {}), '(A)\n', (1344, 1347), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((1960, 1977), 'utils.common.all_subclasses', 'all_subclasses', (['A'], {}), '(A)\n', (1974, 1977), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((2701, 2731), 'utils.common.all_subclasses_not_abstract', 'all_subclasses_not_abstract', (['A'], {}), '(A)\n', (2728, 2731), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((4345, 4371), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)', '(6)', '(6)'], {}), '(1, 3, 6, 6)\n', (4359, 4371), True, 'import numpy as np\n'), ((4384, 4394), 'utils.image._2dto3d', '_2dto3d', (['a'], {}), '(a)\n', (4391, 4394), False, 'from utils.image import _2dto3d, sobel3d, laplace3d\n'), ((4561, 4584), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(6)', '(6)'], {}), '(1, 3, 6, 6)\n', (4572, 4584), False, 'import torch\n'), ((4597, 4623), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(6)', '(6)'], {}), '(2, 3, 6, 6)\n', (4611, 
4623), True, 'import numpy as np\n'), ((4640, 4650), 'utils.image.sobel3d', 'sobel3d', (['a'], {}), '(a)\n', (4647, 4650), False, 'from utils.image import _2dto3d, sobel3d, laplace3d\n'), ((4717, 4727), 'utils.image.sobel3d', 'sobel3d', (['b'], {}), '(b)\n', (4724, 4727), False, 'from utils.image import _2dto3d, sobel3d, laplace3d\n'), ((4846, 4869), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(6)', '(6)'], {}), '(1, 3, 6, 6)\n', (4857, 4869), False, 'import torch\n'), ((4882, 4908), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(6)', '(6)'], {}), '(2, 3, 6, 6)\n', (4896, 4908), True, 'import numpy as np\n'), ((4927, 4939), 'utils.image.laplace3d', 'laplace3d', (['a'], {}), '(a)\n', (4936, 4939), False, 'from utils.image import _2dto3d, sobel3d, laplace3d\n'), ((5010, 5022), 'utils.image.laplace3d', 'laplace3d', (['b'], {}), '(b)\n', (5019, 5022), False, 'from utils.image import _2dto3d, sobel3d, laplace3d\n'), ((5177, 5222), 'torch.randn', 'torch.randn', (['(2, 3, 6, 6)'], {'requires_grad': '(True)'}), '((2, 3, 6, 6), requires_grad=True)\n', (5188, 5222), False, 'import torch\n'), ((5235, 5260), 'torch.randn', 'torch.randn', (['(2, 3, 6, 6)'], {}), '((2, 3, 6, 6))\n', (5246, 5260), False, 'import torch\n'), ((5273, 5282), 'utils.medical.cbf', 'cbf', (['a', 'b'], {}), '(a, b)\n', (5276, 5282), False, 'from utils.medical import cbf\n'), ((5380, 5407), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(6)', '(6)'], {}), '(2, 3, 6, 6)\n', (5395, 5407), True, 'import numpy as np\n'), ((5420, 5447), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(6)', '(6)'], {}), '(2, 3, 6, 6)\n', (5435, 5447), True, 'import numpy as np\n'), ((5460, 5469), 'utils.medical.cbf', 'cbf', (['a', 'b'], {}), '(a, b)\n', (5463, 5469), False, 'from utils.medical import cbf\n'), ((1642, 1657), 'utils.common.cmp_class', 'cmp_class', (['A', 'B'], {}), '(A, B)\n', (1651, 1657), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, 
all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((1688, 1703), 'utils.common.cmp_class', 'cmp_class', (['B', 'A'], {}), '(B, A)\n', (1697, 1703), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((1733, 1748), 'utils.common.cmp_class', 'cmp_class', (['A', 'A'], {}), '(A, A)\n', (1742, 1748), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((2171, 2185), 'utils.common.is_abstract', 'is_abstract', (['A'], {}), '(A)\n', (2182, 2185), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((2263, 2277), 'utils.common.is_abstract', 'is_abstract', (['B'], {}), '(B)\n', (2274, 2277), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((2348, 2362), 'utils.common.is_abstract', 'is_abstract', (['C'], {}), '(C)\n', (2359, 2362), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((2999, 3027), 'utils.common.hasattrs', 'hasattrs', (['a', "['a', 'b', 'c']"], {}), "(a, ['a', 'b', 'c'])\n", (3007, 3027), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((3054, 3077), 'utils.common.hasattrs', 'hasattrs', (['a', "['c', 'd']"], {}), "(a, ['c', 'd'])\n", (3062, 3077), False, 'from utils.common import deepcopy, merge_dict, _all_subclasses, cmp_class, all_subclasses, is_abstract, all_subclasses_not_abstract, hasattrs\n'), ((3208, 3231), 'utils.path.get_filename', 'get_filename', (['file_path'], {}), '(file_path)\n', (3220, 3231), False, 'from 
utils.path import get_filename, get_path, comp_path\n'), ((3506, 3535), 'utils.path.get_path', 'get_path', (['m_cfg', 'd_cfg', 'r_cfg'], {}), '(m_cfg, d_cfg, r_cfg)\n', (3514, 3535), False, 'from utils.path import get_filename, get_path, comp_path\n'), ((3788, 3823), 'utils.path.comp_path', 'comp_path', (['paths', 'counts', 'indexes_1'], {}), '(paths, counts, indexes_1)\n', (3797, 3823), False, 'from utils.path import get_filename, get_path, comp_path\n'), ((3918, 3953), 'utils.path.comp_path', 'comp_path', (['paths', 'counts', 'indexes_2'], {}), '(paths, counts, indexes_2)\n', (3927, 3953), False, 'from utils.path import get_filename, get_path, comp_path\n'), ((3575, 3608), 'configs.env.getdir', 'env.getdir', (['env.paths.save_folder'], {}), '(env.paths.save_folder)\n', (3585, 3608), False, 'from configs import BaseConfig, env\n'), ((980, 995), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (992, 995), False, 'import torch\n')] |
import numpy as np
from gym import spaces
from causal_world.utils.env_utils import clip
class TriFingerAction(object):

    def __init__(self, action_mode="joint_positions", normalize_actions=True):
        """
        Holds the trifinger robot's action limits and exposes its action space.

        :param action_mode: (str) one of "joint_positions", "joint_torques" or
                            "end_effector_positions".
        :param normalize_actions: (bool) true if actions should be normalized.
        """
        self.normalize_actions = normalize_actions
        self.max_motor_torque = 0.36
        self.low = None
        self.high = None
        num_fingers = 3
        self.action_mode = action_mode
        # Per-joint position limits, repeated once per finger.
        self.joint_positions_lower_bounds = np.array(
            [-1.57, -1.2, -3.0] * 3)
        self.joint_positions_upper_bounds = np.array(
            [1.0, 1.57, 3.0] * 3)
        self.joint_positions_raised = np.array([-1.56, -0.08, -2.7] * 3)
        # Pick the raw action bounds that match the requested mode.
        if action_mode == "joint_positions":
            lb = self.joint_positions_lower_bounds
            ub = self.joint_positions_upper_bounds
        elif action_mode == "joint_torques":
            lb = np.array([-self.max_motor_torque] * 3 * num_fingers)
            ub = np.array([self.max_motor_torque] * 3 * num_fingers)
        elif action_mode == "end_effector_positions":
            lb = np.array([-0.5, -0.5, 0] * 3)
            ub = np.array([0.5, 0.5, 0.5] * 3)
        else:
            raise ValueError(
                "No valid action_mode specified: {}".format(action_mode))
        self.set_action_space(lb, ub)

    def set_action_space(self, lower_bounds, upper_bounds):
        """
        Record the raw (unnormalized) action bounds.

        :param lower_bounds: (list) array of the lower bounds of actions.
        :param upper_bounds: (list) array of the upper bounds of actions.
        :return:
        """
        assert len(lower_bounds) == len(upper_bounds)
        self.low = lower_bounds
        self.high = upper_bounds

    def get_action_space(self):
        """
        :return: (gym.spaces.Box) the current action space; unit box when
                 actions are normalized, raw bounds otherwise.
        """
        if not self.normalize_actions:
            return spaces.Box(low=self.low, high=self.high, dtype=np.float64)
        return spaces.Box(low=-np.ones(len(self.low)),
                          high=np.ones(len(self.high)),
                          dtype=np.float64)

    def is_normalized(self):
        """
        :return: (bool) true if actions are normalized, false otherwise.
        """
        return self.normalize_actions

    def satisfy_constraints(self, action):
        """
        :param action: (nd.array) action to check against the limits.
        :return: (bool) true when every component lies strictly inside them.
        """
        if self.normalize_actions:
            lower, upper = -1.0, 1.0
        else:
            lower, upper = self.low, self.high
        return (action > lower).all() and (action < upper).all()

    def clip_action(self, action):
        """
        :param action: (nd.array) action to clip to the limits.
        :return: (nd.array) clipped action.
        """
        if not self.normalize_actions:
            return clip(action, self.low, self.high)
        return clip(action, -1.0, 1.0)

    def normalize_action(self, action):
        """
        :param action: (nd.array) raw action to map into [-1, 1].
        :return: (nd.array) normalized action.
        """
        return 2.0 * (action - self.low) / (self.high - self.low) - 1.0

    def denormalize_action(self, action):
        """
        :param action: (nd.array) normalized action in [-1, 1].
        :return: (nd.array) action mapped back to the raw bounds.
        """
        return self.low + (action + 1.0) / 2.0 * \
               (self.high - self.low)
| [
"numpy.array",
"causal_world.utils.env_utils.clip",
"gym.spaces.Box"
] | [((770, 803), 'numpy.array', 'np.array', (['([-1.57, -1.2, -3.0] * 3)'], {}), '([-1.57, -1.2, -3.0] * 3)\n', (778, 803), True, 'import numpy as np\n'), ((861, 891), 'numpy.array', 'np.array', (['([1.0, 1.57, 3.0] * 3)'], {}), '([1.0, 1.57, 3.0] * 3)\n', (869, 891), True, 'import numpy as np\n'), ((944, 978), 'numpy.array', 'np.array', (['([-1.56, -0.08, -2.7] * 3)'], {}), '([-1.56, -0.08, -2.7] * 3)\n', (952, 978), True, 'import numpy as np\n'), ((2430, 2488), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'self.low', 'high': 'self.high', 'dtype': 'np.float64'}), '(low=self.low, high=self.high, dtype=np.float64)\n', (2440, 2488), False, 'from gym import spaces\n'), ((3307, 3330), 'causal_world.utils.env_utils.clip', 'clip', (['action', '(-1.0)', '(1.0)'], {}), '(action, -1.0, 1.0)\n', (3311, 3330), False, 'from causal_world.utils.env_utils import clip\n'), ((3364, 3397), 'causal_world.utils.env_utils.clip', 'clip', (['action', 'self.low', 'self.high'], {}), '(action, self.low, self.high)\n', (3368, 3397), False, 'from causal_world.utils.env_utils import clip\n'), ((1219, 1271), 'numpy.array', 'np.array', (['([-self.max_motor_torque] * 3 * num_fingers)'], {}), '([-self.max_motor_torque] * 3 * num_fingers)\n', (1227, 1271), True, 'import numpy as np\n'), ((1299, 1350), 'numpy.array', 'np.array', (['([self.max_motor_torque] * 3 * num_fingers)'], {}), '([self.max_motor_torque] * 3 * num_fingers)\n', (1307, 1350), True, 'import numpy as np\n'), ((1433, 1462), 'numpy.array', 'np.array', (['([-0.5, -0.5, 0] * 3)'], {}), '([-0.5, -0.5, 0] * 3)\n', (1441, 1462), True, 'import numpy as np\n'), ((1490, 1519), 'numpy.array', 'np.array', (['([0.5, 0.5, 0.5] * 3)'], {}), '([0.5, 0.5, 0.5] * 3)\n', (1498, 1519), True, 'import numpy as np\n')] |
""" pixtosky - A module to perform coordinate transformation from pixel coordinates
in one image to pixel coordinates in another frame
:Authors: <NAME>
:License: :doc:`LICENSE`
PARAMETERS
----------
inimage : str
full filename with path of input image, an extension name ['sci',1] should be
provided if input is a multi-extension FITS file
outimage : str, optional
full filename with path of output image, an extension name ['sci',1] should be
provided if output is a multi-extension FITS file. If no image gets
specified, the input image will be used to generate a default output
WCS using stwcs.distortion.util.output_wcs().
direction : str
Direction of transform (forward or backward). The 'forward' transform
takes the pixel positions (assumed to be from the 'input' image) and determines
their position in the 'output' image. The 'backward' transform converts
the pixel positions (assumed to be from the 'output' image) into pixel
positions in the 'input' image.
Optional Parameters
-------------------
x : float, optional
X position from image
y : float, optional
Y position from image
coords : str, deprecated
[DEPRECATED] full filename with path of file with x,y coordinates
Filename given here will be *ignored* if a file has been specified
in `coordfile` parameter.
coordfile : str, optional
full filename with path of file with starting x,y coordinates
colnames : str, optional
comma separated list of column names from 'coordfile' files
containing x,y coordinates, respectively. Will default to
first two columns if None are specified. Column names for ASCII
files will use 'c1','c2',... convention.
separator : str, optional
non-blank separator used as the column delimiter in the coordfile file
precision : int, optional
Number of floating-point digits in output values
output : str, optional
Name of output file with results, if desired
verbose : bool
    Print out full list of transformation results (default: True)
RETURNS
-------
outx : float
X position of transformed pixel. If more than 1 input value, then it
will be a numpy array.
outy : float
Y position of transformed pixel. If more than 1 input value, then it
will be a numpy array.
NOTES
-----
This module performs a full distortion-corrected coordinate transformation
based on all WCS keywords and any recognized distortion keywords from the
input image header.
Usage
-----
It can be called from within Python using the syntax::
>>> from drizzlepac import pixtopix
>>> outx,outy = pixtopix.tran("input_flt.fits[sci,1]",
            "output_drz.fits[sci,1]","forward",100,100)
EXAMPLES
--------
1. The following command will transform the position 256,256 from
'input_flt.fits[sci,1]' into a position on the output image
'output_drz.fits[sci,1]' using::
>>> from drizzlepac import pixtopix
>>> outx,outy = pixtopix.tran("input_file_flt.fits[sci,1]",
            "output_drz.fits[sci,1]","forward", 256,256)
2. The set of X,Y positions from 'output_drz.fits[sci,1]' stored as
the 3rd and 4th columns from the ASCII file 'xy_sci1.dat'
will be transformed into pixel positions from 'input_flt.fits[sci,1]'
and written out to 'xy_flt1.dat' using::
>>> from drizzlepac import pixtopix
>>> x,y = pixtopix.tran("input_flt.fits[sci,1]", "output_drz.fits[sci,1]",
"backward", coordfile='xy_sci1.dat', colnames=['c3','c4'],
output="xy_flt1.dat")
"""
from __future__ import absolute_import, division, print_function # confidence medium
import os,copy
import warnings
import numpy as np
from stsci.tools import fileutil, teal
from . import wcs_functions
from . import util
from stwcs import wcsutil, distortion
# This is specifically NOT intended to match the package-wide version information.
__version__ = '0.1'
__version_date__ = '1-Mar-2011'
__taskname__ = 'pixtopix'
def tran(inimage,outimage,direction='forward',x=None,y=None,
        coords=None, coordfile=None,colnames=None,separator=None,
        precision=6, output=None,verbose=True):
    """Transform pixel positions between two images using the full
    distortion-corrected WCS models (STWCS) read from each image's header.
    """
    single_coord = False

    # Honor the deprecated `coords` argument only when `coordfile` was not given.
    if coords is not None and coordfile is None:
        coordfile = coords
        warnings.simplefilter('always',DeprecationWarning)
        warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
            category=DeprecationWarning)
        warnings.simplefilter('default',DeprecationWarning)

    # Collect the input positions: either from a coordinate file or from x/y.
    if coordfile is not None:
        if colnames in util.blank_list:
            colnames = ['c1','c2']
        # Columns holding the x,y pixel positions.
        usecols = util.parse_colnames(colnames,coordfile)
        positions = np.loadtxt(coordfile,usecols=usecols,delimiter=separator)
        if positions.ndim == 1: # only 1 entry in coordfile
            xlist = [positions[0].copy()]
            ylist = [positions[1].copy()]
        else:
            xlist = positions[:,0].copy()
            ylist = positions[:,1].copy()
    elif isinstance(x,np.ndarray):
        xlist = x.tolist()
        ylist = y.tolist()
    elif isinstance(x,list):
        xlist = x
        ylist = y
    else:
        xlist = [x]
        ylist = [y]
        single_coord = True

    # Read WCS+distortion info for the input image.
    im1wcs = wcsutil.HSTWCS(inimage)
    if im1wcs.wcs.is_unity():
        print("####\nNo valid input WCS found in {}.\n  Results may be invalid.\n####\n".format(inimage))

    # Derive (or read) the output WCS.
    if not util.is_blank(outimage):
        im2wcs = wcsutil.HSTWCS(outimage)
    else:
        # No output image given: build a default output WCS from all SCI chips.
        fname,fextn = fileutil.parseFilename(inimage)
        numsci = fileutil.countExtn(fname)
        chips = [wcsutil.HSTWCS(fname,ext=('sci',e))
                 for e in range(1,numsci+1)]
        if not chips:
            chips = [im1wcs]
        im2wcs = distortion.utils.output_wcs(chips)

    if im2wcs.wcs.is_unity():
        print("####\nNo valid output WCS found in {}.\n  Results may be invalid.\n####\n".format(outimage))

    # Setup the transformation and evaluate it in the requested direction.
    p2p = wcs_functions.WCSMap(im1wcs,im2wcs)
    if direction[0].lower() == 'f':
        outx,outy = p2p.forward(xlist,ylist)
    else:
        outx,outy = p2p.backward(xlist,ylist)

    if isinstance(outx,np.ndarray):
        outx = outx.tolist()
        outy = outy.tolist()

    # Format results to the requested floating-point precision.
    fmt = "%."+repr(precision)+"f"
    xstr = [fmt%ox for ox in outx]
    ystr = [fmt%oy for oy in outy]

    if verbose or (not verbose and util.is_blank(output)):
        print('# Coordinate transformations for ',inimage)
        print('# X(in)      Y(in)             X(out)         Y(out)\n')
        for xs,ys,a,b in zip(xlist,ylist,xstr,ystr):
            print("%.4f  %.4f     %s  %s"%(xs,ys,a,b))

    # Create output file, if specified
    if output:
        with open(output,mode='w') as f:
            f.write("# Coordinates converted from %s\n"%inimage)
            for xs,ys in zip(xstr,ystr):
                f.write('%s    %s\n'%(xs,ys))
        print('Wrote out results to: ',output)

    # Unwrap single-coordinate results back to scalars.
    if single_coord:
        outx = outx[0]
        outy = outy[0]
    return outx,outy
#--------------------------
# TEAL Interface functions
#--------------------------
def run(configObj):
    """TEAL hook: pull parameters out of `configObj` and invoke tran()."""
    # Deprecated `coords` key may be absent from newer parameter files.
    coords = None
    if 'coords' in configObj:
        coords = util.check_blank(configObj['coords'])
    # Blank strings in the TEAL GUI mean "not set" -> convert to None.
    coordfile = util.check_blank(configObj['coordfile'])
    colnames = util.check_blank(configObj['colnames'])
    sep = util.check_blank(configObj['separator'])
    outfile = util.check_blank(configObj['output'])
    outimage = util.check_blank(configObj['outimage'])

    tran(configObj['inimage'], outimage, direction=configObj['direction'],
         x=configObj['x'], y=configObj['y'], coords=coords,
         coordfile=coordfile, colnames=colnames,
         separator=sep, precision=configObj['precision'],
         output=outfile, verbose=configObj['verbose'])
def help(file=None):
    """
    Print out syntax help for running astrodrizzle

    Parameters
    ----------
    file : str (Default = None)
        If given, write out help to the filename specified by this parameter
        Any previously existing file with this name will be deleted before
        writing out the help.
    """
    helpstr = getHelpAsString(docstring=True, show_ver = True)
    if file is None:
        print(helpstr)
    else:
        # Mode 'w' truncates anyway; remove explicitly to honor the
        # documented "previously existing file will be deleted" behavior.
        if os.path.exists(file):
            os.remove(file)
        # Context manager guarantees the handle is closed even if write fails
        # (the original leaked the handle on a write error).
        with open(file, mode='w') as f:
            f.write(helpstr)
def getHelpAsString(docstring = False, show_ver = True):
    """
    Return useful help from a file in the script directory called
    __taskname__.help; fall back to the module docstring, or point at the
    pre-built HTML help when available.
    """
    install_dir = os.path.dirname(__file__)
    taskname = util.base_taskname(__taskname__, '')
    htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html')
    helpfile = os.path.join(install_dir, taskname + '.help')

    # Prefer plain-text help when explicitly requested or no HTML help exists.
    if docstring or not os.path.exists(htmlfile):
        if show_ver:
            banner = ' '.join([__taskname__, 'Version', __version__,
                               ' updated on ', __version_date__])
            helpString = os.linesep + banner + 2*os.linesep
        else:
            helpString = ''
        if os.path.exists(helpfile):
            helpString += teal.getHelpFileAsString(taskname, __file__)
        elif __doc__ is not None:
            helpString += __doc__ + os.linesep
    else:
        helpString = 'file://' + htmlfile
    return helpString
# Replace the module docstring with the generated help text (no version banner).
__doc__ = getHelpAsString(docstring = True, show_ver = False)
| [
"os.path.exists",
"stsci.tools.teal.getHelpFileAsString",
"os.path.join",
"stwcs.wcsutil.HSTWCS",
"os.path.dirname",
"numpy.loadtxt",
"os.remove",
"stwcs.distortion.utils.output_wcs",
"warnings.simplefilter",
"warnings.warn",
"stsci.tools.fileutil.parseFilename",
"stsci.tools.fileutil.countExt... | [((6070, 6093), 'stwcs.wcsutil.HSTWCS', 'wcsutil.HSTWCS', (['inimage'], {}), '(inimage)\n', (6084, 6093), False, 'from stwcs import wcsutil, distortion\n'), ((9548, 9573), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9563, 9573), False, 'import os, copy\n'), ((9641, 9698), 'os.path.join', 'os.path.join', (['install_dir', '"""htmlhelp"""', "(taskname + '.html')"], {}), "(install_dir, 'htmlhelp', taskname + '.html')\n", (9653, 9698), False, 'import os, copy\n'), ((9714, 9759), 'os.path.join', 'os.path.join', (['install_dir', "(taskname + '.help')"], {}), "(install_dir, taskname + '.help')\n", (9726, 9759), False, 'import os, copy\n'), ((4872, 4923), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""', 'DeprecationWarning'], {}), "('always', DeprecationWarning)\n", (4893, 4923), False, 'import warnings\n'), ((4931, 5056), 'warnings.warn', 'warnings.warn', (['"""Please update calling code to pass in `coordfile` instead of `coords`."""'], {'category': 'DeprecationWarning'}), "(\n 'Please update calling code to pass in `coordfile` instead of `coords`.',\n category=DeprecationWarning)\n", (4944, 5056), False, 'import warnings\n'), ((5068, 5120), 'warnings.simplefilter', 'warnings.simplefilter', (['"""default"""', 'DeprecationWarning'], {}), "('default', DeprecationWarning)\n", (5089, 5120), False, 'import warnings\n'), ((5410, 5466), 'numpy.loadtxt', 'np.loadtxt', (['coordfile'], {'usecols': 'cols', 'delimiter': 'separator'}), '(coordfile, usecols=cols, delimiter=separator)\n', (5420, 5466), True, 'import numpy as np\n'), ((6285, 6316), 'stsci.tools.fileutil.parseFilename', 'fileutil.parseFilename', (['inimage'], {}), '(inimage)\n', (6307, 6316), False, 'from stsci.tools import fileutil, teal\n'), ((6334, 6359), 'stsci.tools.fileutil.countExtn', 'fileutil.countExtn', (['fname'], {}), '(fname)\n', (6352, 6359), False, 'from stsci.tools import fileutil, teal\n'), ((6551, 6585), 
'stwcs.distortion.utils.output_wcs', 'distortion.utils.output_wcs', (['chips'], {}), '(chips)\n', (6578, 6585), False, 'from stwcs import wcsutil, distortion\n'), ((6613, 6637), 'stwcs.wcsutil.HSTWCS', 'wcsutil.HSTWCS', (['outimage'], {}), '(outimage)\n', (6627, 6637), False, 'from stwcs import wcsutil, distortion\n'), ((9250, 9270), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (9264, 9270), False, 'import os, copy\n'), ((10075, 10099), 'os.path.exists', 'os.path.exists', (['helpfile'], {}), '(helpfile)\n', (10089, 10099), False, 'import os, copy\n'), ((9272, 9287), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (9281, 9287), False, 'import os, copy\n'), ((10127, 10171), 'stsci.tools.teal.getHelpFileAsString', 'teal.getHelpFileAsString', (['taskname', '__file__'], {}), '(taskname, __file__)\n', (10151, 10171), False, 'from stsci.tools import fileutil, teal\n'), ((6440, 6477), 'stwcs.wcsutil.HSTWCS', 'wcsutil.HSTWCS', (['fname'], {'ext': "('sci', e)"}), "(fname, ext=('sci', e))\n", (6454, 6477), False, 'from stwcs import wcsutil, distortion\n'), ((9804, 9828), 'os.path.exists', 'os.path.exists', (['htmlfile'], {}), '(htmlfile)\n', (9818, 9828), False, 'import os, copy\n')] |
import os
import time
import pickle
from tqdm import tqdm
import librosa
import soundfile as sf
import numpy as np
import oneflow as flow
import utils.data_utils as preprocess
from utils.dataset import trainingDataset
from model.model import Generator, Discriminator
class CycleGANTrainr(object):
    """Trainer for a CycleGAN voice-conversion model between speakers A and B.

    Owns both generators (A2B / B2A), both discriminators, their optimizers,
    the training loop, and wav-file conversion for inference / validation.
    """

    def __init__(
        self,
        logf0s_normalization,
        mcep_normalization,
        coded_sps_A_norm,
        coded_sps_B_norm,
        model_checkpoint,
        validation_A_dir,
        output_A_dir,
        validation_B_dir,
        output_B_dir,
        restart_training_at=None,
    ):
        """
        :param logf0s_normalization: .npz path with log-F0 mean/std for A and B.
        :param mcep_normalization: .npz path with MCEP mean/std for A and B.
        :param coded_sps_A_norm: pickle path of normalized coded sps for A.
        :param coded_sps_B_norm: pickle path of normalized coded sps for B.
        :param model_checkpoint: directory for model checkpoints (created).
        :param validation_A_dir: directory of validation wavs for speaker A.
        :param output_A_dir: directory for converted A->B validation wavs.
        :param validation_B_dir: directory of validation wavs for speaker B.
        :param output_B_dir: directory for converted B->A validation wavs.
        :param restart_training_at: unused here; kept for interface compatibility.
        """
        self.start_epoch = 0
        self.num_epochs = 200000
        self.mini_batch_size = 10

        self.dataset_A = self.loadPickleFile(coded_sps_A_norm)
        self.dataset_B = self.loadPickleFile(coded_sps_B_norm)
        self.device = flow.device("cuda" if flow.cuda.is_available() else "cpu")

        # Speech parameters: per-speaker statistics used for (de)normalization.
        logf0s_normalization = np.load(logf0s_normalization)
        self.log_f0s_mean_A = logf0s_normalization["mean_A"]
        self.log_f0s_std_A = logf0s_normalization["std_A"]
        self.log_f0s_mean_B = logf0s_normalization["mean_B"]
        self.log_f0s_std_B = logf0s_normalization["std_B"]

        mcep_normalization = np.load(mcep_normalization)
        self.coded_sps_A_mean = mcep_normalization["mean_A"]
        self.coded_sps_A_std = mcep_normalization["std_A"]
        self.coded_sps_B_mean = mcep_normalization["mean_B"]
        self.coded_sps_B_std = mcep_normalization["std_B"]

        # Generators and discriminators for both translation directions.
        self.generator_A2B = Generator().to(self.device)
        self.generator_B2A = Generator().to(self.device)
        self.discriminator_A = Discriminator().to(self.device)
        self.discriminator_B = Discriminator().to(self.device)

        # Optimizers over the combined generator / discriminator parameters.
        g_params = list(self.generator_A2B.parameters()) + list(
            self.generator_B2A.parameters()
        )
        d_params = list(self.discriminator_A.parameters()) + list(
            self.discriminator_B.parameters()
        )

        # Initial learning rates and their linear decay increments.
        self.generator_lr = 2e-4
        self.discriminator_lr = 1e-4
        self.generator_lr_decay = self.generator_lr / 200000
        self.discriminator_lr_decay = self.discriminator_lr / 200000
        # Learning-rate decay starts after this many iterations have passed.
        self.start_decay = 10000
        self.generator_optimizer = flow.optim.Adam(
            g_params, lr=self.generator_lr, betas=(0.5, 0.999)
        )
        self.discriminator_optimizer = flow.optim.Adam(
            d_params, lr=self.discriminator_lr, betas=(0.5, 0.999)
        )

        # Checkpoint and validation-output locations (created if missing).
        self.modelCheckpoint = model_checkpoint
        os.makedirs(self.modelCheckpoint, exist_ok=True)

        self.validation_A_dir = validation_A_dir
        self.output_A_dir = output_A_dir
        os.makedirs(self.output_A_dir, exist_ok=True)
        self.validation_B_dir = validation_B_dir
        self.output_B_dir = output_B_dir
        os.makedirs(self.output_B_dir, exist_ok=True)

        # Loss histories and the training log file name.
        self.generator_loss_store = []
        self.discriminator_loss_store = []
        self.file_name = "log_store_non_sigmoid.txt"

    def adjust_lr_rate(self, optimizer, name="generator"):
        """Linearly decay the learning rate of `optimizer`.

        :param optimizer: the flow optimizer whose param_groups are updated.
        :param name: "generator" or anything else for the discriminator rate.
        """
        if name == "generator":
            self.generator_lr = max(0.0, self.generator_lr - self.generator_lr_decay)
            for param_groups in optimizer.param_groups:
                param_groups["lr"] = self.generator_lr
        else:
            self.discriminator_lr = max(
                0.0, self.discriminator_lr - self.discriminator_lr_decay
            )
            for param_groups in optimizer.param_groups:
                param_groups["lr"] = self.discriminator_lr

    def reset_grad(self):
        """Zero the gradients of both optimizers."""
        self.generator_optimizer.zero_grad()
        self.discriminator_optimizer.zero_grad()

    def train(self):
        """Run the full CycleGAN training loop with periodic checkpointing
        and validation every 2000 epochs."""
        for epoch in range(self.start_epoch, self.num_epochs):
            start_time_epoch = time.time()

            # Loss weights; identity loss is switched off after 10k iterations.
            cycle_loss_lambda = 10
            identity_loss_lambda = 5

            # Preparing Dataset
            n_samples = len(self.dataset_A)
            dataset = trainingDataset(
                datasetA=self.dataset_A, datasetB=self.dataset_B, n_frames=128
            )
            train_loader = flow.utils.data.DataLoader(
                dataset=dataset,
                batch_size=self.mini_batch_size,
                shuffle=True,
                drop_last=False,
            )

            # tqdm is used only as a status line via set_description below.
            pbar = tqdm(enumerate(train_loader))
            for i, (real_A, real_B) in enumerate(train_loader):
                num_iterations = (n_samples // self.mini_batch_size) * epoch + i
                if num_iterations > 10000:
                    identity_loss_lambda = 0
                if num_iterations > self.start_decay:
                    self.adjust_lr_rate(self.generator_optimizer, name="generator")
                    # BUGFIX: the discriminator decay was previously applied to
                    # the generator optimizer; it must target the discriminator's.
                    self.adjust_lr_rate(
                        self.discriminator_optimizer, name="discriminator"
                    )

                real_A = real_A.to(self.device).float()
                real_B = real_B.to(self.device).float()

                # Generator forward passes: translation, cycle and identity.
                fake_B = self.generator_A2B(real_A)
                cycle_A = self.generator_B2A(fake_B)
                fake_A = self.generator_B2A(real_B)
                cycle_B = self.generator_A2B(fake_A)
                identity_A = self.generator_B2A(real_A)
                identity_B = self.generator_A2B(real_B)
                d_fake_A = self.discriminator_A(fake_A)
                d_fake_B = self.discriminator_B(fake_B)

                # For the second-step adversarial loss.
                d_fake_cycle_A = self.discriminator_A(cycle_A)
                d_fake_cycle_B = self.discriminator_B(cycle_B)

                # Generator cycle-consistency loss.
                cycleLoss = flow.mean(flow.abs(real_A - cycle_A)) + flow.mean(
                    flow.abs(real_B - cycle_B)
                )
                # Generator identity loss.
                identiyLoss = flow.mean(flow.abs(real_A - identity_A)) + flow.mean(
                    flow.abs(real_B - identity_B)
                )
                # Generator adversarial losses (least-squares GAN objective).
                generator_loss_A2B = flow.mean((1 - d_fake_B) ** 2)
                generator_loss_B2A = flow.mean((1 - d_fake_A) ** 2)

                # Total Generator Loss
                generator_loss = (
                    generator_loss_A2B
                    + generator_loss_B2A
                    + cycle_loss_lambda * cycleLoss
                    + identity_loss_lambda * identiyLoss
                )
                self.generator_loss_store.append(generator_loss.item())

                # Backprop for the generators.
                self.reset_grad()
                generator_loss.backward()
                self.generator_optimizer.step()

                # Discriminator forward passes on fresh generator outputs.
                d_real_A = self.discriminator_A(real_A)
                d_real_B = self.discriminator_B(real_B)
                generated_A = self.generator_B2A(real_B)
                d_fake_A = self.discriminator_A(generated_A)
                cycled_B = self.generator_A2B(generated_A)
                d_cycled_B = self.discriminator_B(cycled_B)
                generated_B = self.generator_A2B(real_A)
                d_fake_B = self.discriminator_B(generated_B)
                cycled_A = self.generator_B2A(generated_B)
                d_cycled_A = self.discriminator_A(cycled_A)

                # Discriminator least-squares losses.
                d_loss_A_real = flow.mean((1 - d_real_A) ** 2)
                d_loss_A_fake = flow.mean((0 - d_fake_A) ** 2)
                d_loss_A = (d_loss_A_real + d_loss_A_fake) / 2.0
                d_loss_B_real = flow.mean((1 - d_real_B) ** 2)
                d_loss_B_fake = flow.mean((0 - d_fake_B) ** 2)
                d_loss_B = (d_loss_B_real + d_loss_B_fake) / 2.0

                # Second-step adversarial loss on the cycled samples.
                d_loss_A_cycled = flow.mean((0 - d_cycled_A) ** 2)
                d_loss_B_cycled = flow.mean((0 - d_cycled_B) ** 2)
                d_loss_A_2nd = (d_loss_A_real + d_loss_A_cycled) / 2.0
                d_loss_B_2nd = (d_loss_B_real + d_loss_B_cycled) / 2.0

                # Final discriminator loss including the second-step term.
                d_loss = (d_loss_A + d_loss_B) / 2.0 + (
                    d_loss_A_2nd + d_loss_B_2nd
                ) / 2.0
                self.discriminator_loss_store.append(d_loss.item())

                # Backprop for the discriminators.
                self.reset_grad()
                d_loss.backward()
                self.discriminator_optimizer.step()

                if (i + 1) % 2 == 0:
                    pbar.set_description(
                        "Iter:{} Generator Loss:{:.4f} Discrimator Loss:{:.4f} GA2B:{:.4f} GB2A:{:.4f} G_id:{:.4f} G_cyc:{:.4f} D_A:{:.4f} D_B:{:.4f}".format(
                            num_iterations,
                            generator_loss.item(),
                            d_loss.item(),
                            generator_loss_A2B,
                            generator_loss_B2A,
                            identiyLoss,
                            cycleLoss,
                            d_loss_A,
                            d_loss_B,
                        )
                    )

            if epoch % 2000 == 0 and epoch != 0:
                end_time = time.time()
                store_to_file = "Epoch: {} Generator Loss: {:.4f} Discriminator Loss: {}, Time: {:.2f}\n\n".format(
                    epoch,
                    generator_loss.item(),
                    d_loss.item(),
                    end_time - start_time_epoch,
                )
                self.store_to_file(store_to_file)
                print(
                    "Epoch: {} Generator Loss: {:.4f} Discriminator Loss: {}, Time: {:.2f}\n\n".format(
                        epoch,
                        generator_loss.item(),
                        d_loss.item(),
                        end_time - start_time_epoch,
                    )
                )

                # Save the entire model.
                print("Saving model Checkpoint  ......")
                store_to_file = "Saving model Checkpoint  ......"
                self.store_to_file(store_to_file)
                self.saveModelCheckPoint(epoch, self.modelCheckpoint)
                print("Model Saved!")

            if epoch % 2000 == 0 and epoch != 0:
                # Validation pass in both directions.
                validation_start_time = time.time()
                self.validation_for_A_dir()
                self.validation_for_B_dir()
                validation_end_time = time.time()
                store_to_file = "Time taken for validation Set: {}".format(
                    validation_end_time - validation_start_time
                )
                self.store_to_file(store_to_file)
                print(
                    "Time taken for validation Set: {}".format(
                        validation_end_time - validation_start_time
                    )
                )

    def _convert_directory(self, input_dir, output_dir, direction="A2B",
                           out_prefix=""):
        """Convert every wav in `input_dir` and write results to `output_dir`.

        Shared WORLD analysis -> generator -> WORLD synthesis pipeline used by
        infer() and both validation methods.

        :param input_dir: directory containing the source wav files.
        :param output_dir: directory the converted wavs are written to.
        :param direction: "A2B" to use generator_A2B (A statistics as source),
                          anything else for generator_B2A.
        :param out_prefix: prefix prepended to each output file name.
        """
        num_mcep = 36
        sampling_rate = 16000
        frame_period = 5.0

        # Select the generator and the matching source/target statistics.
        if direction == "A2B":
            generator = self.generator_A2B
            src_f0_mean, src_f0_std = self.log_f0s_mean_A, self.log_f0s_std_A
            tgt_f0_mean, tgt_f0_std = self.log_f0s_mean_B, self.log_f0s_std_B
            src_sp_mean, src_sp_std = self.coded_sps_A_mean, self.coded_sps_A_std
            tgt_sp_mean, tgt_sp_std = self.coded_sps_B_mean, self.coded_sps_B_std
        else:
            generator = self.generator_B2A
            src_f0_mean, src_f0_std = self.log_f0s_mean_B, self.log_f0s_std_B
            tgt_f0_mean, tgt_f0_std = self.log_f0s_mean_A, self.log_f0s_std_A
            src_sp_mean, src_sp_std = self.coded_sps_B_mean, self.coded_sps_B_std
            tgt_sp_mean, tgt_sp_std = self.coded_sps_A_mean, self.coded_sps_A_std

        for file in os.listdir(input_dir):
            filePath = os.path.join(input_dir, file)
            wav, _ = librosa.load(filePath, sr=sampling_rate, mono=True)
            wav = preprocess.wav_padding(
                wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4
            )
            # WORLD analysis: f0 contour, spectral envelope, aperiodicity.
            f0, timeaxis, sp, ap = preprocess.world_decompose(
                wav=wav, fs=sampling_rate, frame_period=frame_period
            )
            # Log-Gaussian normalized pitch transform between the speakers.
            f0_converted = preprocess.pitch_conversion(
                f0=f0,
                mean_log_src=src_f0_mean,
                std_log_src=src_f0_std,
                mean_log_target=tgt_f0_mean,
                std_log_target=tgt_f0_std,
            )
            coded_sp = preprocess.world_encode_spectral_envelop(
                sp=sp, fs=sampling_rate, dim=num_mcep
            )
            coded_sp_transposed = coded_sp.T
            coded_sp_norm = (coded_sp_transposed - src_sp_mean) / src_sp_std
            coded_sp_norm = np.array([coded_sp_norm])
            if flow.cuda.is_available():
                coded_sp_norm = flow.tensor(coded_sp_norm).cuda().float()
            else:
                coded_sp_norm = flow.tensor(coded_sp_norm).float()
            coded_sp_converted_norm = generator(coded_sp_norm)
            coded_sp_converted_norm = coded_sp_converted_norm.cpu().detach().numpy()
            coded_sp_converted_norm = np.squeeze(coded_sp_converted_norm)
            # Denormalize with the target speaker's statistics.
            coded_sp_converted = (
                coded_sp_converted_norm * tgt_sp_std + tgt_sp_mean
            )
            coded_sp_converted = coded_sp_converted.T
            coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
            decoded_sp_converted = preprocess.world_decode_spectral_envelop(
                coded_sp=coded_sp_converted, fs=sampling_rate
            )
            wav_transformed = preprocess.world_speech_synthesis(
                f0=f0_converted,
                decoded_sp=decoded_sp_converted,
                ap=ap,
                fs=sampling_rate,
                frame_period=frame_period,
            )
            sf.write(
                os.path.join(output_dir, out_prefix + os.path.basename(file)),
                wav_transformed,
                sampling_rate,
            )

    def infer(self, PATH="sample"):
        """Convert every wav in PATH with generator A2B, writing each result
        back into PATH with a 'convert_' filename prefix."""
        self._convert_directory(PATH, PATH, direction="A2B",
                                out_prefix="convert_")

    def validation_for_A_dir(self):
        """Convert the A validation set to speaker B's voice."""
        print("Generating Validation Data B from A...")
        self._convert_directory(self.validation_A_dir, self.output_A_dir,
                                direction="A2B")

    def validation_for_B_dir(self):
        """Convert the B validation set to speaker A's voice."""
        print("Generating Validation Data A from B...")
        self._convert_directory(self.validation_B_dir, self.output_B_dir,
                                direction="B2A")

    def savePickle(self, variable, fileName):
        """Pickle `variable` to `fileName`."""
        with open(fileName, "wb") as f:
            pickle.dump(variable, f)

    def loadPickleFile(self, fileName):
        """Load and return the pickled object stored in `fileName`."""
        with open(fileName, "rb") as f:
            return pickle.load(f)

    def store_to_file(self, doc):
        """Append one line `doc` to the training log file."""
        doc = doc + "\n"
        with open(self.file_name, "a") as myfile:
            myfile.write(doc)

    def saveModelCheckPoint(self, epoch, PATH):
        """Save all four networks' state dicts under PATH, tagged by epoch."""
        flow.save(
            self.generator_A2B.state_dict(),
            os.path.join(PATH, "generator_A2B_%d" % epoch),
        )
        flow.save(
            self.generator_B2A.state_dict(),
            os.path.join(PATH, "generator_B2A_%d" % epoch),
        )
        flow.save(
            self.discriminator_A.state_dict(),
            os.path.join(PATH, "discriminator_A_%d" % epoch),
        )
        flow.save(
            self.discriminator_B.state_dict(),
            os.path.join(PATH, "discriminator_B_%d" % epoch),
        )

    def loadModel(self, PATH):
        """Load all four networks' state dicts from the (untagged) files in PATH."""
        self.generator_A2B.load_state_dict(
            flow.load(os.path.join(PATH, "generator_A2B"))
        )
        self.generator_B2A.load_state_dict(
            flow.load(os.path.join(PATH, "generator_B2A"))
        )
        self.discriminator_A.load_state_dict(
            flow.load(os.path.join(PATH, "discriminator_A"))
        )
        self.discriminator_B.load_state_dict(
            flow.load(os.path.join(PATH, "discriminator_B"))
        )
| [
"oneflow.optim.Adam",
"utils.data_utils.world_speech_synthesis",
"numpy.ascontiguousarray",
"model.model.Discriminator",
"numpy.array",
"utils.data_utils.world_decode_spectral_envelop",
"oneflow.tensor",
"librosa.load",
"os.listdir",
"oneflow.mean",
"model.model.Generator",
"oneflow.utils.data... | [((969, 998), 'numpy.load', 'np.load', (['logf0s_normalization'], {}), '(logf0s_normalization)\n', (976, 998), True, 'import numpy as np\n'), ((1269, 1296), 'numpy.load', 'np.load', (['mcep_normalization'], {}), '(mcep_normalization)\n', (1276, 1296), True, 'import numpy as np\n'), ((1866, 1883), 'oneflow.nn.MSELoss', 'flow.nn.MSELoss', ([], {}), '()\n', (1881, 1883), True, 'import oneflow as flow\n'), ((2563, 2630), 'oneflow.optim.Adam', 'flow.optim.Adam', (['g_params'], {'lr': 'self.generator_lr', 'betas': '(0.5, 0.999)'}), '(g_params, lr=self.generator_lr, betas=(0.5, 0.999))\n', (2578, 2630), True, 'import oneflow as flow\n'), ((2692, 2763), 'oneflow.optim.Adam', 'flow.optim.Adam', (['d_params'], {'lr': 'self.discriminator_lr', 'betas': '(0.5, 0.999)'}), '(d_params, lr=self.discriminator_lr, betas=(0.5, 0.999))\n', (2707, 2763), True, 'import oneflow as flow\n'), ((2890, 2938), 'os.makedirs', 'os.makedirs', (['self.modelCheckpoint'], {'exist_ok': '(True)'}), '(self.modelCheckpoint, exist_ok=True)\n', (2901, 2938), False, 'import os\n'), ((3074, 3119), 'os.makedirs', 'os.makedirs', (['self.output_A_dir'], {'exist_ok': '(True)'}), '(self.output_A_dir, exist_ok=True)\n', (3085, 3119), False, 'import os\n'), ((3218, 3263), 'os.makedirs', 'os.makedirs', (['self.output_B_dir'], {'exist_ok': '(True)'}), '(self.output_B_dir, exist_ok=True)\n', (3229, 3263), False, 'import os\n'), ((11748, 11771), 'os.listdir', 'os.listdir', (['infer_A_dir'], {}), '(infer_A_dir)\n', (11758, 11771), False, 'import os\n'), ((14437, 14465), 'os.listdir', 'os.listdir', (['validation_A_dir'], {}), '(validation_A_dir)\n', (14447, 14465), False, 'import os\n'), ((17123, 17151), 'os.listdir', 'os.listdir', (['validation_B_dir'], {}), '(validation_B_dir)\n', (17133, 17151), False, 'import os\n'), ((4262, 4273), 'time.time', 'time.time', ([], {}), '()\n', (4271, 4273), False, 'import time\n'), ((4471, 4550), 'utils.dataset.trainingDataset', 'trainingDataset', ([], 
{'datasetA': 'self.dataset_A', 'datasetB': 'self.dataset_B', 'n_frames': '(128)'}), '(datasetA=self.dataset_A, datasetB=self.dataset_B, n_frames=128)\n', (4486, 4550), False, 'from utils.dataset import trainingDataset\n'), ((4609, 4720), 'oneflow.utils.data.DataLoader', 'flow.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'self.mini_batch_size', 'shuffle': '(True)', 'drop_last': '(False)'}), '(dataset=dataset, batch_size=self.mini_batch_size,\n shuffle=True, drop_last=False)\n', (4635, 4720), True, 'import oneflow as flow\n'), ((11796, 11827), 'os.path.join', 'os.path.join', (['infer_A_dir', 'file'], {}), '(infer_A_dir, file)\n', (11808, 11827), False, 'import os\n'), ((11849, 11900), 'librosa.load', 'librosa.load', (['filePath'], {'sr': 'sampling_rate', 'mono': '(True)'}), '(filePath, sr=sampling_rate, mono=True)\n', (11861, 11900), False, 'import librosa\n'), ((11919, 12011), 'utils.data_utils.wav_padding', 'preprocess.wav_padding', ([], {'wav': 'wav', 'sr': 'sampling_rate', 'frame_period': 'frame_period', 'multiple': '(4)'}), '(wav=wav, sr=sampling_rate, frame_period=frame_period,\n multiple=4)\n', (11941, 12011), True, 'import utils.data_utils as preprocess\n'), ((12073, 12158), 'utils.data_utils.world_decompose', 'preprocess.world_decompose', ([], {'wav': 'wav', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(wav=wav, fs=sampling_rate, frame_period=frame_period\n )\n', (12099, 12158), True, 'import utils.data_utils as preprocess\n'), ((12211, 12391), 'utils.data_utils.pitch_conversion', 'preprocess.pitch_conversion', ([], {'f0': 'f0', 'mean_log_src': 'self.log_f0s_mean_A', 'std_log_src': 'self.log_f0s_std_A', 'mean_log_target': 'self.log_f0s_mean_B', 'std_log_target': 'self.log_f0s_std_B'}), '(f0=f0, mean_log_src=self.log_f0s_mean_A,\n std_log_src=self.log_f0s_std_A, mean_log_target=self.log_f0s_mean_B,\n std_log_target=self.log_f0s_std_B)\n', (12238, 12391), True, 'import utils.data_utils as preprocess\n'), ((12502, 12581), 
'utils.data_utils.world_encode_spectral_envelop', 'preprocess.world_encode_spectral_envelop', ([], {'sp': 'sp', 'fs': 'sampling_rate', 'dim': 'num_mcep'}), '(sp=sp, fs=sampling_rate, dim=num_mcep)\n', (12542, 12581), True, 'import utils.data_utils as preprocess\n'), ((12812, 12837), 'numpy.array', 'np.array', (['[coded_sp_norm]'], {}), '([coded_sp_norm])\n', (12820, 12837), True, 'import numpy as np\n'), ((12854, 12878), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (12876, 12878), True, 'import oneflow as flow\n'), ((13235, 13270), 'numpy.squeeze', 'np.squeeze', (['coded_sp_converted_norm'], {}), '(coded_sp_converted_norm)\n', (13245, 13270), True, 'import numpy as np\n'), ((13494, 13534), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['coded_sp_converted'], {}), '(coded_sp_converted)\n', (13514, 13534), True, 'import numpy as np\n'), ((13570, 13662), 'utils.data_utils.world_decode_spectral_envelop', 'preprocess.world_decode_spectral_envelop', ([], {'coded_sp': 'coded_sp_converted', 'fs': 'sampling_rate'}), '(coded_sp=coded_sp_converted, fs=\n sampling_rate)\n', (13610, 13662), True, 'import utils.data_utils as preprocess\n'), ((13718, 13858), 'utils.data_utils.world_speech_synthesis', 'preprocess.world_speech_synthesis', ([], {'f0': 'f0_converted', 'decoded_sp': 'decoded_sp_converted', 'ap': 'ap', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(f0=f0_converted, decoded_sp=\n decoded_sp_converted, ap=ap, fs=sampling_rate, frame_period=frame_period)\n', (13751, 13858), True, 'import utils.data_utils as preprocess\n'), ((14490, 14526), 'os.path.join', 'os.path.join', (['validation_A_dir', 'file'], {}), '(validation_A_dir, file)\n', (14502, 14526), False, 'import os\n'), ((14548, 14599), 'librosa.load', 'librosa.load', (['filePath'], {'sr': 'sampling_rate', 'mono': '(True)'}), '(filePath, sr=sampling_rate, mono=True)\n', (14560, 14599), False, 'import librosa\n'), ((14618, 14710), 'utils.data_utils.wav_padding', 
'preprocess.wav_padding', ([], {'wav': 'wav', 'sr': 'sampling_rate', 'frame_period': 'frame_period', 'multiple': '(4)'}), '(wav=wav, sr=sampling_rate, frame_period=frame_period,\n multiple=4)\n', (14640, 14710), True, 'import utils.data_utils as preprocess\n'), ((14772, 14857), 'utils.data_utils.world_decompose', 'preprocess.world_decompose', ([], {'wav': 'wav', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(wav=wav, fs=sampling_rate, frame_period=frame_period\n )\n', (14798, 14857), True, 'import utils.data_utils as preprocess\n'), ((14910, 15090), 'utils.data_utils.pitch_conversion', 'preprocess.pitch_conversion', ([], {'f0': 'f0', 'mean_log_src': 'self.log_f0s_mean_A', 'std_log_src': 'self.log_f0s_std_A', 'mean_log_target': 'self.log_f0s_mean_B', 'std_log_target': 'self.log_f0s_std_B'}), '(f0=f0, mean_log_src=self.log_f0s_mean_A,\n std_log_src=self.log_f0s_std_A, mean_log_target=self.log_f0s_mean_B,\n std_log_target=self.log_f0s_std_B)\n', (14937, 15090), True, 'import utils.data_utils as preprocess\n'), ((15201, 15280), 'utils.data_utils.world_encode_spectral_envelop', 'preprocess.world_encode_spectral_envelop', ([], {'sp': 'sp', 'fs': 'sampling_rate', 'dim': 'num_mcep'}), '(sp=sp, fs=sampling_rate, dim=num_mcep)\n', (15241, 15280), True, 'import utils.data_utils as preprocess\n'), ((15511, 15536), 'numpy.array', 'np.array', (['[coded_sp_norm]'], {}), '([coded_sp_norm])\n', (15519, 15536), True, 'import numpy as np\n'), ((15553, 15577), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (15575, 15577), True, 'import oneflow as flow\n'), ((15934, 15969), 'numpy.squeeze', 'np.squeeze', (['coded_sp_converted_norm'], {}), '(coded_sp_converted_norm)\n', (15944, 15969), True, 'import numpy as np\n'), ((16193, 16233), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['coded_sp_converted'], {}), '(coded_sp_converted)\n', (16213, 16233), True, 'import numpy as np\n'), ((16269, 16361), 'utils.data_utils.world_decode_spectral_envelop', 
'preprocess.world_decode_spectral_envelop', ([], {'coded_sp': 'coded_sp_converted', 'fs': 'sampling_rate'}), '(coded_sp=coded_sp_converted, fs=\n sampling_rate)\n', (16309, 16361), True, 'import utils.data_utils as preprocess\n'), ((16417, 16557), 'utils.data_utils.world_speech_synthesis', 'preprocess.world_speech_synthesis', ([], {'f0': 'f0_converted', 'decoded_sp': 'decoded_sp_converted', 'ap': 'ap', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(f0=f0_converted, decoded_sp=\n decoded_sp_converted, ap=ap, fs=sampling_rate, frame_period=frame_period)\n', (16450, 16557), True, 'import utils.data_utils as preprocess\n'), ((17176, 17212), 'os.path.join', 'os.path.join', (['validation_B_dir', 'file'], {}), '(validation_B_dir, file)\n', (17188, 17212), False, 'import os\n'), ((17234, 17285), 'librosa.load', 'librosa.load', (['filePath'], {'sr': 'sampling_rate', 'mono': '(True)'}), '(filePath, sr=sampling_rate, mono=True)\n', (17246, 17285), False, 'import librosa\n'), ((17304, 17396), 'utils.data_utils.wav_padding', 'preprocess.wav_padding', ([], {'wav': 'wav', 'sr': 'sampling_rate', 'frame_period': 'frame_period', 'multiple': '(4)'}), '(wav=wav, sr=sampling_rate, frame_period=frame_period,\n multiple=4)\n', (17326, 17396), True, 'import utils.data_utils as preprocess\n'), ((17458, 17543), 'utils.data_utils.world_decompose', 'preprocess.world_decompose', ([], {'wav': 'wav', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(wav=wav, fs=sampling_rate, frame_period=frame_period\n )\n', (17484, 17543), True, 'import utils.data_utils as preprocess\n'), ((17596, 17776), 'utils.data_utils.pitch_conversion', 'preprocess.pitch_conversion', ([], {'f0': 'f0', 'mean_log_src': 'self.log_f0s_mean_B', 'std_log_src': 'self.log_f0s_std_B', 'mean_log_target': 'self.log_f0s_mean_A', 'std_log_target': 'self.log_f0s_std_A'}), '(f0=f0, mean_log_src=self.log_f0s_mean_B,\n std_log_src=self.log_f0s_std_B, mean_log_target=self.log_f0s_mean_A,\n 
std_log_target=self.log_f0s_std_A)\n', (17623, 17776), True, 'import utils.data_utils as preprocess\n'), ((17887, 17966), 'utils.data_utils.world_encode_spectral_envelop', 'preprocess.world_encode_spectral_envelop', ([], {'sp': 'sp', 'fs': 'sampling_rate', 'dim': 'num_mcep'}), '(sp=sp, fs=sampling_rate, dim=num_mcep)\n', (17927, 17966), True, 'import utils.data_utils as preprocess\n'), ((18197, 18222), 'numpy.array', 'np.array', (['[coded_sp_norm]'], {}), '([coded_sp_norm])\n', (18205, 18222), True, 'import numpy as np\n'), ((18239, 18263), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (18261, 18263), True, 'import oneflow as flow\n'), ((18620, 18655), 'numpy.squeeze', 'np.squeeze', (['coded_sp_converted_norm'], {}), '(coded_sp_converted_norm)\n', (18630, 18655), True, 'import numpy as np\n'), ((18879, 18919), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['coded_sp_converted'], {}), '(coded_sp_converted)\n', (18899, 18919), True, 'import numpy as np\n'), ((18955, 19047), 'utils.data_utils.world_decode_spectral_envelop', 'preprocess.world_decode_spectral_envelop', ([], {'coded_sp': 'coded_sp_converted', 'fs': 'sampling_rate'}), '(coded_sp=coded_sp_converted, fs=\n sampling_rate)\n', (18995, 19047), True, 'import utils.data_utils as preprocess\n'), ((19103, 19243), 'utils.data_utils.world_speech_synthesis', 'preprocess.world_speech_synthesis', ([], {'f0': 'f0_converted', 'decoded_sp': 'decoded_sp_converted', 'ap': 'ap', 'fs': 'sampling_rate', 'frame_period': 'frame_period'}), '(f0=f0_converted, decoded_sp=\n decoded_sp_converted, ap=ap, fs=sampling_rate, frame_period=frame_period)\n', (19136, 19243), True, 'import utils.data_utils as preprocess\n'), ((19602, 19626), 'pickle.dump', 'pickle.dump', (['variable', 'f'], {}), '(variable, f)\n', (19613, 19626), False, 'import pickle\n'), ((19727, 19741), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (19738, 19741), False, 'import pickle\n'), ((20007, 20053), 'os.path.join', 
'os.path.join', (['PATH', "('generator_A2B_%d' % epoch)"], {}), "(PATH, 'generator_A2B_%d' % epoch)\n", (20019, 20053), False, 'import os\n'), ((20141, 20187), 'os.path.join', 'os.path.join', (['PATH', "('generator_B2A_%d' % epoch)"], {}), "(PATH, 'generator_B2A_%d' % epoch)\n", (20153, 20187), False, 'import os\n'), ((20277, 20325), 'os.path.join', 'os.path.join', (['PATH', "('discriminator_A_%d' % epoch)"], {}), "(PATH, 'discriminator_A_%d' % epoch)\n", (20289, 20325), False, 'import os\n'), ((20415, 20463), 'os.path.join', 'os.path.join', (['PATH', "('discriminator_B_%d' % epoch)"], {}), "(PATH, 'discriminator_B_%d' % epoch)\n", (20427, 20463), False, 'import os\n'), ((872, 896), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (894, 896), True, 'import oneflow as flow\n'), ((1605, 1616), 'model.model.Generator', 'Generator', ([], {}), '()\n', (1614, 1616), False, 'from model.model import Generator, Discriminator\n'), ((1662, 1673), 'model.model.Generator', 'Generator', ([], {}), '()\n', (1671, 1673), False, 'from model.model import Generator, Discriminator\n'), ((1721, 1736), 'model.model.Discriminator', 'Discriminator', ([], {}), '()\n', (1734, 1736), False, 'from model.model import Generator, Discriminator\n'), ((1784, 1799), 'model.model.Discriminator', 'Discriminator', ([], {}), '()\n', (1797, 1799), False, 'from model.model import Generator, Discriminator\n'), ((6533, 6563), 'oneflow.mean', 'flow.mean', (['((1 - d_fake_B) ** 2)'], {}), '((1 - d_fake_B) ** 2)\n', (6542, 6563), True, 'import oneflow as flow\n'), ((6601, 6631), 'oneflow.mean', 'flow.mean', (['((1 - d_fake_A) ** 2)'], {}), '((1 - d_fake_A) ** 2)\n', (6610, 6631), True, 'import oneflow as flow\n'), ((7965, 7995), 'oneflow.mean', 'flow.mean', (['((1 - d_real_A) ** 2)'], {}), '((1 - d_real_A) ** 2)\n', (7974, 7995), True, 'import oneflow as flow\n'), ((8028, 8058), 'oneflow.mean', 'flow.mean', (['((0 - d_fake_A) ** 2)'], {}), '((0 - d_fake_A) ** 2)\n', (8037, 8058), True, 
'import oneflow as flow\n'), ((8157, 8187), 'oneflow.mean', 'flow.mean', (['((1 - d_real_B) ** 2)'], {}), '((1 - d_real_B) ** 2)\n', (8166, 8187), True, 'import oneflow as flow\n'), ((8220, 8250), 'oneflow.mean', 'flow.mean', (['((0 - d_fake_B) ** 2)'], {}), '((0 - d_fake_B) ** 2)\n', (8229, 8250), True, 'import oneflow as flow\n'), ((8402, 8434), 'oneflow.mean', 'flow.mean', (['((0 - d_cycled_A) ** 2)'], {}), '((0 - d_cycled_A) ** 2)\n', (8411, 8434), True, 'import oneflow as flow\n'), ((8469, 8501), 'oneflow.mean', 'flow.mean', (['((0 - d_cycled_B) ** 2)'], {}), '((0 - d_cycled_B) ** 2)\n', (8478, 8501), True, 'import oneflow as flow\n'), ((9848, 9859), 'time.time', 'time.time', ([], {}), '()\n', (9857, 9859), False, 'import time\n'), ((10980, 10991), 'time.time', 'time.time', ([], {}), '()\n', (10989, 10991), False, 'import time\n'), ((11118, 11129), 'time.time', 'time.time', ([], {}), '()\n', (11127, 11129), False, 'import time\n'), ((20573, 20608), 'os.path.join', 'os.path.join', (['PATH', '"""generator_A2B"""'], {}), "(PATH, 'generator_A2B')\n", (20585, 20608), False, 'import os\n'), ((20686, 20721), 'os.path.join', 'os.path.join', (['PATH', '"""generator_B2A"""'], {}), "(PATH, 'generator_B2A')\n", (20698, 20721), False, 'import os\n'), ((20801, 20838), 'os.path.join', 'os.path.join', (['PATH', '"""discriminator_A"""'], {}), "(PATH, 'discriminator_A')\n", (20813, 20838), False, 'import os\n'), ((20918, 20955), 'os.path.join', 'os.path.join', (['PATH', '"""discriminator_B"""'], {}), "(PATH, 'discriminator_B')\n", (20930, 20955), False, 'import os\n'), ((16714, 16736), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (16730, 16736), False, 'import os\n'), ((19400, 19422), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (19416, 19422), False, 'import os\n'), ((6161, 6187), 'oneflow.abs', 'flow.abs', (['(real_A - cycle_A)'], {}), '(real_A - cycle_A)\n', (6169, 6187), True, 'import oneflow as flow\n'), ((6222, 6248), 
'oneflow.abs', 'flow.abs', (['(real_B - cycle_B)'], {}), '(real_B - cycle_B)\n', (6230, 6248), True, 'import oneflow as flow\n'), ((6350, 6379), 'oneflow.abs', 'flow.abs', (['(real_A - identity_A)'], {}), '(real_A - identity_A)\n', (6358, 6379), True, 'import oneflow as flow\n'), ((6414, 6443), 'oneflow.abs', 'flow.abs', (['(real_B - identity_B)'], {}), '(real_B - identity_B)\n', (6422, 6443), True, 'import oneflow as flow\n'), ((13004, 13030), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (13015, 13030), True, 'import oneflow as flow\n'), ((14028, 14050), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (14044, 14050), False, 'import os\n'), ((15703, 15729), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (15714, 15729), True, 'import oneflow as flow\n'), ((18389, 18415), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (18400, 18415), True, 'import oneflow as flow\n'), ((12912, 12938), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (12923, 12938), True, 'import oneflow as flow\n'), ((15611, 15637), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (15622, 15637), True, 'import oneflow as flow\n'), ((18297, 18323), 'oneflow.tensor', 'flow.tensor', (['coded_sp_norm'], {}), '(coded_sp_norm)\n', (18308, 18323), True, 'import oneflow as flow\n')] |
import argparse
import json
import os
import time
import torch
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from lab import lab
from utils import datamanager as dm
from utils.exp_log import Logger
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import TensorDataset
# Command-line interface: which experiment definition to run, on which
# dataset, with how many cross-validation folds, and whether to save logs.
parser = argparse.ArgumentParser(description='Run experiment.')
parser.add_argument('-e', '--experiment', default='cnn/cnn1_exp', help="experiment definition (json file)")
parser.add_argument('-d', '--dataset', default='hapt', help="from ['activemiles', 'hhar', 'fusion']")
parser.add_argument('-f', '--nfolds', default=5, help="number of folds", type=int)
# Flag only: presence of -s/--save enables writing the CSV result log.
parser.add_argument('-s', '--save', dest='save', action='store_true')
class Experiment:
    """Cross-validated training run driven by a JSON experiment definition.

    Loads a dataset, splits it with stratified k-fold, and trains either a
    DBN (via fit/predict) or a CNN-style model (via lab's train loop) on
    each fold, reporting accuracy and weighted F1.
    """
    def __init__(self, exp_def_file, dataset, n_folds, save_log):
        # Experiment definitions live in <repo>/exp/<exp_def_file>.json,
        # resolved relative to this source file's directory.
        self.exp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'exp', exp_def_file + '.json')
        self.exp_name = exp_def_file.split('/')[-1]
        self.dataset = dataset
        self.n_folds = n_folds
        # 1-based index of the fold currently being trained; advanced in run().
        self.k_fold = 1
        self.save_log = save_log
        # Created lazily in run() once the experiment definition is known.
        self.logger = None
    @staticmethod
    def __load_data__(dataset, gyro, preprocess):
        # NOTE(review): dunder-style name for a private helper is unusual;
        # a single leading underscore would be conventional.
        return dm.load_dataset(dataset, seq_length=100, gyro=gyro, preprocess=preprocess)
    def update(self, **kwargs):
        """Per-epoch callback passed to lab's train loop; forwards metrics
        to the logger tagged with the current fold number."""
        return self.logger.update(self.k_fold, **kwargs)
    def run(self):
        """Execute the full k-fold training/evaluation cycle."""
        with open(self.exp_path, 'r') as exp_file:
            experiment_definition = json.load(exp_file)
        gyro = experiment_definition["gyroscope"]
        arch_type = experiment_definition["type"]
        name = experiment_definition["name"]
        preprocess = experiment_definition["preprocess"]
        # Log directory is <repo>/log, built from this file's location.
        log_path = os.path.dirname('{}{}..{}log{}'.format(os.path.dirname(os.path.abspath(__file__)),
                                                os.sep, os.sep, os.sep))
        self.logger = Logger(exp_name=name, dataset=self.dataset, n_folds=self.n_folds,
                             save_log=self.save_log, log_path=log_path)
        ds = self.__load_data__(self.dataset, gyro=gyro, preprocess=preprocess)
        # Only use gyroscope channels when both requested and present.
        gyro = gyro and ds.x_gyr_train is not None
        # Accelerometer (+ optional gyroscope) channels are concatenated on
        # the feature axis.
        x, y = np.concatenate((ds.x_acc_train, ds.x_gyr_train), axis=2) if gyro else ds.x_acc_train, ds.y_train
        x_ts_np, y_ts_np = np.concatenate((ds.x_acc_test, ds.x_gyr_test), axis=2) if gyro else ds.x_acc_test, ds.y_test
        print("Test: features shape, labels shape, mean, standard deviation")
        print(x_ts_np.shape, y_ts_np.shape, np.mean(x_ts_np), np.std(x_ts_np))
        if arch_type == 'cnn':
            # CNN expects a channel dimension: (N, 1, seq, features).
            x_ts_np = np.reshape(x_ts_np, newshape=(x_ts_np.shape[0], 1, x_ts_np.shape[1], x_ts_np.shape[2]))
        elif arch_type == 'dbn':
            # DBN expects flat feature vectors: (N, seq * features).
            x = np.reshape(x, newshape=(x.shape[0], x.shape[1] * x.shape[2]))
            x_ts_np = np.reshape(x_ts_np, newshape=(x_ts_np.shape[0], x_ts_np.shape[1] * x_ts_np.shape[2]))
        use_cuda = torch.cuda.is_available()
        device = torch.device("cuda" if use_cuda else "cpu")
        print(f'Using device: {device}')
        print('Test: features shape, labels shape, mean, standard deviation')
        print(x_ts_np.shape, y_ts_np.shape, np.mean(x_ts_np), np.std(x_ts_np))
        x_ts = torch.from_numpy(x_ts_np).float().to(device)
        y_ts = torch.from_numpy(y_ts_np).long().to(device)
        # Number of output classes inferred from the training labels.
        n_out = np.unique(y).size
        skf = StratifiedKFold(n_splits=self.n_folds, shuffle=True, random_state=0)
        self.k_fold = 1
        # CSV header for the DBN result log; one row is appended per fold.
        row = 'fold, time, best_epoch, best_accuracy, best_validation_f1, best_test_f1\n'
        for tr_i, va_i in skf.split(X=x, y=y):
            x_tr, x_va = x[tr_i], x[va_i]
            y_tr, y_va = y[tr_i], y[va_i]
            print("Training: features shape, labels shape, mean, standard deviation")
            print(x_tr.shape, y_tr.shape, np.mean(x_tr), np.std(x_tr))
            print("Validation: features shape, labels shape, mean, standard deviation")
            print(x_va.shape, y_va.shape, np.mean(x_va), np.std(x_va))
            if arch_type == 'dbn':
                # DBN path: plain fit/predict API, wall-clock timed.
                lab_experiment = lab.build_experiment(self.exp_path, n_out, seed=0)
                start = time.time()
                lab_experiment.fit(x_tr, y_tr)
                end = time.time()
                elapsed = end - start
                y_pred_va = lab_experiment.predict(x_va)
                best_validation_f1 = f1_score(y_va, y_pred_va, average='weighted')
                epochs = lab_experiment.n_epochs
                # Test
                y_pred = lab_experiment.predict(x_ts_np)
                best_accuracy = accuracy_score(y_ts_np, y_pred)
                best_test_f1 = f1_score(y_ts_np, y_pred, average='weighted')
                row += f'{self.k_fold},{elapsed},{epochs},{best_accuracy},{best_validation_f1},{best_test_f1}\n'
                if self.save_log:
                    # Rewrites the full cumulative CSV after every fold so a
                    # partial log survives an interrupted run.
                    log_file_name = f'{self.dataset}_{self.exp_name}.csv'
                    log_file = os.path.join(log_path, log_file_name)
                    with open(log_file, "w") as text_file:
                        text_file.write(row)
            else:
                if arch_type == 'cnn':
                    # Add the channel dimension for the CNN, as done for the
                    # test split above.
                    x_tr = np.reshape(x_tr, newshape=(x_tr.shape[0], 1, x_tr.shape[1], x_tr.shape[2]))
                    x_va = np.reshape(x_va, newshape=(x_va.shape[0], 1, x_va.shape[1], x_va.shape[2]))
                x_tr = torch.from_numpy(x_tr).float().to(device)
                y_tr = torch.from_numpy(y_tr).long().to(device)
                x_va = torch.from_numpy(x_va).float().to(device)
                y_va = torch.from_numpy(y_va).long().to(device)
                print(np.unique(y_tr.cpu().numpy(), return_counts=True))
                print(np.unique(y_va.cpu().numpy(), return_counts=True))
                print(np.unique(y_ts.cpu().numpy(), return_counts=True))
                lab_experiment = lab.build_experiment(self.exp_path, n_out, seed=0)
                print(lab_experiment.model)
                # Torch path: lab's train loop handles epochs, evaluation and
                # reports back through self.update per epoch.
                lab_experiment.train(train_data=TensorDataset(x_tr, y_tr),
                                     validation_data=TensorDataset(x_va, y_va),
                                     test_data=TensorDataset(x_ts, y_ts),
                                     update_callback=self.update)
            self.k_fold += 1
if __name__ == "__main__":
args = parser.parse_args()
experiment = Experiment(args.experiment, args.dataset, args.nfolds, args.save)
experiment.run()
| [
"torch.from_numpy",
"sklearn.model_selection.StratifiedKFold",
"utils.datamanager.load_dataset",
"torch.cuda.is_available",
"numpy.mean",
"numpy.reshape",
"argparse.ArgumentParser",
"numpy.concatenate",
"utils.exp_log.Logger",
"torch.utils.data.TensorDataset",
"numpy.std",
"lab.lab.build_exper... | [((331, 385), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run experiment."""'}), "(description='Run experiment.')\n", (354, 385), False, 'import argparse\n'), ((1236, 1310), 'utils.datamanager.load_dataset', 'dm.load_dataset', (['dataset'], {'seq_length': '(100)', 'gyro': 'gyro', 'preprocess': 'preprocess'}), '(dataset, seq_length=100, gyro=gyro, preprocess=preprocess)\n', (1251, 1310), True, 'from utils import datamanager as dm\n'), ((1939, 2052), 'utils.exp_log.Logger', 'Logger', ([], {'exp_name': 'name', 'dataset': 'self.dataset', 'n_folds': 'self.n_folds', 'save_log': 'self.save_log', 'log_path': 'log_path'}), '(exp_name=name, dataset=self.dataset, n_folds=self.n_folds, save_log=\n self.save_log, log_path=log_path)\n', (1945, 2052), False, 'from utils.exp_log import Logger\n'), ((2980, 3005), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3003, 3005), False, 'import torch\n'), ((3023, 3066), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (3035, 3066), False, 'import torch\n'), ((3442, 3510), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'self.n_folds', 'shuffle': '(True)', 'random_state': '(0)'}), '(n_splits=self.n_folds, shuffle=True, random_state=0)\n', (3457, 3510), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1508, 1527), 'json.load', 'json.load', (['exp_file'], {}), '(exp_file)\n', (1517, 1527), False, 'import json\n'), ((2564, 2580), 'numpy.mean', 'np.mean', (['x_ts_np'], {}), '(x_ts_np)\n', (2571, 2580), True, 'import numpy as np\n'), ((2582, 2597), 'numpy.std', 'np.std', (['x_ts_np'], {}), '(x_ts_np)\n', (2588, 2597), True, 'import numpy as np\n'), ((2653, 2744), 'numpy.reshape', 'np.reshape', (['x_ts_np'], {'newshape': '(x_ts_np.shape[0], 1, x_ts_np.shape[1], x_ts_np.shape[2])'}), '(x_ts_np, newshape=(x_ts_np.shape[0], 1, x_ts_np.shape[1],\n 
x_ts_np.shape[2]))\n', (2663, 2744), True, 'import numpy as np\n'), ((3230, 3246), 'numpy.mean', 'np.mean', (['x_ts_np'], {}), '(x_ts_np)\n', (3237, 3246), True, 'import numpy as np\n'), ((3248, 3263), 'numpy.std', 'np.std', (['x_ts_np'], {}), '(x_ts_np)\n', (3254, 3263), True, 'import numpy as np\n'), ((3410, 3422), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3419, 3422), True, 'import numpy as np\n'), ((889, 914), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (904, 914), False, 'import os\n'), ((2224, 2280), 'numpy.concatenate', 'np.concatenate', (['(ds.x_acc_train, ds.x_gyr_train)'], {'axis': '(2)'}), '((ds.x_acc_train, ds.x_gyr_train), axis=2)\n', (2238, 2280), True, 'import numpy as np\n'), ((2348, 2402), 'numpy.concatenate', 'np.concatenate', (['(ds.x_acc_test, ds.x_gyr_test)'], {'axis': '(2)'}), '((ds.x_acc_test, ds.x_gyr_test), axis=2)\n', (2362, 2402), True, 'import numpy as np\n'), ((2790, 2851), 'numpy.reshape', 'np.reshape', (['x'], {'newshape': '(x.shape[0], x.shape[1] * x.shape[2])'}), '(x, newshape=(x.shape[0], x.shape[1] * x.shape[2]))\n', (2800, 2851), True, 'import numpy as np\n'), ((2874, 2964), 'numpy.reshape', 'np.reshape', (['x_ts_np'], {'newshape': '(x_ts_np.shape[0], x_ts_np.shape[1] * x_ts_np.shape[2])'}), '(x_ts_np, newshape=(x_ts_np.shape[0], x_ts_np.shape[1] * x_ts_np.\n shape[2]))\n', (2884, 2964), True, 'import numpy as np\n'), ((3885, 3898), 'numpy.mean', 'np.mean', (['x_tr'], {}), '(x_tr)\n', (3892, 3898), True, 'import numpy as np\n'), ((3900, 3912), 'numpy.std', 'np.std', (['x_tr'], {}), '(x_tr)\n', (3906, 3912), True, 'import numpy as np\n'), ((4044, 4057), 'numpy.mean', 'np.mean', (['x_va'], {}), '(x_va)\n', (4051, 4057), True, 'import numpy as np\n'), ((4059, 4071), 'numpy.std', 'np.std', (['x_va'], {}), '(x_va)\n', (4065, 4071), True, 'import numpy as np\n'), ((4142, 4192), 'lab.lab.build_experiment', 'lab.build_experiment', (['self.exp_path', 'n_out'], {'seed': '(0)'}), '(self.exp_path, 
n_out, seed=0)\n', (4162, 4192), False, 'from lab import lab\n'), ((4217, 4228), 'time.time', 'time.time', ([], {}), '()\n', (4226, 4228), False, 'import time\n'), ((4299, 4310), 'time.time', 'time.time', ([], {}), '()\n', (4308, 4310), False, 'import time\n'), ((4443, 4488), 'sklearn.metrics.f1_score', 'f1_score', (['y_va', 'y_pred_va'], {'average': '"""weighted"""'}), "(y_va, y_pred_va, average='weighted')\n", (4451, 4488), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((4651, 4682), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_ts_np', 'y_pred'], {}), '(y_ts_np, y_pred)\n', (4665, 4682), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((4714, 4759), 'sklearn.metrics.f1_score', 'f1_score', (['y_ts_np', 'y_pred'], {'average': '"""weighted"""'}), "(y_ts_np, y_pred, average='weighted')\n", (4722, 4759), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((5932, 5982), 'lab.lab.build_experiment', 'lab.build_experiment', (['self.exp_path', 'n_out'], {'seed': '(0)'}), '(self.exp_path, n_out, seed=0)\n', (5952, 5982), False, 'from lab import lab\n'), ((1805, 1830), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1820, 1830), False, 'import os\n'), ((5014, 5051), 'os.path.join', 'os.path.join', (['log_path', 'log_file_name'], {}), '(log_path, log_file_name)\n', (5026, 5051), False, 'import os\n'), ((5240, 5315), 'numpy.reshape', 'np.reshape', (['x_tr'], {'newshape': '(x_tr.shape[0], 1, x_tr.shape[1], x_tr.shape[2])'}), '(x_tr, newshape=(x_tr.shape[0], 1, x_tr.shape[1], x_tr.shape[2]))\n', (5250, 5315), True, 'import numpy as np\n'), ((5343, 5418), 'numpy.reshape', 'np.reshape', (['x_va'], {'newshape': '(x_va.shape[0], 1, x_va.shape[1], x_va.shape[2])'}), '(x_va, newshape=(x_va.shape[0], 1, x_va.shape[1], x_va.shape[2]))\n', (5353, 5418), True, 'import numpy as np\n'), ((3281, 3306), 'torch.from_numpy', 'torch.from_numpy', (['x_ts_np'], {}), '(x_ts_np)\n', (3297, 3306), False, 
'import torch\n'), ((3341, 3366), 'torch.from_numpy', 'torch.from_numpy', (['y_ts_np'], {}), '(y_ts_np)\n', (3357, 3366), False, 'import torch\n'), ((6076, 6101), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_tr', 'y_tr'], {}), '(x_tr, y_tr)\n', (6089, 6101), False, 'from torch.utils.data import TensorDataset\n'), ((6156, 6181), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_va', 'y_va'], {}), '(x_va, y_va)\n', (6169, 6181), False, 'from torch.utils.data import TensorDataset\n'), ((6230, 6255), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_ts', 'y_ts'], {}), '(x_ts, y_ts)\n', (6243, 6255), False, 'from torch.utils.data import TensorDataset\n'), ((5443, 5465), 'torch.from_numpy', 'torch.from_numpy', (['x_tr'], {}), '(x_tr)\n', (5459, 5465), False, 'import torch\n'), ((5508, 5530), 'torch.from_numpy', 'torch.from_numpy', (['y_tr'], {}), '(y_tr)\n', (5524, 5530), False, 'import torch\n'), ((5572, 5594), 'torch.from_numpy', 'torch.from_numpy', (['x_va'], {}), '(x_va)\n', (5588, 5594), False, 'import torch\n'), ((5637, 5659), 'torch.from_numpy', 'torch.from_numpy', (['y_va'], {}), '(y_va)\n', (5653, 5659), False, 'import torch\n')] |
import numpy as np
import torch
import gym
import argparse
import os
import utils
import TD3
import OurDDPG
import DDPG
from generative_replay import GenerativeReplay
from datetime import datetime
if __name__ == "__main__":
# Hyper parameters
# General
USE_GENERATIVE = True
NO_REPLAY = False
RECORD_TRAINING_TIMES = False
ENV = "InvertedPendulum-v2"
START_TIMESTEPS = 15e3
END = START_TIMESTEPS + 50e3
EVAL_FREQ = 5e3
MAX_TIMESTEPS = 2e5
SEED = 13
# FILE_NAME = ENV + "_" + list(str(datetime.now()).split())[-1]
FILE_NAME = "a"
F_TIME = 5000
VAE_F = 0
TD3_F = 0
MILESTONES = [8, 15, 20, 30, 40, 50, 60, 70, 80, 90]
# TD3 parameters
EXPL_NOISE = 0.1
BATCH_SIZE = 256
DISCOUNT = 0.99
TAU = 0.005
POLICY_NOISE = 0.2
NOISE_CLIP = 0.5
POLICY_FREQ = 2
evaluations = []
td3times = []
vaetimes = []
running_av = 0
print(f"Start new process with {ENV} and file name {FILE_NAME}")
if not os.path.exists("./results"):
os.makedirs("./results")
env = gym.make(ENV)
# Set seeds
env.seed(SEED)
torch.manual_seed(SEED)
np.random.seed(SEED)
# Some env dimentions
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
# Build TD3
kwargs = {
"state_dim": state_dim,
"action_dim": action_dim,
"max_action": max_action,
"discount": DISCOUNT,
"tau": TAU,
"policy_noise": POLICY_NOISE * max_action,
"noise_clip": NOISE_CLIP * max_action,
"policy_freq": POLICY_FREQ
}
policy = TD3.TD3(**kwargs)
# Make the replay component
replay_component = None
if USE_GENERATIVE:
replay_component = GenerativeReplay()
elif NO_REPLAY:
replay_component = utils.ReplayBuffer(state_dim, action_dim, BATCH_SIZE)
else:
replay_component = utils.ReplayBuffer(state_dim, action_dim)
training_moments = []
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num = 0
for t in range(int(MAX_TIMESTEPS)):
if TD3_F > 0:
TD3_F -= 1
episode_timesteps += 1
if t >= END:
raise ValueError
# Select action randomly or according to policy based on the start timesteps
if t < START_TIMESTEPS:
action = env.action_space.sample()
episode_num = 0
else:
replay_component.training = True
action = (
policy.select_action(np.array(state))
+ np.random.normal(0, max_action * EXPL_NOISE, size=action_dim)
).clip(-max_action, max_action)
# Perform action
next_state, reward, done, _ = env.step(action)
done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0
# Store data in replay component
VAE_training = replay_component.add(state, action, next_state, reward, done_bool)
if VAE_training:
training_moments.append(episode_num)
state = next_state
episode_reward += reward
# Train agent after collecting sufficient data
if t >= START_TIMESTEPS and TD3_F == 0:
policy.train(replay_component, BATCH_SIZE)
if done:
running_av = 0.4*running_av + 0.6*episode_reward
if t >= START_TIMESTEPS:
if running_av > MILESTONES[0] and TD3_F == 0:
MILESTONES = MILESTONES[1:]
TD3_F = F_TIME
td3times.append(episode_num)
np.save(f"./results/incoming/{FILE_NAME}_td3", td3times)
VAE_F = 0
if running_av < 4:
MILESTONES = [8, 15, 20, 30, 40, 50, 60, 70, 80, 90]
print(f"Episode {episode_num}, reward is {episode_reward}, running average {running_av}, TD3 {TD3_F}, VAE {VAE_F}, {MILESTONES}")
if t >= START_TIMESTEPS:
evaluations.append(episode_reward)
np.save(f"./results/incoming/{FILE_NAME}", evaluations)
if RECORD_TRAINING_TIMES:
np.save(f"./results/incoming/{FILE_NAME}_times", training_moments)
# Reset environment
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num += 1 | [
"numpy.random.normal",
"torch.manual_seed",
"os.path.exists",
"os.makedirs",
"generative_replay.GenerativeReplay",
"utils.ReplayBuffer",
"numpy.array",
"TD3.TD3",
"numpy.random.seed",
"gym.make",
"numpy.save"
] | [((989, 1002), 'gym.make', 'gym.make', (['ENV'], {}), '(ENV)\n', (997, 1002), False, 'import gym\n'), ((1034, 1057), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (1051, 1057), False, 'import torch\n'), ((1059, 1079), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1073, 1079), True, 'import numpy as np\n'), ((1511, 1528), 'TD3.TD3', 'TD3.TD3', ([], {}), '(**kwargs)\n', (1518, 1528), False, 'import TD3\n'), ((925, 952), 'os.path.exists', 'os.path.exists', (['"""./results"""'], {}), "('./results')\n", (939, 952), False, 'import os\n'), ((956, 980), 'os.makedirs', 'os.makedirs', (['"""./results"""'], {}), "('./results')\n", (967, 980), False, 'import os\n'), ((1625, 1643), 'generative_replay.GenerativeReplay', 'GenerativeReplay', ([], {}), '()\n', (1641, 1643), False, 'from generative_replay import GenerativeReplay\n'), ((1682, 1735), 'utils.ReplayBuffer', 'utils.ReplayBuffer', (['state_dim', 'action_dim', 'BATCH_SIZE'], {}), '(state_dim, action_dim, BATCH_SIZE)\n', (1700, 1735), False, 'import utils\n'), ((1764, 1805), 'utils.ReplayBuffer', 'utils.ReplayBuffer', (['state_dim', 'action_dim'], {}), '(state_dim, action_dim)\n', (1782, 1805), False, 'import utils\n'), ((3543, 3598), 'numpy.save', 'np.save', (['f"""./results/incoming/{FILE_NAME}"""', 'evaluations'], {}), "(f'./results/incoming/{FILE_NAME}', evaluations)\n", (3550, 3598), True, 'import numpy as np\n'), ((3179, 3235), 'numpy.save', 'np.save', (['f"""./results/incoming/{FILE_NAME}_td3"""', 'td3times'], {}), "(f'./results/incoming/{FILE_NAME}_td3', td3times)\n", (3186, 3235), True, 'import numpy as np\n'), ((3634, 3700), 'numpy.save', 'np.save', (['f"""./results/incoming/{FILE_NAME}_times"""', 'training_moments'], {}), "(f'./results/incoming/{FILE_NAME}_times', training_moments)\n", (3641, 3700), True, 'import numpy as np\n'), ((2329, 2390), 'numpy.random.normal', 'np.random.normal', (['(0)', '(max_action * EXPL_NOISE)'], {'size': 'action_dim'}), '(0, 
max_action * EXPL_NOISE, size=action_dim)\n', (2345, 2390), True, 'import numpy as np\n'), ((2306, 2321), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (2314, 2321), True, 'import numpy as np\n')] |
from typing import Any, Dict, Iterator, List, Optional, Union
from itertools import chain
from pathlib import Path
import os
import re
from IPython.display import HTML
from pandas import DataFrame, Series
import lunchbox.tools as lbt
import networkx
import numpy as np
from rolling_pin.radon_etl import RadonETL
import rolling_pin.tools as tools
# ------------------------------------------------------------------------------
'''
Contains the RepoETL class, which is used to convert python repository module
dependencies into a directed graph.
'''
class RepoETL():
    '''
    RepoETL is a class for extracting 1st order dependencies of modules within a
    given repository. This information is stored internally as a DataFrame and
    can be rendered as networkx, pydot or SVG graphs.
    '''
    def __init__(
        self,
        root,
        include_regex=r'.*\.py$',
        exclude_regex=r'(__init__|test_|_test|mock_)\.py$',
    ):
        # type: (Union[str, Path], str, str) -> None
        r'''
        Construct RepoETL instance.

        Args:
            root (str or Path): Full path to repository root directory.
            include_regex (str, optional): Files to be included in recursive
                directory search. Default: '.*\.py$'.
            exclude_regex (str, optional): Files to be excluded in recursive
                directory search. Default: '(__init__|test_|_test|mock_)\.py$'.

        Raises:
            ValueError: If include or exclude regex does not end in '\.py$'.
        '''
        self._root = root  # type: Union[str, Path]
        self._data = self._get_data(root, include_regex, exclude_regex)  # type: DataFrame

    @staticmethod
    def _get_imports(fullpath):
        # type: (Union[str, Path]) -> List[str]
        '''
        Gets import statements from a given python module.

        Args:
            fullpath (str or Path): Path to python module.

        Returns:
            list(str): List of imported modules.
        '''
        with open(fullpath) as f:
            data = f.readlines()  # type: Union[List, Iterator]
        # normalize each import line down to the bare module name
        data = map(lambda x: x.strip('\n'), data)
        data = filter(lambda x: re.search('^import|^from', x), data)
        data = map(lambda x: re.sub('from (.*?) .*', '\\1', x), data)
        data = map(lambda x: re.sub(' as .*', '', x), data)
        data = map(lambda x: re.sub(' *#.*', '', x), data)
        data = map(lambda x: re.sub('import ', '', x), data)
        # drop standard-library modules; only external/local deps remain
        data = filter(lambda x: not lbt.is_standard_module(x), data)
        return list(data)

    @staticmethod
    def _get_data(
        root,
        include_regex=r'.*\.py$',
        exclude_regex=r'(__init__|_test)\.py$',
    ):
        # type: (Union[str, Path], str, str) -> DataFrame
        r'''
        Recursively aggregates and filters all the files found within a given
        directory into a DataFrame. Data is used to create directed graphs.

        DataFrame has these columns:

            * node_name - name of node
            * node_type - type of node, can be [module, subpackage, library]
            * x - node's x coordinate
            * y - node's y coordinate
            * dependencies - parent nodes
            * subpackages - parent nodes of type subpackage
            * fullpath - fullpath to the module a node represents

        Args:
            root (str or Path): Root directory to be searched.
            include_regex (str, optional): Files to be included in recursive
                directory search. Default: '.*\.py$'.
            exclude_regex (str, optional): Files to be excluded in recursive
                directory search. Default: '(__init__|_test)\.py$'.

        Raises:
            ValueError: If include or exclude regex does not end in '\.py$'.
            FileNotFoundError: If no files are found after filtering.

        Returns:
            DataFrame: DataFrame of file information.
        '''
        root = Path(root).as_posix()
        files = tools.list_all_files(root)  # type: Union[Iterator, List]
        if include_regex != '':
            if not include_regex.endswith(r'\.py$'):
                msg = f"Invalid include_regex: '{include_regex}'. "
                msg += r"Does not end in '.py$'."
                raise ValueError(msg)

            files = filter(
                lambda x: re.search(include_regex, x.absolute().as_posix()),
                files
            )
        if exclude_regex != '':
            files = filter(
                lambda x: not re.search(exclude_regex, x.absolute().as_posix()),
                files
            )
        files = list(files)
        if len(files) == 0:
            msg = f'No files found after filters in directory: {root}.'
            raise FileNotFoundError(msg)

        # build DataFrame of nodes and imported dependencies
        data = DataFrame()
        data['fullpath'] = files
        data.fullpath = data.fullpath.apply(lambda x: x.absolute().as_posix())

        # derive dotted module names from file paths (a/b/c.py -> a.b.c)
        data['node_name'] = data.fullpath\
            .apply(lambda x: re.sub(root, '', x))\
            .apply(lambda x: re.sub(r'\.py$', '', x))\
            .apply(lambda x: re.sub('^/', '', x))\
            .apply(lambda x: re.sub('/', '.', x))

        data['subpackages'] = data.node_name\
            .apply(lambda x: tools.get_parent_fields(x, '.')).apply(lbt.get_ordered_unique)
        data.subpackages = data.subpackages\
            .apply(lambda x: list(filter(lambda y: y != '', x)))

        # dependencies = imported modules plus the module's own parent package
        data['dependencies'] = data.fullpath\
            .apply(RepoETL._get_imports).apply(lbt.get_ordered_unique)
        data.dependencies += data.node_name\
            .apply(lambda x: ['.'.join(x.split('.')[:-1])])
        data.dependencies = data.dependencies\
            .apply(lambda x: list(filter(lambda y: y != '', x)))

        data['node_type'] = 'module'

        # add subpackages as nodes
        pkgs = set(chain(*data.subpackages.tolist()))  # type: Any
        pkgs = pkgs.difference(data.node_name.tolist())
        pkgs = sorted(list(pkgs))
        pkgs = Series(pkgs)\
            .apply(
                lambda x: dict(
                    node_name=x,
                    node_type='subpackage',
                    dependencies=tools.get_parent_fields(x, '.'),
                    subpackages=tools.get_parent_fields(x, '.'),
                )).tolist()
        pkgs = DataFrame(pkgs)
        # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
        # removed in 2.0; switch to pandas.concat when upgrading pandas.
        data = data.append(pkgs, ignore_index=True, sort=True)

        # add library dependencies as nodes
        libs = set(chain(*data.dependencies.tolist()))  # type: Any
        libs = libs.difference(data.node_name.tolist())
        libs = sorted(list(libs))
        libs = Series(libs)\
            .apply(
                lambda x: dict(
                    node_name=x,
                    node_type='library',
                    dependencies=[],
                    subpackages=[],
                )).tolist()
        libs = DataFrame(libs)
        data = data.append(libs, ignore_index=True, sort=True)

        data.drop_duplicates('node_name', inplace=True)
        data.reset_index(drop=True, inplace=True)

        # define node coordinates
        data['x'] = 0
        data['y'] = 0
        data = RepoETL._calculate_coordinates(data)
        data = RepoETL._anneal_coordinate(data, 'x', 'y')
        data = RepoETL._center_coordinate(data, 'x', 'y')

        data.sort_values('fullpath', inplace=True)
        data.reset_index(drop=True, inplace=True)

        cols = [
            'node_name',
            'node_type',
            'x',
            'y',
            'dependencies',
            'subpackages',
            'fullpath',
        ]
        data = data[cols]
        return data

    @staticmethod
    def _calculate_coordinates(data):
        # type: (DataFrame) -> DataFrame
        '''
        Calculate initial x, y coordinates for each node in given DataFrame.
        Nodes are stratified by type along the y axis.

        Args:
            DataFrame: DataFrame of nodes.

        Returns:
            DataFrame: DataFrame with x and y coordinate columns.
        '''
        # set initial node coordinates
        data['y'] = 0
        for item in ['module', 'subpackage', 'library']:
            mask = data.node_type == item
            n = data[mask].shape[0]

            index = data[mask].index
            data.loc[index, 'x'] = list(range(n))

            # move non-library nodes down the y axis according to how nested
            # they are
            if item != 'library':
                data.loc[index, 'y'] = data.loc[index, 'node_name']\
                    .apply(lambda x: len(x.split('.')))

        # move all module nodes beneath subpackage nodes on the y axis
        max_ = data[data.node_type == 'subpackage'].y.max()
        index = data[data.node_type == 'module'].index
        data.loc[index, 'y'] += max_
        data.loc[index, 'y'] += data.loc[index, 'subpackages'].apply(len)

        # reverse y axis
        max_ = data.y.max()
        data.y = -1 * data.y + max_
        return data

    @staticmethod
    def _anneal_coordinate(data, anneal_axis='x', pin_axis='y', iterations=10):
        # type: (DataFrame, str, str, int) -> DataFrame
        '''
        Iteratively align nodes in the anneal axis according to the mean
        position of their connected nodes. Node anneal coordinates are rectified
        at the end of each iteration according to a pin axis, so that they do
        not overlap. This means that they are sorted at each level of the pin
        axis.

        Args:
            data (DataFrame): DataFrame with x column.
            anneal_axis (str, optional): Coordinate column to be annealed.
                Default: 'x'.
            pin_axis (str, optional): Coordinate column to be held constant.
                Default: 'y'.
            iterations (int, optional): Number of times to update x coordinates.
                Default: 10.

        Returns:
            DataFrame: DataFrame with annealed anneal axis coordinates.
        '''
        x = anneal_axis
        y = pin_axis
        for iteration in range(iterations):
            # create directed graph from data
            graph = RepoETL._to_networkx_graph(data)

            # reverse connectivity every other iteration
            if iteration % 2 == 0:
                graph = graph.reverse()

            # get mean coordinate of each node in directed graph
            for name in graph.nodes:
                tree = networkx.bfs_tree(graph, name)
                mu = np.mean([graph.nodes[n][x] for n in tree])
                graph.nodes[name][x] = mu

            # update data coordinate column
            for node in graph.nodes:
                mask = data[data.node_name == node].index
                data.loc[mask, x] = graph.nodes[node][x]

            # rectify data coordinate column, so that no two nodes overlap
            data.sort_values(x, inplace=True)
            for yi in data[y].unique():
                mask = data[data[y] == yi].index
                values = data.loc[mask, x].tolist()
                values = list(range(len(values)))
                data.loc[mask, x] = values
        return data

    @staticmethod
    def _center_coordinate(data, center_axis='x', pin_axis='y'):
        # (DataFrame, str, str) -> DataFrame
        '''
        Center center_axis coordinates at each level of the pin axis.

        Args:
            data (DataFrame): DataFrame with x column.
            center_axis (str, optional): Coordinate column to be centered.
                Default: 'x'.
            pin_axis (str, optional): Coordinate column to be held constant.
                Default: 'y'.

        Returns:
            DataFrame: DataFrame with centered center axis coordinates.
        '''
        x = center_axis
        y = pin_axis
        max_ = data[x].max()
        for yi in data[y].unique():
            mask = data[data[y] == yi].index
            l_max = data.loc[mask, x].max()
            # shift every row at this pin level by half the remaining width
            delta = max_ - l_max
            data.loc[mask, x] += (delta / 2)
        return data

    @staticmethod
    def _to_networkx_graph(data):
        # (DataFrame) -> networkx.DiGraph
        '''
        Converts given DataFrame into networkx directed graph.

        Args:
            DataFrame: DataFrame of nodes.

        Returns:
            networkx.DiGraph: Graph of nodes.
        '''
        graph = networkx.DiGraph()
        # add each row as a node carrying all of its columns as attributes
        data.apply(
            lambda x: graph.add_node(
                x.node_name,
                **{k: getattr(x, k) for k in x.index}
            ),
            axis=1
        )
        # add an edge from each dependency (parent) to its node
        data.apply(
            lambda x: [graph.add_edge(p, x.node_name) for p in x.dependencies],
            axis=1
        )
        return graph

    def to_networkx_graph(self):
        # () -> networkx.DiGraph
        '''
        Converts internal data into networkx directed graph.

        Returns:
            networkx.DiGraph: Graph of nodes.
        '''
        return RepoETL._to_networkx_graph(self._data)

    def to_dot_graph(self, orient='tb', orthogonal_edges=False, color_scheme=None):
        # (str, bool, Optional[Dict[str, str]]) -> pydot.Dot
        '''
        Converts internal data into pydot graph.

        Args:
            orient (str, optional): Graph layout orientation. Default: tb.
                Options include:

                * tb - top to bottom
                * bt - bottom to top
                * lr - left to right
                * rl - right to left
            orthogonal_edges (bool, optional): Whether graph edges should have
                non-right angles. Default: False.
            color_scheme: (dict, optional): Color scheme to be applied to graph.
                Default: rolling_pin.tools.COLOR_SCHEME

        Raises:
            ValueError: If orient is invalid.

        Returns:
            pydot.Dot: Dot graph of nodes.
        '''
        orient = orient.lower()
        orientations = ['tb', 'bt', 'lr', 'rl']
        if orient not in orientations:
            msg = f'Invalid orient value. {orient} not in {orientations}.'
            raise ValueError(msg)

        # set color scheme of graph
        if color_scheme is None:
            color_scheme = tools.COLOR_SCHEME

        # create dot graph
        graph = self.to_networkx_graph()
        dot = networkx.drawing.nx_pydot.to_pydot(graph)

        # set layout orientation
        dot.set_rankdir(orient.upper())

        # set graph background color
        dot.set_bgcolor(color_scheme['background'])

        # set edge draw type
        if orthogonal_edges:
            dot.set_splines('ortho')

        # set draw parameters for each node in graph
        for node in dot.get_nodes():
            # set node shape, color and font attributes
            node.set_shape('rect')
            node.set_style('filled')
            node.set_color(color_scheme['node'])
            node.set_fillcolor(color_scheme['node'])
            node.set_fontname('Courier')

            # pydot quotes node names; strip quotes to look up the nx node
            nx_node = re.sub('"', '', node.get_name())
            nx_node = graph.nodes[nx_node]

            # if networkx node has no attributes skip it
            # this should not ever occur but might
            if nx_node == {}:
                continue  # pragma: no cover

            # set node x, y coordinates
            node.set_pos(f"{nx_node['x']},{nx_node['y']}!")

            # vary node font color by node type
            if nx_node['node_type'] == 'library':
                node.set_fontcolor(color_scheme['node_library_font'])
            elif nx_node['node_type'] == 'subpackage':
                node.set_fontcolor(color_scheme['node_subpackage_font'])
            else:
                node.set_fontcolor(color_scheme['node_module_font'])

        # set draw parameters for each edge in graph
        for edge in dot.get_edges():
            # get networkx source node of edge
            nx_node = dot.get_node(edge.get_source())
            nx_node = nx_node[0].get_name()
            nx_node = re.sub('"', '', nx_node)
            nx_node = graph.nodes[nx_node]

            # if networkx source node has no attributes skip it
            # this should not ever occur but might
            if nx_node == {}:
                continue  # pragma: no cover

            # vary edge color by its source node type
            if nx_node['node_type'] == 'library':
                edge.set_color(color_scheme['edge_library'])
            elif nx_node['node_type'] == 'subpackage':
                edge.set_color(color_scheme['edge_subpackage'])
            else:
                # this line is actually covered but pytest doesn't think so
                edge.set_color(color_scheme['edge_module'])  # pragma: no cover
        return dot

    def to_dataframe(self):
        # type: () -> DataFrame
        '''
        Returns:
            DataFrame: DataFrame of nodes representing repo modules.
        '''
        return self._data.copy()

    def to_html(
        self,
        layout='dot',
        orthogonal_edges=False,
        color_scheme=None,
        as_png=False
    ):
        # type: (str, bool, Optional[Dict[str, str]], bool) -> HTML
        '''
        For use in inline rendering of graph data in Jupyter Lab.

        Args:
            layout (str, optional): Graph layout style.
                Options include: circo, dot, fdp, neato, sfdp, twopi.
                Default: dot.
            orthogonal_edges (bool, optional): Whether graph edges should have
                non-right angles. Default: False.
            color_scheme: (dict, optional): Color scheme to be applied to graph.
                Default: rolling_pin.tools.COLOR_SCHEME
            as_png (bool, optional): Display graph as a PNG image instead of
                SVG. Useful for display on Github. Default: False.

        Returns:
            IPython.display.HTML: HTML object for inline display.
        '''
        if color_scheme is None:
            color_scheme = tools.COLOR_SCHEME

        dot = self.to_dot_graph(
            orthogonal_edges=orthogonal_edges,
            color_scheme=color_scheme,
        )
        return tools.dot_to_html(dot, layout=layout, as_png=as_png)

    def write(
        self,
        fullpath,
        layout='dot',
        orient='tb',
        orthogonal_edges=False,
        color_scheme=None
    ):
        # type: (Union[str, Path], str, str, bool, Optional[Dict[str, str]]) -> RepoETL
        '''
        Writes internal data to a given filepath.
        Formats supported: svg, dot, png, json.

        Args:
            fullpath (str or Path): File to be written to.
            layout (str, optional): Graph layout style.
                Options include: circo, dot, fdp, neato, sfdp, twopi. Default: dot.
            orient (str, optional): Graph layout orientation. Default: tb.
                Options include:

                * tb - top to bottom
                * bt - bottom to top
                * lr - left to right
                * rl - right to left
            orthogonal_edges (bool, optional): Whether graph edges should have
                non-right angles. Default: False.
            color_scheme: (dict, optional): Color scheme to be applied to graph.
                Default: rolling_pin.tools.COLOR_SCHEME

        Raises:
            ValueError: If invalid file extension given.

        Returns:
            RepoETL: Self.
        '''
        if isinstance(fullpath, Path):
            fullpath = fullpath.absolute().as_posix()

        _, ext = os.path.splitext(fullpath)
        ext = re.sub(r'^\.', '', ext)
        # JSON is written directly from the DataFrame; no graph needed
        if re.search('^json$', ext, re.I):
            self._data.to_json(fullpath, orient='records')
            return self

        if color_scheme is None:
            color_scheme = tools.COLOR_SCHEME

        graph = self.to_dot_graph(
            orient=orient,
            orthogonal_edges=orthogonal_edges,
            color_scheme=color_scheme,
        )
        try:
            tools.write_dot_graph(graph, fullpath, layout=layout,)
        except ValueError:
            msg = f'Invalid extension found: {ext}. '
            msg += 'Valid extensions include: svg, dot, png, json.'
            raise ValueError(msg)
        return self
# ------------------------------------------------------------------------------
def write_repo_architecture(
    source, target, exclude_regex='test|mock', orient='lr'
):
    # type: (Union[str, Path], Union[str, Path], str, str) -> None
    '''
    Convenience function for writing a repo architecture graph.

    Builds a RepoETL for the given repository, drops every node whose name
    matches exclude_regex (and removes it from all dependency lists), then
    writes the resulting graph to target.

    Args:
        source (str or Path): Repo directory.
        target (str or Path): Target filepath.
        exclude_regex (str, optional): Exclude files that match this regex pattern.
            Default: 'test|mock'.
        orient (str, optional): Graph orientation. Default: lr.
    '''
    etl = RepoETL(source)
    data = etl._data.copy()

    # named function instead of a lambda bound to a name (PEP 8 E731)
    def keep(node_name):
        # type: (str) -> bool
        '''Return True if the node name does not match exclude_regex.'''
        return not bool(re.search(exclude_regex, node_name))

    mask = data.node_name.apply(keep)
    data = data[mask]
    data.reset_index(inplace=True, drop=True)
    # also prune excluded nodes from every node's dependency list
    data.dependencies = data.dependencies.apply(lambda x: list(filter(keep, x)))
    etl._data = data
    etl.write(target, orient=orient)
def write_repo_plots_and_tables(source, plot_path, table_dir):
    # type: (Union[str, Path], Union[str, Path], Union[str, Path]) -> None
    '''
    Generate radon metric plot and table artifacts for a repository.

    Args:
        source (str or Path): Repo directory.
        plot_path (str or Path): Filepath the plot is written to.
        table_dir (str or Path): Directory the tables are written to.
    '''
    radon_etl = RadonETL(source)
    radon_etl.write_plots(plot_path)
    radon_etl.write_tables(table_dir)
| [
"networkx.drawing.nx_pydot.to_pydot",
"numpy.mean",
"pandas.Series",
"rolling_pin.tools.write_dot_graph",
"rolling_pin.tools.list_all_files",
"pathlib.Path",
"networkx.DiGraph",
"os.path.splitext",
"rolling_pin.tools.get_parent_fields",
"lunchbox.tools.is_standard_module",
"rolling_pin.tools.dot... | [((21738, 21754), 'rolling_pin.radon_etl.RadonETL', 'RadonETL', (['source'], {}), '(source)\n', (21746, 21754), False, 'from rolling_pin.radon_etl import RadonETL\n'), ((4007, 4033), 'rolling_pin.tools.list_all_files', 'tools.list_all_files', (['root'], {}), '(root)\n', (4027, 4033), True, 'import rolling_pin.tools as tools\n'), ((4871, 4882), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (4880, 4882), False, 'from pandas import DataFrame, Series\n'), ((6393, 6408), 'pandas.DataFrame', 'DataFrame', (['pkgs'], {}), '(pkgs)\n', (6402, 6408), False, 'from pandas import DataFrame, Series\n'), ((6946, 6961), 'pandas.DataFrame', 'DataFrame', (['libs'], {}), '(libs)\n', (6955, 6961), False, 'from pandas import DataFrame, Series\n'), ((12530, 12548), 'networkx.DiGraph', 'networkx.DiGraph', ([], {}), '()\n', (12546, 12548), False, 'import networkx\n'), ((14463, 14504), 'networkx.drawing.nx_pydot.to_pydot', 'networkx.drawing.nx_pydot.to_pydot', (['graph'], {}), '(graph)\n', (14497, 14504), False, 'import networkx\n'), ((18281, 18333), 'rolling_pin.tools.dot_to_html', 'tools.dot_to_html', (['dot'], {'layout': 'layout', 'as_png': 'as_png'}), '(dot, layout=layout, as_png=as_png)\n', (18298, 18333), True, 'import rolling_pin.tools as tools\n'), ((19668, 19694), 'os.path.splitext', 'os.path.splitext', (['fullpath'], {}), '(fullpath)\n', (19684, 19694), False, 'import os\n'), ((19709, 19732), 're.sub', 're.sub', (['"""^\\\\."""', '""""""', 'ext'], {}), "('^\\\\.', '', ext)\n", (19715, 19732), False, 'import re\n'), ((19744, 19774), 're.search', 're.search', (['"""^json$"""', 'ext', 're.I'], {}), "('^json$', ext, re.I)\n", (19753, 19774), False, 'import re\n'), ((16152, 16176), 're.sub', 're.sub', (['"""\\""""', '""""""', 'nx_node'], {}), '(\'"\', \'\', nx_node)\n', (16158, 16176), False, 'import re\n'), ((20123, 20176), 'rolling_pin.tools.write_dot_graph', 'tools.write_dot_graph', (['graph', 'fullpath'], {'layout': 'layout'}), '(graph, fullpath, 
layout=layout)\n', (20144, 20176), True, 'import rolling_pin.tools as tools\n'), ((2178, 2207), 're.search', 're.search', (['"""^import|^from"""', 'x'], {}), "('^import|^from', x)\n", (2187, 2207), False, 'import re\n'), ((2244, 2277), 're.sub', 're.sub', (['"""from (.*?) .*"""', '"""\\\\1"""', 'x'], {}), "('from (.*?) .*', '\\\\1', x)\n", (2250, 2277), False, 'import re\n'), ((2314, 2337), 're.sub', 're.sub', (['""" as .*"""', '""""""', 'x'], {}), "(' as .*', '', x)\n", (2320, 2337), False, 'import re\n'), ((2374, 2396), 're.sub', 're.sub', (['""" *#.*"""', '""""""', 'x'], {}), "(' *#.*', '', x)\n", (2380, 2396), False, 'import re\n'), ((2433, 2457), 're.sub', 're.sub', (['"""import """', '""""""', 'x'], {}), "('import ', '', x)\n", (2439, 2457), False, 'import re\n'), ((3969, 3979), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (3973, 3979), False, 'from pathlib import Path\n'), ((5225, 5244), 're.sub', 're.sub', (['"""/"""', '"""."""', 'x'], {}), "('/', '.', x)\n", (5231, 5244), False, 'import re\n'), ((10495, 10525), 'networkx.bfs_tree', 'networkx.bfs_tree', (['graph', 'name'], {}), '(graph, name)\n', (10512, 10525), False, 'import networkx\n'), ((10547, 10589), 'numpy.mean', 'np.mean', (['[graph.nodes[n][x] for n in tree]'], {}), '([graph.nodes[n][x] for n in tree])\n', (10554, 10589), True, 'import numpy as np\n'), ((21072, 21099), 're.search', 're.search', (['exclude_regex', 'x'], {}), '(exclude_regex, x)\n', (21081, 21099), False, 'import re\n'), ((2501, 2526), 'lunchbox.tools.is_standard_module', 'lbt.is_standard_module', (['x'], {}), '(x)\n', (2523, 2526), True, 'import lunchbox.tools as lbt\n'), ((5174, 5193), 're.sub', 're.sub', (['"""^/"""', '""""""', 'x'], {}), "('^/', '', x)\n", (5180, 5193), False, 'import re\n'), ((5322, 5353), 'rolling_pin.tools.get_parent_fields', 'tools.get_parent_fields', (['x', '"""."""'], {}), "(x, '.')\n", (5345, 5353), True, 'import rolling_pin.tools as tools\n'), ((6076, 6088), 'pandas.Series', 'Series', (['pkgs'], 
{}), '(pkgs)\n', (6082, 6088), False, 'from pandas import DataFrame, Series\n'), ((6690, 6702), 'pandas.Series', 'Series', (['libs'], {}), '(libs)\n', (6696, 6702), False, 'from pandas import DataFrame, Series\n'), ((5119, 5142), 're.sub', 're.sub', (['"""\\\\.py$"""', '""""""', 'x'], {}), "('\\\\.py$', '', x)\n", (5125, 5142), False, 'import re\n'), ((6252, 6283), 'rolling_pin.tools.get_parent_fields', 'tools.get_parent_fields', (['x', '"""."""'], {}), "(x, '.')\n", (6275, 6283), True, 'import rolling_pin.tools as tools\n'), ((6317, 6348), 'rolling_pin.tools.get_parent_fields', 'tools.get_parent_fields', (['x', '"""."""'], {}), "(x, '.')\n", (6340, 6348), True, 'import rolling_pin.tools as tools\n'), ((5068, 5087), 're.sub', 're.sub', (['root', '""""""', 'x'], {}), "(root, '', x)\n", (5074, 5087), False, 'import re\n')] |
# coding = utf-8
# coding by liuyunfei
# origin code from open3d samples(github)
import numpy as np
import open3d as op3
import matplotlib.pyplot as plt
import copy
import time
from trajectory_io import *
import os
import sys
if __name__ == "__main__":
op3.utility.set_verbosity_level(op3.utility.VerbosityLevel.Debug)
source_raw = op3.io.read_point_cloud("demodata/ICP/cloud_bin_0.pcd")
target_raw = op3.io.read_point_cloud("demodata/ICP/cloud_bin_1.pcd")
source = source_raw.voxel_down_sample(voxel_size = 0.02)
target = target_raw.voxel_down_sample(voxel_size = 0.02)
trans = [[0.862,0.011,-0.507,0.0],
[-0.139,0.967,-0.215,0.7],
[0.487,0.255,0.835,-1.4],
[0.0,0.0,0.0,1.0]]
source.transform(trans)
flip_tranform = [[1,0,0,0],
[0,-1,0,0],
[0,0,-1,0],
[0,0,0,1]]
source.transform(flip_tranform)
target.transform(flip_tranform)
vis =op3.visualization.Visualizer()
vis.create_window(width=1280,height=720)
vis.add_geometry(source)
vis.add_geometry(target)
threshold =0.05
icp_iteration =200
save_image = False
time.sleep(6)
for i in range(icp_iteration):
reg_p2l = op3.registration.registration_icp(
source,
target,
threshold,
np.identity(4),
op3.registration.TransformationEstimationPointToPlane(),
op3.registration.ICPConvergenceCriteria(max_iteration=1)
)
source.transform(reg_p2l.transformation)
vis.update_geometry()
vis.poll_events()
vis.update_renderer()
time.sleep(0.05)
if save_image:
vis.capture_screen_image("temp_%04d.jpg" %i)
vis.destroy_window()
| [
"numpy.identity",
"open3d.registration.TransformationEstimationPointToPlane",
"open3d.visualization.Visualizer",
"time.sleep",
"open3d.utility.set_verbosity_level",
"open3d.registration.ICPConvergenceCriteria",
"open3d.io.read_point_cloud"
] | [((262, 327), 'open3d.utility.set_verbosity_level', 'op3.utility.set_verbosity_level', (['op3.utility.VerbosityLevel.Debug'], {}), '(op3.utility.VerbosityLevel.Debug)\n', (293, 327), True, 'import open3d as op3\n'), ((346, 401), 'open3d.io.read_point_cloud', 'op3.io.read_point_cloud', (['"""demodata/ICP/cloud_bin_0.pcd"""'], {}), "('demodata/ICP/cloud_bin_0.pcd')\n", (369, 401), True, 'import open3d as op3\n'), ((419, 474), 'open3d.io.read_point_cloud', 'op3.io.read_point_cloud', (['"""demodata/ICP/cloud_bin_1.pcd"""'], {}), "('demodata/ICP/cloud_bin_1.pcd')\n", (442, 474), True, 'import open3d as op3\n'), ((985, 1015), 'open3d.visualization.Visualizer', 'op3.visualization.Visualizer', ([], {}), '()\n', (1013, 1015), True, 'import open3d as op3\n'), ((1189, 1202), 'time.sleep', 'time.sleep', (['(6)'], {}), '(6)\n', (1199, 1202), False, 'import time\n'), ((1673, 1689), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1683, 1689), False, 'import time\n'), ((1366, 1380), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1377, 1380), True, 'import numpy as np\n'), ((1394, 1449), 'open3d.registration.TransformationEstimationPointToPlane', 'op3.registration.TransformationEstimationPointToPlane', ([], {}), '()\n', (1447, 1449), True, 'import open3d as op3\n'), ((1463, 1519), 'open3d.registration.ICPConvergenceCriteria', 'op3.registration.ICPConvergenceCriteria', ([], {'max_iteration': '(1)'}), '(max_iteration=1)\n', (1502, 1519), True, 'import open3d as op3\n')] |
from builtins import zip
from . import (get_explicit_k_path as _raw_explicit_path, get_path as
_raw_get_path)
DEPRECATION_DOCS_URL = "http://seekpath.readthedocs.io/en/latest/maindoc.html#aiida-integration"
def _aiida_to_tuple(aiida_structure):
    """
    Convert an AiiDA structure to a tuple of the format
    (cell, scaled_positions, element_numbers).
    .. deprecated:: 1.8
        Use the methods in AiiDA instead.
    :param aiida_structure: the AiiDA structure
    :return: (structure_tuple, kind_info, kinds) where structure_tuple
        is a tuple of format (cell, scaled_positions, element_numbers);
        kind_info is a dictionary mapping the kind_names to
        the numbers used in element_numbers. When possible, it uses
        the Z number of the element, otherwise it uses numbers > 1000;
        kinds is a list of the kinds of the structure.
    """
    import warnings
    warnings.warn(
        'this method has been deprecated and moved to AiiDA, see {}'.format(
            DEPRECATION_DOCS_URL), DeprecationWarning)
    import numpy as np
    from aiida.common.constants import elements
    def get_new_number(the_list, start_from):
        """
        Get the first integer >= start_from not yet in the list
        """
        retval = start_from
        # Only numbers >= start_from can collide with the candidate value.
        comp_list = sorted(_ for _ in the_list if _ >= start_from)
        current_pos = 0
        found = False
        # Walk the sorted list in lockstep with the candidate: every hit
        # bumps the candidate by one, so the first gap is returned.
        while not found:
            if len(comp_list) <= current_pos:
                return retval
            if retval == comp_list[current_pos]:
                current_pos += 1
                retval += 1
            else:
                found = True
        return retval
    # Map chemical symbol -> atomic number Z.
    Z = {v['symbol']: k for k, v in elements.items()}
    cell = np.array(aiida_structure.cell)
    abs_pos = np.array([_.position for _ in aiida_structure.sites])
    # Convert Cartesian positions to fractional (scaled) coordinates.
    rel_pos = np.dot(abs_pos, np.linalg.inv(cell))
    # NOTE(review): this name->kind mapping is never used below.
    kinds = {k.name: k for k in aiida_structure.kinds}
    kind_numbers = {}
    for kind in aiida_structure.kinds:
        if len(kind.symbols) == 1:
            # Single-element kind: use its Z number if still free, otherwise
            # allocate a unique number starting from Z * 1000.
            realnumber = Z[kind.symbols[0]]
            if realnumber in list(kind_numbers.values()):
                number = get_new_number(
                    list(kind_numbers.values()), start_from=realnumber * 1000)
            else:
                number = realnumber
            kind_numbers[kind.name] = number
        else:
            # Alloy/multi-element kind: allocate in a dedicated high range.
            number = get_new_number(
                list(kind_numbers.values()), start_from=200000)
            kind_numbers[kind.name] = number
    numbers = [kind_numbers[s.kind_name] for s in aiida_structure.sites]
    return ((cell, rel_pos, numbers), kind_numbers, list(aiida_structure.kinds))
def _tuple_to_aiida(structure_tuple, kind_info=None, kinds=None):
    """
    Convert an tuple of the format
    (cell, scaled_positions, element_numbers) to an AiiDA structure.
    Unless the element_numbers are identical to the Z number of the atoms,
    you should pass both kind_info and kinds, with the same format as returned
    by get_tuple_from_aiida_structure.
    .. deprecated:: 1.8
        Use the methods in AiiDA instead.
    :param structure_tuple: the structure in format (structure_tuple, kind_info)
    :param kind_info: a dictionary mapping the kind_names to
        the numbers used in element_numbers. If not provided, assumes {element_name: element_Z}
    :param kinds: a list of the kinds of the structure.
    :return: an AiiDA StructureData node.
    :raise ValueError: if only one of kind_info/kinds is given, if a number
        is not a valid Z number, if kind_info contains duplicated numbers,
        or if positions and numbers have different lengths.
    """
    import warnings
    warnings.warn(
        'this method has been deprecated and moved to AiiDA, see {}'.format(
            DEPRECATION_DOCS_URL), DeprecationWarning)
    from aiida.common.constants import elements
    from aiida.orm.data.structure import Kind, Site, StructureData
    import numpy as np
    import copy
    # kind_info and kinds must be given together or not at all.
    # (Fix: the two error messages were swapped w.r.t. their conditions.)
    if kind_info is None and kinds is not None:
        raise ValueError("If you pass kinds, you should also pass kind_info")
    if kinds is None and kind_info is not None:
        raise ValueError("If you pass kind_info, you should also pass kinds")
    cell, rel_pos, numbers = structure_tuple
    if kind_info:
        _kind_info = copy.copy(kind_info)
        _kinds = copy.copy(kinds)
    else:
        try:
            # For each site; numbers are interpreted as Z numbers here.
            symbols = [elements[num]['symbol'] for num in numbers]
        except KeyError as e:
            # Fix: Python 3 exceptions have no `.message`; use args[0].
            raise ValueError(
                "You did not pass kind_info, but at least one number "
                "is not a valid Z number: {}".format(e.args[0]))
        _kind_info = {elements[num]['symbol']: num for num in set(numbers)}
        # Get the default kinds
        _kinds = [Kind(symbols=sym) for sym in set(symbols)]
    _kinds_dict = {k.name: k for k in _kinds}
    # Now I will use in any case _kinds and _kind_info
    if len(_kind_info.values()) != len(set(_kind_info.values())):
        raise ValueError(
            "There is at least a number repeated twice in kind_info!")
    # Invert the mapping: number -> kind name.
    mapping_num_kindname = {v: k for k, v in _kind_info.items()}
    # Create the actual mapping: number -> Kind object.
    try:
        mapping_to_kinds = {
            num: _kinds_dict[kindname]
            for num, kindname in mapping_num_kindname.items()
        }
    except KeyError as e:
        raise ValueError("Unable to find '{}' in the kinds list".format(
            e.args[0]))
    try:
        site_kinds = [mapping_to_kinds[num] for num in numbers]
    except KeyError as e:
        raise ValueError(
            "Unable to find kind in kind_info for number {}".format(e.args[0]))
    out_structure = StructureData(cell=cell)
    for k in _kinds:
        out_structure.append_kind(k)
    # Convert fractional coordinates back to Cartesian positions.
    abs_pos = np.dot(rel_pos, cell)
    if len(abs_pos) != len(site_kinds):
        raise ValueError(
            "The length of the positions array is different from the "
            "length of the element numbers")
    for kind, pos in zip(site_kinds, abs_pos):
        out_structure.append_site(Site(kind_name=kind.name, position=pos))
    return out_structure
def get_explicit_k_path(structure,
                        with_time_reversal=True,
                        reference_distance=0.025,
                        recipe='hpkot',
                        threshold=1.e-7):
    """
    Return the kpoint path for band structure (in scaled and absolute
    coordinates), given a crystal structure,
    using the paths proposed in the various publications (see description
    of the 'recipe' input parameter). The parameters are the same
    as get get_explicit_k_path in __init__, but here all structures are
    input and returned as AiiDA structures rather than tuples, and similarly
    k-points-related information as a AiiDA KpointsData class.
    .. deprecated:: 1.8
        Use the methods in AiiDA instead.
    :param structure: The AiiDA StructureData for which we want to obtain
        the suggested path.
    :param with_time_reversal: if False, and the group has no inversion
        symmetry, additional lines are returned.
    :param reference_distance: a reference target distance between neighboring
        k-points in the path, in units of 1/ang. The actual value will be as
        close as possible to this value, to have an integer number of points in
        each path.
    :param recipe: choose the reference publication that defines the special
        points and paths.
        Currently, the following value is implemented:
        'hpkot': HPKOT paper:
        <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Band structure
        diagram paths based on crystallography, Comp. Mat. Sci. 128, 140 (2017).
        DOI: 10.1016/j.commatsci.2016.10.015
    :param threshold: the threshold to use to verify if we are in
        and edge case (e.g., a tetragonal cell, but a==c). For instance,
        in the tI lattice, if abs(a-c) < threshold, a EdgeCaseWarning is issued.
        Note that depending on the bravais lattice, the meaning of the
        threshold is different (angle, length, ...)
    :return: a dictionary with the following
        keys:
        - has_inversion_symmetry: True or False, depending on whether the
          input crystal structure has inversion symmetry or not.
        - augmented_path: if True, it means that the path was
          augmented with the -k points (this happens if both
          has_inversion_symmetry is False, and the user set
          with_time_reversal=False in the input)
        - primitive_structure: the StructureData for the primitive cell
        - reciprocal_primitive_lattice: reciprocal-cell vectors for the
          primitive cell (vectors are rows: reciprocal_primitive_lattice[0,:]
          is the first vector)
        - volume_original_wrt_prim: volume ratio of the user-provided cell
          with respect to the the crystallographic primitive cell
        - explicit_kpoints: An AiiDA KPointsData object (without weights)
          with the kpoints and the respective labels.
          For each segment, the two endpoints are always included,
          independently of the length.
        - explicit_kpoints_linearcoord: array of floats, giving the
          coordinate at which to plot the corresponding point.
        - segments: a list of length-2 tuples, with the start and end index
          of each segment. **Note**! The indices are supposed to be used as
          follows: the labels for the i-th segment are given by::
            segment_indices = segments[i]
            segment_points = explicit_kpoints.get_kpoints[slice(*segment_indices)]
          This means, in particular, that if you want the label of the start
          and end points, you should do::
            start_point = explicit_kpoints.get_kpoints[segment_indices[0]]
            stop_point = explicit_kpoints.get_kpoints[segment_indices[1]-1]
          (note the minus one!)
          Also, note that if segments[i-1][1] == segments[i][0] + 1 it means
          that the point was calculated only once, and it belongs to both
          paths. Instead, if segments[i-1][1] == segments[i][0], then
          this is a 'break' point in the path (e.g., segments[i-1][1] is the
          X point, and segments[i][0] is the R point, and typically in a
          graphical representation they are shown at the same coordinate,
          with a label "R|X").
    """
    import warnings
    warnings.warn(
        'this method has been deprecated and moved to AiiDA, see {}'.format(
            DEPRECATION_DOCS_URL), DeprecationWarning)
    from aiida.orm import DataFactory
    # Convert the AiiDA structure to the plain-tuple format of the raw API.
    struc_tuple, kind_info, kinds = _aiida_to_tuple(structure)
    # Fix: forward the user-supplied options to the raw function; they were
    # previously accepted but silently ignored.
    retdict = _raw_explicit_path(
        struc_tuple,
        with_time_reversal=with_time_reversal,
        reference_distance=reference_distance,
        recipe=recipe,
        threshold=threshold)
    # Replace primitive structure with AiiDA StructureData
    primitive_lattice = retdict.pop('primitive_lattice')
    primitive_positions = retdict.pop('primitive_positions')
    primitive_types = retdict.pop('primitive_types')
    primitive_tuple = (primitive_lattice, primitive_positions, primitive_types)
    primitive_structure = _tuple_to_aiida(primitive_tuple, kind_info, kinds)
    retdict['primitive_structure'] = primitive_structure
    # Remove reciprocal_primitive_lattice, recalculated by kpoints class
    retdict.pop('reciprocal_primitive_lattice')
    KpointsData = DataFactory('array.kpoints')
    kpoints_abs = retdict.pop('explicit_kpoints_abs')
    # Popped (and discarded): the KpointsData node recomputes relative coords.
    kpoints_rel = retdict.pop('explicit_kpoints_rel')
    kpoints_labels = retdict.pop('explicit_kpoints_labels')
    # Expects something of the type [[0,'X'],[34,'L'],...]
    # So I generate it, skipping empty labels
    labels = [[idx, label] for idx, label in enumerate(kpoints_labels) if label]
    kpoints = KpointsData()
    kpoints.set_cell_from_structure(primitive_structure)
    kpoints.set_kpoints(kpoints_abs, cartesian=True, labels=labels)
    retdict['explicit_kpoints'] = kpoints
    return retdict
def get_path(structure,
             with_time_reversal=True,
             reference_distance=0.025,
             recipe='hpkot',
             threshold=1.e-7):
    """
    Return the kpoint path information for band structure given a
    crystal structure, using the paths from the chosen recipe/reference.
    The parameters are the same
    as get get_path in __init__, but here all structures are
    input and returned as AiiDA structures rather than tuples.
    If you use this module, please cite the paper of the corresponding
    recipe (see parameter below).
    .. deprecated:: 1.8
        Use the methods in AiiDA instead.
    :param structure: The crystal structure for which we want to obtain
        the suggested path. It should be an AiiDA StructureData object.
    :param with_time_reversal: if False, and the group has no inversion
        symmetry, additional lines are returned as described in the HPKOT
        paper.
    :param reference_distance: unused here; kept for signature compatibility
        with get_explicit_k_path.
    :param recipe: choose the reference publication that defines the special
        points and paths.
        Currently, the following value is implemented:
        'hpkot': HPKOT paper:
        <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Band structure
        diagram paths based on crystallography, Comp. Mat. Sci. 128, 140 (2017).
        DOI: 10.1016/j.commatsci.2016.10.015
    :param threshold: the threshold to use to verify if we are in
        and edge case (e.g., a tetragonal cell, but a==c). For instance,
        in the tI lattice, if abs(a-c) < threshold, a EdgeCaseWarning is issued.
        Note that depending on the bravais lattice, the meaning of the
        threshold is different (angle, length, ...)
    :return: a dictionary with the following
        keys:
        - point_coords: a dictionary with label -> float coordinates
        - path: a list of length-2 tuples, with the labels of the starting
          and ending point of each label section
        - has_inversion_symmetry: True or False, depending on whether the
          input crystal structure has inversion symmetry or not.
        - augmented_path: if True, it means that the path was
          augmented with the -k points (this happens if both
          has_inversion_symmetry is False, and the user set
          with_time_reversal=False in the input)
        - bravais_lattice: the Bravais lattice string (like 'cP', 'tI', ...)
        - bravais_lattice_extended: the specific case used to define labels and
          coordinates (like 'cP1', 'tI2', ...)
        - conv_structure: AiiDA StructureData for the crystallographic conventional
          cell
        - primitive_structure: AiiDA StructureData for the crystallographic primitive
          cell
        - reciprocal_primitive_lattice: reciprocal-cell vectors for the
          primitive cell (vectors are rows: reciprocal_primitive_lattice[0,:]
          is the first vector)
        - primitive_transformation_matrix: the transformation matrix P between
          the conventional and the primitive cell
        - inverse_primitive_transformation_matrix: the inverse of the matrix P
          (the determinant is integer and gives the ratio in volume between
          the conventional and primitive cells)
        - volume_original_wrt_conv: volume ratio of the user-provided cell
          with respect to the the crystallographic conventional cell
        - volume_original_wrt_prim: volume ratio of the user-provided cell
          with respect to the the crystallographic primitive cell
    :note: An EdgeCaseWarning is issued for edge cases (e.g. if a==b==c for
        orthorhombic systems). In this case, still one of the valid cases
        is picked.
    """
    import warnings
    warnings.warn(
        'this method has been deprecated and moved to AiiDA, see {}'.format(
            DEPRECATION_DOCS_URL), DeprecationWarning)
    # Convert the AiiDA structure to the plain-tuple format of the raw API.
    struc_tuple, kind_info, kinds = _aiida_to_tuple(structure)
    # Fix: forward the user-supplied options to the raw function; they were
    # previously accepted but silently ignored. (reference_distance is not a
    # parameter of the raw get_path.)
    retdict = _raw_get_path(
        struc_tuple,
        with_time_reversal=with_time_reversal,
        recipe=recipe,
        threshold=threshold)
    # Replace conv structure with AiiDA StructureData
    conv_lattice = retdict.pop('conv_lattice')
    conv_positions = retdict.pop('conv_positions')
    conv_types = retdict.pop('conv_types')
    conv_tuple = (conv_lattice, conv_positions, conv_types)
    conv_structure = _tuple_to_aiida(conv_tuple, kind_info, kinds)
    retdict['conv_structure'] = conv_structure
    # Replace primitive structure with AiiDA StructureData
    primitive_lattice = retdict.pop('primitive_lattice')
    primitive_positions = retdict.pop('primitive_positions')
    primitive_types = retdict.pop('primitive_types')
    primitive_tuple = (primitive_lattice, primitive_positions, primitive_types)
    primitive_structure = _tuple_to_aiida(primitive_tuple, kind_info, kinds)
    retdict['primitive_structure'] = primitive_structure
    return retdict
| [
"aiida.orm.data.structure.Kind",
"aiida.orm.data.structure.Site",
"builtins.zip",
"numpy.array",
"numpy.dot",
"aiida.orm.DataFactory",
"aiida.orm.data.structure.StructureData",
"numpy.linalg.inv",
"aiida.common.constants.elements.items",
"copy.copy"
] | [((1763, 1793), 'numpy.array', 'np.array', (['aiida_structure.cell'], {}), '(aiida_structure.cell)\n', (1771, 1793), True, 'import numpy as np\n'), ((1808, 1861), 'numpy.array', 'np.array', (['[_.position for _ in aiida_structure.sites]'], {}), '([_.position for _ in aiida_structure.sites])\n', (1816, 1861), True, 'import numpy as np\n'), ((5586, 5610), 'aiida.orm.data.structure.StructureData', 'StructureData', ([], {'cell': 'cell'}), '(cell=cell)\n', (5599, 5610), False, 'from aiida.orm.data.structure import Kind, Site, StructureData\n'), ((5683, 5704), 'numpy.dot', 'np.dot', (['rel_pos', 'cell'], {}), '(rel_pos, cell)\n', (5689, 5704), True, 'import numpy as np\n'), ((5909, 5933), 'builtins.zip', 'zip', (['site_kinds', 'abs_pos'], {}), '(site_kinds, abs_pos)\n', (5912, 5933), False, 'from builtins import zip\n'), ((11258, 11286), 'aiida.orm.DataFactory', 'DataFactory', (['"""array.kpoints"""'], {}), "('array.kpoints')\n", (11269, 11286), False, 'from aiida.orm import DataFactory\n'), ((1892, 1911), 'numpy.linalg.inv', 'np.linalg.inv', (['cell'], {}), '(cell)\n', (1905, 1911), True, 'import numpy as np\n'), ((4161, 4181), 'copy.copy', 'copy.copy', (['kind_info'], {}), '(kind_info)\n', (4170, 4181), False, 'import copy\n'), ((4199, 4215), 'copy.copy', 'copy.copy', (['kinds'], {}), '(kinds)\n', (4208, 4215), False, 'import copy\n'), ((1733, 1749), 'aiida.common.constants.elements.items', 'elements.items', ([], {}), '()\n', (1747, 1749), False, 'from aiida.common.constants import elements\n'), ((4059, 4075), 'aiida.common.constants.elements.items', 'elements.items', ([], {}), '()\n', (4073, 4075), False, 'from aiida.common.constants import elements\n'), ((4657, 4674), 'aiida.orm.data.structure.Kind', 'Kind', ([], {'symbols': 'sym'}), '(symbols=sym)\n', (4661, 4674), False, 'from aiida.orm.data.structure import Kind, Site, StructureData\n'), ((5969, 6008), 'aiida.orm.data.structure.Site', 'Site', ([], {'kind_name': 'kind.name', 'position': 'pos'}), 
'(kind_name=kind.name, position=pos)\n', (5973, 6008), False, 'from aiida.orm.data.structure import Kind, Site, StructureData\n')] |
from .myqt import QT
import pyqtgraph as pg
import numpy as np
from .base import WidgetBase
from .baselist import ClusterBaseList
from .peelercontroller import spike_visible_modes
from .tools import ParamDialog
from .. import labelcodes
class SpikeModel(QT.QAbstractItemModel):
    """Read-only Qt item model exposing the visible spikes of the controller
    as a flat 6-column table: num, seg_num, index, jitter, time, cluster_label.
    """
    def __init__(self, parent =None, controller=None):
        QT.QAbstractItemModel.__init__(self,parent)
        self.controller = controller
        # Build the per-cluster icon cache up-front so data() can answer
        # DecorationRole requests from the first paint.
        self.refresh_colors()
    def columnCount(self, parentIndex):
        # num, seg_num, index, jitter, time, cluster_label
        return 6
    def rowCount(self, parentIndex):
        if not parentIndex.isValid():
            # Cache the absolute indices of the visible spikes; data() and
            # the list widget use self.visible_ind to map row -> spike.
            self.visible_ind, = np.nonzero(self.controller.spikes['visible'])
            return self.visible_ind.size
        else:
            return 0
    def index(self, row, column, parentIndex):
        # Flat table: every valid cell has the invisible root as parent.
        if not parentIndex.isValid():
            return self.createIndex(row, column, None)
        else:
            return QT.QModelIndex()
    def parent(self, index):
        return QT.QModelIndex()
    def data(self, index, role):
        """Return the display text (DisplayRole) or the cluster-color icon
        (DecorationRole, first column only) for one cell."""
        if not index.isValid():
            return None
        if role not in (QT.Qt.DisplayRole, QT.Qt.DecorationRole):
            return
        col = index.column()
        row = index.row()
        abs_ind = self.visible_ind[row]
        spike = self.controller.spikes[abs_ind]
        # Spike time in seconds, refined by the sub-sample jitter.
        spike_time = (spike['index'] + spike['jitter']) / self.controller.dataio.sample_rate
        if role == QT.Qt.DisplayRole:
            if col == 0:
                return '{}'.format(abs_ind)
            elif col == 1:
                return '{}'.format(spike['segment'])
            elif col == 2:
                return '{}'.format(spike['index'])
            elif col == 3:
                return '{:.2f}'.format(spike['jitter'])
            elif col == 4:
                return '{:.4f}'.format(spike_time)
            elif col == 5:
                return '{}'.format(spike['cluster_label'])
            else:
                return None
        elif role == QT.Qt.DecorationRole:
            # Only the first column carries the colored cluster icon.
            if col != 0:
                return None
            if spike['cluster_label'] in self.icons:
                return self.icons[spike['cluster_label']]
            else:
                return None
        else:
            return None
    def flags(self, index):
        if not index.isValid():
            return QT.Qt.NoItemFlags
        return QT.Qt.ItemIsEnabled | QT.Qt.ItemIsSelectable #| Qt.ItemIsDragEnabled
    def headerData(self, section, orientation, role):
        if orientation == QT.Qt.Horizontal and role == QT.Qt.DisplayRole:
            return ['num', 'seg_num', 'index', 'jitter', 'time', 'cluster_label'][section]
        return
    def refresh_colors(self):
        """Rebuild the cluster-label -> QIcon cache from controller.qcolors
        and notify the views."""
        self.icons = {}
        for k, color in self.controller.qcolors.items():
            pix = QT.QPixmap(10, 10)
            pix.fill(color)
            self.icons[k] = QT.QIcon(pix)
        self.refresh()
    def refresh(self):
        # layoutChanged forces attached views to re-query the model.
        self.layoutChanged.emit()
class SpikeList(WidgetBase):
    """Widget showing all spikes of the peeler output in a tree/table view,
    with a combo box to change which spikes are visible, and two-way
    synchronization of the selection with the controller."""
    def __init__(self,controller=None, parent=None):
        WidgetBase.__init__(self, parent=parent, controller=controller)
        self.controller = controller
        self.layout = QT.QVBoxLayout()
        self.setLayout(self.layout)
        self.layout.addWidget(QT.QLabel('<b>All spikes</b>') )
        # Combo selecting the visibility mode (e.g. all / selected cluster).
        self.combo = QT.QComboBox()
        self.layout.addWidget(self.combo)
        self.combo.addItems(spike_visible_modes)
        self.combo.currentTextChanged.connect(self.change_visible_mode)
        self.tree = QT.QTreeView(minimumWidth = 100, uniformRowHeights = True,
                selectionMode= QT.QAbstractItemView.ExtendedSelection, selectionBehavior = QT.QTreeView.SelectRows,
                contextMenuPolicy = QT.Qt.CustomContextMenu,)
        self.layout.addWidget(self.tree)
        #~ self.tree.customContextMenuRequested.connect(self.open_context_menu)
        self.model = SpikeModel(controller=self.controller)
        self.tree.setModel(self.model)
        self.tree.selectionModel().selectionChanged.connect(self.on_tree_selection)
        for i in range(self.model.columnCount(None)):
            self.tree.resizeColumnToContents(i)
        self.tree.setColumnWidth(0,80)
    def refresh(self):
        # Rebuilding the icon cache also emits layoutChanged via the model.
        self.model.refresh_colors()
    def on_tree_selection(self):
        """Push the view selection into controller.spikes['selected']."""
        self.controller.spikes['selected'][:] = False
        for index in self.tree.selectedIndexes():
            # A row yields one index per column; count each row only once.
            if index.column() == 0:
                ind = self.model.visible_ind[index.row()]
                self.controller.spikes['selected'][ind] = True
        self.spike_selection_changed.emit()
    def on_spike_selection_changed(self):
        """Mirror an external selection change into the tree view.
        The handler is disconnected first (and reconnected at the end) so the
        programmatic selection below does not re-trigger on_tree_selection."""
        self.tree.selectionModel().selectionChanged.disconnect(self.on_tree_selection)
        row_selected, = np.nonzero(self.controller.spikes['selected'][self.model.visible_ind])
        if row_selected.size>100:#otherwise this is verry slow
            row_selected = row_selected[:10]
        # change selection
        self.tree.selectionModel().clearSelection()
        flags = QT.QItemSelectionModel.Select #| QItemSelectionModel.Rows
        itemsSelection = QT.QItemSelection()
        for r in row_selected:
            for c in range(2):
                index = self.tree.model().index(r,c,QT.QModelIndex())
                ir = QT.QItemSelectionRange( index )
                itemsSelection.append(ir)
        self.tree.selectionModel().select(itemsSelection , flags)
        # set selection visible
        if len(row_selected)>=1:
            index = self.tree.model().index(row_selected[0],0,QT.QModelIndex())
            self.tree.scrollTo(index)
        self.tree.selectionModel().selectionChanged.connect(self.on_tree_selection)
    def change_visible_mode(self, mode):
        """Combo-box handler: update the visible-spike filter and refresh."""
        self.controller.change_spike_visible_mode(mode)
        self.cluster_visibility_changed.emit()
        self.model.refresh()
    def open_context_menu(self):
        # Placeholder: context menu is currently disabled (see __init__).
        pass
class ClusterSpikeList(ClusterBaseList):
    """Cluster list shown next to the spike list; the only special
    (non-cluster) label displayed is LABEL_UNCLASSIFIED."""
    # Labels treated specially (always listed) in addition to real clusters.
    _special_label = [labelcodes.LABEL_UNCLASSIFIED]
    def make_menu(self):
        """Build the context menu with global show/hide actions."""
        self.menu = QT.QMenu()
        act = self.menu.addAction('Show all')
        act.triggered.connect(self.show_all)
        act = self.menu.addAction('Hide all')
        act.triggered.connect(self.hide_all)
| [
"numpy.nonzero"
] | [((5180, 5250), 'numpy.nonzero', 'np.nonzero', (["self.controller.spikes['selected'][self.model.visible_ind]"], {}), "(self.controller.spikes['selected'][self.model.visible_ind])\n", (5190, 5250), True, 'import numpy as np\n'), ((708, 753), 'numpy.nonzero', 'np.nonzero', (["self.controller.spikes['visible']"], {}), "(self.controller.spikes['visible'])\n", (718, 753), True, 'import numpy as np\n')] |
"""Tests for Argo checks."""
import numpy as np
from numpy import ma
import pytest
import argortqcpy.profile
from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck
def test_check_is_required(fake_check):
    """The base check must report itself as required."""
    required = fake_check.is_required()
    assert required
def test_output_ensure_output_for_property(profile_from_dataset):
    """Ensuring output for a property allocates a GOOD-initialised flag array."""
    check_output = CheckOutput(profile=profile_from_dataset)
    check_output.ensure_output_for_property("PRES")
    pres_flags = check_output.get_output_flags_for_property("PRES")
    assert pres_flags is not None
    assert isinstance(pres_flags, ma.MaskedArray)
    assert (pres_flags == ArgoQcFlag.GOOD.value).all()
def test_output_set_output_flag_for_property(profile_from_dataset):
    """Setting GOOD everywhere leaves the whole flag array at GOOD."""
    check_output = CheckOutput(profile=profile_from_dataset)
    check_output.ensure_output_for_property("PRES")
    check_output.set_output_flag_for_property("PRES", ArgoQcFlag.GOOD)
    pres_flags = check_output.get_output_flags_for_property("PRES")
    assert pres_flags is not None
    assert isinstance(pres_flags, ma.MaskedArray)
    assert (pres_flags == ArgoQcFlag.GOOD.value).all()
def test_output_set_output_flag_for_property_where(profile_from_dataset):
    """Flags set over a slice affect only the selected indices."""
    check_output = CheckOutput(profile=profile_from_dataset)
    check_output.ensure_output_for_property("PRES")
    check_output.set_output_flag_for_property("PRES", ArgoQcFlag.PROBABLY_GOOD, where=slice(None, 2))
    pres_flags = check_output.get_output_flags_for_property("PRES")
    assert pres_flags is not None
    assert isinstance(pres_flags, ma.MaskedArray)
    assert (pres_flags[:2] == ArgoQcFlag.PROBABLY_GOOD.value).all()
    assert (pres_flags[2:] == ArgoQcFlag.GOOD.value).all()
def test_output_set_output_flag_for_property_where_array(profile_from_dataset):
    """Flags set via a boolean mask affect only the True indices."""
    check_output = CheckOutput(profile=profile_from_dataset)
    mask = np.zeros_like(profile_from_dataset.get_property_data("PRES"), dtype=bool)
    mask[0] = mask[-1] = True
    check_output.ensure_output_for_property("PRES")
    check_output.set_output_flag_for_property("PRES", ArgoQcFlag.PROBABLY_GOOD, where=mask)
    pres_flags = check_output.get_output_flags_for_property("PRES")
    assert pres_flags is not None
    assert isinstance(pres_flags, ma.MaskedArray)
    assert (pres_flags[0] == ArgoQcFlag.PROBABLY_GOOD.value).all()
    assert (pres_flags[1:-1] == ArgoQcFlag.GOOD.value).all()
    assert (pres_flags[-1] == ArgoQcFlag.PROBABLY_GOOD.value).all()
@pytest.mark.parametrize(
    "lower,higher",
    (
        (ArgoQcFlag.PROBABLY_GOOD, ArgoQcFlag.BAD),
        (ArgoQcFlag.PROBABLY_GOOD, ArgoQcFlag.PROBABLY_BAD),
        (ArgoQcFlag.PROBABLY_BAD, ArgoQcFlag.BAD),
    ),
)
def test_output_set_output_flag_for_property_with_precendence(profile_from_dataset, lower, higher):
    """A higher-precedence flag is not overwritten by a later lower one."""
    check_output = CheckOutput(profile=profile_from_dataset)
    check_output.ensure_output_for_property("PRES")
    # lower on [0:2], higher on [0:1], then lower again on [0:2]: the
    # higher flag must survive at index 0.
    for flag, upto in ((lower, 2), (higher, 1), (lower, 2)):
        check_output.set_output_flag_for_property("PRES", flag, where=slice(None, upto))
    pres_flags = check_output.get_output_flags_for_property("PRES")
    assert pres_flags is not None
    assert isinstance(pres_flags, ma.MaskedArray)
    assert (pres_flags[:1] == higher.value).all()
    assert (pres_flags[1:2] == lower.value).all()
    assert (pres_flags[2:] == ArgoQcFlag.GOOD.value).all()
@pytest.mark.parametrize(
    "pressure_values",
    (
        range(10),
        [1, 3, 5, 10, 100],
        [0, 2, 2.5, 6.85],
    ),
)
def test_pressure_increasing_check_all_pass(mocker, pressure_values):
    """Strictly increasing pressures are all flagged GOOD."""
    profile = mocker.patch.object(argortqcpy.profile, "Profile")
    profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))
    check = PressureIncreasingCheck(profile, None)
    flag_data = check.run().get_output_flags_for_property("PRES").data
    assert (flag_data == ArgoQcFlag.GOOD.value).all()
@pytest.mark.parametrize(
    "pressure_values,expected",
    (
        (
            [0, 2, 1, 5],
            [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.GOOD.value],
        ),
    ),
)
def test_pressure_increasing_check_some_bad(mocker, pressure_values, expected):
    """A sample below its running maximum is flagged BAD."""
    profile = mocker.patch.object(argortqcpy.profile, "Profile")
    profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))
    check = PressureIncreasingCheck(profile, None)
    flag_data = check.run().get_output_flags_for_property("PRES").data
    assert np.all(flag_data == expected)
@pytest.mark.parametrize(
    "pressure_values,expected",
    (
        (
            [0] * 4,
            [ArgoQcFlag.GOOD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.value],
        ),
        (
            [0, 1, 1, 2],
            [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.GOOD.value],
        ),
    ),
)
def test_pressure_increasing_check_some_constants(mocker, pressure_values, expected):
    """Repeated (constant) pressure samples are flagged BAD after the first."""
    profile = mocker.patch.object(argortqcpy.profile, "Profile")
    profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))
    check = PressureIncreasingCheck(profile, None)
    flag_data = check.run().get_output_flags_for_property("PRES").data
    assert np.all(flag_data == expected)
@pytest.mark.parametrize(
    "pressure_values,expected",
    (
        (
            [0, 1, 2, 1, 1.5, 3, 5],
            [
                ArgoQcFlag.GOOD.value,
                ArgoQcFlag.GOOD.value,
                ArgoQcFlag.GOOD.value,
                ArgoQcFlag.BAD.value,
                ArgoQcFlag.BAD.value,
                ArgoQcFlag.GOOD.value,
                ArgoQcFlag.GOOD.value,
            ],
        ),
        (
            [
                [0, 1, 2, 3],
                [0, 1, 0, 1],
            ],
            [
                [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value],
                [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.value],
            ],
        ),
    ),
)
def test_pressure_increasing_check_some_decreasing(mocker, pressure_values, expected):
    """Samples below the running maximum stay BAD until pressure recovers."""
    profile = mocker.patch.object(argortqcpy.profile, "Profile")
    profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))
    check = PressureIncreasingCheck(profile, None)
    flag_data = check.run().get_output_flags_for_property("PRES").data
    assert np.all(flag_data == expected)
| [
"argortqcpy.checks.CheckOutput",
"pytest.mark.parametrize",
"argortqcpy.checks.PressureIncreasingCheck",
"numpy.ma.masked_array",
"numpy.all"
] | [((2607, 2797), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lower,higher"""', '((ArgoQcFlag.PROBABLY_GOOD, ArgoQcFlag.BAD), (ArgoQcFlag.PROBABLY_GOOD,\n ArgoQcFlag.PROBABLY_BAD), (ArgoQcFlag.PROBABLY_BAD, ArgoQcFlag.BAD))'], {}), "('lower,higher', ((ArgoQcFlag.PROBABLY_GOOD,\n ArgoQcFlag.BAD), (ArgoQcFlag.PROBABLY_GOOD, ArgoQcFlag.PROBABLY_BAD), (\n ArgoQcFlag.PROBABLY_BAD, ArgoQcFlag.BAD)))\n", (2630, 2797), False, 'import pytest\n'), ((4218, 4390), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pressure_values,expected"""', '(([0, 2, 1, 5], [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.\n BAD.value, ArgoQcFlag.GOOD.value]),)'], {}), "('pressure_values,expected', (([0, 2, 1, 5], [\n ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.BAD.value,\n ArgoQcFlag.GOOD.value]),))\n", (4241, 4390), False, 'import pytest\n'), ((4912, 5190), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pressure_values,expected"""', '(([0] * 4, [ArgoQcFlag.GOOD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.\n value, ArgoQcFlag.BAD.value]), ([0, 1, 1, 2], [ArgoQcFlag.GOOD.value,\n ArgoQcFlag.GOOD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.GOOD.value]))'], {}), "('pressure_values,expected', (([0] * 4, [ArgoQcFlag.\n GOOD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.\n value]), ([0, 1, 1, 2], [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value,\n ArgoQcFlag.BAD.value, ArgoQcFlag.GOOD.value])))\n", (4935, 5190), False, 'import pytest\n'), ((5762, 6251), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pressure_values,expected"""', '(([0, 1, 2, 1, 1.5, 3, 5], [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value,\n ArgoQcFlag.GOOD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.value,\n ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value]), ([[0, 1, 2, 3], [0, 1, \n 0, 1]], [[ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD\n .value, ArgoQcFlag.GOOD.value], [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD\n .value, 
ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.value]]))'], {}), "('pressure_values,expected', (([0, 1, 2, 1, 1.5, 3, \n 5], [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.\n value, ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.value, ArgoQcFlag.GOOD.\n value, ArgoQcFlag.GOOD.value]), ([[0, 1, 2, 3], [0, 1, 0, 1]], [[\n ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value,\n ArgoQcFlag.GOOD.value], [ArgoQcFlag.GOOD.value, ArgoQcFlag.GOOD.value,\n ArgoQcFlag.BAD.value, ArgoQcFlag.BAD.value]])))\n", (5785, 6251), False, 'import pytest\n'), ((459, 500), 'argortqcpy.checks.CheckOutput', 'CheckOutput', ([], {'profile': 'profile_from_dataset'}), '(profile=profile_from_dataset)\n', (470, 500), False, 'from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck\n'), ((691, 729), 'numpy.all', 'np.all', (['(flags == ArgoQcFlag.GOOD.value)'], {}), '(flags == ArgoQcFlag.GOOD.value)\n', (697, 729), True, 'import numpy as np\n'), ((865, 906), 'argortqcpy.checks.CheckOutput', 'CheckOutput', ([], {'profile': 'profile_from_dataset'}), '(profile=profile_from_dataset)\n', (876, 906), False, 'from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck\n'), ((1162, 1200), 'numpy.all', 'np.all', (['(flags == ArgoQcFlag.GOOD.value)'], {}), '(flags == ArgoQcFlag.GOOD.value)\n', (1168, 1200), True, 'import numpy as np\n'), ((1371, 1412), 'argortqcpy.checks.CheckOutput', 'CheckOutput', ([], {'profile': 'profile_from_dataset'}), '(profile=profile_from_dataset)\n', (1382, 1412), False, 'from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck\n'), ((1699, 1750), 'numpy.all', 'np.all', (['(flags[:2] == ArgoQcFlag.PROBABLY_GOOD.value)'], {}), '(flags[:2] == ArgoQcFlag.PROBABLY_GOOD.value)\n', (1705, 1750), True, 'import numpy as np\n'), ((1762, 1804), 'numpy.all', 'np.all', (['(flags[2:] == ArgoQcFlag.GOOD.value)'], {}), '(flags[2:] == ArgoQcFlag.GOOD.value)\n', (1768, 1804), True, 'import numpy as np\n'), ((1981, 2022), 
'argortqcpy.checks.CheckOutput', 'CheckOutput', ([], {'profile': 'profile_from_dataset'}), '(profile=profile_from_dataset)\n', (1992, 2022), False, 'from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck\n'), ((2434, 2484), 'numpy.all', 'np.all', (['(flags[0] == ArgoQcFlag.PROBABLY_GOOD.value)'], {}), '(flags[0] == ArgoQcFlag.PROBABLY_GOOD.value)\n', (2440, 2484), True, 'import numpy as np\n'), ((2496, 2540), 'numpy.all', 'np.all', (['(flags[1:-1] == ArgoQcFlag.GOOD.value)'], {}), '(flags[1:-1] == ArgoQcFlag.GOOD.value)\n', (2502, 2540), True, 'import numpy as np\n'), ((2552, 2603), 'numpy.all', 'np.all', (['(flags[-1] == ArgoQcFlag.PROBABLY_GOOD.value)'], {}), '(flags[-1] == ArgoQcFlag.PROBABLY_GOOD.value)\n', (2558, 2603), True, 'import numpy as np\n'), ((3025, 3066), 'argortqcpy.checks.CheckOutput', 'CheckOutput', ([], {'profile': 'profile_from_dataset'}), '(profile=profile_from_dataset)\n', (3036, 3066), False, 'from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck\n'), ((3489, 3522), 'numpy.all', 'np.all', (['(flags[:1] == higher.value)'], {}), '(flags[:1] == higher.value)\n', (3495, 3522), True, 'import numpy as np\n'), ((3534, 3567), 'numpy.all', 'np.all', (['(flags[1:2] == lower.value)'], {}), '(flags[1:2] == lower.value)\n', (3540, 3567), True, 'import numpy as np\n'), ((3579, 3621), 'numpy.all', 'np.all', (['(flags[2:] == ArgoQcFlag.GOOD.value)'], {}), '(flags[2:] == ArgoQcFlag.GOOD.value)\n', (3585, 3621), True, 'import numpy as np\n'), ((4058, 4096), 'argortqcpy.checks.PressureIncreasingCheck', 'PressureIncreasingCheck', (['profile', 'None'], {}), '(profile, None)\n', (4081, 4096), False, 'from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck\n'), ((4765, 4803), 'argortqcpy.checks.PressureIncreasingCheck', 'PressureIncreasingCheck', (['profile', 'None'], {}), '(profile, None)\n', (4788, 4803), False, 'from argortqcpy.checks import ArgoQcFlag, CheckOutput, 
PressureIncreasingCheck\n'), ((5615, 5653), 'argortqcpy.checks.PressureIncreasingCheck', 'PressureIncreasingCheck', (['profile', 'None'], {}), '(profile, None)\n', (5638, 5653), False, 'from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck\n'), ((6886, 6924), 'argortqcpy.checks.PressureIncreasingCheck', 'PressureIncreasingCheck', (['profile', 'None'], {}), '(profile, None)\n', (6909, 6924), False, 'from argortqcpy.checks import ArgoQcFlag, CheckOutput, PressureIncreasingCheck\n'), ((4013, 4045), 'numpy.ma.masked_array', 'ma.masked_array', (['pressure_values'], {}), '(pressure_values)\n', (4028, 4045), False, 'from numpy import ma\n'), ((4720, 4752), 'numpy.ma.masked_array', 'ma.masked_array', (['pressure_values'], {}), '(pressure_values)\n', (4735, 4752), False, 'from numpy import ma\n'), ((5570, 5602), 'numpy.ma.masked_array', 'ma.masked_array', (['pressure_values'], {}), '(pressure_values)\n', (5585, 5602), False, 'from numpy import ma\n'), ((6841, 6873), 'numpy.ma.masked_array', 'ma.masked_array', (['pressure_values'], {}), '(pressure_values)\n', (6856, 6873), False, 'from numpy import ma\n')] |
import pickle
from pathlib import Path
from typing import Iterable, List, Optional, Tuple, Union
import cv2
import numpy as np
from lanedetection.image import LaneImage
class CameraSettings(object):
def __init__(self,
cam_matrix: Optional[np.ndarray] = None,
distortion_coefs: Optional[np.ndarray] = None,
image_dims: Optional[np.ndarray] = None
):
super().__init__()
self.cam_matrix = cam_matrix
self.distortion_coefs = distortion_coefs
self.image_dims = image_dims
def calibrate(
self,
image_paths: Iterable[Path],
chessboard_dims: Tuple[int, int],
camera_image_dimensions: Optional[Tuple[int, int]] = None
) -> None:
# Generate object_points and image_points
obj_points, img_points, img_dims = self._generate_obj_and_image_points(
image_paths=image_paths,
chessboard_dims=chessboard_dims
)
if not camera_image_dimensions:
camera_image_dimensions = img_dims
if not self.image_dims:
self.image_dims = camera_image_dimensions
camera_matrix, distortion_coefs = self._calculate_camera_matrix(
obj_points=obj_points,
img_points=img_points
)
self.cam_matrix = camera_matrix
self.distortion_coefs = distortion_coefs
@staticmethod
def _gen_obj_points_array(chessboard_dims: Tuple[int, int]) -> np.ndarray:
"""
Generate the object points for the chessboard dimensions given
Args:
chessboard_dims (Tuple[int, int]): The dimensions in the chessboard images used for calibration that is
expected by OpenCV - namely the number of inside corners for (x, y)
Returns:
np.ndarray with (x * y) entries
"""
num_x, num_y = chessboard_dims
obj_points = np.zeros((num_x * num_y, 3), np.float32)
obj_points[:, :2] = np.mgrid[0:num_x, 0:num_y].T.reshape(-1, 2)
return obj_points
def _generate_obj_and_image_points(
self,
image_paths: Iterable[Path],
chessboard_dims: Tuple[int, int]
) -> Tuple[List[np.ndarray], List[np.ndarray], Tuple[int, int]]:
"""
Generate the object points and image points lists from a directory of calibration images
Args:
image_paths: List of Path variable
chessboard_dims (Tuple[int, int]): The dimensions in the chessboard images used for calibration that is
expected by OpenCV - namely the number of inside corners for (x, y)
Returns:
object_points (List[np.ndarray]): The list of object points
image_points (List[np.ndarray]): The list of detected chessboard coordinates
image_dimensions (Tuple[int, int]): The dimensions of the last image
"""
op_array = self._gen_obj_points_array(chessboard_dims=chessboard_dims)
obj_points = []
img_points = []
for path in image_paths:
image = LaneImage(image_path=path)
are_corners_found, corner_coords = cv2.findChessboardCorners(
image=image.apply_colorspace('gray'),
patternSize=chessboard_dims
)
if are_corners_found:
obj_points.append(op_array)
img_points.append(corner_coords)
image_dims = image.size
return obj_points, img_points, image_dims
def _calculate_camera_matrix(
self,
obj_points: List[np.ndarray],
img_points: List[np.ndarray]
) -> Tuple[np.ndarray, np.ndarray]:
camera_matrix = cv2.initCameraMatrix2D(
objectPoints=obj_points,
imagePoints=img_points,
imageSize=self.image_dims
)
ret_val, camera_matrix, distortion_coefs, _, _ = cv2.calibrateCamera(
objectPoints=obj_points,
imageSize=self.image_dims,
imagePoints=img_points,
cameraMatrix=camera_matrix,
distCoeffs=np.zeros((3, 3))
)
return camera_matrix, distortion_coefs
def save(self, save_path: Union[Path, str]) -> None:
if isinstance(save_path, str):
save_path = Path(save_path)
save_path.write_bytes(pickle.dumps(self.__dict__))
@classmethod
def load(cls, file_path: Union[Path, str]) -> 'CameraSettings':
if isinstance(file_path, str):
file_path = Path(file_path)
obj_dict = pickle.loads(file_path.read_bytes())
return cls(** obj_dict)
| [
"pathlib.Path",
"pickle.dumps",
"numpy.zeros",
"cv2.initCameraMatrix2D",
"lanedetection.image.LaneImage"
] | [((1954, 1994), 'numpy.zeros', 'np.zeros', (['(num_x * num_y, 3)', 'np.float32'], {}), '((num_x * num_y, 3), np.float32)\n', (1962, 1994), True, 'import numpy as np\n'), ((3759, 3861), 'cv2.initCameraMatrix2D', 'cv2.initCameraMatrix2D', ([], {'objectPoints': 'obj_points', 'imagePoints': 'img_points', 'imageSize': 'self.image_dims'}), '(objectPoints=obj_points, imagePoints=img_points,\n imageSize=self.image_dims)\n', (3781, 3861), False, 'import cv2\n'), ((3129, 3155), 'lanedetection.image.LaneImage', 'LaneImage', ([], {'image_path': 'path'}), '(image_path=path)\n', (3138, 3155), False, 'from lanedetection.image import LaneImage\n'), ((4354, 4369), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (4358, 4369), False, 'from pathlib import Path\n'), ((4401, 4428), 'pickle.dumps', 'pickle.dumps', (['self.__dict__'], {}), '(self.__dict__)\n', (4413, 4428), False, 'import pickle\n'), ((4579, 4594), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (4583, 4594), False, 'from pathlib import Path\n'), ((4158, 4174), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (4166, 4174), True, 'import numpy as np\n')] |
import numpy as np
from psyneulink.components.functions.function import Linear, Logistic
from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.components.process import Process
from psyneulink.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.components.system import System
from psyneulink.globals.keywords import FULL_CONNECTIVITY_MATRIX, LEARNING, LEARNING_PROJECTION
from psyneulink.globals.preferences.componentpreferenceset import REPORT_OUTPUT_PREF, VERBOSE_PREF
from psyneulink.library.mechanisms.processing.objective.comparatormechanism import MSE
class TestStroop:
def test_stroop_model(self):
process_prefs = {
REPORT_OUTPUT_PREF: True,
VERBOSE_PREF: False
}
# system_prefs = {
# REPORT_OUTPUT_PREF: True,
# VERBOSE_PREF: False
# }
colors = TransferMechanism(
size=2,
function=Linear,
name="Colors",
)
words = TransferMechanism(
default_variable=[0, 0],
size=2,
function=Linear,
name="Words",
)
response = TransferMechanism(
default_variable=[0, 0],
function=Logistic,
name="Response",
)
color_naming_process = Process(
default_variable=[1, 2.5],
pathway=[colors, FULL_CONNECTIVITY_MATRIX, response],
learning=LEARNING_PROJECTION,
target=[0, 1],
name='Color Naming',
prefs=process_prefs,
)
word_reading_process = Process(
default_variable=[.5, 3],
pathway=[words, FULL_CONNECTIVITY_MATRIX, response],
name='Word Reading',
learning=LEARNING_PROJECTION,
target=[1, 0],
prefs=process_prefs,
)
# s = System(
# processes=[color_naming_process, word_reading_process],
# name='Stroop Model',
# targets=[0, 0],
# prefs=system_prefs,
# )
# stim_dict = {
# colors: [
# [1,0],
# [0,1]
# ],
# words: [
# [0,1],
# [1,0]
# ]
# }
# target_dict = {
# response: [
# [1,0],
# [0,1]
# ]
# }
# results = s.run(
# num_trials=10,
# inputs=stim_dict,
# targets=target_dict,
# )
expected_color_results = [
np.array([0.88079708, 0.88079708]),
np.array([0.85997037, 0.88340023]),
np.array([0.83312329, 0.88585176]),
np.array([0.79839127, 0.88816536]),
np.array([0.75384913, 0.89035312]),
np.array([0.69835531, 0.89242571]),
np.array([0.63303376, 0.89439259]),
np.array([0.56245802, 0.8962622 ]),
np.array([0.49357614, 0.89804208]),
np.array([0.43230715, 0.89973899]),
]
expected_word_results = [
np.array([0.88079708, 0.88079708]),
np.array([0.88340023, 0.85997037]),
np.array([0.88585176, 0.83312329]),
np.array([0.88816536, 0.79839127]),
np.array([0.89035312, 0.75384913]),
np.array([0.89242571, 0.69835531]),
np.array([0.89439259, 0.63303376]),
np.array([0.8962622, 0.56245802]),
np.array([0.89804208, 0.49357614]),
np.array([0.89973899, 0.43230715]),
]
for i in range(10):
cr = color_naming_process.execute(input=[1, 1], target=[0, 1])
wr = word_reading_process.execute(input=[1, 1], target=[1, 0])
np.testing.assert_allclose(cr, expected_color_results[i], atol=1e-08, err_msg='Failed on expected_color_results[{0}]'.format(i))
np.testing.assert_allclose(wr, expected_word_results[i], atol=1e-08, err_msg='Failed on expected_word_results[{0}]'.format(i))
def test_stroop_model_learning(self):
process_prefs = {
REPORT_OUTPUT_PREF: True,
VERBOSE_PREF: False,
}
system_prefs = {
REPORT_OUTPUT_PREF: True,
VERBOSE_PREF: False,
}
colors = TransferMechanism(
default_variable=[0, 0],
function=Linear,
name="Colors",
)
words = TransferMechanism(
default_variable=[0, 0],
function=Linear,
name="Words",
)
hidden = TransferMechanism(
default_variable=[0, 0],
function=Logistic,
name="Hidden",
)
response = TransferMechanism(
default_variable=[0, 0],
function=Logistic(),
name="Response",
)
TransferMechanism(
default_variable=[0, 0],
function=Logistic,
name="Output",
)
CH_Weights_matrix = np.arange(4).reshape((2, 2))
WH_Weights_matrix = np.arange(4).reshape((2, 2))
HO_Weights_matrix = np.arange(4).reshape((2, 2))
CH_Weights = MappingProjection(
name='Color-Hidden Weights',
matrix=CH_Weights_matrix,
)
WH_Weights = MappingProjection(
name='Word-Hidden Weights',
matrix=WH_Weights_matrix,
)
HO_Weights = MappingProjection(
name='Hidden-Output Weights',
matrix=HO_Weights_matrix,
)
color_naming_process = Process(
default_variable=[1, 2.5],
pathway=[colors, CH_Weights, hidden, HO_Weights, response],
learning=LEARNING,
target=[2, 2],
name='Color Naming',
prefs=process_prefs,
)
word_reading_process = Process(
default_variable=[.5, 3],
pathway=[words, WH_Weights, hidden],
name='Word Reading',
learning=LEARNING,
target=[3, 3],
prefs=process_prefs,
)
s = System(
processes=[color_naming_process, word_reading_process],
targets=[20, 20],
name='Stroop Model',
prefs=system_prefs,
)
def show_target():
print('\nColor Naming\n\tInput: {}\n\tTarget: {}'.format(colors.input_states.values_as_lists, s.targets))
print('Wording Reading:\n\tInput: {}\n\tTarget: {}\n'.format(words.input_states.values_as_lists, s.targets))
print('Response: \n', response.output_values[0])
print('Hidden-Output:')
print(HO_Weights.mod_matrix)
print('Color-Hidden:')
print(CH_Weights.mod_matrix)
print('Word-Hidden:')
print(WH_Weights.mod_matrix)
stim_list_dict = {
colors: [[1, 1]],
words: [[-2, -2]]
}
target_list_dict = {response: [[1, 1]]}
results = s.run(
num_trials=2,
inputs=stim_list_dict,
targets=target_list_dict,
call_after_trial=show_target,
)
results_list = []
for elem in s.results:
for nested_elem in elem:
nested_elem = nested_elem.tolist()
try:
iter(nested_elem)
except TypeError:
nested_elem = [nested_elem]
results_list.extend(nested_elem)
objective_response = s.mechanisms[3]
objective_hidden = s.mechanisms[7]
from pprint import pprint
pprint(CH_Weights.__dict__)
print(CH_Weights._parameter_states["matrix"].value)
print(CH_Weights.mod_matrix)
expected_output = [
(colors.output_states[0].value, np.array([1., 1.])),
(words.output_states[0].value, np.array([-2., -2.])),
(hidden.output_states[0].value, np.array([0.13227553, 0.01990677])),
(response.output_states[0].value, np.array([0.51044657, 0.5483048])),
(objective_response.output_states[0].value, np.array([0.48955343, 0.4516952])),
(objective_response.output_states[MSE].value, np.array(0.22184555903789838)),
(objective_hidden.output_states[0].value, np.array([0., 0.])),
(CH_Weights.mod_matrix, np.array([
[ 0.02512045, 1.02167245],
[ 2.02512045, 3.02167245],
])),
(WH_Weights.mod_matrix, np.array([
[-0.05024091, 0.9566551 ],
[ 1.94975909, 2.9566551 ],
])),
(HO_Weights.mod_matrix, np.array([
[ 0.03080958, 1.02830959],
[ 2.00464242, 3.00426575],
])),
(results, [[np.array([0.50899214, 0.54318254])], [np.array([0.51044657, 0.5483048])]]),
]
for i in range(len(expected_output)):
val, expected = expected_output[i]
# setting absolute tolerance to be in accordance with reference_output precision
# if you do not specify, assert_allcose will use a relative tolerance of 1e-07,
# which WILL FAIL unless you gather higher precision values to use as reference
np.testing.assert_allclose(val, expected, atol=1e-08, err_msg='Failed on expected_output[{0}]'.format(i))
| [
"psyneulink.components.functions.function.Logistic",
"numpy.array",
"psyneulink.components.projections.pathway.mappingprojection.MappingProjection",
"psyneulink.components.process.Process",
"psyneulink.components.system.System",
"psyneulink.components.mechanisms.processing.transfermechanism.TransferMechan... | [((944, 1001), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'size': '(2)', 'function': 'Linear', 'name': '"""Colors"""'}), "(size=2, function=Linear, name='Colors')\n", (961, 1001), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1066, 1152), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'default_variable': '[0, 0]', 'size': '(2)', 'function': 'Linear', 'name': '"""Words"""'}), "(default_variable=[0, 0], size=2, function=Linear, name=\n 'Words')\n", (1083, 1152), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1227, 1305), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'default_variable': '[0, 0]', 'function': 'Logistic', 'name': '"""Response"""'}), "(default_variable=[0, 0], function=Logistic, name='Response')\n", (1244, 1305), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1385, 1568), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[1, 2.5]', 'pathway': '[colors, FULL_CONNECTIVITY_MATRIX, response]', 'learning': 'LEARNING_PROJECTION', 'target': '[0, 1]', 'name': '"""Color Naming"""', 'prefs': 'process_prefs'}), "(default_variable=[1, 2.5], pathway=[colors,\n FULL_CONNECTIVITY_MATRIX, response], learning=LEARNING_PROJECTION,\n target=[0, 1], name='Color Naming', prefs=process_prefs)\n", (1392, 1568), False, 'from psyneulink.components.process import Process\n'), ((1676, 1858), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[0.5, 3]', 'pathway': '[words, FULL_CONNECTIVITY_MATRIX, response]', 'name': '"""Word Reading"""', 'learning': 'LEARNING_PROJECTION', 
'target': '[1, 0]', 'prefs': 'process_prefs'}), "(default_variable=[0.5, 3], pathway=[words, FULL_CONNECTIVITY_MATRIX,\n response], name='Word Reading', learning=LEARNING_PROJECTION, target=[1,\n 0], prefs=process_prefs)\n", (1683, 1858), False, 'from psyneulink.components.process import Process\n'), ((4400, 4474), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'default_variable': '[0, 0]', 'function': 'Linear', 'name': '"""Colors"""'}), "(default_variable=[0, 0], function=Linear, name='Colors')\n", (4417, 4474), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((4538, 4611), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'default_variable': '[0, 0]', 'function': 'Linear', 'name': '"""Words"""'}), "(default_variable=[0, 0], function=Linear, name='Words')\n", (4555, 4611), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((4676, 4752), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'default_variable': '[0, 0]', 'function': 'Logistic', 'name': '"""Hidden"""'}), "(default_variable=[0, 0], function=Logistic, name='Hidden')\n", (4693, 4752), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((4955, 5031), 'psyneulink.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'default_variable': '[0, 0]', 'function': 'Logistic', 'name': '"""Output"""'}), "(default_variable=[0, 0], function=Logistic, name='Output')\n", (4972, 5031), False, 'from psyneulink.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((5273, 5345), 'psyneulink.components.projections.pathway.mappingprojection.MappingProjection', 'MappingProjection', ([], {'name': '"""Color-Hidden 
Weights"""', 'matrix': 'CH_Weights_matrix'}), "(name='Color-Hidden Weights', matrix=CH_Weights_matrix)\n", (5290, 5345), False, 'from psyneulink.components.projections.pathway.mappingprojection import MappingProjection\n'), ((5402, 5473), 'psyneulink.components.projections.pathway.mappingprojection.MappingProjection', 'MappingProjection', ([], {'name': '"""Word-Hidden Weights"""', 'matrix': 'WH_Weights_matrix'}), "(name='Word-Hidden Weights', matrix=WH_Weights_matrix)\n", (5419, 5473), False, 'from psyneulink.components.projections.pathway.mappingprojection import MappingProjection\n'), ((5530, 5603), 'psyneulink.components.projections.pathway.mappingprojection.MappingProjection', 'MappingProjection', ([], {'name': '"""Hidden-Output Weights"""', 'matrix': 'HO_Weights_matrix'}), "(name='Hidden-Output Weights', matrix=HO_Weights_matrix)\n", (5547, 5603), False, 'from psyneulink.components.projections.pathway.mappingprojection import MappingProjection\n'), ((5671, 5850), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[1, 2.5]', 'pathway': '[colors, CH_Weights, hidden, HO_Weights, response]', 'learning': 'LEARNING', 'target': '[2, 2]', 'name': '"""Color Naming"""', 'prefs': 'process_prefs'}), "(default_variable=[1, 2.5], pathway=[colors, CH_Weights, hidden,\n HO_Weights, response], learning=LEARNING, target=[2, 2], name=\n 'Color Naming', prefs=process_prefs)\n", (5678, 5850), False, 'from psyneulink.components.process import Process\n'), ((5957, 6108), 'psyneulink.components.process.Process', 'Process', ([], {'default_variable': '[0.5, 3]', 'pathway': '[words, WH_Weights, hidden]', 'name': '"""Word Reading"""', 'learning': 'LEARNING', 'target': '[3, 3]', 'prefs': 'process_prefs'}), "(default_variable=[0.5, 3], pathway=[words, WH_Weights, hidden],\n name='Word Reading', learning=LEARNING, target=[3, 3], prefs=process_prefs)\n", (5964, 6108), False, 'from psyneulink.components.process import Process\n'), ((6200, 6325), 
'psyneulink.components.system.System', 'System', ([], {'processes': '[color_naming_process, word_reading_process]', 'targets': '[20, 20]', 'name': '"""Stroop Model"""', 'prefs': 'system_prefs'}), "(processes=[color_naming_process, word_reading_process], targets=[20,\n 20], name='Stroop Model', prefs=system_prefs)\n", (6206, 6325), False, 'from psyneulink.components.system import System\n'), ((7728, 7755), 'pprint.pprint', 'pprint', (['CH_Weights.__dict__'], {}), '(CH_Weights.__dict__)\n', (7734, 7755), False, 'from pprint import pprint\n'), ((2664, 2698), 'numpy.array', 'np.array', (['[0.88079708, 0.88079708]'], {}), '([0.88079708, 0.88079708])\n', (2672, 2698), True, 'import numpy as np\n'), ((2712, 2746), 'numpy.array', 'np.array', (['[0.85997037, 0.88340023]'], {}), '([0.85997037, 0.88340023])\n', (2720, 2746), True, 'import numpy as np\n'), ((2760, 2794), 'numpy.array', 'np.array', (['[0.83312329, 0.88585176]'], {}), '([0.83312329, 0.88585176])\n', (2768, 2794), True, 'import numpy as np\n'), ((2808, 2842), 'numpy.array', 'np.array', (['[0.79839127, 0.88816536]'], {}), '([0.79839127, 0.88816536])\n', (2816, 2842), True, 'import numpy as np\n'), ((2856, 2890), 'numpy.array', 'np.array', (['[0.75384913, 0.89035312]'], {}), '([0.75384913, 0.89035312])\n', (2864, 2890), True, 'import numpy as np\n'), ((2904, 2938), 'numpy.array', 'np.array', (['[0.69835531, 0.89242571]'], {}), '([0.69835531, 0.89242571])\n', (2912, 2938), True, 'import numpy as np\n'), ((2952, 2986), 'numpy.array', 'np.array', (['[0.63303376, 0.89439259]'], {}), '([0.63303376, 0.89439259])\n', (2960, 2986), True, 'import numpy as np\n'), ((3000, 3033), 'numpy.array', 'np.array', (['[0.56245802, 0.8962622]'], {}), '([0.56245802, 0.8962622])\n', (3008, 3033), True, 'import numpy as np\n'), ((3048, 3082), 'numpy.array', 'np.array', (['[0.49357614, 0.89804208]'], {}), '([0.49357614, 0.89804208])\n', (3056, 3082), True, 'import numpy as np\n'), ((3096, 3130), 'numpy.array', 'np.array', (['[0.43230715, 
0.89973899]'], {}), '([0.43230715, 0.89973899])\n', (3104, 3130), True, 'import numpy as np\n'), ((3189, 3223), 'numpy.array', 'np.array', (['[0.88079708, 0.88079708]'], {}), '([0.88079708, 0.88079708])\n', (3197, 3223), True, 'import numpy as np\n'), ((3237, 3271), 'numpy.array', 'np.array', (['[0.88340023, 0.85997037]'], {}), '([0.88340023, 0.85997037])\n', (3245, 3271), True, 'import numpy as np\n'), ((3285, 3319), 'numpy.array', 'np.array', (['[0.88585176, 0.83312329]'], {}), '([0.88585176, 0.83312329])\n', (3293, 3319), True, 'import numpy as np\n'), ((3333, 3367), 'numpy.array', 'np.array', (['[0.88816536, 0.79839127]'], {}), '([0.88816536, 0.79839127])\n', (3341, 3367), True, 'import numpy as np\n'), ((3381, 3415), 'numpy.array', 'np.array', (['[0.89035312, 0.75384913]'], {}), '([0.89035312, 0.75384913])\n', (3389, 3415), True, 'import numpy as np\n'), ((3429, 3463), 'numpy.array', 'np.array', (['[0.89242571, 0.69835531]'], {}), '([0.89242571, 0.69835531])\n', (3437, 3463), True, 'import numpy as np\n'), ((3477, 3511), 'numpy.array', 'np.array', (['[0.89439259, 0.63303376]'], {}), '([0.89439259, 0.63303376])\n', (3485, 3511), True, 'import numpy as np\n'), ((3525, 3558), 'numpy.array', 'np.array', (['[0.8962622, 0.56245802]'], {}), '([0.8962622, 0.56245802])\n', (3533, 3558), True, 'import numpy as np\n'), ((3572, 3606), 'numpy.array', 'np.array', (['[0.89804208, 0.49357614]'], {}), '([0.89804208, 0.49357614])\n', (3580, 3606), True, 'import numpy as np\n'), ((3620, 3654), 'numpy.array', 'np.array', (['[0.89973899, 0.43230715]'], {}), '([0.89973899, 0.43230715])\n', (3628, 3654), True, 'import numpy as np\n'), ((4896, 4906), 'psyneulink.components.functions.function.Logistic', 'Logistic', ([], {}), '()\n', (4904, 4906), False, 'from psyneulink.components.functions.function import Linear, Logistic\n'), ((5108, 5120), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (5117, 5120), True, 'import numpy as np\n'), ((5165, 5177), 'numpy.arange', 'np.arange', 
(['(4)'], {}), '(4)\n', (5174, 5177), True, 'import numpy as np\n'), ((5222, 5234), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (5231, 5234), True, 'import numpy as np\n'), ((7925, 7945), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (7933, 7945), True, 'import numpy as np\n'), ((7989, 8011), 'numpy.array', 'np.array', (['[-2.0, -2.0]'], {}), '([-2.0, -2.0])\n', (7997, 8011), True, 'import numpy as np\n'), ((8056, 8090), 'numpy.array', 'np.array', (['[0.13227553, 0.01990677]'], {}), '([0.13227553, 0.01990677])\n', (8064, 8090), True, 'import numpy as np\n'), ((8139, 8172), 'numpy.array', 'np.array', (['[0.51044657, 0.5483048]'], {}), '([0.51044657, 0.5483048])\n', (8147, 8172), True, 'import numpy as np\n'), ((8231, 8264), 'numpy.array', 'np.array', (['[0.48955343, 0.4516952]'], {}), '([0.48955343, 0.4516952])\n', (8239, 8264), True, 'import numpy as np\n'), ((8325, 8354), 'numpy.array', 'np.array', (['(0.22184555903789838)'], {}), '(0.22184555903789838)\n', (8333, 8354), True, 'import numpy as np\n'), ((8411, 8431), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (8419, 8431), True, 'import numpy as np\n'), ((8468, 8530), 'numpy.array', 'np.array', (['[[0.02512045, 1.02167245], [2.02512045, 3.02167245]]'], {}), '([[0.02512045, 1.02167245], [2.02512045, 3.02167245]])\n', (8476, 8530), True, 'import numpy as np\n'), ((8622, 8683), 'numpy.array', 'np.array', (['[[-0.05024091, 0.9566551], [1.94975909, 2.9566551]]'], {}), '([[-0.05024091, 0.9566551], [1.94975909, 2.9566551]])\n', (8630, 8683), True, 'import numpy as np\n'), ((8772, 8834), 'numpy.array', 'np.array', (['[[0.03080958, 1.02830959], [2.00464242, 3.00426575]]'], {}), '([[0.03080958, 1.02830959], [2.00464242, 3.00426575]])\n', (8780, 8834), True, 'import numpy as np\n'), ((8910, 8944), 'numpy.array', 'np.array', (['[0.50899214, 0.54318254]'], {}), '([0.50899214, 0.54318254])\n', (8918, 8944), True, 'import numpy as np\n'), ((8948, 8981), 'numpy.array', 
'np.array', (['[0.51044657, 0.5483048]'], {}), '([0.51044657, 0.5483048])\n', (8956, 8981), True, 'import numpy as np\n')] |
from __future__ import division
import pandas
import numpy as np
import math
wFactor = 250000/50000
def AMS(s, b):
""" Approximate Median Significance defined as:
AMS = sqrt(
2 { (s + b + b_r) log[1 + (s/(b+b_r))] - s}
)
where b_r = 10, b = background, s = signal, log is natural logarithm """
br = 10.0
radicand = 2 *( (s+b+br) * math.log (1.0 + s/(b+br)) -s)
if radicand < 0:
print('radicand is negative. Exiting')
exit()
else:
return math.sqrt(radicand)
def amsScanQuick(inData, wFactor=250000./50000.):
'''Determine optimum AMS and cut,
wFactor used rescale weights to get comparable AMSs'''
s = np.sum(inData.loc[inData['gen_target'] == 1, 'gen_weight'])
b = np.sum(inData.loc[inData['gen_target'] == 0, 'gen_weight'])
tIIs = inData['pred_class'].argsort()
amss = np.empty([len(tIIs)])
amsMax = 0
threshold = 0.0
for tI in range(len(tIIs)):
# don't forget to renormalize the weights to the same sum
# as in the complete training set
amss[tI] = AMS(max(0,s * wFactor),max(0,b * wFactor))
if amss[tI] > amsMax:
amsMax = amss[tI]
threshold = inData['pred_class'].values[tIIs[tI]]
#print tI,threshold
if inData.loc[:, 'gen_target'].values[tIIs[tI]]:
s -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
else:
b -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
return amsMax, threshold | [
"numpy.sum",
"math.sqrt",
"math.log"
] | [((714, 773), 'numpy.sum', 'np.sum', (["inData.loc[inData['gen_target'] == 1, 'gen_weight']"], {}), "(inData.loc[inData['gen_target'] == 1, 'gen_weight'])\n", (720, 773), True, 'import numpy as np\n'), ((782, 841), 'numpy.sum', 'np.sum', (["inData.loc[inData['gen_target'] == 0, 'gen_weight']"], {}), "(inData.loc[inData['gen_target'] == 0, 'gen_weight'])\n", (788, 841), True, 'import numpy as np\n'), ((538, 557), 'math.sqrt', 'math.sqrt', (['radicand'], {}), '(radicand)\n', (547, 557), False, 'import math\n'), ((400, 428), 'math.log', 'math.log', (['(1.0 + s / (b + br))'], {}), '(1.0 + s / (b + br))\n', (408, 428), False, 'import math\n')] |
import numpy as np
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import models
from torch.autograd import Variable
class AdversarialLayer(torch.autograd.Function):
def __init__(self, high_value=1.0, max_iter_value=10000.0):
self.iter_num = 0
self.alpha = 10
self.low = 0.0
self.high = high_value
self.max_iter = max_iter_value
def forward(self, input):
self.iter_num += 1
output = input * 1.0
return output
def backward(self, gradOutput):
self.coeff = np.float(2.0 * (self.high - self.low) / (1.0 + np.exp(-self.alpha*self.iter_num / self.max_iter)) - (self.high - self.low) + self.low)
return -self.coeff * gradOutput
class SilenceLayer(torch.autograd.Function):
def __init__(self):
pass
def forward(self, input):
return input * 1.0
def backward(self, gradOutput):
return 0 * gradOutput
# convnet without the last layer
class AlexNetFc(nn.Module):
def __init__(self, use_bottleneck=True, bottleneck_dim=256, new_cls=False, class_num=1000):
super(AlexNetFc, self).__init__()
model_alexnet = models.alexnet(pretrained=True)
self.features = model_alexnet.features
self.classifier = nn.Sequential()
for i in range(6):
self.classifier.add_module("classifier"+str(i), model_alexnet.classifier[i])
self.feature_layers = nn.Sequential(self.features, self.classifier)
self.use_bottleneck = use_bottleneck
self.new_cls = new_cls
if new_cls:
if self.use_bottleneck:
self.bottleneck = nn.Linear(4096, bottleneck_dim)
self.bottleneck.weight.data.normal_(0, 0.005)
self.bottleneck.bias.data.fill_(0.0)
self.fc = nn.Linear(bottleneck_dim, class_num)
self.fc.weight.data.normal_(0, 0.01)
self.fc.bias.data.fill_(0.0)
self.__in_features = bottleneck_dim
else:
self.fc = nn.Linear(4096, class_num)
self.fc.weight.data.normal_(0, 0.01)
self.fc.bias.data.fill_(0.0)
self.__in_features = 4096
else:
self.fc = model_alexnet.classifier[6]
self.__in_features = 4096
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
if self.use_bottleneck and self.new_cls:
x = self.bottleneck(x)
y = self.fc(x)
return x, y
def output_num(self):
return self.__in_features
resnet_dict = {"ResNet18":models.resnet18, "ResNet34":models.resnet34, "ResNet50":models.resnet50, "ResNet101":models.resnet101, "ResNet152":models.resnet152}
class ResNetFc(nn.Module):
def __init__(self, resnet_name, use_bottleneck=True, bottleneck_dim=256, new_cls=False, class_num=1000):
super(ResNetFc, self).__init__()
model_resnet = resnet_dict[resnet_name](pretrained=True)
self.conv1 = model_resnet.conv1
self.bn1 = model_resnet.bn1
self.relu = model_resnet.relu
self.maxpool = model_resnet.maxpool
self.layer1 = model_resnet.layer1
self.layer2 = model_resnet.layer2
self.layer3 = model_resnet.layer3
self.layer4 = model_resnet.layer4
self.avgpool = model_resnet.avgpool
self.feature_layers = nn.Sequential(self.conv1, self.bn1, self.relu, self.maxpool, \
self.layer1, self.layer2, self.layer3, self.layer4, self.avgpool)
self.use_bottleneck = use_bottleneck
self.new_cls = new_cls
if new_cls:
if self.use_bottleneck:
self.bottleneck = nn.Linear(model_resnet.fc.in_features, bottleneck_dim)
self.bottleneck.weight.data.normal_(0, 0.005)
self.bottleneck.bias.data.fill_(0.0)
self.fc = nn.Linear(bottleneck_dim, class_num)
self.fc.weight.data.normal_(0, 0.01)
self.fc.bias.data.fill_(0.0)
self.__in_features = bottleneck_dim
else:
self.fc = nn.Linear(model_resnet.fc.in_features, class_num)
self.fc.weight.data.normal_(0, 0.01)
self.fc.bias.data.fill_(0.0)
self.__in_features = model_resnet.fc.in_features
else:
self.fc = model_resnet.fc
self.__in_features = model_resnet.fc.in_features
def forward(self, x):
x = self.feature_layers(x)
x = x.view(x.size(0), -1)
if self.use_bottleneck and self.new_cls:
x = self.bottleneck(x)
y = self.fc(x)
return x, y
def output_num(self):
return self.__in_features
vgg_dict = {"VGG11":models.vgg11, "VGG13":models.vgg13, "VGG16":models.vgg16, "VGG19":models.vgg19, "VGG11BN":models.vgg11_bn, "VGG13BN":models.vgg13_bn, "VGG16BN":models.vgg16_bn, "VGG19BN":models.vgg19_bn}
class VGGFc(nn.Module):
def __init__(self, vgg_name, use_bottleneck=True, bottleneck_dim=256, new_cls=False, class_num=1000):
super(VGGFc, self).__init__()
model_vgg = vgg_dict[vgg_name](pretrained=True)
self.features = model_vgg.features
self.classifier = nn.Sequential()
for i in range(6):
self.classifier.add_module("classifier"+str(i), model_vgg.classifier[i])
self.feature_layers = nn.Sequential(self.features, self.classifier)
self.use_bottleneck = use_bottleneck
self.new_cls = new_cls
if new_cls:
if self.use_bottleneck:
self.bottleneck = nn.Linear(4096, bottleneck_dim)
self.bottleneck.weight.data.normal_(0, 0.005)
self.bottleneck.bias.data.fill_(0.0)
self.fc = nn.Linear(bottleneck_dim, class_num)
self.fc.weight.data.normal_(0, 0.01)
self.fc.bias.data.fill_(0.0)
self.__in_features = bottleneck_dim
else:
self.fc = nn.Linear(4096, class_num)
self.fc.weight.data.normal_(0, 0.01)
self.fc.bias.data.fill_(0.0)
self.__in_features = 4096
else:
self.fc = model_vgg.classifier[6]
self.__in_features = 4096
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 25088)
x = self.classifier(x)
if self.use_bottleneck and self.new_cls:
x = self.bottleneck(x)
y = self.fc(x)
return x, y
def output_num(self):
return self.__in_features
class AdversarialNetwork(nn.Module):
    """Domain discriminator: 3-layer MLP mapping features to a sigmoid score.

    ``forward`` returns both the scalar domain probability and the 1024-d
    activations feeding the final layer (``ad_features``).
    """

    def __init__(self, in_feature):
        super(AdversarialNetwork, self).__init__()
        self.ad_layer1 = nn.Linear(in_feature, 1024)
        self.ad_layer2 = nn.Linear(1024, 1024)
        self.ad_layer3 = nn.Linear(1024, 1)
        # Gaussian weight init (wider std on the output layer), zero biases.
        for layer, std in ((self.ad_layer1, 0.01), (self.ad_layer2, 0.01), (self.ad_layer3, 0.3)):
            layer.weight.data.normal_(0, std)
            layer.bias.data.fill_(0.0)
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.dropout1 = nn.Dropout(0.5)
        self.dropout2 = nn.Dropout(0.5)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return ``(domain_prob, ad_features)`` for a batch of features."""
        hidden = self.dropout1(self.relu1(self.ad_layer1(x)))
        ad_features = self.dropout2(self.relu2(self.ad_layer2(hidden)))
        y = self.sigmoid(self.ad_layer3(ad_features))
        return y, ad_features

    def ad_feature_dim(self):
        """Width of the penultimate feature vector."""
        return 1024

    def output_num(self):
        """Number of output units (a single domain score)."""
        return 1
class SmallAdversarialNetwork(nn.Module):
    """Compact two-layer domain discriminator with a single sigmoid output."""

    def __init__(self, in_feature):
        super(SmallAdversarialNetwork, self).__init__()
        self.ad_layer1 = nn.Linear(in_feature, 256)
        self.ad_layer2 = nn.Linear(256, 1)
        # Gaussian weight init, zero biases, for both layers.
        for layer in (self.ad_layer1, self.ad_layer2):
            layer.weight.data.normal_(0, 0.01)
            layer.bias.data.fill_(0.0)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(0.5)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return the sigmoid domain score for a batch of features."""
        hidden = self.dropout1(self.relu1(self.ad_layer1(x)))
        return self.sigmoid(self.ad_layer2(hidden))

    def output_num(self):
        """Number of output units (a single domain score)."""
        return 1
class LittleAdversarialNetwork(nn.Module):
    """Single-layer domain discriminator with a 2-way softmax output.

    ``forward`` returns ``(softmax_probs, pre_softmax_features)``.
    """

    def __init__(self, in_feature):
        super(LittleAdversarialNetwork, self).__init__()
        self.in_feature = in_feature
        self.ad_layer1 = nn.Linear(in_feature, 2)
        self.ad_layer1.weight.data.normal_(0, 0.01)
        self.ad_layer1.bias.data.fill_(0.0)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Return ``(probs, logits)`` for a batch of features."""
        ad_features = self.ad_layer1(x)
        y = self.softmax(ad_features)
        return y, ad_features

    def ad_feature_dim(self):
        # BUG FIX: the method was declared without `self`, so any instance
        # call raised TypeError (and `self.in_feature` would have been a
        # NameError even if called on the class).
        return self.in_feature

    def output_num(self):
        """Number of output units (two-way domain classification)."""
        return 2
def clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Softmax",
"torch.nn.Sequential",
"torchvision.models.alexnet",
"numpy.exp",
"torch.nn.Linear",
"copy.deepcopy"
] | [((1145, 1176), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1159, 1176), False, 'from torchvision import models\n'), ((1242, 1257), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1255, 1257), True, 'import torch.nn as nn\n'), ((1390, 1435), 'torch.nn.Sequential', 'nn.Sequential', (['self.features', 'self.classifier'], {}), '(self.features, self.classifier)\n', (1403, 1435), True, 'import torch.nn as nn\n'), ((3227, 3357), 'torch.nn.Sequential', 'nn.Sequential', (['self.conv1', 'self.bn1', 'self.relu', 'self.maxpool', 'self.layer1', 'self.layer2', 'self.layer3', 'self.layer4', 'self.avgpool'], {}), '(self.conv1, self.bn1, self.relu, self.maxpool, self.layer1,\n self.layer2, self.layer3, self.layer4, self.avgpool)\n', (3240, 3357), True, 'import torch.nn as nn\n'), ((4963, 4978), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (4976, 4978), True, 'import torch.nn as nn\n'), ((5109, 5154), 'torch.nn.Sequential', 'nn.Sequential', (['self.features', 'self.classifier'], {}), '(self.features, self.classifier)\n', (5122, 5154), True, 'import torch.nn as nn\n'), ((6332, 6359), 'torch.nn.Linear', 'nn.Linear', (['in_feature', '(1024)'], {}), '(in_feature, 1024)\n', (6341, 6359), True, 'import torch.nn as nn\n'), ((6381, 6402), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (6390, 6402), True, 'import torch.nn as nn\n'), ((6423, 6441), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(1)'], {}), '(1024, 1)\n', (6432, 6441), True, 'import torch.nn as nn\n'), ((6722, 6731), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6729, 6731), True, 'import torch.nn as nn\n'), ((6749, 6758), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6756, 6758), True, 'import torch.nn as nn\n'), ((6779, 6794), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (6789, 6794), True, 'import torch.nn as nn\n'), ((6815, 6830), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), 
'(0.5)\n', (6825, 6830), True, 'import torch.nn as nn\n'), ((6850, 6862), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6860, 6862), True, 'import torch.nn as nn\n'), ((7366, 7392), 'torch.nn.Linear', 'nn.Linear', (['in_feature', '(256)'], {}), '(in_feature, 256)\n', (7375, 7392), True, 'import torch.nn as nn\n'), ((7414, 7431), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (7423, 7431), True, 'import torch.nn as nn\n'), ((7625, 7634), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7632, 7634), True, 'import torch.nn as nn\n'), ((7655, 7670), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (7665, 7670), True, 'import torch.nn as nn\n'), ((7690, 7702), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (7700, 7702), True, 'import torch.nn as nn\n'), ((8087, 8111), 'torch.nn.Linear', 'nn.Linear', (['in_feature', '(2)'], {}), '(in_feature, 2)\n', (8096, 8111), True, 'import torch.nn as nn\n'), ((8219, 8236), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (8229, 8236), True, 'import torch.nn as nn\n'), ((8533, 8554), 'copy.deepcopy', 'copy.deepcopy', (['module'], {}), '(module)\n', (8546, 8554), False, 'import copy\n'), ((1583, 1614), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'bottleneck_dim'], {}), '(4096, bottleneck_dim)\n', (1592, 1614), True, 'import torch.nn as nn\n'), ((1744, 1780), 'torch.nn.Linear', 'nn.Linear', (['bottleneck_dim', 'class_num'], {}), '(bottleneck_dim, class_num)\n', (1753, 1780), True, 'import torch.nn as nn\n'), ((1955, 1981), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'class_num'], {}), '(4096, class_num)\n', (1964, 1981), True, 'import torch.nn as nn\n'), ((3528, 3582), 'torch.nn.Linear', 'nn.Linear', (['model_resnet.fc.in_features', 'bottleneck_dim'], {}), '(model_resnet.fc.in_features, bottleneck_dim)\n', (3537, 3582), True, 'import torch.nn as nn\n'), ((3712, 3748), 'torch.nn.Linear', 'nn.Linear', (['bottleneck_dim', 'class_num'], {}), '(bottleneck_dim, class_num)\n', 
(3721, 3748), True, 'import torch.nn as nn\n'), ((3923, 3972), 'torch.nn.Linear', 'nn.Linear', (['model_resnet.fc.in_features', 'class_num'], {}), '(model_resnet.fc.in_features, class_num)\n', (3932, 3972), True, 'import torch.nn as nn\n'), ((5302, 5333), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'bottleneck_dim'], {}), '(4096, bottleneck_dim)\n', (5311, 5333), True, 'import torch.nn as nn\n'), ((5463, 5499), 'torch.nn.Linear', 'nn.Linear', (['bottleneck_dim', 'class_num'], {}), '(bottleneck_dim, class_num)\n', (5472, 5499), True, 'import torch.nn as nn\n'), ((5674, 5700), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'class_num'], {}), '(4096, class_num)\n', (5683, 5700), True, 'import torch.nn as nn\n'), ((617, 668), 'numpy.exp', 'np.exp', (['(-self.alpha * self.iter_num / self.max_iter)'], {}), '(-self.alpha * self.iter_num / self.max_iter)\n', (623, 668), True, 'import numpy as np\n')] |
from os import path
import numpy as np
import MDAnalysis
from miscell.file_util import check_dir_exist_and_make
from miscell.hd5_util import save_array_to_hd5, read_array_from_hd5
from miscell.na_bp import d_n_bp
class FoldersBuilder:
    """Compute and create the per-host output layout: <rootfolder>/<host>/*.hdf5."""

    def __init__(self, rootfolder, host):
        self.rootfolder = rootfolder
        self.host = host
        self.host_folder = path.join(rootfolder, host)
        # Target HDF5 files produced by downstream processing.
        self.discre_h5 = path.join(self.host_folder, 'discretized.hdf5')
        self.theta_h5 = path.join(self.host_folder, 'theta.hdf5')

    def initialize_folders(self):
        """Create the root and host folders on disk if they do not exist."""
        for required_dir in (self.rootfolder, self.host_folder):
            check_dir_exist_and_make(required_dir)
class Discretizer(FoldersBuilder):
    """Extract per-frame base-pair midpoint coordinates from a trajectory and
    persist them as an (n_frame, n_bp, 3) array in discretized.hdf5."""

    key = 'discretized'  # dataset key inside the HDF5 file

    def __init__(self, rootfolder, host):
        super().__init__(rootfolder, host)
        self.n_bp = d_n_bp[host]      # number of base pairs for this host
        self.discre_array = None      # (n_frame, n_bp, 3); filled lazily

    def make_discre_h5(self):
        """Build the midpoint array from the trajectory and save it to disk."""
        crd, dcd = self.get_crd_dcd()
        mda_u = MDAnalysis.Universe(crd, dcd)
        n_frame = len(mda_u.trajectory)
        discre_array = np.zeros((n_frame, self.n_bp, 3))
        # BUG FIX: get_basepairs requires the Universe as its first argument;
        # previously it was called with no argument, raising TypeError.
        basepairs = self.get_basepairs(mda_u)
        for frame_id in range(n_frame):
            mda_u.trajectory[frame_id]  # seek the trajectory to this frame
            discre_array[frame_id] = self.get_midpoint_array(basepairs)
        self.discre_array = discre_array
        save_array_to_hd5(self.discre_h5, self.key, self.discre_array)

    def read_discre_h5(self):
        """Load a previously saved midpoint array from discretized.hdf5."""
        self.discre_array = read_array_from_hd5(self.discre_h5, self.key)

    def get_basepairs(self, u, short_segid=False):
        """Map bp_id -> (atom group on strand 1, atom group on strand 2).

        NOTE(review): this method references `miscell`, `c8_c6` and
        `self.sequence`, none of which are defined in this module or class —
        confirm the required imports/attributes exist before calling.
        """
        basepairs = {}
        bp_id = 1
        if short_segid:
            segid_i = 'STR1'
            segid_j = 'STR2'
        else:
            segid_i = 'STRAND1'
            segid_j = 'STRAND2'
        for resid_i in range(1, self.n_bp + 1):
            # resid_i: guide strand resid # resid_j: target strand resid
            resid_j = miscell.get_antistrand_resid(resid_i, n_bp=self.n_bp)
            seq_i = self.sequence['guide']
            seq_j = self.sequence['target']
            resname_i = seq_i[resid_i - 1]
            resname_j = seq_j[resid_j - 1]
            temp = (u.select_atoms('segid {0} and resid {1} and name {2}'.format(segid_i, resid_i, c8_c6[resname_i])),
                    u.select_atoms('segid {0} and resid {1} and name {2}'.format(segid_j, resid_j, c8_c6[resname_j])))
            basepairs[bp_id] = temp
            bp_id += 1
        return basepairs

    def get_midpoint_array(self, basepairs):
        """Midpoint of the two paired atoms for every base pair; shape (n_bp, 3)."""
        midpoint_array = np.zeros((self.n_bp, 3))
        for bp_id in range(self.n_bp):
            atom1 = basepairs[bp_id + 1][0]
            atom2 = basepairs[bp_id + 1][1]
            midpoint = (atom1.positions[0] + atom2.positions[0]) / 2
            midpoint_array[bp_id] = midpoint
        return midpoint_array

    def get_crd_dcd(self):
        # NOTE(review): these look like placeholder names rather than real
        # file paths — confirm against the calling code.
        return 'avg-crd', 'fit-dcd'
"miscell.file_util.check_dir_exist_and_make",
"miscell.hd5_util.read_array_from_hd5",
"os.path.join",
"miscell.hd5_util.save_array_to_hd5",
"numpy.zeros",
"MDAnalysis.Universe"
] | [((367, 394), 'os.path.join', 'path.join', (['rootfolder', 'host'], {}), '(rootfolder, host)\n', (376, 394), False, 'from os import path\n'), ((421, 468), 'os.path.join', 'path.join', (['self.host_folder', '"""discretized.hdf5"""'], {}), "(self.host_folder, 'discretized.hdf5')\n", (430, 468), False, 'from os import path\n'), ((493, 534), 'os.path.join', 'path.join', (['self.host_folder', '"""theta.hdf5"""'], {}), "(self.host_folder, 'theta.hdf5')\n", (502, 534), False, 'from os import path\n'), ((975, 1004), 'MDAnalysis.Universe', 'MDAnalysis.Universe', (['crd', 'dcd'], {}), '(crd, dcd)\n', (994, 1004), False, 'import MDAnalysis\n'), ((1068, 1101), 'numpy.zeros', 'np.zeros', (['(n_frame, self.n_bp, 3)'], {}), '((n_frame, self.n_bp, 3))\n', (1076, 1101), True, 'import numpy as np\n'), ((1343, 1405), 'miscell.hd5_util.save_array_to_hd5', 'save_array_to_hd5', (['self.discre_h5', 'self.key', 'self.discre_array'], {}), '(self.discre_h5, self.key, self.discre_array)\n', (1360, 1405), False, 'from miscell.hd5_util import save_array_to_hd5, read_array_from_hd5\n'), ((1465, 1510), 'miscell.hd5_util.read_array_from_hd5', 'read_array_from_hd5', (['self.discre_h5', 'self.key'], {}), '(self.discre_h5, self.key)\n', (1484, 1510), False, 'from miscell.hd5_util import save_array_to_hd5, read_array_from_hd5\n'), ((2529, 2553), 'numpy.zeros', 'np.zeros', (['(self.n_bp, 3)'], {}), '((self.n_bp, 3))\n', (2537, 2553), True, 'import numpy as np\n'), ((641, 673), 'miscell.file_util.check_dir_exist_and_make', 'check_dir_exist_and_make', (['folder'], {}), '(folder)\n', (665, 673), False, 'from miscell.file_util import check_dir_exist_and_make\n')] |
import numpy as np
import os
import subprocess
from scipy.stats import gamma
from scipy.special import gammainc
class ASRV(object):
    """
    Among-site Rate Variation (ASRV): heterogeneous rate calculation via a
    discretized gamma distribution with equal-probability categories.
    The gamma scale is fixed at 1/alpha so that the mean rate is 1.
    """
    def __init__(self , alpha=1):
        self.num_catg = 8            # number of discrete rate categories
        self.alpha = alpha           # gamma shape parameter
        self.scale = 1/self.alpha    # gamma scale, kept in sync with alpha
    def parse_alpha(self, logfile):
        """
        parse alpha value for fitted gamma distribution from file

        Reads the line starting with "alpha" from <repo root>/data/<logfile>
        and updates self.alpha (and self.scale) in place; exits on failure.
        """
        # path configuration
        path = os.path.abspath(os.path.curdir)
        root_path = path.rpartition('/')[0].replace(" ", r"\ ")
        path = os.path.join(root_path, 'data')
        proc = subprocess.Popen('grep "^alpha" %s' % (os.path.join(path, logfile)),
                                stdout=subprocess.PIPE, shell=True)
        stdout, stderr = proc.communicate()
        try:
            self.alpha = float(stdout.decode().split(' ')[1])
            # BUG FIX: keep the scale consistent with the newly parsed alpha;
            # previously calc_rates() kept using the stale constructor scale.
            self.scale = 1/self.alpha
        except ValueError as val_err:
            print("Failed to parse inferred alpha parameter:", val_err)
            exit()
        except (IndexError, FileNotFoundError) as err:
            print("Failed to open profile file with alpha parameter:", err)
            exit()
    def calc_rates(self):
        """
        calculate average value in each category as rate in ASRV
        Returns
        -------
        rates : list
            list of tuples representing rate and log probability of each rate: [(r_1, log(w_1)),...,(r_k, log(w_k))]
        """
        threshold = 1e-6
        log_weight = np.log(1 / self.num_catg)  # weight (probability) of each rate for ASRV
        # Category boundaries: equal-probability gamma quantiles; the last
        # boundary is capped at the (1 - threshold) quantile instead of +inf.
        perc_points = [0]
        perc_points.extend([gamma.ppf(i/self.num_catg, a=self.alpha, scale=self.scale) for i in range(1, self.num_catg)])
        perc_points.append(gamma.ppf(1-threshold, a=self.alpha, scale=self.scale))
        rates = []
        for i in range(len(perc_points)-1):
            a, b = perc_points[i], perc_points[i+1]
            # Mean rate within [a, b] via the regularized incomplete gamma
            # function; the factor num_catg normalizes by the category weight.
            r_i = (gammainc(self.alpha+1, b*self.alpha) - gammainc(self.alpha+1, a*self.alpha)) * self.num_catg
            rates.append((r_i, log_weight))
        return rates
| [
"numpy.log",
"os.path.join",
"scipy.stats.gamma.ppf",
"os.path.abspath",
"scipy.special.gammainc"
] | [((538, 569), 'os.path.abspath', 'os.path.abspath', (['os.path.curdir'], {}), '(os.path.curdir)\n', (553, 569), False, 'import os\n'), ((648, 679), 'os.path.join', 'os.path.join', (['root_path', '"""data"""'], {}), "(root_path, 'data')\n", (660, 679), False, 'import os\n'), ((1597, 1622), 'numpy.log', 'np.log', (['(1 / self.num_catg)'], {}), '(1 / self.num_catg)\n', (1603, 1622), True, 'import numpy as np\n'), ((1853, 1909), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['(1 - threshold)'], {'a': 'self.alpha', 'scale': 'self.scale'}), '(1 - threshold, a=self.alpha, scale=self.scale)\n', (1862, 1909), False, 'from scipy.stats import gamma\n'), ((743, 770), 'os.path.join', 'os.path.join', (['path', 'logfile'], {}), '(path, logfile)\n', (755, 770), False, 'import os\n'), ((1732, 1792), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['(i / self.num_catg)'], {'a': 'self.alpha', 'scale': 'self.scale'}), '(i / self.num_catg, a=self.alpha, scale=self.scale)\n', (1741, 1792), False, 'from scipy.stats import gamma\n'), ((2061, 2101), 'scipy.special.gammainc', 'gammainc', (['(self.alpha + 1)', '(b * self.alpha)'], {}), '(self.alpha + 1, b * self.alpha)\n', (2069, 2101), False, 'from scipy.special import gammainc\n'), ((2100, 2140), 'scipy.special.gammainc', 'gammainc', (['(self.alpha + 1)', '(a * self.alpha)'], {}), '(self.alpha + 1, a * self.alpha)\n', (2108, 2140), False, 'from scipy.special import gammainc\n')] |
import cv2
import numpy as np
class PinholeCamera:
    """Pinhole camera model: image dimensions plus intrinsic calibration.

    Stores the 3x3 intrinsic matrix and unpacks the standard parameters
    (focal lengths and principal point, all in pixels) for convenience.
    """

    def __init__(self, width, height, int_matrix):
        self.cam_width = width
        self.cam_height = height
        self.intrinsic_matrix = int_matrix  # full 3x3 intrinsic matrix
        # Standard intrinsics pulled out of the matrix:
        self.fx = int_matrix[0][0]  # focal length along x
        self.fy = int_matrix[1][1]  # focal length along y
        self.cx = int_matrix[0][2]  # principal point, x (pixels)
        self.cy = int_matrix[1][2]  # principal point, y (pixels)
class VisualOdometry:
    """
    Monocular visual odometry from consecutive camera frames.

    Feed frames one at a time through ``update(image)``.  The first frame only
    initializes the ORB keypoints/descriptors; from the second frame on the
    class matches features against the previous frame, estimates the essential
    matrix, recovers the relative pose, and chains it into the running
    rotation/translation.  The accumulated position is stored in
    ``cur_coords`` (translation is up to an unknown monocular scale —
    presumably corrected externally via ``get_position``'s ``scale``).
    """
    def __init__(self, cam):
        # cam: PinholeCamera providing the intrinsic matrix for pose recovery.
        self.frame_stage = 0          # 0 = waiting for first frame, 1 = running
        self.cam = cam
        self.cur_frame = None
        self.prev_frame = None
        self.cur_rotation = None
        self.prev_rotation = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]  # identity start pose
        self.cur_translation = [[0], [0], [0]]
        self.prev_translation = [[0], [0], [0]]
        self.cur_coords = [[0], [0], [0]]   # accumulated position (column vector)
        self.base_coords = [[0], [0], [0]]  # origin transformed by the chained pose
        self.kp_prev = None
        self.kp_cur = None
        self.des_prev = None
        self.des_cur = None
        self.detector = cv2.ORB_create(nfeatures=2000, edgeThreshold=31)
        # self.detector = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)
        # Choosing FlannBasedMatcher with params compatible with ORB detector as class matcher.
        FLANN_INDEX_LSH = 6
        index_params = dict(algorithm=FLANN_INDEX_LSH,
                            table_number=12, # 6 # the number of hash tables to use, between 10 a 30 usually
                            key_size=20,     # 12 # the size of the hash key in bits (between 10 and 20 usually)
                            multi_probe_level=2) # 1 # the number of bits to shift to check for neighboring buckets (0 is regular LSH, 2 is recommended).
        search_params = dict(checks=300)  # number of point checks: more = more accurate but slower
        # self.kMinNumFeature = 500
        self.matcher = cv2.FlannBasedMatcher(index_params, search_params)

    def get_position(self, scale):
        """
        Return the current position as [z, x, y], each divided by `scale`
        (the external scale factor for the monocular translation).
        """
        x = self.cur_coords[0][0] / scale
        y = self.cur_coords[1][0] / scale
        z = self.cur_coords[2][0] / scale
        return [z, x, y]

    def processFirstFrame(self):
        """
        Processing first passed image to get from it KeyPoints and Descriptors which will be a reference for next image
        :return:
        """
        # self.kp_prev, self.des_prev = self.detector.detectAndCompute(self.cur_frame, None)
        self.kp_prev = self.detector.detect(self.cur_frame, None)
        self.kp_prev, self.des_prev = self.detector.compute(self.cur_frame, self.kp_prev)
        # self.kp_old = np.array([x.pt for x in self.kp_old], dtype=np.float32)
        self.frame_stage = 1
        self.prev_frame = self.cur_frame

    def processFrame(self):
        """
        Processing every next frame after the first one.
        Firstly new KeyPoints and Descriptors are obtained from new image. Then matches between previous and current
        descriptors are found. Then coordinates in pixels of each match are obtained. Then Essential Matrix is found
        using OpenCV function based on Ransac method. Then current position is obtained using OpenCV function
        'recoverPose'. With obtained values, new translation, rotation and coordinates are found. Current coordinates
        are accessible with 'cur_coords' param.
        :return:
        """
        # self.kp_cur, self.des_cur = self.detector.detectAndCompute(self.cur_frame, None)
        self.kp_cur = self.detector.detect(self.cur_frame, None)
        self.kp_cur, self.des_cur = self.detector.compute(self.cur_frame, self.kp_cur)
        try:
            # k=2: find the two nearest matches per descriptor; a match is kept
            # only when its distance is clearly smaller than the runner-up's
            # (Lowe's ratio test below); with fewer than two candidates none is kept.
            matches = self.matcher.knnMatch(self.des_prev, self.des_cur, k=2)
            matchesMask = [[0, 0] for i in range(len(matches))]
            good = []
            for i, match_lista in enumerate(matches):
                if len(match_lista) != 2:
                    continue
                m, n = match_lista[0], match_lista[1]
                # Lowe's ratio test: keep only clearly-best matches.
                if m.distance < 0.8 * n.distance:
                    matchesMask[i] = [1, 0]
                    good.append(m)
                # elif n.distance < 0.8 * m.distance:
                #     matchesMask[i] = [0, 1]
                #     good.append(n)
                # print(m)
                # print(n)
                # print('')
            # Pixel coordinates of the matched keypoints in both frames.
            kp1 = []
            kp2 = []
            for match in good:
                kp1.append(self.kp_prev[match.queryIdx].pt)
                kp2.append(self.kp_cur[match.trainIdx].pt)
            kp1 = np.asarray(kp1)
            kp2 = np.asarray(kp2)
            cam_matrix = np.asarray(self.cam.intrinsic_matrix)
            # At least 5 point pairs are needed to estimate the essential matrix.
            if len(kp1) > 5:
                E, mask = cv2.findEssentialMat(kp1, kp2, cam_matrix, prob=0.999, method=cv2.RANSAC)
                _, self.cur_rotation, self.cur_translation, mask = cv2.recoverPose(E, np.float64(kp1), np.float64(kp2), cam_matrix)
                # Chain the relative pose onto the accumulated pose.
                self.cur_rotation = self.cur_rotation @ self.prev_rotation
                self.cur_translation = self.prev_translation + self.prev_rotation @ self.cur_translation
                self.cur_coords = self.cur_rotation @ self.base_coords + self.cur_translation
            self.kp_prev = self.kp_cur
            self.des_prev = self.des_cur
            self.prev_frame = self.cur_frame
            self.prev_rotation = self.cur_rotation
            self.prev_translation = self.cur_translation
        except:
            # NOTE(review): bare except silently skips frames where matching or
            # pose estimation fails, keeping the previous pose — consider
            # narrowing to the expected cv2/numpy exceptions and logging.
            pass

    def update(self, img):
        """
        Appropriate processing of passed image
        :param img: input image, for example from Video Camera
        """
        self.cur_frame = img
        if self.frame_stage == 1:
            self.processFrame()
        elif self.frame_stage == 0:
            self.processFirstFrame()
        # self.last_frame = self.new_frame
| [
"numpy.float64",
"cv2.findEssentialMat",
"numpy.asarray",
"cv2.ORB_create",
"cv2.FlannBasedMatcher"
] | [((1680, 1728), 'cv2.ORB_create', 'cv2.ORB_create', ([], {'nfeatures': '(2000)', 'edgeThreshold': '(31)'}), '(nfeatures=2000, edgeThreshold=31)\n', (1694, 1728), False, 'import cv2\n'), ((2560, 2610), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (2581, 2610), False, 'import cv2\n'), ((5458, 5473), 'numpy.asarray', 'np.asarray', (['kp1'], {}), '(kp1)\n', (5468, 5473), True, 'import numpy as np\n'), ((5492, 5507), 'numpy.asarray', 'np.asarray', (['kp2'], {}), '(kp2)\n', (5502, 5507), True, 'import numpy as np\n'), ((5533, 5570), 'numpy.asarray', 'np.asarray', (['self.cam.intrinsic_matrix'], {}), '(self.cam.intrinsic_matrix)\n', (5543, 5570), True, 'import numpy as np\n'), ((5628, 5701), 'cv2.findEssentialMat', 'cv2.findEssentialMat', (['kp1', 'kp2', 'cam_matrix'], {'prob': '(0.999)', 'method': 'cv2.RANSAC'}), '(kp1, kp2, cam_matrix, prob=0.999, method=cv2.RANSAC)\n', (5648, 5701), False, 'import cv2\n'), ((5788, 5803), 'numpy.float64', 'np.float64', (['kp1'], {}), '(kp1)\n', (5798, 5803), True, 'import numpy as np\n'), ((5805, 5820), 'numpy.float64', 'np.float64', (['kp2'], {}), '(kp2)\n', (5815, 5820), True, 'import numpy as np\n')] |
import math
import cv2 as cv
import numpy as np
class ImageProcessor:
    """Pixel-level image-processing toolbox built on OpenCV/NumPy.

    Holds the current image (`img`, stored in RGB order after loading) and
    the result of the last operation (`processed_img`), and implements basic
    geometric and intensity transforms explicitly, pixel by pixel.
    """

    def __init__(self, fp=None):
        """Initialize image process class.

        Args:
            fp (str) : File path to the image; if None, start empty.
        """
        if fp is not None:
            self.load_img(fp)
        else:
            self.img = None
            self.processed_img = None
            self.width = None
            self.height = None
            self.channels = None

    def load_img(self, fp):
        """Load image from disk and convert it to RGB order in place.

        Args:
            fp (str): Path to image file.
        """
        # NOTE(review): cv.imread returns None for a missing/unreadable file,
        # which would make cvtColor fail — confirm callers pass valid paths.
        self.img = cv.imread(fp)
        cv.cvtColor(self.img, cv.COLOR_BGR2RGB, self.img)
        self.processed_img = self.img
        self.update_img_property()

    def update_img_property(self):
        """Update image properties (height, width, channels) from `img`."""
        self.height, self.width, self.channels = self.img.shape

    def get_img(self):
        """Return the result of the most recent operation."""
        return self.processed_img

    def set_img(self, img):
        """Set given image as the class image."""
        self.img = img

    def restore_changes(self):
        """Discard the pending result, resetting it to the stored image."""
        self.processed_img = self.img

    def save_changes(self):
        """Adopt the pending result as the stored image."""
        self.img = self.processed_img
        self.update_img_property()

    def save_img(self, fp):
        """Save the image to disk (converted back to BGR for OpenCV)."""
        cv.cvtColor(self.img, cv.COLOR_BGR2RGB, self.img)
        cv.imwrite(fp, self.img)

    def show(self, img=None, name='Image'):
        """Display image. Press 'esc' to exit.

        Args:
            img (numpy.array): Image array representation; defaults to `img`.
            name (str): Name of the window.
        """
        if img is None:
            img = self.img
        # In-place conversion: this mutates the displayed array.
        cv.cvtColor(img, cv.COLOR_RGB2BGR, img)
        cv.imshow(name, img)
        if cv.waitKey(0) == 27:  # 27 == Esc
            cv.destroyAllWindows()

    def get_split_color(self, mode):
        """Extract a single color channel (image is stored in RGB order).

        Args:
            mode (str): 'r', 'g' or 'b'.
        Returns:
            Single-channel image.
        Raises:
            Exception: for any other mode value.
        """
        if mode == 'b':
            img = self.img[:, :, 2]
        elif mode == 'r':
            img = self.img[:, :, 0]
        elif mode == 'g':
            img = self.img[:, :, 1]
        else:
            raise Exception("Color option not exist!")
        self.processed_img = img
        return img

    def get_pixel_color(self, height, width):
        """Return the (r, g, b) value at the given pixel position."""
        return self.img[height][width]

    def get_shape(self):
        """Return (height, width, channels) of the stored image."""
        return self.height, self.width, self.channels

    def shift(self, x, y, cut=False):
        """Shift the image vertically and horizontally.

        With cut=True the canvas keeps its size and pixels shifted outside
        are dropped; with cut=False the canvas grows by |x| and |y| and the
        uncovered area stays black.

        Args:
            x (int): Shift in pixels along the height axis.
            y (int): Shift in pixels along the width axis.
            cut (bool): Whether to clip to the original canvas.
        Returns:
            Numpy array representation of the shifted image.
        """
        transform_mat = np.array([[1, 0, x],
                                  [0, 1, y],
                                  [0, 0, 1]], dtype=np.int32)
        height, width, channels = self.get_shape()
        if not cut:
            img = self.create_blank_img(height + abs(x), width + abs(y))
            for i in range(self.height):
                for j in range(self.width):
                    # Get new position
                    src = np.array([i, j, 1], dtype=np.int32)
                    dst = np.dot(transform_mat, src)
                    # Negative shifts keep the source index on that axis so
                    # the content stays inside the enlarged canvas.
                    if x >= 0 and y >= 0:
                        img[dst[0]][dst[1]] = self.img[i][j]
                    elif y >= 0:
                        img[i][dst[1]] = self.img[i][j]
                    elif x >= 0:
                        img[dst[0]][j] = self.img[i][j]
                    else:
                        img[i][j] = self.img[i][j]
        else:
            img = self.create_blank_img()
            for i in range(self.height):
                for j in range(self.width):
                    src = np.array([i, j, 1], dtype=np.int32)
                    dst = np.dot(transform_mat, src)
                    if 0 <= dst[0] < self.height:
                        if 0 <= dst[1] < self.width:
                            img[dst[0]][dst[1]] = self.img[i][j]
        self.processed_img = img
        return img

    def rotate(self, angle, clockwise=True, cut=True):
        """Rotate the image clockwise or anti-clockwise.

        Args:
            angle (int): The rotation angle in degrees.
            clockwise (bool): Rotation direction.
            cut (bool): If True keep the canvas size (corners may be lost);
                otherwise enlarge the canvas to fit the whole rotated image.
        Returns:
            Rotated image.
        """
        if not clockwise:
            angle = -angle
        rad = angle * math.pi / 180.0
        cos_a = math.cos(rad)
        sin_a = math.sin(rad)
        height, width, channels = self.get_shape()
        # Move the origin to the image center (Cartesian), rotate, move back.
        trans_descartes = np.array([[-1, 0, 0],
                                    [0, 1, 0],
                                    [0.5 * height, -0.5 * width, 1]], dtype=np.float32)
        trans_back = np.array([[-1, 0, 0],
                               [0, 1, 0],
                               [0.5 * height, 0.5 * width, 1]], dtype=np.float32)
        rotate_mat = np.array([[cos_a, sin_a, 0],
                               [-sin_a, cos_a, 0],
                               [0, 0, 1]])
        trans_mat = np.dot(np.dot(trans_descartes, rotate_mat), trans_back)
        if cut:
            img = self.create_blank_img()
            for i in range(self.height):
                for j in range(self.width):
                    src = np.array([i, j, 1], dtype=np.int32)
                    dst = np.dot(src, trans_mat)
                    x = int(dst[0])
                    y = int(dst[1])
                    if 0 <= x < height and 0 <= y < width:
                        img[x][y] = self.img[i][j]
        else:
            # Size the new canvas from the rotated positions of two corners.
            org_x1 = np.array([0.5 * height, -0.5 * width, 1], dtype=np.int32)
            org_x2 = np.array([-0.5 * height, -0.5 * width, 1], dtype=np.int32)
            new_x1 = np.dot(org_x1, rotate_mat)
            new_x2 = np.dot(org_x2, rotate_mat)
            new_height = 2 * math.ceil(max(abs(new_x1[0]), abs(new_x2[0])))
            new_width = 2 * math.ceil(max(abs(new_x1[1]), abs(new_x2[1])))
            img = self.create_blank_img(new_height + 1, new_width + 1)
            new_trans_back = np.array([[-1, 0, 0],
                                       [0, 1, 0],
                                       [0.5 * new_height, 0.5 * new_width, 1]], dtype=np.float32)
            new_trans_mat = np.dot(np.dot(trans_descartes, rotate_mat), new_trans_back)
            for i in range(self.height):
                for j in range(self.width):
                    src = np.array([i, j, 1], dtype=np.int32)
                    dst = np.dot(src, new_trans_mat)
                    x = int(dst[0])
                    y = int(dst[1])
                    img[x][y] = self.img[i][j]
        self.processed_img = img
        return img

    def resize(self, m, n):
        """Resize the image by nearest-neighbour sampling.

        Args:
            m (float): Scale factor on height.
            n (float): Scale factor on width.
        Returns:
            Resized image.
        """
        height, width, channels = self.get_shape()
        height = int(height * m)
        width = int(width * n)
        img = self.create_blank_img(height, width, channels)
        for i in range(height):
            for j in range(width):
                src_i = int(i / m)
                src_j = int(j / n)
                img[i][j] = self.img[src_i][src_j]
        self.processed_img = img
        return img

    def trans_gray(self, level=256):
        """Transform an RGB image to a quantized gray-scale image.

        Gray scale can be quantized to 256, 128, 64, 32, 16, 8, 4 or 2
        levels; quantized values are mapped back onto the full 0-255 range.

        Args:
            level (int): Quantization level. Default 256.
        Returns:
            Gray-scale image of shape (height, width, 1), or None if no
            image is loaded.
        Raises:
            ValueError: If level is outside [2, 256].
        """
        if self.img is None:
            return
        n = math.log2(level)
        if n < 1 or n > 8:
            raise ValueError('Quantization level wrong! Must be exponential value of 2')
        # Turn image from RGB to Gray scale image
        img = self.create_blank_img(channels=1)
        step = 256 / level
        # BUG FIX: was `self.channels is 3` — identity comparison on an int
        # (implementation-dependent, SyntaxWarning on modern CPython).
        if self.channels == 3:
            for i in range(self.height):
                for j in range(self.width):
                    pixel = self.img[i][j]
                    # ITU-R BT.601 luma weights.
                    gray = 0.299 * pixel[0] + 0.587 * pixel[1] + 0.114 * pixel[2]
                    mapped_gray = int(gray / step) / (level - 1) * 255
                    img[i][j] = round(mapped_gray)
        else:
            for i in range(self.height):
                for j in range(self.width):
                    pixel = self.img[i][j]
                    # Index explicitly: int() on a 1-element array is a
                    # deprecated scalar conversion in NumPy >= 1.25.
                    mapped_gray = int(pixel[0] / step) / (level - 1) * 255
                    img[i][j] = round(mapped_gray)
        self.processed_img = img
        return img

    def create_blank_img(self, height=None, width=None, channels=3):
        """Create a blank (all-black) image.

        Defaults to the dimensions of the loaded image.

        Args:
            height (int): Height of new image in pixels.
            width (int): Width of new image in pixels.
            channels (int): Number of channels. Default 3 (RGB).
        Returns:
            New zeroed uint8 image of shape (height, width, channels).
        Raises:
            Exception: If only one of height/width is provided.
        """
        if not height and not width:
            height, width, _ = self.get_shape()
        if not height or not width:
            raise Exception("Invalid height or width!")
        if channels is None:
            channels = 1
        size = (height, width, channels)
        img = np.zeros(size, dtype=np.uint8)
        return img

    def get_hist(self):
        """Render the gray-level histogram of the image.

        Returns:
            A 256x256 RGB image with the histogram drawn in green.
        """
        hist = np.zeros(256, dtype=np.uint32)
        hist_img = np.zeros((256, 256, 3), dtype=np.uint8)
        img = self.trans_gray()
        for i in range(self.height):
            for j in range(self.width):
                # Index explicitly to avoid deprecated 1-element-array int().
                g_p = int(img[i][j][0])
                hist[g_p] += 1
        # Maximum count in all 256 levels
        max_freq = max(hist)
        for i in range(256):
            x = (i, 255)
            # Calculate the relative frequency compared to maximum frequency
            p = int(255 - hist[i] * 255 / max_freq)
            y = (i, p)
            cv.line(hist_img, x, y, (0, 255, 0))
        return hist_img

    def hist_equalization(self):
        """Apply histogram equalization to the gray-scale image.

        Returns:
            Gray-scale image after histogram equalization.
        """
        hist = np.zeros(256, dtype=np.uint32)
        img = self.trans_gray()
        for i in range(self.height):
            for j in range(self.width):
                # Index explicitly to avoid deprecated 1-element-array int().
                g_p = int(img[i][j][0])
                hist[g_p] += 1
        # Cumulative histogram.
        hist_c = np.zeros(256, dtype=np.uint32)
        hist_c[0] = hist[0]
        for i in range(1, 256):
            hist_c[i] = hist_c[i - 1] + hist[i]
        factor = 255.0 / (self.height * self.width)
        for i in range(self.height):
            for j in range(self.width):
                g_p = int(img[i][j][0])
                g_q = int(factor * hist_c[g_p])
                img[i][j] = g_q
        self.processed_img = img
        return img

    def smooth(self, h=None):
        """Smooth the gray-scale image with a 3x3 convolution kernel.

        Border pixels are copied through unfiltered.

        Args:
            h (numpy.array): 3x3 smoothing kernel; defaults to a Gaussian-like
                kernel normalized by 16.
        Returns:
            Image after smoothing.
        """
        height = self.height
        width = self.width
        img = self.trans_gray()
        filtered_img = self.create_blank_img(channels=1)
        if h is None:
            h = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 16.0
        for i in range(height):
            for j in range(width):
                if i in [0, height - 1] or j in [0, width - 1]:
                    filtered_img[i][j] = img[i][j]
                else:
                    x = img[i - 1:i + 2, j - 1:j + 2]
                    x = x.squeeze()
                    m = np.multiply(x, h)
                    filtered_img[i][j] = m.sum()
        self.processed_img = filtered_img
        return filtered_img

    def sharpen(self):
        """Edge detection via the Sobel operator (|Gx| + |Gy|).

        Border pixels are left black.

        Returns:
            Processed gray-scale edge image.
        """
        sobel_x = np.array([[-1, 0, 1],
                            [-2, 0, 2],
                            [-1, 0, 1]], dtype=np.int8)
        sobel_y = np.array([[-1, -2, -1],
                            [0, 0, 0],
                            [1, 2, 1]], dtype=np.int8)
        height = self.height
        width = self.width
        img = self.trans_gray()
        filtered_img = self.create_blank_img(channels=1)
        for i in range(1, height - 1):
            for j in range(1, width - 1):
                x = np.multiply((img[i - 1:i + 2, j - 1:j + 2]).squeeze(), sobel_x)
                y = np.multiply((img[i - 1:i + 2, j - 1:j + 2]).squeeze(), sobel_y)
                filtered_img[i][j] = abs(x.sum()) + abs(y.sum())
        self.processed_img = filtered_img
        return filtered_img
| [
"cv2.imwrite",
"numpy.multiply",
"cv2.line",
"math.log2",
"cv2.imshow",
"math.cos",
"numpy.array",
"numpy.zeros",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.dot",
"cv2.cvtColor",
"math.sin",
"cv2.imread"
] | [((664, 677), 'cv2.imread', 'cv.imread', (['fp'], {}), '(fp)\n', (673, 677), True, 'import cv2 as cv\n'), ((686, 735), 'cv2.cvtColor', 'cv.cvtColor', (['self.img', 'cv.COLOR_BGR2RGB', 'self.img'], {}), '(self.img, cv.COLOR_BGR2RGB, self.img)\n', (697, 735), True, 'import cv2 as cv\n'), ((1499, 1548), 'cv2.cvtColor', 'cv.cvtColor', (['self.img', 'cv.COLOR_BGR2RGB', 'self.img'], {}), '(self.img, cv.COLOR_BGR2RGB, self.img)\n', (1510, 1548), True, 'import cv2 as cv\n'), ((1557, 1581), 'cv2.imwrite', 'cv.imwrite', (['fp', 'self.img'], {}), '(fp, self.img)\n', (1567, 1581), True, 'import cv2 as cv\n'), ((1864, 1903), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_RGB2BGR', 'img'], {}), '(img, cv.COLOR_RGB2BGR, img)\n', (1875, 1903), True, 'import cv2 as cv\n'), ((1912, 1932), 'cv2.imshow', 'cv.imshow', (['name', 'img'], {}), '(name, img)\n', (1921, 1932), True, 'import cv2 as cv\n'), ((3569, 3628), 'numpy.array', 'np.array', (['[[1, 0, x], [0, 1, y], [0, 0, 1]]'], {'dtype': 'np.int32'}), '([[1, 0, x], [0, 1, y], [0, 0, 1]], dtype=np.int32)\n', (3577, 3628), True, 'import numpy as np\n'), ((5435, 5448), 'math.cos', 'math.cos', (['rad'], {}), '(rad)\n', (5443, 5448), False, 'import math\n'), ((5465, 5478), 'math.sin', 'math.sin', (['rad'], {}), '(rad)\n', (5473, 5478), False, 'import math\n'), ((5557, 5646), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, 1, 0], [0.5 * height, -0.5 * width, 1]]'], {'dtype': 'np.float32'}), '([[-1, 0, 0], [0, 1, 0], [0.5 * height, -0.5 * width, 1]], dtype=np\n .float32)\n', (5565, 5646), True, 'import numpy as np\n'), ((5735, 5823), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, 1, 0], [0.5 * height, 0.5 * width, 1]]'], {'dtype': 'np.float32'}), '([[-1, 0, 0], [0, 1, 0], [0.5 * height, 0.5 * width, 1]], dtype=np.\n float32)\n', (5743, 5823), True, 'import numpy as np\n'), ((5902, 5962), 'numpy.array', 'np.array', (['[[cos_a, sin_a, 0], [-sin_a, cos_a, 0], [0, 0, 1]]'], {}), '([[cos_a, sin_a, 0], [-sin_a, cos_a, 0], [0, 0, 1]])\n', 
(5910, 5962), True, 'import numpy as np\n'), ((8697, 8713), 'math.log2', 'math.log2', (['level'], {}), '(level)\n', (8706, 8713), False, 'import math\n'), ((10404, 10434), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.uint8'}), '(size, dtype=np.uint8)\n', (10412, 10434), True, 'import numpy as np\n'), ((10609, 10639), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.uint32'}), '(256, dtype=np.uint32)\n', (10617, 10639), True, 'import numpy as np\n'), ((10659, 10698), 'numpy.zeros', 'np.zeros', (['(256, 256, 3)'], {'dtype': 'np.uint8'}), '((256, 256, 3), dtype=np.uint8)\n', (10667, 10698), True, 'import numpy as np\n'), ((11443, 11473), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.uint32'}), '(256, dtype=np.uint32)\n', (11451, 11473), True, 'import numpy as np\n'), ((11670, 11700), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.uint32'}), '(256, dtype=np.uint32)\n', (11678, 11700), True, 'import numpy as np\n'), ((13115, 13176), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]'], {'dtype': 'np.int8'}), '([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.int8)\n', (13123, 13176), True, 'import numpy as np\n'), ((13251, 13312), 'numpy.array', 'np.array', (['[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]'], {'dtype': 'np.int8'}), '([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.int8)\n', (13259, 13312), True, 'import numpy as np\n'), ((1944, 1957), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1954, 1957), True, 'import cv2 as cv\n'), ((1977, 1999), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1997, 1999), True, 'import cv2 as cv\n'), ((6052, 6087), 'numpy.dot', 'np.dot', (['trans_descartes', 'rotate_mat'], {}), '(trans_descartes, rotate_mat)\n', (6058, 6087), True, 'import numpy as np\n'), ((6573, 6630), 'numpy.array', 'np.array', (['[0.5 * height, -0.5 * width, 1]'], {'dtype': 'np.int32'}), '([0.5 * height, -0.5 * width, 1], dtype=np.int32)\n', (6581, 6630), True, 'import numpy as np\n'), ((6652, 6710), 
'numpy.array', 'np.array', (['[-0.5 * height, -0.5 * width, 1]'], {'dtype': 'np.int32'}), '([-0.5 * height, -0.5 * width, 1], dtype=np.int32)\n', (6660, 6710), True, 'import numpy as np\n'), ((6733, 6759), 'numpy.dot', 'np.dot', (['org_x1', 'rotate_mat'], {}), '(org_x1, rotate_mat)\n', (6739, 6759), True, 'import numpy as np\n'), ((6781, 6807), 'numpy.dot', 'np.dot', (['org_x2', 'rotate_mat'], {}), '(org_x2, rotate_mat)\n', (6787, 6807), True, 'import numpy as np\n'), ((7060, 7155), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, 1, 0], [0.5 * new_height, 0.5 * new_width, 1]]'], {'dtype': 'np.float32'}), '([[-1, 0, 0], [0, 1, 0], [0.5 * new_height, 0.5 * new_width, 1]],\n dtype=np.float32)\n', (7068, 7155), True, 'import numpy as np\n'), ((11206, 11242), 'cv2.line', 'cv.line', (['hist_img', 'x', 'y', '(0, 255, 0)'], {}), '(hist_img, x, y, (0, 255, 0))\n', (11213, 11242), True, 'import cv2 as cv\n'), ((7265, 7300), 'numpy.dot', 'np.dot', (['trans_descartes', 'rotate_mat'], {}), '(trans_descartes, rotate_mat)\n', (7271, 7300), True, 'import numpy as np\n'), ((12465, 12508), 'numpy.array', 'np.array', (['[[1, 2, 1], [2, 4, 2], [1, 2, 1]]'], {}), '([[1, 2, 1], [2, 4, 2], [1, 2, 1]])\n', (12473, 12508), True, 'import numpy as np\n'), ((3992, 4027), 'numpy.array', 'np.array', (['[i, j, 1]'], {'dtype': 'np.int32'}), '([i, j, 1], dtype=np.int32)\n', (4000, 4027), True, 'import numpy as np\n'), ((4054, 4080), 'numpy.dot', 'np.dot', (['transform_mat', 'src'], {}), '(transform_mat, src)\n', (4060, 4080), True, 'import numpy as np\n'), ((4607, 4642), 'numpy.array', 'np.array', (['[i, j, 1]'], {'dtype': 'np.int32'}), '([i, j, 1], dtype=np.int32)\n', (4615, 4642), True, 'import numpy as np\n'), ((4669, 4695), 'numpy.dot', 'np.dot', (['transform_mat', 'src'], {}), '(transform_mat, src)\n', (4675, 4695), True, 'import numpy as np\n'), ((6271, 6306), 'numpy.array', 'np.array', (['[i, j, 1]'], {'dtype': 'np.int32'}), '([i, j, 1], dtype=np.int32)\n', (6279, 6306), True, 'import numpy 
as np\n'), ((6333, 6355), 'numpy.dot', 'np.dot', (['src', 'trans_mat'], {}), '(src, trans_mat)\n', (6339, 6355), True, 'import numpy as np\n'), ((7429, 7464), 'numpy.array', 'np.array', (['[i, j, 1]'], {'dtype': 'np.int32'}), '([i, j, 1], dtype=np.int32)\n', (7437, 7464), True, 'import numpy as np\n'), ((7491, 7517), 'numpy.dot', 'np.dot', (['src', 'new_trans_mat'], {}), '(src, new_trans_mat)\n', (7497, 7517), True, 'import numpy as np\n'), ((12835, 12852), 'numpy.multiply', 'np.multiply', (['x', 'h'], {}), '(x, h)\n', (12846, 12852), True, 'import numpy as np\n')] |
import dataclasses
import math
import json
import numpy as np
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
import geopandas as gpd
import shapely.geometry as shpg
from scipy.interpolate import interp1d
@dataclasses.dataclass
class UncClass:
    """A mean value paired with a relative (fractional) uncertainty.

    ``std_perc`` stores the one-sigma uncertainty as a fraction of ``mean``
    (0.15 == 15%).
    """
    _default_unc = 0.15  # default relative uncertainty (15%)
    mean: float
    unit_name: str = ""
    std_perc: float = _default_unc

    def std_unit(self):
        """Return the standard deviation expressed in the units of ``mean``."""
        return self.std_perc * self.mean

    def std_perc_100(self):
        """Return the relative uncertainty on a 0-100 percent scale."""
        return 100. * self.std_perc

    def perc_100_to_perc(self, perc_100):
        """Set the relative uncertainty from a percentage value (must be >= 1)."""
        if perc_100 < 1:
            raise ValueError("Input percentage needs to be greater than 1")
        self.std_perc = perc_100 / 100.

    def unit_to_perc(self, unit_std):
        """Set the relative uncertainty from an absolute standard deviation."""
        self.std_perc = unit_std / self.mean
class ModelDefaults:
    """Default model parameters loaded from ``defaults.json`` next to this module.

    Also derives vertical-stress and regime-specific horizontal-stress
    gradient defaults (all gradients in MPa/km).
    """

    def __init__(self):
        """Read defaults.json and populate default values and derived stresses."""
        here = os.path.dirname(os.path.abspath(__file__))
        infile = os.path.join(here, "defaults.json")
        with open(infile) as load_file:
            inputs = json.load(load_file)['input_data'][0]
        self.general_unc = inputs["general_unc"]
        self.max_pf = inputs['pf']
        self.pf_unit = "MPa"
        self.density = inputs["density"]
        self.density_unit = "kg/m^3"
        self.hydro = inputs["hydro"]
        self.hydro_unit = "MPa/km"
        self.hydro_under = inputs["hydro_under"]
        self.hydro_upper = inputs["hydro_upper"]
        self.dip = inputs["dip"]
        self.dip_unit = "deg"
        self.az_unit = "deg"
        # Azimuth uncertainty is stored as a fraction of a full circle.
        self.az_unc_perc = inputs["az_unc"] / 360.
        # Lithostatic (vertical) stress gradient from density * g, in MPa/km;
        # max_sv is the sanity ceiling corresponding to 5000 kg/m^3.
        self.sv = (self.density * 9.81) / 1000
        self.max_sv = (5000 * 9.81) / 1000
        self.stress_unit = "MPa/km"
        self.sh_max_az = inputs["sh_max_az"]
        self.sh_min_az = inputs["sh_min_az"]
        self.mu = inputs["mu"]
        self.mu_unit = "unitless"
        # NOTE(review): (sqrt(mu^2 + 1))**2 simplifies to mu^2 + 1; the usual
        # frictional-limit factor is (sqrt(mu^2 + 1) + mu)**2 -- confirm intent.
        self.F_mu = (math.sqrt(self.mu ** 2 + 1)) ** 2
        abs_shmax = self.F_mu * (self.sv - self.hydro) + self.hydro
        abs_shmin = ((self.sv - self.hydro) / self.F_mu) + self.hydro
        # Default horizontal stress gradients per faulting regime:
        # reverse (r), strike-slip (ss) and normal (n).
        self.shmax_r = abs_shmax
        self.shmin_r = (abs_shmax - self.sv) / 2
        self.shmax_ss = abs_shmax
        self.shmin_ss = abs_shmin
        self.shmax_n = (self.sv - abs_shmin) / 2
        self.shmin_n = abs_shmin
class ModelInputs:
    """Resolve user-supplied model inputs against :class:`ModelDefaults`.

    Every geometry/stress quantity is stored as an :class:`UncClass`
    (mean + relative uncertainty); keys missing from ``input_dict`` fall
    back to the packaged defaults.
    """

    def __init__(self, input_dict):
        """
        Parameters
        ----------
        input_dict : dict
            User inputs. Recognized keys: "max_pf", "dip"/"dip_unc",
            "sv"/"sv_unc", "hydro" or "pf_max"+"depth", "hydro_under",
            "hydro_upper", "mu"/"mu_unc", "shmax"/"shmin" with optional
            "shMunc"/"shmiunc", "shmaxaz"/"shminaz"/"az_unc".
        """
        defaults = ModelDefaults()
        # Maximum pore-fluid pressure.
        if "max_pf" in input_dict.keys():
            self.max_pf = input_dict["max_pf"]
        else:
            self.max_pf = defaults.max_pf
        # Fault dip; a supplied "dip_unc" (degrees) is converted to a fraction.
        if "dip" in input_dict.keys():
            if "dip_unc" in input_dict.keys():
                self.Dip = UncClass(input_dict["dip"], defaults.dip_unit, input_dict["dip_unc"] / input_dict["dip"])
            else:
                self.Dip = UncClass(input_dict["dip"], defaults.dip_unit)
        else:
            self.Dip = UncClass(defaults.dip, defaults.dip_unit)
        # Vertical stress gradient (MPa/km); warn when it looks like an
        # absolute stress rather than a gradient.
        if "sv" in input_dict.keys():
            if input_dict["sv"] > defaults.max_sv:
                warnings.warn('Vertical stress gradient for density > 5000 kg/m^3. Are you sure you input a gradient?')
            if "sv_unc" in input_dict.keys():
                self.Sv = UncClass(input_dict["sv"], defaults.stress_unit, input_dict["sv_unc"])
            else:
                self.Sv = UncClass(input_dict["sv"], defaults.stress_unit)
        else:
            self.Sv = UncClass(defaults.sv, defaults.stress_unit)
        # Hydrostatic pressure gradient; may be derived from pf_max / depth.
        # NOTE(review): this branch keys on "pf_max" while the block above keys
        # on "max_pf" -- confirm the intended spelling with callers.
        if "hydro" in input_dict.keys():
            self.SHydro = UncClass(input_dict["hydro"], defaults.stress_unit)
        else:
            if "pf_max" in input_dict.keys():
                new_hydro = input_dict["pf_max"] / input_dict["depth"]
                self.SHydro = UncClass(new_hydro, defaults.stress_unit)
            else:
                self.SHydro = UncClass(defaults.hydro, defaults.stress_unit)
        # Lower/upper hydrostatic offsets as multiples of the mean gradient.
        if "hydro_under" in input_dict.keys():
            self.hydro_l = self.SHydro.mean * input_dict["hydro_under"]
        else:
            self.hydro_l = self.SHydro.mean * defaults.hydro_under
        if "hydro_upper" in input_dict.keys():
            self.hydro_u = self.SHydro.mean * input_dict["hydro_upper"]
        else:
            self.hydro_u = self.SHydro.mean * defaults.hydro_upper
        # Friction coefficient.
        if "mu" in input_dict.keys():
            if "mu_unc" in input_dict.keys():
                self.Mu = UncClass(input_dict["mu"], defaults.mu_unit, input_dict["mu_unc"])
            else:
                # BUG FIX: this branch previously discarded the supplied "mu"
                # and fell back to defaults.mu.
                self.Mu = UncClass(input_dict["mu"], defaults.mu_unit)
        else:
            self.Mu = UncClass(defaults.mu, defaults.mu_unit)
        # Horizontal stresses: start every regime at its default, then let the
        # user-supplied pair override the regime its ordering describes.
        # ROBUSTNESS FIX: previously, supplying "shmax" without "shmin" (or an
        # ordering that ties with Sv) left all six attributes unset, causing
        # AttributeError later; now those paths keep the defaults.
        self.ShMaxR = UncClass(defaults.shmax_r, defaults.stress_unit)
        self.ShMinR = UncClass(defaults.shmin_r, defaults.stress_unit)
        self.ShMaxSS = UncClass(defaults.shmax_ss, defaults.stress_unit)
        self.ShMinSS = UncClass(defaults.shmin_ss, defaults.stress_unit)
        self.ShMaxN = UncClass(defaults.shmax_n, defaults.stress_unit)
        self.ShMinN = UncClass(defaults.shmin_n, defaults.stress_unit)
        if "shmax" in input_dict.keys() and "shmin" in input_dict.keys():
            shmax = float(input_dict['shmax'])
            shmin = float(input_dict['shmin'])
            # Optional per-value uncertainties, converted to fractions.
            if {"shMunc", "shmiunc"} <= input_dict.keys():
                max_extra = (float(input_dict["shMunc"]) / shmax,)
                min_extra = (float(input_dict["shmiunc"]) / shmin,)
            else:
                max_extra = ()
                min_extra = ()
            if shmax > self.Sv.mean > shmin:
                # Strike-slip regime: shmax > Sv > shmin.
                self.ShMaxSS = UncClass(shmax, defaults.stress_unit, *max_extra)
                self.ShMinSS = UncClass(shmin, defaults.stress_unit, *min_extra)
            elif shmax > shmin > self.Sv.mean:
                # Reverse regime: shmax > shmin > Sv.
                self.ShMaxR = UncClass(shmax, defaults.stress_unit, *max_extra)
                self.ShMinR = UncClass(shmin, defaults.stress_unit, *min_extra)
            elif self.Sv.mean > shmax > shmin:
                # Normal regime: Sv > shmax > shmin.
                self.ShMaxN = UncClass(shmax, defaults.stress_unit, *max_extra)
                self.ShMinN = UncClass(shmin, defaults.stress_unit, *min_extra)
        # SHmax azimuth.
        if "shmaxaz" in input_dict.keys():
            if "az_unc" in input_dict.keys():
                self.ShMaxAz = UncClass(input_dict["shmaxaz"], defaults.az_unit, input_dict["az_unc"])
            else:
                self.ShMaxAz = UncClass(input_dict["shmaxaz"], defaults.az_unit, defaults.az_unc_perc)
        else:
            self.ShMaxAz = UncClass(defaults.sh_max_az, defaults.az_unit, defaults.az_unc_perc)
        # SHmin azimuth; when only shmaxaz is given, default to shmaxaz + 90 deg.
        if "shminaz" in input_dict.keys():
            if "az_unc" in input_dict.keys():
                self.ShMinAz = UncClass(input_dict["shminaz"], defaults.az_unit, input_dict["az_unc"])
            else:
                self.ShMinAz = UncClass(input_dict["shminaz"], defaults.az_unit, defaults.az_unc_perc)
        else:
            if "shmaxaz" in input_dict.keys():
                self.ShMinAz = UncClass(self.ShMaxAz.mean + 90., defaults.az_unit, defaults.az_unc_perc)
            else:
                self.ShMinAz = UncClass(defaults.sh_min_az, defaults.az_unit, defaults.az_unc_perc)

    def plot_uncertainty(self, stress, depth):
        """Plot histograms of the sampled input distributions at ``depth``.

        Parameters
        ----------
        stress : str
            Faulting regime whose horizontal stresses are sampled; one of
            "reverse", "strike-slip" or "normal" (anything else warns and
            samples a standard normal placeholder).
        depth : float
            Depth used to scale the stress gradients into absolute stresses
            (presumably km so that MPa/km * depth gives MPa -- confirm).
        """
        fig, axs = plt.subplots(2, 4, sharex='none', sharey='all')
        n_samples = 1000
        dip = np.random.normal(self.Dip.mean, self.Dip.std_unit(), n_samples)
        # NOTE(review): Mu is sampled with std_perc directly while every other
        # quantity uses std_unit() -- confirm this asymmetry is intended.
        mu = np.random.normal(self.Mu.mean, self.Mu.std_perc, n_samples)
        s_v = np.random.normal(self.Sv.mean, self.Sv.std_unit(), n_samples)
        # Hydrostatic gradient sampled uniformly between its lower/upper bounds.
        hydro1 = self.SHydro.mean - self.hydro_l
        hydro2 = self.SHydro.mean + self.hydro_u
        s_hydro = (hydro2 - hydro1) * np.random.random(n_samples) + hydro1
        if stress == "reverse":
            sh_max = np.random.normal(self.ShMaxR.mean, self.ShMaxR.std_unit(), n_samples)
            sh_min = np.random.normal(self.ShMinR.mean, self.ShMinR.std_unit(), n_samples)
        elif stress == "strike-slip":
            sh_max = np.random.normal(self.ShMaxSS.mean, self.ShMaxSS.std_unit(), n_samples)
            sh_min = np.random.normal(self.ShMinSS.mean, self.ShMinSS.std_unit(), n_samples)
        elif stress == "normal":
            sh_max = np.random.normal(self.ShMaxN.mean, self.ShMaxN.std_unit(), n_samples)
            sh_min = np.random.normal(self.ShMinN.mean, self.ShMinN.std_unit(), n_samples)
        else:
            sh_max = np.random.normal(0, 1, n_samples)
            sh_min = np.random.normal(0, 1, n_samples)
            warnings.warn("Stress field not properly defined.", UserWarning)
        shmax_az = np.random.normal(self.ShMaxAz.mean, self.ShMaxAz.std_unit(), n_samples)
        shmin_az = np.random.normal(self.ShMinAz.mean, self.ShMinAz.std_unit(), n_samples)
        # Scale gradients to absolute stresses at the requested depth.
        s_v = s_v * depth
        s_hydro = s_hydro * depth
        sh_max = sh_max * depth
        sh_min = sh_min * depth
        plot_datas = [dip, mu, s_v, s_hydro, sh_max, sh_min, shmax_az, shmin_az]
        titles = ["Dip", "Mu", "Vert. Stress [MPa]", "Hydro. Pres. [MPa]", "SHMax [MPa]", "Shmin [MPa]",
                  "Shmax Azimuth", "Shmin Azimuth"]
        for i, ax in enumerate(axs.flat):
            data = plot_datas[i]
            ax.hist(data, 50)
            ax.axvline(np.median(data), color="black")
            ax.set_title(titles[i])
            quantiles = np.quantile(data, [0.01, 0.5, 0.99])
            # Mu keeps two decimals; everything else is rounded to integers.
            decimals = 2 if titles[i] == "Mu" else 0
            ax.set_xticks(np.around(quantiles, decimals=decimals))
        fig.tight_layout()
class SegmentDet2dResult:
    """Deterministic result attached to one 2D fault-trace segment."""

    def __init__(self, x1, y1, x2, y2, result, metadata):
        """Store the segment endpoints, its result value and optional ids.

        ``line_id`` / ``seg_id`` attributes are only created when the
        corresponding keys exist in ``metadata``.
        """
        self.p1 = (x1, y1)
        self.p2 = (x2, y2)
        self.result = result
        for key in ("line_id", "seg_id"):
            if key in metadata:
                setattr(self, key, metadata[key])
class MeshFaceResult:
    """Monte-Carlo failure-pressure result for one triangular mesh face.

    Builds an empirical CDF (``self.ecdf``, columns [pressure, cumulative
    probability]) of the pore-fluid pressures at which the face fails
    (slip tendency >= friction coefficient mu). ``self.no_fail`` flags faces
    with no failing realizations.
    """

    def __init__(self, face_num, triangle, p1, p2, p3, pf_results):
        """
        Parameters
        ----------
        face_num : int
            Index of the mesh face.
        triangle
            Triangle/connectivity object for the face.
        p1, p2, p3
            Vertex coordinates of the face.
        pf_results : numpy.ndarray
            Monte-Carlo samples with columns [pore pressure, mu, slip tendency].
        """
        self.face_num = face_num
        self.triangle = triangle
        self.p1 = p1
        self.p2 = p2
        self.p3 = p3
        pf1 = pf_results[:, 0]
        mu1 = pf_results[:, 1]
        slip_tend = pf_results[:, 2]
        # Keep only the realizations that actually fail (slip tendency >= mu).
        pf2 = pf1[slip_tend >= mu1]
        n2 = pf2.size
        if n2 == 0:
            # No failures: degenerate ECDF pinned at the maximum sampled pressure.
            max_pf = np.max(pf1)
            x = np.array([max_pf, max_pf, max_pf])
            y = np.array([0., 0.5, 1.])
            self.ecdf = np.column_stack((x, y))
            self.no_fail = True
        elif 0 < n2 < 100:
            # BUG FIX: this branch was dead code -- ``n2 < 100 & n2 > 0``
            # parses as ``n2 < (100 & n2) > 0`` because bitwise & binds tighter
            # than comparisons, and (100 & n2) <= n2 always, so it never fired.
            self.no_fail = False
            pf2.sort()
            y = np.linspace(1 / n2, 1, n2)
            # Resample the sparse ECDF onto a fixed 100-point probability grid;
            # clamp outside [1/n2, 1] to the end samples (interp1d would raise
            # an out-of-bounds error at z < 1/n2 otherwise).
            z = np.linspace(1 / 100, 1, 100)
            pf2_interp = interp1d(y, pf2, kind='linear', bounds_error=False,
                                  fill_value=(pf2[0], pf2[-1]))
            self.ecdf = np.column_stack((pf2_interp(z), z))
        else:
            self.no_fail = False
            pf2.sort()
            y = np.linspace(1 / n2, 1, n2)
            self.ecdf = np.column_stack((pf2, y))

    def ecdf_cutoff(self, cutoff):
        """Return the pressure at a failure-probability cutoff.

        Parameters
        ----------
        cutoff : float
            Failure probability in percent (0-100); internally rescaled to 0-1.

        Returns
        -------
        float
            ECDF pressure whose cumulative probability is closest to ``cutoff``.
        """
        cutoff = cutoff / 100
        ind_fail = (np.abs(self.ecdf[:, 1] - cutoff)).argmin()
        fail_pressure = self.ecdf[ind_fail, 0]
        return fail_pressure
class SegmentMC2dResult:
    """Monte-Carlo failure-pressure result for one 2D fault-trace segment.

    ``self.ecdf`` holds the empirical CDF of failure pressures as columns
    [pressure, cumulative probability]; ``self.no_fail`` flags segments with
    no failing Monte-Carlo realizations.
    """

    def __init__(self, x1, y1, x2, y2, pf_results, metadata):
        """
        Parameters
        ----------
        x1, y1, x2, y2 : float
            Segment endpoint coordinates.
        pf_results : numpy.ndarray
            Monte-Carlo samples with columns [pore pressure, mu, slip tendency].
        metadata : dict
            Optional "line_id" and "seg_id" identifiers.
        """
        self.p1 = (x1, y1)
        self.p2 = (x2, y2)
        if "line_id" in metadata:
            self.line_id = metadata["line_id"]
        if "seg_id" in metadata:
            self.seg_id = metadata["seg_id"]
        pf1 = pf_results[:, 0]
        mu1 = pf_results[:, 1]
        slip_tend = pf_results[:, 2]
        # Keep only the realizations that fail (slip tendency >= mu).
        pf2 = pf1[slip_tend >= mu1]
        n2 = pf2.size
        if n2 == 0:
            # No failures: degenerate ECDF pinned at the maximum sampled pressure.
            max_pf = np.max(pf1)
            x = np.array([max_pf, max_pf, max_pf])
            y = np.array([0., 0.5, 1.])
            self.ecdf = np.column_stack((x, y))
            self.no_fail = True
        elif 0 < n2 < 100:
            # BUG FIX: this branch was dead code -- ``n2 < 100 & n2 > 0``
            # parses as ``n2 < (100 & n2) > 0`` because bitwise & binds tighter
            # than comparisons, and (100 & n2) <= n2 always, so it never fired.
            self.no_fail = False
            pf2.sort()
            y = np.linspace(1 / n2, 1, n2)
            # Resample the sparse ECDF onto a fixed 100-point probability grid;
            # clamp outside [1/n2, 1] to the end samples (interp1d would raise
            # an out-of-bounds error at z < 1/n2 otherwise).
            z = np.linspace(1 / 100, 1, 100)
            pf2_interp = interp1d(y, pf2, kind='linear', bounds_error=False,
                                  fill_value=(pf2[0], pf2[-1]))
            self.ecdf = np.column_stack((pf2_interp(z), z))
        else:
            self.no_fail = False
            pf2.sort()
            y = np.linspace(1 / n2, 1, n2)
            self.ecdf = np.column_stack((pf2, y))

    def ecdf_cutoff(self, cutoff):
        """Return the failure pressure at cumulative probability ``cutoff``.

        Parameters
        ----------
        cutoff : float
            Cumulative probability on the ECDF's 0-1 scale.
            NOTE(review): unlike MeshFaceResult.ecdf_cutoff this does NOT
            divide by 100 -- confirm which scale callers expect.

        Returns
        -------
        float
            Closest-match ECDF pressure, or the maximum ECDF pressure when
            the segment never fails.
        """
        if self.no_fail:
            fail_pressure = max(self.ecdf[:, 0])
        else:
            ind_fail = (np.abs(self.ecdf[:, 1] - cutoff)).argmin()
            fail_pressure = self.ecdf[ind_fail, 0]
        return fail_pressure

    def pressure_cutoff(self, cutoff):
        """Return the failure probability at pressure ``cutoff`` (0 if no failures).

        Parameters
        ----------
        cutoff : float
            Pore pressure value to look up on the ECDF.

        Returns
        -------
        float
        """
        if self.no_fail:
            fail_prob = 0.
        else:
            ind_fail = (np.abs(self.ecdf[:, 0] - cutoff)).argmin()
            fail_prob = self.ecdf[ind_fail, 1]
        return fail_prob

    def append_results(self, results):
        """Merge additional Monte-Carlo samples into the stored ECDF.

        Parameters
        ----------
        results : numpy.ndarray
            Additional samples with columns [pore pressure, mu, slip tendency].
        """
        pf1 = results[:, 0]
        mu1 = results[:, 1]
        slip_tend = results[:, 2]
        pf2 = pf1[slip_tend >= mu1]
        n2 = pf2.size
        # BUG FIX: was ``n2 == 0 & self.no_fail`` which parses as
        # ``n2 == (0 & self.no_fail)`` i.e. just ``n2 == 0``; use logical and.
        if n2 == 0 and self.no_fail:
            return
        self.no_fail = False
        # Fold the new failing pressures into the existing ECDF and rebuild.
        pf2 = np.concatenate((pf2, self.ecdf[:, 0]))
        pf2.sort()
        n2 = pf2.size
        y = np.linspace(1 / n2, 1, n2)
        self.ecdf = np.column_stack((pf2, y))

    def plot_ecdf(self, pressure):
        """Plot the ECDF as a step curve (``pressure`` is currently unused)."""
        fig, ax = plt.subplots()
        out_ecdf = self.ecdf
        ax.plot(out_ecdf[:, 0], out_ecdf[:, 1], drawstyle='steps')

    def plot_hist(self, n_bins=25):
        """Plot a histogram of the ECDF's pressure samples."""
        fig, ax = plt.subplots()
        hist_data = self.ecdf[:, 0]
        ax.hist(hist_data, bins=n_bins)
class Results2D:
    """
    Class to manage 2D results.

    Groups per-segment result objects (SegmentMC2dResult or
    SegmentDet2dResult) into their poly-lines, tracks the plotting range of
    the reduced results, and can export the segments to a GeoDataFrame.
    """
    def __init__(self, input_list, **kwargs):
        """
        Parameters
        ----------
        input_list : list
            Per-segment result objects. Each must carry ``line_id`` and
            ``seg_id`` plus either an ``ecdf`` attribute (Monte-Carlo) or a
            ``result`` attribute (deterministic).
        kwargs
            "cutoff": probability cutoff used to reduce MC ECDFs to a single
            value. NOTE(review): for MC inputs this key is effectively
            required -- without it the ``obj.ecdf_cutoff(self.cutoff)`` call
            below raises AttributeError. Confirm whether a default is wanted.
        """
        self.results_gdf = None
        self.segment_list = input_list
        # Group segments by line_id (assumed to span 0..num_lines-1) and sort
        # each line's segments by seg_id.
        num_lines = len(np.unique([obj.line_id for obj in input_list]))
        self.lines = []
        for line in range(num_lines):
            line_list = [obj for obj in input_list if obj.line_id == line]
            line_list.sort(key=lambda obj: obj.seg_id, reverse=False)
            self.lines.append(line_list)
        if "cutoff" in kwargs:
            self.cutoff = float(kwargs["cutoff"])
        # Detect the result flavor from the first segment's attributes.
        if "ecdf" in input_list[0].__dict__:
            self.type = "mc"
        elif "result" in input_list[0].__dict__:
            self.type = "det"
        # Collect the coordinate extents and the per-segment scalar results
        # to establish the plotting bounds.
        x = []
        y = []
        result = []
        for obj in input_list:
            x.append(obj.p1[0])
            x.append(obj.p2[0])
            y.append(obj.p1[1])
            y.append(obj.p2[1])
            if self.type == "mc":
                result.append(obj.ecdf_cutoff(self.cutoff))
            if self.type == "det":
                result.append(obj.result)
        self.xmin = min(x)
        self.xmax = max(x)
        self.ymin = min(y)
        self.ymax = max(y)
        self.plotmin = min(result)
        self.plotmax = max(result)
    def update_cutoff(self, cutoff):
        """Re-reduce every MC segment at a new cutoff and refresh the plot range.

        No-op for deterministic results.

        Returns
        -------
        None
        """
        if self.type == "det":
            return
        result = []
        for line in self.lines:
            for obj in line:
                result.append(obj.ecdf_cutoff(cutoff))
        self.plotmin = min(result)
        self.plotmax = max(result)
        self.cutoff = cutoff
        return
    def plot_ecdf(self, pressure):
        """Plot each line's ECDF as a curve (``pressure`` is currently unused).

        NOTE(review): for multi-segment lines the stacked ECDFs are collected
        but never combined or plotted -- ``out_ecdf`` keeps its value from a
        previous single-segment line (or is unbound on the first iteration).
        Looks unfinished; confirm intended behavior.
        """
        fig, ax = plt.subplots()
        for line in self.lines:
            if len(line) != 1:
                ecdf_stack = []
                for segment in line:
                    ecdf_stack.append(segment.ecdf)
            else:
                out_ecdf = line[0].ecdf
            ax.plot(out_ecdf[:, 0], out_ecdf[:, 1], 'k-')
    def generate_gpd_df(self, crs, pres_cutoff=2.0, prob_cutoff=5):
        """Build a GeoDataFrame of segments with cutoff-reduced result columns.

        Parameters
        ----------
        crs
            Coordinate reference system passed to geopandas.
        pres_cutoff : float
            Pressure at which each segment's failure probability is evaluated.
        prob_cutoff : float
            Probability at which each segment's failure pressure is evaluated.
            NOTE(review): SegmentMC2dResult.ecdf_cutoff compares this against
            the 0-1 ECDF scale, so 5 always picks the ECDF maximum -- confirm
            whether a percent-to-fraction conversion is missing.

        Stores the result in ``self.results_gdf``.
        """
        segs_geo = []
        line_id_list = []
        seg_id_list = []
        prob_list = []
        pres_list = []
        ind_list = []
        ind = 1
        for seg in self.segment_list:
            geom = shpg.LineString([seg.p1, seg.p2])
            segs_geo.append(geom)
            line_id_list.append(seg.line_id)
            seg_id_list.append(seg.seg_id)
            pres_list.append(seg.ecdf_cutoff(prob_cutoff))
            prob_list.append(seg.pressure_cutoff(pres_cutoff))
            ind_list.append(ind)
            ind = ind + 1
        gdf_dict = {'index': ind_list, 'geometry': segs_geo, 'line_id': line_id_list, 'seg_id': seg_id_list, 'cutoff_prob': prob_list, 'cutoff_pres': pres_list}
        gdf = gpd.GeoDataFrame(gdf_dict, crs=crs)
        self.results_gdf = gdf
        return
    def rebuild_seg_list(self):
        # Flatten the per-line grouping back into the flat segment list.
        self.segment_list = [seg for line in self.lines for seg in line]
| [
"math.sqrt",
"numpy.column_stack",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.random.random",
"numpy.max",
"numpy.linspace",
"numpy.concatenate",
"warnings.warn",
"geopandas.GeoDataFrame",
"numpy.random.normal",
"numpy.abs",
"shapely.geometry.LineString",
"numpy.around",
"os.pat... | [((979, 1017), 'os.path.join', 'os.path.join', (['file_root', 'defaults_file'], {}), '(file_root, defaults_file)\n', (991, 1017), False, 'import os\n'), ((9116, 9163), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(4)'], {'sharex': '"""none"""', 'sharey': '"""all"""'}), "(2, 4, sharex='none', sharey='all')\n", (9128, 9163), True, 'import matplotlib.pyplot as plt\n'), ((9280, 9339), 'numpy.random.normal', 'np.random.normal', (['self.Mu.mean', 'self.Mu.std_perc', 'n_samples'], {}), '(self.Mu.mean, self.Mu.std_perc, n_samples)\n', (9296, 9339), True, 'import numpy as np\n'), ((17176, 17190), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (17188, 17190), True, 'import matplotlib.pyplot as plt\n'), ((17343, 17357), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (17355, 17357), True, 'import matplotlib.pyplot as plt\n'), ((19263, 19277), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19275, 19277), True, 'import matplotlib.pyplot as plt\n'), ((20377, 20412), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['gdf_dict'], {'crs': 'crs'}), '(gdf_dict, crs=crs)\n', (20393, 20412), True, 'import geopandas as gpd\n'), ((868, 893), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (883, 893), False, 'import os\n'), ((1079, 1099), 'json.load', 'json.load', (['load_file'], {}), '(load_file)\n', (1088, 1099), False, 'import json\n'), ((1982, 2009), 'math.sqrt', 'math.sqrt', (['(self.mu ** 2 + 1)'], {}), '(self.mu ** 2 + 1)\n', (1991, 2009), False, 'import math\n'), ((13024, 13035), 'numpy.max', 'np.max', (['pf1'], {}), '(pf1)\n', (13030, 13035), True, 'import numpy as np\n'), ((13052, 13086), 'numpy.array', 'np.array', (['[max_pf, max_pf, max_pf]'], {}), '([max_pf, max_pf, max_pf])\n', (13060, 13086), True, 'import numpy as np\n'), ((13165, 13190), 'numpy.array', 'np.array', (['[0.0, 0.5, 1.0]'], {}), '([0.0, 0.5, 1.0])\n', (13173, 13190), True, 'import numpy as np\n'), ((13257, 
13280), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (13272, 13280), True, 'import numpy as np\n'), ((14876, 14887), 'numpy.max', 'np.max', (['pf1'], {}), '(pf1)\n', (14882, 14887), True, 'import numpy as np\n'), ((14904, 14938), 'numpy.array', 'np.array', (['[max_pf, max_pf, max_pf]'], {}), '([max_pf, max_pf, max_pf])\n', (14912, 14938), True, 'import numpy as np\n'), ((15017, 15042), 'numpy.array', 'np.array', (['[0.0, 0.5, 1.0]'], {}), '([0.0, 0.5, 1.0])\n', (15025, 15042), True, 'import numpy as np\n'), ((15109, 15132), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (15124, 15132), True, 'import numpy as np\n'), ((16941, 16979), 'numpy.concatenate', 'np.concatenate', (['(pf2, self.ecdf[:, 0])'], {}), '((pf2, self.ecdf[:, 0]))\n', (16955, 16979), True, 'import numpy as np\n'), ((17045, 17071), 'numpy.linspace', 'np.linspace', (['(1 / n2)', '(1)', 'n2'], {}), '(1 / n2, 1, n2)\n', (17056, 17071), True, 'import numpy as np\n'), ((17096, 17121), 'numpy.column_stack', 'np.column_stack', (['(pf2, y)'], {}), '((pf2, y))\n', (17111, 17121), True, 'import numpy as np\n'), ((17724, 17770), 'numpy.unique', 'np.unique', (['[obj.line_id for obj in input_list]'], {}), '([obj.line_id for obj in input_list])\n', (17733, 17770), True, 'import numpy as np\n'), ((19865, 19898), 'shapely.geometry.LineString', 'shpg.LineString', (['[seg.p1, seg.p2]'], {}), '([seg.p1, seg.p2])\n', (19880, 19898), True, 'import shapely.geometry as shpg\n'), ((3193, 3306), 'warnings.warn', 'warnings.warn', (['"""Vertical stress gradient for density > 5000 kg/m^3. Are you sure you input a gradient?"""'], {}), "(\n 'Vertical stress gradient for density > 5000 kg/m^3. 
Are you sure you input a gradient?'\n )\n", (3206, 3306), False, 'import warnings\n'), ((9689, 9716), 'numpy.random.random', 'np.random.random', (['n_samples'], {}), '(n_samples)\n', (9705, 9716), True, 'import numpy as np\n'), ((11389, 11425), 'numpy.quantile', 'np.quantile', (['data', '[0.01, 0.5, 0.99]'], {}), '(data, [0.01, 0.5, 0.99])\n', (11400, 11425), True, 'import numpy as np\n'), ((13443, 13469), 'numpy.linspace', 'np.linspace', (['(1 / n2)', '(1)', 'n2'], {}), '(1 / n2, 1, n2)\n', (13454, 13469), True, 'import numpy as np\n'), ((13509, 13539), 'numpy.linspace', 'np.linspace', (['(1 / n2_2)', '(1)', 'n2_2'], {}), '(1 / n2_2, 1, n2_2)\n', (13520, 13539), True, 'import numpy as np\n'), ((13565, 13596), 'scipy.interpolate.interp1d', 'interp1d', (['y', 'pf2'], {'kind': '"""linear"""'}), "(y, pf2, kind='linear')\n", (13573, 13596), False, 'from scipy.interpolate import interp1d\n'), ((13655, 13682), 'numpy.column_stack', 'np.column_stack', (['(pf2_2, z)'], {}), '((pf2_2, z))\n', (13670, 13682), True, 'import numpy as np\n'), ((13795, 13821), 'numpy.linspace', 'np.linspace', (['(1 / n2)', '(1)', 'n2'], {}), '(1 / n2, 1, n2)\n', (13806, 13821), True, 'import numpy as np\n'), ((13846, 13871), 'numpy.column_stack', 'np.column_stack', (['(pf2, y)'], {}), '((pf2, y))\n', (13861, 13871), True, 'import numpy as np\n'), ((14140, 14172), 'numpy.abs', 'np.abs', (['(self.ecdf[:, 1] - cutoff)'], {}), '(self.ecdf[:, 1] - cutoff)\n', (14146, 14172), True, 'import numpy as np\n'), ((15295, 15321), 'numpy.linspace', 'np.linspace', (['(1 / n2)', '(1)', 'n2'], {}), '(1 / n2, 1, n2)\n', (15306, 15321), True, 'import numpy as np\n'), ((15361, 15391), 'numpy.linspace', 'np.linspace', (['(1 / n2_2)', '(1)', 'n2_2'], {}), '(1 / n2_2, 1, n2_2)\n', (15372, 15391), True, 'import numpy as np\n'), ((15417, 15448), 'scipy.interpolate.interp1d', 'interp1d', (['y', 'pf2'], {'kind': '"""linear"""'}), "(y, pf2, kind='linear')\n", (15425, 15448), False, 'from scipy.interpolate import 
interp1d\n'), ((15507, 15534), 'numpy.column_stack', 'np.column_stack', (['(pf2_2, z)'], {}), '((pf2_2, z))\n', (15522, 15534), True, 'import numpy as np\n'), ((15647, 15673), 'numpy.linspace', 'np.linspace', (['(1 / n2)', '(1)', 'n2'], {}), '(1 / n2, 1, n2)\n', (15658, 15673), True, 'import numpy as np\n'), ((15698, 15723), 'numpy.column_stack', 'np.column_stack', (['(pf2, y)'], {}), '((pf2, y))\n', (15713, 15723), True, 'import numpy as np\n'), ((10414, 10447), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'n_samples'], {}), '(0, 1, n_samples)\n', (10430, 10447), True, 'import numpy as np\n'), ((10469, 10502), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'n_samples'], {}), '(0, 1, n_samples)\n', (10485, 10502), True, 'import numpy as np\n'), ((10515, 10579), 'warnings.warn', 'warnings.warn', (['"""Stress field not properly defined."""', 'UserWarning'], {}), "('Stress field not properly defined.', UserWarning)\n", (10528, 10579), False, 'import warnings\n'), ((11289, 11304), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (11298, 11304), True, 'import numpy as np\n'), ((11496, 11528), 'numpy.around', 'np.around', (['quantiles'], {'decimals': '(2)'}), '(quantiles, decimals=2)\n', (11505, 11528), True, 'import numpy as np\n'), ((11583, 11615), 'numpy.around', 'np.around', (['quantiles'], {'decimals': '(0)'}), '(quantiles, decimals=0)\n', (11592, 11615), True, 'import numpy as np\n'), ((16091, 16123), 'numpy.abs', 'np.abs', (['(self.ecdf[:, 1] - cutoff)'], {}), '(self.ecdf[:, 1] - cutoff)\n', (16097, 16123), True, 'import numpy as np\n'), ((16464, 16496), 'numpy.abs', 'np.abs', (['(self.ecdf[:, 0] - cutoff)'], {}), '(self.ecdf[:, 0] - cutoff)\n', (16470, 16496), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""Compare the INDUS and PLUMED virial outputs and report their percent error."""
import os, sys, shutil, subprocess
import numpy as np

# Use a shell script to extract the virial values from the raw output files.
read_virial_script = "read_virial.sh"
shell_command = ["bash", read_virial_script]
# ROBUSTNESS FIX: fail loudly if the extraction script exits non-zero
# (Popen(...).wait() silently discarded the return code).
subprocess.run(shell_command, check=True)

# Load values. The PLUMED file stores twice the virial, hence the 0.5 factor.
# BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float is the documented replacement.
indus_virial = np.loadtxt("indus_virial.out", comments='#', dtype=float)
plumed_virial = 0.5 * np.loadtxt("plumed_2x_virial.out", comments='#', dtype=float)

# Elementwise percent error of PLUMED relative to INDUS.
err = (plumed_virial/indus_virial - 1.0) * 100.0
abs_err = np.abs(err)
avg_err = err.mean()
std_err = err.std()
avg_abs_err = abs_err.mean()
std_abs_err = abs_err.std()

print("Error (%)")
print("  Relative: {} +/- {}".format(avg_err, std_err))
print("  Absolute: {} +/- {}".format(avg_abs_err, std_abs_err))
| [
"subprocess.Popen",
"numpy.loadtxt",
"numpy.abs"
] | [((209, 240), 'subprocess.Popen', 'subprocess.Popen', (['shell_command'], {}), '(shell_command)\n', (225, 240), False, 'import os, sys, shutil, subprocess\n'), ((284, 344), 'numpy.loadtxt', 'np.loadtxt', (['"""indus_virial.out"""'], {'comments': '"""#"""', 'dtype': 'np.float'}), "('indus_virial.out', comments='#', dtype=np.float)\n", (294, 344), True, 'import numpy as np\n'), ((520, 531), 'numpy.abs', 'np.abs', (['err'], {}), '(err)\n', (526, 531), True, 'import numpy as np\n'), ((367, 431), 'numpy.loadtxt', 'np.loadtxt', (['"""plumed_2x_virial.out"""'], {'comments': '"""#"""', 'dtype': 'np.float'}), "('plumed_2x_virial.out', comments='#', dtype=np.float)\n", (377, 431), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
# coding=utf-8
# ================================================================
#
# Editor : PyCharm
# File name : RFData.py
# Author : LiuBo
# Created date: 2019-05-09 09:21
# Description :
#
# ================================================================
import numpy as np
import struct
class Data(object):
    """Simple container pairing feature vectors with their class labels."""

    def __init__(self):
        """Start with empty, per-instance feature and label lists."""
        self.feature = []
        self.label = []
class RFData(object):
    def __init__(self):
        """Create empty train/test sample containers (filled by the loader methods)."""
        self.train = Data()
        self.test = Data()
    def get_data_from_sample(self, train_sample_file, test_sample_file,
                             feature_file_list, class_num, feature_dimension):
        """Load train/test features from binary feature files into self.train/self.test.

        Parameters
        ----------
        train_sample_file, test_sample_file : str
            Text files whose first line lists the per-class sample counts
            (space separated) and whose following lines each start with a
            sample index.
        feature_file_list : list of str
            Binary files of 32-bit float feature vectors, one vector of
            ``feature_dimension`` values per sample, addressed by sample index.
        class_num : int
            Number of classes; labels assigned are 0..class_num-1.
        feature_dimension : int
            Number of float values per sample in each feature file.

        Notes
        -----
        Features from multiple files are concatenated per sample, and the
        training set is shuffled at the end. NOTE(review): every feature file
        is re-opened and re-seeked once per sample, which is slow for large
        sample sets -- consider opening each file once.
        """
        # Parse the per-class sample indices for the training set.
        train_list = list()
        with open(train_sample_file, "r") as f:
            line = f.readline()
            line = line.strip('\n')
            temp_string = line.split(' ')
            for i in range(class_num):
                temp_list = list()
                for j in range(int(temp_string[i])):
                    line = f.readline()
                    line = line.strip('\n')
                    line = line.split(' ')
                    temp_list.append(int(line[0]))
                train_list.append(temp_list)
        # Parse the per-class sample indices for the test set.
        test_list = list()
        with open(test_sample_file, "r") as f:
            line = f.readline()
            line = line.strip('\n')
            temp_string = line.split(' ')
            for i in range(class_num):
                temp_list = list()
                for j in range(int(temp_string[i])):
                    line = f.readline()
                    line = line.strip('\n')
                    line = line.split(' ')
                    temp_list.append(int(line[0]))
                test_list.append(temp_list)
        # struct format for one sample: feature_dimension consecutive floats.
        read_format = str(feature_dimension) + "f"
        for i in range(class_num):
            for j in range(len(train_list[i])):
                index = train_list[i][j]
                feature_data = None
                for k in range(len(feature_file_list)):
                    with open(feature_file_list[k], "rb") as f:
                        # Each sample occupies feature_dimension * 4 bytes (float32).
                        f.seek(index * feature_dimension * 4)
                        buf = f.read(feature_dimension * 4)
                        data = struct.unpack(read_format, buf)
                        if k == 0:
                            feature_data = np.array(data)
                        else:
                            # Concatenate features from subsequent files.
                            feature_data = np.append(feature_data, data)
                self.train.feature.append(feature_data)
                self.train.label.append(i)
            for j in range(len(test_list[i])):
                index = test_list[i][j]
                feature_data = None
                for k in range(len(feature_file_list)):
                    with open(feature_file_list[k], "rb") as f:
                        f.seek(index * feature_dimension * 4)
                        buf = f.read(feature_dimension * 4)
                        data = struct.unpack(read_format, buf)
                        if k == 0:
                            feature_data = np.array(data)
                        else:
                            feature_data = np.append(feature_data, data)
                self.test.feature.append(feature_data)
                self.test.label.append(i)
        # Shuffle the training set, applying one permutation to both features
        # and labels so they stay aligned.
        permutation = np.random.permutation(len(self.train.feature))
        shuffle_data = np.array(self.train.feature)[permutation]
        shuffle_label = np.array(self.train.label)[permutation]
        self.train.feature = shuffle_data
        self.train.label = shuffle_label
    def get_data_from_sample_txt(self, train_sample_file, test_sample_file,
                                 feature_file_list, class_num, feature_dimension_list):
        """Load train/test features and labels from text feature files.

        Same sample-file format as ``get_data_from_sample``: first line holds
        ``class_num`` space-separated per-class counts, each following line
        starts with an integer sample index. Here each feature file is a text
        file with one sample per line; line ``index`` of file ``k`` holds at
        least ``feature_dimension_list[k]`` space-separated float values. A
        sample's feature vector concatenates the values from every file.

        Side effects: appends feature vectors / integer class labels to
        ``self.train`` and ``self.test``, then shuffles the training split
        (features and labels in unison).
        """
        # Parse per-class sample indices for the training split.
        train_list = list()
        with open(train_sample_file, "r") as f:
            line = f.readline()
            line = line.strip('\n')
            temp_string = line.split(' ')  # per-class sample counts
            for i in range(class_num):
                temp_list = list()
                for j in range(int(temp_string[i])):
                    line = f.readline()
                    line = line.strip('\n')
                    line = line.split(' ')
                    temp_list.append(int(line[0]))  # first token is the sample index
                train_list.append(temp_list)
        # Parse per-class sample indices for the test split (same format).
        test_list = list()
        with open(test_sample_file, "r") as f:
            line = f.readline()
            line = line.strip('\n')
            temp_string = line.split(' ')
            for i in range(class_num):
                temp_list = list()
                for j in range(int(temp_string[i])):
                    line = f.readline()
                    line = line.strip('\n')
                    line = line.split(' ')
                    temp_list.append(int(line[0]))
                test_list.append(temp_list)
        # Read every feature file fully into memory up front (one list of lines per file).
        lines_list = list()
        for k in range(len(feature_file_list)):
            with open(feature_file_list[k]) as f:
                lines = f.readlines()
                lines_list.append(lines)
        for i in range(class_num):
            for j in range(len(train_list[i])):
                index = train_list[i][j]
                feature_data = None
                # Concatenate this sample's values from every feature file.
                for k in range(len(feature_file_list)):
                    temp_string = lines_list[k][index]
                    temp_string = temp_string.strip('\n')
                    temp_string = temp_string.split(' ')
                    data = list()
                    for n in range(feature_dimension_list[k]):
                        data.append(float(temp_string[n]))
                    if k == 0:
                        feature_data = np.array(data)
                    else:
                        feature_data = np.append(feature_data, data)
                self.train.feature.append(feature_data)
                self.train.label.append(i)
            for j in range(len(test_list[i])):
                index = test_list[i][j]
                feature_data = None
                for k in range(len(feature_file_list)):
                    temp_string = lines_list[k][index]
                    temp_string = temp_string.strip('\n')
                    temp_string = temp_string.split(' ')
                    data = list()
                    for n in range(feature_dimension_list[k]):
                        data.append(float(temp_string[n]))
                    if k == 0:
                        feature_data = np.array(data)
                    else:
                        feature_data = np.append(feature_data, data)
                self.test.feature.append(feature_data)
                self.test.label.append(i)
        # Shuffle the training split; features and labels share one permutation.
        permutation = np.random.permutation(len(self.train.feature))
        shuffle_data = np.array(self.train.feature)[permutation]
        shuffle_label = np.array(self.train.label)[permutation]
        self.train.feature = shuffle_data
        self.train.label = shuffle_label
def clear_data(self):
self.train.feature = list()
self.train.label = list()
self.test.feature = list()
self.test.label = list()
| [
"numpy.append",
"numpy.array",
"struct.unpack"
] | [((3484, 3512), 'numpy.array', 'np.array', (['self.train.feature'], {}), '(self.train.feature)\n', (3492, 3512), True, 'import numpy as np\n'), ((3551, 3577), 'numpy.array', 'np.array', (['self.train.label'], {}), '(self.train.label)\n', (3559, 3577), True, 'import numpy as np\n'), ((6881, 6909), 'numpy.array', 'np.array', (['self.train.feature'], {}), '(self.train.feature)\n', (6889, 6909), True, 'import numpy as np\n'), ((6948, 6974), 'numpy.array', 'np.array', (['self.train.label'], {}), '(self.train.label)\n', (6956, 6974), True, 'import numpy as np\n'), ((2320, 2351), 'struct.unpack', 'struct.unpack', (['read_format', 'buf'], {}), '(read_format, buf)\n', (2333, 2351), False, 'import struct\n'), ((3059, 3090), 'struct.unpack', 'struct.unpack', (['read_format', 'buf'], {}), '(read_format, buf)\n', (3072, 3090), False, 'import struct\n'), ((5774, 5788), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5782, 5788), True, 'import numpy as np\n'), ((5856, 5885), 'numpy.append', 'np.append', (['feature_data', 'data'], {}), '(feature_data, data)\n', (5865, 5885), True, 'import numpy as np\n'), ((6576, 6590), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (6584, 6590), True, 'import numpy as np\n'), ((6658, 6687), 'numpy.append', 'np.append', (['feature_data', 'data'], {}), '(feature_data, data)\n', (6667, 6687), True, 'import numpy as np\n'), ((2432, 2446), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2440, 2446), True, 'import numpy as np\n'), ((2522, 2551), 'numpy.append', 'np.append', (['feature_data', 'data'], {}), '(feature_data, data)\n', (2531, 2551), True, 'import numpy as np\n'), ((3171, 3185), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3179, 3185), True, 'import numpy as np\n'), ((3261, 3290), 'numpy.append', 'np.append', (['feature_data', 'data'], {}), '(feature_data, data)\n', (3270, 3290), True, 'import numpy as np\n')] |
import sklearn.linear_model
import numpy as np

# Demo: binary classification with logistic regression.
# Five training samples, two numeric features each.
_samples = [
    [2100, 800],
    [2500, 850],
    [1800, 760],
    [2000, 800],
    [2300, 810],
]
data = np.array(_samples)
# Binary class labels for the samples above.
price = np.array([1, 0, 1, 1, 1])
reg = sklearn.linear_model.LogisticRegression()
reg.fit(data, price)
# Expected output: [0 1]
print(reg.predict([[3000, 900], [1500, 700]]))
"numpy.array"
] | [((116, 191), 'numpy.array', 'np.array', (['[[2100, 800], [2500, 850], [1800, 760], [2000, 800], [2300, 810]]'], {}), '([[2100, 800], [2500, 850], [1800, 760], [2000, 800], [2300, 810]])\n', (124, 191), True, 'import numpy as np\n'), ((234, 259), 'numpy.array', 'np.array', (['[1, 0, 1, 1, 1]'], {}), '([1, 0, 1, 1, 1])\n', (242, 259), True, 'import numpy as np\n')] |
from datasets import TFrecords2Dataset
import tensorflow as tf
import numpy as np
import cv2
import os
import time
from nets import txtbox_384
from processing import ssd_vgg_preprocessing
# Restrict TensorFlow to the first GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
slim = tf.contrib.slim
tf.logging.set_verbosity(tf.logging.INFO)
# Number of examples to visualize before stopping.
show_pic_sum = 10
# Directory where annotated images are written.
save_dir = 'pic_test_dataset'
tf.app.flags.DEFINE_string(
    'dataset_dir', 'tfrecord_train', 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
    'num_readers', 2,
    'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
    'batch_size', 2, 'The number of samples in each batch.')
FLAGS = tf.app.flags.FLAGS
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
def draw_polygon(img,x1,y1,x2,y2,x3,y3,x4,y4, color=(255, 0, 0)):
    """Draw a closed quadrilateral on ``img`` by connecting the four corners.

    Coordinates may be floats; they are truncated to ints before drawing.
    The polygon edges are drawn with thickness 2 in the given BGR ``color``.
    Returns the (modified in place) image.
    """
    corners = [(int(x1), int(y1)), (int(x2), int(y2)),
               (int(x3), int(y3)), (int(x4), int(y4))]
    # Connect consecutive corners, wrapping from the last back to the first.
    for start, end in zip(corners, corners[1:] + corners[:1]):
        cv2.line(img, start, end, color, 2)
    return img
def run():
    """Read the TFRecord dataset, encode ground-truth boxes, and dump annotated images.

    Builds a slim data provider over FLAGS.dataset_dir, preprocesses each
    example to 384x384, encodes the oriented boxes against the TextBoxes
    anchors, then runs a TF session that draws positive (blue) and negative
    (red) quadrilaterals on up to ``show_pic_sum`` images saved in ``save_dir``.
    """
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    print('-----start test-------')
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    with tf.device('/GPU:0'):
        dataset = TFrecords2Dataset.get_datasets(FLAGS.dataset_dir)
        print(dataset)
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            num_readers=FLAGS.num_readers,
            common_queue_capacity=20 * FLAGS.batch_size,
            common_queue_min=10 * FLAGS.batch_size,
            shuffle=True)
        print('provider:',provider)
        # x1..y4 are the four corners of each oriented ground-truth box.
        [image, shape, glabels, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4] = provider.get(['image', 'shape',
                                                                                        'object/label',
                                                                                        'object/bbox',
                                                                                        'object/oriented_bbox/x1',
                                                                                        'object/oriented_bbox/x2',
                                                                                        'object/oriented_bbox/x3',
                                                                                        'object/oriented_bbox/x4',
                                                                                        'object/oriented_bbox/y1',
                                                                                        'object/oriented_bbox/y2',
                                                                                        'object/oriented_bbox/y3',
                                                                                        'object/oriented_bbox/y4'
                                                                                        ])
        print('image:',image)
        print('shape:',shape)
        print('glabel:',glabels)
        print('gboxes:',gbboxes)
        gxs = tf.transpose(tf.stack([x1,x2,x3,x4])) #shape = (N,4)
        gys = tf.transpose(tf.stack([y1, y2, y3, y4]))
        image = tf.identity(image, 'input_image')
        text_shape = (384, 384)
        # Resize/augment the image and transform box corners into the new frame.
        image, glabels, gbboxes, gxs, gys= ssd_vgg_preprocessing.preprocess_image(image, glabels,gbboxes,gxs, gys,
                                                                                 text_shape,is_training=True,
                                                                                 data_format='NHWC')
        x1, x2 , x3, x4 = tf.unstack(gxs, axis=1)
        y1, y2, y3, y4 = tf.unstack(gys, axis=1)
        text_net = txtbox_384.TextboxNet()
        text_anchors = text_net.anchors(text_shape)
        # Match ground truth against anchors to get training targets.
        e_localisations, e_scores, e_labels = text_net.bboxes_encode( glabels, gbboxes, text_anchors, gxs, gys)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options, allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        j = 0
        all_time = 0
        try:
            while not coord.should_stop() and j < show_pic_sum:
                start_time = time.time()
                image_sess, label_sess, gbbox_sess, x1_sess, x2_sess, x3_sess, x4_sess, y1_sess, y2_sess, y3_sess, y4_sess,p_localisations, p_scores, p_labels = sess.run([
                    image, glabels, gbboxes, x1, x2, x3, x4, y1, y2, y3, y4,e_localisations , e_scores, e_labels])
                end_time = time.time() - start_time
                all_time += end_time
                image_np = image_sess
                # print(image_np)
                # print('label_sess:',label_sess)
                p_labels_concat = np.concatenate(p_labels)
                p_scores_concat = np.concatenate(p_scores)
                debug = False
                if debug is True:
                    print(p_labels)
                    print('l_labels:', len(p_labels_concat[p_labels_concat.nonzero()]),p_labels_concat[p_labels_concat.nonzero()] )
                    print('p_socres:', len(p_scores_concat[p_scores_concat.nonzero()]), p_scores_concat[p_scores_concat.nonzero()])
                    # print(img_np.shape)
                    print('label_sess:', np.array(list(label_sess)).shape, list(label_sess))
                img_np = np.array(image_np)
                # Round-trip through disk so OpenCV gets a uint8 BGR image.
                cv2.imwrite('{}/{}.png'.format(save_dir, j), img_np)
                img_np = cv2.imread('{}/{}.png'.format(save_dir, j))
                h, w, d = img_np.shape
                label_sess = list(label_sess)
                # for i , label in enumerate(label_sess):
                i = 0
                num_correct = 0
                # Corner coords are normalized; scale by image width/height to draw.
                for label in label_sess:
                    # print(int(label) == 1)
                    if int(label) == 1:
                        num_correct += 1
                        img_np = draw_polygon(img_np,x1_sess[i] * w, y1_sess[i]*h, x2_sess[i]*w, y2_sess[i]*h, x3_sess[i]*w, y3_sess[i]*h, x4_sess[i]*w, y4_sess[i]*h)
                    if int(label) == 0:
                        img_np = draw_polygon(img_np,x1_sess[i] * w, y1_sess[i]*h, x2_sess[i]*w, y2_sess[i]*h, x3_sess[i]*w, y3_sess[i]*h, x4_sess[i]*w, y4_sess[i]*h, color=(0, 0, 255))
                    i += 1
                img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
                cv2.imwrite('{}'.format(os.path.join(save_dir, str(j)+'.png')), img_np)
                j+= 1
                print('correct:', num_correct)
        except tf.errors.OutOfRangeError:
            print('done')
        finally:
            print('done')
            coord.request_stop()
        print('all time:', all_time, 'average:', all_time / show_pic_sum)
        coord.join(threads=threads)
# Script entry point.
if __name__ == '__main__':
    run()
| [
"tensorflow.unstack",
"tensorflow.logging.set_verbosity",
"numpy.array",
"tensorflow.GPUOptions",
"os.path.exists",
"processing.ssd_vgg_preprocessing.preprocess_image",
"tensorflow.train.Coordinator",
"tensorflow.Session",
"cv2.line",
"numpy.concatenate",
"tensorflow.ConfigProto",
"tensorflow.... | [((266, 307), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (290, 307), True, 'import tensorflow as tf\n'), ((362, 478), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_dir"""', '"""tfrecord_train"""', '"""The directory where the dataset files are stored."""'], {}), "('dataset_dir', 'tfrecord_train',\n 'The directory where the dataset files are stored.')\n", (388, 478), True, 'import tensorflow as tf\n'), ((484, 600), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_readers"""', '(2)', '"""The number of parallel readers that read data from the dataset."""'], {}), "('num_readers', 2,\n 'The number of parallel readers that read data from the dataset.')\n", (511, 600), True, 'import tensorflow as tf\n'), ((612, 700), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(2)', '"""The number of samples in each batch."""'], {}), "('batch_size', 2,\n 'The number of samples in each batch.')\n", (639, 700), True, 'import tensorflow as tf\n'), ((741, 765), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (755, 765), False, 'import os\n'), ((772, 793), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (783, 793), False, 'import os\n'), ((1059, 1102), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', 'color', '(2)'], {}), '(img, (x1, y1), (x2, y2), color, 2)\n', (1067, 1102), False, 'import cv2\n'), ((1102, 1145), 'cv2.line', 'cv2.line', (['img', '(x2, y2)', '(x3, y3)', 'color', '(2)'], {}), '(img, (x2, y2), (x3, y3), color, 2)\n', (1110, 1145), False, 'import cv2\n'), ((1145, 1188), 'cv2.line', 'cv2.line', (['img', '(x3, y3)', '(x4, y4)', 'color', '(2)'], {}), '(img, (x3, y3), (x4, y4), color, 2)\n', (1153, 1188), False, 'import cv2\n'), ((1188, 1231), 'cv2.line', 'cv2.line', (['img', '(x4, y4)', '(x1, y1)', 'color', '(2)'], {}), '(img, (x4, 
y4), (x1, y1), color, 2)\n', (1196, 1231), False, 'import cv2\n'), ((4223, 4273), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.7)'}), '(per_process_gpu_memory_fraction=0.7)\n', (4236, 4273), True, 'import tensorflow as tf\n'), ((4290, 4388), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)', 'gpu_options': 'gpu_options', 'allow_soft_placement': '(True)'}), '(log_device_placement=False, gpu_options=gpu_options,\n allow_soft_placement=True)\n', (4304, 4388), True, 'import tensorflow as tf\n'), ((1568, 1592), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1582, 1592), False, 'import os\n'), ((1603, 1624), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1614, 1624), False, 'import os\n'), ((1635, 1654), 'tensorflow.device', 'tf.device', (['"""/GPU:0"""'], {}), "('/GPU:0')\n", (1644, 1654), True, 'import tensorflow as tf\n'), ((1675, 1724), 'datasets.TFrecords2Dataset.get_datasets', 'TFrecords2Dataset.get_datasets', (['FLAGS.dataset_dir'], {}), '(FLAGS.dataset_dir)\n', (1705, 1724), False, 'from datasets import TFrecords2Dataset\n'), ((3502, 3535), 'tensorflow.identity', 'tf.identity', (['image', '"""input_image"""'], {}), "(image, 'input_image')\n", (3513, 3535), True, 'import tensorflow as tf\n'), ((3613, 3740), 'processing.ssd_vgg_preprocessing.preprocess_image', 'ssd_vgg_preprocessing.preprocess_image', (['image', 'glabels', 'gbboxes', 'gxs', 'gys', 'text_shape'], {'is_training': '(True)', 'data_format': '"""NHWC"""'}), "(image, glabels, gbboxes, gxs, gys,\n text_shape, is_training=True, data_format='NHWC')\n", (3651, 3740), False, 'from processing import ssd_vgg_preprocessing\n'), ((3904, 3927), 'tensorflow.unstack', 'tf.unstack', (['gxs'], {'axis': '(1)'}), '(gxs, axis=1)\n', (3914, 3927), True, 'import tensorflow as tf\n'), ((3954, 3977), 'tensorflow.unstack', 'tf.unstack', (['gys'], {'axis': '(1)'}), '(gys, axis=1)\n', (3964, 3977), True, 
'import tensorflow as tf\n'), ((4008, 4031), 'nets.txtbox_384.TextboxNet', 'txtbox_384.TextboxNet', ([], {}), '()\n', (4029, 4031), False, 'from nets import txtbox_384\n'), ((4395, 4420), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (4405, 4420), True, 'import tensorflow as tf\n'), ((4447, 4469), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (4467, 4469), True, 'import tensorflow as tf\n'), ((4489, 4530), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', (['sess', 'coord'], {}), '(sess, coord)\n', (4517, 4530), True, 'import tensorflow as tf\n'), ((3379, 3405), 'tensorflow.stack', 'tf.stack', (['[x1, x2, x3, x4]'], {}), '([x1, x2, x3, x4])\n', (3387, 3405), True, 'import tensorflow as tf\n'), ((3447, 3473), 'tensorflow.stack', 'tf.stack', (['[y1, y2, y3, y4]'], {}), '([y1, y2, y3, y4])\n', (3455, 3473), True, 'import tensorflow as tf\n'), ((4677, 4688), 'time.time', 'time.time', ([], {}), '()\n', (4686, 4688), False, 'import time\n'), ((5248, 5272), 'numpy.concatenate', 'np.concatenate', (['p_labels'], {}), '(p_labels)\n', (5262, 5272), True, 'import numpy as np\n'), ((5308, 5332), 'numpy.concatenate', 'np.concatenate', (['p_scores'], {}), '(p_scores)\n', (5322, 5332), True, 'import numpy as np\n'), ((5868, 5886), 'numpy.array', 'np.array', (['image_np'], {}), '(image_np)\n', (5876, 5886), True, 'import numpy as np\n'), ((6888, 6927), 'cv2.cvtColor', 'cv2.cvtColor', (['img_np', 'cv2.COLOR_BGR2RGB'], {}), '(img_np, cv2.COLOR_BGR2RGB)\n', (6900, 6927), False, 'import cv2\n'), ((5007, 5018), 'time.time', 'time.time', ([], {}), '()\n', (5016, 5018), False, 'import time\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Relation Classifier model and task functions."""
import copy
import json
import os
from typing import Dict, Text
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from language.mentionmemory.encoders import import_encoders # pylint: disable=unused-import
from language.mentionmemory.tasks import relation_classifier_task
from language.mentionmemory.utils import metric_utils
from language.mentionmemory.utils import test_utils
import ml_collections
import numpy as np
import sklearn.metrics
import tensorflow.compat.v2 as tf
class TacredEvaluationTest(test_utils.TestCase):
  """Test to reproduce results on TACRED dataset."""
  def _get_test_data_path(self, file_name):
    """Return the path of a TACRED test-data file shipped with the package."""
    path = os.path.join('language/mentionmemory/tasks/testdata/tacred',
                 file_name)
    return path
  def test_tacred_evaluation(self):
    """Check compute_tp_fp_fn_weighted matches sklearn micro P/R/F1 on TACRED."""
    targets_path = self._get_test_data_path('test.gold')
    with tf.io.gfile.GFile(targets_path, 'r') as targets_file:
      targets = [line.strip() for line in targets_file]
    predictions_path = self._get_test_data_path('spanbert_tacred_test.txt')
    with tf.io.gfile.GFile(predictions_path, 'r') as predictions_file:
      predictions = [line.strip() for line in predictions_file]
    # Assign each distinct label string a contiguous integer id.
    labels_dict = {}
    for label in targets + predictions:
      if label not in labels_dict:
        new_index = len(labels_dict)
        labels_dict[label] = new_index
    labels_list = list(labels_dict.keys())
    # 'no_relation' is the "negative" class excluded from micro-averaging.
    labels_list.remove('no_relation')
    expected_precision, expected_recall, expected_f1, _ = sklearn.metrics.precision_recall_fscore_support(
        targets, predictions, labels=labels_list, average='micro')
    # See https://arxiv.org/abs/2004.14855, Table 5.
    self.assertAlmostEqual(expected_f1, 0.708, places=3)
    targets_array = jnp.asarray([labels_dict[x] for x in targets])
    predictions_array = jnp.asarray([labels_dict[x] for x in predictions])
    actual_tp, actual_fp, actual_fn = metric_utils.compute_tp_fp_fn_weighted(
        predictions_array, targets_array, jnp.ones_like(targets_array),
        labels_dict['no_relation'])
    # Recompute micro precision / recall / F1 from the TP/FP/FN counts.
    actual_precision = actual_tp / (actual_tp + actual_fp)
    actual_recall = actual_tp / (actual_tp + actual_fn)
    actual_f1 = 2 * actual_precision * actual_recall / (
        actual_precision + actual_recall)
    self.assertAlmostEqual(actual_precision, expected_precision, places=8)
    self.assertAlmostEqual(actual_recall, expected_recall, places=8)
    self.assertAlmostEqual(actual_f1, expected_f1, places=8)
class RelationClassifierTaskTest(parameterized.TestCase):
  """Tests for RelationClassifierTask task."""
  # Minimal encoder hyperparameters so the test model builds quickly.
  encoder_config = {
      'dtype': 'bfloat16',
      'vocab_size': 1000,
      'max_positions': 128,
      'max_length': 128,
      'hidden_size': 4,
      'intermediate_dim': 8,
      'mention_encoding_dim': 4,
      'num_attention_heads': 2,
      'num_layers': 1,
      'dropout_rate': 0.1,
  }
  model_config = {
      'encoder_config': encoder_config,
      'encoder_name': 'bert',
      'dtype': 'bfloat16',
      'num_classes': 7,
      'num_layers': 2,
      'input_dim': 8,
      'hidden_dim': 9,
      'dropout_rate': 0.1,
  }
  config = {
      'model_config': model_config,
      'ignore_label': 0,
      'seed': 0,
  }
  def assertArrayEqual(self, expected, actual):
    """Assert two arrays are element-wise equal after flattening."""
    expected = expected.ravel().tolist()
    actual = actual.ravel().tolist()
    self.assertSequenceEqual(expected, actual)
  def _gen_raw_sample(
      self, config: ml_collections.ConfigDict) -> Dict[Text, np.ndarray]:
    """Generate raw example."""
    features = {}
    # Generate text
    max_length = config.model_config.encoder_config.max_length
    features['text_ids'] = np.random.randint(
        low=1,
        high=config.model_config.encoder_config.vocab_size,
        size=(max_length),
        dtype=np.int64)
    features['text_mask'] = np.ones_like(features['text_ids'])
    # Generate labels
    features['target'] = np.random.randint(
        config.model_config.num_classes, size=1)
    # Generate mentions
    # Sorted distinct positions; even indices become starts, odd indices ends,
    # so every mention has start < end.
    mention_positions = np.random.choice(
        max_length, size=(2 * config.max_mentions_per_sample), replace=False)
    mention_positions.sort()
    mention_mask = np.random.randint(
        2, size=(config.max_mentions_per_sample), dtype=np.int64)
    if mention_mask.sum() < 2:
      # There should be at least two mentions
      mention_mask[0] = 1
      mention_mask[1] = 1
    mention_start_positions = mention_positions[0::2] * mention_mask
    mention_end_positions = mention_positions[1::2] * mention_mask
    # Shuffle mentions
    p = np.random.permutation(config.max_mentions_per_sample)
    mention_start_positions = mention_start_positions[p]
    mention_end_positions = mention_end_positions[p]
    mention_mask = mention_mask[p]
    # Masked-out mentions must have zeroed positions.
    self.assertTrue(np.all(mention_start_positions[mention_mask == 0] == 0))
    self.assertTrue(np.all(mention_end_positions[mention_mask == 0] == 0))
    self.assertTrue(np.all(mention_mask[mention_mask == 0] == 0))
    features['mention_start_positions'] = mention_start_positions
    features['mention_end_positions'] = mention_end_positions
    features['mention_mask'] = mention_mask
    # Sample object and subject mentions
    mention_target_indices = np.random.choice(
        np.nonzero(mention_mask)[0], size=(2), replace=False)
    features['object_mention_indices'] = mention_target_indices[0]
    features['subject_mention_indices'] = mention_target_indices[1]
    return features
  def _gen_raw_batch(
      self, config: ml_collections.ConfigDict) -> Dict[Text, tf.Tensor]:
    """Stack ``per_device_batch_size`` raw samples into a batched feature dict."""
    samples = [
        self._gen_raw_sample(config)
        for _ in range(config.per_device_batch_size)
    ]
    features = {}
    for feature_name in samples[0].keys():
      features[feature_name] = np.stack(
          [sample[feature_name] for sample in samples])
    return features
  # Parameters: (per_device_batch_size, max_mentions_per_sample, max_mentions,
  #              max_length_with_entity_tokens).
  @parameterized.parameters([
      (1, 2, 2, None),
      (1, 2, 2, 150),
      (2, 2, 2, None),
      (2, 2, 2, 150),
      (2, 3, 2, None),
      (2, 3, 2, 150),
      (5, 10, 2, None),
      (5, 10, 2, 150),
      (5, 10, 7, None),
      (5, 10, 7, 150),
      (10, 20, 10, None),
      (10, 20, 10, 170),
  ])
  def test_loss_fn(self, per_device_batch_size, max_mentions_per_sample,
                   max_mentions, max_length_with_entity_tokens):
    """Test loss function runs and produces expected values."""
    config = copy.deepcopy(self.config)
    config['per_device_batch_size'] = per_device_batch_size
    config['max_mentions_per_sample'] = max_mentions_per_sample
    config['max_mentions'] = max_mentions
    config['max_length_with_entity_tokens'] = max_length_with_entity_tokens
    config = ml_collections.ConfigDict(config)
    raw_batch = self._gen_raw_batch(config)
    collater_fn = relation_classifier_task.RelationClassifierTask.make_collater_fn(
        config)
    postprocess_fn = relation_classifier_task.RelationClassifierTask.make_output_postprocess_fn(
        config)
    batch = collater_fn(raw_batch)
    batch = jax.tree_map(jnp.asarray, batch)
    # The collater emits two target mentions (subject, object) per sample.
    self.assertSequenceEqual(batch['mention_target_weights'].shape,
                             [2 * config.per_device_batch_size])
    self.assertArrayEqual(batch['mention_target_weights'],
                          np.ones(2 * config.per_device_batch_size))
    self.assertSequenceEqual(batch['mention_target_batch_positions'].shape,
                             [2 * config.per_device_batch_size])
    self.assertArrayEqual(
        batch['mention_target_batch_positions'],
        np.repeat(np.arange(config.per_device_batch_size), [2]))
    # Check start / end positions are correctly preserved if entity tokens
    # are not used. Otherwise, positions might change.
    if max_length_with_entity_tokens is None:
      for index in range(config.per_device_batch_size):
        subj_index = raw_batch['subject_mention_indices'][index]
        obj_index = raw_batch['object_mention_indices'][index]
        self.assertEqual(
            batch['mention_target_start_positions'][2 * index],
            raw_batch['mention_start_positions'][index, subj_index])
        self.assertEqual(batch['mention_target_end_positions'][2 * index],
                         raw_batch['mention_end_positions'][index, subj_index])
        self.assertEqual(batch['mention_target_start_positions'][2 * index + 1],
                         raw_batch['mention_start_positions'][index, obj_index])
        self.assertEqual(batch['mention_target_end_positions'][2 * index + 1],
                         raw_batch['mention_end_positions'][index, obj_index])
    expected_mention_target_indices = np.arange(config.per_device_batch_size *
                                              2)
    self.assertArrayEqual(batch['mention_target_indices'],
                          expected_mention_target_indices)
    # Subjects occupy even slots, objects odd slots.
    self.assertArrayEqual(batch['mention_subject_indices'],
                          expected_mention_target_indices[0::2])
    self.assertArrayEqual(batch['mention_object_indices'],
                          expected_mention_target_indices[1::2])
    model = relation_classifier_task.RelationClassifierTask.build_model(
        config.model_config)
    dummy_input = relation_classifier_task.RelationClassifierTask.dummy_input(
        config)
    init_rng = jax.random.PRNGKey(0)
    initial_parameters = model.init(init_rng, dummy_input, True)
    loss_fn = relation_classifier_task.RelationClassifierTask.make_loss_fn(
        config)
    _, metrics, auxiliary_output = loss_fn(config.model_config,
                                           initial_parameters['params'], {},
                                           batch, True)
    self.assertEqual(metrics['agg']['denominator'],
                     config.per_device_batch_size)
    features = postprocess_fn(batch, auxiliary_output)
    # Check features are JSON-serializable
    json.dumps(features)
    # Check features match the original batch
    for key in batch.keys():
      self.assertArrayEqual(np.array(features[key]), batch[key])
# Run the absl test harness when executed as a script.
if __name__ == '__main__':
  absltest.main()
| [
"language.mentionmemory.tasks.relation_classifier_task.RelationClassifierTask.make_loss_fn",
"language.mentionmemory.tasks.relation_classifier_task.RelationClassifierTask.build_model",
"jax.tree_map",
"numpy.array",
"copy.deepcopy",
"numpy.arange",
"jax.random.PRNGKey",
"json.dumps",
"language.menti... | [((6564, 6811), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[(1, 2, 2, None), (1, 2, 2, 150), (2, 2, 2, None), (2, 2, 2, 150), (2, 3, 2,\n None), (2, 3, 2, 150), (5, 10, 2, None), (5, 10, 2, 150), (5, 10, 7,\n None), (5, 10, 7, 150), (10, 20, 10, None), (10, 20, 10, 170)]'], {}), '([(1, 2, 2, None), (1, 2, 2, 150), (2, 2, 2, None),\n (2, 2, 2, 150), (2, 3, 2, None), (2, 3, 2, 150), (5, 10, 2, None), (5, \n 10, 2, 150), (5, 10, 7, None), (5, 10, 7, 150), (10, 20, 10, None), (10,\n 20, 10, 170)])\n', (6588, 6811), False, 'from absl.testing import parameterized\n'), ((10770, 10785), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (10783, 10785), False, 'from absl.testing import absltest\n'), ((1375, 1446), 'os.path.join', 'os.path.join', (['"""language/mentionmemory/tasks/testdata/tacred"""', 'file_name'], {}), "('language/mentionmemory/tasks/testdata/tacred', file_name)\n", (1387, 1446), False, 'import os\n'), ((2472, 2518), 'jax.numpy.asarray', 'jnp.asarray', (['[labels_dict[x] for x in targets]'], {}), '([labels_dict[x] for x in targets])\n', (2483, 2518), True, 'import jax.numpy as jnp\n'), ((2543, 2593), 'jax.numpy.asarray', 'jnp.asarray', (['[labels_dict[x] for x in predictions]'], {}), '([labels_dict[x] for x in predictions])\n', (2554, 2593), True, 'import jax.numpy as jnp\n'), ((4377, 4490), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': 'config.model_config.encoder_config.vocab_size', 'size': 'max_length', 'dtype': 'np.int64'}), '(low=1, high=config.model_config.encoder_config.vocab_size,\n size=max_length, dtype=np.int64)\n', (4394, 4490), True, 'import numpy as np\n'), ((4550, 4584), 'numpy.ones_like', 'np.ones_like', (["features['text_ids']"], {}), "(features['text_ids'])\n", (4562, 4584), True, 'import numpy as np\n'), ((4633, 4691), 'numpy.random.randint', 'np.random.randint', (['config.model_config.num_classes'], {'size': '(1)'}), 
'(config.model_config.num_classes, size=1)\n', (4650, 4691), True, 'import numpy as np\n'), ((4750, 4838), 'numpy.random.choice', 'np.random.choice', (['max_length'], {'size': '(2 * config.max_mentions_per_sample)', 'replace': '(False)'}), '(max_length, size=2 * config.max_mentions_per_sample,\n replace=False)\n', (4766, 4838), True, 'import numpy as np\n'), ((4894, 4967), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'config.max_mentions_per_sample', 'dtype': 'np.int64'}), '(2, size=config.max_mentions_per_sample, dtype=np.int64)\n', (4911, 4967), True, 'import numpy as np\n'), ((5276, 5329), 'numpy.random.permutation', 'np.random.permutation', (['config.max_mentions_per_sample'], {}), '(config.max_mentions_per_sample)\n', (5297, 5329), True, 'import numpy as np\n'), ((7091, 7117), 'copy.deepcopy', 'copy.deepcopy', (['self.config'], {}), '(self.config)\n', (7104, 7117), False, 'import copy\n'), ((7373, 7406), 'ml_collections.ConfigDict', 'ml_collections.ConfigDict', (['config'], {}), '(config)\n', (7398, 7406), False, 'import ml_collections\n'), ((7470, 7542), 'language.mentionmemory.tasks.relation_classifier_task.RelationClassifierTask.make_collater_fn', 'relation_classifier_task.RelationClassifierTask.make_collater_fn', (['config'], {}), '(config)\n', (7534, 7542), False, 'from language.mentionmemory.tasks import relation_classifier_task\n'), ((7573, 7660), 'language.mentionmemory.tasks.relation_classifier_task.RelationClassifierTask.make_output_postprocess_fn', 'relation_classifier_task.RelationClassifierTask.make_output_postprocess_fn', (['config'], {}), '(\n config)\n', (7647, 7660), False, 'from language.mentionmemory.tasks import relation_classifier_task\n'), ((7713, 7745), 'jax.tree_map', 'jax.tree_map', (['jnp.asarray', 'batch'], {}), '(jnp.asarray, batch)\n', (7725, 7745), False, 'import jax\n'), ((9323, 9366), 'numpy.arange', 'np.arange', (['(config.per_device_batch_size * 2)'], {}), '(config.per_device_batch_size * 2)\n', (9332, 
9366), True, 'import numpy as np\n'), ((9795, 9880), 'language.mentionmemory.tasks.relation_classifier_task.RelationClassifierTask.build_model', 'relation_classifier_task.RelationClassifierTask.build_model', (['config.model_config'], {}), '(config.model_config\n )\n', (9854, 9880), False, 'from language.mentionmemory.tasks import relation_classifier_task\n'), ((9903, 9970), 'language.mentionmemory.tasks.relation_classifier_task.RelationClassifierTask.dummy_input', 'relation_classifier_task.RelationClassifierTask.dummy_input', (['config'], {}), '(config)\n', (9962, 9970), False, 'from language.mentionmemory.tasks import relation_classifier_task\n'), ((9995, 10016), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (10013, 10016), False, 'import jax\n'), ((10097, 10165), 'language.mentionmemory.tasks.relation_classifier_task.RelationClassifierTask.make_loss_fn', 'relation_classifier_task.RelationClassifierTask.make_loss_fn', (['config'], {}), '(config)\n', (10157, 10165), False, 'from language.mentionmemory.tasks import relation_classifier_task\n'), ((10578, 10598), 'json.dumps', 'json.dumps', (['features'], {}), '(features)\n', (10588, 10598), False, 'import json\n'), ((1590, 1626), 'tensorflow.compat.v2.io.gfile.GFile', 'tf.io.gfile.GFile', (['targets_path', '"""r"""'], {}), "(targets_path, 'r')\n", (1607, 1626), True, 'import tensorflow.compat.v2 as tf\n'), ((1785, 1825), 'tensorflow.compat.v2.io.gfile.GFile', 'tf.io.gfile.GFile', (['predictions_path', '"""r"""'], {}), "(predictions_path, 'r')\n", (1802, 1825), True, 'import tensorflow.compat.v2 as tf\n'), ((2715, 2743), 'jax.numpy.ones_like', 'jnp.ones_like', (['targets_array'], {}), '(targets_array)\n', (2728, 2743), True, 'import jax.numpy as jnp\n'), ((5496, 5551), 'numpy.all', 'np.all', (['(mention_start_positions[mention_mask == 0] == 0)'], {}), '(mention_start_positions[mention_mask == 0] == 0)\n', (5502, 5551), True, 'import numpy as np\n'), ((5573, 5626), 'numpy.all', 'np.all', 
(['(mention_end_positions[mention_mask == 0] == 0)'], {}), '(mention_end_positions[mention_mask == 0] == 0)\n', (5579, 5626), True, 'import numpy as np\n'), ((5648, 5692), 'numpy.all', 'np.all', (['(mention_mask[mention_mask == 0] == 0)'], {}), '(mention_mask[mention_mask == 0] == 0)\n', (5654, 5692), True, 'import numpy as np\n'), ((6474, 6528), 'numpy.stack', 'np.stack', (['[sample[feature_name] for sample in samples]'], {}), '([sample[feature_name] for sample in samples])\n', (6482, 6528), True, 'import numpy as np\n'), ((7965, 8006), 'numpy.ones', 'np.ones', (['(2 * config.per_device_batch_size)'], {}), '(2 * config.per_device_batch_size)\n', (7972, 8006), True, 'import numpy as np\n'), ((5964, 5988), 'numpy.nonzero', 'np.nonzero', (['mention_mask'], {}), '(mention_mask)\n', (5974, 5988), True, 'import numpy as np\n'), ((8243, 8282), 'numpy.arange', 'np.arange', (['config.per_device_batch_size'], {}), '(config.per_device_batch_size)\n', (8252, 8282), True, 'import numpy as np\n'), ((10702, 10725), 'numpy.array', 'np.array', (['features[key]'], {}), '(features[key])\n', (10710, 10725), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-02-18 11:06:13
# @Author : <NAME> & <NAME> (<EMAIL>)
# @Link : http://iridescent.ink
# @Version : $1.0$
from __future__ import division, print_function, absolute_import
import torch as th
import numpy as np
from torchsar.utils.const import *
from torchsar.dsp.ffts import fft, ifft, fftfreq
from torchsar.dsp.polynomialfit import polyfit, polyval
import matplotlib.pyplot as plt
def bdce_sf(Sr, Fsa, Fsr, rdflag=False, Nroff=0, isplot=False):
    r"""Baseband doppler centroid estimation by spectrum fitting.

    Estimates the baseband doppler centroid of a SAR signal by fitting
    its azimuth spectrum.

    Parameters
    ----------
    Sr : numpy array
        SAR signal :math:`N_a×N_r` in range-doppler domain (range frequency domain).
    Fsa : float
        Sampling rate in azimuth.
    Fsr : float
        Sampling rate in range.
    rdflag : bool
        Specifies whether the input SAR signal is in range-doppler domain.
        If not, an FFT in range direction is performed first.
    Nroff : int
        Range offset in samples -- presumably the number of range cells to
        skip before fitting; TODO confirm against the closed implementation.
    isplot : bool
        Whether to plot the estimated results. (Default: False)

    Raises
    ------
    TypeError
        Always; the implementation is not included in this open release.
    """
    raise TypeError('Not opened yet!')
def bdce_api(Sr, Fsa, isplot=False):
    r"""Baseband doppler centroid estimation by average phase increment.

    Estimates the baseband doppler centroid from the average phase
    increment between adjacent azimuth samples.

    Parameters
    ----------
    Sr : numpy array
        SAR raw data or range compressed data.
    Fsa : float
        Sampling rate in azimuth.
    isplot : bool
        Whether to plot the estimated results. (Default: False)

    Raises
    ------
    TypeError
        Always; the implementation is not included in this open release.
    """
    raise TypeError('Not opened yet!')
def bdce_madsen(Sr, Fsa, isplot=False):
    r"""Baseband doppler centroid estimation by Madsen's method.

    Parameters
    ----------
    Sr : numpy array
        SAR raw data or range compressed data.
    Fsa : float
        Sampling rate in azimuth.
    isplot : bool
        Whether to plot the estimated results. (Default: False)

    Raises
    ------
    TypeError
        Always; the implementation is not included in this open release.
    """
    raise TypeError('Not opened yet!')
def abdce_wda_ori(Sr, Fsa, Fsr, Fc, rate=0.9, isplot=False, islog=False):
    r"""Absolute and baseband doppler centroid estimation by wavelength diversity algorithm.

    Absolute and baseband doppler centroid estimation by the Wavelength
    Diversity Algorithm (WDA).
    Reference: "SAR Imaging: Algorithms and Implementation", p. 350.

    Parameters
    ----------
    Sr : numpy array
        SAR signal :math:`N_a×N_r` in range frequency domain.
    Fsa : float
        Sampling rate in azimuth.
    Fsr : float
        Sampling rate in range.
    Fc : float
        Carrier frequency, unit Hz.
    rate : float
        Presumably a selection/threshold rate used by the fit; TODO confirm
        against the closed implementation. (Default: 0.9)
    isplot : bool
        Whether to plot the estimated results. (Default: False)
    islog : bool
        Whether to print/log intermediate results. (Default: False)

    Raises
    ------
    TypeError
        Always; the implementation is not included in this open release.
    """
    raise TypeError('Not opened yet!')
def abdce_wda_opt(Sr, Fsr, Fsa, Fc, ncpb=None, tr=None, isfftr=False, isplot=False, islog=False):
    """Absolute and baseband doppler centroid estimation by wavelength diversity algorithm.

    Block-wise absolute and baseband doppler centroid estimation by the
    Wavelength Diversity Algorithm (WDA).
    Reference: "SAR Imaging: Algorithms and Implementation", p. 350.

    Parameters
    ----------
    Sr : 2d-tensor
        SAR signal :math:`N_a×N_r` in range frequency domain.
    Fsr : float
        Sampling rate in range, unit Hz.
    Fsa : float
        Sampling rate in azimuth, unit Hz.
    Fc : float
        Carrier frequency, unit Hz.
    ncpb : tuple or list, optional
        Number of cells per block, so we have blocks (int(Na/ncpb[0])) × (int(Nr/ncpb[1]))
        (the default is [Na, Nr], which means one block covering everything).
    tr : 1d-tensor, optional
        Time in range (the default is None, which means linspace(0, Nr, Nr)).
    isfftr : bool, optional
        Whether to do FFT in range (the default is False).
    isplot : bool, optional
        Whether to plot the estimation results (the default is False).
    islog : bool, optional
        Whether to print/log intermediate results (the default is False).

    Returns
    -------
    fadc : 2d-tensor
        Absolute doppler centroid frequency, one value per block.
    fbdc : 2d-tensor
        Baseband doppler centroid frequency, one value per block.
    Ma : 2d-tensor
        Doppler ambiguity number, one value per block.

    Raises
    ------
    TypeError
        Always; the implementation is not included in this open release.
    """
    raise TypeError('Not opened yet!')
def fullfadc(fdc, shape):
    """Expand a block-wise doppler centroid map to full image resolution.

    Each block estimate ``fdc[a, r]`` is replicated over its corresponding
    rectangular region of an ``Na x Nr`` grid.

    Parameters
    ----------
    fdc : 2d-tensor
        Doppler centroid estimates per block, shape ``(NBa, NBr)``.
    shape : tuple of int
        Full output shape ``(Na, Nr)``.

    Returns
    -------
    fc : 2d-tensor (float)
        Tensor of shape ``(Na, Nr)`` with the block values broadcast in
        place.  When ``Na``/``Nr`` are not exact multiples of the block
        counts, the trailing remainder rows/columns keep their zero
        initialization (original behavior, preserved).
    """
    NBa, NBr = fdc.shape
    Na, Nr = shape
    # Block extents: integer floor division replaces the original
    # float -> np.uint -> int round-trip (identical for positive sizes,
    # and safe against np.uint wrap-around on negatives).
    Na1b = Na // NBa
    Nr1b = Nr // NBr
    fc = th.zeros((Na, Nr))
    for a in range(NBa):
        for r in range(NBr):
            fc[a * Na1b:min((a + 1) * Na1b, Na), r * Nr1b:min((r + 1) * Nr1b, Nr)] = fdc[a, r]
    return fc
if __name__ == "__main__":
    # Demo / smoke test on real SAR raw data.
    # NOTE(review): this demo cannot run as shipped:
    #   * ``accc`` is neither defined nor imported in this module --
    #     confirm which package provides it;
    #   * the ``bdce_*`` / ``abdce_*`` estimators above all raise
    #     TypeError('Not opened yet!') in this open release.
    import matplotlib.pyplot as plt
    import torchsar
    # Path to a local ALOS PALSAR raw-data file (adjust to your machine).
    datafile = '/mnt/d/DataSets/sar/ALOSPALSAR/mat/ALPSRP050500980-L1.0/ALOS_PALSAR_RAW=IMG-HH-ALPSRP050500980-H1(sl=1el=35345).mat'
    # datafile = '/mnt/d/DataSets/sar/ERS/mat/ERS2_SAR_RAW=E2_02558_STD_L0_F327(sl=1el=28682).mat'
    sardata, sarplat = torchsar.sarread(datafile)
    # Platform / sensor parameters read from the data file.
    Fsa = sarplat.params['Fsa']
    Fsr = sarplat.params['Fsr']
    fr = sarplat.params['fr']
    Kr = sarplat.params['Kr']
    Fc = sarplat.sensor['Fc']
    # Assemble the complex raw signal from its I/Q channels.
    Sr = sardata.rawdata[:, :, 0] + 1j * sardata.rawdata[:, :, 1]
    Sr = Sr[0:1024, 0:2048]
    # Sr = Sr[1024:4096, 0:2048]
    # Range frequency axis (note: overwrites the ``fr`` loaded above).
    fr = th.linspace(-Fsr / 2.0, Fsr / 2.0, Sr.shape[1])
    # Sr = fftshift(fft(fftshift(Sr, axes=1), axis=1), axes=1)
    # Sr = torchsar.range_matched_filtering(Sr, fr, Kr)
    # Sr = ifftshift(ifft(ifftshift(Sr, axes=1), axis=1), axes=1)
    # aa = torchsar.doppler_center_estimation(Sr, Fsa)
    # print(aa)
    # Sr = Sr[512:-512, 512:-512]
    # Sr = Sr[:, 512:512+1024]
    # Sr = Sr[0:512, 0:512]
    # Sr = fftshift(fft(fftshift(Sr, axes=1), axis=1), axes=1)
    # Sr = fftshift(fft(Sr, axis=1), axes=1)
    # Sr = fft(Sr, axis=1)
    print(Sr.shape)
    accc(Sr, isplot=True)
    _, dc_coef = bdce_sf(Sr, Fsa, rdflag=False, isplot=True)
    print(dc_coef)
    bdce_api(Sr, Fsa, isplot=True)
    fadc = abdce_wda_ori(Sr, Fsa, Fsr, Fc)
    print(fadc)
| [
"torchsar.sarread",
"torch.zeros",
"torch.linspace",
"numpy.uint"
] | [((4180, 4198), 'torch.zeros', 'th.zeros', (['(Na, Nr)'], {}), '((Na, Nr))\n', (4188, 4198), True, 'import torch as th\n'), ((4726, 4752), 'torchsar.sarread', 'torchsar.sarread', (['datafile'], {}), '(datafile)\n', (4742, 4752), False, 'import torchsar\n'), ((5045, 5092), 'torch.linspace', 'th.linspace', (['(-Fsr / 2.0)', '(Fsr / 2.0)', 'Sr.shape[1]'], {}), '(-Fsr / 2.0, Fsr / 2.0, Sr.shape[1])\n', (5056, 5092), True, 'import torch as th\n'), ((4117, 4134), 'numpy.uint', 'np.uint', (['(Na / NBa)'], {}), '(Na / NBa)\n', (4124, 4134), True, 'import numpy as np\n'), ((4151, 4168), 'numpy.uint', 'np.uint', (['(Nr / NBr)'], {}), '(Nr / NBr)\n', (4158, 4168), True, 'import numpy as np\n')] |
import numpy as np
import torch
import rlkit.torch.pytorch_util as ptu
class StackedReplayBuffer:
    """Flat replay buffer with support for sampling short stacked sequences.

    Transitions are stored in flat pre-allocated numpy arrays.  Besides
    ordinary single-transition sampling (for SAC), the buffer can sample a
    point together with its ``time_steps`` predecessors (for context /
    reconstruction encoders).  Each episode is followed by ``time_steps``
    all-zero padding transitions so that a stacked sample starting at an
    allowed point never crosses an episode boundary.
    """

    def __init__(self, max_replay_buffer_size, time_steps, observation_dim, action_dim, task_indicator_dim, data_usage_reconstruction, data_usage_sac, num_last_samples, permute_samples, encoding_mode):
        """Pre-allocate all storage arrays and initialize bookkeeping.

        ``data_usage_reconstruction`` / ``data_usage_sac`` select the
        sampling priority mode ('linear', 'cut', 'tree_sampling' or None)
        used for encoder and SAC batches respectively; ``encoding_mode``
        controls how ``make_encoder_data`` shapes its output.
        """
        self._observation_dim = observation_dim
        self._action_dim = action_dim
        self._task_indicator_dim = task_indicator_dim
        self._max_replay_buffer_size = max_replay_buffer_size
        # Flat pre-allocated storage, one row per transition.
        self._observations = np.zeros((max_replay_buffer_size, observation_dim), dtype=np.float32)
        self._next_obs = np.zeros((max_replay_buffer_size, observation_dim), dtype=np.float32)
        self._actions = np.zeros((max_replay_buffer_size, action_dim), dtype=np.float32)
        self._rewards = np.zeros((max_replay_buffer_size, 1), dtype=np.float32)
        # task indicator computed through encoder
        self._base_task_indicators = np.zeros(max_replay_buffer_size, dtype=np.float32)
        self._task_indicators = np.zeros((max_replay_buffer_size, task_indicator_dim), dtype=np.float32)
        self._next_task_indicators = np.zeros((max_replay_buffer_size, task_indicator_dim), dtype=np.float32)
        self._true_task = np.zeros((max_replay_buffer_size, 1), dtype=object)  # filled with dicts with keys 'base', 'specification'
        # NOTE(review): _sparse_rewards is returned by sample_data but never
        # written anywhere in this class -- it stays all-zero; confirm intent.
        self._sparse_rewards = np.zeros((max_replay_buffer_size, 1), dtype=np.float32)
        # self._terminals[i] = a terminal was received at time i
        self._terminals = np.zeros((max_replay_buffer_size, 1), dtype='uint8')
        self.time_steps = time_steps
        # _top: next write position; _size: number of valid entries.
        self._top = 0
        self._size = 0
        self._episode_starts = []
        # allowed points specify locations in the buffer, that, alone or together with the <self.time_step> last entries
        # can be sampled
        self._allowed_points = []
        self._train_indices = []
        self._val_indices = []
        self.stats_dict = None
        self.data_usage_reconstruction = data_usage_reconstruction
        self.data_usage_sac = data_usage_sac
        self.num_last_samples = num_last_samples
        self.permute_samples = permute_samples
        self.encoding_mode = encoding_mode
        # Pad the start of the buffer so the first real transition has
        # time_steps (zero) predecessors available for stacked sampling.
        self.add_zero_elements()
        self._cur_episode_start = self._top

    def add_zero_elements(self):
        """Append ``time_steps`` all-zero transitions (episode padding)."""
        # TODO: as already spawned as zeros, actually not zero writing needed, could only advance
        for t in range(self.time_steps):
            self.add_sample(
                np.zeros(self._observation_dim),
                np.zeros(self._action_dim),
                np.zeros(1),
                np.zeros(1, dtype='uint8'),
                np.zeros(self._observation_dim),
                np.zeros(self._task_indicator_dim),
                np.zeros(self._task_indicator_dim),
                np.zeros(1)
                #env_info=dict(sparse_reward=0)
            )

    def add_episode(self, episode):
        """Bulk-copy a whole episode (dict of equal-length arrays) into the buffer."""
        # Assume all array are same length (as they come from same rollout)
        length = episode['observations'].shape[0]
        # check, if whole episode fits into buffer
        if length >= self._max_replay_buffer_size:
            error_string =\
                "-------------------------------------------------------------------------------------------\n\n" \
                "ATTENTION:\n" \
                "The current episode was longer than the replay buffer and could not be fitted in.\n" \
                "Please consider decreasing the maximum episode length or increasing the task buffer size.\n\n" \
                "-------------------------------------------------------------------------------------------"
            print(error_string)
            return
        if self._size + length >= self._max_replay_buffer_size:
            # Episode would wrap around the end of the buffer: restart at 0
            # instead of wrapping the slice assignment below.
            # A bit space is not used, but assuming a big buffer it does not matter so much
            # TODO: additional 0 samples must be added
            self._top = 0
        low = self._top
        high = self._top + length
        self._observations[low:high] = episode['observations']
        self._next_obs[low:high] = episode['next_observations']
        self._actions[low:high] = episode['actions']
        self._rewards[low:high] = episode['rewards']
        self._task_indicators[low:high] = episode['task_indicators']
        self._next_task_indicators[low:high] = episode['next_task_indicators']
        self._terminals[low:high] = episode['terminals']
        self._true_task[low:high] = episode['true_tasks']
        self._advance_multi(length)
        self.terminate_episode()

    def add_sample(self, observation, action, reward, terminal,
                   next_observation, task_indicator, next_task_indicator, true_task, **kwargs):
        """Write one transition at the current head position and advance."""
        self._observations[self._top] = observation
        self._next_obs[self._top] = next_observation
        self._actions[self._top] = action
        self._rewards[self._top] = reward
        self._task_indicators[self._top] = task_indicator
        self._next_task_indicators[self._top] = next_task_indicator
        self._terminals[self._top] = terminal
        self._true_task[self._top] = true_task
        self._advance()

    def terminate_episode(self):
        """Finish the current episode: record its start, mark its transitions
        as sampleable and append zero padding for the next episode."""
        # store the episode beginning once the episode is over
        # n.b. allows last episode to loop but whatever
        self._episode_starts.append(self._cur_episode_start)
        # TODO: allowed points must be "reset" at buffer overflow
        self._allowed_points += list(range(self._cur_episode_start, self._top))
        self.add_zero_elements()
        self._cur_episode_start = self._top

    def size(self):
        """Number of stored entries (including zero padding)."""
        return self._size

    def get_allowed_points(self):
        """Buffer positions that may be sampled (see class docstring)."""
        return self._allowed_points

    def _advance(self):
        """Move the write head forward by one (with wrap-around)."""
        self._top = (self._top + 1) % self._max_replay_buffer_size
        if self._size < self._max_replay_buffer_size:
            self._size += 1

    def _advance_multi(self, length):
        """Move the write head forward by ``length`` (with wrap-around)."""
        self._top = (self._top + length) % self._max_replay_buffer_size
        if self._size + length <= self._max_replay_buffer_size:
            self._size += length
        else:
            self._size = self._max_replay_buffer_size

    def sample_data(self, indices):
        """Return a dict of arrays for the given buffer positions."""
        return dict(
            observations=self._observations[indices],
            next_observations=self._next_obs[indices],
            actions=self._actions[indices],
            rewards=self._rewards[indices],
            task_indicators=self._task_indicators[indices],
            next_task_indicators=self._next_task_indicators[indices],
            sparse_rewards=self._sparse_rewards[indices],
            terminals=self._terminals[indices],
            true_tasks=self._true_task[indices]
        )

    def get_indices(self, points, batch_size, prio=None):
        """Draw ``batch_size`` positions from ``points`` (a numpy array)
        according to the priority scheme ``prio``:
        'linear' (later samples weighted more), 'cut' (only the last
        num_last_samples points), 'tree_sampling' (bucketed choice, see
        comments below), or uniform for anything else."""
        if prio == 'linear':
            # prioritized version: later samples get more weight
            weights = np.linspace(0.1, 0.9, points.shape[0])
            weights = weights / np.sum(weights)
            indices = np.random.choice(points, batch_size, replace=True if batch_size > points.shape[0] else False, p=weights)
        elif prio == 'cut':
            indices = np.random.choice(points[-self.num_last_samples:], batch_size, replace=True if batch_size > points[-self.num_last_samples:].shape[0] else False)
        elif prio == 'tree_sampling':
            # instead of using 'np.random.choice' directly on the whole 'points' array, which is O(n)
            # and highly inefficient for big replay buffers, we subdivide 'points' in buckets, which we apply
            # 'np.random.choice' to.
            # 'points' needs to be shuffled already, to ensure i.i.d assumption
            root = int(np.sqrt(points.shape[0]))
            if root < batch_size:
                indices = np.random.choice(points, batch_size, replace=True if batch_size > points.shape[0] else False)
            else:
                partition = int(points.shape[0] / root)
                division = np.random.randint(0, root)  # sample a sub-bucket
                points_division = points[partition * division: partition * (division + 1)]
                replace = True if batch_size > points_division.shape[0] else False
                indices = np.random.choice(points_division, batch_size, replace=replace)
        else:
            indices = np.random.choice(points, batch_size, replace=True if batch_size > points.shape[0] else False)
        return indices

    # Single transition sample functions
    def random_batch(self, indices, batch_size, prio='tree_sampling'):
        ''' batch of unordered transitions '''
        indices = self.get_indices(indices, batch_size, prio=prio)
        return self.sample_data(indices)

    def sample_sac_data_batch(self, indices, batch_size):
        """SAC training batch, using the configured SAC priority mode."""
        return self.random_batch(indices, batch_size, prio=self.data_usage_sac)

    # Sequence sample functions
    def sample_few_step_batch(self, points, batch_size, normalize=True):
        """Return stacked sequences: shape (batch_size, time_steps + 1, -1)."""
        # the points in time together with their <time_step> many entries from before are sampled
        all_indices = []
        for ind in points:
            all_indices += list(range(ind - self.time_steps, ind + 1))
        data = self.sample_data(all_indices)
        if normalize:
            data = self.normalize_data(data)
        for key in data:
            data[key] = np.reshape(data[key], (batch_size, self.time_steps + 1, -1))
        return data

    def sample_random_few_step_batch(self, points, batch_size, normalize=True):
        ''' batch of unordered small sequences of transitions '''
        indices = self.get_indices(points, batch_size, prio=self.data_usage_reconstruction)
        return self.sample_few_step_batch(indices, batch_size, normalize=normalize)

    def sample_relabeler_data_batch(self, start, batch_size):
        """Deterministic sequence batch: allowed points [start, start+batch_size)."""
        points = self._allowed_points[start:start+batch_size]
        return self.sample_few_step_batch(points, batch_size)

    # Relabeler util function
    def relabel_z(self, start, batch_size, z, next_z, y):
        """Overwrite stored (base) task indicators for a contiguous slice of
        allowed points; used after re-encoding with an updated encoder."""
        points = self._allowed_points[start:start + batch_size]
        self._task_indicators[points] = z
        self._next_task_indicators[points] = next_z
        self._base_task_indicators[points] = y

    def get_train_val_indices(self, train_val_percent):
        """Split not-yet-assigned allowed points into train/val sets and
        return the (sorted, cumulative) index arrays."""
        # Split all data from replay buffer into training and validation set
        # not very efficient but hopefully readable code in this function
        points = np.array(self.get_allowed_points())
        train_indices = np.array(self._train_indices)
        val_indices = np.array(self._val_indices)
        # Only points that are in neither split yet get distributed.
        points = points[np.isin(points, train_indices, invert=True)]
        points = points[np.isin(points, val_indices, invert=True)]
        points = np.random.permutation(points)
        splitter = int(points.shape[0] * train_val_percent)
        new_train_indices = points[:splitter]
        new_val_indices = points[splitter:]
        self._train_indices += new_train_indices.tolist()
        self._val_indices += new_val_indices.tolist()
        self._train_indices.sort()
        self._val_indices.sort()
        return np.array(self._train_indices), np.array(self._val_indices)

    def make_encoder_data(self, data, batch_size, mode='multiply'):
        """Turn a stacked-sequence batch (from sample_few_step_batch) into
        encoder input on the configured device."""
        # MLP encoder input: state of last timestep + state, action, reward of all timesteps before
        # input is in form [[t-N], ... [t-1], [t]]
        # therefore set action and reward of last timestep = 0
        # Returns: [batch_size, timesteps, obs+action+reward dim]
        # assumes, that a flat encoder flattens the data itself
        observations = torch.from_numpy(data['observations'])
        actions = torch.from_numpy(data['actions'])
        rewards = torch.from_numpy(data['rewards'])
        next_observations = torch.from_numpy((data['next_observations']))
        # Drop the final timestep from every stream: the encoder only sees
        # the time_steps transitions preceding the sampled point.
        observations_encoder_input = observations.clone().detach()[:, :-1, :]
        actions_encoder_input = actions.clone().detach()[:, :-1, :]
        rewards_encoder_input = rewards.clone().detach()[:, :-1, :]
        next_observations_encoder_input = next_observations.clone().detach()[:, :-1, :]
        # size: [batch_size, time_steps, obs+action+reward]
        encoder_input = torch.cat([observations_encoder_input, actions_encoder_input, rewards_encoder_input, next_observations_encoder_input], dim=-1)
        if self.permute_samples:
            # Shuffle the time dimension (same permutation for all streams,
            # since they were concatenated above).
            perm = torch.randperm(encoder_input.shape[1]).long()
            encoder_input = encoder_input[:, perm]
        if self.encoding_mode == 'trajectory':
            # size: [batch_size, time_steps * (obs+action+reward)]
            encoder_input = encoder_input.view(batch_size, -1)
        if self.encoding_mode == 'transitionSharedY' or self.encoding_mode == 'transitionIndividualY':
            pass
        return encoder_input.to(ptu.device)

    def get_stats(self):
        """Per-dimension max/min/mean/std over all allowed points, per stream."""
        data = self.sample_data(self.get_allowed_points())
        stats_dict = dict(
            observations={},
            next_observations={},
            actions={},
            rewards={},
        )
        for key in stats_dict:
            stats_dict[key]["max"] = data[key].max(axis=0)
            stats_dict[key]["min"] = data[key].min(axis=0)
            stats_dict[key]["mean"] = data[key].mean(axis=0)
            stats_dict[key]["std"] = data[key].std(axis=0)
        return stats_dict

    def normalize_data(self, data):
        """Standardize ``data`` in place using ``self.stats_dict`` (must be set);
        only the keys present in the stats dict are normalized."""
        stats_dict = self.stats_dict
        for key in stats_dict:
            data[key] = (data[key] - stats_dict[key]["mean"]) / (stats_dict[key]["std"] + 1e-9)
        return data

    def check_enc(self):
        """Summarize stored encodings per (base task, specification):
        mean/std of task indicators, reward statistics and a histogram of
        base-task estimates.  Returns a nested dict keyed by base then spec."""
        if self.data_usage_reconstruction == 'cut':
            lastN = self.num_last_samples
        else:
            lastN = self._max_replay_buffer_size
        indices = self.get_allowed_points()[-lastN:]
        true_task_list = np.squeeze(self._true_task[indices]).tolist()
        base_tasks = list(set([sub['base_task'] for sub in true_task_list]))
        base_spec_dict = {}
        for base_task in base_tasks:
            spec_list = list(set([sub['specification'] for sub in true_task_list if sub['base_task'] == base_task]))
            base_spec_dict[base_task] = spec_list
        encoding_storage = {}
        for base in base_spec_dict.keys():
            spec_encoding_dict = {}
            reward_mean = np.zeros(len(base_spec_dict[base]))
            reward_std = np.zeros(len(base_spec_dict[base]))
            for i, spec in enumerate(base_spec_dict[base]):
                task_indices = [index for index in indices if (self._true_task[index][0]['base_task'] == base and self._true_task[index][0]['specification'] == spec)]
                target = None
                if "target" in self._true_task[task_indices[0]][0]:
                    target = self._true_task[task_indices[0]][0]['target']
                encodings = self._task_indicators[task_indices]
                mean = np.mean(encodings, axis=0)
                std = np.std(encodings, axis=0)
                rewards = self._rewards[task_indices]
                reward_mean[i] = rewards.mean()
                reward_std[i] = rewards.std()
                base_task_estimate = np.bincount(self._base_task_indicators[task_indices].astype(int))
                spec_encoding_dict[spec] = dict(mean=mean, std=std, base=base_task_estimate, reward_mean=reward_mean[i], reward_std=reward_std[i], target=target)
            encoding_storage[base] = spec_encoding_dict
            #print("Task: " + str(base) + "," + str(reward_mean.mean()) + "," + str(reward_std.mean()))
        return encoding_storage
| [
"numpy.mean",
"numpy.reshape",
"torch.randperm",
"numpy.sqrt",
"numpy.random.choice",
"torch.from_numpy",
"numpy.isin",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.sum",
"numpy.random.randint",
"numpy.std",
"torch.cat",
"numpy.random.permutation"
] | [((534, 603), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, observation_dim)'], {'dtype': 'np.float32'}), '((max_replay_buffer_size, observation_dim), dtype=np.float32)\n', (542, 603), True, 'import numpy as np\n'), ((629, 698), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, observation_dim)'], {'dtype': 'np.float32'}), '((max_replay_buffer_size, observation_dim), dtype=np.float32)\n', (637, 698), True, 'import numpy as np\n'), ((723, 787), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, action_dim)'], {'dtype': 'np.float32'}), '((max_replay_buffer_size, action_dim), dtype=np.float32)\n', (731, 787), True, 'import numpy as np\n'), ((812, 867), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, 1)'], {'dtype': 'np.float32'}), '((max_replay_buffer_size, 1), dtype=np.float32)\n', (820, 867), True, 'import numpy as np\n'), ((955, 1005), 'numpy.zeros', 'np.zeros', (['max_replay_buffer_size'], {'dtype': 'np.float32'}), '(max_replay_buffer_size, dtype=np.float32)\n', (963, 1005), True, 'import numpy as np\n'), ((1038, 1110), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, task_indicator_dim)'], {'dtype': 'np.float32'}), '((max_replay_buffer_size, task_indicator_dim), dtype=np.float32)\n', (1046, 1110), True, 'import numpy as np\n'), ((1148, 1220), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, task_indicator_dim)'], {'dtype': 'np.float32'}), '((max_replay_buffer_size, task_indicator_dim), dtype=np.float32)\n', (1156, 1220), True, 'import numpy as np\n'), ((1247, 1298), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, 1)'], {'dtype': 'object'}), '((max_replay_buffer_size, 1), dtype=object)\n', (1255, 1298), True, 'import numpy as np\n'), ((1386, 1441), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, 1)'], {'dtype': 'np.float32'}), '((max_replay_buffer_size, 1), dtype=np.float32)\n', (1394, 1441), True, 'import numpy as np\n'), ((1533, 1585), 'numpy.zeros', 'np.zeros', (['(max_replay_buffer_size, 1)'], {'dtype': 
'"""uint8"""'}), "((max_replay_buffer_size, 1), dtype='uint8')\n", (1541, 1585), True, 'import numpy as np\n'), ((10542, 10571), 'numpy.array', 'np.array', (['self._train_indices'], {}), '(self._train_indices)\n', (10550, 10571), True, 'import numpy as np\n'), ((10594, 10621), 'numpy.array', 'np.array', (['self._val_indices'], {}), '(self._val_indices)\n', (10602, 10621), True, 'import numpy as np\n'), ((10775, 10804), 'numpy.random.permutation', 'np.random.permutation', (['points'], {}), '(points)\n', (10796, 10804), True, 'import numpy as np\n'), ((11648, 11686), 'torch.from_numpy', 'torch.from_numpy', (["data['observations']"], {}), "(data['observations'])\n", (11664, 11686), False, 'import torch\n'), ((11705, 11738), 'torch.from_numpy', 'torch.from_numpy', (["data['actions']"], {}), "(data['actions'])\n", (11721, 11738), False, 'import torch\n'), ((11757, 11790), 'torch.from_numpy', 'torch.from_numpy', (["data['rewards']"], {}), "(data['rewards'])\n", (11773, 11790), False, 'import torch\n'), ((11819, 11862), 'torch.from_numpy', 'torch.from_numpy', (["data['next_observations']"], {}), "(data['next_observations'])\n", (11835, 11862), False, 'import torch\n'), ((12253, 12383), 'torch.cat', 'torch.cat', (['[observations_encoder_input, actions_encoder_input, rewards_encoder_input,\n next_observations_encoder_input]'], {'dim': '(-1)'}), '([observations_encoder_input, actions_encoder_input,\n rewards_encoder_input, next_observations_encoder_input], dim=-1)\n', (12262, 12383), False, 'import torch\n'), ((6928, 6966), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', 'points.shape[0]'], {}), '(0.1, 0.9, points.shape[0])\n', (6939, 6966), True, 'import numpy as np\n'), ((7037, 7146), 'numpy.random.choice', 'np.random.choice', (['points', 'batch_size'], {'replace': '(True if batch_size > points.shape[0] else False)', 'p': 'weights'}), '(points, batch_size, replace=True if batch_size > points.\n shape[0] else False, p=weights)\n', (7053, 7146), True, 'import numpy as 
np\n'), ((9370, 9430), 'numpy.reshape', 'np.reshape', (['data[key]', '(batch_size, self.time_steps + 1, -1)'], {}), '(data[key], (batch_size, self.time_steps + 1, -1))\n', (9380, 9430), True, 'import numpy as np\n'), ((10646, 10689), 'numpy.isin', 'np.isin', (['points', 'train_indices'], {'invert': '(True)'}), '(points, train_indices, invert=True)\n', (10653, 10689), True, 'import numpy as np\n'), ((10715, 10756), 'numpy.isin', 'np.isin', (['points', 'val_indices'], {'invert': '(True)'}), '(points, val_indices, invert=True)\n', (10722, 10756), True, 'import numpy as np\n'), ((11151, 11180), 'numpy.array', 'np.array', (['self._train_indices'], {}), '(self._train_indices)\n', (11159, 11180), True, 'import numpy as np\n'), ((11182, 11209), 'numpy.array', 'np.array', (['self._val_indices'], {}), '(self._val_indices)\n', (11190, 11209), True, 'import numpy as np\n'), ((2527, 2558), 'numpy.zeros', 'np.zeros', (['self._observation_dim'], {}), '(self._observation_dim)\n', (2535, 2558), True, 'import numpy as np\n'), ((2576, 2602), 'numpy.zeros', 'np.zeros', (['self._action_dim'], {}), '(self._action_dim)\n', (2584, 2602), True, 'import numpy as np\n'), ((2620, 2631), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2628, 2631), True, 'import numpy as np\n'), ((2649, 2675), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': '"""uint8"""'}), "(1, dtype='uint8')\n", (2657, 2675), True, 'import numpy as np\n'), ((2693, 2724), 'numpy.zeros', 'np.zeros', (['self._observation_dim'], {}), '(self._observation_dim)\n', (2701, 2724), True, 'import numpy as np\n'), ((2742, 2776), 'numpy.zeros', 'np.zeros', (['self._task_indicator_dim'], {}), '(self._task_indicator_dim)\n', (2750, 2776), True, 'import numpy as np\n'), ((2794, 2828), 'numpy.zeros', 'np.zeros', (['self._task_indicator_dim'], {}), '(self._task_indicator_dim)\n', (2802, 2828), True, 'import numpy as np\n'), ((2846, 2857), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2854, 2857), True, 'import numpy as np\n'), ((6999, 
7014), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (7005, 7014), True, 'import numpy as np\n'), ((7192, 7339), 'numpy.random.choice', 'np.random.choice', (['points[-self.num_last_samples:]', 'batch_size'], {'replace': '(True if batch_size > points[-self.num_last_samples:].shape[0] else False)'}), '(points[-self.num_last_samples:], batch_size, replace=True if\n batch_size > points[-self.num_last_samples:].shape[0] else False)\n', (7208, 7339), True, 'import numpy as np\n'), ((13957, 13993), 'numpy.squeeze', 'np.squeeze', (['self._true_task[indices]'], {}), '(self._true_task[indices])\n', (13967, 13993), True, 'import numpy as np\n'), ((15032, 15058), 'numpy.mean', 'np.mean', (['encodings'], {'axis': '(0)'}), '(encodings, axis=0)\n', (15039, 15058), True, 'import numpy as np\n'), ((15081, 15106), 'numpy.std', 'np.std', (['encodings'], {'axis': '(0)'}), '(encodings, axis=0)\n', (15087, 15106), True, 'import numpy as np\n'), ((8356, 8454), 'numpy.random.choice', 'np.random.choice', (['points', 'batch_size'], {'replace': '(True if batch_size > points.shape[0] else False)'}), '(points, batch_size, replace=True if batch_size > points.\n shape[0] else False)\n', (8372, 8454), True, 'import numpy as np\n'), ((12433, 12471), 'torch.randperm', 'torch.randperm', (['encoder_input.shape[1]'], {}), '(encoder_input.shape[1])\n', (12447, 12471), False, 'import torch\n'), ((7726, 7750), 'numpy.sqrt', 'np.sqrt', (['points.shape[0]'], {}), '(points.shape[0])\n', (7733, 7750), True, 'import numpy as np\n'), ((7812, 7910), 'numpy.random.choice', 'np.random.choice', (['points', 'batch_size'], {'replace': '(True if batch_size > points.shape[0] else False)'}), '(points, batch_size, replace=True if batch_size > points.\n shape[0] else False)\n', (7828, 7910), True, 'import numpy as np\n'), ((8007, 8033), 'numpy.random.randint', 'np.random.randint', (['(0)', 'root'], {}), '(0, root)\n', (8024, 8033), True, 'import numpy as np\n'), ((8257, 8319), 'numpy.random.choice', 
'np.random.choice', (['points_division', 'batch_size'], {'replace': 'replace'}), '(points_division, batch_size, replace=replace)\n', (8273, 8319), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import six
import collections
import copy
from torch.utils.data import Dataset
from collections import defaultdict
import h5py
from scipy.stats import norm
import os
SKIP_TYPES = six.string_types
class SimpleDataset(Dataset):
    """Map-style dataset over a feature matrix and an optional label vector.

    Assumes numpy-like containers with
        X.shape = (n_samples, n_features)
        y.shape = (n_samples,)
    """

    def __init__(self, X, y=None):
        self.X = X
        self.y = y

    def __len__(self):
        return len(self.X)

    def __getitem__(self, i):
        features = self.X[i]
        if self.y is None:
            # Unlabeled mode: only the input is returned.
            return dict(input=features)
        return dict(input=features, label=self.y[i])
class FastTensorDataLoader:
    """
    Batched iterator over a fixed set of in-memory tensors.

    Much faster than TensorDataset + DataLoader for pre-loaded data, since
    DataLoader fetches individual indices and concatenates them (slow).
    Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6
    """

    def __init__(self, *tensors, tensor_names, batch_size=32, shuffle=False):
        """
        Build a FastTensorDataLoader.

        :param *tensors: tensors to iterate over; all must share dim-0 length.
        :param tensor_names: names used as batch-dict keys (feed_dict style).
        :param batch_size: number of rows per yielded batch.
        :param shuffle: if True, shuffle the stored tensors *in-place*
            whenever a new iterator is created.
        :returns: A FastTensorDataLoader.
        """
        assert all(t.shape[0] == tensors[0].shape[0] for t in tensors)
        self.tensors = tensors
        self.tensor_names = tensor_names
        self.dataset_len = tensors[0].shape[0]
        self.batch_size = batch_size
        self.shuffle = shuffle
        # Ceiling division: a final short batch still counts as one batch.
        self.n_batches = -(-self.dataset_len // self.batch_size)

    def __iter__(self):
        if self.shuffle:
            perm = torch.randperm(self.dataset_len)
            self.tensors = [t[perm] for t in self.tensors]
        self.i = 0
        return self

    def __next__(self):
        if self.i >= self.dataset_len:
            raise StopIteration
        start = self.i
        stop = start + self.batch_size
        batch = {name: tensor[start:stop]
                 for name, tensor in zip(self.tensor_names, self.tensors)}
        self.i = stop
        return batch

    def __len__(self):
        return self.n_batches
'''standardize_dataset function is from utils_jared.py'''
def standardize_dataset(dataset, offset, scale):
    """Return a deep copy of ``dataset`` whose 'x' array is shifted by
    ``offset`` and divided by ``scale``; the input dict is left untouched."""
    normalized = copy.deepcopy(dataset)
    normalized['x'] = (normalized['x'] - offset) / scale
    return normalized
'''load_datasets function is from utils_jared.py'''
def load_datasets(dataset_file):
    """Load every group of an HDF5 file into nested dicts of numpy arrays.

    Returns ``{group_name: {array_name: ndarray}}`` (as a defaultdict).
    """
    datasets = defaultdict(dict)
    with h5py.File(dataset_file, 'r') as fp:
        for group_name in fp:
            group = fp[group_name]
            for array_name in group:
                # [:] materializes the HDF5 dataset as a numpy array.
                datasets[group_name][array_name] = group[array_name][:]
    return datasets
def load_cox_gaussian_data():
    """Read the bundled Gaussian survival HDF5 dataset into nested dicts
    of numpy arrays, keyed by group then array name."""
    data_path = os.path.join(os.path.dirname(__file__),
                             'datasets/gaussian_survival_data.h5')
    loaded = defaultdict(dict)
    with h5py.File(data_path, 'r') as fp:
        for group_name in fp:
            group = fp[group_name]
            for array_name in group:
                loaded[group_name][array_name] = group[array_name][:]
    return loaded
def prepare_data(x, label):
    """Sort (x, e, t) by descending event time.

    Accurate Cox partial-likelihood computation requires the training data
    in descending time order.  Returns ``(x, e, t)`` when ``label`` is a
    dict with keys 'e' and 't'; implicitly returns None for any other
    label type (original behavior preserved).
    """
    if isinstance(label, dict):
        events = label['e']
        times = label['t']
        # Descending-time permutation of all three arrays.
        order = np.argsort(times)[::-1]
        return x[order], events[order], times[order]
def probe_infnan(v, name, extras=None):
    """Debug probe: if tensor ``v`` contains NaNs, print diagnostics and abort.

    Parameters
    ----------
    v : torch.Tensor
        Tensor to inspect.
    name : str
        Label printed with the diagnostics.
    extras : dict, optional
        Extra named tensors to print alongside (name -> tensor).

    NOTE(review): despite the "infnan" name, only NaNs are detected
    (torch.isnan); infinities pass silently -- confirm whether torch.isinf
    should be checked as well.
    """
    # Avoid the shared mutable-default-argument pitfall of `extras={}`.
    if extras is None:
        extras = {}
    nan_mask = torch.isnan(v)
    n_bad = nan_mask.sum().item()
    if n_bad > 0:
        print('>>> {} >>>'.format(name))
        print(name, n_bad)
        print(v[nan_mask])
        for k, val in extras.items():
            print(k, val, val.sum().item())
        quit()
class Identity(nn.Module):
    """No-op module: returns its single argument, or the args tuple if several."""

    def forward(self, *args):
        return args[0] if len(args) == 1 else args
def get_batcnnorm(bn, nr_features=None, nr_dims=1):
    """Resolve ``bn`` into a batch-normalization module.

    An nn.Module passes through unchanged; True or 'async' builds the
    BatchNorm{1,2,3}d matching ``nr_dims`` over ``nr_features`` channels.
    """
    if isinstance(bn, nn.Module):
        return bn
    assert 1 <= nr_dims <= 3
    if bn in (True, 'async'):
        bn_cls = getattr(nn, 'BatchNorm{}d'.format(nr_dims))
        return bn_cls(nr_features)
    raise ValueError('Unknown type of batch normalization: {}.'.format(bn))
def get_dropout(dropout, nr_dims=1):
    """Resolve ``dropout`` into a dropout module.

    An nn.Module passes through unchanged; True means rate 0.5; otherwise
    the value is used as the drop probability.  Dropout{2,3}d is chosen
    for nr_dims > 1; the 1-d case is built inplace.
    """
    if isinstance(dropout, nn.Module):
        return dropout
    rate = 0.5 if dropout is True else dropout
    if nr_dims == 1:
        return nn.Dropout(rate, True)
    return getattr(nn, 'Dropout{}d'.format(nr_dims))(rate)
def get_activation(act):
    """Resolve ``act`` into an activation module.

    An nn.Module passes through unchanged; otherwise ``act`` must be a
    string naming a known activation (case-insensitive for the common
    ones, else looked up verbatim on torch.nn).
    """
    if isinstance(act, nn.Module):
        return act
    assert type(act) is str, 'Unknown type of activation: {}.'.format(act)
    key = act.lower()
    if key == 'identity':
        return Identity()
    if key == 'relu':
        return nn.ReLU(True)
    if key == 'selu':
        return nn.SELU(True)
    if key == 'sigmoid':
        return nn.Sigmoid()
    if key == 'tanh':
        return nn.Tanh()
    # Fall back to the exact attribute name on torch.nn.
    try:
        return getattr(nn, act)
    except AttributeError:
        raise ValueError('Unknown activation function: {}.'.format(act))
def get_optimizer(optimizer, model, *args, **kwargs):
    """Resolve *optimizer* into a torch optimizer over *model*'s trainable params.

    An optim.Optimizer instance passes through unchanged. A string is looked
    up on torch.optim (ValueError if unknown); the resulting class is
    instantiated over the parameters with requires_grad=True, forwarding any
    extra positional/keyword arguments.
    """
    if isinstance(optimizer, (optim.Optimizer)):
        return optimizer
    if type(optimizer) is str:
        try:
            optimizer = getattr(optim, optimizer)
        except AttributeError:
            raise ValueError('Unknown optimizer type: {}.'.format(optimizer))
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    return optimizer(trainable_params, *args, **kwargs)
def stmap(func, iterable):
    """Structure-preserving map: apply *func* to every leaf of a nested
    structure of sequences, sets and mappings, rebuilding the same shape
    (tuples come back as lists, as before).

    Strings are leaves. Fixed for Python 3.10+, where the ABC aliases
    (collections.Sequence / Set / Mapping) were removed — collections.abc
    is used instead — and the py2-only six.string_types check is replaced
    with a plain str check (identical behavior on py3).
    """
    from collections import abc  # local import keeps the fix self-contained
    if isinstance(iterable, str):
        return func(iterable)
    elif isinstance(iterable, (abc.Sequence, collections.UserList)):
        return [stmap(func, v) for v in iterable]
    elif isinstance(iterable, abc.Set):
        return {stmap(func, v) for v in iterable}
    elif isinstance(iterable, (abc.Mapping, collections.UserDict)):
        return {k: stmap(func, v) for k, v in iterable.items()}
    else:
        return func(iterable)
def _as_tensor(o):
    """Convert *o* to a torch tensor; SKIP_TYPES, Variables and tensors
    pass through unchanged."""
    from torch.autograd import Variable
    # Already-tensor-like (or explicitly skipped) values are returned as-is.
    if isinstance(o, SKIP_TYPES) or isinstance(o, Variable) or torch.is_tensor(o):
        return o
    return torch.from_numpy(np.array(o))
def as_tensor(obj):
    """Recursively convert every leaf of a nested structure to a torch tensor."""
    return stmap(_as_tensor, obj)
def _as_numpy(o):
    """Convert *o* to a numpy value; SKIP_TYPES pass through unchanged."""
    from torch.autograd import Variable
    if isinstance(o, SKIP_TYPES):
        return o
    if isinstance(o, Variable):
        # NOTE(review): this assignment is a no-op — likely intended
        # `o = o.data` to unwrap the Variable before the tensor check below;
        # harmless on modern torch where Variables are tensors. Confirm.
        o = o
    if torch.is_tensor(o):
        return o.cpu().numpy()
    return np.array(o)
def as_numpy(obj):
    """Recursively convert every leaf of a nested structure to numpy."""
    return stmap(_as_numpy, obj)
def _as_float(o):
    """Coerce a scalar-like value to a Python float; SKIP_TYPES pass through.

    Tensors use .item(); everything else goes through as_numpy and must be
    a single-element array (asserted).
    """
    if isinstance(o, SKIP_TYPES):
        return o
    if torch.is_tensor(o):
        return o.item()
    value = as_numpy(o)
    assert value.size == 1
    return float(value)
def as_float(obj):
    """Recursively coerce every leaf of a nested structure to float."""
    return stmap(_as_float, obj)
def _as_cpu(o):
from torch.autograd import Variable
if isinstance(o, Variable) or torch.is_tensor(o):
return o.cpu()
return o
def as_cpu(obj):
    """Recursively move every tensor leaf of a nested structure to CPU."""
    return stmap(_as_cpu, obj)
## For synthetic dataset creation
import math
from sklearn.datasets import make_moons
from scipy.stats import norm
# Create a simple dataset
def create_twomoon_dataset(n, p):
    """Two-moons classification data padded with p-2 Gaussian noise features.

    Returns (data, y): data has shape (n, p) where the first two columns are
    the informative two-moons coordinates and the rest are N(0, 1) noise;
    y are the binary moon labels. Prints the shapes as a progress check.
    """
    informative, y = make_moons(n_samples=n, shuffle=True, noise=0.1, random_state=None)
    print(y.shape)
    noise = norm.rvs(loc=0, scale=1, size=[n, p - 2])
    data = np.concatenate([informative, noise], axis=1)
    print(data.shape)
    return data, y
def create_sin_dataset(n, p):
    """Regression data with target y = sin(x1) * cos(x2)**3.

    x1, x2 are uniform on [0, 5]; the remaining p-2 columns are N(0, 1)
    noise. Returns (data, y) with data of shape (n, p) and y of shape
    (n, 1), cast to float32.
    """
    x1 = 5 * np.random.uniform(0, 1, n).reshape(-1, 1)
    x2 = 5 * np.random.uniform(0, 1, n).reshape(-1, 1)
    y = np.sin(x1) * np.cos(x2) ** 3
    informative = np.hstack((x1, x2))
    noise = norm.rvs(loc=0, scale=1, size=[n, p - 2])
    data = np.concatenate([informative, noise], axis=1)
    return data, y.astype(np.float32)
def create_simple_sin_dataset(n, p):
    '''This dataset was added to provide an example of L1 norm reg failure for presentation.

    Exactly two features (p must be 2), both uniform on [-pi, pi]; only the
    first feature matters: y = sin(x1). Returns (data, y) with data of
    shape (n, 2) and y of shape (n, 1).
    '''
    assert p == 2
    x1 = np.random.uniform(-math.pi, math.pi, n).reshape(n, 1)
    x2 = np.random.uniform(-math.pi, math.pi, n).reshape(n, 1)
    y = np.sin(x1)
    data = np.concatenate([x1, x2], axis=1)
    print("data.shape: {}".format(data.shape))
    return data, y
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.randperm",
"torch.nn.Tanh",
"numpy.hstack",
"scipy.stats.norm.rvs",
"numpy.argsort",
"numpy.array",
"copy.deepcopy",
"numpy.sin",
"torch.nn.Sigmoid",
"numpy.concatenate",
"h5py.File",
"sklearn.datasets.make_moons",
"torch.is_tensor",
"os.path... | [((2803, 2825), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (2816, 2825), False, 'import copy\n'), ((2998, 3015), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3009, 3015), False, 'from collections import defaultdict\n'), ((3344, 3361), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3355, 3361), False, 'from collections import defaultdict\n'), ((4022, 4036), 'torch.isnan', 'torch.isnan', (['v'], {}), '(v)\n', (4033, 4036), False, 'import torch\n'), ((6816, 6834), 'torch.is_tensor', 'torch.is_tensor', (['o'], {}), '(o)\n', (6831, 6834), False, 'import torch\n'), ((7114, 7132), 'torch.is_tensor', 'torch.is_tensor', (['o'], {}), '(o)\n', (7129, 7132), False, 'import torch\n'), ((7176, 7187), 'numpy.array', 'np.array', (['o'], {}), '(o)\n', (7184, 7187), True, 'import numpy as np\n'), ((7320, 7338), 'torch.is_tensor', 'torch.is_tensor', (['o'], {}), '(o)\n', (7335, 7338), False, 'import torch\n'), ((7882, 7949), 'sklearn.datasets.make_moons', 'make_moons', ([], {'n_samples': 'n', 'shuffle': '(True)', 'noise': '(0.1)', 'random_state': 'None'}), '(n_samples=n, shuffle=True, noise=0.1, random_state=None)\n', (7892, 7949), False, 'from sklearn.datasets import make_moons\n'), ((7988, 8029), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': '(0)', 'scale': '(1)', 'size': '[n, p - 2]'}), '(loc=0, scale=1, size=[n, p - 2])\n', (7996, 8029), False, 'from scipy.stats import norm\n'), ((8038, 8086), 'numpy.concatenate', 'np.concatenate', (['[relevant, noise_vector]'], {'axis': '(1)'}), '([relevant, noise_vector], axis=1)\n', (8052, 8086), True, 'import numpy as np\n'), ((8303, 8322), 'numpy.hstack', 'np.hstack', (['(x1, x2)'], {}), '((x1, x2))\n', (8312, 8322), True, 'import numpy as np\n'), ((8341, 8382), 'scipy.stats.norm.rvs', 'norm.rvs', ([], {'loc': '(0)', 'scale': '(1)', 'size': '[n, p - 2]'}), '(loc=0, scale=1, size=[n, p - 2])\n', (8349, 8382), False, 'from scipy.stats import norm\n'), 
((8391, 8439), 'numpy.concatenate', 'np.concatenate', (['[relevant, noise_vector]'], {'axis': '(1)'}), '([relevant, noise_vector], axis=1)\n', (8405, 8439), True, 'import numpy as np\n'), ((8771, 8781), 'numpy.sin', 'np.sin', (['x1'], {}), '(x1)\n', (8777, 8781), True, 'import numpy as np\n'), ((8793, 8825), 'numpy.concatenate', 'np.concatenate', (['[x1, x2]'], {'axis': '(1)'}), '([x1, x2], axis=1)\n', (8807, 8825), True, 'import numpy as np\n'), ((3025, 3053), 'h5py.File', 'h5py.File', (['dataset_file', '"""r"""'], {}), "(dataset_file, 'r')\n", (3034, 3053), False, 'import h5py\n'), ((3255, 3280), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3270, 3280), False, 'import os\n'), ((3371, 3399), 'h5py.File', 'h5py.File', (['dataset_file', '"""r"""'], {}), "(dataset_file, 'r')\n", (3380, 3399), False, 'import h5py\n'), ((3722, 3735), 'numpy.argsort', 'np.argsort', (['t'], {}), '(t)\n', (3732, 3735), True, 'import numpy as np\n'), ((4932, 4957), 'torch.nn.Dropout', 'nn.Dropout', (['dropout', '(True)'], {}), '(dropout, True)\n', (4942, 4957), True, 'import torch.nn as nn\n'), ((6881, 6892), 'numpy.array', 'np.array', (['o'], {}), '(o)\n', (6889, 6892), True, 'import numpy as np\n'), ((7579, 7597), 'torch.is_tensor', 'torch.is_tensor', (['o'], {}), '(o)\n', (7594, 7597), False, 'import torch\n'), ((8265, 8275), 'numpy.sin', 'np.sin', (['x1'], {}), '(x1)\n', (8271, 8275), True, 'import numpy as np\n'), ((2174, 2206), 'torch.randperm', 'torch.randperm', (['self.dataset_len'], {}), '(self.dataset_len)\n', (2188, 2206), False, 'import torch\n'), ((5350, 5363), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (5357, 5363), True, 'import torch.nn as nn\n'), ((8276, 8286), 'numpy.cos', 'np.cos', (['x2'], {}), '(x2)\n', (8282, 8286), True, 'import numpy as np\n'), ((8646, 8685), 'numpy.random.uniform', 'np.random.uniform', (['(-math.pi)', 'math.pi', 'n'], {}), '(-math.pi, math.pi, n)\n', (8663, 8685), True, 'import numpy as np\n'), ((8709, 
8748), 'numpy.random.uniform', 'np.random.uniform', (['(-math.pi)', 'math.pi', 'n'], {}), '(-math.pi, math.pi, n)\n', (8726, 8748), True, 'import numpy as np\n'), ((5409, 5422), 'torch.nn.SELU', 'nn.SELU', (['(True)'], {}), '(True)\n', (5416, 5422), True, 'import torch.nn as nn\n'), ((8169, 8195), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (8186, 8195), True, 'import numpy as np\n'), ((8219, 8245), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (8236, 8245), True, 'import numpy as np\n'), ((5471, 5483), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5481, 5483), True, 'import torch.nn as nn\n'), ((5529, 5538), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (5536, 5538), True, 'import torch.nn as nn\n')] |
import mrl
import numpy as np
import torch, torch.nn.functional as F
import os
class GoalEnvReward(mrl.Module):
  """Thin mrl module that exposes the environment's own compute_reward as
  the agent's goal-reward function."""

  def __init__(self):
    """Wraps environment's compute reward function"""
    super().__init__(
        'goal_reward', required_agent_modules=['env'], locals=locals())

  def _setup(self):
    # Both checks guard against wiring this module to a non-goal env.
    assert self.env.goal_env, "Environment must be a goal environment!"
    assert hasattr(self.env, 'compute_reward'), "Environment must have compute reward defined!"

  def __call__(self, achieved_goals, goals, info):
    # Delegate straight to the wrapped environment.
    return self.env.compute_reward(achieved_goals, goals, info)
class NeighborReward(mrl.Module):
  """Learned intrinsic reward based on a contrastive goal embedding.

  Trains an embedding network on (previous_ag, ag) pairs from the replay
  buffer so that one-step transition neighbors embed close together; at
  reward time returns 0 when the achieved goal is predicted to be a
  neighbor of the desired goal and -1 otherwise.

  Fix: the original class defined ``load`` twice; the second definition
  (restoring only 'random_process', an attribute this class never sets)
  shadowed the first, so the optimizer state written by ``save`` was never
  restored. The stray duplicate has been removed.
  """

  def __init__(self, max_neighbor_distance = 1, optimize_every = 5, batch_size = 1000, temperature = 1.):
    """Wraps environment's compute reward function. Should probably only be used for first-visit achievment."""
    super().__init__(
        'goal_reward', required_agent_modules=['replay_buffer', 'neighbor_embedding_network'], locals=locals())
    self.step = 0
    self.optimize_every = optimize_every
    self.batch_size = batch_size
    self.temperature = temperature
    # Only immediate (1-step) neighbors are supported for now.
    if max_neighbor_distance != 1:  # this is the number of steps from which to count two goals as neighbors.
      raise NotImplementedError

  def _setup(self):
    assert self.env.goal_env, "Environment must be a goal environment!"
    assert hasattr(self.env, 'compute_reward'), "Environment must have compute reward defined!"
    # Reuse the critic hyperparameters for the embedding optimizer.
    self.optimizer = torch.optim.Adam(
        self.neighbor_embedding_network.model.parameters(),
        lr=self.config.critic_lr,  # just using critic hparams for now
        weight_decay=self.config.critic_weight_decay)

  def _optimize(self):
    """Every `optimize_every` calls, take one contrastive step on the embedding."""
    pag_buffer = self.replay_buffer.buffer.BUFF.buffer_previous_ag
    ag_buffer = self.replay_buffer.buffer.BUFF.buffer_ag
    self.step += 1
    if self.step % self.optimize_every == 0 and len(ag_buffer):
      sample_idxs = np.random.randint(len(ag_buffer), size=self.batch_size)
      ags = ag_buffer.get_batch(sample_idxs)
      pos = pag_buffer.get_batch(sample_idxs)
      # Swap the first halves so the (anchor, positive) relation stays symmetric.
      temp = ags[:len(ags) // 2].copy()
      ags[:len(ags) // 2] = pos[:len(ags) // 2]
      pos[:len(ags) // 2] = temp
      # Negatives: roll positives by one so each anchor pairs with another sample's positive.
      neg = np.roll(pos, 1, axis=0)
      ags = self.torch(ags)
      pos = self.torch(pos)
      neg = self.torch(neg)
      # Embed all three sets in one forward pass, then split back apart.
      embs = self.neighbor_embedding_network(torch.cat((ags, pos, neg), dim=0))
      ags, pos, neg = torch.chunk(embs, 3)
      pos_logits = -self.temperature * torch.norm(ags - pos, dim=1)
      neg_logits = -self.temperature * torch.norm(ags - neg, dim=1)
      # Soft targets (0.99 / 0.01) smooth the binary neighbor labels.
      # NOTE(review): exp() of a logit being fed to the *_with_logits loss looks
      # suspicious — possibly binary_cross_entropy was intended; confirm.
      loss = F.binary_cross_entropy_with_logits(torch.exp(pos_logits), torch.ones_like(pos_logits) * 0.99) +\
          F.binary_cross_entropy_with_logits(torch.exp(neg_logits), torch.ones_like(pos_logits) * 0.01)
      self.logger.add_tabular('intrinsic_reward_loss', self.numpy(loss))
      self.optimizer.zero_grad()
      loss.backward()
      self.optimizer.step()

  def __call__(self, achieved_goals, goals, info):
    """Should return 0 for ags, gs that are predicted to be neighbors, -1 otherwise, as a numpy array"""
    ags = achieved_goals.reshape(-1, achieved_goals.shape[-1])
    dgs = goals.reshape(-1, achieved_goals.shape[-1])
    ags = self.torch(ags)
    dgs = self.torch(dgs)
    embs = self.neighbor_embedding_network(torch.cat((ags, dgs), dim=0))
    ags, dgs = torch.chunk(embs, 2)
    # P(neighbor) = exp(-temperature * distance); threshold at 0.5.
    preds = torch.exp(-self.temperature * torch.norm(ags - dgs, dim=1))
    return -self.numpy(preds < 0.5).astype(np.float32)

  def save(self, save_folder : str):
    """Persist the embedding optimizer state next to the module name."""
    path = os.path.join(save_folder, self.module_name + '.pt')
    torch.save({
        'opt_state_dict': self.optimizer.state_dict()
    }, path)

  def load(self, save_folder : str):
    """Restore the optimizer state written by `save`."""
    path = os.path.join(save_folder, self.module_name + '.pt')
    checkpoint = torch.load(path)
    self.optimizer.load_state_dict(checkpoint['opt_state_dict'])
"torch.ones_like",
"numpy.roll",
"torch.load",
"os.path.join",
"torch.exp",
"torch.norm",
"torch.chunk",
"torch.cat"
] | [((3566, 3586), 'torch.chunk', 'torch.chunk', (['embs', '(2)'], {}), '(embs, 2)\n', (3577, 3586), False, 'import torch, torch.nn.functional as F\n'), ((3826, 3877), 'os.path.join', 'os.path.join', (['save_folder', "(self.module_name + '.pt')"], {}), "(save_folder, self.module_name + '.pt')\n", (3838, 3877), False, 'import os\n'), ((4009, 4060), 'os.path.join', 'os.path.join', (['save_folder', "(self.module_name + '.pt')"], {}), "(save_folder, self.module_name + '.pt')\n", (4021, 4060), False, 'import os\n'), ((4078, 4094), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (4088, 4094), False, 'import torch, torch.nn.functional as F\n'), ((2291, 2314), 'numpy.roll', 'np.roll', (['pos', '(1)'], {'axis': '(0)'}), '(pos, 1, axis=0)\n', (2298, 2314), True, 'import numpy as np\n'), ((2548, 2568), 'torch.chunk', 'torch.chunk', (['embs', '(3)'], {}), '(embs, 3)\n', (2559, 2568), False, 'import torch, torch.nn.functional as F\n'), ((3521, 3549), 'torch.cat', 'torch.cat', (['(ags, dgs)'], {'dim': '(0)'}), '((ags, dgs), dim=0)\n', (3530, 3549), False, 'import torch, torch.nn.functional as F\n'), ((2491, 2524), 'torch.cat', 'torch.cat', (['(ags, pos, neg)'], {'dim': '(0)'}), '((ags, pos, neg), dim=0)\n', (2500, 2524), False, 'import torch, torch.nn.functional as F\n'), ((2609, 2637), 'torch.norm', 'torch.norm', (['(ags - pos)'], {'dim': '(1)'}), '(ags - pos, dim=1)\n', (2619, 2637), False, 'import torch, torch.nn.functional as F\n'), ((2679, 2707), 'torch.norm', 'torch.norm', (['(ags - neg)'], {'dim': '(1)'}), '(ags - neg, dim=1)\n', (2689, 2707), False, 'import torch, torch.nn.functional as F\n'), ((3689, 3717), 'torch.norm', 'torch.norm', (['(ags - dgs)'], {'dim': '(1)'}), '(ags - dgs, dim=1)\n', (3699, 3717), False, 'import torch, torch.nn.functional as F\n'), ((2784, 2805), 'torch.exp', 'torch.exp', (['pos_logits'], {}), '(pos_logits)\n', (2793, 2805), False, 'import torch, torch.nn.functional as F\n'), ((2894, 2915), 'torch.exp', 'torch.exp', (['neg_logits'], 
{}), '(neg_logits)\n', (2903, 2915), False, 'import torch, torch.nn.functional as F\n'), ((2807, 2834), 'torch.ones_like', 'torch.ones_like', (['pos_logits'], {}), '(pos_logits)\n', (2822, 2834), False, 'import torch, torch.nn.functional as F\n'), ((2917, 2944), 'torch.ones_like', 'torch.ones_like', (['pos_logits'], {}), '(pos_logits)\n', (2932, 2944), False, 'import torch, torch.nn.functional as F\n')] |
import math
from formats.sound import sadl
from formats.sound import sample_transform
import pygame as pg
import numpy as np
from pg_utils.sound.StreamPlayerAbstract import StreamPlayerAbstract
class SADLStreamPlayer(StreamPlayerAbstract):
    """Streaming player for SADL audio: decodes samples incrementally into a
    pre-allocated pygame sound buffer so playback can start before the whole
    stream has been decoded."""

    def __init__(self):
        super(SADLStreamPlayer, self).__init__()
        self.sadl: sadl.SADL = None
        # Target playback format comes from the initialized pygame mixer:
        # index 0 of get_init() is the sample rate, index 2 the channel count.
        self.target_rate = pg.mixer.get_init()[0]
        self.target_channels = pg.mixer.get_init()[2]

    def add_samples(self, first_init=False):
        """Decode the next chunk of SADL samples and copy it into the buffer.

        Sets loading/loading_finished flags when the decoder is exhausted.
        """
        sample_steps = 20
        if first_init:
            sample_steps *= 3  # If the sound gets cut increase this number (slower load, better playback)
        new_samples = np.array(self.sadl.decode(sample_steps))
        # Convert decoded audio to the mixer's channel count and sample rate.
        new_samples = sample_transform.change_channels(new_samples, self.target_channels)
        new_samples = sample_transform.change_sample_rate(new_samples, self.sadl.sample_rate, self.target_rate)
        # Swap to the (frames, channels) layout pygame's sample array uses —
        # presumably the decoder yields (channels, frames); confirm.
        new_samples = new_samples.swapaxes(0, 1)
        if new_samples.shape[0] == 0:
            # Decoder produced nothing: the stream is fully decoded.
            self.loading = False
            self.loading_finished = True
            return
        # Copy into the pre-allocated buffer, clipping at the buffer end.
        copy_size = min(new_samples.shape[0], self.sound_buffer.shape[0] - self.buffer_offset)
        self.sound_buffer[self.buffer_offset:self.buffer_offset + copy_size] = new_samples[:copy_size]
        self.buffer_offset += copy_size

    def start_sound(self, snd_obj: sadl.SADL, loops=0):
        """Begin (or restart) playback of *snd_obj*.

        On the first call for a given SADL object, allocates a silent buffer
        sized for the full resampled stream and decodes an initial chunk.
        """
        if self.sound_obj is not None:
            self.sound_obj.stop()
        if self.sadl is not snd_obj:
            self.sadl = snd_obj
            # Buffer length after resampling from the SADL rate to the mixer rate.
            alloc_size = int(math.ceil(snd_obj.num_samples * self.target_rate / snd_obj.sample_rate))
            self.sound_obj = pg.sndarray.make_sound(np.zeros((alloc_size, self.target_channels), dtype=np.int16))
            self.sound_buffer = pg.sndarray.samples(self.sound_obj)
            self.loading_finished = False
            self.buffer_offset = 0
            self.add_samples(first_init=True)
            # Keep streaming in later add_samples calls unless already done.
            if not self.loading_finished:
                self.loading = True
        self.sound_obj.set_volume(self.volume)
        self.sound_obj.play(loops=loops)
| [
"math.ceil",
"formats.sound.sample_transform.change_channels",
"numpy.zeros",
"formats.sound.sample_transform.change_sample_rate",
"pygame.sndarray.samples",
"pygame.mixer.get_init"
] | [((744, 811), 'formats.sound.sample_transform.change_channels', 'sample_transform.change_channels', (['new_samples', 'self.target_channels'], {}), '(new_samples, self.target_channels)\n', (776, 811), False, 'from formats.sound import sample_transform\n'), ((834, 927), 'formats.sound.sample_transform.change_sample_rate', 'sample_transform.change_sample_rate', (['new_samples', 'self.sadl.sample_rate', 'self.target_rate'], {}), '(new_samples, self.sadl.sample_rate,\n self.target_rate)\n', (869, 927), False, 'from formats.sound import sample_transform\n'), ((380, 399), 'pygame.mixer.get_init', 'pg.mixer.get_init', ([], {}), '()\n', (397, 399), True, 'import pygame as pg\n'), ((434, 453), 'pygame.mixer.get_init', 'pg.mixer.get_init', ([], {}), '()\n', (451, 453), True, 'import pygame as pg\n'), ((1789, 1824), 'pygame.sndarray.samples', 'pg.sndarray.samples', (['self.sound_obj'], {}), '(self.sound_obj)\n', (1808, 1824), True, 'import pygame as pg\n'), ((1570, 1641), 'math.ceil', 'math.ceil', (['(snd_obj.num_samples * self.target_rate / snd_obj.sample_rate)'], {}), '(snd_obj.num_samples * self.target_rate / snd_obj.sample_rate)\n', (1579, 1641), False, 'import math\n'), ((1695, 1755), 'numpy.zeros', 'np.zeros', (['(alloc_size, self.target_channels)'], {'dtype': 'np.int16'}), '((alloc_size, self.target_channels), dtype=np.int16)\n', (1703, 1755), True, 'import numpy as np\n')] |
import numpy as np
from skimage import morphology
import matplotlib.pyplot as plt

# Binary X-shaped test image.
a = np.array(
    [[0,1,1,1,0,0,0,1,1,1,0],
     [0,1,1,1,0,0,0,1,1,1,0],
     [0,0,1,1,1,0,1,1,1,0,0],
     [0,0,0,1,1,1,1,1,0,0,0],
     [0,0,0,0,1,1,1,0,0,0,0],
     [0,0,0,0,1,1,1,0,0,0,0],
     [0,0,0,0,1,1,1,0,0,0,0],
     [0,0,0,1,1,1,1,1,0,0,0],
     [0,0,1,1,1,0,1,1,1,0,0],
     [0,1,1,1,0,0,0,1,1,1,0],
     [0,1,1,1,0,0,0,1,1,1,0]])

# Reduce the shape to its 1-pixel-wide skeleton.
skel = morphology.skeletonize(a).astype('bool')

# Overlay: original shape in gray (125), skeleton on top in white (255).
b = np.zeros(a.shape)
b[a==1] = 125
b[skel==1] = 255

# Fix: interpolation must be the string 'none' to show raw pixels;
# interpolation=None falls back to the rcParams default and smooths the image.
plt.imshow(b, cmap=plt.cm.gray, interpolation='none')
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.array",
"numpy.zeros",
"skimage.morphology.skeletonize",
"matplotlib.pyplot.show"
] | [((88, 505), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0], [0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0], [0, \n 0, 1, 1, 1, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0,\n 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0,\n 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1,\n 1, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0], [0, 1, 1, 1, 0,\n 0, 0, 1, 1, 1, 0]]'], {}), '([[0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0], [0, 1, 1, 1, 0, 0, 0, 1, 1, 1,\n 0], [0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0\n ], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [\n 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0], [0,\n 1, 1, 1, 0, 0, 0, 1, 1, 1, 0]])\n', (96, 505), True, 'import numpy as np\n'), ((483, 500), 'numpy.zeros', 'np.zeros', (['a.shape'], {}), '(a.shape)\n', (491, 500), True, 'import numpy as np\n'), ((533, 584), 'matplotlib.pyplot.imshow', 'plt.imshow', (['b'], {'cmap': 'plt.cm.gray', 'interpolation': 'None'}), '(b, cmap=plt.cm.gray, interpolation=None)\n', (543, 584), True, 'import matplotlib.pyplot as plt\n'), ((585, 595), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (593, 595), True, 'import matplotlib.pyplot as plt\n'), ((437, 462), 'skimage.morphology.skeletonize', 'morphology.skeletonize', (['a'], {}), '(a)\n', (459, 462), False, 'from skimage import morphology\n')] |
from utils.critique import sample_users
from utils.modelnames import critiquing_models
import numpy as np
import pandas as pd
def critiquing(matrix_Train, matrix_Test, keyphrase_freq, dataset_name, model,
               parameters_row, critiquing_model_name, item_keyphrase_freq=None, num_users_sampled=10,
               num_items_sampled=5, max_iteration_threshold=10,topk=10,lamb=10,keyphrase_selection_method="pop"):
    """Run a critiquing simulation over a fixed random sample of test users.

    Instantiates the critiquing model named by *critiquing_model_name* from
    the critiquing_models registry and delegates to its start_critiquing(),
    returning the resulting pandas DataFrame (one row per critiquing event,
    columns defined below).
    """
    num_users = matrix_Train.shape[0]
    num_keyphrases = keyphrase_freq.shape[1]
    # Popularity of each keyphrase across items, used to pick which keyphrase
    # to critique. NOTE(review): the default item_keyphrase_freq=None would
    # make this np.sum raise — callers apparently always pass an explicit
    # matrix; confirm.
    keyphrase_popularity = np.sum(item_keyphrase_freq, axis=1)
    columns = ['user_id', 'item_id', 'target_rank', 'iteration', 'critiqued_keyphrase', 'item_rank', 'item_score', 'num_existing_keyphrases', 'result', 'lambda']
    df = pd.DataFrame(columns=columns)
    row = {}
    target_ranks = [1]
    # Randomly select test users (fixed seed for reproducible sampling)
    np.random.seed(1201)
    test_users = np.random.choice(num_users, num_users_sampled, replace=False)
    critiquing_model = critiquing_models[critiquing_model_name](keyphrase_freq=keyphrase_freq,
                                                                item_keyphrase_freq=item_keyphrase_freq,
                                                                row=row,
                                                                matrix_Train=matrix_Train,
                                                                matrix_Test=matrix_Test,
                                                                test_users=test_users,
                                                                target_ranks=target_ranks,
                                                                num_items_sampled=num_items_sampled,
                                                                num_keyphrases=num_keyphrases,
                                                                df=df,
                                                                max_iteration_threshold=max_iteration_threshold,
                                                                keyphrase_popularity=keyphrase_popularity,
                                                                dataset_name=dataset_name,
                                                                model=model,
                                                                parameters_row=parameters_row,
                                                                topk=topk,
                                                                lamb=lamb,
                                                                keyphrase_selection_method=keyphrase_selection_method)
    df = critiquing_model.start_critiquing()
    return df
| [
"pandas.DataFrame",
"numpy.sum",
"numpy.random.seed",
"numpy.random.choice"
] | [((536, 571), 'numpy.sum', 'np.sum', (['item_keyphrase_freq'], {'axis': '(1)'}), '(item_keyphrase_freq, axis=1)\n', (542, 571), True, 'import numpy as np\n'), ((744, 773), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (756, 773), True, 'import pandas as pd\n'), ((850, 870), 'numpy.random.seed', 'np.random.seed', (['(1201)'], {}), '(1201)\n', (864, 870), True, 'import numpy as np\n'), ((888, 949), 'numpy.random.choice', 'np.random.choice', (['num_users', 'num_users_sampled'], {'replace': '(False)'}), '(num_users, num_users_sampled, replace=False)\n', (904, 949), True, 'import numpy as np\n')] |
from __future__ import division
from builtins import range
from sciunit import Score
import numpy
from sciunit.utils import assert_dimensionless
class ZScore_somaticSpiking(Score):
    """
    Mean of Z scores. A float indicating the sum of standardized difference
    from reference means for somatic spiking features.
    """

    def __init__(self, score, related_data={}):
        if not isinstance(score, Exception) and not isinstance(score, float):
            # Fix: InvalidScoreError was an unresolved name (NameError at
            # raise time); import it from sciunit, which this module already
            # depends on.
            from sciunit.errors import InvalidScoreError
            raise InvalidScoreError("Score must be a float.")
        else:
            super(ZScore_somaticSpiking, self).__init__(score, related_data=related_data)

    @classmethod
    def compute(cls, observation, prediction):
        """Computes average of z-scores from observation and prediction for somatic spiking features.

        Returns (score_avg, feature_results_dict, features_names, bad_features)
        where bad_features lists features whose error was nan/inf or raised.
        """
        feature_errors = numpy.array([])
        features_names = list(observation.keys())
        feature_results_dict = {}
        bad_features = []
        for feature_name in features_names:
            p_value = prediction[feature_name]['feature mean']
            o_mean = float(observation[feature_name]['Mean'])
            o_std = float(observation[feature_name]['Std'])
            # Unused in the score itself, but keeps the KeyError check that a
            # prediction must carry a 'feature sd' entry.
            p_std = prediction[feature_name]['feature sd']
            try:
                feature_error = abs(p_value - o_mean) / o_std
                feature_error = assert_dimensionless(feature_error)
            except ZeroDivisionError:
                # Fix: the original assigned float("inf") twice here.
                feature_error = float("inf")
            except (TypeError, AssertionError) as e:
                feature_error = e
            feature_results_dict[feature_name] = feature_error
            # Fix: numpy.isnan raised a TypeError when feature_error was an
            # Exception; treat exception-valued errors as bad features instead.
            if isinstance(feature_error, Exception) \
                    or numpy.isnan(feature_error) or numpy.isinf(feature_error):
                bad_features.append(feature_name)
            else:
                feature_errors = numpy.append(feature_errors, feature_error)
        score_avg = numpy.nanmean(feature_errors)
        return score_avg, feature_results_dict, features_names, bad_features

    def __str__(self):
        return 'ZScore_avg = %.2f' % self.score
| [
"numpy.append",
"numpy.array",
"numpy.nanmean",
"numpy.isnan",
"sciunit.utils.assert_dimensionless",
"numpy.isinf"
] | [((815, 830), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (826, 830), False, 'import numpy\n'), ((2044, 2073), 'numpy.nanmean', 'numpy.nanmean', (['feature_errors'], {}), '(feature_errors)\n', (2057, 2073), False, 'import numpy\n'), ((1365, 1400), 'sciunit.utils.assert_dimensionless', 'assert_dimensionless', (['feature_error'], {}), '(feature_error)\n', (1385, 1400), False, 'from sciunit.utils import assert_dimensionless\n'), ((1821, 1847), 'numpy.isnan', 'numpy.isnan', (['feature_error'], {}), '(feature_error)\n', (1832, 1847), False, 'import numpy\n'), ((1851, 1877), 'numpy.isinf', 'numpy.isinf', (['feature_error'], {}), '(feature_error)\n', (1862, 1877), False, 'import numpy\n'), ((1983, 2026), 'numpy.append', 'numpy.append', (['feature_errors', 'feature_error'], {}), '(feature_errors, feature_error)\n', (1995, 2026), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
'''Tests for io.py'''
import pytest
import numpy as np
import os
import warnings
import shutil
import copy
from collections import OrderedDict as odict
import pyuvdata
from pyuvdata import UVCal, UVData, UVFlag
from pyuvdata.utils import parse_polstr, parse_jpolstr
import glob
import sys
from .. import io
from ..io import HERACal, HERAData
from ..datacontainer import DataContainer
from ..utils import polnum2str, polstr2num, jnum2str, jstr2num
from ..data import DATA_PATH
from hera_qm.data import DATA_PATH as QM_DATA_PATH
class Test_HERACal(object):
def setup_method(self):
self.fname_xx = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits")
self.fname_yy = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.yy.HH.uvc.omni.calfits")
self.fname_both = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.HH.uvcA.omni.calfits")
self.fname_t0 = os.path.join(DATA_PATH, 'test_input/zen.2458101.44615.xx.HH.uv.abs.calfits_54x_only')
self.fname_t1 = os.path.join(DATA_PATH, 'test_input/zen.2458101.45361.xx.HH.uv.abs.calfits_54x_only')
self.fname_t2 = os.path.join(DATA_PATH, 'test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only')
def test_init(self):
hc = HERACal(self.fname_xx)
assert hc.filepaths == [self.fname_xx]
hc = HERACal([self.fname_xx, self.fname_yy])
assert hc.filepaths == [self.fname_xx, self.fname_yy]
hc = HERACal((self.fname_xx, self.fname_yy))
assert hc.filepaths == [self.fname_xx, self.fname_yy]
with pytest.raises(TypeError):
hc = HERACal([0, 1])
with pytest.raises(ValueError):
hc = HERACal(None)
def test_read(self):
# test one file with both polarizations and a non-None total quality array
hc = HERACal(self.fname_both)
gains, flags, quals, total_qual = hc.read()
uvc = UVCal()
uvc.read_calfits(self.fname_both)
np.testing.assert_array_equal(uvc.gain_array[0, 0, :, :, 0].T, gains[9, parse_jpolstr('jxx', x_orientation=hc.x_orientation)])
np.testing.assert_array_equal(uvc.flag_array[0, 0, :, :, 0].T, flags[9, parse_jpolstr('jxx', x_orientation=hc.x_orientation)])
np.testing.assert_array_equal(uvc.quality_array[0, 0, :, :, 0].T, quals[9, parse_jpolstr('jxx', x_orientation=hc.x_orientation)])
np.testing.assert_array_equal(uvc.total_quality_array[0, :, :, 0].T, total_qual[parse_jpolstr('jxx', x_orientation=hc.x_orientation)])
np.testing.assert_array_equal(np.unique(uvc.freq_array), hc.freqs)
np.testing.assert_array_equal(np.unique(uvc.time_array), hc.times)
assert hc.pols == [parse_jpolstr('jxx', x_orientation=hc.x_orientation), parse_jpolstr('jyy', x_orientation=hc.x_orientation)]
assert set([ant[0] for ant in hc.ants]) == set(uvc.ant_array)
# test list loading
hc = HERACal([self.fname_xx, self.fname_yy])
gains, flags, quals, total_qual = hc.read()
assert len(gains.keys()) == 36
assert len(flags.keys()) == 36
assert len(quals.keys()) == 36
assert hc.freqs.shape == (1024,)
assert hc.times.shape == (3,)
assert sorted(hc.pols) == [parse_jpolstr('jxx', x_orientation=hc.x_orientation), parse_jpolstr('jyy', x_orientation=hc.x_orientation)]
def test_read_select(self):
# test read multiple files and select times
hc = io.HERACal([self.fname_t0, self.fname_t1, self.fname_t2])
g, _, _, _ = hc.read()
g2, _, _, _ = hc.read(times=hc.times[30:90])
np.testing.assert_array_equal(g2[54, 'Jee'], g[54, 'Jee'][30:90, :])
# test read multiple files and select freqs/chans
hc = io.HERACal([self.fname_t0, self.fname_t1, self.fname_t2])
g, _, _, _ = hc.read()
g2, _, _, _ = hc.read(frequencies=hc.freqs[0:100])
g3, _, _, _ = hc.read(freq_chans=np.arange(100))
np.testing.assert_array_equal(g2[54, 'Jee'], g[54, 'Jee'][:, 0:100])
np.testing.assert_array_equal(g3[54, 'Jee'], g[54, 'Jee'][:, 0:100])
# test select on antenna numbers
hc = io.HERACal([self.fname_xx, self.fname_yy])
g, _, _, _ = hc.read(antenna_nums=[9, 10])
hc2 = io.HERACal(self.fname_both)
g2, _, _, _ = hc.read(antenna_nums=[9, 10])
for k in g2:
assert k[0] in [9, 10]
np.testing.assert_array_equal(g[k], g2[k])
# test select on pols
hc = io.HERACal(self.fname_xx)
g, _, _, _ = hc.read()
hc2 = io.HERACal(self.fname_both)
g2, _, _, _ = hc.read(pols=['Jee'])
for k in g2:
np.testing.assert_array_equal(g[k], g2[k])
def test_write(self):
hc = HERACal(self.fname_both)
gains, flags, quals, total_qual = hc.read()
for key in gains.keys():
gains[key] *= 2.0 + 1.0j
flags[key] = np.logical_not(flags[key])
quals[key] *= 2.0
for key in total_qual.keys():
total_qual[key] *= 2
hc.update(gains=gains, flags=flags, quals=quals, total_qual=total_qual)
hc.write_calfits('test.calfits', clobber=True)
gains_in, flags_in, quals_in, total_qual_in = hc.read()
hc2 = HERACal('test.calfits')
gains_out, flags_out, quals_out, total_qual_out = hc2.read()
for key in gains_in.keys():
np.testing.assert_array_equal(gains_in[key] * (2.0 + 1.0j), gains_out[key])
np.testing.assert_array_equal(np.logical_not(flags_in[key]), flags_out[key])
np.testing.assert_array_equal(quals_in[key] * (2.0), quals_out[key])
for key in total_qual.keys():
np.testing.assert_array_equal(total_qual_in[key] * (2.0), total_qual_out[key])
os.remove('test.calfits')
@pytest.mark.filterwarnings("ignore:It seems that the latitude and longitude are in radians")
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
@pytest.mark.filterwarnings("ignore:invalid value encountered in double_scalars")
class Test_HERAData(object):
def setup_method(self):
self.uvh5_1 = os.path.join(DATA_PATH, "zen.2458116.61019.xx.HH.XRS_downselected.uvh5")
self.uvh5_2 = os.path.join(DATA_PATH, "zen.2458116.61765.xx.HH.XRS_downselected.uvh5")
self.miriad_1 = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
self.miriad_2 = os.path.join(DATA_PATH, "zen.2458043.13298.xx.HH.uvORA")
self.uvfits = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvA.vis.uvfits')
self.four_pol = [os.path.join(DATA_PATH, 'zen.2457698.40355.{}.HH.uvcA'.format(pol))
for pol in ['xx', 'yy', 'xy', 'yx']]
def test_init(self):
# single uvh5 file
hd = HERAData(self.uvh5_1)
assert hd.filepaths == [self.uvh5_1]
for meta in hd.HERAData_metas:
assert getattr(hd, meta) is not None
assert len(hd.freqs) == 1024
assert len(hd.bls) == 3
assert len(hd.times) == 60
assert len(hd.lsts) == 60
assert hd._writers == {}
# multiple uvh5 files
files = [self.uvh5_1, self.uvh5_2]
hd = HERAData(files)
individual_hds = {files[0]: HERAData(files[0]), files[1]: HERAData(files[1])}
assert hd.filepaths == files
for meta in hd.HERAData_metas:
assert getattr(hd, meta) is not None
for f in files:
assert len(hd.freqs[f]) == 1024
np.testing.assert_array_equal(hd.freqs[f], individual_hds[f].freqs)
assert len(hd.bls[f]) == 3
np.testing.assert_array_equal(hd.bls[f], individual_hds[f].bls)
assert len(hd.times[f]) == 60
np.testing.assert_array_equal(hd.times[f], individual_hds[f].times)
assert len(hd.lsts[f]) == 60
np.testing.assert_array_equal(hd.lsts[f], individual_hds[f].lsts)
assert not hasattr(hd, '_writers')
# miriad
hd = HERAData(self.miriad_1, filetype='miriad')
assert hd.filepaths == [self.miriad_1]
for meta in hd.HERAData_metas:
assert getattr(hd, meta) is None
# uvfits
hd = HERAData(self.uvfits, filetype='uvfits')
assert hd.filepaths == [self.uvfits]
for meta in hd.HERAData_metas:
assert getattr(hd, meta) is None
# test errors
with pytest.raises(TypeError):
hd = HERAData([1, 2])
with pytest.raises(ValueError):
hd = HERAData(None)
with pytest.raises(NotImplementedError):
hd = HERAData(self.uvh5_1, 'not a real type')
with pytest.raises(IOError):
hd = HERAData('fake path')
def test_add(self):
    """Adding two HERAData objects that differ only in polarization should
    yield a combined object indexing both polarizations."""
    base = HERAData(self.uvh5_1)
    base.read()
    # clone and relabel the polarization so the two objects can be summed
    other = copy.deepcopy(base)
    other.polarization_array[0] = -6
    combined = base + other
    assert len(combined._polnum_indices) == 2
def test_select(self):
    """select() should prune blt slices and pol indices consistently with the data."""
    hd = HERAData(self.uvh5_1)
    hd.read()
    # build a two-pol object so polarization selection is non-trivial
    hd2 = copy.deepcopy(hd)
    hd2.polarization_array[0] = -6
    hd += hd2
    # blt select: data for the kept baseline must be unchanged
    d1 = hd.get_data(53, 54, 'xx')
    hd.select(bls=[(53, 54)])
    assert len(hd._blt_slices) == 1
    d2 = hd.get_data(53, 54, 'xx')
    np.testing.assert_array_almost_equal(d1, d2)
    # time select: keeping the last 5 integrations keeps the last 5 rows
    hd.select(times=np.unique(hd.time_array)[-5:])
    d3 = hd.get_data(53, 54, 'xx')
    np.testing.assert_array_almost_equal(d2[-5:], d3)
    # pol select
    hd.select(polarizations=['yy'])
    assert len(hd._polnum_indices) == 1
def test_reset(self):
    """reset() should drop the data arrays but keep filepaths and metadata."""
    hd = HERAData(self.uvh5_1)
    hd.read()
    hd.reset()
    # data is released...
    assert hd.data_array is None
    assert hd.flag_array is None
    assert hd.nsample_array is None
    # ...but file bookkeeping and metadata survive
    assert hd.filepaths == [self.uvh5_1]
    for meta in hd.HERAData_metas:
        assert getattr(hd, meta) is not None
    assert len(hd.freqs) == 1024
    assert len(hd.bls) == 3
    assert len(hd.times) == 60
    assert len(hd.lsts) == 60
    assert hd._writers == {}
def test_get_metadata_dict(self):
    """get_metadata_dict should expose every declared meta with the expected sizes."""
    hd = HERAData(self.uvh5_1)
    metas = hd.get_metadata_dict()
    assert all(key in metas for key in hd.HERAData_metas)
    expected_lengths = {'freqs': 1024, 'bls': 3, 'times': 60, 'lsts': 60}
    for key, length in expected_lengths.items():
        assert len(metas[key]) == length
    # the per-baseline time/lst dicts must collapse to the global arrays
    for global_key, by_bl_key in (('times', 'times_by_bl'), ('lsts', 'lsts_by_bl')):
        np.testing.assert_array_equal(metas[global_key],
                                      np.unique(list(metas[by_bl_key].values())))
def test_determine_blt_slicing(self):
    """Each baseline must map to a regular slice along the blt axis."""
    hd = HERAData(self.uvh5_1)
    for s in hd._blt_slices.values():
        assert isinstance(s, slice)
    for bl, s in hd._blt_slices.items():
        # the slice must select exactly the blt rows where (ant1, ant2) == bl
        np.testing.assert_array_equal(np.arange(180)[np.logical_and(hd.ant_1_array == bl[0],
                                                                    hd.ant_2_array == bl[1])], np.arange(180)[s])
    # test check for non-regular spacing: irregular blt_inds cannot be sliced
    with pytest.raises(NotImplementedError):
        hd.select(blt_inds=[0, 1, 3, 5, 23, 48])
        hd._determine_blt_slicing()
def test_determine_pol_indexing(self):
    """Polarization numbers must map to their slot along the pol axis."""
    single_pol = HERAData(self.uvh5_1)
    assert single_pol._polnum_indices == {-5: 0}
    # four-pol miriad files read along the polarization axis fill all four slots
    four_pol = HERAData(self.four_pol, filetype='miriad')
    four_pol.read(bls=[(53, 53)], axis='polarization')
    assert four_pol._polnum_indices == {-5: 0, -6: 1, -7: 2, -8: 3}
def test_get_slice(self):
    """_get_slice should agree with get_data for every supported key form."""
    hd = HERAData(self.uvh5_1)
    hd.read()
    for bl in hd.bls:
        np.testing.assert_array_equal(hd._get_slice(hd.data_array, bl), hd.get_data(bl))
    # conjugate baseline key
    np.testing.assert_array_equal(hd._get_slice(hd.data_array, (54, 53, 'EE')),
                                  hd.get_data((54, 53, 'EE')))
    # antpair key: result is a dict keyed on polarization string
    np.testing.assert_array_equal(hd._get_slice(hd.data_array, (53, 54))[parse_polstr('XX', x_orientation=hd.x_orientation)],
                                  hd.get_data((53, 54, 'EE')))
    # pol key: result is a dict keyed on antpair
    np.testing.assert_array_equal(hd._get_slice(hd.data_array, 'EE')[(53, 54)],
                                  hd.get_data((53, 54, 'EE')))
    # malformed keys raise
    with pytest.raises(KeyError):
        hd._get_slice(hd.data_array, (1, 2, 3, 4))
    # four-pol miriad: both baseline orientations agree with get_data
    hd = HERAData(self.four_pol, filetype='miriad')
    d, f, n = hd.read(bls=[(80, 81)])
    for p in d.pols():
        np.testing.assert_array_almost_equal(hd._get_slice(hd.data_array, (80, 81, p)),
                                             hd.get_data((80, 81, p)))
        np.testing.assert_array_almost_equal(hd._get_slice(hd.data_array, (81, 80, p)),
                                             hd.get_data((81, 80, p)))
def test_set_slice(self):
    """_set_slice should write visibilities back for every supported key form."""
    hd = HERAData(self.uvh5_1)
    hd.read()
    np.random.seed(21)
    # full (ant1, ant2, pol) keys
    for bl in hd.bls:
        new_vis = np.random.randn(60, 1024) + np.random.randn(60, 1024) * 1.0j
        hd._set_slice(hd.data_array, bl, new_vis)
        np.testing.assert_array_almost_equal(new_vis, hd.get_data(bl))
    # conjugate baseline key: stored data is conjugated
    new_vis = np.random.randn(60, 1024) + np.random.randn(60, 1024) * 1.0j
    hd._set_slice(hd.data_array, (54, 53, 'xx'), new_vis)
    np.testing.assert_array_almost_equal(np.conj(new_vis), hd.get_data((53, 54, 'xx')))
    # antpair key with a dict of pols
    new_vis = np.random.randn(60, 1024) + np.random.randn(60, 1024) * 1.0j
    hd._set_slice(hd.data_array, (53, 54), {'xx': new_vis})
    np.testing.assert_array_almost_equal(new_vis, hd.get_data((53, 54, 'xx')))
    # pol key with a dict of antpairs
    new_vis = np.random.randn(60, 1024) + np.random.randn(60, 1024) * 1.0j
    to_set = {(53, 54): new_vis, (54, 54): 2 * new_vis, (53, 53): 3 * new_vis}
    hd._set_slice(hd.data_array, 'XX', to_set)
    np.testing.assert_array_almost_equal(new_vis, hd.get_data((53, 54, 'xx')))
    # malformed keys raise
    with pytest.raises(KeyError):
        hd._set_slice(hd.data_array, (1, 2, 3, 4), None)
def test_build_datacontainers(self):
    """DataContainers returned by read() must carry data plus per-container metadata."""
    hd = HERAData(self.uvh5_1)
    d, f, n = hd.read()
    # data/flags/nsamples agree with the getter methods
    for bl in hd.bls:
        np.testing.assert_array_almost_equal(d[bl], hd.get_data(bl))
        np.testing.assert_array_almost_equal(f[bl], hd.get_flags(bl))
        np.testing.assert_array_almost_equal(n[bl], hd.get_nsamples(bl))
    for dc in [d, f, n]:
        assert isinstance(dc, DataContainer)
        # antpos spans the full array (52 ants); data_antpos only ants with data (2)
        for k in dc.antpos.keys():
            assert np.all(dc.antpos[k] == hd.antpos[k])
        assert len(dc.antpos) == 52
        assert len(hd.antpos) == 52
        for k in dc.data_antpos.keys():
            assert np.all(dc.data_antpos[k] == hd.data_antpos[k])
        assert len(dc.data_antpos) == 2
        assert len(hd.data_antpos) == 2
        assert np.all(dc.freqs == hd.freqs)
        assert np.all(dc.times == hd.times)
        assert np.all(dc.lsts == hd.lsts)
        # per-baseline times/lsts match and are symmetric under key reversal
        for k in dc.times_by_bl.keys():
            assert np.all(dc.times_by_bl[k] == hd.times_by_bl[k])
            assert np.all(dc.times_by_bl[k] == dc.times_by_bl[(k[1], k[0])])
            assert np.all(dc.lsts_by_bl[k] == hd.lsts_by_bl[k])
            assert np.all(dc.lsts_by_bl[k] == dc.lsts_by_bl[(k[1], k[0])])
def test_write_read_filter_cache_scratch(self):
    """Cover write_filter_cache_scratch with no skip_keys or filter directory.

    Most of write_filter_cache_scratch and all of read_filter_cache_scratch
    are covered in test_delay_filter.test_load_dayenu_filter_and_write();
    this covers the remaining odds and ends.
    """
    io.write_filter_cache_scratch(filter_cache={'crazy': 'universe'})
    # BUG FIX: cleanup used to run only after the asserts; a failing assert
    # would leak *.filter_cache files into the cwd and break later runs.
    try:
        # make sure file (and only one file) was written.
        assert len(glob.glob(os.getcwd() + '/*.filter_cache')) == 1
        # make sure read works and read cache is identical to written cache.
        cache = io.read_filter_cache_scratch(os.getcwd())
        assert cache['crazy'] == 'universe'
        assert len(cache) == 1
    finally:
        # always clean up cache files, pass or fail.
        for file in glob.glob(os.getcwd() + '/*.filter_cache'):
            os.remove(file)
@pytest.mark.filterwarnings("ignore:miriad does not support partial loading")
def test_read(self):
    """Partial and full reads across uvh5, miriad, and uvfits filetypes."""
    # uvh5: partial read by bls/frequencies/times
    hd = HERAData(self.uvh5_1)
    d, f, n = hd.read(bls=(53, 54, 'xx'), frequencies=hd.freqs[0:100], times=hd.times[0:10])
    assert hd.last_read_kwargs['bls'] == (53, 54, parse_polstr('XX'))
    assert hd.last_read_kwargs['polarizations'] is None
    for dc in [d, f, n]:
        assert len(dc) == 1
        assert list(dc.keys()) == [(53, 54, parse_polstr('XX', x_orientation=hd.x_orientation))]
        assert dc[53, 54, 'ee'].shape == (10, 100)
    # requesting a pol not in the file raises
    with pytest.raises(ValueError):
        d, f, n = hd.read(polarizations=['xy'])
    # assert return data = False
    o = hd.read(bls=[(53, 53), (53, 54)], return_data=False)
    assert o is None
    # assert __getitem__ isn't a read when key exists
    o = hd[(53, 53, 'ee')]
    assert len(hd._blt_slices) == 2
    # assert __getitem__ is a read when key does not exist
    o = hd[(54, 54, 'ee')]
    assert len(hd._blt_slices) == 1
    # test read with extra UVData kwargs
    hd = HERAData(self.uvh5_1)
    d, f, n = hd.read(bls=hd.bls[:2], frequencies=hd.freqs[:100], multidim_index=True)
    k = list(d.keys())[0]
    assert len(d) == 2
    assert d[k].shape == (hd.Ntimes, 100)
    # test read list: two files concatenated along blt
    hd = HERAData([self.uvh5_1, self.uvh5_2])
    d, f, n = hd.read(axis='blt')
    for dc in [d, f, n]:
        assert len(dc) == 3
        assert len(dc.times) == 120
        assert len(dc.lsts) == 120
        assert len(dc.freqs) == 1024
        for i in [53, 54]:
            for j in [53, 54]:
                assert (i, j, 'ee') in dc
        for bl in dc:
            assert dc[bl].shape == (120, 1024)
    # miriad: read everything first, then use that metadata for a partial read
    hd = HERAData(self.miriad_1, filetype='miriad')
    d, f, n = hd.read()
    hd = HERAData(self.miriad_1, filetype='miriad')
    d, f, n = hd.read(bls=(52, 53), polarizations=['XX'], frequencies=d.freqs[0:30], times=d.times[0:10])
    assert hd.last_read_kwargs['polarizations'] == ['XX']
    for dc in [d, f, n]:
        assert len(dc) == 1
        assert list(dc.keys()) == [(52, 53, parse_polstr('XX', x_orientation=hd.x_orientation))]
        assert dc[52, 53, 'ee'].shape == (10, 30)
    # metadata-only reads are not supported for miriad
    with pytest.raises(NotImplementedError):
        d, f, n = hd.read(read_data=False)
    # uvfits
    hd = HERAData(self.uvfits, filetype='uvfits')
    d, f, n = hd.read(bls=(0, 1, 'xx'), freq_chans=list(range(10)))
    assert hd.last_read_kwargs['freq_chans'] == list(range(10))
    for dc in [d, f, n]:
        assert len(dc) == 1
        assert list(dc.keys()) == [(0, 1, parse_polstr('XX', x_orientation=hd.x_orientation))]
        assert dc[0, 1, 'ee'].shape == (60, 10)
    # metadata-only reads are not supported for uvfits
    with pytest.raises(NotImplementedError):
        d, f, n = hd.read(read_data=False)
def test_getitem(self):
    """__getitem__ on a loaded HERAData must match get_data for every baseline."""
    hd = HERAData(self.uvh5_1)
    hd.read()
    for baseline in hd.bls:
        np.testing.assert_array_almost_equal(hd[baseline], hd.get_data(baseline))
def test_update(self):
    """Round trip: containers modified and pushed via update() come back unchanged."""
    hd = HERAData(self.uvh5_1)
    data, flags, nsamples = hd.read()
    # perturb every baseline so the round trip is non-trivial
    for key in hd.bls:
        data[key] *= (2.0 + 1.0j)
        flags[key] = np.logical_not(flags[key])
        nsamples[key] += 1
    hd.update(data=data, flags=flags, nsamples=nsamples)
    new_data, new_flags, new_nsamples = hd.build_datacontainers()
    for key in hd.bls:
        np.testing.assert_array_almost_equal(data[key], new_data[key])
        np.testing.assert_array_equal(flags[key], new_flags[key])
        np.testing.assert_array_equal(nsamples[key], new_nsamples[key])
def test_partial_write(self):
    """Incremental partial_write should round-trip baseline-by-baseline updates."""
    hd = HERAData(self.uvh5_1)
    assert hd._writers == {}
    # first baseline: double the data and write
    d, f, n = hd.read(bls=hd.bls[0])
    assert hd.last_read_kwargs['bls'] == (53, 53, parse_polstr('XX', x_orientation=hd.x_orientation))
    d[(53, 53, 'EE')] *= (2.0 + 1.0j)
    hd.partial_write('out.h5', data=d, clobber=True)
    assert 'out.h5' in hd._writers
    assert isinstance(hd._writers['out.h5'], HERAData)
    # the cached writer must share all metadata with the reader
    for meta in hd.HERAData_metas:
        try:
            assert np.all(getattr(hd, meta) == getattr(hd._writers['out.h5'], meta))
        except BaseException:
            # dict-valued metas need elementwise comparison
            for k in getattr(hd, meta).keys():
                assert np.all(getattr(hd, meta)[k] == getattr(hd._writers['out.h5'], meta)[k])
    # second baseline
    d, f, n = hd.read(bls=hd.bls[1])
    assert hd.last_read_kwargs['bls'] == (53, 54, parse_polstr('XX', x_orientation=hd.x_orientation))
    d[(53, 54, 'EE')] *= (2.0 + 1.0j)
    hd.partial_write('out.h5', data=d, clobber=True)
    # third baseline with inplace=True: in-memory data is updated too
    d, f, n = hd.read(bls=hd.bls[2])
    assert hd.last_read_kwargs['bls'] == (54, 54, parse_polstr('XX', x_orientation=hd.x_orientation))
    d[(54, 54, 'EE')] *= (2.0 + 1.0j)
    hd.partial_write('out.h5', data=d, clobber=True, inplace=True)
    d_after, _, _ = hd.build_datacontainers()
    np.testing.assert_array_almost_equal(d[(54, 54, 'EE')], d_after[(54, 54, 'EE')])
    # full round trip: everything written should be the original doubled
    hd = HERAData(self.uvh5_1)
    d, f, n = hd.read()
    hd2 = HERAData('out.h5')
    d2, f2, n2 = hd2.read()
    for bl in hd.bls:
        np.testing.assert_array_almost_equal(d[bl] * (2.0 + 1.0j), d2[bl])
        np.testing.assert_array_equal(f[bl], f2[bl])
        np.testing.assert_array_equal(n[bl], n2[bl])
    os.remove('out.h5')
    # test errors: miriad and multi-file objects don't support partial_write
    hd = HERAData(self.miriad_1, filetype='miriad')
    with pytest.raises(NotImplementedError):
        hd.partial_write('out.uv')
    hd = HERAData([self.uvh5_1, self.uvh5_2])
    with pytest.raises(NotImplementedError):
        hd.partial_write('out.h5')
    hd = HERAData(self.uvh5_1)
def test_iterate_over_bls(self):
    """iterate_over_bls yields (data, flags, nsamples) chunks of baselines.

    BUG FIX: several inner loops bound `dc` over (d, f, n) but then asserted
    on `d` only, so the flags and nsamples containers were never checked.
    Those asserts now use `dc`; flags/nsamples share keys and shapes with
    data (they come from the same build_datacontainers call).
    """
    hd = HERAData(self.uvh5_1)
    for (d, f, n) in hd.iterate_over_bls(Nbls=2):
        for dc in (d, f, n):
            assert len(dc.keys()) == 1 or len(dc.keys()) == 2
            assert list(dc.values())[0].shape == (60, 1024)
    hd = HERAData([self.uvh5_1, self.uvh5_2])
    for (d, f, n) in hd.iterate_over_bls():
        for dc in (d, f, n):
            assert len(dc.keys()) == 1
            assert list(dc.values())[0].shape == (120, 1024)
    # try cover include_autos = False.
    hd = HERAData([self.uvh5_1, self.uvh5_2])
    for (d, f, n) in hd.iterate_over_bls(include_autos=False):
        for dc in (d, f, n):
            assert len(dc.keys()) == 1
        bl = list(d.keys())[0]
        # make sure no autos present.
        assert bl[0] != bl[1]
        assert list(d.values())[0].shape == (120, 1024)
    hd = HERAData(self.miriad_1, filetype='miriad')
    d, f, n = next(hd.iterate_over_bls(bls=[(52, 53, 'xx')]))
    assert list(d.keys()) == [(52, 53, parse_polstr('XX', x_orientation=hd.x_orientation))]
    # miriad can't iterate without an explicit bls list
    with pytest.raises(NotImplementedError):
        next(hd.iterate_over_bls())
    hd = HERAData(self.uvh5_1)
    for (d, f, n) in hd.iterate_over_bls(chunk_by_redundant_group=True, Nbls=1):
        # check that all baselines in chunk are redundant
        # this will be the case when Nbls = 1
        bl_lens = np.asarray([hd.antpos[bl[0]] - hd.antpos[bl[1]] for bl in d])
        assert np.all(np.isclose(bl_lens - bl_lens[0], 0., atol=1.0))
        for dc in (d, f, n):
            assert list(dc.values())[0].shape == (60, 1024)
    hd = HERAData([self.uvh5_1, self.uvh5_2])
    for (d, f, n) in hd.iterate_over_bls(chunk_by_redundant_group=True):
        for dc in (d, f, n):
            assert list(dc.values())[0].shape == (120, 1024)
    # redundant-group chunking is not supported for miriad
    with pytest.raises(NotImplementedError):
        hd = HERAData(self.miriad_1, filetype='miriad')
        d, f, n = next(hd.iterate_over_bls(bls=[(52, 53, 'xx')], chunk_by_redundant_group=True))
def test_iterate_over_freqs(self):
    """iterate_over_freqs yields chunks of Nchans frequency channels."""
    hd = HERAData(self.uvh5_1)
    for (d, f, n) in hd.iterate_over_freqs(Nchans=256):
        for dc in (d, f, n):
            assert len(dc.keys()) == 3
            assert list(dc.values())[0].shape == (60, 256)
    hd = HERAData([self.uvh5_1, self.uvh5_2])
    for (d, f, n) in hd.iterate_over_freqs(Nchans=512):
        for dc in (d, f, n):
            assert len(dc.keys()) == 3
            assert list(dc.values())[0].shape == (120, 512)
    # uvfits needs explicit freqs since partial loading is unsupported
    hd = HERAData(self.uvfits, filetype='uvfits')
    d, f, n = hd.read()
    d, f, n = next(hd.iterate_over_freqs(Nchans=2, freqs=d.freqs[0:2]))
    for value in d.values():
        assert value.shape == (60, 2)
    with pytest.raises(NotImplementedError):
        # NOTE(review): this calls iterate_over_bls inside the freqs test —
        # looks like a copy-paste from test_iterate_over_bls; confirm whether
        # iterate_over_freqs was intended here.
        next(hd.iterate_over_bls())
def test_iterate_over_times(self):
    """iterate_over_times yields chunks of Nints integrations."""
    hd = HERAData(self.uvh5_1)
    for (d, f, n) in hd.iterate_over_times(Nints=20):
        for dc in (d, f, n):
            assert len(dc.keys()) == 3
            assert list(dc.values())[0].shape == (20, 1024)
    # split the file in frequency to exercise iteration over a two-file object
    hd.read(frequencies=hd.freqs[0:512])
    hd.write_uvh5('out1.h5', clobber=True)
    hd.read(frequencies=hd.freqs[512:])
    hd.write_uvh5('out2.h5', clobber=True)
    hd = HERAData(['out1.h5', 'out2.h5'])
    for (d, f, n) in hd.iterate_over_times(Nints=30):
        for dc in (d, f, n):
            assert len(dc.keys()) == 3
            assert list(dc.values())[0].shape == (30, 1024)
    os.remove('out1.h5')
    os.remove('out2.h5')
    # uvfits needs explicit times since partial loading is unsupported
    hd = HERAData(self.uvfits, filetype='uvfits')
    d, f, n = hd.read()
    d, f, n = next(hd.iterate_over_times(Nints=2, times=d.times[0:2]))
    for value in d.values():
        assert value.shape == (2, 64)
    with pytest.raises(NotImplementedError):
        next(hd.iterate_over_times())
def test_uvflag_compatibility(self):
    """A UVFlag built from HERAData must equal one built from a plain UVData."""
    # reference: UVFlag from a vanilla UVData read of the same file
    plain = UVData()
    plain.read_uvh5(self.uvh5_1)
    reference_flag = UVFlag(plain)
    # comparison: UVFlag initialized straight from a HERAData
    hera = HERAData(self.uvh5_1)
    hera.read()
    assert reference_flag == UVFlag(hera)
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
class Test_Visibility_IO_Legacy(object):
    """Tests for the legacy load_vis/write_vis/update_vis interface."""

    def test_load_vis(self):
        """load_vis from path(s) and UVData object(s), with and without metadata."""
        # inheretied testing from the old abscal_funcs.UVData2AbsCalDict
        # load into pyuvdata object
        self.data_file = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
        self.uvd = UVData()
        self.uvd.read_miriad(self.data_file)
        self.freq_array = np.unique(self.uvd.freq_array)
        self.antpos, self.ants = self.uvd.get_ENU_antpos(center=True, pick_data_ants=True)
        self.antpos = odict(list(zip(self.ants, self.antpos)))
        self.time_array = np.unique(self.uvd.time_array)
        fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
        data, flags = io.load_vis(fname, pop_autos=False)
        assert data[(24, 25, 'ee')].shape == (60, 64)
        assert flags[(24, 25, 'ee')].shape == (60, 64)
        assert (24, 24, parse_polstr('EE', x_orientation=self.uvd.x_orientation)) in data
        data, flags = io.load_vis([fname])
        assert data[(24, 25, 'ee')].shape == (60, 64)
        # test pop autos
        data, flags = io.load_vis(fname, pop_autos=True)
        assert (24, 24, parse_polstr('EE', x_orientation=self.uvd.x_orientation)) not in data
        # test uvd object
        uvd = UVData()
        uvd.read_miriad(fname)
        data, flags = io.load_vis(uvd)
        assert data[(24, 25, 'ee')].shape == (60, 64)
        data, flags = io.load_vis([uvd])
        assert data[(24, 25, 'ee')].shape == (60, 64)
        # test multiple
        fname2 = os.path.join(DATA_PATH, "zen.2458043.13298.xx.HH.uvORA")
        data, flags = io.load_vis([fname, fname2])
        assert data[(24, 25, 'ee')].shape == (120, 64)
        assert flags[(24, 25, 'ee')].shape == (120, 64)
        # test w/ meta
        # BUG FIX: 'f' used to be bound twice in this unpack (flags, then
        # freqs), silently discarding the flags; use 'flgs' for flags.
        d, flgs, ap, a, f, t, l, p = io.load_vis([fname, fname2], return_meta=True)
        assert len(ap[24]) == 3
        assert len(f) == len(self.freq_array)
        with pytest.raises(NotImplementedError):
            d, f = io.load_vis(fname, filetype='not_a_real_filetype')
        with pytest.raises(NotImplementedError):
            d, f = io.load_vis(['str1', 'str2'], filetype='not_a_real_filetype')
        # test w/ meta pick_data_ants
        fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
        d, flgs, ap, a, f, t, l, p = io.load_vis(fname, return_meta=True, pick_data_ants=False)
        assert len(ap[24]) == 3
        assert len(a) == 47
        assert len(f) == len(self.freq_array)
        with pytest.raises(TypeError):
            d, f = io.load_vis(1.0)

    def test_load_vis_nested(self):
        """nested_dict=True keys data as d[(i, j)][pol]; compare to raw UVData arrays."""
        # duplicated testing from firstcal.UVData_to_dict
        filename1 = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')
        filename2 = os.path.join(DATA_PATH, 'zen.2458043.13298.xx.HH.uvORA')
        uvd1 = UVData()
        uvd1.read_miriad(filename1)
        uvd2 = UVData()
        uvd2.read_miriad(filename2)
        if uvd1.phase_type != 'drift':
            uvd1.unphase_to_drift()
        if uvd2.phase_type != 'drift':
            uvd2.unphase_to_drift()
        uvd = uvd1 + uvd2
        d, f = io.load_vis([uvd1, uvd2], nested_dict=True)
        for i, j in d:
            for pol in d[i, j]:
                # compare each nested entry against the concatenated UVData arrays
                uvpol = list(uvd1.polarization_array).index(polstr2num(pol, x_orientation=uvd1.x_orientation))
                uvmask = np.all(
                    np.array(list(zip(uvd.ant_1_array, uvd.ant_2_array))) == [i, j], axis=1)
                np.testing.assert_equal(d[i, j][pol], np.resize(
                    uvd.data_array[uvmask][:, 0, :, uvpol], d[i, j][pol].shape))
                np.testing.assert_equal(f[i, j][pol], np.resize(
                    uvd.flag_array[uvmask][:, 0, :, uvpol], f[i, j][pol].shape))
        d, f = io.load_vis([filename1, filename2], nested_dict=True)
        for i, j in d:
            for pol in d[i, j]:
                uvpol = list(uvd.polarization_array).index(polstr2num(pol, x_orientation=uvd.x_orientation))
                uvmask = np.all(
                    np.array(list(zip(uvd.ant_1_array, uvd.ant_2_array))) == [i, j], axis=1)
                np.testing.assert_equal(d[i, j][pol], np.resize(
                    uvd.data_array[uvmask][:, 0, :, uvpol], d[i, j][pol].shape))
                np.testing.assert_equal(f[i, j][pol], np.resize(
                    uvd.flag_array[uvmask][:, 0, :, uvpol], f[i, j][pol].shape))
        uvd = UVData()
        uvd.read_miriad(filename1)
        assert len(io.load_vis([uvd], nested_dict=True)[0]) == uvd.Nbls
        # reorder baseline array
        uvd.baseline_array = uvd.baseline_array[np.argsort(uvd.baseline_array)]
        assert len(io.load_vis(filename1, nested_dict=True)[0]) == uvd.Nbls

    @pytest.mark.filterwarnings("ignore:The expected shape of the ENU array")
    @pytest.mark.filterwarnings("ignore:antenna_diameters is not set")
    @pytest.mark.filterwarnings("ignore:Unicode equal comparison failed")
    def test_write_vis(self):
        """write_vis round trips data (plus optional flags/nsamples) to uvh5/miriad."""
        # get data
        uvd = UVData()
        uvd.read_uvh5(os.path.join(DATA_PATH, "zen.2458044.41632.xx.HH.XRAA.uvh5"))
        data, flgs, ap, a, f, t, l, p = io.load_vis(uvd, return_meta=True)
        nsample = copy.deepcopy(data)
        for k in nsample.keys():
            # BUG FIX: np.float was removed in NumPy 1.24; builtin float is
            # the documented replacement.
            nsample[k] = np.ones_like(nsample[k], float)
        # test basic execution
        uvd = io.write_vis("ex.uvh5", data, l, f, ap, start_jd=2458044, return_uvd=True, overwrite=True, verbose=True, x_orientation='east', filetype='uvh5')
        hd = HERAData("ex.uvh5")
        hd.read()
        assert os.path.exists('ex.uvh5')
        assert uvd.data_array.shape == (1680, 1, 64, 1)
        assert hd.data_array.shape == (1680, 1, 64, 1)
        assert np.allclose(data[(24, 25, 'ee')][30, 32], uvd.get_data(24, 25, 'ee')[30, 32])
        assert np.allclose(data[(24, 25, 'ee')][30, 32], hd.get_data(24, 25, 'ee')[30, 32])
        assert hd.x_orientation.lower() == 'east'
        for ant in ap:
            np.testing.assert_array_almost_equal(hd.antpos[ant], ap[ant])
        os.remove("ex.uvh5")
        # test with nsample and flags
        uvd = io.write_vis("ex.uv", data, l, f, ap, start_jd=2458044, flags=flgs, nsamples=nsample, x_orientation='east', return_uvd=True, overwrite=True, verbose=True)
        assert uvd.nsample_array.shape == (1680, 1, 64, 1)
        assert uvd.flag_array.shape == (1680, 1, 64, 1)
        assert np.allclose(nsample[(24, 25, 'ee')][30, 32], uvd.get_nsamples(24, 25, 'ee')[30, 32])
        assert np.allclose(flgs[(24, 25, 'ee')][30, 32], uvd.get_flags(24, 25, 'ee')[30, 32])
        assert uvd.x_orientation.lower() == 'east'
        # test exceptions
        pytest.raises(AttributeError, io.write_vis, "ex.uv", data, l, f, ap)
        pytest.raises(AttributeError, io.write_vis, "ex.uv", data, l, f, ap, start_jd=2458044, filetype='foo')
        if os.path.exists('ex.uv'):
            shutil.rmtree('ex.uv')

    def test_update_vis(self):
        """update_vis writes modified data/flags from a path or UVData object."""
        # load in cal
        fname = os.path.join(DATA_PATH, "zen.2458043.12552.xx.HH.uvORA")
        outname = os.path.join(DATA_PATH, "test_output/zen.2458043.12552.xx.HH.modified.uvORA")
        uvd = UVData()
        uvd.read_miriad(fname)
        data, flags, antpos, ants, freqs, times, lsts, pols = io.load_vis(fname, return_meta=True)
        # make some modifications
        new_data = {key: (2. + 1.j) * val for key, val in data.items()}
        new_flags = {key: np.logical_not(val) for key, val in flags.items()}
        io.update_vis(fname, outname, data=new_data, flags=new_flags,
                      add_to_history='hello world', clobber=True, telescope_name='PAPER')
        # test modifications
        data, flags, antpos, ants, freqs, times, lsts, pols = io.load_vis(outname, return_meta=True)
        for k in data.keys():
            assert np.all(new_data[k] == data[k])
            assert np.all(new_flags[k] == flags[k])
        uvd2 = UVData()
        uvd2.read_miriad(outname)
        assert pyuvdata.utils._check_histories(uvd2.history, uvd.history + 'hello world')
        assert uvd2.telescope_name == 'PAPER'
        shutil.rmtree(outname)
        # Coverage for errors
        with pytest.raises(TypeError):
            io.update_vis(uvd, outname, data=new_data, flags=new_flags, filetype_out='not_a_real_filetype',
                          add_to_history='hello world', clobber=True, telescope_name='PAPER')
        with pytest.raises(NotImplementedError):
            io.update_vis(fname, outname, data=new_data, flags=new_flags, filetype_in='not_a_real_filetype',
                          add_to_history='hello world', clobber=True, telescope_name='PAPER')
        # #now try the same thing but with a UVData object instead of path
        io.update_vis(uvd, outname, data=new_data, flags=new_flags,
                      add_to_history='hello world', clobber=True, telescope_name='PAPER')
        data, flags, antpos, ants, freqs, times, lsts, pols = io.load_vis(outname, return_meta=True)
        for k in data.keys():
            assert np.all(new_data[k] == data[k])
            assert np.all(new_flags[k] == flags[k])
        uvd2 = UVData()
        uvd2.read_miriad(outname)
        assert pyuvdata.utils._check_histories(uvd2.history, uvd.history + 'hello world')
        assert uvd2.telescope_name == 'PAPER'
        shutil.rmtree(outname)
class Test_Calibration_IO_Legacy(object):
    """Tests for the legacy load_cal/write_cal/update_cal interface."""

    def test_load_cal(self):
        """load_cal from calfits path(s) and UVCal object(s), with and without metadata."""
        with pytest.raises(TypeError):
            io.load_cal(1.0)
        fname = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits")
        gains, flags = io.load_cal(fname)
        assert len(gains.keys()) == 18
        assert len(flags.keys()) == 18
        cal = UVCal()
        cal.read_calfits(fname)
        gains, flags = io.load_cal(cal)
        assert len(gains.keys()) == 18
        assert len(flags.keys()) == 18
        # mixed path/object lists are not allowed
        with pytest.raises(TypeError):
            io.load_cal([fname, cal])
        fname_xx = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits")
        fname_yy = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.yy.HH.uvc.omni.calfits")
        gains, flags, quals, total_qual, ants, freqs, times, pols = io.load_cal([fname_xx, fname_yy], return_meta=True)
        assert len(gains.keys()) == 36
        assert len(flags.keys()) == 36
        assert len(quals.keys()) == 36
        assert freqs.shape == (1024,)
        assert times.shape == (3,)
        assert sorted(pols) == [parse_jpolstr('jxx', x_orientation=cal.x_orientation), parse_jpolstr('jyy', x_orientation=cal.x_orientation)]
        # same thing from UVCal objects
        cal_xx, cal_yy = UVCal(), UVCal()
        cal_xx.read_calfits(fname_xx)
        cal_yy.read_calfits(fname_yy)
        gains, flags, quals, total_qual, ants, freqs, times, pols = io.load_cal([cal_xx, cal_yy], return_meta=True)
        assert len(gains.keys()) == 36
        assert len(flags.keys()) == 36
        assert len(quals.keys()) == 36
        assert freqs.shape == (1024,)
        assert times.shape == (3,)
        assert sorted(pols) == [parse_jpolstr('jxx', x_orientation=cal_xx.x_orientation), parse_jpolstr('jyy', x_orientation=cal_yy.x_orientation)]

    def test_write_cal(self):
        """write_cal builds a UVCal from dicts of gains/flags/qualities."""
        # create fake data
        ants = np.arange(10)
        pols = np.array(['Jnn'])
        freqs = np.linspace(100e6, 200e6, 64, endpoint=False)
        Nfreqs = len(freqs)
        times = np.linspace(2458043.1, 2458043.6, 100)
        Ntimes = len(times)
        gains = {}
        quality = {}
        flags = {}
        total_qual = {}
        # BUG FIX: np.float / np.complex / np.bool were removed in NumPy 1.24;
        # the builtin types are the documented replacements.
        for i, p in enumerate(pols):
            total_qual[p] = np.ones((Ntimes, Nfreqs), float)
            for j, a in enumerate(ants):
                gains[(a, p)] = np.ones((Ntimes, Nfreqs), complex)
                quality[(a, p)] = np.ones((Ntimes, Nfreqs), float) * 2
                flags[(a, p)] = np.zeros((Ntimes, Nfreqs), bool)
        # set some terms to zero
        gains[(5, 'Jnn')][20:30] *= 0
        # test basic execution
        uvc = io.write_cal("ex.calfits", gains, freqs, times, flags=flags, quality=quality,
                           total_qual=total_qual, overwrite=True, return_uvc=True, write_file=True)
        assert os.path.exists("ex.calfits")
        assert uvc.gain_array.shape == (10, 1, 64, 100, 1)
        assert np.allclose(uvc.gain_array[5].min(), 1.0)
        assert np.allclose(uvc.gain_array[0, 0, 0, 0, 0], (1 + 0j))
        assert np.allclose(np.sum(uvc.gain_array), (64000 + 0j))
        assert not np.any(uvc.flag_array[0, 0, 0, 0, 0])
        assert np.sum(uvc.flag_array) == 640
        assert np.allclose(uvc.quality_array[0, 0, 0, 0, 0], 2)
        assert np.allclose(np.sum(uvc.quality_array), 128000.0)
        assert len(uvc.antenna_numbers) == 10
        assert uvc.total_quality_array is not None
        if os.path.exists('ex.calfits'):
            os.remove('ex.calfits')
        # test execution with different parameters
        uvc = io.write_cal("ex.calfits", gains, freqs, times, overwrite=True)
        if os.path.exists('ex.calfits'):
            os.remove('ex.calfits')
        # test single integration write
        gains2 = odict(list(map(lambda k: (k, gains[k][:1]), gains.keys())))
        uvc = io.write_cal("ex.calfits", gains2, freqs, times[:1], return_uvc=True, outdir='./')
        assert np.allclose(uvc.integration_time, 0.0)
        assert uvc.Ntimes == 1
        assert os.path.exists('ex.calfits')
        os.remove('ex.calfits')
        # test multiple pol
        for k in list(gains.keys()):
            gains[(k[0], 'Jyy')] = gains[k].conj()
        uvc = io.write_cal("ex.calfits", gains, freqs, times, return_uvc=True, outdir='./')
        assert uvc.gain_array.shape == (10, 1, 64, 100, 2)
        np.testing.assert_array_almost_equal(uvc.gain_array[0, 0, :, :, 0], uvc.gain_array[0, 0, :, :, 1].conj())
        os.remove('ex.calfits')
        # test zero check: zeroed gains become 1.0 when zero_check=True
        gains[(0, 'Jnn')][:] = 0.0
        uvc1 = io.write_cal("ex.calfits", gains, freqs, times, return_uvc=True, write_file=False, outdir='./', zero_check=True)
        uvc2 = io.write_cal("ex.calfits", gains, freqs, times, return_uvc=True, write_file=False, outdir='./', zero_check=False)
        assert np.allclose(uvc1.gain_array[0, 0, :, :, 0], 1.0)
        assert np.allclose(uvc2.gain_array[0, 0, :, :, 0], 0.0)
        # test antenna number and names ordering
        antnums2antnames = {a: "THISANT{}".format(a + 1) for a in ants}
        uvc = io.write_cal("ex.calfits", gains, freqs, times, antnums2antnames=antnums2antnames,
                           return_uvc=True, write_file=False)
        assert sorted(uvc.antenna_names) == sorted(antnums2antnames.values())

    def test_update_cal(self):
        """update_cal writes modified gains/flags/quals from a path or UVCal object."""
        # load in cal
        fname = os.path.join(DATA_PATH, "test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits")
        outname = os.path.join(DATA_PATH, "test_output/zen.2457698.40355.xx.HH.uvc.modified.calfits.")
        cal = UVCal()
        cal.read_calfits(fname)
        gains, flags, quals, total_qual, ants, freqs, times, pols = io.load_cal(fname, return_meta=True)
        # make some modifications
        new_gains = {key: (2. + 1.j) * val for key, val in gains.items()}
        new_flags = {key: np.logical_not(val) for key, val in flags.items()}
        new_quals = {key: 2. * val for key, val in quals.items()}
        io.update_cal(fname, outname, gains=new_gains, flags=new_flags, quals=new_quals,
                      add_to_history='hello world', clobber=True, telescope_name='MWA')
        # test modifications
        gains, flags, quals, total_qual, ants, freqs, times, pols = io.load_cal(outname, return_meta=True)
        for k in gains.keys():
            assert np.all(new_gains[k] == gains[k])
            assert np.all(new_flags[k] == flags[k])
            assert np.all(new_quals[k] == quals[k])
        cal2 = UVCal()
        cal2.read_calfits(outname)
        assert pyuvdata.utils._check_histories(cal2.history, cal.history + 'hello world')
        assert cal2.telescope_name == 'MWA'
        os.remove(outname)
        # now try the same thing but with a UVCal object instead of path
        io.update_cal(cal, outname, gains=new_gains, flags=new_flags, quals=new_quals,
                      add_to_history='hello world', clobber=True, telescope_name='MWA')
        gains, flags, quals, total_qual, ants, freqs, times, pols = io.load_cal(outname, return_meta=True)
        for k in gains.keys():
            assert np.all(new_gains[k] == gains[k])
            assert np.all(new_flags[k] == flags[k])
            assert np.all(new_quals[k] == quals[k])
        cal2 = UVCal()
        cal2.read_calfits(outname)
        assert pyuvdata.utils._check_histories(cal2.history, cal.history + 'hello world')
        assert cal2.telescope_name == 'MWA'
        os.remove(outname)
class Test_Flags_IO(object):
    """Tests for io.load_flags across npz and UVFlag-h5 flavors."""

    def test_load_flags_npz(self):
        """npz flags load as a baseline-keyed dict; meta is optional."""
        npzfile = os.path.join(DATA_PATH, "test_input/zen.2458101.45361.xx.HH.uvOCR_53x_54x_only.flags.applied.npz")
        flags = io.load_flags(npzfile, filetype='npz')
        assert (53, 54, parse_polstr('XX')) in flags
        for f in flags.values():
            assert f.shape == (60, 1024)
            # band edges are fully flagged in this file, but not everything
            np.testing.assert_array_equal(f[:, 0:50], True)
            np.testing.assert_array_equal(f[:, -50:], True)
            assert not np.all(f)
        flags, meta = io.load_flags(npzfile, filetype='npz', return_meta=True)
        assert len(meta['freqs']) == 1024
        assert len(meta['times']) == 60
        assert 'history' in meta

    def test_load_flags_h5_baseline(self):
        """h5 baseline-type flags: keys are (ant1, ant2, pol) 3-tuples."""
        h5file = os.path.join(QM_DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.testuvflag.flags.h5')
        flags, meta = io.load_flags(h5file, return_meta=True)
        assert len(meta['freqs']) == 256
        assert len(meta['times']) == 3
        assert 'history' in meta
        assert (20, 105, 'xx') in flags
        for k in flags.keys():
            assert len(k) == 3
            assert flags[k].shape == (3, 256)

    def test_load_flags_h5_antenna(self):
        """h5 antenna-type flags: keys are (ant, jpol) 2-tuples."""
        h5file = os.path.join(QM_DATA_PATH, 'antenna_flags.h5')
        flags, meta = io.load_flags(h5file, return_meta=True)
        assert len(meta['freqs']) == 256
        assert len(meta['times']) == 3
        assert 'history' in meta
        assert (20, 'Jxx') in flags
        for k in flags.keys():
            assert len(k) == 2
            assert flags[k].shape == (3, 256)

    def test_load_flags_h5_waterfall(self):
        """h5 waterfall-type flags: keys are polarization strings."""
        h5file = os.path.join(QM_DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.omni.calfits.g.flags.h5')
        flags, meta = io.load_flags(h5file, return_meta=True)
        assert len(meta['freqs']) == 256
        assert len(meta['times']) == 3
        assert 'history' in meta
        assert 'Jxx' in flags
        for k in flags.keys():
            assert isinstance(k, str)
            assert flags[k].shape == (3, 256)

    def test_load_flags_errors(self):
        """Unknown filetypes and non-flag h5 files should raise."""
        with pytest.raises(ValueError):
            flags = io.load_flags('some/path', filetype='not_a_type')
        h5file = os.path.join(QM_DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.testuvflag.h5')
        with pytest.raises(AssertionError):
            flags = io.load_flags(h5file)
class Test_Meta_IO(object):
    """Tests for redcal metadata HDF5 round trips."""

    def test_read_write_redcal_meta(self):
        """Read a redcal_meta file, write it back out, and verify everything survives."""
        # load file, write it back out, reread it, tests that it all agrees
        meta_path = os.path.join(DATA_PATH, "test_input/zen.2458098.43124.downsample.redcal_meta.hdf5")
        fc_meta, omni_meta, freqs, times, lsts, antpos, history = io.read_redcal_meta(meta_path)
        out_path = os.path.join(DATA_PATH, "test_output/redcal_meta_io_test.hdf5")
        io.save_redcal_meta(out_path, fc_meta, omni_meta, freqs, times, lsts, antpos, history)
        fc_meta2, omni_meta2, freqs2, times2, lsts2, antpos2, history2 = io.read_redcal_meta(out_path)
        # nested dicts are compared leaf-by-leaf
        for key1 in fc_meta:
            for key2 in fc_meta[key1]:
                np.testing.assert_array_equal(fc_meta[key1][key2], fc_meta2[key1][key2])
        for key1 in omni_meta:
            for key2 in omni_meta[key1]:
                np.testing.assert_array_equal(omni_meta[key1][key2], omni_meta2[key1][key2])
        np.testing.assert_array_equal(freqs, freqs2)
        np.testing.assert_array_equal(times, times2)
        np.testing.assert_array_equal(lsts, lsts2)
        for ant in antpos:
            assert np.all(antpos[ant] == antpos2[ant])
        assert history == history2
        os.remove(out_path)
def test_get_file_times():
    """get_file_times returns per-file (dlst, dtime, lst array, time array)."""
    filepaths = sorted(glob.glob(DATA_PATH + "/zen.2458042.*.xx.HH.uvXA"))
    # test execution
    dlsts, dtimes, larrs, tarrs = io.get_file_times(filepaths, filetype='miriad')
    assert np.isclose(larrs[0][0], 4.7293432458811866)
    assert np.isclose(larrs[0][-1], 4.7755393587036084)
    assert np.isclose(dlsts[0], 0.00078298496309189868)
    assert len(dlsts) == 2
    assert len(dtimes) == 2
    assert len(larrs) == 2
    assert len(tarrs) == 2
    assert len(larrs[0]) == 60
    assert len(tarrs[0]) == 60
    # test if fed as a str: scalars and 1D arrays come back
    dlsts, dtimes, larrs, tarrs = io.get_file_times(filepaths[0], filetype='miriad')
    # BUG FIX: np.float was removed in NumPy 1.24 (it was just an alias of
    # builtin float); np.floating covers numpy float scalars like float64.
    assert isinstance(dlsts, (float, np.floating))
    assert isinstance(dtimes, (float, np.floating))
    assert larrs.ndim == 1
    assert tarrs.ndim == 1
    # test uvh5
    fp = os.path.join(DATA_PATH, 'zen.2458098.43124.downsample.uvh5')
    dlsts, dtimes, larrs, tarrs = io.get_file_times(fp, filetype='uvh5')
    assert np.isclose(larrs[0], 1.3356485363481176)
    assert np.isclose(larrs[-1], 1.3669679333582787)
    assert np.isclose(dlsts, 0.015659698505080533)
    # test uvh5 no lsts in header
    fp = os.path.join(DATA_PATH, 'test_input/zen.2458863.28532.HH.no_lsts_in_header.uvh5')
    dlsts, dtimes, larrs, tarrs = io.get_file_times(fp, filetype='uvh5')
    assert np.isclose(larrs[0], 1.00925787)
    assert np.isclose(larrs[1], 1.00996256)
    # exceptions
    pytest.raises(ValueError, io.get_file_times, fp, filetype='foo')
def test_baselines_from_filelist_position(tmpdir):
    """Check that io.baselines_from_filelist_position partitions baselines across files."""
    tmp_path = tmpdir.strpath
    filelist = [
        os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.first.uvh5"),
        os.path.join(DATA_PATH, "test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.second.uvh5"),
    ]
    # Each file should receive a chunk that is non-empty but smaller than the
    # full set of 3 baselines present in this dataset.
    baselines = []
    for fname in filelist:
        chunk = io.baselines_from_filelist_position(fname, filelist)
        assert 0 < len(chunk) < 3
        baselines += chunk
    # Together, the per-file chunks must cover every baseline exactly once.
    assert len(baselines) == 3
    # sort baselines by the sum of their antenna numbers
    order = np.argsort([bl[0] + bl[1] for bl in baselines])
    sorted_baselines = [baselines[idx] for idx in order]
    # check that the sorted baselines are all of the original baselines
    assert sorted_baselines == [(53, 53), (53, 54), (54, 54)]
    # test case when there are fewer baselines than files:
    # first write single-baseline copies of each input file.
    filelist_1bl = [
        os.path.join(tmp_path, "zen.2458101.46106.xx.HH.OCR_53x_54x_only.first.uvh5"),
        os.path.join(tmp_path, "zen.2458101.46106.xx.HH.OCR_53x_54x_only.second.uvh5"),
    ]
    for fi, fo in zip(filelist, filelist_1bl):
        hd = io.HERAData(fi)
        hd.read(bls=[(53, 54)])
        hd.write_uvh5(fo, clobber=True)
    # With one baseline and two files, only the first file gets a chunk.
    assert io.baselines_from_filelist_position(filelist_1bl[0], filelist_1bl) == [(53, 54)]
    assert io.baselines_from_filelist_position(filelist_1bl[1], filelist_1bl) == []
def test_throw_away_flagged_ants_parser():
    """Verify defaults and positional handling of io.throw_away_flagged_ants_parser."""
    sys.argv = [sys.argv[0], 'input', 'output', '--yaml_file', 'test']
    parsed = io.throw_away_flagged_ants_parser().parse_args()
    assert parsed.infilename == 'input'
    assert parsed.outfilename == 'output'
    assert parsed.yaml_file == 'test'
    # boolean switches default to off
    assert not parsed.clobber
    assert not parsed.throw_away_fully_flagged_data_baselines
def test_throw_away_flagged_ants(tmpdir):
    """End-to-end check of io.throw_away_flagged_ants against a yaml a-priori flag file."""
    strpath = tmpdir.strpath
    inputfile = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
    outputfile = os.path.join(strpath, 'trimmed_output.uvh5')
    yaml_file = os.path.join(DATA_PATH, '2458043.yaml')
    hd = io.HERAData(inputfile)
    hd.read()
    # sanity check: the antennas to be thrown away are present in the raw data
    for ant in (24, 25, 37, 38, 52):
        assert ant in set(hd.ant_1_array).union(set(hd.ant_2_array))
    io.throw_away_flagged_ants(inputfile, outputfile, yaml_file)
    hdo = io.HERAData(outputfile)
    ants_out = set(hdo.ant_1_array).union(set(hdo.ant_2_array))
    for ant in set(hd.ant_1_array).union(set(hd.ant_2_array)):
        if ant in [24, 25, 37, 38]:
            assert ant not in ants_out
        else:
            assert ant in ants_out
    # now fully flag antenna 52 by setting its flags to True
    hdt = copy.deepcopy(hd)
    dt, ft, nt = hdt.build_datacontainers()
    for k in ft:
        ft[k] = np.zeros_like(ft[k], dtype=bool)
        if 52 in (k[0], k[1]):
            ft[k] = np.ones_like(ft[k], dtype=bool)
    hdt.update(flags=ft)
    manual_file = os.path.join(strpath, 'manually_flagged.uvh5')
    manual_file_trimmed = os.path.join(strpath, 'manually_flagged_trimmed.uvh5')
    hdt.write_uvh5(manual_file)
    # with throw_away_fully_flagged_data_baselines=True, antenna 52 should go too
    io.throw_away_flagged_ants(manual_file, manual_file_trimmed, yaml_file=yaml_file,
                               throw_away_fully_flagged_data_baselines=True)
    hdo = io.HERAData(manual_file_trimmed)
    trimmed_ants = set(hdo.ant_1_array).union(set(hdo.ant_2_array))
    for ant in set(hd.ant_1_array).union(set(hd.ant_2_array)):
        if ant in [52, 37, 38, 24, 25]:
            assert ant not in trimmed_ants
        else:
            assert ant in trimmed_ants
| [
"pytest.mark.filterwarnings",
"numpy.logical_not",
"numpy.argsort",
"numpy.array",
"copy.deepcopy",
"pyuvdata.UVData",
"numpy.arange",
"os.remove",
"os.path.exists",
"pyuvdata.UVCal",
"numpy.testing.assert_array_almost_equal",
"numpy.asarray",
"pyuvdata.UVFlag",
"numpy.linspace",
"pyuvda... | [((5915, 6012), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:It seems that the latitude and longitude are in radians"""'], {}), "(\n 'ignore:It seems that the latitude and longitude are in radians')\n", (5941, 6012), False, 'import pytest\n'), ((6009, 6099), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:The default for the `center` keyword has changed"""'], {}), "(\n 'ignore:The default for the `center` keyword has changed')\n", (6035, 6099), False, 'import pytest\n'), ((6096, 6152), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Mean of empty slice"""'], {}), "('ignore:Mean of empty slice')\n", (6122, 6152), False, 'import pytest\n'), ((6154, 6239), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:invalid value encountered in double_scalars"""'], {}), "('ignore:invalid value encountered in double_scalars'\n )\n", (6180, 6239), False, 'import pytest\n'), ((26749, 26839), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:The default for the `center` keyword has changed"""'], {}), "(\n 'ignore:The default for the `center` keyword has changed')\n", (26775, 26839), False, 'import pytest\n'), ((16480, 16556), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:miriad does not support partial loading"""'], {}), "('ignore:miriad does not support partial loading')\n", (16506, 16556), False, 'import pytest\n'), ((31559, 31631), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:The expected shape of the ENU array"""'], {}), "('ignore:The expected shape of the ENU array')\n", (31585, 31631), False, 'import pytest\n'), ((31637, 31702), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:antenna_diameters is not set"""'], {}), "('ignore:antenna_diameters is not set')\n", (31663, 31702), False, 'import pytest\n'), ((31708, 31776), 'pytest.mark.filterwarnings', 
'pytest.mark.filterwarnings', (['"""ignore:Unicode equal comparison failed"""'], {}), "('ignore:Unicode equal comparison failed')\n", (31734, 31776), False, 'import pytest\n'), ((47544, 47586), 'numpy.isclose', 'np.isclose', (['larrs[0][0]', '(4.729343245881187)'], {}), '(larrs[0][0], 4.729343245881187)\n', (47554, 47586), True, 'import numpy as np\n'), ((47599, 47642), 'numpy.isclose', 'np.isclose', (['larrs[0][-1]', '(4.775539358703608)'], {}), '(larrs[0][-1], 4.775539358703608)\n', (47609, 47642), True, 'import numpy as np\n'), ((47655, 47698), 'numpy.isclose', 'np.isclose', (['dlsts[0]', '(0.0007829849630918987)'], {}), '(dlsts[0], 0.0007829849630918987)\n', (47665, 47698), True, 'import numpy as np\n'), ((48161, 48221), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458098.43124.downsample.uvh5"""'], {}), "(DATA_PATH, 'zen.2458098.43124.downsample.uvh5')\n", (48173, 48221), False, 'import os\n'), ((48306, 48346), 'numpy.isclose', 'np.isclose', (['larrs[0]', '(1.3356485363481176)'], {}), '(larrs[0], 1.3356485363481176)\n', (48316, 48346), True, 'import numpy as np\n'), ((48358, 48399), 'numpy.isclose', 'np.isclose', (['larrs[-1]', '(1.3669679333582787)'], {}), '(larrs[-1], 1.3669679333582787)\n', (48368, 48399), True, 'import numpy as np\n'), ((48411, 48450), 'numpy.isclose', 'np.isclose', (['dlsts', '(0.015659698505080533)'], {}), '(dlsts, 0.015659698505080533)\n', (48421, 48450), True, 'import numpy as np\n'), ((48495, 48580), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2458863.28532.HH.no_lsts_in_header.uvh5"""'], {}), "(DATA_PATH,\n 'test_input/zen.2458863.28532.HH.no_lsts_in_header.uvh5')\n", (48507, 48580), False, 'import os\n'), ((48661, 48693), 'numpy.isclose', 'np.isclose', (['larrs[0]', '(1.00925787)'], {}), '(larrs[0], 1.00925787)\n', (48671, 48693), True, 'import numpy as np\n'), ((48705, 48737), 'numpy.isclose', 'np.isclose', (['larrs[1]', '(1.00996256)'], {}), '(larrs[1], 1.00996256)\n', (48715, 48737), True, 'import 
numpy as np\n'), ((48760, 48824), 'pytest.raises', 'pytest.raises', (['ValueError', 'io.get_file_times', 'fp'], {'filetype': '"""foo"""'}), "(ValueError, io.get_file_times, fp, filetype='foo')\n", (48773, 48824), False, 'import pytest\n'), ((49787, 49806), 'numpy.argsort', 'np.argsort', (['ant_sum'], {}), '(ant_sum)\n', (49797, 49806), True, 'import numpy as np\n'), ((51225, 51285), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.40141.xx.HH.XRAA.uvh5"""'], {}), "(DATA_PATH, 'zen.2458043.40141.xx.HH.XRAA.uvh5')\n", (51237, 51285), False, 'import os\n'), ((51303, 51347), 'os.path.join', 'os.path.join', (['strpath', '"""trimmed_output.uvh5"""'], {}), "(strpath, 'trimmed_output.uvh5')\n", (51315, 51347), False, 'import os\n'), ((51364, 51403), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""2458043.yaml"""'], {}), "(DATA_PATH, '2458043.yaml')\n", (51376, 51403), False, 'import os\n'), ((51991, 52008), 'copy.deepcopy', 'copy.deepcopy', (['hd'], {}), '(hd)\n', (52004, 52008), False, 'import copy\n'), ((52255, 52301), 'os.path.join', 'os.path.join', (['strpath', '"""manually_flagged.uvh5"""'], {}), "(strpath, 'manually_flagged.uvh5')\n", (52267, 52301), False, 'import os\n'), ((52328, 52382), 'os.path.join', 'os.path.join', (['strpath', '"""manually_flagged_trimmed.uvh5"""'], {}), "(strpath, 'manually_flagged_trimmed.uvh5')\n", (52340, 52382), False, 'import os\n'), ((703, 781), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits"""'], {}), "(DATA_PATH, 'test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits')\n", (715, 781), False, 'import os\n'), ((806, 884), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2457698.40355.yy.HH.uvc.omni.calfits"""'], {}), "(DATA_PATH, 'test_input/zen.2457698.40355.yy.HH.uvc.omni.calfits')\n", (818, 884), False, 'import os\n'), ((911, 987), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2457698.40355.HH.uvcA.omni.calfits"""'], {}), 
"(DATA_PATH, 'test_input/zen.2457698.40355.HH.uvcA.omni.calfits')\n", (923, 987), False, 'import os\n'), ((1012, 1101), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2458101.44615.xx.HH.uv.abs.calfits_54x_only"""'], {}), "(DATA_PATH,\n 'test_input/zen.2458101.44615.xx.HH.uv.abs.calfits_54x_only')\n", (1024, 1101), False, 'import os\n'), ((1122, 1211), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2458101.45361.xx.HH.uv.abs.calfits_54x_only"""'], {}), "(DATA_PATH,\n 'test_input/zen.2458101.45361.xx.HH.uv.abs.calfits_54x_only')\n", (1134, 1211), False, 'import os\n'), ((1232, 1321), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only"""'], {}), "(DATA_PATH,\n 'test_input/zen.2458101.46106.xx.HH.uv.abs.calfits_54x_only')\n", (1244, 1321), False, 'import os\n'), ((2013, 2020), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (2018, 2020), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((3690, 3758), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["g2[54, 'Jee']", "g[54, 'Jee'][30:90, :]"], {}), "(g2[54, 'Jee'], g[54, 'Jee'][30:90, :])\n", (3719, 3758), True, 'import numpy as np\n'), ((4044, 4112), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["g2[54, 'Jee']", "g[54, 'Jee'][:, 0:100]"], {}), "(g2[54, 'Jee'], g[54, 'Jee'][:, 0:100])\n", (4073, 4112), True, 'import numpy as np\n'), ((4121, 4189), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["g3[54, 'Jee']", "g[54, 'Jee'][:, 0:100]"], {}), "(g3[54, 'Jee'], g[54, 'Jee'][:, 0:100])\n", (4150, 4189), True, 'import numpy as np\n'), ((5886, 5911), 'os.remove', 'os.remove', (['"""test.calfits"""'], {}), "('test.calfits')\n", (5895, 5911), False, 'import os\n'), ((6314, 6386), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458116.61019.xx.HH.XRS_downselected.uvh5"""'], {}), "(DATA_PATH, 'zen.2458116.61019.xx.HH.XRS_downselected.uvh5')\n", (6326, 
6386), False, 'import os\n'), ((6409, 6481), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458116.61765.xx.HH.XRS_downselected.uvh5"""'], {}), "(DATA_PATH, 'zen.2458116.61765.xx.HH.XRS_downselected.uvh5')\n", (6421, 6481), False, 'import os\n'), ((6506, 6562), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.12552.xx.HH.uvORA"""'], {}), "(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')\n", (6518, 6562), False, 'import os\n'), ((6587, 6643), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.13298.xx.HH.uvORA"""'], {}), "(DATA_PATH, 'zen.2458043.13298.xx.HH.uvORA')\n", (6599, 6643), False, 'import os\n'), ((6666, 6731), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.12552.xx.HH.uvA.vis.uvfits"""'], {}), "(DATA_PATH, 'zen.2458043.12552.xx.HH.uvA.vis.uvfits')\n", (6678, 6731), False, 'import os\n'), ((9009, 9026), 'copy.deepcopy', 'copy.deepcopy', (['hd'], {}), '(hd)\n', (9022, 9026), False, 'import copy\n'), ((9229, 9246), 'copy.deepcopy', 'copy.deepcopy', (['hd'], {}), '(hd)\n', (9242, 9246), False, 'import copy\n'), ((9486, 9530), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['d1', 'd2'], {}), '(d1, d2)\n', (9522, 9530), True, 'import numpy as np\n'), ((9633, 9682), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['d2[-5:]', 'd3'], {}), '(d2[-5:], d3)\n', (9669, 9682), True, 'import numpy as np\n'), ((13108, 13126), 'numpy.random.seed', 'np.random.seed', (['(21)'], {}), '(21)\n', (13122, 13126), True, 'import numpy as np\n'), ((21524, 21600), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (["d[54, 54, 'EE']", "d_after[54, 54, 'EE']"], {}), "(d[54, 54, 'EE'], d_after[54, 54, 'EE'])\n", (21560, 21600), True, 'import numpy as np\n'), ((21961, 21980), 'os.remove', 'os.remove', (['"""out.h5"""'], {}), "('out.h5')\n", (21970, 21980), False, 'import os\n'), ((26060, 26080), 'os.remove', 'os.remove', 
(['"""out1.h5"""'], {}), "('out1.h5')\n", (26069, 26080), False, 'import os\n'), ((26089, 26109), 'os.remove', 'os.remove', (['"""out2.h5"""'], {}), "('out2.h5')\n", (26098, 26109), False, 'import os\n'), ((26570, 26578), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (26576, 26578), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((26628, 26638), 'pyuvdata.UVFlag', 'UVFlag', (['uv'], {}), '(uv)\n', (26634, 26638), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((26707, 26717), 'pyuvdata.UVFlag', 'UVFlag', (['hd'], {}), '(hd)\n', (26713, 26717), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((27040, 27096), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.12552.xx.HH.uvORA"""'], {}), "(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')\n", (27052, 27096), False, 'import os\n'), ((27116, 27124), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (27122, 27124), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((27196, 27226), 'numpy.unique', 'np.unique', (['self.uvd.freq_array'], {}), '(self.uvd.freq_array)\n', (27205, 27226), True, 'import numpy as np\n'), ((27407, 27437), 'numpy.unique', 'np.unique', (['self.uvd.time_array'], {}), '(self.uvd.time_array)\n', (27416, 27437), True, 'import numpy as np\n'), ((27455, 27511), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.12552.xx.HH.uvORA"""'], {}), "(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')\n", (27467, 27511), False, 'import os\n'), ((28084, 28092), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (28090, 28092), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((28354, 28410), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.13298.xx.HH.uvORA"""'], {}), "(DATA_PATH, 'zen.2458043.13298.xx.HH.uvORA')\n", (28366, 28410), False, 'import os\n'), ((29061, 29117), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.12552.xx.HH.uvORA"""'], {}), "(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')\n", (29073, 29117), False, 'import os\n'), ((29508, 
29564), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.12552.xx.HH.uvORA"""'], {}), "(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')\n", (29520, 29564), False, 'import os\n'), ((29585, 29641), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.13298.xx.HH.uvORA"""'], {}), "(DATA_PATH, 'zen.2458043.13298.xx.HH.uvORA')\n", (29597, 29641), False, 'import os\n'), ((29657, 29665), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (29663, 29665), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((29717, 29725), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (29723, 29725), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((31248, 31256), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (31254, 31256), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((31840, 31848), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (31846, 31848), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((32026, 32045), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (32039, 32045), False, 'import copy\n'), ((32395, 32420), 'os.path.exists', 'os.path.exists', (['"""ex.uvh5"""'], {}), "('ex.uvh5')\n", (32409, 32420), False, 'import os\n'), ((32872, 32892), 'os.remove', 'os.remove', (['"""ex.uvh5"""'], {}), "('ex.uvh5')\n", (32881, 32892), False, 'import os\n'), ((33496, 33564), 'pytest.raises', 'pytest.raises', (['AttributeError', 'io.write_vis', '"""ex.uv"""', 'data', 'l', 'f', 'ap'], {}), "(AttributeError, io.write_vis, 'ex.uv', data, l, f, ap)\n", (33509, 33564), False, 'import pytest\n'), ((33573, 33679), 'pytest.raises', 'pytest.raises', (['AttributeError', 'io.write_vis', '"""ex.uv"""', 'data', 'l', 'f', 'ap'], {'start_jd': '(2458044)', 'filetype': '"""foo"""'}), "(AttributeError, io.write_vis, 'ex.uv', data, l, f, ap,\n start_jd=2458044, filetype='foo')\n", (33586, 33679), False, 'import pytest\n'), ((33687, 33710), 'os.path.exists', 'os.path.exists', (['"""ex.uv"""'], {}), "('ex.uv')\n", (33701, 33710), False, 'import os\n'), 
((33817, 33873), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458043.12552.xx.HH.uvORA"""'], {}), "(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')\n", (33829, 33873), False, 'import os\n'), ((33892, 33969), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_output/zen.2458043.12552.xx.HH.modified.uvORA"""'], {}), "(DATA_PATH, 'test_output/zen.2458043.12552.xx.HH.modified.uvORA')\n", (33904, 33969), False, 'import os\n'), ((33984, 33992), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (33990, 33992), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((34745, 34753), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (34751, 34753), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((34803, 34877), 'pyuvdata.utils._check_histories', 'pyuvdata.utils._check_histories', (['uvd2.history', "(uvd.history + 'hello world')"], {}), "(uvd2.history, uvd.history + 'hello world')\n", (34834, 34877), False, 'import pyuvdata\n'), ((34932, 34954), 'shutil.rmtree', 'shutil.rmtree', (['outname'], {}), '(outname)\n', (34945, 34954), False, 'import shutil\n'), ((35961, 35969), 'pyuvdata.UVData', 'UVData', ([], {}), '()\n', (35967, 35969), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((36019, 36093), 'pyuvdata.utils._check_histories', 'pyuvdata.utils._check_histories', (['uvd2.history', "(uvd.history + 'hello world')"], {}), "(uvd2.history, uvd.history + 'hello world')\n", (36050, 36093), False, 'import pyuvdata\n'), ((36148, 36170), 'shutil.rmtree', 'shutil.rmtree', (['outname'], {}), '(outname)\n', (36161, 36170), False, 'import shutil\n'), ((36329, 36407), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits"""'], {}), "(DATA_PATH, 'test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits')\n", (36341, 36407), False, 'import os\n'), ((36543, 36550), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (36548, 36550), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((36799, 36877), 'os.path.join', 
'os.path.join', (['DATA_PATH', '"""test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits"""'], {}), "(DATA_PATH, 'test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits')\n", (36811, 36877), False, 'import os\n'), ((36897, 36975), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2457698.40355.yy.HH.uvc.omni.calfits"""'], {}), "(DATA_PATH, 'test_input/zen.2457698.40355.yy.HH.uvc.omni.calfits')\n", (36909, 36975), False, 'import os\n'), ((38074, 38087), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (38083, 38087), True, 'import numpy as np\n'), ((38103, 38120), 'numpy.array', 'np.array', (["['Jnn']"], {}), "(['Jnn'])\n", (38111, 38120), True, 'import numpy as np\n'), ((38137, 38194), 'numpy.linspace', 'np.linspace', (['(100000000.0)', '(200000000.0)', '(64)'], {'endpoint': '(False)'}), '(100000000.0, 200000000.0, 64, endpoint=False)\n', (38148, 38194), True, 'import numpy as np\n'), ((38227, 38265), 'numpy.linspace', 'np.linspace', (['(2458043.1)', '(2458043.6)', '(100)'], {}), '(2458043.1, 2458043.6, 100)\n', (38238, 38265), True, 'import numpy as np\n'), ((39042, 39070), 'os.path.exists', 'os.path.exists', (['"""ex.calfits"""'], {}), "('ex.calfits')\n", (39056, 39070), False, 'import os\n'), ((39202, 39254), 'numpy.allclose', 'np.allclose', (['uvc.gain_array[0, 0, 0, 0, 0]', '(1 + 0.0j)'], {}), '(uvc.gain_array[0, 0, 0, 0, 0], 1 + 0.0j)\n', (39213, 39254), True, 'import numpy as np\n'), ((39437, 39485), 'numpy.allclose', 'np.allclose', (['uvc.quality_array[0, 0, 0, 0, 0]', '(2)'], {}), '(uvc.quality_array[0, 0, 0, 0, 0], 2)\n', (39448, 39485), True, 'import numpy as np\n'), ((39658, 39686), 'os.path.exists', 'os.path.exists', (['"""ex.calfits"""'], {}), "('ex.calfits')\n", (39672, 39686), False, 'import os\n'), ((39864, 39892), 'os.path.exists', 'os.path.exists', (['"""ex.calfits"""'], {}), "('ex.calfits')\n", (39878, 39892), False, 'import os\n'), ((40159, 40197), 'numpy.allclose', 'np.allclose', (['uvc.integration_time', '(0.0)'], {}), 
'(uvc.integration_time, 0.0)\n', (40170, 40197), True, 'import numpy as np\n'), ((40244, 40272), 'os.path.exists', 'os.path.exists', (['"""ex.calfits"""'], {}), "('ex.calfits')\n", (40258, 40272), False, 'import os\n'), ((40281, 40304), 'os.remove', 'os.remove', (['"""ex.calfits"""'], {}), "('ex.calfits')\n", (40290, 40304), False, 'import os\n'), ((40695, 40718), 'os.remove', 'os.remove', (['"""ex.calfits"""'], {}), "('ex.calfits')\n", (40704, 40718), False, 'import os\n'), ((41053, 41101), 'numpy.allclose', 'np.allclose', (['uvc1.gain_array[0, 0, :, :, 0]', '(1.0)'], {}), '(uvc1.gain_array[0, 0, :, :, 0], 1.0)\n', (41064, 41101), True, 'import numpy as np\n'), ((41117, 41165), 'numpy.allclose', 'np.allclose', (['uvc2.gain_array[0, 0, :, :, 0]', '(0.0)'], {}), '(uvc2.gain_array[0, 0, :, :, 0], 0.0)\n', (41128, 41165), True, 'import numpy as np\n'), ((41595, 41673), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits"""'], {}), "(DATA_PATH, 'test_input/zen.2457698.40355.xx.HH.uvc.omni.calfits')\n", (41607, 41673), False, 'import os\n'), ((41692, 41780), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_output/zen.2457698.40355.xx.HH.uvc.modified.calfits."""'], {}), "(DATA_PATH,\n 'test_output/zen.2457698.40355.xx.HH.uvc.modified.calfits.')\n", (41704, 41780), False, 'import os\n'), ((41791, 41798), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (41796, 41798), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((42704, 42711), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (42709, 42711), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((42762, 42836), 'pyuvdata.utils._check_histories', 'pyuvdata.utils._check_histories', (['cal2.history', "(cal.history + 'hello world')"], {}), "(cal2.history, cal.history + 'hello world')\n", (42793, 42836), False, 'import pyuvdata\n'), ((42889, 42907), 'os.remove', 'os.remove', (['outname'], {}), '(outname)\n', (42898, 42907), False, 'import os\n'), ((43466, 43473), 
'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (43471, 43473), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((43524, 43598), 'pyuvdata.utils._check_histories', 'pyuvdata.utils._check_histories', (['cal2.history', "(cal.history + 'hello world')"], {}), "(cal2.history, cal.history + 'hello world')\n", (43555, 43598), False, 'import pyuvdata\n'), ((43651, 43669), 'os.remove', 'os.remove', (['outname'], {}), '(outname)\n', (43660, 43669), False, 'import os\n'), ((43754, 43856), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2458101.45361.xx.HH.uvOCR_53x_54x_only.flags.applied.npz"""'], {}), "(DATA_PATH,\n 'test_input/zen.2458101.45361.xx.HH.uvOCR_53x_54x_only.flags.applied.npz')\n", (43766, 43856), False, 'import os\n'), ((44444, 44523), 'os.path.join', 'os.path.join', (['QM_DATA_PATH', '"""zen.2457698.40355.xx.HH.uvcAA.testuvflag.flags.h5"""'], {}), "(QM_DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.testuvflag.flags.h5')\n", (44456, 44523), False, 'import os\n'), ((44907, 44953), 'os.path.join', 'os.path.join', (['QM_DATA_PATH', '"""antenna_flags.h5"""'], {}), "(QM_DATA_PATH, 'antenna_flags.h5')\n", (44919, 44953), False, 'import os\n'), ((45335, 45422), 'os.path.join', 'os.path.join', (['QM_DATA_PATH', '"""zen.2457698.40355.xx.HH.uvcAA.omni.calfits.g.flags.h5"""'], {}), "(QM_DATA_PATH,\n 'zen.2457698.40355.xx.HH.uvcAA.omni.calfits.g.flags.h5')\n", (45347, 45422), False, 'import os\n'), ((45906, 45979), 'os.path.join', 'os.path.join', (['QM_DATA_PATH', '"""zen.2457698.40355.xx.HH.uvcAA.testuvflag.h5"""'], {}), "(QM_DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.testuvflag.h5')\n", (45918, 45979), False, 'import os\n'), ((46235, 46322), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2458098.43124.downsample.redcal_meta.hdf5"""'], {}), "(DATA_PATH,\n 'test_input/zen.2458098.43124.downsample.redcal_meta.hdf5')\n", (46247, 46322), False, 'import os\n'), ((46436, 46499), 'os.path.join', 'os.path.join', (['DATA_PATH', 
'"""test_output/redcal_meta_io_test.hdf5"""'], {}), "(DATA_PATH, 'test_output/redcal_meta_io_test.hdf5')\n", (46448, 46499), False, 'import os\n'), ((47031, 47075), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['freqs', 'freqs2'], {}), '(freqs, freqs2)\n', (47060, 47075), True, 'import numpy as np\n'), ((47084, 47128), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['times', 'times2'], {}), '(times, times2)\n', (47113, 47128), True, 'import numpy as np\n'), ((47137, 47179), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['lsts', 'lsts2'], {}), '(lsts, lsts2)\n', (47166, 47179), True, 'import numpy as np\n'), ((47306, 47325), 'os.remove', 'os.remove', (['out_path'], {}), '(out_path)\n', (47315, 47325), False, 'import os\n'), ((47378, 47428), 'glob.glob', 'glob.glob', (["(DATA_PATH + '/zen.2458042.*.xx.HH.uvXA')"], {}), "(DATA_PATH + '/zen.2458042.*.xx.HH.uvXA')\n", (47387, 47428), False, 'import glob\n'), ((48924, 49017), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.first.uvh5"""'], {}), "(DATA_PATH,\n 'test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.first.uvh5')\n", (48936, 49017), False, 'import os\n'), ((49031, 49125), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.second.uvh5"""'], {}), "(DATA_PATH,\n 'test_input/zen.2458101.46106.xx.HH.OCR_53x_54x_only.second.uvh5')\n", (49043, 49125), False, 'import os\n'), ((50078, 50155), 'os.path.join', 'os.path.join', (['tmp_path', '"""zen.2458101.46106.xx.HH.OCR_53x_54x_only.first.uvh5"""'], {}), "(tmp_path, 'zen.2458101.46106.xx.HH.OCR_53x_54x_only.first.uvh5')\n", (50090, 50155), False, 'import os\n'), ((50177, 50255), 'os.path.join', 'os.path.join', (['tmp_path', '"""zen.2458101.46106.xx.HH.OCR_53x_54x_only.second.uvh5"""'], {}), "(tmp_path, 'zen.2458101.46106.xx.HH.OCR_53x_54x_only.second.uvh5')\n", (50189, 50255), False, 'import 
os\n'), ((52086, 52118), 'numpy.zeros_like', 'np.zeros_like', (['ft[k]'], {'dtype': 'bool'}), '(ft[k], dtype=bool)\n', (52099, 52118), True, 'import numpy as np\n'), ((1670, 1694), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1683, 1694), False, 'import pytest\n'), ((1742, 1767), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1755, 1767), False, 'import pytest\n'), ((2652, 2677), 'numpy.unique', 'np.unique', (['uvc.freq_array'], {}), '(uvc.freq_array)\n', (2661, 2677), True, 'import numpy as np\n'), ((2727, 2752), 'numpy.unique', 'np.unique', (['uvc.time_array'], {}), '(uvc.time_array)\n', (2736, 2752), True, 'import numpy as np\n'), ((4501, 4543), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['g[k]', 'g2[k]'], {}), '(g[k], g2[k])\n', (4530, 4543), True, 'import numpy as np\n'), ((4764, 4806), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['g[k]', 'g2[k]'], {}), '(g[k], g2[k])\n', (4793, 4806), True, 'import numpy as np\n'), ((5019, 5045), 'numpy.logical_not', 'np.logical_not', (['flags[key]'], {}), '(flags[key])\n', (5033, 5045), True, 'import numpy as np\n'), ((5502, 5577), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['(gains_in[key] * (2.0 + 1.0j))', 'gains_out[key]'], {}), '(gains_in[key] * (2.0 + 1.0j), gains_out[key])\n', (5531, 5577), True, 'import numpy as np\n'), ((5679, 5745), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['(quals_in[key] * 2.0)', 'quals_out[key]'], {}), '(quals_in[key] * 2.0, quals_out[key])\n', (5708, 5745), True, 'import numpy as np\n'), ((5798, 5874), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['(total_qual_in[key] * 2.0)', 'total_qual_out[key]'], {}), '(total_qual_in[key] * 2.0, total_qual_out[key])\n', (5827, 5874), True, 'import numpy as np\n'), ((7673, 7740), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['hd.freqs[f]', 
'individual_hds[f].freqs'], {}), '(hd.freqs[f], individual_hds[f].freqs)\n', (7702, 7740), True, 'import numpy as np\n'), ((7792, 7855), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['hd.bls[f]', 'individual_hds[f].bls'], {}), '(hd.bls[f], individual_hds[f].bls)\n', (7821, 7855), True, 'import numpy as np\n'), ((7910, 7977), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['hd.times[f]', 'individual_hds[f].times'], {}), '(hd.times[f], individual_hds[f].times)\n', (7939, 7977), True, 'import numpy as np\n'), ((8031, 8096), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['hd.lsts[f]', 'individual_hds[f].lsts'], {}), '(hd.lsts[f], individual_hds[f].lsts)\n', (8060, 8096), True, 'import numpy as np\n'), ((8582, 8606), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (8595, 8606), False, 'import pytest\n'), ((8655, 8680), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8668, 8680), False, 'import pytest\n'), ((8727, 8761), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (8740, 8761), False, 'import pytest\n'), ((8834, 8856), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (8847, 8856), False, 'import pytest\n'), ((11333, 11367), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (11346, 11367), False, 'import pytest\n'), ((12476, 12499), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (12489, 12499), False, 'import pytest\n'), ((13385, 13410), 'numpy.random.randn', 'np.random.randn', (['(60)', '(1024)'], {}), '(60, 1024)\n', (13400, 13410), True, 'import numpy as np\n'), ((13553, 13569), 'numpy.conj', 'np.conj', (['new_vis'], {}), '(new_vis)\n', (13560, 13569), True, 'import numpy as np\n'), ((13619, 13644), 'numpy.random.randn', 'np.random.randn', (['(60)', '(1024)'], {}), '(60, 1024)\n', (13634, 13644), True, 'import numpy as 
np\n'), ((13846, 13871), 'numpy.random.randn', 'np.random.randn', (['(60)', '(1024)'], {}), '(60, 1024)\n', (13861, 13871), True, 'import numpy as np\n'), ((14138, 14161), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (14151, 14161), False, 'import pytest\n'), ((15057, 15085), 'numpy.all', 'np.all', (['(dc.freqs == hd.freqs)'], {}), '(dc.freqs == hd.freqs)\n', (15063, 15085), True, 'import numpy as np\n'), ((15105, 15133), 'numpy.all', 'np.all', (['(dc.times == hd.times)'], {}), '(dc.times == hd.times)\n', (15111, 15133), True, 'import numpy as np\n'), ((15153, 15179), 'numpy.all', 'np.all', (['(dc.lsts == hd.lsts)'], {}), '(dc.lsts == hd.lsts)\n', (15159, 15179), True, 'import numpy as np\n'), ((16233, 16244), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16242, 16244), False, 'import os\n'), ((16458, 16473), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (16467, 16473), False, 'import os\n'), ((17093, 17118), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17106, 17118), False, 'import pytest\n'), ((18883, 18917), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (18896, 18917), False, 'import pytest\n'), ((19403, 19437), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (19416, 19437), False, 'import pytest\n'), ((19839, 19860), 'numpy.logical_not', 'np.logical_not', (['f[bl]'], {}), '(f[bl])\n', (19853, 19860), True, 'import numpy as np\n'), ((20016, 20067), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['d[bl]', 'd2[bl]'], {}), '(d[bl], d2[bl])\n', (20052, 20067), True, 'import numpy as np\n'), ((20080, 20124), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['f[bl]', 'f2[bl]'], {}), '(f[bl], f2[bl])\n', (20109, 20124), True, 'import numpy as np\n'), ((20137, 20181), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['n[bl]', 'n2[bl]'], {}), 
'(n[bl], n2[bl])\n', (20166, 20181), True, 'import numpy as np\n'), ((21772, 21838), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['(d[bl] * (2.0 + 1.0j))', 'd2[bl]'], {}), '(d[bl] * (2.0 + 1.0j), d2[bl])\n', (21808, 21838), True, 'import numpy as np\n'), ((21851, 21895), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['f[bl]', 'f2[bl]'], {}), '(f[bl], f2[bl])\n', (21880, 21895), True, 'import numpy as np\n'), ((21908, 21952), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['n[bl]', 'n2[bl]'], {}), '(n[bl], n2[bl])\n', (21937, 21952), True, 'import numpy as np\n'), ((22073, 22107), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (22086, 22107), False, 'import pytest\n'), ((22211, 22245), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (22224, 22245), False, 'import pytest\n'), ((23504, 23538), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (23517, 23538), False, 'import pytest\n'), ((23835, 23898), 'numpy.asarray', 'np.asarray', (['[(hd.antpos[bl[0]] - hd.antpos[bl[1]]) for bl in d]'], {}), '([(hd.antpos[bl[0]] - hd.antpos[bl[1]]) for bl in d])\n', (23845, 23898), True, 'import numpy as np\n'), ((24306, 24340), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (24319, 24340), False, 'import pytest\n'), ((25275, 25309), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (25288, 25309), False, 'import pytest\n'), ((26356, 26390), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (26369, 26390), False, 'import pytest\n'), ((28770, 28804), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (28783, 28804), False, 'import pytest\n'), ((28889, 28923), 'pytest.raises', 'pytest.raises', 
(['NotImplementedError'], {}), '(NotImplementedError)\n', (28902, 28923), False, 'import pytest\n'), ((29331, 29355), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (29344, 29355), False, 'import pytest\n'), ((31445, 31475), 'numpy.argsort', 'np.argsort', (['uvd.baseline_array'], {}), '(uvd.baseline_array)\n', (31455, 31475), True, 'import numpy as np\n'), ((31871, 31931), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""zen.2458044.41632.xx.HH.XRAA.uvh5"""'], {}), "(DATA_PATH, 'zen.2458044.41632.xx.HH.XRAA.uvh5')\n", (31883, 31931), False, 'import os\n'), ((32104, 32138), 'numpy.ones_like', 'np.ones_like', (['nsample[k]', 'np.float'], {}), '(nsample[k], np.float)\n', (32116, 32138), True, 'import numpy as np\n'), ((32802, 32863), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['hd.antpos[ant]', 'ap[ant]'], {}), '(hd.antpos[ant], ap[ant])\n', (32838, 32863), True, 'import numpy as np\n'), ((33724, 33746), 'shutil.rmtree', 'shutil.rmtree', (['"""ex.uv"""'], {}), "('ex.uv')\n", (33737, 33746), False, 'import shutil\n'), ((34256, 34275), 'numpy.logical_not', 'np.logical_not', (['val'], {}), '(val)\n', (34270, 34275), True, 'import numpy as np\n'), ((34647, 34677), 'numpy.all', 'np.all', (['(new_data[k] == data[k])'], {}), '(new_data[k] == data[k])\n', (34653, 34677), True, 'import numpy as np\n'), ((34697, 34729), 'numpy.all', 'np.all', (['(new_flags[k] == flags[k])'], {}), '(new_flags[k] == flags[k])\n', (34703, 34729), True, 'import numpy as np\n'), ((34999, 35023), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (35012, 35023), False, 'import pytest\n'), ((35240, 35274), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (35253, 35274), False, 'import pytest\n'), ((35863, 35893), 'numpy.all', 'np.all', (['(new_data[k] == data[k])'], {}), '(new_data[k] == data[k])\n', (35869, 35893), True, 'import numpy as np\n'), ((35913, 35945), 
'numpy.all', 'np.all', (['(new_flags[k] == flags[k])'], {}), '(new_flags[k] == flags[k])\n', (35919, 35945), True, 'import numpy as np\n'), ((36257, 36281), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (36270, 36281), False, 'import pytest\n'), ((36715, 36739), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (36728, 36739), False, 'import pytest\n'), ((37454, 37461), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (37459, 37461), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((37463, 37470), 'pyuvdata.UVCal', 'UVCal', ([], {}), '()\n', (37468, 37470), False, 'from pyuvdata import UVCal, UVData, UVFlag\n'), ((38442, 38477), 'numpy.ones', 'np.ones', (['(Ntimes, Nfreqs)', 'np.float'], {}), '((Ntimes, Nfreqs), np.float)\n', (38449, 38477), True, 'import numpy as np\n'), ((39282, 39304), 'numpy.sum', 'np.sum', (['uvc.gain_array'], {}), '(uvc.gain_array)\n', (39288, 39304), True, 'import numpy as np\n'), ((39339, 39376), 'numpy.any', 'np.any', (['uvc.flag_array[0, 0, 0, 0, 0]'], {}), '(uvc.flag_array[0, 0, 0, 0, 0])\n', (39345, 39376), True, 'import numpy as np\n'), ((39392, 39414), 'numpy.sum', 'np.sum', (['uvc.flag_array'], {}), '(uvc.flag_array)\n', (39398, 39414), True, 'import numpy as np\n'), ((39513, 39538), 'numpy.sum', 'np.sum', (['uvc.quality_array'], {}), '(uvc.quality_array)\n', (39519, 39538), True, 'import numpy as np\n'), ((39700, 39723), 'os.remove', 'os.remove', (['"""ex.calfits"""'], {}), "('ex.calfits')\n", (39709, 39723), False, 'import os\n'), ((39906, 39929), 'os.remove', 'os.remove', (['"""ex.calfits"""'], {}), "('ex.calfits')\n", (39915, 39929), False, 'import os\n'), ((42071, 42090), 'numpy.logical_not', 'np.logical_not', (['val'], {}), '(val)\n', (42085, 42090), True, 'import numpy as np\n'), ((42552, 42584), 'numpy.all', 'np.all', (['(new_gains[k] == gains[k])'], {}), '(new_gains[k] == gains[k])\n', (42558, 42584), True, 'import numpy as np\n'), ((42604, 42636), 'numpy.all', 'np.all', 
(['(new_flags[k] == flags[k])'], {}), '(new_flags[k] == flags[k])\n', (42610, 42636), True, 'import numpy as np\n'), ((42656, 42688), 'numpy.all', 'np.all', (['(new_quals[k] == quals[k])'], {}), '(new_quals[k] == quals[k])\n', (42662, 42688), True, 'import numpy as np\n'), ((43314, 43346), 'numpy.all', 'np.all', (['(new_gains[k] == gains[k])'], {}), '(new_gains[k] == gains[k])\n', (43320, 43346), True, 'import numpy as np\n'), ((43366, 43398), 'numpy.all', 'np.all', (['(new_flags[k] == flags[k])'], {}), '(new_flags[k] == flags[k])\n', (43372, 43398), True, 'import numpy as np\n'), ((43418, 43450), 'numpy.all', 'np.all', (['(new_quals[k] == quals[k])'], {}), '(new_quals[k] == quals[k])\n', (43424, 43450), True, 'import numpy as np\n'), ((44047, 44094), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['f[:, 0:50]', '(True)'], {}), '(f[:, 0:50], True)\n', (44076, 44094), True, 'import numpy as np\n'), ((44107, 44154), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['f[:, -50:]', '(True)'], {}), '(f[:, -50:], True)\n', (44136, 44154), True, 'import numpy as np\n'), ((45791, 45816), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (45804, 45816), False, 'import pytest\n'), ((45993, 46022), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (46006, 46022), False, 'import pytest\n'), ((47226, 47261), 'numpy.all', 'np.all', (['(antpos[ant] == antpos2[ant])'], {}), '(antpos[ant] == antpos2[ant])\n', (47232, 47261), True, 'import numpy as np\n'), ((52180, 52211), 'numpy.ones_like', 'np.ones_like', (['ft[k]'], {'dtype': 'bool'}), '(ft[k], dtype=bool)\n', (52192, 52211), True, 'import numpy as np\n'), ((2559, 2611), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jxx"""'], {'x_orientation': 'hc.x_orientation'}), "('jxx', x_orientation=hc.x_orientation)\n", (2572, 2611), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((2791, 2843), 
'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jxx"""'], {'x_orientation': 'hc.x_orientation'}), "('jxx', x_orientation=hc.x_orientation)\n", (2804, 2843), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((2845, 2897), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jyy"""'], {'x_orientation': 'hc.x_orientation'}), "('jyy', x_orientation=hc.x_orientation)\n", (2858, 2897), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((3334, 3386), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jxx"""'], {'x_orientation': 'hc.x_orientation'}), "('jxx', x_orientation=hc.x_orientation)\n", (3347, 3386), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((3388, 3440), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jyy"""'], {'x_orientation': 'hc.x_orientation'}), "('jyy', x_orientation=hc.x_orientation)\n", (3401, 3440), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((4020, 4034), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (4029, 4034), True, 'import numpy as np\n'), ((5620, 5649), 'numpy.logical_not', 'np.logical_not', (['flags_in[key]'], {}), '(flags_in[key])\n', (5634, 5649), True, 'import numpy as np\n'), ((12192, 12242), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {'x_orientation': 'hd.x_orientation'}), "('XX', x_orientation=hd.x_orientation)\n", (12204, 12242), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((13176, 13201), 'numpy.random.randn', 'np.random.randn', (['(60)', '(1024)'], {}), '(60, 1024)\n', (13191, 13201), True, 'import numpy as np\n'), ((13413, 13438), 'numpy.random.randn', 'np.random.randn', (['(60)', '(1024)'], {}), '(60, 1024)\n', (13428, 13438), True, 'import numpy as np\n'), ((13647, 13672), 'numpy.random.randn', 'np.random.randn', (['(60)', '(1024)'], {}), '(60, 1024)\n', (13662, 13672), True, 'import numpy as np\n'), ((13874, 13899), 'numpy.random.randn', 'np.random.randn', (['(60)', 
'(1024)'], {}), '(60, 1024)\n', (13889, 13899), True, 'import numpy as np\n'), ((14719, 14755), 'numpy.all', 'np.all', (['(dc.antpos[k] == hd.antpos[k])'], {}), '(dc.antpos[k] == hd.antpos[k])\n', (14725, 14755), True, 'import numpy as np\n'), ((14903, 14949), 'numpy.all', 'np.all', (['(dc.data_antpos[k] == hd.data_antpos[k])'], {}), '(dc.data_antpos[k] == hd.data_antpos[k])\n', (14909, 14949), True, 'import numpy as np\n'), ((15247, 15293), 'numpy.all', 'np.all', (['(dc.times_by_bl[k] == hd.times_by_bl[k])'], {}), '(dc.times_by_bl[k] == hd.times_by_bl[k])\n', (15253, 15293), True, 'import numpy as np\n'), ((15317, 15372), 'numpy.all', 'np.all', (['(dc.times_by_bl[k] == dc.times_by_bl[k[1], k[0]])'], {}), '(dc.times_by_bl[k] == dc.times_by_bl[k[1], k[0]])\n', (15323, 15372), True, 'import numpy as np\n'), ((15398, 15442), 'numpy.all', 'np.all', (['(dc.lsts_by_bl[k] == hd.lsts_by_bl[k])'], {}), '(dc.lsts_by_bl[k] == hd.lsts_by_bl[k])\n', (15404, 15442), True, 'import numpy as np\n'), ((15466, 15519), 'numpy.all', 'np.all', (['(dc.lsts_by_bl[k] == dc.lsts_by_bl[k[1], k[0]])'], {}), '(dc.lsts_by_bl[k] == dc.lsts_by_bl[k[1], k[0]])\n', (15472, 15519), True, 'import numpy as np\n'), ((16384, 16395), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16393, 16395), False, 'import os\n'), ((16783, 16801), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {}), "('XX')\n", (16795, 16801), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((20380, 20430), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {'x_orientation': 'hd.x_orientation'}), "('XX', x_orientation=hd.x_orientation)\n", (20392, 20430), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((21054, 21104), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {'x_orientation': 'hd.x_orientation'}), "('XX', x_orientation=hd.x_orientation)\n", (21066, 21104), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((21301, 21351), 
'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {'x_orientation': 'hd.x_orientation'}), "('XX', x_orientation=hd.x_orientation)\n", (21313, 21351), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((23923, 23970), 'numpy.isclose', 'np.isclose', (['(bl_lens - bl_lens[0])', '(0.0)'], {'atol': '(1.0)'}), '(bl_lens - bl_lens[0], 0.0, atol=1.0)\n', (23933, 23970), True, 'import numpy as np\n'), ((27703, 27759), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""EE"""'], {'x_orientation': 'self.uvd.x_orientation'}), "('EE', x_orientation=self.uvd.x_orientation)\n", (27715, 27759), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((27973, 28029), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""EE"""'], {'x_orientation': 'self.uvd.x_orientation'}), "('EE', x_orientation=self.uvd.x_orientation)\n", (27985, 28029), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((37318, 37371), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jxx"""'], {'x_orientation': 'cal.x_orientation'}), "('jxx', x_orientation=cal.x_orientation)\n", (37331, 37371), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((37373, 37426), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jyy"""'], {'x_orientation': 'cal.x_orientation'}), "('jyy', x_orientation=cal.x_orientation)\n", (37386, 37426), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((37885, 37941), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jxx"""'], {'x_orientation': 'cal_xx.x_orientation'}), "('jxx', x_orientation=cal_xx.x_orientation)\n", (37898, 37941), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((37943, 37999), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jyy"""'], {'x_orientation': 'cal_yy.x_orientation'}), "('jyy', x_orientation=cal_yy.x_orientation)\n", (37956, 37999), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((38551, 38588), 'numpy.ones', 
'np.ones', (['(Ntimes, Nfreqs)', 'np.complex'], {}), '((Ntimes, Nfreqs), np.complex)\n', (38558, 38588), True, 'import numpy as np\n'), ((38695, 38730), 'numpy.zeros', 'np.zeros', (['(Ntimes, Nfreqs)', 'np.bool'], {}), '((Ntimes, Nfreqs), np.bool)\n', (38703, 38730), True, 'import numpy as np\n'), ((43932, 43950), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {}), "('XX')\n", (43944, 43950), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((44178, 44187), 'numpy.all', 'np.all', (['f'], {}), '(f)\n', (44184, 44187), True, 'import numpy as np\n'), ((46783, 46855), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['fc_meta[key1][key2]', 'fc_meta2[key1][key2]'], {}), '(fc_meta[key1][key2], fc_meta2[key1][key2])\n', (46812, 46855), True, 'import numpy as np\n'), ((46945, 47021), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['omni_meta[key1][key2]', 'omni_meta2[key1][key2]'], {}), '(omni_meta[key1][key2], omni_meta2[key1][key2])\n', (46974, 47021), True, 'import numpy as np\n'), ((2143, 2195), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jxx"""'], {'x_orientation': 'hc.x_orientation'}), "('jxx', x_orientation=hc.x_orientation)\n", (2156, 2195), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((2278, 2330), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jxx"""'], {'x_orientation': 'hc.x_orientation'}), "('jxx', x_orientation=hc.x_orientation)\n", (2291, 2330), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((2416, 2468), 'pyuvdata.utils.parse_jpolstr', 'parse_jpolstr', (['"""jxx"""'], {'x_orientation': 'hc.x_orientation'}), "('jxx', x_orientation=hc.x_orientation)\n", (2429, 2468), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((9555, 9579), 'numpy.unique', 'np.unique', (['hd.time_array'], {}), '(hd.time_array)\n', (9564, 9579), True, 'import numpy as np\n'), ((11102, 11116), 'numpy.arange', 'np.arange', 
(['(180)'], {}), '(180)\n', (11111, 11116), True, 'import numpy as np\n'), ((11117, 11181), 'numpy.logical_and', 'np.logical_and', (['(hd.ant_1_array == bl[0])', '(hd.ant_2_array == bl[1])'], {}), '(hd.ant_1_array == bl[0], hd.ant_2_array == bl[1])\n', (11131, 11181), True, 'import numpy as np\n'), ((11256, 11270), 'numpy.arange', 'np.arange', (['(180)'], {}), '(180)\n', (11265, 11270), True, 'import numpy as np\n'), ((13204, 13229), 'numpy.random.randn', 'np.random.randn', (['(60)', '(1024)'], {}), '(60, 1024)\n', (13219, 13229), True, 'import numpy as np\n'), ((23438, 23488), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {'x_orientation': 'hd.x_orientation'}), "('XX', x_orientation=hd.x_orientation)\n", (23450, 23488), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((30343, 30412), 'numpy.resize', 'np.resize', (['uvd.data_array[uvmask][:, 0, :, uvpol]', 'd[i, j][pol].shape'], {}), '(uvd.data_array[uvmask][:, 0, :, uvpol], d[i, j][pol].shape)\n', (30352, 30412), True, 'import numpy as np\n'), ((30489, 30558), 'numpy.resize', 'np.resize', (['uvd.flag_array[uvmask][:, 0, :, uvpol]', 'f[i, j][pol].shape'], {}), '(uvd.flag_array[uvmask][:, 0, :, uvpol], f[i, j][pol].shape)\n', (30498, 30558), True, 'import numpy as np\n'), ((30995, 31064), 'numpy.resize', 'np.resize', (['uvd.data_array[uvmask][:, 0, :, uvpol]', 'd[i, j][pol].shape'], {}), '(uvd.data_array[uvmask][:, 0, :, uvpol], d[i, j][pol].shape)\n', (31004, 31064), True, 'import numpy as np\n'), ((31141, 31210), 'numpy.resize', 'np.resize', (['uvd.flag_array[uvmask][:, 0, :, uvpol]', 'f[i, j][pol].shape'], {}), '(uvd.flag_array[uvmask][:, 0, :, uvpol], f[i, j][pol].shape)\n', (31150, 31210), True, 'import numpy as np\n'), ((38623, 38658), 'numpy.ones', 'np.ones', (['(Ntimes, Nfreqs)', 'np.float'], {}), '((Ntimes, Nfreqs), np.float)\n', (38630, 38658), True, 'import numpy as np\n'), ((16072, 16083), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16081, 16083), False, 'import 
os\n'), ((16972, 17022), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {'x_orientation': 'hd.x_orientation'}), "('XX', x_orientation=hd.x_orientation)\n", (16984, 17022), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((18763, 18813), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {'x_orientation': 'hd.x_orientation'}), "('XX', x_orientation=hd.x_orientation)\n", (18775, 18813), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n'), ((19285, 19335), 'pyuvdata.utils.parse_polstr', 'parse_polstr', (['"""XX"""'], {'x_orientation': 'hd.x_orientation'}), "('XX', x_orientation=hd.x_orientation)\n", (19297, 19335), False, 'from pyuvdata.utils import parse_polstr, parse_jpolstr\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 16 17:04:17 2019
@author: <NAME>
"""
import numpy as np
import random
from collections import Counter
def nextLiteral(cnf):
    """Pick the first literal of the first clause in *cnf*.

    Clauses are dictionaries keyed by literal, so "first" means the
    clause's insertion order.
    """
    first_clause = cnf[0]
    return next(iter(first_clause))
def randomChoice(cnf):
    """Return a literal chosen at random from a random clause.

    A clause index is drawn uniformly over *all* clauses, then a literal
    is drawn uniformly from that clause.

    Bug fix: the original used ``np.random.randint(0, len(cnf) - 1)``;
    because the upper bound of ``randint`` is exclusive, the last clause
    could never be selected, and a single-clause formula raised
    ``ValueError`` (low >= high).
    """
    clause = np.random.randint(0, len(cnf))
    rand_lit = random.choice(list(cnf[clause].keys()))
    return rand_lit
def DLIS_max(cnf):
    """Convenience wrapper: DLIS selecting the most frequent literal."""
    return DLIS(cnf, take="max")
def DLIS(cnf, take = "min"):
    """Dynamic Largest Individual Sum branching heuristic.

    Counts how many clauses each literal occurs in and returns the
    literal with the smallest count (``take="min"``, the default) or the
    largest (``take="max"``).  Empirically ``min`` performs better here,
    while ``max`` performs terribly.
    Ties are broken by first occurrence, as in the original loop-based
    counting.
    """
    occurrences = Counter(lit for clause in cnf for lit in clause)
    selector = max if take == "max" else min
    return selector(occurrences, key=occurrences.get)
def BOHM(cnf):
    """Return the literal favored by BOHM's heuristic.

    Scores each variable by how many preferably short clauses it can
    satisfy or shrink — the best heuristic of 1992!
    described: https://www.tcs.cs.tu-bs.de/documents/albert_schimpf_bachelors_thesis.pdf
               https://baldur.iti.kit.edu/sat/files/l05.pdf
               https://books.google.nl/books?id=5spuCQAAQBAJ&pg=PA66&lpg=PA66&dq=BOHM%E2%80%99s+Heuristic&source=bl&ots=LZW8LyS_UO&sig=ACfU3U3c80aM_2CGQgfXeD6Q3BmccS3CXg&hl=en&sa=X&ved=2ahUKEwjqqLLKlcPgAhUDfFAKHUDWCekQ6AEwBHoECAYQAQ#v=onepage&q=BOHM%E2%80%99s%20Heuristic&f=false
    """
    # store the score of each literal
    # NOTE(review): this dict is never used — it is shadowed by the list
    # of the same name created below; dead assignment kept as-is.
    literal_score = {}
    # hyper-parameters; the references above suggest these exact values
    alpha = 1
    beta = 2
    # len_lit_count[l] counts, per clause length l, how often each
    # literal appears in clauses of that length
    len_lit_count = {}
    literals = set()
    for clause in cnf:
        l = len(clause)
        if(not l in len_lit_count):
            len_lit_count[l] = Counter()
        for literal in clause:
            len_lit_count[l][literal] += 1
            literals.add(literal)
    literal_score = []
    # Loop over all literals; each gets a score vector with one entry
    # per distinct clause length.
    for literal in literals:
        vector = []
        # Loop over all length of clauses.
        # NOTE(review): keys() iterates in first-appearance order of the
        # clause lengths, not ascending length — the published heuristic
        # compares vectors ordered by increasing clause length; confirm
        # whether this ordering is intended.
        for l in len_lit_count.keys():
            # Counter returns 0 for absent literals, so the max/min pair
            # combines the positive and negative phase counts safely.
            score = alpha*max(len_lit_count[l][literal], len_lit_count[l][-literal])
            score += beta*min(len_lit_count[l][literal], len_lit_count[l][-literal])
            vector.append(score)
        literal_score.append( (literal, vector) )
    # Sort lexicographically on the score vectors (stable sort, ties keep
    # set-iteration order) and return the top literal — the one that is
    # not dominated by any other.
    literal_score.sort(key= lambda item: item[1], reverse=True)
    bohms_favorite = literal_score[0][0]
    return bohms_favorite
def paretoDominant(cnf):
    """Satisfy or reduce size of many preferably short clauses, based on BOHM.

    Each positive variable gets a BOHM-style score vector (one entry per
    distinct clause length); the variable whose vector has the largest
    Euclidean norm is returned.

    Bug fix: the original grouped clauses with
    ``if len_clauses.get(len(clause), True):`` — any non-empty bucket is
    truthy, so the bucket was reset on *every* clause and only the last
    clause of each length survived, corrupting all scores.
    """
    # store the score of each literal (positive variables only)
    literal_score = {}
    # clauses grouped by their length
    len_clauses = {}
    # hyperparameters suggested to be set to those values
    alpha = 1
    beta = 2
    # group the clauses by length (buckets accumulate, never reset)
    for clause in cnf:
        len_clauses.setdefault(len(clause), []).append(clause)
    # Loop over all literals
    for clause in cnf:
        for literal in clause:
            # negative literals are evaluated together with positive literals
            if literal < 0:
                continue
            vector = []
            # one score per distinct clause length
            for lc in len_clauses:
                pos_count = 0
                neg_count = 0
                # count occurrences of both phases of the literal in
                # every clause of this length
                for c in len_clauses[lc]:
                    for lit in c:
                        if lit == literal:
                            pos_count += 1
                        if lit == -literal:
                            neg_count += 1
                vector.append(alpha*max(pos_count, neg_count) +
                              beta*min(pos_count, neg_count))
            # collapse the vector to a scalar via its Euclidean norm
            literal_score[literal] = np.linalg.norm(np.array(vector))
    # returns the literal that is not dominated by any other.
    pareto = max(literal_score, key=lambda key: literal_score[key])
    return pareto
| [
"collections.Counter",
"numpy.array",
"numpy.random.randint"
] | [((354, 387), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_clauses'], {}), '(0, num_clauses)\n', (371, 387), True, 'import numpy as np\n'), ((2013, 2022), 'collections.Counter', 'Counter', ([], {}), '()\n', (2020, 2022), False, 'from collections import Counter\n'), ((4518, 4534), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (4526, 4534), True, 'import numpy as np\n')] |
from PeptideBuilder import Geometry
import PeptideBuilder
import Bio.PDB
from Bio.PDB import calc_angle, rotaxis, Vector
from math import *
import numpy as np
def bytes2string(tbt_array):
    """Decode a tensor of uint8 character codes into a Python string.

    The tensor is converted to raw bytes, truncated at the first NUL
    byte (padding), and decoded as UTF-8.

    Fix: ``ndarray.tostring()`` has been deprecated since NumPy 1.19 and
    removed in NumPy 2.0; ``tobytes()`` is the exact replacement.
    """
    raw = tbt_array.numpy().astype(dtype=np.uint8).tobytes()
    return raw.split(b'\00')[0].decode("utf-8")
def generateAA(aaName):
    """Build a single amino-acid residue in a canonical orientation.

    Constructs the residue *aaName* with PeptideBuilder (phi and the
    preceding psi set to 0), then applies three in-place transforms:
    rotate -90 degrees about x, translate the first atom to the origin,
    and rotate pi about the axis through the second atom's position.
    Side effect: writes the resulting structure to ``example.pdb`` in
    the current directory.  Returns the residue object
    ``structure[0]['A'][1]``.
    """
    geo = Geometry.geometry(aaName)
    geo.phi=0
    geo.psi_im1=0
    structure = PeptideBuilder.initialize_res(geo)
    # rotation matrix for -90 degrees about the x axis
    tx = -np.pi/2.0
    Rx = np.array([[1,0,0], [0, cos(tx), -sin(tx)], [0, sin(tx), cos(tx)]])
    for atom in structure.get_atoms():
        atom.transform(Rx, np.array([0,0,0]))
    # translate so the first atom (presumably the backbone N — TODO
    # confirm against PeptideBuilder's atom ordering) sits at the origin
    nAtom = list(structure.get_atoms())[0]
    nV = nAtom.get_coord()
    I = np.identity(3)
    for atom in structure.get_atoms():
        atom.transform(I, -nV)
    # rotate pi around the axis defined by the second atom's position
    R = rotaxis(np.pi, list(structure.get_atoms())[1].get_vector())
    for atom in structure.get_atoms():
        atom.transform(R, np.array([0,0,0]))
    # print(list(structure.get_atoms())[1].get_coord(), list(structure.get_atoms())[1])
    # persist the oriented structure for inspection (side effect)
    out = Bio.PDB.PDBIO()
    out.set_structure(structure)
    out.save( "example.pdb" )
    return structure[0]['A'][1]
def transform(structure):
    """Rotate/translate *structure* in place into a canonical orientation.

    Three steps, each applied to every atom: (1) rotate -90 degrees
    about the x axis, (2) translate so the first atom sits at the
    origin, (3) rotate pi around the axis through the second atom's
    position vector.  The structure is mutated in place and returned.
    """
    zero_shift = np.array([0, 0, 0])
    angle = -np.pi/2.0
    rot_x = np.array([[1, 0, 0],
                      [0, cos(angle), -sin(angle)],
                      [0, sin(angle), cos(angle)]])
    for atom in structure.get_atoms():
        atom.transform(rot_x, zero_shift)
    # shift the first atom onto the origin
    origin_shift = -list(structure.get_atoms())[0].get_coord()
    identity = np.identity(3)
    for atom in structure.get_atoms():
        atom.transform(identity, origin_shift)
    # flip by pi around the second atom's direction
    flip = rotaxis(np.pi, list(structure.get_atoms())[1].get_vector())
    for atom in structure.get_atoms():
        atom.transform(flip, zero_shift)
    return structure
"numpy.identity",
"PeptideBuilder.Geometry.geometry",
"numpy.array",
"PeptideBuilder.initialize_res"
] | [((316, 341), 'PeptideBuilder.Geometry.geometry', 'Geometry.geometry', (['aaName'], {}), '(aaName)\n', (333, 341), False, 'from PeptideBuilder import Geometry\n'), ((381, 415), 'PeptideBuilder.initialize_res', 'PeptideBuilder.initialize_res', (['geo'], {}), '(geo)\n', (410, 415), False, 'import PeptideBuilder\n'), ((654, 668), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (665, 668), True, 'import numpy as np\n'), ((1332, 1346), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (1343, 1346), True, 'import numpy as np\n'), ((565, 584), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (573, 584), True, 'import numpy as np\n'), ((852, 871), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (860, 871), True, 'import numpy as np\n'), ((1243, 1262), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1251, 1262), True, 'import numpy as np\n'), ((1530, 1549), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1538, 1549), True, 'import numpy as np\n')] |
"""
This script compute all power spectra and write them to disk.
It uses the window function provided in the dictionnary file.
Optionally, it applies a calibration to the maps, a kspace filter and deconvolve the pixel window function.
The spectra are then combined in mean auto, cross and noise power spectrum and written to disk.
If write_all_spectra=True, each individual spectrum is also written to disk.
"""
from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing
from pixell import enmap
import numpy as np
import healpy as hp
import sys
import data_analysis_utils
import time
# Read the analysis parameters from the dictionnary file given on the command line.
d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
# global analysis settings pulled from the dictionnary
surveys = d["surveys"]
lmax = d["lmax"]
niter = d["niter"]
# NOTE(review): "type" (and "filter" below) shadow Python builtins; kept as-is.
type = d["type"]
binning_file = d["binning_file"]
write_all_spectra = d["write_splits_spectra"]
deconvolve_pixwin = d["deconvolve_pixwin"]
# fixed on-disk layout for inputs and outputs
window_dir = "windows"
mcm_dir = "mcms"
specDir = "spectra"
plot_dir = "plots/maps/"
pspy_utils.create_directory(plot_dir)
pspy_utils.create_directory(specDir)
# the nine TEB spectrum combinations and the spin blocks of the mode-coupling matrices
spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
spin_pairs = ["spin0xspin0", "spin0xspin2", "spin2xspin0", "spin2xspin2"]
ncomp = 3
# per-survey containers filled by the loops below
master_alms = {}
nsplit = {}
pixwin_l = {}
template = {}
filter = {}
# compute the alms: for every survey/array/split, read the map, optionally
# calibrate, kspace-filter and deconvolve the pixel window, then take the
# spherical harmonic transform weighted by the analysis window
for sv in surveys:
    arrays = d["arrays_%s" % sv]
    # the first array's temperature window defines the survey's map geometry
    template[sv] = so_map.read_map(d["window_T_%s_%s" % (sv, arrays[0])])
    ks_f = d["k_filter_%s" % sv]
    if template[sv].pixel == "CAR" and ks_f["apply"]:
        # build the 2d Fourier-space filter once per survey
        shape, wcs = template[sv].data.shape, template[sv].data.wcs
        if ks_f["type"] == "binary_cross":
            filter[sv] = so_map_preprocessing.build_std_filter(shape, wcs, vk_mask=ks_f["vk_mask"], hk_mask=ks_f["hk_mask"], dtype=np.float32)
        elif ks_f["type"] == "gauss":
            filter[sv] = so_map_preprocessing.build_sigurd_filter(shape, wcs, ks_f["lbounds"], dtype=np.float32)
        else:
            print("you need to specify a valid filter type")
            sys.exit()
    for ar in arrays:
        win_T = so_map.read_map(d["window_T_%s_%s" % (sv, ar)])
        win_pol = so_map.read_map(d["window_pol_%s_%s" % (sv, ar)])
        window_tuple = (win_T, win_pol)
        del win_T, win_pol
        maps = d["maps_%s_%s" % (sv, ar)]
        nsplit[sv] = len(maps)
        cal = d["cal_%s_%s" % (sv, ar)]
        print("%s split of survey: %s, array %s"%(nsplit[sv], sv, ar))
        if deconvolve_pixwin:
            # ok so this is a bit overcomplicated because we need to take into account CAR and HEALPIX
            # for CAR the pixel window function deconvolution is done in Fourier space and take into account
            # the anisotropy if the pixwin
            # In HEALPIX it's a simple 1d function in multipole space
            # we also need to take account the case where we have projected Planck into a CAR pixellisation since
            # the native pixel window function of Planck need to be deconvolved
            if window_tuple[0].pixel == "CAR":
                wy, wx = enmap.calc_window(window_tuple[0].data.shape)
                # anisotropic inverse pixel window in (ly, lx) space
                inv_pixwin_lxly = (wy[:,None] * wx[None,:]) ** (-1)
                inv_pixwin_lxly = inv_pixwin_lxly.astype(np.float32)
                pixwin_l[sv] = np.ones(2 * lmax)
                if "planck" in sv.lower():
                    print("Deconvolve Planck pixel window function")
                    # we include this special case for Planck projected in CAR taking into account the Planck native pixellisation
                    # we should check if the projection doesn't include an extra pixel window
                    inv_pixwin_lxly = None
                    pixwin_l[sv] = hp.pixwin(2048)
            elif window_tuple[0].pixel == "HEALPIX":
                pixwin_l[sv] = hp.pixwin(window_tuple[0].nside)
        else:
            inv_pixwin_lxly = None
        t = time.time()
        # NOTE(review): the loop variable "map" shadows the builtin; kept as-is.
        for k, map in enumerate(maps):
            if window_tuple[0].pixel == "CAR":
                split = so_map.read_map(map, geometry=window_tuple[0].data.geometry)
                if d["src_free_maps_%s" % sv] == True:
                    # re-add the point-source model to the source-free map
                    point_source_map_name = map.replace("srcfree.fits", "model.fits")
                    if point_source_map_name == map:
                        raise ValueError("No model map is provided! Check map names!")
                    point_source_map = so_map.read_map(point_source_map_name)
                    point_source_mask = so_map.read_map(d["ps_mask_%s_%s" % (sv, ar)])
                    split = data_analysis_utils.get_coadded_map(split, point_source_map, point_source_mask)
                if ks_f["apply"]:
                    # kspace filtering also deconvolves the CAR pixwin when requested
                    print("apply kspace filter on %s" %map)
                    binary = so_map.read_map("%s/binary_%s_%s.fits" % (window_dir, sv, ar))
                    split = data_analysis_utils.get_filtered_map(split, binary, filter[sv], inv_pixwin_lxly=inv_pixwin_lxly, weighted_filter=ks_f["weighted"])
                else:
                    print("WARNING: no kspace filter is applied")
                    if deconvolve_pixwin:
                        binary = so_map.read_map("%s/binary_%s_%s.fits" % (window_dir, sv, ar))
                        split = data_analysis_utils.fourier_mult(split, binary, inv_pixwin_lxly)
            elif window_tuple[0].pixel == "HEALPIX":
                split = so_map.read_map(map)
            # apply the multiplicative calibration
            split.data *= cal
            if d["remove_mean"] == True:
                split = data_analysis_utils.remove_mean(split, window_tuple, ncomp)
            # window-weighted spherical harmonic transform of this split
            master_alms[sv, ar, k] = sph_tools.get_alms(split, window_tuple, niter, lmax)
            print(time.time()- t)
# compute the transfer functions: per survey/array, combine the kspace-filter
# transfer function, the (binned) pixel window and an optional map-maker
# transfer function, and write the result to disk
_, _, lb, _ = pspy_utils.read_binning_file(binning_file, lmax)
tf_array = {}
for id_sv, sv in enumerate(surveys):
    tf_survey = np.ones(len(lb))
    ks_f = d["k_filter_%s" % sv]
    if ks_f["apply"]:
        if ks_f["tf"] == "analytic":
            print("compute analytic kspace tf %s" % sv)
            _, kf_tf = so_map_preprocessing.analytical_tf(template[sv], filter[sv], binning_file, lmax)
        else:
            print("use kspace tf from file %s" % sv)
            _, _, kf_tf, _ = np.loadtxt(ks_f["tf"], unpack=True)
        # the kspace tf applies to the power spectrum; take the sqrt to get
        # the map-level factor
        tf_survey *= np.sqrt(np.abs(kf_tf[:len(lb)]))
    if deconvolve_pixwin:
        # this should be checked with simulations since maybe this should be done at the mcm level
        _, pw = pspy_utils.naive_binning(np.arange(len(pixwin_l[sv])), pixwin_l[sv], binning_file, lmax)
        tf_survey *= pw
    for id_ar, ar in enumerate(d["arrays_%s" % sv]):
        tf_array[sv, ar] = tf_survey.copy()
        if d["deconvolve_map_maker_tf_%s" % sv]:
            print("deconvolve map maker tf %s %s" % (sv, ar))
            _, mm_tf = np.loadtxt("mm_tf_%s_%s.dat" % (sv, ar), unpack=True)
            tf_array[sv, ar] *= mm_tf[:len(lb)]
        np.savetxt(specDir + "/tf_%s_%s.dat" % (sv, ar),
                   np.transpose([lb, tf_array[sv, ar]]))
# compute the power spectra
ps_dict = {}
for id_sv1, sv1 in enumerate(surveys):
for id_ar1, ar1 in enumerate(d["arrays_%s" % sv1]):
for id_sv2, sv2 in enumerate(surveys):
for id_ar2, ar2 in enumerate(d["arrays_%s" % sv2]):
if (id_sv1 == id_sv2) & (id_ar1 > id_ar2) : continue
if (id_sv1 > id_sv2) : continue
for spec in spectra:
ps_dict[spec, "auto"] = []
ps_dict[spec, "cross"] = []
nsplits_1 = nsplit[sv1]
nsplits_2 = nsplit[sv2]
for s1 in range(nsplits_1):
for s2 in range(nsplits_2):
if (sv1 == sv2) & (ar1 == ar2) & (s1>s2) : continue
mbb_inv, Bbl = so_mcm.read_coupling(prefix="%s/%s_%sx%s_%s" % (mcm_dir, sv1, ar1, sv2, ar2),
spin_pairs=spin_pairs)
l, ps_master = so_spectra.get_spectra_pixell(master_alms[sv1, ar1, s1],
master_alms[sv2, ar2, s2],
spectra=spectra)
spec_name="%s_%s_%sx%s_%s_%d%d" % (type, sv1, ar1, sv2, ar2, s1, s2)
lb, ps = so_spectra.bin_spectra(l,
ps_master,
binning_file,
lmax,
type=type,
mbb_inv=mbb_inv,
spectra=spectra)
data_analysis_utils.deconvolve_tf(lb, ps, tf_array[sv1, ar1], tf_array[sv2, ar2], ncomp, lmax)
if write_all_spectra:
so_spectra.write_ps(specDir + "/%s.dat" % spec_name, lb, ps, type, spectra=spectra)
for count, spec in enumerate(spectra):
if (s1 == s2) & (sv1 == sv2):
if count == 0:
print("auto %s_%s X %s_%s %d%d" % (sv1, ar1, sv2, ar2, s1, s2))
ps_dict[spec, "auto"] += [ps[spec]]
else:
if count == 0:
print("cross %s_%s X %s_%s %d%d" % (sv1, ar1, sv2, ar2, s1, s2))
ps_dict[spec, "cross"] += [ps[spec]]
ps_dict_auto_mean = {}
ps_dict_cross_mean = {}
ps_dict_noise_mean = {}
for spec in spectra:
ps_dict_cross_mean[spec] = np.mean(ps_dict[spec, "cross"], axis=0)
spec_name_cross = "%s_%s_%sx%s_%s_cross" % (type, sv1, ar1, sv2, ar2)
if ar1 == ar2 and sv1 == sv2:
# Average TE / ET so that for same array same season TE = ET
ps_dict_cross_mean[spec] = (np.mean(ps_dict[spec, "cross"], axis=0) + np.mean(ps_dict[spec[::-1], "cross"], axis=0)) / 2.
if sv1 == sv2:
ps_dict_auto_mean[spec] = np.mean(ps_dict[spec, "auto"], axis=0)
spec_name_auto = "%s_%s_%sx%s_%s_auto" % (type, sv1, ar1, sv2, ar2)
ps_dict_noise_mean[spec] = (ps_dict_auto_mean[spec] - ps_dict_cross_mean[spec]) / nsplit[sv1]
spec_name_noise = "%s_%s_%sx%s_%s_noise" % (type, sv1, ar1, sv2, ar2)
so_spectra.write_ps(specDir + "/%s.dat" % spec_name_cross, lb, ps_dict_cross_mean, type, spectra=spectra)
if sv1 == sv2:
so_spectra.write_ps(specDir+"/%s.dat" % spec_name_auto, lb, ps_dict_auto_mean, type, spectra=spectra)
so_spectra.write_ps(specDir+"/%s.dat" % spec_name_noise, lb, ps_dict_noise_mean, type, spectra=spectra)
| [
"pspy.pspy_utils.read_binning_file",
"sys.exit",
"pspy.so_map_preprocessing.analytical_tf",
"data_analysis_utils.remove_mean",
"pspy.so_dict.so_dict",
"pspy.so_spectra.write_ps",
"data_analysis_utils.get_filtered_map",
"pspy.sph_tools.get_alms",
"numpy.mean",
"pixell.enmap.calc_window",
"pspy.so... | [((632, 649), 'pspy.so_dict.so_dict', 'so_dict.so_dict', ([], {}), '()\n', (647, 649), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((966, 1003), 'pspy.pspy_utils.create_directory', 'pspy_utils.create_directory', (['plot_dir'], {}), '(plot_dir)\n', (993, 1003), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((1004, 1040), 'pspy.pspy_utils.create_directory', 'pspy_utils.create_directory', (['specDir'], {}), '(specDir)\n', (1031, 1040), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((5939, 5987), 'pspy.pspy_utils.read_binning_file', 'pspy_utils.read_binning_file', (['binning_file', 'lmax'], {}), '(binning_file, lmax)\n', (5967, 5987), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((1355, 1409), 'pspy.so_map.read_map', 'so_map.read_map', (["d['window_T_%s_%s' % (sv, arrays[0])]"], {}), "(d['window_T_%s_%s' % (sv, arrays[0])])\n", (1370, 1409), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((2065, 2112), 'pspy.so_map.read_map', 'so_map.read_map', (["d['window_T_%s_%s' % (sv, ar)]"], {}), "(d['window_T_%s_%s' % (sv, ar)])\n", (2080, 2112), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((2131, 2180), 'pspy.so_map.read_map', 'so_map.read_map', (["d['window_pol_%s_%s' % (sv, ar)]"], {}), "(d['window_pol_%s_%s' % (sv, ar)])\n", (2146, 2180), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((3959, 3970), 'time.time', 'time.time', ([], {}), '()\n', (3968, 3970), False, 'import time\n'), ((1647, 1768), 'pspy.so_map_preprocessing.build_std_filter', 'so_map_preprocessing.build_std_filter', (['shape', 'wcs'], {'vk_mask': 
"ks_f['vk_mask']", 'hk_mask': "ks_f['hk_mask']", 'dtype': 'np.float32'}), "(shape, wcs, vk_mask=ks_f['vk_mask'],\n hk_mask=ks_f['hk_mask'], dtype=np.float32)\n", (1684, 1768), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((5778, 5830), 'pspy.sph_tools.get_alms', 'sph_tools.get_alms', (['split', 'window_tuple', 'niter', 'lmax'], {}), '(split, window_tuple, niter, lmax)\n', (5796, 5830), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((6250, 6335), 'pspy.so_map_preprocessing.analytical_tf', 'so_map_preprocessing.analytical_tf', (['template[sv]', 'filter[sv]', 'binning_file', 'lmax'], {}), '(template[sv], filter[sv], binning_file, lmax\n )\n', (6284, 6335), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((6427, 6462), 'numpy.loadtxt', 'np.loadtxt', (["ks_f['tf']"], {'unpack': '(True)'}), "(ks_f['tf'], unpack=True)\n", (6437, 6462), True, 'import numpy as np\n'), ((7014, 7067), 'numpy.loadtxt', 'np.loadtxt', (["('mm_tf_%s_%s.dat' % (sv, ar))"], {'unpack': '(True)'}), "('mm_tf_%s_%s.dat' % (sv, ar), unpack=True)\n", (7024, 7067), True, 'import numpy as np\n'), ((7205, 7241), 'numpy.transpose', 'np.transpose', (['[lb, tf_array[sv, ar]]'], {}), '([lb, tf_array[sv, ar]])\n', (7217, 7241), True, 'import numpy as np\n'), ((1828, 1920), 'pspy.so_map_preprocessing.build_sigurd_filter', 'so_map_preprocessing.build_sigurd_filter', (['shape', 'wcs', "ks_f['lbounds']"], {'dtype': 'np.float32'}), "(shape, wcs, ks_f['lbounds'], dtype\n =np.float32)\n", (1868, 1920), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((2003, 2013), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2011, 2013), False, 'import sys\n'), ((3099, 3144), 'pixell.enmap.calc_window', 'enmap.calc_window', (['window_tuple[0].data.shape'], {}), 
'(window_tuple[0].data.shape)\n', (3116, 3144), False, 'from pixell import enmap\n'), ((3313, 3330), 'numpy.ones', 'np.ones', (['(2 * lmax)'], {}), '(2 * lmax)\n', (3320, 3330), True, 'import numpy as np\n'), ((4090, 4150), 'pspy.so_map.read_map', 'so_map.read_map', (['map'], {'geometry': 'window_tuple[0].data.geometry'}), '(map, geometry=window_tuple[0].data.geometry)\n', (4105, 4150), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((5664, 5723), 'data_analysis_utils.remove_mean', 'data_analysis_utils.remove_mean', (['split', 'window_tuple', 'ncomp'], {}), '(split, window_tuple, ncomp)\n', (5695, 5723), False, 'import data_analysis_utils\n'), ((5874, 5885), 'time.time', 'time.time', ([], {}), '()\n', (5883, 5885), False, 'import time\n'), ((11291, 11400), 'pspy.so_spectra.write_ps', 'so_spectra.write_ps', (["(specDir + '/%s.dat' % spec_name_cross)", 'lb', 'ps_dict_cross_mean', 'type'], {'spectra': 'spectra'}), "(specDir + '/%s.dat' % spec_name_cross, lb,\n ps_dict_cross_mean, type, spectra=spectra)\n", (11310, 11400), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((3746, 3761), 'healpy.pixwin', 'hp.pixwin', (['(2048)'], {}), '(2048)\n', (3755, 3761), True, 'import healpy as hp\n'), ((3847, 3879), 'healpy.pixwin', 'hp.pixwin', (['window_tuple[0].nside'], {}), '(window_tuple[0].nside)\n', (3856, 3879), True, 'import healpy as hp\n'), ((4488, 4526), 'pspy.so_map.read_map', 'so_map.read_map', (['point_source_map_name'], {}), '(point_source_map_name)\n', (4503, 4526), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((4567, 4613), 'pspy.so_map.read_map', 'so_map.read_map', (["d['ps_mask_%s_%s' % (sv, ar)]"], {}), "(d['ps_mask_%s_%s' % (sv, ar)])\n", (4582, 4613), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((4642, 
4721), 'data_analysis_utils.get_coadded_map', 'data_analysis_utils.get_coadded_map', (['split', 'point_source_map', 'point_source_mask'], {}), '(split, point_source_map, point_source_mask)\n', (4677, 4721), False, 'import data_analysis_utils\n'), ((4846, 4908), 'pspy.so_map.read_map', 'so_map.read_map', (["('%s/binary_%s_%s.fits' % (window_dir, sv, ar))"], {}), "('%s/binary_%s_%s.fits' % (window_dir, sv, ar))\n", (4861, 4908), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((4937, 5071), 'data_analysis_utils.get_filtered_map', 'data_analysis_utils.get_filtered_map', (['split', 'binary', 'filter[sv]'], {'inv_pixwin_lxly': 'inv_pixwin_lxly', 'weighted_filter': "ks_f['weighted']"}), "(split, binary, filter[sv],\n inv_pixwin_lxly=inv_pixwin_lxly, weighted_filter=ks_f['weighted'])\n", (4973, 5071), False, 'import data_analysis_utils\n'), ((5518, 5538), 'pspy.so_map.read_map', 'so_map.read_map', (['map'], {}), '(map)\n', (5533, 5538), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((10388, 10427), 'numpy.mean', 'np.mean', (["ps_dict[spec, 'cross']"], {'axis': '(0)'}), "(ps_dict[spec, 'cross'], axis=0)\n", (10395, 10427), True, 'import numpy as np\n'), ((11448, 11555), 'pspy.so_spectra.write_ps', 'so_spectra.write_ps', (["(specDir + '/%s.dat' % spec_name_auto)", 'lb', 'ps_dict_auto_mean', 'type'], {'spectra': 'spectra'}), "(specDir + '/%s.dat' % spec_name_auto, lb,\n ps_dict_auto_mean, type, spectra=spectra)\n", (11467, 11555), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((11570, 11679), 'pspy.so_spectra.write_ps', 'so_spectra.write_ps', (["(specDir + '/%s.dat' % spec_name_noise)", 'lb', 'ps_dict_noise_mean', 'type'], {'spectra': 'spectra'}), "(specDir + '/%s.dat' % spec_name_noise, lb,\n ps_dict_noise_mean, type, spectra=spectra)\n", (11589, 11679), False, 'from pspy import 
pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((5256, 5318), 'pspy.so_map.read_map', 'so_map.read_map', (["('%s/binary_%s_%s.fits' % (window_dir, sv, ar))"], {}), "('%s/binary_%s_%s.fits' % (window_dir, sv, ar))\n", (5271, 5318), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((5351, 5415), 'data_analysis_utils.fourier_mult', 'data_analysis_utils.fourier_mult', (['split', 'binary', 'inv_pixwin_lxly'], {}), '(split, binary, inv_pixwin_lxly)\n', (5383, 5415), False, 'import data_analysis_utils\n'), ((8168, 8273), 'pspy.so_mcm.read_coupling', 'so_mcm.read_coupling', ([], {'prefix': "('%s/%s_%sx%s_%s' % (mcm_dir, sv1, ar1, sv2, ar2))", 'spin_pairs': 'spin_pairs'}), "(prefix='%s/%s_%sx%s_%s' % (mcm_dir, sv1, ar1, sv2, ar2\n ), spin_pairs=spin_pairs)\n", (8188, 8273), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((8369, 8473), 'pspy.so_spectra.get_spectra_pixell', 'so_spectra.get_spectra_pixell', (['master_alms[sv1, ar1, s1]', 'master_alms[sv2, ar2, s2]'], {'spectra': 'spectra'}), '(master_alms[sv1, ar1, s1], master_alms[sv2,\n ar2, s2], spectra=spectra)\n', (8398, 8473), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((8822, 8928), 'pspy.so_spectra.bin_spectra', 'so_spectra.bin_spectra', (['l', 'ps_master', 'binning_file', 'lmax'], {'type': 'type', 'mbb_inv': 'mbb_inv', 'spectra': 'spectra'}), '(l, ps_master, binning_file, lmax, type=type, mbb_inv\n =mbb_inv, spectra=spectra)\n', (8844, 8928), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((9341, 9439), 'data_analysis_utils.deconvolve_tf', 'data_analysis_utils.deconvolve_tf', (['lb', 'ps', 'tf_array[sv1, ar1]', 'tf_array[sv2, ar2]', 'ncomp', 'lmax'], {}), '(lb, ps, tf_array[sv1, ar1], tf_array[sv2,\n ar2], ncomp, lmax)\n', 
(9374, 9439), False, 'import data_analysis_utils\n'), ((10906, 10944), 'numpy.mean', 'np.mean', (["ps_dict[spec, 'auto']"], {'axis': '(0)'}), "(ps_dict[spec, 'auto'], axis=0)\n", (10913, 10944), True, 'import numpy as np\n'), ((9511, 9599), 'pspy.so_spectra.write_ps', 'so_spectra.write_ps', (["(specDir + '/%s.dat' % spec_name)", 'lb', 'ps', 'type'], {'spectra': 'spectra'}), "(specDir + '/%s.dat' % spec_name, lb, ps, type, spectra=\n spectra)\n", (9530, 9599), False, 'from pspy import pspy_utils, so_dict, so_map, sph_tools, so_mcm, so_spectra, so_map_preprocessing\n'), ((10726, 10765), 'numpy.mean', 'np.mean', (["ps_dict[spec, 'cross']"], {'axis': '(0)'}), "(ps_dict[spec, 'cross'], axis=0)\n", (10733, 10765), True, 'import numpy as np\n'), ((10768, 10813), 'numpy.mean', 'np.mean', (["ps_dict[spec[::-1], 'cross']"], {'axis': '(0)'}), "(ps_dict[spec[::-1], 'cross'], axis=0)\n", (10775, 10813), True, 'import numpy as np\n')] |
import torch.nn as nn
import torch
import numpy as np
class GLU(nn.Module):
    """Gated linear unit that preserves the channel dimension.

    Unlike ``torch.nn.GLU``, which halves the channel count by splitting
    the input in two, the Voice Conversion CycleGAN paper gates the whole
    tensor with its own sigmoid, so the output shape equals the input shape.
    """

    def __init__(self):
        super(GLU, self).__init__()

    def forward(self, input):
        gate = torch.sigmoid(input)
        return input * gate
class PixelShuffle(nn.Module):
    """1D pixel shuffle for 3D tensors of shape (N, C, W).

    ``torch.nn.PixelShuffle`` requires 4D input, so this custom module
    reshapes a 3D tensor instead: the channel count shrinks by
    ``upscale_factor`` while the width grows by the same factor.

    Args:
        upscale_factor (int): factor by which the width is increased
            (and the channel count decreased).
    """

    def __init__(self, upscale_factor):
        super(PixelShuffle, self).__init__()
        self.upscale_factor = upscale_factor

    def forward(self, input):
        # Bug fix: the original hard-coded a factor of 2 and silently
        # ignored self.upscale_factor; any factor now works, and
        # upscale_factor=2 reproduces the previous behaviour exactly.
        n = input.shape[0]
        c_out = input.shape[1] // self.upscale_factor
        w_new = input.shape[2] * self.upscale_factor
        return input.view(n, c_out, w_new)
class ResidualLayer(nn.Module):
    """Gated 1D residual block.

    Two parallel conv + instance-norm branches form a GLU-style gate;
    the gated activation is projected back to ``in_channels`` by a third
    conv + instance-norm branch and added to the input (skip connection).
    Note the convolutions always use stride 1; the ``stride`` argument is
    accepted for signature compatibility but unused, as in the original.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(ResidualLayer, self).__init__()

        def conv_norm(c_in, c_out):
            # one conv (stride fixed to 1) followed by an affine instance norm
            return nn.Sequential(
                nn.Conv1d(in_channels=c_in,
                          out_channels=c_out,
                          kernel_size=kernel_size,
                          stride=1,
                          padding=padding),
                nn.InstanceNorm1d(num_features=c_out, affine=True))

        # value branch, gate branch, and the projection back to in_channels
        self.conv1d_layer = conv_norm(in_channels, out_channels)
        self.conv_layer_gates = conv_norm(in_channels, out_channels)
        self.conv1d_out_layer = conv_norm(out_channels, in_channels)

    def forward(self, input):
        value = self.conv1d_layer(input)
        gate = self.conv_layer_gates(input)
        gated = value * torch.sigmoid(gate)  # GLU
        projected = self.conv1d_out_layer(gated)
        return input + projected
class downSample_Generator(nn.Module):
    """Strided, gated down-sampling block for the generator.

    Two identical conv + instance-norm branches are combined as a GLU:
    the first supplies the values, the second (through a sigmoid) the gate.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(downSample_Generator, self).__init__()

        def branch():
            # conv + affine instance norm, shared configuration for both branches
            return nn.Sequential(
                nn.Conv1d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding),
                nn.InstanceNorm1d(num_features=out_channels, affine=True))

        self.convLayer = branch()
        self.convLayer_gates = branch()

    def forward(self, input):
        # GLU: value branch gated by the sigmoid of the parallel branch
        gate = torch.sigmoid(self.convLayer_gates(input))
        return self.convLayer(input) * gate
class upSample_Generator(nn.Module):
    """Gated up-sampling block: conv -> pixel shuffle -> instance norm.

    Both branches double the width through ``PixelShuffle`` (halving the
    channel count to ``out_channels // 2``); the value branch is gated by
    the sigmoid of the gate branch, GLU style.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(upSample_Generator, self).__init__()

        def branch():
            # conv -> width-doubling pixel shuffle -> affine instance norm
            return nn.Sequential(
                nn.Conv1d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding),
                PixelShuffle(upscale_factor=2),
                nn.InstanceNorm1d(num_features=out_channels // 2, affine=True))

        self.convLayer = branch()
        self.convLayer_gates = branch()

    def forward(self, input):
        # GLU: value branch gated by the sigmoid of the parallel branch
        gate = torch.sigmoid(self.convLayer_gates(input))
        return self.convLayer(input) * gate
class Generator(nn.Module):
    """CycleGAN-VC generator for 24-band Mel-cepstral features.

    Architecture: gated input convolution (GLU) -> two strided
    down-sampling blocks -> six gated residual blocks -> two
    pixel-shuffle up-sampling blocks -> output convolution back to
    24 channels. Input and output shapes are (N, 24, T).
    """

    def __init__(self):
        super(Generator, self).__init__()
        # Input convolutions forming a GLU pair (value / gate).
        self.conv1 = nn.Conv1d(in_channels=24, out_channels=128,
                               kernel_size=15, stride=1, padding=7)
        self.conv1_gates = nn.Conv1d(in_channels=24, out_channels=128,
                                     kernel_size=15, stride=1, padding=7)

        # Down-sampling layers.
        self.downSample1 = downSample_Generator(in_channels=128, out_channels=256,
                                                kernel_size=5, stride=2, padding=1)
        self.downSample2 = downSample_Generator(in_channels=256, out_channels=512,
                                                kernel_size=5, stride=2, padding=2)

        # Residual bottleneck: six identical gated blocks.
        self.residualLayer1 = ResidualLayer(in_channels=512, out_channels=1024,
                                            kernel_size=3, stride=1, padding=1)
        self.residualLayer2 = ResidualLayer(in_channels=512, out_channels=1024,
                                            kernel_size=3, stride=1, padding=1)
        self.residualLayer3 = ResidualLayer(in_channels=512, out_channels=1024,
                                            kernel_size=3, stride=1, padding=1)
        self.residualLayer4 = ResidualLayer(in_channels=512, out_channels=1024,
                                            kernel_size=3, stride=1, padding=1)
        self.residualLayer5 = ResidualLayer(in_channels=512, out_channels=1024,
                                            kernel_size=3, stride=1, padding=1)
        self.residualLayer6 = ResidualLayer(in_channels=512, out_channels=1024,
                                            kernel_size=3, stride=1, padding=1)

        # Up-sampling layers (pixel shuffle halves the conv's channel count).
        self.upSample1 = upSample_Generator(in_channels=512, out_channels=1024,
                                            kernel_size=5, stride=1, padding=2)
        self.upSample2 = upSample_Generator(in_channels=1024 // 2, out_channels=512,
                                            kernel_size=5, stride=1, padding=2)

        # Final projection back to 24 Mel-cepstral bands.
        self.lastConvLayer = nn.Conv1d(in_channels=512 // 2, out_channels=24,
                                       kernel_size=15, stride=1, padding=7)

    def forward(self, input):
        # Gated input convolution (GLU).
        x = self.conv1(input) * torch.sigmoid(self.conv1_gates(input))
        # Two strided down-sampling stages.
        x = self.downSample2(self.downSample1(x))
        # Six residual blocks applied in order.
        for residual in (self.residualLayer1, self.residualLayer2,
                         self.residualLayer3, self.residualLayer4,
                         self.residualLayer5, self.residualLayer6):
            x = residual(x)
        # Two up-sampling stages followed by the output projection.
        x = self.upSample2(self.upSample1(x))
        return self.lastConvLayer(x)
class DownSample_Discriminator(nn.Module):
    """Gated 2D down-sampling block for the discriminator.

    Two identical conv + instance-norm branches combined as a GLU:
    the first supplies values, the second (through a sigmoid) the gate.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(DownSample_Discriminator, self).__init__()

        def branch():
            # conv + affine instance norm, shared configuration for both branches
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=kernel_size,
                          stride=stride,
                          padding=padding),
                nn.InstanceNorm2d(num_features=out_channels, affine=True))

        self.convLayer = branch()
        self.convLayerGates = branch()

    def forward(self, input):
        # GLU: value branch gated by the sigmoid of the gate branch
        gate = torch.sigmoid(self.convLayerGates(input))
        return self.convLayer(input) * gate
class Discriminator(nn.Module):
    """CycleGAN-VC discriminator scoring (batch, num_features, time) inputs.

    Kernel sizes deviate from the paper so that dimensionality is retained:
    PyTorch has no padding='same', hence the explicit ``ZeroPad2d`` calls in
    ``forward`` and the altered kernel shapes.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # Gated input convolution (value / gate pair).
        self.convLayer1 = nn.Conv2d(in_channels=1, out_channels=128,
                                    kernel_size=[3, 3], stride=[1, 2])
        self.convLayer1_gates = nn.Conv2d(in_channels=1, out_channels=128,
                                          kernel_size=[3, 3], stride=[1, 2])

        # Stack of gated down-sampling blocks.
        self.downSample1 = DownSample_Discriminator(in_channels=128, out_channels=256,
                                                    kernel_size=[3, 3], stride=[2, 2],
                                                    padding=0)
        self.downSample2 = DownSample_Discriminator(in_channels=256, out_channels=512,
                                                    kernel_size=[3, 3], stride=[2, 2],
                                                    padding=0)
        self.downSample3 = DownSample_Discriminator(in_channels=512, out_channels=1024,
                                                    kernel_size=[6, 3], stride=[1, 2],
                                                    padding=0)

        # Final per-position linear score. The sigmoid is deliberately
        # omitted to avoid vanishing gradients; use a logit-based loss.
        self.fc = nn.Linear(in_features=1024, out_features=1)

    def forward(self, input):
        # Input arrives as (batch, num_features, time); the discriminator
        # needs (batch, 1, num_features, time).
        input = input.unsqueeze(1)

        # Gated input convolution (GLU); asymmetric zero padding emulates 'same'.
        pad1 = nn.ZeroPad2d((1, 0, 1, 1))
        layer1 = self.convLayer1(
            pad1(input)) * torch.sigmoid(self.convLayer1_gates(pad1(input)))

        pad2 = nn.ZeroPad2d((1, 0, 1, 0))
        down1 = self.downSample1(pad2(layer1))

        pad3 = nn.ZeroPad2d((1, 0, 1, 0))
        down2 = self.downSample2(pad3(down1))

        pad4 = nn.ZeroPad2d((1, 0, 3, 2))
        down3 = self.downSample3(pad4(down2))

        # Move channels last before the per-position linear layer.
        down3 = down3.contiguous().permute(0, 2, 3, 1).contiguous()
        return self.fc(down3)
if __name__ == '__main__':
    # Generator dimensionality testing
    dummy = torch.randn(10, 24, 1100)  # (N, C_in, Width) for Conv1d
    np.random.seed(0)
    print(np.random.randn(10))
    features = torch.from_numpy(np.random.randn(158, 24, 128)).float()
    generator = Generator()
    gen_out = generator(features)
    print("Output shape Generator", gen_out.shape)
    # Discriminator dimensionality testing: feed the generator's output
    # straight into the discriminator.
    discriminator = Discriminator()
    disc_out = discriminator(gen_out)
    print("Output shape Discriminator", disc_out.shape)
| [
"torch.nn.ZeroPad2d",
"torch.nn.InstanceNorm1d",
"torch.sigmoid",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"numpy.random.seed",
"torch.nn.Linear",
"numpy.random.randn",
"torch.nn.Conv1d",
"torch.randn"
] | [((16781, 16806), 'torch.randn', 'torch.randn', (['(10)', '(24)', '(1100)'], {}), '(10, 24, 1100)\n', (16792, 16806), False, 'import torch\n'), ((16842, 16859), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (16856, 16859), True, 'import numpy as np\n'), ((16903, 16932), 'numpy.random.randn', 'np.random.randn', (['(158)', '(24)', '(128)'], {}), '(158, 24, 128)\n', (16918, 16932), True, 'import numpy as np\n'), ((7138, 7223), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(24)', 'out_channels': '(128)', 'kernel_size': '(15)', 'stride': '(1)', 'padding': '(7)'}), '(in_channels=24, out_channels=128, kernel_size=15, stride=1, padding=7\n )\n', (7147, 7223), True, 'import torch.nn as nn\n'), ((7371, 7456), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(24)', 'out_channels': '(128)', 'kernel_size': '(15)', 'stride': '(1)', 'padding': '(7)'}), '(in_channels=24, out_channels=128, kernel_size=15, stride=1, padding=7\n )\n', (7380, 7456), True, 'import torch.nn as nn\n'), ((10652, 10741), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(512 // 2)', 'out_channels': '(24)', 'kernel_size': '(15)', 'stride': '(1)', 'padding': '(7)'}), '(in_channels=512 // 2, out_channels=24, kernel_size=15, stride=1,\n padding=7)\n', (10661, 10741), True, 'import torch.nn as nn\n'), ((13190, 13267), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(128)', 'kernel_size': '[3, 3]', 'stride': '[1, 2]'}), '(in_channels=1, out_channels=128, kernel_size=[3, 3], stride=[1, 2])\n', (13199, 13267), True, 'import torch.nn as nn\n'), ((13408, 13485), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(128)', 'kernel_size': '[3, 3]', 'stride': '[1, 2]'}), '(in_channels=1, out_channels=128, kernel_size=[3, 3], stride=[1, 2])\n', (13417, 13485), True, 'import torch.nn as nn\n'), ((15020, 15063), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(1024)', 'out_features': '(1)'}), '(in_features=1024, 
out_features=1)\n', (15029, 15063), True, 'import torch.nn as nn\n'), ((15970, 15996), 'torch.nn.ZeroPad2d', 'nn.ZeroPad2d', (['(1, 0, 1, 1)'], {}), '((1, 0, 1, 1))\n', (15982, 15996), True, 'import torch.nn as nn\n'), ((16139, 16165), 'torch.nn.ZeroPad2d', 'nn.ZeroPad2d', (['(1, 0, 1, 0)'], {}), '((1, 0, 1, 0))\n', (16151, 16165), True, 'import torch.nn as nn\n'), ((16245, 16271), 'torch.nn.ZeroPad2d', 'nn.ZeroPad2d', (['(1, 0, 1, 0)'], {}), '((1, 0, 1, 0))\n', (16257, 16271), True, 'import torch.nn as nn\n'), ((16356, 16382), 'torch.nn.ZeroPad2d', 'nn.ZeroPad2d', (['(1, 0, 3, 2)'], {}), '((1, 0, 3, 2))\n', (16368, 16382), True, 'import torch.nn as nn\n'), ((16870, 16889), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (16885, 16889), True, 'import numpy as np\n'), ((334, 354), 'torch.sigmoid', 'torch.sigmoid', (['input'], {}), '(input)\n', (347, 354), False, 'import torch\n'), ((2293, 2411), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=1, padding=padding)\n', (2302, 2411), True, 'import torch.nn as nn\n'), ((2658, 2715), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (2675, 2715), True, 'import torch.nn as nn\n'), ((2824, 2942), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=1, padding=padding)\n', (2833, 2942), True, 'import torch.nn as nn\n'), ((3209, 3266), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (3226, 
3266), True, 'import torch.nn as nn\n'), ((3379, 3497), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'out_channels', 'out_channels': 'in_channels', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding'}), '(in_channels=out_channels, out_channels=in_channels, kernel_size=\n kernel_size, stride=1, padding=padding)\n', (3388, 3497), True, 'import torch.nn as nn\n'), ((3764, 3820), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'in_channels', 'affine': '(True)'}), '(num_features=in_channels, affine=True)\n', (3781, 3820), True, 'import torch.nn as nn\n'), ((4055, 4083), 'torch.sigmoid', 'torch.sigmoid', (['h1_gates_norm'], {}), '(h1_gates_norm)\n', (4068, 4083), False, 'import torch\n'), ((4379, 4502), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (4388, 4502), True, 'import torch.nn as nn\n'), ((4734, 4791), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (4751, 4791), True, 'import torch.nn as nn\n'), ((4895, 5018), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (4904, 5018), True, 'import torch.nn as nn\n'), ((5280, 5337), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (5297, 5337), True, 'import torch.nn as nn\n'), ((5726, 5849), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 
'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (5735, 5849), True, 'import torch.nn as nn\n'), ((6152, 6214), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': '(out_channels // 2)', 'affine': '(True)'}), '(num_features=out_channels // 2, affine=True)\n', (6169, 6214), True, 'import torch.nn as nn\n'), ((6318, 6441), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (6327, 6441), True, 'import torch.nn as nn\n'), ((6780, 6842), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': '(out_channels // 2)', 'affine': '(True)'}), '(num_features=out_channels // 2, affine=True)\n', (6797, 6842), True, 'import torch.nn as nn\n'), ((11917, 12040), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (11926, 12040), True, 'import torch.nn as nn\n'), ((12272, 12329), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (12289, 12329), True, 'import torch.nn as nn\n'), ((12432, 12555), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (12441, 12555), True, 'import torch.nn as nn\n'), 
((12812, 12869), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (12829, 12869), True, 'import torch.nn as nn\n'), ((16945, 16968), 'torch.from_numpy', 'torch.from_numpy', (['input'], {}), '(input)\n', (16961, 16968), False, 'import torch\n')] |
#
import numpy as np
from scipy.optimize import minimize
from scipy.interpolate import interp1d
from scipy.ndimage.interpolation import shift
from statsmodels.tsa.stattools import ccovf
def chisqr_align(reference, target, roi, order=1, init=0.1, bound=1):
    '''
    Align ``target`` to ``reference`` inside a region of interest (ROI)
    by minimizing the chi-squared difference between the two signals
    (each normalized by its mean over the ROI). The optimization is
    gradient based, so a reasonably tight prior (``init`` +/- ``bound``)
    is needed to avoid local minima for oscillatory signals.

    Args:
        reference (1d array/list): signal that is held fixed
        target (1d array/list): signal shifted onto the reference
        roi (tuple): (start, stop) indices over which chi-squared is evaluated
        order (int): spline interpolation order used when shifting the target
        init (float): initial guess for the offset between the two signals
        bound (float): symmetric search bounds around ``init``

    Returns:
        float: offset between target and reference signal

    Todo:
        * include uncertainties on spectra
        * update chi-squared metric for uncertainties
        * include loss function on chi-sqr
    '''
    # Integer slice to avoid indexing issues.
    window = slice(int(roi[0]), int(roi[1]), 1)

    # Normalize the reference within the ROI.
    reference = reference / np.mean(reference[window])

    def chi_squared(offset):
        # Shift the target, renormalize over the ROI, and score the fit.
        candidate = shift(target, offset, order=order)
        candidate = candidate / np.mean(candidate[window])
        return np.sum(((reference - candidate) ** 2)[window])

    # Bounds must be ordered regardless of the sign of `bound`.
    lo, hi = sorted((init - bound, init + bound))

    # Constrained minimization of the chi-squared between the two signals.
    result = minimize(chi_squared, init, method='L-BFGS-B', bounds=[(lo, hi)])
    return result.x[0]
def phase_align(reference, target, roi, res=100):
    """Estimate the offset between two signals by cross-covariance.

    The data inside the region of interest is linearly interpolated onto a
    grid *res* times finer, so the returned offset has a precision of 1./res
    (with res=1 only integer precision is achievable).

    Args:
        reference (1d array/list): signal that won't be shifted
        target (1d array/list): signal to be shifted to reference
        roi (tuple): (start, stop) indices of the region of interest
        res (int): factor to increase resolution of data via linear interpolation

    Returns:
        float: offset between target and reference signal
    """
    # convert to int to avoid indexing issues
    ROI = slice(int(roi[0]), int(roi[1]), 1)

    # interpolate data onto a higher resolution grid
    x, r1 = highres(reference[ROI], kind='linear', res=res)
    x, r2 = highres(target[ROI], kind='linear', res=res)

    # subtract mean so the covariance is not dominated by the DC offset
    r1 -= r1.mean()
    r2 -= r2.mean()

    # compute cross covariance
    # NOTE(review): `unbiased` was renamed `adjusted` in newer statsmodels
    # releases -- confirm against the pinned statsmodels version
    cc = ccovf(r1, r2, demean=False, unbiased=False)

    # determine the shift sign: if the peak sits at lag zero, swap the
    # arguments to probe negative lags instead
    if np.argmax(cc) == 0:
        cc = ccovf(r2, r1, demean=False, unbiased=False)
        mod = -1
    else:
        mod = 1

    # removed: an unreachable FFT-based phase-correlation variant that
    # followed this return (it also contained a `r1 -= r2.mean()` typo)
    return np.argmax(cc) * mod * (1. / res)
def highres(y, kind='cubic', res=100):
    """Interpolate data onto a grid *res* times finer.

    Args:
        y (1d array/list): signal to be interpolated
        kind (str): order of interpolation (see docs for scipy.interpolate.interp1d)
        res (int): factor to increase resolution of data via interpolation

    Returns:
        (xnew, ynew): the fine-grained sample positions and interpolated values
    """
    y = np.array(y)
    x = np.arange(0, y.shape[0])
    # BUG FIX: honor the requested interpolation kind -- previously 'cubic'
    # was hard-coded, silently ignoring callers that asked for 'linear'
    # (and cubic interpolation fails outright for inputs shorter than 4)
    f = interp1d(x, y, kind=kind)
    xnew = np.linspace(0, x.shape[0] - 1, x.shape[0] * res)
    ynew = f(xnew)
    return xnew, ynew
if __name__ == "__main__":
    from scipy import signal
    import matplotlib.pyplot as plt

    NPTS = 100
    SHIFTVAL = 4
    NOISE = 1e-2  # can perturb offset retrieval from the true value

    print('true signal offset:', SHIFTVAL)

    # generate some noisy data and simulate a shift
    # NOTE: scipy.signal.gaussian was deprecated in favor of
    # scipy.signal.windows.gaussian and has been removed in recent SciPy
    y = signal.windows.gaussian(NPTS, std=4) + np.random.normal(1, NOISE, NPTS)
    shifted = shift(signal.windows.gaussian(NPTS, std=4), SHIFTVAL) + np.random.normal(1, NOISE, NPTS)

    # align the shifted spectrum back to the real
    s = phase_align(y, shifted, [10, 90])
    print('phase shift value to align is', s)

    # chi squared alignment at native resolution
    s = chisqr_align(y, shifted, [10, 90], init=-3.5, bound=2)
    print('chi square alignment', s)

    # make some diagnostic plots
    plt.plot(y, label='original data')
    plt.plot(shifted, label='shifted data')
    plt.plot(shift(shifted, s, mode='nearest'), ls='--', label='aligned data')
    plt.legend(loc='best')
    plt.show()
| [
"numpy.random.normal",
"numpy.mean",
"scipy.ndimage.interpolation.shift",
"matplotlib.pyplot.legend",
"scipy.optimize.minimize",
"matplotlib.pyplot.plot",
"statsmodels.tsa.stattools.ccovf",
"numpy.argmax",
"scipy.interpolate.interp1d",
"numpy.fft.fft",
"numpy.array",
"numpy.linspace",
"numpy... | [((1889, 1954), 'scipy.optimize.minimize', 'minimize', (['fcn2min', 'init'], {'method': '"""L-BFGS-B"""', 'bounds': '[(minb, maxb)]'}), "(fcn2min, init, method='L-BFGS-B', bounds=[(minb, maxb)])\n", (1897, 1954), False, 'from scipy.optimize import minimize\n'), ((2972, 3015), 'statsmodels.tsa.stattools.ccovf', 'ccovf', (['r1', 'r2'], {'demean': '(False)', 'unbiased': '(False)'}), '(r1, r2, demean=False, unbiased=False)\n', (2977, 3015), False, 'from statsmodels.tsa.stattools import ccovf\n'), ((3762, 3801), 'numpy.linspace', 'np.linspace', (['(-0.5 * l)', '(0.5 * l)', '(l * res)'], {}), '(-0.5 * l, 0.5 * l, l * res)\n', (3773, 3801), True, 'import numpy as np\n'), ((4327, 4338), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4335, 4338), True, 'import numpy as np\n'), ((4347, 4371), 'numpy.arange', 'np.arange', (['(0)', 'y.shape[0]'], {}), '(0, y.shape[0])\n', (4356, 4371), True, 'import numpy as np\n'), ((4380, 4408), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y'], {'kind': '"""cubic"""'}), "(x, y, kind='cubic')\n", (4388, 4408), False, 'from scipy.interpolate import interp1d\n'), ((4419, 4467), 'numpy.linspace', 'np.linspace', (['(0)', '(x.shape[0] - 1)', '(x.shape[0] * res)'], {}), '(0, x.shape[0] - 1, x.shape[0] * res)\n', (4430, 4467), True, 'import numpy as np\n'), ((5270, 5304), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'label': '"""original data"""'}), "(y, label='original data')\n", (5278, 5304), True, 'import matplotlib.pyplot as plt\n'), ((5308, 5347), 'matplotlib.pyplot.plot', 'plt.plot', (['shifted'], {'label': '"""shifted data"""'}), "(shifted, label='shifted data')\n", (5316, 5347), True, 'import matplotlib.pyplot as plt\n'), ((5427, 5449), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5437, 5449), True, 'import matplotlib.pyplot as plt\n'), ((5454, 5464), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5462, 5464), True, 'import matplotlib.pyplot as plt\n'), ((1427, 
1450), 'numpy.mean', 'np.mean', (['reference[ROI]'], {}), '(reference[ROI])\n', (1434, 1450), True, 'import numpy as np\n'), ((1557, 1586), 'scipy.ndimage.interpolation.shift', 'shift', (['target', 'x'], {'order': 'order'}), '(target, x, order=order)\n', (1562, 1586), False, 'from scipy.ndimage.interpolation import shift\n'), ((1648, 1689), 'numpy.sum', 'np.sum', (['((reference - shifted) ** 2)[ROI]'], {}), '(((reference - shifted) ** 2)[ROI])\n', (1654, 1689), True, 'import numpy as np\n'), ((3068, 3081), 'numpy.argmax', 'np.argmax', (['cc'], {}), '(cc)\n', (3077, 3081), True, 'import numpy as np\n'), ((3101, 3144), 'statsmodels.tsa.stattools.ccovf', 'ccovf', (['r2', 'r1'], {'demean': '(False)', 'unbiased': '(False)'}), '(r2, r1, demean=False, unbiased=False)\n', (3106, 3144), False, 'from statsmodels.tsa.stattools import ccovf\n'), ((3590, 3604), 'numpy.fft.fft', 'np.fft.fft', (['r1'], {}), '(r1)\n', (3600, 3604), True, 'import numpy as np\n'), ((3654, 3674), 'numpy.fft.ifft', 'np.fft.ifft', (['product'], {}), '(product)\n', (3665, 3674), True, 'import numpy as np\n'), ((3856, 3874), 'numpy.argmax', 'np.argmax', (['cc.real'], {}), '(cc.real)\n', (3865, 3874), True, 'import numpy as np\n'), ((4792, 4820), 'scipy.signal.gaussian', 'signal.gaussian', (['NPTS'], {'std': '(4)'}), '(NPTS, std=4)\n', (4807, 4820), False, 'from scipy import signal\n'), ((4823, 4855), 'numpy.random.normal', 'np.random.normal', (['(1)', 'NOISE', 'NPTS'], {}), '(1, NOISE, NPTS)\n', (4839, 4855), True, 'import numpy as np\n'), ((4917, 4949), 'numpy.random.normal', 'np.random.normal', (['(1)', 'NOISE', 'NPTS'], {}), '(1, NOISE, NPTS)\n', (4933, 4949), True, 'import numpy as np\n'), ((5360, 5393), 'scipy.ndimage.interpolation.shift', 'shift', (['shifted', 's'], {'mode': '"""nearest"""'}), "(shifted, s, mode='nearest')\n", (5365, 5393), False, 'from scipy.ndimage.interpolation import shift\n'), ((1611, 1632), 'numpy.mean', 'np.mean', (['shifted[ROI]'], {}), '(shifted[ROI])\n', (1618, 1632), 
True, 'import numpy as np\n'), ((3266, 3279), 'numpy.argmax', 'np.argmax', (['cc'], {}), '(cc)\n', (3275, 3279), True, 'import numpy as np\n'), ((4875, 4903), 'scipy.signal.gaussian', 'signal.gaussian', (['NPTS'], {'std': '(4)'}), '(NPTS, std=4)\n', (4890, 4903), False, 'from scipy import signal\n'), ((3607, 3621), 'numpy.fft.fft', 'np.fft.fft', (['r2'], {}), '(r2)\n', (3617, 3621), True, 'import numpy as np\n')] |
#myNNApp
import numpy as np
from myNN import MyHiddenLayer, MyOutputLayer
import matplotlib.pyplot as plt
def main():
    """Tiny demo: push one input through a hidden layer then an output layer."""
    n_obs = 10
    n_hidden = 2
    weights = np.ones(n_hidden)

    hidden_layer = MyHiddenLayer(weights, 1)
    output_layer = MyOutputLayer([-1, 1])

    x = np.linspace(-10, 10, n_obs)
    # the grid above is immediately discarded; the demo runs on a scalar
    x = -1

    print(hidden_layer.output(x))
    y = output_layer.output(hidden_layer.output(x))
    print(y)
# Entry point: run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"myNN.MyOutputLayer",
"myNN.MyHiddenLayer",
"numpy.linspace",
"numpy.ones"
] | [((209, 229), 'numpy.ones', 'np.ones', (['numb_hidden'], {}), '(numb_hidden)\n', (216, 229), True, 'import numpy as np\n'), ((246, 265), 'myNN.MyHiddenLayer', 'MyHiddenLayer', (['W', '(1)'], {}), '(W, 1)\n', (259, 265), False, 'from myNN import MyHiddenLayer, MyOutputLayer\n'), ((282, 304), 'myNN.MyOutputLayer', 'MyOutputLayer', (['[-1, 1]'], {}), '([-1, 1])\n', (295, 304), False, 'from myNN import MyHiddenLayer, MyOutputLayer\n'), ((314, 344), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', 'numb_obs'], {}), '(-10, 10, numb_obs)\n', (325, 344), True, 'import numpy as np\n')] |
import sys
import gym
import numpy as np
from scipy.stats import norm
from keras.layers import Dense, Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras import backend as K
EPISODES = 3000
# A2C (Advantage Actor-Critic) agent with a continuous Gaussian policy (run below on Pendulum-v0)
class A2CAgent:
    """Advantage Actor-Critic agent with a Gaussian (continuous-action) policy.

    The actor predicts the mean and spread of a normal distribution over
    actions; the critic predicts the state value used to form the advantage.
    Built on the Keras backend API (placeholders + K.function), i.e. it
    targets TF1-style graph execution.
    """

    def __init__(self, state_size, action_size):
        # set to True to watch the environment render while learning
        self.render = False
        # set to True to resume from the saved weight files below
        self.load_model = False
        # dimensions of the observation vector and the action vector
        self.state_size = state_size
        self.action_size = action_size
        # the critic outputs a single scalar state value
        self.value_size = 1

        # hyper parameters: separate learning rates for actor/critic,
        # discount factor, and hidden-layer widths
        self.actor_lr = 0.0001
        self.critic_lr = 0.001
        self.discount_factor = .9
        self.hidden1, self.hidden2 = 24, 24

        # create model for actor and critic network
        self.actor, self.critic = self.build_model()

        # compiled backend functions that apply one training update each:
        # [0] -> actor update, [1] -> critic update
        self.optimizer = [self.actor_optimizer(), self.critic_optimizer()]

        if self.load_model:
            self.actor.load_weights("./save_model/cartpole_actor.h5")
            self.critic.load_weights("./save_model/cartpole_critic.h5")

    def build_model(self):
        """Build the actor (mu, sigma heads) and the critic (state value).

        Returns:
            (actor, critic): two Keras models sharing the same state input.
        """
        state = Input(batch_shape=(None, self.state_size))

        actor_input = Dense(30, input_dim=self.state_size, activation='relu', kernel_initializer='he_uniform')(state)
        # mean head squashed by tanh, then scaled to [-2, 2] by the Lambda
        # below (Pendulum's torque range)
        mu_0 = Dense(self.action_size, activation='tanh', kernel_initializer='he_uniform')(actor_input)
        # softplus keeps the spread positive; the Lambda adds a small
        # constant so it never collapses to exactly zero
        sigma_0 = Dense(self.action_size, activation='softplus', kernel_initializer='he_uniform')(actor_input)

        mu = Lambda(lambda x: x * 2)(mu_0)
        sigma = Lambda(lambda x: x + 0.0001)(sigma_0)

        critic_input = Dense(30, input_dim=self.state_size, activation='relu', kernel_initializer='he_uniform')(state)
        state_value = Dense(1, activation='linear', kernel_initializer='he_uniform')(critic_input)

        actor = Model(inputs=state, outputs=(mu, sigma))
        critic = Model(inputs=state, outputs=state_value)

        # build the predict functions eagerly (TF1/Keras idiom, e.g. for
        # use from multiple threads)
        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic

    def actor_optimizer(self):
        """Compile the actor update: policy gradient with an entropy bonus.

        Maximizes the advantage-weighted log-likelihood of the taken action
        under the predicted Gaussian, plus 0.01 * entropy for exploration.

        Returns:
            A K.function taking [state, action, advantages] that applies one
            Adam update to the actor weights.
        """
        action = K.placeholder(shape=(None, 1))
        advantages = K.placeholder(shape=(None, 1))

        # NOTE(review): the second actor head is treated as a *variance*
        # here (it sits under the sqrt in the density) even though
        # build_model names it "sigma" -- confirm which was intended
        mu, sigma_sq = self.actor.output

        # Gaussian density of the taken action; epsilon guards log(0)
        pdf = 1. / K.sqrt(2. * np.pi * sigma_sq) * K.exp(-K.square(action - mu) / (2. * sigma_sq))
        log_pdf = K.log(pdf + K.epsilon())
        # differential entropy of a Gaussian: 0.5 * (log(2*pi*var) + 1)
        entropy = K.sum(0.5 * (K.log(2. * np.pi * sigma_sq) + 1.))

        exp_v = log_pdf * advantages
        exp_v = K.sum(exp_v + 0.01 * entropy)
        # gradient ascent on the objective == descent on its negation
        actor_loss = -exp_v

        optimizer = Adam(lr=self.actor_lr)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
        train = K.function([self.actor.input, action, advantages], [], updates=updates)
        return train

    # make loss function for Value approximation
    def critic_optimizer(self):
        """Compile the critic update: MSE between predicted value and target.

        Returns:
            A K.function taking [state, discounted_reward] that applies one
            Adam update to the critic weights.
        """
        discounted_reward = K.placeholder(shape=(None, 1))

        value = self.critic.output

        loss = K.mean(K.square(discounted_reward - value))

        optimizer = Adam(lr=self.critic_lr)
        updates = optimizer.get_updates(self.critic.trainable_weights, [], loss)
        train = K.function([self.critic.input, discounted_reward], [], updates=updates)
        return train

    # using the output of policy network, pick action stochastically
    def get_action(self, state):
        """Sample an action from the predicted Gaussian, clipped to [-2, 2]."""
        mu, sigma_sq = self.actor.predict(np.reshape(state, [1, self.state_size]))
        epsilon = np.random.randn(self.action_size)
        # reparameterized sample: mu + sqrt(variance) * N(0, 1)
        action = mu + np.sqrt(sigma_sq) * epsilon
        action = np.clip(action, -2, 2)
        return action

    # update policy network every episode
    def train_model(self, state, action, reward, next_state, done):
        """One-step A2C update from a single (s, a, r, s', done) transition."""
        target = np.zeros((1, self.value_size))
        advantages = np.zeros((1, self.action_size))

        value = self.critic.predict(state)[0]
        next_value = self.critic.predict(next_state)[0]

        if done:
            # terminal transition: no bootstrap from the next state
            advantages[0] = reward - value
            target[0][0] = reward
        else:
            # one-step TD advantage and value target
            advantages[0] = reward + self.discount_factor * (next_value) - value
            target[0][0] = reward + self.discount_factor * next_value

        self.optimizer[0]([state, action, advantages])
        self.optimizer[1]([state, target])
if __name__ == "__main__":
    # build the continuous-control environment and read its dimensions
    env = gym.make('Pendulum-v0')
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.shape[0]

    agent = A2CAgent(state_size, action_size)

    scores, episodes = [], []
    for episode in range(EPISODES):
        state = np.reshape(env.reset(), [1, state_size])
        score = 0
        finished = False

        while not finished:
            if agent.render:
                env.render()

            action = agent.get_action(state)
            next_state, reward, finished, info = env.step(action)
            reward /= 10
            next_state = np.reshape(next_state, [1, state_size])

            # learn online from this single transition
            agent.train_model(state, action, reward, next_state, finished)
            score += reward
            state = next_state

            if finished:
                scores.append(score)
                episodes.append(episode)
                print("episode:", episode, " score:", score)
                # stop once the mean of the last 10 returns is good enough
                if np.mean(scores[-min(10, len(scores)):]) > -20:
                    sys.exit()

        # periodically checkpoint both networks
        if episode % 50 == 0:
            agent.actor.save_weights("./save_model/cartpole_actor.h5")
            agent.critic.save_weights("./save_model/cartpole_critic.h5")
| [
"numpy.clip",
"numpy.sqrt",
"keras.backend.sum",
"sys.exit",
"keras.layers.Dense",
"gym.make",
"numpy.reshape",
"keras.backend.square",
"keras.backend.placeholder",
"keras.models.Model",
"keras.backend.epsilon",
"keras.optimizers.Adam",
"keras.backend.sqrt",
"keras.backend.log",
"numpy.r... | [((4955, 4978), 'gym.make', 'gym.make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (4963, 4978), False, 'import gym\n'), ((1258, 1300), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, self.state_size)'}), '(batch_shape=(None, self.state_size))\n', (1263, 1300), False, 'from keras.layers import Dense, Input, Lambda\n'), ((2123, 2163), 'keras.models.Model', 'Model', ([], {'inputs': 'state', 'outputs': '(mu, sigma)'}), '(inputs=state, outputs=(mu, sigma))\n', (2128, 2163), False, 'from keras.models import Model\n'), ((2181, 2221), 'keras.models.Model', 'Model', ([], {'inputs': 'state', 'outputs': 'state_value'}), '(inputs=state, outputs=state_value)\n', (2186, 2221), False, 'from keras.models import Model\n'), ((2431, 2461), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(None, 1)'}), '(shape=(None, 1))\n', (2444, 2461), True, 'from keras import backend as K\n'), ((2483, 2513), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(None, 1)'}), '(shape=(None, 1))\n', (2496, 2513), True, 'from keras import backend as K\n'), ((2950, 2979), 'keras.backend.sum', 'K.sum', (['(exp_v + 0.01 * entropy)'], {}), '(exp_v + 0.01 * entropy)\n', (2955, 2979), True, 'from keras import backend as K\n'), ((3029, 3051), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.actor_lr'}), '(lr=self.actor_lr)\n', (3033, 3051), False, 'from keras.optimizers import Adam\n'), ((3155, 3226), 'keras.backend.function', 'K.function', (['[self.actor.input, action, advantages]', '[]'], {'updates': 'updates'}), '([self.actor.input, action, advantages], [], updates=updates)\n', (3165, 3226), True, 'from keras import backend as K\n'), ((3358, 3388), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(None, 1)'}), '(shape=(None, 1))\n', (3371, 3388), True, 'from keras import backend as K\n'), ((3506, 3529), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.critic_lr'}), '(lr=self.critic_lr)\n', (3510, 3529), False, 'from keras.optimizers 
import Adam\n'), ((3627, 3698), 'keras.backend.function', 'K.function', (['[self.critic.input, discounted_reward]', '[]'], {'updates': 'updates'}), '([self.critic.input, discounted_reward], [], updates=updates)\n', (3637, 3698), True, 'from keras import backend as K\n'), ((3974, 4007), 'numpy.random.randn', 'np.random.randn', (['self.action_size'], {}), '(self.action_size)\n', (3989, 4007), True, 'import numpy as np\n'), ((4134, 4156), 'numpy.clip', 'np.clip', (['action', '(-2)', '(2)'], {}), '(action, -2, 2)\n', (4141, 4156), True, 'import numpy as np\n'), ((4307, 4337), 'numpy.zeros', 'np.zeros', (['(1, self.value_size)'], {}), '((1, self.value_size))\n', (4315, 4337), True, 'import numpy as np\n'), ((4359, 4390), 'numpy.zeros', 'np.zeros', (['(1, self.action_size)'], {}), '((1, self.action_size))\n', (4367, 4390), True, 'import numpy as np\n'), ((5336, 5370), 'numpy.reshape', 'np.reshape', (['state', '[1, state_size]'], {}), '(state, [1, state_size])\n', (5346, 5370), True, 'import numpy as np\n'), ((1323, 1416), 'keras.layers.Dense', 'Dense', (['(30)'], {'input_dim': 'self.state_size', 'activation': '"""relu"""', 'kernel_initializer': '"""he_uniform"""'}), "(30, input_dim=self.state_size, activation='relu', kernel_initializer=\n 'he_uniform')\n", (1328, 1416), False, 'from keras.layers import Dense, Input, Lambda\n'), ((1511, 1586), 'keras.layers.Dense', 'Dense', (['self.action_size'], {'activation': '"""tanh"""', 'kernel_initializer': '"""he_uniform"""'}), "(self.action_size, activation='tanh', kernel_initializer='he_uniform')\n", (1516, 1586), False, 'from keras.layers import Dense, Input, Lambda\n'), ((1618, 1697), 'keras.layers.Dense', 'Dense', (['self.action_size'], {'activation': '"""softplus"""', 'kernel_initializer': '"""he_uniform"""'}), "(self.action_size, activation='softplus', kernel_initializer='he_uniform')\n", (1623, 1697), False, 'from keras.layers import Dense, Input, Lambda\n'), ((1725, 1748), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x * 
2)'], {}), '(lambda x: x * 2)\n', (1731, 1748), False, 'from keras.layers import Dense, Input, Lambda\n'), ((1771, 1799), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x + 0.0001)'], {}), '(lambda x: x + 0.0001)\n', (1777, 1799), False, 'from keras.layers import Dense, Input, Lambda\n'), ((1833, 1926), 'keras.layers.Dense', 'Dense', (['(30)'], {'input_dim': 'self.state_size', 'activation': '"""relu"""', 'kernel_initializer': '"""he_uniform"""'}), "(30, input_dim=self.state_size, activation='relu', kernel_initializer=\n 'he_uniform')\n", (1838, 1926), False, 'from keras.layers import Dense, Input, Lambda\n'), ((2029, 2091), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""', 'kernel_initializer': '"""he_uniform"""'}), "(1, activation='linear', kernel_initializer='he_uniform')\n", (2034, 2091), False, 'from keras.layers import Dense, Input, Lambda\n'), ((3448, 3483), 'keras.backend.square', 'K.square', (['(discounted_reward - value)'], {}), '(discounted_reward - value)\n', (3456, 3483), True, 'from keras import backend as K\n'), ((3865, 3904), 'numpy.reshape', 'np.reshape', (['state', '[1, self.state_size]'], {}), '(state, [1, self.state_size])\n', (3875, 3904), True, 'import numpy as np\n'), ((5612, 5651), 'numpy.reshape', 'np.reshape', (['next_state', '[1, state_size]'], {}), '(next_state, [1, state_size])\n', (5622, 5651), True, 'import numpy as np\n'), ((2705, 2735), 'keras.backend.sqrt', 'K.sqrt', (['(2.0 * np.pi * sigma_sq)'], {}), '(2.0 * np.pi * sigma_sq)\n', (2711, 2735), True, 'from keras import backend as K\n'), ((2815, 2826), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2824, 2826), True, 'from keras import backend as K\n'), ((4089, 4106), 'numpy.sqrt', 'np.sqrt', (['sigma_sq'], {}), '(sigma_sq)\n', (4096, 4106), True, 'import numpy as np\n'), ((2859, 2888), 'keras.backend.log', 'K.log', (['(2.0 * np.pi * sigma_sq)'], {}), '(2.0 * np.pi * sigma_sq)\n', (2864, 2888), True, 'from keras import backend as K\n'), ((6258, 6268), 
'sys.exit', 'sys.exit', ([], {}), '()\n', (6266, 6268), False, 'import sys\n'), ((2744, 2765), 'keras.backend.square', 'K.square', (['(action - mu)'], {}), '(action - mu)\n', (2752, 2765), True, 'from keras import backend as K\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 10:54:50 2020
@author: Simon
"""
import numpy as np
from sleep import SleepSet
import config as cfg
import features
import pandas as pd
import dateparser
from tqdm import tqdm
from datetime import datetime
from scipy.ndimage.morphology import binary_dilation
if __name__=='__main__':
    # Build the stratified patient set and tabulate, per recording, the
    # fraction of epochs lost to artefacts at 30 s and 300 s window sizes.
    ss = SleepSet(cfg.folder_unisens).stratify()

    header = ['Code',
              'Epochs',
              '% artefact 30s',
              '% artefact 300s',]

    # Collect one dict per patient and build the DataFrame once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
    # appending row-by-row is quadratic anyway.
    rows = []
    for p in tqdm(ss, 'caluclating'):
        p.reset()
        art_30 = p.get_artefacts(only_sleeptime=True, wsize=30)
        art_300 = p.get_artefacts(only_sleeptime=True, wsize=300)
        rows.append({'Code': p.code,
                     'Epochs': p.epochs_hypno,
                     '% artefact 30s': f'{np.mean(art_30)*100:.1f}',
                     '% artefact 300s': f'{np.mean(art_300)*100:.1f}'})

    table = pd.DataFrame(rows, columns=header)
    table.to_excel(cfg.documents + '/artefact_loss_v2.xls')
| [
"pandas.DataFrame",
"numpy.mean",
"sleep.SleepSet",
"tqdm.tqdm"
] | [((522, 550), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'header'}), '(columns=header)\n', (534, 550), True, 'import pandas as pd\n'), ((571, 594), 'tqdm.tqdm', 'tqdm', (['ss', '"""caluclating"""'], {}), "(ss, 'caluclating')\n", (575, 594), False, 'from tqdm import tqdm\n'), ((349, 377), 'sleep.SleepSet', 'SleepSet', (['cfg.folder_unisens'], {}), '(cfg.folder_unisens)\n', (357, 377), False, 'from sleep import SleepSet\n'), ((853, 868), 'numpy.mean', 'np.mean', (['art_30'], {}), '(art_30)\n', (860, 868), True, 'import numpy as np\n'), ((917, 933), 'numpy.mean', 'np.mean', (['art_300'], {}), '(art_300)\n', (924, 933), True, 'import numpy as np\n')] |
# Helper code to plot a binary decision region.
#
# <NAME> (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
    # Our input is (x,y) -- 2D. Output is the scalar y^ computed by the
    # dot-product of (1, x, y) with theta (1 is for the bias, and theta_0 is the
    # bias parameter). This is a plane equation in 3D.
    # Therefore, the plot we aim to produce is 3D -- some scalar as a function
    # of two parameters. The contours are then the equi-value lines of the 3D
    # plot, and we're only interested in the main contour at value 0 -- meaning
    # the line where the plane intersects the x/y plane.
    #
    # Note: if we flip all values here we get the same intersection.
    theta = np.array([[-4], [0.5], [1]])

    fig, ax = plt.subplots()
    fig.set_tight_layout(True)

    xs = np.linspace(-4, 8, 200)
    ys = np.linspace(-4, 8, 200)
    xsgrid, ysgrid = np.meshgrid(xs, ys)

    # Evaluate the plane over the whole grid in one vectorized expression
    # (same values as the former 200x200 Python double loop, much faster):
    # plane = theta_0 + theta_1 * x + theta_2 * y
    plane = theta[0, 0] + theta[1, 0] * xsgrid + theta[2, 0] * ysgrid

    cs = ax.contour(xsgrid, ysgrid, plane, levels=[0])
    cs.clabel(inline=1)
    ax.grid(True)
    ax.annotate(r'here $\hat{y}(x) > 0$', xy=(4, 4), fontsize=20)
    ax.annotate(r'here $\hat{y}(x) < 0$', xy=(0, 0), fontsize=20)
    fig.savefig('line.png', dpi=80)
    plt.show()
| [
"numpy.array",
"numpy.linspace",
"numpy.meshgrid",
"numpy.zeros_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((833, 861), 'numpy.array', 'np.array', (['[[-4], [0.5], [1]]'], {}), '([[-4], [0.5], [1]])\n', (841, 861), True, 'import numpy as np\n'), ((877, 891), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (889, 891), True, 'import matplotlib.pyplot as plt\n'), ((933, 956), 'numpy.linspace', 'np.linspace', (['(-4)', '(8)', '(200)'], {}), '(-4, 8, 200)\n', (944, 956), True, 'import numpy as np\n'), ((966, 989), 'numpy.linspace', 'np.linspace', (['(-4)', '(8)', '(200)'], {}), '(-4, 8, 200)\n', (977, 989), True, 'import numpy as np\n'), ((1011, 1030), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (1022, 1030), True, 'import numpy as np\n'), ((1043, 1064), 'numpy.zeros_like', 'np.zeros_like', (['xsgrid'], {}), '(xsgrid)\n', (1056, 1064), True, 'import numpy as np\n'), ((1492, 1502), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1500, 1502), True, 'import matplotlib.pyplot as plt\n'), ((1169, 1210), 'numpy.array', 'np.array', (['[1, xsgrid[i, j], ysgrid[i, j]]'], {}), '([1, xsgrid[i, j], ysgrid[i, j]])\n', (1177, 1210), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from .math_functions import euclidean_dist_2_pts
def extract_img_markers(img, workspace_ratio=1.0):
    """
    Extract the working area from an image thanks to 4 Niryo's markers
    :param img: OpenCV image which contains 4 Niryo's markers
    :param workspace_ratio: Ratio between the width and the height of the area represented by the markers
    :return: extracted and warped working area image, or None if the markers cannot be found
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_thresh = cv2.adaptiveThreshold(gray, maxValue=255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,
                                       thresholdType=cv2.THRESH_BINARY, blockSize=15, C=25)

    candidates = find_markers_from_img_thresh(img_thresh)
    # no markers at all, or far too many detections to disambiguate
    if not candidates or len(candidates) > 6:
        return None

    if len(candidates) == 4:
        candidates = sort_markers_detection(candidates)
    else:
        # more than 4 detections: pick the most plausible 4-marker subset
        candidates = complicated_sort_markers(candidates, workspace_ratio=workspace_ratio)
        if candidates is None:
            return None

    return extract_sub_img(img, candidates, ratio_w_h=workspace_ratio)
def extract_sub_img(img, list_corners, ratio_w_h=1.0):
    """
    Extract a small image from a big one using a perspective warp
    :param img: Big image from which the small one will be extracted
    :param list_corners: corners list of the small image (4 markers)
    :param ratio_w_h: Width over Height ratio of the area. It helps to not stretch the working area image
    :return: extracted and warped image, or None if the corner list is invalid
    """
    if list_corners is None or len(list_corners) != 4:
        return None

    # the longer side is mapped to ratio * 200 px, the shorter one to 200 px
    if ratio_w_h >= 1.0:
        target_w_area = int(round(ratio_w_h * 200))
        target_h_area = 200
    else:
        inverse_ratio = 1.0 / ratio_w_h
        target_h_area = int(round(inverse_ratio * 200))
        target_w_area = 200

    src_points = np.array([marker.get_center() for marker in list_corners], dtype=np.float32)
    dst_points = np.array(
        [[0, 0], [target_w_area - 1, 0],
         [target_w_area - 1, target_h_area - 1], [0, target_h_area - 1]],
        dtype=np.float32)

    transfo_matrix = cv2.getPerspectiveTransform(src_points, dst_points)
    return cv2.warpPerspective(img, transfo_matrix, (target_w_area, target_h_area))
def draw_markers(img, workspace_ratio=1.0):
    """Detect workspace markers on *img* and return (success, annotated image)."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_thresh = cv2.adaptiveThreshold(gray, maxValue=255, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C,
                                       thresholdType=cv2.THRESH_BINARY, blockSize=15, C=32)

    candidates = find_markers_from_img_thresh(img_thresh)
    if not candidates:
        return False, img

    annotated = img.copy()
    # red circle on every raw candidate
    for marker in candidates:
        cx, cy = marker.get_center()
        cv2.circle(annotated, (cx, cy), marker.get_radius(), (0, 0, 255), 2)

    if len(candidates) > 6:
        return False, annotated

    if len(candidates) == 4:
        candidates = sort_markers_detection(candidates)
    else:
        candidates = complicated_sort_markers(candidates, workspace_ratio=workspace_ratio)
        if candidates is None:
            return False, annotated

    # green circle plus corner index (1..4) on the retained markers
    for i, marker in enumerate(candidates[:4]):
        cx, cy = marker.get_center()
        cv2.circle(annotated, (cx, cy), marker.get_radius(), (0, 200, 0), 2)
        # black outline under the green text keeps the label readable
        cv2.putText(annotated, "{}".format(i + 1),
                    (cx + 5, cy - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 0), 3)
        cv2.putText(annotated, "{}".format(i + 1),
                    (cx + 5, cy - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 200, 0), 2)

    return True, annotated
class PotentialMarker:
    """A single circle detection that may later be merged into a Marker."""

    def __init__(self, center, radius, cnt):
        self.center = center
        self.x = center[0]
        self.y = center[1]
        self.radius = radius
        self.contour = cnt
        # flipped to True once this detection has been absorbed by a Marker
        self.is_merged = False

    def get_center(self):
        """Return the (x, y) center of the detected circle."""
        return self.center

    def __str__(self):
        return f"{self.x} - {self.y} - {self.radius}"

    def __repr__(self):
        return self.__str__()
class Marker:
    """Marker built by merging several overlapping circle detections.

    The center is the (rounded) mean of all merged centers and the radius is
    the largest merged radius.
    """

    def __init__(self, potential_marker):
        first_center = potential_marker.get_center()
        self.list_centers = [first_center]
        self.list_radius = [potential_marker.radius]
        self.list_contours = [potential_marker.contour]
        self.cx, self.cy = first_center
        self.radius = potential_marker.radius
        # "A"/"B" label, filled by get_id_from_slice()
        self.identifiant = None
        self.value_for_id = None

    def get_radius(self):
        return self.radius

    def get_center(self):
        return self.cx, self.cy

    def add_circle(self, obj_potential_marker):
        """Merge another detection into this marker and refresh center/radius."""
        self.list_centers.append(obj_potential_marker.get_center())
        self.list_radius.append(obj_potential_marker.radius)
        obj_potential_marker.is_merged = True

        mean_x, mean_y = np.mean(self.list_centers, axis=0)
        self.cx = int(round(mean_x))
        self.cy = int(round(mean_y))
        self.radius = int(round(max(self.list_radius)))

    def nb_circles(self):
        return len(self.list_centers)

    def get_id_from_slice(self, img_thresh):
        """Classify the marker as "A" (bright center) or "B" (dark center).

        Looks at the mean of the 3x3 patch of *img_thresh* around the center.
        """
        window = img_thresh[self.cy - 1:self.cy + 2, self.cx - 1:self.cx + 2]
        self.value_for_id = np.mean(window)
        if self.value_for_id > 200:
            self.identifiant = "A"
        else:
            self.identifiant = "B"
        return self.identifiant

    def __str__(self):
        return f"{self.nb_circles()} - {self.list_centers}"

    def __repr__(self):
        return self.__str__()
def sort_markers_detection(list_markers):
    """Order exactly four markers clockwise from the top-left corner, then
    rotate the list so that the marker carrying the unique id comes first.
    """
    def _rotated(seq, n):
        return seq[n:] + seq[:n]

    # split into the two highest and the two lowest markers
    by_height = sorted(list_markers, key=lambda m: m.cy)
    upper_a, upper_b, lower_a, lower_b = by_height

    if upper_a.cx < upper_b.cx:
        top_left, top_right = upper_a, upper_b
    else:
        top_left, top_right = upper_b, upper_a

    if lower_a.cx < lower_b.cx:
        bottom_left, bottom_right = lower_a, lower_b
    else:
        bottom_left, bottom_right = lower_b, lower_a

    clockwise = [top_left, top_right, bottom_right, bottom_left]
    ids = [marker.identifiant for marker in clockwise]

    # if exactly one marker carries a distinct id, make it the first corner
    if ids.count("A") == 1:
        return _rotated(clockwise, ids.index("A"))
    if ids.count("B") == 1:
        return _rotated(clockwise, ids.index("B"))
    return clockwise
def complicated_sort_markers(list_markers, workspace_ratio):
    """Pick the most plausible set of 4 markers out of >4 detections.

    Tries every valid 4-marker combination (one unique-id marker plus three
    of the other id when possible), warps each to the target rectangle, and
    keeps the combination whose perspective transform determinant is closest
    to 1 (i.e. closest to a rigid mapping).

    :param list_markers: detected markers (more than 4, at most 6)
    :param workspace_ratio: width over height ratio of the physical workspace
    :return: list of 4 sorted markers, or None if no valid combination exists
    """
    import itertools
    if workspace_ratio >= 1.0:
        target_w_area = int(round(workspace_ratio * 200))
        target_h_area = 200
    else:
        ratio_w_h = 1.0 / workspace_ratio
        target_h_area = int(round(ratio_w_h * 200))
        target_w_area = 200

    list_id = [marker.identifiant for marker in list_markers]
    count_A = list_id.count("A")
    count_B = list_id.count("B")
    # need at least 3 markers of one id to form a workspace
    if count_A < 3 > count_B:
        return None
    # the minority id is the "unique" corner marker
    if count_A < count_B:
        id_first_marker = "A"
        id_second_marker = "B"
    else:
        id_first_marker = "B"
        id_second_marker = "A"

    list_combinaisons = []
    list_marker_1 = [marker for marker in list_markers if marker.identifiant == id_first_marker]
    list_marker_2 = [marker for marker in list_markers if marker.identifiant == id_second_marker]
    if list_marker_1:
        # BUG FIX: itertools.combinations() returns a one-shot iterator;
        # it used to be consumed by the first marker1 iteration, leaving
        # nothing for the following ones.  Materialize it once as a list.
        list_combinaisons_marker_2 = list(itertools.combinations(list_marker_2, 3))
        for marker1 in list_marker_1:
            for combi_markers2 in list_combinaisons_marker_2:
                combin = [marker1] + list(combi_markers2)
                list_combinaisons.append(sort_markers_detection(combin))
    else:
        for combinaison in itertools.combinations(list_marker_2, 4):
            list_combinaisons.append(combinaison)
    if not list_combinaisons:
        return None

    final_pts = np.array(
        [[0, 0], [target_w_area - 1, 0],
         [target_w_area - 1, target_h_area - 1], [0, target_h_area - 1]],
        dtype=np.float32)

    # score each candidate by how close its warp determinant is to 1
    list_det_transfo_matrix = []
    for combin in list_combinaisons:
        points_grid = np.array([[mark.cx, mark.cy] for mark in combin], dtype=np.float32)
        transfo_matrix = cv2.getPerspectiveTransform(points_grid, final_pts)
        list_det_transfo_matrix.append(np.linalg.det(transfo_matrix))

    best_combin_ind = np.argmin(abs(np.array(list_det_transfo_matrix) - 1))
    best_markers = list_combinaisons[best_combin_ind]
    return best_markers
def find_markers_from_img_thresh(img_thresh, max_dist_between_centers=3, min_radius_circle=4,
                                 max_radius_circle=35, min_radius_marker=7):
    """Detect concentric-circle markers in a thresholded image.

    Finds contours, keeps circles whose radius lies inside the accepted
    band, then merges circles with nearby centers (concentric rings belong
    to one physical marker).  A marker is kept when it has more than two
    rings and a large enough merged radius; its id ("A"/"B") is then read
    from the image.

    :param img_thresh: binary (thresholded) image
    :param max_dist_between_centers: max center distance (px) to merge circles
    :param min_radius_circle: smallest accepted circle radius (px)
    :param max_radius_circle: largest accepted circle radius (px)
    :param min_radius_marker: smallest accepted merged-marker radius (px)
    :return: list of Marker candidates
    """
    contours = cv2.findContours(img_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
    list_potential_markers = []
    for cnt in contours:
        (x, y), radius = cv2.minEnclosingCircle(cnt)
        if not min_radius_circle < radius < max_radius_circle:
            continue
        center = (int(round(x)), int(round(y)))
        radius = int(radius)
        list_potential_markers.append(PotentialMarker(center, radius, cnt))
    # Sort by x so the inner scan can break early once centers are too far apart.
    list_potential_markers = sorted(list_potential_markers, key=lambda m: m.x)
    list_good_candidates = []
    for i, potential_marker in enumerate(list_potential_markers):
        if potential_marker.is_merged:
            continue
        marker1 = Marker(potential_marker)
        center_marker = marker1.get_center()
        for potential_marker2 in list_potential_markers[i + 1:]:
            # BUG FIX: skip circles already merged into a previous marker.
            # The original tested the OUTER marker (always un-merged here),
            # so a circle could be absorbed by two different markers.
            if potential_marker2.is_merged:
                continue
            center_potential = potential_marker2.get_center()
            if center_potential[0] - center_marker[0] > max_dist_between_centers:
                break
            dist = euclidean_dist_2_pts(center_marker, center_potential)
            if dist <= max_dist_between_centers:
                marker1.add_circle(potential_marker2)
                center_marker = marker1.get_center()
        if marker1.nb_circles() > 2 and marker1.radius >= min_radius_marker:
            list_good_candidates.append(marker1)
            marker1.get_id_from_slice(img_thresh)
    return list_good_candidates
| [
"numpy.mean",
"cv2.getPerspectiveTransform",
"cv2.minEnclosingCircle",
"numpy.linalg.det",
"itertools.combinations",
"numpy.array",
"cv2.adaptiveThreshold",
"cv2.warpPerspective",
"cv2.circle",
"cv2.cvtColor",
"cv2.findContours"
] | [((447, 484), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (459, 484), False, 'import cv2\n'), ((503, 650), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray'], {'maxValue': '(255)', 'adaptiveMethod': 'cv2.ADAPTIVE_THRESH_MEAN_C', 'thresholdType': 'cv2.THRESH_BINARY', 'blockSize': '(15)', 'C': '(25)'}), '(gray, maxValue=255, adaptiveMethod=cv2.\n ADAPTIVE_THRESH_MEAN_C, thresholdType=cv2.THRESH_BINARY, blockSize=15, C=25\n )\n', (524, 650), False, 'import cv2\n'), ((2079, 2118), 'numpy.array', 'np.array', (['points_grid'], {'dtype': 'np.float32'}), '(points_grid, dtype=np.float32)\n', (2087, 2118), True, 'import numpy as np\n'), ((2135, 2263), 'numpy.array', 'np.array', (['[[0, 0], [target_w_area - 1, 0], [target_w_area - 1, target_h_area - 1], [0,\n target_h_area - 1]]'], {'dtype': 'np.float32'}), '([[0, 0], [target_w_area - 1, 0], [target_w_area - 1, target_h_area -\n 1], [0, target_h_area - 1]], dtype=np.float32)\n', (2143, 2263), True, 'import numpy as np\n'), ((2307, 2358), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['points_grid', 'final_pts'], {}), '(points_grid, final_pts)\n', (2334, 2358), False, 'import cv2\n'), ((2442, 2514), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'transfo_matrix', '(target_w_area, target_h_area)'], {}), '(img, transfo_matrix, (target_w_area, target_h_area))\n', (2461, 2514), False, 'import cv2\n'), ((2591, 2628), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2603, 2628), False, 'import cv2\n'), ((2647, 2794), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray'], {'maxValue': '(255)', 'adaptiveMethod': 'cv2.ADAPTIVE_THRESH_MEAN_C', 'thresholdType': 'cv2.THRESH_BINARY', 'blockSize': '(15)', 'C': '(32)'}), '(gray, maxValue=255, adaptiveMethod=cv2.\n ADAPTIVE_THRESH_MEAN_C, thresholdType=cv2.THRESH_BINARY, blockSize=15, C=32\n )\n', (2668, 2794), False, 'import cv2\n'), ((8358, 
8486), 'numpy.array', 'np.array', (['[[0, 0], [target_w_area - 1, 0], [target_w_area - 1, target_h_area - 1], [0,\n target_h_area - 1]]'], {'dtype': 'np.float32'}), '([[0, 0], [target_w_area - 1, 0], [target_w_area - 1, target_h_area -\n 1], [0, target_h_area - 1]], dtype=np.float32)\n', (8366, 8486), True, 'import numpy as np\n'), ((3099, 3152), 'cv2.circle', 'cv2.circle', (['im_draw', '(cx, cy)', 'radius', '(0, 0, 255)', '(2)'], {}), '(im_draw, (cx, cy), radius, (0, 0, 255), 2)\n', (3109, 3152), False, 'import cv2\n'), ((3674, 3727), 'cv2.circle', 'cv2.circle', (['im_draw', '(cx, cy)', 'radius', '(0, 200, 0)', '(2)'], {}), '(im_draw, (cx, cy), radius, (0, 200, 0), 2)\n', (3684, 3727), False, 'import cv2\n'), ((5237, 5271), 'numpy.mean', 'np.mean', (['self.list_centers'], {'axis': '(0)'}), '(self.list_centers, axis=0)\n', (5244, 5271), True, 'import numpy as np\n'), ((5576, 5613), 'numpy.mean', 'np.mean', (['img_thresh[y:y + h, x:x + w]'], {}), '(img_thresh[y:y + h, x:x + w])\n', (5583, 5613), True, 'import numpy as np\n'), ((7889, 7929), 'itertools.combinations', 'itertools.combinations', (['list_marker_2', '(3)'], {}), '(list_marker_2, 3)\n', (7911, 7929), False, 'import itertools\n'), ((8199, 8239), 'itertools.combinations', 'itertools.combinations', (['list_marker_2', '(4)'], {}), '(list_marker_2, 4)\n', (8221, 8239), False, 'import itertools\n'), ((8601, 8668), 'numpy.array', 'np.array', (['[[mark.cx, mark.cy] for mark in combin]'], {'dtype': 'np.float32'}), '([[mark.cx, mark.cy] for mark in combin], dtype=np.float32)\n', (8609, 8668), True, 'import numpy as np\n'), ((8695, 8746), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['points_grid', 'final_pts'], {}), '(points_grid, final_pts)\n', (8722, 8746), False, 'import cv2\n'), ((9160, 9228), 'cv2.findContours', 'cv2.findContours', (['img_thresh', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (9176, 9228), False, 'import cv2\n'), 
((9315, 9342), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['cnt'], {}), '(cnt)\n', (9337, 9342), False, 'import cv2\n'), ((8786, 8815), 'numpy.linalg.det', 'np.linalg.det', (['transfo_matrix'], {}), '(transfo_matrix)\n', (8799, 8815), True, 'import numpy as np\n'), ((8854, 8887), 'numpy.array', 'np.array', (['list_det_transfo_matrix'], {}), '(list_det_transfo_matrix)\n', (8862, 8887), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.