code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, <NAME>"
__license__ = "GPL"
__version__ = "1.0.1"
__email__ = "<EMAIL>"
import numpy as np
from scipy.interpolate import interp1d
from scipy.optimize import fsolve
from scipy.optimize import root
import matplotlib.pyplot as plt
from src.UtilsMod import build_interp_func
class AeroMod(object):
    """
    Aerodynamic force model for a sail plan.

    Given a Yacht object (providing the sails and the hull windage
    geometry), computes the driving force Fx, the side force Fy and the
    heeling moment Mx for a given boat speed, heel angle and true wind,
    using a VPP-style formulation (flat/reef factors, fractionality,
    effective span correction, windage drag, ...).
    """
    def __init__(self, Yacht, rho=1.225, mu=0.0000181):
        """
        Initializes an Aero Model, given a set of sails

        Yacht : yacht object exposing `sails`, `boa`, `loa`, `ff`, `fa`
        rho   : air density (kg/m^3)
        mu    : dynamic viscosity of air; stored but not used in the
                force computation below
        """
        # physical params
        self.rho = rho
        self.mu = mu
        self.flat = 1.0   # sail flattening factor (1 = full shape)
        self.reef = 1.0   # reef factor
        self.ftj = 1.0    # jib furling/reduction parameter
        self.rfm = 1.0    # main reefing parameter
        # set sails and measure what is need once
        self.yacht = Yacht
        self.sails = self.yacht.sails[:2]  # only the first two sails (main + headsail) are modelled
        # are we upwind?
        self.up = self.sails[1].up
        self._measure_sails()
        self._measure_windage()
        # coeffs interp function
        self.fcdmult = build_interp_func("fcdmult")
        self.kheff = build_interp_func("kheff")

    def _measure_windage(self):
        # Hull windage geometry: beam, length and average freeboard
        # (weighted 0.625 fore / 0.375 aft).
        self.boa = self.yacht.boa
        self.loa = self.yacht.loa
        self.fbav = 0.625 * self.yacht.ff + 0.375 * self.yacht.fa

    def _measure_sails(self):
        """
        Re-measure the sails for the current reef/furl state and derive
        rig-level quantities: fractionality, roach, overlap, effective
        span correction and maximum spinnaker effective height.
        """
        self.fractionality = 1.0; b2=0.
        for sail in self.sails:
            sail.measure(self.rfm, self.ftj)
            if sail.type == "main":
                self.fractionality /= sail.P + sail.BAD
                b1 = sail.P_r + sail.BAD
                self.roach = sail.roach
                # top-of-rig height used for the spinnaker effective span
                tf = (0.16*(sail.CE-0.024)/sail.P+0.94)*sail.P+sail.BAD
            if sail.type == "jib":
                self.fractionality *= sail.IG_r
                b2 = sail.I*sail.IG_r/sail.IG
                self.overlap = sail.LPG_r / sail.J
                self.HBI = sail.HBI
        # NOTE(review): b1, tf, self.roach are only bound when a "main" is
        # present, and self.overlap/self.HBI only when a "jib" is present;
        # a sail set missing either type would raise NameError/AttributeError.
        self.eff_span_corr = (
            1.1
            + 0.08 * (self.roach - 0.2)
            + 0.5 * (0.68 + 0.31 * self.fractionality + 0.0075 * self.overlap - 1.1)
        )
        self.b = max(b1, b2)
        # assumes no mizain mast
        self.heff_height_max_spi = max(tf+self.HBI, 0)

    # prototype top function in hydro mod
    def update(self, vb, phi, tws, twa, flat, RED):
        """
        Update the aero model for current iter

        vb  : boat speed (m/s), clipped at 0
        phi : heel angle (deg), clipped at 0
        tws : true wind speed (m/s)
        twa : true wind angle (deg)
        flat: NOTE(review) -- accepted but never used; self.flat is
              recomputed from tws just below
        RED : sail reduction parameter; splits into jib furl (ftj) and
              main reef (rfm)
        Returns (Fx, Fy, Mx).
        """
        self.vb = max(0, vb)
        self.phi = max(0, phi)
        self.tws = tws
        self.twa = twa
        # gradual flatening of the sails with tws increase, min is 0.62 from 17 knots
        self.flat = np.where(tws<2.5, 1, np.where(tws < 8.5, 0.81 + 0.19 * np.cos((tws - 2.5) / 6 * np.pi), 0.62))
        self.ftj = max(RED-1., 0.)
        self.rfm = min(RED, 1.)
        self._measure_sails()
        self._update_windTriangle()
        self._area()
        self._compute_forces()
        return self.Fx, self.Fy, self.Mx

    def _compute_forces(self):
        """
        Computes forces for equilibrium.
        """
        # get new coeffs
        self._get_coeffs()
        # instead of writing many time
        awa = self.awa / 180.0 * np.pi
        # lift and drag
        self.lift = 0.5 * self.rho * self.aws ** 2 * self.area * self.cl
        self.drag = 0.5 * self.rho * self.aws ** 2 * self.area * self.cd + self._get_Rw(awa)
        # project into yacht coordinate system
        self.Fx = self.lift * np.sin(awa) - self.drag * np.cos(awa)
        self.Fy = self.lift * np.cos(awa) + self.drag * np.sin(awa)
        # heeling moment
        self.Mx = self.Fy * self._vce() * np.cos(self.phi / 180.0 * np.pi)
        # side-force is horizontal component of Fh
        self.Fy *= np.cos(np.deg2rad(self.phi))

    def _get_Rw(self, awa):
        # Windage drag of the hull; awa is already in radians here, but
        # NOTE(review): it is divided by 180/pi again below, so the two
        # call sites (_compute_forces passes radians) should be checked.
        Rw = 0.5 * self.rho * self.aws ** 2 * self._get_Aref(awa) * 0.816
        return Rw * np.cos(awa / 180.0 * np.pi)

    def _get_Aref(self, awa):
        # only hull part
        # Blend between frontal (boa) and lateral (loa) projected area.
        d = 0.5 * (1 - np.cos(awa / 90.0 * np.pi))
        return self.fbav * ((1 - d) * self.boa + d * self.loa)

    def _get_coeffs(self):
        """
        generate sail-set total lift and drag coefficient.
        """
        # lift (Clmax) and parasitic drag (Cd0max)
        self.cl = 0.0
        self.cd = 0.0
        kpp = 0.0
        for sail in self.sails:
            self.cl += sail.cl(self.awa) * sail.area * sail.bk
            self.cd += sail.cd(self.awa) * sail.area * sail.bk
            kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp
        self.cl /= self.area
        self.cd /= self.area
        # viscous quadratic parasitic drag and induced drag
        # (guards against division by zero when cl or heff vanish)
        devisor_1 = self.area * self.cl ** 2
        devisor_2 = np.pi * self._heff(self.awa) ** 2
        self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)
        # fraction of parasitic drag due to jib
        self.fcdj = 0.0
        for sail in self.sails:
            if sail.type == "jib":
                self.fcdj = (
                    sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)
                )
        # final lift and drag
        self.cd = self.cd * (
            self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)
        ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)
        self.cl = self.flat * self.cl

    def _update_windTriangle(self):
        """
        find AWS and AWA for a given TWS, TWA and VB
        """
        # Solve the wind-triangle relation for the apparent wind angle.
        _awa_ = lambda awa: self.vb * np.sin(awa / 180.0 * np.pi) - self.tws * np.sin(
            (self.twa - awa) / 180.0 * np.pi
        )
        self.awa = fsolve(_awa_, self.twa)[0]
        self.aws = np.sqrt(
            (self.tws * np.sin(self.twa / 180.0 * np.pi)) ** 2
            + (self.tws * np.cos(self.twa / 180.0 * np.pi) + self.vb) ** 2
        )

    def _area(self):
        """
        Fill sail area variable
        """
        self.area = 0.0
        for sail in self.sails:
            self.area += sail.area

    def _vce(self):
        """
        Vectical centre of effort lift/drag weigted
        """
        sum = 0.0  # NOTE(review): shadows the builtin `sum` within this method
        for sail in self.sails:
            cl2 = sail.cl(self.awa)**2
            cd2 = sail.cd(self.awa)**2
            sum += sail.area * sail.vce * sail.bk * np.sqrt(cl2+cd2)
        self._area()
        # centre-of-effort drop when the jib is furled (upwind sails only)
        deltaCH = 0 if self.sails[1].up!=True else (1-self.ftj)*0.05*self.sails[1].IG
        Zce = sum/(self.area*np.sqrt(self.cl**2+self.cd**2)) - deltaCH
        return (Zce*(1-0.203*(1-self.flat)-0.451*(1-self.flat)*(1-self.fractionality)))

    def phi_up(self):
        """
        heel angle correction for AWA and AWS (5.51), this is in Radians!
        """
        return 0.5 * (self.phi + 10 * (self.phi / 30.0) ** 2) / 180.0 * np.pi

    def _heff(self, awa):
        # Effective rig height; awa clamped to [0, 90] degrees.
        awa = max(0, min(awa, 90))
        if self.up:
            cheff = self.eff_span_corr * self.kheff(awa)
        else:
            cheff = 1.0 / self.b * self.reef * self.heff_height_max_spi
        return (self.b + self.HBI) * cheff
    #
    # -- utility functions
    #
    def debbug(self):
        # Plot the sail coefficient curves and the fcdmult/kheff
        # interpolation functions for visual inspection.
        for sail in self.yacht.sails:
            sail.debbug_coeffs()
        flat = np.linspace(0, 1, 64)
        awa = np.linspace(0, 90, 64)
        res1 = np.empty_like(flat)
        res2 = np.empty_like(awa)
        for i in range(64):
            res1[i] = self.fcdmult(flat[i])
            res2[i] = self.kheff(awa[i])
        plt.plot(flat, res1)
        plt.show()
        plt.plot(awa, res2)
        plt.show()

    def print_state(self):
        # NOTE(review): update() requires 6 arguments (vb, phi, tws, twa,
        # flat, RED) but only 5 are passed here (and the 5th is twa, not
        # flat) -- this call raises TypeError as written.
        self.update(self.vb, self.phi, self.tws, self.twa, self.twa)
        print("AeroMod state:")
        print(" TWA is :  %.2f (deg)" % self.twa)
        print(" TWS is :  %.2f (m/s)" % self.tws)
        print(" AWA is :  %.2f (deg)" % self.awa)
        print(" AWS is :  %.2f (m/s)" % self.aws)
        print(" Vb is :   %.2f (m/s)" % self.vb)
        print(" Heel is : %.2f (deg)" % self.phi)
        print(" Drive is: %.2f (N)" % self.Fx)
        print(" SSF is :  %.2f (N)" % self.Fy)
        print(" HM is :   %.2f (Nm)" % self.Mx)
        print(" Cl is :   %.2f (-)" % self.cl)
        print(" Cd is :   %.2f (-)" % self.cd)
        print(" Flat is : %.2f (-)" % self.flat)
        print(" Sail area:")
        for sail in self.sails:
            print(" - " + sail.type + " : %.2f (m^2)" % sail.area)
# if __name__ == "__main__":
# aero = AeroMod(sails=[Main(24.5, 5.5),
# Jib(17.3, 4.4)])
# aero.debbug()
# aero.print_state()
| [
"scipy.optimize.fsolve",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.deg2rad",
"numpy.empty_like",
"numpy.cos",
"src.UtilsMod.build_interp_func",
"numpy.sin",
"matplotlib.pyplot.show"
] | [((999, 1027), 'src.UtilsMod.build_interp_func', 'build_interp_func', (['"""fcdmult"""'], {}), "('fcdmult')\n", (1016, 1027), False, 'from src.UtilsMod import build_interp_func\n'), ((1049, 1075), 'src.UtilsMod.build_interp_func', 'build_interp_func', (['"""kheff"""'], {}), "('kheff')\n", (1066, 1075), False, 'from src.UtilsMod import build_interp_func\n'), ((7120, 7141), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(64)'], {}), '(0, 1, 64)\n', (7131, 7141), True, 'import numpy as np\n'), ((7156, 7178), 'numpy.linspace', 'np.linspace', (['(0)', '(90)', '(64)'], {}), '(0, 90, 64)\n', (7167, 7178), True, 'import numpy as np\n'), ((7194, 7213), 'numpy.empty_like', 'np.empty_like', (['flat'], {}), '(flat)\n', (7207, 7213), True, 'import numpy as np\n'), ((7229, 7247), 'numpy.empty_like', 'np.empty_like', (['awa'], {}), '(awa)\n', (7242, 7247), True, 'import numpy as np\n'), ((7369, 7389), 'matplotlib.pyplot.plot', 'plt.plot', (['flat', 'res1'], {}), '(flat, res1)\n', (7377, 7389), True, 'import matplotlib.pyplot as plt\n'), ((7398, 7408), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7406, 7408), True, 'import matplotlib.pyplot as plt\n'), ((7417, 7436), 'matplotlib.pyplot.plot', 'plt.plot', (['awa', 'res2'], {}), '(awa, res2)\n', (7425, 7436), True, 'import matplotlib.pyplot as plt\n'), ((7445, 7455), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7453, 7455), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3558), 'numpy.cos', 'np.cos', (['(self.phi / 180.0 * np.pi)'], {}), '(self.phi / 180.0 * np.pi)\n', (3532, 3558), True, 'import numpy as np\n'), ((3637, 3657), 'numpy.deg2rad', 'np.deg2rad', (['self.phi'], {}), '(self.phi)\n', (3647, 3657), True, 'import numpy as np\n'), ((3783, 3810), 'numpy.cos', 'np.cos', (['(awa / 180.0 * np.pi)'], {}), '(awa / 180.0 * np.pi)\n', (3789, 3810), True, 'import numpy as np\n'), ((5569, 5592), 'scipy.optimize.fsolve', 'fsolve', (['_awa_', 'self.twa'], {}), '(_awa_, self.twa)\n', (5575, 5592), False, 
'from scipy.optimize import fsolve\n'), ((3352, 3363), 'numpy.sin', 'np.sin', (['awa'], {}), '(awa)\n', (3358, 3363), True, 'import numpy as np\n'), ((3378, 3389), 'numpy.cos', 'np.cos', (['awa'], {}), '(awa)\n', (3384, 3389), True, 'import numpy as np\n'), ((3420, 3431), 'numpy.cos', 'np.cos', (['awa'], {}), '(awa)\n', (3426, 3431), True, 'import numpy as np\n'), ((3446, 3457), 'numpy.sin', 'np.sin', (['awa'], {}), '(awa)\n', (3452, 3457), True, 'import numpy as np\n'), ((3891, 3917), 'numpy.cos', 'np.cos', (['(awa / 90.0 * np.pi)'], {}), '(awa / 90.0 * np.pi)\n', (3897, 3917), True, 'import numpy as np\n'), ((6220, 6238), 'numpy.sqrt', 'np.sqrt', (['(cl2 + cd2)'], {}), '(cl2 + cd2)\n', (6227, 6238), True, 'import numpy as np\n'), ((5446, 5473), 'numpy.sin', 'np.sin', (['(awa / 180.0 * np.pi)'], {}), '(awa / 180.0 * np.pi)\n', (5452, 5473), True, 'import numpy as np\n'), ((5487, 5527), 'numpy.sin', 'np.sin', (['((self.twa - awa) / 180.0 * np.pi)'], {}), '((self.twa - awa) / 180.0 * np.pi)\n', (5493, 5527), True, 'import numpy as np\n'), ((6373, 6409), 'numpy.sqrt', 'np.sqrt', (['(self.cl ** 2 + self.cd ** 2)'], {}), '(self.cl ** 2 + self.cd ** 2)\n', (6380, 6409), True, 'import numpy as np\n'), ((2587, 2618), 'numpy.cos', 'np.cos', (['((tws - 2.5) / 6 * np.pi)'], {}), '((tws - 2.5) / 6 * np.pi)\n', (2593, 2618), True, 'import numpy as np\n'), ((5648, 5680), 'numpy.sin', 'np.sin', (['(self.twa / 180.0 * np.pi)'], {}), '(self.twa / 180.0 * np.pi)\n', (5654, 5680), True, 'import numpy as np\n'), ((5713, 5745), 'numpy.cos', 'np.cos', (['(self.twa / 180.0 * np.pi)'], {}), '(self.twa / 180.0 * np.pi)\n', (5719, 5745), True, 'import numpy as np\n')] |
"""P2S10 TD3 v5 with 60x60 front and orientation from ac3.ipynb
Automatically generated by Colaboratory.
# Twin-Delayed DDPG
On a custom car env
state:
1. 40x40 cutout: 25 embeddings || car is at mid ( grid embeddings)
2. 25 cnn embeddings `+` [distance, orientation, -orientation, self.angle, -self.angle]
NOTE: Embeddings are actually 25 rectangles
Action space: angle and speed with range[-20,20]
NOTE: predicted action speed is interpolated b/w [3,6]
TODO: add Dabba delivery system in place ?? DONE
"""
# import libraries
import pygame
import gym_dabbewala
from gym import wrappers
import gym
from PIL import Image as PILImage
import math
from collections import deque
from torch.autograd import Variable
import torchvision.transforms as T
import torch.nn.functional as F
import torch.nn as nn
import torch
import matplotlib.pyplot as plt
import numpy as np
import random
import time
import os
import sys
import ai
"""## We make a function that evaluates the policy by calculating its average reward over 10 episodes"""
def evaluate_policy(policy, eval_episodes=10):
    """Run `eval_episodes` complete episodes in the global `env` with the
    given policy (no exploration noise) and return the mean episode reward."""
    total = 0.
    for _ in range(eval_episodes):
        obs = env.reset()
        finished = False
        while not finished:
            chosen = policy.select_action(obs['surround'], obs['orientation'])
            obs, reward, finished, _ = env.step(chosen)
            env.render()
            total += reward
    avg_reward = total / eval_episodes
    print("---------------------------------------")
    print("Average Reward over the Evaluation Step: %f" % (avg_reward))
    print("---------------------------------------")
    return avg_reward
if __name__ == "__main__":
    # Device selection (kept for compatibility; the models handle their own
    # placement inside the `ai` module).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    """## We set the parameters"""
    env_name = "DabbeWala-v0"
    # seed = 0 # Random seed number
    start_timesteps = 2e4  # Iterations with random actions before using the policy
    eval_freq = 5e3        # How often (timesteps) the evaluation step is performed
    max_timesteps = 5e5    # Total number of iterations/timesteps
    save_models = True     # Whether to save the pre-trained model
    expl_noise = 0.15      # STD of the exploration Gaussian noise
    batch_size = 100       # Size of the batch
    discount = 0.99        # Discount factor gamma
    tau = 0.005            # Target network update rate
    policy_noise = 0.25    # STD of Gaussian noise added to actions for exploration
    noise_clip = 0.5       # Max value of the Gaussian noise added to the actions
    policy_freq = 2        # Iterations between Actor (policy) updates

    """## We create a file name for the two saved models: the Actor and Critic models"""
    file_name = "%s_%s" % ("TD3", env_name)

    """## We create the folders the checkpoints and evaluations are written to"""
    model_path = "./pytorch_models"  # FIX: was undefined -> NameError at the first periodic save
    if save_models and not os.path.exists(model_path):
        os.makedirs(model_path)
    # FIX: np.save below writes into ./results, which was never created
    if not os.path.exists("./results"):
        os.makedirs("./results")

    """## We create the environment"""
    env = gym.make(env_name)
    # torch.manual_seed(seed)
    # np.random.seed(seed)
    state_dim = env.observation_space["surround"].shape[2]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    min_action = float(env.action_space.low[0])

    """ ## We create the policy network (the Actor model)"""
    policy = ai.TD3(state_dim, action_dim, max_action)

    """## We create the Experience Replay memory"""
    replay_buffer = ai.ReplayBuffer()

    """## We define a list where all the evaluation results over 10 episodes are stored"""
    evaluations = [evaluate_policy(policy)]
    max_episode_steps = env._max_episode_steps

    """## We initialize the variables"""
    total_timesteps = 0
    timesteps_since_eval = 0
    episode_num = 0
    done = True
    t0 = time.time()

    """## Training"""
    max_timesteps = 500000
    # We start the main loop over 500,000 timesteps
    while total_timesteps < max_timesteps:
        # If the episode is done
        if done:
            # If we are not at the very beginning, we train the model on the finished episode
            if total_timesteps != 0:
                print("Timesteps: {} Ep_Number: {} Reward: {:.2f} Cocaine: {} Mild_Cocaine {} Sadness: {} Death: {}".format(total_timesteps, episode_num, episode_reward, cocaine, mild_cocaine, sadness, death))
                policy.train(replay_buffer, episode_timesteps, batch_size, discount, tau, policy_noise, noise_clip, policy_freq)
            # We evaluate the episode and we save the policy
            if timesteps_since_eval >= eval_freq:
                timesteps_since_eval %= eval_freq
                evaluations.append(evaluate_policy(policy))
                policy.save(file_name, directory=model_path)
                np.save("./results/%s" % (file_name), evaluations)
            # When the training step is done, we reset the state of the environment
            obs = env.reset()
            # Set the Done to False
            done = False
            # Set rewards and episode timesteps to zero
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1
            # positive and negative reward counters
            cocaine = 0
            sadness = 0
            mild_cocaine = 0
            death = 0
        # Before `start_timesteps` timesteps, we play random actions
        if total_timesteps < start_timesteps:
            action = env.action_space.sample()
        else:  # After that, we switch to the model
            action = policy.select_action(obs['surround'], obs['orientation'])
            # If the explore_noise parameter is not 0, we add noise to the action and we clip it
            if expl_noise != 0:
                action = (action + np.random.normal(0, expl_noise, size=env.action_space.shape[0])).clip(env.action_space.low, env.action_space.high)
        # The agent performs the action, then reaches the next state and receives the reward
        new_obs, reward, done, _ = env.step(action)
        # We check if the episode is done (timeout is not a true terminal state)
        done_bool = 0 if episode_timesteps + 1 == env._max_episode_steps else float(done)
        # We increase the total reward
        episode_reward += reward
        # Classify the reward for the per-episode counters
        # (the thresholds mirror the env's reward scheme)
        if reward > 0.00:
            cocaine += 1
        elif reward == -0.01:
            mild_cocaine += 1
        elif reward < -0.6:
            death += 1
        else:
            sadness += 1
        # We store the new transition into the Experience Replay memory (ReplayBuffer)
        replay_buffer.add((obs['surround'], obs['orientation'], new_obs['surround'], new_obs['orientation'], action, reward, done_bool))
        # We update the state, the episode timestep, the total timesteps,
        # and the timesteps since the evaluation of the policy
        obs = new_obs
        episode_timesteps += 1
        total_timesteps += 1
        timesteps_since_eval += 1

    # We add the last policy evaluation to our list of evaluations and we save our model
    evaluations.append(evaluate_policy(policy))
    if save_models:
        policy.save("%s" % (file_name), directory="./pytorch_models")
    env.close()
| [
"numpy.random.normal",
"os.path.exists",
"os.makedirs",
"ai.ReplayBuffer",
"ai.TD3",
"torch.cuda.is_available",
"time.time",
"gym.make",
"numpy.save"
] | [((3245, 3263), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (3253, 3263), False, 'import gym\n'), ((3598, 3639), 'ai.TD3', 'ai.TD3', (['state_dim', 'action_dim', 'max_action'], {}), '(state_dim, action_dim, max_action)\n', (3604, 3639), False, 'import ai\n'), ((3713, 3730), 'ai.ReplayBuffer', 'ai.ReplayBuffer', ([], {}), '()\n', (3728, 3730), False, 'import ai\n'), ((4056, 4067), 'time.time', 'time.time', ([], {}), '()\n', (4065, 4067), False, 'import time\n'), ((3154, 3185), 'os.makedirs', 'os.makedirs', (['"""./pytorch_models"""'], {}), "('./pytorch_models')\n", (3165, 3185), False, 'import os\n'), ((1763, 1788), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1786, 1788), False, 'import torch\n'), ((3110, 3144), 'os.path.exists', 'os.path.exists', (['"""./pytorch_models"""'], {}), "('./pytorch_models')\n", (3124, 3144), False, 'import os\n'), ((5085, 5133), 'numpy.save', 'np.save', (["('./results/%s' % file_name)", 'evaluations'], {}), "('./results/%s' % file_name, evaluations)\n", (5092, 5133), True, 'import numpy as np\n'), ((6136, 6199), 'numpy.random.normal', 'np.random.normal', (['(0)', 'expl_noise'], {'size': 'env.action_space.shape[0]'}), '(0, expl_noise, size=env.action_space.shape[0])\n', (6152, 6199), True, 'import numpy as np\n')] |
from random import random
import numpy as np
import math
class MCM:
    '''
    Monte-Carlo integrator.

    Given an integrand, the integration limits and a sample count, the
    definite integral can be estimated either by the hit-or-miss method
    (solve_point) or by the sample-mean method (solve_average).
    '''
    def __init__(self, f, xlim, ylim=(0,1), times=10000):
        '''
        f     -- integrand, an ordinary Python function of one variable
        xlim  -- (lower, upper) integration bounds, tuple or list
        ylim  -- (lower, upper) bounds of the bounding box used by the
                 hit-or-miss method; must enclose the range of f on xlim
        times -- number of random samples; choose according to hardware
        '''
        self.f = f
        self.xlim = xlim
        self.ylim = ylim
        self.times = times

    def _random_point(self, xlim, ylim):
        # Draw one uniform point inside the box xlim x ylim
        # (x first, then y, so the random stream order is fixed).
        px = random() * (xlim[1] - xlim[0]) + xlim[0]
        py = random() * (ylim[1] - ylim[0]) + ylim[0]
        return px, py

    def solve_point(self):
        """Hit-or-miss estimate: box area times the fraction of random
        points that fall below the curve."""
        hits = 0
        for _ in range(self.times):
            px, py = self._random_point(self.xlim, self.ylim)
            if self.f(px) > py:
                hits += 1
        return (hits / self.times) * (self.xlim[1] - self.xlim[0]) * (self.ylim[1] - self.ylim[0])

    def solve_average(self):
        """Sample-mean estimate: interval width times the mean of f at
        uniformly random abscissae."""
        width = self.xlim[1] - self.xlim[0]
        xs = np.random.rand(self.times) * width + self.xlim[0]
        total = 0
        for value in xs:
            total += self.f(value)
        return width * total / self.times
def f(x):
    """Integrand used by the demo below: sin(x)."""
    return math.sin(x)
# Demo: estimate the integral of sin(x) over [0, pi] (exact value 2) with
# one million samples, using both estimators.
# NOTE(review): the upper limit literal 3.141592965355 differs from pi
# (3.14159265358979...) in the 8th decimal -- likely a typo; math.pi
# would be the safer choice.
mcm = MCM(f, (0, 3.141592965355), (0, 2), 1000000)
print(mcm.solve_point())
print(mcm.solve_average())
"random.random",
"math.sin",
"numpy.random.rand"
] | [((1214, 1225), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (1222, 1225), False, 'import math\n'), ((506, 514), 'random.random', 'random', ([], {}), '()\n', (512, 514), False, 'from random import random\n'), ((560, 568), 'random.random', 'random', ([], {}), '()\n', (566, 568), False, 'from random import random\n'), ((978, 1004), 'numpy.random.rand', 'np.random.rand', (['self.times'], {}), '(self.times)\n', (992, 1004), True, 'import numpy as np\n')] |
import sys
sys.path.append('/home/xuchengjun/ZXin/smap')
import torch
from torch.utils.data import DataLoader
import os
import argparse
import numpy as np
import copy
import time
from IPython import embed
from dataset.p2p_dataset import P2PDataset
from model.refine_model.refinenet import RefineNet
# from lib.utils.model_serialization import load_state_dict
from exps.refinenet_root2.config import cfg
from path import Path
def main(opt):
    """
    Evaluate a RefineNet checkpoint on the P2P test set: per-joint root
    error (L2 distance per joint, averaged over samples) and inference
    timing.

    opt.load_epoch selects the checkpoint file; opt.device is the torch
    device string.
    """
    load_epoch = opt.load_epoch
    test_dataset = P2PDataset(dataset_path=cfg.DATA_DIR, root_idx=cfg.DATASET.ROOT_IDX)
    test_loader = DataLoader(test_dataset, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False)
    model = RefineNet()
    model = model.to(opt.device)
    model_path = os.path.join('/media/xuchengjun/zx/human_pose/pth/main/11.20', "RefineNet_epoch_%03d.pth" % load_epoch)
    model.load_state_dict(torch.load(model_path))
    model.eval()
    min_root_error = 1000
    min_idx = 0
    # NOTE(review): with the per-epoch reloading and the epoch increment
    # commented out below, this `while True` never terminates and keeps
    # re-evaluating the same checkpoint -- confirm intent.
    while True:
        # model_path = os.path.join('/home/xuchengjun/ZXin/human_pose/pth/11.20', "RefineNet_epoch_%03d.pth" % load_epoch)
        # if not os.path.exists(ckpt_file):
        #     print("No ckpt of epoch {}".format(load_epoch))
        #     print("Best real_error iter is {}, error is {}".format(min_idx, min_root_error))
        #     break
        # load_state_dict(model, torch.load(ckpt_file))
        # model.load_state_dict(torch.load(model_path))
        # model.eval()
        count = 0
        root_error = 0
        time_total = 0.0
        for i, (inp, gt_t) in enumerate(test_loader):
            inp = inp.to(opt.device)
            gt_t = gt_t
            with torch.no_grad():
                start_time = time.time()
                pred_t = model(inp)
                # embed()
                time_total += time.time() - start_time
                pred_t = pred_t.cpu()
            # loss = criterion(pred, gt)
            for j in range(len(pred_t)):
                # 15 joints x 3 coordinates per sample (copied so that
                # the in-place resize cannot touch the loader tensors)
                gt = copy.deepcopy(gt_t[j].numpy())
                gt.resize((15, 3))
                pred = copy.deepcopy(pred_t[j].numpy())
                pred.resize((15, 3))
                count += 1
                root_error += np.linalg.norm(np.abs(pred - gt), axis=1)
        # embed()
        # print(root_error)
        print_root_error = root_error/count
        mean_root_error = np.mean(print_root_error)
        print("Root error of epoch {} is {}, mean is {}".format(load_epoch, print_root_error, mean_root_error))
        if mean_root_error < min_root_error:
            min_root_error = mean_root_error
            min_idx = load_epoch
        # load_epoch += cfg.SAVE_FREQ
        print("Time per inference is {}".format(time_total / len(test_loader)))
if __name__ == "__main__":
    # Command-line entry point: checkpoint epoch + torch device.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-load_epoch', type=int, default=300)
    arg_parser.add_argument('--device', type=str, default='cuda:1')
    main(arg_parser.parse_args())
| [
"numpy.mean",
"numpy.abs",
"argparse.ArgumentParser",
"model.refine_model.refinenet.RefineNet",
"torch.load",
"time.time",
"os.path.join",
"dataset.p2p_dataset.P2PDataset",
"torch.utils.data.DataLoader",
"torch.no_grad",
"sys.path.append"
] | [((11, 56), 'sys.path.append', 'sys.path.append', (['"""/home/xuchengjun/ZXin/smap"""'], {}), "('/home/xuchengjun/ZXin/smap')\n", (26, 56), False, 'import sys\n'), ((493, 561), 'dataset.p2p_dataset.P2PDataset', 'P2PDataset', ([], {'dataset_path': 'cfg.DATA_DIR', 'root_idx': 'cfg.DATASET.ROOT_IDX'}), '(dataset_path=cfg.DATA_DIR, root_idx=cfg.DATASET.ROOT_IDX)\n', (503, 561), False, 'from dataset.p2p_dataset import P2PDataset\n'), ((580, 651), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'cfg.TEST.BATCH_SIZE', 'shuffle': '(False)'}), '(test_dataset, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False)\n', (590, 651), False, 'from torch.utils.data import DataLoader\n'), ((664, 675), 'model.refine_model.refinenet.RefineNet', 'RefineNet', ([], {}), '()\n', (673, 675), False, 'from model.refine_model.refinenet import RefineNet\n'), ((726, 834), 'os.path.join', 'os.path.join', (['"""/media/xuchengjun/zx/human_pose/pth/main/11.20"""', "('RefineNet_epoch_%03d.pth' % load_epoch)"], {}), "('/media/xuchengjun/zx/human_pose/pth/main/11.20', \n 'RefineNet_epoch_%03d.pth' % load_epoch)\n", (738, 834), False, 'import os\n'), ((2802, 2827), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2825, 2827), False, 'import argparse\n'), ((856, 878), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (866, 878), False, 'import torch\n'), ((2381, 2406), 'numpy.mean', 'np.mean', (['print_root_error'], {}), '(print_root_error)\n', (2388, 2406), True, 'import numpy as np\n'), ((1638, 1653), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1651, 1653), False, 'import torch\n'), ((1684, 1695), 'time.time', 'time.time', ([], {}), '()\n', (1693, 1695), False, 'import time\n'), ((1788, 1799), 'time.time', 'time.time', ([], {}), '()\n', (1797, 1799), False, 'import time\n'), ((2217, 2234), 'numpy.abs', 'np.abs', (['(pred - gt)'], {}), '(pred - gt)\n', (2223, 2234), True, 'import numpy as np\n')] |
import numpy as np
# The worker class is a member of the trainer class, the trainer can have multiple workers
class Worker():
def __init__(self, settings, sess, number, trainerNumber, network, queues, coord):
self.localAC = network
self.name = 'worker{}'.format(number)
self.number = number
self.settings = settings
self.trainerQueue = queues["trainer"]
self.trainerNumber = trainerNumber
self.coord = coord
self.sess = sess
self.playerActionQueue = queues["playerAction"]
self.game = None
self.playerActionQueue.put({"WindowSettings": True if self.name == "worker0" else False,
"worker": number})
self.episodeInProgress = True
self.values = []
def work(self, gameData):
gameData # Get data from the game
if gameData[0] == "CurrentFrame": # Process the next action based on frame
frame = gameData[1]
feedDict = {self.localAC.frame: [frame]}
actionDist, value = self.sess.run([self.localAC.logits,
self.localAC.value],
feed_dict=feedDict)
action = np.random.choice(actionDist[0], p=actionDist[0])
action = np.argmax(actionDist==action)
self.playerActionQueue.put(action)
self.values.append(value[0,0])
elif gameData[0] == "Bootstrap": # Bootstrap from bootstrap data
print("{} is bootstrapping!".format(self.name))
episodeData = gameData[1]
bootstrapValues = self.values[0:len(episodeData)]
self.values = self.values[len(episodeData)::]
workerData = {"episodeData": episodeData,
"values": bootstrapValues,
"bootStrapValue": self.values[0],
"score": 0,
"worker": self.number,
"trainer": self.trainerNumber}
self.trainerQueue.put(workerData)
elif gameData[0] == "EpisodeData": # Episode is finished, perform training and logging
episodeData = gameData[1]
score = gameData[2]
workerData = {"episodeData": episodeData,
"values": self.values,
"bootStrapValue": 0,
"score": score,
"worker": self.number,
"trainer": self.trainerNumber}
self.trainerQueue.put(workerData)
self.values = []
elif gameData[0] == "Game closed!": # Game has been closed
print("{}s game closed, saving and quitting program!".format(self.name))
self.coord.request_stop()
else:
print("Invalid game data! got: {}".format(gameData[0])) | [
"numpy.random.choice",
"numpy.argmax"
] | [((1256, 1304), 'numpy.random.choice', 'np.random.choice', (['actionDist[0]'], {'p': 'actionDist[0]'}), '(actionDist[0], p=actionDist[0])\n', (1272, 1304), True, 'import numpy as np\n'), ((1326, 1357), 'numpy.argmax', 'np.argmax', (['(actionDist == action)'], {}), '(actionDist == action)\n', (1335, 1357), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 10:12:52 2019
@author: gregz
"""
import argparse as ap
import numpy as np
import os.path as op
import matplotlib.pyplot as plt
import sys
import warnings
from scipy.interpolate import interp1d, griddata
from math_utils import biweight
from input_utils import setup_logging
from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans
from astropy.convolution import Gaussian2DKernel
from astropy.io import fits
from astropy.modeling.models import Polynomial2D, Gaussian2D
from astropy.modeling.fitting import LevMarLSQFitter, FittingWithOutlierRemoval
from astropy.stats import sigma_clip, sigma_clipped_stats, mad_std
from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra
from sklearn.decomposition import PCA
from astropy.table import Table
def get_fiber_to_fiber(spec, wave):
    """
    Derive fiber-to-fiber throughput corrections.

    Iteratively (3 passes) builds a smooth average spectrum from all
    fibers -- biweight values in 3500 wavelength chunks, interpolated
    quadratically -- and divides each fiber by it.

    Parameters: spec, wave are 2D arrays (fiber, pixel).
    Returns the fiber-to-fiber array with the same shape as spec.
    """
    flat_wave = wave.ravel()
    order = np.argsort(flat_wave)
    flat_flux = spec.ravel()
    for _ in range(3):
        wave_chunks = np.array_split(flat_wave[order], 3500)
        flux_chunks = np.array_split(flat_flux[order], 3500)
        knot_w = np.array([np.mean(chunk) for chunk in wave_chunks])
        knot_s = np.array([biweight(chunk) for chunk in flux_chunks])
        smooth = interp1d(knot_w, knot_s, kind='quadratic',
                          bounds_error=False, fill_value='extrapolate')
        ftf = 0. * spec
        for row in np.arange(spec.shape[0]):
            ftf[row] = spec[row] / smooth(wave[row])
        # Refine using the throughput-corrected fluxes for the next pass.
        flat_flux = (spec / ftf).ravel()
    return ftf
def get_mastersky(spec, ftf, wave, sel=None):
    """
    Build a master sky spectrum from the selected fibers.

    The throughput-corrected fluxes of the fibers in `sel` (all fibers if
    None) are collapsed into 3500 wavelength chunks (biweight per chunk)
    and interpolated quadratically.

    Returns (sky, I): sky evaluated on each fiber's wavelength grid, and
    the interpolator itself.
    """
    if sel is None:
        sel = np.ones((spec.shape[0],), dtype=bool)
    flat_wave = wave[sel].ravel()
    flat_flux = ((spec / ftf)[sel]).ravel()
    order = np.argsort(flat_wave)
    knot_w = np.array([np.mean(chunk)
                       for chunk in np.array_split(flat_wave[order], 3500)])
    knot_s = np.array([biweight(chunk)
                       for chunk in np.array_split(flat_flux[order], 3500)])
    I = interp1d(knot_w, knot_s, kind='quadratic', bounds_error=False,
                 fill_value='extrapolate')
    sky = 0. * spec
    for row in np.arange(spec.shape[0]):
        sky[row] = I(wave[row])
    return sky, I
def get_rolling_mastersky(spec, ftf, wave, sel=None, size=24):
    """
    Per-fiber sky estimate built from a rolling window of neighbouring
    fibers (window of `size` fibers centred on each fiber), combined with
    the `sel` fiber mask. Non-finite chunk values are dropped before
    interpolation.

    Returns the sky array with the same shape as spec.
    """
    if sel is None:
        sel = np.ones((spec.shape[0],), dtype=bool)
    fiber_index = np.arange(spec.shape[0])
    sky = 0. * spec
    for fib in np.arange(spec.shape[0]):
        window = (np.abs(fiber_index - fib) <= size / 2.)
        use = sel * window
        print('Working on Fiber %i with %i fibers' % (fib+1, use.sum()))
        flat_wave = wave[use].ravel()
        flat_flux = (spec[use] / ftf[use]).ravel()
        order = np.argsort(flat_wave)
        knot_w = np.array([np.mean(chunk)
                           for chunk in np.array_split(flat_wave[order], 3500)])
        knot_s = np.array([biweight(chunk)
                           for chunk in np.array_split(flat_flux[order], 3500)])
        ok = np.isfinite(knot_s)
        model = interp1d(knot_w[ok], knot_s[ok], kind='quadratic',
                         bounds_error=False, fill_value='extrapolate')
        sky[fib] = model(wave[fib])
    return sky
def correct_amplifier_offsets(y, xp, yp, channel, order=1, kernel=12.):
    """
    Derive a smooth relative-throughput correction across the two
    amplifier halves (fibers [0:140] and [140:]) of an IFU.

    y        : per-fiber values to be corrected
    xp, yp   : fiber positions
    channel  : channel name; 'orange' gets fixed fallback offsets
    order    : NOTE(review) -- accepted but unused (Polynomial2D(1) is
               hard-coded below)
    kernel   : Gaussian smoothing kernel width (fibers)

    Returns (correction, k): the normalised correction per fiber, and the
    masked copy of y used for the fit (NaN where rejected).
    """
    # Exclude the region around the brightest fiber (assumed source).
    xc = xp[np.nanargmax(y)]
    yc = yp[np.nanargmax(y)]
    d = np.sqrt((xp-xc)**2 + (yp-yc)**2)
    k = y * 1.
    k[y==0.] = np.nan
    def split_fit(var, ind=140):
        # Smooth each amplifier half independently so the step at fiber
        # `ind` is preserved. NOTE(review): `var` is unused -- the closure
        # always reads the (mutated) outer `k`.
        model = k * 0.
        model[:ind] = convolve(k[:ind], Gaussian1DKernel(kernel), boundary='extend')
        model[ind:] = convolve(k[ind:], Gaussian1DKernel(kernel), boundary='extend')
        return model
    # Iterative sigma-clipping of outliers against the smooth model.
    for i in np.arange(5.):
        model = split_fit(k)
        mstd = mad_std(k - model, ignore_nan=True)
        k[(k-model)>2.5*mstd] = np.nan
    model = split_fit(k)
    loc = 2.5
    k[y==0.] = np.nan
    k[d < loc+1.0] = np.nan
    model = split_fit(k)
    bad = np.isnan(k)
    good = ~bad
    # Large-scale plane fit of the residual illumination.
    fitter = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
                                       stdfunc=mad_std)
    D = fitter(Polynomial2D(1), xp[good], yp[good], (y/model)[good])
    # FittingWithOutlierRemoval's return order (model, mask) flipped
    # between astropy versions; the bare except absorbs either order.
    try:
        smodel = D[0](xp, yp)
        mask = D[1]
    except:
        smodel = D[1](xp, yp)
        mask = D[0]
    cor = model * smodel
    # Sanity check: biweight scatter of the residuals per amplifier half.
    bl, bml = biweight(k[:140]-cor[:140], calc_std=True)
    bl, bmr = biweight(k[140:]-cor[140:], calc_std=True)
    if (bml>0.03) or (bmr>0.03):
        # Fit failed; fall back to unity (or fixed offsets for 'orange').
        print("Cannot Make Correction")
        z = np.ones(y.shape)
        if channel == 'orange':
            z[:140] = 1.025
            z[140:] = 0.975
        return z, k
    return cor/biweight(cor), k
def get_pca_sky_residuals(data, ncomponents=5):
    """
    Fit a PCA basis to `data` and reconstruct it from the leading
    `ncomponents` components.

    Returns (pca, reconstruction) where `pca` is the fitted sklearn PCA
    object and `reconstruction` has the same shape as `data`.
    """
    pca = PCA(n_components=ncomponents)
    scores = pca.fit_transform(data)
    reconstruction = np.dot(scores, pca.components_)
    return pca, reconstruction
def get_residual_map(data, pca):
    """
    Project each column of `data` onto a previously-fitted PCA basis,
    ignoring non-finite entries, and return the reconstructed model.

    `pca` only needs a `components_` attribute of shape
    (ncomponents, data.shape[0]). Returns an array shaped like `data`.
    """
    res = 0. * data
    for col in np.arange(data.shape[1]):
        finite = np.isfinite(data[:, col])
        # Least-squares-like projection using only the finite rows.
        weights = np.dot(data[finite, col], pca.components_.T[finite])
        res[:, col] = np.dot(weights, pca.components_)
    return res
def get_arc_pca(arcskysub, good, mask, components=15):
    """
    Fit a PCA basis to the sky-subtracted arc residuals.

    NOTE: zeroes the masked wavelengths (~mask) and bad fibers (~good) of
    `arcskysub` in place before fitting, matching the original behaviour.

    Returns the fitted PCA object (components live in fiber space).
    """
    arcskysub[:, ~mask] = 0.
    arcskysub[~good] = 0.
    transposed = arcskysub.swapaxes(0, 1)
    pca, _ = get_pca_sky_residuals(transposed, ncomponents=components)
    return pca
def norm_spec_to_per_A(spec, wave):
    """
    Convert spectra from per-pixel to per-Angstrom units by dividing each
    pixel by its local wavelength step (the first step is replicated so
    the width array matches spec's shape).
    """
    pixel_width = np.diff(wave, axis=1)
    pixel_width = np.hstack([pixel_width[:, :1], pixel_width])
    return spec / pixel_width
def rectify(skysub, wave, def_wave):
    """
    Resample each fiber spectrum onto the common wavelength grid
    `def_wave` using linear interpolation; values outside a fiber's
    wavelength coverage are set to 0.

    Returns an array of shape (n_fibers, len(def_wave)).
    """
    rect = np.zeros((skysub.shape[0], len(def_wave)))
    for fiber in np.arange(skysub.shape[0]):
        resample = interp1d(wave[fiber], skysub[fiber], kind='linear',
                            bounds_error=False, fill_value=0.0)
        rect[fiber] = resample(def_wave)
    return rect
def get_apcor(Xc, Yc, d, y):
    """Monte-Carlo aperture correction for the fiber footprint.

    For 13 annuli of inner radius 0..5.5, estimates the fraction of 20000
    randomly drawn points covered by at least one fiber (fiber radius
    0.59/sqrt(3)), then corrects the flux sum within d < 5 for the
    uncovered area.  Uses the global NumPy RNG, so results vary run to run
    unless the caller seeds it.
    """
    fiber_radius = 0.59 / np.sqrt(3.)
    radii = np.linspace(0, 5.5, 13)
    coverage = np.zeros(len(radii))
    for i, r0 in enumerate(radii):
        # Draw random points in the annulus [r0, r0 + 0.5].
        theta = np.random.rand(20000) * 2. * np.pi
        r = np.random.rand(20000) * 0.5 + r0
        px = np.cos(theta) * r
        py = np.sin(theta) * r
        covered = np.zeros(r.shape, dtype=bool)
        # Only fibers whose centers can reach this annulus matter.
        near = (d > (r0 - fiber_radius)) * (d < (r0 + 0.5 + fiber_radius))
        for fx, fy in zip(Xc[near], Yc[near]):
            covered += np.sqrt((px - fx)**2 + (py - fy)**2) < fiber_radius
        coverage[i] = (covered > 0).sum() / 20000.
    c = np.interp(d, radii, coverage, right=0.0)
    inner = d < 5.
    return np.nansum(y[inner]) / np.nansum(y[inner] / c[inner])
def find_centroid(pos, y, fibarea, fit_param=None):
    """Fit a 2D Gaussian to the fiber fluxes to locate the source.

    Returns the fitted x/y centroid, a quality flag (peak of the model
    exceeds 5x the off-source scatter), the fitted model object, a
    normalized spatial profile scaled by the fiber area, and the
    Monte-Carlo aperture correction.  `fit_param` is accepted for
    signature compatibility with fix_centroid but is unused here.
    """
    d = np.sqrt(pos[:, 0]**2 + pos[:, 1]**2)
    median, std = biweight(y, calc_std=True)
    sel = d<3.
    # Remove a 25th-percentile pedestal before finding the peak fiber.
    y = y - np.nanpercentile(y, 25)
    ind = np.nanargmax(y[sel])
    xc, yc = (pos[sel][ind, 0], pos[sel][ind, 1])
    d = np.sqrt((pos[:, 0] - xc)**2 + (pos[:, 1] - yc)**2)
    # Scatter estimated from fibers > 3 away from the peak.
    median, std = biweight(y[d>3.], calc_std=True)
    a = y[sel][ind]
    G = Gaussian2D(x_mean=xc, y_mean=yc, amplitude=a)
    d = np.sqrt((pos[:, 0] - xc)**2 + (pos[:, 1] - yc)**2)
    # Fit only finite fibers within 2 units of the peak.
    sel = (d <= 2.0) * np.isfinite(y)
    fit = LevMarLSQFitter()(G, pos[sel, 0], pos[sel, 1], y[sel])
    # Geometric mean of model and data; NaNs (negative products) zeroed.
    new_model= np.sqrt(fit(pos[:, 0], pos[:, 1])*y)
    new_model[np.isnan(new_model)] = 0.0
    fitquality = False
    if np.nanmax(new_model) > 5 * std:
        fitquality = True
    # Normalize the model over a fine 10x10 grid (0.1 unit sampling).
    grid_x, grid_y = np.meshgrid(np.linspace(xc-5., xc+5., 101),
                                 np.linspace(yc-5., yc+5., 101))
    norm = np.sum(fit(grid_x.ravel(), grid_y.ravel())) * 0.1**2
    Xc = pos[:, 0] - xc
    Yc = pos[:, 1] - yc
    apcor = get_apcor(Xc, Yc, d, y)
    return fit.x_mean.value, fit.y_mean.value, fitquality, fit, new_model / norm * fibarea, apcor
def fix_centroid(pos, y, fibarea, fit_param=None):
    """Evaluate a Gaussian with fixed position/shape; fit only amplitude.

    `fit_param` must be (xc, yc, x_stddev, y_stddev, theta).  All Gaussian
    shape parameters are frozen; the amplitude is set from the biweight
    ratio of the data to the unit-amplitude model within 2 units of the
    center.  Return signature matches find_centroid.
    """
    median, std = biweight(y, calc_std=True)
    y = y - median
    xc, yc, xs, ys, th = fit_param
    G = Gaussian2D(x_mean=xc, y_mean=yc, x_stddev=xs, y_stddev=ys,
                   theta=th, amplitude=1.)
    # Freeze everything except the amplitude.
    G.x_mean.fixed = True
    G.y_mean.fixed = True
    G.x_stddev.fixed = True
    G.y_stddev.fixed = True
    G.theta.fixed = True
    d = np.sqrt((pos[:, 0] - xc)**2 + (pos[:, 1] - yc)**2)
    Xc = pos[:, 0] - xc
    Yc = pos[:, 1] - yc
    sel = (d < 2.0) * np.isfinite(y)
    M = G(pos[sel, 0], pos[sel, 1])
    # Amplitude = robust average data/model ratio near the center.
    norm = biweight(y[sel] / M)
    G.amplitude.value = norm
    fit = G
    # Geometric mean of model and data; NaNs (negative products) zeroed.
    new_model= np.sqrt(fit(pos[:, 0], pos[:, 1])*y)
    new_model[np.isnan(new_model)] = 0.0
    fitquality = False
    if np.nanmax(new_model) > 5 * std:
        fitquality = True
    # Normalize the model over a fine grid (0.1 unit sampling).
    grid_x, grid_y = np.meshgrid(np.linspace(xc-5., xc+5., 101),
                                 np.linspace(yc-5., yc+5., 101))
    norm = np.sum(fit(grid_x.ravel(), grid_y.ravel())) * 0.1**2
    apcor = get_apcor(Xc, Yc, d, y)
    return fit.x_mean.value, fit.y_mean.value, fitquality, fit, new_model / norm * fibarea, apcor
def get_standard(objname, commonwave):
    """Return a standard star's f_lambda interpolated onto `commonwave`.

    Reads the tabulated AB magnitudes for `objname` from the hard-coded
    standards directory, converts to f_nu and then f_lambda
    (erg/s/cm^2/A), and interpolates onto the requested wavelength grid.
    Returns an all-zero spectrum if the standard file is missing or
    unreadable (best-effort fallback, preserved from the original).
    """
    filename = op.join('/Users/gregz/cure/virus_early/virus_config/'
                       'standards',
                       'm' + objname.lower() + '.dat.txt')
    try:
        wave, standardmag = np.loadtxt(filename, usecols=(0, 1),
                                       unpack=True)
    except (OSError, ValueError):
        # Narrowed from a bare `except:`: only a missing file (OSError /
        # FileNotFoundError) or malformed table (ValueError) falls back to
        # zeros; programming errors now propagate instead of being hidden.
        return commonwave * 0.
    # AB magnitude -> f_nu, then f_nu -> f_lambda.
    fnu = 10**(0.4 * (-48.6 - standardmag))
    standard_flam = fnu * 2.99792e18 / wave**2
    return np.interp(commonwave, wave, standard_flam)
def get_script_path():
    """Return the absolute directory containing the running script."""
    script = op.realpath(sys.argv[0])
    return op.dirname(script)
def get_wave_cor(spec, ftf, wave, mastersky, masterwave):
    """Measure per-fiber wavelength positions of bright sky lines.

    Finds emission peaks in the master sky, then re-locates each peak in
    every flat-fielded fiber spectrum.  Returns two (280, Nlines) arrays:
    the measured peak wavelength per fiber/line and the
    continuum-subtracted flux interpolated at that wavelength (NaN where a
    line was not matched within 1.5 wavelength units).
    NOTE(review): the fiber count 280 is hard-coded — presumably the LRS2
    IFU size; confirm against the instrument configuration.
    """
    Y = spec / ftf
    mask, cont = identify_sky_pixels(mastersky)
    std = mad_std((mastersky-cont)[~mask])
    # Strong lines only (100 sigma) define the reference line list.
    loc, values = find_peaks((mastersky-cont), thresh=100*std)
    waves = np.interp(loc, np.arange(len(masterwave)), masterwave)
    Waves = np.zeros((280, len(waves))) * np.nan
    Norms = np.zeros((280, len(waves))) * np.nan
    for i in np.arange(Y.shape[0]):
        # Skip mostly-masked fibers (need > 200 finite pixels).
        if np.isfinite(Y[i]).sum() > 200:
            mask, cont = identify_sky_pixels(Y[i])
            std = mad_std((Y[i]-cont)[~mask])
            # Looser 25-sigma threshold for the per-fiber detection.
            loc, values = find_peaks((Y[i]-cont), thresh=25*std)
            wav = np.interp(loc, np.arange(Y.shape[1]), wave[i])
            ng = np.isfinite(Y[i]-cont)
            I = interp1d(wave[i][ng], (Y[i]-cont)[ng], kind='quadratic',
                         bounds_error=False, fill_value=0.0)
            for j in np.arange(len(waves)):
                if len(wav):
                    # Match the nearest detected peak within 1.5 units.
                    if np.min(np.abs(wav - waves[j])) < 1.5:
                        Waves[i, j] = wav[np.argmin(np.abs(wav - waves[j]))]
                        Norms[i, j] = I(Waves[i, j])
    return Waves, Norms
def extract_columns(model, chunk, mask=None):
    """Optimal-extraction amplitude per column: sum(data*w^2)/sum(w^3).

    `model` may be 1-D (one profile for all columns) or 2-D; `mask`
    defaults to the finite entries of `chunk`.
    """
    weights = model[:, np.newaxis] if model.ndim == 1 else model
    valid = np.isfinite(chunk) if mask is None else mask
    numerator = np.nansum(chunk * weights**2 * valid, axis=0)
    denominator = np.nansum(weights**3 * valid, axis=0)
    return numerator / denominator
def get_skyline_mask(sky_rect, mlen=3):
    """Return a sky_rect-shaped array with NaN at columns near bright sky lines.

    Detects 15-sigma peaks in the fiber-averaged sky and flags a window of
    +/- `mlen` columns around each; peaks within 10 columns of either edge
    are dropped so the window never wraps.
    """
    avg_sky = biweight(sky_rect, axis=0)
    mask, cont = identify_sky_pixels(avg_sky)
    noise = mad_std((avg_sky - cont)[~mask])
    peaks, _ = find_peaks((avg_sky - cont), thresh=15 * noise)
    peaks = np.array(np.round(peaks), dtype=int)
    peaks = peaks[(peaks > 10) * (peaks < (len(avg_sky) - 10))]
    flagged = sky_rect * 0.
    for offset in np.arange(-mlen, mlen + 1):
        flagged[:, peaks + offset] = np.nan
    return flagged
def get_extraction_model(skysub_rect, sky_rect, def_wave, nchunks=15,
                         func=find_centroid, fit_params=None):
    """Fit the source position/shape in wavelength chunks.

    Splits the rectified spectra into `nchunks` wavelength slices,
    collapses each with a biweight, and runs `func` (find_centroid or
    fix_centroid) on the collapsed profile.  Returns arrays
    [w, XC, YC, XS, YS, TH] for chunks with a good fit, the per-chunk
    normalized models, and the input arrays passed through.
    NOTE(review): relies on module-level globals `pos`, `fibarea` and
    `too_bright` rather than parameters — confirm before reusing this
    outside the script.
    """
    XC, YC, Nmod, w, XS, YS, TH = ([], [], [], [], [], [], [])
    for chunk, schunk, wi in zip(np.array_split(skysub_rect, nchunks, axis=1),
                                 np.array_split(sky_rect, nchunks, axis=1),
                                 np.array_split(def_wave, nchunks)):
        mod = biweight(chunk, axis=1)
        xc, yc, q, fit, nmod, apcor = func(pos, mod, fibarea, fit_param=fit_params)
        # For faint sources use the fitted model; for bright ones the data
        # itself is a cleaner profile.  Either way normalize and apply the
        # aperture correction.
        if not too_bright:
            model = nmod
            model = model / np.nansum(model) * apcor
        else:
            model = mod
            model = model / np.nansum(model) * apcor
        if q:
            Nmod.append(model)
            w.append(np.mean(wi))
            XC.append(xc)
            YC.append(yc)
            XS.append(fit.x_stddev.value)
            YS.append(fit.y_stddev.value)
            TH.append(fit.theta.value)
    return [np.array(xi) for xi in [w, XC, YC, XS, YS, TH]], Nmod, skysub_rect, sky_rect
def get_maxsn_y(skysub, sky, wave, def_wave, pos):
    """Find the fiber profile and wavelength of the maximum-signal column.

    Rectifies the sky-subtracted spectra, masks sky-line columns, smooths
    each fiber with a Gaussian kernel, sums each fiber with its neighbors
    (< 1.5 apart), and returns the column with the largest summed value
    together with its wavelength.
    """
    sky_rect = rectify(sky, wave, def_wave)
    skysub_rect = rectify(skysub, wave, def_wave)
    # Blank out columns contaminated by bright sky lines.
    skysub_rect[np.isnan(get_skyline_mask(sky_rect))] = np.nan
    kernel = Gaussian1DKernel(1.5)
    smooth = skysub_rect * 0.
    for fib in range(skysub_rect.shape[0]):
        smooth[fib] = convolve(skysub_rect[fib], kernel, preserve_nan=True)
    # Adjacency matrix: 1.0 where two fibers are closer than 1.5 units.
    fx = pos[:, 0]
    fy = pos[:, 1]
    D = np.sqrt((fx - fx[:, np.newaxis])**2 + (fy - fy[:, np.newaxis])**2)
    D = np.array(D < 1.5, dtype=float)
    T = smooth * 0.
    for col in range(smooth.shape[1]):
        T[:, col] = np.nansum(smooth[:, col] * D, axis=1)
    loc1, loc2 = np.unravel_index(np.nanargmax(T), T.shape)
    return T[:, loc2], def_wave[loc2]
def get_source(y, std, spec, pos, fibarea, newftf, wave, sky, check=False):
    """Locate the source and return its fit plus bookkeeping values.

    When `check` is True only the maximum S/N is returned (no fitting
    side effects are kept by the caller).  NOTE(review): uses
    module-level globals `xp`, `yp` and `args`; for a "too bright"
    source the master sky is refit using only the outermost fibers.
    """
    # =============================================================================
    # Bright Limit
    # =============================================================================
    too_bright = np.nanmax((y-1.)/std) > 100000.
    # =============================================================================
    # Get fit to collapsed spectra
    # =============================================================================
    xc, yc, quality_flag, fit, mod, apcor = find_centroid(pos, y, fibarea)
    d = np.sqrt((xp - xc)**2 + (yp -yc)**2)
    # Re-estimate the noise from the outer 2.5 units of the IFU.
    sel = d > (np.max(d) - 2.5)
    dum, std = biweight(y[sel], calc_std=True)
    SN = np.nanmax((y-1.)/std)
    if check:
        return SN
    args.log.info('Maximum S/N fiber: %0.2f' % SN)
    if too_bright:
        # Rebuild the master sky from source-free (edge) fibers only.
        sky, I = get_mastersky(spec, newftf, wave, sel=sel)
    return xc, yc, quality_flag, fit, mod, apcor, sky, sel, too_bright
def get_skysub(S, sky, err, d, masksky, channel):
    """Subtract a refined sky model from rectified science spectra.

    Three stages: (1) a biweight sky from fibers with d > 5, (2) a PCA
    model of sky-line residuals (channel-dependent component count and
    wavelength window), and (3) a smooth 2D residual surface built from
    sky fibers.  Returns (sky-subtracted spectra, total sky model, the
    clipped residual array used for stage 3).
    NOTE(review): rescales `err` in place by the measured residual/error
    ratio — callers see the modified array.
    """
    Sky = biweight(S[d > 5.], axis=0)
    sci = biweight(S[d < 1.5], axis=0)
    # Columns where the source clearly exceeds the sky are excluded from
    # the sky-line mask so source flux is not modeled as sky.
    masksci = sci > Sky*1.2
    masksky = masksky * (~masksci)
    skysub = S - Sky
    totsky = 0. * skysub
    totsky[:] = Sky
    # Smooth continuum version of the residuals with sky lines blanked,
    # then filled by Gaussian interpolation along both axes.
    intermediate = skysub * 1.
    intermediate[:, masksky] = np.nan
    G1 = Gaussian1DKernel(5.5)
    for k in np.arange(S.shape[0]):
        intermediate[k] = interpolate_replace_nans(intermediate[k], G1)
    for k in np.arange(S.shape[1]):
        intermediate[:, k] = interpolate_replace_nans(intermediate[:, k], G1)
    # Channel-dependent PCA setup: window and component count differ.
    if channel == 'farred':
        mask = masksky * True
        mask[1500:] = False
        mask[:50] = False
        pca = PCA(n_components=55)
    else:
        mask = masksky * True
        mask[1900:] = False
        mask[:50] = False
        pca = PCA(n_components=35)
    y = (skysub - intermediate)[:, mask]
    y[np.isnan(y)] = 0.0
    # Only fit the PCA if enough sky-line columns remain.
    if mask.sum() > 60:
        pca.fit_transform(y.swapaxes(0, 1))
        res = get_residual_map(skysub - intermediate, pca)
        res[:, ~masksky] = 0.0
        skysub[:] -= res
        totsky[:] += res
    # Stage 3: smooth residual surface from sky fibers (d > 2), with
    # outliers (percentile and 3-sigma clips) and sky-line columns removed.
    skyfibers = d > 2.
    dummy = skysub * 1.
    ll = np.nanpercentile(dummy, 5, axis=0)
    hh = np.nanpercentile(dummy, 95, axis=0)
    bl, norm = biweight(skysub[:, 200:-200] / err[:, 200:-200], calc_std=True)
    err *= norm
    dummy[dummy > 3.*err] = np.nan
    dummy[(dummy<ll) + (dummy>hh)] = np.nan
    dummy[~skyfibers, :] = np.nan
    dummy[:, masksky] = np.nan
    dummy1 = dummy * 1.
    G = Gaussian2DKernel(7.)
    dummy = convolve(dummy, G, boundary='extend')
    # Iterate until the Gaussian interpolation has filled every NaN.
    while np.isnan(dummy).sum():
        dummy = interpolate_replace_nans(dummy, G)
    skysub[:] -= dummy
    totsky[:] += dummy
    totsky[:, ~masksky] = 0.0
    return skysub, totsky, dummy1
# -----------------------------------------------------------------------------
# Script entry: command-line parsing and logging setup.
# -----------------------------------------------------------------------------
warnings.filterwarnings("ignore")
DIRNAME = get_script_path()
parser = ap.ArgumentParser(add_help=True)
parser.add_argument("-d", "--directory",
                    help='''base directory for reductions''',
                    type=str, default="")
parser.add_argument("-c", "--caldirectory",
                    help='''cal directory for reductions''',
                    type=str, default="/work/03946/hetdex/maverick/LRS2/CALS")
parser.add_argument("multiname",
                    help='''e.g., multi_20170126_0000011_exp02_orange.fits''',
                    type=str)
parser.add_argument("-dw", "--delta_wavelength",
                    help='''Delta Wavelength in linear units for output''',
                    default=None, type=float)
parser.add_argument("--fit_params", default=None,
                    help='''e.g., "0.0, 0.0, 1.0, 1.0"''',
                    type=str)
# args=None makes parse_args read sys.argv; the commented list below is a
# developer override for interactive runs.
args = None
#args = ['dummy', 'multi_20181116_0000010_exp01_red.fits',
#        '-c', '/Users/gregz/cure/panacea', '-d', '/Users/gregz/cure/panacea']
args = parser.parse_args(args=args)
args.log = setup_logging('lrs2_experiment')
# Map LRS2 channel names to spectrograph/arm codes used in config files.
channel_dict = {'uv': 'BL', 'orange': 'BR', 'red': 'RL', 'farred': 'RR'}
# =============================================================================
# Load Data (images and spectra)
# =============================================================================
# Date and channel are parsed from the multi* filename convention.
date = args.multiname.split('_')[1]
channel = args.multiname.split('_')[-1][:-5]
calfile = op.join(args.caldirectory, 'cal_%s_%s.fits' % (date, channel))
m = fits.open(op.join(args.directory, args.multiname))
c = fits.open(calfile)
pos = m['fiber_positions'].data
xp, yp = (pos[:, 0], pos[:, 1])
def_wave = m['extracted_spectrum'].data[0]
trace = c['trace'].data
wave = c['wavelength'].data
fltimage = c['masterFlat'].data
arcimage = c['masterarc'].data
image = m['image'].data
cosmics = m['cosmics'].data
fltspec = get_spectra(fltimage, trace)
arcspec = get_spectra(arcimage, trace)
spec, chi2, errspec = get_spectra(image, trace, array_mod=fltimage)
# Convert every extracted spectrum from counts/pixel to counts/Angstrom.
fltspec, arcspec, spec, errspec = [norm_spec_to_per_A(X, wave)
                                   for X in [fltspec, arcspec, spec, errspec]]
# Area of a single hexagonal fiber (side-related constant 0.59).
fibarea = 1. / 2. * np.sqrt(3.) * 0.59**2
# =============================================================================
# Masking high chi2 values (cosmics and defects that don't flatfield)
# =============================================================================
mask = chi2 > 5
N = mask.shape[0] * mask.shape[1] * 1.
args.log.info('%s: %0.3f masked for chi2' % (args.multiname, (mask.sum() / N)))
spec[mask] = np.nan
if channel == 'uv':
    # Known bad columns in the uv channel.
    spec[:, 208:211] = np.nan
# =============================================================================
# Getting Fiber to Fiber
# =============================================================================
ftf = get_fiber_to_fiber(fltspec, wave)
goodfibers = biweight(ftf, axis=1) > 0.5
# =============================================================================
# Reducing Arc
# =============================================================================
# Build a PCA basis for sky-subtraction residuals from the arc exposure,
# after correcting amplifier-to-amplifier throughput offsets.
arcsky, J = get_mastersky(arcspec, ftf, wave)
yarc = biweight(arcspec / ftf / arcsky, axis=1)
arccor, karc = correct_amplifier_offsets(yarc, xp, yp, channel)
arcftf = ftf * arccor[:, np.newaxis]
arcsky, J = get_mastersky(arcspec, arcftf, wave)
arcskysub = arcspec / arcftf - arcsky
arcskysub_rect = rectify(arcskysub, wave, def_wave)
mask, cont = identify_sky_pixels(J(def_wave))
pca = get_arc_pca(arcskysub_rect, goodfibers, mask, components=95)
# =============================================================================
# Reducing Science
# =============================================================================
# Same amplifier correction for the science frame, then pick the better of
# two source estimates: an emission-line profile (ynew, from the max-S/N
# column) or a continuum profile (yold, collapsed over wavelength).
sky, I = get_mastersky(spec, ftf, wave)
y = biweight((spec / ftf / sky)[:, 400:600], axis=1)
cor, keep = correct_amplifier_offsets(y, xp, yp, channel)
newftf = ftf * cor[:, np.newaxis]
good = biweight(newftf, axis=1) > 0.5
spec[~good] = np.nan
sel = np.isfinite(keep)
sky, I = get_mastersky(spec, newftf, wave, sel=sel)
iskysub = spec / newftf - sky
ynew, wnew = get_maxsn_y(iskysub, sky, wave, def_wave, pos)
ynew += 1.
yold = biweight(spec / newftf / sky, axis=1)
stdnew = mad_std(ynew[sel])
stdold = mad_std(yold[sel])
# The continuum estimate has no preferred wavelength; use the grid center.
wold = def_wave[int(len(def_wave)/2.)]
sel = (np.abs(y - 1.) < 3. * stdold) * good
SN = []
cnt = 0
for y, std in zip([ynew, yold], [stdnew, stdold]):
    sn = get_source(y, std, spec, pos, fibarea, newftf, wave, sky, check=True)
    if cnt == 0:
        args.log.info('Emission SN: %0.2f' % sn)
    if cnt == 1:
        args.log.info('Continuum SN: %0.2f' % sn)
    SN.append(sn)
    cnt += 1
# Keep whichever estimate yielded the higher S/N and fit it for real.
loc = np.argmax(SN)
y = [ynew, yold][loc]
std = [stdnew, stdold][loc]
w = [wnew, wold][loc]
xc, yc, quality_flag, fit, mod, apcor, sky, sel, too_bright = get_source(y, std, spec, pos, fibarea, newftf, wave, sky)
# =============================================================================
# Get Wavelength Adjustment
# =============================================================================
# For faint sources, refine the per-fiber wavelength solution using sky-line
# positions (skipped for bright sources where sky lines are contaminated).
if not too_bright:
    Waves, Norms = get_wave_cor(spec, newftf, wave, I(def_wave), def_wave)
    shift = biweight(Waves - biweight(Waves, axis=0), axis=1)
    shift[np.isnan(shift)] = 0.0
    args.log.info('Wavelength Shift: %0.2f' % biweight(shift))
    wave = wave - shift[:, np.newaxis]
    adj = biweight(Norms / biweight(Norms, axis=0), axis=1)
    #newftf = newftf * adj[:, np.newaxis]
    sky, I = get_mastersky(spec, newftf, wave, sel=sel)
# =============================================================================
# Get PCA residual sky
# =============================================================================
# Rectify everything onto the common wavelength grid; keep untouched copies
# of the sky-subtracted and sky spectra for later extraction/comparison.
skysub = spec / newftf - sky
skysub_rect = rectify(skysub, wave, def_wave)
spec_rect = rectify(spec / newftf, wave, def_wave)
error_rect = rectify(errspec / newftf, wave, def_wave)
sky_rect = rectify(sky, wave, def_wave)
skysub_rect_orig = skysub_rect * 1.
sky_rect_orig = sky_rect * 1.
# =============================================================================
# Get Extraction Model
# =============================================================================
# Build the wavelength-dependent source position from the DAR (differential
# atmospheric refraction) table, offset to the measured centroid, then
# compute the aperture correction and per-wavelength extraction weights.
xs = fit.x_stddev.value
ys = fit.y_stddev.value
th = fit.theta.value
darfile = op.join(DIRNAME, 'lrs2_config/dar_%s.dat' % channel_dict[channel])
T = Table.read(darfile, format='ascii.fixed_width_two_line')
xdar = np.interp(w, T['wave'], T['x_0'])
ydar = np.interp(w, T['wave'], T['y_0'])
if args.fit_params is not None:
    # User override: "xc, yc, xs, ys" supplied on the command line.
    fit_p_list = [float(i.replace(' ', '')) for i in args.fit_params.split(',')]
    xc, yc, xs, ys = fit_p_list
xoff = biweight(xc - xdar)
yoff = biweight(yc - ydar)
fit_params = [np.interp(def_wave, T['wave'], T['x_0']+xoff),
              np.interp(def_wave, T['wave'], T['y_0']+yoff),
              xs, ys, th]
# Sample the aperture correction at 25 wavelengths, then fit a cubic.
N = int(len(def_wave) / 25)
inds = np.arange(int(N/2), len(def_wave), N)
apcor = inds * 0.
W = inds * 0.
for j, i in enumerate(inds):
    W[j] = def_wave[i]
    xc = fit_params[0][i]
    yc = fit_params[1][i]
    d = np.sqrt((pos[:, 0] - xc)**2 + (pos[:, 1] - yc)**2)
    Xc = pos[:, 0] - xc
    Yc = pos[:, 1] - yc
    y = Gaussian2D(x_mean=xc, y_mean=yc, x_stddev=fit_params[2],
                   y_stddev=fit_params[3], theta=fit_params[4])(pos[:, 0], pos[:, 1])
    apcor[j] = get_apcor(Xc, Yc, d, y)
try:
    apcor = np.polyval(np.polyfit(W, apcor, 3), def_wave)
# NOTE(review): bare except — should be narrowed (np.polyfit raises
# LinAlgError/ValueError); left as-is since this is a doc-only pass.
except:
    args.log.warning('Aperture Correction failed due to modeling issue')
    apcor = np.ones(def_wave.shape)
args.log.info('%s: %0.2f %0.2f %0.2f %0.2f' % (args.multiname, fit_params[0][1032], fit_params[1][1032], fit_params[2],
                                               fit_params[3]))
# Per-wavelength fiber weights from the Gaussian model at the DAR-corrected
# position, normalized to unity over the fibers.
weight = skysub * 0.
for i in np.arange(skysub.shape[1]):
    xc = fit_params[0][i]
    yc = fit_params[1][i]
    y = Gaussian2D(x_mean=xc, y_mean=yc, x_stddev=fit_params[2],
                   y_stddev=fit_params[3], theta=fit_params[4])(pos[:, 0], pos[:, 1])
    weight[:, i] = y / np.sum(y)
d = np.sqrt((pos[:, 0] - fit_params[0][int(len(def_wave)/2)])**2 +
            (pos[:, 1] - fit_params[1][int(len(def_wave)/2)])**2)
skyline_mask = get_skyline_mask(sky_rect, mlen=5)
skysub_rect, totsky, dummy1 = get_skysub(spec_rect, sky, error_rect, d, np.isnan(skyline_mask.sum(axis=0)), channel)
spec_rect = extract_columns(weight, skysub_rect_orig)
# =============================================================================
# Get Extraction
# =============================================================================
# Weighted (optimal) extraction of source, sky and error spectra, flux
# calibration, and FITS output of the spectrum plus diagnostic images.
mask = np.isfinite(skysub_rect)
total_cal = (m['extracted_spectrum'].data[-1] /
             m[0].header['EXPTIME'] / m[0].header['MILLUM'] /
             m[0].header['THROUGHP'])
spectrum = np.nansum(mask * weight * skysub_rect, axis=0) / np.nansum(mask * weight**2, axis=0)
error = np.sqrt(np.nansum(mask * weight * error_rect**2, axis=0) / np.nansum(mask * weight**2, axis=0))
calibrated = spectrum * total_cal / apcor
mask = np.isfinite(sky_rect)
spectrum_sky = np.nansum(mask * weight * sky_rect, axis=0) / np.nansum(mask * weight**2, axis=0)
calibrated_sky = spectrum_sky * total_cal
# Straight sum over all fibers as an alternative extraction.
spectrum_sum = np.nansum(skysub_rect, axis=0)
calibrated_all = spectrum_sum * total_cal
calibrated_ext = spec_rect * total_cal
calibrated_err = error * total_cal /apcor
fits.PrimaryHDU([def_wave, calibrated, calibrated_sky, calibrated_all, calibrated_ext, spectrum_sum,
                 calibrated_err], header=m[0].header).writeto(
                 args.multiname.replace('multi', 'spectrum'), overwrite=True)
fits.PrimaryHDU(np.array(skysub_rect, dtype='float32'), header=m[0].header).writeto(args.multiname.replace('multi', 'skysub'),
                                                                                    overwrite=True)
fits.PrimaryHDU(np.array(mask, dtype='float32'), header=m[0].header).writeto(args.multiname.replace('multi', 'mask'),
                                                                             overwrite=True)
fits.PrimaryHDU(np.array(weight, dtype='float32'), header=m[0].header).writeto(args.multiname.replace('multi', 'weight'),
                                                                              overwrite=True)
"numpy.nanargmax",
"numpy.sqrt",
"numpy.nanpercentile",
"numpy.random.rand",
"numpy.hstack",
"numpy.polyfit",
"input_utils.setup_logging",
"math_utils.biweight",
"scipy.interpolate.interp1d",
"numpy.argsort",
"astropy.convolution.Gaussian1DKernel",
"numpy.array_split",
"numpy.array",
"nump... | [((15661, 15694), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (15684, 15694), False, 'import warnings\n'), ((15734, 15766), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'add_help': '(True)'}), '(add_help=True)\n', (15751, 15766), True, 'import argparse as ap\n'), ((16751, 16783), 'input_utils.setup_logging', 'setup_logging', (['"""lrs2_experiment"""'], {}), "('lrs2_experiment')\n", (16764, 16783), False, 'from input_utils import setup_logging\n'), ((17143, 17205), 'os.path.join', 'op.join', (['args.caldirectory', "('cal_%s_%s.fits' % (date, channel))"], {}), "(args.caldirectory, 'cal_%s_%s.fits' % (date, channel))\n", (17150, 17205), True, 'import os.path as op\n'), ((17265, 17283), 'astropy.io.fits.open', 'fits.open', (['calfile'], {}), '(calfile)\n', (17274, 17283), False, 'from astropy.io import fits\n'), ((17568, 17596), 'fiber_utils_remedy.get_spectra', 'get_spectra', (['fltimage', 'trace'], {}), '(fltimage, trace)\n', (17579, 17596), False, 'from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra\n'), ((17607, 17635), 'fiber_utils_remedy.get_spectra', 'get_spectra', (['arcimage', 'trace'], {}), '(arcimage, trace)\n', (17618, 17635), False, 'from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra\n'), ((17658, 17703), 'fiber_utils_remedy.get_spectra', 'get_spectra', (['image', 'trace'], {'array_mod': 'fltimage'}), '(image, trace, array_mod=fltimage)\n', (17669, 17703), False, 'from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra\n'), ((18818, 18858), 'math_utils.biweight', 'biweight', (['(arcspec / ftf / arcsky)'], {'axis': '(1)'}), '(arcspec / ftf / arcsky, axis=1)\n', (18826, 18858), False, 'from math_utils import biweight\n'), ((19436, 19484), 'math_utils.biweight', 'biweight', (['(spec / ftf / sky)[:, 400:600]'], {'axis': '(1)'}), '((spec / ftf / sky)[:, 400:600], axis=1)\n', (19444, 19484), False, 'from math_utils import 
biweight\n'), ((19642, 19659), 'numpy.isfinite', 'np.isfinite', (['keep'], {}), '(keep)\n', (19653, 19659), True, 'import numpy as np\n'), ((19820, 19857), 'math_utils.biweight', 'biweight', (['(spec / newftf / sky)'], {'axis': '(1)'}), '(spec / newftf / sky, axis=1)\n', (19828, 19857), False, 'from math_utils import biweight\n'), ((19867, 19885), 'astropy.stats.mad_std', 'mad_std', (['ynew[sel]'], {}), '(ynew[sel])\n', (19874, 19885), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats, mad_std\n'), ((19895, 19913), 'astropy.stats.mad_std', 'mad_std', (['yold[sel]'], {}), '(yold[sel])\n', (19902, 19913), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats, mad_std\n'), ((20314, 20327), 'numpy.argmax', 'np.argmax', (['SN'], {}), '(SN)\n', (20323, 20327), True, 'import numpy as np\n'), ((21898, 21964), 'os.path.join', 'op.join', (['DIRNAME', "('lrs2_config/dar_%s.dat' % channel_dict[channel])"], {}), "(DIRNAME, 'lrs2_config/dar_%s.dat' % channel_dict[channel])\n", (21905, 21964), True, 'import os.path as op\n'), ((21969, 22025), 'astropy.table.Table.read', 'Table.read', (['darfile'], {'format': '"""ascii.fixed_width_two_line"""'}), "(darfile, format='ascii.fixed_width_two_line')\n", (21979, 22025), False, 'from astropy.table import Table\n'), ((22033, 22066), 'numpy.interp', 'np.interp', (['w', "T['wave']", "T['x_0']"], {}), "(w, T['wave'], T['x_0'])\n", (22042, 22066), True, 'import numpy as np\n'), ((22074, 22107), 'numpy.interp', 'np.interp', (['w', "T['wave']", "T['y_0']"], {}), "(w, T['wave'], T['y_0'])\n", (22083, 22107), True, 'import numpy as np\n'), ((22260, 22279), 'math_utils.biweight', 'biweight', (['(xc - xdar)'], {}), '(xc - xdar)\n', (22268, 22279), False, 'from math_utils import biweight\n'), ((22287, 22306), 'math_utils.biweight', 'biweight', (['(yc - ydar)'], {}), '(yc - ydar)\n', (22295, 22306), False, 'from math_utils import biweight\n'), ((23358, 23384), 'numpy.arange', 'np.arange', (['skysub.shape[1]'], {}), 
'(skysub.shape[1])\n', (23367, 23384), True, 'import numpy as np\n'), ((24168, 24192), 'numpy.isfinite', 'np.isfinite', (['skysub_rect'], {}), '(skysub_rect)\n', (24179, 24192), True, 'import numpy as np\n'), ((24593, 24614), 'numpy.isfinite', 'np.isfinite', (['sky_rect'], {}), '(sky_rect)\n', (24604, 24614), True, 'import numpy as np\n'), ((24769, 24799), 'numpy.nansum', 'np.nansum', (['skysub_rect'], {'axis': '(0)'}), '(skysub_rect, axis=0)\n', (24778, 24799), True, 'import numpy as np\n'), ((957, 971), 'numpy.argsort', 'np.argsort', (['aw'], {}), '(aw)\n', (967, 971), True, 'import numpy as np\n'), ((985, 997), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (994, 997), True, 'import numpy as np\n'), ((1597, 1611), 'numpy.argsort', 'np.argsort', (['aw'], {}), '(aw)\n', (1607, 1611), True, 'import numpy as np\n'), ((1765, 1850), 'scipy.interpolate.interp1d', 'interp1d', (['nw', 'ns'], {'kind': '"""quadratic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(nw, ns, kind='quadratic', bounds_error=False, fill_value='extrapolate'\n )\n", (1773, 1850), False, 'from scipy.interpolate import interp1d, griddata\n'), ((1879, 1903), 'numpy.arange', 'np.arange', (['spec.shape[0]'], {}), '(spec.shape[0])\n', (1888, 1903), True, 'import numpy as np\n'), ((2100, 2124), 'numpy.arange', 'np.arange', (['spec.shape[0]'], {}), '(spec.shape[0])\n', (2109, 2124), True, 'import numpy as np\n'), ((2158, 2182), 'numpy.arange', 'np.arange', (['spec.shape[0]'], {}), '(spec.shape[0])\n', (2167, 2182), True, 'import numpy as np\n'), ((2910, 2950), 'numpy.sqrt', 'np.sqrt', (['((xp - xc) ** 2 + (yp - yc) ** 2)'], {}), '((xp - xc) ** 2 + (yp - yc) ** 2)\n', (2917, 2950), True, 'import numpy as np\n'), ((3240, 3254), 'numpy.arange', 'np.arange', (['(5.0)'], {}), '(5.0)\n', (3249, 3254), True, 'import numpy as np\n'), ((3498, 3509), 'numpy.isnan', 'np.isnan', (['k'], {}), '(k)\n', (3506, 3509), True, 'import numpy as np\n'), ((3882, 3926), 'math_utils.biweight', 
'biweight', (['(k[:140] - cor[:140])'], {'calc_std': '(True)'}), '(k[:140] - cor[:140], calc_std=True)\n', (3890, 3926), False, 'from math_utils import biweight\n'), ((3939, 3983), 'math_utils.biweight', 'biweight', (['(k[140:] - cor[140:])'], {'calc_std': '(True)'}), '(k[140:] - cor[140:], calc_std=True)\n', (3947, 3983), False, 'from math_utils import biweight\n'), ((4284, 4313), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'ncomponents'}), '(n_components=ncomponents)\n', (4287, 4313), False, 'from sklearn.decomposition import PCA\n'), ((4354, 4380), 'numpy.dot', 'np.dot', (['H', 'pca.components_'], {}), '(H, pca.components_)\n', (4360, 4380), True, 'import numpy as np\n'), ((4466, 4490), 'numpy.arange', 'np.arange', (['data.shape[1]'], {}), '(data.shape[1])\n', (4475, 4490), True, 'import numpy as np\n'), ((4941, 4962), 'numpy.diff', 'np.diff', (['wave'], {'axis': '(1)'}), '(wave, axis=1)\n', (4948, 4962), True, 'import numpy as np\n'), ((4972, 4999), 'numpy.hstack', 'np.hstack', (['[dw[:, 0:1], dw]'], {}), '([dw[:, 0:1], dw])\n', (4981, 4999), True, 'import numpy as np\n'), ((5133, 5159), 'numpy.arange', 'np.arange', (['skysub.shape[0]'], {}), '(skysub.shape[0])\n', (5142, 5159), True, 'import numpy as np\n'), ((5368, 5391), 'numpy.linspace', 'np.linspace', (['(0)', '(5.5)', '(13)'], {}), '(0, 5.5, 13)\n', (5379, 5391), True, 'import numpy as np\n'), ((5932, 5961), 'numpy.interp', 'np.interp', (['d', 'x', 'A'], {'right': '(0.0)'}), '(d, x, A, right=0.0)\n', (5941, 5961), True, 'import numpy as np\n'), ((6100, 6140), 'numpy.sqrt', 'np.sqrt', (['(pos[:, 0] ** 2 + pos[:, 1] ** 2)'], {}), '(pos[:, 0] ** 2 + pos[:, 1] ** 2)\n', (6107, 6140), True, 'import numpy as np\n'), ((6155, 6181), 'math_utils.biweight', 'biweight', (['y'], {'calc_std': '(True)'}), '(y, calc_std=True)\n', (6163, 6181), False, 'from math_utils import biweight\n'), ((6243, 6263), 'numpy.nanargmax', 'np.nanargmax', (['y[sel]'], {}), '(y[sel])\n', (6255, 6263), True, 'import numpy as 
np\n'), ((6322, 6376), 'numpy.sqrt', 'np.sqrt', (['((pos[:, 0] - xc) ** 2 + (pos[:, 1] - yc) ** 2)'], {}), '((pos[:, 0] - xc) ** 2 + (pos[:, 1] - yc) ** 2)\n', (6329, 6376), True, 'import numpy as np\n'), ((6391, 6426), 'math_utils.biweight', 'biweight', (['y[d > 3.0]'], {'calc_std': '(True)'}), '(y[d > 3.0], calc_std=True)\n', (6399, 6426), False, 'from math_utils import biweight\n'), ((6452, 6497), 'astropy.modeling.models.Gaussian2D', 'Gaussian2D', ([], {'x_mean': 'xc', 'y_mean': 'yc', 'amplitude': 'a'}), '(x_mean=xc, y_mean=yc, amplitude=a)\n', (6462, 6497), False, 'from astropy.modeling.models import Polynomial2D, Gaussian2D\n'), ((6506, 6560), 'numpy.sqrt', 'np.sqrt', (['((pos[:, 0] - xc) ** 2 + (pos[:, 1] - yc) ** 2)'], {}), '((pos[:, 0] - xc) ** 2 + (pos[:, 1] - yc) ** 2)\n', (6513, 6560), True, 'import numpy as np\n'), ((7288, 7314), 'math_utils.biweight', 'biweight', (['y'], {'calc_std': '(True)'}), '(y, calc_std=True)\n', (7296, 7314), False, 'from math_utils import biweight\n'), ((7377, 7464), 'astropy.modeling.models.Gaussian2D', 'Gaussian2D', ([], {'x_mean': 'xc', 'y_mean': 'yc', 'x_stddev': 'xs', 'y_stddev': 'ys', 'theta': 'th', 'amplitude': '(1.0)'}), '(x_mean=xc, y_mean=yc, x_stddev=xs, y_stddev=ys, theta=th,\n amplitude=1.0)\n', (7387, 7464), False, 'from astropy.modeling.models import Polynomial2D, Gaussian2D\n'), ((7620, 7674), 'numpy.sqrt', 'np.sqrt', (['((pos[:, 0] - xc) ** 2 + (pos[:, 1] - yc) ** 2)'], {}), '((pos[:, 0] - xc) ** 2 + (pos[:, 1] - yc) ** 2)\n', (7627, 7674), True, 'import numpy as np\n'), ((7803, 7823), 'math_utils.biweight', 'biweight', (['(y[sel] / M)'], {}), '(y[sel] / M)\n', (7811, 7823), False, 'from math_utils import biweight\n'), ((9131, 9161), 'fiber_utils_remedy.identify_sky_pixels', 'identify_sky_pixels', (['mastersky'], {}), '(mastersky)\n', (9150, 9161), False, 'from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra\n'), ((9172, 9206), 'astropy.stats.mad_std', 'mad_std', (['(mastersky - 
cont)[~mask]'], {}), '((mastersky - cont)[~mask])\n', (9179, 9206), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats, mad_std\n'), ((9223, 9269), 'fiber_utils_remedy.find_peaks', 'find_peaks', (['(mastersky - cont)'], {'thresh': '(100 * std)'}), '(mastersky - cont, thresh=100 * std)\n', (9233, 9269), False, 'from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra\n'), ((9446, 9467), 'numpy.arange', 'np.arange', (['Y.shape[0]'], {}), '(Y.shape[0])\n', (9455, 9467), True, 'import numpy as np\n'), ((10377, 10421), 'numpy.nansum', 'np.nansum', (['(chunk * model ** 2 * mask)'], {'axis': '(0)'}), '(chunk * model ** 2 * mask, axis=0)\n', (10386, 10421), True, 'import numpy as np\n'), ((10431, 10467), 'numpy.nansum', 'np.nansum', (['(model ** 3 * mask)'], {'axis': '(0)'}), '(model ** 3 * mask, axis=0)\n', (10440, 10467), True, 'import numpy as np\n'), ((10562, 10588), 'math_utils.biweight', 'biweight', (['sky_rect'], {'axis': '(0)'}), '(sky_rect, axis=0)\n', (10570, 10588), False, 'from math_utils import biweight\n'), ((10606, 10636), 'fiber_utils_remedy.identify_sky_pixels', 'identify_sky_pixels', (['quick_sky'], {}), '(quick_sky)\n', (10625, 10636), False, 'from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra\n'), ((10651, 10685), 'astropy.stats.mad_std', 'mad_std', (['(quick_sky - cont)[~mask]'], {}), '((quick_sky - cont)[~mask])\n', (10658, 10685), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats, mad_std\n'), ((10702, 10751), 'fiber_utils_remedy.find_peaks', 'find_peaks', (['(quick_sky - cont)'], {'thresh': '(15 * std_sky)'}), '(quick_sky - cont, thresh=15 * std_sky)\n', (10712, 10751), False, 'from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra\n'), ((10887, 10913), 'numpy.arange', 'np.arange', (['(-mlen)', '(mlen + 1)'], {}), '(-mlen, mlen + 1)\n', (10896, 10913), True, 'import numpy as np\n'), ((12299, 12320), 'astropy.convolution.Gaussian1DKernel', 
'Gaussian1DKernel', (['(1.5)'], {}), '(1.5)\n', (12315, 12320), False, 'from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans\n'), ((12364, 12395), 'numpy.arange', 'np.arange', (['skysub_rect.shape[0]'], {}), '(skysub_rect.shape[0])\n', (12373, 12395), True, 'import numpy as np\n'), ((12509, 12575), 'numpy.sqrt', 'np.sqrt', (['((x - x[:, np.newaxis]) ** 2 + (y - y[:, np.newaxis]) ** 2)'], {}), '((x - x[:, np.newaxis]) ** 2 + (y - y[:, np.newaxis]) ** 2)\n', (12516, 12575), True, 'import numpy as np\n'), ((12585, 12606), 'numpy.arange', 'np.arange', (['D.shape[0]'], {}), '(D.shape[0])\n', (12594, 12606), True, 'import numpy as np\n'), ((12696, 12722), 'numpy.arange', 'np.arange', (['smooth.shape[1]'], {}), '(smooth.shape[1])\n', (12705, 12722), True, 'import numpy as np\n'), ((13480, 13520), 'numpy.sqrt', 'np.sqrt', (['((xp - xc) ** 2 + (yp - yc) ** 2)'], {}), '((xp - xc) ** 2 + (yp - yc) ** 2)\n', (13487, 13520), True, 'import numpy as np\n'), ((13563, 13594), 'math_utils.biweight', 'biweight', (['y[sel]'], {'calc_std': '(True)'}), '(y[sel], calc_std=True)\n', (13571, 13594), False, 'from math_utils import biweight\n'), ((13604, 13630), 'numpy.nanmax', 'np.nanmax', (['((y - 1.0) / std)'], {}), '((y - 1.0) / std)\n', (13613, 13630), True, 'import numpy as np\n'), ((13920, 13948), 'math_utils.biweight', 'biweight', (['S[d > 5.0]'], {'axis': '(0)'}), '(S[d > 5.0], axis=0)\n', (13928, 13948), False, 'from math_utils import biweight\n'), ((13958, 13986), 'math_utils.biweight', 'biweight', (['S[d < 1.5]'], {'axis': '(0)'}), '(S[d < 1.5], axis=0)\n', (13966, 13986), False, 'from math_utils import biweight\n'), ((14194, 14215), 'astropy.convolution.Gaussian1DKernel', 'Gaussian1DKernel', (['(5.5)'], {}), '(5.5)\n', (14210, 14215), False, 'from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans\n'), ((14229, 14250), 'numpy.arange', 'np.arange', (['S.shape[0]'], {}), '(S.shape[0])\n', (14238, 14250), True, 'import 
numpy as np\n'), ((14337, 14358), 'numpy.arange', 'np.arange', (['S.shape[1]'], {}), '(S.shape[1])\n', (14346, 14358), True, 'import numpy as np\n'), ((15044, 15078), 'numpy.nanpercentile', 'np.nanpercentile', (['dummy', '(5)'], {'axis': '(0)'}), '(dummy, 5, axis=0)\n', (15060, 15078), True, 'import numpy as np\n'), ((15088, 15123), 'numpy.nanpercentile', 'np.nanpercentile', (['dummy', '(95)'], {'axis': '(0)'}), '(dummy, 95, axis=0)\n', (15104, 15123), True, 'import numpy as np\n'), ((15139, 15202), 'math_utils.biweight', 'biweight', (['(skysub[:, 200:-200] / err[:, 200:-200])'], {'calc_std': '(True)'}), '(skysub[:, 200:-200] / err[:, 200:-200], calc_std=True)\n', (15147, 15202), False, 'from math_utils import biweight\n'), ((15395, 15416), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['(7.0)'], {}), '(7.0)\n', (15411, 15416), False, 'from astropy.convolution import Gaussian2DKernel\n'), ((15428, 15465), 'astropy.convolution.convolve', 'convolve', (['dummy', 'G'], {'boundary': '"""extend"""'}), "(dummy, G, boundary='extend')\n", (15436, 15465), False, 'from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans\n'), ((17220, 17259), 'os.path.join', 'op.join', (['args.directory', 'args.multiname'], {}), '(args.directory, args.multiname)\n', (17227, 17259), True, 'import os.path as op\n'), ((18561, 18582), 'math_utils.biweight', 'biweight', (['ftf'], {'axis': '(1)'}), '(ftf, axis=1)\n', (18569, 18582), False, 'from math_utils import biweight\n'), ((19584, 19608), 'math_utils.biweight', 'biweight', (['newftf'], {'axis': '(1)'}), '(newftf, axis=1)\n', (19592, 19608), False, 'from math_utils import biweight\n'), ((22321, 22368), 'numpy.interp', 'np.interp', (['def_wave', "T['wave']", "(T['x_0'] + xoff)"], {}), "(def_wave, T['wave'], T['x_0'] + xoff)\n", (22330, 22368), True, 'import numpy as np\n'), ((22382, 22429), 'numpy.interp', 'np.interp', (['def_wave', "T['wave']", "(T['y_0'] + yoff)"], {}), "(def_wave, T['wave'], T['y_0'] 
+ yoff)\n", (22391, 22429), True, 'import numpy as np\n'), ((22673, 22727), 'numpy.sqrt', 'np.sqrt', (['((pos[:, 0] - xc) ** 2 + (pos[:, 1] - yc) ** 2)'], {}), '((pos[:, 0] - xc) ** 2 + (pos[:, 1] - yc) ** 2)\n', (22680, 22727), True, 'import numpy as np\n'), ((24355, 24401), 'numpy.nansum', 'np.nansum', (['(mask * weight * skysub_rect)'], {'axis': '(0)'}), '(mask * weight * skysub_rect, axis=0)\n', (24364, 24401), True, 'import numpy as np\n'), ((24404, 24441), 'numpy.nansum', 'np.nansum', (['(mask * weight ** 2)'], {'axis': '(0)'}), '(mask * weight ** 2, axis=0)\n', (24413, 24441), True, 'import numpy as np\n'), ((24630, 24673), 'numpy.nansum', 'np.nansum', (['(mask * weight * sky_rect)'], {'axis': '(0)'}), '(mask * weight * sky_rect, axis=0)\n', (24639, 24673), True, 'import numpy as np\n'), ((24676, 24713), 'numpy.nansum', 'np.nansum', (['(mask * weight ** 2)'], {'axis': '(0)'}), '(mask * weight ** 2, axis=0)\n', (24685, 24713), True, 'import numpy as np\n'), ((1164, 1249), 'scipy.interpolate.interp1d', 'interp1d', (['nw', 'ns'], {'kind': '"""quadratic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(nw, ns, kind='quadratic', bounds_error=False, fill_value='extrapolate'\n )\n", (1172, 1249), False, 'from scipy.interpolate import interp1d, griddata\n'), ((1286, 1310), 'numpy.arange', 'np.arange', (['spec.shape[0]'], {}), '(spec.shape[0])\n', (1295, 1310), True, 'import numpy as np\n'), ((1485, 1522), 'numpy.ones', 'np.ones', (['(spec.shape[0],)'], {'dtype': 'bool'}), '((spec.shape[0],), dtype=bool)\n', (1492, 1522), True, 'import numpy as np\n'), ((2049, 2086), 'numpy.ones', 'np.ones', (['(spec.shape[0],)'], {'dtype': 'bool'}), '((spec.shape[0],), dtype=bool)\n', (2056, 2086), True, 'import numpy as np\n'), ((2423, 2437), 'numpy.argsort', 'np.argsort', (['aw'], {}), '(aw)\n', (2433, 2437), True, 'import numpy as np\n'), ((2606, 2621), 'numpy.isfinite', 'np.isfinite', (['ns'], {}), '(ns)\n', (2617, 2621), True, 'import numpy as np\n'), 
((2634, 2730), 'scipy.interpolate.interp1d', 'interp1d', (['nw[good]', 'ns[good]'], {'kind': '"""quadratic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(nw[good], ns[good], kind='quadratic', bounds_error=False,\n fill_value='extrapolate')\n", (2642, 2730), False, 'from scipy.interpolate import interp1d, griddata\n'), ((2856, 2871), 'numpy.nanargmax', 'np.nanargmax', (['y'], {}), '(y)\n', (2868, 2871), True, 'import numpy as np\n'), ((2885, 2900), 'numpy.nanargmax', 'np.nanargmax', (['y'], {}), '(y)\n', (2897, 2900), True, 'import numpy as np\n'), ((3299, 3334), 'astropy.stats.mad_std', 'mad_std', (['(k - model)'], {'ignore_nan': '(True)'}), '(k - model, ignore_nan=True)\n', (3306, 3334), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats, mad_std\n'), ((3565, 3582), 'astropy.modeling.fitting.LevMarLSQFitter', 'LevMarLSQFitter', ([], {}), '()\n', (3580, 3582), False, 'from astropy.modeling.fitting import LevMarLSQFitter, FittingWithOutlierRemoval\n'), ((3667, 3682), 'astropy.modeling.models.Polynomial2D', 'Polynomial2D', (['(1)'], {}), '(1)\n', (3679, 3682), False, 'from astropy.modeling.models import Polynomial2D, Gaussian2D\n'), ((4067, 4083), 'numpy.ones', 'np.ones', (['y.shape'], {}), '(y.shape)\n', (4074, 4083), True, 'import numpy as np\n'), ((4506, 4529), 'numpy.isfinite', 'np.isfinite', (['data[:, i]'], {}), '(data[:, i])\n', (4517, 4529), True, 'import numpy as np\n'), ((4546, 4590), 'numpy.dot', 'np.dot', (['data[sel, i]', 'pca.components_.T[sel]'], {}), '(data[sel, i], pca.components_.T[sel])\n', (4552, 4590), True, 'import numpy as np\n'), ((4607, 4637), 'numpy.dot', 'np.dot', (['coeff', 'pca.components_'], {}), '(coeff, pca.components_)\n', (4613, 4637), True, 'import numpy as np\n'), ((5652, 5681), 'numpy.zeros', 'np.zeros', (['r.shape'], {'dtype': 'bool'}), '(r.shape, dtype=bool)\n', (5660, 5681), True, 'import numpy as np\n'), ((5974, 5995), 'numpy.nansum', 'np.nansum', (['y[d < 5.0]'], {}), '(y[d < 5.0])\n', 
(5983, 5995), True, 'import numpy as np\n'), ((5995, 6029), 'numpy.nansum', 'np.nansum', (['(y[d < 5.0] / c[d < 5.0])'], {}), '(y[d < 5.0] / c[d < 5.0])\n', (6004, 6029), True, 'import numpy as np\n'), ((6209, 6232), 'numpy.nanpercentile', 'np.nanpercentile', (['y', '(25)'], {}), '(y, 25)\n', (6225, 6232), True, 'import numpy as np\n'), ((6580, 6594), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (6591, 6594), True, 'import numpy as np\n'), ((6605, 6622), 'astropy.modeling.fitting.LevMarLSQFitter', 'LevMarLSQFitter', ([], {}), '()\n', (6620, 6622), False, 'from astropy.modeling.fitting import LevMarLSQFitter, FittingWithOutlierRemoval\n'), ((6727, 6746), 'numpy.isnan', 'np.isnan', (['new_model'], {}), '(new_model)\n', (6735, 6746), True, 'import numpy as np\n'), ((6784, 6804), 'numpy.nanmax', 'np.nanmax', (['new_model'], {}), '(new_model)\n', (6793, 6804), True, 'import numpy as np\n'), ((6875, 6911), 'numpy.linspace', 'np.linspace', (['(xc - 5.0)', '(xc + 5.0)', '(101)'], {}), '(xc - 5.0, xc + 5.0, 101)\n', (6886, 6911), True, 'import numpy as np\n'), ((6940, 6976), 'numpy.linspace', 'np.linspace', (['(yc - 5.0)', '(yc + 5.0)', '(101)'], {}), '(yc - 5.0, yc + 5.0, 101)\n', (6951, 6976), True, 'import numpy as np\n'), ((7741, 7755), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (7752, 7755), True, 'import numpy as np\n'), ((7932, 7951), 'numpy.isnan', 'np.isnan', (['new_model'], {}), '(new_model)\n', (7940, 7951), True, 'import numpy as np\n'), ((7989, 8009), 'numpy.nanmax', 'np.nanmax', (['new_model'], {}), '(new_model)\n', (7998, 8009), True, 'import numpy as np\n'), ((8080, 8116), 'numpy.linspace', 'np.linspace', (['(xc - 5.0)', '(xc + 5.0)', '(101)'], {}), '(xc - 5.0, xc + 5.0, 101)\n', (8091, 8116), True, 'import numpy as np\n'), ((8145, 8181), 'numpy.linspace', 'np.linspace', (['(yc - 5.0)', '(yc + 5.0)', '(101)'], {}), '(yc - 5.0, yc + 5.0, 101)\n', (8156, 8181), True, 'import numpy as np\n'), ((8617, 8666), 'numpy.loadtxt', 'np.loadtxt', 
(['filename'], {'usecols': '(0, 1)', 'unpack': '(True)'}), '(filename, usecols=(0, 1), unpack=True)\n', (8627, 8666), True, 'import numpy as np\n'), ((8849, 8900), 'numpy.interp', 'np.interp', (['commonwave', 'standard_wave', 'standard_flam'], {}), '(commonwave, standard_wave, standard_flam)\n', (8858, 8900), True, 'import numpy as np\n'), ((9010, 9034), 'os.path.realpath', 'op.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (9021, 9034), True, 'import os.path as op\n'), ((10347, 10365), 'numpy.isfinite', 'np.isfinite', (['chunk'], {}), '(chunk)\n', (10358, 10365), True, 'import numpy as np\n'), ((10769, 10782), 'numpy.round', 'np.round', (['loc'], {}), '(loc)\n', (10777, 10782), True, 'import numpy as np\n'), ((11195, 11239), 'numpy.array_split', 'np.array_split', (['skysub_rect', 'nchunks'], {'axis': '(1)'}), '(skysub_rect, nchunks, axis=1)\n', (11209, 11239), True, 'import numpy as np\n'), ((11274, 11315), 'numpy.array_split', 'np.array_split', (['sky_rect', 'nchunks'], {'axis': '(1)'}), '(sky_rect, nchunks, axis=1)\n', (11288, 11315), True, 'import numpy as np\n'), ((11350, 11383), 'numpy.array_split', 'np.array_split', (['def_wave', 'nchunks'], {}), '(def_wave, nchunks)\n', (11364, 11383), True, 'import numpy as np\n'), ((11400, 11423), 'math_utils.biweight', 'biweight', (['chunk'], {'axis': '(1)'}), '(chunk, axis=1)\n', (11408, 11423), False, 'from math_utils import biweight\n'), ((12257, 12279), 'numpy.isnan', 'np.isnan', (['skyline_mask'], {}), '(skyline_mask)\n', (12265, 12279), True, 'import numpy as np\n'), ((12417, 12464), 'astropy.convolution.convolve', 'convolve', (['skysub_rect[i]', 'G1'], {'preserve_nan': '(True)'}), '(skysub_rect[i], G1, preserve_nan=True)\n', (12425, 12464), False, 'from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans\n'), ((12626, 12662), 'numpy.array', 'np.array', (['(D[i, :] < 1.5)'], {'dtype': 'float'}), '(D[i, :] < 1.5, dtype=float)\n', (12634, 12662), True, 'import numpy as np\n'), 
((12742, 12777), 'numpy.nansum', 'np.nansum', (['(smooth[:, i] * D)'], {'axis': '(1)'}), '(smooth[:, i] * D, axis=1)\n', (12751, 12777), True, 'import numpy as np\n'), ((12812, 12827), 'numpy.nanargmax', 'np.nanargmax', (['T'], {}), '(T)\n', (12824, 12827), True, 'import numpy as np\n'), ((13157, 13183), 'numpy.nanmax', 'np.nanmax', (['((y - 1.0) / std)'], {}), '((y - 1.0) / std)\n', (13166, 13183), True, 'import numpy as np\n'), ((14278, 14323), 'astropy.convolution.interpolate_replace_nans', 'interpolate_replace_nans', (['intermediate[k]', 'G1'], {}), '(intermediate[k], G1)\n', (14302, 14323), False, 'from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans\n'), ((14389, 14437), 'astropy.convolution.interpolate_replace_nans', 'interpolate_replace_nans', (['intermediate[:, k]', 'G1'], {}), '(intermediate[:, k], G1)\n', (14413, 14437), False, 'from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans\n'), ((14564, 14584), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(55)'}), '(n_components=55)\n', (14567, 14584), False, 'from sklearn.decomposition import PCA\n'), ((14693, 14713), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(35)'}), '(n_components=35)\n', (14696, 14713), False, 'from sklearn.decomposition import PCA\n'), ((14761, 14772), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (14769, 14772), True, 'import numpy as np\n'), ((15515, 15549), 'astropy.convolution.interpolate_replace_nans', 'interpolate_replace_nans', (['dummy', 'G'], {}), '(dummy, G)\n', (15539, 15549), False, 'from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans\n'), ((17862, 17874), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (17869, 17874), True, 'import numpy as np\n'), ((19960, 19975), 'numpy.abs', 'np.abs', (['(y - 1.0)'], {}), '(y - 1.0)\n', (19966, 19975), True, 'import numpy as np\n'), ((20878, 20893), 'numpy.isnan', 'np.isnan', (['shift'], {}), '(shift)\n', 
(20886, 20893), True, 'import numpy as np\n'), ((22780, 22886), 'astropy.modeling.models.Gaussian2D', 'Gaussian2D', ([], {'x_mean': 'xc', 'y_mean': 'yc', 'x_stddev': 'fit_params[2]', 'y_stddev': 'fit_params[3]', 'theta': 'fit_params[4]'}), '(x_mean=xc, y_mean=yc, x_stddev=fit_params[2], y_stddev=\n fit_params[3], theta=fit_params[4])\n', (22790, 22886), False, 'from astropy.modeling.models import Polynomial2D, Gaussian2D\n'), ((22990, 23013), 'numpy.polyfit', 'np.polyfit', (['W', 'apcor', '(3)'], {}), '(W, apcor, 3)\n', (23000, 23013), True, 'import numpy as np\n'), ((23118, 23141), 'numpy.ones', 'np.ones', (['def_wave.shape'], {}), '(def_wave.shape)\n', (23125, 23141), True, 'import numpy as np\n'), ((23446, 23552), 'astropy.modeling.models.Gaussian2D', 'Gaussian2D', ([], {'x_mean': 'xc', 'y_mean': 'yc', 'x_stddev': 'fit_params[2]', 'y_stddev': 'fit_params[3]', 'theta': 'fit_params[4]'}), '(x_mean=xc, y_mean=yc, x_stddev=fit_params[2], y_stddev=\n fit_params[3], theta=fit_params[4])\n', (23456, 23552), False, 'from astropy.modeling.models import Polynomial2D, Gaussian2D\n'), ((23612, 23621), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (23618, 23621), True, 'import numpy as np\n'), ((24456, 24506), 'numpy.nansum', 'np.nansum', (['(mask * weight * error_rect ** 2)'], {'axis': '(0)'}), '(mask * weight * error_rect ** 2, axis=0)\n', (24465, 24506), True, 'import numpy as np\n'), ((24507, 24544), 'numpy.nansum', 'np.nansum', (['(mask * weight ** 2)'], {'axis': '(0)'}), '(mask * weight ** 2, axis=0)\n', (24516, 24544), True, 'import numpy as np\n'), ((24924, 25065), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['[def_wave, calibrated, calibrated_sky, calibrated_all, calibrated_ext,\n spectrum_sum, calibrated_err]'], {'header': 'm[0].header'}), '([def_wave, calibrated, calibrated_sky, calibrated_all,\n calibrated_ext, spectrum_sum, calibrated_err], header=m[0].header)\n', (24939, 25065), False, 'from astropy.io import fits\n'), ((1631, 1641), 'numpy.mean', 
'np.mean', (['w'], {}), '(w)\n', (1638, 1641), True, 'import numpy as np\n'), ((1703, 1714), 'math_utils.biweight', 'biweight', (['s'], {}), '(s)\n', (1711, 1714), False, 'from math_utils import biweight\n'), ((2200, 2218), 'numpy.abs', 'np.abs', (['(fibnum - i)'], {}), '(fibnum - i)\n', (2206, 2218), True, 'import numpy as np\n'), ((3076, 3100), 'astropy.convolution.Gaussian1DKernel', 'Gaussian1DKernel', (['kernel'], {}), '(kernel)\n', (3092, 3100), False, 'from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans\n'), ((3161, 3185), 'astropy.convolution.Gaussian1DKernel', 'Gaussian1DKernel', (['kernel'], {}), '(kernel)\n', (3177, 3185), False, 'from astropy.convolution import Gaussian1DKernel, convolve, interpolate_replace_nans\n'), ((4207, 4220), 'math_utils.biweight', 'biweight', (['cor'], {}), '(cor)\n', (4215, 4220), False, 'from math_utils import biweight\n'), ((5186, 5265), 'scipy.interpolate.interp1d', 'interp1d', (['wave[i]', 'skysub[i]'], {'kind': '"""linear"""', 'bounds_error': '(False)', 'fill_value': '(0.0)'}), "(wave[i], skysub[i], kind='linear', bounds_error=False, fill_value=0.0)\n", (5194, 5265), False, 'from scipy.interpolate import interp1d, griddata\n'), ((5548, 5561), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5554, 5561), True, 'import numpy as np\n'), ((5579, 5592), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5585, 5592), True, 'import numpy as np\n'), ((5617, 5629), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5624, 5629), True, 'import numpy as np\n'), ((9536, 9561), 'fiber_utils_remedy.identify_sky_pixels', 'identify_sky_pixels', (['Y[i]'], {}), '(Y[i])\n', (9555, 9561), False, 'from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra\n'), ((9580, 9609), 'astropy.stats.mad_std', 'mad_std', (['(Y[i] - cont)[~mask]'], {}), '((Y[i] - cont)[~mask])\n', (9587, 9609), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats, mad_std\n'), ((9634, 9674), 
'fiber_utils_remedy.find_peaks', 'find_peaks', (['(Y[i] - cont)'], {'thresh': '(25 * std)'}), '(Y[i] - cont, thresh=25 * std)\n', (9644, 9674), False, 'from fiber_utils_remedy import find_peaks, identify_sky_pixels, get_spectra\n'), ((9755, 9779), 'numpy.isfinite', 'np.isfinite', (['(Y[i] - cont)'], {}), '(Y[i] - cont)\n', (9766, 9779), True, 'import numpy as np\n'), ((9794, 9893), 'scipy.interpolate.interp1d', 'interp1d', (['wave[i][ng]', '(Y[i] - cont)[ng]'], {'kind': '"""quadratic"""', 'bounds_error': '(False)', 'fill_value': '(0.0)'}), "(wave[i][ng], (Y[i] - cont)[ng], kind='quadratic', bounds_error=\n False, fill_value=0.0)\n", (9802, 9893), False, 'from scipy.interpolate import interp1d, griddata\n'), ((11971, 11983), 'numpy.array', 'np.array', (['xi'], {}), '(xi)\n', (11979, 11983), True, 'import numpy as np\n'), ((13531, 13540), 'numpy.max', 'np.max', (['d'], {}), '(d)\n', (13537, 13540), True, 'import numpy as np\n'), ((15476, 15491), 'numpy.isnan', 'np.isnan', (['dummy'], {}), '(dummy)\n', (15484, 15491), True, 'import numpy as np\n'), ((20835, 20858), 'math_utils.biweight', 'biweight', (['Waves'], {'axis': '(0)'}), '(Waves, axis=0)\n', (20843, 20858), False, 'from math_utils import biweight\n'), ((20947, 20962), 'math_utils.biweight', 'biweight', (['shift'], {}), '(shift)\n', (20955, 20962), False, 'from math_utils import biweight\n'), ((21031, 21054), 'math_utils.biweight', 'biweight', (['Norms'], {'axis': '(0)'}), '(Norms, axis=0)\n', (21039, 21054), False, 'from math_utils import biweight\n'), ((25181, 25219), 'numpy.array', 'np.array', (['skysub_rect'], {'dtype': '"""float32"""'}), "(skysub_rect, dtype='float32')\n", (25189, 25219), True, 'import numpy as np\n'), ((25381, 25412), 'numpy.array', 'np.array', (['mask'], {'dtype': '"""float32"""'}), "(mask, dtype='float32')\n", (25389, 25412), True, 'import numpy as np\n'), ((25572, 25605), 'numpy.array', 'np.array', (['weight'], {'dtype': '"""float32"""'}), "(weight, dtype='float32')\n", (25580, 25605), 
True, 'import numpy as np\n'), ((1022, 1032), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (1029, 1032), True, 'import numpy as np\n'), ((1098, 1109), 'math_utils.biweight', 'biweight', (['s'], {}), '(s)\n', (1106, 1109), False, 'from math_utils import biweight\n'), ((1651, 1681), 'numpy.array_split', 'np.array_split', (['aw[inds]', '(3500)'], {}), '(aw[inds], 3500)\n', (1665, 1681), True, 'import numpy as np\n'), ((1724, 1754), 'numpy.array_split', 'np.array_split', (['As[inds]', '(3500)'], {}), '(As[inds], 3500)\n', (1738, 1754), True, 'import numpy as np\n'), ((2461, 2471), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (2468, 2471), True, 'import numpy as np\n'), ((2537, 2548), 'math_utils.biweight', 'biweight', (['s'], {}), '(s)\n', (2545, 2548), False, 'from math_utils import biweight\n'), ((5455, 5476), 'numpy.random.rand', 'np.random.rand', (['(20000)'], {}), '(20000)\n', (5469, 5476), True, 'import numpy as np\n'), ((5502, 5523), 'numpy.random.rand', 'np.random.rand', (['(20000)'], {}), '(20000)\n', (5516, 5523), True, 'import numpy as np\n'), ((5809, 5849), 'numpy.sqrt', 'np.sqrt', (['((xr - xC) ** 2 + (yr - yC) ** 2)'], {}), '((xr - xC) ** 2 + (yr - yC) ** 2)\n', (5816, 5849), True, 'import numpy as np\n'), ((9706, 9727), 'numpy.arange', 'np.arange', (['Y.shape[1]'], {}), '(Y.shape[1])\n', (9715, 9727), True, 'import numpy as np\n'), ((11771, 11782), 'numpy.mean', 'np.mean', (['wi'], {}), '(wi)\n', (11778, 11782), True, 'import numpy as np\n'), ((1042, 1072), 'numpy.array_split', 'np.array_split', (['aw[inds]', '(3500)'], {}), '(aw[inds], 3500)\n', (1056, 1072), True, 'import numpy as np\n'), ((1119, 1149), 'numpy.array_split', 'np.array_split', (['As[inds]', '(3500)'], {}), '(As[inds], 3500)\n', (1133, 1149), True, 'import numpy as np\n'), ((2481, 2511), 'numpy.array_split', 'np.array_split', (['aw[inds]', '(3500)'], {}), '(aw[inds], 3500)\n', (2495, 2511), True, 'import numpy as np\n'), ((2558, 2588), 'numpy.array_split', 'np.array_split', 
(['As[inds]', '(3500)'], {}), '(As[inds], 3500)\n', (2572, 2588), True, 'import numpy as np\n'), ((9480, 9497), 'numpy.isfinite', 'np.isfinite', (['Y[i]'], {}), '(Y[i])\n', (9491, 9497), True, 'import numpy as np\n'), ((11589, 11605), 'numpy.nansum', 'np.nansum', (['model'], {}), '(model)\n', (11598, 11605), True, 'import numpy as np\n'), ((11680, 11696), 'numpy.nansum', 'np.nansum', (['model'], {}), '(model)\n', (11689, 11696), True, 'import numpy as np\n'), ((10016, 10038), 'numpy.abs', 'np.abs', (['(wav - waves[j])'], {}), '(wav - waves[j])\n', (10022, 10038), True, 'import numpy as np\n'), ((10099, 10121), 'numpy.abs', 'np.abs', (['(wav - waves[j])'], {}), '(wav - waves[j])\n', (10105, 10121), True, 'import numpy as np\n')] |
from unittest import TestCase
from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index
import numpy as np
import tempfile
class UtilitiesTestCase(TestCase):
    def test_get_bin_index_works_fine(self):
        """Each quarter of [0, 1] must map onto one of the four bins."""
        cases = [
            (0.0, 0), (0.24, 0),
            (0.25, 1), (0.49, 1),
            (0.5, 2), (0.74, 2),
            (0.75, 3), (1.0, 3),
        ]
        for value, expected_bin in cases:
            self.assertEqual(get_bin_index(value, 4), expected_bin)
class ParameterDefinitionTestCase(TestCase):
    def test_works_fine(self):
        """A ParameterDefinition keeps both its value range and its type."""
        definition = ParameterDefinition(MinMax(0.0, 5.9), float)
        self.assertIsInstance(definition.value, MinMax)
        self.assertEqual(definition.param_type, float)
class OptimizationStatsTestCase(TestCase):
    def setUp(self):
        # Labels encoded as digit strings: '1' -> "Class 1", '2' -> "Class 2".
        true_codes = "11121222211212111121"
        predicted_codes = "11122211121122121222"
        y = np.array(["Class " + code for code in true_codes])
        predicted = np.array(["Class " + code for code in predicted_codes])
        self.__stats = OptimizationStats(predicted, y)

    def test_works_fine(self):
        """Stats computed from the fixture must match the known values."""
        self.assertEqual(self.__stats._accuracy, 0.5)
        self.assertEqual(self.__stats._precision, 0.5199999999999999)
        self.assertEqual(self.__stats._cohen_kappa, 0.0)
        self.assertEqual(self.__stats._f1_score, 0.505050505050505)
class MinMaxTestCase(TestCase):
    def test_works_fine(self):
        """MinMax stores the lower and upper bound it was constructed with."""
        bounds = MinMax(0.0, 5.9)
        self.assertEqual(bounds.min, 0.0)
        self.assertEqual(bounds.max, 5.9)
| [
"niaaml.get_bin_index",
"numpy.array",
"niaaml.MinMax",
"niaaml.OptimizationStats"
] | [((1005, 1247), 'numpy.array', 'np.array', (["['Class 1', 'Class 1', 'Class 1', 'Class 2', 'Class 1', 'Class 2',\n 'Class 2', 'Class 2', 'Class 2', 'Class 1', 'Class 1', 'Class 2',\n 'Class 1', 'Class 2', 'Class 1', 'Class 1', 'Class 1', 'Class 1',\n 'Class 2', 'Class 1']"], {}), "(['Class 1', 'Class 1', 'Class 1', 'Class 2', 'Class 1', 'Class 2',\n 'Class 2', 'Class 2', 'Class 2', 'Class 1', 'Class 1', 'Class 2',\n 'Class 1', 'Class 2', 'Class 1', 'Class 1', 'Class 1', 'Class 1',\n 'Class 2', 'Class 1'])\n", (1013, 1247), True, 'import numpy as np\n'), ((1613, 1855), 'numpy.array', 'np.array', (["['Class 1', 'Class 1', 'Class 1', 'Class 2', 'Class 2', 'Class 2',\n 'Class 1', 'Class 1', 'Class 1', 'Class 2', 'Class 1', 'Class 1',\n 'Class 2', 'Class 2', 'Class 1', 'Class 2', 'Class 1', 'Class 2',\n 'Class 2', 'Class 2']"], {}), "(['Class 1', 'Class 1', 'Class 1', 'Class 2', 'Class 2', 'Class 2',\n 'Class 1', 'Class 1', 'Class 1', 'Class 2', 'Class 1', 'Class 1',\n 'Class 2', 'Class 2', 'Class 1', 'Class 2', 'Class 1', 'Class 2',\n 'Class 2', 'Class 2'])\n", (1621, 1855), True, 'import numpy as np\n'), ((2225, 2256), 'niaaml.OptimizationStats', 'OptimizationStats', (['predicted', 'y'], {}), '(predicted, y)\n', (2242, 2256), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((2620, 2636), 'niaaml.MinMax', 'MinMax', (['(0.0)', '(5.9)'], {}), '(0.0, 5.9)\n', (2626, 2636), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((253, 274), 'niaaml.get_bin_index', 'get_bin_index', (['(0.0)', '(4)'], {}), '(0.0, 4)\n', (266, 274), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((304, 326), 'niaaml.get_bin_index', 'get_bin_index', (['(0.24)', '(4)'], {}), '(0.24, 4)\n', (317, 326), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((356, 378), 'niaaml.get_bin_index', 'get_bin_index', (['(0.25)', 
'(4)'], {}), '(0.25, 4)\n', (369, 378), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((408, 430), 'niaaml.get_bin_index', 'get_bin_index', (['(0.49)', '(4)'], {}), '(0.49, 4)\n', (421, 430), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((460, 481), 'niaaml.get_bin_index', 'get_bin_index', (['(0.5)', '(4)'], {}), '(0.5, 4)\n', (473, 481), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((511, 533), 'niaaml.get_bin_index', 'get_bin_index', (['(0.74)', '(4)'], {}), '(0.74, 4)\n', (524, 533), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((563, 585), 'niaaml.get_bin_index', 'get_bin_index', (['(0.75)', '(4)'], {}), '(0.75, 4)\n', (576, 585), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((615, 636), 'niaaml.get_bin_index', 'get_bin_index', (['(1.0)', '(4)'], {}), '(1.0, 4)\n', (628, 636), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n'), ((770, 786), 'niaaml.MinMax', 'MinMax', (['(0.0)', '(5.9)'], {}), '(0.0, 5.9)\n', (776, 786), False, 'from niaaml import ParameterDefinition, MinMax, OptimizationStats, get_bin_index\n')] |
import numpy as np
from .initialization import *
from .conviction_helper_functions import *
import networkx as nx
# Phase 2
# Behaviors
def check_progress(params, step, sL, s):
    '''
    Driving process: stochastically complete or fail previously funded
    proposals.

    Each active proposal completes with probability
    1/(base_completion_rate + log(funds)) and, failing that, fails with
    probability 1/(base_failure_rate + log(funds)) — bigger grants take
    longer to resolve either way.
    '''
    network = s['network']
    completed = []
    failed = []
    for proposal in get_nodes_by_type(network, 'proposal'):
        if network.nodes[proposal]['status'] != 'active':
            continue
        funds = network.nodes[proposal]['funds_requested']
        completion_prob = 1.0 / (base_completion_rate + np.log(funds))
        failure_prob = 1.0 / (base_failure_rate + np.log(funds))
        if np.random.rand() < completion_prob:
            completed.append(proposal)
        elif np.random.rand() < failure_prob:
            failed.append(proposal)
    return({'completed': completed, 'failed': failed})
# Mechanisms
def complete_proposal(params, step, sL, s, _input):
    '''
    Book-keeping of failed and completed proposals; updates the network.

    Marks every proposal in _input['completed'] as 'completed' and every
    proposal in _input['failed'] as 'failed' on the network object.

    Fix: the original also looped over all participants reading each edge's
    affinity into a local `force` that was never used — a leftover of a
    disabled sentiment update (the update itself was commented out). That
    dead work is removed; node statuses are the only effect, as before.

    Returns ('network', network) as the (key, value) state update.
    '''
    network = s['network']
    for proposal in _input['completed']:
        network.nodes[proposal]['status'] = 'completed'
    for proposal in _input['failed']:
        network.nodes[proposal]['status'] = 'failed'
    return ('network', network)
# Phase 3
# Behaviors
def participants_decisions(params, step, sL, s):
    '''
    Decide each participant's holdings change and supported proposals.

    An engaged participant (probability .3 * initial_sentiment) adjusts
    holdings by a sentiment-driven force and supports every candidate whose
    (socially boosted) affinity clears a sensitivity-scaled cutoff, floored
    at .5. A disengaged participant keeps holdings unchanged and continues
    supporting whatever candidates they already have tokens on.
    '''
    network = s['network']
    candidates = [p for p in get_nodes_by_type(network, 'proposal')
                  if network.nodes[p]['status'] == 'candidate']
    gain = .01
    delta_holdings = {}
    proposals_supported = {}
    for participant in get_nodes_by_type(network, 'participant'):
        engagement_rate = .3 * initial_sentiment
        if np.random.rand() >= engagement_rate:
            # Disengaged: no trade, keep supporting already-staked candidates.
            delta_holdings[participant] = 0
            proposals_supported[participant] = [
                p for p in candidates
                if network.edges[(participant, p)]['tokens'] > 0
            ]
            continue
        force = initial_sentiment - sensitivity
        delta_holdings[participant] = network.nodes[participant]['holdings'] * gain * force
        support = []
        for candidate in candidates:
            booster = social_affinity_booster(network, candidate, participant)
            affinity = network.edges[(participant, candidate)]['affinity'] + booster
            cutoff = sensitivity * np.max(
                [network.edges[(participant, p)]['affinity'] for p in candidates])
            if cutoff < .5:
                cutoff = .5
            if affinity > cutoff:
                support.append(candidate)
        proposals_supported[participant] = support
    return({'delta_holdings': delta_holdings, 'proposals_supported': proposals_supported})
# Mechanisms
def update_tokens(params, step, sL, s, _input):
    '''
    Apply holdings deltas and restake conviction-voting tokens.

    For every participant: update holdings, then spread the new holdings
    across supported candidates in proportion to affinity (zero tokens on
    unsupported candidates) and accumulate conviction with decay alpha.
    Candidates whose total staked tokens drop below min_supp are killed.

    Returns ('network', network) as the (key, value) state update.
    '''
    network = s['network']
    delta_holdings = _input['delta_holdings']
    proposals_supported = _input['proposals_supported']
    candidates = [p for p in get_nodes_by_type(network, 'proposal')
                  if network.nodes[p]['status'] == 'candidate']
    participants = get_nodes_by_type(network, 'participant')

    for i in participants:
        network.nodes[i]['holdings'] = network.nodes[i]['holdings'] + delta_holdings[i]
        supported = proposals_supported[i]
        total_affinity = np.sum([network.edges[(i, j)]['affinity'] for j in supported])
        for j in candidates:
            edge = network.edges[(i, j)]
            if j in supported:
                share = edge['affinity'] / total_affinity
                edge['tokens'] = share * network.nodes[i]['holdings']
            else:
                edge['tokens'] = 0
            # Fresh stake plus alpha-decayed prior conviction.
            edge['conviction'] = edge['tokens'] + alpha * edge['conviction']

    for j in candidates:
        network.nodes[j]['conviction'] = np.sum(
            [network.edges[(i, j)]['conviction'] for i in participants])
        total_tokens = np.sum([network.edges[(i, j)]['tokens'] for i in participants])
        if total_tokens < min_supp:
            network.nodes[j]['status'] = 'killed'
    return ('network', network)
| [
"numpy.sum",
"numpy.log",
"numpy.random.rand",
"numpy.max"
] | [((4687, 4747), 'numpy.sum', 'np.sum', (["[network.edges[i, j]['affinity'] for j in supported]"], {}), "([network.edges[i, j]['affinity'] for j in supported])\n", (4693, 4747), True, 'import numpy as np\n'), ((5366, 5431), 'numpy.sum', 'np.sum', (["[network.edges[i, j]['conviction'] for i in participants]"], {}), "([network.edges[i, j]['conviction'] for i in participants])\n", (5372, 5431), True, 'import numpy as np\n'), ((5458, 5519), 'numpy.sum', 'np.sum', (["[network.edges[i, j]['tokens'] for i in participants]"], {}), "([network.edges[i, j]['tokens'] for i in participants])\n", (5464, 5519), True, 'import numpy as np\n'), ((3016, 3032), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3030, 3032), True, 'import numpy as np\n'), ((694, 710), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (708, 710), True, 'import numpy as np\n'), ((576, 594), 'numpy.log', 'np.log', (['grant_size'], {}), '(grant_size)\n', (582, 594), True, 'import numpy as np\n'), ((659, 677), 'numpy.log', 'np.log', (['grant_size'], {}), '(grant_size)\n', (665, 677), True, 'import numpy as np\n'), ((778, 794), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (792, 794), True, 'import numpy as np\n'), ((3488, 3549), 'numpy.max', 'np.max', (["[network.edges[i, p]['affinity'] for p in candidates]"], {}), "([network.edges[i, p]['affinity'] for p in candidates])\n", (3494, 3549), True, 'import numpy as np\n')] |
import subprocess
import numpy as np
import matplotlib.pyplot as plt
runs = 50
def outlier_filter(datas, threshold = 2):
datas = np.array(datas)
z = np.abs((datas - datas.mean()) / datas.std())
return datas[z < threshold]
def data_processing(data_set, n):
catgories = data_set[0].shape[0]
samples = data_set[0].shape[1]
final = np.zeros((catgories, samples))
for c in range(catgories):
for s in range(samples):
final[c][s] = \
outlier_filter([data_set[i][c][s] for i in range(n)]).mean()
return final
if __name__ == "__main__":
Ys = []
for i in range(runs):
comp_proc = subprocess.run('sudo taskset -c 11 ./client_measure > measure_time_list', shell = True)
output = np.loadtxt('measure_time_list', dtype = 'float').T
Ys.append(np.delete(output, 0, 0))
X = output[0]
Y = data_processing(Ys, runs)
fig, ax = plt.subplots(1, 1, sharey = True)
ax.set_title('Fibonacci driver time measure (iterative)', fontsize = 16)
ax.set_xlabel(r'$n_{th}$ fibonacci', fontsize = 16)
ax.set_ylabel('time (ns)', fontsize = 16)
ax.plot(X, Y[0], marker = '+', markersize = 7, label = 'kernel')
ax.plot(X, Y[1], marker = '*', markersize = 3, label = 'user')
ax.plot(X, Y[2], marker = '^', markersize = 3, label = 'kernel to user')
ax.legend(loc = 'upper left')
plt.savefig('statistic_plot_time.png') | [
"matplotlib.pyplot.savefig",
"numpy.delete",
"subprocess.run",
"numpy.array",
"numpy.zeros",
"numpy.loadtxt",
"matplotlib.pyplot.subplots"
] | [((135, 150), 'numpy.array', 'np.array', (['datas'], {}), '(datas)\n', (143, 150), True, 'import numpy as np\n'), ((355, 385), 'numpy.zeros', 'np.zeros', (['(catgories, samples)'], {}), '((catgories, samples))\n', (363, 385), True, 'import numpy as np\n'), ((985, 1016), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'sharey': '(True)'}), '(1, 1, sharey=True)\n', (997, 1016), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1488), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""statistic_plot_time.png"""'], {}), "('statistic_plot_time.png')\n", (1461, 1488), True, 'import matplotlib.pyplot as plt\n'), ((719, 808), 'subprocess.run', 'subprocess.run', (['"""sudo taskset -c 11 ./client_measure > measure_time_list"""'], {'shell': '(True)'}), "('sudo taskset -c 11 ./client_measure > measure_time_list',\n shell=True)\n", (733, 808), False, 'import subprocess\n'), ((824, 870), 'numpy.loadtxt', 'np.loadtxt', (['"""measure_time_list"""'], {'dtype': '"""float"""'}), "('measure_time_list', dtype='float')\n", (834, 870), True, 'import numpy as np\n'), ((893, 916), 'numpy.delete', 'np.delete', (['output', '(0)', '(0)'], {}), '(output, 0, 0)\n', (902, 916), True, 'import numpy as np\n')] |
"""Collect training data from MIDI files."""
import argparse
from pathlib import Path
import numpy as np
from pypianoroll import Multitrack, Track
FAMILY_NAMES = [
"drum",
"bass",
"guitar",
"string",
"piano",
]
FAMILY_THRESHOLDS = [
(2, 24), # drum
(1, 96), # bass
(2, 156), # guitar
(2, 156), # string,
(2, 156), # piano
]
def parse_arguments():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(
description="Collect training data from MIDI files"
)
parser.add_argument(
"-i",
"--input_dir",
type=Path,
required=True,
help="directory containing MIDI files",
)
parser.add_argument(
"-o",
"--output_filename",
type=Path,
required=True,
help="output filename",
)
parser.add_argument(
"-r",
"--recursive",
action="store_true",
help="whether to search directory recursively",
)
return parser.parse_args()
def check_which_family(track):
def is_piano(program, is_drum):
return not is_drum and (
(program >= 0 and program <= 7)
or (program >= 16 and program <= 23)
)
def is_guitar(program):
return program >= 24 and program <= 31
def is_bass(program):
return program >= 32 and program <= 39
def is_string(program):
return program >= 40 and program <= 51
# drum, bass, guitar, string, piano
def is_instr_act(program, is_drum):
return np.array(
[
is_drum,
is_bass(program),
is_guitar(program),
is_string(program),
is_piano(program, is_drum),
]
)
instr_act = is_instr_act(track.program, track.is_drum)
return instr_act
def segment_quality(pianoroll, threshold_pitch, threshold_beats):
pitch_sum = np.sum(np.sum(pianoroll.pianoroll, axis=0) > 0)
beat_sum = np.sum(np.sum(pianoroll.pianoroll, axis=1) > 0)
return (
(pitch_sum >= threshold_pitch) and (beat_sum >= threshold_beats),
(pitch_sum, beat_sum),
)
def main():
"""Main function."""
num_consecutive_bar = 4
resolution = 12
down_sample = 2
count_total_segments = 0
ok_segment_list = []
hop_size = num_consecutive_bar / 4
args = parse_arguments()
if args.recursive:
filenames = args.input_dir.rglob("*.mid")
else:
filenames = args.input_dir.glob("*.mid")
for filename in filenames:
print("Processing {}".format(filename))
multitrack = Multitrack(filename)
downbeat = multitrack.downbeat
num_bar = len(downbeat) // resolution
hop_iter = 0
song_ok_segments = []
for bidx in range(num_bar - num_consecutive_bar):
if hop_iter > 0:
hop_iter -= 1
continue
st = bidx * resolution
ed = st + num_consecutive_bar * resolution
best_instr = [
Track(
pianoroll=np.zeros((num_consecutive_bar * resolution, 128))
)
] * 5
best_score = [-1] * 5
for track in multitrack.tracks:
tmp_map = check_which_family(track)
in_family = np.where(tmp_map)[0]
if not len(in_family):
continue
family = in_family[0]
tmp_pianoroll = track[st:ed:down_sample]
is_ok, score = segment_quality(
tmp_pianoroll,
FAMILY_THRESHOLDS[family][0],
FAMILY_THRESHOLDS[family][1],
)
if is_ok and sum(score) > best_score[family]:
track.name = FAMILY_NAMES[family]
best_instr[family] = track[st:ed:down_sample]
best_score[family] = sum(score)
hop_iter = np.random.randint(0, 1) + hop_size
song_ok_segments.append(
Multitrack(tracks=best_instr, beat_resolution=12)
)
count_ok_segment = len(song_ok_segments)
if count_ok_segment > 6:
seed = (6, count_ok_segment // 2)
if count_ok_segment > 11:
seed = (11, count_ok_segment // 3)
if count_ok_segment > 15:
seed = (15, count_ok_segment // 4)
rand_idx = np.random.permutation(count_ok_segment)[: max(seed)]
song_ok_segments = [song_ok_segments[ridx] for ridx in rand_idx]
ok_segment_list.extend(song_ok_segments)
count_ok_segment = len(rand_idx)
else:
ok_segment_list.extend(song_ok_segments)
count_total_segments += len(song_ok_segments)
print(
"current: {} | cumulative: {}".format(count_ok_segment, count_total_segments)
)
print("-" * 30)
print(count_total_segments)
num_item = len(ok_segment_list)
compiled_list = []
for lidx in range(num_item):
multi_track = ok_segment_list[lidx]
pianorolls = []
for tracks in multi_track.tracks:
pianorolls.append(tracks.pianoroll[:, :, np.newaxis])
pianoroll_compiled = np.reshape(
np.concatenate(pianorolls, axis=2)[:, 24:108, :],
(num_consecutive_bar, resolution, 84, 5),
)
pianoroll_compiled = pianoroll_compiled[np.newaxis, :] > 0
compiled_list.append(pianoroll_compiled.astype(bool))
result = np.concatenate(compiled_list, axis=0)
print("output shape: {}".format(result.shape))
if args.outfile.endswith(".npz"):
np.savez_compressed(
args.outfile,
nonzero=np.array(result.nonzero()),
shape=result.shape,
)
else:
np.save(args.outfile, result)
print("Successfully saved training data to : {}".format(args.outfile))
if __name__ == "__main__":
main()
| [
"argparse.ArgumentParser",
"numpy.where",
"pypianoroll.Multitrack",
"numpy.sum",
"numpy.random.randint",
"numpy.zeros",
"numpy.concatenate",
"numpy.save",
"numpy.random.permutation"
] | [((450, 526), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Collect training data from MIDI files"""'}), "(description='Collect training data from MIDI files')\n", (473, 526), False, 'import argparse\n'), ((5588, 5625), 'numpy.concatenate', 'np.concatenate', (['compiled_list'], {'axis': '(0)'}), '(compiled_list, axis=0)\n', (5602, 5625), True, 'import numpy as np\n'), ((2644, 2664), 'pypianoroll.Multitrack', 'Multitrack', (['filename'], {}), '(filename)\n', (2654, 2664), False, 'from pypianoroll import Multitrack, Track\n'), ((5878, 5907), 'numpy.save', 'np.save', (['args.outfile', 'result'], {}), '(args.outfile, result)\n', (5885, 5907), True, 'import numpy as np\n'), ((1953, 1988), 'numpy.sum', 'np.sum', (['pianoroll.pianoroll'], {'axis': '(0)'}), '(pianoroll.pianoroll, axis=0)\n', (1959, 1988), True, 'import numpy as np\n'), ((2016, 2051), 'numpy.sum', 'np.sum', (['pianoroll.pianoroll'], {'axis': '(1)'}), '(pianoroll.pianoroll, axis=1)\n', (2022, 2051), True, 'import numpy as np\n'), ((4007, 4030), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (4024, 4030), True, 'import numpy as np\n'), ((4095, 4144), 'pypianoroll.Multitrack', 'Multitrack', ([], {'tracks': 'best_instr', 'beat_resolution': '(12)'}), '(tracks=best_instr, beat_resolution=12)\n', (4105, 4144), False, 'from pypianoroll import Multitrack, Track\n'), ((4490, 4529), 'numpy.random.permutation', 'np.random.permutation', (['count_ok_segment'], {}), '(count_ok_segment)\n', (4511, 4529), True, 'import numpy as np\n'), ((5331, 5365), 'numpy.concatenate', 'np.concatenate', (['pianorolls'], {'axis': '(2)'}), '(pianorolls, axis=2)\n', (5345, 5365), True, 'import numpy as np\n'), ((3361, 3378), 'numpy.where', 'np.where', (['tmp_map'], {}), '(tmp_map)\n', (3369, 3378), True, 'import numpy as np\n'), ((3117, 3166), 'numpy.zeros', 'np.zeros', (['(num_consecutive_bar * resolution, 128)'], {}), '((num_consecutive_bar * resolution, 128))\n', (3125, 
3166), True, 'import numpy as np\n')] |
import unittest
from generativepy.nparray import make_nparray, make_nparray_frame
from generativepy.movie import save_frame
from image_test_helper import run_image_test
import numpy as np
"""
Test each function of the nparray module, with 1, 3 and 4 channel output
"""
def draw4(array, pixel_width, pixel_height, frame_no, frame_count):
"""
Draw a transparent blue rectangle on a brown background
:param array:
:param pixel_width:
:param pixel_height:
:param frame_no:
:param frame_count:
:return:
"""
array[:,:] = [128, 64, 0, 255]
array[50:350, 100:500] = [0, 128, 196, 64]
def draw3(array, pixel_width, pixel_height, frame_no, frame_count):
"""
Draw a blue rectangle on a brown background
:param array:
:param pixel_width:
:param pixel_height:
:param frame_no:
:param frame_count:
:return:
"""
array[:,:] = [128, 64, 0]
array[50:350, 100:500] = [0, 128, 196]
def draw1(array, pixel_width, pixel_height, frame_no, frame_count):
"""
Draw a dark grey rectangle on a light greay background
:param array:
:param pixel_width:
:param pixel_height:
:param frame_no:
:param frame_count:
:return:
"""
array[:,:] = [196]
array[50:350, 100:500] = [64]
def draw3_nofill(array, pixel_width, pixel_height, frame_no, frame_count):
"""
Draw a blue rectangle with no background
:param array:
:param pixel_width:
:param pixel_height:
:param frame_no:
:param frame_count:
:return:
"""
array[50:350, 100:500] = [0, 128, 196]
class TestNparrayModule(unittest.TestCase):
def test_make_nparray_rgba(self):
def creator(file):
make_nparray(file, draw4, 600, 400, channels=4)
self.assertTrue(run_image_test('test_make_nparray_rgba.png', creator))
def test_make_nparray_rgb(self):
def creator(file):
make_nparray(file, draw3, 600, 400, channels=3)
self.assertTrue(run_image_test('test_make_nparray_rgb.png', creator))
def test_make_nparray_gray(self):
def creator(file):
make_nparray(file, draw1, 600, 400, channels=1)
self.assertTrue(run_image_test('test_make_nparray_gray.png', creator))
def test_make_bitmap_frame_rgba(self):
def creator(file):
frame = make_nparray_frame(draw4, 600, 400, channels=4)
save_frame(file, frame)
self.assertTrue(run_image_test('test_make_nparray_frame_rgba.png', creator))
def test_make_nparray_frame_rgb(self):
def creator(file):
frame = make_nparray_frame(draw3, 600, 400, channels=3)
save_frame(file, frame)
self.assertTrue(run_image_test('test_make_nparray_frame_rgb.png', creator))
def test_make_nparray_frame_gray(self):
def creator(file):
frame = make_nparray_frame(draw1, 600, 400, channels=1)
save_frame(file, frame)
self.assertTrue(run_image_test('test_make_nparray_frame_gray.png', creator))
def test_make_nparray_frame_with_output_rgb(self):
def creator(file):
out = np.full((400, 600, 3), 128, dtype=np.uint)
out[25:100, 50:550] = [0, 0, 0]
frame = make_nparray_frame(draw3_nofill, 600, 400, out=out)
save_frame(file, frame)
self.assertTrue(run_image_test('test_make_nparray_frame_with_output_rgb.png', creator))
| [
"generativepy.movie.save_frame",
"image_test_helper.run_image_test",
"generativepy.nparray.make_nparray_frame",
"generativepy.nparray.make_nparray",
"numpy.full"
] | [((1712, 1759), 'generativepy.nparray.make_nparray', 'make_nparray', (['file', 'draw4', '(600)', '(400)'], {'channels': '(4)'}), '(file, draw4, 600, 400, channels=4)\n', (1724, 1759), False, 'from generativepy.nparray import make_nparray, make_nparray_frame\n'), ((1785, 1838), 'image_test_helper.run_image_test', 'run_image_test', (['"""test_make_nparray_rgba.png"""', 'creator'], {}), "('test_make_nparray_rgba.png', creator)\n", (1799, 1838), False, 'from image_test_helper import run_image_test\n'), ((1917, 1964), 'generativepy.nparray.make_nparray', 'make_nparray', (['file', 'draw3', '(600)', '(400)'], {'channels': '(3)'}), '(file, draw3, 600, 400, channels=3)\n', (1929, 1964), False, 'from generativepy.nparray import make_nparray, make_nparray_frame\n'), ((1990, 2042), 'image_test_helper.run_image_test', 'run_image_test', (['"""test_make_nparray_rgb.png"""', 'creator'], {}), "('test_make_nparray_rgb.png', creator)\n", (2004, 2042), False, 'from image_test_helper import run_image_test\n'), ((2122, 2169), 'generativepy.nparray.make_nparray', 'make_nparray', (['file', 'draw1', '(600)', '(400)'], {'channels': '(1)'}), '(file, draw1, 600, 400, channels=1)\n', (2134, 2169), False, 'from generativepy.nparray import make_nparray, make_nparray_frame\n'), ((2195, 2248), 'image_test_helper.run_image_test', 'run_image_test', (['"""test_make_nparray_gray.png"""', 'creator'], {}), "('test_make_nparray_gray.png', creator)\n", (2209, 2248), False, 'from image_test_helper import run_image_test\n'), ((2341, 2388), 'generativepy.nparray.make_nparray_frame', 'make_nparray_frame', (['draw4', '(600)', '(400)'], {'channels': '(4)'}), '(draw4, 600, 400, channels=4)\n', (2359, 2388), False, 'from generativepy.nparray import make_nparray, make_nparray_frame\n'), ((2401, 2424), 'generativepy.movie.save_frame', 'save_frame', (['file', 'frame'], {}), '(file, frame)\n', (2411, 2424), False, 'from generativepy.movie import save_frame\n'), ((2450, 2509), 'image_test_helper.run_image_test', 
'run_image_test', (['"""test_make_nparray_frame_rgba.png"""', 'creator'], {}), "('test_make_nparray_frame_rgba.png', creator)\n", (2464, 2509), False, 'from image_test_helper import run_image_test\n'), ((2602, 2649), 'generativepy.nparray.make_nparray_frame', 'make_nparray_frame', (['draw3', '(600)', '(400)'], {'channels': '(3)'}), '(draw3, 600, 400, channels=3)\n', (2620, 2649), False, 'from generativepy.nparray import make_nparray, make_nparray_frame\n'), ((2662, 2685), 'generativepy.movie.save_frame', 'save_frame', (['file', 'frame'], {}), '(file, frame)\n', (2672, 2685), False, 'from generativepy.movie import save_frame\n'), ((2711, 2769), 'image_test_helper.run_image_test', 'run_image_test', (['"""test_make_nparray_frame_rgb.png"""', 'creator'], {}), "('test_make_nparray_frame_rgb.png', creator)\n", (2725, 2769), False, 'from image_test_helper import run_image_test\n'), ((2863, 2910), 'generativepy.nparray.make_nparray_frame', 'make_nparray_frame', (['draw1', '(600)', '(400)'], {'channels': '(1)'}), '(draw1, 600, 400, channels=1)\n', (2881, 2910), False, 'from generativepy.nparray import make_nparray, make_nparray_frame\n'), ((2923, 2946), 'generativepy.movie.save_frame', 'save_frame', (['file', 'frame'], {}), '(file, frame)\n', (2933, 2946), False, 'from generativepy.movie import save_frame\n'), ((2972, 3031), 'image_test_helper.run_image_test', 'run_image_test', (['"""test_make_nparray_frame_gray.png"""', 'creator'], {}), "('test_make_nparray_frame_gray.png', creator)\n", (2986, 3031), False, 'from image_test_helper import run_image_test\n'), ((3134, 3176), 'numpy.full', 'np.full', (['(400, 600, 3)', '(128)'], {'dtype': 'np.uint'}), '((400, 600, 3), 128, dtype=np.uint)\n', (3141, 3176), True, 'import numpy as np\n'), ((3241, 3292), 'generativepy.nparray.make_nparray_frame', 'make_nparray_frame', (['draw3_nofill', '(600)', '(400)'], {'out': 'out'}), '(draw3_nofill, 600, 400, out=out)\n', (3259, 3292), False, 'from generativepy.nparray import make_nparray, 
make_nparray_frame\n'), ((3305, 3328), 'generativepy.movie.save_frame', 'save_frame', (['file', 'frame'], {}), '(file, frame)\n', (3315, 3328), False, 'from generativepy.movie import save_frame\n'), ((3354, 3424), 'image_test_helper.run_image_test', 'run_image_test', (['"""test_make_nparray_frame_with_output_rgb.png"""', 'creator'], {}), "('test_make_nparray_frame_with_output_rgb.png', creator)\n", (3368, 3424), False, 'from image_test_helper import run_image_test\n')] |
import torch
from torch import nn
import copy
import numpy as np
import os
import sys
import wandb
from chemprop.models import MoleculeModelDUN
from chemprop.bayes import BayesLinear, neg_log_likeDUN
from chemprop.bayes_utils import scheduler_const
from chemprop.utils import save_checkpoint, load_checkpoint
from chemprop.data import MoleculeDataLoader
from chemprop.nn_utils import NoamLR
from ..train import train
from ..evaluate import evaluate
def train_dun(
model,
train_data,
val_data,
num_workers,
cache,
loss_func,
metric_func,
scaler,
features_scaler,
args,
save_dir
):
# data loaders for dun
train_data_loader = MoleculeDataLoader(
dataset=train_data,
batch_size=args.batch_size_dun,
num_workers=num_workers,
cache=cache,
class_balance=args.class_balance,
shuffle=True,
seed=args.seed
)
val_data_loader = MoleculeDataLoader(
dataset=val_data,
batch_size=args.batch_size_dun,
num_workers=num_workers,
cache=cache
)
# instantiate DUN model with Bayesian linear layers (includes log noise)
model_dun = MoleculeModelDUN(args)
# copy over parameters from pretrained to DUN model
# we take the transpose because the Bayes linear layers have transpose shapes
for (_, param_dun), (_, param_pre) in zip(model_dun.named_parameters(), model.named_parameters()):
param_dun.data = copy.deepcopy(param_pre.data.T)
# instantiate rho for each weight
for layer in model_dun.children():
if isinstance(layer, BayesLinear):
layer.init_rho(args.rho_min_dun, args.rho_max_dun)
for layer in model_dun.encoder.encoder.children():
if isinstance(layer, BayesLinear):
layer.init_rho(args.rho_min_dun, args.rho_max_dun)
# instantiate variational categorical distribution
model_dun.create_log_cat(args)
# move dun model to cuda
if args.cuda:
print('Moving dun model to cuda')
model_dun = model_dun.to(args.device)
# loss_func
loss_func = neg_log_likeDUN
# optimiser
optimizer = torch.optim.Adam(model_dun.parameters(), lr=args.lr_dun_min)
# scheduler
scheduler = NoamLR(
optimizer=optimizer,
warmup_epochs=[2],
total_epochs=[100],
steps_per_epoch=args.train_data_size // args.batch_size_dun,
init_lr=[args.lr_dun_min],
max_lr=[args.lr_dun_max],
final_lr=[args.lr_dun_min]
)
# non sampling mode for first 100 epochs
bbp_switch = 3
# freeze log_cat for first 100 epochs
for name, parameter in model_dun.named_parameters():
if name == 'log_cat':
parameter.requires_grad = False
else:
parameter.requires_grad = True
print("----------DUN training----------")
# training loop
best_score = float('inf') if args.minimize_score else -float('inf')
best_epoch, n_iter = 0, 0
for epoch in range(args.epochs_dun):
print(f'DUN epoch {epoch}')
# start second phase
if epoch == 100:
scheduler = scheduler_const([args.lr_dun_min])
bbp_switch = 4
for name, parameter in model_dun.named_parameters():
parameter.requires_grad = True
n_iter = train(
model=model_dun,
data_loader=train_data_loader,
loss_func=loss_func,
optimizer=optimizer,
scheduler=scheduler,
args=args,
n_iter=n_iter,
bbp_switch=bbp_switch
)
val_scores = evaluate(
model=model_dun,
data_loader=val_data_loader,
args=args,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
scaler=scaler
)
# Average validation score
avg_val_score = np.nanmean(val_scores)
print(f'Validation {args.metric} = {avg_val_score:.6f}')
wandb.log({"Validation MAE": avg_val_score})
print('variational categorical:')
print(torch.exp(model_dun.log_cat) / torch.sum(torch.exp(model_dun.log_cat)))
# Save model checkpoint if improved validation score
if (args.minimize_score and avg_val_score < best_score or \
not args.minimize_score and avg_val_score > best_score) and (epoch >= args.presave_dun):
best_score, best_epoch = avg_val_score, epoch
save_checkpoint(os.path.join(save_dir, 'model_dun.pt'), model_dun, scaler, features_scaler, args)
# load model with best validation score
template = MoleculeModelDUN(args)
for layer in template.children():
if isinstance(layer, BayesLinear):
layer.init_rho(args.rho_min_dun, args.rho_max_dun)
for layer in template.encoder.encoder.children():
if isinstance(layer, BayesLinear):
layer.init_rho(args.rho_min_dun, args.rho_max_dun)
template.create_log_cat(args)
print(f'Best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
model_dun = load_checkpoint(os.path.join(save_dir, 'model_dun.pt'), device=args.device, logger=None, template = template)
return model_dun
| [
"wandb.log",
"chemprop.bayes_utils.scheduler_const",
"os.path.join",
"torch.exp",
"chemprop.models.MoleculeModelDUN",
"numpy.nanmean",
"chemprop.nn_utils.NoamLR",
"copy.deepcopy",
"chemprop.data.MoleculeDataLoader"
] | [((686, 866), 'chemprop.data.MoleculeDataLoader', 'MoleculeDataLoader', ([], {'dataset': 'train_data', 'batch_size': 'args.batch_size_dun', 'num_workers': 'num_workers', 'cache': 'cache', 'class_balance': 'args.class_balance', 'shuffle': '(True)', 'seed': 'args.seed'}), '(dataset=train_data, batch_size=args.batch_size_dun,\n num_workers=num_workers, cache=cache, class_balance=args.class_balance,\n shuffle=True, seed=args.seed)\n', (704, 866), False, 'from chemprop.data import MoleculeDataLoader\n'), ((943, 1053), 'chemprop.data.MoleculeDataLoader', 'MoleculeDataLoader', ([], {'dataset': 'val_data', 'batch_size': 'args.batch_size_dun', 'num_workers': 'num_workers', 'cache': 'cache'}), '(dataset=val_data, batch_size=args.batch_size_dun,\n num_workers=num_workers, cache=cache)\n', (961, 1053), False, 'from chemprop.data import MoleculeDataLoader\n'), ((1186, 1208), 'chemprop.models.MoleculeModelDUN', 'MoleculeModelDUN', (['args'], {}), '(args)\n', (1202, 1208), False, 'from chemprop.models import MoleculeModelDUN\n'), ((2276, 2493), 'chemprop.nn_utils.NoamLR', 'NoamLR', ([], {'optimizer': 'optimizer', 'warmup_epochs': '[2]', 'total_epochs': '[100]', 'steps_per_epoch': '(args.train_data_size // args.batch_size_dun)', 'init_lr': '[args.lr_dun_min]', 'max_lr': '[args.lr_dun_max]', 'final_lr': '[args.lr_dun_min]'}), '(optimizer=optimizer, warmup_epochs=[2], total_epochs=[100],\n steps_per_epoch=args.train_data_size // args.batch_size_dun, init_lr=[\n args.lr_dun_min], max_lr=[args.lr_dun_max], final_lr=[args.lr_dun_min])\n', (2282, 2493), False, 'from chemprop.nn_utils import NoamLR\n'), ((4807, 4829), 'chemprop.models.MoleculeModelDUN', 'MoleculeModelDUN', (['args'], {}), '(args)\n', (4823, 4829), False, 'from chemprop.models import MoleculeModelDUN\n'), ((1476, 1507), 'copy.deepcopy', 'copy.deepcopy', (['param_pre.data.T'], {}), '(param_pre.data.T)\n', (1489, 1507), False, 'import copy\n'), ((4071, 4093), 'numpy.nanmean', 'np.nanmean', (['val_scores'], {}), 
'(val_scores)\n', (4081, 4093), True, 'import numpy as np\n'), ((4167, 4211), 'wandb.log', 'wandb.log', (["{'Validation MAE': avg_val_score}"], {}), "({'Validation MAE': avg_val_score})\n", (4176, 4211), False, 'import wandb\n'), ((5285, 5323), 'os.path.join', 'os.path.join', (['save_dir', '"""model_dun.pt"""'], {}), "(save_dir, 'model_dun.pt')\n", (5297, 5323), False, 'import os\n'), ((3182, 3216), 'chemprop.bayes_utils.scheduler_const', 'scheduler_const', (['[args.lr_dun_min]'], {}), '([args.lr_dun_min])\n', (3197, 3216), False, 'from chemprop.bayes_utils import scheduler_const\n'), ((4268, 4296), 'torch.exp', 'torch.exp', (['model_dun.log_cat'], {}), '(model_dun.log_cat)\n', (4277, 4296), False, 'import torch\n'), ((4661, 4699), 'os.path.join', 'os.path.join', (['save_dir', '"""model_dun.pt"""'], {}), "(save_dir, 'model_dun.pt')\n", (4673, 4699), False, 'import os\n'), ((4309, 4337), 'torch.exp', 'torch.exp', (['model_dun.log_cat'], {}), '(model_dun.log_cat)\n', (4318, 4337), False, 'import torch\n')] |
import json
import os
import os.path
from abc import ABCMeta
from collections import OrderedDict
from typing import Any, Optional, Union
import numpy as np
import torch
from mmhuman3d.core.conventions.keypoints_mapping import (
convert_kps,
get_keypoint_num,
)
from mmhuman3d.core.evaluation.mpjpe import keypoint_mpjpe
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.models.builder import build_body_model
from .base_dataset import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class HumanImageDataset(BaseDataset, metaclass=ABCMeta):
"""Human Image Dataset.
Args:
data_prefix (str): the prefix of data path.
pipeline (list): a list of dict, where each element represents
a operation defined in `mmhuman3d.datasets.pipelines`.
dataset_name (str | None): the name of dataset. It is used to
identify the type of evaluation metric. Default: None.
body_model (dict | None, optional): the config for body model,
which will be used to generate meshes and keypoints.
Default: None.
ann_file (str | None, optional): the annotation file. When ann_file
is str, the subclass is expected to read from the ann_file.
When ann_file is None, the subclass is expected to read
according to data_prefix.
convention (str, optional): keypoints convention. Keypoints will be
converted from "human_data" to the given one.
Default: "human_data"
test_mode (bool, optional): in train mode or test mode.
Default: False.
"""
def __init__(self,
data_prefix: str,
pipeline: list,
dataset_name: str,
body_model: Optional[Union[dict, None]] = None,
ann_file: Optional[Union[str, None]] = None,
convention: Optional[str] = 'human_data',
test_mode: Optional[bool] = False):
self.convention = convention
self.num_keypoints = get_keypoint_num(convention)
super(HumanImageDataset,
self).__init__(data_prefix, pipeline, ann_file, test_mode,
dataset_name)
if body_model is not None:
self.body_model = build_body_model(body_model)
else:
self.body_model = None
def get_annotation_file(self):
"""Get path of the annotation file."""
ann_prefix = os.path.join(self.data_prefix, 'preprocessed_datasets')
self.ann_file = os.path.join(ann_prefix, self.ann_file)
def load_annotations(self):
"""Load annotation from the annotation file.
Here we simply use :obj:`HumanData` to parse the annotation.
"""
self.get_annotation_file()
# change keypoint from 'human_data' to the given convention
self.human_data = HumanData.fromfile(self.ann_file)
if self.human_data.check_keypoints_compressed():
self.human_data.decompress_keypoints()
if 'keypoints3d' in self.human_data:
keypoints3d = self.human_data['keypoints3d']
assert 'keypoints3d_mask' in self.human_data
keypoints3d_mask = self.human_data['keypoints3d_mask']
keypoints3d, keypoints3d_mask = \
convert_kps(
keypoints3d,
src='human_data',
dst=self.convention,
mask=keypoints3d_mask)
self.human_data.__setitem__('keypoints3d', keypoints3d)
self.human_data.__setitem__('keypoints3d_mask', keypoints3d_mask)
if 'keypoints2d' in self.human_data:
keypoints2d = self.human_data['keypoints2d']
assert 'keypoints2d_mask' in self.human_data
keypoints2d_mask = self.human_data['keypoints2d_mask']
keypoints2d, keypoints2d_mask = \
convert_kps(
keypoints2d,
src='human_data',
dst=self.convention,
mask=keypoints2d_mask)
self.human_data.__setitem__('keypoints2d', keypoints2d)
self.human_data.__setitem__('keypoints2d_mask', keypoints2d_mask)
self.num_data = self.human_data.temporal_len
def prepare_raw_data(self, idx: int):
"""Get item from self.human_data."""
info = {}
info['img_prefix'] = None
image_path = self.human_data['image_path'][idx]
info['image_path'] = os.path.join(self.data_prefix, 'datasets',
self.dataset_name, image_path)
if image_path.endswith('smc'):
device, device_id, frame_id = self.human_data['image_id'][idx]
info['image_id'] = (device, int(device_id), int(frame_id))
info['dataset_name'] = self.dataset_name
info['sample_idx'] = idx
if 'bbox_xywh' in self.human_data:
info['bbox_xywh'] = self.human_data['bbox_xywh'][idx]
x, y, w, h, s = info['bbox_xywh']
cx = x + w / 2
cy = y + h / 2
w = h = max(w, h)
info['center'] = np.array([cx, cy])
info['scale'] = np.array([w, h])
else:
info['bbox_xywh'] = np.zeros((5))
info['center'] = np.zeros((2))
info['scale'] = np.zeros((2))
# in later modules, we will check validity of each keypoint by
# its confidence. Therefore, we do not need the mask of keypoints.
if 'keypoints2d' in self.human_data:
info['keypoints2d'] = self.human_data['keypoints2d'][idx]
else:
info['keypoints2d'] = np.zeros((self.num_keypoints, 3))
if 'keypoints3d' in self.human_data:
info['keypoints3d'] = self.human_data['keypoints3d'][idx]
else:
info['keypoints3d'] = np.zeros((self.num_keypoints, 4))
if 'smpl' in self.human_data:
smpl_dict = self.human_data['smpl']
else:
smpl_dict = {}
if 'smpl' in self.human_data:
if 'has_smpl' in self.human_data:
info['has_smpl'] = int(self.human_data['has_smpl'][idx])
else:
info['has_smpl'] = 1
else:
info['has_smpl'] = 0
if 'body_pose' in smpl_dict:
info['smpl_body_pose'] = smpl_dict['body_pose'][idx]
else:
info['smpl_body_pose'] = np.zeros((23, 3))
if 'global_orient' in smpl_dict:
info['smpl_global_orient'] = smpl_dict['global_orient'][idx]
else:
info['smpl_global_orient'] = np.zeros((3))
if 'betas' in smpl_dict:
info['smpl_betas'] = smpl_dict['betas'][idx]
else:
info['smpl_betas'] = np.zeros((10))
if 'transl' in smpl_dict:
info['smpl_transl'] = smpl_dict['transl'][idx]
else:
info['smpl_transl'] = np.zeros((3))
return info
def prepare_data(self, idx: int):
"""Generate and transform data."""
info = self.prepare_raw_data(idx)
return self.pipeline(info)
def evaluate(self,
outputs: list,
res_folder: str,
metric: Optional[str] = 'joint_error'):
"""Evaluate 3D keypoint results.
Args:
outputs (list): results from model inference.
res_folder (str): path to store results.
metric (str): the type of metric. Default: 'joint_error'
Returns:
dict:
A dict of all evaluation results.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['joint_error']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
res_file = os.path.join(res_folder, 'result_keypoints.json')
# for keeping correctness during multi-gpu test, we sort all results
kpts_dict = {}
for out in outputs:
for (keypoints, idx) in zip(out['keypoints_3d'], out['image_idx']):
kpts_dict[int(idx)] = keypoints.tolist()
kpts = []
for i in range(self.num_data):
kpts.append(kpts_dict[i])
self._write_keypoint_results(kpts, res_file)
info_str = self._report_metric(res_file)
name_value = OrderedDict(info_str)
return name_value
@staticmethod
def _write_keypoint_results(keypoints: Any, res_file: str):
"""Write results into a json file."""
with open(res_file, 'w') as f:
json.dump(keypoints, f, sort_keys=True, indent=4)
    def _report_metric(self, res_file: str):
        """Keypoint evaluation.

        Report mean per joint position error (MPJPE) and mean per joint
        position error after rigid alignment (MPJPE-PA), both in millimetres.

        The ground-truth joints are obtained per dataset: 'pw3d' and 'humman'
        regenerate them from stored SMPL parameters via ``self.body_model``;
        'h36m' reads precomputed 3D keypoints from ``self.human_data``.
        Predictions are read back from the JSON written by ``evaluate``.
        """
        with open(res_file, 'r') as fin:
            pred_keypoints3d = json.load(fin)
        assert len(pred_keypoints3d) == self.num_data
        pred_keypoints3d = np.array(pred_keypoints3d)
        # --- build ground-truth keypoints per dataset ---
        if self.dataset_name == 'pw3d':
            # PW3D: re-run the (gendered) body model on the stored SMPL params.
            betas = []
            body_pose = []
            global_orient = []
            gender = []
            smpl_dict = self.human_data['smpl']
            for idx in range(self.num_data):
                betas.append(smpl_dict['betas'][idx])
                body_pose.append(smpl_dict['body_pose'][idx])
                global_orient.append(smpl_dict['global_orient'][idx])
                # gender encoding used by the body model: 0 = male, 1 = other
                if self.human_data['meta']['gender'][idx] == 'm':
                    gender.append(0)
                else:
                    gender.append(1)
            betas = torch.FloatTensor(betas)
            body_pose = torch.FloatTensor(body_pose).view(-1, 69)
            global_orient = torch.FloatTensor(global_orient)
            gender = torch.Tensor(gender)
            gt_output = self.body_model(
                betas=betas,
                body_pose=body_pose,
                global_orient=global_orient,
                gender=gender)
            gt_keypoints3d = gt_output['joints'].detach().cpu().numpy()
            gt_keypoints3d_mask = np.ones((len(pred_keypoints3d), 24))
        elif self.dataset_name == 'humman':
            # HuMMan: same as PW3D but the body model is gender-neutral.
            betas = []
            body_pose = []
            global_orient = []
            smpl_dict = self.human_data['smpl']
            for idx in range(self.num_data):
                betas.append(smpl_dict['betas'][idx])
                body_pose.append(smpl_dict['body_pose'][idx])
                global_orient.append(smpl_dict['global_orient'][idx])
            betas = torch.FloatTensor(betas)
            body_pose = torch.FloatTensor(body_pose).view(-1, 69)
            global_orient = torch.FloatTensor(global_orient)
            gt_output = self.body_model(
                betas=betas, body_pose=body_pose, global_orient=global_orient)
            gt_keypoints3d = gt_output['joints'].detach().cpu().numpy()
            gt_keypoints3d_mask = np.ones((len(pred_keypoints3d), 24))
        elif self.dataset_name == 'h36m':
            # H36M: precomputed 3D keypoints stored directly in human_data.
            gt_keypoints3d = self.human_data['keypoints3d'][:, :, :3]
            gt_keypoints3d_mask = np.ones((len(pred_keypoints3d), 17))
        else:
            raise NotImplementedError()
        # --- map GT/pred to a common 14-joint (LSP-style) set and pick a
        # pelvis reference per keypoint convention ---
        # SMPL_49 only!
        if gt_keypoints3d.shape[1] == 49:
            assert pred_keypoints3d.shape[1] == 49
            # drop the first 25 (OpenPose-style) joints, keep the 24 SMPL ones
            gt_keypoints3d = gt_keypoints3d[:, 25:, :]
            pred_keypoints3d = pred_keypoints3d[:, 25:, :]
            joint_mapper = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]
            gt_keypoints3d = gt_keypoints3d[:, joint_mapper, :]
            pred_keypoints3d = pred_keypoints3d[:, joint_mapper, :]
            # we only evaluate on 14 lsp joints
            # pelvis = midpoint of the two hip joints (indices 2 and 3)
            pred_pelvis = (pred_keypoints3d[:, 2] + pred_keypoints3d[:, 3]) / 2
            gt_pelvis = (gt_keypoints3d[:, 2] + gt_keypoints3d[:, 3]) / 2
        # H36M for testing!
        elif gt_keypoints3d.shape[1] == 17:
            assert pred_keypoints3d.shape[1] == 17
            H36M_TO_J17 = [
                6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9
            ]
            H36M_TO_J14 = H36M_TO_J17[:14]
            joint_mapper = H36M_TO_J14
            # H36M joint 0 is the pelvis/root
            pred_pelvis = pred_keypoints3d[:, 0]
            gt_pelvis = gt_keypoints3d[:, 0]
            gt_keypoints3d = gt_keypoints3d[:, joint_mapper, :]
            pred_keypoints3d = pred_keypoints3d[:, joint_mapper, :]
        # keypoint 24
        elif gt_keypoints3d.shape[1] == 24:
            assert pred_keypoints3d.shape[1] == 24
            joint_mapper = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]
            gt_keypoints3d = gt_keypoints3d[:, joint_mapper, :]
            pred_keypoints3d = pred_keypoints3d[:, joint_mapper, :]
            # we only evaluate on 14 lsp joints
            pred_pelvis = (pred_keypoints3d[:, 2] + pred_keypoints3d[:, 3]) / 2
            gt_pelvis = (gt_keypoints3d[:, 2] + gt_keypoints3d[:, 3]) / 2
        # humman keypoints (not SMPL keypoints)
        elif gt_keypoints3d.shape[1] == 133:
            assert pred_keypoints3d.shape[1] == 17
            H36M_TO_J17 = [
                6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9
            ]
            H36M_TO_J14 = H36M_TO_J17[:14]
            pred_joint_mapper = H36M_TO_J14
            pred_keypoints3d = pred_keypoints3d[:, pred_joint_mapper, :]
            # the last two are not mapped (joint 0 repeated as filler)
            gt_joint_mapper = [16, 14, 12, 11, 13, 15, 10, 8, 6, 5, 7, 9, 0, 0]
            gt_keypoints3d = gt_keypoints3d[:, gt_joint_mapper, :]
            pred_pelvis = (pred_keypoints3d[:, 2] + pred_keypoints3d[:, 3]) / 2
            gt_pelvis = (gt_keypoints3d[:, 2] + gt_keypoints3d[:, 3]) / 2
            # TODO: temp solution
            # mapping already applied above, so skip the shared remap below
            joint_mapper = None
            gt_keypoints3d_mask = np.ones((len(pred_keypoints3d), 14))
            gt_keypoints3d_mask[:, 12:14] = 0  # the last two are invalid
            gt_keypoints3d_mask = gt_keypoints3d_mask > 0
        else:
            raise NotImplementedError
        # root-align both sets before computing errors
        pred_keypoints3d = pred_keypoints3d - pred_pelvis[:, None, :]
        gt_keypoints3d = gt_keypoints3d - gt_pelvis[:, None, :]
        if joint_mapper is not None:
            gt_keypoints3d_mask = gt_keypoints3d_mask[:, joint_mapper] > 0
        mpjpe = keypoint_mpjpe(pred_keypoints3d, gt_keypoints3d,
                               gt_keypoints3d_mask)
        mpjpe_pa = keypoint_mpjpe(
            pred_keypoints3d,
            gt_keypoints3d,
            gt_keypoints3d_mask,
            alignment='procrustes')
        info_str = []
        # metres -> millimetres
        info_str.append(('MPJPE', mpjpe * 1000))
        info_str.append(('MPJPE-PA', mpjpe_pa * 1000))
        return info_str
| [
"collections.OrderedDict",
"mmhuman3d.models.builder.build_body_model",
"mmhuman3d.data.data_structures.human_data.HumanData.fromfile",
"mmhuman3d.core.conventions.keypoints_mapping.get_keypoint_num",
"os.path.join",
"torch.Tensor",
"json.load",
"numpy.array",
"numpy.zeros",
"mmhuman3d.core.evalua... | [((2087, 2115), 'mmhuman3d.core.conventions.keypoints_mapping.get_keypoint_num', 'get_keypoint_num', (['convention'], {}), '(convention)\n', (2103, 2115), False, 'from mmhuman3d.core.conventions.keypoints_mapping import convert_kps, get_keypoint_num\n'), ((2512, 2567), 'os.path.join', 'os.path.join', (['self.data_prefix', '"""preprocessed_datasets"""'], {}), "(self.data_prefix, 'preprocessed_datasets')\n", (2524, 2567), False, 'import os\n'), ((2592, 2631), 'os.path.join', 'os.path.join', (['ann_prefix', 'self.ann_file'], {}), '(ann_prefix, self.ann_file)\n', (2604, 2631), False, 'import os\n'), ((2929, 2962), 'mmhuman3d.data.data_structures.human_data.HumanData.fromfile', 'HumanData.fromfile', (['self.ann_file'], {}), '(self.ann_file)\n', (2947, 2962), False, 'from mmhuman3d.data.data_structures.human_data import HumanData\n'), ((4553, 4626), 'os.path.join', 'os.path.join', (['self.data_prefix', '"""datasets"""', 'self.dataset_name', 'image_path'], {}), "(self.data_prefix, 'datasets', self.dataset_name, image_path)\n", (4565, 4626), False, 'import os\n'), ((7943, 7992), 'os.path.join', 'os.path.join', (['res_folder', '"""result_keypoints.json"""'], {}), "(res_folder, 'result_keypoints.json')\n", (7955, 7992), False, 'import os\n'), ((8476, 8497), 'collections.OrderedDict', 'OrderedDict', (['info_str'], {}), '(info_str)\n', (8487, 8497), False, 'from collections import OrderedDict\n'), ((9144, 9170), 'numpy.array', 'np.array', (['pred_keypoints3d'], {}), '(pred_keypoints3d)\n', (9152, 9170), True, 'import numpy as np\n'), ((14448, 14517), 'mmhuman3d.core.evaluation.mpjpe.keypoint_mpjpe', 'keypoint_mpjpe', (['pred_keypoints3d', 'gt_keypoints3d', 'gt_keypoints3d_mask'], {}), '(pred_keypoints3d, gt_keypoints3d, gt_keypoints3d_mask)\n', (14462, 14517), False, 'from mmhuman3d.core.evaluation.mpjpe import keypoint_mpjpe\n'), ((14568, 14665), 'mmhuman3d.core.evaluation.mpjpe.keypoint_mpjpe', 'keypoint_mpjpe', (['pred_keypoints3d', 
'gt_keypoints3d', 'gt_keypoints3d_mask'], {'alignment': '"""procrustes"""'}), "(pred_keypoints3d, gt_keypoints3d, gt_keypoints3d_mask,\n alignment='procrustes')\n", (14582, 14665), False, 'from mmhuman3d.core.evaluation.mpjpe import keypoint_mpjpe\n'), ((2330, 2358), 'mmhuman3d.models.builder.build_body_model', 'build_body_model', (['body_model'], {}), '(body_model)\n', (2346, 2358), False, 'from mmhuman3d.models.builder import build_body_model\n'), ((3359, 3450), 'mmhuman3d.core.conventions.keypoints_mapping.convert_kps', 'convert_kps', (['keypoints3d'], {'src': '"""human_data"""', 'dst': 'self.convention', 'mask': 'keypoints3d_mask'}), "(keypoints3d, src='human_data', dst=self.convention, mask=\n keypoints3d_mask)\n", (3370, 3450), False, 'from mmhuman3d.core.conventions.keypoints_mapping import convert_kps, get_keypoint_num\n'), ((3961, 4052), 'mmhuman3d.core.conventions.keypoints_mapping.convert_kps', 'convert_kps', (['keypoints2d'], {'src': '"""human_data"""', 'dst': 'self.convention', 'mask': 'keypoints2d_mask'}), "(keypoints2d, src='human_data', dst=self.convention, mask=\n keypoints2d_mask)\n", (3972, 4052), False, 'from mmhuman3d.core.conventions.keypoints_mapping import convert_kps, get_keypoint_num\n'), ((5205, 5223), 'numpy.array', 'np.array', (['[cx, cy]'], {}), '([cx, cy])\n', (5213, 5223), True, 'import numpy as np\n'), ((5252, 5268), 'numpy.array', 'np.array', (['[w, h]'], {}), '([w, h])\n', (5260, 5268), True, 'import numpy as np\n'), ((5315, 5326), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (5323, 5326), True, 'import numpy as np\n'), ((5358, 5369), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (5366, 5369), True, 'import numpy as np\n'), ((5400, 5411), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (5408, 5411), True, 'import numpy as np\n'), ((5725, 5758), 'numpy.zeros', 'np.zeros', (['(self.num_keypoints, 3)'], {}), '((self.num_keypoints, 3))\n', (5733, 5758), True, 'import numpy as np\n'), ((5922, 5955), 'numpy.zeros', 
'np.zeros', (['(self.num_keypoints, 4)'], {}), '((self.num_keypoints, 4))\n', (5930, 5955), True, 'import numpy as np\n'), ((6497, 6514), 'numpy.zeros', 'np.zeros', (['(23, 3)'], {}), '((23, 3))\n', (6505, 6514), True, 'import numpy as np\n'), ((6685, 6696), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6693, 6696), True, 'import numpy as np\n'), ((6837, 6849), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (6845, 6849), True, 'import numpy as np\n'), ((6994, 7005), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7002, 7005), True, 'import numpy as np\n'), ((8705, 8754), 'json.dump', 'json.dump', (['keypoints', 'f'], {'sort_keys': '(True)', 'indent': '(4)'}), '(keypoints, f, sort_keys=True, indent=4)\n', (8714, 8754), False, 'import json\n'), ((9047, 9061), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (9056, 9061), False, 'import json\n'), ((9777, 9801), 'torch.FloatTensor', 'torch.FloatTensor', (['betas'], {}), '(betas)\n', (9794, 9801), False, 'import torch\n'), ((9896, 9928), 'torch.FloatTensor', 'torch.FloatTensor', (['global_orient'], {}), '(global_orient)\n', (9913, 9928), False, 'import torch\n'), ((9950, 9970), 'torch.Tensor', 'torch.Tensor', (['gender'], {}), '(gender)\n', (9962, 9970), False, 'import torch\n'), ((10721, 10745), 'torch.FloatTensor', 'torch.FloatTensor', (['betas'], {}), '(betas)\n', (10738, 10745), False, 'import torch\n'), ((10840, 10872), 'torch.FloatTensor', 'torch.FloatTensor', (['global_orient'], {}), '(global_orient)\n', (10857, 10872), False, 'import torch\n'), ((9826, 9854), 'torch.FloatTensor', 'torch.FloatTensor', (['body_pose'], {}), '(body_pose)\n', (9843, 9854), False, 'import torch\n'), ((10770, 10798), 'torch.FloatTensor', 'torch.FloatTensor', (['body_pose'], {}), '(body_pose)\n', (10787, 10798), False, 'import torch\n')] |
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
class graph_ntu():
    """Skeleton graph pyramid for the NTU RGB+D 25-joint layout.

    ``get_edge`` builds a sequence of progressively coarser graphs
    (25 -> 11 -> 5 -> 1 nodes) by repeatedly collapsing leaf nodes into
    their neighbours, and records per-level node arrays, edge lists
    (including self-links), the tracked centre joint index, and the
    old-index -> new-index mappings.
    """
    def __init__(self,
                 max_hop=1,
                 dilation=1):
        # max_hop/dilation are stored but only consumed by the commented-out
        # adjacency computation below.
        self.max_hop = max_hop
        self.dilation = dilation
        self.lvls = 4 # 25 -> 11 -> 5 -> 1
        self.As = []
        self.hop_dis = []
        self.get_edge()
        #for lvl in range(self.lvls):
        #    self.hop_dis.append(get_hop_distance(self.num_node, self.edge, lvl, max_hop=max_hop))
        #    self.get_adjacency(lvl)
        #self.mapping = upsample_mapping(self.map, self.nodes, self.edge, self.lvls)[::-1]
    def __str__(self):
        # NOTE(review): returns a list, not a str — calling str() on an
        # instance would raise TypeError. Looks intended for direct access.
        return self.As
    def get_edge(self):
        self.num_node = []
        self.nodes = []
        self.center = [20]
        self.nodes = []
        self.Gs = []
        # NTU skeleton bone list, 1-based joint indices
        neighbor_base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21),
                         (6, 5), (7, 6), (8, 7), (9, 21), (10, 9),
                         (11, 10), (12, 11), (1, 13), (14, 13), (15, 14),
                         (16, 15), (1, 17), (18, 17), (19, 18), (20, 19),
                         (22, 8), (23, 8), (24, 12), (25, 12)]
        neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_base]  # to 0-based
        nodes = np.array([i for i in range(25)])
        G = nx.Graph()
        G.add_nodes_from(nodes) # add the 25 joints as nodes
        G.add_edges_from(neighbor_link) # add the bones as edges
        G = nx.convert_node_labels_to_integers(G, first_label=0)
        self_link = [(int(i), int(i)) for i in G] # one self-loop per node
        self.map = [np.array([[i, x] for i,x in enumerate(G)])]
        self.edge = [np.concatenate((np.array(G.edges), self_link), axis=0)]
        self.nodes.append(nodes)
        self.num_node.append(len(G))
        self.Gs.append(G.copy())
        # Build each coarser level by collapsing degree-`start` leaves into
        # their neighbours, increasing `start` until nothing more collapses.
        for _ in range(self.lvls-1):
            stay = []
            start = 1
            while True:
                remove = []
                for i in G:
                    if len(G.edges(i)) == start and i not in stay:
                        lost = []
                        for j,k in G.edges(i):
                            stay.append(k)
                            lost.append(k)
                        # reconnect the removed node's neighbours to each other
                        recon = [(l,m) for l in lost for m in lost if l!=m]
                        G.add_edges_from(recon)
                        remove.append(i)
                if start>10: break # Remove as maximum as possible
                G.remove_nodes_from(remove)
                cycle = nx.cycle_basis(G) # Check if there is a cycle in order to downsample it
                if len(cycle)>0:
                    if len(cycle[0])==len(G):
                        # the whole graph is one cycle: drop everything not kept
                        last = [x for x in G if x not in stay]
                        G.remove_nodes_from(last)
                start+=1
            map_i = np.array([[i, x] for i,x in enumerate(G)]) # Keep track of graph indices
            self.map.append(map_i)
            mapping = {} # old label -> new compact label
            for i, x in enumerate(G):
                mapping[int(x)] = i
                if int(x)==self.center[-1]:
                    self.center.append(i)  # follow the centre joint down the pyramid
            G = nx.relabel_nodes(G, mapping) # Change labels
            G = nx.convert_node_labels_to_integers(G, first_label=0)
            nodes = np.array([i for i in range(len(G))])
            self.nodes.append(nodes)
            self_link = [(int(i), int(i)) for i in G]
            G_l = np.concatenate((np.array(G.edges), self_link), axis=0) if len(np.array(G.edges)) > 0 else self_link
            self.edge.append(G_l)
            self.num_node.append(len(G))
            self.Gs.append(G.copy())
        '''for i, G in enumerate(self.Gs): # Uncomment this to visualize graphs
            plt.clf() # Uncomment this to visualize graphs
            nx.draw(G, with_labels = True)
            plt.savefig('G_' + str(i) + '.pdf')'''
        # every per-level structure must have exactly one entry per level
        assert len(self.num_node) == self.lvls
        assert len(self.nodes) == self.lvls
        assert len(self.edge) == self.lvls
        assert len(self.center) == self.lvls
        assert len(self.map) == self.lvls
"networkx.relabel_nodes",
"networkx.cycle_basis",
"networkx.Graph",
"numpy.array",
"networkx.convert_node_labels_to_integers"
] | [((1345, 1355), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1353, 1355), True, 'import networkx as nx\n'), ((1487, 1539), 'networkx.convert_node_labels_to_integers', 'nx.convert_node_labels_to_integers', (['G'], {'first_label': '(0)'}), '(G, first_label=0)\n', (1521, 1539), True, 'import networkx as nx\n'), ((3245, 3273), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['G', 'mapping'], {}), '(G, mapping)\n', (3261, 3273), True, 'import networkx as nx\n'), ((3307, 3359), 'networkx.convert_node_labels_to_integers', 'nx.convert_node_labels_to_integers', (['G'], {'first_label': '(0)'}), '(G, first_label=0)\n', (3341, 3359), True, 'import networkx as nx\n'), ((2585, 2602), 'networkx.cycle_basis', 'nx.cycle_basis', (['G'], {}), '(G)\n', (2599, 2602), True, 'import networkx as nx\n'), ((1729, 1746), 'numpy.array', 'np.array', (['G.edges'], {}), '(G.edges)\n', (1737, 1746), True, 'import numpy as np\n'), ((3602, 3619), 'numpy.array', 'np.array', (['G.edges'], {}), '(G.edges)\n', (3610, 3619), True, 'import numpy as np\n'), ((3556, 3573), 'numpy.array', 'np.array', (['G.edges'], {}), '(G.edges)\n', (3564, 3573), True, 'import numpy as np\n')] |
import os
import warnings
import sklearn.decomposition
import numpy as np
from .openl3_exceptions import OpenL3Error
with warnings.catch_warnings():
# Suppress TF and Keras warnings when importing
warnings.simplefilter("ignore")
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Input, Conv2D, Permute, BatchNormalization, MaxPooling2D,
Flatten, Activation, Lambda)
import tensorflow.keras.regularizers as regularizers
# Accepted values for user-facing parameters; used by validation helpers below.
VALID_FRONTENDS = ("librosa", "kapre")          # audio preprocessing frontends
VALID_INPUT_REPRS = ("linear", "mel128", "mel256")  # spectrogram representations
VALID_CONTENT_TYPES = ("music", "env")          # training-content variants
VALID_AUDIO_EMBEDDING_SIZES = (6144, 512)       # audio embedding dimensionalities
VALID_IMAGE_EMBEDDING_SIZES = (8192, 512)       # image embedding dimensionalities
def _log10(x):
    '''Base-10 logarithm of ``x`` implemented with TensorFlow ops.'''
    ten = tf.constant(10, dtype=x.dtype)
    return tf.math.log(x) / tf.math.log(ten)
def kapre_v0_1_4_magnitude_to_decibel(x, ref_value=1.0, amin=1e-10, dynamic_range=80.0):
    '''Convert a magnitude spectrogram to decibels (kapre v0.1.4 semantics).

    The result is shifted so each example's maximum is 0 dB, then clipped
    from below at ``-dynamic_range``. ``ref_value`` is accepted for API
    compatibility but unused, matching the original implementation.
    '''
    floor = tf.cast(amin or 1e-10, dtype=x.dtype)
    ndim = K.ndim(x)
    # reduce over all non-batch axes; None when there is nothing to reduce
    reduce_axes = tuple(range(1, ndim)) if ndim > 1 else None
    db = 10. * _log10(K.maximum(x, floor))
    peak = K.max(db, axis=reduce_axes, keepdims=True)
    return K.maximum(db - peak, -dynamic_range)
def __fix_kapre_spec(func):
    '''Adapt a kapre composite spectrogram builder to the kapre v0.1.4 interface.

    The wrapped builder always requests linear magnitudes, optionally appends
    the legacy decibel conversion, and permutes the output from
    (None, t, f, ch) back to the (None, f, t, ch) ordering the models expect.
    '''
    def build(*args, return_decibel=False, **kwargs):
        model = func(*args, return_decibel=False, **kwargs)
        if return_decibel:
            model.add(Lambda(kapre_v0_1_4_magnitude_to_decibel))
        model.add(Permute((2, 1, 3)))
        return model
    return build
def _validate_audio_frontend(frontend='kapre', input_repr=None, model=None):
    '''Validate and resolve the audio frontend / input representation pair.

    Parameters
    ----------
    frontend : "kapre" or "librosa"
        The audio frontend to use.
    input_repr : "linear", "mel128", "mel256", or None
        Spectrogram representation. If None, defaults to "mel256" for the
        kapre frontend; it must be given explicitly for the librosa frontend.
    model : tf.keras.Model or None
        If given, its input rank is checked against the frontend: kapre
        models take raw audio (3 dims), librosa models take precomputed
        spectrograms (4 dims).

    Returns
    -------
    frontend, input_repr : (str, str)
        The validated frontend and resolved input representation.

    Raises
    ------
    OpenL3Error
        If the frontend, input representation, or model input shape is invalid.
    '''
    ndims = len(model.input_shape) if model is not None else None
    if frontend not in VALID_FRONTENDS:
        raise OpenL3Error('Invalid frontend "{}". Must be one of {}'.format(frontend, VALID_FRONTENDS))
    # validate that our model shape matches our frontend.
    if ndims is not None:
        if frontend == 'kapre' and ndims != 3:
            raise OpenL3Error('Invalid model input shape: {}. Expected 3 dims got {}.'.format(model.input_shape, ndims))
        if frontend == 'librosa' and ndims != 4:
            raise OpenL3Error('Invalid model input shape: {}. Expected 4 dims got {}.'.format(model.input_shape, ndims))
    if input_repr is None:
        if frontend == 'librosa':
            raise OpenL3Error('You must specify input_repr for a librosa frontend.')
        input_repr = 'mel256'
    if str(input_repr) not in VALID_INPUT_REPRS:
        raise OpenL3Error('Invalid input representation "{}". Must be one of {}'.format(input_repr, VALID_INPUT_REPRS))
    return frontend, input_repr
# Spatial pool sizes applied to the final audio conv feature map, keyed first
# by input representation and then by target embedding size (see
# load_audio_embedding_model_from_path).
AUDIO_POOLING_SIZES = {
    'linear': {
        6144: (8, 8),
        512: (32, 24),
    },
    'mel128': {
        6144: (4, 8),
        512: (16, 24),
    },
    'mel256': {
        6144: (8, 8),
        512: (32, 24),
    }
}
# Spatial pool sizes for the image model, keyed by embedding size.
IMAGE_POOLING_SIZES = {
    8192: (7, 7),
    512: (28, 28),
}
def load_audio_embedding_model(input_repr, content_type, embedding_size, frontend='kapre'):
    """
    Load the audio embedding model with the given characteristics.

    Resolves the bundled weights file for (input_repr, content_type) and
    delegates to :func:`load_audio_embedding_model_from_path`.

    Parameters
    ----------
    input_repr : "linear", "mel128", or "mel256"
        Spectrogram representation used for audio model.
    content_type : "music" or "env"
        Type of content used to train embedding.
    embedding_size : 6144 or 512
        Embedding dimensionality.
    frontend : "kapre" or "librosa"
        The audio frontend to use. With "kapre" the frontend is built into
        the returned keras model; otherwise no frontend is added.

    Returns
    -------
    model : tf.keras.Model
        Model object.
    """
    weights_path = get_audio_embedding_model_path(input_repr, content_type)
    return load_audio_embedding_model_from_path(
        weights_path, input_repr, embedding_size, frontend=frontend)
def load_audio_embedding_model_from_path(model_path, input_repr, embedding_size, frontend='kapre'):
    """
    Load an audio embedding model from a weights file.

    Parameters
    ----------
    model_path : str
        Path to model weights HDF5 (.h5) file.
    input_repr : "linear", "mel128", or "mel256"
        Spectrogram representation used for audio model.
    embedding_size : 6144 or 512
        Embedding dimensionality.
    frontend : "kapre" or "librosa"
        The audio frontend to use. With "kapre" the frontend is built into
        the returned keras model; otherwise no frontend is added.

    Returns
    -------
    model : tf.keras.Model
        Model object.
    """
    frontend, input_repr = _validate_audio_frontend(frontend, input_repr)
    # Build the backbone and load its weights, silencing TF/Keras warnings.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        model = AUDIO_MODELS[input_repr](include_frontend=frontend == 'kapre')
        model.load_weights(model_path)
    # Pool and flatten the final feature map down to the requested size.
    pooled = MaxPooling2D(
        pool_size=AUDIO_POOLING_SIZES[input_repr][embedding_size],
        padding='same')(model.output)
    embedding = Flatten()(pooled)
    model = Model(inputs=model.input, outputs=embedding)
    model.frontend = frontend
    return model
def get_audio_embedding_model_path(input_repr, content_type):
    """
    Return the local path to the bundled audio model weights file.

    Parameters
    ----------
    input_repr : "linear", "mel128", or "mel256"
        Spectrogram representation used for model.
    content_type : "music" or "env"
        Type of content used to train embedding.

    Returns
    -------
    output_path : str
        Path to the weights file, next to this module.
    """
    weights_name = 'openl3_audio_{}_{}.h5'.format(input_repr, content_type)
    return os.path.join(os.path.dirname(__file__), weights_name)
def load_image_embedding_model(input_repr, content_type, embedding_size):
    """
    Load the image embedding model with the given characteristics.

    Resolves the bundled weights file for (input_repr, content_type) and
    delegates to :func:`load_image_embedding_model_from_path`.

    Parameters
    ----------
    input_repr : "linear", "mel128", or "mel256"
        Spectrogram representation used for audio model.
    content_type : "music" or "env"
        Type of content used to train embedding.
    embedding_size : 8192 or 512
        Embedding dimensionality.

    Returns
    -------
    model : tf.keras.Model
        Model object.
    """
    weights_path = get_image_embedding_model_path(input_repr, content_type)
    return load_image_embedding_model_from_path(weights_path, embedding_size)
def load_image_embedding_model_from_path(model_path, embedding_size):
    """
    Loads an image model with weights at the given path.

    Parameters
    ----------
    model_path : str
        Path to model weights HDF5 (.h5) file.
    embedding_size : 8192 or 512
        Embedding dimensionality.

    Returns
    -------
    model : tf.keras.Model
        Model object.
    """
    # Construct embedding model and load model weights
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        m = _construct_image_network()
        m.load_weights(model_path)
    # Pooling for final output embedding size
    pool_size = IMAGE_POOLING_SIZES[embedding_size]
    y_i = MaxPooling2D(pool_size=pool_size, padding='same')(m.output)
    y_i = Flatten()(y_i)
    m = Model(inputs=m.input, outputs=y_i)
    return m
def get_image_embedding_model_path(input_repr, content_type):
    """
    Return the local path to the bundled image model weights file.

    Parameters
    ----------
    input_repr : "linear", "mel128", or "mel256"
        Spectrogram representation used for model.
    content_type : "music" or "env"
        Type of content used to train embedding.

    Returns
    -------
    output_path : str
        Path to the weights file, next to this module.
    """
    weights_name = 'openl3_image_{}_{}.h5'.format(input_repr, content_type)
    return os.path.join(os.path.dirname(__file__), weights_name)
def _construct_linear_audio_network(include_frontend=True):
    """
    Returns an uninitialized model object for an audio network with a linear
    spectrogram input (with 257 frequency bins).

    Parameters
    ----------
    include_frontend : bool
        If True, prepend a kapre STFT-magnitude frontend so the model takes
        raw audio of shape (1, 48000); otherwise the model expects a
        precomputed spectrogram (e.g. from librosa).

    Returns
    -------
    model : tf.keras.Model
        Model object. The final conv layer is named 'audio_embedding_layer';
        pooling/flattening to the embedding size is applied by the loader.
    """
    weight_decay = 1e-5
    n_dft = 512
    n_hop = 242
    asr = 48000  # audio sample rate (Hz)
    audio_window_dur = 1  # analysis window duration (seconds)
    if include_frontend:
        # INPUT
        input_shape = (1, asr * audio_window_dur)
        x_a = Input(shape=input_shape, dtype='float32')
        # SPECTROGRAM PREPROCESSING
        # 257 x 197 x 1
        from kapre.composed import get_stft_magnitude_layer
        spec = __fix_kapre_spec(get_stft_magnitude_layer)(
            input_shape=input_shape,
            n_fft=n_dft, hop_length=n_hop, return_decibel=True,
            input_data_format='channels_first',
            output_data_format='channels_last')
        y_a = spec(x_a)
    else: # NOTE: asr - n_dft because we're not padding (I think?)
        input_shape = (n_dft // 2 + 1, int(np.ceil((asr - n_dft) * audio_window_dur / n_hop)), 1)
        x_a = y_a = Input(shape=input_shape, dtype='float32')
    y_a = BatchNormalization()(y_a)
    # CONV BLOCK 1
    n_filter_a_1 = 64
    filt_size_a_1 = (3, 3)
    pool_size_a_1 = (2, 2)
    y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = MaxPooling2D(pool_size=pool_size_a_1, strides=2)(y_a)
    # CONV BLOCK 2
    n_filter_a_2 = 128
    filt_size_a_2 = (3, 3)
    pool_size_a_2 = (2, 2)
    y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = MaxPooling2D(pool_size=pool_size_a_2, strides=2)(y_a)
    # CONV BLOCK 3
    n_filter_a_3 = 256
    filt_size_a_3 = (3, 3)
    pool_size_a_3 = (2, 2)
    y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = MaxPooling2D(pool_size=pool_size_a_3, strides=2)(y_a)
    # CONV BLOCK 4 (no pooling here; the loader pools to the embedding size)
    n_filter_a_4 = 512
    filt_size_a_4 = (3, 3)
    y_a = Conv2D(n_filter_a_4, filt_size_a_4, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_4, filt_size_a_4,
               kernel_initializer='he_normal',
               name='audio_embedding_layer', padding='same',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    m = Model(inputs=x_a, outputs=y_a)
    return m
def _construct_mel128_audio_network(include_frontend=True):
    """
    Returns an uninitialized model object for an audio network with a Mel
    spectrogram input (with 128 frequency bins).

    Parameters
    ----------
    include_frontend : bool
        If True, prepend a kapre mel-spectrogram frontend so the model takes
        raw audio of shape (1, 48000); otherwise the model expects a
        precomputed mel spectrogram (e.g. from librosa).

    Returns
    -------
    model : tf.keras.Model
        Model object. The final conv layer is named 'audio_embedding_layer';
        pooling/flattening to the embedding size is applied by the loader.
    """
    weight_decay = 1e-5
    n_dft = 2048
    n_mels = 128
    n_hop = 242
    asr = 48000  # audio sample rate (Hz)
    audio_window_dur = 1  # analysis window duration (seconds)
    if include_frontend:
        # INPUT
        input_shape = (1, asr * audio_window_dur)
        x_a = Input(shape=input_shape, dtype='float32')
        # MELSPECTROGRAM PREPROCESSING
        # 128 x 199 x 1
        from kapre.composed import get_melspectrogram_layer
        spec = __fix_kapre_spec(get_melspectrogram_layer)(
            input_shape=input_shape,
            n_fft=n_dft, hop_length=n_hop, n_mels=n_mels,
            sample_rate=asr, return_decibel=True, pad_end=True,
            input_data_format='channels_first',
            output_data_format='channels_last')
        y_a = spec(x_a)
    else:
        input_shape = (n_mels, int(np.ceil(asr * audio_window_dur / n_hop)), 1)
        x_a = y_a = Input(shape=input_shape, dtype='float32')
    y_a = BatchNormalization()(y_a)
    # CONV BLOCK 1
    n_filter_a_1 = 64
    filt_size_a_1 = (3, 3)
    pool_size_a_1 = (2, 2)
    y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = MaxPooling2D(pool_size=pool_size_a_1, strides=2)(y_a)
    # CONV BLOCK 2
    n_filter_a_2 = 128
    filt_size_a_2 = (3, 3)
    pool_size_a_2 = (2, 2)
    y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = MaxPooling2D(pool_size=pool_size_a_2, strides=2)(y_a)
    # CONV BLOCK 3
    n_filter_a_3 = 256
    filt_size_a_3 = (3, 3)
    pool_size_a_3 = (2, 2)
    y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = MaxPooling2D(pool_size=pool_size_a_3, strides=2)(y_a)
    # CONV BLOCK 4 (no pooling here; the loader pools to the embedding size.
    # An unused `pool_size_a_4` local was removed for consistency with the
    # linear and mel256 builders.)
    n_filter_a_4 = 512
    filt_size_a_4 = (3, 3)
    y_a = Conv2D(n_filter_a_4, filt_size_a_4, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_4, filt_size_a_4,
               kernel_initializer='he_normal',
               name='audio_embedding_layer', padding='same',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    m = Model(inputs=x_a, outputs=y_a)
    return m
def _construct_mel256_audio_network(include_frontend=True):
    """
    Returns an uninitialized model object for an audio network with a Mel
    spectrogram input (with 256 frequency bins).

    Parameters
    ----------
    include_frontend : bool
        If True, prepend a kapre mel-spectrogram frontend so the model takes
        raw audio of shape (1, 48000); otherwise the model expects a
        precomputed mel spectrogram (e.g. from librosa).

    Returns
    -------
    model : tf.keras.Model
        Model object. The final conv layer is named 'audio_embedding_layer';
        pooling/flattening to the embedding size is applied by the loader.
    """
    weight_decay = 1e-5
    n_dft = 2048
    n_mels = 256
    n_hop = 242
    asr = 48000  # audio sample rate (Hz)
    audio_window_dur = 1  # analysis window duration (seconds)
    if include_frontend:
        # INPUT
        input_shape = (1, asr * audio_window_dur)
        x_a = Input(shape=input_shape, dtype='float32')
        # MELSPECTROGRAM PREPROCESSING
        # 256 x 199 x 1
        from kapre.composed import get_melspectrogram_layer
        spec = __fix_kapre_spec(get_melspectrogram_layer)(
            input_shape=input_shape,
            n_fft=n_dft, hop_length=n_hop, n_mels=n_mels,
            sample_rate=asr, return_decibel=True, pad_end=True,
            input_data_format='channels_first',
            output_data_format='channels_last')
        y_a = spec(x_a)
    else:
        input_shape = (n_mels, int(np.ceil(asr * audio_window_dur / n_hop)), 1)
        x_a = y_a = Input(shape=input_shape, dtype='float32')
    y_a = BatchNormalization()(y_a)
    # CONV BLOCK 1
    n_filter_a_1 = 64
    filt_size_a_1 = (3, 3)
    pool_size_a_1 = (2, 2)
    y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_1, filt_size_a_1, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = MaxPooling2D(pool_size=pool_size_a_1, strides=2)(y_a)
    # CONV BLOCK 2
    n_filter_a_2 = 128
    filt_size_a_2 = (3, 3)
    pool_size_a_2 = (2, 2)
    y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_2, filt_size_a_2, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = MaxPooling2D(pool_size=pool_size_a_2, strides=2)(y_a)
    # CONV BLOCK 3
    n_filter_a_3 = 256
    filt_size_a_3 = (3, 3)
    pool_size_a_3 = (2, 2)
    y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_3, filt_size_a_3, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = MaxPooling2D(pool_size=pool_size_a_3, strides=2)(y_a)
    # CONV BLOCK 4 (no pooling here; the loader pools to the embedding size)
    n_filter_a_4 = 512
    filt_size_a_4 = (3, 3)
    y_a = Conv2D(n_filter_a_4, filt_size_a_4, padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    y_a = BatchNormalization()(y_a)
    y_a = Activation('relu')(y_a)
    y_a = Conv2D(n_filter_a_4, filt_size_a_4,
               kernel_initializer='he_normal',
               name='audio_embedding_layer', padding='same',
               kernel_regularizer=regularizers.l2(weight_decay))(y_a)
    m = Model(inputs=x_a, outputs=y_a)
    return m
def _construct_image_network():
    """
    Returns an uninitialized model object for an image network.

    The network is a VGG-style stack of four convolutional blocks
    (64 -> 128 -> 256 -> 512 filters). Blocks 1-3 end in a stride-2 max
    pool; block 4 ends in the 'vision_embedding_layer' convolution with
    no normalization, activation, or pooling after it.

    Returns
    -------
    model : tf.keras.Model
        Model object.
    """
    weight_decay = 1e-5
    im_height = 224
    im_width = 224
    num_channels = 3

    def conv(y, n_filters, **kwargs):
        # All convolutions share 3x3 kernels, 'same' padding, He-normal
        # initialization, and L2 weight decay.
        return Conv2D(n_filters, (3, 3), padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(weight_decay),
                      **kwargs)(y)

    def conv_bn_relu(y, n_filters):
        # Standard unit used throughout: convolution -> batch norm -> ReLU.
        return Activation('relu')(BatchNormalization()(conv(y, n_filters)))

    x_i = Input(shape=(im_height, im_width, num_channels), dtype='float32')
    y_i = BatchNormalization()(x_i)

    # CONV BLOCK 1
    y_i = conv_bn_relu(y_i, 64)
    # NOTE(review): the original applies ReLU *before* batch norm in this
    # one unit, opposite of every other unit in the file. Preserved as-is
    # because flipping it would change the layer order (and thus weight
    # compatibility with any pretrained checkpoint) — confirm before fixing.
    y_i = conv(y_i, 64)
    y_i = BatchNormalization()(Activation('relu')(y_i))
    y_i = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same')(y_i)

    # CONV BLOCK 2
    y_i = conv_bn_relu(y_i, 128)
    y_i = conv_bn_relu(y_i, 128)
    y_i = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same')(y_i)

    # CONV BLOCK 3
    y_i = conv_bn_relu(y_i, 256)
    y_i = conv_bn_relu(y_i, 256)
    y_i = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same')(y_i)

    # CONV BLOCK 4 — no pooling here; the (unused) pool_size_i_4 constant
    # from the original has been dropped. The final conv is the embedding.
    y_i = conv_bn_relu(y_i, 512)
    y_i = conv(y_i, 512, name='vision_embedding_layer')

    return Model(inputs=x_i, outputs=y_i)
# Registry mapping each supported audio input representation to the
# factory that constructs the corresponding (uninitialized) network.
AUDIO_MODELS = dict(
    linear=_construct_linear_audio_network,
    mel128=_construct_mel128_audio_network,
    mel256=_construct_mel256_audio_network,
)
| [
"tensorflow.math.log",
"tensorflow.keras.backend.ndim",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.cast",
"tensorflow.keras.layers.Input",
"tensorflow.keras.backend.maximum",
"tensorflow.keras.layers.Permute",
"tensorflow.keras.backend.max",
"warnings.simplefilter",
"numpy.ceil",
... | [((123, 148), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (146, 148), False, 'import warnings\n'), ((206, 237), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (227, 237), False, 'import warnings\n'), ((1028, 1065), 'tensorflow.cast', 'tf.cast', (['(amin or 1e-10)'], {'dtype': 'x.dtype'}), '(amin or 1e-10, dtype=x.dtype)\n', (1035, 1065), True, 'import tensorflow as tf\n'), ((6179, 6213), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'm.input', 'outputs': 'y_a'}), '(inputs=m.input, outputs=y_a)\n', (6184, 6213), False, 'from tensorflow.keras import Model\n'), ((8623, 8657), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'm.input', 'outputs': 'y_i'}), '(inputs=m.input, outputs=y_i)\n', (8628, 8657), False, 'from tensorflow.keras import Model\n'), ((13037, 13067), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'x_a', 'outputs': 'y_a'}), '(inputs=x_a, outputs=y_a)\n', (13042, 13067), False, 'from tensorflow.keras import Model\n'), ((16871, 16901), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'x_a', 'outputs': 'y_a'}), '(inputs=x_a, outputs=y_a)\n', (16876, 16901), False, 'from tensorflow.keras import Model\n'), ((20670, 20700), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'x_a', 'outputs': 'y_a'}), '(inputs=x_a, outputs=y_a)\n', (20675, 20700), False, 'from tensorflow.keras import Model\n'), ((20997, 21062), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(im_height, im_width, num_channels)', 'dtype': '"""float32"""'}), "(shape=(im_height, im_width, num_channels), dtype='float32')\n", (21002, 21062), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((23739, 23769), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'x_i', 'outputs': 'y_i'}), '(inputs=x_i, outputs=y_i)\n', (23744, 23769), False, 'from tensorflow.keras import Model\n'), ((828, 842), 'tensorflow.math.log', 
'tf.math.log', (['x'], {}), '(x)\n', (839, 842), True, 'import tensorflow as tf\n'), ((5791, 5816), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5814, 5816), False, 'import warnings\n'), ((5826, 5857), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (5847, 5857), False, 'import warnings\n'), ((6086, 6135), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size', 'padding': '"""same"""'}), "(pool_size=pool_size, padding='same')\n", (6098, 6135), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((6156, 6165), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6163, 6165), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((6757, 6782), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6772, 6782), False, 'import os\n'), ((8283, 8308), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (8306, 8308), False, 'import warnings\n'), ((8318, 8349), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (8339, 8349), False, 'import warnings\n'), ((8530, 8579), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size', 'padding': '"""same"""'}), "(pool_size=pool_size, padding='same')\n", (8542, 8579), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((8600, 8609), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8607, 8609), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((9175, 9200), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9190, 9200), False, 'import os\n'), 
((9765, 9806), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'dtype': '"""float32"""'}), "(shape=input_shape, dtype='float32')\n", (9770, 9806), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((10396, 10437), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'dtype': '"""float32"""'}), "(shape=input_shape, dtype='float32')\n", (10401, 10437), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((10449, 10469), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (10467, 10469), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((10764, 10784), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (10782, 10784), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((10800, 10818), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10810, 10818), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((11017, 11037), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11035, 11037), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((11053, 11071), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11063, 11071), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((11087, 11135), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_a_1', 
'strides': '(2)'}), '(pool_size=pool_size_a_1, strides=2)\n', (11099, 11135), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((11431, 11451), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11449, 11451), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((11467, 11485), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11477, 11485), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((11684, 11704), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11702, 11704), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((11720, 11738), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11730, 11738), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((11754, 11802), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_a_2', 'strides': '(2)'}), '(pool_size=pool_size_a_2, strides=2)\n', (11766, 11802), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((12098, 12118), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (12116, 12118), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((12134, 12152), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (12144, 12152), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, 
BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((12351, 12371), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (12369, 12371), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((12387, 12405), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (12397, 12405), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((12421, 12469), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_a_3', 'strides': '(2)'}), '(pool_size=pool_size_a_3, strides=2)\n', (12433, 12469), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((12738, 12758), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (12756, 12758), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((12774, 12792), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (12784, 12792), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((13584, 13625), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'dtype': '"""float32"""'}), "(shape=input_shape, dtype='float32')\n", (13589, 13625), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((14201, 14242), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'dtype': '"""float32"""'}), "(shape=input_shape, dtype='float32')\n", (14206, 14242), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, 
Flatten, Activation, Lambda\n'), ((14254, 14274), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (14272, 14274), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((14569, 14589), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (14587, 14589), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((14605, 14623), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (14615, 14623), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((14822, 14842), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (14840, 14842), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((14858, 14876), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (14868, 14876), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((14892, 14940), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_a_1', 'strides': '(2)'}), '(pool_size=pool_size_a_1, strides=2)\n', (14904, 14940), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((15236, 15256), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (15254, 15256), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((15272, 15290), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (15282, 
15290), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((15489, 15509), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (15507, 15509), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((15525, 15543), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (15535, 15543), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((15559, 15607), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_a_2', 'strides': '(2)'}), '(pool_size=pool_size_a_2, strides=2)\n', (15571, 15607), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((15903, 15923), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (15921, 15923), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((15939, 15957), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (15949, 15957), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((16156, 16176), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (16174, 16176), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((16192, 16210), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (16202, 16210), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), 
((16226, 16274), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_a_3', 'strides': '(2)'}), '(pool_size=pool_size_a_3, strides=2)\n', (16238, 16274), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((16572, 16592), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (16590, 16592), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((16608, 16626), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (16618, 16626), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((17412, 17453), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'dtype': '"""float32"""'}), "(shape=input_shape, dtype='float32')\n", (17417, 17453), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((18028, 18069), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'dtype': '"""float32"""'}), "(shape=input_shape, dtype='float32')\n", (18033, 18069), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((18082, 18102), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (18100, 18102), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((18397, 18417), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (18415, 18417), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((18433, 18451), 
'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18443, 18451), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((18650, 18670), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (18668, 18670), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((18686, 18704), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18696, 18704), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((18720, 18768), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_a_1', 'strides': '(2)'}), '(pool_size=pool_size_a_1, strides=2)\n', (18732, 18768), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((19064, 19084), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (19082, 19084), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((19100, 19118), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (19110, 19118), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((19317, 19337), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (19335, 19337), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((19353, 19371), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (19363, 19371), False, 'from tensorflow.keras.layers import 
Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((19387, 19435), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_a_2', 'strides': '(2)'}), '(pool_size=pool_size_a_2, strides=2)\n', (19399, 19435), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((19731, 19751), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (19749, 19751), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((19767, 19785), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (19777, 19785), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((19984, 20004), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (20002, 20004), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((20020, 20038), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (20030, 20038), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((20054, 20102), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_a_3', 'strides': '(2)'}), '(pool_size=pool_size_a_3, strides=2)\n', (20066, 20102), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((20371, 20391), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (20389, 20391), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, 
Lambda\n'), ((20407, 20425), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (20417, 20425), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((21073, 21093), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (21091, 21093), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((21388, 21408), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (21406, 21408), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((21424, 21442), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21434, 21442), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((21641, 21659), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21651, 21659), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((21675, 21695), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (21693, 21695), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((21711, 21775), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_i_1', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=pool_size_i_1, strides=2, padding='same')\n", (21723, 21775), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((22071, 22091), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), 
'()\n', (22089, 22091), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((22107, 22125), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (22117, 22125), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((22324, 22344), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (22342, 22344), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((22360, 22378), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (22370, 22378), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((22394, 22458), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_i_2', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=pool_size_i_2, strides=2, padding='same')\n", (22406, 22458), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((22754, 22774), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (22772, 22774), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((22790, 22808), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (22800, 22808), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((23007, 23027), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (23025, 23027), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, 
BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((23043, 23061), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (23053, 23061), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((23077, 23141), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': 'pool_size_i_3', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=pool_size_i_3, strides=2, padding='same')\n", (23089, 23141), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((23439, 23459), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (23457, 23459), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((23475, 23493), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (23485, 23493), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((857, 887), 'tensorflow.constant', 'tf.constant', (['(10)'], {'dtype': 'x.dtype'}), '(10, dtype=x.dtype)\n', (868, 887), True, 'import tensorflow as tf\n'), ((1145, 1163), 'tensorflow.keras.backend.maximum', 'K.maximum', (['x', 'amin'], {}), '(x, amin)\n', (1154, 1163), True, 'import tensorflow.keras.backend as K\n'), ((1206, 1251), 'tensorflow.keras.backend.max', 'K.max', (['log_spec'], {'axis': 'max_axis', 'keepdims': '(True)'}), '(log_spec, axis=max_axis, keepdims=True)\n', (1211, 1251), True, 'import tensorflow.keras.backend as K\n'), ((1586, 1604), 'tensorflow.keras.layers.Permute', 'Permute', (['(2, 1, 3)'], {}), '((2, 1, 3))\n', (1593, 1604), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((1527, 
1568), 'tensorflow.keras.layers.Lambda', 'Lambda', (['kapre_v0_1_4_magnitude_to_decibel'], {}), '(kapre_v0_1_4_magnitude_to_decibel)\n', (1533, 1568), False, 'from tensorflow.keras.layers import Input, Conv2D, Permute, BatchNormalization, MaxPooling2D, Flatten, Activation, Lambda\n'), ((10321, 10370), 'numpy.ceil', 'np.ceil', (['((asr - n_dft) * audio_window_dur / n_hop)'], {}), '((asr - n_dft) * audio_window_dur / n_hop)\n', (10328, 10370), True, 'import numpy as np\n'), ((10718, 10747), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (10733, 10747), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((10971, 11000), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (10986, 11000), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((11385, 11414), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (11400, 11414), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((11638, 11667), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (11653, 11667), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((12052, 12081), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (12067, 12081), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((12305, 12334), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (12320, 12334), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((12692, 12721), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (12707, 12721), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((12992, 13021), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (13007, 13021), True, 
'import tensorflow.keras.regularizers as regularizers\n'), ((14136, 14175), 'numpy.ceil', 'np.ceil', (['(asr * audio_window_dur / n_hop)'], {}), '(asr * audio_window_dur / n_hop)\n', (14143, 14175), True, 'import numpy as np\n'), ((14523, 14552), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (14538, 14552), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((14776, 14805), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (14791, 14805), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((15190, 15219), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (15205, 15219), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((15443, 15472), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (15458, 15472), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((15857, 15886), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (15872, 15886), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((16110, 16139), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (16125, 16139), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((16526, 16555), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (16541, 16555), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((16826, 16855), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (16841, 16855), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((17963, 18002), 'numpy.ceil', 'np.ceil', (['(asr * audio_window_dur / n_hop)'], {}), '(asr * audio_window_dur / n_hop)\n', (17970, 18002), True, 'import numpy as np\n'), ((18351, 18380), 
'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (18366, 18380), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((18604, 18633), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (18619, 18633), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((19018, 19047), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (19033, 19047), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((19271, 19300), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (19286, 19300), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((19685, 19714), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (19700, 19714), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((19938, 19967), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (19953, 19967), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((20325, 20354), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (20340, 20354), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((20625, 20654), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (20640, 20654), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((21342, 21371), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (21357, 21371), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((21595, 21624), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (21610, 21624), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((22025, 22054), 
'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (22040, 22054), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((22278, 22307), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (22293, 22307), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((22708, 22737), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (22723, 22737), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((22961, 22990), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (22976, 22990), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((23393, 23422), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (23408, 23422), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((23694, 23723), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (23709, 23723), True, 'import tensorflow.keras.regularizers as regularizers\n'), ((1093, 1102), 'tensorflow.keras.backend.ndim', 'K.ndim', (['x'], {}), '(x)\n', (1099, 1102), True, 'import tensorflow.keras.backend as K\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import scipy.interpolate
def signal_interpolate(x_values, y_values, x_new=None, method="quadratic"):
    """**Interpolate a signal**

    Resample ``y_values`` (sampled at ``x_values``) onto new sample
    positions ``x_new``.

    Parameters
    ----------
    x_values : Union[list, np.array, pd.Series]
        The samples corresponding to the values to be interpolated.
    y_values : Union[list, np.array, pd.Series]
        The values to be interpolated.
    x_new : Union[list, np.array, pd.Series] or int
        The samples at which to interpolate. Samples outside the range of
        ``x_values`` are extrapolated. If an integer is passed, it is treated
        as the desired number of evenly spaced samples between the first and
        last entries of ``x_values`` (no extrapolation in that case).
    method : str
        Interpolation method: ``"linear"``, ``"nearest"``, ``"zero"``,
        ``"slinear"``, ``"quadratic"``, ``"cubic"``, ``"previous"``,
        ``"next"`` or ``"monotone_cubic"``. The spline names refer to spline
        interpolation of the corresponding order; ``"previous"``/``"next"``
        return the neighbouring value. ``"monotone_cubic"`` uses
        ``scipy.interpolate.PchipInterpolator``.

    Returns
    -------
    array
        Vector of interpolated samples.
    """
    # Sanity checks
    if len(x_values) != len(y_values):
        raise ValueError(
            "NeuroKit error: signal_interpolate(): x_values and y_values must be of the same length."
        )

    # Nothing to do when the requested grid already has the input's length:
    # return the data untouched, exactly as provided.
    if isinstance(x_new, int):
        if len(x_values) == x_new:
            return y_values
    elif len(x_values) == len(x_new):
        return y_values

    if method == "monotone_cubic":
        interpolator = scipy.interpolate.PchipInterpolator(
            x_values, y_values, extrapolate=True
        )
    else:
        interpolator = scipy.interpolate.interp1d(
            x_values,
            y_values,
            kind=method,
            bounds_error=False,
            fill_value=([y_values[0]], [y_values[-1]]),
        )

    if isinstance(x_new, int):
        # Integer request: that many evenly spaced samples spanning x_values.
        x_new = np.linspace(x_values[0], x_values[-1], x_new)

    interpolated = interpolator(x_new)

    if method == "monotone_cubic":
        # PchipInterpolator extrapolates with cubics; clamp the out-of-range
        # segments to the boundary values so behavior matches interp1d with
        # fill_value=([y_values[0]], [y_values[-1]]).
        first = int(x_values[0])
        last = int(x_values[-1])
        interpolated[:first] = interpolated[first]
        interpolated[last:] = interpolated[last]

    return interpolated
| [
"numpy.linspace"
] | [((3929, 3974), 'numpy.linspace', 'np.linspace', (['x_values[0]', 'x_values[-1]', 'x_new'], {}), '(x_values[0], x_values[-1], x_new)\n', (3940, 3974), True, 'import numpy as np\n')] |
import os
import numpy as np
import cv2
import copy
class ImageProcessing:
    """Load a directory tree of images, resize them to a fixed shape, and
    produce normalised float32 copies in [0, 1] with matching labels.

    The label of each image is the basename of the directory it lives in.
    """

    def __init__(self, shape):
        # Raw data accumulated by loadImages().
        self.images = []
        self.labels = []
        self.filenames = []
        # Normalised copies produced by normaliseImages().
        self.images_norm = []
        self.labels_norm = []
        # Target (width, height) for cv2.resize.
        self.shape = (shape[0], shape[1])

    def loadImages(self, dir_name):
        """Walk dir_name, reading every file as an RGB image resized to self.shape."""
        for dirname, _, filenames in os.walk(dir_name):
            # The enclosing directory name serves as the class label.
            label = os.path.basename(dirname)
            for filename in filenames:
                self.filenames.append(filename)
                self.labels.append(label)
                img = cv2.imread(os.path.join(dirname, filename))
                img = cv2.resize(img, self.shape, interpolation=cv2.INTER_LANCZOS4)
                # OpenCV reads BGR; convert to the conventional RGB order.
                self.images.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        print("### ", len(self.images), "images loaded")

    def normaliseImages(self):
        """Convert loaded images to float32 scaled to [0, 1] and snapshot labels."""
        pixels = np.array(self.images, dtype=np.float32) / 255
        self.images_norm = copy.deepcopy(pixels)
        self.labels_norm = copy.deepcopy(np.array(self.labels))
        print("### Data shape: ", self.images_norm.shape)

    def returnData(self):
        """Return (normalised images, normalised labels)."""
        return self.images_norm, self.labels_norm

    def returnFilenames(self):
        """Return the list of filenames in load order."""
        return self.filenames
if __name__ == "__main__":
    # Smoke test: load every image under ./test_images resized to 128x128,
    # then normalise them to float32 values in [0, 1].
    ImgProc = ImageProcessing((128, 128))
    ImgProc.loadImages("./test_images")
    ImgProc.normaliseImages()
| [
"os.path.join",
"numpy.array",
"os.path.basename",
"cv2.cvtColor",
"copy.deepcopy",
"cv2.resize",
"os.walk"
] | [((383, 400), 'os.walk', 'os.walk', (['dir_name'], {}), '(dir_name)\n', (390, 400), False, 'import os\n'), ((931, 970), 'numpy.array', 'np.array', (['self.images'], {'dtype': 'np.float32'}), '(self.images, dtype=np.float32)\n', (939, 970), True, 'import numpy as np\n'), ((989, 1010), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (997, 1010), True, 'import numpy as np\n'), ((1068, 1089), 'copy.deepcopy', 'copy.deepcopy', (['images'], {}), '(images)\n', (1081, 1089), False, 'import copy\n'), ((1118, 1139), 'copy.deepcopy', 'copy.deepcopy', (['labels'], {}), '(labels)\n', (1131, 1139), False, 'import copy\n'), ((650, 713), 'cv2.resize', 'cv2.resize', (['image', 'self.shape'], {'interpolation': 'cv2.INTER_LANCZOS4'}), '(image, self.shape, interpolation=cv2.INTER_LANCZOS4)\n', (660, 713), False, 'import cv2\n'), ((739, 777), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (751, 777), False, 'import cv2\n'), ((528, 553), 'os.path.basename', 'os.path.basename', (['dirname'], {}), '(dirname)\n', (544, 553), False, 'import os\n'), ((592, 623), 'os.path.join', 'os.path.join', (['dirname', 'filename'], {}), '(dirname, filename)\n', (604, 623), False, 'import os\n')] |
#!/usr/bin/python
import os
import json
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader, Subset
from torch.utils.data.sampler import SubsetRandomSampler
import dysts
from dysts.utils import find_significant_frequencies
from dysts.flows import *
from dysts.base import *
from resources.classification_models import Autoencoder, TimeSeriesCollection
import sktime.datasets
from sktime.transformations.panel.tsfresh import TSFreshFeatureExtractor
from sklearn.linear_model import RidgeClassifierCV
from sktime.utils.data_processing import from_nested_to_3d_numpy, from_3d_numpy_to_nested
# ---------------------------------------------------------------------------
# Transfer-learning experiment: for every UCR/UEA dataset, build a surrogate
# ensemble of chaotic-flow trajectories resampled to the dataset's dominant
# frequency, train an autoencoder on that ensemble, encode the dataset with
# it, featurize with tsfresh, and score a ridge classifier. Results are
# accumulated in results/transfer_learning.json so interrupted runs resume.
# ---------------------------------------------------------------------------
all_scores = dict()
np.random.seed(0)  # reproducible window subsampling
attractor_list = get_attractor_list()
SEQUENCE_LENGTH = 100   # window length fed to the autoencoder
BATCH_SUBSAMPLE = 10000  # number of windows drawn from the surrogate ensemble
# BATCH_SUBSAMPLE = 5000
# attractor_list = np.random.choice(attractor_list, 40)
# attractor_list = np.random.choice(attractor_list, 80)
# cwd = os.getcwd()
cwd = os.path.dirname(os.path.realpath(__file__))
output_path = cwd + "/results/transfer_learning.json"
print("Saving data to: ", output_path)
dataset_names = np.genfromtxt(cwd + "/resources/ucr_ea_names.txt", dtype='str')
# Resume from a previous (possibly partial) run if the results file exists.
try:
    with open(output_path, "r") as file:
        all_scores = json.load(file)
except FileNotFoundError:
    all_scores = dict()
for data_ind, name in enumerate(dataset_names):
    # Skip datasets that already have a recorded transfer score.
    if name in all_scores.keys():
        if "score_transfer" in all_scores[name].keys():
            print("Skipped " + name, flush=True)
            continue
    print("Evaluating " + name, flush=True)
    all_scores[name] = dict()
    X_train, y_train = sktime.datasets.load_UCR_UEA_dataset(name, split="train", return_X_y=True)
    X_test, y_test = sktime.datasets.load_UCR_UEA_dataset(name, split="test", return_X_y=True)
    X_train_np = from_nested_to_3d_numpy(X_train)
    X_test_np = from_nested_to_3d_numpy(X_test)
    # Standardize each series to zero mean / unit variance along time.
    X_train_np -= np.mean(X_train_np, axis=-1, keepdims=True)
    X_train_np /= np.std(X_train_np, axis=-1, keepdims=True)
    X_test_np -= np.mean(X_test_np, axis=-1, keepdims=True)
    X_test_np /= np.std(X_test_np, axis=-1, keepdims=True)
    ## Find dominant frequency
    all_freqs = list()
    for row in X_train_np:
        # NOTE(review): only the first channel (row[0]) is examined --
        # presumably the datasets are univariate; confirm for multivariate sets.
        freqs, amps = find_significant_frequencies(row[0], return_amplitudes=True)
        sort_inds = np.argsort(amps)[::-1]  # strongest frequency first
        freqs, amps = freqs[sort_inds], amps[sort_inds]
        try:
            all_freqs.append(freqs[0])
        except IndexError: pass  # no significant frequency for this series
    main_freq = np.median(all_freqs)
    main_period = 2 * 1/main_freq
    print("Finished finding dominant frequency.", flush=True)
    ## Create trajectory ensemble at that random frequency
    all_sols = list()
    for equation_ind, equation_name in enumerate(attractor_list):
        equation = getattr(dysts.flows, equation_name)()
        sol = equation.make_trajectory(1000, resample=True, pts_per_period=int(main_period))
        if len(sol) < 10: # skip undersampled trajectories
            continue
        all_sols.append(standardize_ts(sol)[:, 0])  # keep first coordinate only
    all_sols = np.array(all_sols).T  # shape: (time, n_attractors)
    print("Finished computing surrogate ensemble.", flush=True)
    ## Train model on ensemble
    model = Autoencoder()
    training_data = TimeSeriesCollection(all_sols, SEQUENCE_LENGTH)
    subset_indices = np.random.choice(np.arange(0, len(training_data)), BATCH_SUBSAMPLE, replace=False) # subsample all traj
    train_dataloader = DataLoader(Subset(training_data, subset_indices), batch_size=64, shuffle=True)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    for epoch in range(200): # loop over the dataset multiple times
        running_loss = 0.0
        for i, data in enumerate(train_dataloader, 0):
            # NOTE(review): the batch's second element is bound to `outputs`
            # and immediately overwritten by the reconstruction -- it is unused.
            inputs, outputs = data
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(inputs, outputs)  # reconstruction loss
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
    print("Finished training autoencoder.", flush=True)
    # Encode both splits with the trained encoder, then extract tsfresh features.
    X_train_nn = from_3d_numpy_to_nested(model.encoder(torch.tensor(X_train_np, dtype=torch.float32)).detach().numpy())
    X_test_nn = from_3d_numpy_to_nested(model.encoder(torch.tensor(X_test_np, dtype=torch.float32)).detach().numpy())
    transformer = TSFreshFeatureExtractor(show_warnings=False)
    X_train_featurized = transformer.fit_transform(X_train_nn)
    X_test_featurized = transformer.fit_transform(X_test_nn)
    model = RidgeClassifierCV(alphas = np.logspace(-3, 3, 10), normalize = True)
    model.fit(X_train_featurized, y_train)
    # NOTE(review): the test score is computed twice (here and on the next
    # line); the stored value comes from the second, identical call.
    score = model.score(X_test_featurized, y_test)
    all_scores[name]["score_transfer"] = model.score(X_test_featurized, y_test)
    print(name, score, flush=True)
    # Persist after every dataset so progress survives interruption.
    with open(output_path, 'w') as file:
        json.dump(all_scores, file, indent=4)
| [
"numpy.argsort",
"torch.nn.MSELoss",
"dysts.utils.find_significant_frequencies",
"numpy.array",
"sktime.utils.data_processing.from_nested_to_3d_numpy",
"numpy.genfromtxt",
"numpy.mean",
"resources.classification_models.Autoencoder",
"numpy.random.seed",
"numpy.logspace",
"resources.classificatio... | [((682, 699), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (696, 699), True, 'import numpy as np\n'), ((1103, 1166), 'numpy.genfromtxt', 'np.genfromtxt', (["(cwd + '/resources/ucr_ea_names.txt')"], {'dtype': '"""str"""'}), "(cwd + '/resources/ucr_ea_names.txt', dtype='str')\n", (1116, 1166), True, 'import numpy as np\n'), ((965, 991), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (981, 991), False, 'import os\n'), ((1809, 1841), 'sktime.utils.data_processing.from_nested_to_3d_numpy', 'from_nested_to_3d_numpy', (['X_train'], {}), '(X_train)\n', (1832, 1841), False, 'from sktime.utils.data_processing import from_nested_to_3d_numpy, from_3d_numpy_to_nested\n'), ((1858, 1889), 'sktime.utils.data_processing.from_nested_to_3d_numpy', 'from_nested_to_3d_numpy', (['X_test'], {}), '(X_test)\n', (1881, 1889), False, 'from sktime.utils.data_processing import from_nested_to_3d_numpy, from_3d_numpy_to_nested\n'), ((1913, 1956), 'numpy.mean', 'np.mean', (['X_train_np'], {'axis': '(-1)', 'keepdims': '(True)'}), '(X_train_np, axis=-1, keepdims=True)\n', (1920, 1956), True, 'import numpy as np\n'), ((1975, 2017), 'numpy.std', 'np.std', (['X_train_np'], {'axis': '(-1)', 'keepdims': '(True)'}), '(X_train_np, axis=-1, keepdims=True)\n', (1981, 2017), True, 'import numpy as np\n'), ((2035, 2077), 'numpy.mean', 'np.mean', (['X_test_np'], {'axis': '(-1)', 'keepdims': '(True)'}), '(X_test_np, axis=-1, keepdims=True)\n', (2042, 2077), True, 'import numpy as np\n'), ((2095, 2136), 'numpy.std', 'np.std', (['X_test_np'], {'axis': '(-1)', 'keepdims': '(True)'}), '(X_test_np, axis=-1, keepdims=True)\n', (2101, 2136), True, 'import numpy as np\n'), ((2505, 2525), 'numpy.median', 'np.median', (['all_freqs'], {}), '(all_freqs)\n', (2514, 2525), True, 'import numpy as np\n'), ((3203, 3216), 'resources.classification_models.Autoencoder', 'Autoencoder', ([], {}), '()\n', (3214, 3216), False, 'from 
resources.classification_models import Autoencoder, TimeSeriesCollection\n'), ((3237, 3284), 'resources.classification_models.TimeSeriesCollection', 'TimeSeriesCollection', (['all_sols', 'SEQUENCE_LENGTH'], {}), '(all_sols, SEQUENCE_LENGTH)\n', (3257, 3284), False, 'from resources.classification_models import Autoencoder, TimeSeriesCollection\n'), ((3528, 3540), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3538, 3540), True, 'import torch.nn as nn\n'), ((4314, 4358), 'sktime.transformations.panel.tsfresh.TSFreshFeatureExtractor', 'TSFreshFeatureExtractor', ([], {'show_warnings': '(False)'}), '(show_warnings=False)\n', (4337, 4358), False, 'from sktime.transformations.panel.tsfresh import TSFreshFeatureExtractor\n'), ((1235, 1250), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1244, 1250), False, 'import json\n'), ((2245, 2305), 'dysts.utils.find_significant_frequencies', 'find_significant_frequencies', (['row[0]'], {'return_amplitudes': '(True)'}), '(row[0], return_amplitudes=True)\n', (2273, 2305), False, 'from dysts.utils import find_significant_frequencies\n'), ((3070, 3088), 'numpy.array', 'np.array', (['all_sols'], {}), '(all_sols)\n', (3078, 3088), True, 'import numpy as np\n'), ((3444, 3481), 'torch.utils.data.Subset', 'Subset', (['training_data', 'subset_indices'], {}), '(training_data, subset_indices)\n', (3450, 3481), False, 'from torch.utils.data import DataLoader, Subset\n'), ((4839, 4876), 'json.dump', 'json.dump', (['all_scores', 'file'], {'indent': '(4)'}), '(all_scores, file, indent=4)\n', (4848, 4876), False, 'import json\n'), ((2326, 2342), 'numpy.argsort', 'np.argsort', (['amps'], {}), '(amps)\n', (2336, 2342), True, 'import numpy as np\n'), ((4523, 4545), 'numpy.logspace', 'np.logspace', (['(-3)', '(3)', '(10)'], {}), '(-3, 3, 10)\n', (4534, 4545), True, 'import numpy as np\n'), ((4112, 4157), 'torch.tensor', 'torch.tensor', (['X_train_np'], {'dtype': 'torch.float32'}), '(X_train_np, dtype=torch.float32)\n', (4124, 4157), False, 
'import torch\n'), ((4231, 4275), 'torch.tensor', 'torch.tensor', (['X_test_np'], {'dtype': 'torch.float32'}), '(X_test_np, dtype=torch.float32)\n', (4243, 4275), False, 'import torch\n')] |
import pytest
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklego.common import flatten
from sklego.meta import DecayEstimator
from tests.conftest import general_checks, classifier_checks, regressor_checks, nonmeta_checks
@pytest.mark.parametrize(
    "test_fn", flatten([general_checks, nonmeta_checks, regressor_checks])
)
def test_estimator_checks_regression(test_fn):
    # Run each shared estimator check against a DecayEstimator-wrapped regressor.
    estimator = DecayEstimator(LinearRegression())
    test_fn(DecayEstimator.__name__, estimator)
@pytest.mark.parametrize(
    "test_fn", flatten([general_checks, nonmeta_checks, classifier_checks])
)
def test_estimator_checks_classification(test_fn):
    # Run each shared estimator check against a DecayEstimator-wrapped classifier.
    estimator = DecayEstimator(LogisticRegression(solver='lbfgs'))
    test_fn(DecayEstimator.__name__, estimator)
@pytest.mark.parametrize(
    "mod", flatten([LinearRegression(), Ridge(), DecisionTreeRegressor()])
)
def test_decay_weight_regr(mod):
    # The oldest of n samples should be down-weighted by decay**n.
    X = np.random.normal(0, 1, (100, 100))
    y = np.random.normal(0, 1, (100,))
    fitted = DecayEstimator(mod, decay=0.95).fit(X, y)
    assert fitted.weights_[0] == pytest.approx(0.95 ** 100, abs=0.001)
@pytest.mark.parametrize("mod", flatten([DecisionTreeClassifier(), LogisticRegression(solver='lbfgs')]))
def test_decay_weight_clf(mod):
    """The oldest of n samples is down-weighted by decay**n for classifiers too."""
    X = np.random.normal(0, 1, (100, 100))
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the documented replacement and behaves identically here.
    y = (np.random.normal(0, 1, (100,)) < 0).astype(int)
    mod = DecayEstimator(mod, decay=0.95).fit(X, y)
    assert mod.weights_[0] == pytest.approx(0.95 ** 100, abs=0.001)
@pytest.mark.parametrize("mod", flatten([KNeighborsClassifier()]))
def test_throw_warning(mod):
    # Estimators without sample_weight support must raise an informative TypeError.
    X = np.random.normal(0, 1, (100, 100))
    y = np.random.normal(0, 1, (100,)) < 0
    with pytest.raises(TypeError) as e:
        DecayEstimator(mod, decay=0.95).fit(X, y)
    # The message should name both the missing argument and the estimator.
    assert "sample_weight" in str(e)
    assert type(mod).__name__ in str(e)
| [
"numpy.random.normal",
"pytest.approx",
"sklearn.tree.DecisionTreeRegressor",
"sklego.common.flatten",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.linear_model.Ridge",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.linear_model.LogisticRegression",
"sklego.meta.DecayEstimator",
"pytest.rai... | [((440, 499), 'sklego.common.flatten', 'flatten', (['[general_checks, nonmeta_checks, regressor_checks]'], {}), '([general_checks, nonmeta_checks, regressor_checks])\n', (447, 499), False, 'from sklego.common import flatten\n'), ((687, 747), 'sklego.common.flatten', 'flatten', (['[general_checks, nonmeta_checks, classifier_checks]'], {}), '([general_checks, nonmeta_checks, classifier_checks])\n', (694, 747), False, 'from sklego.common import flatten\n'), ((587, 605), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (603, 605), False, 'from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression\n'), ((839, 873), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""lbfgs"""'}), "(solver='lbfgs')\n", (857, 873), False, 'from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression\n'), ((1060, 1094), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100, 100)'], {}), '(0, 1, (100, 100))\n', (1076, 1094), True, 'import numpy as np\n'), ((1096, 1126), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100,)'], {}), '(0, 1, (100,))\n', (1112, 1126), True, 'import numpy as np\n'), ((1210, 1247), 'pytest.approx', 'pytest.approx', (['(0.95 ** 100)'], {'abs': '(0.001)'}), '(0.95 ** 100, abs=0.001)\n', (1223, 1247), False, 'import pytest\n'), ((1396, 1430), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100, 100)'], {}), '(0, 1, (100, 100))\n', (1412, 1430), True, 'import numpy as np\n'), ((1567, 1604), 'pytest.approx', 'pytest.approx', (['(0.95 ** 100)'], {'abs': '(0.001)'}), '(0.95 ** 100, abs=0.001)\n', (1580, 1604), False, 'import pytest\n'), ((1719, 1753), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100, 100)'], {}), '(0, 1, (100, 100))\n', (1735, 1753), True, 'import numpy as np\n'), ((1800, 1824), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1813, 1824), False, 'import pytest\n'), 
((1138, 1169), 'sklego.meta.DecayEstimator', 'DecayEstimator', (['mod'], {'decay': '(0.95)'}), '(mod, decay=0.95)\n', (1152, 1169), False, 'from sklego.meta import DecayEstimator\n'), ((960, 978), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (976, 978), False, 'from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression\n'), ((980, 987), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (985, 987), False, 'from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression\n'), ((989, 1012), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (1010, 1012), False, 'from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\n'), ((1495, 1526), 'sklego.meta.DecayEstimator', 'DecayEstimator', (['mod'], {'decay': '(0.95)'}), '(mod, decay=0.95)\n', (1509, 1526), False, 'from sklego.meta import DecayEstimator\n'), ((1289, 1313), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1311, 1313), False, 'from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\n'), ((1315, 1349), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""lbfgs"""'}), "(solver='lbfgs')\n", (1333, 1349), False, 'from sklearn.linear_model import LinearRegression, Ridge, LogisticRegression\n'), ((1755, 1785), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100,)'], {}), '(0, 1, (100,))\n', (1771, 1785), True, 'import numpy as np\n'), ((1651, 1673), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (1671, 1673), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1839, 1870), 'sklego.meta.DecayEstimator', 'DecayEstimator', (['mod'], {'decay': '(0.95)'}), '(mod, decay=0.95)\n', (1853, 1870), False, 'from sklego.meta import DecayEstimator\n'), ((1433, 1463), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(100,)'], {}), '(0, 1, (100,))\n', (1449, 
1463), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Created on Sun Dec 7 15:09:45 2014
Author: <NAME>
Email: <EMAIL>
"""
import numpy as np
from pycuda.compiler import SourceModule
from pycuda.driver import Context
from pycuda import gpuarray
from of.utils import ipshell
# CUDA kernel: trilinear resampling of a 3D (optionally multi-channel)
# volume at a list of (x, y, z) sample points. Points whose interpolation
# cell falls outside the volume are skipped, leaving img_wrapped untouched
# at those indices.
#
# BUG FIX: the z coordinate previously read pts[idx*3+1] (the y component,
# copy-pasted); it must read component 2 of each (x, y, z) triple.
#
# NOTE(review): the corner-index comment says the naming order is
# "idx_in_orig_zyx", yet idx_in_orig_001 varies y while idx_in_orig_010
# varies x (swapped relative to that naming), and the xx/yy blend weights
# follow the names. Preserved as-is -- confirm against the intended layout
# with a non-cubic test volume.
_kernel = """
__global__ void resampler(
    double* pts,
    double* img,
    double* img_wrapped,
    int nPts,
    int nx,
    int ny,
    int nz,
    int nChannels)
{
    //int tid = threadIdx.x;
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx>=nPts)
        return;
    double x = pts[idx*3+0];
    double y = pts[idx*3+1];
    double z = pts[idx*3+2];
    int x0 = floor(x);
    int y0 = floor(y);
    int z0 = floor(z);
    int x1 = x0 + 1;
    int y1 = y0 + 1;
    int z1 = z0 + 1;
    if (x0<0)
        return;
    if (x1>=nx)
        return;
    if (y0<0)
        return;
    if (y1>=ny)
        return;
    if (z0<0)
        return;
    if (z1>=nz)
        return;
    int idx_in_orig_000;
    int idx_in_orig_001;
    int idx_in_orig_010;
    int idx_in_orig_011;
    int idx_in_orig_100;
    int idx_in_orig_101;
    int idx_in_orig_110;
    int idx_in_orig_111;
    double f000,f001,f010,f011,f100,f101,f110,f111;
    double c00,c01,c10,c11;
    double c0,c1;
    double c=0;
    double xx = x-x0;
    double yy = y-y0;
    double zz = z-z0;
    // Order is idx_in_orig_zyx
    idx_in_orig_000 = x0 + y0 * nx + z0 * nx * ny;
    idx_in_orig_001 = x0 + y1 * nx + z0 * nx * ny;
    idx_in_orig_010 = x1 + y0 * nx + z0 * nx * ny;
    idx_in_orig_011 = x1 + y1 * nx + z0 * nx * ny;
    idx_in_orig_100 = x0 + y0 * nx + z1 * nx * ny;
    idx_in_orig_101 = x0 + y1 * nx + z1 * nx * ny;
    idx_in_orig_110 = x1 + y0 * nx + z1 * nx * ny;
    idx_in_orig_111 = x1 + y1 * nx + z1 * nx * ny;
    for (int i=0;i < nChannels; i++){
        f000 = img[idx_in_orig_000*nChannels + i];
        f001 = img[idx_in_orig_001*nChannels + i];
        f010 = img[idx_in_orig_010*nChannels + i];
        f011 = img[idx_in_orig_011*nChannels + i];
        f100 = img[idx_in_orig_100*nChannels + i];
        f101 = img[idx_in_orig_101*nChannels + i];
        f110 = img[idx_in_orig_110*nChannels + i];
        f111 = img[idx_in_orig_111*nChannels + i];
        // Interpolate x
        c00 = f000*(1-xx) + f001*xx;
        c10 = f010*(1-xx) + f011*xx;
        c01 = f100*(1-xx) + f101*xx;
        c11 = f110*(1-xx) + f111*xx;
        // Interpolate y
        c0 = c00*(1-yy)+c10 * yy;
        c1 = c01*(1-yy)+c11 * yy;
        // Interpolate z
        c = c0*(1-zz) + c1 * zz;
        img_wrapped[idx*nChannels + i]= c;
    }
    return;
}
"""
# Ensure a CUDA context exists before compiling: SourceModule requires an
# active context. If the caller has not created one, importing
# pycuda.autoinit sets up a default context on the first available device.
try:
    Context.get_device()
except Exception:  # bare `except:` would also swallow KeyboardInterrupt/SystemExit
    import pycuda.autoinit

# Compile the kernel once at import time and keep a handle to the function.
mod = SourceModule(_kernel)
_resampler = mod.get_function("resampler")
def resampler(pts_gpu,
              img_gpu,
              img_wrapped_gpu,
              nPts=None,
              threadsPerBlock=1024):
    """
    Trilinear GPU resampling, similar in purpose to cv2.remap but in 3D and
    operating on device arrays.

    This warps img_gpu into img_wrapped_gpu: conceptually, for each sample
    point p in pts_gpu, img_wrapped_gpu at that index receives the
    trilinearly interpolated value of img_gpu at p. Points whose
    interpolation cell falls outside the volume are left untouched.

    Input:
        pts_gpu: gpuarray.GPUArray, shape (nPts, 3), dtype np.float64 --
                 (x, y, z) sample coordinates.
        img_gpu: gpuarray.GPUArray, dtype np.float64 (not uint8!).
        img_wrapped_gpu: gpuarray.GPUArray, same shape/dtype as img_gpu.
        nPts: number of points; defaults to pts_gpu.shape[0].
        threadsPerBlock: CUDA block size (one thread per point).

    Raises:
        TypeError: if any array argument is not a GPUArray.
        ValueError: on wrong point shape, wrong dtype, mismatched image
                    shapes, or a degenerate x extent.
    """
    do_checks = True
    if do_checks:
        if not isinstance(pts_gpu, gpuarray.GPUArray):
            raise TypeError(type(pts_gpu))
        if not isinstance(img_gpu, gpuarray.GPUArray):
            raise TypeError(type(img_gpu))
        if not isinstance(img_wrapped_gpu, gpuarray.GPUArray):
            raise TypeError(type(img_wrapped_gpu))
        if pts_gpu.shape[1] != 3:
            raise ValueError(pts_gpu.shape)
        if img_gpu.dtype != np.float64:
            raise ValueError(img_gpu.dtype)
        if img_wrapped_gpu.dtype != np.float64:
            raise ValueError(img_wrapped_gpu.dtype)
        if img_gpu.shape != img_wrapped_gpu.shape:
            raise ValueError(img_gpu.shape, img_wrapped_gpu.shape)
    # Multi-channel support is not wired up; the kernel is launched with a
    # single channel.
    nChannels = 1
    # NOTE(review): the docstring describes img as indexed [z, y, x], which
    # would make shape == (nz, ny, nx); here it is unpacked as (ny, nx, nz).
    # Preserved as-is -- confirm against the callers' actual array layout.
    ny, nx, nz = img_gpu.shape
    if nx in (1, 2):
        raise ValueError(nx, "I am pretty sure this is not what you want")
    if nPts is None:
        nPts = pts_gpu.shape[0]
    # One thread per point, rounded up to whole blocks.
    nBlocks = int(np.ceil(float(nPts) / float(threadsPerBlock)))
    _resampler(pts_gpu,
               img_gpu,
               img_wrapped_gpu,
               np.int32(nPts),
               np.int32(nx),
               np.int32(ny),
               np.int32(nz),
               np.int32(nChannels),
               grid=(nBlocks, 1, 1),
               block=(threadsPerBlock, 1, 1))
| [
"pycuda.driver.Context.get_device",
"pycuda.compiler.SourceModule",
"numpy.int32"
] | [((2991, 3012), 'pycuda.compiler.SourceModule', 'SourceModule', (['_kernel'], {}), '(_kernel)\n', (3003, 3012), False, 'from pycuda.compiler import SourceModule\n'), ((2928, 2948), 'pycuda.driver.Context.get_device', 'Context.get_device', ([], {}), '()\n', (2946, 2948), False, 'from pycuda.driver import Context\n'), ((5926, 5940), 'numpy.int32', 'np.int32', (['nPts'], {}), '(nPts)\n', (5934, 5940), True, 'import numpy as np\n'), ((5960, 5972), 'numpy.int32', 'np.int32', (['nx'], {}), '(nx)\n', (5968, 5972), True, 'import numpy as np\n'), ((5992, 6004), 'numpy.int32', 'np.int32', (['ny'], {}), '(ny)\n', (6000, 6004), True, 'import numpy as np\n'), ((6024, 6036), 'numpy.int32', 'np.int32', (['nz'], {}), '(nz)\n', (6032, 6036), True, 'import numpy as np\n'), ((6056, 6075), 'numpy.int32', 'np.int32', (['nChannels'], {}), '(nChannels)\n', (6064, 6075), True, 'import numpy as np\n')] |
from bluesky_live.run_builder import RunBuilder
import pytest
import numpy
from ..plot_builders import RasteredImages
from ..plot_specs import Axes, Figure
@pytest.fixture
def non_snaking_run():
    """A 2x2 raster run whose x values repeat in the same order on every row."""
    metadata = {"motors": ["y", "x"], "shape": [2, 2], "snaking": (False, False)}
    stream_data = {"ccd": [1, 2, 3, 4], "x": [0, 1, 0, 1], "y": [0, 0, 1, 1]}
    with RunBuilder(metadata) as builder:
        builder.add_stream("primary", data=stream_data)
    return builder.get_run()
@pytest.fixture
def snaking_run():
    """A 2x2 raster run whose x direction alternates (snakes) between rows."""
    metadata = {"motors": ["y", "x"], "shape": [2, 2], "snaking": (False, True)}
    stream_data = {"ccd": [1, 2, 3, 4], "x": [0, 1, 1, 0], "y": [0, 0, 1, 1]}
    with RunBuilder(metadata) as builder:
        builder.add_stream("primary", data=stream_data)
    return builder.get_run()
def test_rastered_image(non_snaking_run, FigureView):
    """Adding a run to a RasteredImages model should create an image artist."""
    plot = RasteredImages("ccd", shape=(2, 2))
    view = FigureView(plot.figure)
    # No artists exist until a run is added.
    assert not plot.figure.axes[0].artists
    plot.add_run(non_snaking_run)
    assert plot.figure.axes[0].artists
    view.close()
def test_x_y_positive_change_x_y_limits(non_snaking_run, FigureView):
    """Flipping x_positive / y_positive should flip the axis limits."""
    model = RasteredImages("ccd", shape=(2, 2), x_positive="left", y_positive="down")
    view = FigureView(model.figure)
    model.add_run(non_snaking_run)
    # "left"/"down" orientations produce descending limits.
    assert model.axes.x_limits == (1.5, -0.5)
    assert model.axes.y_limits == (1.5, -0.5)
    model.x_positive = "right"
    model.y_positive = "up"
    # "right"/"up" orientations produce ascending limits.
    assert model.axes.x_limits == (-0.5, 1.5)
    assert model.axes.y_limits == (-0.5, 1.5)
    view.close()
def test_x_y_limits_change_x_y_positive(non_snaking_run, FigureView):
    "Test x_limits and y_limits change x_positive and y_positive"
    axes = Axes(x_limits=(1.5, -0.5), y_limits=(1.5, -0.5))
    Figure((axes,), title="")
    model = RasteredImages("ccd", shape=(2, 2), axes=axes)
    view = FigureView(model.figure)
    model.add_run(non_snaking_run)
    # Descending limits imply flipped orientations.
    assert model.x_positive == "left"
    assert model.y_positive == "down"
    # Ascending limits imply the default orientations.
    model.axes.x_limits = model.axes.y_limits = (-0.5, 1.5)
    assert model.x_positive == "right"
    assert model.y_positive == "up"
    view.close()
def test_non_snaking_image_data(non_snaking_run, FigureView):
    model = RasteredImages("ccd", shape=(2, 2))
    model.add_run(non_snaking_run)
    view = FigureView(model.figure)
    image = model.figure.axes[0].artists[0].update()["array"]
    # Row-major fill: first row [1, 2], second row [3, 4].
    assert numpy.array_equal(image, [[1, 2], [3, 4]])
    view.close()
def test_snaking_image_data(snaking_run, FigureView):
    model = RasteredImages("ccd", shape=(2, 2))
    view = FigureView(model.figure)
    model.add_run(snaking_run)
    image = model.figure.axes[0].artists[0].update()["array"]
    # The snaked second row is filled right-to-left: [4, 3].
    assert numpy.array_equal(image, [[1, 2], [4, 3]])
    view.close()
def test_non_snaking_image_data_positions(FigureView):
    """Feed points one at a time and check the image fills row-major."""
    md = {"motors": ["y", "x"], "shape": [2, 2], "snaking": (False, False)}
    model = RasteredImages("ccd", shape=(2, 2))
    view = FigureView(model.figure)
    nan = numpy.nan
    # (ccd, x, y, expected image after this point arrives)
    points = [
        (1, 0, 0, [[1, nan], [nan, nan]]),
        (2, 1, 0, [[1, 2], [nan, nan]]),
        (3, 0, 1, [[1, 2], [3, nan]]),
        (4, 1, 1, [[1, 2], [3, 4]]),
    ]
    with RunBuilder(md) as builder:
        run = builder.get_run()
        model.add_run(run)
        for i, (ccd, x, y, expected) in enumerate(points):
            payload = {"ccd": [ccd], "x": [x], "y": [y]}
            if i == 0:
                # The first point creates the stream.
                builder.add_stream("primary", data=payload)
            else:
                builder.add_data("primary", data=payload)
            actual = model.figure.axes[0].artists[0].update()["array"]
            assert numpy.array_equal(actual, expected, equal_nan=True)
    view.close()
def test_snaking_image_data_positions(FigureView):
    """Feed points one at a time and check the snaked row fills right-to-left."""
    md = {"motors": ["y", "x"], "shape": [2, 2], "snaking": (False, True)}
    model = RasteredImages("ccd", shape=(2, 2))
    view = FigureView(model.figure)
    nan = numpy.nan
    # (ccd, x, y, expected image after this point arrives)
    points = [
        (1, 0, 0, [[1, nan], [nan, nan]]),
        (2, 1, 0, [[1, 2], [nan, nan]]),
        (3, 1, 1, [[1, 2], [nan, 3]]),
        (4, 0, 1, [[1, 2], [4, 3]]),
    ]
    with RunBuilder(md) as builder:
        run = builder.get_run()
        model.add_run(run)
        for i, (ccd, x, y, expected) in enumerate(points):
            payload = {"ccd": [ccd], "x": [x], "y": [y]}
            if i == 0:
                # The first point creates the stream.
                builder.add_stream("primary", data=payload)
            else:
                builder.add_data("primary", data=payload)
            actual = model.figure.axes[0].artists[0].update()["array"]
            assert numpy.array_equal(actual, expected, equal_nan=True)
    view.close()
def test_figure_set_after_instantiation():
    axes = Axes()
    model = RasteredImages("ccd", shape=(2, 2), axes=axes)
    # No figure exists until the axes are placed into one.
    assert model.figure is None
    figure = Figure((axes,), title="")
    assert model.figure is figure
| [
"bluesky_live.run_builder.RunBuilder",
"numpy.array_equal"
] | [((2763, 2808), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {}), '(actual_data, expected_data)\n', (2780, 2808), False, 'import numpy\n'), ((3127, 3172), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {}), '(actual_data, expected_data)\n', (3144, 3172), False, 'import numpy\n'), ((299, 313), 'bluesky_live.run_builder.RunBuilder', 'RunBuilder', (['md'], {}), '(md)\n', (309, 313), False, 'from bluesky_live.run_builder import RunBuilder\n'), ((632, 646), 'bluesky_live.run_builder.RunBuilder', 'RunBuilder', (['md'], {}), '(md)\n', (642, 646), False, 'from bluesky_live.run_builder import RunBuilder\n'), ((3416, 3430), 'bluesky_live.run_builder.RunBuilder', 'RunBuilder', (['md'], {}), '(md)\n', (3426, 3430), False, 'from bluesky_live.run_builder import RunBuilder\n'), ((3895, 3956), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {'equal_nan': '(True)'}), '(actual_data, expected_data, equal_nan=True)\n', (3912, 3956), False, 'import numpy\n'), ((4241, 4302), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {'equal_nan': '(True)'}), '(actual_data, expected_data, equal_nan=True)\n', (4258, 4302), False, 'import numpy\n'), ((4578, 4639), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {'equal_nan': '(True)'}), '(actual_data, expected_data, equal_nan=True)\n', (4595, 4639), False, 'import numpy\n'), ((4908, 4969), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {'equal_nan': '(True)'}), '(actual_data, expected_data, equal_nan=True)\n', (4925, 4969), False, 'import numpy\n'), ((5208, 5222), 'bluesky_live.run_builder.RunBuilder', 'RunBuilder', (['md'], {}), '(md)\n', (5218, 5222), False, 'from bluesky_live.run_builder import RunBuilder\n'), ((5687, 5748), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {'equal_nan': '(True)'}), '(actual_data, expected_data, 
equal_nan=True)\n', (5704, 5748), False, 'import numpy\n'), ((6033, 6094), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {'equal_nan': '(True)'}), '(actual_data, expected_data, equal_nan=True)\n', (6050, 6094), False, 'import numpy\n'), ((6370, 6431), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {'equal_nan': '(True)'}), '(actual_data, expected_data, equal_nan=True)\n', (6387, 6431), False, 'import numpy\n'), ((6700, 6761), 'numpy.array_equal', 'numpy.array_equal', (['actual_data', 'expected_data'], {'equal_nan': '(True)'}), '(actual_data, expected_data, equal_nan=True)\n', (6717, 6761), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
The module contains functions to evaluate the optical depth,
to convert this to observed transmission and to convolve the
observed spectrum with the instrumental profile.
"""
__author__ = '<NAME>'
import numpy as np
from scipy.signal import fftconvolve, gaussian
from numba import jit
# ==== VOIGT PROFILE ===============
def H(a, x):
    """Voigt-Hjerting function H(a, x), Tepper-Garcia (2006, 2007) approximation."""
    x2 = x ** 2
    gauss = np.exp(-x2)
    q = 1.5 / x2
    # Damping correction term added to the pure Gaussian core.
    correction = a / np.sqrt(np.pi) / x2 * (gauss * gauss * (4. * x2 * x2 + 7. * x2 + 4. + q) - q - 1)
    return gauss - correction
def Voigt(wl, l0, f, N, b, gam, z=0):
    """
    Calculate the optical depth Voigt profile.
    Parameters
    ----------
    wl : array_like, shape (N)
        Wavelength grid in Angstroms at which to evaluate the optical depth.
    l0 : float
        Rest frame transition wavelength in Angstroms.
    f : float
        Oscillator strength.
    N : float
        Column density in units of cm^-2.
    b : float
        Velocity width of the Voigt profile in cm/s.
    gam : float
        Radiation damping constant, or Einstein constant (A_ul)
    z : float
        The redshift of the observed wavelength grid `l`.
    Returns
    -------
    tau : array_like, shape (N)
        Optical depth array evaluated at the input grid wavelengths `l`.
    """
    # ==== PARAMETERS ==================
    c = 2.99792e10        # speed of light, cm/s
    m_e = 9.1094e-28       # electron mass, g
    e = 4.8032e-10        # electron charge, cgs units
    # ==================================
    # Calculate Profile
    # C_a: classical absorption amplitude; the 1.e-8 factor converts l0
    # from Angstroms to cm so all quantities are in cgs.
    C_a = np.sqrt(np.pi)*e**2*f*l0*1.e-8/m_e/c/b
    # a: damping parameter (ratio of natural to Doppler broadening).
    a = l0*1.e-8*gam/(4.*np.pi*b)
    # dl_D: Doppler width in Angstroms.
    dl_D = b/c*l0
    # Shift the observed grid to the absorber rest frame.
    wl = wl/(z+1.)
    # Dimensionless offset from line center; the +0.00001 nudges x away
    # from exactly zero, where H's 1/x**2 terms would divide by zero.
    x = (wl - l0)/dl_D + 0.00001
    tau = np.float64(C_a) * N * H(a, x)
    return tau
@jit
def convolve_numba(P, kernel):
    """
    Define convolution function for a wavelength dependent kernel.
    Parameters
    ----------
    P : array_like, shape (N)
        Intrinsic line profile.
    kernel : np.array, shape (N, M)
        Each row of the `kernel` corresponds to the wavelength dependent
        line-spread function (LSF) evaluated at each pixel of the input
        profile `P`. Each LSF must be normalized!
    Returns
    -------
    P_con : np.array, shape (N)
        Resulting profile after performing convolution with `kernel`.
    Notes
    -----
    This function is decorated by the `jit` decorator from `numba`_ in order
    to speed up the calculation.
    """
    # BUGFIX: use floor division; `kernel.shape[1]/2` is a float in
    # Python 3, which breaks both np.ones(N) and the slice below.
    N = kernel.shape[1] // 2
    # Pad with ones (continuum level) so edge pixels see a full window.
    pad = np.ones(N)
    P_pad = np.concatenate([pad, P, pad])
    P_con = np.zeros_like(P)
    for i, lsf_i in enumerate(kernel):
        # Weighted sum of the window centered on pixel i.
        P_con[i] = np.sum(P_pad[i:i+2*N+1] * lsf_i)
    return P_con
def evaluate_continuum(x, pars, reg_num):
    """
    Evaluate the continuum model using Chebyshev polynomials.
    All regions are fitted with the same order of polynomials.
    Parameters
    ----------
    x : array_like, shape (N)
        Input wavelength grid in Ångstrøm.
    pars : dict(lmfit.Parameters_)
        An instance of lmfit.Parameters_ containing the Chebyshev
        coefficients for each region.
    reg_num : int
        The region number, i.e., the index of the region in the list
        :attr:`VoigtFit.DataSet.regions`.
    Returns
    -------
    cont_model : array_like, shape (N)
        The continuum Chebyshev polynomial evaluated at the input wavelengths `x`.
    """
    # Coefficients are named 'R<reg>_cheb_p0', 'R<reg>_cheb_p1', ...;
    # sorting the names arranges them in ascending order p0, p1, ...
    prefix = 'R%i_cheb' % reg_num
    coef_names = sorted(name for name in pars.keys() if prefix in name)
    coefficients = [pars[name].value for name in coef_names]
    # Evaluate the Chebyshev polynomial scaled to the x-range:
    cheb = np.polynomial.Chebyshev(coefficients, domain=(x.min(), x.max()))
    return cheb(x)
def evaluate_profile(x, pars, z_sys, lines, components, kernel, sampling=3):
    """
    Evaluate the observed Voigt profile. The calculated optical depth, `tau`,
    is converted to observed transmission, `f`:
    .. math:: f = e^{-\\tau}
    The observed transmission is subsequently convolved with the instrumental
    broadening profile assumed to be Gaussian with a full-width at half maximum
    of res. The resolving power is assumed to be constant in velocity space.
    Parameters
    ----------
    x : array_like, shape (N)
        Wavelength array in Ångstrøm on which to evaluate the profile.
    pars : dict(lmfit.Parameters_)
        An instance of lmfit.Parameters_ containing the line parameters.
    lines : list(:class:`Line <dataset.Line>`)
        List of lines to evaluate. Should be a list of
        :class:`Line <dataset.Line>` objects.
    components : dict
        Dictionary containing component data for the defined ions.
        See :attr:`VoigtFit.DataSet.components`.
    kernel : np.array, shape (N, M) or float
        The convolution kernel for each wavelength pixel.
        If an array is given, each row of the array must specify
        the line-spread function (LSF) at the given wavelength pixel.
        The LSF must be normalized!
        If a float is given, the resolution FWHM is given in km/s (c/R).
        In this case the spectral resolution is assumed
        to be constant in velocity space.
    sampling : integer [default = 3]
        The subsampling factor used for defining the input logarithmically
        space wavelength grid. The number of pixels in the evaluation will
        be sampling * N, where N is the number of input pixels.
        The final profile will be interpolated back onto the original
        wavelength grid defined by `x`.
    Returns
    -------
    profile_obs : array_like, shape (N)
        Observed line profile convolved with the instrument profile.
    """
    if isinstance(kernel, float):
        # Create logarithmically binned grid (padded by 50 pixels on each
        # side so the convolution does not wrap around the data edges):
        dx = np.mean(np.diff(x))
        xmin = np.log10(x.min() - 50*dx)
        xmax = np.log10(x.max() + 50*dx)
        N = sampling * len(x)
        profile_wl = np.logspace(xmin, xmax, N)
        # Calculate actual pixel size in km/s:
        pxs = np.diff(profile_wl)[0] / profile_wl[0] * 299792.458
        # Set Gaussian kernel width:
        # convert FWHM in km/s to a Gaussian sigma in pixels
        # (2.35482 = 2*sqrt(2*ln 2) = FWHM/sigma).
        kernel = kernel / pxs / 2.35482
    elif isinstance(kernel, np.ndarray):
        assert kernel.shape[0] == len(x)
        # evaluate on the input grid
        profile_wl = x.copy()
    else:
        err_msg = "Invalid type of `kernel`: %r" % type(kernel)
        raise TypeError(err_msg)
    tau = np.zeros_like(profile_wl)
    # Determine range in which to evaluate the profile:
    # strong absorbers (logN > 19) get a wider velocity window
    # to cover their damping wings.
    max_logN = max([val.value for key, val in pars.items() if 'logN' in key])
    if max_logN > 19.0:
        velspan = 20000.*(1. + 1.0*(max_logN-19.))
    else:
        velspan = 20000.
    for line in lines:
        if line.active:
            l0, f, gam = line.get_properties()
            ion = line.ion
            n_comp = len(components[ion])
            l_center = l0*(z_sys + 1.)
            # Velocity offset (km/s) of each grid pixel from line center;
            # only pixels within +/- velspan contribute.
            vel = (profile_wl - l_center)/l_center*299792.
            span = (vel >= -velspan)*(vel <= velspan)
            # Parameter names use 'x' in place of '*' (e.g. fine-structure levels).
            ion = ion.replace('*', 'x')
            for n in range(n_comp):
                z = pars['z%i_%s' % (n, ion)].value
                if x.min() < l0*(z+1) < x.max():
                    b = pars['b%i_%s' % (n, ion)].value
                    logN = pars['logN%i_%s' % (n, ion)].value
                    tau[span] += Voigt(profile_wl[span], l0, f, 10**logN, 1.e5*b, gam, z=z)
                elif ion == 'HI':
                    # NOTE(review): this branch is identical to the one above;
                    # presumably HI is evaluated even when the line center falls
                    # outside the observed range (damping wings) -- confirm.
                    b = pars['b%i_%s' % (n, ion)].value
                    logN = pars['logN%i_%s' % (n, ion)].value
                    tau[span] += Voigt(profile_wl[span], l0, f, 10**logN, 1.e5*b, gam, z=z)
                else:
                    continue
    profile = np.exp(-tau)
    if isinstance(kernel, float):
        LSF = gaussian(10*int(kernel) + 1, kernel)
        LSF = LSF/LSF.sum()
        profile_broad = fftconvolve(profile, LSF, 'same')
        # Interpolate onto the data grid:
        profile_obs = np.interp(x, profile_wl, profile_broad)
    else:
        profile_obs = convolve_numba(profile, kernel)
    return profile_obs
| [
"numpy.sqrt",
"numpy.ones",
"numpy.float64",
"scipy.signal.fftconvolve",
"numpy.diff",
"numpy.exp",
"numpy.sum",
"numpy.concatenate",
"numpy.interp",
"numpy.logspace",
"numpy.zeros_like"
] | [((461, 476), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (467, 476), True, 'import numpy as np\n'), ((2497, 2507), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (2504, 2507), True, 'import numpy as np\n'), ((2520, 2549), 'numpy.concatenate', 'np.concatenate', (['[pad, P, pad]'], {}), '([pad, P, pad])\n', (2534, 2549), True, 'import numpy as np\n'), ((2562, 2578), 'numpy.zeros_like', 'np.zeros_like', (['P'], {}), '(P)\n', (2575, 2578), True, 'import numpy as np\n'), ((6816, 6841), 'numpy.zeros_like', 'np.zeros_like', (['profile_wl'], {}), '(profile_wl)\n', (6829, 6841), True, 'import numpy as np\n'), ((8100, 8112), 'numpy.exp', 'np.exp', (['(-tau)'], {}), '(-tau)\n', (8106, 8112), True, 'import numpy as np\n'), ((2637, 2675), 'numpy.sum', 'np.sum', (['(P_pad[i:i + 2 * N + 1] * lsf_i)'], {}), '(P_pad[i:i + 2 * N + 1] * lsf_i)\n', (2643, 2675), True, 'import numpy as np\n'), ((6332, 6358), 'numpy.logspace', 'np.logspace', (['xmin', 'xmax', 'N'], {}), '(xmin, xmax, N)\n', (6343, 6358), True, 'import numpy as np\n'), ((8251, 8284), 'scipy.signal.fftconvolve', 'fftconvolve', (['profile', 'LSF', '"""same"""'], {}), "(profile, LSF, 'same')\n", (8262, 8284), False, 'from scipy.signal import fftconvolve, gaussian\n'), ((8349, 8388), 'numpy.interp', 'np.interp', (['x', 'profile_wl', 'profile_broad'], {}), '(x, profile_wl, profile_broad)\n', (8358, 8388), True, 'import numpy as np\n'), ((1712, 1727), 'numpy.float64', 'np.float64', (['C_a'], {}), '(C_a)\n', (1722, 1727), True, 'import numpy as np\n'), ((6187, 6197), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (6194, 6197), True, 'import numpy as np\n'), ((510, 524), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (517, 524), True, 'import numpy as np\n'), ((6420, 6439), 'numpy.diff', 'np.diff', (['profile_wl'], {}), '(profile_wl)\n', (6427, 6439), True, 'import numpy as np\n'), ((1557, 1571), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (1564, 1571), True, 'import numpy as np\n')] |
from collections import defaultdict
from tempfile import NamedTemporaryFile
import numpy as np
from celery import group
from celery.decorators import task
from network.tasks.analysis.utils import \
call_bigwig_average_over_bed, generate_intersection_df
from network import models
def get_locus_values(loci, locus_bed_path, ambiguous_bigwig=None,
                     plus_bigwig=None, minus_bigwig=None):
    '''
    Finds coverage values for each locus.

    loci - Dict of locus objects from models.LocusGroup.get_loci_dict()
    locus_bed_path - Path to BED file with loci intervals.
    ambiguous_bigwig - bigWig for unstranded coverage.
    plus_bigwig/minus_bigwig - bigWigs for stranded coverage; both must be
        given together and take precedence over ambiguous_bigwig.

    Raises ValueError if neither a stranded pair nor an ambiguous bigWig
    is provided.
    '''
    if plus_bigwig and minus_bigwig:
        # Context managers ensure the temp files are closed and removed
        # even if bigWigAverageOverBed fails (previously they leaked).
        with NamedTemporaryFile(mode='w') as plus_tab, \
                NamedTemporaryFile(mode='w') as minus_tab:
            call_bigwig_average_over_bed(
                plus_bigwig,
                locus_bed_path,
                plus_tab.name,
            )
            call_bigwig_average_over_bed(
                minus_bigwig,
                locus_bed_path,
                minus_tab.name,
            )
            plus_tab.flush()
            minus_tab.flush()
            return reconcile_stranded_coverage(
                loci,
                read_bigwig_average_over_bed_tab_file(loci, plus_tab.name),
                read_bigwig_average_over_bed_tab_file(loci, minus_tab.name),
            )
    elif ambiguous_bigwig:
        with NamedTemporaryFile(mode='w') as tab:
            call_bigwig_average_over_bed(
                ambiguous_bigwig,
                locus_bed_path,
                tab.name,
            )
            tab.flush()
            return read_bigwig_average_over_bed_tab_file(loci, tab.name)
    else:
        raise ValueError('Improper bigWig files specified.')
def read_bigwig_average_over_bed_tab_file(loci, tab_file_path):
    '''
    Parse bigWigAverageOverBed output and return {locus: summed coverage}.

    Entry names are '<locus_pk>_<region_index>'; coverage sums are
    accumulated per locus across its regions.
    '''
    summed = defaultdict(float)
    with open(tab_file_path) as tab_file:
        for row in tab_file:
            name, _size, _covered, value_sum, _mean, _mean0 = \
                row.strip().split()
            locus_pk = int(name.split('_')[0])
            # Rarely, ENCODE uses nan in their bigWig files; if found, set to 0
            if value_sum == 'nan':
                value_sum = 0
            summed[locus_pk] += float(value_sum)
    return {locus: summed[locus.pk] for locus in loci}
def reconcile_stranded_coverage(loci, plus_values, minus_values):
    '''
    Select the coverage value appropriate for each locus's strand:
    '+' loci take plus-strand coverage, '-' loci take minus-strand
    coverage, and unstranded loci (strand is None) take the sum of both.
    '''
    selected = dict()
    for locus in loci:
        strand = locus.strand
        if strand == '+':
            selected[locus] = plus_values[locus]
        elif strand == '-':
            selected[locus] = minus_values[locus]
        elif strand is None:
            selected[locus] = plus_values[locus] + minus_values[locus]
    return selected
def generate_locusgroup_bed(locus_group, output_file_obj):
    '''
    Write a BED6 entry to `output_file_obj` for every region of every
    locus in `locus_group`, then flush the file object.
    '''
    def _bed6_line(locus, interval, index):
        '''Format one region of a locus as a BED6 line.'''
        strand = locus.strand if locus.strand else '.'
        fields = [
            locus.chromosome,
            str(interval[0] - 1),  # BED starts are zero-based, hence -1
            str(interval[1]),
            '{}_{}'.format(str(locus.pk), str(index)),
            '0',
            strand,
        ]
        return '\t'.join(fields) + '\n'

    for locus in models.Locus.objects.filter(group=locus_group):
        for index, region in enumerate(locus.regions):
            output_file_obj.write(_bed6_line(locus, region, index))
    output_file_obj.flush()
def set_selected_transcripts_for_genes():
    '''Queue one async task per annotation that has associated genes.'''
    # Find all annotations with genes
    annotations = models.Annotation.objects.filter(
        gene__isnull=False).distinct()
    # Fan out: one celery signature per annotation, dispatched as a group.
    signatures = [_set_selected_transcripts_for_genes.s(annotation.pk)
                  for annotation in annotations]
    group(signatures).apply_async()
@task
def _set_selected_transcripts_for_genes(annotation_pk):
    """For each gene of the annotation, pick the transcript whose locus
    shows the highest median RNA-seq expression and store it on the gene."""
    # Given the annotation, find the appropriate locus_group and
    # experiment_type
    annotation = models.Annotation.objects.get(pk=annotation_pk)
    experiment_type = models.ExperimentType.objects.get(name='RNA-seq')
    # Only ENCODE RNA-seq datasets on the same assembly are considered.
    datasets = models.Dataset.objects.filter(
        experiment__project__name='ENCODE',
        experiment__experiment_type=experiment_type,
        assembly=annotation.assembly,
    )
    if datasets.exists():  # Check to ensure RNA-seq data exists
        # NOTE(review): filter() returns a QuerySet; downstream lookups use
        # `group=locus_group`, which appears to expect a single LocusGroup --
        # confirm whether this should be .get(...).
        locus_group = models.LocusGroup.objects.filter(
            assembly=annotation.assembly, group_type='mRNA')
        # Expression matrix: rows are locus pks, columns are datasets.
        df = generate_intersection_df(locus_group, experiment_type,
                                      datasets=datasets)
        for gene in models.Gene.objects.filter(annotation=annotation):
            # Check to ensure transcripts exist for the gene
            if models.Transcript.objects.filter(gene=gene).exists():
                loci = models.Locus.objects.filter(
                    transcript__gene=gene, group=locus_group)
                expression = dict()
                for locus in loci:
                    # Median expression across datasets for this locus.
                    expression[locus] = np.median(df.loc[locus.pk])
                # Highest-expression locus wins (sorted descending).
                selected_locus = sorted(
                    expression.items(), key=lambda x: -x[1])[0][0]
                selected_transcript = models.Transcript.objects.get(
                    gene=gene, locus=selected_locus)
                gene.selected_transcript = selected_transcript
                gene.save()
| [
"numpy.median",
"network.models.Annotation.objects.filter",
"network.models.Transcript.objects.get",
"network.tasks.analysis.utils.generate_intersection_df",
"network.tasks.analysis.utils.call_bigwig_average_over_bed",
"network.models.LocusGroup.objects.filter",
"network.models.Annotation.objects.get",
... | [((1827, 1845), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (1838, 1845), False, 'from collections import defaultdict\n'), ((3586, 3632), 'network.models.Locus.objects.filter', 'models.Locus.objects.filter', ([], {'group': 'locus_group'}), '(group=locus_group)\n', (3613, 3632), False, 'from network import models\n'), ((4273, 4320), 'network.models.Annotation.objects.get', 'models.Annotation.objects.get', ([], {'pk': 'annotation_pk'}), '(pk=annotation_pk)\n', (4302, 4320), False, 'from network import models\n'), ((4343, 4392), 'network.models.ExperimentType.objects.get', 'models.ExperimentType.objects.get', ([], {'name': '"""RNA-seq"""'}), "(name='RNA-seq')\n", (4376, 4392), False, 'from network import models\n'), ((4409, 4553), 'network.models.Dataset.objects.filter', 'models.Dataset.objects.filter', ([], {'experiment__project__name': '"""ENCODE"""', 'experiment__experiment_type': 'experiment_type', 'assembly': 'annotation.assembly'}), "(experiment__project__name='ENCODE',\n experiment__experiment_type=experiment_type, assembly=annotation.assembly)\n", (4438, 4553), False, 'from network import models\n'), ((663, 691), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'mode': '"""w"""'}), "(mode='w')\n", (681, 691), False, 'from tempfile import NamedTemporaryFile\n'), ((712, 740), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'mode': '"""w"""'}), "(mode='w')\n", (730, 740), False, 'from tempfile import NamedTemporaryFile\n'), ((750, 822), 'network.tasks.analysis.utils.call_bigwig_average_over_bed', 'call_bigwig_average_over_bed', (['plus_bigwig', 'locus_bed_path', 'plus_tab.name'], {}), '(plus_bigwig, locus_bed_path, plus_tab.name)\n', (778, 822), False, 'from network.tasks.analysis.utils import call_bigwig_average_over_bed, generate_intersection_df\n'), ((878, 952), 'network.tasks.analysis.utils.call_bigwig_average_over_bed', 'call_bigwig_average_over_bed', (['minus_bigwig', 'locus_bed_path', 
'minus_tab.name'], {}), '(minus_bigwig, locus_bed_path, minus_tab.name)\n', (906, 952), False, 'from network.tasks.analysis.utils import call_bigwig_average_over_bed, generate_intersection_df\n'), ((4669, 4755), 'network.models.LocusGroup.objects.filter', 'models.LocusGroup.objects.filter', ([], {'assembly': 'annotation.assembly', 'group_type': '"""mRNA"""'}), "(assembly=annotation.assembly, group_type=\n 'mRNA')\n", (4701, 4755), False, 'from network import models\n'), ((4777, 4850), 'network.tasks.analysis.utils.generate_intersection_df', 'generate_intersection_df', (['locus_group', 'experiment_type'], {'datasets': 'datasets'}), '(locus_group, experiment_type, datasets=datasets)\n', (4801, 4850), False, 'from network.tasks.analysis.utils import call_bigwig_average_over_bed, generate_intersection_df\n'), ((4910, 4959), 'network.models.Gene.objects.filter', 'models.Gene.objects.filter', ([], {'annotation': 'annotation'}), '(annotation=annotation)\n', (4936, 4959), False, 'from network import models\n'), ((1312, 1340), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'mode': '"""w"""'}), "(mode='w')\n", (1330, 1340), False, 'from tempfile import NamedTemporaryFile\n'), ((1350, 1422), 'network.tasks.analysis.utils.call_bigwig_average_over_bed', 'call_bigwig_average_over_bed', (['ambiguous_bigwig', 'locus_bed_path', 'tab.name'], {}), '(ambiguous_bigwig, locus_bed_path, tab.name)\n', (1378, 1422), False, 'from network.tasks.analysis.utils import call_bigwig_average_over_bed, generate_intersection_df\n'), ((3845, 3897), 'network.models.Annotation.objects.filter', 'models.Annotation.objects.filter', ([], {'gene__isnull': '(False)'}), '(gene__isnull=False)\n', (3877, 3897), False, 'from network import models\n'), ((5115, 5184), 'network.models.Locus.objects.filter', 'models.Locus.objects.filter', ([], {'transcript__gene': 'gene', 'group': 'locus_group'}), '(transcript__gene=gene, group=locus_group)\n', (5142, 5184), False, 'from network import models\n'), 
((5493, 5555), 'network.models.Transcript.objects.get', 'models.Transcript.objects.get', ([], {'gene': 'gene', 'locus': 'selected_locus'}), '(gene=gene, locus=selected_locus)\n', (5522, 5555), False, 'from network import models\n'), ((5037, 5080), 'network.models.Transcript.objects.filter', 'models.Transcript.objects.filter', ([], {'gene': 'gene'}), '(gene=gene)\n', (5069, 5080), False, 'from network import models\n'), ((5318, 5345), 'numpy.median', 'np.median', (['df.loc[locus.pk]'], {}), '(df.loc[locus.pk])\n', (5327, 5345), True, 'import numpy as np\n')] |
import os
import jieba
import numpy as np
import pandas as pd
import torch
from elmoformanylangs import Embedder
from gensim.models import Word2Vec
from sentence_transformers import SentenceTransformer
from transformers import AutoModel, AutoTokenizer
from bert_text_classification.predict import bert_classification_predict
from chitchat.interact import chitchat
from text_classification.predict import classification_predict
from text_similarity.predict import similarity_predict
from bert_text_similarity.predict import bert_similarity_predict
# Root directory of the QA project
root_path = os.path.dirname(__file__)
# Question/answer bank loaded once at import time from the CSV.
df = pd.read_csv(root_path + '/data/qa_data.csv')
questions = df['question'].values
answers = df['answer'].values
# Convert a sentence into a sentence vector
def sen2vec(model, sentence):
    """Embed a sentence as the mean of its Word2Vec word vectors.

    model -- trained gensim Word2Vec model; 100-dimensional vectors
        (matches the project's wiki.model -- confirm if the model changes).
    sentence -- raw text; tokenized with jieba before lookup.

    Returns a numpy array of shape (100,); all zeros when the sentence
    yields no tokens or only out-of-vocabulary tokens.
    """
    # Tokenize the sentence
    segment = list(jieba.cut(sentence))
    vec = np.zeros(100)
    for s in segment:
        try:
            # Add the vector corresponding to s
            vec += model.wv[s]
        except KeyError:
            # Out-of-vocabulary token: contributes nothing.
            # (Previously a bare `except:` that hid all other errors.)
            pass
    # Weighted average to form the sentence vector; guard against an
    # empty segmentation, which previously produced a 0/0 NaN vector.
    if segment:
        vec /= len(segment)
    return vec
def elmo2vec(model, sentence):
    '''
    Embed one sentence (str) or an array of sentences (np.ndarray) with ELMo
    and return a list of sentence vectors (mean of the word vectors).

    The `output_layer` parameter of sents2elmo:
    0 for the word encoder
    1 for the first LSTM hidden layer
    2 for the second LSTM hidden layer
    -1 for an average of 3 layers. (default)
    -2 for all 3 layers
    '''
    if isinstance(sentence, str):
        segment = list(jieba.cut(sentence))
        # Convert the tokens to word vectors with ELMo
        vec = model.sents2elmo([segment], output_layer=-1)
    elif isinstance(sentence, np.ndarray):
        segment = [jieba.cut(s) for s in sentence]
        # Convert the tokens to word vectors with ELMo
        vec = model.sents2elmo(segment, output_layer=-1)
    # Average the word vectors to obtain one sentence vector each
    return [np.mean(v, axis=0) for v in vec]
# Mean pooling
def mean_pooling(model_output, attention_mask):
    """Average token embeddings over real (non-padding) positions.

    model_output -- transformers model output; index 0 holds the token
        embeddings of shape (batch, tokens, dim).
    attention_mask -- (batch, tokens) mask; 1 marks real tokens.
    """
    embeddings = model_output[0]
    mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()
    total = (embeddings * mask).sum(1)
    # Clamp avoids division by zero for all-padding rows.
    denom = mask.sum(1).clamp(min=1e-9)
    return total / denom
# Convert sentences to sentence vectors with a transformers model
def bert_to_vec(model, tokenizer, sentence):
    """Encode one sentence (str) or many (np.ndarray) with a BERT-style
    model and return mean-pooled sentence embeddings as a numpy array."""
    print('bert encode start')
    if isinstance(sentence, np.ndarray):
        # Batch path: tokenize all sentences together, padded/truncated to 128.
        encoded_input = tokenizer(list(sentence), padding=True,
                                  truncation=True, max_length=128, return_tensors='pt')
    else:
        encoded_input = tokenizer(sentence, padding=True,
                                  truncation=True, max_length=128, return_tensors='pt')
    # Inference only: no gradients needed.
    with torch.no_grad():
        model_output = model(**encoded_input)
    # Mean-pool token embeddings over non-padding positions.
    sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
    print('bert encode finish')
    return sentence_embeddings.numpy()
# Convert sentences to sentence vectors with sentence_transformers
def sentence_to_vec(model, sentence):
    """Encode one sentence (str) or many (np.ndarray) with a
    SentenceTransformer model and return the embedding(s)."""
    if isinstance(sentence, np.ndarray):
        # Batch path: larger batches with a progress bar, on GPU.
        return model.encode(sentence, batch_size=64, show_progress_bar=True, device='cuda:0')
    return model.encode(sentence)
# Compute cosine similarity
def cosine(a, b):
    """Cosine similarity between vector(s) `a` and each row of `b`
    (dot products divided by the product of the norms)."""
    mat = np.array(b)
    dots = np.matmul(a, mat.T)
    norms = np.linalg.norm(a) * np.linalg.norm(b, axis=1)
    return dots / norms
if __name__ == '__main__':
    # Interactive QA loop: classify the question, pick an encoder,
    # retrieve the most similar question, and re-rank with ESIM/Bert.
    while True:
        text = input('请输入您的问题:').strip()
        # Decide whether the question is closed-domain or chitchat
        # by comparing TextCNN against a fine-tuned Bert classifier
        prob_cnn = round(classification_predict(text)[0], 3)
        print("TextCNN 预测是闲聊的概率为:", prob_cnn)
        prob_bert = round(float(bert_classification_predict(text)[0]), 3)
        print("Bert 预测是闲聊的概率为:", prob_bert)
        if (prob_cnn > 0.5) or (prob_bert > 0.5):
            # Chitchat: answer with the generative model and restart the loop.
            print("当前输入的问题为闲聊")
            print("闲聊回答:", chitchat(text))
            continue
        else:
            print("当前输入的问题为封闭域问题")
        # Ask the user which sentence-encoder to use (1-4), reprompting
        # until a valid choice is entered.
        while True:
            v = int(input("请选择句向量编码方式:\n" +
                          "1. Word2Vec \n" +
                          "2. ElMo \n" +
                          "3. Bert \n" +
                          "4. sentence-transformers \n",).strip())
            if v != 1 and v != 2 and v != 3 and v != 4:
                print("输入的句向量编码方式错误,请重新输入")
                continue
            else:
                break
        print("正在将问题库中的所有问题转换为句向量...")
        # Text representation: encode questions as sentence vectors
        vec = None
        # Word2Vec
        if v == 1:
            v_str = "Word2Vec"
            model_path = root_path + '/word2vec/wiki.model'
            model = Word2Vec.load(model_path)
            # Build sentence vectors for every question in the bank
            question_vec = []
            for q in questions:
                question_vec.append(sen2vec(model, q))
            # Sentence vector for the current input question
            vec = sen2vec(model, text)
        # Elmo
        elif v == 2:
            v_str = "ELMo"
            model_path = root_path + '/elmo/zhs.model'
            model = Embedder(model_path)
            # Build sentence vectors for every question in the bank
            question_vec = []
            for q in questions:
                question_vec.extend(elmo2vec(model, q))
            # Sentence vector for the current input question
            vec = elmo2vec(model, text)[0]
        # Bert
        elif v == 3:
            v_str = "Bert"
            model_path = root_path + "/chinese-roberta-wwm-ext"
            tokenizer = AutoTokenizer.from_pretrained(model_path)
            model = AutoModel.from_pretrained(model_path)
            # Build sentence vectors for every question in the bank
            question_vec = bert_to_vec(model, tokenizer, questions)
            # Sentence vector for the current input question
            vec = bert_to_vec(model, tokenizer, text)
        # sentence transformers
        elif v == 4:
            v_str = "sentence-transformers"
            model_path = root_path + "/paraphrase-multilingual-MiniLM-L12-v2"
            model = SentenceTransformer(model_path, device='cuda:0')
            # Build sentence vectors for every question in the bank
            question_vec = sentence_to_vec(model, questions)
            # Sentence vector for the current input question
            vec = sentence_to_vec(model, text)
        # Similarity between the input question and every bank question
        similarity = cosine(vec, question_vec)
        # The Bert path yields a 2-D array; flatten it to 1-D
        if v == 3:
            similarity = similarity.ravel()
        # Take the maximum similarity
        max_similarity = max(similarity)
        print(v_str + " 最大相似度:", max_similarity)
        index = np.argmax(similarity)
        if max_similarity < 0.8:
            # Below threshold: only suggest the closest question.
            print('没有找到对应的问题,您想问的是不是:', questions[index])
            continue
        print(v_str + ' 最相似的问题:', questions[index])
        print(v_str + ' 答案:', answers[index][0:100], "...")
        # Re-rank the top-10 candidates with ESIM and Bert similarity models.
        top_10_similarity = np.argsort(-similarity)[0:10]
        top_10_question = questions[top_10_similarity]
        esim_similarity = similarity_predict([text] * 10, top_10_question)
        bert_similarity = bert_similarity_predict([text] * 10, top_10_question)
        index_dic = {}
        print(v_str + ' 和 ESIM Bert Top10 候选集:')
        df_top_10 = pd.DataFrame(columns=['question', v_str, 'ESIM', 'Bert'])
        pd.set_option('colheader_justify', 'center')
        for i, index in enumerate(top_10_similarity):
            df_top_10.loc[i] = [top_10_question[i], similarity[index],
                                esim_similarity[i], bert_similarity[i]]
            index_dic[i] = index
        print(df_top_10)
        esim_index = np.argsort(-esim_similarity)[0]
        print('ESIM最相似的问题:第' + str(esim_index) + '个',
              questions[index_dic[esim_index]])
        print('ESIM答案:', answers[index_dic[esim_index]][0:100], "...")
        bert_index = np.argsort(-bert_similarity)[0]
        print('Bert最相似的问题:第' + str(bert_index) + '个',
              questions[index_dic[bert_index]])
        print('Bert答案:', answers[index_dic[bert_index]][0:100], "...")
"bert_text_classification.predict.bert_classification_predict",
"pandas.read_csv",
"numpy.argsort",
"numpy.array",
"torch.sum",
"transformers.AutoTokenizer.from_pretrained",
"numpy.linalg.norm",
"numpy.mean",
"transformers.AutoModel.from_pretrained",
"gensim.models.Word2Vec.load",
"pandas.set_op... | [((573, 598), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (588, 598), False, 'import os\n'), ((605, 649), 'pandas.read_csv', 'pd.read_csv', (["(root_path + '/data/qa_data.csv')"], {}), "(root_path + '/data/qa_data.csv')\n", (616, 649), True, 'import pandas as pd\n'), ((820, 833), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (828, 833), True, 'import numpy as np\n'), ((1896, 1948), 'torch.sum', 'torch.sum', (['(token_embeddings * input_mask_expanded)', '(1)'], {}), '(token_embeddings * input_mask_expanded, 1)\n', (1905, 1948), False, 'import torch\n'), ((789, 808), 'jieba.cut', 'jieba.cut', (['sentence'], {}), '(sentence)\n', (798, 808), False, 'import jieba\n'), ((1652, 1670), 'numpy.mean', 'np.mean', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (1659, 1670), True, 'import numpy as np\n'), ((2474, 2489), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2487, 2489), False, 'import torch\n'), ((6137, 6158), 'numpy.argmax', 'np.argmax', (['similarity'], {}), '(similarity)\n', (6146, 6158), True, 'import numpy as np\n'), ((6533, 6581), 'text_similarity.predict.similarity_predict', 'similarity_predict', (['([text] * 10)', 'top_10_question'], {}), '([text] * 10, top_10_question)\n', (6551, 6581), False, 'from text_similarity.predict import similarity_predict\n'), ((6608, 6661), 'bert_text_similarity.predict.bert_similarity_predict', 'bert_similarity_predict', (['([text] * 10)', 'top_10_question'], {}), '([text] * 10, top_10_question)\n', (6631, 6661), False, 'from bert_text_similarity.predict import bert_similarity_predict\n'), ((6754, 6811), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['question', v_str, 'ESIM', 'Bert']"}), "(columns=['question', v_str, 'ESIM', 'Bert'])\n", (6766, 6811), True, 'import pandas as pd\n'), ((6820, 6864), 'pandas.set_option', 'pd.set_option', (['"""colheader_justify"""', '"""center"""'], {}), "('colheader_justify', 'center')\n", (6833, 6864), True, 'import pandas as 
pd\n'), ((1348, 1367), 'jieba.cut', 'jieba.cut', (['sentence'], {}), '(sentence)\n', (1357, 1367), False, 'import jieba\n'), ((3080, 3097), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (3094, 3097), True, 'import numpy as np\n'), ((3100, 3125), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {'axis': '(1)'}), '(b, axis=1)\n', (3114, 3125), True, 'import numpy as np\n'), ((4340, 4365), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['model_path'], {}), '(model_path)\n', (4353, 4365), False, 'from gensim.models import Word2Vec\n'), ((6422, 6445), 'numpy.argsort', 'np.argsort', (['(-similarity)'], {}), '(-similarity)\n', (6432, 6445), True, 'import numpy as np\n'), ((7129, 7157), 'numpy.argsort', 'np.argsort', (['(-esim_similarity)'], {}), '(-esim_similarity)\n', (7139, 7157), True, 'import numpy as np\n'), ((7355, 7383), 'numpy.argsort', 'np.argsort', (['(-bert_similarity)'], {}), '(-bert_similarity)\n', (7365, 7383), True, 'import numpy as np\n'), ((1511, 1523), 'jieba.cut', 'jieba.cut', (['s'], {}), '(s)\n', (1520, 1523), False, 'import jieba\n'), ((3062, 3073), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (3070, 3073), True, 'import numpy as np\n'), ((3298, 3326), 'text_classification.predict.classification_predict', 'classification_predict', (['text'], {}), '(text)\n', (3320, 3326), False, 'from text_classification.predict import classification_predict\n'), ((3619, 3633), 'chitchat.interact.chitchat', 'chitchat', (['text'], {}), '(text)\n', (3627, 3633), False, 'from chitchat.interact import chitchat\n'), ((4714, 4734), 'elmoformanylangs.Embedder', 'Embedder', (['model_path'], {}), '(model_path)\n', (4722, 4734), False, 'from elmoformanylangs import Embedder\n'), ((3413, 3446), 'bert_text_classification.predict.bert_classification_predict', 'bert_classification_predict', (['text'], {}), '(text)\n', (3440, 3446), False, 'from bert_text_classification.predict import bert_classification_predict\n'), ((5110, 5151), 
'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_path'], {}), '(model_path)\n', (5139, 5151), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((5172, 5209), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['model_path'], {}), '(model_path)\n', (5197, 5209), False, 'from transformers import AutoModel, AutoTokenizer\n'), ((5607, 5655), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_path'], {'device': '"""cuda:0"""'}), "(model_path, device='cuda:0')\n", (5626, 5655), False, 'from sentence_transformers import SentenceTransformer\n')] |
import logging
logging.basicConfig(level=logging.DEBUG)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
fgp = __import__('FaST-GP')
ds = __import__('data_simulation')
def get_coords(index):
    """Parse an index of ``'XxY'`` position strings into a coordinate frame.

    Returns a DataFrame with the same index and float columns 'x' and 'y'.
    """
    parts = index.str.split('x')
    frame = pd.DataFrame(index=index)
    for col, pos in (('x', 0), ('y', 1)):
        frame[col] = parts.str.get(pos).map(float)
    return frame
def main():
    """Fit FaST-GP on the MOB Rep12 dataset and plot max-LL against delta.

    Reads 'data/Rep12_MOB_1.csv', runs ``fgp.dyn_de`` over a 32-point delta
    grid at lengthscale 10, saves the scatter to 'fastgp-fits.png' and
    prints the 20 genes with the smallest optimal delta.
    """
    df = pd.read_csv('data/Rep12_MOB_1.csv', index_col=0)
    sample_info = get_coords(df.index)
    # Run workflow
    X = sample_info[['x', 'y']]
    dfm = np.log10(df + 1)  # log-transform counts with a pseudocount of 1
    lengthscale = 10
    results = fgp.dyn_de(X, dfm, lengthscale=lengthscale, num=32)
    plt.scatter(results['max_delta'], results['max_ll'], c='k')
    plt.xscale('log')
    plt.xlim(np.exp(-11), np.exp(11))
    # raw string: '\d' is an invalid escape sequence in a plain literal
    plt.xlabel(r'$\delta$')
    plt.ylabel('Maximum Log Likelihood')
    plt.title('lengthscale: {}'.format(lengthscale))
    plt.savefig('fastgp-fits.png', bbox_inches='tight')
    print(results.sort_values('max_delta').head(20))
def plot_LL_curves():
    """Plot the log-likelihood curve over delta for simulated genes.

    For every simulated gene the LL over a 32-point delta grid is drawn,
    the maximizing delta is marked with a red triangle, and the ground
    truth delta with a vertical red line.  Saved to 'example_grids.png'.
    """
    lengthscale = 10
    X, dfm, true_vals = ds.make_ls_data(lengthscale, 250, 10)
    true_vals['delta'] = true_vals['s2_e'] / true_vals['s2_t']
    K = fgp.SE_kernel(X, lengthscale)
    U, S = fgp.factor(K)
    UT1 = fgp.get_UT1(U)
    n, G = dfm.shape
    # the delta grid is identical for every gene, so build it only once
    delta_range = np.logspace(base=np.e, start=-10, stop=10, num=32)
    # number of subplot rows; subplot() requires integer arguments
    # (matplotlib >= 3.3 raises on floats)
    n_rows = int(np.ceil(G / 2.))
    for g in range(G):
        y = dfm.iloc[:, g]
        UTy = fgp.get_UTy(U, y)
        LLs = []
        max_ll = -np.inf
        max_delta = np.nan
        for delta in delta_range:
            cur_ll = fgp.LL(delta, UTy, UT1, S, n)
            LLs.append(cur_ll)
            if cur_ll > max_ll:
                max_ll = cur_ll
                max_delta = delta
        plt.subplot(n_rows, 2, g + 1)
        plt.plot(delta_range, LLs, marker='o', markeredgecolor='w', markersize=2, markeredgewidth=1, c='k')
        plt.scatter([max_delta], [max_ll], marker='v', c='r', edgecolor='none', zorder=5)
        plt.title(dfm.columns[g])
        # last column of true_vals is the ground-truth delta added above
        plt.axvline(true_vals.iloc[g, -1], color='r')
        plt.xscale('log')
        plt.xlim(np.exp(-11), np.exp(11))
    plt.savefig('example_grids.png')
def opt_simulation():
    """Compare inferred parameters against simulated ground truth.

    Simulates data with known delta, s2_t and mu, runs ``fgp.dyn_de``, and
    plots inferred vs. true values for all three parameters.  The figure
    is saved to 'simulation_accuracy.png'.
    """
    lengthscale = 10
    logging.info('Sampling ground truth data...')
    X, dfm, true_vals = ds.make_ls_data(10, 500, 500)
    logging.info('Done')
    results = fgp.dyn_de(X, dfm, lengthscale=lengthscale, num=32)
    true_vals['delta'] = true_vals['s2_e'] / true_vals['s2_t']
    # delta: inferred vs ground truth on a log-log scale
    plt.subplot(3, 1, 1)
    plt.scatter(results['max_delta'], true_vals['delta'], c='k', label=None)
    plt.xscale('log')
    plt.xlim(np.exp(-11.), np.exp(11.))
    plt.yscale('log')
    plt.ylim(np.exp(-11.), np.exp(11.))
    plt.plot([1e-4, 1e4], [1e-4, 1e4], c='r', label='$ x = y $ line')
    plt.legend(loc='upper left')
    # raw strings: '\d', '\s' and '\m' are invalid escape sequences in
    # plain literals (SyntaxWarning on modern Python)
    plt.ylabel(r'Ground truth $ \delta $')
    # sigma_t^2: inferred vs ground truth
    plt.subplot(3, 1, 2)
    plt.scatter(results['max_s2_t_hat'], true_vals['s2_t'], c='k')
    plt.xscale('log')
    plt.xlim(np.exp(-6.), np.exp(6.))
    plt.yscale('log')
    plt.ylim(np.exp(-6.), np.exp(6.))
    plt.plot([1e-2, 1e2], [1e-2, 1e2], c='r')
    plt.ylabel(r'Ground truth $ \sigma_t^2 $')
    # mu: inferred vs ground truth on a linear scale
    plt.subplot(3, 1, 3)
    plt.scatter(results['max_mu_hat'], true_vals['mu'], c='k')
    plt.xlim(-1, 6)
    plt.ylim(-1, 6)
    plt.plot([0, 5], [0, 5], c='r')
    plt.ylabel(r'Ground truth $ \mu $')
    plt.xlabel('Inferred Value')
    plt.savefig('simulation_accuracy.png')
if __name__ == '__main__':
    # Default entry point: run the ground-truth recovery experiment.
    # The commented alternatives below are switched on manually.
    opt_simulation()
    # plot_LL_curves()
    # main()
| [
"numpy.log10",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"logging.info",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"numpy.logspace",
"matplotlib.pyplot.yscale",
"numpy.ceil",
"matplotli... | [((16, 56), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (35, 56), False, 'import logging\n'), ((253, 278), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index'}), '(index=index)\n', (265, 278), True, 'import pandas as pd\n'), ((441, 489), 'pandas.read_csv', 'pd.read_csv', (['"""data/Rep12_MOB_1.csv"""'], {'index_col': '(0)'}), "('data/Rep12_MOB_1.csv', index_col=0)\n", (452, 489), True, 'import pandas as pd\n'), ((591, 607), 'numpy.log10', 'np.log10', (['(df + 1)'], {}), '(df + 1)\n', (599, 607), True, 'import numpy as np\n'), ((680, 739), 'matplotlib.pyplot.scatter', 'plt.scatter', (["results['max_delta']", "results['max_ll']"], {'c': '"""k"""'}), "(results['max_delta'], results['max_ll'], c='k')\n", (691, 739), True, 'import matplotlib.pyplot as plt\n'), ((744, 761), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (754, 761), True, 'import matplotlib.pyplot as plt\n'), ((804, 827), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta$"""'], {}), "('$\\\\delta$')\n", (814, 827), True, 'import matplotlib.pyplot as plt\n'), ((831, 867), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Maximum Log Likelihood"""'], {}), "('Maximum Log Likelihood')\n", (841, 867), True, 'import matplotlib.pyplot as plt\n'), ((915, 966), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fastgp-fits.png"""'], {'bbox_inches': '"""tight"""'}), "('fastgp-fits.png', bbox_inches='tight')\n", (926, 966), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2339), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""example_grids.png"""'], {}), "('example_grids.png')\n", (2318, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2379, 2424), 'logging.info', 'logging.info', (['"""Sampling ground truth data..."""'], {}), "('Sampling ground truth data...')\n", (2391, 2424), False, 'import logging\n'), ((2483, 2503), 'logging.info', 'logging.info', (['"""Done"""'], {}), "('Done')\n", 
(2495, 2503), False, 'import logging\n'), ((2630, 2650), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (2641, 2650), True, 'import matplotlib.pyplot as plt\n'), ((2655, 2727), 'matplotlib.pyplot.scatter', 'plt.scatter', (["results['max_delta']", "true_vals['delta']"], {'c': '"""k"""', 'label': 'None'}), "(results['max_delta'], true_vals['delta'], c='k', label=None)\n", (2666, 2727), True, 'import matplotlib.pyplot as plt\n'), ((2732, 2749), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2742, 2749), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2811), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2804, 2811), True, 'import matplotlib.pyplot as plt\n'), ((2856, 2933), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.0001, 10000.0]', '[0.0001, 10000.0]'], {'c': '"""r"""', 'label': '"""$ x = y $ line"""'}), "([0.0001, 10000.0], [0.0001, 10000.0], c='r', label='$ x = y $ line')\n", (2864, 2933), True, 'import matplotlib.pyplot as plt\n'), ((2927, 2955), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2937, 2955), True, 'import matplotlib.pyplot as plt\n'), ((2961, 2999), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ground truth $ \\\\delta $"""'], {}), "('Ground truth $ \\\\delta $')\n", (2971, 2999), True, 'import matplotlib.pyplot as plt\n'), ((3004, 3024), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (3015, 3024), True, 'import matplotlib.pyplot as plt\n'), ((3029, 3091), 'matplotlib.pyplot.scatter', 'plt.scatter', (["results['max_s2_t_hat']", "true_vals['s2_t']"], {'c': '"""k"""'}), "(results['max_s2_t_hat'], true_vals['s2_t'], c='k')\n", (3040, 3091), True, 'import matplotlib.pyplot as plt\n'), ((3096, 3113), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (3106, 3113), True, 'import matplotlib.pyplot as plt\n'), ((3156, 3173), 
'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (3166, 3173), True, 'import matplotlib.pyplot as plt\n'), ((3216, 3261), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.01, 100.0]', '[0.01, 100.0]'], {'c': '"""r"""'}), "([0.01, 100.0], [0.01, 100.0], c='r')\n", (3224, 3261), True, 'import matplotlib.pyplot as plt\n'), ((3262, 3304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ground truth $ \\\\sigma_t^2 $"""'], {}), "('Ground truth $ \\\\sigma_t^2 $')\n", (3272, 3304), True, 'import matplotlib.pyplot as plt\n'), ((3309, 3329), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (3320, 3329), True, 'import matplotlib.pyplot as plt\n'), ((3334, 3392), 'matplotlib.pyplot.scatter', 'plt.scatter', (["results['max_mu_hat']", "true_vals['mu']"], {'c': '"""k"""'}), "(results['max_mu_hat'], true_vals['mu'], c='k')\n", (3345, 3392), True, 'import matplotlib.pyplot as plt\n'), ((3397, 3412), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(6)'], {}), '(-1, 6)\n', (3405, 3412), True, 'import matplotlib.pyplot as plt\n'), ((3417, 3432), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(6)'], {}), '(-1, 6)\n', (3425, 3432), True, 'import matplotlib.pyplot as plt\n'), ((3437, 3468), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 5]', '[0, 5]'], {'c': '"""r"""'}), "([0, 5], [0, 5], c='r')\n", (3445, 3468), True, 'import matplotlib.pyplot as plt\n'), ((3473, 3508), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ground truth $ \\\\mu $"""'], {}), "('Ground truth $ \\\\mu $')\n", (3483, 3508), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3541), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Inferred Value"""'], {}), "('Inferred Value')\n", (3523, 3541), True, 'import matplotlib.pyplot as plt\n'), ((3547, 3585), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""simulation_accuracy.png"""'], {}), "('simulation_accuracy.png')\n", (3558, 3585), True, 'import matplotlib.pyplot as plt\n'), ((775, 786), 'numpy.exp', 
'np.exp', (['(-11)'], {}), '(-11)\n', (781, 786), True, 'import numpy as np\n'), ((788, 798), 'numpy.exp', 'np.exp', (['(11)'], {}), '(11)\n', (794, 798), True, 'import numpy as np\n'), ((1582, 1632), 'numpy.logspace', 'np.logspace', ([], {'base': 'np.e', 'start': '(-10)', 'stop': '(10)', 'num': '(32)'}), '(base=np.e, start=-10, stop=10, num=32)\n', (1593, 1632), True, 'import numpy as np\n'), ((1956, 2059), 'matplotlib.pyplot.plot', 'plt.plot', (['delta_range', 'LLs'], {'marker': '"""o"""', 'markeredgecolor': '"""w"""', 'markersize': '(2)', 'markeredgewidth': '(1)', 'c': '"""k"""'}), "(delta_range, LLs, marker='o', markeredgecolor='w', markersize=2,\n markeredgewidth=1, c='k')\n", (1964, 2059), True, 'import matplotlib.pyplot as plt\n'), ((2064, 2149), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[max_delta]', '[max_ll]'], {'marker': '"""v"""', 'c': '"""r"""', 'edgecolor': '"""none"""', 'zorder': '(5)'}), "([max_delta], [max_ll], marker='v', c='r', edgecolor='none',\n zorder=5)\n", (2075, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2154, 2179), 'matplotlib.pyplot.title', 'plt.title', (['dfm.columns[g]'], {}), '(dfm.columns[g])\n', (2163, 2179), True, 'import matplotlib.pyplot as plt\n'), ((2188, 2233), 'matplotlib.pyplot.axvline', 'plt.axvline', (['true_vals.iloc[g, -1]'], {'color': '"""r"""'}), "(true_vals.iloc[g, -1], color='r')\n", (2199, 2233), True, 'import matplotlib.pyplot as plt\n'), ((2242, 2259), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2252, 2259), True, 'import matplotlib.pyplot as plt\n'), ((2763, 2776), 'numpy.exp', 'np.exp', (['(-11.0)'], {}), '(-11.0)\n', (2769, 2776), True, 'import numpy as np\n'), ((2777, 2789), 'numpy.exp', 'np.exp', (['(11.0)'], {}), '(11.0)\n', (2783, 2789), True, 'import numpy as np\n'), ((2825, 2838), 'numpy.exp', 'np.exp', (['(-11.0)'], {}), '(-11.0)\n', (2831, 2838), True, 'import numpy as np\n'), ((2839, 2851), 'numpy.exp', 'np.exp', (['(11.0)'], {}), '(11.0)\n', (2845, 
2851), True, 'import numpy as np\n'), ((3127, 3139), 'numpy.exp', 'np.exp', (['(-6.0)'], {}), '(-6.0)\n', (3133, 3139), True, 'import numpy as np\n'), ((3140, 3151), 'numpy.exp', 'np.exp', (['(6.0)'], {}), '(6.0)\n', (3146, 3151), True, 'import numpy as np\n'), ((3187, 3199), 'numpy.exp', 'np.exp', (['(-6.0)'], {}), '(-6.0)\n', (3193, 3199), True, 'import numpy as np\n'), ((3200, 3211), 'numpy.exp', 'np.exp', (['(6.0)'], {}), '(6.0)\n', (3206, 3211), True, 'import numpy as np\n'), ((1921, 1937), 'numpy.ceil', 'np.ceil', (['(G / 2.0)'], {}), '(G / 2.0)\n', (1928, 1937), True, 'import numpy as np\n'), ((2277, 2288), 'numpy.exp', 'np.exp', (['(-11)'], {}), '(-11)\n', (2283, 2288), True, 'import numpy as np\n'), ((2290, 2300), 'numpy.exp', 'np.exp', (['(11)'], {}), '(11)\n', (2296, 2300), True, 'import numpy as np\n')] |
import numpy as np
import torch
from affogato.affinities import compute_affinities
from torchvision.utils import make_grid
from inferno.extensions.criteria import SorensenDiceLoss
class ConcatDataset(torch.utils.data.Dataset):
    """Concatenate several datasets into one contiguous indexable dataset."""

    def __init__(self, *datasets):
        self.datasets = datasets
        self.lens = [len(ds) for ds in datasets]
        # offsets[i] becomes the global index of the first item of dataset i:
        # cumulative sums shifted right by one, with a leading zero
        offsets = np.cumsum(self.lens)
        offsets[-1] = 0
        self.start_idx = np.roll(offsets, 1)

    def __len__(self):
        return sum(self.lens)

    def __getitem__(self, index):
        # pick the last dataset whose start offset is <= index
        candidates = np.where(self.start_idx <= index)[0]
        which = candidates[-1]
        return self.datasets[which][index - self.start_idx[which]]
class DefaultDataset(torch.utils.data.Dataset):
    """Dataset yielding (raw, affinities, affinity-mask) triples computed
    on the fly from a raw image, a segmentation and a set of mask ids.
    """
    # fixed crop size used for every sample
    patch_shape = [512, 512]  # TODO expose this and other parameters

    def __init__(self, raw, seg, mask_ids, offsets, transforms=None):
        self.raw = raw
        self.seg = seg
        self.mask_ids = mask_ids
        self.offsets = offsets
        self.transforms = transforms
        self.n_samples = self.estimate_n_samples(self.raw.shape, self.patch_shape)

    @staticmethod
    def estimate_n_samples(shape, patch_shape):
        """Approximate the sample count by tiling ``shape`` with ``patch_shape``."""
        crops = 1.
        for sh, cs in zip(shape, patch_shape):
            crops *= sh / float(cs)
        return int(crops)

    def to_affinities(self, seg, mask):
        """Compute inverted affinities of ``seg`` restricted to ``mask``."""
        seg[~mask] = 0  # NOTE: mutates the passed-in segmentation in place
        affinities, valid = compute_affinities(seg, self.offsets, have_ignore_label=True)
        valid = valid.astype('bool')
        affinities = 1. - affinities
        # affinity edges crossing the mask boundary are always marked valid
        transitions, transition_valid = compute_affinities(mask, self.offsets)
        transitions[~transition_valid.astype('bool')] = 1
        valid[~transitions.astype('bool')] = True
        return affinities, valid

    def _sample(self):
        """Draw one random crop and return its (raw, seg, mask) triple."""
        # random top-left corner such that the patch fits into the volume
        corner = [np.random.randint(0, sh - csh) if sh > csh else 0
                  for sh, csh in zip(self.raw.shape, self.patch_shape)]
        bb = tuple(slice(off, off + csh) for off, csh in zip(corner, self.patch_shape))
        raw, seg = self.raw[bb], self.seg[bb]
        if self.transforms is not None:
            raw, seg = self.transforms(raw, seg)
            raw, seg = raw.copy(), seg.copy()
        mask = np.isin(seg, self.mask_ids)
        return raw, seg, mask

    def __getitem__(self, index):
        # TODO sample so that we are biased towards the mask
        raw, seg, mask = self._sample()
        raw = raw[None]  # add a channel axis
        affs, aff_mask = self.to_affinities(seg, mask)
        return raw, affs, aff_mask

    def __len__(self):
        return self.n_samples
class MaskedLoss(torch.nn.Module):
    """Sorensen-Dice loss evaluated only where ``mask`` is non-zero."""

    def __init__(self):
        super().__init__()
        self.criterion = SorensenDiceLoss()

    def forward(self, pred, y, mask):
        # the mask is data, not a trainable parameter
        mask.requires_grad = False
        return self.criterion(pred * mask, y)
def default_training(proc_id, net, ds,
                     pipe, device, step):
    """Training loop intended to run in a background process.

    Trains ``net`` on ``ds`` with Adam and the masked Sorensen-Dice loss,
    logging scalars and image grids to tensorboard ('./runs/imws').  The
    loop runs until a falsy message arrives on the pipe; the final step
    count is then sent back and the function returns.

    Args:
        proc_id: identifier of the worker process (unused in the body).
        net: the model to train.
        ds: torch dataset yielding (raw, target, mask) batches.
        pipe: (receiving-end, sending-end) connection pair for control.
        device: torch device the model inputs are moved to.
        step: initial global step counter (continued across calls).
    """
    loader = torch.utils.data.DataLoader(ds, batch_size=1, num_workers=2)
    p_out, p_in = pipe
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
    loss = MaskedLoss()
    loss = loss.to(device)
    logger = torch.utils.tensorboard.SummaryWriter('./runs/imws')
    add_gradients = True
    log_frequency = 10
    net.train()
    while True:
        # stop signal: any falsy message ends training and reports the step
        if p_out.poll():
            if not p_out.recv():
                p_in.send(step)
                break
        for x, y, mask in loader:
            x = x.to(device)
            y, mask = y.to(device), mask.to(device)
            optimizer.zero_grad()
            pred = net(x)
            pred.retain_grad()  # keep output gradients for visualization below
            loss_val = loss(pred, y, mask)
            loss_val.backward()
            optimizer.step()
            logger.add_scalar("loss", loss_val.item(), step)
            step += 1
            if step % log_frequency == 0:
                print("Background training process iteration", step)
                x = x[0].detach().cpu()
                logger.add_image('input', x, step)
                y = y[0].detach().cpu()
                if add_gradients:
                    # min-max normalize the output gradients for display
                    grads = pred.grad[0].detach().cpu()
                    grads -= grads.min()
                    grads /= grads.max()
                pred = torch.clamp(pred[0].detach().cpu(), 0.001, 0.999)
                # grid rows: targets, then predictions, then (optionally) gradients
                tandp = [target.unsqueeze(0) for target in y]
                nrow = len(tandp)
                tandp.extend([p.unsqueeze(0) for p in pred])
                if add_gradients:
                    tandp.extend([grad.unsqueeze(0) for grad in grads])
                tandp = make_grid(tandp, nrow=nrow)
                logger.add_image('target_and_prediction', tandp, step)
                # for debugging
                # return x, y, pred, grads
| [
"torch.utils.tensorboard.SummaryWriter",
"inferno.extensions.criteria.SorensenDiceLoss",
"numpy.prod",
"numpy.roll",
"numpy.where",
"numpy.isin",
"affogato.affinities.compute_affinities",
"numpy.random.randint",
"torch.utils.data.DataLoader",
"numpy.cumsum",
"torchvision.utils.make_grid"
] | [((3514, 3574), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['ds'], {'batch_size': '(1)', 'num_workers': '(2)'}), '(ds, batch_size=1, num_workers=2)\n', (3541, 3574), False, 'import torch\n'), ((3725, 3777), 'torch.utils.tensorboard.SummaryWriter', 'torch.utils.tensorboard.SummaryWriter', (['"""./runs/imws"""'], {}), "('./runs/imws')\n", (3762, 3777), False, 'import torch\n'), ((378, 398), 'numpy.cumsum', 'np.cumsum', (['self.lens'], {}), '(self.lens)\n', (387, 398), True, 'import numpy as np\n'), ((455, 481), 'numpy.roll', 'np.roll', (['self.start_idx', '(1)'], {}), '(self.start_idx, 1)\n', (462, 481), True, 'import numpy as np\n'), ((1046, 1107), 'affogato.affinities.compute_affinities', 'compute_affinities', (['seg', 'self.offsets'], {'have_ignore_label': '(True)'}), '(seg, self.offsets, have_ignore_label=True)\n', (1064, 1107), False, 'from affogato.affinities import compute_affinities\n'), ((1214, 1252), 'affogato.affinities.compute_affinities', 'compute_affinities', (['mask', 'self.offsets'], {}), '(mask, self.offsets)\n', (1232, 1252), False, 'from affogato.affinities import compute_affinities\n'), ((3213, 3231), 'inferno.extensions.criteria.SorensenDiceLoss', 'SorensenDiceLoss', ([], {}), '()\n', (3229, 3231), False, 'from inferno.extensions.criteria import SorensenDiceLoss\n'), ((1637, 1659), 'numpy.prod', 'np.prod', (['crops_per_dim'], {}), '(crops_per_dim)\n', (1644, 1659), True, 'import numpy as np\n'), ((2561, 2588), 'numpy.isin', 'np.isin', (['seg', 'self.mask_ids'], {}), '(seg, self.mask_ids)\n', (2568, 2588), True, 'import numpy as np\n'), ((590, 627), 'numpy.where', 'np.where', (['(index - self.start_idx >= 0)'], {}), '(index - self.start_idx >= 0)\n', (598, 627), True, 'import numpy as np\n'), ((5148, 5175), 'torchvision.utils.make_grid', 'make_grid', (['tandp'], {'nrow': 'nrow'}), '(tandp, nrow=nrow)\n', (5157, 5175), False, 'from torchvision.utils import make_grid\n'), ((2116, 2146), 'numpy.random.randint', 
'np.random.randint', (['(0)', '(sh - csh)'], {}), '(0, sh - csh)\n', (2133, 2146), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""The DpuCar is a module which contains the DpuCar class and the related common function
By xiaobo
Contact <EMAIL>
Created on June 7 22:10 2020
"""
# Copyright (C)
#
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
import PIL
import IPython
from io import BytesIO as StringIO
from IPython.display import display
from IPython.display import clear_output
import cv2
from dnndk import n2cube
import numpy as np
from numpy import float32
import os
import matplotlib.pyplot as plt
import time
class DpuCar(object):
    """Camera capture plus DPU inference wrapper for the car."""

    def __init__(self, dpu_task, dpu_input_node="x_input_Conv2D", dpu_output_node="y_out_MatMul", dpu_img_size=128):
        self.cap = cv2.VideoCapture(0)
        self.cap.set(3, 160)  # property 3: frame width
        self.cap.set(4, 120)  # property 4: frame height
        print(self.cap.get(3), self.cap.get(4))
        print(self.cap.get(cv2.CAP_PROP_FPS))
        self.dpuInputNode = dpu_input_node
        self.dpuOutputNode = dpu_output_node
        self.dpuTask = dpu_task
        self.dpuImgSize = dpu_img_size

    def get_image(self, idx=0):
        """Grab one frame from the camera sensor.

        Args:
            idx: index of the sensor; only 0 is supported.

        Returns:
            the captured frame on success, ``False`` when the read
            failed, ``None`` for an unsupported index.
        """
        if idx != 0:
            print("The index should be 0!")
            return None
        ok, frame = self.cap.read()
        if not ok:
            print("Please connect the camera!")
            return False
        return frame

    def dpuPredictSoftmax(self, img_input):
        """Run DPU inference on an image and return the argmax class index."""
        target = (self.dpuImgSize, self.dpuImgSize)
        scaled = cv2.resize(img_input, target, interpolation=cv2.INTER_CUBIC)
        pixels = np.array(scaled, dtype='float32')
        # normalize to [0, 1] if the image still holds 0..255 values
        if np.max(pixels) > 1:
            pixels = pixels / 255.
        total = pixels.shape[0] * pixels.shape[1] * pixels.shape[2]
        n2cube.dpuSetInputTensorInHWCFP32(self.dpuTask, self.dpuInputNode, pixels, total)
        n2cube.dpuRunTask(self.dpuTask)
        task, node = self.dpuTask, self.dpuOutputNode
        conf = n2cube.dpuGetOutputTensorAddress(task, node)
        channel = n2cube.dpuGetOutputTensorChannel(task, node)
        out_scale = n2cube.dpuGetOutputTensorScale(task, node)
        tensor_size = n2cube.dpuGetOutputTensorSize(task, node)
        softmax = n2cube.dpuRunSoftmax(conf, channel, tensor_size // channel, out_scale)
        return np.argmax(softmax, axis=0)
class CommonFunction(object):
    """Helpers for showing OpenCV images inside a Jupyter notebook."""

    @classmethod
    def img2display(cls, img_mat):
        """Encode a BGR image as PNG and wrap it for notebook display.

        Returns an ``IPython.display.Image`` built from the base64-encoded
        PNG bytes.
        """
        # 'base64' and 'Image' were previously undefined here (NameError):
        # import base64 locally and fully qualify IPython.display.Image.
        import base64
        ret, png = cv2.imencode('.png', img_mat)
        encoded = base64.b64encode(png)
        return IPython.display.Image(data=encoded.decode('ascii'))

    @classmethod
    def show_img_jupyter(cls, img_mat):
        """Render a BGR image inline in the current Jupyter cell."""
        # OpenCV stores BGR; convert so colors display correctly
        img_mat = cv2.cvtColor(img_mat, cv2.COLOR_BGR2RGB)
        f = StringIO()
        PIL.Image.fromarray(img_mat).save(f, 'png')
        IPython.display.display(IPython.display.Image(data=f.getvalue()))

    @classmethod
    def clear_output(cls):
        """Clear the cell output (used between streamed video frames)."""
        clear_output(wait=True)
"PIL.Image.fromarray",
"dnndk.n2cube.dpuGetOutputTensorScale",
"cv2.imencode",
"dnndk.n2cube.dpuGetOutputTensorAddress",
"dnndk.n2cube.dpuGetOutputTensorChannel",
"numpy.argmax",
"io.BytesIO",
"IPython.display.clear_output",
"numpy.max",
"numpy.array",
"dnndk.n2cube.dpuRunTask",
"cv2.VideoCapt... | [((1261, 1280), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1277, 1280), False, 'import cv2\n'), ((2147, 2240), 'cv2.resize', 'cv2.resize', (['img_input', '(self.dpuImgSize, self.dpuImgSize)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img_input, (self.dpuImgSize, self.dpuImgSize), interpolation=cv2\n .INTER_CUBIC)\n', (2157, 2240), False, 'import cv2\n'), ((2258, 2294), 'numpy.array', 'np.array', (['img_scale'], {'dtype': '"""float32"""'}), "(img_scale, dtype='float32')\n", (2266, 2294), True, 'import numpy as np\n'), ((2465, 2558), 'dnndk.n2cube.dpuSetInputTensorInHWCFP32', 'n2cube.dpuSetInputTensorInHWCFP32', (['self.dpuTask', 'self.dpuInputNode', 'img1_scale', 'input_len'], {}), '(self.dpuTask, self.dpuInputNode,\n img1_scale, input_len)\n', (2498, 2558), False, 'from dnndk import n2cube\n'), ((2563, 2594), 'dnndk.n2cube.dpuRunTask', 'n2cube.dpuRunTask', (['self.dpuTask'], {}), '(self.dpuTask)\n', (2580, 2594), False, 'from dnndk import n2cube\n'), ((2610, 2676), 'dnndk.n2cube.dpuGetOutputTensorAddress', 'n2cube.dpuGetOutputTensorAddress', (['self.dpuTask', 'self.dpuOutputNode'], {}), '(self.dpuTask, self.dpuOutputNode)\n', (2642, 2676), False, 'from dnndk import n2cube\n'), ((2695, 2761), 'dnndk.n2cube.dpuGetOutputTensorChannel', 'n2cube.dpuGetOutputTensorChannel', (['self.dpuTask', 'self.dpuOutputNode'], {}), '(self.dpuTask, self.dpuOutputNode)\n', (2727, 2761), False, 'from dnndk import n2cube\n'), ((2781, 2845), 'dnndk.n2cube.dpuGetOutputTensorScale', 'n2cube.dpuGetOutputTensorScale', (['self.dpuTask', 'self.dpuOutputNode'], {}), '(self.dpuTask, self.dpuOutputNode)\n', (2811, 2845), False, 'from dnndk import n2cube\n'), ((2861, 2924), 'dnndk.n2cube.dpuGetOutputTensorSize', 'n2cube.dpuGetOutputTensorSize', (['self.dpuTask', 'self.dpuOutputNode'], {}), '(self.dpuTask, self.dpuOutputNode)\n', (2890, 2924), False, 'from dnndk import n2cube\n'), ((2943, 3005), 'dnndk.n2cube.dpuRunSoftmax', 'n2cube.dpuRunSoftmax', (['conf', 
'channel', '(size // channel)', 'outScale'], {}), '(conf, channel, size // channel, outScale)\n', (2963, 3005), False, 'from dnndk import n2cube\n'), ((3017, 3043), 'numpy.argmax', 'np.argmax', (['softmax'], {'axis': '(0)'}), '(softmax, axis=0)\n', (3026, 3043), True, 'import numpy as np\n'), ((3168, 3197), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'img_mat'], {}), "('.png', img_mat)\n", (3180, 3197), False, 'import cv2\n'), ((3365, 3405), 'cv2.cvtColor', 'cv2.cvtColor', (['img_mat', 'cv2.COLOR_BGR2RGB'], {}), '(img_mat, cv2.COLOR_BGR2RGB)\n', (3377, 3405), False, 'import cv2\n'), ((3418, 3428), 'io.BytesIO', 'StringIO', ([], {}), '()\n', (3426, 3428), True, 'from io import BytesIO as StringIO\n'), ((3616, 3639), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (3628, 3639), False, 'from IPython.display import clear_output\n'), ((2306, 2324), 'numpy.max', 'np.max', (['img1_scale'], {}), '(img1_scale)\n', (2312, 2324), True, 'import numpy as np\n'), ((3437, 3465), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['img_mat'], {}), '(img_mat)\n', (3456, 3465), False, 'import PIL\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pandas_datareader as data
# noinspection PyUnresolvedReferences
import silence_tensorflow.auto  # for ignoring tensorflow info and warnings
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
import streamlit as st
from datetime import date

# Streamlit app: visualize a stock's closing-price history and compare an
# LSTM model's predictions against the real prices.

# starting and ending of data frame
start = '2010-01-01'
end = date.today().strftime('%Y-%m-%d')

# decoration
st.title('Stock Trend Prediction')

# data frame
user_input = st.text_input('Enter Stock Ticker', 'SBI')
df = data.DataReader(user_input, 'yahoo', start, end)
print(df)

# Describing Data
st.subheader('Data from '+start.split('-')[0]+' - '+end.split('-')[0])
st.write(df.describe())

# Visualizations
st.subheader('Closing Price vs Time chart')
fig = plt.figure(figsize=(12, 6))
plt.plot(df.Close, 'b')
st.pyplot(fig)

st.subheader('Closing Price vs Time chart with 100MA')
ma100 = df.Close.rolling(100).mean()
fig = plt.figure(figsize=(12, 6))
plt.plot(ma100, 'r')
plt.plot(df.Close, 'b')
st.pyplot(fig)

st.subheader('Closing Price vs Time chart with 100MA & 200MA')
ma100 = df.Close.rolling(100).mean()
ma200 = df.Close.rolling(200).mean()
fig = plt.figure(figsize=(12, 6))
plt.plot(ma100, 'r')
plt.plot(ma200, 'g')
plt.plot(df.Close, 'b')
st.pyplot(fig)

# splitting data into Training and Testing (70/30 chronological split)
split = int(len(df) * 0.70)
data_training = pd.DataFrame(df['Close'][0:split])
data_testing = pd.DataFrame(df['Close'][split:int(len(df))])

# scaling down the training data and converting it into an array
scale = MinMaxScaler(feature_range=(0, 1))
data_training_array = scale.fit_transform(data_training)

# Load the model
model = load_model('keras_model.h5')

# testing data: prepend the last 100 training days so the first test
# sample has a full 100-day lookback window
past_100_days = data_training.tail(100)
# DataFrame.append was removed in pandas 2.0 -- use pd.concat instead
final_df = pd.concat([past_100_days, data_testing], ignore_index=True)

# scaling down the testing data and converting it into an array
input_data = scale.fit_transform(final_df)

# splitting data into x_test and y_test via 100-day sliding windows
x_test = []
y_test = []
for i in range(100, input_data.shape[0]):
    x_test.append(input_data[i - 100: i])
    y_test.append(input_data[i, 0])
x_test, y_test = np.array(x_test), np.array(y_test)

# Making Prediction
y_predicted = model.predict(x_test)

# scaling up the predicted data back to price units
scale_factor = 1/scale.scale_[0]
y_predicted = y_predicted * scale_factor
y_test = y_test * scale_factor

# Final Graph
st.subheader('Predictions vs Original')
fig2 = plt.figure(figsize=(12, 6))
plt.plot(y_test, 'b', label='Original Price')
plt.plot(y_predicted, 'g', label='Predicted Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.legend()
st.pyplot(fig2)
| [
"streamlit.pyplot",
"keras.models.load_model",
"matplotlib.pyplot.ylabel",
"pandas_datareader.DataReader",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure",
"streamlit.subheader",
"datetime.date.today",
"streamlit.text_input",
"sklearn.preprocessi... | [((466, 500), 'streamlit.title', 'st.title', (['"""Stock Trend Prediction"""'], {}), "('Stock Trend Prediction')\n", (474, 500), True, 'import streamlit as st\n'), ((528, 570), 'streamlit.text_input', 'st.text_input', (['"""Enter Stock Ticker"""', '"""SBI"""'], {}), "('Enter Stock Ticker', 'SBI')\n", (541, 570), True, 'import streamlit as st\n'), ((576, 624), 'pandas_datareader.DataReader', 'data.DataReader', (['user_input', '"""yahoo"""', 'start', 'end'], {}), "(user_input, 'yahoo', start, end)\n", (591, 624), True, 'import pandas_datareader as data\n'), ((768, 811), 'streamlit.subheader', 'st.subheader', (['"""Closing Price vs Time chart"""'], {}), "('Closing Price vs Time chart')\n", (780, 811), True, 'import streamlit as st\n'), ((818, 845), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (828, 845), True, 'import matplotlib.pyplot as plt\n'), ((846, 869), 'matplotlib.pyplot.plot', 'plt.plot', (['df.Close', '"""b"""'], {}), "(df.Close, 'b')\n", (854, 869), True, 'import matplotlib.pyplot as plt\n'), ((870, 884), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (879, 884), True, 'import streamlit as st\n'), ((886, 940), 'streamlit.subheader', 'st.subheader', (['"""Closing Price vs Time chart with 100MA"""'], {}), "('Closing Price vs Time chart with 100MA')\n", (898, 940), True, 'import streamlit as st\n'), ((984, 1011), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (994, 1011), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1032), 'matplotlib.pyplot.plot', 'plt.plot', (['ma100', '"""r"""'], {}), "(ma100, 'r')\n", (1020, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1033, 1056), 'matplotlib.pyplot.plot', 'plt.plot', (['df.Close', '"""b"""'], {}), "(df.Close, 'b')\n", (1041, 1056), True, 'import matplotlib.pyplot as plt\n'), ((1057, 1071), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (1066, 1071), True, 
'import streamlit as st\n'), ((1073, 1135), 'streamlit.subheader', 'st.subheader', (['"""Closing Price vs Time chart with 100MA & 200MA"""'], {}), "('Closing Price vs Time chart with 100MA & 200MA')\n", (1085, 1135), True, 'import streamlit as st\n'), ((1216, 1243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1226, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1244, 1264), 'matplotlib.pyplot.plot', 'plt.plot', (['ma100', '"""r"""'], {}), "(ma100, 'r')\n", (1252, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1265, 1285), 'matplotlib.pyplot.plot', 'plt.plot', (['ma200', '"""g"""'], {}), "(ma200, 'g')\n", (1273, 1285), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1309), 'matplotlib.pyplot.plot', 'plt.plot', (['df.Close', '"""b"""'], {}), "(df.Close, 'b')\n", (1294, 1309), True, 'import matplotlib.pyplot as plt\n'), ((1310, 1324), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (1319, 1324), True, 'import streamlit as st\n'), ((1584, 1618), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1596, 1618), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1702, 1730), 'keras.models.load_model', 'load_model', (['"""keras_model.h5"""'], {}), "('keras_model.h5')\n", (1712, 1730), False, 'from keras.models import load_model\n'), ((2409, 2448), 'streamlit.subheader', 'st.subheader', (['"""Predictions vs Original"""'], {}), "('Predictions vs Original')\n", (2421, 2448), True, 'import streamlit as st\n'), ((2456, 2483), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2466, 2483), True, 'import matplotlib.pyplot as plt\n'), ((2484, 2529), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test', '"""b"""'], {'label': '"""Original Price"""'}), "(y_test, 'b', label='Original Price')\n", (2492, 2529), True, 'import matplotlib.pyplot as plt\n'), ((2530, 2581), 
'matplotlib.pyplot.plot', 'plt.plot', (['y_predicted', '"""g"""'], {'label': '"""Predicted Price"""'}), "(y_predicted, 'g', label='Predicted Price')\n", (2538, 2581), True, 'import matplotlib.pyplot as plt\n'), ((2582, 2600), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2592, 2600), True, 'import matplotlib.pyplot as plt\n'), ((2601, 2620), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (2611, 2620), True, 'import matplotlib.pyplot as plt\n'), ((2621, 2633), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2631, 2633), True, 'import matplotlib.pyplot as plt\n'), ((2634, 2649), 'streamlit.pyplot', 'st.pyplot', (['fig2'], {}), '(fig2)\n', (2643, 2649), True, 'import streamlit as st\n'), ((2164, 2180), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (2172, 2180), True, 'import numpy as np\n'), ((2182, 2198), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (2190, 2198), True, 'import numpy as np\n'), ((418, 430), 'datetime.date.today', 'date.today', ([], {}), '()\n', (428, 430), False, 'from datetime import date\n')] |
#! /usr/bin/env python
"""compare float array files."""
import argparse
import os
import numpy as np
import glob
# Command-line interface: two positional paths, each either a single
# .floats file or a directory containing *.floats files to compare.
parser = argparse.ArgumentParser(description='compare .float binary files')
parser.add_argument('dir1', help='path to directory containing .float files')
parser.add_argument('dir2', help='path to another directory containing .float files')
def compare_features(features1, features2):
    """Compare two 4-D feature arrays element-wise.

    Prints every element whose absolute difference exceeds 1e-4 together
    with both values, then prints and returns the largest difference.
    Assumes both arrays have the same (1, C, H, W) shape.

    Returns:
        The largest absolute element-wise difference as a float.
    """
    diff = np.abs(features1[0] - features2[0])
    # Report every element that differs by more than the tolerance,
    # in the same row-major order as the original nested loops.
    for i, j, k in np.argwhere(diff > 1e-4):
        print(i, j, k, ":", features1[0, i, j, k], features2[0, i, j, k], diff[i, j, k])
    max_error = float(diff.max()) if diff.size else 0
    print("Largest error:", max_error)
    return max_error
def compare_features_lin(features1, features2):
    """Compare two 1-D feature arrays element-wise.

    Prints every element whose absolute difference exceeds 2 (a loose
    tolerance, presumably for quantized outputs), then prints and returns
    the largest difference. Assumes both arrays have the same length.

    Returns:
        The largest absolute element-wise difference as a float.
    """
    diff = np.abs(features1 - features2)
    for i in np.flatnonzero(diff > 2):
        print(i, ":", features1[i], features2[i], diff[i])
    max_error = float(diff.max()) if diff.size else 0
    print("Largest error:", max_error)
    return max_error
def compare_features_best(features1, features2):
    """For each element of features1, find the closest element of features2.

    BUG FIX: the original inner loop iterated over ``features1.shape[0]``
    while indexing ``features2``, which is wrong whenever the two arrays
    differ in length; it now scans all of ``features2``.

    Returns:
        List of indices into features2, one per element of features1
        (first minimum wins on ties, matching the original strict ``<``).
    """
    best = []
    for i in range(features1.shape[0]):
        diffs = np.abs(features2 - features1[i])
        min_j = int(np.argmin(diffs))
        print(i, "->", min_j, " min_error=", diffs[min_j])
        best.append(min_j)
    return best
def compare_features_min(features1, features2):
    """Report positions where the two arrays (nearly) agree.

    Prints every index whose absolute difference is below 1e-3 and
    returns those indices. (The original kept an unused ``max_error``
    accumulator, removed here.)

    Returns:
        List of indices i where ``|features1[i] - features2[i]| < 1e-3``.
    """
    matches = []
    for i in range(features1.shape[0]):
        diff = np.abs(features1[i] - features2[i])
        if diff < 1e-3:
            print(i, "-> error=", diff)
            matches.append(i)
    return matches
def compare_features_fast(features1, features2):
    """Print summary statistics comparing two arrays of equal shape.

    Prints the max/average absolute error plus min/max/average of each
    array, and returns the error statistics for programmatic use
    (consistent with the other compare_* helpers).

    Returns:
        Tuple ``(max_error, avrg_error)`` of floats.
    """
    error = np.abs(features1 - features2)
    max_error = np.max(error)
    avrg_error = np.sum(error) / features1.size
    min_1 = np.min(features1)
    min_2 = np.min(features2)
    max_1 = np.max(features1)
    max_2 = np.max(features2)
    avrg_1 = np.sum(features1) / features1.size
    avrg_2 = np.sum(features2) / features2.size
    print("error max:", max_error, "avrg:", avrg_error)
    print("avrg1:", avrg_1, "min1:", min_1, "max1:", max_1)
    print("avrg2:", avrg_2, "min2:", min_2, "max2:", max_2)
    return float(max_error), float(avrg_error)
def compare_features_(features1, features2):
    """For each element of features1, find the closest element of features2.

    Duplicate of ``compare_features_best`` kept for interface
    compatibility. BUG FIX: the original inner loop iterated over
    ``features1.shape[0]`` while indexing ``features2``; it now scans
    all of ``features2``.

    Returns:
        List of indices into features2, one per element of features1.
    """
    best = []
    for i in range(features1.shape[0]):
        diffs = np.abs(features2 - features1[i])
        min_j = int(np.argmin(diffs))
        print(i, "->", min_j, " min_error=", diffs[min_j])
        best.append(min_j)
    return best
def check(file1, file2):
    """Load two raw float32 binary files and print comparison statistics."""
    print("Checking " + file1 + " and " + file2)
    blobs = [np.fromfile(path, dtype=np.float32) for path in (file1, file2)]
    compare_features_fast(blobs[0], blobs[1])
def _main(args):
    """Compare two .floats files, or all same-named *.floats files in two dirs."""
    path1 = os.path.expanduser(args.dir1)
    path2 = os.path.expanduser(args.dir2)
    if os.path.isfile(path1) and os.path.isfile(path2):
        # Both arguments are plain files: compare them directly.
        check(path1, path2)
        return
    files1 = glob.glob(os.path.join(path1, "*.floats"))
    files2 = glob.glob(os.path.join(path2, "*.floats"))
    print(path1, files1)
    print(path2, files2)
    # Pair files between the two directories by basename.
    by_name = {os.path.basename(p): p for p in files1}
    for candidate in files2:
        name = os.path.basename(candidate)
        if name in by_name:
            check(by_name[name], candidate)

# Script entry point.
if __name__ == '__main__':
    _main(parser.parse_args())
| [
"numpy.abs",
"numpy.fromfile",
"argparse.ArgumentParser",
"os.path.join",
"numpy.max",
"os.path.isfile",
"numpy.sum",
"os.path.basename",
"numpy.min",
"os.path.expanduser"
] | [((123, 189), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""compare .float binary files"""'}), "(description='compare .float binary files')\n", (146, 189), False, 'import argparse\n'), ((1699, 1728), 'numpy.abs', 'np.abs', (['(features1 - features2)'], {}), '(features1 - features2)\n', (1705, 1728), True, 'import numpy as np\n'), ((1744, 1757), 'numpy.max', 'np.max', (['error'], {}), '(error)\n', (1750, 1757), True, 'import numpy as np\n'), ((1812, 1829), 'numpy.min', 'np.min', (['features1'], {}), '(features1)\n', (1818, 1829), True, 'import numpy as np\n'), ((1840, 1857), 'numpy.min', 'np.min', (['features2'], {}), '(features2)\n', (1846, 1857), True, 'import numpy as np\n'), ((1868, 1885), 'numpy.max', 'np.max', (['features1'], {}), '(features1)\n', (1874, 1885), True, 'import numpy as np\n'), ((1896, 1913), 'numpy.max', 'np.max', (['features2'], {}), '(features2)\n', (1902, 1913), True, 'import numpy as np\n'), ((2561, 2597), 'numpy.fromfile', 'np.fromfile', (['file1'], {'dtype': 'np.float32'}), '(file1, dtype=np.float32)\n', (2572, 2597), True, 'import numpy as np\n'), ((2610, 2646), 'numpy.fromfile', 'np.fromfile', (['file2'], {'dtype': 'np.float32'}), '(file2, dtype=np.float32)\n', (2621, 2646), True, 'import numpy as np\n'), ((2754, 2783), 'os.path.expanduser', 'os.path.expanduser', (['args.dir1'], {}), '(args.dir1)\n', (2772, 2783), False, 'import os\n'), ((2795, 2824), 'os.path.expanduser', 'os.path.expanduser', (['args.dir2'], {}), '(args.dir2)\n', (2813, 2824), False, 'import os\n'), ((907, 942), 'numpy.abs', 'np.abs', (['(features1[i] - features2[i])'], {}), '(features1[i] - features2[i])\n', (913, 942), True, 'import numpy as np\n'), ((1543, 1578), 'numpy.abs', 'np.abs', (['(features1[i] - features2[i])'], {}), '(features1[i] - features2[i])\n', (1549, 1578), True, 'import numpy as np\n'), ((1773, 1786), 'numpy.sum', 'np.sum', (['error'], {}), '(error)\n', (1779, 1786), True, 'import numpy as np\n'), ((1925, 1942), 
'numpy.sum', 'np.sum', (['features1'], {}), '(features1)\n', (1931, 1942), True, 'import numpy as np\n'), ((1969, 1986), 'numpy.sum', 'np.sum', (['features2'], {}), '(features2)\n', (1975, 1986), True, 'import numpy as np\n'), ((2833, 2853), 'os.path.isfile', 'os.path.isfile', (['dir1'], {}), '(dir1)\n', (2847, 2853), False, 'import os\n'), ((2858, 2878), 'os.path.isfile', 'os.path.isfile', (['dir2'], {}), '(dir2)\n', (2872, 2878), False, 'import os\n'), ((1264, 1299), 'numpy.abs', 'np.abs', (['(features1[i] - features2[j])'], {}), '(features1[i] - features2[j])\n', (1270, 1299), True, 'import numpy as np\n'), ((2321, 2356), 'numpy.abs', 'np.abs', (['(features1[i] - features2[j])'], {}), '(features1[i] - features2[j])\n', (2327, 2356), True, 'import numpy as np\n'), ((2943, 2973), 'os.path.join', 'os.path.join', (['dir1', '"""*.floats"""'], {}), "(dir1, '*.floats')\n", (2955, 2973), False, 'import os\n'), ((3003, 3033), 'os.path.join', 'os.path.join', (['dir2', '"""*.floats"""'], {}), "(dir2, '*.floats')\n", (3015, 3033), False, 'import os\n'), ((3274, 3296), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3290, 3296), False, 'import os\n'), ((551, 604), 'numpy.abs', 'np.abs', (['(features1[0, i, j, k] - features2[0, i, j, k])'], {}), '(features1[0, i, j, k] - features2[0, i, j, k])\n', (557, 604), True, 'import numpy as np\n'), ((3167, 3189), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3183, 3189), False, 'import os\n')] |
import pandas as pd
import numpy as np
import sys
import warnings
from slicer.interpretapi.explanation import AttributionExplanation
from slicer import Slicer
# slicer confuses pylint...
# pylint: disable=no-member
class Explanation(AttributionExplanation):
    """ This is currently an experimental feature don't depend on this object yet! :)
    """
    def __init__(
        self,
        expected_value,
        values,
        data = None,
        output_shape = tuple(),
        interaction_order = 0,
        instance_names = None,
        input_names = None,
        output_names = None,
        output_indexes = None,
        feature_types = None,
        lower_bounds = None,
        upper_bounds = None,
        main_effects = None,
        hierarchical_values = None,
        clustering = None
    ):
        """Wrap attribution values with sliceable metadata.

        Optional per-value arrays (bounds, main effects, output labels)
        are passed to the parent as (dims, Slicer) pairs so they are
        sliced consistently with the values tensor.
        """
        # Shape of the raw input data; empty tuple when data is scalar/None.
        input_shape = _compute_shape(data)
        # Dimension indices of the values tensor: input dims, then
        # interaction dims, then output dims.
        values_dims = list(
            range(len(input_shape) + interaction_order + len(output_shape))
        )
        # Dimensions that index model outputs only.
        # NOTE(review): range() excludes its stop, so the last values dim
        # is not included here — confirm whether values_dims[-1] + 1 was
        # intended.
        output_dims = range(len(input_shape) + interaction_order, values_dims[-1])
        #main_effects_inds = values_dims[0:len(input_shape)] + values_dims[len(input_shape) + interaction_order:]
        self.output_names = output_names # TODO: needs to be tracked after slicing still

        kwargs_dict = {}
        if lower_bounds is not None:
            kwargs_dict["lower_bounds"] = (values_dims, Slicer(lower_bounds))
        if upper_bounds is not None:
            kwargs_dict["upper_bounds"] = (values_dims, Slicer(upper_bounds))
        if main_effects is not None:
            kwargs_dict["main_effects"] = (values_dims, Slicer(main_effects))
        if output_indexes is not None:
            kwargs_dict["output_indexes"] = (output_dims, Slicer(output_indexes))
        if output_names is not None:
            kwargs_dict["output_names"] = (output_dims, Slicer(output_names))
        if hierarchical_values is not None:
            # NOTE(review): every other entry pairs a dims list with a
            # Slicer, but here the first tuple element is the raw
            # hierarchical_values array itself — confirm whether
            # values_dims was intended.
            kwargs_dict["hierarchical_values"] = (hierarchical_values, Slicer(hierarchical_values))
        if clustering is not None:
            self.clustering = clustering
        super().__init__(
            data,
            values,
            input_shape,
            output_shape,
            expected_value,
            interaction_order,
            instance_names,
            input_names,
            feature_types,
            **kwargs_dict
        )

    def get_shape(self):
        """Return the shape of the attribution values tensor."""
        return _compute_shape(self.values)
    shape = property(get_shape)

    def get_expected_value(self):
        """Return the base (expected) model output the attributions are relative to."""
        return self.base_value
    expected_value = property(get_expected_value)

    def __repr__(self):
        # Multi-section dump: expected value, values and (optionally) data.
        out = ".expected_value =\n"+self.expected_value.__repr__()
        out += "\n\n.values =\n"+self.values.__repr__()
        if self.data is not None:
            out += "\n\n.data =\n"+self.data.__repr__()
        return out

    def __getitem__(self, item):
        """ This adds support for magic string indexes like "rank(0)".
        """
        if not isinstance(item, tuple):
            item = (item,)

        # convert any magic strings
        for i,t in enumerate(item):
            if type(t) is str:
                if t.startswith("rank("):
                    t = "abs_rank(" + t[5:] # convert rank() to abs_rank()
                if t.startswith("abs_rank("):
                    # Rank positions along axis i by total |attribution|
                    # summed over every other axis, descending.
                    rank = int(t[9:-1])
                    ranks = np.argsort(-np.sum(np.abs(self.values), tuple(j for j in range(len(self.values.shape)) if j != i)))
                    tmp = list(item)
                    tmp[i] = ranks[rank]
                    item = tuple(tmp)
                elif t.startswith("pos_rank("):
                    # Rank by summed signed attribution, descending.
                    rank = int(t[9:-1])
                    ranks = np.argsort(-np.sum(self.values, tuple(j for j in range(len(self.values.shape)) if j != i)))
                    tmp = list(item)
                    tmp[i] = ranks[rank]
                    item = tuple(tmp)
                elif t.startswith("neg_rank("):
                    # Rank by summed signed attribution, ascending.
                    rank = int(t[9:-1])
                    ranks = np.argsort(np.sum(self.values, tuple(j for j in range(len(self.values.shape)) if j != i)))
                    tmp = list(item)
                    tmp[i] = ranks[rank]
                    item = tuple(tmp)
                else:
                    # Otherwise treat the string as a feature name and
                    # replace it with that feature's integer index.
                    ind = np.where(np.array(self.feature_names) == t)[0][0]
                    tmp = list(item)
                    tmp[i] = int(ind)
                    item = tuple(tmp)
        out = super().__getitem__(item)
        # Clustering metadata is not sliced; carry it over unchanged.
        if getattr(self, "clustering", None) is not None:
            out.clustering = self.clustering
        return out
def _compute_shape(x):
if not hasattr(x, "__len__"):
return tuple()
else:
if type(x) == list:
return (len(x),) + _compute_shape(x[0])
if type(x) == dict:
return (len(x),) + _compute_shape(x[next(iter(x))])
return x.shape | [
"numpy.array",
"numpy.abs",
"slicer.Slicer"
] | [((1402, 1422), 'slicer.Slicer', 'Slicer', (['lower_bounds'], {}), '(lower_bounds)\n', (1408, 1422), False, 'from slicer import Slicer\n'), ((1517, 1537), 'slicer.Slicer', 'Slicer', (['upper_bounds'], {}), '(upper_bounds)\n', (1523, 1537), False, 'from slicer import Slicer\n'), ((1632, 1652), 'slicer.Slicer', 'Slicer', (['main_effects'], {}), '(main_effects)\n', (1638, 1652), False, 'from slicer import Slicer\n'), ((1751, 1773), 'slicer.Slicer', 'Slicer', (['output_indexes'], {}), '(output_indexes)\n', (1757, 1773), False, 'from slicer import Slicer\n'), ((1868, 1888), 'slicer.Slicer', 'Slicer', (['output_names'], {}), '(output_names)\n', (1874, 1888), False, 'from slicer import Slicer\n'), ((2005, 2032), 'slicer.Slicer', 'Slicer', (['hierarchical_values'], {}), '(hierarchical_values)\n', (2011, 2032), False, 'from slicer import Slicer\n'), ((3446, 3465), 'numpy.abs', 'np.abs', (['self.values'], {}), '(self.values)\n', (3452, 3465), True, 'import numpy as np\n'), ((4347, 4375), 'numpy.array', 'np.array', (['self.feature_names'], {}), '(self.feature_names)\n', (4355, 4375), True, 'import numpy as np\n')] |
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel_wrapper."""
import unittest
from absl.testing import absltest
from dm_env import specs
import numpy as np
SKIP_OPEN_SPIEL_TESTS = False
SKIP_OPEN_SPIEL_MESSAGE = 'open_spiel not installed.'
try:
# pylint: disable=g-import-not-at-top
# pytype: disable=import-error
from acme.wrappers import open_spiel_wrapper
from open_spiel.python import rl_environment
# pytype: enable=import-error
except ModuleNotFoundError:
SKIP_OPEN_SPIEL_TESTS = True
@unittest.skipIf(SKIP_OPEN_SPIEL_TESTS, SKIP_OPEN_SPIEL_MESSAGE)
class OpenSpielWrapperTest(absltest.TestCase):
  """Smoke tests for the OpenSpiel -> dm_env wrapper."""

  def test_tic_tac_toe(self):
    # Wrap a tic-tac-toe OpenSpiel environment in the Acme wrapper.
    raw_env = rl_environment.Environment('tic_tac_toe')
    env = open_spiel_wrapper.OpenSpielWrapper(raw_env)

    # Test converted observation spec.
    observation_spec = env.observation_spec()
    self.assertEqual(type(observation_spec), open_spiel_wrapper.OLT)
    self.assertEqual(type(observation_spec.observation), specs.Array)
    self.assertEqual(type(observation_spec.legal_actions), specs.Array)
    self.assertEqual(type(observation_spec.terminal), specs.Array)

    # Test converted action spec.
    action_spec: specs.DiscreteArray = env.action_spec()
    self.assertEqual(type(action_spec), specs.DiscreteArray)
    self.assertEqual(action_spec.shape, ())
    self.assertEqual(action_spec.minimum, 0)
    # Tic-tac-toe has 9 board cells, hence actions 0..8.
    self.assertEqual(action_spec.maximum, 8)
    self.assertEqual(action_spec.num_values, 9)
    self.assertEqual(action_spec.dtype, np.dtype('int32'))

    # Test step.
    timestep = env.reset()
    self.assertTrue(timestep.first())

    _ = env.step([0])
    env.close()


if __name__ == '__main__':
  absltest.main()
| [
"unittest.skipIf",
"acme.wrappers.open_spiel_wrapper.OpenSpielWrapper",
"absl.testing.absltest.main",
"open_spiel.python.rl_environment.Environment",
"numpy.dtype"
] | [((1109, 1172), 'unittest.skipIf', 'unittest.skipIf', (['SKIP_OPEN_SPIEL_TESTS', 'SKIP_OPEN_SPIEL_MESSAGE'], {}), '(SKIP_OPEN_SPIEL_TESTS, SKIP_OPEN_SPIEL_MESSAGE)\n', (1124, 1172), False, 'import unittest\n'), ((2272, 2287), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2285, 2287), False, 'from absl.testing import absltest\n'), ((1265, 1306), 'open_spiel.python.rl_environment.Environment', 'rl_environment.Environment', (['"""tic_tac_toe"""'], {}), "('tic_tac_toe')\n", (1291, 1306), False, 'from open_spiel.python import rl_environment\n'), ((1317, 1361), 'acme.wrappers.open_spiel_wrapper.OpenSpielWrapper', 'open_spiel_wrapper.OpenSpielWrapper', (['raw_env'], {}), '(raw_env)\n', (1352, 1361), False, 'from acme.wrappers import open_spiel_wrapper\n'), ((2101, 2118), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (2109, 2118), True, 'import numpy as np\n')] |
import taichi as ti
from utils.tools import Pair
from pcg_method import PCG_Solver
import numpy as np
@ti.data_oriented
class Thin_Flame:
    """2-D thin-flame combustion solver on a uniform grid.

    The flame front is tracked as the zero level set of a signed-distance
    field advected by a semi-Lagrangian scheme; incompressibility is
    enforced with a fixed-iteration Jacobi pressure solve.
    """

    def __init__(self, resolution=512):
        shape = (resolution, resolution)
        # Double-buffered signed-distance, velocity and pressure fields.
        self._sd_cur = ti.var(dt=ti.f32, shape=shape)
        self._sd_nxt = ti.var(dt=ti.f32, shape=shape)
        self._velocity_cur = ti.Vector(2, dt=ti.f32, shape=shape)
        self._velocity_nxt = ti.Vector(2, dt=ti.f32, shape=shape)
        self._pressure_cur = ti.var(dt=ti.f32, shape=shape)
        self._pressure_nxt = ti.var(dt=ti.f32, shape=shape)
        self.velocity_div = ti.var(dt=ti.f32, shape=shape)
        self.pixel = ti.Vector(3, dt=ti.f32, shape=shape)

        # Densities of burnt gas and unburnt fuel; the sign of the SDF
        # decides which one applies at a cell.
        self.density_burnt = 1.2
        self.density_fuel = 1.0

        self.sign_dis = Pair(self._sd_cur, self._sd_nxt)
        self.pressure = Pair(self._pressure_cur, self._pressure_nxt)
        self.velocity = Pair(self._velocity_cur, self._velocity_nxt)

        self.resolution = resolution
        self.RK = 3    # order of the Runge-Kutta backtrace (1, 2 or 3)
        self.dx = 1.0
        self.dt = 0.04
        self.speed = 1.0  # flame-front propagation speed, 0.5 m/s per the original note
        self.direction = ti.Vector([0.0, 1.0])
        self.source_pos_x = range(int(resolution / 2) - 10, int(resolution / 2) + 10)
        self.out_momentum = ti.Vector([0.0, 5000.0])
        # Random flame colour, biased away from black (components in [0.3, 1.0]).
        self.dcolor = ti.Vector(list(np.random.rand(3) * 0.7 + 0.3))
        self.clamp_sampler = Thin_Flame.Clamping_Sampler(resolution)
        self.extra_sampler = Thin_Flame.Extrapolation_Sampler(resolution)

    @ti.data_oriented
    class Clamping_Sampler:
        """Samples a field, clamping coordinates to the grid bounds."""

        def __init__(self, res):
            self.resolution = res

        @ti.func
        def sample(self, field, u, v):
            i = max(0, min(self.resolution - 1, int(u)))
            j = max(0, min(self.resolution - 1, int(v)))
            return field[i, j]

    @ti.data_oriented
    class Extrapolation_Sampler:
        """Samples a field, linearly extrapolating below the bottom edge."""

        def __init__(self, res):
            self.resolution = res

        @ti.func
        def sample(self, field, u, v):
            i = max(0, min(self.resolution - 1, int(u)))
            j = max(0, min(self.resolution - 1, int(v)))
            return field[i, j] - ti.abs(v - j) if j != int(v) and j < 0 else field[i, j]

    @ti.func
    def density(self, u, v):
        # Burnt gas inside the zero level set (sdf <= 0), fuel outside.
        return self.density_burnt if self.sign_dis.curr[u, v] <= 0 else self.density_fuel

    @ti.func
    def lerp(self, v1, v2, frac):
        return v1 + frac * (v2 - v1)

    @ti.func
    def sample(self, field, u, v):
        # Clamped nearest-cell sample (kernel-local twin of Clamping_Sampler).
        i = max(0, min(self.resolution - 1, int(u)))
        j = max(0, min(self.resolution - 1, int(v)))
        return field[i, j]

    @ti.func
    def bilinear_interpolate(self, field, u, v, sampler):
        # Bilinear interpolation at (u, v); cell centres sit at half-integer
        # coordinates.
        s, t = u - 0.5, v - 0.5
        iu, iv = int(s), int(t)
        fu, fv = s - iu, t - iv
        a = sampler.sample(field, iu + 0.5, iv + 0.5)
        b = sampler.sample(field, iu + 1.5, iv + 0.5)
        c = sampler.sample(field, iu + 0.5, iv + 1.5)
        d = sampler.sample(field, iu + 1.5, iv + 1.5)
        return self.lerp(self.lerp(a, b, fu), self.lerp(c, d, fu), fv)

    @ti.func
    def backtrace(self, vf, u, v, dt):
        """Trace grid point (u, v) backwards through velocity field vf over dt."""
        p = ti.Vector([u, v]) + 0.5
        if ti.static(self.RK == 1):
            p -= dt * vf[u, v]
        elif ti.static(self.RK == 2):
            mid = p - 0.5 * dt * vf[u, v]
            p -= dt * self.sample(vf, mid[0], mid[1])
        elif ti.static(self.RK == 3):
            v1 = vf[u, v]
            p1 = p - 0.5 * dt * v1
            v2 = self.sample(vf, p1[0], p1[1])
            # BUG FIX: the original computed `p - 0.75 * dt * v`, using the
            # scalar grid index `v` instead of the sampled velocity `v2`
            # required by the RK3 scheme (and RK == 3 is the default).
            p2 = p - 0.75 * dt * v2
            v3 = self.sample(vf, p2[0], p2[1])
            p -= dt * (2 / 9 * v1 + 1 / 3 * v2 + 4 / 9 * v3)
        else:
            ti.static_print(f"unsupported order for RK{self.RK}")
        return p

    @ti.func
    def semi_lagrange(self, vf, field, next_field, dt, sampler):
        # Advect field into next_field by sampling at the backtraced position.
        for i, j in vf:
            p = self.backtrace(vf, i, j, dt)
            next_field[i, j] = self.bilinear_interpolate(field, p[0], p[1], sampler)

    @ti.kernel
    def advection(self, vf: ti.template(), field: ti.template(), sampler: ti.template()):
        """Semi-Lagrangian advection of field.curr into field.next."""
        self.semi_lagrange(vf, field.curr, field.next, self.dt, sampler)

    @ti.kernel
    def momentum(self, vf: ti.template()):
        """Inject upward momentum over the source strip at the bottom centre."""
        res = ti.static(int(self.resolution / 2))
        for i, j in ti.ndrange((res - 10, res + 10), (0, 20)):
            vf[i, j] += self.dt * self.out_momentum

    @ti.kernel
    def divergence_vel(self, field: ti.template()):
        """Central-difference divergence with reflective (solid-wall) borders."""
        half_inv_dx = ti.static(0.5 / self.dx)
        for i, j in field:
            vl = self.sample(field, i - 1, j)[0]
            vr = self.sample(field, i + 1, j)[0]
            vb = self.sample(field, i, j - 1)[1]
            vt = self.sample(field, i, j + 1)[1]
            vc = self.sample(field, i, j)
            # Mirror the normal component at the domain boundaries.
            if i == 0:
                vl = -vc[0]
            elif i == self.resolution - 1:
                vr = -vc[0]
            if j == 0:
                vb = -vc[1]
            elif j == self.resolution - 1:
                vt = -vc[1]
            self.velocity_div[i, j] = (vr - vl + vt - vb) * half_inv_dx

    def projection(self, v_cur: ti.template(), p: ti.template()):
        """Solve for a pressure field that makes v_cur divergence free."""
        self.divergence_vel(v_cur)
        self.jacobi(p)

    def jacobi(self, p: ti.template()):
        # 200 fixed Jacobi sweeps, ping-ponging between the pressure buffers.
        for _ in ti.static(range(200)):
            self.jacobi_step(p.curr, p.next)
            p.swap()

    @ti.kernel
    def jacobi_step(self, p_cur: ti.template(), p_nxt: ti.template()):
        """One Jacobi iteration of the pressure Poisson equation."""
        dx_sqr = ti.static(self.dx * self.dx)
        for i, j in p_cur:
            pl = self.sample(p_cur, i - 1, j)
            pr = self.sample(p_cur, i + 1, j)
            pt = self.sample(p_cur, i, j + 1)
            pb = self.sample(p_cur, i, j - 1)
            p_nxt[i, j] = 0.25 * (pl + pr + pt + pb - dx_sqr * self.velocity_div[i, j])

    @ti.kernel
    def update_v(self, vf: ti.template(), pf: ti.template()):
        """Subtract the pressure gradient from vf."""
        half_inv_dx = ti.static(0.5 / self.dx)
        for i, j in vf:
            pl = self.sample(pf, i - 1, j)
            pr = self.sample(pf, i + 1, j)
            pb = self.sample(pf, i, j - 1)
            pt = self.sample(pf, i, j + 1)
            vf[i, j] = self.sample(vf, i, j) - half_inv_dx * ti.Vector([pr - pl, pt - pb])

    @ti.kernel
    def update_distance(self, sdf: ti.template(), vf: ti.template()):
        """Re-seed fuel at the source strip and propagate the flame front."""
        res = ti.static(int(self.resolution / 2))
        for i, j in ti.ndrange((res - 10, res + 10), (0, 20)):
            # Force a burnt region at the source strip.
            sdf[i, j] = -1.0
        for i, j in sdf:
            # The front advances at the laminar flame speed.
            sdf[i, j] += self.dt * self.speed

    @ti.kernel
    def init_level_set(self):
        sdf = ti.static(self.sign_dis.curr)
        inv_r = ti.static(4.0 / (self.resolution / 20.0) ** 2)
        for i, j in sdf:
            dx, dy = self.resolution / 2 - i, j
            d2 = dx * dx + dy * dy
            # NOTE(review): the write goes to sdf[i, 0] while d2 depends on
            # j, so all j-iterations race on the bottom row — confirm
            # whether sdf[i, j] (or a j == 0 loop) was intended.
            sdf[i, 0] = ti.exp(-d2 * inv_r) * 10.0

    def init(self):
        """Reset all fields to their initial state."""
        self.velocity.curr.fill([0.0, 0.0])
        self.pressure.curr.fill(0.0)
        self.sign_dis.curr.fill(10.0)
        self.pixel.fill([0.0, 0.0, 0.0])
        self.init_level_set()

    @ti.kernel
    def render(self, sdf: ti.template()):
        """Shade burnt cells (sdf < 0) with the flame colour, others black."""
        zero = ti.Vector([0.0, 0.0, 0.0])
        for indices in ti.grouped(sdf):
            self.pixel[indices] = self.dcolor * ti.exp(1.0 / (sdf[indices] - 0.01)) if sdf[indices] < 0.0 else zero

    def step(self):
        """Advance the simulation by one time step."""
        # Advect velocity and the signed-distance field.
        self.advection(self.velocity.curr, self.velocity, self.clamp_sampler)
        self.advection(self.velocity.curr, self.sign_dis, self.clamp_sampler)
        self.velocity.swap()
        self.sign_dis.swap()
        # External forces (source momentum).
        self.momentum(self.velocity.curr)
        # Pressure projection and velocity correction.
        self.projection(self.velocity.curr, self.pressure)
        self.update_v(self.velocity.curr, self.pressure.curr)
        # Level-set update and rendering.
        self.update_distance(self.sign_dis.curr, self.velocity.curr)
        self.render(self.sign_dis.curr)
def main():
    """Run the thin-flame simulation in an interactive GUI window."""
    res = 512
    ti.init(arch=ti.gpu, kernel_profiler=True)
    window = ti.GUI("Thin Flame", res=res)
    sim = Thin_Flame(res)
    sim.init()
    # Step and redraw until the window is closed.
    while window.running:
        sim.step()
        window.set_image(sim.pixel)
        window.show()


if __name__ == '__main__':
    main()
"taichi.ndrange",
"numpy.random.rand",
"taichi.static_print",
"taichi.init",
"taichi.template",
"utils.tools.Pair",
"taichi.exp",
"taichi.abs",
"taichi.static",
"taichi.GUI",
"taichi.Vector",
"taichi.grouped",
"taichi.var"
] | [((9279, 9321), 'taichi.init', 'ti.init', ([], {'arch': 'ti.gpu', 'kernel_profiler': '(True)'}), '(arch=ti.gpu, kernel_profiler=True)\n', (9286, 9321), True, 'import taichi as ti\n'), ((9335, 9371), 'taichi.GUI', 'ti.GUI', (['"""Thin Flame"""'], {'res': 'resolution'}), "('Thin Flame', res=resolution)\n", (9341, 9371), True, 'import taichi as ti\n'), ((250, 280), 'taichi.var', 'ti.var', ([], {'dt': 'ti.f32', 'shape': 'shape'}), '(dt=ti.f32, shape=shape)\n', (256, 280), True, 'import taichi as ti\n'), ((308, 338), 'taichi.var', 'ti.var', ([], {'dt': 'ti.f32', 'shape': 'shape'}), '(dt=ti.f32, shape=shape)\n', (314, 338), True, 'import taichi as ti\n'), ((380, 416), 'taichi.Vector', 'ti.Vector', (['(2)'], {'dt': 'ti.f32', 'shape': 'shape'}), '(2, dt=ti.f32, shape=shape)\n', (389, 416), True, 'import taichi as ti\n'), ((450, 486), 'taichi.Vector', 'ti.Vector', (['(2)'], {'dt': 'ti.f32', 'shape': 'shape'}), '(2, dt=ti.f32, shape=shape)\n', (459, 486), True, 'import taichi as ti\n'), ((520, 550), 'taichi.var', 'ti.var', ([], {'dt': 'ti.f32', 'shape': 'shape'}), '(dt=ti.f32, shape=shape)\n', (526, 550), True, 'import taichi as ti\n'), ((584, 614), 'taichi.var', 'ti.var', ([], {'dt': 'ti.f32', 'shape': 'shape'}), '(dt=ti.f32, shape=shape)\n', (590, 614), True, 'import taichi as ti\n'), ((648, 678), 'taichi.var', 'ti.var', ([], {'dt': 'ti.f32', 'shape': 'shape'}), '(dt=ti.f32, shape=shape)\n', (654, 678), True, 'import taichi as ti\n'), ((704, 740), 'taichi.Vector', 'ti.Vector', (['(3)'], {'dt': 'ti.f32', 'shape': 'shape'}), '(3, dt=ti.f32, shape=shape)\n', (713, 740), True, 'import taichi as ti\n'), ((838, 870), 'utils.tools.Pair', 'Pair', (['self._sd_cur', 'self._sd_nxt'], {}), '(self._sd_cur, self._sd_nxt)\n', (842, 870), False, 'from utils.tools import Pair\n'), ((896, 940), 'utils.tools.Pair', 'Pair', (['self._pressure_cur', 'self._pressure_nxt'], {}), '(self._pressure_cur, self._pressure_nxt)\n', (900, 940), False, 'from utils.tools import Pair\n'), ((966, 1010), 
'utils.tools.Pair', 'Pair', (['self._velocity_cur', 'self._velocity_nxt'], {}), '(self._velocity_cur, self._velocity_nxt)\n', (970, 1010), False, 'from utils.tools import Pair\n'), ((1177, 1198), 'taichi.Vector', 'ti.Vector', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1186, 1198), True, 'import taichi as ti\n'), ((1312, 1336), 'taichi.Vector', 'ti.Vector', (['[0.0, 5000.0]'], {}), '([0.0, 5000.0])\n', (1321, 1336), True, 'import taichi as ti\n'), ((3232, 3255), 'taichi.static', 'ti.static', (['(self.RK == 1)'], {}), '(self.RK == 1)\n', (3241, 3255), True, 'import taichi as ti\n'), ((4604, 4645), 'taichi.ndrange', 'ti.ndrange', (['(res - 10, res + 10)', '(0, 20)'], {}), '((res - 10, res + 10), (0, 20))\n', (4614, 4645), True, 'import taichi as ti\n'), ((5037, 5061), 'taichi.static', 'ti.static', (['(0.5 / self.dx)'], {}), '(0.5 / self.dx)\n', (5046, 5061), True, 'import taichi as ti\n'), ((6127, 6155), 'taichi.static', 'ti.static', (['(self.dx * self.dx)'], {}), '(self.dx * self.dx)\n', (6136, 6155), True, 'import taichi as ti\n'), ((6802, 6826), 'taichi.static', 'ti.static', (['(0.5 / self.dx)'], {}), '(0.5 / self.dx)\n', (6811, 6826), True, 'import taichi as ti\n'), ((7345, 7386), 'taichi.ndrange', 'ti.ndrange', (['(res - 10, res + 10)', '(0, 20)'], {}), '((res - 10, res + 10), (0, 20))\n', (7355, 7386), True, 'import taichi as ti\n'), ((7885, 7914), 'taichi.static', 'ti.static', (['self.sign_dis.curr'], {}), '(self.sign_dis.curr)\n', (7894, 7914), True, 'import taichi as ti\n'), ((7931, 7977), 'taichi.static', 'ti.static', (['(4.0 / (self.resolution / 20.0) ** 2)'], {}), '(4.0 / (self.resolution / 20.0) ** 2)\n', (7940, 7977), True, 'import taichi as ti\n'), ((8455, 8481), 'taichi.Vector', 'ti.Vector', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (8464, 8481), True, 'import taichi as ti\n'), ((8507, 8522), 'taichi.grouped', 'ti.grouped', (['sdf'], {}), '(sdf)\n', (8517, 8522), True, 'import taichi as ti\n'), ((3198, 3215), 'taichi.Vector', 'ti.Vector', (['[u, 
v]'], {}), '([u, v])\n', (3207, 3215), True, 'import taichi as ti\n'), ((3307, 3330), 'taichi.static', 'ti.static', (['(self.RK == 2)'], {}), '(self.RK == 2)\n', (3316, 3330), True, 'import taichi as ti\n'), ((4124, 4137), 'taichi.template', 'ti.template', ([], {}), '()\n', (4135, 4137), True, 'import taichi as ti\n'), ((4148, 4161), 'taichi.template', 'ti.template', ([], {}), '()\n', (4159, 4161), True, 'import taichi as ti\n'), ((4173, 4186), 'taichi.template', 'ti.template', ([], {}), '()\n', (4184, 4186), True, 'import taichi as ti\n'), ((4308, 4321), 'taichi.template', 'ti.template', ([], {}), '()\n', (4319, 4321), True, 'import taichi as ti\n'), ((4999, 5012), 'taichi.template', 'ti.template', ([], {}), '()\n', (5010, 5012), True, 'import taichi as ti\n'), ((5763, 5776), 'taichi.template', 'ti.template', ([], {}), '()\n', (5774, 5776), True, 'import taichi as ti\n'), ((5782, 5795), 'taichi.template', 'ti.template', ([], {}), '()\n', (5793, 5795), True, 'import taichi as ti\n'), ((5899, 5912), 'taichi.template', 'ti.template', ([], {}), '()\n', (5910, 5912), True, 'import taichi as ti\n'), ((6072, 6085), 'taichi.template', 'ti.template', ([], {}), '()\n', (6083, 6085), True, 'import taichi as ti\n'), ((6094, 6107), 'taichi.template', 'ti.template', ([], {}), '()\n', (6105, 6107), True, 'import taichi as ti\n'), ((6745, 6758), 'taichi.template', 'ti.template', ([], {}), '()\n', (6756, 6758), True, 'import taichi as ti\n'), ((6764, 6777), 'taichi.template', 'ti.template', ([], {}), '()\n', (6775, 6777), True, 'import taichi as ti\n'), ((7168, 7181), 'taichi.template', 'ti.template', ([], {}), '()\n', (7179, 7181), True, 'import taichi as ti\n'), ((7189, 7202), 'taichi.template', 'ti.template', ([], {}), '()\n', (7200, 7202), True, 'import taichi as ti\n'), ((8424, 8437), 'taichi.template', 'ti.template', ([], {}), '()\n', (8435, 8437), True, 'import taichi as ti\n'), ((3441, 3464), 'taichi.static', 'ti.static', (['(self.RK == 3)'], {}), '(self.RK == 3)\n', 
(3450, 3464), True, 'import taichi as ti\n'), ((8111, 8130), 'taichi.exp', 'ti.exp', (['(-d2 * inv_r)'], {}), '(-d2 * inv_r)\n', (8117, 8130), True, 'import taichi as ti\n'), ((2207, 2220), 'taichi.abs', 'ti.abs', (['(v - j)'], {}), '(v - j)\n', (2213, 2220), True, 'import taichi as ti\n'), ((3744, 3797), 'taichi.static_print', 'ti.static_print', (['f"""unsupported order for RK{self.RK}"""'], {}), "(f'unsupported order for RK{self.RK}')\n", (3759, 3797), True, 'import taichi as ti\n'), ((7086, 7115), 'taichi.Vector', 'ti.Vector', (['[pr - pl, pt - pb]'], {}), '([pr - pl, pt - pb])\n', (7095, 7115), True, 'import taichi as ti\n'), ((8572, 8607), 'taichi.exp', 'ti.exp', (['(1.0 / (sdf[indices] - 0.01))'], {}), '(1.0 / (sdf[indices] - 0.01))\n', (8578, 8607), True, 'import taichi as ti\n'), ((1374, 1391), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (1388, 1391), True, 'import numpy as np\n')] |
from mal import Anime
from bs4 import BeautifulSoup
import numpy
import requests
def animerec():
    """Pick a random MyAnimeList anime and return [image_url, title, synopsis].

    Chooses a random id below 16000; when the id is invalid (``mal.Anime``
    raises ``ValueError``), retries recursively with a fresh random id.

    :return: list ``[cover_image_url_or_None, title, synopsis_text]``
    """
    p = numpy.random.randint(16000, size=1)
    anime_id = int(p[0])  # renamed from `id`, which shadowed the builtin
    try:
        anime = Anime(anime_id)
        title = str(anime.title)
        # Build the URL slug the same way MAL does: spaces and colons -> underscores.
        titlef = title.replace(' ', '_').replace(':', '_')
        url = 'https://myanimelist.net/anime/' + str(anime_id) + '/' + titlef + '?q=cow&cat=anime'
        site = requests.get(url).text
        soup = BeautifulSoup(site, 'html.parser')
        # Anime cover image: first <img> inside the centred anchor block.
        img_div = soup.find("div", attrs={'style': 'text-align: center;'})
        images = img_div.find('a').findAll('img')
        # BUG FIX: originally `link` was left unbound (NameError) when no <img>
        # was present; return None for the image instead.
        link = images[0]['data-src'] if images else None
        # Anime synopsis paragraph.
        syn_tags = soup.find('p', attrs={'itemprop': 'description'}).text
        return [link, title, syn_tags]
    except ValueError:
        # BUG FIX: the original called animerec() here but discarded its return
        # value, so the caller always received None after a retry.
        return animerec()
animerec()
| [
"bs4.BeautifulSoup",
"requests.get",
"numpy.random.randint",
"mal.Anime"
] | [((107, 142), 'numpy.random.randint', 'numpy.random.randint', (['(16000)'], {'size': '(1)'}), '(16000, size=1)\n', (127, 142), False, 'import numpy\n'), ((218, 227), 'mal.Anime', 'Anime', (['id'], {}), '(id)\n', (223, 227), False, 'from mal import Anime\n'), ((442, 459), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (454, 459), False, 'import requests\n'), ((499, 533), 'bs4.BeautifulSoup', 'BeautifulSoup', (['site', '"""html.parser"""'], {}), "(site, 'html.parser')\n", (512, 533), False, 'from bs4 import BeautifulSoup\n')] |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this open-source project.
import os
import sys
import json
import random
import argparse
import essentia
import essentia.streaming
from essentia.standard import *
import librosa
import numpy as np
from extractor import FeatureExtractor
from aistplusplus_api.aist_plusplus.loader import AISTDataset
from smplx import SMPL
import torch
# Command-line configuration: input locations of the AIST++ data, output
# directories for the per-sequence JSON samples, and the audio sampling rate.
parser = argparse.ArgumentParser()
parser.add_argument('--input_video_dir', type=str, default='aist_plusplus_final/all_musics')
parser.add_argument('--input_annotation_dir', type=str, default='aist_plusplus_final')
parser.add_argument('--smpl_dir', type=str, default='smpl')
parser.add_argument('--train_dir', type=str, default='data/aistpp_train_wav')
parser.add_argument('--test_dir', type=str, default='data/aistpp_test_full_wav')
parser.add_argument('--split_train_file', type=str, default='aist_plusplus_final/splits/crossmodal_train.txt')
parser.add_argument('--split_test_file', type=str, default='aist_plusplus_final/splits/crossmodal_test.txt')
parser.add_argument('--split_val_file', type=str, default='aist_plusplus_final/splits/crossmodal_val.txt')
parser.add_argument('--sampling_rate', type=int, default=15360*2)
args = parser.parse_args()
# Shared audio feature extractor used by extract_acoustic_feature() below.
extractor = FeatureExtractor()
# Create the output directories if missing (non-recursive: parents must exist).
if not os.path.exists(args.train_dir):
    os.mkdir(args.train_dir)
if not os.path.exists(args.test_dir):
    os.mkdir(args.test_dir)
# Module-level aliases read by make_music_dance_set() and split_data().
split_train_file = args.split_train_file
split_test_file = args.split_test_file
split_val_file = args.split_val_file
def make_music_dance_set(video_dir, annotation_dir):
    """Extract paired (music feature, 3D joint) sequences for every AIST++ clip.

    Scans the sequence names listed in the train/test/val split files, loads
    each clip's audio, computes acoustic features, and recovers SMPL joint
    positions from the motion annotations.

    :param video_dir: directory holding the raw ``*.wav`` music files
    :param annotation_dir: AIST++ annotation root (motion data, splits, ...)
    :return: tuple ``(musics, dances, fnames)`` — per-sequence acoustic feature
        lists, flattened per-frame joint lists of width 72 (24 joints x 3), and
        the corresponding sequence names
    """
    print('---------- Extract features from raw audio ----------')
    # print(annotation_dir)
    aist_dataset = AISTDataset(annotation_dir)
    musics = []
    dances = []
    fnames = []
    train = []
    test = []
    # music_dance_keys = []
    # onset_beats = []
    # NOTE(review): audio_fnames is computed but never used below — the loop
    # iterates over the split-file names instead.
    audio_fnames = sorted(os.listdir(video_dir))
    # dance_fnames = sorted(os.listdir(dance_dir))
    # audio_fnames = audio_fnames[:20] # for debug
    # print(f'audio_fnames: {audio_fnames}')
    # Read the sequence names of each split; val is pooled into test.
    train_file = open(split_train_file, 'r')
    for fname in train_file.readlines():
        train.append(fname.strip())
    train_file.close()
    test_file = open(split_test_file, 'r')
    for fname in test_file.readlines():
        test.append(fname.strip())
    test_file.close()
    test_file = open(split_val_file, 'r')
    for fname in test_file.readlines():
        test.append(fname.strip())
    test_file.close()
    ii = 0
    all_names = train + test
    for audio_fname in all_names:
        # if ii > 1:
        #     break
        # ii += 1
        # The 5th underscore-separated field of the sequence name is the music id.
        video_file = os.path.join(video_dir, audio_fname.split('_')[4] + '.wav')
        print(f'Process -> {video_file}')
        print(audio_fname)
        # Normalise the camera field ('cAll' -> 'c02') before deriving the key.
        seq_name, _ = AISTDataset.get_seq_name(audio_fname.replace('cAll', 'c02'))
        if (seq_name not in train) and (seq_name not in test):
            print(f'Not in set!')
            continue
        if seq_name in fnames:
            print(f'Already scaned!')
            continue
        sr = args.sampling_rate
        loader = None
        try:
            loader = essentia.standard.MonoLoader(filename=video_file, sampleRate=sr)
        except RuntimeError:
            # Missing/unreadable audio file: skip this sequence entirely.
            continue
        fnames.append(seq_name)
        print(seq_name)
        ### load audio features ###
        audio = loader()
        audio = np.array(audio).T
        feature = extract_acoustic_feature(audio, sr)
        musics.append(feature.tolist())
        ### load pose sequence ###
        # for seq_name in tqdm(seq_names):
        print(f'Process -> {seq_name}')
        smpl_poses, smpl_scaling, smpl_trans = AISTDataset.load_motion(
            aist_dataset.motion_dir, seq_name)
        smpl = None
        smpl = SMPL(model_path=args.smpl_dir, gender='MALE', batch_size=1)
        # Forward the SMPL body model and keep the first 24 joints per frame.
        keypoints3d = smpl.forward(
            global_orient=torch.from_numpy(smpl_poses[:, 0:1]).float(),
            body_pose=torch.from_numpy(smpl_poses[:, 1:]).float(),
            transl=torch.from_numpy(smpl_trans / smpl_scaling).float(),
        ).joints.detach().numpy()[:, 0:24, :]
        nframes = keypoints3d.shape[0]
        dances.append(keypoints3d.reshape(nframes, -1).tolist())
        print(np.shape(dances[-1])) # (nframes, 72)
    # return None, None, None
    return musics, dances, fnames
def extract_acoustic_feature(audio, sr):
    """Compute the per-frame acoustic feature matrix for one audio clip.

    Stacks MFCCs (20), MFCC deltas (20), CQT chroma (12), onset strength (1),
    onset beat (1) and the tempogram along the feature axis, then transposes
    to (n_frames, n_features).

    :param audio: mono audio samples
    :param sr: sampling rate of *audio*
    :return: 2-d feature array of shape (n_frames, n_features)
    """
    mel_db = extractor.get_melspectrogram(audio, sr)
    mfcc = extractor.get_mfcc(mel_db)
    mfcc_delta = extractor.get_mfcc_delta(mfcc)
    audio_harmonic, audio_percussive = extractor.get_hpss(audio)
    # 7 octaves for the 30.72 kHz material, 5 otherwise.
    n_octaves = 7 if sr == 15360 * 2 else 5
    chroma_cqt = extractor.get_chroma_cqt(audio_harmonic, sr, octave=n_octaves)
    onset_env = extractor.get_onset_strength(audio_percussive, sr)
    tempogram = extractor.get_tempogram(onset_env, sr)
    onset_beat = extractor.get_onset_beat(onset_env, sr)[0]
    onset_env = onset_env.reshape(1, -1)
    blocks = [
        mfcc,        # 20
        mfcc_delta,  # 20
        chroma_cqt,  # 12
        onset_env,   # 1
        onset_beat,  # 1
        tempogram,
    ]
    feature = np.concatenate(blocks, axis=0).transpose(1, 0)
    print(f'acoustic feature -> {feature.shape}')
    return feature
def align(musics, dances):
    """Trim every (music, dance) pair to their common frame count.

    :param musics: per-sequence lists of acoustic feature frames
    :param dances: per-sequence lists of pose frames, parallel to *musics*
    :return: tuple ``(trimmed_musics, trimmed_dances, musics)`` — the third
        element is the original, untrimmed music list
    """
    print('---------- Align the frames of music and dance ----------')
    assert len(musics) == len(dances), \
        'the number of audios should be equal to that of videos'
    aligned_musics = []
    aligned_dances = []
    for music_seq, dance_seq in zip(musics, dances):
        common_len = min(len(music_seq), len(dance_seq))
        print(f'music -> {np.array(music_seq).shape}, '
              f'dance -> {np.array(dance_seq).shape}, '
              f'min_seq_len -> {common_len}')
        aligned_musics.append(list(music_seq[:common_len]))
        aligned_dances.append(list(dance_seq[:common_len]))
    return aligned_musics, aligned_dances, musics
def _read_split_indices(split_file, fnames):
    """Map each sequence name listed in *split_file* to its index in *fnames*.

    :param split_file: path of a text file with one sequence name per line
    :param fnames: sequence names, in scan order
    :return: list of integer positions into *fnames*
    :raise ValueError: if a listed name is missing from *fnames*
    """
    # Context manager replaces the original manual open()/close() pattern.
    with open(split_file, 'r') as f:
        return [fnames.index(line.strip()) for line in f]


def split_data(fnames):
    """Split the scanned sequence names into train and test index arrays.

    Train indices come from ``split_train_file``; the test set pools the
    crossmodal test and validation splits (matching the original behaviour).

    :param fnames: sequence names, in the order they were scanned
    :return: tuple ``(train, test)`` of numpy index arrays into *fnames*
    """
    print('---------- Split data into train and test ----------')
    print(fnames)
    # The three near-identical read loops of the original are collapsed into
    # one helper; order of appends is preserved (train, then test, then val).
    train = _read_split_indices(split_train_file, fnames)
    test = (_read_split_indices(split_test_file, fnames)
            + _read_split_indices(split_val_file, fnames))
    return np.array(train), np.array(test)
def save(args, musics, dances, fnames, musics_raw):
    """Write one JSON file per sequence into the train and test directories.

    Train samples store the aligned (trimmed) music features; test samples
    store the raw, untrimmed music features.

    :param args: parsed CLI arguments (provides train_dir / test_dir)
    :param musics: aligned music features, parallel to *fnames*
    :param dances: aligned dance poses, parallel to *fnames*
    :param fnames: sequence names
    :param musics_raw: untrimmed music features, parallel to *fnames*
    """
    print('---------- Save to text file ----------')
    # fnames = sorted(os.listdir(os.path.join(args.input_dance_dir,inner_dir)))
    # # fnames = fnames[:20] # for debug
    # assert len(fnames)*2 == len(musics) == len(dances), 'alignment'
    # fnames = sorted(fnames)
    train_idx, test_idx = split_data(fnames)
    # train_idx = sorted(train_idx)
    print(f'train ids: {[fnames[idx] for idx in train_idx]}')
    # test_idx = sorted(test_idx)
    print(f'test ids: {[fnames[idx] for idx in test_idx]}')
    print('---------- train data ----------')
    for idx in train_idx:
        with open(os.path.join(args.train_dir, f'{fnames[idx]}.json'), 'w') as f:
            sample_dict = {
                'id': fnames[idx],
                'music_array': musics[idx],
                'dance_array': dances[idx]
            }
            # print(sample_dict)
            json.dump(sample_dict, f)
    print('---------- test data ----------')
    for idx in test_idx:
        with open(os.path.join(args.test_dir, f'{fnames[idx]}.json'), 'w') as f:
            sample_dict = {
                'id': fnames[idx],
                'music_array': musics_raw[idx], # musics[idx+i],
                'dance_array': dances[idx]
            }
            # print(sample_dict)
            json.dump(sample_dict, f)
if __name__ == '__main__':
    # End-to-end preprocessing: extract features, trim each music/dance pair to
    # a common length, then write per-sequence JSON files for train and test.
    musics, dances, fnames = make_music_dance_set(args.input_video_dir, args.input_annotation_dir)
    musics, dances, musics_raw = align(musics, dances)
    save(args, musics, dances, fnames, musics_raw)
| [
"os.path.exists",
"os.listdir",
"aistplusplus_api.aist_plusplus.loader.AISTDataset.load_motion",
"argparse.ArgumentParser",
"essentia.standard.MonoLoader",
"os.path.join",
"torch.from_numpy",
"numpy.array",
"smplx.SMPL",
"aistplusplus_api.aist_plusplus.loader.AISTDataset",
"os.mkdir",
"numpy.c... | [((452, 477), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (475, 477), False, 'import argparse\n'), ((1313, 1331), 'extractor.FeatureExtractor', 'FeatureExtractor', ([], {}), '()\n', (1329, 1331), False, 'from extractor import FeatureExtractor\n'), ((1340, 1370), 'os.path.exists', 'os.path.exists', (['args.train_dir'], {}), '(args.train_dir)\n', (1354, 1370), False, 'import os\n'), ((1376, 1400), 'os.mkdir', 'os.mkdir', (['args.train_dir'], {}), '(args.train_dir)\n', (1384, 1400), False, 'import os\n'), ((1408, 1437), 'os.path.exists', 'os.path.exists', (['args.test_dir'], {}), '(args.test_dir)\n', (1422, 1437), False, 'import os\n'), ((1443, 1466), 'os.mkdir', 'os.mkdir', (['args.test_dir'], {}), '(args.test_dir)\n', (1451, 1466), False, 'import os\n'), ((1753, 1780), 'aistplusplus_api.aist_plusplus.loader.AISTDataset', 'AISTDataset', (['annotation_dir'], {}), '(annotation_dir)\n', (1764, 1780), False, 'from aistplusplus_api.aist_plusplus.loader import AISTDataset\n'), ((5433, 5525), 'numpy.concatenate', 'np.concatenate', (['[mfcc, mfcc_delta, chroma_cqt, onset_env, onset_beat, tempogram]'], {'axis': '(0)'}), '([mfcc, mfcc_delta, chroma_cqt, onset_env, onset_beat,\n tempogram], axis=0)\n', (5447, 5525), True, 'import numpy as np\n'), ((7288, 7303), 'numpy.array', 'np.array', (['train'], {}), '(train)\n', (7296, 7303), True, 'import numpy as np\n'), ((7315, 7329), 'numpy.array', 'np.array', (['test'], {}), '(test)\n', (7323, 7329), True, 'import numpy as np\n'), ((1939, 1960), 'os.listdir', 'os.listdir', (['video_dir'], {}), '(video_dir)\n', (1949, 1960), False, 'import os\n'), ((3763, 3821), 'aistplusplus_api.aist_plusplus.loader.AISTDataset.load_motion', 'AISTDataset.load_motion', (['aist_dataset.motion_dir', 'seq_name'], {}), '(aist_dataset.motion_dir, seq_name)\n', (3786, 3821), False, 'from aistplusplus_api.aist_plusplus.loader import AISTDataset\n'), ((3870, 3929), 'smplx.SMPL', 'SMPL', ([], {'model_path': 
'args.smpl_dir', 'gender': '"""MALE"""', 'batch_size': '(1)'}), "(model_path=args.smpl_dir, gender='MALE', batch_size=1)\n", (3874, 3929), False, 'from smplx import SMPL\n'), ((3220, 3284), 'essentia.standard.MonoLoader', 'essentia.standard.MonoLoader', ([], {'filename': 'video_file', 'sampleRate': 'sr'}), '(filename=video_file, sampleRate=sr)\n', (3248, 3284), False, 'import essentia\n'), ((3484, 3499), 'numpy.array', 'np.array', (['audio'], {}), '(audio)\n', (3492, 3499), True, 'import numpy as np\n'), ((4345, 4365), 'numpy.shape', 'np.shape', (['dances[-1]'], {}), '(dances[-1])\n', (4353, 4365), True, 'import numpy as np\n'), ((8286, 8311), 'json.dump', 'json.dump', (['sample_dict', 'f'], {}), '(sample_dict, f)\n', (8295, 8311), False, 'import json\n'), ((8694, 8719), 'json.dump', 'json.dump', (['sample_dict', 'f'], {}), '(sample_dict, f)\n', (8703, 8719), False, 'import json\n'), ((8013, 8064), 'os.path.join', 'os.path.join', (['args.train_dir', 'f"""{fnames[idx]}.json"""'], {}), "(args.train_dir, f'{fnames[idx]}.json')\n", (8025, 8064), False, 'import os\n'), ((8401, 8451), 'os.path.join', 'os.path.join', (['args.test_dir', 'f"""{fnames[idx]}.json"""'], {}), "(args.test_dir, f'{fnames[idx]}.json')\n", (8413, 8451), False, 'import os\n'), ((6339, 6358), 'numpy.array', 'np.array', (['musics[i]'], {}), '(musics[i])\n', (6347, 6358), True, 'import numpy as np\n'), ((6397, 6416), 'numpy.array', 'np.array', (['dances[i]'], {}), '(dances[i])\n', (6405, 6416), True, 'import numpy as np\n'), ((3992, 4028), 'torch.from_numpy', 'torch.from_numpy', (['smpl_poses[:, 0:1]'], {}), '(smpl_poses[:, 0:1])\n', (4008, 4028), False, 'import torch\n'), ((4060, 4095), 'torch.from_numpy', 'torch.from_numpy', (['smpl_poses[:, 1:]'], {}), '(smpl_poses[:, 1:])\n', (4076, 4095), False, 'import torch\n'), ((4124, 4167), 'torch.from_numpy', 'torch.from_numpy', (['(smpl_trans / smpl_scaling)'], {}), '(smpl_trans / smpl_scaling)\n', (4140, 4167), False, 'import torch\n')] |
"""
This is a simple script showing how to connect and control a Wasatch Photonics
Raman spectrometer using Wasatch.PY.
In particular, it walks the user through a short process to optimize the working
distance by checking the height of a specific expected Raman peak (the 801.3cm⁻¹
peak of cyclohexane, in this case) matches a prescribed threshold.
"""
import sys
import time
import wasatch
import numpy as np
import scipy.signal
from wasatch.WasatchBus import WasatchBus
from wasatch.WasatchDevice import WasatchDevice
from wasatch.RealUSBDevice import RealUSBDevice
# Seconds to let the laser stabilise before sampling (required for MML lasers).
LASER_WARMUP_SEC = 10
EXPECTED_PEAK = 801.3 # cyclohexane (cm⁻¹)
# Minimum dark-corrected intensity the expected peak must reach to pass.
EXPECTED_COUNTS = 4000
PEAK_TOLERANCE_CM = 5 # allow peaks to move by as much as 5cm⁻¹
TEMPFILE = "spectrum.csv" # for debugging
class Workflow:
    """Guides the operator through optimizing a spectrometer's working distance.

    Repeatedly measures a cyclohexane reference sample until the expected
    801.3cm⁻¹ Raman peak is found with sufficient intensity.
    """

    def __init__(self, integ_time_ms, laser_power_mW):
        """
        :param integ_time_ms: integration time to use for acquisitions (ms)
        :param laser_power_mW: laser power to use for acquisitions (mW)
        """
        self.integ_time_ms = integ_time_ms
        self.laser_power_mW = laser_power_mW

    def connect(self) -> bool:
        """Connect to the first Wasatch USB spectrometer on the bus.

        :return: True on success; False if no device is found, the connection
            fails, or the device lacks a Raman wavecal (no wavelengths)
        """
        bus = WasatchBus(use_sim=False)
        if not bus.device_ids:
            print("No Wasatch USB spectrometers found.")
            return False
        device_id = bus.device_ids[0]
        print(f"connecting to {device_id}")
        device_id.device_type = RealUSBDevice(device_id)
        device = WasatchDevice(device_id)
        if not device.connect():
            # BUG FIX: was print("can't connect to %s", device_id), which printed
            # the literal "%s" followed by the id (logging-style args in print)
            print(f"can't connect to {device_id}")
            return False
        # take convenience handles to SpectrometerSettings and FeatureIdentificationDevice
        self.settings = device.settings
        self.fid = device.hardware
        if self.settings.wavelengths is None:
            print("script requires Raman spectrometer")
            return False
        # (typo fix in banner: cm¹ -> cm⁻¹)
        print("connected to %s %s with %d pixels (%.2f, %.2fnm) (%.2f, %.2fcm⁻¹)" % (
            self.settings.eeprom.model,
            self.settings.eeprom.serial_number,
            self.settings.pixels(),
            self.settings.wavelengths[0],
            self.settings.wavelengths[-1],
            self.settings.wavenumbers[0],
            self.settings.wavenumbers[-1]))
        return True

    def optimize_working_distance(self):
        """Apply acquisition settings, then loop until the distance check passes."""
        print(f"setting integration time to {self.integ_time_ms}ms")
        self.fid.set_integration_time_ms(self.integ_time_ms)
        print(f"setting laser power to {self.laser_power_mW}mW")
        self.fid.set_laser_power_mW(self.laser_power_mW)
        done = False
        while not done:
            done = self.test_working_distance()

    def get_spectrum(self):
        """Read one spectrum from the device.

        Also dumps the raw spectrum to TEMPFILE for debugging.

        :return: spectrum as a numpy array, or None if the read failed
        """
        response = self.fid.get_line()
        if response and response.data:
            spectrum = response.data.spectrum
            # debugging
            with open(TEMPFILE, "w") as outfile:
                outfile.write("\n".join([f"{x:0.2f}" for x in spectrum]))
            return np.asarray(spectrum)
        return None  # made explicit (original fell through, returning None)

    def test_working_distance(self) -> bool:
        """Take a dark-corrected measurement and check the calibration peak.

        :return: True if the expected peak is present with sufficient counts
        """
        print("-" * 50)
        print("Please insert calibration sample and press <Enter> (ctrl-C to exit)...", end='')
        try:
            input()
        except:
            # bare except is intentional: catches KeyboardInterrupt / EOFError
            print("Program exiting")
            sys.exit(1)
        print("Reading dark spectrum")
        dark = self.get_spectrum()
        if dark is None:
            print("failed to take dark")
            return False
        print("Enabling laser")
        self.fid.set_laser_enable(True)
        print(f"Waiting {LASER_WARMUP_SEC}sec for laser to warmup (required for MML)")
        time.sleep(LASER_WARMUP_SEC)
        print("Taking sample spectrum")
        sample = self.get_spectrum()
        if sample is None:
            print("failed to take sample")
            # BUG FIX: the original returned here with the laser still enabled
            self.fid.set_laser_enable(False)
            return False
        print("Disabling laser")
        self.fid.set_laser_enable(False)
        # generate dark-corrected measurement
        measurement = sample - dark
        print(f"dark: {dark}...")
        print(f"sample: {sample}...")
        print(f"measurement: {measurement}...")
        # find pixel indices of peaks in the dark-corrected measurement
        # (tune find_peaks arguments as applicable to your setup)
        peak_pixels = scipy.signal.find_peaks(measurement, height=10000)[0]
        print(f"peak pixels: {peak_pixels}")
        # see if our "calibration peak" is in the list
        peak_pixel = None
        for pixel in peak_pixels:
            peak_cm = self.settings.wavenumbers[pixel]
            if abs(EXPECTED_PEAK - peak_cm) <= PEAK_TOLERANCE_CM:
                print(f"found expected {EXPECTED_PEAK}cm⁻¹ at pixel {pixel} ({peak_cm:0.2f}cm⁻¹)")
                peak_pixel = pixel
                break
        if peak_pixel is None:
            print(f"Failed to find {EXPECTED_PEAK}cm⁻¹ peak in sample: adjust working distance")
            return False
        # see if we've achieved the required intensity
        counts = measurement[peak_pixel]
        if counts < EXPECTED_COUNTS:
            print(f"Failed {EXPECTED_PEAK}cm⁻¹ peak counts too low ({counts} < {EXPECTED_COUNTS}): adjust working distance")
            return False
        print(f"Success! {EXPECTED_PEAK}cm⁻¹ peak found with {counts} counts.")
        return True
################################################################################
# main()
################################################################################
# Module-level script body: runs immediately when the file is executed.
workflow = Workflow(integ_time_ms=1000, laser_power_mW=25)
if not workflow.connect():
    sys.exit(1)
workflow.optimize_working_distance()
| [
"wasatch.WasatchDevice.WasatchDevice",
"numpy.asarray",
"time.sleep",
"sys.exit",
"wasatch.WasatchBus.WasatchBus",
"wasatch.RealUSBDevice.RealUSBDevice"
] | [((5483, 5494), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5491, 5494), False, 'import sys\n'), ((1037, 1062), 'wasatch.WasatchBus.WasatchBus', 'WasatchBus', ([], {'use_sim': '(False)'}), '(use_sim=False)\n', (1047, 1062), False, 'from wasatch.WasatchBus import WasatchBus\n'), ((1291, 1315), 'wasatch.RealUSBDevice.RealUSBDevice', 'RealUSBDevice', (['device_id'], {}), '(device_id)\n', (1304, 1315), False, 'from wasatch.RealUSBDevice import RealUSBDevice\n'), ((1333, 1357), 'wasatch.WasatchDevice.WasatchDevice', 'WasatchDevice', (['device_id'], {}), '(device_id)\n', (1346, 1357), False, 'from wasatch.WasatchDevice import WasatchDevice\n'), ((3523, 3551), 'time.sleep', 'time.sleep', (['LASER_WARMUP_SEC'], {}), '(LASER_WARMUP_SEC)\n', (3533, 3551), False, 'import time\n'), ((2891, 2911), 'numpy.asarray', 'np.asarray', (['spectrum'], {}), '(spectrum)\n', (2901, 2911), True, 'import numpy as np\n'), ((3176, 3187), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3184, 3187), False, 'import sys\n')] |
"""
Core implementation of :mod:`facet.simulation.partition`
"""
import logging
import math
import operator as op
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, Iterable, Optional, Sequence, Tuple, TypeVar
import numpy as np
import pandas as pd
from pytools.api import AllTracker, inheritdoc
from pytools.fit import FittableMixin
# Module-level logger.
log = logging.getLogger(__name__)
# Public API of this module.
__all__ = [
    "Partitioner",
    "RangePartitioner",
    "ContinuousRangePartitioner",
    "IntegerRangePartitioner",
    "CategoryPartitioner",
]
#
# Type variables
#
T_Self = TypeVar("T_Self")
T_Values = TypeVar("T_Values")
# Numeric value type constrained to int or float.
T_Values_Numeric = TypeVar("T_Values_Numeric", int, float)
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# Class definitions
#
class Partitioner(
    FittableMixin[Iterable[T_Values]], Generic[T_Values], metaclass=ABCMeta
):
    """
    Abstract base class of all partitioners.
    """
    # Used when no max_partitions argument is given.
    DEFAULT_MAX_PARTITIONS = 20
    def __init__(self, max_partitions: Optional[int] = None) -> None:
        """
        :param max_partitions: the maximum number of partitions to generate; must
            be at least 2 (default: {DEFAULT_MAX_PARTITIONS})
        """
        if max_partitions is None:
            self._max_partitions = Partitioner.DEFAULT_MAX_PARTITIONS
        elif max_partitions < 2:
            raise ValueError(f"arg max_partitions={max_partitions} must be at least 2")
        else:
            self._max_partitions = max_partitions
    # Substitute the concrete default into the docstring template above at class
    # creation time, so rendered documentation shows the actual value.
    __init__.__doc__ = __init__.__doc__.replace(
        "{DEFAULT_MAX_PARTITIONS}", repr(DEFAULT_MAX_PARTITIONS)
    )
    @property
    def max_partitions(self) -> int:
        """
        The maximum number of partitions to be generated by this partitioner.
        """
        return self._max_partitions
    @property
    @abstractmethod
    def partitions_(self) -> Sequence[T_Values]:
        """
        Return central values of the partitions.
        Requires that this partitioner has been fitted with a set of observed values.
        :return: a sequence of central values for each partition
        """
    @property
    @abstractmethod
    def frequencies_(self) -> Sequence[int]:
        """
        Return the count of observed elements in each partition.
        :return: a sequence of value counts for each partition
        """
    @property
    @abstractmethod
    def is_categorical(self) -> bool:
        """
        ``True`` if this is partitioner handles categorical values, ``False`` otherwise.
        """
    @abstractmethod
    def fit(self: T_Self, values: Iterable[T_Values], **fit_params: Any) -> T_Self:
        """
        Calculate the partitioning for the given observed values.
        :param values: a sequence of observed values as the empirical basis for
            calculating the partitions
        :param fit_params: optional fitting parameters
        :return: ``self``
        """
@inheritdoc(match="[see superclass]")
class RangePartitioner(
    Partitioner[T_Values_Numeric], Generic[T_Values_Numeric], metaclass=ABCMeta
):
    """
    Abstract base class for numerical partitioners.
    """
    def __init__(
        self,
        max_partitions: int = None,
        lower_bound: Optional[T_Values_Numeric] = None,
        upper_bound: Optional[T_Values_Numeric] = None,
    ) -> None:
        """
        :param max_partitions: the maximum number of partitions to make
            (default: 20); should be at least 2
        :param lower_bound: the lower bound of the elements in the partition
        :param upper_bound: the upper bound of the elements in the partition
        """
        super().__init__(max_partitions)
        if (
            lower_bound is not None
            and upper_bound is not None
            and lower_bound > upper_bound
        ):
            raise ValueError(
                f"arg lower_bound > arg upper_bound: [{lower_bound}, {upper_bound})"
            )
        self._lower_bound = lower_bound
        self._upper_bound = upper_bound
        # The following attributes are populated by fit().
        self._step: Optional[T_Values_Numeric] = None
        self._frequencies: Optional[Sequence[int]] = None
        self._partitions: Optional[Sequence[T_Values_Numeric]] = None
        self._partition_bounds: Optional[
            Sequence[Tuple[T_Values_Numeric, T_Values_Numeric]]
        ] = None
    @property
    def lower_bound(self) -> T_Values_Numeric:
        """
        The lower bound of the partitioning.
        ``Null`` if no explicit lower bound is set.
        """
        return self._lower_bound
    @property
    def upper_bound(self) -> T_Values_Numeric:
        """
        The upper bound of the partitioning.
        ``Null`` if no explicit upper bound is set.
        """
        return self._upper_bound
    @property
    def is_categorical(self) -> bool:
        """
        ``False``
        """
        return False
    @property
    def partitions_(self) -> Sequence[T_Values_Numeric]:
        """[see superclass]"""
        self._ensure_fitted()
        return self._partitions
    @property
    def partition_bounds_(self) -> Sequence[Tuple[T_Values_Numeric, T_Values_Numeric]]:
        """
        Return the endpoints of the intervals that delineate each partitions.
        :return: sequence of tuples (x, y) for every partition, where x is the
            inclusive lower bound of a partition range, and y is the exclusive upper
            bound of a partition range
        """
        self._ensure_fitted()
        return self._partition_bounds
    @property
    def partition_width_(self) -> T_Values_Numeric:
        """
        The width of each partition.
        """
        self._ensure_fitted()
        return self._step
    @property
    def frequencies_(self) -> Sequence[int]:
        """[see superclass]"""
        self._ensure_fitted()
        return self._frequencies
    # noinspection PyMissingOrEmptyDocstring
    def fit(
        self: T_Self,
        values: Iterable[T_Values],
        **fit_params: Any,
    ) -> T_Self:
        """[see superclass]"""
        self: RangePartitioner  # support type hinting in PyCharm
        # ensure arg values is an array
        if not isinstance(values, np.ndarray):
            if isinstance(values, pd.Series):
                values = values.values
            else:
                if not isinstance(values, Sequence):
                    try:
                        values = iter(values)
                    except TypeError:
                        raise TypeError("arg values must be iterable")
                values = np.array(values)
        lower_bound = self._lower_bound
        upper_bound = self._upper_bound
        # Derive any missing bound from the data, excluding outliers beyond
        # 1.5 * IQR (Tukey's fences).
        if lower_bound is None or upper_bound is None:
            q3q1 = np.nanquantile(values, q=[0.75, 0.25])
            inlier_range = op.sub(*q3q1) * 1.5 # iqr * 1.5
            if lower_bound is None:
                lower_bound = values[values >= q3q1[1] - inlier_range].min()
            if upper_bound is None:
                upper_bound = values[values <= q3q1[0] + inlier_range].max()
        assert upper_bound >= lower_bound
        # calculate the step count based on the maximum number of partitions,
        # rounded to the next-largest rounded value ending in 1, 2, or 5
        self._step = step = self._step_size(lower_bound, upper_bound)
        # calculate centre values of the first and last partition;
        # both are rounded to multiples of the step size
        first_partition = math.floor((lower_bound + step / 2) / step) * step
        last_partition = math.ceil((upper_bound - step / 2) / step) * step
        n_partitions = int(round((last_partition - first_partition) / self._step)) + 1
        self._partitions = partitions = np.round(
            first_partition + np.arange(n_partitions) * self._step,
            # round to the nearest power of 10 of the step variable
            int(-np.floor(np.log10(self._step))),
        ).tolist()
        center_offset_left = self._partition_center_offset
        center_offset_right = self._step - center_offset_left
        self._partition_bounds = [
            (center - center_offset_left, center + center_offset_right)
            for center in partitions
        ]
        # calculate the number of elements in each partitions
        # create the bins, starting with the lower bound of the first partition
        partition_bins = (first_partition - step / 2) + np.arange(
            n_partitions + 1
        ) * step
        partition_indices = np.digitize(values, bins=partition_bins)
        # frequency counts will include left and right outliers, hence n_partitions + 2
        # and we exclude the first and last element of the result
        frequencies = np.bincount(partition_indices, minlength=n_partitions + 2)[1:-1]
        self._frequencies = frequencies
        return self
    @property
    def is_fitted(self) -> bool:
        """[see superclass]"""
        return self._frequencies is not None
    @staticmethod
    def _ceil_step(step: float):
        """
        Round the step size (arbitrary float) to a human-readable number like 0.5, 1, 2.
        :param step: the step size to round by
        :return: the nearest greater or equal step size in the series
            (..., 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, ...)
        """
        if step <= 0:
            raise ValueError("arg step must be positive")
        return min(10 ** math.ceil(math.log10(step * m)) / m for m in [1, 2, 5])
    @staticmethod
    @abstractmethod
    def _step_size(
        lower_bound: T_Values_Numeric, upper_bound: T_Values_Numeric
    ) -> T_Values_Numeric:
        # Compute the step size (interval length) used in the partitions
        pass
    @property
    @abstractmethod
    def _partition_center_offset(self) -> T_Values_Numeric:
        # Offset between center and endpoints of an interval
        pass
class ContinuousRangePartitioner(RangePartitioner[float]):
    """
    Partition floating-point values into adjacent intervals of equal width.

    The interval range and width are derived from :attr:`.max_partitions`,
    :attr:`.lower_bound`, and :attr:`.upper_bound`.

    For interpretability, boundaries and widths are always a power of 10, or a
    multiple of 2 or 5 of a power of 10 (0.1, 0.2, 0.5, 1.0, 2.0, 5.0, ...),
    and the first and last intervals contain :attr:`lower_bound` and
    :attr:`upper_bound` respectively.

    Example: with :attr:`.max_partitions` = 10, :attr:`.lower_bound` = 3.3, and
    :attr:`.upper_bound` = 4.7, the partitioning is
    [3.2, 3.4), [3.4, 3.6), [3.6, 3.8), [4.0, 4.2), [4.4, 4.6), [4.6, 4.8]
    """

    def _step_size(self, lower_bound: float, upper_bound: float) -> float:
        # Raw width per interval, then rounded up to the nearest "round" number.
        raw_step = (upper_bound - lower_bound) / (self.max_partitions - 1)
        return RangePartitioner._ceil_step(raw_step)

    @property
    def _partition_center_offset(self) -> float:
        # Interval centers sit exactly half a step from each endpoint.
        return self._step / 2
class IntegerRangePartitioner(RangePartitioner[int]):
    """
    Partition integer values into adjacent intervals of equal width.

    The interval range and width are derived from :attr:`.max_partitions`,
    :attr:`.lower_bound`, and :attr:`.upper_bound`.

    For interpretability, boundaries and widths are always integers that are a
    power of 10, or a multiple of 2 or 5 of a power of 10 (1, 2, 5, 10, 20,
    50, ...), and the first and last intervals contain :attr:`lower_bound`
    and :attr:`upper_bound` respectively.

    Example: with :attr:`.max_partitions` = 5, :attr:`.lower_bound` = 3, and
    :attr:`.upper_bound` = 11, the partitioning is
    [2, 4), [4, 6), [6, 8), [8, 10), [10, 12)
    """

    def _step_size(self, lower_bound: int, upper_bound: int) -> int:
        # Round the raw width up to a "round" number, truncate to int, and
        # never go below a width of 1.
        raw_step = (upper_bound - lower_bound) / (self.max_partitions - 1)
        rounded_step = int(RangePartitioner._ceil_step(raw_step))
        return max(1, rounded_step)

    @property
    def _partition_center_offset(self) -> int:
        # Integer half-step (floor division keeps the offset integral).
        return self._step // 2
@inheritdoc(match="[see superclass]")
class CategoryPartitioner(Partitioner[T_Values]):
    """
    Partition categorical values.

    Each of the :attr:`.max_partitions` most frequently observed values becomes
    its own partition; less frequent values are not represented.
    """

    def __init__(self, max_partitions: Optional[int] = None) -> None:
        """[see superclass]"""
        super().__init__(max_partitions=max_partitions)
        self._frequencies = None
        self._partitions = None

    @property
    def is_fitted(self) -> bool:
        """[see superclass]"""
        return self._frequencies is not None

    @property
    def is_categorical(self) -> bool:
        """
        ``True``
        """
        return True

    @property
    def partitions_(self) -> Sequence[T_Values]:
        """[see superclass]"""
        self._ensure_fitted()
        return self._partitions

    @property
    def frequencies_(self) -> Sequence[int]:
        """[see superclass]"""
        self._ensure_fitted()
        return self._frequencies

    # noinspection PyMissingOrEmptyDocstring
    def fit(self: T_Self, values: Sequence[T_Values], **fit_params: Any) -> T_Self:
        """[see superclass]"""
        self: CategoryPartitioner  # support type hinting in PyCharm
        # Coerce arbitrary iterables into a pandas series so values can be counted
        if not isinstance(values, pd.Series):
            if not isinstance(values, (np.ndarray, Sequence)):
                try:
                    values = iter(values)
                except TypeError:
                    raise TypeError("arg values must be iterable")
            values = pd.Series(data=values)
        counts = values.value_counts(ascending=False)
        keep = self.max_partitions
        self._partitions = counts.index.values[:keep]
        self._frequencies = counts.values[:keep]
        return self
__tracker.validate()
| [
"logging.getLogger",
"pandas.Series",
"numpy.log10",
"math.ceil",
"math.floor",
"numpy.digitize",
"operator.sub",
"numpy.array",
"numpy.nanquantile",
"pytools.api.inheritdoc",
"math.log10",
"numpy.bincount",
"numpy.arange",
"typing.TypeVar"
] | [((366, 393), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (383, 393), False, 'import logging\n'), ((578, 595), 'typing.TypeVar', 'TypeVar', (['"""T_Self"""'], {}), "('T_Self')\n", (585, 595), False, 'from typing import Any, Generic, Iterable, Optional, Sequence, Tuple, TypeVar\n'), ((607, 626), 'typing.TypeVar', 'TypeVar', (['"""T_Values"""'], {}), "('T_Values')\n", (614, 626), False, 'from typing import Any, Generic, Iterable, Optional, Sequence, Tuple, TypeVar\n'), ((646, 685), 'typing.TypeVar', 'TypeVar', (['"""T_Values_Numeric"""', 'int', 'float'], {}), "('T_Values_Numeric', int, float)\n", (653, 685), False, 'from typing import Any, Generic, Iterable, Optional, Sequence, Tuple, TypeVar\n'), ((2976, 3012), 'pytools.api.inheritdoc', 'inheritdoc', ([], {'match': '"""[see superclass]"""'}), "(match='[see superclass]')\n", (2986, 3012), False, 'from pytools.api import AllTracker, inheritdoc\n'), ((12418, 12454), 'pytools.api.inheritdoc', 'inheritdoc', ([], {'match': '"""[see superclass]"""'}), "(match='[see superclass]')\n", (12428, 12454), False, 'from pytools.api import AllTracker, inheritdoc\n'), ((8562, 8602), 'numpy.digitize', 'np.digitize', (['values'], {'bins': 'partition_bins'}), '(values, bins=partition_bins)\n', (8573, 8602), True, 'import numpy as np\n'), ((6789, 6827), 'numpy.nanquantile', 'np.nanquantile', (['values'], {'q': '[0.75, 0.25]'}), '(values, q=[0.75, 0.25])\n', (6803, 6827), True, 'import numpy as np\n'), ((7532, 7575), 'math.floor', 'math.floor', (['((lower_bound + step / 2) / step)'], {}), '((lower_bound + step / 2) / step)\n', (7542, 7575), False, 'import math\n'), ((7608, 7650), 'math.ceil', 'math.ceil', (['((upper_bound - step / 2) / step)'], {}), '((upper_bound - step / 2) / step)\n', (7617, 7650), False, 'import math\n'), ((8780, 8838), 'numpy.bincount', 'np.bincount', (['partition_indices'], {'minlength': '(n_partitions + 2)'}), '(partition_indices, minlength=n_partitions + 2)\n', (8791, 8838), 
True, 'import numpy as np\n'), ((13995, 14017), 'pandas.Series', 'pd.Series', ([], {'data': 'values'}), '(data=values)\n', (14004, 14017), True, 'import pandas as pd\n'), ((6616, 6632), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (6624, 6632), True, 'import numpy as np\n'), ((6855, 6868), 'operator.sub', 'op.sub', (['*q3q1'], {}), '(*q3q1)\n', (6861, 6868), True, 'import operator as op\n'), ((8477, 8504), 'numpy.arange', 'np.arange', (['(n_partitions + 1)'], {}), '(n_partitions + 1)\n', (8486, 8504), True, 'import numpy as np\n'), ((7826, 7849), 'numpy.arange', 'np.arange', (['n_partitions'], {}), '(n_partitions)\n', (7835, 7849), True, 'import numpy as np\n'), ((9494, 9514), 'math.log10', 'math.log10', (['(step * m)'], {}), '(step * m)\n', (9504, 9514), False, 'import math\n'), ((7958, 7978), 'numpy.log10', 'np.log10', (['self._step'], {}), '(self._step)\n', (7966, 7978), True, 'import numpy as np\n')] |
from numpy import sign
def comp_rot_dir(self):
    """Compute the rotation direction of the fundamental magnetic field induced by the winding

    WARNING: rot_dir = -1 to have positive rotor rotating direction, i.e. rotor position moves towards positive angle

    Parameters
    ----------
    self : LamSlotMultiWind
        A LamSlotMultiWind object

    Returns
    -------
    rot_dir : int
        -1 or +1
    """
    p = self.get_pole_pair_number()
    # 20 points per pole over time and space is enough to capture the
    # rotating direction of the fundamental mmf
    mmf_unit, _ = self.comp_mmf_unit(Nt=20 * p, Na=20 * p)
    # Extract the fundamental harmonic at wavenumbers +p and -p
    harm_pos = mmf_unit.get_harmonics(1, "freqs", "wavenumber=" + str(p))
    harm_neg = mmf_unit.get_harmonics(1, "freqs", "wavenumber=" + str(-p))
    # Keep whichever harmonic dominates in magnitude
    if harm_pos["Magnitude"][0] > harm_neg["Magnitude"][0]:
        fundamental = harm_pos
    else:
        fundamental = harm_neg
    # Frequency and wavenumber of the dominant fundamental
    freq = fundamental["freqs"][0]
    wavenumber = fundamental["wavenumber"][0]
    # Rotating direction is the sign of the mechanical speed of the magnetic
    # field fundamental, i.e. frequency over wavenumber
    return int(sign(freq / wavenumber))
| [
"numpy.sign"
] | [((1210, 1221), 'numpy.sign', 'sign', (['(f / r)'], {}), '(f / r)\n', (1214, 1221), False, 'from numpy import sign\n')] |
import numpy as np
import pytest
import random
import torch
import densetorch as dt
# Expected total parameter counts per encoder backbone when configured with
# 21 output classes (keys are backbone identifiers).
# NOTE(review): not referenced in this chunk — presumably used by tests
# elsewhere in the file.
NUMBER_OF_PARAMETERS_WITH_21_CLASSES = {
    "152": 61993301,
    "101": 46349653,
    "50": 27357525,
    "mbv2": 3284565,
}
# Expected (encoder, decoder) layer counts per backbone identifier.
NUMBER_OF_ENCODER_DECODER_LAYERS = {
    "152": (465, 28),
    "101": (312, 28),
    "50": (159, 28),
    "mbv2": (156, 27),
}
def get_dummy_input_tensor(height, width, channels=3, batch=4):
    """Return an uninitialized float32 tensor of shape (batch, channels, height, width)."""
    shape = (batch, channels, height, width)
    return torch.FloatTensor(*shape).float()
def get_network_output_shape(h, w, output_stride=4):
    """Return the (height, width) produced by a network with the given output stride."""
    return tuple(np.ceil(dim / output_stride) for dim in (h, w))
@pytest.fixture()
def num_classes():
    """Random number of output classes in [1, 40] to vary test configurations."""
    return random.randint(1, 40)
@pytest.fixture()
def num_channels():
    """Random number of input channels in [1, 40] to vary test configurations."""
    return random.randint(1, 40)
@pytest.fixture()
def input_height():
    """Random input height in [33, 320] to vary spatial test sizes."""
    return random.randint(33, 320)
@pytest.fixture()
def input_width():
    """Random input width in [33, 320] to vary spatial test sizes."""
    return random.randint(33, 320)
@pytest.mark.parametrize(
    "enc_fn", [dt.nn.xception65, dt.nn.mobilenetv2, dt.nn.resnet18]
)
def test_encoders(enc_fn, input_height, input_width):
    """Each encoder should return exactly one batched torch.Tensor."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    encoder = enc_fn(pretrained=False, return_idx=0).to(device)
    with torch.no_grad():
        dummy = get_dummy_input_tensor(
            height=input_height, width=input_width
        ).to(device)
        output = encoder(dummy)
    n_outputs = len(output)
    assert n_outputs == 1, f"Expected a single output, got {n_outputs:d}"
    out_batch = output[0].size(0)
    in_batch = dummy.size(0)
    assert out_batch == in_batch, (
        f"Batch size mismatch, got {out_batch:d}, expected {in_batch:d}"
    )
    assert isinstance(output[0], torch.Tensor), "Expected a torch.Tensor as output"
@pytest.mark.parametrize(
    "dec_fn", [dt.nn.DLv3plus, dt.nn.MTLWRefineNet, dt.nn.LWRefineNet]
)
def test_decoders(dec_fn, input_height, input_width, num_classes, num_channels):
    """Each decoder should map a feature tensor to (batch, num_classes, H, W)."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    decoder = dec_fn(
        input_sizes=num_channels, num_classes=num_classes, collapse_ind=0
    ).to(device)
    with torch.no_grad():
        input_tensor = get_dummy_input_tensor(
            height=input_height, width=input_width, channels=num_channels,
        ).to(device)
        output = decoder(input_tensor)
    if isinstance(output, list):
        assert len(output) == 1, f"Expected a single output, got {len(output):d}"
        output = output[0]
    assert isinstance(output, torch.Tensor), "Expected a torch.Tensor as output"
    # BUG FIX: the failure message previously read `output[0].size(0)`, which for
    # the (possibly unwrapped) tensor is the channel count of the first sample,
    # not the batch size; report `output.size(0)` instead.
    assert output.size(0) == input_tensor.size(
        0
    ), f"Batch size mismatch, got {output.size(0):d}, expected {input_tensor.size(0):d}"
    assert (
        output.size(1) == num_classes
    ), f"Channel size mismatch, got {output.size(1):d}, expected {num_classes:d}"
    assert output.size(2) == input_tensor.size(
        2
    ), f"Height size mismatch, got {output.size(2):d}, expected {input_tensor.size(2):d}"
    assert output.size(3) == input_tensor.size(
        3
    ), f"Width size mismatch, got {output.size(3):d}, expected {input_tensor.size(3):d}"
| [
"numpy.ceil",
"torch.FloatTensor",
"pytest.mark.parametrize",
"torch.cuda.is_available",
"pytest.fixture",
"torch.no_grad",
"random.randint"
] | [((632, 648), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (646, 648), False, 'import pytest\n'), ((704, 720), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (718, 720), False, 'import pytest\n'), ((777, 793), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (791, 793), False, 'import pytest\n'), ((852, 868), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (866, 868), False, 'import pytest\n'), ((926, 1019), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""enc_fn"""', '[dt.nn.xception65, dt.nn.mobilenetv2, dt.nn.resnet18]'], {}), "('enc_fn', [dt.nn.xception65, dt.nn.mobilenetv2, dt.\n nn.resnet18])\n", (949, 1019), False, 'import pytest\n'), ((1721, 1817), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dec_fn"""', '[dt.nn.DLv3plus, dt.nn.MTLWRefineNet, dt.nn.LWRefineNet]'], {}), "('dec_fn', [dt.nn.DLv3plus, dt.nn.MTLWRefineNet, dt.\n nn.LWRefineNet])\n", (1744, 1817), False, 'import pytest\n'), ((679, 700), 'random.randint', 'random.randint', (['(1)', '(40)'], {}), '(1, 40)\n', (693, 700), False, 'import random\n'), ((752, 773), 'random.randint', 'random.randint', (['(1)', '(40)'], {}), '(1, 40)\n', (766, 773), False, 'import random\n'), ((825, 848), 'random.randint', 'random.randint', (['(33)', '(320)'], {}), '(33, 320)\n', (839, 848), False, 'import random\n'), ((899, 922), 'random.randint', 'random.randint', (['(33)', '(320)'], {}), '(33, 320)\n', (913, 922), False, 'import random\n'), ((574, 600), 'numpy.ceil', 'np.ceil', (['(h / output_stride)'], {}), '(h / output_stride)\n', (581, 600), True, 'import numpy as np\n'), ((602, 628), 'numpy.ceil', 'np.ceil', (['(w / output_stride)'], {}), '(w / output_stride)\n', (609, 628), True, 'import numpy as np\n'), ((1098, 1123), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1121, 1123), False, 'import torch\n'), ((1208, 1223), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1221, 1223), False, 'import torch\n'), ((1923, 1948), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1946, 1948), False, 'import torch\n'), ((2082, 2097), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2095, 2097), False, 'import torch\n'), ((426, 475), 'torch.FloatTensor', 'torch.FloatTensor', (['batch', 'channels', 'height', 'width'], {}), '(batch, channels, height, width)\n', (443, 475), False, 'import torch\n')] |
from __future__ import print_function
import numpy as np
import tensorflow as tf
try:
import pystan
from collections import OrderedDict
except ImportError:
pass
class PythonModel:
    """
    Model wrapper for models written in NumPy/SciPy.
    """
    def __init__(self):
        # Number of latent variables; subclasses are expected to set this.
        self.num_vars = None

    def log_prob(self, xs, zs):
        """Wrap ``_py_log_prob`` as a TensorFlow op returning a float32 tensor."""
        outputs = tf.py_func(self._py_log_prob, [xs, zs], [tf.float32])
        return outputs[0]

    def _py_log_prob(self, xs, zs):
        """
        Compute the log joint density for a batch of latent variables.

        Arguments
        ----------
        xs : np.ndarray
            Observed data.
        zs : np.ndarray
            n_minibatch x dim(z) array, where each row is a set of
            latent variables.

        Returns
        -------
        np.ndarray
            n_minibatch array of type np.float32, where each element
            is the log pdf evaluated at (z_{b1}, ..., z_{bd})
        """
        raise NotImplementedError()
class StanModel:
    """
    Model wrapper for models written in Stan.

    Arguments
    ----------
    file: see documentation for argument in pystan.stan
    model_code: see documentation for argument in pystan.stan

    Raises
    ------
    ValueError
        If neither ``file`` nor ``model_code`` is provided.
    """
    def __init__(self, file=None, model_code=None):
        if file is not None:
            self.file = file
        elif model_code is not None:
            self.model_code = model_code
        else:
            # BUG FIX: a bare `raise` with no active exception produces a
            # confusing "RuntimeError: No active exception to re-raise";
            # raise an explicit, descriptive error instead.
            raise ValueError("Either `file` or `model_code` must be provided.")
        # Stan model instantiation is deferred until data is available.
        self.flag_init = False

    def log_prob(self, xs, zs):
        """Wrap ``_py_log_prob`` as a TensorFlow op, building the model lazily."""
        if self.flag_init is False:
            self._initialize(xs)
        return tf.py_func(self._py_log_prob, [zs], [tf.float32])[0]

    def _initialize(self, xs):
        """Instantiate the Stan model with data ``xs`` and count latent variables."""
        print("The following message exists as Stan instantiates the model.")
        if hasattr(self, 'file'):
            self.model = pystan.stan(file=self.file,
                                     data=xs, iter=1, chains=1)
        else:
            self.model = pystan.stan(model_code=self.model_code,
                                     data=xs, iter=1, chains=1)
        # Scalar parameters (empty dims) contribute 1 variable each.
        # NOTE(review): `sum(dim)` is used as the element count; for parameters
        # with more than one dimension the count is the product, so this is only
        # correct for scalar and vector parameters — confirm intended usage.
        self.num_vars = sum([sum(dim) if sum(dim) != 0 else 1
                             for dim in self.model.par_dims])
        self.flag_init = True

    def _py_log_prob(self, zs):
        """
        Evaluate the Stan model's log density for each row of ``zs``.

        Notes
        -----
        The log_prob() method in Stan requires the input to be a
        dictionary data type, with each parameter named
        correspondingly; this is because zs lives on the original
        (constrained) latent variable space.

        Ideally, in Stan it would have log_prob() for both this
        input and a flattened vector. Internally, Stan always assumes
        unconstrained parameters are flattened vectors, and
        constrained parameters are named data structures.
        """
        lp = np.zeros((zs.shape[0]), dtype=np.float32)
        for b, z in enumerate(zs):
            # Rebuild the named parameter structure Stan expects.
            z_dict = OrderedDict()
            idx = 0
            for dim, par in zip(self.model.par_dims, self.model.model_pars):
                elems = np.sum(dim)
                if elems == 0:
                    # Scalar parameter: consume one flat entry.
                    z_dict[par] = float(z[idx])
                    idx += 1
                else:
                    z_dict[par] = z[idx:(idx + elems)].reshape(dim)
                    idx += elems
            # Map to the unconstrained space without the Jacobian adjustment.
            z_unconst = self.model.unconstrain_pars(z_dict)
            lp[b] = self.model.log_prob(z_unconst, adjust_transform=False)
        return lp
| [
"collections.OrderedDict",
"numpy.sum",
"numpy.zeros",
"pystan.stan",
"tensorflow.py_func"
] | [((2680, 2719), 'numpy.zeros', 'np.zeros', (['zs.shape[0]'], {'dtype': 'np.float32'}), '(zs.shape[0], dtype=np.float32)\n', (2688, 2719), True, 'import numpy as np\n'), ((364, 417), 'tensorflow.py_func', 'tf.py_func', (['self._py_log_prob', '[xs, zs]', '[tf.float32]'], {}), '(self._py_log_prob, [xs, zs], [tf.float32])\n', (374, 417), True, 'import tensorflow as tf\n'), ((1483, 1532), 'tensorflow.py_func', 'tf.py_func', (['self._py_log_prob', '[zs]', '[tf.float32]'], {}), '(self._py_log_prob, [zs], [tf.float32])\n', (1493, 1532), True, 'import tensorflow as tf\n'), ((1705, 1759), 'pystan.stan', 'pystan.stan', ([], {'file': 'self.file', 'data': 'xs', 'iter': '(1)', 'chains': '(1)'}), '(file=self.file, data=xs, iter=1, chains=1)\n', (1716, 1759), False, 'import pystan\n'), ((1836, 1902), 'pystan.stan', 'pystan.stan', ([], {'model_code': 'self.model_code', 'data': 'xs', 'iter': '(1)', 'chains': '(1)'}), '(model_code=self.model_code, data=xs, iter=1, chains=1)\n', (1847, 1902), False, 'import pystan\n'), ((2778, 2791), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2789, 2791), False, 'from collections import OrderedDict\n'), ((2913, 2924), 'numpy.sum', 'np.sum', (['dim'], {}), '(dim)\n', (2919, 2924), True, 'import numpy as np\n')] |
"""surface.py: Surface element and geometry"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import properties
from .base import ProjectElement
from .data import Int3Array, ScalarArray, Vector3Array
from .texture import HasTexturesMixin
class BaseSurfaceElement(ProjectElement, HasTexturesMixin):
    """Base class for surface elements"""

    subtype = properties.StringChoice(
        'Category of Surface',
        choices=('surface',),
        default='surface',
    )

    _valid_locations = ('vertices', 'faces')

    def location_length(self, location):
        """Return correct data length based on location"""
        return self.num_cells if location == 'faces' else self.num_nodes

    @property
    def num_nodes(self):
        """get number of nodes"""
        raise NotImplementedError()

    @property
    def num_cells(self):
        """get number of cells"""
        raise NotImplementedError()
class SurfaceElement(BaseSurfaceElement):                      #pylint: disable=too-many-ancestors
    """Contains triangulated surface spatial information and attributes"""

    vertices = properties.Instance(
        'Spatial coordinates of vertices relative to surface origin',
        Vector3Array,
    )
    triangles = properties.Instance(
        'Vertex indices of surface triangles',
        Int3Array,
    )

    @property
    def num_nodes(self):
        """get number of nodes"""
        return len(self.vertices)

    @property
    def num_cells(self):
        """get number of cells"""
        return len(self.triangles)

    @properties.validator
    def _validate_mesh(self):
        """Check that triangle indices reference valid vertices."""
        tri = self.triangles.array
        if np.min(tri) < 0:
            raise properties.ValidationError('Triangles may only have positive integers')
        if np.max(tri) >= len(self.vertices.array):
            raise properties.ValidationError('Triangles expects more vertices than provided')
        return True
class SurfaceGridElement(BaseSurfaceElement):                  #pylint: disable=too-many-ancestors
    """Contains 2D grid spatial information and attributes"""

    tensor_u = properties.Array(
        'Grid cell widths, u-direction',
        shape=('*',),
        dtype=float,
    )
    tensor_v = properties.Array(
        'Grid cell widths, v-direction',
        shape=('*',),
        dtype=float,
    )
    axis_u = properties.Vector3(
        'Vector orientation of u-direction',
        default='X',
        length=1,
    )
    axis_v = properties.Vector3(
        'Vector orientation of v-direction',
        default='Y',
        length=1,
    )
    offset_w = properties.Instance(
        'Node offset',
        ScalarArray,
        required=False,
    )
    origin = properties.Vector3(
        'Origin of the Mesh relative to Project coordinate reference system',
        default=[0., 0., 0.],
    )

    @property
    def num_nodes(self):
        """Number of nodes (vertices)"""
        n_u = len(self.tensor_u) + 1
        n_v = len(self.tensor_v) + 1
        return n_u * n_v

    @property
    def num_cells(self):
        """Number of cells (faces)"""
        return len(self.tensor_u) * len(self.tensor_v)

    @properties.validator
    def _validate_mesh(self):
        """Check if mesh content is built correctly"""
        # Keep the `not ... < tol` form so a NaN dot product also raises.
        if not np.abs(self.axis_u.dot(self.axis_v)) < 1e-6:       #pylint: disable=no-member
            raise properties.ValidationError('axis_u and axis_v must be orthogonal')
        offset = self.offset_w
        if offset is properties.undefined or offset is None:
            return True
        if len(offset.array) != self.num_nodes:
            raise properties.ValidationError(
                'Length of offset_w, {zlen}, must equal number of nodes, '
                '{nnode}'.format(
                    zlen=len(offset),
                    nnode=self.num_nodes
                )
            )
        return True
| [
"properties.Instance",
"properties.Vector3",
"properties.StringChoice",
"numpy.max",
"numpy.min",
"properties.Array",
"properties.ValidationError"
] | [((479, 570), 'properties.StringChoice', 'properties.StringChoice', (['"""Category of Surface"""'], {'choices': "('surface',)", 'default': '"""surface"""'}), "('Category of Surface', choices=('surface',),\n default='surface')\n", (502, 570), False, 'import properties\n'), ((1268, 1368), 'properties.Instance', 'properties.Instance', (['"""Spatial coordinates of vertices relative to surface origin"""', 'Vector3Array'], {}), "(\n 'Spatial coordinates of vertices relative to surface origin', Vector3Array)\n", (1287, 1368), False, 'import properties\n'), ((1403, 1472), 'properties.Instance', 'properties.Instance', (['"""Vertex indices of surface triangles"""', 'Int3Array'], {}), "('Vertex indices of surface triangles', Int3Array)\n", (1422, 1472), False, 'import properties\n'), ((2282, 2358), 'properties.Array', 'properties.Array', (['"""Grid cell widths, u-direction"""'], {'shape': "('*',)", 'dtype': 'float'}), "('Grid cell widths, u-direction', shape=('*',), dtype=float)\n", (2298, 2358), False, 'import properties\n'), ((2405, 2481), 'properties.Array', 'properties.Array', (['"""Grid cell widths, v-direction"""'], {'shape': "('*',)", 'dtype': 'float'}), "('Grid cell widths, v-direction', shape=('*',), dtype=float)\n", (2421, 2481), False, 'import properties\n'), ((2526, 2604), 'properties.Vector3', 'properties.Vector3', (['"""Vector orientation of u-direction"""'], {'default': '"""X"""', 'length': '(1)'}), "('Vector orientation of u-direction', default='X', length=1)\n", (2544, 2604), False, 'import properties\n'), ((2649, 2727), 'properties.Vector3', 'properties.Vector3', (['"""Vector orientation of v-direction"""'], {'default': '"""Y"""', 'length': '(1)'}), "('Vector orientation of v-direction', default='Y', length=1)\n", (2667, 2727), False, 'import properties\n'), ((2774, 2837), 'properties.Instance', 'properties.Instance', (['"""Node offset"""', 'ScalarArray'], {'required': '(False)'}), "('Node offset', ScalarArray, required=False)\n", (2793, 2837), False, 
'import properties\n'), ((2882, 3004), 'properties.Vector3', 'properties.Vector3', (['"""Origin of the Mesh relative to Project coordinate reference system"""'], {'default': '[0.0, 0.0, 0.0]'}), "(\n 'Origin of the Mesh relative to Project coordinate reference system',\n default=[0.0, 0.0, 0.0])\n", (2900, 3004), False, 'import properties\n'), ((1781, 1809), 'numpy.min', 'np.min', (['self.triangles.array'], {}), '(self.triangles.array)\n', (1787, 1809), True, 'import numpy as np\n'), ((1833, 1904), 'properties.ValidationError', 'properties.ValidationError', (['"""Triangles may only have positive integers"""'], {}), "('Triangles may only have positive integers')\n", (1859, 1904), False, 'import properties\n'), ((1916, 1944), 'numpy.max', 'np.max', (['self.triangles.array'], {}), '(self.triangles.array)\n', (1922, 1944), True, 'import numpy as np\n'), ((1992, 2067), 'properties.ValidationError', 'properties.ValidationError', (['"""Triangles expects more vertices than provided"""'], {}), "('Triangles expects more vertices than provided')\n", (2018, 2067), False, 'import properties\n'), ((3529, 3595), 'properties.ValidationError', 'properties.ValidationError', (['"""axis_u and axis_v must be orthogonal"""'], {}), "('axis_u and axis_v must be orthogonal')\n", (3555, 3595), False, 'import properties\n')] |
import numpy as np
from pyecsca.sca import (
align_correlation,
align_peaks,
align_sad,
align_dtw_scale,
align_dtw,
Trace,
InspectorTraceSet,
)
from .utils import Plottable, slow
class AlignTests(Plottable):
    # Tests for the trace-alignment helpers imported from pyecsca.sca.

    def test_align(self):
        # Three int8 traces: `b` shares `a`'s peak shape at a different offset,
        # while `c` does not correlate with the reference window.
        first_arr = np.array(
            [10, 64, 120, 64, 10, 10, 10, 10, 10], dtype=np.dtype("i1")
        )
        second_arr = np.array([10, 10, 10, 10, 50, 80, 50, 20], dtype=np.dtype("i1"))
        third_arr = np.array([70, 30, 42, 35, 28, 21, 15, 10, 5], dtype=np.dtype("i1"))
        a = Trace(first_arr)
        b = Trace(second_arr)
        c = Trace(third_arr)
        result, offsets = align_correlation(
            a,
            b,
            c,
            reference_offset=1,
            reference_length=3,
            max_offset=4,
            min_correlation=0.65,
        )
        self.assertIsNotNone(result)
        # Only two traces survive: `c` falls below min_correlation.
        self.assertEqual(len(result), 2)
        # The reference trace is returned unchanged.
        np.testing.assert_equal(result[0].samples, first_arr)
        # `b` is shifted left by 3 samples and zero-padded at the end.
        np.testing.assert_equal(
            result[1].samples,
            np.array([10, 50, 80, 50, 20, 0, 0, 0], dtype=np.dtype("i1")),
        )
        self.assertEqual(len(offsets), 2)
        self.assertEqual(offsets[0], 0)
        self.assertEqual(offsets[1], 3)

    @slow
    def test_large_align(self):
        # Smoke test on a real captured trace set: only checks that
        # correlation alignment completes and returns a result.
        example = InspectorTraceSet.read("test/data/example.trs")
        result, _ = align_correlation(
            *example, reference_offset=100000, reference_length=20000, max_offset=15000
        )
        self.assertIsNotNone(result)

    @slow
    def test_large_dtw_align(self):
        # Smoke test for DTW alignment on the first 5 real traces.
        example = InspectorTraceSet.read("test/data/example.trs")
        result = align_dtw(*example[:5])
        self.assertIsNotNone(result)

    def test_peak_align(self):
        first_arr = np.array(
            [10, 64, 14, 120, 15, 30, 10, 15, 20, 15, 15, 10, 10], dtype=np.dtype("i1")
        )
        second_arr = np.array(
            [10, 10, 10, 10, 90, 40, 50, 20, 10, 17, 16, 10], dtype=np.dtype("i1")
        )
        a = Trace(first_arr)
        b = Trace(second_arr)
        result, _ = align_peaks(
            a, b, reference_offset=2, reference_length=5, max_offset=3
        )
        # After peak alignment the dominant peaks must coincide.
        self.assertEqual(np.argmax(result[0].samples), np.argmax(result[1].samples))

    def test_sad_align(self):
        first_arr = np.array(
            [10, 64, 14, 120, 15, 30, 10, 15, 20, 15, 15, 10, 10], dtype=np.dtype("i1")
        )
        second_arr = np.array(
            [10, 10, 90, 40, 50, 20, 10, 17, 16, 10, 10], dtype=np.dtype("i1")
        )
        a = Trace(first_arr)
        b = Trace(second_arr)
        result, _ = align_sad(
            a, b, reference_offset=2, reference_length=5, max_offset=3
        )
        # SAD alignment keeps both traces here; only the count is checked.
        self.assertEqual(len(result), 2)

    def test_dtw_align_scale(self):
        # Half-float (f2) traces for amplitude-scaling DTW alignment.
        first_arr = np.array(
            [10, 64, 14, 120, 15, 30, 10, 15, 20, 15, 15, 10, 10, 8, 10, 12, 10, 13, 9],
            dtype=np.dtype("f2"),
        )
        second_arr = np.array(
            [10, 10, 60, 40, 90, 20, 10, 17, 16, 10, 10, 10, 10, 10, 17, 12, 10],
            dtype=np.dtype("f2"),
        )
        third_arr = np.array(
            [10, 30, 20, 21, 15, 8, 10, 37, 21, 77, 20, 28, 25, 10, 9, 10, 15, 9, 10],
            dtype=np.dtype("f2"),
        )
        a = Trace(first_arr)
        b = Trace(second_arr)
        c = Trace(third_arr)
        result = align_dtw_scale(a, b, c)
        # Peaks of all aligned traces should line up (fast variant).
        self.assertEqual(np.argmax(result[0].samples), np.argmax(result[1].samples))
        self.assertEqual(np.argmax(result[1].samples), np.argmax(result[2].samples))
        self.plot(*result)
        # Same check with the exact (fast=False) implementation.
        result_other = align_dtw_scale(a, b, c, fast=False)
        self.assertEqual(
            np.argmax(result_other[0].samples), np.argmax(result_other[1].samples)
        )
        self.assertEqual(
            np.argmax(result_other[1].samples), np.argmax(result_other[2].samples)
        )
        self.plot(*result_other)

    def test_dtw_align(self):
        # int8 traces for plain (non-scaling) DTW alignment.
        first_arr = np.array(
            [10, 64, 14, 120, 15, 30, 10, 15, 20, 15, 15, 10, 10, 8, 10, 12, 10, 13, 9],
            dtype=np.dtype("i1"),
        )
        second_arr = np.array(
            [10, 10, 60, 40, 90, 20, 10, 17, 16, 10, 10, 10, 10, 10, 17, 12, 10],
            dtype=np.dtype("i1"),
        )
        third_arr = np.array(
            [10, 30, 20, 21, 15, 8, 10, 47, 21, 77, 20, 28, 25, 10, 9, 10, 15, 9, 10],
            dtype=np.dtype("i1"),
        )
        a = Trace(first_arr)
        b = Trace(second_arr)
        c = Trace(third_arr)
        result = align_dtw(a, b, c)
        # Peaks of all aligned traces should line up (fast variant).
        self.assertEqual(np.argmax(result[0].samples), np.argmax(result[1].samples))
        self.assertEqual(np.argmax(result[1].samples), np.argmax(result[2].samples))
        self.plot(*result)
        # Same check with the exact (fast=False) implementation.
        result_other = align_dtw(a, b, c, fast=False)
        self.assertEqual(
            np.argmax(result_other[0].samples), np.argmax(result_other[1].samples)
        )
        self.assertEqual(
            np.argmax(result_other[1].samples), np.argmax(result_other[2].samples)
        )
        self.plot(*result_other)
| [
"pyecsca.sca.Trace",
"numpy.dtype",
"numpy.testing.assert_equal",
"pyecsca.sca.align_dtw",
"pyecsca.sca.InspectorTraceSet.read",
"numpy.argmax",
"pyecsca.sca.align_sad",
"pyecsca.sca.align_correlation",
"pyecsca.sca.align_dtw_scale",
"pyecsca.sca.align_peaks"
] | [((562, 578), 'pyecsca.sca.Trace', 'Trace', (['first_arr'], {}), '(first_arr)\n', (567, 578), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((591, 608), 'pyecsca.sca.Trace', 'Trace', (['second_arr'], {}), '(second_arr)\n', (596, 608), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((621, 637), 'pyecsca.sca.Trace', 'Trace', (['third_arr'], {}), '(third_arr)\n', (626, 637), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((664, 770), 'pyecsca.sca.align_correlation', 'align_correlation', (['a', 'b', 'c'], {'reference_offset': '(1)', 'reference_length': '(3)', 'max_offset': '(4)', 'min_correlation': '(0.65)'}), '(a, b, c, reference_offset=1, reference_length=3,\n max_offset=4, min_correlation=0.65)\n', (681, 770), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((948, 1001), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result[0].samples', 'first_arr'], {}), '(result[0].samples, first_arr)\n', (971, 1001), True, 'import numpy as np\n'), ((1334, 1381), 'pyecsca.sca.InspectorTraceSet.read', 'InspectorTraceSet.read', (['"""test/data/example.trs"""'], {}), "('test/data/example.trs')\n", (1356, 1381), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((1402, 1500), 'pyecsca.sca.align_correlation', 'align_correlation', (['*example'], {'reference_offset': '(100000)', 'reference_length': '(20000)', 'max_offset': '(15000)'}), '(*example, reference_offset=100000, reference_length=20000,\n max_offset=15000)\n', (1419, 1500), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, 
InspectorTraceSet\n'), ((1621, 1668), 'pyecsca.sca.InspectorTraceSet.read', 'InspectorTraceSet.read', (['"""test/data/example.trs"""'], {}), "('test/data/example.trs')\n", (1643, 1668), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((1686, 1709), 'pyecsca.sca.align_dtw', 'align_dtw', (['*example[:5]'], {}), '(*example[:5])\n', (1695, 1709), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((2043, 2059), 'pyecsca.sca.Trace', 'Trace', (['first_arr'], {}), '(first_arr)\n', (2048, 2059), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((2072, 2089), 'pyecsca.sca.Trace', 'Trace', (['second_arr'], {}), '(second_arr)\n', (2077, 2089), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((2110, 2181), 'pyecsca.sca.align_peaks', 'align_peaks', (['a', 'b'], {'reference_offset': '(2)', 'reference_length': '(5)', 'max_offset': '(3)'}), '(a, b, reference_offset=2, reference_length=5, max_offset=3)\n', (2121, 2181), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((2580, 2596), 'pyecsca.sca.Trace', 'Trace', (['first_arr'], {}), '(first_arr)\n', (2585, 2596), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((2609, 2626), 'pyecsca.sca.Trace', 'Trace', (['second_arr'], {}), '(second_arr)\n', (2614, 2626), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((2647, 2716), 'pyecsca.sca.align_sad', 'align_sad', (['a', 'b'], {'reference_offset': '(2)', 'reference_length': '(5)', 'max_offset': '(3)'}), '(a, b, 
reference_offset=2, reference_length=5, max_offset=3)\n', (2656, 2716), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((3310, 3326), 'pyecsca.sca.Trace', 'Trace', (['first_arr'], {}), '(first_arr)\n', (3315, 3326), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((3339, 3356), 'pyecsca.sca.Trace', 'Trace', (['second_arr'], {}), '(second_arr)\n', (3344, 3356), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((3369, 3385), 'pyecsca.sca.Trace', 'Trace', (['third_arr'], {}), '(third_arr)\n', (3374, 3385), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((3403, 3427), 'pyecsca.sca.align_dtw_scale', 'align_dtw_scale', (['a', 'b', 'c'], {}), '(a, b, c)\n', (3418, 3427), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((3650, 3686), 'pyecsca.sca.align_dtw_scale', 'align_dtw_scale', (['a', 'b', 'c'], {'fast': '(False)'}), '(a, b, c, fast=False)\n', (3665, 3686), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((4483, 4499), 'pyecsca.sca.Trace', 'Trace', (['first_arr'], {}), '(first_arr)\n', (4488, 4499), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((4512, 4529), 'pyecsca.sca.Trace', 'Trace', (['second_arr'], {}), '(second_arr)\n', (4517, 4529), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((4542, 4558), 'pyecsca.sca.Trace', 'Trace', (['third_arr'], {}), '(third_arr)\n', (4547, 4558), False, 'from 
pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((4576, 4594), 'pyecsca.sca.align_dtw', 'align_dtw', (['a', 'b', 'c'], {}), '(a, b, c)\n', (4585, 4594), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((4817, 4847), 'pyecsca.sca.align_dtw', 'align_dtw', (['a', 'b', 'c'], {'fast': '(False)'}), '(a, b, c, fast=False)\n', (4826, 4847), False, 'from pyecsca.sca import align_correlation, align_peaks, align_sad, align_dtw_scale, align_dtw, Trace, InspectorTraceSet\n'), ((2229, 2257), 'numpy.argmax', 'np.argmax', (['result[0].samples'], {}), '(result[0].samples)\n', (2238, 2257), True, 'import numpy as np\n'), ((2259, 2287), 'numpy.argmax', 'np.argmax', (['result[1].samples'], {}), '(result[1].samples)\n', (2268, 2287), True, 'import numpy as np\n'), ((3454, 3482), 'numpy.argmax', 'np.argmax', (['result[0].samples'], {}), '(result[0].samples)\n', (3463, 3482), True, 'import numpy as np\n'), ((3484, 3512), 'numpy.argmax', 'np.argmax', (['result[1].samples'], {}), '(result[1].samples)\n', (3493, 3512), True, 'import numpy as np\n'), ((3539, 3567), 'numpy.argmax', 'np.argmax', (['result[1].samples'], {}), '(result[1].samples)\n', (3548, 3567), True, 'import numpy as np\n'), ((3569, 3597), 'numpy.argmax', 'np.argmax', (['result[2].samples'], {}), '(result[2].samples)\n', (3578, 3597), True, 'import numpy as np\n'), ((3726, 3760), 'numpy.argmax', 'np.argmax', (['result_other[0].samples'], {}), '(result_other[0].samples)\n', (3735, 3760), True, 'import numpy as np\n'), ((3762, 3796), 'numpy.argmax', 'np.argmax', (['result_other[1].samples'], {}), '(result_other[1].samples)\n', (3771, 3796), True, 'import numpy as np\n'), ((3845, 3879), 'numpy.argmax', 'np.argmax', (['result_other[1].samples'], {}), '(result_other[1].samples)\n', (3854, 3879), True, 'import numpy as np\n'), ((3881, 3915), 'numpy.argmax', 'np.argmax', 
(['result_other[2].samples'], {}), '(result_other[2].samples)\n', (3890, 3915), True, 'import numpy as np\n'), ((4621, 4649), 'numpy.argmax', 'np.argmax', (['result[0].samples'], {}), '(result[0].samples)\n', (4630, 4649), True, 'import numpy as np\n'), ((4651, 4679), 'numpy.argmax', 'np.argmax', (['result[1].samples'], {}), '(result[1].samples)\n', (4660, 4679), True, 'import numpy as np\n'), ((4706, 4734), 'numpy.argmax', 'np.argmax', (['result[1].samples'], {}), '(result[1].samples)\n', (4715, 4734), True, 'import numpy as np\n'), ((4736, 4764), 'numpy.argmax', 'np.argmax', (['result[2].samples'], {}), '(result[2].samples)\n', (4745, 4764), True, 'import numpy as np\n'), ((4887, 4921), 'numpy.argmax', 'np.argmax', (['result_other[0].samples'], {}), '(result_other[0].samples)\n', (4896, 4921), True, 'import numpy as np\n'), ((4923, 4957), 'numpy.argmax', 'np.argmax', (['result_other[1].samples'], {}), '(result_other[1].samples)\n', (4932, 4957), True, 'import numpy as np\n'), ((5006, 5040), 'numpy.argmax', 'np.argmax', (['result_other[1].samples'], {}), '(result_other[1].samples)\n', (5015, 5040), True, 'import numpy as np\n'), ((5042, 5076), 'numpy.argmax', 'np.argmax', (['result_other[2].samples'], {}), '(result_other[2].samples)\n', (5051, 5076), True, 'import numpy as np\n'), ((351, 365), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (359, 365), True, 'import numpy as np\n'), ((446, 460), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (454, 460), True, 'import numpy as np\n'), ((534, 548), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (542, 548), True, 'import numpy as np\n'), ((1882, 1896), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (1890, 1896), True, 'import numpy as np\n'), ((2006, 2020), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (2014, 2020), True, 'import numpy as np\n'), ((2423, 2437), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (2431, 2437), True, 'import numpy as 
np\n'), ((2543, 2557), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (2551, 2557), True, 'import numpy as np\n'), ((2954, 2968), 'numpy.dtype', 'np.dtype', (['"""f2"""'], {}), "('f2')\n", (2962, 2968), True, 'import numpy as np\n'), ((3111, 3125), 'numpy.dtype', 'np.dtype', (['"""f2"""'], {}), "('f2')\n", (3119, 3125), True, 'import numpy as np\n'), ((3272, 3286), 'numpy.dtype', 'np.dtype', (['"""f2"""'], {}), "('f2')\n", (3280, 3286), True, 'import numpy as np\n'), ((4127, 4141), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (4135, 4141), True, 'import numpy as np\n'), ((4284, 4298), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (4292, 4298), True, 'import numpy as np\n'), ((4445, 4459), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (4453, 4459), True, 'import numpy as np\n'), ((1124, 1138), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (1132, 1138), True, 'import numpy as np\n')] |
from matplotlib.testing import setup
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
import matplotlib as mpl
import packaging.version
import pytest
import animatplot as amp
from tests.tools import animation_compare
from animatplot.blocks import Block, Title
setup()
class TestTitleBlock:
    """Exercise Title: construction, validation, formatting and mpl wiring."""

    def test_list_of_str(self):
        names = ['timestep 0', 'timestep 1']
        block = Title(names)
        assert block.titles == names
        assert len(block) == 2

    def test_invalid_input(self):
        # Neither a bare int nor a list of non-strings is acceptable.
        with pytest.raises(TypeError):
            Title(0)
        with pytest.raises(TypeError):
            Title([6, 7])

    def test_format_str(self):
        assert Title('timestep {num}', num=[1, 2]).titles == \
            ['timestep 1', 'timestep 2']
        assert Title('timestep {num}', num=[1]).titles == ['timestep 1']

    def test_no_replacements(self):
        assert Title('Name').titles == ['Name']

    def test_multiple_replacements(self):
        block = Title('timestep {num}, max density {n}',
                      num=[1, 2], n=[500, 10])
        expected = [
            'timestep {num}, max density {n}'.format(num=1, n=500),
            'timestep {num}, max density {n}'.format(num=2, n=10),
        ]
        assert block.titles == expected

    def test_string_formatting(self):
        assert Title('timestep {values:.2f}', values=[5e7]).titles == \
            ['timestep 50000000.00']

    def test_format_str_numpy_arrays(self):
        block = Title('timestep {num}', num=np.array([1, 2]))
        assert block.titles == ['timestep 1', 'timestep 2']

    # Hypothesis test that the strings are always formatted correctly?

    def test_text(self):
        # TODO test that the right type of object is produced?
        title_block = Title('timestep {num}', num=[1, 2])
        ax = plt.gca()
        assert ax.get_title() == 'timestep 1'
        title_block._update(1)
        assert ax.get_title() == 'timestep 2'
        plt.close('all')

    def test_mpl_kwargs(self):
        expected = {'loc': 'left', 'fontstyle': 'italic'}
        block = Title('timestep {num}', num=[1, 2], **expected)
        assert block._mpl_kwargs == expected
def assert_jagged_arrays_equal(x, y):
    """Assert two sequences of (possibly different-length) sub-arrays are equal.

    Parameters
    ----------
    x, y : sequences of array-like
        Compared element-wise with ``npt.assert_equal``.

    Raises
    ------
    AssertionError
        If the sequences differ in length, or any corresponding pair of
        sub-arrays differs in shape or contents.
    """
    # zip() alone would silently ignore trailing elements of the longer
    # sequence, letting unequal-length inputs pass; check lengths first.
    assert len(x) == len(y), f"length mismatch: {len(x)} != {len(y)}"
    for x_arr, y_arr in zip(x, y):
        npt.assert_equal(x_arr, y_arr)
class TestLineBlock:
    """Tests for amp.blocks.Line: construction, frame updates and validation."""

    def test_2d_inputs(self):
        """2D x/y grids are stored whole; frame 0 is what gets drawn first."""
        x = np.linspace(0, 1, 10)
        t = np.linspace(0, 1, 5)
        x_grid, t_grid = np.meshgrid(x, t)
        y_data = np.sin(2 * np.pi * (x_grid + t_grid))

        line_block = amp.blocks.Line(x_grid, y_data)

        assert isinstance(line_block, amp.blocks.Line)
        npt.assert_equal(line_block.x, x_grid)
        npt.assert_equal(line_block.y, y_data)
        assert len(line_block) == len(t)

        assert isinstance(line_block.line, mpl.lines.Line2D)
        xdata, ydata = line_block.line.get_data()
        npt.assert_equal(xdata, x)
        npt.assert_equal(ydata, y_data[0, :])

    def test_update(self):
        """_update(frame) swaps the drawn data to that time slice."""
        x = np.linspace(0, 1, 10)
        t = np.linspace(0, 1, 5)
        x_grid, t_grid = np.meshgrid(x, t)
        y_data = np.sin(2 * np.pi * (x_grid + t_grid))
        line_block = amp.blocks.Line(x_grid, y_data)

        line_block._update(frame=1)

        npt.assert_equal(line_block.line.get_xdata(), x)
        npt.assert_equal(line_block.line.get_ydata(), y_data[1, :])

    def test_constant_x(self):
        """A 1D x is reused for every frame."""
        x = np.linspace(0, 1, 10)
        t = np.linspace(0, 1, 5)
        x_grid, t_grid = np.meshgrid(x, t)
        y_data = np.sin(2 * np.pi * (x_grid + t_grid))

        line_block = amp.blocks.Line(x, y_data)

        npt.assert_equal(line_block.line.get_xdata(), x)
        npt.assert_equal(line_block.x[-1], x)

    def test_no_x_input(self):
        """Without x data, integer indices 0..N-1 are used."""
        x = np.linspace(0, 1, 10)
        t = np.linspace(0, 1, 5)
        x_grid, t_grid = np.meshgrid(x, t)
        y_data = np.sin(2 * np.pi * (x_grid + t_grid))

        line_block = amp.blocks.Line(y_data)

        expected_x = np.arange(10)
        npt.assert_equal(line_block.line.get_xdata(), expected_x)

    def test_list_input(self):
        """Lists of equal-length arrays are stacked into 2D arrays."""
        x_data = [np.array([1, 2, 3]), np.array([1, 2, 3])]
        y_data = [np.array([5, 6, 7]), np.array([4, 2, 9])]

        line_block = amp.blocks.Line(x_data, y_data)

        npt.assert_equal(line_block.y, np.array([[5, 6, 7], [4, 2, 9]]))
        npt.assert_equal(line_block.x, np.array([[1, 2, 3], [1, 2, 3]]))

    def test_ragged_list_input(self):
        """Ragged y data requires explicit x data of matching lengths."""
        x_data = [np.array([1, 2, 3]), np.array([1, 2, 3, 4])]
        y_data = [np.array([5, 6, 7]), np.array([4, 2, 9, 10])]
        with pytest.raises(ValueError) as err:
            line_block = amp.blocks.Line(y_data)
        assert "Must specify x data explicitly" in str(err)

        line_block = amp.blocks.Line(x_data, y_data)
        # BUG FIX: np.array() on ragged input raises ValueError on
        # numpy >= 1.24; build the object arrays explicitly.
        assert_jagged_arrays_equal(line_block.x, np.array(x_data, dtype=object))
        assert_jagged_arrays_equal(line_block.y, np.array(y_data, dtype=object))

    def test_bad_ragged_list_input(self):
        """x & y sub-arrays of mismatched lengths are rejected."""
        # BUG FIX: dtype=object is required for ragged arrays on numpy >= 1.24.
        x_data = np.array([np.array([1, 2, 3]), np.array([1, 2, 3, 4])],
                          dtype=object)
        y_data = np.array([np.array([5, 6, 7]), np.array([4, 2, 9, 10, 11])],
                          dtype=object)
        with pytest.raises(ValueError) as err:
            line_block = amp.blocks.Line(x_data, y_data)
        assert "x & y data must match" in str(err)

    def test_bad_input(self):
        """Invalid argument combinations raise ValueError with clear messages."""
        # incorrect number of args
        with pytest.raises(ValueError) as err:
            amp.blocks.Line(1, 2, 3)
        assert 'Invalid data arguments' in str(err.value)
        with pytest.raises(ValueError) as err:
            amp.blocks.Line()
        assert 'Invalid data arguments' in str(err.value)

        # No y data
        with pytest.raises(ValueError) as err:
            amp.blocks.Line(np.arange(5), None)
        assert 'Must supply y data' in str(err.value)
        with pytest.raises(ValueError) as err:
            amp.blocks.Line(None)
        assert 'Must supply y data' in str(err.value)

        # y data not 2d
        with pytest.raises(ValueError) as err:
            amp.blocks.Line(np.arange(5), np.random.randn(5, 2, 2))
        assert 'y data must be 2-dimensional' in str(err.value)

        # 1d x doesn't match y
        with pytest.raises(ValueError) as err:
            amp.blocks.Line(np.arange(5), np.random.randn(4, 2))
        assert 'dimensions of x must be compatible' in str(err.value)

        # 2d x doesn't match y
        with pytest.raises(ValueError) as err:
            x = np.array([np.arange(5), np.arange(5)])
            amp.blocks.Line(x, np.random.randn(4, 2), t_axis=1)
        assert 'dimensions of x must be compatible' in str(err.value)

    def test_kwarg_throughput(self):
        """Extra keyword args are forwarded to the underlying Line2D."""
        x = np.array([np.arange(5), np.arange(5)])
        line_block = amp.blocks.Line(x, np.random.randn(2, 5), t_axis=1,
                                     alpha=0.5)
        assert line_block.line.get_alpha() == 0.5
class TestComparisons:
    """Image-comparison tests: each method builds a short animation and
    @animation_compare renders it against stored baseline frames."""

    @animation_compare(baseline_images='Blocks/Line', nframes=5)
    def test_Line(self):
        x = np.linspace(0, 2*np.pi, 20)
        t = np.linspace(0, 2*np.pi, 5)
        X, T = np.meshgrid(x, t)
        Y = np.sin(X+T)
        block = amp.blocks.Line(X, Y)
        return amp.Animation([block])

    @animation_compare(baseline_images='Blocks/Pcolormesh', nframes=3)
    def test_Pcolormesh(self):
        x = np.linspace(-2*np.pi, 2*np.pi, 100)
        t = np.linspace(0, 2*np.pi, 3)
        X, Y, T = np.meshgrid(x, x, t)
        Z = np.sin(X**2+Y**2-T)
        block = amp.blocks.Pcolormesh(X[:, :, 0], Y[:, :, 0], Z, t_axis=2)
        return amp.Animation([block])

    @animation_compare(baseline_images='Blocks/Pcolormesh_corner', nframes=3)
    def test_Pcolormesh_corner_positions(self):
        # Test with size of Z being (nx-1)*(ny-1) like matplotlib expects for 'flat'
        # shading
        x = np.linspace(-2*np.pi, 2*np.pi, 10)
        t = np.linspace(0, 2*np.pi, 3)
        X, Y, T = np.meshgrid(x, x, t)
        Z = np.sin(X**2+Y**2-T)[:-1, :-1, :]
        block = amp.blocks.Pcolormesh(X[:, :, 0], Y[:, :, 0], Z, t_axis=2)
        return amp.Animation([block])

    @pytest.mark.skipif(
        packaging.version.parse(mpl.__version__) < packaging.version.parse("3.3.0"),
        reason="matplotlib version too low - does not have shading='nearest'"
    )
    @animation_compare(baseline_images='Blocks/Pcolormesh_nearest', nframes=3)
    def test_Pcolormesh_nearest(self):
        x = np.linspace(-2*np.pi, 2*np.pi, 100)
        t = np.linspace(0, 2*np.pi, 3)
        X, Y, T = np.meshgrid(x, x, t)
        Z = np.sin(X**2+Y**2-T)
        block = amp.blocks.Pcolormesh(
            X[:, :, 0], Y[:, :, 0], Z, t_axis=2, shading="nearest"
        )
        return amp.Animation([block])

    @pytest.mark.skipif(
        packaging.version.parse(mpl.__version__) < packaging.version.parse("3.3.0"),
        reason="matplotlib version too low - does not have shading='nearest'"
    )
    @animation_compare(baseline_images='Blocks/Pcolormesh_auto', nframes=3)
    def test_Pcolormesh_auto(self):
        # BUG FIX: this method was a duplicate `test_Pcolormesh_nearest`,
        # which shadowed the method above so the shading='nearest' test
        # never ran; renamed to match its baseline and shading mode.
        x = np.linspace(-2*np.pi, 2*np.pi, 10)
        t = np.linspace(0, 2*np.pi, 3)
        X, Y, T = np.meshgrid(x, x, t)
        Z = np.sin(X**2+Y**2-T)
        block = amp.blocks.Pcolormesh(
            X[:, :, 0], Y[:, :, 0], Z, t_axis=2, shading="auto"
        )
        return amp.Animation([block])

    @pytest.mark.skipif(
        packaging.version.parse(mpl.__version__) < packaging.version.parse("3.3.0"),
        reason="matplotlib version too low - shading='gouraud' does not work before 3.3"
    )
    @animation_compare(baseline_images='Blocks/Pcolormesh_gouraud', nframes=1)
    def test_Pcolormesh_gouraud(self):
        x = np.linspace(-2*np.pi, 2*np.pi, 100)
        t = np.linspace(0, 2*np.pi, 1)
        X, Y, T = np.meshgrid(x, x, t)
        Z = np.sin(X**2+Y**2-T)
        block = amp.blocks.Pcolormesh(
            X[:, :, 0], Y[:, :, 0], Z, t_axis=2, shading="gouraud"
        )
        return amp.Animation([block])

    @animation_compare(baseline_images='Blocks/Imshow', nframes=3)
    def test_Imshow(self):
        x = np.linspace(0, 1, 10)
        X, Y = np.meshgrid(x, x)
        U = []
        for i in range(3):
            U.append(X**2+Y**2+i)
        block = amp.blocks.Imshow(U)
        return amp.Animation([block])

    @animation_compare(baseline_images='Blocks/Quiver', nframes=4)
    def test_Quiver(self):
        x = np.linspace(0, 1, 10)
        X, Y = np.meshgrid(x, x)
        U, V = [], []
        for i in range(4):
            U.append(X**2+Y**2+i)
            V.append(X**2+Y**2+i)
        block = amp.blocks.Quiver(X, Y, U, V)
        return amp.Animation([block])

    @animation_compare(baseline_images='Blocks/Nuke', nframes=3)
    def test_Nuke(self):
        ax = plt.gca()
        sizes = []
        def animate(i):
            sizes.append(i+1)
            ax.set_aspect("equal")
            ax.pie(sizes)
        block = amp.blocks.Nuke(animate, length=3, ax=ax)
        return amp.Animation([block])
| [
"numpy.testing.assert_equal",
"animatplot.blocks.Line",
"numpy.array",
"tests.tools.animation_compare",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.gca",
"pytest.raises",
"animatplot.blocks.Pcolormesh",
"animatplot.blocks.Qui... | [((294, 301), 'matplotlib.testing.setup', 'setup', ([], {}), '()\n', (299, 301), False, 'from matplotlib.testing import setup\n'), ((6974, 7033), 'tests.tools.animation_compare', 'animation_compare', ([], {'baseline_images': '"""Blocks/Line"""', 'nframes': '(5)'}), "(baseline_images='Blocks/Line', nframes=5)\n", (6991, 7033), False, 'from tests.tools import animation_compare\n'), ((7278, 7343), 'tests.tools.animation_compare', 'animation_compare', ([], {'baseline_images': '"""Blocks/Pcolormesh"""', 'nframes': '(3)'}), "(baseline_images='Blocks/Pcolormesh', nframes=3)\n", (7295, 7343), False, 'from tests.tools import animation_compare\n'), ((7654, 7726), 'tests.tools.animation_compare', 'animation_compare', ([], {'baseline_images': '"""Blocks/Pcolormesh_corner"""', 'nframes': '(3)'}), "(baseline_images='Blocks/Pcolormesh_corner', nframes=3)\n", (7671, 7726), False, 'from tests.tools import animation_compare\n'), ((8363, 8436), 'tests.tools.animation_compare', 'animation_compare', ([], {'baseline_images': '"""Blocks/Pcolormesh_nearest"""', 'nframes': '(3)'}), "(baseline_images='Blocks/Pcolormesh_nearest', nframes=3)\n", (8380, 8436), False, 'from tests.tools import animation_compare\n'), ((8990, 9060), 'tests.tools.animation_compare', 'animation_compare', ([], {'baseline_images': '"""Blocks/Pcolormesh_auto"""', 'nframes': '(3)'}), "(baseline_images='Blocks/Pcolormesh_auto', nframes=3)\n", (9007, 9060), False, 'from tests.tools import animation_compare\n'), ((9621, 9694), 'tests.tools.animation_compare', 'animation_compare', ([], {'baseline_images': '"""Blocks/Pcolormesh_gouraud"""', 'nframes': '(1)'}), "(baseline_images='Blocks/Pcolormesh_gouraud', nframes=1)\n", (9638, 9694), False, 'from tests.tools import animation_compare\n'), ((10054, 10115), 'tests.tools.animation_compare', 'animation_compare', ([], {'baseline_images': '"""Blocks/Imshow"""', 'nframes': '(3)'}), "(baseline_images='Blocks/Imshow', nframes=3)\n", (10071, 10115), False, 
'from tests.tools import animation_compare\n'), ((10369, 10430), 'tests.tools.animation_compare', 'animation_compare', ([], {'baseline_images': '"""Blocks/Quiver"""', 'nframes': '(4)'}), "(baseline_images='Blocks/Quiver', nframes=4)\n", (10386, 10430), False, 'from tests.tools import animation_compare\n'), ((10734, 10793), 'tests.tools.animation_compare', 'animation_compare', ([], {'baseline_images': '"""Blocks/Nuke"""', 'nframes': '(3)'}), "(baseline_images='Blocks/Nuke', nframes=3)\n", (10751, 10793), False, 'from tests.tools import animation_compare\n'), ((421, 434), 'animatplot.blocks.Title', 'Title', (['labels'], {}), '(labels)\n', (426, 434), False, 'from animatplot.blocks import Block, Title\n'), ((1870, 1905), 'animatplot.blocks.Title', 'Title', (['"""timestep {num}"""'], {'num': '[1, 2]'}), "('timestep {num}', num=[1, 2])\n", (1875, 1905), False, 'from animatplot.blocks import Block, Title\n'), ((1920, 1929), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1927, 1929), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2077), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2070, 2077), True, 'import matplotlib.pyplot as plt\n'), ((2185, 2232), 'animatplot.blocks.Title', 'Title', (['"""timestep {num}"""'], {'num': '[1, 2]'}), "('timestep {num}', num=[1, 2], **expected)\n", (2190, 2232), False, 'from animatplot.blocks import Block, Title\n'), ((2354, 2376), 'numpy.testing.assert_equal', 'npt.assert_equal', (['x', 'y'], {}), '(x, y)\n', (2370, 2376), True, 'import numpy.testing as npt\n'), ((2442, 2463), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (2453, 2463), True, 'import numpy as np\n'), ((2476, 2496), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (2487, 2496), True, 'import numpy as np\n'), ((2522, 2539), 'numpy.meshgrid', 'np.meshgrid', (['x', 't'], {}), '(x, t)\n', (2533, 2539), True, 'import numpy as np\n'), ((2557, 2594), 'numpy.sin', 'np.sin', (['(2 
* np.pi * (x_grid + t_grid))'], {}), '(2 * np.pi * (x_grid + t_grid))\n', (2563, 2594), True, 'import numpy as np\n'), ((2617, 2648), 'animatplot.blocks.Line', 'amp.blocks.Line', (['x_grid', 'y_data'], {}), '(x_grid, y_data)\n', (2632, 2648), True, 'import animatplot as amp\n'), ((2713, 2751), 'numpy.testing.assert_equal', 'npt.assert_equal', (['line_block.x', 'x_grid'], {}), '(line_block.x, x_grid)\n', (2729, 2751), True, 'import numpy.testing as npt\n'), ((2760, 2798), 'numpy.testing.assert_equal', 'npt.assert_equal', (['line_block.y', 'y_data'], {}), '(line_block.y, y_data)\n', (2776, 2798), True, 'import numpy.testing as npt\n'), ((2960, 2986), 'numpy.testing.assert_equal', 'npt.assert_equal', (['xdata', 'x'], {}), '(xdata, x)\n', (2976, 2986), True, 'import numpy.testing as npt\n'), ((2995, 3032), 'numpy.testing.assert_equal', 'npt.assert_equal', (['ydata', 'y_data[0, :]'], {}), '(ydata, y_data[0, :])\n', (3011, 3032), True, 'import numpy.testing as npt\n'), ((3073, 3094), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (3084, 3094), True, 'import numpy as np\n'), ((3107, 3127), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (3118, 3127), True, 'import numpy as np\n'), ((3153, 3170), 'numpy.meshgrid', 'np.meshgrid', (['x', 't'], {}), '(x, t)\n', (3164, 3170), True, 'import numpy as np\n'), ((3188, 3225), 'numpy.sin', 'np.sin', (['(2 * np.pi * (x_grid + t_grid))'], {}), '(2 * np.pi * (x_grid + t_grid))\n', (3194, 3225), True, 'import numpy as np\n'), ((3248, 3279), 'animatplot.blocks.Line', 'amp.blocks.Line', (['x_grid', 'y_data'], {}), '(x_grid, y_data)\n', (3263, 3279), True, 'import animatplot as amp\n'), ((3486, 3507), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (3497, 3507), True, 'import numpy as np\n'), ((3520, 3540), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (3531, 3540), True, 'import numpy as np\n'), ((3566, 3583), 
'numpy.meshgrid', 'np.meshgrid', (['x', 't'], {}), '(x, t)\n', (3577, 3583), True, 'import numpy as np\n'), ((3601, 3638), 'numpy.sin', 'np.sin', (['(2 * np.pi * (x_grid + t_grid))'], {}), '(2 * np.pi * (x_grid + t_grid))\n', (3607, 3638), True, 'import numpy as np\n'), ((3661, 3687), 'animatplot.blocks.Line', 'amp.blocks.Line', (['x', 'y_data'], {}), '(x, y_data)\n', (3676, 3687), True, 'import animatplot as amp\n'), ((3754, 3791), 'numpy.testing.assert_equal', 'npt.assert_equal', (['line_block.x[-1]', 'x'], {}), '(line_block.x[-1], x)\n', (3770, 3791), True, 'import numpy.testing as npt\n'), ((3836, 3857), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (3847, 3857), True, 'import numpy as np\n'), ((3870, 3890), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (3881, 3890), True, 'import numpy as np\n'), ((3916, 3933), 'numpy.meshgrid', 'np.meshgrid', (['x', 't'], {}), '(x, t)\n', (3927, 3933), True, 'import numpy as np\n'), ((3951, 3988), 'numpy.sin', 'np.sin', (['(2 * np.pi * (x_grid + t_grid))'], {}), '(2 * np.pi * (x_grid + t_grid))\n', (3957, 3988), True, 'import numpy as np\n'), ((4011, 4034), 'animatplot.blocks.Line', 'amp.blocks.Line', (['y_data'], {}), '(y_data)\n', (4026, 4034), True, 'import animatplot as amp\n'), ((4057, 4070), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4066, 4070), True, 'import numpy as np\n'), ((4310, 4341), 'animatplot.blocks.Line', 'amp.blocks.Line', (['x_data', 'y_data'], {}), '(x_data, y_data)\n', (4325, 4341), True, 'import animatplot as amp\n'), ((4833, 4864), 'animatplot.blocks.Line', 'amp.blocks.Line', (['x_data', 'y_data'], {}), '(x_data, y_data)\n', (4848, 4864), True, 'import animatplot as amp\n'), ((7071, 7100), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(20)'], {}), '(0, 2 * np.pi, 20)\n', (7082, 7100), True, 'import numpy as np\n'), ((7111, 7139), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(5)'], {}), '(0, 2 * np.pi, 
5)\n', (7122, 7139), True, 'import numpy as np\n'), ((7154, 7171), 'numpy.meshgrid', 'np.meshgrid', (['x', 't'], {}), '(x, t)\n', (7165, 7171), True, 'import numpy as np\n'), ((7184, 7197), 'numpy.sin', 'np.sin', (['(X + T)'], {}), '(X + T)\n', (7190, 7197), True, 'import numpy as np\n'), ((7212, 7233), 'animatplot.blocks.Line', 'amp.blocks.Line', (['X', 'Y'], {}), '(X, Y)\n', (7227, 7233), True, 'import animatplot as amp\n'), ((7249, 7271), 'animatplot.Animation', 'amp.Animation', (['[block]'], {}), '([block])\n', (7262, 7271), True, 'import animatplot as amp\n'), ((7387, 7426), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)', '(100)'], {}), '(-2 * np.pi, 2 * np.pi, 100)\n', (7398, 7426), True, 'import numpy as np\n'), ((7435, 7463), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(3)'], {}), '(0, 2 * np.pi, 3)\n', (7446, 7463), True, 'import numpy as np\n'), ((7481, 7501), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x', 't'], {}), '(x, x, t)\n', (7492, 7501), True, 'import numpy as np\n'), ((7514, 7541), 'numpy.sin', 'np.sin', (['(X ** 2 + Y ** 2 - T)'], {}), '(X ** 2 + Y ** 2 - T)\n', (7520, 7541), True, 'import numpy as np\n'), ((7551, 7609), 'animatplot.blocks.Pcolormesh', 'amp.blocks.Pcolormesh', (['X[:, :, 0]', 'Y[:, :, 0]', 'Z'], {'t_axis': '(2)'}), '(X[:, :, 0], Y[:, :, 0], Z, t_axis=2)\n', (7572, 7609), True, 'import animatplot as amp\n'), ((7625, 7647), 'animatplot.Animation', 'amp.Animation', (['[block]'], {}), '([block])\n', (7638, 7647), True, 'import animatplot as amp\n'), ((7890, 7928), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)', '(10)'], {}), '(-2 * np.pi, 2 * np.pi, 10)\n', (7901, 7928), True, 'import numpy as np\n'), ((7937, 7965), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(3)'], {}), '(0, 2 * np.pi, 3)\n', (7948, 7965), True, 'import numpy as np\n'), ((7983, 8003), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x', 't'], {}), '(x, x, t)\n', (7994, 8003), True, 'import numpy as np\n'), 
((8066, 8124), 'animatplot.blocks.Pcolormesh', 'amp.blocks.Pcolormesh', (['X[:, :, 0]', 'Y[:, :, 0]', 'Z'], {'t_axis': '(2)'}), '(X[:, :, 0], Y[:, :, 0], Z, t_axis=2)\n', (8087, 8124), True, 'import animatplot as amp\n'), ((8140, 8162), 'animatplot.Animation', 'amp.Animation', (['[block]'], {}), '([block])\n', (8153, 8162), True, 'import animatplot as amp\n'), ((8488, 8527), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)', '(100)'], {}), '(-2 * np.pi, 2 * np.pi, 100)\n', (8499, 8527), True, 'import numpy as np\n'), ((8536, 8564), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(3)'], {}), '(0, 2 * np.pi, 3)\n', (8547, 8564), True, 'import numpy as np\n'), ((8582, 8602), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x', 't'], {}), '(x, x, t)\n', (8593, 8602), True, 'import numpy as np\n'), ((8615, 8642), 'numpy.sin', 'np.sin', (['(X ** 2 + Y ** 2 - T)'], {}), '(X ** 2 + Y ** 2 - T)\n', (8621, 8642), True, 'import numpy as np\n'), ((8652, 8729), 'animatplot.blocks.Pcolormesh', 'amp.blocks.Pcolormesh', (['X[:, :, 0]', 'Y[:, :, 0]', 'Z'], {'t_axis': '(2)', 'shading': '"""nearest"""'}), "(X[:, :, 0], Y[:, :, 0], Z, t_axis=2, shading='nearest')\n", (8673, 8729), True, 'import animatplot as amp\n'), ((8767, 8789), 'animatplot.Animation', 'amp.Animation', (['[block]'], {}), '([block])\n', (8780, 8789), True, 'import animatplot as amp\n'), ((9112, 9150), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)', '(10)'], {}), '(-2 * np.pi, 2 * np.pi, 10)\n', (9123, 9150), True, 'import numpy as np\n'), ((9159, 9187), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(3)'], {}), '(0, 2 * np.pi, 3)\n', (9170, 9187), True, 'import numpy as np\n'), ((9205, 9225), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x', 't'], {}), '(x, x, t)\n', (9216, 9225), True, 'import numpy as np\n'), ((9238, 9265), 'numpy.sin', 'np.sin', (['(X ** 2 + Y ** 2 - T)'], {}), '(X ** 2 + Y ** 2 - T)\n', (9244, 9265), True, 'import numpy as np\n'), ((9275, 9349), 
'animatplot.blocks.Pcolormesh', 'amp.blocks.Pcolormesh', (['X[:, :, 0]', 'Y[:, :, 0]', 'Z'], {'t_axis': '(2)', 'shading': '"""auto"""'}), "(X[:, :, 0], Y[:, :, 0], Z, t_axis=2, shading='auto')\n", (9296, 9349), True, 'import animatplot as amp\n'), ((9387, 9409), 'animatplot.Animation', 'amp.Animation', (['[block]'], {}), '([block])\n', (9400, 9409), True, 'import animatplot as amp\n'), ((9746, 9785), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)', '(100)'], {}), '(-2 * np.pi, 2 * np.pi, 100)\n', (9757, 9785), True, 'import numpy as np\n'), ((9794, 9822), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1)'], {}), '(0, 2 * np.pi, 1)\n', (9805, 9822), True, 'import numpy as np\n'), ((9840, 9860), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x', 't'], {}), '(x, x, t)\n', (9851, 9860), True, 'import numpy as np\n'), ((9873, 9900), 'numpy.sin', 'np.sin', (['(X ** 2 + Y ** 2 - T)'], {}), '(X ** 2 + Y ** 2 - T)\n', (9879, 9900), True, 'import numpy as np\n'), ((9910, 9987), 'animatplot.blocks.Pcolormesh', 'amp.blocks.Pcolormesh', (['X[:, :, 0]', 'Y[:, :, 0]', 'Z'], {'t_axis': '(2)', 'shading': '"""gouraud"""'}), "(X[:, :, 0], Y[:, :, 0], Z, t_axis=2, shading='gouraud')\n", (9931, 9987), True, 'import animatplot as amp\n'), ((10025, 10047), 'animatplot.Animation', 'amp.Animation', (['[block]'], {}), '([block])\n', (10038, 10047), True, 'import animatplot as amp\n'), ((10155, 10176), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (10166, 10176), True, 'import numpy as np\n'), ((10192, 10209), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (10203, 10209), True, 'import numpy as np\n'), ((10304, 10324), 'animatplot.blocks.Imshow', 'amp.blocks.Imshow', (['U'], {}), '(U)\n', (10321, 10324), True, 'import animatplot as amp\n'), ((10340, 10362), 'animatplot.Animation', 'amp.Animation', (['[block]'], {}), '([block])\n', (10353, 10362), True, 'import animatplot as amp\n'), ((10470, 10491), 'numpy.linspace', 
'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (10481, 10491), True, 'import numpy as np\n'), ((10507, 10524), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (10518, 10524), True, 'import numpy as np\n'), ((10660, 10689), 'animatplot.blocks.Quiver', 'amp.blocks.Quiver', (['X', 'Y', 'U', 'V'], {}), '(X, Y, U, V)\n', (10677, 10689), True, 'import animatplot as amp\n'), ((10705, 10727), 'animatplot.Animation', 'amp.Animation', (['[block]'], {}), '([block])\n', (10718, 10727), True, 'import animatplot as amp\n'), ((10832, 10841), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10839, 10841), True, 'import matplotlib.pyplot as plt\n'), ((10993, 11034), 'animatplot.blocks.Nuke', 'amp.blocks.Nuke', (['animate'], {'length': '(3)', 'ax': 'ax'}), '(animate, length=3, ax=ax)\n', (11008, 11034), True, 'import animatplot as amp\n'), ((11050, 11072), 'animatplot.Animation', 'amp.Animation', (['[block]'], {}), '([block])\n', (11063, 11072), True, 'import animatplot as amp\n'), ((554, 578), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (567, 578), False, 'import pytest\n'), ((592, 600), 'animatplot.blocks.Title', 'Title', (['(0)'], {}), '(0)\n', (597, 600), False, 'from animatplot.blocks import Block, Title\n'), ((614, 638), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (627, 638), False, 'import pytest\n'), ((652, 665), 'animatplot.blocks.Title', 'Title', (['[6, 7]'], {}), '([6, 7])\n', (657, 665), False, 'from animatplot.blocks import Block, Title\n'), ((715, 750), 'animatplot.blocks.Title', 'Title', (['"""timestep {num}"""'], {'num': '[1, 2]'}), "('timestep {num}', num=[1, 2])\n", (720, 750), False, 'from animatplot.blocks import Block, Title\n'), ((830, 862), 'animatplot.blocks.Title', 'Title', (['"""timestep {num}"""'], {'num': '[1]'}), "('timestep {num}', num=[1])\n", (835, 862), False, 'from animatplot.blocks import Block, Title\n'), ((964, 977), 'animatplot.blocks.Title', 'Title', 
(['"""Name"""'], {}), "('Name')\n", (969, 977), False, 'from animatplot.blocks import Block, Title\n'), ((1079, 1144), 'animatplot.blocks.Title', 'Title', (['"""timestep {num}, max density {n}"""'], {'num': '[1, 2]', 'n': '[500, 10]'}), "('timestep {num}, max density {n}', num=[1, 2], n=[500, 10])\n", (1084, 1144), False, 'from animatplot.blocks import Block, Title\n'), ((1416, 1467), 'animatplot.blocks.Title', 'Title', (['"""timestep {values:.2f}"""'], {'values': '[50000000.0]'}), "('timestep {values:.2f}', values=[50000000.0])\n", (1421, 1467), False, 'from animatplot.blocks import Block, Title\n'), ((4187, 4206), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (4195, 4206), True, 'import numpy as np\n'), ((4208, 4227), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (4216, 4227), True, 'import numpy as np\n'), ((4247, 4266), 'numpy.array', 'np.array', (['[5, 6, 7]'], {}), '([5, 6, 7])\n', (4255, 4266), True, 'import numpy as np\n'), ((4268, 4287), 'numpy.array', 'np.array', (['[4, 2, 9]'], {}), '([4, 2, 9])\n', (4276, 4287), True, 'import numpy as np\n'), ((4381, 4413), 'numpy.array', 'np.array', (['[[5, 6, 7], [4, 2, 9]]'], {}), '([[5, 6, 7], [4, 2, 9]])\n', (4389, 4413), True, 'import numpy as np\n'), ((4454, 4486), 'numpy.array', 'np.array', (['[[1, 2, 3], [1, 2, 3]]'], {}), '([[1, 2, 3], [1, 2, 3]])\n', (4462, 4486), True, 'import numpy as np\n'), ((4545, 4564), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (4553, 4564), True, 'import numpy as np\n'), ((4566, 4588), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (4574, 4588), True, 'import numpy as np\n'), ((4608, 4627), 'numpy.array', 'np.array', (['[5, 6, 7]'], {}), '([5, 6, 7])\n', (4616, 4627), True, 'import numpy as np\n'), ((4629, 4652), 'numpy.array', 'np.array', (['[4, 2, 9, 10]'], {}), '([4, 2, 9, 10])\n', (4637, 4652), True, 'import numpy as np\n'), ((4668, 4693), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), 
'(ValueError)\n', (4681, 4693), False, 'import pytest\n'), ((4727, 4750), 'animatplot.blocks.Line', 'amp.blocks.Line', (['y_data'], {}), '(y_data)\n', (4742, 4750), True, 'import animatplot as amp\n'), ((4915, 4931), 'numpy.array', 'np.array', (['x_data'], {}), '(x_data)\n', (4923, 4931), True, 'import numpy as np\n'), ((4982, 4998), 'numpy.array', 'np.array', (['y_data'], {}), '(y_data)\n', (4990, 4998), True, 'import numpy as np\n'), ((5208, 5233), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5221, 5233), False, 'import pytest\n'), ((5267, 5298), 'animatplot.blocks.Line', 'amp.blocks.Line', (['x_data', 'y_data'], {}), '(x_data, y_data)\n', (5282, 5298), True, 'import animatplot as amp\n'), ((5429, 5454), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5442, 5454), False, 'import pytest\n'), ((5475, 5499), 'animatplot.blocks.Line', 'amp.blocks.Line', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (5490, 5499), True, 'import animatplot as amp\n'), ((5571, 5596), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5584, 5596), False, 'import pytest\n'), ((5617, 5634), 'animatplot.blocks.Line', 'amp.blocks.Line', ([], {}), '()\n', (5632, 5634), True, 'import animatplot as amp\n'), ((5727, 5752), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5740, 5752), False, 'import pytest\n'), ((5876, 5901), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5889, 5901), False, 'import pytest\n'), ((5922, 5943), 'animatplot.blocks.Line', 'amp.blocks.Line', (['None'], {}), '(None)\n', (5937, 5943), True, 'import animatplot as amp\n'), ((6036, 6061), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6049, 6061), False, 'import pytest\n'), ((6247, 6272), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6260, 6272), False, 'import pytest\n'), ((6461, 6486), 'pytest.raises', 'pytest.raises', (['ValueError'], 
{}), '(ValueError)\n', (6474, 6486), False, 'import pytest\n'), ((6813, 6834), 'numpy.random.randn', 'np.random.randn', (['(2)', '(5)'], {}), '(2, 5)\n', (6828, 6834), True, 'import numpy as np\n'), ((8016, 8043), 'numpy.sin', 'np.sin', (['(X ** 2 + Y ** 2 - T)'], {}), '(X ** 2 + Y ** 2 - T)\n', (8022, 8043), True, 'import numpy as np\n'), ((5070, 5089), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (5078, 5089), True, 'import numpy as np\n'), ((5091, 5113), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (5099, 5113), True, 'import numpy as np\n'), ((5143, 5162), 'numpy.array', 'np.array', (['[5, 6, 7]'], {}), '([5, 6, 7])\n', (5151, 5162), True, 'import numpy as np\n'), ((5164, 5191), 'numpy.array', 'np.array', (['[4, 2, 9, 10, 11]'], {}), '([4, 2, 9, 10, 11])\n', (5172, 5191), True, 'import numpy as np\n'), ((5789, 5801), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (5798, 5801), True, 'import numpy as np\n'), ((6098, 6110), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6107, 6110), True, 'import numpy as np\n'), ((6112, 6136), 'numpy.random.randn', 'np.random.randn', (['(5)', '(2)', '(2)'], {}), '(5, 2, 2)\n', (6127, 6136), True, 'import numpy as np\n'), ((6309, 6321), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6318, 6321), True, 'import numpy as np\n'), ((6323, 6344), 'numpy.random.randn', 'np.random.randn', (['(4)', '(2)'], {}), '(4, 2)\n', (6338, 6344), True, 'import numpy as np\n'), ((6581, 6602), 'numpy.random.randn', 'np.random.randn', (['(4)', '(2)'], {}), '(4, 2)\n', (6596, 6602), True, 'import numpy as np\n'), ((6744, 6756), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6753, 6756), True, 'import numpy as np\n'), ((6758, 6770), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6767, 6770), True, 'import numpy as np\n'), ((1608, 1624), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (1616, 1624), True, 'import numpy as np\n'), ((6521, 6533), 'numpy.arange', 
'np.arange', (['(5)'], {}), '(5)\n', (6530, 6533), True, 'import numpy as np\n'), ((6535, 6547), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (6544, 6547), True, 'import numpy as np\n')] |
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from affine import Affine
from odc.geo import CRS, geom
from odc.geo.geobox import (
GeoBox,
bounding_box_in_pixel_domain,
gbox_boundary,
geobox_intersection_conservative,
geobox_union_conservative,
scaled_down_geobox,
)
from odc.geo.math import apply_affine
from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm
# pylint: disable=pointless-statement,too-many-statements
def test_geobox_simple():
    """GeoBox coordinate values, CRS handling, equality and hashability."""
    res = 0.00025
    gbox = GeoBox(4000, 4000, Affine(res, 0.0, 151.0, 0.0, -res, -29.0), epsg4326)

    # Expected pixel-centre coordinates: origin + (index + 0.5) * pixel size.
    centres = np.arange(10) + 0.5
    expect_lon = 151.0 + res * centres
    expect_lat = -29.0 - res * centres
    expect_resolution = np.asarray([-res, res])

    assert gbox.coordinates["latitude"].values.shape == (4000,)
    assert gbox.coordinates["longitude"].values.shape == (4000,)

    np.testing.assert_almost_equal(gbox.resolution, expect_resolution)
    np.testing.assert_almost_equal(gbox.coords["latitude"].values[:10], expect_lat)
    np.testing.assert_almost_equal(gbox.coords["longitude"].values[:10], expect_lon)

    assert (gbox == "some random thing") is False

    # ensure GeoBox accepts string CRS
    str_crs_box = GeoBox(
        4000, 4000, Affine(res, 0.0, 151.0, 0.0, -res, -29.0), "epsg:4326"
    )
    assert isinstance(str_crs_box.crs, CRS)

    # Check GeoBox class is hashable and compares by value.
    clone = GeoBox(gbox.width, gbox.height, gbox.transform, gbox.crs)
    wider = GeoBox(gbox.width + 1, gbox.height, gbox.transform, gbox.crs)
    assert clone is not gbox
    assert gbox == clone
    assert len({gbox, gbox, clone}) == 1
    assert len({gbox, clone, wider}) == 2
def test_xy_from_geobox():
    """xy_from_gbox yields pixel-centre coords; xy_norm maps them onto [0, 1]."""
    gbox = GeoBox(3, 7, Affine.translation(10, 1000), epsg3857)
    xx, yy = xy_from_gbox(gbox)

    assert xx.shape == gbox.shape
    assert yy.shape == gbox.shape
    # Pixel centres sit half a pixel inside the translated origin.
    for column, expected in ((xx[:, 0], 10.5), (xx[:, 1], 11.5),
                             (yy[0, :], 1000.5), (yy[6, :], 1006.5)):
        assert (column == expected).all()

    norm_x, norm_y, A = xy_norm(xx, yy)
    assert norm_x.shape == xx.shape
    assert norm_y.shape == yy.shape
    for normed in (norm_x, norm_y):
        np.testing.assert_almost_equal((normed.min(), normed.max()), (0, 1))
    # Normalisation must not collapse either axis.
    assert (norm_x[0] - norm_x[1]).sum() != 0
    assert (norm_x[:, 0] - norm_x[:, 1]).sum() != 0

    # The returned affine maps the normalised coords back exactly.
    back_x, back_y = apply_affine(A, norm_x, norm_y)
    np.testing.assert_array_almost_equal(xx, back_x)
    np.testing.assert_array_almost_equal(yy, back_y)
def test_geobox():
    """Exercise GeoBox construction from polygons, geometry properties,
    slicing, buffering and set-like combination operators."""
    # Four rings covering every quadrant sign combination; the last one
    # also repeats its first vertex to close the ring explicitly.
    points_list = [
        [
            (148.2697, -35.20111),
            (149.31254, -35.20111),
            (149.31254, -36.331431),
            (148.2697, -36.331431),
        ],
        [
            (148.2697, 35.20111),
            (149.31254, 35.20111),
            (149.31254, 36.331431),
            (148.2697, 36.331431),
        ],
        [
            (-148.2697, 35.20111),
            (-149.31254, 35.20111),
            (-149.31254, 36.331431),
            (-148.2697, 36.331431),
        ],
        [
            (-148.2697, -35.20111),
            (-149.31254, -35.20111),
            (-149.31254, -36.331431),
            (-148.2697, -36.331431),
            (148.2697, -35.20111),
        ],
    ]
    for points in points_list:
        polygon = geom.polygon(points, crs=epsg3577)
        resolution = (-25, 25)
        geobox = GeoBox.from_geopolygon(polygon, resolution)
        # check single value resolution equivalence
        assert GeoBox.from_geopolygon(polygon, 25) == geobox
        assert GeoBox.from_geopolygon(polygon, 25.0) == geobox
        assert GeoBox.from_geopolygon(polygon, resolution, crs=geobox.crs) == geobox
        # The geobox extent must cover the polygon on every side, but by
        # less than one pixel of slack.
        assert abs(resolution[0]) > abs(
            geobox.extent.boundingbox.left - polygon.boundingbox.left
        )
        assert abs(resolution[0]) > abs(
            geobox.extent.boundingbox.right - polygon.boundingbox.right
        )
        assert abs(resolution[1]) > abs(
            geobox.extent.boundingbox.top - polygon.boundingbox.top
        )
        assert abs(resolution[1]) > abs(
            geobox.extent.boundingbox.bottom - polygon.boundingbox.bottom
        )
    # Direct construction from an affine transform.
    A = mkA(0, scale=(10, -10), translation=(-48800, -2983006))
    w, h = 512, 256
    gbox = GeoBox(w, h, A, epsg3577)
    assert gbox.shape == (h, w)
    assert gbox.transform == A
    assert gbox.extent.crs == gbox.crs
    assert gbox.geographic_extent.crs == epsg4326
    assert gbox.extent.boundingbox.height == h * 10.0
    assert gbox.extent.boundingbox.width == w * 10.0
    assert gbox.alignment == (4, 0)  # 4 because -2983006 % 10 is 4
    assert isinstance(str(gbox), str)
    assert "EPSG:3577" in repr(gbox)
    assert GeoBox(1, 1, mkA(0), epsg4326).geographic_extent.crs == epsg4326
    assert GeoBox(1, 1, mkA(0), None).dimensions == ("y", "x")
    # Slicing trims the pixel grid from the far edges.
    g2 = gbox[:-10, :-20]
    assert g2.shape == (gbox.height - 10, gbox.width - 20)
    # step of 1 is ok
    g2 = gbox[::1, ::1]
    assert g2.shape == gbox.shape
    assert gbox[0].shape == (1, gbox.width)
    assert gbox[:3].shape == (3, gbox.width)
    # Strided slicing is unsupported.
    with pytest.raises(NotImplementedError):
        gbox[::2, :]
    # too many slices
    with pytest.raises(ValueError):
        gbox[:1, :1, :]
    # Buffering grows the grid by whole pixels on each side.
    assert gbox.buffered(0, 10).shape == (gbox.height + 2 * 1, gbox.width)
    assert gbox.buffered(10).shape == (gbox.height + 2 * 1, gbox.width + 2 * 1)
    assert gbox.buffered(20, 30).shape == (gbox.height + 2 * 3, gbox.width + 2 * 2)
    # Union/intersection with self are identity operations.
    assert (gbox | gbox) == gbox
    assert (gbox & gbox) == gbox
    assert gbox.is_empty() is False
    assert bool(gbox) is True
    assert (gbox[:3, :4] & gbox[3:, 4:]).is_empty()
    assert (gbox[:3, :4] & gbox[30:, 40:]).is_empty()
    # Combining an empty list of geoboxes is an error.
    with pytest.raises(ValueError):
        geobox_intersection_conservative([])
    with pytest.raises(ValueError):
        geobox_union_conservative([])
    # can not combine across CRSs
    with pytest.raises(ValueError):
        bounding_box_in_pixel_domain(
            GeoBox(1, 1, mkA(0), epsg4326), GeoBox(2, 3, mkA(0), epsg3577)
        )
def test_gbox_boundary():
    """gbox_boundary samples pts_per_side points along each image edge."""
    arr = np.zeros((2, 6))
    boundary = gbox_boundary(arr, 3)
    # 4 corners plus (3 - 2) extra sample on each of the 4 sides.
    assert boundary.shape == (4 + (3 - 2) * 4, 2)
    assert set(boundary.T[0]) == {0.0, 3.0, 6.0}
    assert set(boundary.T[1]) == {0.0, 1.0, 2.0}
def test_geobox_scale_down():
    """scaled_down_geobox shrinks the pixel grid but still covers the source."""
    crs = CRS("EPSG:3857")
    A = mkA(0, (111.2, 111.2), translation=(125671, 251465))

    for factor in (2, 3, 4, 8, 13, 16):
        src = GeoBox(233 * factor, 755 * factor, A, crs)
        down = scaled_down_geobox(src, factor)
        assert (down.width, down.height) == (233, 755)
        assert down.crs is crs
        assert down.extent.contains(src.extent)
        assert src.extent.difference(src.extent).area == 0.0

    # A 1x1 box never shrinks below a single pixel.
    # NOTE(review): the loop variable is unused — the original always
    # scales by 3 here; preserved as-is.
    src = GeoBox(1, 1, A, crs)
    for factor in (2, 3, 5):
        down = scaled_down_geobox(src, 3)
        assert down.shape == (1, 1)
        assert down.crs is crs
        assert down.extent.contains(src.extent)
| [
"odc.geo.geobox.geobox_intersection_conservative",
"odc.geo.CRS",
"odc.geo.geobox.GeoBox.from_geopolygon",
"affine.Affine",
"numpy.testing.assert_array_almost_equal",
"numpy.asarray",
"numpy.testing.assert_almost_equal",
"odc.geo.testutils.xy_norm",
"affine.Affine.translation",
"odc.geo.geobox.gbo... | [((774, 911), 'numpy.asarray', 'np.asarray', (['[151.000125, 151.000375, 151.000625, 151.000875, 151.001125, 151.001375, \n 151.001625, 151.001875, 151.002125, 151.002375]'], {}), '([151.000125, 151.000375, 151.000625, 151.000875, 151.001125, \n 151.001375, 151.001625, 151.001875, 151.002125, 151.002375])\n', (784, 911), True, 'import numpy as np\n'), ((1070, 1207), 'numpy.asarray', 'np.asarray', (['[-29.000125, -29.000375, -29.000625, -29.000875, -29.001125, -29.001375, -\n 29.001625, -29.001875, -29.002125, -29.002375]'], {}), '([-29.000125, -29.000375, -29.000625, -29.000875, -29.001125, -\n 29.001375, -29.001625, -29.001875, -29.002125, -29.002375])\n', (1080, 1207), True, 'import numpy as np\n'), ((1372, 1403), 'numpy.asarray', 'np.asarray', (['[-0.00025, 0.00025]'], {}), '([-0.00025, 0.00025])\n', (1382, 1403), True, 'import numpy as np\n'), ((1533, 1596), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['t.resolution', 'expect_resolution'], {}), '(t.resolution, expect_resolution)\n', (1563, 1596), True, 'import numpy as np\n'), ((1601, 1677), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["t.coords['latitude'].values[:10]", 'expect_lat'], {}), "(t.coords['latitude'].values[:10], expect_lat)\n", (1631, 1677), True, 'import numpy as np\n'), ((1682, 1759), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["t.coords['longitude'].values[:10]", 'expect_lon'], {}), "(t.coords['longitude'].values[:10], expect_lon)\n", (1712, 1759), True, 'import numpy as np\n'), ((2059, 2104), 'odc.geo.geobox.GeoBox', 'GeoBox', (['t.width', 't.height', 't.transform', 't.crs'], {}), '(t.width, t.height, t.transform, t.crs)\n', (2065, 2104), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((2119, 2168), 'odc.geo.geobox.GeoBox', 'GeoBox', (['(t.width + 1)', 
't.height', 't.transform', 't.crs'], {}), '(t.width + 1, t.height, t.transform, t.crs)\n', (2125, 2168), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((2403, 2421), 'odc.geo.testutils.xy_from_gbox', 'xy_from_gbox', (['gbox'], {}), '(gbox)\n', (2415, 2421), False, 'from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm\n'), ((2658, 2673), 'odc.geo.testutils.xy_norm', 'xy_norm', (['xx', 'yy'], {}), '(xx, yy)\n', (2665, 2673), False, 'from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm\n'), ((2974, 2999), 'odc.geo.math.apply_affine', 'apply_affine', (['A', 'xx_', 'yy_'], {}), '(A, xx_, yy_)\n', (2986, 2999), False, 'from odc.geo.math import apply_affine\n'), ((3004, 3048), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['xx', 'XX'], {}), '(xx, XX)\n', (3040, 3048), True, 'import numpy as np\n'), ((3053, 3097), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['yy', 'YY'], {}), '(yy, YY)\n', (3089, 3097), True, 'import numpy as np\n'), ((4777, 4832), 'odc.geo.testutils.mkA', 'mkA', (['(0)'], {'scale': '(10, -10)', 'translation': '(-48800, -2983006)'}), '(0, scale=(10, -10), translation=(-48800, -2983006))\n', (4780, 4832), False, 'from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm\n'), ((4865, 4890), 'odc.geo.geobox.GeoBox', 'GeoBox', (['w', 'h', 'A', 'epsg3577'], {}), '(w, h, A, epsg3577)\n', (4871, 4890), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((6709, 6725), 'numpy.zeros', 'np.zeros', (['(2, 6)'], {}), '((2, 6))\n', (6717, 6725), True, 'import numpy as np\n'), ((6736, 6756), 'odc.geo.geobox.gbox_boundary', 'gbox_boundary', (['xx', '(3)'], 
{}), '(xx, 3)\n', (6749, 6756), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((6931, 6947), 'odc.geo.CRS', 'CRS', (['"""EPSG:3857"""'], {}), "('EPSG:3857')\n", (6934, 6947), False, 'from odc.geo import CRS, geom\n'), ((6957, 7009), 'odc.geo.testutils.mkA', 'mkA', (['(0)', '(111.2, 111.2)'], {'translation': '(125671, 251465)'}), '(0, (111.2, 111.2), translation=(125671, 251465))\n', (6960, 7009), False, 'from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm\n'), ((7364, 7384), 'odc.geo.geobox.GeoBox', 'GeoBox', (['(1)', '(1)', 'A', 'crs'], {}), '(1, 1, A, crs)\n', (7370, 7384), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((695, 744), 'affine.Affine', 'Affine', (['(0.00025)', '(0.0)', '(151.0)', '(0.0)', '(-0.00025)', '(-29.0)'], {}), '(0.00025, 0.0, 151.0, 0.0, -0.00025, -29.0)\n', (701, 744), False, 'from affine import Affine\n'), ((2350, 2378), 'affine.Affine.translation', 'Affine.translation', (['(10)', '(1000)'], {}), '(10, 1000)\n', (2368, 2378), False, 'from affine import Affine\n'), ((3889, 3923), 'odc.geo.geom.polygon', 'geom.polygon', (['points'], {'crs': 'epsg3577'}), '(points, crs=epsg3577)\n', (3901, 3923), False, 'from odc.geo import CRS, geom\n'), ((3972, 4015), 'odc.geo.geobox.GeoBox.from_geopolygon', 'GeoBox.from_geopolygon', (['polygon', 'resolution'], {}), '(polygon, resolution)\n', (3994, 4015), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((5701, 5735), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (5714, 5735), False, 'import pytest\n'), ((5790, 5815), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {}), '(ValueError)\n', (5803, 5815), False, 'import pytest\n'), ((6331, 6356), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6344, 6356), False, 'import pytest\n'), ((6366, 6402), 'odc.geo.geobox.geobox_intersection_conservative', 'geobox_intersection_conservative', (['[]'], {}), '([])\n', (6398, 6402), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((6413, 6438), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6426, 6438), False, 'import pytest\n'), ((6448, 6477), 'odc.geo.geobox.geobox_union_conservative', 'geobox_union_conservative', (['[]'], {}), '([])\n', (6473, 6477), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((6522, 6547), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6535, 6547), False, 'import pytest\n'), ((7060, 7092), 'odc.geo.geobox.GeoBox', 'GeoBox', (['(233 * s)', '(755 * s)', 'A', 'crs'], {}), '(233 * s, 755 * s, A, crs)\n', (7066, 7092), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((7109, 7136), 'odc.geo.geobox.scaled_down_geobox', 'scaled_down_geobox', (['gbox', 's'], {}), '(gbox, s)\n', (7127, 7136), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((7425, 7452), 'odc.geo.geobox.scaled_down_geobox', 'scaled_down_geobox', (['gbox', '(3)'], {}), '(gbox, 3)\n', (7443, 7452), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), 
((4084, 4119), 'odc.geo.geobox.GeoBox.from_geopolygon', 'GeoBox.from_geopolygon', (['polygon', '(25)'], {}), '(polygon, 25)\n', (4106, 4119), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((4145, 4182), 'odc.geo.geobox.GeoBox.from_geopolygon', 'GeoBox.from_geopolygon', (['polygon', '(25.0)'], {}), '(polygon, 25.0)\n', (4167, 4182), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((4209, 4268), 'odc.geo.geobox.GeoBox.from_geopolygon', 'GeoBox.from_geopolygon', (['polygon', 'resolution'], {'crs': 'geobox.crs'}), '(polygon, resolution, crs=geobox.crs)\n', (4231, 4268), False, 'from odc.geo.geobox import GeoBox, bounding_box_in_pixel_domain, gbox_boundary, geobox_intersection_conservative, geobox_union_conservative, scaled_down_geobox\n'), ((1911, 1960), 'affine.Affine', 'Affine', (['(0.00025)', '(0.0)', '(151.0)', '(0.0)', '(-0.00025)', '(-29.0)'], {}), '(0.00025, 0.0, 151.0, 0.0, -0.00025, -29.0)\n', (1917, 1960), False, 'from affine import Affine\n'), ((5395, 5401), 'odc.geo.testutils.mkA', 'mkA', (['(0)'], {}), '(0)\n', (5398, 5401), False, 'from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm\n'), ((6612, 6618), 'odc.geo.testutils.mkA', 'mkA', (['(0)'], {}), '(0)\n', (6615, 6618), False, 'from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm\n'), ((6644, 6650), 'odc.geo.testutils.mkA', 'mkA', (['(0)'], {}), '(0)\n', (6647, 6650), False, 'from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm\n'), ((5319, 5325), 'odc.geo.testutils.mkA', 'mkA', (['(0)'], {}), '(0)\n', (5322, 5325), False, 'from odc.geo.testutils import epsg3577, epsg3857, epsg4326, mkA, xy_from_gbox, xy_norm\n')] |
import cv2
import os
import os.path as osp
import sys
from multiprocessing import Pool
import numpy as np
import glob
# Make the project root importable so `utils.util` resolves when this file
# is run as a script; skip silently if the helper package is unavailable.
try:
    sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
    from utils.util import ProgressBar
except ImportError:
    # NOTE(review): if this import fails, ProgressBar is undefined and
    # main() will raise NameError when it is first used — confirm intended.
    pass
def main():
    """Crop GT/LQ frames into fixed-size patches using a process pool.

    Configuration (mode, QP, channel count and the derived input/output
    folders) is hard-coded below.  Each sequence folder found under the
    selected input root is processed independently: every frame inside it
    is tiled into crop_sz x crop_sz patches by worker() and written to a
    matching output folder.
    """
    train_or_test = 'train'  # 'train' or 'test'
    qp = 37                  # compression quantization parameter
    channel_num = 3          # 3 = RGB inputs, 1 = Y-only inputs

    # Resolve input roots from the configuration.
    if train_or_test == 'train':
        if channel_num == 3:
            read_lq_file = '/home/x/data/pycharm/MW-GAN/data/mfqe/qp' + str(qp) + '/'
            read_gt_file = '/home/x/data/pycharm/MW-GAN/data/mfqe/raw/'
        elif channel_num == 1:
            read_lq_file = '/media/iceclear/iceking/YUV_f_img_crop_' + str(qp) + '_yuv/'
            read_gt_file = '/media/iceclear/iceking/YUV_GT_img_crop_yuv/'
        else:
            raise ValueError('Unsupported channel count: {}'.format(channel_num))
    elif train_or_test == 'test':
        read_lq_file = '/media/iceclear/iceking/YUV_f_img_crop_' + str(qp) + '_test/'
        read_gt_file = '/media/iceclear/iceking/YUV_GT_img_crop_test/'
    else:
        # BUGFIX: the original executed `print(s)` here, raising a
        # NameError on the undefined name `s`; fail with a clear error.
        raise ValueError('Unknown mode: {}'.format(train_or_test))

    info_mode = 'GT'  # GT or LQ
    if info_mode == 'LQ':
        read_list = glob.glob(read_lq_file + '/*')
    elif info_mode == 'GT':
        read_list = glob.glob(read_gt_file + '/*')
    else:
        raise ValueError('Unknown info_mode: {}'.format(info_mode))

    n_thread = 5    # worker processes per sequence folder
    crop_sz = 224   # patch side length in pixels
    step = 448      # grid stride between patch origins
    thres_sz = 22   # keep a trailing edge patch if the leftover exceeds this
    # CV_IMWRITE_PNG_COMPRESSION from 0 to 9.  A higher value means a smaller
    # size and longer compression time.  If reading raw images during
    # training, use 0 for faster IO speed.
    compression_level = 0

    print('*********** current mode is: ' + info_mode + ' **************')
    for seq_path in read_list:
        filename = os.path.basename(seq_path)
        print('>>>>>>>>>>>>>> ' + filename + ' is starting')
        input_folder = seq_path

        # Derive the per-sequence output folder from the configuration.
        if train_or_test == 'train':
            if channel_num == 3:
                save_folder = '/home/x/data/pycharm/MW-GAN/data/mfqe/YUV_' + info_mode + '_imgrgb_sub' + str(crop_sz) + '_' + str(qp) + '/' + filename
            elif channel_num == 1:
                save_folder = '/home/x/data/pycharm/MW-GAN/data/mfqe/YUV_' + info_mode + '_imgyuv_sub' + str(crop_sz) + '_' + str(qp) + '/' + filename
        elif train_or_test == 'test':
            save_folder = '/media/iceclear/iceking/YUV_' + info_mode + '_imgrgb_sub' + str(crop_sz) + '_' + str(qp) + '_test/' + filename

        if not os.path.exists(save_folder):
            os.makedirs(save_folder)
            print('mkdir [{:s}] ...'.format(save_folder))
        else:
            # Existing folders are reused; patches already on disk are
            # skipped inside worker().
            print('Folder [{:s}] already exists. Exit...'.format(save_folder))

        img_list = []
        for root, _, file_list in sorted(os.walk(input_folder)):
            # Assume only images live in the input folder.
            img_list.extend(os.path.join(root, x) for x in file_list)

        pbar = ProgressBar(len(img_list))

        def update(arg):
            # Pool callback: advance the progress bar with worker's message.
            pbar.update(arg)

        pool = Pool(n_thread)
        for img_path in img_list:
            pool.apply_async(
                worker,
                args=(img_path, save_folder, crop_sz, step, thres_sz, compression_level),
                callback=update)
        pool.close()
        pool.join()
        print(filename + 'is done.')
    print('All subprocesses done.')
def worker(path, save_folder, crop_sz, step, thres_sz, compression_level):
    """Tile one image into crop_sz x crop_sz patches and save them to disk.

    Patch origins are laid out on a regular grid with stride `step`; if the
    leftover border after the last grid position is larger than `thres_sz`
    pixels, an extra row/column of patches flush with the image edge is added.
    Patches that already exist in `save_folder` are skipped.

    Args:
        path: input image file path.
        save_folder: output directory for the cropped patches.
        crop_sz: side length of each square patch in pixels.
        step: stride between consecutive patch origins.
        thres_sz: minimum leftover size that triggers an extra edge patch.
        compression_level: PNG compression level (0-9) passed to cv2.imwrite.

    Returns:
        A short status message (consumed by the progress-bar callback).

    Raises:
        IOError: if the image cannot be read.
        ValueError: if the image has an unexpected shape or is too small.
    """
    img_name = os.path.basename(path)
    img = cv2.imread(path)
    if img is None:
        # BUGFIX: cv2.imread returns None (no exception) on unreadable
        # files; the original crashed later with an opaque AttributeError.
        raise IOError('Failed to read image: {}'.format(path))

    n_dims = len(img.shape)
    if n_dims == 2:
        h, w = img.shape
    elif n_dims == 3:
        h, w, _ = img.shape
    else:
        raise ValueError('Wrong image shape - {}'.format(n_dims))

    if h < crop_sz or w < crop_sz:
        # Avoid an IndexError on the empty grid below.
        raise ValueError(
            'Image {} ({}x{}) is smaller than crop_sz={}'.format(img_name, h, w, crop_sz))

    # Grid of patch origins, plus an optional edge-aligned extra position.
    h_space = np.arange(0, h - crop_sz + 1, step)
    if h - (h_space[-1] + crop_sz) > thres_sz:
        h_space = np.append(h_space, h - crop_sz)
    w_space = np.arange(0, w - crop_sz + 1, step)
    if w - (w_space[-1] + crop_sz) > thres_sz:
        w_space = np.append(w_space, w - crop_sz)

    index = 0
    for x in h_space:
        for y in w_space:
            index += 1
            if n_dims == 2:
                crop_img = img[x:x + crop_sz, y:y + crop_sz]
            else:
                crop_img = img[x:x + crop_sz, y:y + crop_sz, :]
            crop_img = np.ascontiguousarray(crop_img)
            out_path = os.path.join(
                save_folder, img_name.replace('.npy', '_s{:03d}.npy'.format(index)))
            if not os.path.exists(out_path):
                # BUGFIX: the original accepted compression_level but never
                # passed it to imwrite; apply it here (ignored for non-PNG).
                cv2.imwrite(out_path, crop_img,
                            [cv2.IMWRITE_PNG_COMPRESSION, compression_level])
    return 'Processing {:s} ...'.format(img_name)
# Script entry point: run the cropping pipeline with the hard-coded config.
if __name__ == '__main__':
    main()
| [
"os.path.exists",
"os.makedirs",
"numpy.arange",
"os.walk",
"os.path.join",
"numpy.ascontiguousarray",
"numpy.append",
"multiprocessing.Pool",
"os.path.basename",
"os.path.abspath",
"cv2.imread",
"glob.glob"
] | [((3688, 3710), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3704, 3710), False, 'import os\n'), ((3721, 3737), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3731, 3737), False, 'import cv2\n'), ((3969, 4004), 'numpy.arange', 'np.arange', (['(0)', '(h - crop_sz + 1)', 'step'], {}), '(0, h - crop_sz + 1, step)\n', (3978, 4004), True, 'import numpy as np\n'), ((4116, 4151), 'numpy.arange', 'np.arange', (['(0)', '(w - crop_sz + 1)', 'step'], {}), '(0, w - crop_sz + 1, step)\n', (4125, 4151), True, 'import numpy as np\n'), ((1037, 1067), 'glob.glob', 'glob.glob', (["(read_lq_file + '/*')"], {}), "(read_lq_file + '/*')\n", (1046, 1067), False, 'import glob\n'), ((1656, 1675), 'os.path.basename', 'os.path.basename', (['c'], {}), '(c)\n', (1672, 1675), False, 'import os\n'), ((3255, 3269), 'multiprocessing.Pool', 'Pool', (['n_thread'], {}), '(n_thread)\n', (3259, 3269), False, 'from multiprocessing import Pool\n'), ((4070, 4101), 'numpy.append', 'np.append', (['h_space', '(h - crop_sz)'], {}), '(h_space, h - crop_sz)\n', (4079, 4101), True, 'import numpy as np\n'), ((4217, 4248), 'numpy.append', 'np.append', (['w_space', '(w - crop_sz)'], {}), '(w_space, w - crop_sz)\n', (4226, 4248), True, 'import numpy as np\n'), ((1114, 1144), 'glob.glob', 'glob.glob', (["(read_gt_file + '/*')"], {}), "(read_gt_file + '/*')\n", (1123, 1144), False, 'import glob\n'), ((2564, 2591), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (2578, 2591), False, 'import os\n'), ((2605, 2629), 'os.makedirs', 'os.makedirs', (['save_folder'], {}), '(save_folder)\n', (2616, 2629), False, 'import os\n'), ((2979, 3000), 'os.walk', 'os.walk', (['input_folder'], {}), '(input_folder)\n', (2986, 3000), False, 'import os\n'), ((4533, 4563), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['crop_img'], {}), '(crop_img)\n', (4553, 4563), True, 'import numpy as np\n'), ((170, 191), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), 
'(__file__)\n', (181, 191), True, 'import os.path as osp\n'), ((3023, 3044), 'os.path.join', 'os.path.join', (['root', 'x'], {}), '(root, x)\n', (3035, 3044), False, 'import os\n')] |
import numpy

# Read the matrix order n, then two n-by-n integer matrices, one row of
# whitespace-separated values per line.
n = int(input())
a = numpy.array([input().split() for _ in range(n)], int)
b = numpy.array([input().split() for _ in range(n)], int)

# Print the matrix product.  The original computed each entry with
# numpy.dot inside a double loop and reassembled the result with
# numpy.append/reshape; numpy.matmul produces the identical integer
# array in a single vectorized call.
print(numpy.matmul(a, b))
"numpy.array",
"numpy.dot",
"numpy.reshape"
] | [((144, 164), 'numpy.array', 'numpy.array', (['[]', 'int'], {}), '([], int)\n', (155, 164), False, 'import numpy\n'), ((263, 287), 'numpy.reshape', 'numpy.reshape', (['x', '(n, n)'], {}), '(x, (n, n))\n', (276, 287), False, 'import numpy\n'), ((234, 261), 'numpy.dot', 'numpy.dot', (['a[i, :]', 'b[:, j]'], {}), '(a[i, :], b[:, j])\n', (243, 261), False, 'import numpy\n')] |
'''
<NAME> - ERL VIBOT CNRS 6000 - 2019
This code is the python conversion of Ning Li's release matlab code
Please refer below for references
% The code can only be used for research purpose.
% Please cite the following paper when you use it:
% <NAME>, <NAME>, <NAME>, and <NAME>,
% "Demosaicking DoFP images using Newton's polynomial interpolation and polarization difference model"
% Optics Express 27, 1376-1391 (2019)
%Note:
% The code is not optimized and may have bugs. There are ways to improve the efficiency of the algorithms.
% Your revision and improvement are welcome!
% All the notes in this code correspond to the cases explained in the
% original paper.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Newton Polynomial Interpolation %
% %
% Copyright (C) 2019 <NAME>. All rights reserved. %
% <EMAIL> %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
'''
import numpy as np
def interpolate(I):
    """Demosaic a division-of-focal-plane (DoFP) polarization image.

    Implements the Newton polynomial interpolation scheme of Li et al.,
    "Demosaicking DoFP images using Newton's polynomial interpolation and
    polarization difference model", Optics Express 27, 1376-1391 (2019).

    Args:
        I: 2-D array-like raw mosaic image; channel labels 0/1/2/3 follow
           the 2x2 super-pixel pattern encoded in `O` below.

    Returns:
        Tuple (I0, I45, I90, I135) of full-resolution float64 arrays, one
        per polarization orientation.  Border pixels (closer than ~4 px to
        an edge) keep the raw mosaic values.
    """
    # np.double(arr) relies on scalar-type array conversion; use an
    # explicit dtype conversion instead.
    I = np.asarray(I, dtype=np.float64)
    m, n = I.shape
    R = np.zeros((m, n, 4))

    # Labeling different polarization channels of the 2x2 mosaic pattern.
    O = np.zeros((m, n), dtype=int)
    step = 2
    O[0:m + 1:step, 0:n + 1:step] = 0
    O[0:m + 1:step, 1:n + 1:step] = 1
    O[1:m + 1:step, 1:n + 1:step] = 2
    O[1:m + 1:step, 0:n + 1:step] = 3

    # Diagonal estimate buffers.  BUGFIX: the original used `Y1 = I`,
    # which merely aliases I in numpy (MATLAB assignment copies), so every
    # write into Y1/Y2 silently corrupted the input while it was still
    # being read.  Independent copies restore the intended semantics.
    Y1 = I.copy()
    Y2 = I.copy()

    # Initialise all four channels with the raw mosaic values; only the
    # interior is refined below, borders keep these values.
    R[:, :, 0] = I
    R[:, :, 1] = I
    R[:, :, 2] = I
    R[:, :, 3] = I

    # Stage one interpolation: interpolate vertically for case Fig.6(b),
    # horizontally for case Fig.6(c), and along both diagonals for case
    # Fig.6(a).  Eqs.(14)-(17) of the paper appear here in simplified form.
    for i in range(3, m - 4):
        for j in range(3, n - 4):
            R[i, j, O[i, j]] = I[i, j]
            R[i, j, O[i, j + 1]] = 0.5 * I[i, j] + 0.0625 * I[i, j - 3] - 0.25 * I[i, j - 2] + \
                0.4375 * I[i, j - 1] + 0.4375 * I[i, j + 1] - \
                0.25 * I[i, j + 2] + 0.0625 * I[i, j + 3]
            R[i, j, O[i + 1, j]] = 0.5 * I[i, j] + 0.0625 * I[i - 3, j] - 0.25 * I[i - 2, j] + \
                0.4375 * I[i - 1, j] + 0.4375 * I[i + 1, j] - \
                0.25 * I[i + 2, j] + 0.0625 * I[i + 3, j]
            Y1[i, j] = 0.5 * I[i, j] + 0.0625 * I[i - 3, j - 3] - 0.25 * I[i - 2, j - 2] + 0.4375 * \
                I[i - 1, j - 1] + 0.4375 * I[i + 1, j + 1] - \
                0.25 * I[i + 2, j + 2] + 0.0625 * I[i + 3, j + 3]
            Y2[i, j] = 0.5 * I[i, j] + 0.0625 * I[i - 3, j + 3] - 0.25 * I[i - 2, j + 2] + 0.4375 * \
                I[i - 1, j + 1] + 0.4375 * I[i + 1, j - 1] - \
                0.25 * I[i + 2, j - 2] + 0.0625 * I[i + 3, j - 3]

    # Edge-classifier threshold; one can adjust for better results.
    thao = 5.8

    # Fusion of the two diagonal estimates with the edge classifier for
    # case Fig.6(a).  BUGFIX: the original inner loop ran over
    # range(3, m - 4) (row count) instead of range(3, n - 4) (column
    # count), which was wrong for non-square images.
    for i in range(3, m - 4):
        for j in range(3, n - 4):
            pha1 = 0.0
            pha2 = 0.0
            for k in range(-2, 3, 2):
                for l in range(-2, 3, 2):
                    pha1 = pha1 + abs(Y1[i + k, j + l] - I[i + k, j + l])
                    pha2 = pha2 + abs(Y2[i + k, j + l] - I[i + k, j + l])
            if (pha1 / pha2) > thao:
                R[i, j, O[i + 1, j + 1]] = Y2[i, j]
            elif (pha2 / pha1) > thao:
                R[i, j, O[i + 1, j + 1]] = Y1[i, j]
            elif ((pha1 / pha2) < thao) and ((pha2 / pha1) < thao):
                # Blend both estimates, weighted by inverse local gradients.
                d1 = abs(I[i - 1, j - 1] - I[i + 1, j + 1]) + \
                    abs(2 * I[i, j] - I[i - 2, j - 2] - I[i + 2, j + 2])
                d2 = abs(I[i + 1, j - 1] - I[i - 1, j + 1]) + \
                    abs(2 * I[i, j] - I[i + 2, j - 2] - I[i - 2, j + 2])
                epsl = 1e-15
                w1 = 1 / (d1 + epsl)
                w2 = 1 / (d2 + epsl)
                R[i, j, O[i + 1, j + 1]] = (w1 * Y1[i, j] + w2 * Y2[i, j]) / (w1 + w2)

    # Stage-two working buffers (copies, for the same aliasing reason as
    # Y1/Y2 above).  The original's `RR = R` / `R = RR` pair was a no-op
    # in numpy (both names referenced the same array) and is removed.
    XX1 = I.copy()
    XX2 = I.copy()
    YY1 = I.copy()
    YY2 = I.copy()

    # Stage two interpolation: interpolate horizontally for case Fig.6(b),
    # vertically for case Fig.6(c), reusing the stage-one results in R.
    for i in range(3, m - 4):
        for j in range(3, n - 4):
            XX1[i, j] = R[i, j, O[i, j + 1]]
            XX2[i, j] = 0.5 * I[i, j] + 0.0625 * \
                R[i - 3, j, O[i, j + 1]] - 0.25 * I[i - 2, j]
            XX2[i, j] = XX2[i, j] + 0.4375 * \
                R[i - 1, j, O[i, j + 1]] + 0.4375 * R[i + 1, j, O[i, j + 1]]
            XX2[i, j] = XX2[i, j] - 0.25 * I[i + 2, j] + 0.0625 * R[i + 3, j, O[i, j + 1]]
            YY1[i, j] = R[i, j, O[i + 1, j]]
            YY2[i, j] = 0.5 * I[i, j] + 0.0625 * \
                R[i, j - 3, O[i + 1, j]] - 0.25 * I[i, j - 2]
            YY2[i, j] = YY2[i, j] + 0.4375 * \
                R[i, j - 1, O[i + 1, j]] + 0.4375 * R[i, j + 1, O[i + 1, j]]
            YY2[i, j] = YY2[i, j] - 0.25 * I[i, j + 2] + 0.0625 * R[i, j + 3, O[i + 1, j]]

    # Fusion of the estimations with the edge classifier for cases
    # Fig.6(b) and Fig.6(c).
    for i in range(3, m - 4):
        for j in range(3, n - 4):
            pha1 = 0.0
            pha2 = 0.0
            for k in range(-2, 3, 2):
                for l in range(-2, 3, 2):
                    pha1 = pha1 + abs(XX1[i + k, j + l] - I[i + k, j + l])
                    pha2 = pha2 + abs(XX2[i + k, j + l] - I[i + k, j + l])
            if (pha1 / pha2) > thao:
                R[i, j, O[i, j + 1]] = XX2[i, j]
            elif (pha2 / pha1) > thao:
                R[i, j, O[i, j + 1]] = XX1[i, j]
            elif ((pha1 / pha2) < thao) and ((pha2 / pha1) < thao):
                d1 = abs(I[i, j - 1] - I[i, j + 1]) + \
                    abs(2 * I[i, j] - I[i, j - 2] - I[i, j + 2])
                d2 = abs(I[i + 1, j] - I[i - 1, j]) + \
                    abs(2 * I[i, j] - I[i + 2, j] - I[i - 2, j])
                epsl = 1e-15
                w1 = 1 / (d1 + epsl)
                w2 = 1 / (d2 + epsl)
                R[i, j, O[i, j + 1]] = (w1 * XX1[i, j] + w2 * XX2[i, j]) / (w1 + w2)
            pha1 = 0.0
            pha2 = 0.0
            for k in range(-2, 3, 2):
                for l in range(-2, 3, 2):
                    pha1 = pha1 + abs(YY1[i + k, j + l] - I[i + k, j + l])
                    pha2 = pha2 + abs(YY2[i + k, j + l] - I[i + k, j + l])
            if (pha1 / pha2) > thao:
                R[i, j, O[i + 1, j]] = YY2[i, j]
            elif (pha2 / pha1) > thao:
                R[i, j, O[i + 1, j]] = YY1[i, j]
            elif ((pha1 / pha2) < thao) and ((pha2 / pha1) < thao):
                d1 = abs(I[i, j - 1] - I[i, j + 1]) + \
                    abs(2 * I[i, j] - I[i, j - 2] - I[i, j + 2])
                d2 = abs(I[i + 1, j] - I[i - 1, j]) + \
                    abs(2 * I[i, j] - I[i + 2, j] - I[i - 2, j])
                epsl = 1e-15
                w1 = 1 / (d1 + epsl)
                w2 = 1 / (d2 + epsl)
                # BUGFIX: the original wrote this blended vertical estimate
                # into the O[i, j + 1] channel (copy-paste from the XX
                # branch above); the YY estimates belong to O[i + 1, j],
                # matching the two branches directly above.
                R[i, j, O[i + 1, j]] = (w1 * YY1[i, j] + w2 * YY2[i, j]) / (w1 + w2)

    I0 = R[:, :, 0]
    I45 = R[:, :, 1]
    I90 = R[:, :, 2]
    I135 = R[:, :, 3]
    return (I0, I45, I90, I135)
| [
"numpy.zeros",
"numpy.double"
] | [((1161, 1173), 'numpy.double', 'np.double', (['I'], {}), '(I)\n', (1170, 1173), True, 'import numpy as np\n'), ((1204, 1223), 'numpy.zeros', 'np.zeros', (['(m, n, 4)'], {}), '((m, n, 4))\n', (1212, 1223), True, 'import numpy as np\n'), ((1280, 1307), 'numpy.zeros', 'np.zeros', (['(m, n)'], {'dtype': 'int'}), '((m, n), dtype=int)\n', (1288, 1307), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
se2cnn/layers.py
Implementation of tensorflow layers for operations in SE2N.
Details in MICCAI 2018 paper: "Roto-Translation Covariant Convolutional Networks for Medical Image Analysis".
Released in June 2018
@author: <NAME>, Eindhoven University of Technology, The Netherlands
@author: <NAME>, Eindhoven University of Technology, The Netherlands
________________________________________________________________________
Copyright 2018 <NAME> and <NAME>, Eindhoven University
of Technology, the Netherlands
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
________________________________________________________________________
"""
import tensorflow as tf
import numpy as np
from . import rotation_matrix
# THE CONVOLUTION LAYERS
def z2_se2n(
        input_tensor,
        kernel,
        orientations_nb,
        # Optional:
        periodicity=2 * np.pi,
        diskMask=True,
        padding='VALID'):
    """ Lifting layer from Z2 to SE2N.
        Convolves a planar input with a stack of rotated kernels, producing
        one response per orientation.
        INPUT:
            - input_tensor in Z2, shape [BatchSize, Height, Width, ChannelsIN]
            - kernel, shape [kernelSize, kernelSize, ChannelsIN, ChannelsOUT]
            - orientations_nb, number of kernel rotations
        INPUT (optional):
            - periodicity, rotate in total over 2*np.pi or np.pi
            - diskMask, whether to mask the kernels spatially with a disk
            - padding, convolution padding mode
        OUTPUT:
            - layer_output, shape [BatchSize, Height', Width', orientations_nb, ChannelsOUT]
            - kernel_stack, the rotated kernels, shape
              [orientations_nb, kernelSize, kernelSize, ChannelsIN, ChannelsOUT]
    """
    # Build the full stack of rotated kernels once up front.
    kernel_stack = rotate_lifting_kernels(
        kernel, orientations_nb, periodicity=periodicity, diskMask=diskMask)
    print("Z2-SE2N ROTATED KERNEL SET SHAPE:",
          kernel_stack.get_shape())  # Debug

    k_h, k_w, ch_in, ch_out = map(int, kernel.shape)

    # Merge the rotation axis into the output-channel axis so that a single
    # 2D convolution computes the responses for every orientation at once.
    flat_kernels = tf.reshape(
        tf.transpose(kernel_stack, [1, 2, 3, 0, 4]),
        [k_h, k_w, ch_in, orientations_nb * ch_out])

    conv_out = tf.nn.conv2d(
        input=input_tensor,
        filter=flat_kernels,
        strides=[1, 1, 1, 1],
        padding=padding)

    # Split the orientation and output-channel axes back apart.  The batch
    # size may be dynamic, so it is read via tf.shape; the remaining
    # dimensions stay statically known via tensor.shape.
    batch_size = tf.shape(conv_out)[0]
    layer_output = tf.reshape(
        conv_out,
        [batch_size, int(conv_out.shape[1]), int(conv_out.shape[2]),
         orientations_nb, ch_out])
    print("OUTPUT SE2N ACTIVATIONS SHAPE:", layer_output.get_shape())  # Debug

    return layer_output, kernel_stack
def se2n_se2n(
        input_tensor,
        kernel,
        # Optional:
        periodicity=2 * np.pi,
        diskMask=True,
        padding='VALID'):
    """ Constructs a group convolutional layer (SE2N -> SE2N).

    The group convolution integrates over x, y, theta and the input
    channels for every translation and rotation of the kernel.  It is
    realized as a single 2D convolution by merging the orientation axis
    into the channel axes of both the input and the rotated kernels.

    INPUT:
        - input_tensor in SE2n; the reshapes below treat the last two
          axes as [nbOrientations, ChannelsIN] (i.e. the layout produced
          by z2_se2n: [BatchSize, Height, Width, nbOrientations, ChannelsIN])
        - kernel, a tensorflow Tensor with expected shape:
            [kernelSize, kernelSize, nbOrientations, ChannelsIN, ChannelsOUT]
    INPUT (optional):
        - periodicity, rotate in total over 2*np.pi or np.pi
        - diskMask, True or False, specifying whether or not to mask the
          kernels spatially
        - padding, padding mode of the underlying 2D convolution
    OUTPUT:
        - output_tensor, the tensor after group convolutions with shape
            [BatchSize, Height', Width', nbOrientations, ChannelsOut]
          (Height', Width' are the reduced sizes due to the valid convolution)
        - kernel_stack, the full stack of rotated (shift-twisted) kernels
          with shape [nbOrientations, kernelSize, kernelSize,
          nbOrientations, ChannelsIN, ChannelsOUT]
    """
    # Unpack the kernel dimensions.
    k_h, k_w, n_or, c_in, c_out = (int(d) for d in kernel.shape)

    # Precompute the full stack of rotated (shift-twisted) SE2 kernels.
    kernel_stack = rotate_gconv_kernels(kernel, periodicity, diskMask)
    print("SE2N-SE2N ROTATED KERNEL SET SHAPE:",
          kernel_stack.get_shape())  # Debug

    # Treat the SE2 activation stack as a plain 2D feature map: merge the
    # orientation axis with the input-channel axis.  The batch size may be
    # unknown, hence tf.shape for that dimension only.
    input_2d = tf.reshape(
        input_tensor,
        [tf.shape(input_tensor)[0],
         int(input_tensor.shape[1]),
         int(input_tensor.shape[2]),
         n_or * c_in])

    # Flatten the kernel stack accordingly: orientation+channelsIN merge on
    # the input side, rotation+channelsOUT merge on the output side.
    kernels_2d = tf.transpose(kernel_stack, [1, 2, 3, 4, 0, 5])
    kernels_2d = tf.reshape(
        kernels_2d, [k_h, k_w, n_or * c_in, n_or * c_out])

    # A single 2D convolution now realizes the whole group convolution.
    layer_output = tf.nn.conv2d(
        input=input_2d,
        filter=kernels_2d,
        strides=[1, 1, 1, 1],
        padding=padding)

    # Split the merged rotation/channelsOUT axis back into an SE2 image.
    layer_output = tf.reshape(
        layer_output,
        [tf.shape(layer_output)[0],
         int(layer_output.shape[1]),
         int(layer_output.shape[2]),
         n_or,
         c_out])
    print("OUTPUT SE2N ACTIVATIONS SHAPE:", layer_output.get_shape())  # Debug

    return layer_output, kernel_stack
# THE MAX-POOLING LAYER
def spatial_max_pool(input_tensor, nbOrientations, padding='VALID'):
    """ Performs 2x2 spatial max-pooling on every orientation of the SE2N tensor.

    INPUT:
        - input_tensor in SE2n, a tensor flow tensor with expected shape:
            [BatchSize, Height, Width, nbOrientations, ChannelsIN]
        - nbOrientations, number of orientation channels in the tensor
        - padding, padding mode of the underlying 2D max-pooling
    OUTPUT:
        - output_tensor, the tensor after spatial max-pooling:
            [BatchSize, Height/2, Width/2, nbOrientations, ChannelsOut]
    """
    # Plain 2D max-pooling applied independently to each orientation slice.
    pooled_slices = [
        tf.nn.max_pool(
            value=input_tensor[:, :, :, i, :],
            ksize=[1, 2, 2, 1],
            strides=[1, 2, 2, 1],
            padding=padding)
        for i in range(nbOrientations)]
    # Re-stack the pooled slices along the orientation dimension.
    return tf.concat(
        values=[tf.expand_dims(s, 3) for s in pooled_slices], axis=3)
# KERNEL ROTATION FUNCTIONS
def rotate_lifting_kernels(kernel, orientations_nb, periodicity=2 * np.pi, diskMask=True):
    """ Rotates the set of 2D lifting kernels.

    INPUT:
        - kernel, a tensor flow tensor with expected shape:
            [Height, Width, ChannelsIN, ChannelsOUT]
        - orientations_nb, an integer specifying the number of rotations
    INPUT (optional):
        - periodicity, rotate in total over 2*np.pi or np.pi
        - diskMask, True or False, specifying whether or not to mask the
          kernels spatially
    OUTPUT:
        - set_of_rotated_kernels, a tensorflow tensor with dimensions:
            [nbOrientations, Height, Width, ChannelsIN, ChannelsOUT]
    """
    # Unpack the shape of the input kernel.
    k_h, k_w, c_in, c_out = (int(d) for d in kernel.shape)
    print("Z2-SE2N BASE KERNEL SHAPE:", kernel.get_shape())  # Debug

    # Flatten the baseline kernel to
    # [kernelSizeH*kernelSizeW, channelsIN*channelsOUT].
    flat_kernel = tf.reshape(kernel, [k_h * k_w, c_in * c_out])

    # The rotation operator is applied as a sparse matrix for efficiency;
    # obtain its non-zero indices and weights.
    idx, vals = rotation_matrix.MultiRotationOperatorMatrixSparse(
        [k_h, k_w],
        orientations_nb,
        periodicity=periodicity,
        diskMask=diskMask)

    # Sparse rotation matrix of shape
    # [nbOrientations*kernelSizeH*kernelSizeW, kernelSizeH*kernelSizeW].
    rot_op = tf.SparseTensor(
        idx, vals, [orientations_nb * k_h * k_w, k_h * k_w])

    # Apply all rotations at once:
    # [nbOrientations*kernelSizeH*kernelSizeW, channelsIN*channelsOUT].
    rotated_flat = tf.sparse_tensor_dense_matmul(rot_op, flat_kernel)

    # Unflatten into the full stack of rotated kernels.
    return tf.reshape(
        rotated_flat, [orientations_nb, k_h, k_w, c_in, c_out])
def rotate_gconv_kernels(kernel, periodicity=2 * np.pi, diskMask=True):
    """ Rotates the set of SE2 kernels.
    Rotation of SE2 kernels involves planar rotations and a shift in orientation,
    see e.g. the left-regular representation L_g of the roto-translation group on SE(2) images,
    (Eq. 3) of the MICCAI 2018 paper.
    INPUT:
        - kernel, a tensor flow tensor with expected shape:
            [Height, Width, nbOrientations, ChannelsIN, ChannelsOUT]
    INPUT (optional):
        - periodicity, rotate in total over 2*np.pi or np.pi
        - disk_mask, True or False, specifying whether or not to mask the kernels spatially
    OUTPUT:
        - set_of_rotated_kernels, a tensorflow tensor with dimensions:
            [nbOrientations, Height, Width, nbOrientations, ChannelsIN, ChannelsOUT]
          I.e., for each rotation angle a rotated (shift-twisted) version of the input kernel.
    """
    # Rotation of an SE2 kernel consists of two parts:
    # PART 1. Planar rotation (each 2D spatial plane of the kernel is rotated)
    # PART 2. A shift in theta direction (the orientation axis is cyclically rolled)
    # Unpack the shape of the input kernel
    kernelSizeH, kernelSizeW, orientations_nb, channelsIN, channelsOUT = map(
        int, kernel.shape)
    print("SE2N-SE2N BASE KERNEL SHAPE:", kernel.get_shape())  # Debug
    # PART 1 (planar rotation)
    # Flatten the baseline kernel
    # Resulting shape: [kernelSizeH*kernelSizeW, orientations_nb*channelsIN*channelsOUT]
    kernel_flat = tf.reshape(
        kernel, [kernelSizeH * kernelSizeW, orientations_nb * channelsIN * channelsOUT])
    # Generate a set of rotated kernels via rotation matrix multiplication.
    # For efficiency purpose, the rotation matrix is implemented as a sparse matrix object.
    # Result: the non-zero indices and weights of the rotation matrix.
    idx, vals = rotation_matrix.MultiRotationOperatorMatrixSparse(
        [kernelSizeH, kernelSizeW],
        orientations_nb,
        periodicity=periodicity,
        diskMask=diskMask)
    # The corresponding sparse rotation matrix.
    # Resulting shape: [nbOrientations*kernelSizeH*kernelSizeW, kernelSizeH*kernelSizeW]
    rotOp_matrix = tf.SparseTensor(
        idx, vals,
        [orientations_nb * kernelSizeH * kernelSizeW, kernelSizeH * kernelSizeW])
    # Matrix multiplication (each 2D plane is now rotated).
    # Resulting shape: [nbOrientations*kernelSizeH*kernelSizeW, orientations_nb*channelsIN*channelsOUT]
    kernels_planar_rotated = tf.sparse_tensor_dense_matmul(
        rotOp_matrix, kernel_flat)
    kernels_planar_rotated = tf.reshape(
        kernels_planar_rotated, [orientations_nb, kernelSizeH, kernelSizeW, orientations_nb, channelsIN, channelsOUT])
    # PART 2 (shift in theta direction): for the kernel rotated over the
    # `orientation`-th angle, the orientation responses are cyclically
    # shifted by the same amount (the "shift-twist").
    set_of_rotated_kernels = [None] * orientations_nb
    for orientation in range(orientations_nb):
        # [kernelSizeH,kernelSizeW,orientations_nb,channelsIN,channelsOUT]
        kernels_temp = kernels_planar_rotated[orientation]
        # [kernelSizeH,kernelSizeW,channelsIN,channelsOUT,orientations_nb]
        kernels_temp = tf.transpose(kernels_temp, [0, 1, 3, 4, 2])
        # [kernelSizeH*kernelSizeW*channelsIN*channelsOUT, orientations_nb]
        kernels_temp = tf.reshape(
            kernels_temp, [kernelSizeH * kernelSizeW * channelsIN * channelsOUT, orientations_nb])
        # Roll along the orientation axis: multiplying by a cyclically rolled
        # identity matrix permutes (shifts) the orientation responses.
        roll_matrix = tf.constant(
            np.roll(np.identity(orientations_nb), orientation, axis=1), dtype=tf.float32)
        kernels_temp = tf.matmul(kernels_temp, roll_matrix)
        kernels_temp = tf.reshape(
            kernels_temp, [kernelSizeH, kernelSizeW, channelsIN, channelsOUT, orientations_nb])  # [Nx,Ny,Nin,Nout,Ntheta]
        kernels_temp = tf.transpose(kernels_temp, [0, 1, 4, 2, 3])
        set_of_rotated_kernels[orientation] = kernels_temp
    return tf.stack(set_of_rotated_kernels)
if __name__ == "__main__":
    # Print the docstrings of every public layer / kernel-rotation function.
    for layer_fn in (z2_se2n, se2n_se2n, spatial_max_pool,
                     rotate_lifting_kernels, rotate_gconv_kernels):
        help(layer_fn)
| [
"tensorflow.nn.conv2d",
"numpy.identity",
"tensorflow.nn.max_pool",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.SparseTensor",
"tensorflow.sparse_tensor_dense_matmul",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.stack"
] | [((2953, 2996), 'tensorflow.transpose', 'tf.transpose', (['kernel_stack', '[1, 2, 3, 0, 4]'], {}), '(kernel_stack, [1, 2, 3, 0, 4])\n', (2965, 2996), True, 'import tensorflow as tf\n'), ((3099, 3203), 'tensorflow.reshape', 'tf.reshape', (['kernels_as_if_2D', '[kernelSizeH, kernelSizeW, channelsIN, orientations_nb * channelsOUT]'], {}), '(kernels_as_if_2D, [kernelSizeH, kernelSizeW, channelsIN, \n orientations_nb * channelsOUT])\n', (3109, 3203), True, 'import tensorflow as tf\n'), ((3261, 3361), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input_tensor', 'filter': 'kernels_as_if_2D', 'strides': '[1, 1, 1, 1]', 'padding': 'padding'}), '(input=input_tensor, filter=kernels_as_if_2D, strides=[1, 1, 1,\n 1], padding=padding)\n', (3273, 3361), True, 'import tensorflow as tf\n'), ((6706, 6752), 'tensorflow.transpose', 'tf.transpose', (['kernel_stack', '[1, 2, 3, 4, 0, 5]'], {}), '(kernel_stack, [1, 2, 3, 4, 0, 5])\n', (6718, 6752), True, 'import tensorflow as tf\n'), ((6776, 6897), 'tensorflow.reshape', 'tf.reshape', (['kernels_as_if_2D', '[kernelSizeH, kernelSizeW, orientations_nb * channelsIN, orientations_nb *\n channelsOUT]'], {}), '(kernels_as_if_2D, [kernelSizeH, kernelSizeW, orientations_nb *\n channelsIN, orientations_nb * channelsOUT])\n', (6786, 6897), True, 'import tensorflow as tf\n'), ((6957, 7067), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input_tensor_as_if_2D', 'filter': 'kernels_as_if_2D', 'strides': '[1, 1, 1, 1]', 'padding': 'padding'}), '(input=input_tensor_as_if_2D, filter=kernels_as_if_2D, strides=\n [1, 1, 1, 1], padding=padding)\n', (6969, 7067), True, 'import tensorflow as tf\n'), ((9541, 9614), 'tensorflow.reshape', 'tf.reshape', (['kernel', '[kernelSizeH * kernelSizeW, channelsIN * channelsOUT]'], {}), '(kernel, [kernelSizeH * kernelSizeW, channelsIN * channelsOUT])\n', (9551, 9614), True, 'import tensorflow as tf\n'), ((10193, 10298), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['idx', 'vals', '[orientations_nb * 
kernelSizeH * kernelSizeW, kernelSizeH * kernelSizeW]'], {}), '(idx, vals, [orientations_nb * kernelSizeH * kernelSizeW, \n kernelSizeH * kernelSizeW])\n', (10208, 10298), True, 'import tensorflow as tf\n'), ((10463, 10519), 'tensorflow.sparse_tensor_dense_matmul', 'tf.sparse_tensor_dense_matmul', (['rotOp_matrix', 'kernel_flat'], {}), '(rotOp_matrix, kernel_flat)\n', (10492, 10519), True, 'import tensorflow as tf\n'), ((10672, 10780), 'tensorflow.reshape', 'tf.reshape', (['set_of_rotated_kernels', '[orientations_nb, kernelSizeH, kernelSizeW, channelsIN, channelsOUT]'], {}), '(set_of_rotated_kernels, [orientations_nb, kernelSizeH,\n kernelSizeW, channelsIN, channelsOUT])\n', (10682, 10780), True, 'import tensorflow as tf\n'), ((12318, 12413), 'tensorflow.reshape', 'tf.reshape', (['kernel', '[kernelSizeH * kernelSizeW, orientations_nb * channelsIN * channelsOUT]'], {}), '(kernel, [kernelSizeH * kernelSizeW, orientations_nb * channelsIN *\n channelsOUT])\n', (12328, 12413), True, 'import tensorflow as tf\n'), ((13005, 13110), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['idx', 'vals', '[orientations_nb * kernelSizeH * kernelSizeW, kernelSizeH * kernelSizeW]'], {}), '(idx, vals, [orientations_nb * kernelSizeH * kernelSizeW, \n kernelSizeH * kernelSizeW])\n', (13020, 13110), True, 'import tensorflow as tf\n'), ((13322, 13378), 'tensorflow.sparse_tensor_dense_matmul', 'tf.sparse_tensor_dense_matmul', (['rotOp_matrix', 'kernel_flat'], {}), '(rotOp_matrix, kernel_flat)\n', (13351, 13378), True, 'import tensorflow as tf\n'), ((13417, 13542), 'tensorflow.reshape', 'tf.reshape', (['kernels_planar_rotated', '[orientations_nb, kernelSizeH, kernelSizeW, orientations_nb, channelsIN,\n channelsOUT]'], {}), '(kernels_planar_rotated, [orientations_nb, kernelSizeH,\n kernelSizeW, orientations_nb, channelsIN, channelsOUT])\n', (13427, 13542), True, 'import tensorflow as tf\n'), ((14698, 14730), 'tensorflow.stack', 'tf.stack', (['set_of_rotated_kernels'], {}), 
'(set_of_rotated_kernels)\n', (14706, 14730), True, 'import tensorflow as tf\n'), ((8121, 8233), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', ([], {'value': 'input_tensor[:, :, :, i, :]', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': 'padding'}), '(value=input_tensor[:, :, :, i, :], ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding=padding)\n', (8135, 8233), True, 'import tensorflow as tf\n'), ((13922, 13965), 'tensorflow.transpose', 'tf.transpose', (['kernels_temp', '[0, 1, 3, 4, 2]'], {}), '(kernels_temp, [0, 1, 3, 4, 2])\n', (13934, 13965), True, 'import tensorflow as tf\n'), ((14064, 14165), 'tensorflow.reshape', 'tf.reshape', (['kernels_temp', '[kernelSizeH * kernelSizeW * channelsIN * channelsOUT, orientations_nb]'], {}), '(kernels_temp, [kernelSizeH * kernelSizeW * channelsIN *\n channelsOUT, orientations_nb])\n', (14074, 14165), True, 'import tensorflow as tf\n'), ((14365, 14401), 'tensorflow.matmul', 'tf.matmul', (['kernels_temp', 'roll_matrix'], {}), '(kernels_temp, roll_matrix)\n', (14374, 14401), True, 'import tensorflow as tf\n'), ((14425, 14523), 'tensorflow.reshape', 'tf.reshape', (['kernels_temp', '[kernelSizeH, kernelSizeW, channelsIN, channelsOUT, orientations_nb]'], {}), '(kernels_temp, [kernelSizeH, kernelSizeW, channelsIN, channelsOUT,\n orientations_nb])\n', (14435, 14523), True, 'import tensorflow as tf\n'), ((14583, 14626), 'tensorflow.transpose', 'tf.transpose', (['kernels_temp', '[0, 1, 4, 2, 3]'], {}), '(kernels_temp, [0, 1, 4, 2, 3])\n', (14595, 14626), True, 'import tensorflow as tf\n'), ((3832, 3854), 'tensorflow.shape', 'tf.shape', (['layer_output'], {}), '(layer_output)\n', (3840, 3854), True, 'import tensorflow as tf\n'), ((6437, 6459), 'tensorflow.shape', 'tf.shape', (['input_tensor'], {}), '(input_tensor)\n', (6445, 6459), True, 'import tensorflow as tf\n'), ((7228, 7250), 'tensorflow.shape', 'tf.shape', (['layer_output'], {}), '(layer_output)\n', (7236, 7250), True, 'import tensorflow as tf\n'), ((8401, 8421), 
'tensorflow.expand_dims', 'tf.expand_dims', (['t', '(3)'], {}), '(t, 3)\n', (8415, 8421), True, 'import tensorflow as tf\n'), ((14272, 14300), 'numpy.identity', 'np.identity', (['orientations_nb'], {}), '(orientations_nb)\n', (14283, 14300), True, 'import numpy as np\n')] |
#!python3
"""
An implementation of a PROPm allocation algorithm. Reference:
<NAME>, <NAME>, <NAME>, and <NAME> (2021).
["PROPm Allocations of Indivisible Goods to Multiple Agents"](https://arxiv.org/abs/2105.11348).
Programmer: <NAME>
Since: 2021-05
"""
import networkx as nx
import numpy as np
from fairpy import ValuationMatrix, Allocation, convert_input_to_valuation_matrix
from typing import List
from copy import deepcopy
import logging
logger = logging.getLogger(__name__)
###
### Main function
###
def propm_allocation(instance) -> Allocation:
    """
    Compute a PROPm allocation of indivisible goods for the given instance.

    The instance may be anything fairpy can convert to a valuation matrix
    (a numpy array, a dict of dicts, ...); the actual work is done by the
    recursive `solve` routine.

    >>> import numpy as np
    >>> v = np.array([
    ...     [0.25, 0.25, 0.25, 0.25, 0, 0],
    ...     [0.25, 0, 0.26, 0, 0.25, 0.24],
    ...     [0.25, 0, 0.24, 0, 0.25, 0.26]
    ... ])
    >>> propm_allocation(v)
    Agent #0 gets {2,3} with value 0.5.
    Agent #1 gets {1,5} with value 0.24.
    Agent #2 gets {0,4} with value 0.5.
    <BLANKLINE>
    >>> propm_allocation(v[np.ix_([0, 1, 2], [0, 2, 1, 3, 4, 5])])
    Agent #0 gets {2,3} with value 0.5.
    Agent #1 gets {0,1} with value 0.51.
    Agent #2 gets {4,5} with value 0.51.
    <BLANKLINE>
    >>> v = {"Alice": {"z":12, "y":10, "x":8, "w":7, "v":4, "u":1},\
             "Dina": {"z":14, "y":9, "x":15, "w":4, "v":9, "u":12},\
             "George": {"z":19, "y":16, "x":8, "w":6, "v":5, "u":1},\
            }
    >>> propm_allocation(v)
    Alice gets {x,y} with value 18.
    Dina gets {u,v,w} with value 25.
    George gets {z} with value 19.
    <BLANKLINE>
    """
    # The converter wraps `solve` so that it accepts any fairpy-supported
    # input format and returns an Allocation object.
    wrapped_solver = convert_input_to_valuation_matrix(solve)
    return wrapped_solver(instance)
###
### Subroutines
###
def insert_agent_into_allocation(agent: int, item: int, allocated_bundles: List[List[int]]):
    """
    If agent's i value of item j is greater than 1/n, we can allocate item j to i and solve
    the remaining sub-problem. This function inserts agent i with item j to the sub-problem
    allocation.
    >>> bundles = [[0, 2], [1, 3]]
    >>> insert_agent_into_allocation(0, 0, bundles)
    >>> bundles
    [[0], [1, 3], [2, 4]]
    >>> bundles = [[0, 2], [1, 3]]
    >>> insert_agent_into_allocation(1, 0, bundles)
    >>> bundles
    [[1, 3], [0], [2, 4]]
    """
    # The sub-problem was solved on item indices with `item` removed, so
    # every original index >= item was shifted down by one; undo that shift.
    for bundle in allocated_bundles:
        bundle[:] = [g + 1 if g >= item else g for g in bundle]
    # Finally give `item` to `agent` as a singleton bundle, in place.
    allocated_bundles.insert(agent, [item])
def divide(v: ValuationMatrix) -> List[List[int]]:
    """
    In stage 1 the divider agent having index 0 partitions the goods into bundles.
    >>> divide(ValuationMatrix([[0.5, 0, 0.5], [1/3, 1/3, 1/3]]))
    [[1, 0], [2]]
    >>> divide(ValuationMatrix([[0.25, 0.25, 0.25, 0.25, 0, 0], [0.25, 0, 0.26, 0, 0.25, 0.24], [0.25, 0, 0.24, 0, 0.25, 0.26]]))
    [[4, 5, 0], [1], [2, 3]]
    """
    total_value = v.verify_normalized()
    # Items sorted by the divider's (agent 0) value, cheapest first.
    order = sorted(v.objects(), key=lambda j: v[0, j])
    bundles = []
    taken = 0          # number of items already placed into bundles
    value_so_far = 0   # divider's value of everything already placed
    for k in v.agents():
        bundle_value = 0
        j = taken
        while j < v.num_of_objects:
            candidate_value = bundle_value + v[0, order[j]]
            # Stop as soon as adding the next item would push this bundle's
            # value above a proportional share of what remains.
            if candidate_value * (v.num_of_agents - k) + value_so_far > total_value:
                break
            bundle_value = candidate_value
            j += 1
        bundles.append([order[t] for t in range(taken, j)])
        taken = j
        value_so_far += bundle_value
    return bundles
class Decomposition:
    """
    A decomposition of the allocation problem into sub-problems.

    Sub-problem i is defined by the pair (agents[i], bundles[i]): the agents
    in agents[i] will share (only) the goods in bundles[i].
    """

    def __init__(self, values: ValuationMatrix):
        self.v = values
        # Total value of all goods under the (normalized) valuations.
        self.total_value = values.verify_normalized()
        self.agents = []   # list of sets of agent indices, one per sub-problem
        self.bundles = []  # list of item-index lists, one per sub-problem

    def __repr__(self):
        return "\n".join(
            [
                f"sub-problem {i}:\n\tagents : {list(agents)}\n\tgoods : {bundle}"
                for i, (agents, bundle) in enumerate(zip(self.agents, self.bundles))
            ]
        )

    def num_of_agents(self):
        """
        this method returns number of agents in decomposition
        """
        return sum(map(len, self.agents))

    def num_of_objects(self):
        """
        this method returns number of goods in decomposition
        """
        return sum(map(len, self.bundles))

    def get_all_agents(self):
        """
        this method returns set containing all agents in decomposition
        """
        return set().union(*self.agents)

    def get_all_items(self):
        """
        this method returns list containing all items in decomposition
        """
        return sum(self.bundles, [])

    def _move_agents_along_path(self, node_to, parent, edge_agent):
        """
        Walk the parent chain from vertex node_to back towards the
        candidate's vertex (vertex 0), moving the agent labeling each
        traversed edge one sub-problem forward along the path.
        Returns the index of the last visited vertex (the direct successor
        of vertex 0) — the sub-problem where the candidate must be inserted.
        """
        node_from = parent[node_to]
        while node_from != 0:
            moved_agent = edge_agent[(node_from, node_to)]
            # BUGFIX: the original logged three %d placeholders with only two
            # arguments (and omitted the moved agent); supply all three.
            logger.info(
                "Moving agent %d from sub-problem %d to sub-problem %d",
                moved_agent, node_from - 1, node_to - 1,
            )
            self.agents[node_from - 1].remove(moved_agent)
            self.agents[node_to - 1].add(moved_agent)
            node_to = node_from
            node_from = parent[node_to]
        return node_to

    def update(self, candidate, bundle):
        """
        UpdateDecomposition subroutine
        bundle is S_t bundle
        candidate is agent k from the paper
        """
        logger.info("Updating decomposition trying to add agent %d and bundle %s", candidate, str(bundle))
        t = len(self.bundles) + 1
        # Build the sub-problem graph: vertex 0 holds the candidate, vertices
        # 1..t-1 are the existing sub-problems, vertex t holds the new bundle.
        sub_problem_graph = nx.DiGraph()
        sub_problem_graph.add_node(0, agents={candidate}, bundle=[])
        for i in range(1, t):
            sub_problem_graph.add_node(i, agents=self.agents[i - 1], bundle=self.bundles[i - 1])
        sub_problem_graph.add_node(t, agents=set(), bundle=bundle)
        sub_problem_agents = nx.get_node_attributes(sub_problem_graph, "agents")
        sub_problem_bundle = nx.get_node_attributes(sub_problem_graph, "bundle")
        # Add an edge (u, w) labeled by an agent of u who values w's bundle at
        # least proportionally to the number of agents already sharing it.
        for node_from in range(t):
            for node_to in range(1, t + 1):
                agent = next(
                    filter(
                        lambda a: self.v.agent_value_for_bundle(a, sub_problem_bundle[node_to]) * self.v.num_of_agents
                        >= self.total_value * max(1, len(sub_problem_agents[node_to])),
                        sub_problem_agents[node_from],
                    ),
                    None,
                )
                if agent is not None:
                    sub_problem_graph.add_edge(node_from, node_to, agent=agent)
        # DFS from the candidate's vertex; remember each vertex's parent so
        # the path back to vertex 0 can be reconstructed.
        reachable = set()
        for parent, child in nx.dfs_edges(sub_problem_graph, 0):
            nx.set_node_attributes(sub_problem_graph, {child: parent}, "parent")
            reachable.add(child)
        parent = nx.get_node_attributes(sub_problem_graph, "parent")
        edge_agent = nx.get_edge_attributes(sub_problem_graph, "agent")
        if t in reachable:
            logger.info("Case 1: bundle's vertex is reachable from candidate's vertex in sub-problem graph")
            self.agents.append(set())
            self.bundles.append(bundle)
            node_to = self._move_agents_along_path(t, parent, edge_agent)
            logger.info("Adding agent %d to sub-problem %d", candidate, node_to - 1)
            self.agents[node_to - 1].add(candidate)
            return
        for node_to in reachable:
            for agent in sub_problem_agents[node_to]:
                if self.v.num_of_agents * self.v.agent_value_for_bundle(agent, self.get_all_items() + bundle) <= t:
                    logger.info(
                        "Case 2: agent's %d vertex is reachable from the candidate's in sub-problem graph"
                        "and she prefers sharing last n-t bundles rather than first t",
                        agent,
                    )
                    logger.info("Removing agent %d from decomposition", agent)
                    self.agents[node_to - 1].remove(agent)
                    node_to = self._move_agents_along_path(node_to, parent, edge_agent)
                    # BUGFIX: the original logged "Adding agent %d to
                    # sub-problem %d" with no arguments; supply them.
                    logger.info("Adding agent %d to sub-problem %d", candidate, node_to - 1)
                    self.agents[node_to - 1].add(candidate)
                    return
        logger.info(
            "Case 3: bundle's t vertex is not reachable from candidate's and all reachable agents of decomposition "
            "prefer first %d bundles rather than last %d",
            t,
            self.v.num_of_agents - t,
        )
        logger.info("Merging all sub-problems into one and adding candidate and bundle")
        self.agents = [self.get_all_agents().union({candidate})]
        self.bundles = [self.get_all_items() + bundle]
def solve(agents) -> List[List[int]]:
    """
    recursive function which takes valuations and returns a PROPm allocation
    as a list of bundles
    >>> import numpy as np
    >>> v = np.array([
    ... [0.25, 0.25, 0.25, 0.25, 0, 0],
    ... [0.25, 0, 0.26, 0, 0.25, 0.24],
    ... [0.25, 0, 0.24, 0, 0.25, 0.26]
    ... ])
    >>> solve(v)
    [[2, 3], [1, 5], [4, 0]]
    >>> solve(v[np.ix_([0, 1, 2], [0, 2, 1, 3, 4, 5])])
    [[2, 3], [0, 1], [4, 5]]
    >>> v = np.array([
    ... [0, 0, 0, 0, 0, 0],
    ... [1, 2, 3, 4, 5, 6],
    ... [10, 20, 30, 40, 50, 60]
    ... ])
    >>> solve(v)
    [[], [0, 1, 2, 3], [4, 5]]
    """
    v = ValuationMatrix(agents)
    # Base case: no agents or no goods — nothing to allocate.
    if v.num_of_agents == 0 or v.num_of_objects == 0:
        return []
    logger.info("Looking for PROPm allocation for %d agents and %d items", v.num_of_agents, v.num_of_objects)
    logger.info("Solving a problem defined by valuation matrix:\n %s", v)
    # Reduction 1: an agent valuing everything at 0 gets the empty bundle;
    # recurse on the remaining agents.
    for agent in v.agents():
        if np.allclose(v[agent], 0.0):  # irrelevant agent - values everything at 0
            allocation = solve(v.without_agent(agent))
            allocation.insert(agent, [])
            return allocation
    total_value = v.normalize()
    logger.info("Normalized matrix:\n %s", v)
    # Reduction 2: if some agent values a single item above 1/n of the total,
    # give her that item and recurse on the reduced problem.
    for agent in v.agents():
        for item in v.objects():
            if v[agent][item] * v.num_of_agents > total_value:
                logger.info(
                    "Allocating item %d to agent %d as she values it as %f > 1/n",
                    item,
                    agent,
                    v[agent][item] / total_value,
                )
                allocation = solve(v.without_agent(agent).without_object(item))
                insert_agent_into_allocation(agent, item, allocation)
                return allocation
    # Stage 1: the divider (agent 0) partitions the goods into n bundles.
    bundles = divide(v)
    logger.info("Divider divides items into following bundles: %s", str(bundles))
    remaining_agents = set(range(1, v.num_of_agents))
    logger.info("Building decomposition:")
    decomposition = Decomposition(v)
    # Stage 2: grow the decomposition until, for some t, fewer than t agents
    # strictly prefer sharing the first t bundles over the last n-t.
    for t in range(1, v.num_of_agents + 1):
        considered_items = sum(bundles[:t], [])
        # Agents who strictly prefer the first t bundles (value > t/n).
        candidates = list(
            filter(
                lambda a: v.num_of_agents * v.agent_value_for_bundle(a, considered_items) > t * total_value,
                remaining_agents,
            )
        )
        # NOTE(review): the argument order here looks off — str(candidates)
        # is interpolated where the bundle count t should appear; the list of
        # candidates likely belongs in the final %s. Confirm and fix.
        logger.info(
            "There are %s remaining agents that prefer sharing first %s bundles rather than last %s: %s",
            len(candidates),
            str(candidates),
            t,
            v.num_of_agents - t,
        )
        # Absorb candidates one by one; each update may shuffle agents
        # between sub-problems, so recompute the candidate set afterwards.
        while len(candidates) > 0 and decomposition.num_of_agents() < t:
            logger.info("Current decomposition:\n %s", str(decomposition))
            decomposition.update(candidates[0], bundles[t - 1])
            remaining_agents = set(range(1, v.num_of_agents)).difference(decomposition.get_all_agents())
            candidates = list(
                filter(
                    lambda a: v.num_of_agents * v.agent_value_for_bundle(a, considered_items) > t * total_value,
                    remaining_agents,
                )
            )
        if decomposition.num_of_agents() < t:
            # Fewer than t agents are absorbed: the rest share the last n-t
            # bundles, the divider takes bundle t, and each sub-problem is
            # solved recursively.
            decomposition.agents.append(remaining_agents)
            decomposition.bundles.append(sum(bundles[t:], []))
            logger.info("Final decomposition:\n %s", str(decomposition))
            logger.info("Allocating bundle %d to divider agent", t)
            allocation = list([[] for _ in range(v.num_of_agents)])
            allocation[0] = bundles[t - 1]
            for agents, bundle in zip(decomposition.agents, decomposition.bundles):
                agents = list(sorted(agents))
                sub_problem = v.submatrix(agents, bundle)
                solution = solve(sub_problem)
                # Map the sub-problem's local item indices back to the
                # original item indices of this problem.
                for i, agent in enumerate(agents):
                    for j in solution[i]:
                        allocation[agent].append(bundle[j])
            return allocation
# Expose the module logger on the public entry point for callers that want
# to adjust verbosity (e.g. propm_allocation.logger.setLevel(...)).
propm_allocation.logger = logger

if __name__ == "__main__":
    import sys

    logger.addHandler(logging.StreamHandler(sys.stdout))
    # logger.setLevel(logging.INFO)

    import doctest

    failures, tests = doctest.testmod(report=True)
    print("{} failures, {} tests".format(failures, tests))
| [
"logging.getLogger",
"numpy.allclose",
"logging.StreamHandler",
"fairpy.convert_input_to_valuation_matrix",
"networkx.get_edge_attributes",
"networkx.DiGraph",
"doctest.testmod",
"networkx.get_node_attributes",
"fairpy.ValuationMatrix",
"networkx.set_node_attributes",
"networkx.dfs_edges"
] | [((465, 492), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (482, 492), False, 'import logging\n'), ((9817, 9840), 'fairpy.ValuationMatrix', 'ValuationMatrix', (['agents'], {}), '(agents)\n', (9832, 9840), False, 'from fairpy import ValuationMatrix, Allocation, convert_input_to_valuation_matrix\n'), ((13370, 13398), 'doctest.testmod', 'doctest.testmod', ([], {'report': '(True)'}), '(report=True)\n', (13385, 13398), False, 'import doctest\n'), ((1603, 1643), 'fairpy.convert_input_to_valuation_matrix', 'convert_input_to_valuation_matrix', (['solve'], {}), '(solve)\n', (1636, 1643), False, 'from fairpy import ValuationMatrix, Allocation, convert_input_to_valuation_matrix\n'), ((5228, 5240), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (5238, 5240), True, 'import networkx as nx\n'), ((5534, 5585), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['sub_problem_graph', '"""agents"""'], {}), "(sub_problem_graph, 'agents')\n", (5556, 5585), True, 'import networkx as nx\n'), ((5615, 5666), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['sub_problem_graph', '"""bundle"""'], {}), "(sub_problem_graph, 'bundle')\n", (5637, 5666), True, 'import networkx as nx\n'), ((6308, 6342), 'networkx.dfs_edges', 'nx.dfs_edges', (['sub_problem_graph', '(0)'], {}), '(sub_problem_graph, 0)\n', (6320, 6342), True, 'import networkx as nx\n'), ((6476, 6527), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['sub_problem_graph', '"""parent"""'], {}), "(sub_problem_graph, 'parent')\n", (6498, 6527), True, 'import networkx as nx\n'), ((6549, 6599), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['sub_problem_graph', '"""agent"""'], {}), "(sub_problem_graph, 'agent')\n", (6571, 6599), True, 'import networkx as nx\n'), ((10139, 10165), 'numpy.allclose', 'np.allclose', (['v[agent]', '(0.0)'], {}), '(v[agent], 0.0)\n', (10150, 10165), True, 'import numpy as np\n'), ((13254, 13287), 'logging.StreamHandler', 
'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (13275, 13287), False, 'import logging\n'), ((6356, 6424), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['sub_problem_graph', '{child: parent}', '"""parent"""'], {}), "(sub_problem_graph, {child: parent}, 'parent')\n", (6378, 6424), True, 'import networkx as nx\n')] |
"""
This script follows closely the Mathematica script from Sandri, 1996 (see
Sandri_1996_script/ for more details) and tries to reproduce the results
therein.
"""
import matplotlib.pyplot as plt
import mpl_extras as me
import tpsim as tp
import numpy as np
# Lorenz system parameters (the chaotic setting used by Sandri, 1996).
sigma = 16
beta = 4
rho = 45.92


def F(t, state):
    """Right-hand side of the Lorenz system, dx/dt = F(t, x).

    `state` is the 3-vector (x, y, z); `t` is unused (autonomous system)
    but kept for a standard ODE signature.
    """
    x, y, z = state
    dx = sigma * (y - x)
    dy = x * (rho - z) - y
    dz = x * y - beta * z
    return np.array([dx, dy, dz])


def DF(x, y, z):
    """Jacobian matrix of the Lorenz right-hand side at the point (x, y, z)."""
    return np.array([
        [-sigma, sigma, 0],
        [rho - z, -1, -x],
        [y, x, -beta],
    ])
def RK4(x0, phi0, T, dt, *args):
    """Integrate the state and its variational matrix with classic RK4.

    Advances `x0` (state) and `phi0` (fundamental/variational matrix) over a
    time span `T` with fixed step `dt`; extra `args` are forwarded to `F`.
    Returns the final (state, matrix) pair.
    """
    n_steps = int(T / dt)
    x, phi = x0, phi0
    for step in range(n_steps):
        t = step * dt
        # RK4 stages for the state x (evaluated at the pre-update state).
        k1 = F(t, x, *args)
        k2 = F(t + dt / 2, x + dt * k1 / 2, *args)
        k3 = F(t + dt / 2, x + dt * k2 / 2, *args)
        k4 = F(t + dt, x + dt * k3, *args)
        # RK4 stages for phi reuse the same intermediate states so both
        # integrations stay in lockstep.
        m1 = np.dot(DF(*x), phi)
        m2 = np.dot(DF(*(x + dt * k1 / 2)), phi + dt * m1 / 2)
        m3 = np.dot(DF(*(x + dt * k2 / 2)), phi + dt * m2 / 2)
        m4 = np.dot(DF(*(x + dt * k3)), phi + dt * m3)
        x = x + dt * (k1 + 2 * k2 + 2 * k3 + k4) / 6
        phi = phi + dt * (m1 + 2 * m2 + 2 * m3 + m4) / 6
    return x, phi
# Constants & initial conditions
dt = 0.02
x0 = np.array([19, 20, 50])
phi0 = np.identity(3)
# --------------------------------------------------------------------------- #
# Continuous system estimation
# --------------------------------------------------------------------------- #
"""
Continuously updates the 'ball' in each iteration, after which an
estimation of the LCE can be carried out via the operation of the ball on
random elements of the phase space.
"""
# Run the pusher for a transient period.
T = 20
# Step the system
x, phi = RK4(x0, phi0, T, dt)
# Compare to the final state in Sandri, 1996
assert np.isclose(x, np.array([17.733, 15.0227, 53.4122]), rtol=1e-2).all()
# Estimate LCEs from integrated phi by acting the resulting ball phi onto
# a random vector
u = np.random.randn(len(x))
# Growth rate of a random direction under phi approximates the largest LCE.
LCE = np.log(np.linalg.norm(np.dot(phi, u))) / T
# Compare again, but since this is randomized, the margin might be large
assert np.isclose(LCE, 1.45026, rtol=2e-1)
# --------------------------------------------------------------------------- #
# LCE spectrum after transient period
# --------------------------------------------------------------------------- #
"""
Having passed the transient period, we can now estimate the LCE components
in periods of K iterations from the ball phi.
"""
# Reset the ball to an identity ball
x = x0
phi = phi0
# Run pusher for small periods now
T = 0.1
K = 800
t_S = T * np.arange(K)
# Holders for LCE components
L_S = np.zeros((3, K))
l1, l2, l3 = np.zeros(3)
# Push and calculate
for j in range(K):
    x, phi = RK4(x, phi, T, dt)
    # Orthonormalize the ball; column norms of W give per-period growth rates.
    W, V = tp.gram_schmidt(phi)
    # Note that this estimation is cumulative
    l1 += np.log(np.linalg.norm(W[:, 0])) / T
    l2 += np.log(np.linalg.norm(W[:, 1])) / T
    l3 += np.log(np.linalg.norm(W[:, 2])) / T
    L_S[0, j] = l1
    L_S[1, j] = l2
    L_S[2, j] = l3
    # Renormalize the ball
    phi = V
# Normalize LCE
L_S = L_S / np.arange(1, K + 1)
# Compare with Sandri's results. Somehow, the LCE component close to zero is
# very hard to match. But we're probably fine with it being on the same order
# of magnitude.
assert np.isclose(L_S[0, -1], 1.50085, rtol=1e-1)
assert np.isclose(L_S[2, -1], -22.4872, rtol=1e-1)
# --------------------------------------------------------------------------- #
# What if we used forward differentiation?
# --------------------------------------------------------------------------- #
T = 80
# A much smaller step is needed because phi is advanced with a first-order
# forward-Euler update below (instead of the RK4 variational integration).
dt = 0.0002
N = int(T / dt)
t_FD = dt * np.arange(N)
x = x0
phi = phi0
L_FD = np.zeros((3, N))
l1, l2, l3 = np.zeros(3)
for j in range(N):
    # Push (calculate new phi with FD)
    x, _ = RK4(x, phi, dt, dt)
    # Push phi
    phi = np.matmul(np.identity(3) + dt * DF(*x), phi)
    # Then, we follow the same procedure
    W, V = tp.gram_schmidt(phi)
    l1 += np.log(np.linalg.norm(W[:, 0])) / dt
    l2 += np.log(np.linalg.norm(W[:, 1])) / dt
    l3 += np.log(np.linalg.norm(W[:, 2])) / dt
    L_FD[0, j] = l1
    L_FD[1, j] = l2
    L_FD[2, j] = l3
    # Renormalize
    phi = V
# Normalize LCE
L_FD = L_FD / np.arange(1, N + 1)
# Plot
me.setup_mpl(tex=True)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
ax.plot(t_S, L_S[0, :], "-k", label="RK4")
ax.plot(t_S, L_S[1, :], "-k")
ax.plot(t_S, L_S[2, :], "-k")
ax.plot(t_FD, L_FD[0, :], "--r", label="FD")
ax.plot(t_FD, L_FD[1, :], "--r")
ax.plot(t_FD, L_FD[2, :], "--r")
ax.set_xlabel("Steps")
ax.set_ylabel("LCEs")
ax.tick_params(**me.params)
fig.savefig("lorenz_LCE_spectrum.png", dpi=fig.dpi)
| [
"numpy.identity",
"numpy.isclose",
"mpl_extras.setup_mpl",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.linalg.norm",
"tpsim.gram_schmidt",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((1440, 1462), 'numpy.array', 'np.array', (['[19, 20, 50]'], {}), '([19, 20, 50])\n', (1448, 1462), True, 'import numpy as np\n'), ((1470, 1484), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (1481, 1484), True, 'import numpy as np\n'), ((2362, 2396), 'numpy.isclose', 'np.isclose', (['LCE', '(1.45026)'], {'rtol': '(0.2)'}), '(LCE, 1.45026, rtol=0.2)\n', (2372, 2396), True, 'import numpy as np\n'), ((2917, 2933), 'numpy.zeros', 'np.zeros', (['(3, K)'], {}), '((3, K))\n', (2925, 2933), True, 'import numpy as np\n'), ((2947, 2958), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2955, 2958), True, 'import numpy as np\n'), ((3571, 3612), 'numpy.isclose', 'np.isclose', (['L_S[0, -1]', '(1.50085)'], {'rtol': '(0.1)'}), '(L_S[0, -1], 1.50085, rtol=0.1)\n', (3581, 3612), True, 'import numpy as np\n'), ((3621, 3663), 'numpy.isclose', 'np.isclose', (['L_S[2, -1]', '(-22.4872)'], {'rtol': '(0.1)'}), '(L_S[2, -1], -22.4872, rtol=0.1)\n', (3631, 3663), True, 'import numpy as np\n'), ((3973, 3989), 'numpy.zeros', 'np.zeros', (['(3, N)'], {}), '((3, N))\n', (3981, 3989), True, 'import numpy as np\n'), ((4003, 4014), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4011, 4014), True, 'import numpy as np\n'), ((4538, 4560), 'mpl_extras.setup_mpl', 'me.setup_mpl', ([], {'tex': '(True)'}), '(tex=True)\n', (4550, 4560), True, 'import mpl_extras as me\n'), ((4571, 4605), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (4583, 4605), True, 'import matplotlib.pyplot as plt\n'), ((379, 443), 'numpy.array', 'np.array', (['[sigma * (y - x), x * (rho - z) - y, x * y - beta * z]'], {}), '([sigma * (y - x), x * (rho - z) - y, x * y - beta * z])\n', (387, 443), True, 'import numpy as np\n'), ((518, 582), 'numpy.array', 'np.array', (['[[-sigma, sigma, 0], [rho - z, -1, -x], [y, x, -beta]]'], {}), '([[-sigma, sigma, 0], [rho - z, -1, -x], [y, x, -beta]])\n', (526, 582), True, 'import numpy as np\n'), ((2869, 
2881), 'numpy.arange', 'np.arange', (['K'], {}), '(K)\n', (2878, 2881), True, 'import numpy as np\n'), ((3042, 3062), 'tpsim.gram_schmidt', 'tp.gram_schmidt', (['phi'], {}), '(phi)\n', (3057, 3062), True, 'import tpsim as tp\n'), ((3373, 3392), 'numpy.arange', 'np.arange', (['(1)', '(K + 1)'], {}), '(1, K + 1)\n', (3382, 3392), True, 'import numpy as np\n'), ((3935, 3947), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3944, 3947), True, 'import numpy as np\n'), ((4226, 4246), 'tpsim.gram_schmidt', 'tp.gram_schmidt', (['phi'], {}), '(phi)\n', (4241, 4246), True, 'import tpsim as tp\n'), ((4509, 4528), 'numpy.arange', 'np.arange', (['(1)', '(N + 1)'], {}), '(1, N + 1)\n', (4518, 4528), True, 'import numpy as np\n'), ((2058, 2094), 'numpy.array', 'np.array', (['[17.733, 15.0227, 53.4122]'], {}), '([17.733, 15.0227, 53.4122])\n', (2066, 2094), True, 'import numpy as np\n'), ((2261, 2275), 'numpy.dot', 'np.dot', (['phi', 'u'], {}), '(phi, u)\n', (2267, 2275), True, 'import numpy as np\n'), ((3126, 3149), 'numpy.linalg.norm', 'np.linalg.norm', (['W[:, 0]'], {}), '(W[:, 0])\n', (3140, 3149), True, 'import numpy as np\n'), ((3172, 3195), 'numpy.linalg.norm', 'np.linalg.norm', (['W[:, 1]'], {}), '(W[:, 1])\n', (3186, 3195), True, 'import numpy as np\n'), ((3218, 3241), 'numpy.linalg.norm', 'np.linalg.norm', (['W[:, 2]'], {}), '(W[:, 2])\n', (3232, 3241), True, 'import numpy as np\n'), ((4139, 4153), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (4150, 4153), True, 'import numpy as np\n'), ((4264, 4287), 'numpy.linalg.norm', 'np.linalg.norm', (['W[:, 0]'], {}), '(W[:, 0])\n', (4278, 4287), True, 'import numpy as np\n'), ((4311, 4334), 'numpy.linalg.norm', 'np.linalg.norm', (['W[:, 1]'], {}), '(W[:, 1])\n', (4325, 4334), True, 'import numpy as np\n'), ((4358, 4381), 'numpy.linalg.norm', 'np.linalg.norm', (['W[:, 2]'], {}), '(W[:, 2])\n', (4372, 4381), True, 'import numpy as np\n')] |
import numpy as np
import os, datetime, time
import plot_settings
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
from test_utilities import process_fig2p6
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..",))
from frius import total_freq_response, distance2time
"""
Increase number of pulses in noiseless situation. Compute SRR of resynthesized
signal and on time locations.
Set `results_dir` to path in order to load already computed data. Otherwise,
perform test by setting `results_dir` to e.g. `None`.
Parameters for test can be set in the main function.
"""
if __name__ == '__main__':
    # Directory of previously computed results; if it cannot be loaded, the
    # sweep below is (re-)run from scratch.
    results_dir = 'noiseless_increasing_order06_13_11h05'
    # parameters for test (if not loading file)
    max_n_diracs = 100
    step_size = 5
    n_diracs_vals = np.arange(start=max_n_diracs, stop=1, step=-step_size)
    n_diracs_vals = np.insert(n_diracs_vals, 0, [500, 400, 300, 200])
    n_trials = 50
    n_jobs = 5
    # load file available, otherwise run test
    try:
        npzfile = np.load(os.path.join(os.path.dirname(__file__), results_dir, "results.npz"))
        n_diracs_vals = npzfile["n_diracs_vals"]
        err_sig = npzfile["err_sig"]
        err_loc = npzfile["err_loc"]
        print("Loading data from %s..." % results_dir)
        run_sweep = False
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. `Exception` keeps the intended
        # fall-back-to-sweep behaviour for any load failure (missing file,
        # missing key) without hiding interpreter exits.
        run_sweep = True
        print("No data available. Running test...")
        print()
    if run_sweep:
        # constants (typical US values)
        clk_verasonics = 62.5e6
        n_cycles = 2.5
        center_freq = clk_verasonics/12
        samp_freq = clk_verasonics/3
        speed_sound = 1540
        bw = 2/3
        bwr = -6
        depth = 5e-2  # in meters
        period = distance2time(depth, speed_sound)
        # sweep
        err_loc = np.zeros((len(n_diracs_vals), n_trials))
        err_sig = np.zeros((len(n_diracs_vals), n_trials))
        print("Number of pulses to sweep over : ", end="")
        print(n_diracs_vals)
        start_sweep = time.time()
        for i, K in enumerate(n_diracs_vals):
            print()
            print("Num. of pulses : %d" % K)
            n_diracs_time = time.time()
            # critical sampling parameters
            M = K
            n_samples = 2*M+1
            samp_bw = n_samples/period
            Ts = 1/samp_bw
            # frequencies within samples bandwidth (baseband)
            freqs_fft = np.fft.fftfreq(n_samples, Ts)
            increasing_order = np.argsort(freqs_fft)
            freqs_fft = freqs_fft[increasing_order]
            # pulse for equalizing
            freqs = freqs_fft+center_freq
            H_tot = total_freq_response(freqs, center_freq, bw, n_cycles, bwr)
            # random trials, parallelized over `n_jobs` workers
            res = Parallel(n_jobs=n_jobs)(
                delayed(process_fig2p6)(K, j, period, H_tot, freqs,
                                        center_freq, bw, n_cycles, bwr, samp_freq)
                for j in range(n_trials))
            res = np.array(res)
            err_loc[i,:] = res[:,0]
            err_sig[i,:] = res[:,1]
            avg_time = (time.time() - n_diracs_time)/n_trials
            print("Average reconstruction time for %d dirac(s) : %f sec" % (K, avg_time))
        """ Save """
        time_stamp = datetime.datetime.now().strftime("%m_%d_%Hh%M")
        results_dir = os.path.join(os.path.dirname(__file__), "noiseless_increasing_order%s" % time_stamp)
        os.makedirs(results_dir)
        np.savez(os.path.join(results_dir, "results"), n_diracs_vals=n_diracs_vals,
            err_sig=err_sig, err_loc=err_loc)
        print("Results saved to %s" % results_dir)
        print()
        print("TOTAL SIMULATION TIME : %f min" % ((time.time()-start_sweep)/60.) )
    """ Visualize """
    loc_err_per_ndiracs = np.mean(err_loc, axis=1)
    sig_err_per_ndiracs = np.mean(err_sig, axis=1)
    loc_std = np.std(err_loc, axis=1)
    sig_std = np.std(err_sig, axis=1)
    f, (ax1, ax2) = plt.subplots(2,1, sharex=True)
    ax1.errorbar(n_diracs_vals, sig_err_per_ndiracs, sig_std, ecolor='r', marker='o')
    ax1.set_ylabel("SRR [dB]")
    ax1.axes.set_yticks(np.arange(0,max(sig_err_per_ndiracs+sig_std),50))
    # NOTE(review): `nonposx` was renamed `nonpositive` in Matplotlib 3.3 —
    # confirm against the pinned Matplotlib version.
    ax1.set_xscale("log", nonposx='clip')
    ax1.set_ylim([0, max(sig_err_per_ndiracs+sig_std)])
    ax1.grid()
    ax2.errorbar(n_diracs_vals, loc_err_per_ndiracs, loc_std, ecolor='r', marker='o')
    ax2.set_ylabel("$t_k$ [dB]")
    ax2.grid()
    ax2.axes.set_yticks(np.arange(0,max(loc_err_per_ndiracs+loc_std),50))
    ax2.set_xscale("log", nonposx='clip')
    ax2.set_xlim([min(n_diracs_vals), max(n_diracs_vals)])
    ax2.set_ylim([0, max(loc_err_per_ndiracs+loc_std)])
    ax2.set_xlabel("Num. of pulses [log scale]")
    f.tight_layout()
    fp = os.path.join(os.path.dirname(__file__), "figures", "_fig2p6.pdf")
    plt.savefig(fp, dpi=300)
    plt.show()
| [
"numpy.argsort",
"frius.distance2time",
"numpy.array",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.savefig",
"os.path.dirname",
"numpy.std",
"time.time",
"matplotlib.pyplot.show",
"numpy.insert",
"os.makedirs",
"numpy.fft.fftfreq",
"os.path.join",
"joblib.Parallel",
"datetime.date... | [((827, 881), 'numpy.arange', 'np.arange', ([], {'start': 'max_n_diracs', 'stop': '(1)', 'step': '(-step_size)'}), '(start=max_n_diracs, stop=1, step=-step_size)\n', (836, 881), True, 'import numpy as np\n'), ((902, 951), 'numpy.insert', 'np.insert', (['n_diracs_vals', '(0)', '[500, 400, 300, 200]'], {}), '(n_diracs_vals, 0, [500, 400, 300, 200])\n', (911, 951), True, 'import numpy as np\n'), ((3796, 3820), 'numpy.mean', 'np.mean', (['err_loc'], {'axis': '(1)'}), '(err_loc, axis=1)\n', (3803, 3820), True, 'import numpy as np\n'), ((3847, 3871), 'numpy.mean', 'np.mean', (['err_sig'], {'axis': '(1)'}), '(err_sig, axis=1)\n', (3854, 3871), True, 'import numpy as np\n'), ((3886, 3909), 'numpy.std', 'np.std', (['err_loc'], {'axis': '(1)'}), '(err_loc, axis=1)\n', (3892, 3909), True, 'import numpy as np\n'), ((3924, 3947), 'numpy.std', 'np.std', (['err_sig'], {'axis': '(1)'}), '(err_sig, axis=1)\n', (3930, 3947), True, 'import numpy as np\n'), ((3969, 4000), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (3981, 4000), True, 'import matplotlib.pyplot as plt\n'), ((4821, 4845), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fp'], {'dpi': '(300)'}), '(fp, dpi=300)\n', (4832, 4845), True, 'import matplotlib.pyplot as plt\n'), ((4851, 4861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4859, 4861), True, 'import matplotlib.pyplot as plt\n'), ((219, 244), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (234, 244), False, 'import os, datetime, time\n'), ((1749, 1782), 'frius.distance2time', 'distance2time', (['depth', 'speed_sound'], {}), '(depth, speed_sound)\n', (1762, 1782), False, 'from frius import total_freq_response, distance2time\n'), ((2028, 2039), 'time.time', 'time.time', ([], {}), '()\n', (2037, 2039), False, 'import os, datetime, time\n'), ((3440, 3464), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (3451, 3464), False, 
'import os, datetime, time\n'), ((4764, 4789), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4779, 4789), False, 'import os, datetime, time\n'), ((2180, 2191), 'time.time', 'time.time', ([], {}), '()\n', (2189, 2191), False, 'import os, datetime, time\n'), ((2437, 2466), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['n_samples', 'Ts'], {}), '(n_samples, Ts)\n', (2451, 2466), True, 'import numpy as np\n'), ((2498, 2519), 'numpy.argsort', 'np.argsort', (['freqs_fft'], {}), '(freqs_fft)\n', (2508, 2519), True, 'import numpy as np\n'), ((2670, 2728), 'frius.total_freq_response', 'total_freq_response', (['freqs', 'center_freq', 'bw', 'n_cycles', 'bwr'], {}), '(freqs, center_freq, bw, n_cycles, bwr)\n', (2689, 2728), False, 'from frius import total_freq_response, distance2time\n'), ((2994, 3007), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (3002, 3007), True, 'import numpy as np\n'), ((3360, 3385), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3375, 3385), False, 'import os, datetime, time\n'), ((3482, 3518), 'os.path.join', 'os.path.join', (['results_dir', '"""results"""'], {}), "(results_dir, 'results')\n", (3494, 3518), False, 'import os, datetime, time\n'), ((1080, 1105), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1095, 1105), False, 'import os, datetime, time\n'), ((2776, 2799), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (2784, 2799), False, 'from joblib import Parallel, delayed\n'), ((3277, 3300), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3298, 3300), False, 'import os, datetime, time\n'), ((3106, 3117), 'time.time', 'time.time', ([], {}), '()\n', (3115, 3117), False, 'import os, datetime, time\n'), ((2817, 2840), 'joblib.delayed', 'delayed', (['process_fig2p6'], {}), '(process_fig2p6)\n', (2824, 2840), False, 'from joblib import Parallel, delayed\n'), ((3714, 3725), 'time.time', 'time.time', ([], {}), '()\n', 
(3723, 3725), False, 'import os, datetime, time\n')] |
r"""
.. _sec-normal:
Gaussian process change
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
This cost function detects changes in the mean and scale of a Gaussian time series.
Formally, for a signal :math:`\{y_t\}_t` on an interval :math:`I`,
.. math:: c(y_{I}) = |I| \log\det\widehat{\Sigma}_I
where :math:`\widehat{\Sigma}_I` is the empirical covariance matrix of the sub-signal :math:`\{y_t\}_{t\in I}`.
Usage
----------------------------------------------------------------------------------------------------
Start with the usual imports and create a signal.
.. code-block:: python
import numpy as np
import matplotlib.pylab as plt
import ruptures as rpt
# creation of data
n, dim = 500, 3 # number of samples, dimension
    n_bkps, sigma = 3, 5 # number of change points, noise standard deviation
signal, bkps = rpt.pw_constant(n, dim, n_bkps, noise_std=sigma)
Then create a :class:`CostNormal` instance and print the cost of the sub-signal :code:`signal[50:150]`.
.. code-block:: python
c = rpt.costs.CostNormal().fit(signal)
print(c.error(50, 150))
You can also compute the sum of costs for a given list of change points.
.. code-block:: python
print(c.sum_of_costs(bkps))
print(c.sum_of_costs([10, 100, 200, 250, n]))
In order to use this cost class in a change point detection algorithm (inheriting from :class:`BaseEstimator`), either pass a :class:`CostNormal` instance (through the argument ``'custom_cost'``) or set :code:`model="normal"`.
.. code-block:: python
c = rpt.costs.CostNormal(); algo = rpt.Dynp(custom_cost=c)
# is equivalent to
algo = rpt.Dynp(model="normal")
Code explanation
----------------------------------------------------------------------------------------------------
.. autoclass:: ruptures.costs.CostNormal
:members:
:special-members: __init__
"""
import numpy as np
from numpy.linalg import slogdet
from ruptures.base import BaseCost
from ruptures.costs import NotEnoughPoints
class CostNormal(BaseCost):

    """Maximum Gaussian likelihood cost (detects mean and scale changes)."""

    model = "normal"

    def __init__(self):
        self.signal = None
        self.min_size = 2

    def fit(self, signal):
        """Store the signal, promoting a 1D array to a single-column matrix.

        Args:
            signal (array): signal. Shape (n_samples,) or (n_samples, n_features)

        Returns:
            self
        """
        self.signal = signal.reshape(-1, 1) if signal.ndim == 1 else signal
        return self

    def error(self, start, end):
        """Return the approximation cost on the segment [start:end].

        Args:
            start (int): start of the segment
            end (int): end of the segment

        Returns:
            float: segment cost

        Raises:
            NotEnoughPoints: when the segment is too short (less than ``'min_size'`` samples).
        """
        segment_length = end - start
        if segment_length < self.min_size:
            raise NotEnoughPoints
        segment = self.signal[start:end]
        if self.signal.shape[1] > 1:
            # Multivariate case: empirical covariance of the sub-signal.
            covariance = np.cov(segment.T)
        else:
            # Univariate case: 1x1 matrix built from the sample variance.
            covariance = np.array([[segment.var()]])
        _, log_det = slogdet(covariance)
        return log_det * segment_length
| [
"numpy.linalg.slogdet",
"numpy.cov"
] | [((3339, 3351), 'numpy.linalg.slogdet', 'slogdet', (['cov'], {}), '(cov)\n', (3346, 3351), False, 'from numpy.linalg import slogdet\n'), ((3252, 3265), 'numpy.cov', 'np.cov', (['sub.T'], {}), '(sub.T)\n', (3258, 3265), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding=UTF-8
# BSD 2-Clause License
# Copyright (c) 2021, <NAME> (Beigesoft™)
# All rights reserved.
# See the LICENSE in the root source folder
#transfer NIST data to LIBSVM formatted train (900 samples) and test (the rest 100) files
#NIST data from http://www.cis.jhu.edu/~sachin/digit/digit.html 1000 28x28 digits (unsigned char 8bit): data0..data9
#required path to the data in command line sys.argv[1]
import sys, os
sys.path += [os.path.dirname(os.path.abspath (__file__)) + '/../..']
from BsLibMisc import *
import numpy as np
def prnUsage ():
  """Print command-line usage instructions for this script."""
  usage_lines = (
    'You must pass path to the NIST files 1000 28x28 digits (unsigned char 8bit): data0..data9!',
    'from http://www.cis.jhu.edu/~sachin/digit/digit.html',
    'Use: python dig1000tolibsvm.py [path_to_nist_files]',
  )
  for line in usage_lines:
    print (line)
# Parse the single positional argument: path to the NIST data files.
ip = 0
pth = ''
for arg in sys.argv:
  if ip == 1:
    pth = arg
  ip += 1
if pth == '' or pth == '-h':
  prnUsage ()
  exit (1)
# Dataset layout: 10 files (data0..data9), each with 1000 digits of 28x28
# uint8 pixels; first 900 per digit go to the train file, last 100 to test.
digCnt = 1000
digSz = 28
smpCnt = digSz * digSz
trainCnt = 900
testCnt = digCnt - trainCnt
testOfst = trainCnt * smpCnt
ftrain = False
ftest = False
try:
  for dig in range (10):
    fnme = pth + "/data" + str (dig)
    NUMd = np.fromfile (fnme, dtype=np.uint8)
    if NUMd.shape[0] != digCnt * smpCnt:
      print ('It must be 1000 uint8 28x28 samples in ', fnme)
      raise
    #just for visual control, print several samples:
    print ("Samples #0,1,2,900,901,902 From file: ", fnme)
    bsPrnImgTxt (NUMd, 0, digSz)
    bsPrnImgTxt (NUMd, 1*smpCnt, digSz)
    bsPrnImgTxt (NUMd, 2*smpCnt, digSz)
    bsPrnImgTxt (NUMd, 900*smpCnt, digSz)
    bsPrnImgTxt (NUMd, 901*smpCnt, digSz)
    bsPrnImgTxt (NUMd, 902*smpCnt, digSz)
    #there is 5 #900 that looks like 6
    # LIBSVM format: "<label> <index>:<value> ..." with 1-based indices.
    if ftrain == False:
      ftrain = open (pth + '/nist'+str(digCnt)+'x'+str(digSz)+'x'+str(digSz), 'w')
    for i in range (trainCnt):
      ftrain.write (str (dig))
      for j in range (smpCnt):
        ftrain.write(' ' + str (j+1) + ':' + str (NUMd[i * smpCnt + j]))
      ftrain.write('\n')
    if ftest == False:
      ftest = open (pth + '/nist'+str (digCnt)+'x'+str(digSz)+'x'+str(digSz) + 't', 'w')
    for i in range (testCnt):
      ftest.write (str (dig))
      for j in range (smpCnt):
        ftest.write(' ' + str (j+1) + ':' + str (NUMd[testOfst + i * smpCnt + j]))
      ftest.write('\n')
# NOTE(review): bare `except:` — acceptable here only because it re-raises
# after cleanup; a try/finally would express the intent more clearly.
except:
  prnUsage ()
  print (sys.exc_info ()[0])
  if ftrain != False:
    ftrain.close()
  if ftest != False:
    ftest.close()
  raise
if ftrain != False:
  ftrain.close()
if ftest != False:
  ftest.close()
| [
"os.path.abspath",
"sys.exc_info",
"numpy.fromfile"
] | [((1175, 1208), 'numpy.fromfile', 'np.fromfile', (['fnme'], {'dtype': 'np.uint8'}), '(fnme, dtype=np.uint8)\n', (1186, 1208), True, 'import numpy as np\n'), ((478, 503), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (493, 503), False, 'import sys, os\n'), ((2367, 2381), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2379, 2381), False, 'import sys, os\n')] |
# Created by <NAME>, 30/08/2018
import os
from os import path
import numpy as np
import gym
from gym import GoalEnv
from gym import error, spaces
from gym.utils import seeding
import mujoco_py
from mujoco_py import load_model_from_path, MjSim, MjViewer
import cv2 as cv
import copy
class GoalEnvExt(GoalEnv):
    def __init__(self, model_path, n_substeps, n_actions, horizon, image_size, use_image_goal,
                 use_visual_observation, with_goal,
                 reward_type, distance_threshold, distance_threshold_obs, use_true_reward,
                 initial_qpos=None, default_camera_name='external_camera_0', use_auxiliary_loss=False,
                 noisy_reward_fp=None, noisy_reward_fn=None, state_estimation_reward_noise=0., **kwargs):
        """Load the MuJoCo model and set up simulation, spaces and reward wiring.

        :param model_path: absolute path, or path relative to ./assets, of the model XML.
        :param n_substeps: physics substeps per environment step.
        :param n_actions: dimensionality of the (Box) action space.
        :param horizon: episode length; `step` sets done only when it is reached.
        :param image_size: side length of the square offscreen renders.
        :param use_image_goal: whether goals are represented as images.
        :param use_visual_observation: whether observations are images.
        :param with_goal: if False, `reset` hides the goal from image observations.
        :param reward_type: 'sparse' for thresholded reward, otherwise negative distance.
        :param distance_threshold: success threshold in true state space.
        :param distance_threshold_obs: success threshold in observation space.
        :param use_true_reward: compare true states from `info` instead of goal vectors.
        :param noisy_reward_fp / noisy_reward_fn: false-positive / false-negative
            rates for the noisy sparse reward; if either is set, `compute_reward`
            is replaced by `compute_reward_noisy`.
        :param state_estimation_reward_noise: stored on the instance
            (usage not visible in this class — see subclasses).
        """
        if model_path.startswith("/"):
            fullpath = model_path
        else:
            fullpath = os.path.join(os.path.dirname(__file__), "./assets", model_path)
        if not path.exists(fullpath):
            raise IOError("File %s does not exist" % fullpath)
        self.model = load_model_from_path(fullpath)
        self.sim = MjSim(self.model, nsubsteps=n_substeps)
        self.data = self.sim.data
        self.viewer = None
        self.np_random = None
        self.seed()
        self._env_setup(initial_qpos=initial_qpos)
        # Snapshot of the post-setup simulator state, restorable by subclasses.
        self.initial_state = copy.deepcopy(self.sim.get_state())
        self.horizon = horizon
        self.image_size = image_size
        self.use_image_goal = use_image_goal
        self.use_visual_observation = use_visual_observation
        self.with_goal = with_goal
        self.reward_type = reward_type
        self.distance_threshold = distance_threshold
        self.distance_threshold_obs = distance_threshold_obs
        self.use_true_reward = use_true_reward
        self.state_estimation_reward_noise = state_estimation_reward_noise
        self._max_episode_steps = horizon
        self.time_step = 0
        self.init_qpos = self.sim.data.qpos.ravel().copy()
        self.init_qvel = self.sim.data.qvel.ravel().copy()
        self.goal_state = self.goal_observation = None
        # Select the reward function: exact-match reward when comparisons are
        # exact by construction, noisy reward when noise rates are given.
        if noisy_reward_fn is None and noisy_reward_fp is None:
            if (not use_visual_observation and not use_true_reward) or (
                    use_visual_observation and distance_threshold_obs == 0. and not use_true_reward):
                self.compute_reward = self.compute_reward_zero
        else:
            self.compute_reward = self.compute_reward_noisy
        self.noisy_reward_fp = noisy_reward_fp
        self.noisy_reward_fn = noisy_reward_fn
        self.default_camera_name = default_camera_name
        self._set_camera()
        # Warm-up offscreen render so the render context exists before reset().
        self.sim.render(camera_name=default_camera_name, width=self.image_size, height=self.image_size, depth=False,
                        mode='offscreen')
        self.use_auxiliary_loss = use_auxiliary_loss
        self.action_space = spaces.Box(-1., 1., shape=(n_actions,), dtype='float32')
        obs = self.reset()
        # NOTE(review): desired_goal deliberately reuses the achieved_goal
        # shape (both goal representations share it) — confirm for subclasses.
        self.observation_space = spaces.Dict(dict(
            desired_goal=spaces.Box(-np.inf, np.inf, shape=obs['achieved_goal'].shape, dtype='float32'),
            achieved_goal=spaces.Box(-np.inf, np.inf, shape=obs['achieved_goal'].shape, dtype='float32'),
            observation=spaces.Box(-np.inf, np.inf, shape=obs['observation'].shape, dtype='float32'),
        ))
        self.goal_dim = np.prod(obs['achieved_goal'].shape)
        self.goal_state_dim = np.prod(self.goal_state.shape)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def set_state(self, qpos, qvel):
assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
old_state = self.sim.get_state()
new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,
old_state.act, old_state.udd_state)
self.sim.set_state(new_state)
self.sim.forward()
def compute_reward(self, achieved_goal, desired_goal, info):
# Compute distance between goal and the achieved goal. (only one of them)
if self.use_true_reward:
achieved_goal = info['ag_state']
desired_goal = info['g_state']
achieved_goal = achieved_goal.reshape([-1, self.goal_state_dim])
desired_goal = desired_goal.reshape([-1, self.goal_state_dim])
d_threshold = self.distance_threshold
else:
achieved_goal = achieved_goal.reshape([-1, self.goal_dim])
desired_goal = desired_goal.reshape([-1, self.goal_dim])
d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)
if self.reward_type == 'sparse':
return -(d > d_threshold).astype(np.float32)
else:
return -d
def compute_reward_zero(self, achieved_goal, desired_goal, info):
if self.use_true_reward:
achieved_goal = info['ag_state']
desired_goal = info['g_state']
achieved_goal = achieved_goal.reshape([-1, self.goal_state_dim])
desired_goal = desired_goal.reshape([-1, self.goal_state_dim])
else:
achieved_goal = achieved_goal.reshape([-1, self.goal_dim])
desired_goal = desired_goal.reshape([-1, self.goal_dim])
assert achieved_goal.shape == desired_goal.shape
return np.alltrue(np.equal(achieved_goal, desired_goal), axis=-1) - 1.
# Start code to meet the RIG framework interface
def compute_rewards(self, actions, obs):
''' @brief: both 'action' and 'obs' are a batch of data. (The batch size could be 1)
'''
# Considering the obs is a dictionary, we will not check batch size here (I think I can't)
rewards = []
info = self.get_current_info()
for i in range(actions.shape[0]):
act = actions[i]
obser = {
k: v[i] for k, v in obs.items()
}
rewards.append(self.compute_reward(obser['achieved_goal'], obser['desired_goal'], info))
return np.array(rewards)
# End code to meet the RIG framework interface
def compute_reward_noisy(self, achieved_goal, desired_goal, info):
achieved_goal = info['ag_state']
desired_goal = info['g_state']
achieved_goal = achieved_goal.reshape([-1, self.goal_state_dim])
desired_goal = desired_goal.reshape([-1, self.goal_state_dim])
d_threshold = self.distance_threshold
d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)
bool_rewards = d <= d_threshold
if self.noisy_reward_fp is not None:
neg_idx = np.where(bool_rewards == False)[0]
fp_idx = np.where(np.random.random(size=len(neg_idx)) < self.noisy_reward_fp)[0]
bool_rewards[neg_idx[fp_idx]] = True
return bool_rewards.astype(np.float32) - 1.
elif self.noisy_reward_fn is not None:
pos_idx = np.where(bool_rewards == True)[0]
fn_idx = np.where(np.random.random(size=len(pos_idx)) < self.noisy_reward_fn)[0]
bool_rewards[pos_idx[fn_idx]] = False
return bool_rewards.astype(np.float32) - 1.
else:
raise NotImplementedError
# This is for test purpose only
# def compute_reward_noisy(self, achieved_goal, desired_goal, info, bool_rewards=None):
# if bool_rewards is None:
# achieved_goal = info['ag_state']
# desired_goal = info['g_state']
# achieved_goal = achieved_goal.reshape([-1, self.goal_state_dim])
# desired_goal = desired_goal.reshape([-1, self.goal_state_dim])
# d_threshold = self.distance_threshold
# d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)
# bool_rewards = d <= d_threshold
# else:
# bool_rewards = bool_rewards.copy()
# if self.noisy_reward_fp is not None:
# neg_idx = np.where(bool_rewards == False)[0]
# # print('neg_idx', neg_idx)
# fp_idx = np.where(np.random.random(size=len(neg_idx)) < self.noisy_reward_fp)[0]
# # print('fp_idx', fp_idx)
# bool_rewards[neg_idx[fp_idx]] = True
# # print('bool_noisy_rewards', bool_rewards)
# return bool_rewards.astype(np.float32) - 1.
# elif self.noisy_reward_fn is not None:
# pos_idx = np.where(bool_rewards == True)[0]
# fn_idx = np.where(np.random.random(size=len(pos_idx)) < self.noisy_reward_fn)[0]
# bool_rewards[pos_idx[fn_idx]] = False
# return bool_rewards.astype(np.float32) - 1.
# else:
# raise NotImplementedError
# methods to override:
# ----------------------------
    def _reset_sim(self):
        """Resets a simulation and indicates whether or not it was successful.
        If a reset was unsuccessful (e.g. if a randomized state caused an error in the
        simulation), this method should indicate such a failure by returning False.
        In such a case, this method will be called again to attempt the reset again.
        """
        raise NotImplementedError
    def _get_obs(self):
        """Return the current observation dict (observation / achieved_goal / desired_goal)."""
        raise NotImplementedError
    def _set_action(self, ctrl):
        """Apply the control `ctrl` to the simulator (called before `sim.step()`)."""
        raise NotImplementedError
    def _step_callback(self):
        """A custom callback that is called after stepping the simulation. Can be used
        to enforce additional constraints on the simulation state.
        """
        pass
    def _set_camera(self):
        # Optional hook: configure cameras before the first offscreen render.
        pass
    def get_current_info(self):
        """
        :return: The true current state, 'ag_state', and goal state, 'g_state'
        """
        raise NotImplementedError
    def _viewer_setup(self):
        """
        This method is called when the viewer is initialized and after every reset.
        Optionally implement this method, if you need to tinker with camera position
        and so forth.
        """
        pass
    def _render_callback(self):
        """A custom callback that is called before rendering. Can be used
        to implement custom visualizations.
        """
        pass
    def _env_setup(self, initial_qpos):
        """Initial configuration of the environment. Can be used to configure initial state
        and extract information from the simulation.
        """
        pass
    def set_hidden_goal(self):
        """
        Hide the goal position from the image observation.
        """
        pass
def get_image_obs(self, depth=True, hide_overlay=True, camera_id=-1):
assert False
return
    def _sample_goal_state(self):
        """Samples a new goal in state space and returns it.

        Returns None by default, meaning no state-space goal is defined.
        """
        return None
# Core functions framework
# -----------------------------
    def reset(self):
        '''
        Attempt to reset the simulator. Since we randomize initial conditions, it
        is possible to get into a state with numerical issues (e.g. due to penetration or
        Gimbel lock) or we may not achieve an initial condition (e.g. an object is within the hand).
        In this case, we just keep randomizing until we eventually achieve a valid initial
        configuration.
        '''
        self.time_step = 0
        if not self.with_goal:
            self.set_hidden_goal()
        # Sample a new goal; None means the environment defines no state-space goal.
        goal_state = self._sample_goal_state()
        if goal_state is None:
            self.goal_state = None
        else:
            self.goal_state = goal_state.copy()
        # Retry until the subclass reports a valid initial configuration.
        did_reset_sim = False
        while not did_reset_sim:
            did_reset_sim = self._reset_sim()
        return self._get_obs()
def step(self, action):
action = np.clip(action, self.action_space.low, self.action_space.high)
if self.use_auxiliary_loss:
assert self.use_visual_observation
self._set_action(action)
self.sim.step()
self._step_callback()
obs = self._get_obs()
# transformed_img, transformation = self.random_image_transformation(next_frame)
# TODO for now, comment out all other auxiliary losses except action prediction
aug_info = {
'action_taken': action,
# 'transformed_frame': transformed_img.flatten(),
# 'transformation': transformation
}
else:
self._set_action(action)
self.sim.step()
self._step_callback()
obs = self._get_obs()
aug_info = {}
state_info = self.get_current_info()
info = {**aug_info, **state_info}
reward = self.compute_reward(obs['achieved_goal'], obs['desired_goal'], info)
self.time_step += 1
# Episode ends only when the horizon is reached
done = False
if self.time_step >= self.horizon:
done = True
return obs, reward, done, info
def get_initial_info(self):
state_info = self.get_current_info()
if self.use_auxiliary_loss:
aug_info = {
'action_taken': np.zeros(self.action_space.shape),
# 'transformed_frame': transformed_img.flatten(),
# 'transformation': transformation
}
return {**aug_info, **state_info}
else:
return state_info
def _get_viewer(self):
if self.viewer is None:
self.viewer = MjViewer(self.sim)
self._viewer_setup()
return self.viewer
def render(self, mode='human', image_size=None, depth=True, camera_name=None):
self._render_callback()
if camera_name is None:
camera_name = self.default_camera_name
if image_size is None:
image_size = self.image_size
if mode == 'rgb_array':
# could be a bug code
# self.sim.render_contexts[0]._set_mujoco_buffers()
if depth:
image, depth = self.sim.render(camera_name=camera_name, width=image_size, height=image_size, depth=True)
# id = self.sim.model.camera_name2id('external_camera_0')
# print(self.sim.model.cam_fovy)
rgbd_data = np.dstack([image, depth])
return rgbd_data[::-1, :, :]
else:
image = self.sim.render(camera_name=camera_name, width=image_size, height=image_size, depth=False)
return image[::-1, :, :]
elif mode == 'human':
return self._get_viewer().render()
# Auxiliary Reward Methods
# ----------------------------
@staticmethod
def random_image_transformation(image, max_translation=10, max_angle=30):
angle = np.random.uniform(-max_angle, max_angle)
translation_x = np.random.uniform(-max_translation, max_translation)
translation_y = np.random.uniform(-max_translation, max_translation)
width = image.shape[1]
height = image.shape[0]
M1 = cv.getRotationMatrix2D((width / 2, height / 2), angle, 1.0)
M2 = np.float32([[1, 0, translation_x], [0, 1, translation_y]])
transformed_img = cv.warpAffine(image, M1 + M2, (image.shape[1], image.shape[0]))
return transformed_img, np.asarray([angle, translation_x, translation_y])
# Helper Functions
# ----------------------------
def _get_info_state(self, achieved_goal, desired_goal):
# Given g, ag in state space and return the distance and success
achieved_goal = achieved_goal.reshape([-1, self.goal_state_dim])
desired_goal = desired_goal.reshape([-1, self.goal_state_dim])
d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)
return d, (d <= self.distance_threshold).astype(np.float32)
def _get_info_obs(self, achieved_goal_obs, desired_goal_obs):
# Given g, ag in state space and return the distance and success
achieved_goal_obs = achieved_goal_obs.reshape([-1, self.goal_dim])
desired_goal_obs = desired_goal_obs.reshape([-1, self.goal_dim])
d = np.linalg.norm(achieved_goal_obs - desired_goal_obs, axis=-1)
return d, (d <= self.distance_threshold_obs).astype(np.float32)
def set_camera_location(self, camera_name=None, pos=[0.0, 0.0, 0.0]):
id = self.sim.model.camera_name2id(camera_name)
self.sim.model.cam_pos[id] = pos
def set_camera_fov(self, camera_name=None, fovy=50.0):
id = self.sim.model.camera_name2id(camera_name)
self.sim.model.cam_fovy[id] = fovy
def set_camera_orientation(self, camera_name=None, orientation_quat=[0, 0, 0, 0]):
id = self.sim.model.camera_name2id(camera_name)
self.sim.model.cam_quat[id] = orientation_quat
# Start Adding interface for multiworld environment collection
def initialize_camera(self, init_fctn):
# sim = self.sim
# # viewer = mujoco_py.MjRenderContextOffscreen(sim, device_id=self.device_id)
# viewer = mujoco_py.MjViewer(sim)
# init_fctn(viewer.cam)
# sim.add_render_context(viewer)
pass
def _get_env_state(self):
''' According to multiworld, there is a base class.
But this is roughly already the base class along the inheritance chain.
I put the implementation here.
'''
joint_state = self.sim.get_state()
mocap_state = self.data.mocap_pos, self.data.mocap_quat
state = joint_state, mocap_state
return copy.deepcopy(state)
def get_env_state(self):
base_state = self._get_env_state()
goal = self.goal_state.copy()
return base_state, goal
def _set_env_state(self, state):
raise NotImplementedError
def set_env_state(self, state):
base_state, goal = state
self._set_env_state(base_state) # from the child class
self.goal_state = goal
self._reset_sim()
def sample_goals(self, num_batches):
''' Return a dict with each batch of goals.
The dict includes keys: 'desired_goal', 'state_desired_goal'
'''
desired_goal = []
state_desired_goal = []
for _ in range(num_batches):
goal = self._sample_goal_state()
desired_goal.append(goal)
state_desired_goal.append(goal)
# form it as a dict and return
return dict(
desired_goal= desired_goal,
state_desired_goal= state_desired_goal,
)
# End Adding interface for multiworld environment collection
| [
"numpy.clip",
"numpy.prod",
"numpy.equal",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"mujoco_py.MjViewer",
"gym.utils.seeding.np_random",
"mujoco_py.MjSim",
"os.path.exists",
"mujoco_py.MjSimState",
"mujoco_py.load_model_from_path",
"numpy.where",
"numpy.asarray",
"cv2.warpAff... | [((1139, 1169), 'mujoco_py.load_model_from_path', 'load_model_from_path', (['fullpath'], {}), '(fullpath)\n', (1159, 1169), False, 'from mujoco_py import load_model_from_path, MjSim, MjViewer\n'), ((1189, 1228), 'mujoco_py.MjSim', 'MjSim', (['self.model'], {'nsubsteps': 'n_substeps'}), '(self.model, nsubsteps=n_substeps)\n', (1194, 1228), False, 'from mujoco_py import load_model_from_path, MjSim, MjViewer\n'), ((2985, 3043), 'gym.spaces.Box', 'spaces.Box', (['(-1.0)', '(1.0)'], {'shape': '(n_actions,)', 'dtype': '"""float32"""'}), "(-1.0, 1.0, shape=(n_actions,), dtype='float32')\n", (2995, 3043), False, 'from gym import error, spaces\n'), ((3469, 3504), 'numpy.prod', 'np.prod', (["obs['achieved_goal'].shape"], {}), "(obs['achieved_goal'].shape)\n", (3476, 3504), True, 'import numpy as np\n'), ((3535, 3565), 'numpy.prod', 'np.prod', (['self.goal_state.shape'], {}), '(self.goal_state.shape)\n', (3542, 3565), True, 'import numpy as np\n'), ((3629, 3652), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (3646, 3652), False, 'from gym.utils import seeding\n'), ((3855, 3944), 'mujoco_py.MjSimState', 'mujoco_py.MjSimState', (['old_state.time', 'qpos', 'qvel', 'old_state.act', 'old_state.udd_state'], {}), '(old_state.time, qpos, qvel, old_state.act, old_state.\n udd_state)\n', (3875, 3944), False, 'import mujoco_py\n'), ((4684, 4737), 'numpy.linalg.norm', 'np.linalg.norm', (['(achieved_goal - desired_goal)'], {'axis': '(-1)'}), '(achieved_goal - desired_goal, axis=-1)\n', (4698, 4737), True, 'import numpy as np\n'), ((6138, 6155), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (6146, 6155), True, 'import numpy as np\n'), ((6559, 6612), 'numpy.linalg.norm', 'np.linalg.norm', (['(achieved_goal - desired_goal)'], {'axis': '(-1)'}), '(achieved_goal - desired_goal, axis=-1)\n', (6573, 6612), True, 'import numpy as np\n'), ((11849, 11911), 'numpy.clip', 'np.clip', (['action', 'self.action_space.low', 
'self.action_space.high'], {}), '(action, self.action_space.low, self.action_space.high)\n', (11856, 11911), True, 'import numpy as np\n'), ((14856, 14896), 'numpy.random.uniform', 'np.random.uniform', (['(-max_angle)', 'max_angle'], {}), '(-max_angle, max_angle)\n', (14873, 14896), True, 'import numpy as np\n'), ((14921, 14973), 'numpy.random.uniform', 'np.random.uniform', (['(-max_translation)', 'max_translation'], {}), '(-max_translation, max_translation)\n', (14938, 14973), True, 'import numpy as np\n'), ((14998, 15050), 'numpy.random.uniform', 'np.random.uniform', (['(-max_translation)', 'max_translation'], {}), '(-max_translation, max_translation)\n', (15015, 15050), True, 'import numpy as np\n'), ((15128, 15187), 'cv2.getRotationMatrix2D', 'cv.getRotationMatrix2D', (['(width / 2, height / 2)', 'angle', '(1.0)'], {}), '((width / 2, height / 2), angle, 1.0)\n', (15150, 15187), True, 'import cv2 as cv\n'), ((15202, 15260), 'numpy.float32', 'np.float32', (['[[1, 0, translation_x], [0, 1, translation_y]]'], {}), '([[1, 0, translation_x], [0, 1, translation_y]])\n', (15212, 15260), True, 'import numpy as np\n'), ((15288, 15351), 'cv2.warpAffine', 'cv.warpAffine', (['image', '(M1 + M2)', '(image.shape[1], image.shape[0])'], {}), '(image, M1 + M2, (image.shape[1], image.shape[0]))\n', (15301, 15351), True, 'import cv2 as cv\n'), ((15782, 15835), 'numpy.linalg.norm', 'np.linalg.norm', (['(achieved_goal - desired_goal)'], {'axis': '(-1)'}), '(achieved_goal - desired_goal, axis=-1)\n', (15796, 15835), True, 'import numpy as np\n'), ((16204, 16265), 'numpy.linalg.norm', 'np.linalg.norm', (['(achieved_goal_obs - desired_goal_obs)'], {'axis': '(-1)'}), '(achieved_goal_obs - desired_goal_obs, axis=-1)\n', (16218, 16265), True, 'import numpy as np\n'), ((17612, 17632), 'copy.deepcopy', 'copy.deepcopy', (['state'], {}), '(state)\n', (17625, 17632), False, 'import copy\n'), ((1032, 1053), 'os.path.exists', 'path.exists', (['fullpath'], {}), '(fullpath)\n', (1043, 1053), 
False, 'from os import path\n'), ((13581, 13599), 'mujoco_py.MjViewer', 'MjViewer', (['self.sim'], {}), '(self.sim)\n', (13589, 13599), False, 'from mujoco_py import load_model_from_path, MjSim, MjViewer\n'), ((15384, 15433), 'numpy.asarray', 'np.asarray', (['[angle, translation_x, translation_y]'], {}), '([angle, translation_x, translation_y])\n', (15394, 15433), True, 'import numpy as np\n'), ((966, 991), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (981, 991), False, 'import os\n'), ((5453, 5490), 'numpy.equal', 'np.equal', (['achieved_goal', 'desired_goal'], {}), '(achieved_goal, desired_goal)\n', (5461, 5490), True, 'import numpy as np\n'), ((6720, 6751), 'numpy.where', 'np.where', (['(bool_rewards == False)'], {}), '(bool_rewards == False)\n', (6728, 6751), True, 'import numpy as np\n'), ((13239, 13272), 'numpy.zeros', 'np.zeros', (['self.action_space.shape'], {}), '(self.action_space.shape)\n', (13247, 13272), True, 'import numpy as np\n'), ((14355, 14380), 'numpy.dstack', 'np.dstack', (['[image, depth]'], {}), '([image, depth])\n', (14364, 14380), True, 'import numpy as np\n'), ((3146, 3224), 'gym.spaces.Box', 'spaces.Box', (['(-np.inf)', 'np.inf'], {'shape': "obs['achieved_goal'].shape", 'dtype': '"""float32"""'}), "(-np.inf, np.inf, shape=obs['achieved_goal'].shape, dtype='float32')\n", (3156, 3224), False, 'from gym import error, spaces\n'), ((3252, 3330), 'gym.spaces.Box', 'spaces.Box', (['(-np.inf)', 'np.inf'], {'shape': "obs['achieved_goal'].shape", 'dtype': '"""float32"""'}), "(-np.inf, np.inf, shape=obs['achieved_goal'].shape, dtype='float32')\n", (3262, 3330), False, 'from gym import error, spaces\n'), ((3356, 3432), 'gym.spaces.Box', 'spaces.Box', (['(-np.inf)', 'np.inf'], {'shape': "obs['observation'].shape", 'dtype': '"""float32"""'}), "(-np.inf, np.inf, shape=obs['observation'].shape, dtype='float32')\n", (3366, 3432), False, 'from gym import error, spaces\n'), ((7022, 7052), 'numpy.where', 'np.where', 
(['(bool_rewards == True)'], {}), '(bool_rewards == True)\n', (7030, 7052), True, 'import numpy as np\n')] |
'''A wrapper class for optimizer '''
import numpy as np
class ScheduledOptim(object):
'''A simple wrapper class for learning rate scheduling'''
def __init__(self, optimizer, d_model, n_warmup_steps):
self.optimizer = optimizer
self.d_model = d_model
self.n_warmup_steps = n_warmup_steps
self.n_current_steps = 0
def step(self):
"Step by the inner optimizer"
self.optimizer.step()
def zero_grad(self):
"Zero out the gradients by the inner optimizer"
self.optimizer.zero_grad()
def update_learning_rate(self):
''' Learning rate scheduling per step '''
self.n_current_steps += 1
new_lr = np.power(self.d_model, -0.5) * np.min([
np.power(self.n_current_steps, -0.5),
np.power(self.n_warmup_steps, -1.5) * self.n_current_steps])
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr
| [
"numpy.power"
] | [((723, 751), 'numpy.power', 'np.power', (['self.d_model', '(-0.5)'], {}), '(self.d_model, -0.5)\n', (731, 751), True, 'import numpy as np\n'), ((776, 812), 'numpy.power', 'np.power', (['self.n_current_steps', '(-0.5)'], {}), '(self.n_current_steps, -0.5)\n', (784, 812), True, 'import numpy as np\n'), ((827, 862), 'numpy.power', 'np.power', (['self.n_warmup_steps', '(-1.5)'], {}), '(self.n_warmup_steps, -1.5)\n', (835, 862), True, 'import numpy as np\n')] |
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import sys
import pickle
from typing import List
import numpy as np
from jina.executors.devices import TorchDevice
from jina.excepts import PretrainedModelFileDoesNotExist
from jina.executors.decorators import batching_multi_input, as_ndarray
from jina.executors.encoders.multimodal import BaseMultiModalEncoder
sys.path.append(".")
from img_text_composition_models import TIRG
class TirgMultiModalEncoder(TorchDevice, BaseMultiModalEncoder):
def __init__(self, model_path: str = 'checkpoint.pth',
texts_path: str = 'texts.pkl',
positional_modality: List[str] = ['image', 'text'],
channel_axis: int = -1,
*args, **kwargs):
"""
:param model_path: the path where the model is stored.
"""
super().__init__(*args, **kwargs)
self.model_path = model_path
self.texts_path = texts_path
self.positional_modality = positional_modality
self.channel_axis = channel_axis
# axis 0 is the batch
self._default_channel_axis = 1
def post_init(self):
super().post_init()
import torch
if self.model_path and os.path.exists(self.model_path):
with open(self.texts_path, 'rb') as fp:
texts = pickle.load(fp)
self.model = TIRG(texts, 512)
model_sd = torch.load(self.model_path, map_location=torch.device('cpu'))
self.model.load_state_dict(model_sd['model_state_dict'])
self.model.eval()
self.to_device(self.model)
else:
raise PretrainedModelFileDoesNotExist(f'model {self.model_path} does not exist')
def _get_features(self, data):
import torch
visual_data = data[(self.positional_modality.index('image'))]
if self.channel_axis != self._default_channel_axis:
visual_data = np.moveaxis(visual_data, self.channel_axis, self._default_channel_axis)
textual_data = data[(self.positional_modality.index('text'))]
visual_data = torch.from_numpy(np.stack(visual_data)).float()
textual_data = np.stack(textual_data).tolist()
if self.on_gpu:
visual_data = visual_data.cuda()
textual_data = textual_data.cuda()
img_features = self.model.extract_img_feature(visual_data)
text_features = self.model.extract_text_feature(textual_data)
return self.model.compose_img_text_features(img_features, text_features)
@batching_multi_input
@as_ndarray
def encode(self, *data: 'np.ndarray', **kwargs) -> 'np.ndarray':
feature = self._get_features(data).detach()
if self.on_gpu:
feature = feature.cpu()
feature = feature.numpy()
return feature | [
"os.path.exists",
"pickle.load",
"numpy.stack",
"numpy.moveaxis",
"jina.excepts.PretrainedModelFileDoesNotExist",
"img_text_composition_models.TIRG",
"sys.path.append",
"torch.device"
] | [((428, 448), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (443, 448), False, 'import sys\n'), ((1288, 1319), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (1302, 1319), False, 'import os\n'), ((1438, 1454), 'img_text_composition_models.TIRG', 'TIRG', (['texts', '(512)'], {}), '(texts, 512)\n', (1442, 1454), False, 'from img_text_composition_models import TIRG\n'), ((1710, 1784), 'jina.excepts.PretrainedModelFileDoesNotExist', 'PretrainedModelFileDoesNotExist', (['f"""model {self.model_path} does not exist"""'], {}), "(f'model {self.model_path} does not exist')\n", (1741, 1784), False, 'from jina.excepts import PretrainedModelFileDoesNotExist\n'), ((1998, 2069), 'numpy.moveaxis', 'np.moveaxis', (['visual_data', 'self.channel_axis', 'self._default_channel_axis'], {}), '(visual_data, self.channel_axis, self._default_channel_axis)\n', (2009, 2069), True, 'import numpy as np\n'), ((1397, 1412), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1408, 1412), False, 'import pickle\n'), ((2234, 2256), 'numpy.stack', 'np.stack', (['textual_data'], {}), '(textual_data)\n', (2242, 2256), True, 'import numpy as np\n'), ((1519, 1538), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1531, 1538), False, 'import torch\n'), ((2180, 2201), 'numpy.stack', 'np.stack', (['visual_data'], {}), '(visual_data)\n', (2188, 2201), True, 'import numpy as np\n')] |
import sys
sys.path.append('./')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import multiprocessing as multi
import optuna
import changefinder
import bocpd
import dmdl.sdmdl as sdmdl
import dmdl.hsdmdl2 as hsdmdl2
import tsmdl.aw2s_mdl as aw2s_mdl
import utils.sdmdl_nml as sdmdl_nml
import utils.hsdmdl2_nml as hsdmdl2_nml
from multiprocessing import Pool
from functools import partial
from copy import deepcopy
from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score
def _calc_metrics(idx_data, dataset, changepoints, tolerance_delay, threshold, retrospective): # calculate the metrics
_retrospective = deepcopy(retrospective)
scores = _retrospective.calc_scores(dataset[idx_data])
F1_score, precision, recall = calc_F1_score(
scores, changepoints, tolerance_delay, threshold)
return F1_score, precision, recall
# obtain the optimal threshold
def calc_opt_threshold(train, changepoints, tolerance_delay, retrospective):
_retrospective = deepcopy(retrospective)
scores = _retrospective.calc_scores(train)
_, _, _, opt_threshold = calc_F1_score(
scores, changepoints, tolerance_delay)
return opt_threshold
def _objective_CF(trial, train, changepoints, tolerance_delay): # ChangeFinder
# hyperparameters
r = trial.suggest_uniform('r', 0.01, 0.99)
order = trial.suggest_int('order', 1, 20)
smooth = trial.suggest_int('smooth', 3, 20)
retrospective = changefinder.Retrospective(r=r, order=order, smooth=smooth)
scores = retrospective.calc_scores(train)
F1_score, _, _, _ = calc_F1_score(scores, changepoints, tolerance_delay)
return -F1_score
def conduct_CF(n_trials, n_samples, dataset, changepoints, tolerance_delay): # ChangeFinder
# hyperparameter tuning
objective_CF = partial(_objective_CF, train=dataset[0],
changepoints=changepoints, tolerance_delay=tolerance_delay)
study = optuna.create_study()
study.optimize(objective_CF, n_trials=n_trials, n_jobs=-1)
opt_r = study.best_params['r']
opt_order = study.best_params['order']
opt_smooth = study.best_params['smooth']
# optimal threshold
retrospective = changefinder.Retrospective(
r=opt_r, order=opt_order, smooth=opt_smooth)
opt_threshold = calc_opt_threshold(train=dataset[0], changepoints=changepoints,
tolerance_delay=tolerance_delay, retrospective=retrospective)
# calculate metrics
calc_metrics = partial(_calc_metrics, dataset=dataset, changepoints=changepoints,
tolerance_delay=tolerance_delay, threshold=opt_threshold, retrospective=retrospective)
p = Pool(multi.cpu_count() - 1)
args = list(range(1, n_samples))
res = np.array(p.map(calc_metrics, args))
p.close()
# result
print("F1 score: ", np.mean(res[:, 0]), "±", np.std(res[:, 0]))
print("precision: ", np.mean(res[:, 1]), "±", np.std(res[:, 1]))
print("recall: ", np.mean(res[:, 2]), "±", np.std(res[:, 2]))
row = pd.DataFrame({"method": ["ChangeFinder"], "F1_score_mean": np.mean(res[:, 0]), "F1_score_std": np.std(res[:, 0]),
"precision_mean": np.mean(res[:, 1]), "precision_std": np.std(res[:, 1]),
"recall_mean": np.mean(res[:, 2]), "recall_std": np.std(res[:, 2])})
return row
def _objective_BOCPD(trial, train, changepoints, tolerance_delay): # BOCPD
lam = trial.suggest_int('lam', 2, 1000)
alpha = trial.suggest_uniform('alpha', 0.01, 10)
beta = trial.suggest_uniform('beta', 0.01, 10)
kappa = trial.suggest_uniform('kappa', 0.01, 10)
mu = trial.suggest_uniform('mu', 0.01, 10)
h = partial(bocpd.constant_hazard, lam)
lik = bocpd.StudentT(alpha, beta, kappa, mu)
retrospective = bocpd.Retrospective(hazard_func=h, likelihood_func=lik)
scores = retrospective.calc_scores(train)
F1_score, _, _, _ = calc_F1_score(
scores, changepoints, tolerance_delay)
return -F1_score
def conduct_BOCPD(n_trials, n_samples, dataset, changepoints, tolerance_delay): # BOCPD
# hyperparameter tuning
objective_BOCPD = partial(_objective_BOCPD, train=dataset[0],
changepoints=changepoints, tolerance_delay=tolerance_delay)
study = optuna.create_study()
study.optimize(objective_BOCPD, n_trials=n_trials, n_jobs=-1)
opt_lam = study.best_params['lam']
opt_alpha = study.best_params['alpha']
opt_beta = study.best_params['beta']
opt_kappa = study.best_params['kappa']
opt_mu = study.best_params['mu']
# optimal threshold
h = partial(bocpd.constant_hazard, opt_lam)
lik = bocpd.StudentT(opt_alpha, opt_beta, opt_kappa, opt_mu)
retrospective = bocpd.Retrospective(hazard_func=h, likelihood_func=lik)
opt_threshold = calc_opt_threshold(train=dataset[0], changepoints=changepoints,
tolerance_delay=tolerance_delay, retrospective=retrospective)
# calculate metrics
calc_metrics = partial(_calc_metrics, dataset=dataset, changepoints=changepoints,
tolerance_delay=tolerance_delay, threshold=opt_threshold, retrospective=retrospective)
p = Pool(multi.cpu_count() - 1)
args = list(range(1, n_samples))
res = np.array(p.map(calc_metrics, args))
p.close()
# result
print("F1 score: ", np.mean(res[:, 0]), "±", np.std(res[:, 0]))
print("precision: ", np.mean(res[:, 1]), "±", np.std(res[:, 1]))
print("recall: ", np.mean(res[:, 2]), "±", np.std(res[:, 2]))
row = pd.DataFrame({"method": ["BOCPD"], "F1_score_mean": np.mean(res[:, 0]), "F1_score_std": np.std(res[:, 0]),
"precision_mean": np.mean(res[:, 1]), "precision_std": np.std(res[:, 1]),
"recall_mean": np.mean(res[:, 2]), "recall_std": np.std(res[:, 2])})
return row
# Hierarchical
def _objective_Hierarchical(trial, train, changepoints, tolerance_delay, order):
nml_gaussian = partial(hsdmdl2_nml.nml_gaussian)
min_datapoints = 5
# delta_0だけはいかなる場合でもチューニングしなければならない
delta_0 = trial.suggest_uniform('delta_0', 0.000001, 10.00)
if order == 1:
delta_1 = trial.suggest_uniform('delta_1', 0.000001, 1.00)
else:
delta_1 = 0.05
if order == 2:
delta_2 = trial.suggest_uniform('delta_2', 0.000001, 1.00)
else:
delta_2 = 0.05
retrospective = hsdmdl2.Retrospective(encoding_func=nml_gaussian, d=2, M=5, min_datapoints=min_datapoints, delta_0=delta_0,
delta_1=delta_1, delta_2=delta_2, how_to_drop='all', order=order, reliability=True)
alarms = retrospective.make_alarms(train)
scores = np.zeros(len(train))
scores[alarms] = 1
F1_score, _, _ = calc_F1_score(
scores, changepoints, tolerance_delay, tuned_threshold=0.5)
return -F1_score
# calculate the metrics
def _calc_Hierarchical_metrics(idx_data, dataset, changepoints, tolerance_delay, threshold, retrospective, order):
_retrospective = deepcopy(retrospective)
alarms = _retrospective.make_alarms(dataset[idx_data])
scores = np.zeros(len(dataset[idx_data]))
scores[alarms] = 1
F1_score, precision, recall = calc_F1_score(
scores, changepoints, tolerance_delay, threshold)
return F1_score, precision, recall
def conduct_Hierarchical(n_trials, n_samples, dataset, changepoints, tolerance_delay, order): # Hierarchical
# hyperparameter tuning
objective_Hierarchical = partial(_objective_Hierarchical, train=dataset[0],
changepoints=changepoints, tolerance_delay=tolerance_delay, order=order)
study = optuna.create_study()
study.optimize(objective_Hierarchical, n_trials=n_trials, n_jobs=-1)
opt_delta_0 = study.best_params['delta_0']
if order == 1:
opt_delta_1 = study.best_params['delta_1']
else:
opt_delta_1 = 0.05
if order == 2:
opt_delta_2 = study.best_params['delta_2']
else:
opt_delta_2 = 0.05
min_datapoints = 5
nml_gaussian = partial(hsdmdl2_nml.nml_gaussian)
retrospective = hsdmdl2.Retrospective(encoding_func=nml_gaussian, d=2, M=5, min_datapoints=min_datapoints, delta_0=opt_delta_0,
delta_1=opt_delta_1, delta_2=opt_delta_2, how_to_drop='all', order=True, reliability=True)
# calculate metrics
calc_metrics = partial(_calc_Hierarchical_metrics, dataset=dataset, changepoints=changepoints,
tolerance_delay=tolerance_delay, threshold=0.5, retrospective=retrospective, order=order)
p = Pool(multi.cpu_count() - 1)
args = list(range(1, n_samples))
res = np.array(p.map(calc_metrics, args))
p.close()
# result
print("F1 score: ", np.mean(res[:, 0]), "±", np.std(res[:, 0]))
print("precision: ", np.mean(res[:, 1]), "±", np.std(res[:, 1]))
print("recall: ", np.mean(res[:, 2]), "±", np.std(res[:, 2]))
method_name = "Hierarchical"
if order == 0:
method_name += "_0th"
elif order == 1:
method_name += "_1st"
else:
method_name += "_2nd"
row = pd.DataFrame({"method": [method_name], "F1_score_mean": np.mean(res[:, 0]), "F1_score_std": np.std(res[:, 0]),
"precision_mean": np.mean(res[:, 1]), "precision_std": np.std(res[:, 1]),
"recall_mean": np.mean(res[:, 2]), "recall_std": np.std(res[:, 2])})
return row
def _objective_AW2S_MDL(trial, train, changepoints, tolerance_delay):
window_size = trial.suggest_int('window_size', 10, 500)
sigma_given = trial.suggest_uniform('sigma_given', 0.1, 2)
nml_gaussian = partial(sdmdl_nml.nml_gaussian, mu_max=1e8,
div_min=1e-8, div_max=1e8)
complexity_gaussian = partial(sdmdl_nml.complexity_gaussian, mu_max=1e8,
div_min=1e-8, div_max=1e8)
retrospective_first = sdmdl.Retrospective(h=window_size, encoding_func=nml_gaussian,
complexity_func=complexity_gaussian, order=0)
lnml_gaussian = partial(hsdmdl2_nml.lnml_gaussian, sigma_given=sigma_given)
retrospective_second = hsdmdl2.Retrospective(encoding_func=lnml_gaussian, d=2, M=5, min_datapoints=5, delta_0=0.05,
delta_1=0.05, delta_2=0.05, how_to_drop='all', order=0, reliability=True)
retrospective = aw2s_mdl.Retrospective(
retrospective_first, retrospective_second)
alarms = retrospective.make_alarms(train)
scores = np.zeros(len(train))
scores[alarms] = 1
F1_score, _, _ = calc_F1_score(
scores, changepoints, tolerance_delay, tuned_threshold=0.5)
return -F1_score
# calculate the metrics
def _calc_AW2S_MDL_metrics(idx_data, dataset, changepoints, tolerance_delay, threshold, retrospective):
_retrospective = deepcopy(retrospective)
alarms = _retrospective.make_alarms(dataset[idx_data])
scores = np.zeros(len(dataset[idx_data]))
scores[alarms] = 1
F1_score, precision, recall = calc_F1_score(
scores, changepoints, tolerance_delay, threshold)
return F1_score, precision, recall
def conduct_AW2S_MDL(n_trials, n_samples, dataset, changepoints, tolerance_delay): # AW2S-MDL
# hyperparameter tuning
objective_AW2S_MDL = partial(_objective_AW2S_MDL, train=dataset[0],
changepoints=changepoints, tolerance_delay=tolerance_delay)
study = optuna.create_study()
study.optimize(objective_AW2S_MDL, n_trials=n_trials, n_jobs=-1)
opt_window_size = study.best_params['window_size']
opt_sigma_given = study.best_params['sigma_given']
nml_gaussian = partial(sdmdl_nml.nml_gaussian, mu_max=1e8,
div_min=1e-8, div_max=1e8)
complexity_gaussian = partial(sdmdl_nml.complexity_gaussian, mu_max=1e8,
div_min=1e-8, div_max=1e8)
retrospective_first = sdmdl.Retrospective(h=opt_window_size, encoding_func=nml_gaussian,
complexity_func=complexity_gaussian, order=0)
lnml_gaussian = partial(hsdmdl2_nml.lnml_gaussian,
sigma_given=opt_sigma_given)
retrospective_second = hsdmdl2.Retrospective(encoding_func=lnml_gaussian, d=2, M=5, min_datapoints=5, delta_0=0.05,
delta_1=0.05, delta_2=0.05, how_to_drop='all', order=0, reliability=True)
retrospective = aw2s_mdl.Retrospective(
retrospective_first, retrospective_second)
# calculate metrics
calc_metrics = partial(_calc_AW2S_MDL_metrics, dataset=dataset, changepoints=changepoints,
tolerance_delay=tolerance_delay, threshold=0.5, retrospective=retrospective)
p = Pool(multi.cpu_count() - 1)
args = list(range(1, n_samples))
res = np.array(p.map(calc_metrics, args))
p.close()
# result
print("F1 score: ", np.mean(res[:, 0]), "±", np.std(res[:, 0]))
print("precision: ", np.mean(res[:, 1]), "±", np.std(res[:, 1]))
print("recall: ", np.mean(res[:, 2]), "±", np.std(res[:, 2]))
row = pd.DataFrame({"method": ["AW2S_MDL"], "F1_score_mean": np.mean(res[:, 0]), "F1_score_std": np.std(res[:, 0]),
"precision_mean": np.mean(res[:, 1]), "precision_std": np.std(res[:, 1]),
"recall_mean": np.mean(res[:, 2]), "recall_std": np.std(res[:, 2])})
return row
def conduct_experiment(n_trials, n_samples, func, transition_period, tolerance_delay, random_seed=0):
# fix seed for reproducibility
np.random.seed(random_seed)
mu_max = 1000
div_min = 1e-8
div_max = 1e8
df_result = pd.DataFrame()
print("Create Dataset")
dataset, changepoints = create_dataset(n_samples, func, transition_period)
print("ChangeFinder")
row = conduct_CF(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
changepoints=changepoints, tolerance_delay=tolerance_delay)
df_result = pd.concat([df_result, row], axis=0)
print("BOCPD")
row = conduct_BOCPD(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
changepoints=changepoints, tolerance_delay=tolerance_delay)
df_result = pd.concat([df_result, row], axis=0)
print("Hierarchical 0th")
row = conduct_Hierarchical(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
changepoints=changepoints, tolerance_delay=tolerance_delay, order=0)
df_result = pd.concat([df_result, row], axis=0)
print("Hierarchical 1st")
row = conduct_Hierarchical(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
changepoints=changepoints, tolerance_delay=tolerance_delay, order=1)
df_result = pd.concat([df_result, row], axis=0)
print("Hierarchical 2nd")
row = conduct_Hierarchical(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
changepoints=changepoints, tolerance_delay=tolerance_delay, order=2)
df_result = pd.concat([df_result, row], axis=0)
print("AW2S_MDL")
row = conduct_AW2S_MDL(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
changepoints=changepoints, tolerance_delay=tolerance_delay)
df_result = pd.concat([df_result, row], axis=0)
return df_result
if __name__ == '__main__':
# parameters
random_seed = 0
n_trials = 5
n_samples = 5
tolerance_delay = 100
transition_periods = [0, 100, 200, 300, 400]
func_names = [(variance_changing, "variance_changing"),
(mean_changing, "mean_changing")]
df_results = pd.DataFrame()
for transition_period in transition_periods:
for func_name in func_names:
df_result = conduct_experiment(n_trials=n_trials, n_samples=n_samples, func=func_name[0],
transition_period=transition_period, tolerance_delay=tolerance_delay,
random_seed=random_seed)
df_result["dataset"] = func_name[1]
df_result["transition_period"] = transition_period
df_result["n_trials"] = n_trials
df_result["n_samples"] = n_samples
df_result["tolerance_delay"] = tolerance_delay
df_result["random_seed"] = random_seed
df_result = df_result.reindex(columns=["method", "dataset", "transition_period", "F1_score_mean", "F1_score_std", "precision_mean",
"precision_std", "recall_mean", "recall_std", "n_trials", "n_samples", "tolerance_delay", "random_seed"])
df_results = pd.concat([df_results, df_result], axis=0)
df_results.to_csv("./results/F1_score_results.csv",
index=False)
| [
"bocpd.Retrospective",
"dmdl.hsdmdl2.Retrospective",
"numpy.mean",
"utils.utils.create_dataset",
"changefinder.Retrospective",
"tsmdl.aw2s_mdl.Retrospective",
"numpy.std",
"multiprocessing.cpu_count",
"utils.utils.calc_F1_score",
"functools.partial",
"bocpd.StudentT",
"numpy.random.seed",
"c... | [((11, 32), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (26, 32), False, 'import sys\n'), ((671, 694), 'copy.deepcopy', 'deepcopy', (['retrospective'], {}), '(retrospective)\n', (679, 694), False, 'from copy import deepcopy\n'), ((788, 851), 'utils.utils.calc_F1_score', 'calc_F1_score', (['scores', 'changepoints', 'tolerance_delay', 'threshold'], {}), '(scores, changepoints, tolerance_delay, threshold)\n', (801, 851), False, 'from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score\n'), ((1031, 1054), 'copy.deepcopy', 'deepcopy', (['retrospective'], {}), '(retrospective)\n', (1039, 1054), False, 'from copy import deepcopy\n'), ((1131, 1183), 'utils.utils.calc_F1_score', 'calc_F1_score', (['scores', 'changepoints', 'tolerance_delay'], {}), '(scores, changepoints, tolerance_delay)\n', (1144, 1183), False, 'from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score\n'), ((1484, 1543), 'changefinder.Retrospective', 'changefinder.Retrospective', ([], {'r': 'r', 'order': 'order', 'smooth': 'smooth'}), '(r=r, order=order, smooth=smooth)\n', (1510, 1543), False, 'import changefinder\n'), ((1614, 1666), 'utils.utils.calc_F1_score', 'calc_F1_score', (['scores', 'changepoints', 'tolerance_delay'], {}), '(scores, changepoints, tolerance_delay)\n', (1627, 1666), False, 'from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score\n'), ((1831, 1935), 'functools.partial', 'partial', (['_objective_CF'], {'train': 'dataset[0]', 'changepoints': 'changepoints', 'tolerance_delay': 'tolerance_delay'}), '(_objective_CF, train=dataset[0], changepoints=changepoints,\n tolerance_delay=tolerance_delay)\n', (1838, 1935), False, 'from functools import partial\n'), ((1971, 1992), 'optuna.create_study', 'optuna.create_study', ([], {}), '()\n', (1990, 1992), False, 'import optuna\n'), ((2224, 2295), 'changefinder.Retrospective', 'changefinder.Retrospective', ([], {'r': 
'opt_r', 'order': 'opt_order', 'smooth': 'opt_smooth'}), '(r=opt_r, order=opt_order, smooth=opt_smooth)\n', (2250, 2295), False, 'import changefinder\n'), ((2534, 2696), 'functools.partial', 'partial', (['_calc_metrics'], {'dataset': 'dataset', 'changepoints': 'changepoints', 'tolerance_delay': 'tolerance_delay', 'threshold': 'opt_threshold', 'retrospective': 'retrospective'}), '(_calc_metrics, dataset=dataset, changepoints=changepoints,\n tolerance_delay=tolerance_delay, threshold=opt_threshold, retrospective\n =retrospective)\n', (2541, 2696), False, 'from functools import partial\n'), ((3735, 3770), 'functools.partial', 'partial', (['bocpd.constant_hazard', 'lam'], {}), '(bocpd.constant_hazard, lam)\n', (3742, 3770), False, 'from functools import partial\n'), ((3781, 3819), 'bocpd.StudentT', 'bocpd.StudentT', (['alpha', 'beta', 'kappa', 'mu'], {}), '(alpha, beta, kappa, mu)\n', (3795, 3819), False, 'import bocpd\n'), ((3840, 3895), 'bocpd.Retrospective', 'bocpd.Retrospective', ([], {'hazard_func': 'h', 'likelihood_func': 'lik'}), '(hazard_func=h, likelihood_func=lik)\n', (3859, 3895), False, 'import bocpd\n'), ((3967, 4019), 'utils.utils.calc_F1_score', 'calc_F1_score', (['scores', 'changepoints', 'tolerance_delay'], {}), '(scores, changepoints, tolerance_delay)\n', (3980, 4019), False, 'from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score\n'), ((4192, 4299), 'functools.partial', 'partial', (['_objective_BOCPD'], {'train': 'dataset[0]', 'changepoints': 'changepoints', 'tolerance_delay': 'tolerance_delay'}), '(_objective_BOCPD, train=dataset[0], changepoints=changepoints,\n tolerance_delay=tolerance_delay)\n', (4199, 4299), False, 'from functools import partial\n'), ((4338, 4359), 'optuna.create_study', 'optuna.create_study', ([], {}), '()\n', (4357, 4359), False, 'import optuna\n'), ((4662, 4701), 'functools.partial', 'partial', (['bocpd.constant_hazard', 'opt_lam'], {}), '(bocpd.constant_hazard, opt_lam)\n', (4669, 4701), 
False, 'from functools import partial\n'), ((4712, 4766), 'bocpd.StudentT', 'bocpd.StudentT', (['opt_alpha', 'opt_beta', 'opt_kappa', 'opt_mu'], {}), '(opt_alpha, opt_beta, opt_kappa, opt_mu)\n', (4726, 4766), False, 'import bocpd\n'), ((4787, 4842), 'bocpd.Retrospective', 'bocpd.Retrospective', ([], {'hazard_func': 'h', 'likelihood_func': 'lik'}), '(hazard_func=h, likelihood_func=lik)\n', (4806, 4842), False, 'import bocpd\n'), ((5072, 5234), 'functools.partial', 'partial', (['_calc_metrics'], {'dataset': 'dataset', 'changepoints': 'changepoints', 'tolerance_delay': 'tolerance_delay', 'threshold': 'opt_threshold', 'retrospective': 'retrospective'}), '(_calc_metrics, dataset=dataset, changepoints=changepoints,\n tolerance_delay=tolerance_delay, threshold=opt_threshold, retrospective\n =retrospective)\n', (5079, 5234), False, 'from functools import partial\n'), ((6048, 6081), 'functools.partial', 'partial', (['hsdmdl2_nml.nml_gaussian'], {}), '(hsdmdl2_nml.nml_gaussian)\n', (6055, 6081), False, 'from functools import partial\n'), ((6471, 6671), 'dmdl.hsdmdl2.Retrospective', 'hsdmdl2.Retrospective', ([], {'encoding_func': 'nml_gaussian', 'd': '(2)', 'M': '(5)', 'min_datapoints': 'min_datapoints', 'delta_0': 'delta_0', 'delta_1': 'delta_1', 'delta_2': 'delta_2', 'how_to_drop': '"""all"""', 'order': 'order', 'reliability': '(True)'}), "(encoding_func=nml_gaussian, d=2, M=5, min_datapoints=\n min_datapoints, delta_0=delta_0, delta_1=delta_1, delta_2=delta_2,\n how_to_drop='all', order=order, reliability=True)\n", (6492, 6671), True, 'import dmdl.hsdmdl2 as hsdmdl2\n'), ((6831, 6904), 'utils.utils.calc_F1_score', 'calc_F1_score', (['scores', 'changepoints', 'tolerance_delay'], {'tuned_threshold': '(0.5)'}), '(scores, changepoints, tolerance_delay, tuned_threshold=0.5)\n', (6844, 6904), False, 'from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score\n'), ((7098, 7121), 'copy.deepcopy', 'deepcopy', (['retrospective'], {}), 
'(retrospective)\n', (7106, 7121), False, 'from copy import deepcopy\n'), ((7285, 7348), 'utils.utils.calc_F1_score', 'calc_F1_score', (['scores', 'changepoints', 'tolerance_delay', 'threshold'], {}), '(scores, changepoints, tolerance_delay, threshold)\n', (7298, 7348), False, 'from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score\n'), ((7566, 7694), 'functools.partial', 'partial', (['_objective_Hierarchical'], {'train': 'dataset[0]', 'changepoints': 'changepoints', 'tolerance_delay': 'tolerance_delay', 'order': 'order'}), '(_objective_Hierarchical, train=dataset[0], changepoints=\n changepoints, tolerance_delay=tolerance_delay, order=order)\n', (7573, 7694), False, 'from functools import partial\n'), ((7739, 7760), 'optuna.create_study', 'optuna.create_study', ([], {}), '()\n', (7758, 7760), False, 'import optuna\n'), ((8141, 8174), 'functools.partial', 'partial', (['hsdmdl2_nml.nml_gaussian'], {}), '(hsdmdl2_nml.nml_gaussian)\n', (8148, 8174), False, 'from functools import partial\n'), ((8195, 8407), 'dmdl.hsdmdl2.Retrospective', 'hsdmdl2.Retrospective', ([], {'encoding_func': 'nml_gaussian', 'd': '(2)', 'M': '(5)', 'min_datapoints': 'min_datapoints', 'delta_0': 'opt_delta_0', 'delta_1': 'opt_delta_1', 'delta_2': 'opt_delta_2', 'how_to_drop': '"""all"""', 'order': '(True)', 'reliability': '(True)'}), "(encoding_func=nml_gaussian, d=2, M=5, min_datapoints=\n min_datapoints, delta_0=opt_delta_0, delta_1=opt_delta_1, delta_2=\n opt_delta_2, how_to_drop='all', order=True, reliability=True)\n", (8216, 8407), True, 'import dmdl.hsdmdl2 as hsdmdl2\n'), ((8484, 8662), 'functools.partial', 'partial', (['_calc_Hierarchical_metrics'], {'dataset': 'dataset', 'changepoints': 'changepoints', 'tolerance_delay': 'tolerance_delay', 'threshold': '(0.5)', 'retrospective': 'retrospective', 'order': 'order'}), '(_calc_Hierarchical_metrics, dataset=dataset, changepoints=\n changepoints, tolerance_delay=tolerance_delay, threshold=0.5,\n 
retrospective=retrospective, order=order)\n', (8491, 8662), False, 'from functools import partial\n'), ((9752, 9844), 'functools.partial', 'partial', (['sdmdl_nml.nml_gaussian'], {'mu_max': '(100000000.0)', 'div_min': '(1e-08)', 'div_max': '(100000000.0)'}), '(sdmdl_nml.nml_gaussian, mu_max=100000000.0, div_min=1e-08, div_max=\n 100000000.0)\n', (9759, 9844), False, 'from functools import partial\n'), ((9876, 9974), 'functools.partial', 'partial', (['sdmdl_nml.complexity_gaussian'], {'mu_max': '(100000000.0)', 'div_min': '(1e-08)', 'div_max': '(100000000.0)'}), '(sdmdl_nml.complexity_gaussian, mu_max=100000000.0, div_min=1e-08,\n div_max=100000000.0)\n', (9883, 9974), False, 'from functools import partial\n'), ((10014, 10126), 'dmdl.sdmdl.Retrospective', 'sdmdl.Retrospective', ([], {'h': 'window_size', 'encoding_func': 'nml_gaussian', 'complexity_func': 'complexity_gaussian', 'order': '(0)'}), '(h=window_size, encoding_func=nml_gaussian,\n complexity_func=complexity_gaussian, order=0)\n', (10033, 10126), True, 'import dmdl.sdmdl as sdmdl\n'), ((10190, 10249), 'functools.partial', 'partial', (['hsdmdl2_nml.lnml_gaussian'], {'sigma_given': 'sigma_given'}), '(hsdmdl2_nml.lnml_gaussian, sigma_given=sigma_given)\n', (10197, 10249), False, 'from functools import partial\n'), ((10277, 10453), 'dmdl.hsdmdl2.Retrospective', 'hsdmdl2.Retrospective', ([], {'encoding_func': 'lnml_gaussian', 'd': '(2)', 'M': '(5)', 'min_datapoints': '(5)', 'delta_0': '(0.05)', 'delta_1': '(0.05)', 'delta_2': '(0.05)', 'how_to_drop': '"""all"""', 'order': '(0)', 'reliability': '(True)'}), "(encoding_func=lnml_gaussian, d=2, M=5, min_datapoints\n =5, delta_0=0.05, delta_1=0.05, delta_2=0.05, how_to_drop='all', order=\n 0, reliability=True)\n", (10298, 10453), True, 'import dmdl.hsdmdl2 as hsdmdl2\n'), ((10514, 10579), 'tsmdl.aw2s_mdl.Retrospective', 'aw2s_mdl.Retrospective', (['retrospective_first', 'retrospective_second'], {}), '(retrospective_first, retrospective_second)\n', (10536, 10579), 
True, 'import tsmdl.aw2s_mdl as aw2s_mdl\n'), ((10715, 10788), 'utils.utils.calc_F1_score', 'calc_F1_score', (['scores', 'changepoints', 'tolerance_delay'], {'tuned_threshold': '(0.5)'}), '(scores, changepoints, tolerance_delay, tuned_threshold=0.5)\n', (10728, 10788), False, 'from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score\n'), ((10972, 10995), 'copy.deepcopy', 'deepcopy', (['retrospective'], {}), '(retrospective)\n', (10980, 10995), False, 'from copy import deepcopy\n'), ((11159, 11222), 'utils.utils.calc_F1_score', 'calc_F1_score', (['scores', 'changepoints', 'tolerance_delay', 'threshold'], {}), '(scores, changepoints, tolerance_delay, threshold)\n', (11172, 11222), False, 'from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score\n'), ((11421, 11531), 'functools.partial', 'partial', (['_objective_AW2S_MDL'], {'train': 'dataset[0]', 'changepoints': 'changepoints', 'tolerance_delay': 'tolerance_delay'}), '(_objective_AW2S_MDL, train=dataset[0], changepoints=changepoints,\n tolerance_delay=tolerance_delay)\n', (11428, 11531), False, 'from functools import partial\n'), ((11573, 11594), 'optuna.create_study', 'optuna.create_study', ([], {}), '()\n', (11592, 11594), False, 'import optuna\n'), ((11795, 11887), 'functools.partial', 'partial', (['sdmdl_nml.nml_gaussian'], {'mu_max': '(100000000.0)', 'div_min': '(1e-08)', 'div_max': '(100000000.0)'}), '(sdmdl_nml.nml_gaussian, mu_max=100000000.0, div_min=1e-08, div_max=\n 100000000.0)\n', (11802, 11887), False, 'from functools import partial\n'), ((11919, 12017), 'functools.partial', 'partial', (['sdmdl_nml.complexity_gaussian'], {'mu_max': '(100000000.0)', 'div_min': '(1e-08)', 'div_max': '(100000000.0)'}), '(sdmdl_nml.complexity_gaussian, mu_max=100000000.0, div_min=1e-08,\n div_max=100000000.0)\n', (11926, 12017), False, 'from functools import partial\n'), ((12057, 12173), 'dmdl.sdmdl.Retrospective', 'sdmdl.Retrospective', ([], {'h': 
'opt_window_size', 'encoding_func': 'nml_gaussian', 'complexity_func': 'complexity_gaussian', 'order': '(0)'}), '(h=opt_window_size, encoding_func=nml_gaussian,\n complexity_func=complexity_gaussian, order=0)\n', (12076, 12173), True, 'import dmdl.sdmdl as sdmdl\n'), ((12237, 12300), 'functools.partial', 'partial', (['hsdmdl2_nml.lnml_gaussian'], {'sigma_given': 'opt_sigma_given'}), '(hsdmdl2_nml.lnml_gaussian, sigma_given=opt_sigma_given)\n', (12244, 12300), False, 'from functools import partial\n'), ((12356, 12532), 'dmdl.hsdmdl2.Retrospective', 'hsdmdl2.Retrospective', ([], {'encoding_func': 'lnml_gaussian', 'd': '(2)', 'M': '(5)', 'min_datapoints': '(5)', 'delta_0': '(0.05)', 'delta_1': '(0.05)', 'delta_2': '(0.05)', 'how_to_drop': '"""all"""', 'order': '(0)', 'reliability': '(True)'}), "(encoding_func=lnml_gaussian, d=2, M=5, min_datapoints\n =5, delta_0=0.05, delta_1=0.05, delta_2=0.05, how_to_drop='all', order=\n 0, reliability=True)\n", (12377, 12532), True, 'import dmdl.hsdmdl2 as hsdmdl2\n'), ((12593, 12658), 'tsmdl.aw2s_mdl.Retrospective', 'aw2s_mdl.Retrospective', (['retrospective_first', 'retrospective_second'], {}), '(retrospective_first, retrospective_second)\n', (12615, 12658), True, 'import tsmdl.aw2s_mdl as aw2s_mdl\n'), ((12712, 12873), 'functools.partial', 'partial', (['_calc_AW2S_MDL_metrics'], {'dataset': 'dataset', 'changepoints': 'changepoints', 'tolerance_delay': 'tolerance_delay', 'threshold': '(0.5)', 'retrospective': 'retrospective'}), '(_calc_AW2S_MDL_metrics, dataset=dataset, changepoints=changepoints,\n tolerance_delay=tolerance_delay, threshold=0.5, retrospective=retrospective\n )\n', (12719, 12873), False, 'from functools import partial\n'), ((13716, 13743), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (13730, 13743), True, 'import numpy as np\n'), ((13816, 13830), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13828, 13830), True, 'import pandas as pd\n'), ((13888, 13938), 
'utils.utils.create_dataset', 'create_dataset', (['n_samples', 'func', 'transition_period'], {}), '(n_samples, func, transition_period)\n', (13902, 13938), False, 'from utils.utils import mean_changing, variance_changing, create_dataset, calc_F1_score\n'), ((14141, 14176), 'pandas.concat', 'pd.concat', (['[df_result, row]'], {'axis': '(0)'}), '([df_result, row], axis=0)\n', (14150, 14176), True, 'import pandas as pd\n'), ((14378, 14413), 'pandas.concat', 'pd.concat', (['[df_result, row]'], {'axis': '(0)'}), '([df_result, row], axis=0)\n', (14387, 14413), True, 'import pandas as pd\n'), ((14649, 14684), 'pandas.concat', 'pd.concat', (['[df_result, row]'], {'axis': '(0)'}), '([df_result, row], axis=0)\n', (14658, 14684), True, 'import pandas as pd\n'), ((14920, 14955), 'pandas.concat', 'pd.concat', (['[df_result, row]'], {'axis': '(0)'}), '([df_result, row], axis=0)\n', (14929, 14955), True, 'import pandas as pd\n'), ((15191, 15226), 'pandas.concat', 'pd.concat', (['[df_result, row]'], {'axis': '(0)'}), '([df_result, row], axis=0)\n', (15200, 15226), True, 'import pandas as pd\n'), ((15437, 15472), 'pandas.concat', 'pd.concat', (['[df_result, row]'], {'axis': '(0)'}), '([df_result, row], axis=0)\n', (15446, 15472), True, 'import pandas as pd\n'), ((15801, 15815), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15813, 15815), True, 'import pandas as pd\n'), ((2887, 2905), 'numpy.mean', 'np.mean', (['res[:, 0]'], {}), '(res[:, 0])\n', (2894, 2905), True, 'import numpy as np\n'), ((2913, 2930), 'numpy.std', 'np.std', (['res[:, 0]'], {}), '(res[:, 0])\n', (2919, 2930), True, 'import numpy as np\n'), ((2957, 2975), 'numpy.mean', 'np.mean', (['res[:, 1]'], {}), '(res[:, 1])\n', (2964, 2975), True, 'import numpy as np\n'), ((2983, 3000), 'numpy.std', 'np.std', (['res[:, 1]'], {}), '(res[:, 1])\n', (2989, 3000), True, 'import numpy as np\n'), ((3024, 3042), 'numpy.mean', 'np.mean', (['res[:, 2]'], {}), '(res[:, 2])\n', (3031, 3042), True, 'import numpy as np\n'), 
((3050, 3067), 'numpy.std', 'np.std', (['res[:, 2]'], {}), '(res[:, 2])\n', (3056, 3067), True, 'import numpy as np\n'), ((5425, 5443), 'numpy.mean', 'np.mean', (['res[:, 0]'], {}), '(res[:, 0])\n', (5432, 5443), True, 'import numpy as np\n'), ((5451, 5468), 'numpy.std', 'np.std', (['res[:, 0]'], {}), '(res[:, 0])\n', (5457, 5468), True, 'import numpy as np\n'), ((5495, 5513), 'numpy.mean', 'np.mean', (['res[:, 1]'], {}), '(res[:, 1])\n', (5502, 5513), True, 'import numpy as np\n'), ((5521, 5538), 'numpy.std', 'np.std', (['res[:, 1]'], {}), '(res[:, 1])\n', (5527, 5538), True, 'import numpy as np\n'), ((5562, 5580), 'numpy.mean', 'np.mean', (['res[:, 2]'], {}), '(res[:, 2])\n', (5569, 5580), True, 'import numpy as np\n'), ((5588, 5605), 'numpy.std', 'np.std', (['res[:, 2]'], {}), '(res[:, 2])\n', (5594, 5605), True, 'import numpy as np\n'), ((8853, 8871), 'numpy.mean', 'np.mean', (['res[:, 0]'], {}), '(res[:, 0])\n', (8860, 8871), True, 'import numpy as np\n'), ((8879, 8896), 'numpy.std', 'np.std', (['res[:, 0]'], {}), '(res[:, 0])\n', (8885, 8896), True, 'import numpy as np\n'), ((8923, 8941), 'numpy.mean', 'np.mean', (['res[:, 1]'], {}), '(res[:, 1])\n', (8930, 8941), True, 'import numpy as np\n'), ((8949, 8966), 'numpy.std', 'np.std', (['res[:, 1]'], {}), '(res[:, 1])\n', (8955, 8966), True, 'import numpy as np\n'), ((8990, 9008), 'numpy.mean', 'np.mean', (['res[:, 2]'], {}), '(res[:, 2])\n', (8997, 9008), True, 'import numpy as np\n'), ((9016, 9033), 'numpy.std', 'np.std', (['res[:, 2]'], {}), '(res[:, 2])\n', (9022, 9033), True, 'import numpy as np\n'), ((13064, 13082), 'numpy.mean', 'np.mean', (['res[:, 0]'], {}), '(res[:, 0])\n', (13071, 13082), True, 'import numpy as np\n'), ((13090, 13107), 'numpy.std', 'np.std', (['res[:, 0]'], {}), '(res[:, 0])\n', (13096, 13107), True, 'import numpy as np\n'), ((13134, 13152), 'numpy.mean', 'np.mean', (['res[:, 1]'], {}), '(res[:, 1])\n', (13141, 13152), True, 'import numpy as np\n'), ((13160, 13177), 'numpy.std', 
'np.std', (['res[:, 1]'], {}), '(res[:, 1])\n', (13166, 13177), True, 'import numpy as np\n'), ((13201, 13219), 'numpy.mean', 'np.mean', (['res[:, 2]'], {}), '(res[:, 2])\n', (13208, 13219), True, 'import numpy as np\n'), ((13227, 13244), 'numpy.std', 'np.std', (['res[:, 2]'], {}), '(res[:, 2])\n', (13233, 13244), True, 'import numpy as np\n'), ((2728, 2745), 'multiprocessing.cpu_count', 'multi.cpu_count', ([], {}), '()\n', (2743, 2745), True, 'import multiprocessing as multi\n'), ((3138, 3156), 'numpy.mean', 'np.mean', (['res[:, 0]'], {}), '(res[:, 0])\n', (3145, 3156), True, 'import numpy as np\n'), ((3174, 3191), 'numpy.std', 'np.std', (['res[:, 0]'], {}), '(res[:, 0])\n', (3180, 3191), True, 'import numpy as np\n'), ((3235, 3253), 'numpy.mean', 'np.mean', (['res[:, 1]'], {}), '(res[:, 1])\n', (3242, 3253), True, 'import numpy as np\n'), ((3272, 3289), 'numpy.std', 'np.std', (['res[:, 1]'], {}), '(res[:, 1])\n', (3278, 3289), True, 'import numpy as np\n'), ((3330, 3348), 'numpy.mean', 'np.mean', (['res[:, 2]'], {}), '(res[:, 2])\n', (3337, 3348), True, 'import numpy as np\n'), ((3364, 3381), 'numpy.std', 'np.std', (['res[:, 2]'], {}), '(res[:, 2])\n', (3370, 3381), True, 'import numpy as np\n'), ((5266, 5283), 'multiprocessing.cpu_count', 'multi.cpu_count', ([], {}), '()\n', (5281, 5283), True, 'import multiprocessing as multi\n'), ((5669, 5687), 'numpy.mean', 'np.mean', (['res[:, 0]'], {}), '(res[:, 0])\n', (5676, 5687), True, 'import numpy as np\n'), ((5705, 5722), 'numpy.std', 'np.std', (['res[:, 0]'], {}), '(res[:, 0])\n', (5711, 5722), True, 'import numpy as np\n'), ((5766, 5784), 'numpy.mean', 'np.mean', (['res[:, 1]'], {}), '(res[:, 1])\n', (5773, 5784), True, 'import numpy as np\n'), ((5803, 5820), 'numpy.std', 'np.std', (['res[:, 1]'], {}), '(res[:, 1])\n', (5809, 5820), True, 'import numpy as np\n'), ((5861, 5879), 'numpy.mean', 'np.mean', (['res[:, 2]'], {}), '(res[:, 2])\n', (5868, 5879), True, 'import numpy as np\n'), ((5895, 5912), 'numpy.std', 
'np.std', (['res[:, 2]'], {}), '(res[:, 2])\n', (5901, 5912), True, 'import numpy as np\n'), ((8694, 8711), 'multiprocessing.cpu_count', 'multi.cpu_count', ([], {}), '()\n', (8709, 8711), True, 'import multiprocessing as multi\n'), ((9275, 9293), 'numpy.mean', 'np.mean', (['res[:, 0]'], {}), '(res[:, 0])\n', (9282, 9293), True, 'import numpy as np\n'), ((9311, 9328), 'numpy.std', 'np.std', (['res[:, 0]'], {}), '(res[:, 0])\n', (9317, 9328), True, 'import numpy as np\n'), ((9372, 9390), 'numpy.mean', 'np.mean', (['res[:, 1]'], {}), '(res[:, 1])\n', (9379, 9390), True, 'import numpy as np\n'), ((9409, 9426), 'numpy.std', 'np.std', (['res[:, 1]'], {}), '(res[:, 1])\n', (9415, 9426), True, 'import numpy as np\n'), ((9467, 9485), 'numpy.mean', 'np.mean', (['res[:, 2]'], {}), '(res[:, 2])\n', (9474, 9485), True, 'import numpy as np\n'), ((9501, 9518), 'numpy.std', 'np.std', (['res[:, 2]'], {}), '(res[:, 2])\n', (9507, 9518), True, 'import numpy as np\n'), ((12905, 12922), 'multiprocessing.cpu_count', 'multi.cpu_count', ([], {}), '()\n', (12920, 12922), True, 'import multiprocessing as multi\n'), ((13311, 13329), 'numpy.mean', 'np.mean', (['res[:, 0]'], {}), '(res[:, 0])\n', (13318, 13329), True, 'import numpy as np\n'), ((13347, 13364), 'numpy.std', 'np.std', (['res[:, 0]'], {}), '(res[:, 0])\n', (13353, 13364), True, 'import numpy as np\n'), ((13408, 13426), 'numpy.mean', 'np.mean', (['res[:, 1]'], {}), '(res[:, 1])\n', (13415, 13426), True, 'import numpy as np\n'), ((13445, 13462), 'numpy.std', 'np.std', (['res[:, 1]'], {}), '(res[:, 1])\n', (13451, 13462), True, 'import numpy as np\n'), ((13503, 13521), 'numpy.mean', 'np.mean', (['res[:, 2]'], {}), '(res[:, 2])\n', (13510, 13521), True, 'import numpy as np\n'), ((13537, 13554), 'numpy.std', 'np.std', (['res[:, 2]'], {}), '(res[:, 2])\n', (13543, 13554), True, 'import numpy as np\n'), ((16828, 16870), 'pandas.concat', 'pd.concat', (['[df_results, df_result]'], {'axis': '(0)'}), '([df_results, df_result], axis=0)\n', 
(16837, 16870), True, 'import pandas as pd\n')] |
import math
import numpy as np
from rclpy.qos import QoSDurabilityPolicy
from rclpy.qos import QoSHistoryPolicy
from rclpy.qos import QoSProfile
from rclpy.qos import QoSReliabilityPolicy
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Twist, Vector3
from turtlesim.msg import Pose
from rclpy.parameter import Parameter
class Config(Node):
    """Dynamic Window Approach (DWA) local planner node for turtlesim.

    Treats turtle1 as a moving obstacle and steers turtle2 through a cyclic
    list of waypoints, publishing velocity commands on /turtle2/cmd_vel at
    10 Hz.  The DWA implementation samples (v, omega) pairs inside the
    dynamic window, simulates each candidate trajectory forward, and picks
    the one minimising a weighted sum of goal-heading, speed and obstacle
    costs.
    """

    def __init__(self):
        super().__init__('dwa_planner')
        # --- robot parameters ---
        self.max_speed = 0.8  # [m/s]
        self.min_speed = -0.5  # [m/s]
        self.max_yaw_rate = 80.0 * math.pi / 180.0  # [rad/s]
        self.max_accel = 0.2  # [m/ss]
        self.max_delta_yaw_rate = 40.0 * math.pi / 180.0  # [rad/ss]
        self.v_resolution = 0.01  # [m/s] linear-velocity sampling step
        self.yaw_rate_resolution = 0.1 * math.pi / 180.0  # [rad/s] yaw-rate sampling step
        self.dt = 0.1  # [s] time tick for motion prediction
        self.predict_time = 3.0  # [s] trajectory rollout horizon
        self.to_goal_cost_gain = 0.15
        self.speed_cost_gain = 1.0
        self.obstacle_cost_gain = 1.0
        self.robot_stuck_flag_cons = 0.001  # threshold to detect a stuck robot
        self.robot_width = 1.0  # [m] for collision check
        self.robot_length = 1.0  # [m] for collision check
        # state: [x(m), y(m), theta(rad), linear_velocity(m/s), angular_velocity(rad/s)]
        self.x = np.array([0, 0, 0, 0, 0])
        # input: [linear_velocity(m/s), angular_velocity(rad/s)]
        self.u = np.array([0, 0])
        # cmd_vel message parts, reused every publish cycle
        self.twist = Twist()
        self.linear = Vector3()
        self.angular = Vector3()
        # obstacles [[x(m), y(m)], ...]; updated from turtle1's pose
        self.ob = np.array([[0.0, 0.0]])
        # waypoints [[x(m), y(m)], [x(m), y(m)], ...] taken from parameters
        self.declare_parameter('num_waypoints', 4)
        self.num_waypoints = self.get_parameter('num_waypoints').value
        print(self.num_waypoints)
        # NOTE(review): declaring parameters with a None default is rejected
        # by newer rclpy releases unless dynamic typing is enabled -- confirm
        # against the target ROS 2 distribution.
        self.declare_parameter('waypoint_1', None)
        self.declare_parameter('waypoint_2', None)
        self.declare_parameter('waypoint_3', None)
        self.declare_parameter('waypoint_4', None)
        waypoint1 = self.get_parameter('waypoint_1').value
        waypoint2 = self.get_parameter('waypoint_2').value
        waypoint3 = self.get_parameter('waypoint_3').value
        waypoint4 = self.get_parameter('waypoint_4').value
        self.waypoints = np.array([waypoint1, waypoint2, waypoint3, waypoint4])
        self.i = 0  # index of the waypoint currently being pursued
        self.declare_parameter('qos_depth', 10)
        qos_depth = self.get_parameter('qos_depth').value
        # NOTE(review): 'waypoints' is declared but never read -- kept so
        # external tooling that sets it does not break.
        self.declare_parameter('waypoints')
        QOS_RKL10V = QoSProfile(
            reliability=QoSReliabilityPolicy.RELIABLE,
            history=QoSHistoryPolicy.KEEP_LAST,
            depth=qos_depth,
            durability=QoSDurabilityPolicy.VOLATILE)
        # turtle1's pose subscriber (turtle1 acts as the obstacle)
        self.turtle1_sub = self.create_subscription(
            Pose,
            '/turtle1/pose',
            self.subscribe_turtle1_pose,
            QOS_RKL10V)
        # turtle2's pose subscriber (turtle2 is the controlled robot)
        self.turtle2_sub = self.create_subscription(
            Pose,
            '/turtle2/pose',
            self.subscribe_turtle2_pose,
            QOS_RKL10V)
        # turtle2's cmd_vel publisher
        self.turtle2_pub = self.create_publisher(
            Twist,
            '/turtle2/cmd_vel',
            QOS_RKL10V)
        self.timer = self.create_timer(0.1, self.publish_cmd)
        self.count = 0

    def subscribe_turtle1_pose(self, msg):
        """Store turtle1's position as the (single) obstacle."""
        self.ob = np.array([[msg.x, msg.y]])

    def subscribe_turtle2_pose(self, msg):
        """Store turtle2's full state vector."""
        self.x = np.array([msg.x, msg.y, msg.theta, msg.linear_velocity, msg.angular_velocity])

    def publish_cmd(self):
        """Timer callback: run one DWA step and publish the resulting command.

        Publishes the input computed on the *previous* cycle, then computes a
        new input for the next cycle.  Advances to the next waypoint when the
        robot is within half its width of the current one, wrapping around
        after the last waypoint.
        """
        self.linear.x = float(self.u[0])
        self.linear.y = 0.0
        self.linear.z = 0.0
        self.angular.x = 0.0
        self.angular.y = 0.0
        self.angular.z = float(self.u[1])
        self.twist.linear = self.linear
        self.twist.angular = self.angular
        trajectory = np.array(self.x)
        self.u, predicted_trajectory = self.dwa_control(self.x, self.waypoints[self.i], self.ob)
        self.x = self.motion(self.x, self.u, self.dt)  # simulate robot
        trajectory = np.vstack((trajectory, self.x))  # store state history
        dist_to_goal = math.hypot(self.x[0] - self.waypoints[self.i][0], self.x[1] - self.waypoints[self.i][1])
        self.turtle2_pub.publish(self.twist)
        if dist_to_goal <= self.robot_width/2:
            self.i = self.i + 1
            # Fix: wrap using the declared 'num_waypoints' parameter instead
            # of the previously hard-coded 4.
            if self.i == self.num_waypoints:
                self.i = 0

    def dwa_control(self, x, goal, ob):
        """Dynamic Window Approach control.

        Returns the best input [v, omega] and its predicted trajectory.
        """
        dw = self.calc_dynamic_window(x)
        u, trajectory = self.calc_control_and_trajectory(x, dw, goal, ob)
        return u, trajectory

    def motion(self, x, u, dt):
        """Unicycle motion model: advance state x by input u over dt seconds."""
        x[2] += u[1] * dt
        x[0] += u[0] * math.cos(x[2]) * dt
        x[1] += u[0] * math.sin(x[2]) * dt
        x[3] = u[0]
        x[4] = u[1]
        return x

    def calc_dynamic_window(self, x):
        """Calculate the dynamic window based on the current state x.

        Intersects the velocity limits from the robot specification (Vs)
        with those reachable within one dt given acceleration limits (Vd).
        """
        # Dynamic window from robot specification
        Vs = [self.min_speed, self.max_speed,
              -self.max_yaw_rate, self.max_yaw_rate]
        # Dynamic window from motion model
        Vd = [x[3] - self.max_accel * self.dt,
              x[3] + self.max_accel * self.dt,
              x[4] - self.max_delta_yaw_rate * self.dt,
              x[4] + self.max_delta_yaw_rate * self.dt]
        # [v_min, v_max, yaw_rate_min, yaw_rate_max]
        dw = [max(Vs[0], Vd[0]), min(Vs[1], Vd[1]),
              max(Vs[2], Vd[2]), min(Vs[3], Vd[3])]
        return dw

    def predict_trajectory(self, x_init, v, y):
        """Roll the motion model forward with constant input (v, y).

        Returns the stacked states over the prediction horizon.
        """
        x = np.array(x_init)
        trajectory = np.array(x)
        time = 0
        while time <= self.predict_time:
            x = self.motion(x, [v, y], self.dt)
            trajectory = np.vstack((trajectory, x))
            time += self.dt
        return trajectory

    def calc_control_and_trajectory(self, x, dw, goal, ob):
        """Evaluate all sampled inputs inside the dynamic window.

        Returns the minimum-cost input and its trajectory.
        """
        x_init = x[:]
        min_cost = float("inf")
        best_u = [0.0, 0.0]
        best_trajectory = np.array([x])
        # evaluate all trajectories with sampled input in dynamic window
        for v in np.arange(dw[0], dw[1], self.v_resolution):
            for y in np.arange(dw[2], dw[3], self.yaw_rate_resolution):
                trajectory = self.predict_trajectory(x_init, v, y)
                # calc cost
                to_goal_cost = self.to_goal_cost_gain * self.calc_to_goal_cost(trajectory, goal)
                speed_cost = self.speed_cost_gain * (self.max_speed - trajectory[-1, 3])
                ob_cost = self.obstacle_cost_gain * self.calc_obstacle_cost(trajectory, ob)
                final_cost = to_goal_cost + speed_cost + ob_cost
                # search minimum trajectory
                if min_cost >= final_cost:
                    min_cost = final_cost
                    best_u = [v, y]
                    best_trajectory = trajectory
                    if abs(best_u[0]) < self.robot_stuck_flag_cons \
                            and abs(x[3]) < self.robot_stuck_flag_cons:
                        # to ensure the robot does not get stuck in
                        # best v=0 m/s (in front of an obstacle) and
                        # best omega=0 rad/s (heading to the goal with
                        # angle difference of 0)
                        best_u[1] = -self.max_delta_yaw_rate
        return best_u, best_trajectory

    def calc_obstacle_cost(self, trajectory, ob):
        """Obstacle cost for a trajectory; inf means collision.

        Transforms the obstacles into the robot frame at each trajectory
        point and checks them against the rectangular robot footprint;
        otherwise returns the inverse of the minimum clearance.
        """
        ox = ob[:, 0]
        oy = ob[:, 1]
        dx = trajectory[:, 0] - ox[:, None]
        dy = trajectory[:, 1] - oy[:, None]
        r = np.hypot(dx, dy)
        yaw = trajectory[:, 2]
        rot = np.array([[np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)]])
        rot = np.transpose(rot, [2, 0, 1])
        local_ob = ob[:, None] - trajectory[:, 0:2]
        local_ob = local_ob.reshape(-1, local_ob.shape[-1])
        local_ob = np.array([local_ob @ x for x in rot])
        local_ob = local_ob.reshape(-1, local_ob.shape[-1])
        upper_check = local_ob[:, 0] <= self.robot_length / 2
        right_check = local_ob[:, 1] <= self.robot_width / 2
        bottom_check = local_ob[:, 0] >= -self.robot_length / 2
        left_check = local_ob[:, 1] >= -self.robot_width / 2
        if (np.logical_and(np.logical_and(upper_check, right_check),
                           np.logical_and(bottom_check, left_check))).any():
            return float("Inf")
        min_r = np.min(r)
        return 1.0 / min_r

    def calc_to_goal_cost(self, trajectory, goal):
        """Goal cost: magnitude of the heading error at the trajectory end."""
        dx = goal[0] - trajectory[-1, 0]
        dy = goal[1] - trajectory[-1, 1]
        error_angle = math.atan2(dy, dx)
        cost_angle = error_angle - trajectory[-1, 2]
        # atan2(sin, cos) normalises the angle difference to [-pi, pi]
        cost = abs(math.atan2(math.sin(cost_angle), math.cos(cost_angle)))
        return cost
def main(gx=2.5, gy=8.58, args=None):
    """Entry point: spin the DWA planner node until interrupted.

    gx/gy are accepted for backward compatibility but are not used; goals
    come from the node's waypoint parameters.
    """
    rclpy.init(args=args)
    try:
        node = Config()
        try:
            rclpy.spin(node)
        except KeyboardInterrupt:
            # Fix: log message typo ('Interrypt' -> 'Interrupt')
            node.get_logger().info('Keyboard Interrupt (SIGINT)')
        finally:
            node.destroy_node()
    finally:
        rclpy.shutdown()


if __name__ == '__main__':
    main()
| [
"geometry_msgs.msg.Vector3",
"math.cos",
"numpy.array",
"numpy.sin",
"math.hypot",
"rclpy.init",
"numpy.arange",
"numpy.vstack",
"numpy.min",
"numpy.hypot",
"rclpy.shutdown",
"geometry_msgs.msg.Twist",
"math.atan2",
"numpy.cos",
"numpy.transpose",
"rclpy.qos.QoSProfile",
"rclpy.spin"... | [((9300, 9321), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (9310, 9321), False, 'import rclpy\n'), ((1238, 1263), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (1246, 1263), True, 'import numpy as np\n'), ((1351, 1367), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1359, 1367), True, 'import numpy as np\n'), ((1465, 1472), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (1470, 1472), False, 'from geometry_msgs.msg import Twist, Vector3\n'), ((1495, 1504), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (1502, 1504), False, 'from geometry_msgs.msg import Twist, Vector3\n'), ((1528, 1537), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (1535, 1537), False, 'from geometry_msgs.msg import Twist, Vector3\n'), ((1595, 1617), 'numpy.array', 'np.array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (1603, 1617), True, 'import numpy as np\n'), ((2293, 2347), 'numpy.array', 'np.array', (['[waypoint1, waypoint2, waypoint3, waypoint4]'], {}), '([waypoint1, waypoint2, waypoint3, waypoint4])\n', (2301, 2347), True, 'import numpy as np\n'), ((2541, 2698), 'rclpy.qos.QoSProfile', 'QoSProfile', ([], {'reliability': 'QoSReliabilityPolicy.RELIABLE', 'history': 'QoSHistoryPolicy.KEEP_LAST', 'depth': 'qos_depth', 'durability': 'QoSDurabilityPolicy.VOLATILE'}), '(reliability=QoSReliabilityPolicy.RELIABLE, history=\n QoSHistoryPolicy.KEEP_LAST, depth=qos_depth, durability=\n QoSDurabilityPolicy.VOLATILE)\n', (2551, 2698), False, 'from rclpy.qos import QoSProfile\n'), ((3462, 3488), 'numpy.array', 'np.array', (['[[msg.x, msg.y]]'], {}), '([[msg.x, msg.y]])\n', (3470, 3488), True, 'import numpy as np\n'), ((3550, 3628), 'numpy.array', 'np.array', (['[msg.x, msg.y, msg.theta, msg.linear_velocity, msg.angular_velocity]'], {}), '([msg.x, msg.y, msg.theta, msg.linear_velocity, msg.angular_velocity])\n', (3558, 3628), True, 'import numpy as np\n'), ((3959, 3975), 'numpy.array', 
'np.array', (['self.x'], {}), '(self.x)\n', (3967, 3975), True, 'import numpy as np\n'), ((4166, 4197), 'numpy.vstack', 'np.vstack', (['(trajectory, self.x)'], {}), '((trajectory, self.x))\n', (4175, 4197), True, 'import numpy as np\n'), ((4244, 4337), 'math.hypot', 'math.hypot', (['(self.x[0] - self.waypoints[self.i][0])', '(self.x[1] - self.waypoints[self.i][1])'], {}), '(self.x[0] - self.waypoints[self.i][0], self.x[1] - self.\n waypoints[self.i][1])\n', (4254, 4337), False, 'import math\n'), ((5826, 5842), 'numpy.array', 'np.array', (['x_init'], {}), '(x_init)\n', (5834, 5842), True, 'import numpy as np\n'), ((5864, 5875), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5872, 5875), True, 'import numpy as np\n'), ((6336, 6349), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (6344, 6349), True, 'import numpy as np\n'), ((6439, 6481), 'numpy.arange', 'np.arange', (['dw[0]', 'dw[1]', 'self.v_resolution'], {}), '(dw[0], dw[1], self.v_resolution)\n', (6448, 6481), True, 'import numpy as np\n'), ((7968, 7984), 'numpy.hypot', 'np.hypot', (['dx', 'dy'], {}), '(dx, dy)\n', (7976, 7984), True, 'import numpy as np\n'), ((8113, 8141), 'numpy.transpose', 'np.transpose', (['rot', '[2, 0, 1]'], {}), '(rot, [2, 0, 1])\n', (8125, 8141), True, 'import numpy as np\n'), ((8273, 8312), 'numpy.array', 'np.array', (['[(local_ob @ x) for x in rot]'], {}), '([(local_ob @ x) for x in rot])\n', (8281, 8312), True, 'import numpy as np\n'), ((8811, 8820), 'numpy.min', 'np.min', (['r'], {}), '(r)\n', (8817, 8820), True, 'import numpy as np\n'), ((9088, 9106), 'math.atan2', 'math.atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (9098, 9106), False, 'import math\n'), ((9567, 9583), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (9581, 9583), False, 'import rclpy\n'), ((6007, 6033), 'numpy.vstack', 'np.vstack', (['(trajectory, x)'], {}), '((trajectory, x))\n', (6016, 6033), True, 'import numpy as np\n'), ((6504, 6553), 'numpy.arange', 'np.arange', (['dw[2]', 'dw[3]', 
'self.yaw_rate_resolution'], {}), '(dw[2], dw[3], self.yaw_rate_resolution)\n', (6513, 6553), True, 'import numpy as np\n'), ((9380, 9396), 'rclpy.spin', 'rclpy.spin', (['node'], {}), '(node)\n', (9390, 9396), False, 'import rclpy\n'), ((4885, 4899), 'math.cos', 'math.cos', (['x[2]'], {}), '(x[2])\n', (4893, 4899), False, 'import math\n'), ((4928, 4942), 'math.sin', 'math.sin', (['x[2]'], {}), '(x[2])\n', (4936, 4942), False, 'import math\n'), ((9190, 9210), 'math.sin', 'math.sin', (['cost_angle'], {}), '(cost_angle)\n', (9198, 9210), False, 'import math\n'), ((9212, 9232), 'math.cos', 'math.cos', (['cost_angle'], {}), '(cost_angle)\n', (9220, 9232), False, 'import math\n'), ((8042, 8053), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (8048, 8053), True, 'import numpy as np\n'), ((8071, 8082), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (8077, 8082), True, 'import numpy as np\n'), ((8084, 8095), 'numpy.cos', 'np.cos', (['yaw'], {}), '(yaw)\n', (8090, 8095), True, 'import numpy as np\n'), ((8646, 8686), 'numpy.logical_and', 'np.logical_and', (['upper_check', 'right_check'], {}), '(upper_check, right_check)\n', (8660, 8686), True, 'import numpy as np\n'), ((8712, 8752), 'numpy.logical_and', 'np.logical_and', (['bottom_check', 'left_check'], {}), '(bottom_check, left_check)\n', (8726, 8752), True, 'import numpy as np\n'), ((8056, 8067), 'numpy.sin', 'np.sin', (['yaw'], {}), '(yaw)\n', (8062, 8067), True, 'import numpy as np\n')] |
# digitizer class for zurich instrument's hf2li lockin
from small_lab_gui.digitizers.digitizer import digitizer
import numpy as np
import time
from threading import Event
class hf2li_dummy(digitizer):
    """Dummy stand-in for the Zurich Instruments HF2LI lock-in digitizer.

    Produces random readings instead of talking to hardware, so GUI and
    analysis code can be exercised without the instrument attached.
    """

    def __init__(self, dev='dev1251'):
        # dev: device identifier string of the (simulated) instrument.
        self.num_sensors = 6
        self.dev = dev

    def setup(self, integration, timeconstant=0.1, order=2):
        """Store acquisition parameters (integration time in seconds)."""
        self.integration = integration
        self.timeconstant = timeconstant
        self.order = order

    def _sample(self):
        """Sleep for the integration time, then return one random reading.

        The dict layout mirrors what the real driver emits: two demodulator
        channels (sig/r/theta and sig2/r2/theta2) plus bookkeeping keys.
        """
        time.sleep(self.integration)
        return {
            'sig': np.random.rand(1)[0],
            'r': np.random.rand(1)[0],
            'theta': np.random.rand(1)[0],
            'sig2': np.random.rand(1)[0],
            'r2': np.random.rand(1)[0],
            'theta2': np.random.rand(1)[0],
            'source': 'zi_hf2li',
            'success': True}

    def frame(self, stop_event=None, inp=None, init_output=None):
        """Acquire one (simulated) frame; the extra arguments are ignored."""
        return self._sample()

    def readout_continuous(self, stop_event=None, inp=None, init_output=None):
        """One continuous-readout step; the extra arguments are ignored.

        NOTE: the original used a shared ``Event()`` instance as the default
        for ``stop_event`` (mutable-default pitfall). The parameter is never
        used in the body, so ``None`` is a safe, equivalent default.
        """
        return self._sample()

    def stop(self):
        """Nothing to stop in the dummy implementation."""
        pass

    def close(self):
        print('closing digitizer')
| [
"threading.Event",
"numpy.random.rand",
"time.sleep"
] | [((539, 567), 'time.sleep', 'time.sleep', (['self.integration'], {}), '(self.integration)\n', (549, 567), False, 'import time\n'), ((942, 949), 'threading.Event', 'Event', ([], {}), '()\n', (947, 949), False, 'from threading import Event\n'), ((1015, 1043), 'time.sleep', 'time.sleep', (['self.integration'], {}), '(self.integration)\n', (1025, 1043), False, 'import time\n'), ((604, 621), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (618, 621), True, 'import numpy as np\n'), ((643, 660), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (657, 660), True, 'import numpy as np\n'), ((686, 703), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (700, 703), True, 'import numpy as np\n'), ((728, 745), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (742, 745), True, 'import numpy as np\n'), ((768, 785), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (782, 785), True, 'import numpy as np\n'), ((812, 829), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (826, 829), True, 'import numpy as np\n'), ((1080, 1097), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1094, 1097), True, 'import numpy as np\n'), ((1119, 1136), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1133, 1136), True, 'import numpy as np\n'), ((1162, 1179), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1176, 1179), True, 'import numpy as np\n'), ((1204, 1221), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1218, 1221), True, 'import numpy as np\n'), ((1244, 1261), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1258, 1261), True, 'import numpy as np\n'), ((1288, 1305), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1302, 1305), True, 'import numpy as np\n')] |
import numpy as np
# Constants
a = 0.4
b = 2.0
c = 2.0
class Particle:
    """A single PSO particle: position, velocity and personal best."""

    def __init__(self, lower_bound, upper_bound):
        """Create a particle with a uniformly random 2-D position.

        lower_bound / upper_bound: per-dimension search-space limits.
        """
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        # Start at a uniformly distributed random point inside the bounds.
        self.position = np.random.uniform(lower_bound, upper_bound, 2)
        # The personal best is initially the starting position.
        self.best_position = self.position
        self.velocity = np.array([0, 0])

    def rosenbrock(self, x, y):
        """Rosenbrock benchmark with parameter a = 0 (minimum at the origin).

        NOTE: the original shadowed the module-level inertia constant ``a``
        with a local ``a = 0``; the literal 0 is used directly here so the
        shadowing is gone while the value is unchanged.
        """
        return (0 - x) ** 2 + 100 * (y - x ** 2) ** 2

    def rastrigin(self, x, y):
        """Negated 2-D Rastrigin benchmark (sign kept as in the original)."""
        return 0 - (10 * 2 + (x ** 2 - (10 * np.cos(2 * np.pi * x))) + (y ** 2 - (10 * np.cos(2 * np.pi * y))))

    def evaluation_of_current_position(self):
        """Fitness of the current position (Rosenbrock by default)."""
        return self.rosenbrock(self.position[0], self.position[1])
        # return self.rastrigin(self.position[0], self.position[1])

    def evaluation_of_best_position(self):
        """Fitness of the personal best position."""
        return self.rosenbrock(self.best_position[0], self.best_position[1])
        # return self.rastrigin(self.best_position[0], self.best_position[1])

    def move_to_new_position(self):
        """Advance the position by one velocity step."""
        self.position = self.position + self.velocity

    def update_velocity(self, global_best_position):
        """Standard PSO velocity update.

        Uses the module-level constants a (inertia), b (cognitive) and
        c (social). Reading module globals needs no ``global`` declaration,
        so the original ``global a/b/c`` statements were removed.
        """
        self.velocity = a * self.velocity + (b * np.random.uniform(0, 1)) * (
            self.best_position - self.position) + (c * np.random.uniform(0, 1)) * (
            global_best_position - self.position)

    def initialize_velocity(self):
        """Draw a random initial velocity bounded by the search-space span."""
        abs_diff = np.fabs(self.lower_bound - self.upper_bound)
        self.velocity = np.random.uniform(-abs_diff, abs_diff, 2)
| [
"numpy.fabs",
"numpy.array",
"numpy.cos",
"numpy.random.uniform"
] | [((348, 394), 'numpy.random.uniform', 'np.random.uniform', (['lower_bound', 'upper_bound', '(2)'], {}), '(lower_bound, upper_bound, 2)\n', (365, 394), True, 'import numpy as np\n'), ((570, 586), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (578, 586), True, 'import numpy as np\n'), ((1726, 1770), 'numpy.fabs', 'np.fabs', (['(self.lower_bound - self.upper_bound)'], {}), '(self.lower_bound - self.upper_bound)\n', (1733, 1770), True, 'import numpy as np\n'), ((1795, 1836), 'numpy.random.uniform', 'np.random.uniform', (['(-abs_diff)', 'abs_diff', '(2)'], {}), '(-abs_diff, abs_diff, 2)\n', (1812, 1836), True, 'import numpy as np\n'), ((1572, 1595), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1589, 1595), True, 'import numpy as np\n'), ((809, 830), 'numpy.cos', 'np.cos', (['(2 * np.pi * y)'], {}), '(2 * np.pi * y)\n', (815, 830), True, 'import numpy as np\n'), ((1484, 1507), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1501, 1507), True, 'import numpy as np\n'), ((767, 788), 'numpy.cos', 'np.cos', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (773, 788), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
import numpy as np
import sys
import math
sys.stdout.write('.')
def loadDataFromFile(filename):
    """Load a whitespace-delimited numeric matrix from *filename*.

    On any read/parse failure, prints a diagnostic prefixed with the global
    ``prefix`` (falling back to argv[3] or the filename itself) and exits
    with status 1 — this script is a batch tool, so failing fast is intended.
    """
    global prefix
    try:
        data = np.loadtxt(filename, skiprows=0)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        prefix = filename if len(sys.argv) <= 3 else sys.argv[3]
        print(prefix+": UNABLE TO OPEN '"+filename+"'")
        sys.exit(1)
    return data
# Usage: compare.py <reference-file> <candidate-file> <label-prefix>
# Computes L1 / L2 / Linf norms of the difference between a candidate
# solution and a (possibly finer-grained) reference solution.
ref_data = loadDataFromFile(sys.argv[1])
cmp_data = loadDataFromFile(sys.argv[2])
prefix = sys.argv[3]
norm_l1_value = 0.0
norm_l2_value = 0.0
norm_linf_value = 0.0
#print (cmp_data)
size_ref_j = len(ref_data)
size_ref_i = len(ref_data[0])
size_cmp_j = len(cmp_data)
size_cmp_i = len(cmp_data[0])
# The reference grid must be an integer refinement of the candidate grid
# (the +1 accounts for cell/node counting — TODO confirm against the solver).
multiplier_j = (size_ref_j+1)/(size_cmp_j+1)
multiplier_i = (size_ref_i+1)/(size_cmp_i+1)
if not float(multiplier_i).is_integer() or not float(multiplier_j).is_integer() :
    print ("Dimensions of reference solution: ", size_ref_i, size_ref_j)
    print ("Dimensions of method under analysis: ", size_cmp_i, size_cmp_j)
    print ("Multipliers: ", multiplier_i, multiplier_j)
    print ("Grids are not aligned")
    sys.exit(1)
multiplier_j = int(multiplier_j)
multiplier_i = int(multiplier_i)
#print ("Multipliers (int): ", multiplier_i, multiplier_j)
# Accumulate the norms over every candidate grid point, sampling the
# reference at the corresponding (strided) location.
for j in range(0, size_cmp_j):
    for i in range(0, size_cmp_i):
        #print("(",i,",",j,",", i*multiplier_i,",", j*multiplier_j,")", end="")
        value = cmp_data[j,i]-ref_data[j*multiplier_j,i*multiplier_i]
        norm_l1_value += abs(value)
        norm_l2_value += value*value
        norm_linf_value = max(norm_linf_value, abs(value))
# Normalize L1 by the point count and take the RMS for L2.
norm_l1_value = norm_l1_value/(size_cmp_i*size_cmp_j)
norm_l2_value = math.sqrt(norm_l2_value/(size_cmp_i*size_cmp_j))
#
# L1, L2, Linf
#
print(prefix+"\t"+str(norm_l1_value)+"\t"+str(norm_l2_value)+"\t"+str(norm_linf_value))
| [
"numpy.loadtxt",
"math.sqrt",
"sys.exit",
"sys.stdout.write"
] | [((68, 89), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (84, 89), False, 'import sys\n'), ((1632, 1684), 'math.sqrt', 'math.sqrt', (['(norm_l2_value / (size_cmp_i * size_cmp_j))'], {}), '(norm_l2_value / (size_cmp_i * size_cmp_j))\n', (1641, 1684), False, 'import math\n'), ((1105, 1116), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1113, 1116), False, 'import sys\n'), ((154, 186), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'skiprows': '(0)'}), '(filename, skiprows=0)\n', (164, 186), True, 'import numpy as np\n'), ((307, 318), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (315, 318), False, 'import sys\n')] |
import numpy as np
class ReplayBuffer:
    """Fixed-capacity circular experience replay buffer for DQN agents."""

    def __init__(self, max_length, observation_space):
        """Allocate the backing arrays.

        max_length: maximum number of stored transitions.
        observation_space: dimensionality of a single observation vector.
        """
        self.index = 0
        self.size = 0
        self.max_length = max_length
        self.states = np.zeros((max_length, observation_space), dtype=np.float32)
        self.next_states = np.zeros((max_length, observation_space), dtype=np.float32)
        self.actions = np.zeros(max_length, dtype=np.int32)
        self.rewards = np.zeros(max_length, dtype=np.float32)
        self.dones = np.zeros(max_length, dtype=np.float32)

    def __len__(self):
        """Number of transitions currently stored."""
        return self.size

    def update(self, state, action, reward, next_state, done):
        """Store one transition, overwriting the oldest slot when full.

        state / next_state: observation vectors.
        action: integer action taken.
        reward: scalar reward received.
        done: 1 if the episode terminated at this step, else 0.
        """
        slot = self.index
        self.states[slot] = state
        self.actions[slot] = action
        self.rewards[slot] = reward
        self.next_states[slot] = next_state
        self.dones[slot] = done
        # Advance the write cursor circularly; size saturates at capacity.
        self.index = (slot + 1) % self.max_length
        self.size = min(self.size + 1, self.max_length)

    def sample(self, batch_size):
        """Draw a random batch (with replacement) of stored transitions.

        Returns the tuple (states, actions, rewards, next_states, dones),
        each an array of length batch_size.
        """
        idxs = np.random.randint(0, self.size, size=batch_size)
        return (self.states[idxs], self.actions[idxs], self.rewards[idxs],
                self.next_states[idxs], self.dones[idxs])
"numpy.zeros",
"numpy.random.randint"
] | [((460, 519), 'numpy.zeros', 'np.zeros', (['(max_length, observation_space)'], {'dtype': 'np.float32'}), '((max_length, observation_space), dtype=np.float32)\n', (468, 519), True, 'import numpy as np\n'), ((543, 579), 'numpy.zeros', 'np.zeros', (['max_length'], {'dtype': 'np.int32'}), '(max_length, dtype=np.int32)\n', (551, 579), True, 'import numpy as np\n'), ((605, 643), 'numpy.zeros', 'np.zeros', (['max_length'], {'dtype': 'np.float32'}), '(max_length, dtype=np.float32)\n', (613, 643), True, 'import numpy as np\n'), ((673, 732), 'numpy.zeros', 'np.zeros', (['(max_length, observation_space)'], {'dtype': 'np.float32'}), '((max_length, observation_space), dtype=np.float32)\n', (681, 732), True, 'import numpy as np\n'), ((754, 792), 'numpy.zeros', 'np.zeros', (['max_length'], {'dtype': 'np.float32'}), '(max_length, dtype=np.float32)\n', (762, 792), True, 'import numpy as np\n'), ((2326, 2374), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {'size': 'batch_size'}), '(0, self.size, size=batch_size)\n', (2343, 2374), True, 'import numpy as np\n')] |
import gym
from gym.spaces import Box, Discrete
from gym.utils import seeding
import numpy as np
from .world import World
from .agents import Car, Building, Pedestrian, Painting
from .geometry import Point
import time
class Scenario5(gym.Env):
    """Driving scenario: the ego car drives up a 40x120 corridor while a
    scripted adversary approaches head-on and cuts across a collision point.

    Observation (5-D): ego y, ego vy, and noisy adversary x / y / vy.
    Action (1-D): ego longitudinal acceleration in [-3.5, 2.0].
    """

    def __init__(self):
        self.seed(0) # just in case we forget seeding
        # Ego car faces up (+y) and starts at rest.
        self.init_ego = Car(Point(22, 10), heading = np.pi/2)
        self.init_ego.velocity = Point(0, 0.)
        # Adversary faces down (-y) and starts already moving.
        self.init_adv = Car(Point(14, 115), heading = -np.pi/2, color='blue')
        self.init_adv.velocity = Point(0, 4.)
        self.target = Point(22, 120)
        # Half-ranges of uniform observation noise on the adversary state.
        self.noise_adv_pos = 1.0
        self.noise_adv_vel = 1.0
        self.dt = 0.1
        self.T = 40  # episode time limit (simulation seconds)
        self.initiate_world()
        self.reset()

    def initiate_world(self):
        """Build the world: a 40x120 corridor flanked by two buildings."""
        self.world = World(self.dt, width = 40, height = 120, ppm = 5)
        self.world.add(Building(Point(6, 60), Point(12, 120)))
        self.world.add(Building(Point(34, 60), Point(12, 120)))

    def reset(self):
        """Re-create both cars with randomized offsets; return first observation."""
        self.ego = self.init_ego.copy()
        self.ego.min_speed = 0.
        self.ego.max_speed = 10.
        self.adv = self.init_adv.copy()
        self.adv.min_speed = 0.
        self.adv.max_speed = 6.
        # Where the adversary begins its turn and where the paths cross.
        self.turning_point = Point(14, 90)
        self.collision_point = Point(16, 82.5)
        self.add_noise()
        self.world.reset()
        self.world.add(self.ego)
        self.world.add(self.adv)
        return self._get_obs()

    def close(self):
        self.world.close()

    def add_noise(self):
        """Randomly perturb initial car positions and the turn/collision points."""
        self.ego.center += Point(0, 20*self.np_random.rand() - 10)
        self.adv.center += Point(0, 10*self.np_random.rand() - 5)
        self.collision_point.y += self.np_random.rand()*10 - 5
        self.turning_point.y += self.np_random.rand()*10 - 5

    @property
    def observation_space(self):
        # Bounds of [ego y, ego vy, adv x, adv y, adv vy],
        # widened by the observation-noise half-ranges.
        low = np.array([0, self.ego.min_speed, self.init_adv.x - self.noise_adv_pos/2., 0 - self.ego.max_speed*self.dt - self.noise_adv_pos/2., self.adv.min_speed - self.noise_adv_vel/2.])
        high= np.array([self.target.y + self.ego.max_speed*self.dt, self.ego.max_speed, self.init_ego.x + self.init_ego.size.x + self.noise_adv_pos/2., self.target.y + self.noise_adv_pos/2., self.adv.max_speed + self.noise_adv_vel/2.])
        return Box(low=low, high=high)

    @property
    def action_space(self):
        # Single longitudinal acceleration command for the ego car.
        return Box(low=np.array([-3.5]), high=np.array([2.]))

    def seed(self, seed):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def get_adv_control(self):
        """Scripted adversary controller: accelerate when it can beat the ego
        to the collision point, then steer through the turn.

        `yp` is presumably the vertical velocity component — TODO confirm
        against the Car implementation; the epsilon avoids division by zero.
        """
        ttc_ego = (self.collision_point.y - self.ego.y) / np.abs(self.ego.yp + 1e-8)
        ttc_adv = (self.adv.y - self.collision_point.y) / np.abs(self.adv.yp - 1e-8)
        if self.adv.y > self.turning_point.y:
            # Before the turn: drive straight, speed up only if it wins the race.
            acceleration = 1. + self.np_random.rand()*0.4 - 0.2 if ttc_adv > ttc_ego else 0.
            return np.array([0, acceleration], dtype=np.float32)
        elif self.turning_point.y >= self.adv.y > self.collision_point.y:
            # Mid-turn: steer toward the collision point while racing the ego.
            acceleration = 1. + self.np_random.rand()*0.4 - 0.2 if ttc_adv > ttc_ego else 0.
            steering = -0.1 if (self.collision_point.x - self.adv.x) * np.tan(self.adv.heading) > self.collision_point.y - self.adv.y else 0.1
            return np.array([steering, acceleration], dtype=np.float32)
        else:
            # Past the collision point: straighten out, no acceleration.
            steering = -0.1 if (18 - self.adv.x) * np.tan(self.adv.heading) > -5 and np.mod(self.adv.heading, 2*np.pi) > 3*np.pi/2 else 0.1
            return np.array([steering, 0.], dtype=np.float32)

    def get_ego_control(self,policy_no=0):
        """Two baseline ego policies (0 = aggressive, 1 = cautious) built on
        predicted time-to-collision against the adversary's projected path."""
        predicted_collision_point = (22 - self.ego.size.x/2. - self.adv.x) * np.tan(self.adv.heading) + self.adv.y
        predicted_ttc_ego = (predicted_collision_point - self.ego.y) / np.abs(self.ego.yp + 1e-8)
        predicted_ttc_adv = (self.adv.y - predicted_collision_point) / np.abs(self.adv.yp - 1e-8)
        if policy_no==0: # aggressive
            if predicted_ttc_ego < 0 or predicted_ttc_adv < -1.5 or predicted_ttc_ego < predicted_ttc_adv - 0.1:
                return np.array([0, 2.], dtype=np.float32)
            else:
                return np.array([0, -3.], dtype=np.float32)
        elif policy_no==1: # cautious
            if predicted_ttc_ego < 0 or predicted_ttc_adv < -1.5 or predicted_ttc_ego < predicted_ttc_adv - 0.5:
                return np.array([0, 1.], dtype=np.float32)
            else:
                return np.array([0, -2.5], dtype=np.float32)

    @property
    def target_reached(self):
        # Episode success: ego passed the target line.
        return self.ego.y >= self.target.y

    @property
    def collision_exists(self):
        return self.ego.collidesWith(self.adv)

    def step(self, action):
        """Apply the ego acceleration, advance the world one tick, and return
        (obs, reward, done, info). Reward is always 0; done on collision,
        target reached, or timeout."""
        while type(action) == list:
            action = action[0]
        action = np.clip(action, self.action_space.low, self.action_space.high)
        ego_action = np.array([0, action], dtype=np.float32)
        adv_action = self.get_adv_control()
        self.ego.set_control(*ego_action)
        self.adv.set_control(*adv_action)
        self.world.tick()
        return self._get_obs(), 0, self.collision_exists or self.target_reached or self.world.t >= self.T, {}

    def _get_obs(self):
        # Adversary readings are corrupted with zero-mean uniform noise.
        return np.array([self.ego.center.y, self.ego.velocity.y, self.adv.center.x + self.noise_adv_pos*self.np_random.rand() - self.noise_adv_pos/2., self.adv.center.y + self.noise_adv_pos*self.np_random.rand() - self.noise_adv_pos/2., self.adv.velocity.y + self.noise_adv_vel*self.np_random.rand() - self.noise_adv_vel/2.])
def render(self, mode='rgb'):
self.world.render() | [
"numpy.clip",
"numpy.abs",
"numpy.tan",
"gym.spaces.Box",
"numpy.array",
"numpy.mod",
"gym.utils.seeding.np_random"
] | [((1651, 1845), 'numpy.array', 'np.array', (['[0, self.ego.min_speed, self.init_adv.x - self.noise_adv_pos / 2.0, 0 - \n self.ego.max_speed * self.dt - self.noise_adv_pos / 2.0, self.adv.\n min_speed - self.noise_adv_vel / 2.0]'], {}), '([0, self.ego.min_speed, self.init_adv.x - self.noise_adv_pos / 2.0,\n 0 - self.ego.max_speed * self.dt - self.noise_adv_pos / 2.0, self.adv.\n min_speed - self.noise_adv_vel / 2.0])\n', (1659, 1845), True, 'import numpy as np\n'), ((1834, 2080), 'numpy.array', 'np.array', (['[self.target.y + self.ego.max_speed * self.dt, self.ego.max_speed, self.\n init_ego.x + self.init_ego.size.x + self.noise_adv_pos / 2.0, self.\n target.y + self.noise_adv_pos / 2.0, self.adv.max_speed + self.\n noise_adv_vel / 2.0]'], {}), '([self.target.y + self.ego.max_speed * self.dt, self.ego.max_speed,\n self.init_ego.x + self.init_ego.size.x + self.noise_adv_pos / 2.0, self\n .target.y + self.noise_adv_pos / 2.0, self.adv.max_speed + self.\n noise_adv_vel / 2.0])\n', (1842, 2080), True, 'import numpy as np\n'), ((2065, 2088), 'gym.spaces.Box', 'Box', ([], {'low': 'low', 'high': 'high'}), '(low=low, high=high)\n', (2068, 2088), False, 'from gym.spaces import Box, Discrete\n'), ((2232, 2255), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (2249, 2255), False, 'from gym.utils import seeding\n'), ((4249, 4311), 'numpy.clip', 'np.clip', (['action', 'self.action_space.low', 'self.action_space.high'], {}), '(action, self.action_space.low, self.action_space.high)\n', (4256, 4311), True, 'import numpy as np\n'), ((4330, 4369), 'numpy.array', 'np.array', (['[0, action]'], {'dtype': 'np.float32'}), '([0, action], dtype=np.float32)\n', (4338, 4369), True, 'import numpy as np\n'), ((2355, 2382), 'numpy.abs', 'np.abs', (['(self.ego.yp + 1e-08)'], {}), '(self.ego.yp + 1e-08)\n', (2361, 2382), True, 'import numpy as np\n'), ((2434, 2461), 'numpy.abs', 'np.abs', (['(self.adv.yp - 1e-08)'], {}), '(self.adv.yp - 1e-08)\n', (2440, 2461), 
True, 'import numpy as np\n'), ((2595, 2640), 'numpy.array', 'np.array', (['[0, acceleration]'], {'dtype': 'np.float32'}), '([0, acceleration], dtype=np.float32)\n', (2603, 2640), True, 'import numpy as np\n'), ((3399, 3426), 'numpy.abs', 'np.abs', (['(self.ego.yp + 1e-08)'], {}), '(self.ego.yp + 1e-08)\n', (3405, 3426), True, 'import numpy as np\n'), ((3491, 3518), 'numpy.abs', 'np.abs', (['(self.adv.yp - 1e-08)'], {}), '(self.adv.yp - 1e-08)\n', (3497, 3518), True, 'import numpy as np\n'), ((2143, 2159), 'numpy.array', 'np.array', (['[-3.5]'], {}), '([-3.5])\n', (2151, 2159), True, 'import numpy as np\n'), ((2166, 2181), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (2174, 2181), True, 'import numpy as np\n'), ((2937, 2989), 'numpy.array', 'np.array', (['[steering, acceleration]'], {'dtype': 'np.float32'}), '([steering, acceleration], dtype=np.float32)\n', (2945, 2989), True, 'import numpy as np\n'), ((3139, 3182), 'numpy.array', 'np.array', (['[steering, 0.0]'], {'dtype': 'np.float32'}), '([steering, 0.0], dtype=np.float32)\n', (3147, 3182), True, 'import numpy as np\n'), ((3296, 3320), 'numpy.tan', 'np.tan', (['self.adv.heading'], {}), '(self.adv.heading)\n', (3302, 3320), True, 'import numpy as np\n'), ((3665, 3701), 'numpy.array', 'np.array', (['[0, 2.0]'], {'dtype': 'np.float32'}), '([0, 2.0], dtype=np.float32)\n', (3673, 3701), True, 'import numpy as np\n'), ((3721, 3758), 'numpy.array', 'np.array', (['[0, -3.0]'], {'dtype': 'np.float32'}), '([0, -3.0], dtype=np.float32)\n', (3729, 3758), True, 'import numpy as np\n'), ((3905, 3941), 'numpy.array', 'np.array', (['[0, 1.0]'], {'dtype': 'np.float32'}), '([0, 1.0], dtype=np.float32)\n', (3913, 3941), True, 'import numpy as np\n'), ((3961, 3998), 'numpy.array', 'np.array', (['[0, -2.5]'], {'dtype': 'np.float32'}), '([0, -2.5], dtype=np.float32)\n', (3969, 3998), True, 'import numpy as np\n'), ((2855, 2879), 'numpy.tan', 'np.tan', (['self.adv.heading'], {}), '(self.adv.heading)\n', (2861, 2879), True, 
'import numpy as np\n'), ((3074, 3109), 'numpy.mod', 'np.mod', (['self.adv.heading', '(2 * np.pi)'], {}), '(self.adv.heading, 2 * np.pi)\n', (3080, 3109), True, 'import numpy as np\n'), ((3040, 3064), 'numpy.tan', 'np.tan', (['self.adv.heading'], {}), '(self.adv.heading)\n', (3046, 3064), True, 'import numpy as np\n')] |
'''
beeBrain - An Artificial Intelligence & Machine Learning library
by Dev. <NAME> (www.devhima.tk)
'''
''''
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
[variable.py]
- This file contains the implementation of improving neural network stability and modeling performance with data scaling by controlling variables.
'''
import numpy as np
class Variable:
    """Wraps an array with an error buffer used during backpropagation."""

    def __init__(self, v):
        """Hold the data array *v* and allocate a same-shaped error buffer."""
        self.data = v
        # Error buffer for backpropagation of the last layer.
        self._error = np.empty_like(v)
        self.info = {}

    def set_error(self, error):
        """Copy *error* into the buffer; shapes must match exactly."""
        assert self._error.shape == error.shape
        self._error[:] = error

    @property
    def error(self):
        """Current backpropagation error buffer."""
        return self._error

    @property
    def shape(self):
        """Shape of the wrapped data array."""
        return self.data.shape

    @property
    def ndim(self):
        """Number of dimensions of the wrapped data array."""
        return self.data.ndim

    def __repr__(self):
        return str(self.data)
"numpy.empty_like"
] | [((1456, 1472), 'numpy.empty_like', 'np.empty_like', (['v'], {}), '(v)\n', (1469, 1472), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import pandas as pd
import os
import sys
import time
from lightgbm import LGBMClassifier
from sklearn.preprocessing import LabelEncoder
import cleanlab
from cleanlab.pruning import get_noise_indices
# Find likely label errors in the banking77 data set: train LightGBM on
# sentence-embedding features with 10-fold CV and let cleanlab flag
# out-of-fold samples whose given label disagrees with the predictions.
model = 'clean_embed_all-mpnet-base-v2.csv'
df = pd.read_csv('/global/project/hpcg1614_shared/ca/data/banking77/{}'.format(model))
df_orig = pd.read_csv('clean.csv')
#df = df.head(1000)
#df_orig = df_orig.head(1000)
df_orig = df_orig.to_numpy()
from sklearn.model_selection import train_test_split, StratifiedKFold
# Features: all embedding columns; target: the intent category.
X = df.drop(['category'], axis=1).to_numpy()
y_cat = df['category'].to_numpy()
label_transformer = LabelEncoder()
y = label_transformer.fit_transform(y_cat)
kfold = StratifiedKFold(n_splits=10, shuffle=True)
res = []
split = 0
# Every sample's label is audited by a model that never saw it in training.
for train_ix, val_ix in kfold.split(X, y):
    split = split + 1
    print("split")
    X_train, X_val = X[train_ix], X[val_ix]
    y_train, y_val = y[train_ix], y[val_ix]
    X_orig_val = df_orig[val_ix]
    params = {
        "learning_rate": 0.1,
        "max_depth": 4,
        "num_leaves": 15,
        "n_estimators": 1000,
        "n_jobs": 5,
        "verbosity": -1,
        "seed": 77,
    }
    estimator = LGBMClassifier(**params)
    estimator.fit(X_train, y_train)
    y_val_pred =estimator.predict_proba(X_val)
    # cleanlab ranks validation samples by how strongly the predicted
    # probabilities contradict the given label.
    ordered_label_errors = get_noise_indices(
        s=y_val,
        psx=y_val_pred,
        sorted_index_method='normalized_margin', # Orders label errors
    )
    i = 0
    for error_ix in ordered_label_errors:
        i = i + 1
        print()
        # NOTE(review): .format(error_ix) has no placeholder here — harmless,
        # but probably meant to include the index in the message.
        print("Possible Truth Label Error:".format(error_ix))
        o_ix = val_ix[error_ix]
        print(" Orig IDX: {}".format(o_ix))
        print(" Orig Message: {}".format(df_orig[o_ix]))
        print(" Message: {}".format(X_orig_val[error_ix][0]))
        print(" Truth Label: {} {}".format(y_val[error_ix], label_transformer.inverse_transform([y_val[error_ix]])))
        probas = np.around(y_val_pred[error_ix], decimals=4)
        index_max = np.argmax(probas)
        print(" Predicted label: {}".format(label_transformer.inverse_transform([index_max])))
        print(" Predicted probas: {}".format(probas))
        res.append({
            'Split': split,
            'i': i,
            'ID': o_ix,
            'Message': X_orig_val[error_ix][0],
            'Truth': label_transformer.inverse_transform([y_val[error_ix]])[0],
            'Maybe Better': label_transformer.inverse_transform([index_max])[0],
        })
# Persist every flagged sample with its suggested replacement label.
df_res = pd.DataFrame(res)
df_res.to_csv('possible_errors.csv', index=False)
| [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"lightgbm.LGBMClassifier",
"numpy.argmax",
"sklearn.model_selection.StratifiedKFold",
"numpy.around",
"pandas.DataFrame",
"cleanlab.pruning.get_noise_indices"
] | [((377, 401), 'pandas.read_csv', 'pd.read_csv', (['"""clean.csv"""'], {}), "('clean.csv')\n", (388, 401), True, 'import pandas as pd\n'), ((654, 668), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (666, 668), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((722, 764), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)'}), '(n_splits=10, shuffle=True)\n', (737, 764), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((2619, 2636), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (2631, 2636), True, 'import pandas as pd\n'), ((1254, 1278), 'lightgbm.LGBMClassifier', 'LGBMClassifier', ([], {}), '(**params)\n', (1268, 1278), False, 'from lightgbm import LGBMClassifier\n'), ((1391, 1479), 'cleanlab.pruning.get_noise_indices', 'get_noise_indices', ([], {'s': 'y_val', 'psx': 'y_val_pred', 'sorted_index_method': '"""normalized_margin"""'}), "(s=y_val, psx=y_val_pred, sorted_index_method=\n 'normalized_margin')\n", (1408, 1479), False, 'from cleanlab.pruning import get_noise_indices\n'), ((2046, 2089), 'numpy.around', 'np.around', (['y_val_pred[error_ix]'], {'decimals': '(4)'}), '(y_val_pred[error_ix], decimals=4)\n', (2055, 2089), True, 'import numpy as np\n'), ((2110, 2127), 'numpy.argmax', 'np.argmax', (['probas'], {}), '(probas)\n', (2119, 2127), True, 'import numpy as np\n')] |
"""
Original code from <NAME> for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by <NAME> and <NAME>
Adapted for CS294-112 Fall 2018 by <NAME>, <NAME>, and <NAME>
"""
import inspect
import os
import time
from itertools import count
import gym
import numpy as np
import torch
from torch import nn
from torch.multiprocessing import Process
from torch.nn import functional as F
import logz
class Net(nn.Module):
    """Small MLP: Linear(obs_dim, 128) -> ReLU -> Linear(128, act_dim)."""

    def __init__(self, obs_dim, act_dim):
        super(Net, self).__init__()
        self.fc0 = nn.Linear(obs_dim, 128)
        self.fc1 = nn.Linear(128, act_dim)

    def forward(self, x):
        """Cast input to the layer dtype and run it through both layers."""
        hidden = F.relu(self.fc0(x.type_as(self.fc0.bias)))
        return self.fc1(hidden)
class GaussianPolicy(nn.Module):
    """Diagonal Gaussian policy: mean = tanh(MLP(obs)), learnable global std."""

    def __init__(self, obs_dim, act_dim):
        super(GaussianPolicy, self).__init__()
        self.mean = Net(obs_dim, act_dim)
        self.std = nn.Parameter(torch.ones(1, act_dim))

    def forward(self, obs):
        """Sample an action; return (action, summed log-prob, zero placeholder)."""
        dist = torch.distributions.Normal(torch.tanh(self.mean(obs)), self.std)
        action = dist.sample()
        log_prob = dist.log_prob(action).sum(dim=1, keepdim=True)
        # Entropy is not tracked for this policy; a zero tensor keeps the
        # return signature identical to the categorical variant.
        return action, log_prob, torch.zeros((log_prob.size(0), 1))
class CategoricalPolicy(nn.Module):
    """Categorical policy over discrete actions; logits come from a small MLP."""

    def __init__(self, obs_dim, act_dim):
        super(CategoricalPolicy, self).__init__()
        self.network = Net(obs_dim, act_dim)

    def forward(self, obs):
        """Sample an action; return (action, log-prob column, entropy column)."""
        dist = torch.distributions.Categorical(logits=self.network(obs))
        action = dist.sample()
        return action, dist.log_prob(action).unsqueeze(-1), dist.entropy().unsqueeze(-1)
def normalize(array):
    """Standardize a NumPy array or PyTorch tensor to ~zero mean, unit std.

    A small epsilon keeps the division safe when the std is zero.
    """
    centered = array - array.mean()
    return centered / (array.std() + 1e-7)
def setup_logger(logdir, locals_):
    """Point logz at *logdir* and record the hyperparameters of train_AC.

    locals_: the caller's locals() dict; arguments of train_AC missing from
    it are recorded as None.
    """
    logz.configure_output_dir(logdir)
    arg_names = inspect.getfullargspec(train_AC)[0]
    logz.save_params({name: locals_.get(name) for name in arg_names})
def quick_log(agent, rewards, iteration, start_time, timesteps_this_batch, total_timesteps):
    """Log per-iteration return/length statistics and snapshot the agent.

    rewards: list of per-episode reward arrays for this batch.
    """
    returns = [episode.sum() for episode in rewards]
    lengths = [len(episode) for episode in rewards]
    logz.log_dict({
        "Time": time.time() - start_time,
        "Iteration": iteration,
        "AverageReturn": np.mean(returns),
        "StdReturn": np.std(returns),
        "MaxReturn": np.max(returns),
        "MinReturn": np.min(returns),
        "EpLenMean": np.mean(lengths),
        "EpLenStd": np.std(lengths),
        "TimestepsThisBatch": timesteps_this_batch,
        "TimestepsSoFar": total_timesteps,
    })
    logz.dump_tabular()
    logz.save_agent(agent)
def make_agent_args(args, env):
    """Assemble the Agent constructor dict from CLI args and a gym env.

    Derives dimensions from the env's spaces, picks CUDA when available,
    and falls back to the env's max episode steps when ep_len <= 0.
    """
    is_discrete = isinstance(env.action_space, gym.spaces.Discrete)
    ac_dim = env.action_space.n if is_discrete else env.action_space.shape[0]
    max_len = args['ep_len'] if args['ep_len'] > 0 else env.spec.max_episode_steps
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return {'n_layers': args['n_layers'],
            'ob_dim': env.observation_space.shape[0],
            'ac_dim': ac_dim,
            'discrete': is_discrete,
            'size': args['size'],
            'learning_rate': args['learning_rate'],
            'num_target_updates': args['num_target_updates'],
            'num_grad_steps_per_target_update': args['num_grad_steps_per_target_update'],
            'device': device,
            'animate': args['render'],
            'max_path_length': max_len,
            'min_timesteps_per_batch': args['batch_size'],
            'gamma': args['discount'],
            'normalize_advantages': not args['dont_normalize_advantages']}
# ============================================================================================#
# Actor Critic
# ============================================================================================#
class Agent(object):
    def __init__(self, args):
        """Build the actor (policy) and critic networks from the config dict.

        args: dict as produced by make_agent_args — dimensions,
        hyperparameters, device, and sampling settings.
        """
        super(Agent, self).__init__()
        self.n_layers = args['n_layers']
        self.ob_dim = args['ob_dim']
        self.ac_dim = args['ac_dim']
        self.discrete = args['discrete']
        self.size = args['size']
        self.learning_rate = args['learning_rate']
        self.num_target_updates = args['num_target_updates']
        self.num_grad_steps_per_target_update = args['num_grad_steps_per_target_update']
        self.device = args['device']
        self.animate = args['animate']
        self.max_path_length = args['max_path_length']
        self.min_timesteps_per_batch = args['min_timesteps_per_batch']
        self.gamma = args['gamma']
        self.normalize_advantages = args['normalize_advantages']
        self.actor = None  # The loss function is computed at self.update_parameters.
        # Categorical policy for discrete action spaces, Gaussian otherwise.
        if self.discrete:
            self.actor = CategoricalPolicy(self.ob_dim, self.ac_dim).to(self.device)
        else:
            self.actor = GaussianPolicy(self.ob_dim, self.ac_dim).to(self.device)
        self.optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.learning_rate)
        # Critic: scalar state-value network trained with MSE.
        self.critic_prediction = Net(self.ob_dim, 1).to(self.device)
        self.critic_loss = nn.MSELoss()
        self.critic_optimizer = torch.optim.Adam(self.critic_prediction.parameters(), lr=self.learning_rate)
    def sample_trajectories(self, itr, env):
        """Roll out episodes until at least min_timesteps_per_batch steps.

        Returns: concatenated observations (np), per-episode action arrays
        (list), concatenated next observations (np), concatenated log-probs
        (torch, squeezed), per-episode reward arrays (list), concatenated
        terminal flags (np), and the number of timesteps collected.
        """
        # Collect trajectories until we have enough timesteps
        timesteps_this_batch = 0
        obs, acs, next_obs, log_probs, res, terminals = [], [], [], [], [], []
        while timesteps_this_batch <= self.min_timesteps_per_batch:
            # Optionally render the first episode every 10th iteration.
            animate_this_episode = (len(res) == 0 and (itr % 10 == 0) and self.animate)
            ob, ac, next_ob, log_prob, re, terminal = self.sample_trajectory(env, animate_this_episode)
            obs.append(ob)
            acs.append(ac)
            next_obs.append(next_ob)
            log_probs.append(log_prob)
            res.append(re)
            terminals.append(terminal)
            timesteps_this_batch += len(re)
        return np.concatenate(obs), acs, np.concatenate(next_obs), torch.cat(log_probs).squeeze(1), res, np.concatenate(
            terminals), \
               timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, next_obs, log_probs, rewards, terminals = [], [], [], [], [], []
for steps in count():
if animate_this_episode:
env.render()
time.sleep(0.1)
ac, log_prob, _ = self.actor(torch.from_numpy(ob).to(self.device).unsqueeze(0))
if self.discrete:
ac = ac[0].item()
else:
ac = ac[0].detach().cpu().numpy()
obs.append(ob)
acs.append(ac)
log_probs.append(log_prob)
next_ob, rew, done, _ = env.step(ac)
next_obs.append(next_ob)
rewards.append(rew)
ob = next_ob
# If the episode ended, the corresponding terminal value is 1
# otherwise, it is 0
if done or steps > self.max_path_length:
terminals.append(1)
break
else:
terminals.append(0)
return np.array(obs, dtype=np.float32), \
np.array(acs, dtype=np.float32), \
np.array(next_obs, dtype=np.float32), \
torch.cat(log_probs), \
np.array(rewards, dtype=np.float32), \
np.array(terminals, dtype=np.float32)
def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
re_n = torch.from_numpy(np.concatenate(re_n)).to(self.device)
ob_no = torch.from_numpy(ob_no).to(self.device)
next_ob_no = torch.from_numpy(next_ob_no).to(self.device)
mask = torch.from_numpy(1 - terminal_n).to(self.device)
v_prime_n = self.critic_prediction(next_ob_no).reshape(re_n.shape)
q_n = re_n + self.gamma * v_prime_n * mask
v_n = self.critic_prediction(ob_no).reshape(re_n.shape)
adv_n = q_n - v_n
if self.normalize_advantages:
adv_n = normalize(adv_n)
return adv_n.detach()
def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Update the parameters of the critic.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep of 0 if the episode did not end
returns:
nothing
"""
# Use a bootstrapped target values to update the critic
# Compute the target values r(s, a) + gamma*V(s') by calling the critic to compute V(s')
# In total, take n=self.num_grad_steps_per_target_update*self.num_target_updates gradient update steps
# Every self.num_grad_steps_per_target_update steps, recompute the target values
# by evaluating V(s') on the updated critic
ob_no = torch.from_numpy(ob_no).to(self.device).unsqueeze(0)
next_ob_no = torch.from_numpy(next_ob_no).to(self.device).unsqueeze(0)
re_n = torch.from_numpy(np.concatenate(re_n)).to(self.device)
mask = torch.from_numpy(1 - terminal_n).to(self.device)
for _ in range(self.num_target_updates):
self.critic_prediction.eval()
v_prime_n = self.critic_prediction(next_ob_no).reshape(re_n.shape)
target = re_n + self.gamma * v_prime_n * mask
target = target.detach()
self.critic_prediction.train()
for _ in range(self.num_grad_steps_per_target_update):
prediction = self.critic_prediction(ob_no).reshape(re_n.shape)
loss = self.critic_loss(input=prediction, target=target)
self.critic_optimizer.zero_grad()
loss.backward()
self.critic_optimizer.step()
self.critic_prediction.eval()
def update_actor(self, ob_no, log_prob_na, adv_n):
"""
Update the parameters of the policy.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
self.actor.train()
self.optimizer.zero_grad()
loss = -torch.mean(log_prob_na * adv_n)
loss.backward()
self.optimizer.step()
self.actor.eval()
def train_AC(args, logdir, seed):
    """Run the actor-critic training loop for one experiment.

    Args:
        args: dict of hyper-parameters ('env_name', 'seed', 'n_iter', ...).
        logdir: directory the logger writes to.
        seed: seed identifying this run (recorded via locals() below).
    """
    start = time.time()
    # The logger snapshots the local variables (args/logdir/seed/start).
    setup_logger(logdir, locals())
    # Environment creation and seeding of every RNG source.
    env = gym.make(args['env_name'])
    discrete = isinstance(env.action_space, gym.spaces.Discrete)
    env.seed(args['seed'])
    torch.manual_seed(args['seed'])
    np.random.seed(args['seed'])
    agent = Agent(make_agent_args(args, env))
    total_timesteps = 0
    for itr in range(args['n_iter']):
        print("********** Iteration %i ************" % itr)
        (ob_no, _, next_ob_no, log_prob_na, re_n, terminal_n,
         batch_timesteps) = agent.sample_trajectories(itr, env)
        # (1) fit the critic, (2) estimate advantages with the refreshed
        # critic, (3) take a policy-gradient step on the actor.
        agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)
        advantages = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)
        agent.update_actor(ob_no, log_prob_na, advantages)
        # Log diagnostics.
        total_timesteps += batch_timesteps
        quick_log(agent=agent, rewards=re_n, iteration=itr, start_time=start,
                  timesteps_this_batch=batch_timesteps,
                  total_timesteps=total_timesteps)
def main():
    """CLI entry point: parse arguments and run each experiment in its own process."""
    import argparse
    cli = argparse.ArgumentParser()
    cli.add_argument('env_name', type=str)
    cli.add_argument('--exp_name', type=str, default='vac')
    cli.add_argument('--render', action='store_true')
    cli.add_argument('--discount', type=float, default=1.0)
    cli.add_argument('--n_iter', '-n', type=int, default=100)
    cli.add_argument('--batch_size', '-b', type=int, default=1000)
    cli.add_argument('--ep_len', '-ep', type=float, default=-1.)
    cli.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    cli.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    cli.add_argument('--num_target_updates', '-ntu', type=int, default=10)
    cli.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)
    cli.add_argument('--seed', type=int, default=1)
    cli.add_argument('--n_experiments', '-e', type=int, default=1)
    cli.add_argument('--n_layers', '-l', type=int, default=2)
    cli.add_argument('--size', '-s', type=int, default=64)
    args = cli.parse_args()
    args.exp_name = 'ac_' + args.exp_name
    # Logs live under <script dir>/data/<exp>_<env>_<timestamp>/<seed>.
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    logdir = os.path.join(data_path,
                          args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S"))
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    # One child process per experiment, each with its own derived seed.
    workers = []
    for run in range(args.n_experiments):
        seed = args.seed + 10 * run
        print('Running experiment with seed %d' % seed)
        worker = Process(target=train_AC,
                         args=(vars(args), os.path.join(logdir, '%d' % seed), seed))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()


if __name__ == "__main__":
    main()
| [
"logz.save_params",
"torch.distributions.Categorical",
"inspect.getfullargspec",
"time.sleep",
"torch.from_numpy",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"logz.configure_output_dir",
"logz.save_agent",
"gym.make",
"logz.dump_tabular",
"os.path.exists",
"numpy.mean",
... | [((1910, 1943), 'logz.configure_output_dir', 'logz.configure_output_dir', (['logdir'], {}), '(logdir)\n', (1935, 1943), False, 'import logz\n'), ((2098, 2122), 'logz.save_params', 'logz.save_params', (['params'], {}), '(params)\n', (2114, 2122), False, 'import logz\n'), ((2829, 2848), 'logz.dump_tabular', 'logz.dump_tabular', ([], {}), '()\n', (2846, 2848), False, 'import logz\n'), ((2853, 2875), 'logz.save_agent', 'logz.save_agent', (['agent'], {}), '(agent)\n', (2868, 2875), False, 'import logz\n'), ((11351, 11362), 'time.time', 'time.time', ([], {}), '()\n', (11360, 11362), False, 'import time\n'), ((11440, 11466), 'gym.make', 'gym.make', (["args['env_name']"], {}), "(args['env_name'])\n", (11448, 11466), False, 'import gym\n'), ((11564, 11595), 'torch.manual_seed', 'torch.manual_seed', (["args['seed']"], {}), "(args['seed'])\n", (11581, 11595), False, 'import torch\n'), ((11600, 11628), 'numpy.random.seed', 'np.random.seed', (["args['seed']"], {}), "(args['seed'])\n", (11614, 11628), True, 'import numpy as np\n'), ((12728, 12753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12751, 12753), False, 'import argparse\n'), ((14104, 14135), 'os.path.join', 'os.path.join', (['data_path', 'logdir'], {}), '(data_path, logdir)\n', (14116, 14135), False, 'import os\n'), ((543, 566), 'torch.nn.Linear', 'nn.Linear', (['obs_dim', '(128)'], {}), '(obs_dim, 128)\n', (552, 566), False, 'from torch import nn\n'), ((586, 609), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'act_dim'], {}), '(128, act_dim)\n', (595, 609), False, 'from torch import nn\n'), ((1055, 1097), 'torch.distributions.Normal', 'torch.distributions.Normal', (['mean', 'self.std'], {}), '(mean, self.std)\n', (1081, 1097), False, 'import torch\n'), ((1516, 1561), 'torch.distributions.Categorical', 'torch.distributions.Categorical', ([], {'logits': 'logit'}), '(logits=logit)\n', (1547, 1561), False, 'import torch\n'), ((1989, 2021), 'inspect.getfullargspec', 'inspect.getfullargspec', 
(['train_AC'], {}), '(train_AC)\n', (2011, 2021), False, 'import inspect\n'), ((5298, 5310), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5308, 5310), False, 'from torch import nn\n'), ((6513, 6520), 'itertools.count', 'count', ([], {}), '()\n', (6518, 6520), False, 'from itertools import count\n'), ((13940, 13965), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (13954, 13965), False, 'import os\n'), ((13976, 13998), 'os.makedirs', 'os.makedirs', (['data_path'], {}), '(data_path)\n', (13987, 13998), False, 'import os\n'), ((14056, 14090), 'time.strftime', 'time.strftime', (['"""%d-%m-%Y_%H-%M-%S"""'], {}), "('%d-%m-%Y_%H-%M-%S')\n", (14069, 14090), False, 'import time\n'), ((14148, 14170), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (14162, 14170), False, 'import os\n'), ((14181, 14200), 'os.makedirs', 'os.makedirs', (['logdir'], {}), '(logdir)\n', (14192, 14200), False, 'import os\n'), ((945, 967), 'torch.ones', 'torch.ones', (['(1)', 'act_dim'], {}), '(1, act_dim)\n', (955, 967), False, 'import torch\n'), ((2438, 2454), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (2445, 2454), True, 'import numpy as np\n'), ((2488, 2503), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (2494, 2503), True, 'import numpy as np\n'), ((2537, 2552), 'numpy.max', 'np.max', (['returns'], {}), '(returns)\n', (2543, 2552), True, 'import numpy as np\n'), ((2586, 2601), 'numpy.min', 'np.min', (['returns'], {}), '(returns)\n', (2592, 2601), True, 'import numpy as np\n'), ((2635, 2654), 'numpy.mean', 'np.mean', (['ep_lengths'], {}), '(ep_lengths)\n', (2642, 2654), True, 'import numpy as np\n'), ((2687, 2705), 'numpy.std', 'np.std', (['ep_lengths'], {}), '(ep_lengths)\n', (2693, 2705), True, 'import numpy as np\n'), ((6155, 6174), 'numpy.concatenate', 'np.concatenate', (['obs'], {}), '(obs)\n', (6169, 6174), True, 'import numpy as np\n'), ((6181, 6205), 'numpy.concatenate', 'np.concatenate', (['next_obs'], {}), 
'(next_obs)\n', (6195, 6205), True, 'import numpy as np\n'), ((6245, 6270), 'numpy.concatenate', 'np.concatenate', (['terminals'], {}), '(terminals)\n', (6259, 6270), True, 'import numpy as np\n'), ((7369, 7400), 'numpy.array', 'np.array', (['obs'], {'dtype': 'np.float32'}), '(obs, dtype=np.float32)\n', (7377, 7400), True, 'import numpy as np\n'), ((7419, 7450), 'numpy.array', 'np.array', (['acs'], {'dtype': 'np.float32'}), '(acs, dtype=np.float32)\n', (7427, 7450), True, 'import numpy as np\n'), ((7469, 7505), 'numpy.array', 'np.array', (['next_obs'], {'dtype': 'np.float32'}), '(next_obs, dtype=np.float32)\n', (7477, 7505), True, 'import numpy as np\n'), ((7524, 7544), 'torch.cat', 'torch.cat', (['log_probs'], {}), '(log_probs)\n', (7533, 7544), False, 'import torch\n'), ((7563, 7598), 'numpy.array', 'np.array', (['rewards'], {'dtype': 'np.float32'}), '(rewards, dtype=np.float32)\n', (7571, 7598), True, 'import numpy as np\n'), ((7617, 7654), 'numpy.array', 'np.array', (['terminals'], {'dtype': 'np.float32'}), '(terminals, dtype=np.float32)\n', (7625, 7654), True, 'import numpy as np\n'), ((11191, 11222), 'torch.mean', 'torch.mean', (['(log_prob_na * adv_n)'], {}), '(log_prob_na * adv_n)\n', (11201, 11222), False, 'import torch\n'), ((13848, 13874), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (13864, 13874), False, 'import os\n'), ((2333, 2344), 'time.time', 'time.time', ([], {}), '()\n', (2342, 2344), False, 'import time\n'), ((3473, 3498), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3496, 3498), False, 'import torch\n'), ((6604, 6619), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6614, 6619), False, 'import time\n'), ((7813, 7836), 'torch.from_numpy', 'torch.from_numpy', (['ob_no'], {}), '(ob_no)\n', (7829, 7836), False, 'import torch\n'), ((7874, 7902), 'torch.from_numpy', 'torch.from_numpy', (['next_ob_no'], {}), '(next_ob_no)\n', (7890, 7902), False, 'import torch\n'), ((7934, 7966), 
'torch.from_numpy', 'torch.from_numpy', (['(1 - terminal_n)'], {}), '(1 - terminal_n)\n', (7950, 7966), False, 'import torch\n'), ((9884, 9916), 'torch.from_numpy', 'torch.from_numpy', (['(1 - terminal_n)'], {}), '(1 - terminal_n)\n', (9900, 9916), False, 'import torch\n'), ((6207, 6227), 'torch.cat', 'torch.cat', (['log_probs'], {}), '(log_probs)\n', (6216, 6227), False, 'import torch\n'), ((7759, 7779), 'numpy.concatenate', 'np.concatenate', (['re_n'], {}), '(re_n)\n', (7773, 7779), True, 'import numpy as np\n'), ((9831, 9851), 'numpy.concatenate', 'np.concatenate', (['re_n'], {}), '(re_n)\n', (9845, 9851), True, 'import numpy as np\n'), ((14406, 14439), 'os.path.join', 'os.path.join', (['logdir', "('%d' % seed)"], {}), "(logdir, '%d' % seed)\n", (14418, 14439), False, 'import os\n'), ((9667, 9690), 'torch.from_numpy', 'torch.from_numpy', (['ob_no'], {}), '(ob_no)\n', (9683, 9690), False, 'import torch\n'), ((9741, 9769), 'torch.from_numpy', 'torch.from_numpy', (['next_ob_no'], {}), '(next_ob_no)\n', (9757, 9769), False, 'import torch\n'), ((6662, 6682), 'torch.from_numpy', 'torch.from_numpy', (['ob'], {}), '(ob)\n', (6678, 6682), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the rhythmic dynamic movement primitive.
"""
import numpy as np
from pyrobolearn.models.dmp.canonical_systems import RhythmicCS
from pyrobolearn.models.dmp.forcing_terms import RhythmicForcingTerm
from pyrobolearn.models.dmp.dmp import DMP
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class RhythmicDMP(DMP):
    r"""Rhythmic Dynamic Movement Primitive.

    A rhythmic DMP uses the standard DMP transformation system

    .. math:: \tau^2 \ddot{y} = K (g - y) - D \tau \dot{y} + f(s)

    where :math:`\tau` slows down or speeds up the reproduced movement,
    :math:`K` and :math:`D` are the stiffness and damping coefficients,
    :math:`y, \dot{y}, \ddot{y}` are the position, velocity and acceleration
    of one DoF, and :math:`f(s)` is the non-linear forcing term

    .. math:: f(s) = \frac{\sum_i \psi_i(s) w_i}{\sum_i \psi_i(s)} a

    with learnable weights :math:`w`, amplitude :math:`a`, and periodic
    basis functions

    .. math:: \psi_i(s) = \exp \left( - h_i (\cos(s - c_i) - 1) \right)

    centered at :math:`c_i` with concentration :math:`h_i`.  The phase
    :math:`s` is driven by the rhythmic canonical system
    :math:`\tau \dot{s} = 1`.  All differential equations are integrated
    with Euler's method.

    References:
        [1] "Dynamical movement primitives: Learning attractor models for motor behaviors", Ijspeert et al., 2013
    """

    def __init__(self, num_dmps, num_basis, dt=0.01, y0=0, goal=1,
                 forcing_terms=None, stiffness=None, damping=None):
        """Initialize the rhythmic DMP.

        Args:
            num_dmps (int): number of DMPs (degrees of freedom).
            num_basis (int, list[int]): number of basis functions, either one
                value shared by every DMP or one value per DMP.
            dt (float): step integration for Euler's method.
            y0 (float, np.array): initial position(s).
            goal (float, np.array): goal(s).
            forcing_terms (list, ForcingTerm, None): pre-built forcing terms
                (each with its own basis functions and weights); when None
                they are created here from ``num_basis``.
            stiffness (float): stiffness coefficient.
            damping (float): damping coefficient.
        """
        # Rhythmic canonical system shared by every forcing term.
        cs = RhythmicCS(dt=dt)
        if forcing_terms is None:
            if isinstance(num_basis, int):
                # Same number of basis functions for every DMP.
                forcing_terms = [RhythmicForcingTerm(cs, num_basis) for _ in range(num_dmps)]
            elif not isinstance(num_basis, (np.ndarray, list, tuple, set)):
                raise TypeError("Expecting 'num_basis' to be an int, list, tuple, np.array or set.")
            elif len(num_basis) != num_dmps:
                raise ValueError("The length of th list of number of basis doesn't match the number of DMPs")
            else:
                # One basis count per DMP.
                forcing_terms = [RhythmicForcingTerm(cs, nb) for nb in num_basis]
        super(RhythmicDMP, self).__init__(canonical_system=cs, forcing_term=forcing_terms, y0=y0, goal=goal,
                                          stiffness=stiffness, damping=damping)

    def get_scaling_term(self, new_goal=None):
        """Return the scaling term of the forcing term.

        Rhythmic DMPs have a non-diminishing forcing term, so the scaling is
        simply one for each DMP.

        Args:
            new_goal: unused, kept for interface compatibility.

        Returns:
            np.array[M]: vector of ones (one entry per DMP).
        """
        return np.ones(self.num_dmps)

    def _generate_goal(self, y_des):
        """Generate the goal for path imitation.

        For rhythmic DMPs the goal of each DoF is the mid-range of its
        desired trajectory, ignoring NaN samples.

        Args:
            y_des (float[M,T]): desired trajectory to follow
                (shape [num_dmps, timesteps]).

        Returns:
            float[M]: goal positions (one for each DMP).
        """
        goal = np.zeros(self.num_dmps)
        for idx in range(self.num_dmps):
            traj = y_des[idx]
            valid = traj[~np.isnan(traj)]  # drop NaNs before taking min/max
            goal[idx] = 0.5 * (valid.min() + valid.max())
        return goal
| [
"numpy.ones",
"pyrobolearn.models.dmp.canonical_systems.RhythmicCS",
"pyrobolearn.models.dmp.forcing_terms.RhythmicForcingTerm",
"numpy.zeros",
"numpy.isnan"
] | [((2848, 2865), 'pyrobolearn.models.dmp.canonical_systems.RhythmicCS', 'RhythmicCS', ([], {'dt': 'dt'}), '(dt=dt)\n', (2858, 2865), False, 'from pyrobolearn.models.dmp.canonical_systems import RhythmicCS\n'), ((4036, 4058), 'numpy.ones', 'np.ones', (['self.num_dmps'], {}), '(self.num_dmps)\n', (4043, 4058), True, 'import numpy as np\n'), ((4441, 4464), 'numpy.zeros', 'np.zeros', (['self.num_dmps'], {}), '(self.num_dmps)\n', (4449, 4464), True, 'import numpy as np\n'), ((4527, 4545), 'numpy.isnan', 'np.isnan', (['y_des[n]'], {}), '(y_des[n])\n', (4535, 4545), True, 'import numpy as np\n'), ((3070, 3104), 'pyrobolearn.models.dmp.forcing_terms.RhythmicForcingTerm', 'RhythmicForcingTerm', (['cs', 'num_basis'], {}), '(cs, num_basis)\n', (3089, 3104), False, 'from pyrobolearn.models.dmp.forcing_terms import RhythmicForcingTerm\n'), ((3526, 3558), 'pyrobolearn.models.dmp.forcing_terms.RhythmicForcingTerm', 'RhythmicForcingTerm', (['cs', 'n_basis'], {}), '(cs, n_basis)\n', (3545, 3558), False, 'from pyrobolearn.models.dmp.forcing_terms import RhythmicForcingTerm\n')] |
from __future__ import print_function
import torch
import torch.utils.data
import numpy as np
import torchvision.utils as vutils
def gen_error_colormap():
    """Build the disparity-error colormap lookup table.

    Returns:
        float32 array of shape (10, 5): each row is
        ``[lower_bound, upper_bound, r, g, b]``, with error bounds scaled by
        1/3 and RGB values scaled into [0, 1].  Rows run from cool colors
        (small error) to warm colors (large error); the last bin is open
        (upper bound inf).
    """
    # Bin edges (before the 1/3 scaling) and the RGB color of each bin.
    edges = [0, 0.1875, 0.375, 0.75, 1.5, 3, 6, 12, 24, 48, np.inf]
    rgb = [(49, 54, 149), (69, 117, 180), (116, 173, 209), (171, 217, 233),
           (224, 243, 248), (254, 224, 144), (253, 174, 97), (244, 109, 67),
           (215, 48, 39), (165, 0, 38)]
    rows = [[lo / 3.0, hi / 3.0, r, g, b]
            for (lo, hi), (r, g, b) in zip(zip(edges[:-1], edges[1:]), rgb)]
    cols = np.array(rows, dtype=np.float32)
    cols[:, 2: 5] /= 255.
    return cols
def disp_error_img(D_est_tensor, D_gt_tensor, abs_thres=3., rel_thres=0.05, dilate_radius=1):
    """Render a color-coded disparity error map.

    A pixel's scaled error is min(abs_err / abs_thres, rel_err / rel_thres),
    so it stays <= 1 when the error is within either the absolute or the
    relative threshold.  Invalid pixels (ground truth <= 0) are drawn black,
    and a 10-entry color legend is stamped into the top-left corner.

    Args:
        D_est_tensor: estimated disparity, shape (B, H, W).
        D_gt_tensor: ground-truth disparity, shape (B, H, W); <= 0 marks invalid.
        abs_thres: absolute error threshold (pixels).
        rel_thres: relative error threshold.
        dilate_radius: currently unused (imdilate is a TODO below).

    Returns:
        torch.Tensor of shape (B, 3, H, W), float32 RGB in [0, 1].
    """
    gt = D_gt_tensor.detach().cpu().numpy()
    est = D_est_tensor.detach().cpu().numpy()
    B, H, W = gt.shape
    # Valid mask: only positive ground-truth disparities count.
    valid = gt > 0
    # Scaled error: <= 1 means the pixel passes the 3px-or-5% criterion.
    err = np.abs(gt - est)
    err[~valid] = 0
    err[valid] = np.minimum(err[valid] / abs_thres, (err[valid] / gt[valid]) / rel_thres)
    palette = gen_error_colormap()
    img = np.zeros([B, H, W, 3], dtype=np.float32)
    # Paint every pixel with the color of its error bin.
    for lo, hi, *color in palette:
        img[np.logical_and(err >= lo, err < hi)] = color
    # TODO: imdilate
    # error_image = cv2.imdilate(D_err, strel('disk', dilate_radius));
    img[~valid] = 0.
    # Stamp the color legend along the top rows of each image.
    tag_width = 20
    for k in range(palette.shape[0]):
        img[:, :10, k * tag_width:(k + 1) * tag_width, :] = palette[k, 2:]
    return torch.from_numpy(np.ascontiguousarray(img.transpose([0, 3, 1, 2])))
def save_images(logger, mode_tag, images_dict, global_step):
    """Write image tensors from ``images_dict`` to a tensorboard-style logger.

    Each value may be a single array/tensor or a list/tuple of them; only the
    first sample of each batch is rendered.  Tags are '<mode_tag>/<tag>',
    suffixed with '_<idx>' when a key holds several images.
    """
    images_dict = tensor2numpy(images_dict)
    for tag, values in images_dict.items():
        if not isinstance(values, (list, tuple)):
            values = [values]
        multi = len(values) > 1
        for idx, value in enumerate(values):
            if len(value.shape) == 3:
                # Add a channel dimension: (B, H, W) -> (B, 1, H, W).
                value = value[:, np.newaxis, :, :]
            # Keep only the first sample of the batch.
            grid = vutils.make_grid(torch.from_numpy(value[:1]), padding=0, nrow=1,
                                    normalize=True, scale_each=True)
            image_name = '{}/{}'.format(mode_tag, tag)
            if multi:
                image_name = image_name + "_" + str(idx)
            logger.add_image(image_name, grid, global_step)
def tensor2numpy(var_dict):
    """Convert every torch.Tensor value of ``var_dict`` to a numpy array.

    Works in place and returns the same dict.  ndarray values are kept as-is;
    any other value type raises NotImplementedError.
    """
    for key, value in var_dict.items():
        if isinstance(value, np.ndarray):
            continue  # already numpy — nothing to do
        if isinstance(value, torch.Tensor):
            var_dict[key] = value.data.cpu().numpy()
        else:
            raise NotImplementedError("invalid input type for tensor2numpy")
    return var_dict
| [
"numpy.abs",
"numpy.minimum",
"numpy.logical_and",
"numpy.logical_not",
"torch.from_numpy",
"numpy.array",
"numpy.zeros",
"torchvision.utils.make_grid"
] | [((178, 603), 'numpy.array', 'np.array', (['[[0 / 3.0, 0.1875 / 3.0, 49, 54, 149], [0.1875 / 3.0, 0.375 / 3.0, 69, 117,\n 180], [0.375 / 3.0, 0.75 / 3.0, 116, 173, 209], [0.75 / 3.0, 1.5 / 3.0,\n 171, 217, 233], [1.5 / 3.0, 3 / 3.0, 224, 243, 248], [3 / 3.0, 6 / 3.0,\n 254, 224, 144], [6 / 3.0, 12 / 3.0, 253, 174, 97], [12 / 3.0, 24 / 3.0,\n 244, 109, 67], [24 / 3.0, 48 / 3.0, 215, 48, 39], [48 / 3.0, np.inf, \n 165, 0, 38]]'], {'dtype': 'np.float32'}), '([[0 / 3.0, 0.1875 / 3.0, 49, 54, 149], [0.1875 / 3.0, 0.375 / 3.0,\n 69, 117, 180], [0.375 / 3.0, 0.75 / 3.0, 116, 173, 209], [0.75 / 3.0, \n 1.5 / 3.0, 171, 217, 233], [1.5 / 3.0, 3 / 3.0, 224, 243, 248], [3 / \n 3.0, 6 / 3.0, 254, 224, 144], [6 / 3.0, 12 / 3.0, 253, 174, 97], [12 / \n 3.0, 24 / 3.0, 244, 109, 67], [24 / 3.0, 48 / 3.0, 215, 48, 39], [48 / \n 3.0, np.inf, 165, 0, 38]], dtype=np.float32)\n', (186, 603), True, 'import numpy as np\n'), ((1091, 1117), 'numpy.abs', 'np.abs', (['(D_gt_np - D_est_np)'], {}), '(D_gt_np - D_est_np)\n', (1097, 1117), True, 'import numpy as np\n'), ((1174, 1250), 'numpy.minimum', 'np.minimum', (['(error[mask] / abs_thres)', '(error[mask] / D_gt_np[mask] / rel_thres)'], {}), '(error[mask] / abs_thres, error[mask] / D_gt_np[mask] / rel_thres)\n', (1184, 1250), True, 'import numpy as np\n'), ((1351, 1391), 'numpy.zeros', 'np.zeros', (['[B, H, W, 3]'], {'dtype': 'np.float32'}), '([B, H, W, 3], dtype=np.float32)\n', (1359, 1391), True, 'import numpy as np\n'), ((1129, 1149), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (1143, 1149), True, 'import numpy as np\n'), ((1631, 1651), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (1645, 1651), True, 'import numpy as np\n'), ((1449, 1504), 'numpy.logical_and', 'np.logical_and', (['(error >= cols[i][0])', '(error < cols[i][1])'], {}), '(error >= cols[i][0], error < cols[i][1])\n', (1463, 1504), True, 'import numpy as np\n'), ((2396, 2419), 'torch.from_numpy', 'torch.from_numpy', (['value'], 
{}), '(value)\n', (2412, 2419), False, 'import torch\n'), ((2611, 2686), 'torchvision.utils.make_grid', 'vutils.make_grid', (['value'], {'padding': '(0)', 'nrow': '(1)', 'normalize': '(True)', 'scale_each': '(True)'}), '(value, padding=0, nrow=1, normalize=True, scale_each=True)\n', (2627, 2686), True, 'import torchvision.utils as vutils\n')] |
# coding=utf-8
# Copyright (c) DIRECT Contributors
import numpy as np
import pytest
import torch
from direct.data.transforms import fft2, ifft2
from direct.nn.unet.unet_2d import NormUnetModel2d, Unet2d
def create_input(shape):
    """Draw a standard-normal tensor of the given shape, as float32."""
    sample = np.random.randn(*shape)
    return torch.from_numpy(sample.copy()).float()
@pytest.mark.parametrize(
    "shape",
    [
        [2, 3, 16, 16],
        [4, 5, 16, 32],
        [3, 4, 32, 32],
        [3, 4, 40, 20],
    ],
)
@pytest.mark.parametrize(
    "num_filters",
    [4, 6, 8],
)
@pytest.mark.parametrize(
    "num_pool_layers",
    [2, 3],
)
@pytest.mark.parametrize(
    "skip",
    [True, False],
)
@pytest.mark.parametrize(
    "normalized",
    [True, False],
)
def test_unet_2d(shape, num_filters, num_pool_layers, skip, normalized):
    """Smoke-test Unet2d on CPU across input shapes and model configurations.

    ``shape`` is (batch, coils, height, width); the extra trailing dim of 2
    presumably holds the real/imaginary parts — TODO confirm.
    """
    model = Unet2d(
        fft2,
        ifft2,
        num_filters=num_filters,
        num_pool_layers=num_pool_layers,
        skip_connection=skip,
        normalized=normalized,
        dropout_probability=0.05,
    ).cpu()
    data = create_input(shape + [2]).cpu()
    sens = create_input(shape + [2]).cpu()
    out = model(data, sens)
    # The coil dimension (shape[1]) is absent from the output:
    # (batch, height, width, 2).
    assert list(out.shape) == [shape[0]] + shape[2:] + [2]
| [
"pytest.mark.parametrize",
"direct.nn.unet.unet_2d.Unet2d",
"numpy.random.randn",
"torch.from_numpy"
] | [((337, 439), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[[2, 3, 16, 16], [4, 5, 16, 32], [3, 4, 32, 32], [3, 4, 40, 20]]'], {}), "('shape', [[2, 3, 16, 16], [4, 5, 16, 32], [3, 4, 32,\n 32], [3, 4, 40, 20]])\n", (360, 439), False, 'import pytest\n'), ((487, 536), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_filters"""', '[4, 6, 8]'], {}), "('num_filters', [4, 6, 8])\n", (510, 536), False, 'import pytest\n'), ((549, 599), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num_pool_layers"""', '[2, 3]'], {}), "('num_pool_layers', [2, 3])\n", (572, 599), False, 'import pytest\n'), ((612, 658), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""skip"""', '[True, False]'], {}), "('skip', [True, False])\n", (635, 658), False, 'import pytest\n'), ((671, 723), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""normalized"""', '[True, False]'], {}), "('normalized', [True, False])\n", (694, 723), False, 'import pytest\n'), ((244, 267), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (259, 267), True, 'import numpy as np\n'), ((286, 308), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (302, 308), False, 'import torch\n'), ((820, 977), 'direct.nn.unet.unet_2d.Unet2d', 'Unet2d', (['fft2', 'ifft2'], {'num_filters': 'num_filters', 'num_pool_layers': 'num_pool_layers', 'skip_connection': 'skip', 'normalized': 'normalized', 'dropout_probability': '(0.05)'}), '(fft2, ifft2, num_filters=num_filters, num_pool_layers=\n num_pool_layers, skip_connection=skip, normalized=normalized,\n dropout_probability=0.05)\n', (826, 977), False, 'from direct.nn.unet.unet_2d import NormUnetModel2d, Unet2d\n')] |
"""Recursive least squares (RLS) adaptive filter example.

A 2-tap FIR filter is adapted with RLS (forgetting factor lamb) so that,
driven by the reference sinusoid x_r, it tracks the amplitude-modulated
interference x_i.  The filter output, the error, and the evolution of the
filter parameters are plotted and saved to 'example_8_13.pdf'.
"""
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import rc
from matplotlib import rcParams

__author__ = 'ernesto'

# if use latex or mathtext
rc('text', usetex=True)
# NOTE(review): newer matplotlib expects a plain string (not a list) for
# text.latex.preamble — confirm against the targeted matplotlib version.
rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]

# Parameters
# number of samples
N = 100
# number of parameters (FIR filter taps)
p = 2
# forgetting factor
lamb = 0.85
# initial conditions
theta_i = np.zeros((p, ))
sigma_i = 1e5 * np.eye(p)
# reference signal (frequency, amplitude and phase)
omega_r = 2 * math.pi * 0.1
a_r = 1
p_r = 0
# interference signal (frequency, amplitude and phase); its amplitude is
# slowly modulated over the record
omega_i = omega_r
n = np.arange(N)
a_i = 10 * (1 + 0.5 * np.cos(2 * math.pi * n / N))
p_i = math.pi / 4

# Construction of the signals:
# reference and interference
x_i = a_i * np.cos(omega_i * n + p_i)
x_r = a_r * np.cos(omega_r * n + p_r)

# estimate at each step
theta_n = np.zeros((N, p))
error = np.zeros((N, ))
y_r = np.zeros((N, ))

# Processing: RLS recursion
theta = theta_i
sigma = sigma_i
# zero-pad so the regressor is defined for the first p-1 samples
x_r_pad = np.concatenate((np.zeros((p - 1,)), x_r))
for i in n:
    # regressor: last p reference samples, newest first
    h = x_r_pad[i + (p - 1): i - 1 if i - 1 >= 0 else None: -1]
    # a-priori error
    e = x_i[i] - theta @ h
    # gain vector; NOTE(review): the denominator uses lamb ** i (a term that
    # decays with i) rather than the classic lambda-weighted RLS form —
    # confirm this is intended.
    K = (sigma @ h) / (lamb ** i + h @ sigma @ h)
    theta = theta + K * e
    # inverse-correlation matrix update
    sigma = (np.eye(p) - K[:, None] @ h[None, :]) @ sigma
    theta_n[i, :] = theta
    # a-posteriori error and filter output
    error[i] = x_i[i] - theta @ h
    y_r[i] = theta @ h

# true values of the filter parameters
h1 = -a_i * math.sin(math.pi/4) / math.sin(math.pi/5)
h0 = a_i * math.cos(math.pi/4) - h1 * math.cos(math.pi/5)
#print("h[0] = {0:f}, h[1] = {1:f}".format(h0, h1))

# plot styling: marker size, font size, y-axis limit
ms = 3
fs = 12
ymax = 16

fig = plt.figure(0, figsize=(9, 6), frameon=False)
# top panel: interference vs filter output
ax = plt.subplot2grid((9, 1), (0, 0), rowspan=3, colspan=1)
plt.xlim(0, N-1)
plt.ylim(-ymax, ymax)
plt.plot(n, x_i, linestyle='-', color='k', marker='s', markersize=ms, label='$x[n]$')
plt.plot(n, y_r, linestyle='-', color='r', marker='s', markersize=ms, label='$\hat{x}[n]$')
ax.set_xticklabels([])
leg = plt.legend(loc='center', bbox_to_anchor=(0.5, 0.83), frameon=False, fontsize=fs)

# middle panel: a-posteriori error
ax = plt.subplot2grid((9, 1), (3, 0), rowspan=3, colspan=1)
plt.xlim(0, N-1)
plt.ylim(-ymax, ymax)
plt.plot(n, error, linestyle='-', marker='s', color='k', markersize=ms, lw=1)
ax.set_xticklabels([])
ax.set_ylabel(r'$\epsilon[n]=x[n]-\hat{x}[n]$', fontsize=fs)

# bottom panel: parameter estimates vs their true values (dashed)
ax = plt.subplot2grid((9, 1), (6, 0), rowspan=3, colspan=1)
plt.xlim(0, N-1)
plt.plot(n, theta_n[:, 0], linestyle='-', color='k', marker='s', markersize=ms, label='$\hat{h}_n[0]$')
plt.plot(n, theta_n[:, 1], linestyle='-', color='r', marker='s', markersize=ms, label='$\hat{h}_n[1]$')
plt.plot(n, h0, linestyle='--', lw=1, color='grey')
plt.plot(n, h1, linestyle='--', lw=1, color='grey')
ax.set_xlabel(r'$n$', fontsize=fs)
ax.set_ylabel('${\\rm Par\\acute{a}metros\;del\;filtro}$', fontsize=fs)
leg = plt.legend(loc='best', frameon=False, fontsize=fs)

plt.savefig('example_8_13.pdf', bbox_inches='tight')

# extra figure: residual after the transient of the first p samples
fig = plt.figure(1, figsize=(9, 5), frameon=False)
plt.plot(n[p:], x_i[p:] - y_r[p:], linestyle='-', color='k', marker='s', markersize=ms)
plt.show()
| [
"numpy.eye",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"math.cos",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"numpy.cos",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"math.sin",
"matplotlib.pyplot.subplot2grid",
"numpy.aran... | [((174, 197), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (176, 197), False, 'from matplotlib import rc\n'), ((396, 410), 'numpy.zeros', 'np.zeros', (['(p,)'], {}), '((p,))\n', (404, 410), True, 'import numpy as np\n'), ((612, 624), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (621, 624), True, 'import numpy as np\n'), ((865, 881), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (873, 881), True, 'import numpy as np\n'), ((890, 904), 'numpy.zeros', 'np.zeros', (['(N,)'], {}), '((N,))\n', (898, 904), True, 'import numpy as np\n'), ((912, 926), 'numpy.zeros', 'np.zeros', (['(N,)'], {}), '((N,))\n', (920, 926), True, 'import numpy as np\n'), ((1568, 1612), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {'figsize': '(9, 6)', 'frameon': '(False)'}), '(0, figsize=(9, 6), frameon=False)\n', (1578, 1612), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1673), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(9, 1)', '(0, 0)'], {'rowspan': '(3)', 'colspan': '(1)'}), '((9, 1), (0, 0), rowspan=3, colspan=1)\n', (1635, 1673), True, 'import matplotlib.pyplot as plt\n'), ((1674, 1692), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (1682, 1692), True, 'import matplotlib.pyplot as plt\n'), ((1691, 1712), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-ymax)', 'ymax'], {}), '(-ymax, ymax)\n', (1699, 1712), True, 'import matplotlib.pyplot as plt\n'), ((1713, 1803), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'x_i'], {'linestyle': '"""-"""', 'color': '"""k"""', 'marker': '"""s"""', 'markersize': 'ms', 'label': '"""$x[n]$"""'}), "(n, x_i, linestyle='-', color='k', marker='s', markersize=ms, label\n ='$x[n]$')\n", (1721, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1799, 1896), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'y_r'], {'linestyle': '"""-"""', 'color': '"""r"""', 'marker': '"""s"""', 'markersize': 'ms', 'label': '"""$\\\\hat{x}[n]$"""'}), 
"(n, y_r, linestyle='-', color='r', marker='s', markersize=ms, label\n ='$\\\\hat{x}[n]$')\n", (1807, 1896), True, 'import matplotlib.pyplot as plt\n'), ((1920, 2005), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center"""', 'bbox_to_anchor': '(0.5, 0.83)', 'frameon': '(False)', 'fontsize': 'fs'}), "(loc='center', bbox_to_anchor=(0.5, 0.83), frameon=False, fontsize=fs\n )\n", (1930, 2005), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2061), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(9, 1)', '(3, 0)'], {'rowspan': '(3)', 'colspan': '(1)'}), '((9, 1), (3, 0), rowspan=3, colspan=1)\n', (2023, 2061), True, 'import matplotlib.pyplot as plt\n'), ((2062, 2080), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (2070, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2100), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-ymax)', 'ymax'], {}), '(-ymax, ymax)\n', (2087, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2101, 2178), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'error'], {'linestyle': '"""-"""', 'marker': '"""s"""', 'color': '"""k"""', 'markersize': 'ms', 'lw': '(1)'}), "(n, error, linestyle='-', marker='s', color='k', markersize=ms, lw=1)\n", (2109, 2178), True, 'import matplotlib.pyplot as plt\n'), ((2269, 2323), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(9, 1)', '(6, 0)'], {'rowspan': '(3)', 'colspan': '(1)'}), '((9, 1), (6, 0), rowspan=3, colspan=1)\n', (2285, 2323), True, 'import matplotlib.pyplot as plt\n'), ((2339, 2357), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (2347, 2357), True, 'import matplotlib.pyplot as plt\n'), ((2356, 2465), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'theta_n[:, 0]'], {'linestyle': '"""-"""', 'color': '"""k"""', 'marker': '"""s"""', 'markersize': 'ms', 'label': '"""$\\\\hat{h}_n[0]$"""'}), "(n, theta_n[:, 0], linestyle='-', color='k', marker='s', markersize\n =ms, label='$\\\\hat{h}_n[0]$')\n", (2364, 
2465), True, 'import matplotlib.pyplot as plt\n'), ((2460, 2569), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'theta_n[:, 1]'], {'linestyle': '"""-"""', 'color': '"""r"""', 'marker': '"""s"""', 'markersize': 'ms', 'label': '"""$\\\\hat{h}_n[1]$"""'}), "(n, theta_n[:, 1], linestyle='-', color='r', marker='s', markersize\n =ms, label='$\\\\hat{h}_n[1]$')\n", (2468, 2569), True, 'import matplotlib.pyplot as plt\n'), ((2564, 2615), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'h0'], {'linestyle': '"""--"""', 'lw': '(1)', 'color': '"""grey"""'}), "(n, h0, linestyle='--', lw=1, color='grey')\n", (2572, 2615), True, 'import matplotlib.pyplot as plt\n'), ((2616, 2667), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'h1'], {'linestyle': '"""--"""', 'lw': '(1)', 'color': '"""grey"""'}), "(n, h1, linestyle='--', lw=1, color='grey')\n", (2624, 2667), True, 'import matplotlib.pyplot as plt\n'), ((2781, 2831), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'frameon': '(False)', 'fontsize': 'fs'}), "(loc='best', frameon=False, fontsize=fs)\n", (2791, 2831), True, 'import matplotlib.pyplot as plt\n'), ((2833, 2885), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""example_8_13.pdf"""'], {'bbox_inches': '"""tight"""'}), "('example_8_13.pdf', bbox_inches='tight')\n", (2844, 2885), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2937), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(9, 5)', 'frameon': '(False)'}), '(1, figsize=(9, 5), frameon=False)\n', (2903, 2937), True, 'import matplotlib.pyplot as plt\n'), ((2938, 3029), 'matplotlib.pyplot.plot', 'plt.plot', (['n[p:]', '(x_i[p:] - y_r[p:])'], {'linestyle': '"""-"""', 'color': '"""k"""', 'marker': '"""s"""', 'markersize': 'ms'}), "(n[p:], x_i[p:] - y_r[p:], linestyle='-', color='k', marker='s',\n markersize=ms)\n", (2946, 3029), True, 'import matplotlib.pyplot as plt\n'), ((3028, 3038), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3036, 3038), True, 'import matplotlib.pyplot 
as plt\n'), ((428, 437), 'numpy.eye', 'np.eye', (['p'], {}), '(p)\n', (434, 437), True, 'import numpy as np\n'), ((766, 791), 'numpy.cos', 'np.cos', (['(omega_i * n + p_i)'], {}), '(omega_i * n + p_i)\n', (772, 791), True, 'import numpy as np\n'), ((804, 829), 'numpy.cos', 'np.cos', (['(omega_r * n + p_r)'], {}), '(omega_r * n + p_r)\n', (810, 829), True, 'import numpy as np\n'), ((1405, 1426), 'math.sin', 'math.sin', (['(math.pi / 5)'], {}), '(math.pi / 5)\n', (1413, 1426), False, 'import math\n'), ((1002, 1020), 'numpy.zeros', 'np.zeros', (['(p - 1,)'], {}), '((p - 1,))\n', (1010, 1020), True, 'import numpy as np\n'), ((1383, 1404), 'math.sin', 'math.sin', (['(math.pi / 4)'], {}), '(math.pi / 4)\n', (1391, 1404), False, 'import math\n'), ((1436, 1457), 'math.cos', 'math.cos', (['(math.pi / 4)'], {}), '(math.pi / 4)\n', (1444, 1457), False, 'import math\n'), ((1463, 1484), 'math.cos', 'math.cos', (['(math.pi / 5)'], {}), '(math.pi / 5)\n', (1471, 1484), False, 'import math\n'), ((647, 674), 'numpy.cos', 'np.cos', (['(2 * math.pi * n / N)'], {}), '(2 * math.pi * n / N)\n', (653, 674), True, 'import numpy as np\n'), ((1220, 1229), 'numpy.eye', 'np.eye', (['p'], {}), '(p)\n', (1226, 1229), True, 'import numpy as np\n')] |
import src.sfamanopt.mssfa as mssfa
import src.sfamanopt.ssfa as ssfa
import src.sfamanopt.fault_diagnosis as fd
import numpy as np
import matplotlib.pyplot as plt
import tepimport
if __name__ == "__main__":
    # Significance level for the fault-detection critical values.
    alpha = 0.01
    # Number of slow features kept in the dominant (monitored) subspace.
    Md = 55
    # Number of lagged copies of each variable appended to the data matrix.
    lagged_samples = 2
    # Algorithm names for labels
    """Import Data"""
    # NOTE(review): tepimport presumably loads Tennessee Eastman Process
    # data -- set 0 as the normal training set, set 4 as a faulty test
    # set.  Confirm against the tepimport package.
    X = tepimport.import_sets((0), skip_test=True)[0]
    T = tepimport.import_sets((4), skip_training=True)[0]
    # Drop variables 22-40 (rows) from both data matrices.
    ignored_var = list(range(22, 41))
    X = np.delete(X[1], ignored_var, axis=0)
    T = np.delete(T[1], ignored_var, axis=0)
    X = tepimport.add_lagged_samples(X, lagged_samples)
    T = tepimport.add_lagged_samples(T, lagged_samples)
    m = X.shape[0]  # number of (lagged) variables
    n = X.shape[1]  # number of training samples
    # Center the training data; keep the mean to center the test set too.
    X_mean = np.mean(X, axis=1).reshape((-1, 1))
    X = X - X_mean
    # Normalized (unit-variance) copy for the comparison.
    X_std = np.std(X, axis=1).reshape((-1, 1))
    X_norm = X / X_std
    Me = m - Md  # size of the residual (excluded) subspace
    """Train Models"""
    # Sparse SFA trained on both the centered and the normalized data.
    ssfa_object = ssfa.SSFA()
    W_ssfa, _, _, _, _ = ssfa_object.run(X, Md)
    W_ssfa_norm, _, _, _, _ = ssfa_object.run(X_norm, Md)
    Lambda_inv_ssfa = np.linalg.pinv(W_ssfa.T @ W_ssfa)
    Lambda_inv_ssfa_norm = np.linalg.pinv(W_ssfa_norm.T @ W_ssfa_norm)
    # Manifold sparse SFA (Cholesky retraction, L1 sparsity penalty).
    mssfa_object = mssfa.MSSFA("chol", "l1")
    W_mssfa, _, _, _, _ = mssfa_object.run(X, Md)
    W_mssfa_norm, _, _, _, _ = mssfa_object.run(X_norm, Md)
    """Test data"""
    n_test = T.shape[1]
    # Center (and optionally scale) the test set with TRAINING statistics.
    X_test = T - X_mean
    X_test_norm = (T - X_mean) / X_std
    Y_ssfa = W_ssfa.T @ X_test
    Y_ssfa_norm = W_ssfa_norm.T @ X_test_norm
    Y_mssfa = W_mssfa.T @ X_test
    Y_mssfa_norm = W_mssfa_norm.T @ X_test_norm
    # Calculate T^2 for the code from the paper
    T_sqr = np.zeros((4, n_test))
    for i in range(n_test):
        T_sqr[0, i] = Y_ssfa[:, i].T @ Lambda_inv_ssfa @ Y_ssfa[:, i]
        T_sqr[1, i] = (Y_ssfa_norm[:, i].T @ Lambda_inv_ssfa_norm
                       @ Y_ssfa_norm[:, i])
        T_sqr[2, i] = Y_mssfa[:, i].T @ Y_mssfa[:, i]
        T_sqr[3, i] = Y_mssfa_norm[:, i].T @ Y_mssfa_norm[:, i]
    # Critical values are computed for reference but not drawn below.
    Tdc, Tec, Sdc, Sec = fd.calculate_crit_values(n_test, Md, Me, alpha)
    """Plot the comparison"""
    _f, axs2d = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row')
    _f.set_size_inches(21, 9)
    fontsize = 24
    # NOTE(review): the variable names look swapped relative to the data --
    # the "mssfa_plot*" axes are labelled and fed Sparse SFA statistics
    # (rows 0/1 of T_sqr) while the "ssfa_plot*" axes show Manifold
    # Sparse SFA (rows 2/3).  The labels match the data; only the
    # variable names mislead.
    mssfa_plot = axs2d[0][0]
    mssfa_plot.set_title(f"Unnormalized Inputs", fontsize=fontsize)
    mssfa_plot.set_ylabel("Sparse SFA $T^2$", fontsize=fontsize)
    mssfa_plot.plot(T_sqr[0, :])
    mssfa_plot_norm = axs2d[0][1]
    mssfa_plot_norm.set_title(f"Normalized Inputs", fontsize=fontsize)
    mssfa_plot_norm.plot(T_sqr[1, :])
    ssfa_plot = axs2d[1][0]
    ssfa_plot.set_ylabel("Manifold Sparse SFA $T^2$", fontsize=fontsize)
    ssfa_plot.set_xlabel("Sample", fontsize=fontsize)
    ssfa_plot.plot(T_sqr[2, :])
    ssfa_plot_norm = axs2d[1][1]
    ssfa_plot_norm.set_xlabel("Sample", fontsize=fontsize)
    ssfa_plot_norm.plot(T_sqr[3, :])
    _f.set_tight_layout(True)
    plt.savefig(f"plots/normalized_comparison.png", dpi=350)
    plt.close(fig=_f)
    _f = None
| [
"src.sfamanopt.mssfa.MSSFA",
"numpy.mean",
"tepimport.add_lagged_samples",
"numpy.linalg.pinv",
"matplotlib.pyplot.savefig",
"numpy.delete",
"tepimport.import_sets",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.std",
"src.sfamanopt.fault_diagnosis.calculate_crit_values",
"src.sfamanopt.ssf... | [((477, 513), 'numpy.delete', 'np.delete', (['X[1]', 'ignored_var'], {'axis': '(0)'}), '(X[1], ignored_var, axis=0)\n', (486, 513), True, 'import numpy as np\n'), ((522, 558), 'numpy.delete', 'np.delete', (['T[1]', 'ignored_var'], {'axis': '(0)'}), '(T[1], ignored_var, axis=0)\n', (531, 558), True, 'import numpy as np\n'), ((568, 615), 'tepimport.add_lagged_samples', 'tepimport.add_lagged_samples', (['X', 'lagged_samples'], {}), '(X, lagged_samples)\n', (596, 615), False, 'import tepimport\n'), ((624, 671), 'tepimport.add_lagged_samples', 'tepimport.add_lagged_samples', (['T', 'lagged_samples'], {}), '(T, lagged_samples)\n', (652, 671), False, 'import tepimport\n'), ((907, 918), 'src.sfamanopt.ssfa.SSFA', 'ssfa.SSFA', ([], {}), '()\n', (916, 918), True, 'import src.sfamanopt.ssfa as ssfa\n'), ((1047, 1080), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(W_ssfa.T @ W_ssfa)'], {}), '(W_ssfa.T @ W_ssfa)\n', (1061, 1080), True, 'import numpy as np\n'), ((1108, 1151), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(W_ssfa_norm.T @ W_ssfa_norm)'], {}), '(W_ssfa_norm.T @ W_ssfa_norm)\n', (1122, 1151), True, 'import numpy as np\n'), ((1172, 1197), 'src.sfamanopt.mssfa.MSSFA', 'mssfa.MSSFA', (['"""chol"""', '"""l1"""'], {}), "('chol', 'l1')\n", (1183, 1197), True, 'import src.sfamanopt.mssfa as mssfa\n'), ((1637, 1658), 'numpy.zeros', 'np.zeros', (['(4, n_test)'], {}), '((4, n_test))\n', (1645, 1658), True, 'import numpy as np\n'), ((2011, 2058), 'src.sfamanopt.fault_diagnosis.calculate_crit_values', 'fd.calculate_crit_values', (['n_test', 'Md', 'Me', 'alpha'], {}), '(n_test, Md, Me, alpha)\n', (2035, 2058), True, 'import src.sfamanopt.fault_diagnosis as fd\n'), ((2106, 2164), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'sharex': '"""col"""', 'sharey': '"""row"""'}), "(nrows=2, ncols=2, sharex='col', sharey='row')\n", (2118, 2164), True, 'import matplotlib.pyplot as plt\n'), ((2906, 2962), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['f"""plots/normalized_comparison.png"""'], {'dpi': '(350)'}), "(f'plots/normalized_comparison.png', dpi=350)\n", (2917, 2962), True, 'import matplotlib.pyplot as plt\n'), ((2967, 2984), 'matplotlib.pyplot.close', 'plt.close', ([], {'fig': '_f'}), '(fig=_f)\n', (2976, 2984), True, 'import matplotlib.pyplot as plt\n'), ((326, 366), 'tepimport.import_sets', 'tepimport.import_sets', (['(0)'], {'skip_test': '(True)'}), '(0, skip_test=True)\n', (347, 366), False, 'import tepimport\n'), ((380, 424), 'tepimport.import_sets', 'tepimport.import_sets', (['(4)'], {'skip_training': '(True)'}), '(4, skip_training=True)\n', (401, 424), False, 'import tepimport\n'), ((724, 742), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (731, 742), True, 'import numpy as np\n'), ((791, 808), 'numpy.std', 'np.std', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (797, 808), True, 'import numpy as np\n')] |
import logging
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import style
from mpl_finance import candlestick_ohlc
from stock_analyzer.analyzer_base import AnalyzerBase
# The logging format key is "levelname" (no underscore).  The previous
# "%(level_name)s" is not a LogRecord attribute: since Python 3.8,
# basicConfig validates the format string and raises ValueError here;
# on older versions every log call failed at format time.
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
style.use('ggplot')
class StockAssetAnalyzer(AnalyzerBase):
    """Analysis of one stock ticker: returns, moving averages,
    candlestick charts and an OLS regression against the S&P 500.

    The price frames ``self.stock_data`` and ``self.sp500_data`` are
    presumably loaded by ``setup_underlying_data`` on the
    ``AnalyzerBase`` parent -- confirm against that class.
    """
    def __init__(self, ticker, hist_start_date=None, refresh=False):
        """Load historical data for *ticker*; *refresh* forces a reload."""
        super(StockAssetAnalyzer, self).__init__(ticker, hist_start_date)
        # Get underlying data and setup required parameters
        self.setup_underlying_data(refresh=refresh)
    @property
    def mean(self):
        """Mean of the closing price over the loaded history."""
        return self.stock_data['Close'].mean()
    @property
    def std(self):
        """Standard deviation of the closing price."""
        return self.stock_data['Close'].std()
    @property
    def asset_returns(self):
        """Daily percentage returns of the asset (first NaN row dropped).

        Raises
        ------
        ValueError
            If no historical prices were loaded.
        """
        if self.stock_data.empty:
            raise ValueError("Historical stock prices unavailable")
        # NOTE(review): debug print left inside a property getter --
        # consider removing or downgrading to logging.debug.
        print(self.stock_data.head())
        self.stock_data['returns'] = self.stock_data['Close'].pct_change()
        return self.stock_data.returns[1:]
    @property
    def index_returns(self):
        """Daily percentage returns of the S&P 500 benchmark."""
        if self.sp500_data.empty:
            raise ValueError("Historical stock prices unavailable")
        self.sp500_data['returns'] = self.sp500_data['Close'].pct_change()
        return self.sp500_data.returns[1:]
    def plot_returns(self):
        """Plot the asset's daily returns on its own figure."""
        plt.figure(figsize=(10, 5))
        self.asset_returns.plot()
        plt.ylabel("Daily Returns of %s " % self.ticker)
        plt.show()
    def plot_returns_against_snp500(self):
        """Overlay the asset's daily returns with the S&P 500's."""
        plt.figure(figsize=(10, 5))
        self.asset_returns.plot()
        self.index_returns.plot()
        plt.ylabel("Daily Returns of %s against SNP500" % self.ticker)
        plt.show()
    def plot_candlestick(self):
        """Candlestick chart (4-day OHLC resample) with a volume panel."""
        df_ohlc = self.stock_data['Close'].resample('4D').ohlc()
        df_volume = self.stock_data['Volume'].resample('4D').sum()
        df_ohlc = df_ohlc.reset_index()
        # candlestick_ohlc needs matplotlib date numbers, not timestamps.
        df_ohlc['Date'] = df_ohlc['Date'].map(mdates.date2num)
        fig = plt.figure(figsize=(20, 10))
        plt.xlabel('Date')
        plt.ylabel('Price')
        plt.title(self.ticker)
        # NOTE(review): legend() is called before any labelled artist is
        # drawn, so it renders nothing here.
        plt.legend()
        plt.subplots_adjust(left=0.09, bottom=0.20, right=0.94, top=0.90, wspace=0.2, hspace=0.8)
        ax1 = plt.subplot2grid((6, 1), (0, 0), rowspan=5, colspan=1)
        ax2 = plt.subplot2grid((6, 1), (5, 0), rowspan=1, colspan=1, sharex=ax1)
        ax1.xaxis_date()
        candlestick_ohlc(ax1, df_ohlc.values, width=3, colorup='#77d879', colordown='#db3f3f')
        ax2.bar(df_volume.index.map(mdates.date2num), df_volume.values)
        # ax2.fill_between(df_volume.index.map(mdates.date2num), df_volume.values, 0)
        plt.show()
    def plot_moving_averages(self, window1=14, window2=42):
        """Plot two rolling means (default 14- and 42-sample windows).

        NOTE(review): assumes a 'Mean' column exists on stock_data --
        presumably created by AnalyzerBase; TODO confirm.
        """
        self.stock_data['%dDay' % window1] = self.stock_data['Mean'].rolling(window=window1).mean()
        self.stock_data['%dDay' % window2] = self.stock_data['Mean'].rolling(window=window2).mean()
        self.stock_data[['Mean', '%dDay' % window1, '%dDay' % window2]].plot()
        plt.show()
    def plot_ols(self):
        """Scatter asset vs. index returns with the fitted OLS line."""
        fig = plt.figure(figsize=(10, 10))
        plt.plot(self.index_returns.values, self.asset_returns.values, 'r.')
        ax = plt.axis()
        # Draw the regression line across the current x-range.
        x = np.linspace(ax[0], ax[1] + 0.01)
        plt.plot(x, self.alpha + self.beta * x, 'b', lw=2)
        plt.grid(True)
        plt.axis('tight')
        plt.xlabel('SNP 500 Returns')
        plt.ylabel('{} returns'.format(self.ticker))
        plt.show()
    @property
    def alpha(self):
        # Intercept of the OLS fit (asset return unexplained by the index).
        return self.ols_model.params[0]
    @property
    def beta(self):
        # Slope of the OLS fit (sensitivity to index moves).
        return self.ols_model.params[1]
    @property
    def ols_model(self):
        """Fitted OLS model of asset returns on index returns.

        NOTE(review): re-fits on every access; cache if called often.
        """
        return AnalyzerBase.ordinary_least_square_model(self.asset_returns, self.index_returns)
if __name__ == '__main__':
    # Smoke-test drive: run every analysis/plot for a single ticker.
    analyzer = StockAssetAnalyzer('TSLA')
    print(analyzer.asset_returns.head())
    print(analyzer.index_returns.head())
    analyzer.plot_returns()
    analyzer.plot_returns_against_snp500()
    analyzer.plot_candlestick()
    analyzer.plot_moving_averages()
    analyzer.plot_ols()
    # Regression summary and headline statistics.
    print(analyzer.ols_model.summary())
    print("Alpha ", analyzer.alpha)
    print("Beta ", analyzer.beta)
    print("Mean ", analyzer.mean)
| [
"logging.basicConfig",
"matplotlib.pyplot.subplots_adjust",
"stock_analyzer.analyzer_base.AnalyzerBase.ordinary_least_square_model",
"matplotlib.pyplot.grid",
"mpl_finance.candlestick_ohlc",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
... | [((227, 305), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(level_name)s: %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(level_name)s: %(message)s', level=logging.DEBUG)\n", (246, 305), False, 'import logging\n'), ((307, 326), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (316, 326), False, 'from matplotlib import style\n'), ((1390, 1417), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1400, 1417), True, 'import matplotlib.pyplot as plt\n'), ((1460, 1508), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Daily Returns of %s ' % self.ticker)"], {}), "('Daily Returns of %s ' % self.ticker)\n", (1470, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1517, 1527), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1525, 1527), True, 'import matplotlib.pyplot as plt\n'), ((1580, 1607), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1590, 1607), True, 'import matplotlib.pyplot as plt\n'), ((1684, 1746), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Daily Returns of %s against SNP500' % self.ticker)"], {}), "('Daily Returns of %s against SNP500' % self.ticker)\n", (1694, 1746), True, 'import matplotlib.pyplot as plt\n'), ((1755, 1765), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1763, 1765), True, 'import matplotlib.pyplot as plt\n'), ((2049, 2077), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2059, 2077), True, 'import matplotlib.pyplot as plt\n'), ((2086, 2104), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (2096, 2104), True, 'import matplotlib.pyplot as plt\n'), ((2113, 2132), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (2123, 2132), True, 'import matplotlib.pyplot as plt\n'), ((2141, 2163), 'matplotlib.pyplot.title', 'plt.title', (['self.ticker'], {}), 
'(self.ticker)\n', (2150, 2163), True, 'import matplotlib.pyplot as plt\n'), ((2172, 2184), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2182, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2193, 2284), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.09)', 'bottom': '(0.2)', 'right': '(0.94)', 'top': '(0.9)', 'wspace': '(0.2)', 'hspace': '(0.8)'}), '(left=0.09, bottom=0.2, right=0.94, top=0.9, wspace=0.2,\n hspace=0.8)\n', (2212, 2284), True, 'import matplotlib.pyplot as plt\n'), ((2298, 2352), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(6, 1)', '(0, 0)'], {'rowspan': '(5)', 'colspan': '(1)'}), '((6, 1), (0, 0), rowspan=5, colspan=1)\n', (2314, 2352), True, 'import matplotlib.pyplot as plt\n'), ((2367, 2433), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(6, 1)', '(5, 0)'], {'rowspan': '(1)', 'colspan': '(1)', 'sharex': 'ax1'}), '((6, 1), (5, 0), rowspan=1, colspan=1, sharex=ax1)\n', (2383, 2433), True, 'import matplotlib.pyplot as plt\n'), ((2468, 2559), 'mpl_finance.candlestick_ohlc', 'candlestick_ohlc', (['ax1', 'df_ohlc.values'], {'width': '(3)', 'colorup': '"""#77d879"""', 'colordown': '"""#db3f3f"""'}), "(ax1, df_ohlc.values, width=3, colorup='#77d879', colordown\n ='#db3f3f')\n", (2484, 2559), False, 'from mpl_finance import candlestick_ohlc\n'), ((2721, 2731), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2729, 2731), True, 'import matplotlib.pyplot as plt\n'), ((3080, 3090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3088, 3090), True, 'import matplotlib.pyplot as plt\n'), ((3130, 3158), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3140, 3158), True, 'import matplotlib.pyplot as plt\n'), ((3167, 3235), 'matplotlib.pyplot.plot', 'plt.plot', (['self.index_returns.values', 'self.asset_returns.values', '"""r."""'], {}), "(self.index_returns.values, self.asset_returns.values, 'r.')\n", (3175, 3235), True, 
'import matplotlib.pyplot as plt\n'), ((3249, 3259), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (3257, 3259), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3304), 'numpy.linspace', 'np.linspace', (['ax[0]', '(ax[1] + 0.01)'], {}), '(ax[0], ax[1] + 0.01)\n', (3283, 3304), True, 'import numpy as np\n'), ((3313, 3363), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(self.alpha + self.beta * x)', '"""b"""'], {'lw': '(2)'}), "(x, self.alpha + self.beta * x, 'b', lw=2)\n", (3321, 3363), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3387), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3381, 3387), True, 'import matplotlib.pyplot as plt\n'), ((3396, 3413), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (3404, 3413), True, 'import matplotlib.pyplot as plt\n'), ((3422, 3451), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SNP 500 Returns"""'], {}), "('SNP 500 Returns')\n", (3432, 3451), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3523), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3521, 3523), True, 'import matplotlib.pyplot as plt\n'), ((3730, 3815), 'stock_analyzer.analyzer_base.AnalyzerBase.ordinary_least_square_model', 'AnalyzerBase.ordinary_least_square_model', (['self.asset_returns', 'self.index_returns'], {}), '(self.asset_returns, self.index_returns\n )\n', (3770, 3815), False, 'from stock_analyzer.analyzer_base import AnalyzerBase\n')] |
import numpy as np
from numpy import sqrt
from numpy.random import rand, randn
import matplotlib.pyplot as plt
import channel
import Coding
import BPSK, QPSK, BFSK, QFSK, MPSK
if __name__ == "__main__":
    # Number of information bits simulated per SNR point.
    N = 1e3
    # Eb/N0 sweep in dB.
    EbNodB_range = range(0,50)
    ber = []  # simulated bit-error rate at each SNR point
    for n in range(len(EbNodB_range)):
        EbNodB = EbNodB_range[n]
        # Convert dB to a linear Eb/N0 ratio.
        EbNo=10.0**(EbNodB/10.0)
        # AWGN standard deviation for this Eb/N0.
        noise_std = 1/sqrt(2*EbNo)
        # Random bit stream, then channel coding.
        msg = np.random.randint(low=0, high=2, size=int(N))
        msg = Coding.encodebits(msg)
        # Modulate, pass through the noisy channel, demodulate.
        mmsg = BPSK.modulate(msg, EbNo*0.0004, 0.01, 100, 10000)
        mmsg += channel.generate_noise(mmsg, noise_std, 10000)
        dmsg = BPSK.demodulate(mmsg, 0.01, 100, 10000)
        # NOTE(review): "dsmg" looks like a typo for "dmsg"; the decoded
        # bits are never used -- the BER below compares the encoded and
        # demodulated (pre-decoding) bits.  Verify intent.
        dsmg = Coding.decodebits(dmsg)
        Pb, Pb_pr = BPSK.error_probabilities(msg, dmsg, EbNo*0.0004, noise_std)
        ber.append(Pb_pr)  # keep the practical (simulated) probability
    plt.plot(EbNodB_range, ber, "o-", label="BPSK Practical BER")
    plt.xscale('linear')
    plt.xlabel("SNR (dB)")
    plt.ylabel("BER")
    plt.yscale('log')  # BER curves are conventionally drawn on a log scale
    plt.legend()
    plt.savefig("BPSK_PER.png")
    plt.show()
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"Coding.encodebits",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"BPSK.modulate",
"channel.generate_noise",
"BPSK.demodulate",
"BPSK.error_probabilities",
"Coding.decodebits",
"matplo... | [((847, 908), 'matplotlib.pyplot.plot', 'plt.plot', (['EbNodB_range', 'ber', '"""o-"""'], {'label': '"""BPSK Practical BER"""'}), "(EbNodB_range, ber, 'o-', label='BPSK Practical BER')\n", (855, 908), True, 'import matplotlib.pyplot as plt\n'), ((913, 933), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""linear"""'], {}), "('linear')\n", (923, 933), True, 'import matplotlib.pyplot as plt\n'), ((938, 960), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SNR (dB)"""'], {}), "('SNR (dB)')\n", (948, 960), True, 'import matplotlib.pyplot as plt\n'), ((965, 982), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""BER"""'], {}), "('BER')\n", (975, 982), True, 'import matplotlib.pyplot as plt\n'), ((987, 1004), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (997, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1009, 1021), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1019, 1021), True, 'import matplotlib.pyplot as plt\n'), ((1026, 1053), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""BPSK_PER.png"""'], {}), "('BPSK_PER.png')\n", (1037, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1066, 1068), True, 'import matplotlib.pyplot as plt\n'), ((484, 506), 'Coding.encodebits', 'Coding.encodebits', (['msg'], {}), '(msg)\n', (501, 506), False, 'import Coding\n'), ((523, 574), 'BPSK.modulate', 'BPSK.modulate', (['msg', '(EbNo * 0.0004)', '(0.01)', '(100)', '(10000)'], {}), '(msg, EbNo * 0.0004, 0.01, 100, 10000)\n', (536, 574), False, 'import BPSK, QPSK, BFSK, QFSK, MPSK\n'), ((589, 635), 'channel.generate_noise', 'channel.generate_noise', (['mmsg', 'noise_std', '(10000)'], {}), '(mmsg, noise_std, 10000)\n', (611, 635), False, 'import channel\n'), ((652, 691), 'BPSK.demodulate', 'BPSK.demodulate', (['mmsg', '(0.01)', '(100)', '(10000)'], {}), '(mmsg, 0.01, 100, 10000)\n', (667, 691), False, 'import BPSK, QPSK, BFSK, QFSK, MPSK\n'), ((707, 730), 
'Coding.decodebits', 'Coding.decodebits', (['dmsg'], {}), '(dmsg)\n', (724, 730), False, 'import Coding\n'), ((752, 813), 'BPSK.error_probabilities', 'BPSK.error_probabilities', (['msg', 'dmsg', '(EbNo * 0.0004)', 'noise_std'], {}), '(msg, dmsg, EbNo * 0.0004, noise_std)\n', (776, 813), False, 'import BPSK, QPSK, BFSK, QFSK, MPSK\n'), ((397, 411), 'numpy.sqrt', 'sqrt', (['(2 * EbNo)'], {}), '(2 * EbNo)\n', (401, 411), False, 'from numpy import sqrt\n')] |
from heapq import heappop, heappush
import time
from scipy import ndimage as ndi
import numpy as np
import napari
from skimage import filters, morphology, feature
from skimage.morphology._util import _offsets_to_raveled_neighbors, _validate_connectivity
SLEEP_PER_PIX = 0
# ---------
# Watershed
# ---------
def watershed(image, marker_coords, mask,
              compactness=0, affinities=True, scale=None, out=None):
    """Flat (raveled) watershed, run as a generator.

    Delegates the flood fill to ``slow_raveled_watershed`` and yields
    each time a pixel is labelled (so a viewer can animate progress).
    The finished label image, reshaped to image space, is the
    generator's return value.
    """
    anisotropy_weights = _prep_anisotropy(scale, marker_coords)
    (raveled_image, raveled_markers, neighbor_offsets,
     raveled_mask, labels, strides) = _prep_data(
        image, marker_coords, mask, affinities, output=out
    )
    yield from slow_raveled_watershed(raveled_image, raveled_markers,
                                      neighbor_offsets, raveled_mask,
                                      strides, compactness, labels,
                                      affinities, anisotropy_weights)
    # When `image` is an affinities array its leading axis indexes the
    # affinity channels, so the spatial shape excludes it.
    spatial_shape = image.shape[1:] if affinities else image.shape
    return labels.reshape(spatial_shape)
def _prep_data(image, marker_coords, mask=None, affinities=False, output=None):
    """Ravel image, markers, mask and output for the flat watershed loop.

    Parameters
    ----------
    image : ndarray
        Intensity image, or -- when ``affinities`` is True -- an array
        whose FIRST axis indexes the affinity channels (one per image
        dimension).
    marker_coords : ndarray of shape (n_markers, ndim)
        Seed coordinates in image space.
    mask : ndarray of bool, optional
        Region eligible for flooding.  If None, everything except a
        one-voxel border is used (the border must be excluded so that
        neighbor offsets never wrap around the raveled array).
    affinities : bool
        Whether ``image`` holds affinities rather than intensities.
    output : ndarray, optional
        Preallocated flat label array to write into.

    Returns
    -------
    tuple
        ``(raveled_image, raveled_markers, offsets, mask_raveled,
        output, strides)``.
    """
    # INTENSITY VALUES
    if affinities:
        im_ndim = image.ndim - 1 # the first dim should represent affinities
        image_shape = image.shape[1:]
        image_strides = image[0].strides
        image_itemsize = image[0].itemsize
        # One flat row per affinity channel.
        raveled_image = np.zeros((image.shape[0], image[0].size), dtype=image.dtype)
        for i in range(image.shape[0]):
            raveled_image[i] = image[i].ravel()
    else:
        im_ndim = image.ndim
        image_shape = image.shape
        image_strides = image.strides
        image_itemsize = image.itemsize
        raveled_image = image.ravel()
    # NEIGHBORS (full connectivity-1 structuring element)
    selem, centre = _validate_connectivity(im_ndim, 1, None)
    if affinities:
        # array of shape (ndim * 2, 2) giving the indices of neighbor affinities
        offsets = _indices_to_raveled_affinities(image_shape, selem, centre)
    else:
        offsets = _offsets_to_raveled_neighbors(image_shape, selem, centre)
    # Convert each (z, y, x, ...) marker coordinate to a flat index.
    raveled_markers = np.apply_along_axis(_raveled_coordinate, 1,
                                          marker_coords, **{'shape':image_shape})
    if mask is None:
        # Default mask: everything except a one-voxel zero border.
        small_shape = [s - 2 for s in image_shape]
        mask = np.ones(small_shape, dtype=bool)
        mask = np.pad(mask, 1, constant_values=0)
    # Sanity check: the mask must cover the same grid as the image.
    assert image_shape == mask.shape
    mask_raveled = mask.ravel()
    if output is None:
        output = np.zeros(mask_raveled.shape, dtype=raveled_image.dtype)
    # Seed the output with labels 1..n_markers at the marker positions.
    labels = np.arange(len(raveled_markers)) + 1
    output[raveled_markers] = labels
    # Element (not byte) strides -- used for compactness distances.
    strides = np.array(image_strides, dtype=np.intp) // image_itemsize
    return raveled_image, raveled_markers, offsets, mask_raveled, output, strides
def _raveled_coordinate(coordinate, shape):
# array[z, y, x] = array.ravel()[z * array.shape[1] * array.shape[2] + y * array.shape[2] + x]
raveled_coord = 0
for i in range(len(coordinate)):
to_add = coordinate[i]
for j in range(len(shape)):
if j > i:
to_add *= shape[j]
raveled_coord += to_add
return raveled_coord
def _indices_to_raveled_affinities(image_shape, selem, centre):
    """Pair every raveled neighbor offset with its affinity channel.

    Returns an ``(ndim * 2, 2)`` array whose columns are
    (affinity channel, raveled image offset).  Channel ids run forward
    then mirrored backward, matching the ordering produced by
    ``_offsets_to_raveled_neighbors`` for connectivity 1.
    """
    raveled_offsets = _offsets_to_raveled_neighbors(image_shape, selem, centre)
    channel_ids = np.arange(len(image_shape))
    channels = np.concatenate([channel_ids, channel_ids[::-1]])
    return np.stack([channels, raveled_offsets], axis=1)
def _prep_anisotropy(scale, marker_coords):
dim_weights = None
if scale is not None:
# validate that the scale is appropriate for coordinates
assert len(scale) == marker_coords.shape[1]
dim_weights = list(scale) + list(scale)[::-1]
dim_weights = list(map(abs, dim_weights))
return dim_weights
def slow_raveled_watershed(image_raveled, marker_coords, offsets, mask,
                           strides, compactness, output, affinities,
                           dim_weights):
    '''Priority-flood watershed on raveled (1D) arrays, yielding per pixel.

    Parameters
    ----------
    image_raveled : ndarray
        Flat intensity image, or an (n_affinities, size) array when
        ``affinities`` is True.
    marker_coords : ndarray of int
        Raveled indices of the seed pixels.
    offsets : ndarray
        Raveled neighbor offsets; with ``affinities`` this is an (n, 2)
        array of (affinity channel, raveled offset) pairs.
    mask : ndarray of bool
        Flat mask of pixels eligible for flooding.
    strides : ndarray of int
        Element strides of the image grid (for compactness distances).
    compactness : float
        Weight of the compact-watershed distance penalty (0 disables).
    output : ndarray
        Flat label array pre-seeded with marker labels; written in place.
    affinities : bool
        Whether ``image_raveled``/``offsets`` are affinity-based.
    dim_weights : list of float or None
        Per-neighbor anisotropy weights, or None for isotropic data.

    Yields
    ------
    None
        Once each time a pixel is labelled (allows animated viewing).

    Returns
    -------
    ndarray
        The flat ``output`` label array.
    '''
    heap = Heap()
    n_neighbors = offsets.shape[0]
    age = 1
    compact = compactness > 0
    anisotropic = dim_weights is not None
    # The first half of the offsets are the "forward" neighbors, whose
    # affinity is stored at the current pixel -- zero their image offset
    # so the affinity lookup reads at the source position.
    aff_offsets = offsets.copy()
    aff_offsets[:int(len(offsets) / 2), 1] = 0
    # add each seed to the stack
    for i in range(marker_coords.shape[0]):
        elem = Element()
        index = marker_coords[i]
        if affinities:
            elem.value = 0.
        else:
            elem.value = image_raveled[index]
        elem.source = index
        elem.index = index
        elem.age = 0
        heap.push(elem)
    # remove from stack until empty
    while not heap.is_empty:
        elem = heap.pop()
        # In compact mode pixels are labelled when POPPED (the heap order
        # decides ownership); otherwise they are labelled when PUSHED in
        # the else-branch inside the neighbor loop below.
        if compact: # or anisotropic:
            if output[elem.index] and elem.index != elem.source:
                # non-marker, already visited, move on to next item
                continue
            output[elem.index] = output[elem.source]
            yield
            time.sleep(SLEEP_PER_PIX)
        for i in range(n_neighbors):
            # get the flattened address of the neighbor
            if affinities:
                # offsets are 2d (size, 2) with columns 0 and 1 corresponding to
                # affinities and image neighbour indices respectively
                neighbor_index = offsets[i, 1] + elem.index
                # in this case the index used to find elem.value will be 2d tuple
                affinity_index = tuple(aff_offsets[i] + np.array([0, elem.index]))
            else:
                neighbor_index = offsets[i] + elem.index
            if not mask[neighbor_index]:
                # neighbor is not in mask, move on to next neighbor
                continue
            if output[neighbor_index]:
                # if there is a non-zero value in output, move on to next neighbor
                continue
            # if the neighbor is in the mask and not already labeled, add to queue
            age += 1
            new_elem = Element()
            if affinities:
                new_elem.value = image_raveled[affinity_index]
            else:
                new_elem.value = image_raveled[neighbor_index]
            if anisotropic:
                # scale the priority by the per-neighbor anisotropy weight
                dim_weight = dim_weights[i]
                new_elem.value = new_elem.value * dim_weight
            if compact:
                # weight values according to distance from source voxel
                new_elem.value += (compactness *
                                   _euclid_dist(neighbor_index, elem.source,
                                                strides))
                # weight the value according to scale
                # (may need to introduce a scaling hyperparameter)
            else:
                output[neighbor_index] = output[elem.index]
                yield
                time.sleep(SLEEP_PER_PIX)
            new_elem.age = age
            new_elem.index = neighbor_index
            new_elem.source = elem.source
            heap.push(new_elem)
    return output
def _euclid_dist(pt0, pt1, strides):
result, curr = 0, 0
for i in range(strides.shape[0]):
curr = (pt0 // strides[i]) - (pt1 // strides[i])
result += curr * curr
pt0 = pt0 % strides[i]
pt1 = pt1 % strides[i]
return np.sqrt(result)
class Element:
    """A pending pixel on the watershed priority queue.

    The original version wrapped every attribute in a trivial
    getter/setter property pair; plain attributes expose exactly the
    same interface (``elem.value = ...`` / ``elem.value``) with less
    code and faster access.  ``__slots__`` drops the per-instance dict,
    which matters because one Element is allocated per flooded pixel.

    Attributes
    ----------
    value : float or None
        Priority used for heap ordering (lower pops first).
    index : int or None
        Raveled index of the pixel in the image.
    age : int or None
        Insertion counter; records push order.
    source : int or None
        Raveled index of the seed/marker this pixel flooded from.
    """

    __slots__ = ("value", "index", "age", "source")

    def __init__(self, value=None, index=None, age=None, source=None):
        self.value = value
        self.index = index
        self.age = age
        self.source = source

    def __repr__(self):
        return (f"{type(self).__name__}(value={self.value!r}, "
                f"index={self.index!r}, age={self.age!r}, "
                f"source={self.source!r})")
class Heap:
    """Minimum-priority queue of watershed work items.

    Items are ordered by their ``value`` attribute; ties are broken in
    FIFO order because every push receives a fresh, strictly increasing
    integer id that is stored alongside the value on the heap.
    """

    def __init__(self):
        self.items = {}   # id -> item, for items still on the heap
        self.values = []  # underlying heap of (value, id) pairs
        self.id = 0       # next id to hand out

    @property
    def is_empty(self):
        """True when no items remain."""
        return not self.items

    def push(self, item: "Element"):
        """Insert *item*, keyed on ``item.value``."""
        entry_id = self.id
        self.items[entry_id] = item
        heappush(self.values, (item.value, entry_id))
        self.id += 1

    def pop(self):
        """Remove and return the item with the smallest ``value``."""
        _, entry_id = heappop(self.values)
        return self.items.pop(entry_id)

    def size(self):
        """Number of items currently held."""
        return len(self.items)
def segment_output_image(
        unet_output,
        affinities_channels,
        centroids_channel,
        thresholding_channel,
        scale=None,
        compactness=0.,
        absolute_thresh=None,
        out=None,
    ):
    '''Segment a multi-channel U-net output with an affinity watershed.

    This is a generator: it delegates to ``watershed`` and yields once
    per labelled pixel; the final result is the generator's return
    value (retrieve it via ``yield from`` or StopIteration.value).

    Parameters
    ----------
    unet_output: np.ndarray or dask.array.core.Array
        Output from U-net inclusive of all channels. If there is an extra
        dim of size 1, this will be squeezed out. Therefore shape may be
        (1, c, z, y, x) or (c, z, y, x).
    affinities_channels: tuple of int
        Ints, in order (z, y, x) describe the channel indices to which
        the z, y, and x short-range affinities belong.
    centroids_channel: int
        Describes the channel index for the channel that is used to find
        centroids.
    thresholding_channel: int
        Describes the channel index for the channel that is used to find
        the mask for watershed.
    scale: sequence of float, optional
        Voxel scale for anisotropy weighting (passed to watershed).
    compactness: float
        Compact-watershed penalty weight; 0 disables it.
    absolute_thresh: float, optional
        Fixed threshold for the mask; when None an Otsu threshold on a
        smoothed image is used instead.
    out: np.ndarray, optional
        Preallocated flat label array for the watershed to write into.

    Returns
    -------
    (segmentation, seeds, mask)
        Label image (padding removed), seed coordinates (unpadded) and
        the padded boolean mask used for flooding.
    '''
    unet_output = np.asarray(np.squeeze(unet_output))
    # Get the affinities image (a, z, y, x)
    affinities = unet_output[list(affinities_channels)]
    # Normalise each affinity channel to [0, 1] by its own maximum.
    affinities /= np.max(affinities, axis=(1, 2, 3)).reshape((-1, 1, 1, 1))
    # Pad spatial axes by one voxel so neighbor offsets never wrap.
    affinities = np.pad(
        affinities,
        ((0, 0), (1, 1), (1, 1), (1, 1)),
        mode='constant',
        constant_values=0,
    )
    # Get the image for finding centroids
    centroids_img = unet_output[centroids_channel]
    # find the centroids
    centroids = _get_centroids(centroids_img) + 1 # account for padding
    # Get the image for finding the mask
    masking_img = unet_output[thresholding_channel]
    # find the mask for use with watershed
    if absolute_thresh is None:
        mask = _get_mask(masking_img)
    else:
        mask = masking_img > absolute_thresh
    mask = np.pad(mask, 1, constant_values=0) # edge voxels must be 0
    # Drop objects outside [10, 10000) voxels and their centroids.
    mask, centroids = _remove_unwanted_objects(
        mask, centroids, min_area=10, max_area=10000
    )
    # affinity-based watershed
    segmentation = yield from watershed(
        affinities, centroids, mask,
        affinities=True, scale=scale,
        compactness=compactness, out=out
    )
    # Strip the one-voxel padding and undo the centroid shift.
    segmentation = segmentation[1:-1, 1:-1, 1:-1]
    seeds = centroids - 1
    return segmentation, seeds, mask
def _get_mask(img, sigma=2):
    """Boolean foreground mask: Otsu threshold of a Gaussian-smoothed copy.

    The z-axis is smoothed less aggressively (sigma/4) than y and x; the
    threshold is computed on the smoothed image but applied to `img`.
    """
    smoothed = filters.gaussian(img, sigma=(sigma / 4, sigma, sigma))
    return img > filters.threshold_otsu(smoothed)
def _get_centroids(cent, gaussian=True):
    """Locate seed centroids as local maxima of the centroid channel.

    Optionally smooths in-plane (sigma 1 on y/x, none on z) before the
    peak search; peaks below an absolute intensity of 0.04 are ignored.
    """
    if gaussian:
        cent = filters.gaussian(cent, sigma=(0, 1, 1))
    return feature.peak_local_max(cent, threshold_abs=.04)
def _remove_unwanted_objects(mask, centroids, min_area=0, max_area=10000):
    """Drop connected components outside [min_area, max_area) and keep
    only the centroids that land inside a surviving component.

    Returns (filtered boolean mask, filtered centroids).
    """
    labels, _ = ndi.label(mask)
    # Remove the too-small components, then isolate the too-large ones;
    # XOR of the two label images leaves only the "goldilocks" objects.
    no_small = morphology.remove_small_objects(labels, min_size=min_area)
    only_large = morphology.remove_small_objects(no_small, min_size=max_area)
    goldilocks = no_small ^ only_large
    keep = goldilocks[tuple(centroids.T)] > 0
    return goldilocks.astype(bool), centroids[keep]
if __name__ == '__main__':
    # Interactive demo / scratchpad: builds the classic two-overlapping-
    # circles example, separates it with distance-transform and affinity
    # watersheds, and visualizes everything in napari.
    #import skimage.io as io
    #labs = io.imread('/Users/amcg0011/Data/pia-tracking/cang_training/191113_IVMTR26_I3_E3_t58_cang_training_labels.tif')
    #img = io.imread('/Users/amcg0011/Data/pia-tracking/cang_training/191113_IVMTR26_I3_E3_t58_cang_training_image.tif')
    from scipy import ndimage as ndi
    from skimage.feature import peak_local_max
    # Generate an initial image with two overlapping circles
    x, y = np.indices((80, 80))
    x1, y1, x2, y2 = 28, 28, 44, 52
    r1, r2 = 16, 20
    mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
    mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
    image = np.logical_or(mask_circle1, mask_circle2)
    # Now we want to separate the two objects in image
    # Generate the markers as local maxima of the distance to the background
    distance = ndi.distance_transform_edt(image)
    coords = peak_local_max(distance, footprint=np.ones((3, 3)), labels=image)
    distance = 1 - (distance / distance.max())
    out = watershed(distance, coords, image, 0, affinities=False)
    from train_io import get_affinities
    affs = get_affinities(out.copy())
    from skimage.filters import gaussian
    a_out = watershed(affs.copy(), coords, image, 0, affinities=True)
    # Smoothed affinities for comparison against the raw-affinity result.
    affs_g = np.stack([gaussian(affs[i], sigma=1) for i in range(affs.shape[0])])
    ag_out = watershed(affs_g.copy(), coords, image, 0, affinities=True)
    import napari
    v = napari.view_labels(image, name='mask', blending='additive', visible=False)
    v.add_labels(out, name='watershed', blending='additive', visible=False)
    v.add_image(affs[0], name='y affinities', blending='additive', colormap='green', visible=False)
    v.add_image(affs[1], name='x affinities', blending='additive', colormap='magenta', visible=False)
    v.add_labels(a_out, name='affinity watershed', blending='additive', visible=False)
    v.add_image(affs_g[0], name='y affinities (Gaussian)', blending='additive', colormap='green', visible=False)
    v.add_image(affs_g[1], name='x affinities (Gaussian)', blending='additive', colormap='magenta', visible=False)
    v.add_labels(ag_out, name='affinity watershed (Gaussian)', blending='additive')
    v.add_points(coords, size=2)
    napari.run()
    #from helpers import get_files, get_paths
    #data_dir = '/Users/amcg0011/Data/pia-tracking/cang_training'
    #train_dir = os.path.join(data_dir, 'cang_training_data')
    #prediction_dir = os.path.join(data_dir, '210314_training_0')
    # Get the file paths
    #image_files, affinities_files = get_files(train_dir)
    #prediction_files = get_paths(prediction_dir)
    # Get some sample images
    #i0 = imread(image_files[0])
    #a0 = imread(affinities_files[0])
    #p0 = imread(prediction_files[0])
    # ----------------
    # This Didn't Work
    # ----------------
    #from elf.segmentation.workflows import simple_multicut_workflow
    #from elf.segmentation.mutex_watershed import mutex_watershed
    #from helpers import get_files, get_paths
    # GOT SOME INTERESTING TRACEBACKS!!
    #seg_a0 = simple_multicut_workflow(a0, True, 'blockwise-multicut')
    #seg_p0 = simple_multicut_workflow(p0, False, 'greedy-additive')
    # ----------------------------
    # Playing With Raveled Indices
    # ----------------------------
    #OFFSETS = [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]
    #STRIDES = [1, 1, 1]
    # CAN'T GET AFFORGATO
    #mw_a0 = mutex_watershed(a0, OFFSETS, STRIDES)
    #offsets = _offsets_to_raveled_neighbors((10, 256, 256), selem, (1, 1, 1))
    #image = np.random.random((100, 100))
    #offsets = _offsets_to_raveled_neighbors((100, 100), np.ones((3, 3)), (1, 1))
    #image_raveled = image.raveled()
    #pixel_pos = [49, 49]
    #selem = np.ones((3, 3))
    #selem_indices = np.stack(np.nonzero(selem), axis=-1)
    #offsets = selem_indices - (1,1)
    #ravel_factors = image.shape[1:] + (1,)
    #raveled_offsets = (offsets * ravel_factors).sum(axis=1)
    # similarly
    #i = np.array([[12, 23, 31], [43, 39, 24]])
    #ir = np.ravel_multi_index(i, (100, 100))
| [
"numpy.sqrt",
"time.sleep",
"numpy.array",
"heapq.heappush",
"napari.view_labels",
"scipy.ndimage.label",
"numpy.max",
"numpy.stack",
"heapq.heappop",
"scipy.ndimage.distance_transform_edt",
"numpy.ones",
"skimage.morphology._util._validate_connectivity",
"numpy.indices",
"skimage.morpholo... | [((1747, 1787), 'skimage.morphology._util._validate_connectivity', '_validate_connectivity', (['im_ndim', '(1)', 'None'], {}), '(im_ndim, 1, None)\n', (1769, 1787), False, 'from skimage.morphology._util import _offsets_to_raveled_neighbors, _validate_connectivity\n'), ((2074, 2162), 'numpy.apply_along_axis', 'np.apply_along_axis', (['_raveled_coordinate', '(1)', 'marker_coords'], {}), "(_raveled_coordinate, 1, marker_coords, **{'shape':\n image_shape})\n", (2093, 2162), True, 'import numpy as np\n'), ((3251, 3308), 'skimage.morphology._util._offsets_to_raveled_neighbors', '_offsets_to_raveled_neighbors', (['image_shape', 'selem', 'centre'], {}), '(image_shape, selem, centre)\n', (3280, 3308), False, 'from skimage.morphology._util import _offsets_to_raveled_neighbors, _validate_connectivity\n'), ((3483, 3519), 'numpy.stack', 'np.stack', (['[affs, im_offsets]'], {'axis': '(1)'}), '([affs, im_offsets], axis=1)\n', (3491, 3519), True, 'import numpy as np\n'), ((7339, 7354), 'numpy.sqrt', 'np.sqrt', (['result'], {}), '(result)\n', (7346, 7354), True, 'import numpy as np\n'), ((10013, 10105), 'numpy.pad', 'np.pad', (['affinities', '((0, 0), (1, 1), (1, 1), (1, 1))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(affinities, ((0, 0), (1, 1), (1, 1), (1, 1)), mode='constant',\n constant_values=0)\n", (10019, 10105), True, 'import numpy as np\n'), ((10604, 10638), 'numpy.pad', 'np.pad', (['mask', '(1)'], {'constant_values': '(0)'}), '(mask, 1, constant_values=0)\n', (10610, 10638), True, 'import numpy as np\n'), ((11411, 11459), 'skimage.feature.peak_local_max', 'feature.peak_local_max', (['cent'], {'threshold_abs': '(0.04)'}), '(cent, threshold_abs=0.04)\n', (11433, 11459), False, 'from skimage import filters, morphology, feature\n'), ((11679, 11694), 'scipy.ndimage.label', 'ndi.label', (['mask'], {}), '(mask)\n', (11688, 11694), True, 'from scipy import ndimage as ndi\n'), ((11717, 11775), 'skimage.morphology.remove_small_objects', 
'morphology.remove_small_objects', (['labels'], {'min_size': 'min_area'}), '(labels, min_size=min_area)\n', (11748, 11775), False, 'from skimage import filters, morphology, feature\n'), ((11795, 11862), 'skimage.morphology.remove_small_objects', 'morphology.remove_small_objects', (['labels_no_small'], {'min_size': 'max_area'}), '(labels_no_small, min_size=max_area)\n', (11826, 11862), False, 'from skimage import filters, morphology, feature\n'), ((12570, 12590), 'numpy.indices', 'np.indices', (['(80, 80)'], {}), '((80, 80))\n', (12580, 12590), True, 'import numpy as np\n'), ((12765, 12806), 'numpy.logical_or', 'np.logical_or', (['mask_circle1', 'mask_circle2'], {}), '(mask_circle1, mask_circle2)\n', (12778, 12806), True, 'import numpy as np\n'), ((12954, 12987), 'scipy.ndimage.distance_transform_edt', 'ndi.distance_transform_edt', (['image'], {}), '(image)\n', (12980, 12987), True, 'from scipy import ndimage as ndi\n'), ((13550, 13624), 'napari.view_labels', 'napari.view_labels', (['image'], {'name': '"""mask"""', 'blending': '"""additive"""', 'visible': '(False)'}), "(image, name='mask', blending='additive', visible=False)\n", (13568, 13624), False, 'import napari\n'), ((14339, 14351), 'napari.run', 'napari.run', ([], {}), '()\n', (14349, 14351), False, 'import napari\n'), ((1373, 1433), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image[0].size)'], {'dtype': 'image.dtype'}), '((image.shape[0], image[0].size), dtype=image.dtype)\n', (1381, 1433), True, 'import numpy as np\n'), ((1994, 2051), 'skimage.morphology._util._offsets_to_raveled_neighbors', '_offsets_to_raveled_neighbors', (['image_shape', 'selem', 'centre'], {}), '(image_shape, selem, centre)\n', (2023, 2051), False, 'from skimage.morphology._util import _offsets_to_raveled_neighbors, _validate_connectivity\n'), ((2288, 2320), 'numpy.ones', 'np.ones', (['small_shape'], {'dtype': 'bool'}), '(small_shape, dtype=bool)\n', (2295, 2320), True, 'import numpy as np\n'), ((2336, 2370), 'numpy.pad', 'np.pad', 
(['mask', '(1)'], {'constant_values': '(0)'}), '(mask, 1, constant_values=0)\n', (2342, 2370), True, 'import numpy as np\n'), ((2484, 2539), 'numpy.zeros', 'np.zeros', (['mask_raveled.shape'], {'dtype': 'raveled_image.dtype'}), '(mask_raveled.shape, dtype=raveled_image.dtype)\n', (2492, 2539), True, 'import numpy as np\n'), ((2640, 2678), 'numpy.array', 'np.array', (['image_strides'], {'dtype': 'np.intp'}), '(image_strides, dtype=np.intp)\n', (2648, 2678), True, 'import numpy as np\n'), ((8503, 8546), 'heapq.heappush', 'heappush', (['self.values', '(item.value, new_id)'], {}), '(self.values, (item.value, new_id))\n', (8511, 8546), False, 'from heapq import heappop, heappush\n'), ((8700, 8720), 'heapq.heappop', 'heappop', (['self.values'], {}), '(self.values)\n', (8707, 8720), False, 'from heapq import heappop, heappush\n'), ((9795, 9818), 'numpy.squeeze', 'np.squeeze', (['unet_output'], {}), '(unet_output)\n', (9805, 9818), True, 'import numpy as np\n'), ((11177, 11231), 'skimage.filters.gaussian', 'filters.gaussian', (['img'], {'sigma': '(sigma / 4, sigma, sigma)'}), '(img, sigma=(sigma / 4, sigma, sigma))\n', (11193, 11231), False, 'from skimage import filters, morphology, feature\n'), ((11355, 11394), 'skimage.filters.gaussian', 'filters.gaussian', (['cent'], {'sigma': '(0, 1, 1)'}), '(cent, sigma=(0, 1, 1))\n', (11371, 11394), False, 'from skimage import filters, morphology, feature\n'), ((5043, 5068), 'time.sleep', 'time.sleep', (['SLEEP_PER_PIX'], {}), '(SLEEP_PER_PIX)\n', (5053, 5068), False, 'import time\n'), ((6885, 6910), 'time.sleep', 'time.sleep', (['SLEEP_PER_PIX'], {}), '(SLEEP_PER_PIX)\n', (6895, 6910), False, 'import time\n'), ((9938, 9972), 'numpy.max', 'np.max', (['affinities'], {'axis': '(1, 2, 3)'}), '(affinities, axis=(1, 2, 3))\n', (9944, 9972), True, 'import numpy as np\n'), ((13036, 13051), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (13043, 13051), True, 'import numpy as np\n'), ((13392, 13418), 'skimage.filters.gaussian', 
'gaussian', (['affs[i]'], {'sigma': '(1)'}), '(affs[i], sigma=1)\n', (13400, 13418), False, 'from skimage.filters import gaussian\n'), ((5539, 5564), 'numpy.array', 'np.array', (['[0, elem.index]'], {}), '([0, elem.index])\n', (5547, 5564), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import absolute_import
def configuration(parent_package='', top_path=None):
    """Assemble the numpy.distutils configuration for the sharpclaw
    Fortran extensions (the 1D, 2D, and 3D flux solvers)."""
    from numpy.distutils.misc_util import Configuration

    config = Configuration('sharpclaw', parent_package, top_path)
    # Sources shared by every dimension-specific extension, in link order.
    shared = ['ClawParams.f90', 'weno.f90', 'reconstruct.f90',
              'evec.f90', 'workspace.f90']
    config.add_extension('sharpclaw1', shared + ['flux1.f90'])
    config.add_extension('sharpclaw2', shared + ['flux2.f90', 'flux1.f90'])
    config.add_extension('sharpclaw3', shared + ['flux3.f90', 'flux1.f90'])
    return config
if __name__ == '__main__':
    # Stand-alone build entry point: convert the Configuration object to
    # a keyword dict and hand it to numpy.distutils' setup().
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| [
"numpy.distutils.misc_util.Configuration"
] | [((183, 235), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""sharpclaw"""', 'parent_package', 'top_path'], {}), "('sharpclaw', parent_package, top_path)\n", (196, 235), False, 'from numpy.distutils.misc_util import Configuration\n')] |
"""
Random Correlation matrix (LKJ 2009) output checking
Created on Wed Aug 2 09:09:02 2017
@author: junpenglao
"""
import numpy as np
from scipy import stats
def is_pos_def(A):
    """Return 1 if `A` is a symmetric positive-definite matrix, else 0.

    Symmetry is checked by exact array equality with the transpose;
    positive definiteness is checked by attempting a Cholesky
    factorization, which raises LinAlgError iff the matrix is not PD.

    :param A: square numpy array.
    :return: 1 or 0 (int-like, so results can be summed by callers).
    """
    if not np.array_equal(A, A.T):
        return 0  # not symmetric -> cannot be symmetric positive definite
    try:
        np.linalg.cholesky(A)
    except np.linalg.LinAlgError:
        # Bugfix: the original caught np.linalg.linalg.LinAlgError, a
        # private alias module removed in NumPy 2.0; the public name is
        # np.linalg.LinAlgError.
        return 0
    return 1
n = 10
eta = 1.
size = 1000
# lkj_random (defined elsewhere in this module) draws `size` random
# n x n correlation matrices with LKJ shape parameter eta.
P = lkj_random(n, eta, size)
k = 0
for i, p in enumerate(P):
    k += is_pos_def(p)
print("{0} % of the output matrix is positive definite.".format(k/size*100))
import matplotlib.pylab as plt
# Off diagonal elements
C = P.transpose((1, 2, 0))[np.triu_indices(n, k=1)].T
fig, ax = plt.subplots()
# Bugfix: matplotlib >= 3.1 removed the `normed` keyword from hist();
# `density=True` is the documented replacement with the same semantics.
ax.hist(C.flatten(), 100, density=True)
# LKJ marginal check: each off-diagonal entry should follow a Beta(b, b)
# with b = eta - 1 + n/2, rescaled from (0, 1) to (-1, 1).
beta = eta - 1 + n/2
C2 = 2 * stats.beta.rvs(size=C.shape, a=beta, b=beta) - 1
ax.hist(C2.flatten(), 100, density=True, histtype='step', label='Beta() distribution')
plt.legend(loc='upper right', frameon=False)
| [
"matplotlib.pylab.subplots",
"scipy.stats.beta.rvs",
"numpy.triu_indices",
"matplotlib.pylab.legend",
"numpy.array_equal",
"numpy.linalg.cholesky"
] | [((682, 696), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {}), '()\n', (694, 696), True, 'import matplotlib.pylab as plt\n'), ((907, 951), 'matplotlib.pylab.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'frameon': '(False)'}), "(loc='upper right', frameon=False)\n", (917, 951), True, 'import matplotlib.pylab as plt\n'), ((189, 211), 'numpy.array_equal', 'np.array_equal', (['A', 'A.T'], {}), '(A, A.T)\n', (203, 211), True, 'import numpy as np\n'), ((644, 667), 'numpy.triu_indices', 'np.triu_indices', (['n'], {'k': '(1)'}), '(n, k=1)\n', (659, 667), True, 'import numpy as np\n'), ((772, 816), 'scipy.stats.beta.rvs', 'stats.beta.rvs', ([], {'size': 'C.shape', 'a': 'beta', 'b': 'beta'}), '(size=C.shape, a=beta, b=beta)\n', (786, 816), False, 'from scipy import stats\n'), ((238, 259), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['A'], {}), '(A)\n', (256, 259), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
import copy
import datetime
import gc
import time
# import torch
import numpy as np
# Lightweight coordinate records: IRC is voxel space (index, row, col),
# XYZ is the scanner/patient coordinate system.
IrcTuple = collections.namedtuple('IrcTuple', ['index', 'row', 'col'])
XyzTuple = collections.namedtuple('XyzTuple', ['x', 'y', 'z'])
def irc2xyz(coord_irc, origin_xyz, vxSize_xyz, direction_a):
    """Convert a voxel (index, row, col) coordinate to scanner-space XYZ.

    The IRC triple is reversed to CRI order so its axes line up with XYZ,
    scaled by the voxel size, rotated through the direction matrix, and
    finally offset by the volume origin.
    """
    cri = np.array(coord_irc)[::-1]
    scaled = cri * np.array(vxSize_xyz)
    coords_xyz = direction_a @ scaled + np.array(origin_xyz)
    return XyzTuple(*coords_xyz)
def xyz2irc(coord_xyz, origin_xyz, vxSize_xyz, direction_a):
    """Convert a scanner-space XYZ coordinate to voxel (index, row, col).

    Inverse of irc2xyz: remove the origin offset, undo the direction
    rotation, divide out the voxel size, round to the nearest voxel, and
    flip CRI back to IRC ordering.
    """
    displacement = np.array(coord_xyz) - np.array(origin_xyz)
    cri = (displacement @ np.linalg.inv(direction_a)) / np.array(vxSize_xyz)
    cri = np.round(cri)
    return IrcTuple(int(cri[2]), int(cri[1]), int(cri[0]))
def importstr(module_str, from_=None):
    """Dynamically import a module (and optionally one attribute of it)
    from a dotted-path string.

    >>> importstr('os')
    <module 'os' from '.../os.pyc'>
    >>> importstr('math', 'fabs')
    <built-in function fabs>

    :param module_str: dotted module path; may carry a ':attr' suffix as
        shorthand for the `from_` argument.
    :param from_: name of an attribute to fetch from the module.
    :return: the module, or the requested attribute.
    :raises ImportError: if the requested attribute does not exist.
    """
    if from_ is None and ':' in module_str:
        # Bugfix: split on the LAST colon only; the original rsplit(':')
        # produced 3+ pieces (unpack ValueError) for multi-colon input.
        module_str, from_ = module_str.rsplit(':', 1)

    # __import__ returns the top-level package; walk down to the leaf.
    module = __import__(module_str)
    for sub_str in module_str.split('.')[1:]:
        module = getattr(module, sub_str)

    if from_:
        try:
            return getattr(module, from_)
        except AttributeError:
            # Narrowed from a bare `except:` so unrelated errors (e.g.
            # KeyboardInterrupt) are no longer swallowed.
            raise ImportError('{}.{}'.format(module_str, from_))
    return module
def prhist(ary, prefix_str=None, **kwargs):
if prefix_str is None:
prefix_str = ''
else:
prefix_str += ' '
count_ary, bins_ary = np.histogram(ary, **kwargs)
for i in range(count_ary.shape[0]):
print("{}{:-8.2f}".format(prefix_str, bins_ary[i]), "{:-10}".format(count_ary[i]))
print("{}{:-8.2f}".format(prefix_str, bins_ary[-1]))
def enumerateWithEstimate(
        iter,
        desc_str,
        start_ndx=0,
        print_ndx=4,
        backoff=None,
        iter_len=None,
):
    """Drop-in replacement for `enumerate` with progress-estimation hooks.

    Yields (index, item) pairs exactly like `enumerate(iter)`.  At
    geometrically spaced milestone indices (print_ndx, print_ndx*backoff,
    ...) the generator estimates the loop's total duration from the
    average time-per-iteration observed so far; the timer is restarted
    once `start_ndx` iterations have passed so that warm-up costs
    (caching, lazy loading) do not skew the average.

    :param iter: the iterable to enumerate (must support len() unless
        `iter_len` is given).
    :param desc_str: short human-readable description of the loop; kept
        for API compatibility, it does not affect the iteration itself.
    :param start_ndx: number of warm-up iterations excluded from timing.
        Defaults to 0.
    :param print_ndx: first milestone index, grown by `backoff` after
        each milestone; forced up to at least start_ndx * backoff.
    :param backoff: multiplicative gap between milestones.  Defaults to
        2, doubled until backoff**7 >= iter_len for very long loops.
    :param iter_len: explicit length of `iter`; defaults to len(iter).
    :return: a generator of (index, item) pairs.
    """
    if iter_len is None:
        iter_len = len(iter)

    if backoff is None:
        backoff = 2
        while backoff ** 7 < iter_len:
            backoff *= 2

    assert backoff >= 2
    while print_ndx < start_ndx * backoff:
        print_ndx *= backoff

    start_ts = time.time()
    for current_ndx, item in enumerate(iter):
        yield current_ndx, item

        if current_ndx == print_ndx:
            # Extrapolate total duration from the per-iteration average
            # measured since start_ndx.  NOTE(review): the estimate is
            # currently computed but never reported — looks like residue
            # of a removed logging call; confirm before relying on it.
            per_iter = (time.time() - start_ts) / (current_ndx - start_ndx + 1)
            duration_sec = per_iter * (iter_len - start_ndx)
            print_ndx *= backoff

        if current_ndx + 1 == start_ndx:
            # Warm-up finished: restart the clock.
            start_ts = time.time()
| [
"numpy.histogram",
"collections.namedtuple",
"numpy.array",
"numpy.linalg.inv",
"time.time",
"numpy.round"
] | [((164, 223), 'collections.namedtuple', 'collections.namedtuple', (['"""IrcTuple"""', "['index', 'row', 'col']"], {}), "('IrcTuple', ['index', 'row', 'col'])\n", (186, 223), False, 'import collections\n'), ((235, 286), 'collections.namedtuple', 'collections.namedtuple', (['"""XyzTuple"""', "['x', 'y', 'z']"], {}), "('XyzTuple', ['x', 'y', 'z'])\n", (257, 286), False, 'import collections\n'), ((402, 422), 'numpy.array', 'np.array', (['origin_xyz'], {}), '(origin_xyz)\n', (410, 422), True, 'import numpy as np\n'), ((438, 458), 'numpy.array', 'np.array', (['vxSize_xyz'], {}), '(vxSize_xyz)\n', (446, 458), True, 'import numpy as np\n'), ((695, 715), 'numpy.array', 'np.array', (['origin_xyz'], {}), '(origin_xyz)\n', (703, 715), True, 'import numpy as np\n'), ((731, 751), 'numpy.array', 'np.array', (['vxSize_xyz'], {}), '(vxSize_xyz)\n', (739, 751), True, 'import numpy as np\n'), ((766, 785), 'numpy.array', 'np.array', (['coord_xyz'], {}), '(coord_xyz)\n', (774, 785), True, 'import numpy as np\n'), ((873, 888), 'numpy.round', 'np.round', (['cri_a'], {}), '(cri_a)\n', (881, 888), True, 'import numpy as np\n'), ((1685, 1712), 'numpy.histogram', 'np.histogram', (['ary'], {}), '(ary, **kwargs)\n', (1697, 1712), True, 'import numpy as np\n'), ((4866, 4877), 'time.time', 'time.time', ([], {}), '()\n', (4875, 4877), False, 'import time\n'), ((361, 380), 'numpy.array', 'np.array', (['coord_irc'], {}), '(coord_irc)\n', (369, 380), True, 'import numpy as np\n'), ((822, 848), 'numpy.linalg.inv', 'np.linalg.inv', (['direction_a'], {}), '(direction_a)\n', (835, 848), True, 'import numpy as np\n'), ((5314, 5325), 'time.time', 'time.time', ([], {}), '()\n', (5323, 5325), False, 'import time\n'), ((5048, 5059), 'time.time', 'time.time', ([], {}), '()\n', (5057, 5059), False, 'import time\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Bin the parent-ML axis into 8 equal-width ranges over [0, 40);
# dr is the resulting bin width.
range_start = np.linspace(0,40, 8, endpoint=False)
dr = range_start[1] - range_start[0]
# pertubation_types = ["all"]
pertubation_types = ["lattice", "lattice_nodens", "atom_types", "atom_sites", "density", "all"]
for t in pertubation_types:
    # Each <type>.npy is expected to hold (parent ML, delta ML) pairs —
    # TODO confirm against the producer of these files.
    data = np.load("%s.npy" % t)
    df = pd.DataFrame(data, columns=["ml", "dml"])
    # Mean delta-ML within each parent-ML bin.
    averages = [df[(df.ml < a + dr) & (df.ml >= a)].mean().dml for a in range_start]
    fig = plt.figure(figsize=(5,5), tight_layout=True)
    ax = fig.add_subplot(1, 1, 1)
    # Bin averages drawn on top of the raw scatter.
    ax.plot(range_start + dr/2, averages, zorder=3, color="orange", lw="3")
    ax.scatter(data[:, 0], data[:, 1], zorder=2)
    ax.set_xlabel('parent ML')
    ax.set_ylabel('∆ML')
    # ax.set_xlabel('Parent methane loading [bin units]')
    # ax.set_ylabel('∆ methane loading [bin units]')
    ax.grid(linestyle='-', color='0.7', zorder=0)
    ax.set_xlim(0,40)
    ax.set_ylim(-15,15)
    ax.legend(["Range average"])
    ax.set_title(t)
    fig.savefig("%s.png" % t)
    # Close each figure inside the loop so memory does not accumulate.
    plt.close(fig)
| [
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"numpy.load"
] | [((88, 125), 'numpy.linspace', 'np.linspace', (['(0)', '(40)', '(8)'], {'endpoint': '(False)'}), '(0, 40, 8, endpoint=False)\n', (99, 125), True, 'import numpy as np\n'), ((329, 350), 'numpy.load', 'np.load', (["('%s.npy' % t)"], {}), "('%s.npy' % t)\n", (336, 350), True, 'import numpy as np\n'), ((360, 401), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['ml', 'dml']"}), "(data, columns=['ml', 'dml'])\n", (372, 401), True, 'import pandas as pd\n'), ((498, 543), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)', 'tight_layout': '(True)'}), '(figsize=(5, 5), tight_layout=True)\n', (508, 543), True, 'import matplotlib.pyplot as plt\n'), ((1053, 1067), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1062, 1067), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import scipy.signal
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from torch.distributions.utils import _standard_normal, broadcast_all
from torch.distributions.exp_family import ExponentialFamily
from numbers import Number
from torch.distributions import constraints
import math
def combined_shape(length, shape=None):
    """Prepend a leading batch dimension `length` to `shape`.

    Returns (length,) when shape is None, (length, shape) when shape is
    a scalar, and (length, *shape) when shape is a sequence.
    """
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
def mlp(sizes, activation, output_activation=nn.Identity):
    """Build a fully-connected network as an nn.Sequential.

    `sizes` lists the layer widths; each Linear is followed by
    `activation`, except the last one which uses `output_activation`.
    """
    layers = []
    n_affine = len(sizes) - 1
    for j, (fan_in, fan_out) in enumerate(zip(sizes[:-1], sizes[1:])):
        act = output_activation if j == n_affine - 1 else activation
        layers.append(nn.Linear(fan_in, fan_out))
        layers.append(act())
    return nn.Sequential(*layers)
def count_vars(module):
    """Return the total number of scalar parameters in `module`."""
    total = 0
    for p in module.parameters():
        total += np.prod(p.shape)
    return total
# Clamp bounds for the policy's log standard deviation: keeps
# exp(log_std) within a numerically stable band (used via torch.clamp
# in the actor forward passes below).
LOG_STD_MAX = 2
LOG_STD_MIN = -20
class SquashedGaussianMLPActor(nn.Module):
    """Tanh-squashed Gaussian policy as used in SAC.

    The network outputs the mean and log-std of a diagonal Gaussian over
    pre-squash actions; sampled actions are passed through tanh and
    scaled by `act_limit` so they respect the environment bounds.
    """

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit):
        super().__init__()
        self.net = mlp([obs_dim] + list(hidden_sizes), activation, activation)
        self.mu_layer = nn.Linear(hidden_sizes[-1], act_dim)
        self.log_std_layer = nn.Linear(hidden_sizes[-1], act_dim)
        self.act_limit = act_limit
        self.distribution = None

    def forward(self, obs, distribution=None, deterministic=False, with_logprob=True):
        """Sample a (squashed, scaled) action for `obs`.

        :param obs: observation tensor of shape (..., obs_dim).
        :param distribution: unused; kept for signature compatibility
            with ActorFC.forward.
        :param deterministic: if True, return the mean action (test-time
            evaluation) instead of sampling.
        :param with_logprob: if True, also return the log-probability of
            the squashed action.
        :return: (action, logp_a or None, None) — the trailing None
            mirrors ActorFC.forward's third return value.
        """
        net_out = self.net(obs)
        mu = self.mu_layer(net_out)
        log_std = self.log_std_layer(net_out)
        # Clamp so the distribution can neither collapse nor blow up.
        log_std = torch.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)
        std = torch.exp(log_std)

        # Pre-squash distribution and sample
        pi_distribution = Normal(mu, std)
        if deterministic:
            # Only used for evaluating policy at test time.
            pi_action = mu
        else:
            pi_action = pi_distribution.rsample()

        if with_logprob:
            # Tanh-squash correction from the SAC paper (arXiv 1801.01290,
            # appendix C); this is a numerically stable equivalent of Eq. 21.
            # Bugfix: reduce over axis=-1 (was axis=1) so it matches the
            # Gaussian log-prob reduction on the previous line and sibling
            # ActorFC.log_prob, and works for unbatched 1-D actions too
            # (identical result for batched 2-D input).
            logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1)
            logp_pi -= (2*(np.log(2) - pi_action - F.softplus(-2*pi_action))).sum(axis=-1)
        else:
            logp_pi = None

        pi_action = torch.tanh(pi_action)
        pi_action = self.act_limit * pi_action

        return pi_action, logp_pi, None
class MLPQFunction(nn.Module):
    """State-action value network: Q(s, a) -> scalar per sample."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        # Final width of 1: the network emits a single Q value.
        self.q = mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)

    def forward(self, obs, act):
        joined = torch.cat([obs, act], dim=-1)
        # Squeeze the trailing singleton so q has shape (...,) not (..., 1).
        return torch.squeeze(self.q(joined), -1)
class MLPActorCritic(nn.Module):
    # Bundles one policy network and two Q-networks (twin critics),
    # sized from the environment's observation/action spaces.
    def __init__(self, observation_space, action_space, hidden_sizes=(256,256),
                 activation=nn.ReLU):
        super().__init__()
        obs_dim = observation_space.shape[0]
        act_dim = action_space.shape[0]
        # NOTE(review): assumes a symmetric action space with the same
        # limit on every dimension — confirm for the target environments.
        act_limit = action_space.high[0]
        # build policy and value functions
        self.pi = ActorFC(obs_dim, act_dim, hidden_sizes, activation, act_limit)
        # self.pi = SquashedGaussianMLPActor(obs_dim, act_dim, hidden_sizes, activation, act_limit)
        self.q1 = CriticFC(obs_dim, act_dim, hidden_sizes, activation)
        self.q2 = CriticFC(obs_dim, act_dim, hidden_sizes, activation)
    def act(self, obs, deterministic=False):
        # Inference-only action selection: no grad tracking; returns a
        # numpy array (squashed and scaled by the actor).
        with torch.no_grad():
            a, _, _ = self.pi(obs, deterministic=deterministic, with_logprob=False, squash=True)
            return a.cpu().numpy()
class MyNormal(ExponentialFamily):
    # Normal distribution parameterized by `loc` and the *log* of the
    # scale.  Unlike torch.distributions.Normal (parameterized by scale
    # directly), using logscale as the raw parameter keeps the scale
    # strictly positive without any clamping: scale = exp(logscale).
    arg_constraints = {'loc': constraints.real, 'logscale': constraints.real}
    support = constraints.real
    has_rsample = True
    _mean_carrier_measure = 0
    @property
    def mean(self):
        return self.loc
    @property
    def stddev(self):
        return self.scale
    @property
    def variance(self):
        return self.scale.pow(2)
    def __init__(self, loc, logscale, validate_args=None):
        self.loc, self.logscale = broadcast_all(loc, logscale)
        # Scalars yield an empty batch shape, mirroring torch's Normal.
        if isinstance(loc, Number) and isinstance(logscale, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.loc.size()
        super(MyNormal, self).__init__(batch_shape, validate_args=validate_args)
    def expand(self, batch_shape, _instance=None):
        # Broadcast this distribution's parameters to `batch_shape`.
        new = self._get_checked_instance(MyNormal, _instance)
        batch_shape = torch.Size(batch_shape)
        new.loc = self.loc.expand(batch_shape)
        new.logscale = self.logscale.expand(batch_shape)
        super(MyNormal, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new
    @property
    def scale(self):
        # Derived quantity: the raw parameter is logscale.
        return self.logscale.exp()
    def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            return torch.normal(self.loc.expand(shape), self.scale.expand(shape))
    def rsample(self, sample_shape=torch.Size()):
        # Reparameterization trick: loc + eps * scale, eps ~ N(0, 1).
        shape = self._extended_shape(sample_shape)
        eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
        return self.loc + eps * self.scale
    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # Same as the usual Gaussian log-density, with log(scale) taken
        # directly from the logscale parameter instead of a log() call.
        return -((value - self.loc) ** 2) / (2 * self.variance) - self.logscale - math.log(math.sqrt(2 * math.pi))
    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return 0.5 * (1 + torch.erf((value - self.loc) * self.scale.reciprocal() / math.sqrt(2)))
    def icdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return self.loc + self.scale * torch.erfinv(2 * value - 1) * math.sqrt(2)
    def entropy(self):
        raise NotImplementedError
    @property
    def _natural_params(self):
        # Exponential-family natural parameters of a Gaussian:
        # (mu / sigma^2, -1 / (2 sigma^2)).
        return (self.loc / self.scale.pow(2), -0.5 * self.scale.pow(2).reciprocal())
    def _log_normalizer(self, x, y):
        return -0.25 * x.pow(2) / y + 0.5 * torch.log(-math.pi / y)
def log_tanh_grad(x):
    """Elementwise log of d/dx tanh(x) = log(1 - tanh(x)^2), computed stably.

    Uses the identity log(1 - tanh(x)^2) = 2*(log 2 - x - softplus(-2x)),
    which avoids catastrophic cancellation for large |x|.
    """
    return 2.0 * (np.log(2) - x - F.softplus(-2 * x))
def atanh(x):
    """Numerically safe inverse hyperbolic tangent.

    Inputs are clamped to (-1 + 1e-5, 1 - 1e-5) before applying the
    0.5 * log((1 + x) / (1 - x)) identity, so values at or beyond the
    asymptotes stay finite.
    """
    clipped = torch.clamp(x, min=-1 + 1e-5, max=1 - 1e-5)
    return 0.5 * ((1 + clipped) / (1 - clipped)).log()
class Deterministic(torch.distributions.distribution.Distribution):
    """Degenerate distribution placing all mass at a fixed point `loc`.

    rsample/sample just replicate `loc` over the requested sample shape;
    log_prob and entropy return constant ones/zeros shaped like `loc`.
    """

    def __init__(self, loc):
        super(Deterministic, self).__init__()
        self.x = loc

    def rsample(self, sample_shape=torch.Size([])):
        target = sample_shape + self.x.shape
        lead = torch.Size([1] * len(sample_shape))
        # Broadcast the fixed point across the leading sample dims.
        return self.x.view(lead + self.x.shape).expand(target)

    def log_prob(self, value):
        # Constant (log-prob of the point mass is conventionally 0;
        # this class returns ones, preserved from the original contract).
        return torch.ones_like(self.x)

    def entropy(self):
        return torch.zeros_like(self.x)
def identity(x):
    """No-op squash function: hand back `x` untouched."""
    return x
def zero(x):
    """Constant-zero correction term (used when actions are unbounded)."""
    return torch.zeros_like(x)
class Policy(nn.Module):
    # Generic stochastic policy head.  Wraps a torch distribution family
    # (selected by name) and, when `bounded`, squashes samples through
    # tanh; log-probabilities are then corrected by the log-derivative of
    # tanh (change-of-variables term).
    def __init__(self, distribution, bounded=True):
        super(Policy, self).__init__()
        if bounded:
            # tanh squash; atanh de-squash; da_du is log d tanh/du.
            self.squash = torch.tanh
            self.desquash = atanh
            self.da_du = log_tanh_grad
        else:
            # Unbounded actions: identity squash, zero correction.
            self.squash = identity
            self.desquash = identity
            self.da_du = zero
        # Resolve the distribution constructor by name; falls back to
        # torch.distributions.<name> for anything not special-cased.
        if distribution == 'deterministic':
            self.generator = Deterministic
        elif distribution == 'Normal':
            self.generator = MyNormal
        else:
            self.generator = getattr(torch.distributions, distribution)
        self.distribution = None
    def _sample(self, method, n=None, deterministic=False):
        # Draw from the most recent distribution built in forward();
        # `method` is 'sample' (detached) or 'rsample' (reparameterized).
        if deterministic:
            sample = self.distribution.mean
            if type(n) is int:
                sample = torch.repeat_interleave(sample.unsqueeze(0), n, dim=0)
            if method == 'sample':
                sample = sample.detach()
        elif n is None:
            sample = getattr(self.distribution, method)()
        else:
            if type(n) is int:
                n = torch.Size([n])
            sample = getattr(self.distribution, method)(n)
        a = self.squash(sample)
        return a
    def rsample(self, n=None, deterministic=False):
        return self._sample('rsample', n=n, deterministic=deterministic)
    def sample(self, n=None, deterministic=False):
        return self._sample('sample', n=n, deterministic=deterministic)
    def log_prob(self, a):
        # Log-probability of an already-squashed action `a`: de-squash,
        # evaluate the base density, subtract the tanh Jacobian term.
        distribution = self.distribution.expand(a.shape)
        sample = self.desquash(a)
        log_prob = distribution.log_prob(sample) - self.da_du(sample)
        return log_prob.sum(dim=-1)
    def forward(self, deterministic=False, **params):
        # Build a fresh distribution from `params` (stored for later
        # log_prob/sample calls) and return one squashed sample.
        self.params = params
        self.distribution = self.generator(**params)
        a = self.rsample(deterministic=deterministic)
        return a.squeeze(0)
class ActorFC(nn.Module):
    # Fully-connected Gaussian actor.  forward() builds a MyNormal over
    # pre-squash actions and keeps it on `self.distribution` so sample /
    # log_prob / normalize can reuse it.
    def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit):
        super(ActorFC, self).__init__()
        self.act_limit = act_limit
        # Shared trunk plus separate mean and log-std heads.
        self.lin = mlp([obs_dim] + list(hidden_sizes), activation, activation)
        self.mu_head = nn.Linear(hidden_sizes[-1], act_dim)
        self.std_head = nn.Linear(hidden_sizes[-1], act_dim)
        self.distribution = None
    def _sample(self, method, n=None, deterministic=False):
        # Draw n (or a single) action(s) from the cached distribution;
        # `method` is 'sample' (detached) or 'rsample' (reparameterized).
        if deterministic:
            sample = self.distribution.mean
            if type(n) is int:
                sample = torch.repeat_interleave(sample.unsqueeze(0), n, dim=0)
            if method == 'sample':
                sample = sample.detach()
        elif n is None:
            sample = getattr(self.distribution, method)()
        else:
            if type(n) is int:
                n = torch.Size([n])
            sample = getattr(self.distribution, method)(n)
        # Samples from _sample are always squashed and scaled to bounds.
        a = torch.tanh(sample) * self.act_limit
        return a
    def rsample(self, n=None, deterministic=False):
        return self._sample('rsample', n=n, deterministic=deterministic)
    def sample(self, n=None, deterministic=False):
        return self._sample('sample', n=n, deterministic=deterministic)
    def normalize(self, a):
        # Map a bounded action back to a z-score under the cached
        # distribution: de-scale, de-squash, then standardize.
        a = atanh(1 / self.act_limit * a)
        distribution = self.distribution.expand(a.shape)
        a = (a - distribution.loc) / (distribution.scale + 1e-5)
        # a = torch.tanh(a / 10) * self.act_limit * 10
        # a = torch.tanh(a) * self.act_limit
        return a
    def log_prob(self, a, desquash=False):
        # Log-probability with the tanh change-of-variables correction
        # (stable form: log(1 - tanh(u)^2) = 2*(log 2 - u - softplus(-2u))).
        a_desquash = atanh(a / self.act_limit) if desquash else a
        distribution = self.distribution.expand(a_desquash.shape)
        logp_pi = distribution.log_prob(a_desquash).sum(axis=-1)
        logp_pi -= (2 * (np.log(2) - a_desquash - F.softplus(-2 * a_desquash))).sum(axis=-1)
        return logp_pi
    def forward(self, s, distribution=None, deterministic=False, with_logprob=True, squash=False):
        # Returns (action, log-prob or None, normalized action or None).
        # NOTE(review): the log-prob is computed on the PRE-squash sample
        # (squashing happens after) — confirm callers expect that.
        s = self.lin(s)
        mu = self.mu_head(s)
        logstd = self.std_head(s)
        logstd = torch.clamp(logstd, LOG_STD_MIN, LOG_STD_MAX)
        # params = {'loc': mu, 'scale': torch.exp(logstd)}
        # self.distribution = Normal(**params)
        params = {'loc': mu, 'logscale': logstd}
        self.distribution = MyNormal(**params)
        if deterministic:
            a = mu
        else:
            a = self.distribution.rsample()
        logp_a = self.log_prob(a) if with_logprob else None
        if squash:
            a = torch.tanh(a)
            a = self.act_limit * a
        a_norm = None
        if distribution is not None:
            # Standardize the action under an externally supplied
            # distribution (e.g. another policy's output).
            distribution = distribution.expand(a.shape)
            a_norm = (a - distribution.loc) / (distribution.scale + 1e-5)
            # a_norm = torch.tanh(a_norm / 10) * self.act_limit * 10
        return a, logp_a, a_norm
# class ActorFC(Policy):
#
# def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit,
# distribution='Normal', bounded=True):
#
# super(ActorFC, self).__init__(distribution, bounded=bounded)
#
# self.lin = mlp([obs_dim] + list(hidden_sizes), activation, activation)
# self.mu_head = nn.Linear(hidden_sizes[-1], act_dim)
#
# self.form = distribution
# if distribution in ['Normal', 'Uniform']:
# self.std_head = nn.Linear(hidden_sizes[-1], act_dim)
#
# def forward(self, s, deterministic=False, with_logprob=True):
#
# s = self.lin(s)
#
# mu = self.mu_head(s)
#
# if self.form in ['Normal', 'Uniform']:
# logstd = self.std_head(s)
# logstd = torch.clamp(logstd, LOG_STD_MIN, LOG_STD_MAX)
# params = {'loc': mu, 'logscale': logstd}
# else:
# params = {'loc': mu}
#
# a = super(ActorFC, self).forward(**params, deterministic=deterministic)
# logp_a = self.log_prob(a.detach()) if with_logprob else None
#
# return a, logp_a
class CriticFC(nn.Module):
    # Fully-connected Q-network: concatenates state and (optionally
    # de-normalised) action, then maps to a scalar value per input row.
    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super(CriticFC, self).__init__()
        # act_dim == 0 disables the action input entirely (state-value net).
        self.actions = act_dim
        self.lin = mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)
    def forward(self, s, a=None, distribution=None):
        shape = s.shape
        if self.actions:
            if distribution is not None:
                # `a` arrives normalised: undo the z-scoring with the given
                # distribution's loc/scale, then squash with tanh.
                distribution = distribution.expand(a.shape)
                a = a * (distribution.scale + 1e-5) + distribution.loc
                a = torch.tanh(a) # * act_limit
            if len(a.shape) > len(shape):
                # `a` carries an extra leading sample axis (n, batch, act_dim):
                # broadcast the state along it and flatten for the MLP.
                shape = a.shape
                n, b, _ = shape
                s = s.unsqueeze(0).expand(n, b, -1)
                s = torch.cat([s, a], dim=-1)
                s = s.view(n * b, -1)
            else:
                s = torch.cat([s, a], dim=-1)
        q = self.lin(s)
        # Restore the leading shape and drop the trailing scalar axis.
        q = q.view(*shape[:-1], -1).squeeze(-1)
return q | [
"numpy.prod",
"torch.nn.Sequential",
"torch.distributions.normal.Normal",
"numpy.log",
"math.sqrt",
"torch.exp",
"torch.erfinv",
"torch.squeeze",
"torch.tanh",
"numpy.isscalar",
"torch.zeros_like",
"torch.ones_like",
"torch.distributions.utils.broadcast_all",
"torch.nn.functional.softplus"... | [((776, 798), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (789, 798), True, 'import torch.nn as nn\n'), ((6644, 6689), 'torch.clamp', 'torch.clamp', (['x'], {'min': '(-1 + 1e-05)', 'max': '(1 - 1e-05)'}), '(x, min=-1 + 1e-05, max=1 - 1e-05)\n', (6655, 6689), False, 'import torch\n'), ((7317, 7336), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (7333, 7336), False, 'import torch\n'), ((487, 505), 'numpy.isscalar', 'np.isscalar', (['shape'], {}), '(shape)\n', (498, 505), True, 'import numpy as np\n'), ((1179, 1215), 'torch.nn.Linear', 'nn.Linear', (['hidden_sizes[-1]', 'act_dim'], {}), '(hidden_sizes[-1], act_dim)\n', (1188, 1215), True, 'import torch.nn as nn\n'), ((1245, 1281), 'torch.nn.Linear', 'nn.Linear', (['hidden_sizes[-1]', 'act_dim'], {}), '(hidden_sizes[-1], act_dim)\n', (1254, 1281), True, 'import torch.nn as nn\n'), ((1570, 1616), 'torch.clamp', 'torch.clamp', (['log_std', 'LOG_STD_MIN', 'LOG_STD_MAX'], {}), '(log_std, LOG_STD_MIN, LOG_STD_MAX)\n', (1581, 1616), False, 'import torch\n'), ((1631, 1649), 'torch.exp', 'torch.exp', (['log_std'], {}), '(log_std)\n', (1640, 1649), False, 'import torch\n'), ((1722, 1737), 'torch.distributions.normal.Normal', 'Normal', (['mu', 'std'], {}), '(mu, std)\n', (1728, 1737), False, 'from torch.distributions.normal import Normal\n'), ((2604, 2625), 'torch.tanh', 'torch.tanh', (['pi_action'], {}), '(pi_action)\n', (2614, 2625), False, 'import torch\n'), ((3023, 3043), 'torch.squeeze', 'torch.squeeze', (['q', '(-1)'], {}), '(q, -1)\n', (3036, 3043), False, 'import torch\n'), ((4456, 4484), 'torch.distributions.utils.broadcast_all', 'broadcast_all', (['loc', 'logscale'], {}), '(loc, logscale)\n', (4469, 4484), False, 'from torch.distributions.utils import _standard_normal, broadcast_all\n'), ((4867, 4890), 'torch.Size', 'torch.Size', (['batch_shape'], {}), '(batch_shape)\n', (4877, 4890), False, 'import torch\n'), ((5241, 5253), 'torch.Size', 
'torch.Size', ([], {}), '()\n', (5251, 5253), False, 'import torch\n'), ((5455, 5467), 'torch.Size', 'torch.Size', ([], {}), '()\n', (5465, 5467), False, 'import torch\n'), ((5535, 5604), 'torch.distributions.utils._standard_normal', '_standard_normal', (['shape'], {'dtype': 'self.loc.dtype', 'device': 'self.loc.device'}), '(shape, dtype=self.loc.dtype, device=self.loc.device)\n', (5551, 5604), False, 'from torch.distributions.utils import _standard_normal, broadcast_all\n'), ((6701, 6729), 'torch.log', 'torch.log', (['((1 + x) / (1 - x))'], {}), '((1 + x) / (1 - x))\n', (6710, 6729), False, 'import torch\n'), ((6933, 6947), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (6943, 6947), False, 'import torch\n'), ((7171, 7194), 'torch.ones_like', 'torch.ones_like', (['self.x'], {}), '(self.x)\n', (7186, 7194), False, 'import torch\n'), ((7234, 7258), 'torch.zeros_like', 'torch.zeros_like', (['self.x'], {}), '(self.x)\n', (7250, 7258), False, 'import torch\n'), ((9570, 9606), 'torch.nn.Linear', 'nn.Linear', (['hidden_sizes[-1]', 'act_dim'], {}), '(hidden_sizes[-1], act_dim)\n', (9579, 9606), True, 'import torch.nn as nn\n'), ((9631, 9667), 'torch.nn.Linear', 'nn.Linear', (['hidden_sizes[-1]', 'act_dim'], {}), '(hidden_sizes[-1], act_dim)\n', (9640, 9667), True, 'import torch.nn as nn\n'), ((11440, 11485), 'torch.clamp', 'torch.clamp', (['logstd', 'LOG_STD_MIN', 'LOG_STD_MAX'], {}), '(logstd, LOG_STD_MIN, LOG_STD_MAX)\n', (11451, 11485), False, 'import torch\n'), ((725, 758), 'torch.nn.Linear', 'nn.Linear', (['sizes[j]', 'sizes[j + 1]'], {}), '(sizes[j], sizes[j + 1])\n', (734, 758), True, 'import torch.nn as nn\n'), ((840, 856), 'numpy.prod', 'np.prod', (['p.shape'], {}), '(p.shape)\n', (847, 856), True, 'import numpy as np\n'), ((2977, 3006), 'torch.cat', 'torch.cat', (['[obs, act]'], {'dim': '(-1)'}), '([obs, act], dim=-1)\n', (2986, 3006), False, 'import torch\n'), ((3818, 3833), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3831, 3833), False, 'import 
torch\n'), ((4581, 4593), 'torch.Size', 'torch.Size', ([], {}), '()\n', (4591, 4593), False, 'import torch\n'), ((5320, 5335), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5333, 5335), False, 'import torch\n'), ((6600, 6618), 'torch.nn.functional.softplus', 'F.softplus', (['(-2 * x)'], {}), '(-2 * x)\n', (6610, 6618), True, 'import torch.nn.functional as F\n'), ((10259, 10277), 'torch.tanh', 'torch.tanh', (['sample'], {}), '(sample)\n', (10269, 10277), False, 'import torch\n'), ((11891, 11904), 'torch.tanh', 'torch.tanh', (['a'], {}), '(a)\n', (11901, 11904), False, 'import torch\n'), ((13902, 13915), 'torch.tanh', 'torch.tanh', (['a'], {}), '(a)\n', (13912, 13915), False, 'import torch\n'), ((5844, 5866), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (5853, 5866), False, 'import math\n'), ((6236, 6248), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (6245, 6248), False, 'import math\n'), ((6520, 6543), 'torch.log', 'torch.log', (['(-math.pi / y)'], {}), '(-math.pi / y)\n', (6529, 6543), False, 'import torch\n'), ((6584, 6593), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (6590, 6593), True, 'import numpy as np\n'), ((14112, 14137), 'torch.cat', 'torch.cat', (['[s, a]'], {'dim': '(-1)'}), '([s, a], dim=-1)\n', (14121, 14137), False, 'import torch\n'), ((14214, 14239), 'torch.cat', 'torch.cat', (['[s, a]'], {'dim': '(-1)'}), '([s, a], dim=-1)\n', (14223, 14239), False, 'import torch\n'), ((6206, 6233), 'torch.erfinv', 'torch.erfinv', (['(2 * value - 1)'], {}), '(2 * value - 1)\n', (6218, 6233), False, 'import torch\n'), ((8458, 8473), 'torch.Size', 'torch.Size', (['[n]'], {}), '([n])\n', (8468, 8473), False, 'import torch\n'), ((10171, 10186), 'torch.Size', 'torch.Size', (['[n]'], {}), '([n])\n', (10181, 10186), False, 'import torch\n'), ((6051, 6063), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (6060, 6063), False, 'import math\n'), ((11166, 11193), 'torch.nn.functional.softplus', 'F.softplus', (['(-2 * a_desquash)'], {}), 
'(-2 * a_desquash)\n', (11176, 11193), True, 'import torch.nn.functional as F\n'), ((2503, 2529), 'torch.nn.functional.softplus', 'F.softplus', (['(-2 * pi_action)'], {}), '(-2 * pi_action)\n', (2513, 2529), True, 'import torch.nn.functional as F\n'), ((11141, 11150), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (11147, 11150), True, 'import numpy as np\n'), ((2479, 2488), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2485, 2488), True, 'import numpy as np\n')] |
import argparse
import time
import os
import glob
import sys
import json
import shutil
import itertools
import numpy as np
import pandas as pd
import csv
import torch
from torch.autograd import Variable
from sklearn.metrics import confusion_matrix
from torch.nn import functional as F
from opts import parse_opts_online
from model import generate_model, _modify_first_conv_layer, _construct_depth_model
from mean import get_mean, get_std
from spatial_transforms import *
from temporal_transforms import *
# from temporal_transforms_adap import *
from target_transforms import ClassLabel
from dataset import get_online_data
from utils import Logger, AverageMeter, LevenshteinDistance, Queue
import pdb
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import scipy.io as sio
def weighting_func(x):
    """Logistic weight in (0, 1): a sigmoid centred at x = 9 with slope 0.2.

    Used to down-weight the first few classifier outputs after a gesture is
    detected (weighting_func(9) == 0.5, approaching 1 for large x).
    """
    return 1.0 / (1.0 + np.exp(0.2 * (9.0 - x)))
# Parse command-line options for the online (streaming) evaluation run.
opt = parse_opts_online()
def load_models(opt):
    """Build and restore the detector and classifier networks.

    The shared ``opt`` namespace is overwritten in place twice: first with
    the ``*_det`` fields to build/restore the detector, then with the
    ``*_clf`` fields for the classifier.  Each pass resolves paths against
    ``opt.root_path``, dumps the effective options to a JSON file in
    ``opt.result_path``, seeds torch, builds the model and loads its
    checkpoint (asserting the stored architecture matches).

    Returns:
        (detector, classifier): the two restored nn.Module instances.
    """
    # ---- detector ----------------------------------------------------
    opt.resume_path = opt.resume_path_det
    opt.pretrain_path = opt.pretrain_path_det
    opt.sample_duration = opt.sample_duration_det
    opt.model = opt.model_det
    opt.model_depth = opt.model_depth_det
    opt.modality = opt.modality_det
    opt.resnet_shortcut = opt.resnet_shortcut_det
    opt.n_classes = opt.n_classes_det
    opt.n_finetune_classes = opt.n_finetune_classes_det
    opt.no_first_lay = opt.no_first_lay_det
    if opt.root_path != '':
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
    # Multi-scale factors: geometric progression from initial_scale.
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value)
    opt.std = get_std(opt.norm_value)
    print(opt)
    with open(os.path.join(opt.result_path, 'opts_det_{}.json'.format(opt.store_name)), 'w') as opt_file:
        json.dump(vars(opt), opt_file)
    torch.manual_seed(opt.manual_seed)
    detector, parameters = generate_model(opt)
    if opt.resume_path:
        # BUGFIX: opt.root_path was previously joined onto resume_path a
        # second time here (it is already applied above), which duplicated
        # the prefix whenever root_path is relative.  The classifier branch
        # below never re-joined, confirming the single join is intended.
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']
        detector.load_state_dict(checkpoint['state_dict'])
    print('Model 1 \n', detector)
    pytorch_total_params = sum(p.numel() for p in detector.parameters() if
                               p.requires_grad)
    print("Total number of trainable parameters: ", pytorch_total_params)
    # ---- classifier --------------------------------------------------
    opt.resume_path = opt.resume_path_clf
    opt.pretrain_path = opt.pretrain_path_clf
    opt.sample_duration = opt.sample_duration_clf
    opt.model = opt.model_clf
    opt.model_depth = opt.model_depth_clf
    opt.modality = opt.modality_clf
    opt.resnet_shortcut = opt.resnet_shortcut_clf
    opt.n_classes = opt.n_classes_clf
    opt.n_finetune_classes = opt.n_finetune_classes_clf
    opt.no_first_lay = opt.no_first_lay_clf
    if opt.root_path != '':
        # NOTE: video/annotation/result paths were already prefixed in the
        # detector pass; joining again is a no-op only when they became
        # absolute (kept as-is to preserve the original behaviour).
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value)
    opt.std = get_std(opt.norm_value)
    print(opt)
    with open(os.path.join(opt.result_path, 'opts_clf_{}.json'.format(opt.store_name)), 'w') as opt_file:
        json.dump(vars(opt), opt_file)
    torch.manual_seed(opt.manual_seed)
    classifier, parameters = generate_model(opt)
    if opt.resume_path:
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']
        if opt.sample_duration_clf < 32 and opt.model_clf != 'c3d':
            # Short clips need a modified first conv / depth-input variant.
            classifier = _modify_first_conv_layer(classifier,3,3)
            classifier = _construct_depth_model(classifier)
            classifier = classifier.cuda()
        classifier.load_state_dict(checkpoint['state_dict'])
    print('Model 2 \n', classifier)
    pytorch_total_params = sum(p.numel() for p in classifier.parameters() if
                               p.requires_grad)
    print("Total number of trainable parameters: ", pytorch_total_params)
    return detector, classifier
# Resolve the run name, build both networks and the shared input transforms.
opt.store_name = '{}_{}_{}'.format(opt.store_name, opt.test_subset, opt.model_clf)
detector,classifier = load_models(opt)
sys.stdout.flush()
# Identity normalisation (mean 0, std 1): pixels pass through unchanged.
norm_method = Normalize([0, 0, 0], [1, 1, 1])
spatial_transform = Compose([
    Scale(112),
    CenterCrop(112),
    ToTensor(opt.norm_value), norm_method
    ])
target_transform = ClassLabel()
## Get list of videos to test.
# Each branch fills `test_paths` and sets `buf` (number of leading paths to
# skip later via test_paths[buf:]); AHG/denso additionally load per-video
# ground-truth classes and frame indices from .mat split files.
if opt.dataset == 'egogesture':
    subject_list = ['Subject{:02d}'.format(i) for i in [2, 9, 11, 14, 18, 19, 28, 31, 41, 47]]
    test_paths = []
    buf = 4
    for subject in subject_list:
        for x in glob.glob(os.path.join(opt.video_path,subject,'*/*/rgb*')):
            test_paths.append(x)
elif opt.dataset == 'nv':
    df = pd.read_csv(os.path.join(opt.video_path,'nvgesture_test_correct_cvpr2016_v2.lst'), delimiter = ' ', header = None)
    test_paths = []
    buf = 4
    for x in df[0].values:
        if opt.modality_det in ['RGB', 'RGB-D', 'RGB-flo', 'RGB-seg']:
            test_paths.append(os.path.join(opt.video_path, x.replace('path:./', ''), 'sk_color_all'))
        elif opt.modality_det == 'Depth':
            test_paths.append(os.path.join(opt.video_path, x.replace('path:', ''), 'sk_depth_all'))
elif opt.dataset == 'ipn':
    file_set = os.path.join(opt.video_path, 'Video_TestList.txt')
    test_paths = []
    buf = 0
    with open(file_set,'rb') as f:
        for line in f:
            # Each line is tab-separated; the first field is the video name.
            vid_name = line.decode().split('\t')[0]
            test_paths.append(os.path.join(opt.video_path, 'frames', vid_name))
elif opt.dataset == 'AHG':
    data = sio.loadmat(os.path.join(opt.root_path,'bega/datasets/AHG/splitfiles/testlist01.mat'))['raw_list'][0]
    test_paths = []
    true_classes_all = []
    true_frames_all = []
    buf = 0
    for i in range(data.shape[0]): #All videos
        test_paths.append(str(data[i][0][0])) #path
        true_classes_all.append(np.array(data[i][1][0])) #classes
        true_frames_all.append(np.array(data[i][-2][0])) #ef
elif opt.dataset == 'denso':
    if opt.test_subset == 'val':
        print('Online evaluation of validation set')
        data = sio.loadmat(os.path.join(opt.root_path,'bega/datasets/Pointing/train_sets/valid_list3.mat'))['raw_list'][0]
    elif opt.test_subset == 'test':
        print('Online evaluation of testing set')
        data = sio.loadmat(os.path.join(opt.root_path,'bega/datasets/Pointing/train_sets/test_list3.mat'))['raw_list'][0]
    else:
        print('ERROR: chose val or test set for online evaluation')
        # Deliberately-failing assert aborts the run on a bad subset name.
        assert(opt.test_subset == 1)
    test_paths = []
    true_classes_all = []
    true_frames_all = []
    buf = 0
    for i in range(data.shape[0]): #All videos
        test_paths.append(str(data[i][0][0])) #path
        true_classes_all.append(np.array(data[i][1][0])) #classes
        true_frames_all.append(np.array(data[i][-1][0])) #gef
print('Start Evaluation')
# Inference mode for both networks (disables dropout/batch-norm updates).
detector.eval()
classifier.eval()
# Accumulators across all test videos.
levenshtein_accuracies = AverageMeter()
det_idxs = []
end_frames = []
pre_classes = []
all_pred_frames = []
all_pred_starts = []
all_pred = []
all_true_frames = []
all_true_starts = []
all_true = []
videoidx = 0
# Main evaluation loop: stream each test video through the detector (gesture
# present / absent switch) and the classifier, accumulate weighted class
# probabilities while a gesture is active, emit early/late detections and
# score the per-video prediction sequence with the Levenshtein distance.
for idx, path in enumerate(test_paths[buf:]):
    # Dataset-specific trimming of the absolute path to the relative clip path.
    if opt.dataset == 'egogesture':
        opt.whole_path = path.split(os.sep, 4)[-1]
    elif opt.dataset == 'nv':
        opt.whole_path = path.split(os.sep, 7)[-1]
    elif opt.dataset == 'ipn':
        opt.whole_path = os.path.join('frames', path.split(os.sep)[-1])
    elif opt.dataset == 'AHG':
        opt.whole_path = path
    elif opt.dataset == 'denso':
        opt.whole_path = path
    videoidx += 1
    # Per-video state: activity flags and running class-probability average.
    active_index = 0
    passive_count = 0
    active = False
    prev_active = False
    finished_prediction = None
    pre_predict = False
    cum_sum = np.zeros(opt.n_classes_clf,)
    clf_selected_queue = np.zeros(opt.n_classes_clf,)
    det_selected_queue = np.zeros(opt.n_classes_det,)
    myqueue_det = Queue(opt.det_queue_size , n_classes = opt.n_classes_det)
    myqueue_clf = Queue(opt.clf_queue_size, n_classes = opt.n_classes_clf )
    print('[{}/{}]============'.format(videoidx,len(test_paths)))
    print(path)
    sys.stdout.flush()
    # Windows must be long enough for whichever model needs more frames.
    opt.sample_duration = max(opt.sample_duration_clf, opt.sample_duration_det)
    test_data = get_online_data(
        opt, spatial_transform, None, target_transform)
    test_loader = torch.utils.data.DataLoader(
        test_data,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_threads,
        pin_memory=True)
    results = []
    pred_frames = []
    pred_start = []
    pred_starts = []
    prev_best1 = opt.n_classes_clf
    det_idx = np.zeros(6000)
    end_fra = np.zeros(1000)
    pre_cla = np.zeros(1000)
    # det_idx[0] stores the last frame index of the video for later reporting.
    det_idx[0] = test_data.data[-1]['frame_indices'][-1]
    for i, (inputs, targets) in enumerate(test_loader):
        if not opt.no_cuda:
            targets = targets.cuda()
        ground_truth_array = np.zeros(opt.n_classes_clf +1,)
        with torch.no_grad():
            inputs = Variable(inputs)
            targets = Variable(targets)
            # Detector sees only the last sample_duration_det frames.
            if opt.modality_det in ['RGB', 'RGB-D', 'RGB-flo', 'RGB-seg']:
                inputs_det = inputs[:,:,-opt.sample_duration_det:,:,:]
            elif opt.modality_det == 'Depth':
                inputs_det = inputs[:,-1,-opt.sample_duration_det:,:,:].unsqueeze(1)
            s_dt = time.time()
            # pdb.set_trace()
            outputs_det = detector(inputs_det)
            outputs_det = F.softmax(outputs_det,dim=1)
            outputs_det = outputs_det.cpu().numpy()[0].reshape(-1,)
            e_dt = time.time()
            # Enqueue the probabilities to the detector queue and pick the
            # configured smoothing strategy.
            myqueue_det.enqueue(outputs_det.tolist())
            if opt.det_strategy == 'raw':
                det_selected_queue = outputs_det
            elif opt.det_strategy == 'median':
                det_selected_queue = myqueue_det.median
            elif opt.det_strategy == 'ma':
                det_selected_queue = myqueue_det.ma
            elif opt.det_strategy == 'ewma':
                det_selected_queue = myqueue_det.ewma
            prediction_det = np.argmax(det_selected_queue)
            prob_det = det_selected_queue[prediction_det]
            #### State of the detector is checked here as detector acts as a switch for the classifier
            # Detector class 1 == "gesture present": run the classifier.
            if prediction_det == 1:
                # det_idx[i] = test_data.data[i]['frame_indices'][-1]
                det_idx[test_data.data[i]['frame_indices'][-1]] = 1
                pred_start.append(test_data.data[i]['frame_indices'][-1])
                if opt.modality_clf in ['RGB', 'RGB-D', 'RGB-flo', 'RGB-seg']:
                    inputs_clf = inputs[:,:,:,:,:]
                elif opt.modality_clf == 'Depth':
                    inputs_clf = inputs[:,-1,:,:,:].unsqueeze(1)
                s_ct = time.time()
                outputs_clf = classifier(inputs_clf)
                outputs_clf = F.softmax(outputs_clf,dim=1)
                outputs_clf = outputs_clf.cpu().numpy()[0].reshape(-1,)
                e_ct = time.time()
                # Push the probabilities to the classifier queue.
                myqueue_clf.enqueue(outputs_clf.tolist())
                passive_count = 0
                if opt.clf_strategy == 'raw':
                    clf_selected_queue = outputs_clf
                elif opt.clf_strategy == 'median':
                    clf_selected_queue = myqueue_clf.median
                elif opt.clf_strategy == 'ma':
                    clf_selected_queue = myqueue_clf.ma
                elif opt.clf_strategy == 'ewma':
                    clf_selected_queue = myqueue_clf.ewma
                # print('Clf Time: {}s ({}ms)'.format(e_ct-s_ct, (e_ct-s_ct)*1000))
                # print('Sum Time: {}s ({}ms)'.format((e_dt-s_dt)+(e_ct-s_ct), ((e_dt-s_dt)+(e_ct-s_ct))*1000))
                # print('All Time: {}s ({}ms)'.format(e_ct-s_dt, (e_ct-s_dt)*1000))
            else:
                # No gesture: feed zeros so the queue smoothing decays.
                outputs_clf = np.zeros(opt.n_classes_clf ,)
                # Push the probabilities to the classifier queue.
                myqueue_clf.enqueue(outputs_clf.tolist())
                passive_count += 1
        # Gesture is considered active until det_counter consecutive misses.
        if passive_count >= opt.det_counter:
            active = False
        else:
            active = True
        # one of the following lines needs to be commented !!!!
        if active:
            active_index += 1
            cum_sum = ((cum_sum * (active_index-1)) + (weighting_func(active_index) * clf_selected_queue))/active_index # Weighted Approach
            # cum_sum = ((cum_sum * (x-1)) + (1.0 * clf_selected_queue))/x #Not Weighting Approach
            best2, best1 = tuple(cum_sum.argsort()[-2:][::1])
            # Early prediction: top-1/top-2 margin exceeds the pre-threshold.
            if float(cum_sum[best1]- cum_sum[best2]) > opt.clf_threshold_pre:
                finished_prediction = True
                pre_predict = True
        else:
            active_index = 0
        if active == False and prev_active == True:
            finished_prediction = True
        elif active == True and prev_active == False:
            finished_prediction = False
            if test_data.data[i]['frame_indices'][-1] % 500 == 0:
                print('No gestures detected at frame {}'.format(test_data.data[i]['frame_indices'][-1]))
                sys.stdout.flush()
        if finished_prediction == True:
            best2, best1 = tuple(cum_sum.argsort()[-2:][::1])
            if cum_sum[best1]>opt.clf_threshold_final:
                if pre_predict == True:
                    # Early detection: accept only a class change.
                    if best1 != prev_best1:
                        if cum_sum[best1]>opt.clf_threshold_final:
                            results.append(((i*opt.stride_len)+opt.sample_duration_clf,best1))
                            print( 'Early Detected - class : {} with prob : {} at frames {}~{}'.format(best1, cum_sum[best1], pred_start[0], test_data.data[i]['frame_indices'][-1]))
                            pred_frames.append(test_data.data[i]['frame_indices'][-1])
                            pred_starts.append(pred_start[0])
                            pred_start = []
                else:
                    if cum_sum[best1]>opt.clf_threshold_final:
                        if best1 == prev_best1:
                            # Repeat of the previous class needs near-certainty.
                            if cum_sum[best1]>5:
                                results.append(((i*opt.stride_len)+opt.sample_duration_clf,best1))
                                print( 'Late Detected - class : {} with prob : {} at frames {}~{}'.format(best1, cum_sum[best1], pred_start[0], test_data.data[i]['frame_indices'][-1]))
                                pred_frames.append(test_data.data[i]['frame_indices'][-1])
                                pred_starts.append(pred_start[0])
                                pred_start = []
                        else:
                            results.append(((i*opt.stride_len)+opt.sample_duration_clf,best1))
                            print( 'Late Detected - class : {} with prob : {} at frames {}~{}'.format(best1, cum_sum[best1], pred_start[0], test_data.data[i]['frame_indices'][-1]))
                            pred_frames.append(test_data.data[i]['frame_indices'][-1])
                            pred_starts.append(pred_start[0])
                            pred_start = []
                finished_prediction = False
                prev_best1 = best1
                pred_start = []
            # Reset the running average for the next gesture.
            cum_sum = np.zeros(opt.n_classes_clf,)
            pred_start = []
        sys.stdout.flush()
        if active == False and prev_active == True:
            pre_predict = False
        prev_active = active
    # Load ground-truth labels (and frame spans where available).
    if opt.dataset == 'egogesture':
        target_csv_path = os.path.join(opt.video_path.rsplit(os.sep, 1)[0],
                                       'labels-final-revised1',
                                       opt.whole_path.rsplit(os.sep,2)[0],
                                       'Group'+opt.whole_path[-1] + '.csv').replace('Subject', 'subject')
        true_classes = []
        with open(target_csv_path) as csvfile:
            readCSV = csv.reader(csvfile, delimiter=',')
            for row in readCSV:
                true_classes.append(int(row[0])-1)
    elif opt.dataset == 'nv':
        true_classes = []
        true_starts = []
        true_frames = []
        with open('annotation_nvGesture/vallistall.txt') as csvfile:
            readCSV = csv.reader(csvfile, delimiter=' ')
            for row in readCSV:
                if row[0][2:] == opt.whole_path:
                    # Class 26 is the no-gesture label and is skipped.
                    if row[1] != '26' :
                        true_classes.append(int(row[1])-1)
                        true_starts.append(int(row[2]))
                        true_frames.append(int(row[3]))
    elif opt.dataset == 'ipn':
        true_classes = []
        true_frames = []
        true_starts = []
        with open('annotation_ipnGesture/vallistall.txt') as csvfile:
            readCSV = csv.reader(csvfile, delimiter=' ')
            for row in readCSV:
                if row[0][2:] == opt.whole_path:
                    # Class 1 is the no-gesture label and is skipped.
                    if row[1] != '1' :
                        true_classes.append(int(row[1])-2)
                        true_starts.append(int(row[2]))
                        true_frames.append(int(row[3]))
    elif opt.dataset == 'AHG':
        true_classes = []
        true_frames = true_frames_all[idx]
        for idc in true_classes_all[idx]:
            if idc > 7:
                true_classes.append(int(idc-2))
            else:
                true_classes.append(int(idc-1))
    elif opt.dataset == 'denso':
        true_classes = []
        true_frames = true_frames_all[idx]
        for idc in true_classes_all[idx]:
            true_classes.append(int(idc-1))
    # if path == '/misc/dl001/dataset/NVIDIA/nvgesture_arch/./Video_data/class_02/subject13_r1/sk_depth_all':
    # pdb.set_trace()
    true_classes = np.array(true_classes)
    if results == []:
        predicted = np.array(results)
        pred_frames = np.array(pred_frames)
        # -1 marks "no predictions at all" for the accuracy update below.
        levenshtein_distance = -1
    else:
        pred_frames = np.array(pred_frames)
        predicted = np.array(results)[:,1]
        levenshtein_distance = LevenshteinDistance(true_classes, predicted)
    # pdb.set_trace()
    levenshtein_accuracy = 1-(levenshtein_distance/len(true_classes))
    pre_cla[0:len(predicted)] = predicted+1
    end_fra[0:len(pred_frames)] = pred_frames
    if levenshtein_distance <0: # Distance cannot be less than 0
        levenshtein_accuracies.update(0, len(true_classes))
        # pass
    else:
        levenshtein_accuracies.update(levenshtein_accuracy, len(true_classes))
    # Human-readable "class(start~end)" strings for logging, plus the raw
    # lists collected for the final JSON dump.
    pred = []
    all_pred.append(predicted.tolist())
    all_pred_frames.append(pred_frames.tolist())
    all_pred_starts.append(pred_starts)
    for i, pn in enumerate(predicted):
        pred.append('{}({}~{})'.format(pn, pred_starts[i], pred_frames[i]))
    true_gt = []
    all_true.append(true_classes.tolist())
    all_true_frames.append(true_frames)
    all_true_starts.append(true_starts)
    for i, pn in enumerate(true_classes):
        true_gt.append('{}({}~{})'.format(pn, true_starts[i], true_frames[i]))
    # print('predicted classes: \t {} \t at frames: {}'.format(predicted, pred_frames))
    # print('True classes :\t\t {} \t at frames: {}'.format(true_classes, true_frames))
    if results == []:
        print('predicted classes: {}'.format('NONE'))
    else:
        print('predicted classes: {}'.format(' '.join(pred)))
    print('True classes :\t {}'.format(' '.join(true_gt)))
    print('Levenshtein Accuracy = {} ({}) frame detections: {}/{}'.format(levenshtein_accuracies.val, levenshtein_accuracies.avg, np.sum(det_idx[2:]), det_idx[0]))
    det_idxs.append(det_idx)
    end_frames.append(end_fra)
    pre_classes.append(pre_cla)
    sys.stdout.flush()
print('Average Levenshtein Accuracy= {}'.format(levenshtein_accuracies.avg))
print('-----Evaluation is finished------')
# Persist per-video predictions and ground truth for offline analysis.
res_data = {}
res_data['all_pred'] = all_pred
res_data['all_pred_frames'] = all_pred_frames
res_data['all_pred_starts'] = all_pred_starts
res_data['all_true'] = all_true
res_data['all_true_frames'] = all_true_frames
res_data['all_true_starts'] = all_true_starts
with open(os.path.join(opt.result_path,'res_'+opt.store_name+'.json'), 'w') as dst_file:
    json.dump(res_data, dst_file)
# det_idxs = np.array(det_idxs)
# end_frames = np.array(end_frames)
# pre_classes = np.array(pre_classes)
# sio.savemat(os.path.join(opt.result_path,opt.store_name+'.mat'), {'detecs':det_idxs, 'efs':end_frames, 'p_id':pre_classes}) | [
"utils.Queue",
"mean.get_std",
"mean.get_mean",
"numpy.array",
"torch.nn.functional.softmax",
"target_transforms.ClassLabel",
"numpy.exp",
"model._modify_first_conv_layer",
"sys.stdout.flush",
"torch.autograd.Variable",
"csv.reader",
"dataset.get_online_data",
"model.generate_model",
"nump... | [((893, 912), 'opts.parse_opts_online', 'parse_opts_online', ([], {}), '()\n', (910, 912), False, 'from opts import parse_opts_online\n'), ((5185, 5203), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5201, 5203), False, 'import sys\n'), ((5389, 5401), 'target_transforms.ClassLabel', 'ClassLabel', ([], {}), '()\n', (5399, 5401), False, 'from target_transforms import ClassLabel\n'), ((8072, 8086), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (8084, 8086), False, 'from utils import Logger, AverageMeter, LevenshteinDistance, Queue\n'), ((2038, 2062), 'mean.get_mean', 'get_mean', (['opt.norm_value'], {}), '(opt.norm_value)\n', (2046, 2062), False, 'from mean import get_mean, get_std\n'), ((2077, 2100), 'mean.get_std', 'get_std', (['opt.norm_value'], {}), '(opt.norm_value)\n', (2084, 2100), False, 'from mean import get_mean, get_std\n'), ((2267, 2301), 'torch.manual_seed', 'torch.manual_seed', (['opt.manual_seed'], {}), '(opt.manual_seed)\n', (2284, 2301), False, 'import torch\n'), ((2330, 2349), 'model.generate_model', 'generate_model', (['opt'], {}), '(opt)\n', (2344, 2349), False, 'from model import generate_model, _modify_first_conv_layer, _construct_depth_model\n'), ((3999, 4023), 'mean.get_mean', 'get_mean', (['opt.norm_value'], {}), '(opt.norm_value)\n', (4007, 4023), False, 'from mean import get_mean, get_std\n'), ((4038, 4061), 'mean.get_std', 'get_std', (['opt.norm_value'], {}), '(opt.norm_value)\n', (4045, 4061), False, 'from mean import get_mean, get_std\n'), ((4228, 4262), 'torch.manual_seed', 'torch.manual_seed', (['opt.manual_seed'], {}), '(opt.manual_seed)\n', (4245, 4262), False, 'import torch\n'), ((4292, 4311), 'model.generate_model', 'generate_model', (['opt'], {}), '(opt)\n', (4306, 4311), False, 'from model import generate_model, _modify_first_conv_layer, _construct_depth_model\n'), ((8881, 8908), 'numpy.zeros', 'np.zeros', (['opt.n_classes_clf'], {}), '(opt.n_classes_clf)\n', (8889, 8908), True, 'import numpy as 
np\n'), ((8935, 8962), 'numpy.zeros', 'np.zeros', (['opt.n_classes_clf'], {}), '(opt.n_classes_clf)\n', (8943, 8962), True, 'import numpy as np\n'), ((8989, 9016), 'numpy.zeros', 'np.zeros', (['opt.n_classes_det'], {}), '(opt.n_classes_det)\n', (8997, 9016), True, 'import numpy as np\n'), ((9036, 9090), 'utils.Queue', 'Queue', (['opt.det_queue_size'], {'n_classes': 'opt.n_classes_det'}), '(opt.det_queue_size, n_classes=opt.n_classes_det)\n', (9041, 9090), False, 'from utils import Logger, AverageMeter, LevenshteinDistance, Queue\n'), ((9113, 9167), 'utils.Queue', 'Queue', (['opt.clf_queue_size'], {'n_classes': 'opt.n_classes_clf'}), '(opt.clf_queue_size, n_classes=opt.n_classes_clf)\n', (9118, 9167), False, 'from utils import Logger, AverageMeter, LevenshteinDistance, Queue\n'), ((9259, 9277), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9275, 9277), False, 'import sys\n'), ((9374, 9437), 'dataset.get_online_data', 'get_online_data', (['opt', 'spatial_transform', 'None', 'target_transform'], {}), '(opt, spatial_transform, None, target_transform)\n', (9389, 9437), False, 'from dataset import get_online_data\n'), ((9466, 9595), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'opt.batch_size', 'shuffle': '(False)', 'num_workers': 'opt.n_threads', 'pin_memory': '(True)'}), '(test_data, batch_size=opt.batch_size, shuffle=\n False, num_workers=opt.n_threads, pin_memory=True)\n', (9493, 9595), False, 'import torch\n'), ((9802, 9816), 'numpy.zeros', 'np.zeros', (['(6000)'], {}), '(6000)\n', (9810, 9816), True, 'import numpy as np\n'), ((9831, 9845), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (9839, 9845), True, 'import numpy as np\n'), ((9860, 9874), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (9868, 9874), True, 'import numpy as np\n'), ((19009, 19031), 'numpy.array', 'np.array', (['true_classes'], {}), '(true_classes)\n', (19017, 19031), True, 'import numpy as np\n'), ((20936, 20954), 
'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (20952, 20954), False, 'import sys\n'), ((21436, 21465), 'json.dump', 'json.dump', (['res_data', 'dst_file'], {}), '(res_data, dst_file)\n', (21445, 21465), False, 'import json\n'), ((1424, 1467), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.video_path'], {}), '(opt.root_path, opt.video_path)\n', (1436, 1467), False, 'import os\n'), ((1498, 1546), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.annotation_path'], {}), '(opt.root_path, opt.annotation_path)\n', (1510, 1546), False, 'import os\n'), ((1573, 1617), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.result_path'], {}), '(opt.root_path, opt.result_path)\n', (1585, 1617), False, 'import os\n'), ((2401, 2445), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.resume_path'], {}), '(opt.root_path, opt.resume_path)\n', (2413, 2445), False, 'import os\n'), ((2530, 2557), 'torch.load', 'torch.load', (['opt.resume_path'], {}), '(opt.resume_path)\n', (2540, 2557), False, 'import torch\n'), ((3386, 3429), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.video_path'], {}), '(opt.root_path, opt.video_path)\n', (3398, 3429), False, 'import os\n'), ((3460, 3508), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.annotation_path'], {}), '(opt.root_path, opt.annotation_path)\n', (3472, 3508), False, 'import os\n'), ((3535, 3579), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.result_path'], {}), '(opt.root_path, opt.result_path)\n', (3547, 3579), False, 'import os\n'), ((4421, 4448), 'torch.load', 'torch.load', (['opt.resume_path'], {}), '(opt.resume_path)\n', (4431, 4448), False, 'import torch\n'), ((10083, 10114), 'numpy.zeros', 'np.zeros', (['(opt.n_classes_clf + 1)'], {}), '(opt.n_classes_clf + 1)\n', (10091, 10114), True, 'import numpy as np\n'), ((19074, 19091), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (19082, 19091), True, 'import numpy as np\n'), ((19114, 19135), 'numpy.array', 
'np.array', (['pred_frames'], {}), '(pred_frames)\n', (19122, 19135), True, 'import numpy as np\n'), ((19202, 19223), 'numpy.array', 'np.array', (['pred_frames'], {}), '(pred_frames)\n', (19210, 19223), True, 'import numpy as np\n'), ((19298, 19342), 'utils.LevenshteinDistance', 'LevenshteinDistance', (['true_classes', 'predicted'], {}), '(true_classes, predicted)\n', (19317, 19342), False, 'from utils import Logger, AverageMeter, LevenshteinDistance, Queue\n'), ((21353, 21417), 'os.path.join', 'os.path.join', (['opt.result_path', "('res_' + opt.store_name + '.json')"], {}), "(opt.result_path, 'res_' + opt.store_name + '.json')\n", (21365, 21417), False, 'import os\n'), ((864, 886), 'numpy.exp', 'np.exp', (['(-0.2 * (x - 9))'], {}), '(-0.2 * (x - 9))\n', (870, 886), True, 'import numpy as np\n'), ((1676, 1720), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.resume_path'], {}), '(opt.root_path, opt.resume_path)\n', (1688, 1720), False, 'import os\n'), ((1783, 1829), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.pretrain_path'], {}), '(opt.root_path, opt.pretrain_path)\n', (1795, 1829), False, 'import os\n'), ((3638, 3682), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.resume_path'], {}), '(opt.root_path, opt.resume_path)\n', (3650, 3682), False, 'import os\n'), ((3745, 3791), 'os.path.join', 'os.path.join', (['opt.root_path', 'opt.pretrain_path'], {}), '(opt.root_path, opt.pretrain_path)\n', (3757, 3791), False, 'import os\n'), ((4588, 4630), 'model._modify_first_conv_layer', '_modify_first_conv_layer', (['classifier', '(3)', '(3)'], {}), '(classifier, 3, 3)\n', (4612, 4630), False, 'from model import generate_model, _modify_first_conv_layer, _construct_depth_model\n'), ((4654, 4688), 'model._construct_depth_model', '_construct_depth_model', (['classifier'], {}), '(classifier)\n', (4676, 4688), False, 'from model import generate_model, _modify_first_conv_layer, _construct_depth_model\n'), ((5653, 5702), 'os.path.join', 'os.path.join', 
(['opt.video_path', 'subject', '"""*/*/rgb*"""'], {}), "(opt.video_path, subject, '*/*/rgb*')\n", (5665, 5702), False, 'import os\n'), ((5783, 5853), 'os.path.join', 'os.path.join', (['opt.video_path', '"""nvgesture_test_correct_cvpr2016_v2.lst"""'], {}), "(opt.video_path, 'nvgesture_test_correct_cvpr2016_v2.lst')\n", (5795, 5853), False, 'import os\n'), ((6302, 6352), 'os.path.join', 'os.path.join', (['opt.video_path', '"""Video_TestList.txt"""'], {}), "(opt.video_path, 'Video_TestList.txt')\n", (6314, 6352), False, 'import os\n'), ((10128, 10143), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10141, 10143), False, 'import torch\n'), ((10166, 10182), 'torch.autograd.Variable', 'Variable', (['inputs'], {}), '(inputs)\n', (10174, 10182), False, 'from torch.autograd import Variable\n'), ((10205, 10222), 'torch.autograd.Variable', 'Variable', (['targets'], {}), '(targets)\n', (10213, 10222), False, 'from torch.autograd import Variable\n'), ((10532, 10543), 'time.time', 'time.time', ([], {}), '()\n', (10541, 10543), False, 'import time\n'), ((10647, 10676), 'torch.nn.functional.softmax', 'F.softmax', (['outputs_det'], {'dim': '(1)'}), '(outputs_det, dim=1)\n', (10656, 10676), True, 'from torch.nn import functional as F\n'), ((10763, 10774), 'time.time', 'time.time', ([], {}), '()\n', (10772, 10774), False, 'import time\n'), ((11324, 11353), 'numpy.argmax', 'np.argmax', (['det_selected_queue'], {}), '(det_selected_queue)\n', (11333, 11353), True, 'import numpy as np\n'), ((14470, 14488), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (14486, 14488), False, 'import sys\n'), ((16577, 16604), 'numpy.zeros', 'np.zeros', (['opt.n_classes_clf'], {}), '(opt.n_classes_clf)\n', (16585, 16604), True, 'import numpy as np\n'), ((16646, 16664), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (16662, 16664), False, 'import sys\n'), ((17218, 17252), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (17228, 
17252), False, 'import csv\n'), ((19244, 19261), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (19252, 19261), True, 'import numpy as np\n'), ((20806, 20825), 'numpy.sum', 'np.sum', (['det_idx[2:]'], {}), '(det_idx[2:])\n', (20812, 20825), True, 'import numpy as np\n'), ((12045, 12056), 'time.time', 'time.time', ([], {}), '()\n', (12054, 12056), False, 'import time\n'), ((12140, 12169), 'torch.nn.functional.softmax', 'F.softmax', (['outputs_clf'], {'dim': '(1)'}), '(outputs_clf, dim=1)\n', (12149, 12169), True, 'from torch.nn import functional as F\n'), ((12264, 12275), 'time.time', 'time.time', ([], {}), '()\n', (12273, 12275), False, 'import time\n'), ((13184, 13211), 'numpy.zeros', 'np.zeros', (['opt.n_classes_clf'], {}), '(opt.n_classes_clf)\n', (13192, 13211), True, 'import numpy as np\n'), ((17533, 17567), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""" """'}), "(csvfile, delimiter=' ')\n", (17543, 17567), False, 'import csv\n'), ((18059, 18093), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""" """'}), "(csvfile, delimiter=' ')\n", (18069, 18093), False, 'import csv\n'), ((6525, 6573), 'os.path.join', 'os.path.join', (['opt.video_path', '"""frames"""', 'vid_name'], {}), "(opt.video_path, 'frames', vid_name)\n", (6537, 6573), False, 'import os\n'), ((6968, 6991), 'numpy.array', 'np.array', (['data[i][1][0]'], {}), '(data[i][1][0])\n', (6976, 6991), True, 'import numpy as np\n'), ((7036, 7060), 'numpy.array', 'np.array', (['data[i][-2][0]'], {}), '(data[i][-2][0])\n', (7044, 7060), True, 'import numpy as np\n'), ((6625, 6699), 'os.path.join', 'os.path.join', (['opt.root_path', '"""bega/datasets/AHG/splitfiles/testlist01.mat"""'], {}), "(opt.root_path, 'bega/datasets/AHG/splitfiles/testlist01.mat')\n", (6637, 6699), False, 'import os\n'), ((7883, 7906), 'numpy.array', 'np.array', (['data[i][1][0]'], {}), '(data[i][1][0])\n', (7891, 7906), True, 'import numpy as np\n'), ((7951, 7975), 'numpy.array', 'np.array', 
(['data[i][-1][0]'], {}), '(data[i][-1][0])\n', (7959, 7975), True, 'import numpy as np\n'), ((7211, 7296), 'os.path.join', 'os.path.join', (['opt.root_path', '"""bega/datasets/Pointing/train_sets/valid_list3.mat"""'], {}), "(opt.root_path, 'bega/datasets/Pointing/train_sets/valid_list3.mat'\n )\n", (7223, 7296), False, 'import os\n'), ((7420, 7499), 'os.path.join', 'os.path.join', (['opt.root_path', '"""bega/datasets/Pointing/train_sets/test_list3.mat"""'], {}), "(opt.root_path, 'bega/datasets/Pointing/train_sets/test_list3.mat')\n", (7432, 7499), False, 'import os\n')] |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Capriqorn --- CAlculation of P(R) and I(Q) Of macRomolcules in solutioN
#
# Copyright (c) <NAME>, <NAME>, and contributors.
# See the file AUTHORS.rst for the full list of contributors.
#
# Released under the GNU Public Licence, v2 or any higher version, see the file LICENSE.txt.
"""Library for the Capriqorn reference structure filter.
Includes naive particle cutout functions as well as more sophisticated ones
based on cell lists.
The cell list implementation uses dictionaries (hashes) as cell containers. The
number of particles considered per query is determined by the cutout distance d
and scales approximately as 27*d^3; d is usually 10 Angstrom.
"""
from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
import numpy as np
import copy
import re
from scipy.spatial.distance import cdist
from . import idxcode
# use Cython-accelerated functions, if possible
try:
from capriqorn.kernel import c_refstruct
have_c_refstruct = True
except:
have_c_refstruct = False
print(" Note: capriqorn.lib.refstruct: could not import c_refstruct")
# cell lists are disabled for the moment due to non-optimum performance
q_cell_lists = False
def set_algorithm(algo):
    """Select the algorithm used for the neighbour search.

    Any name starting with "cell" (case-insensitive) enables the cell-list
    search; every other name selects the brute-force search.
    """
    global q_cell_lists
    q_cell_lists = algo.lower().startswith("cell")
def get_selection(xyz, ref, R):
    """Uppermost entry point to the reference structure selection functions.

    Dispatches to the cell-list cutout or to the brute-force distance query,
    depending on the module-level q_cell_lists flag.
    """
    if not q_cell_lists:
        return queryDistance(xyz, ref, R)
    return cutout_using_cell_lists(xyz, ref, R, return_mask=True)
def queryDistance(xyz, ref, R):
    """Lightweight wrapper around the queryDistance implementations.

    Uses the Cython kernel when it could be imported, otherwise falls back
    to the pure numpy implementation.
    """
    # both backends require double precision input
    xyz = np.asanyarray(xyz, dtype=np.float64)
    ref = np.asanyarray(ref, dtype=np.float64)
    if have_c_refstruct:
        return c_refstruct.queryDistance(xyz, ref, R)
    return queryDistance_opt(xyz, ref, R)
def queryDistance_opt(xyz, ref, R):
    """Check which atoms in xyz lie within a radius R of any reference
    atom.

    Improved implementation in terms of memory and speed: the loop over the
    reference atoms is vectorized in numpy (one distance row at a time), so
    the interpreter-level double loop of the previous version is avoided
    while the memory footprint stays at one row of distances.

    Parameters
    ----------
    xyz : array_like (n_atoms, n_dim)
        atoms positions
    ref : array_like (n_atoms, n_dim)
        Reference atoms positions
    R : float
        distance to any atoms

    Returns
    -------
    query : ndarray (n_atoms)
        boolean array showing which particle are close to ref
    """
    xyz = np.asanyarray(xyz)
    ref = np.asanyarray(ref)
    mask = np.zeros(xyz.shape[0], dtype=bool)
    for i, a in enumerate(xyz):
        # distances from atom a to all reference atoms in one C-level pass
        if np.any(np.linalg.norm(ref - a, axis=1) < R):
            mask[i] = True
    return mask
def queryDistance_legacy(xyz, ref, R):
    """Check which atoms in ``xyz`` lie within radius ``R`` of any atom
    in ``ref``.

    Original implementation; it materializes the full (n_xyz, n_ref)
    distance matrix, which is expensive in memory and CPU time for large
    problem sizes.

    Parameters
    ----------
    xyz : array_like (n_atoms, n_dim)
        atoms positions
    ref : array_like (n_atoms, n_dim)
        Reference atoms positions
    R : float
        distance to any atoms

    Returns
    -------
    query : ndarray (n_atoms)
        boolean array showing which particle are close to ref
    """
    xyz = np.asanyarray(xyz)
    ref = np.asanyarray(ref)
    close_pairs = cdist(xyz, ref) < R
    return close_pairs.sum(1).astype(bool)
def selectBody(ref_coords, coords, R):
    """
    Return indices of the particles within the sphere of radius R.

    Parameters
    ----------
    ref_coords : array_like (n_atoms, n_dim)
        Reference atoms positions
    coords : array_like (n_atoms, n_dim)
        atoms positions
    R : float
        distance to any atoms

    Returns
    -------
    array
        particle indices within reference
    """
    mask = get_selection(coords, ref_coords, R)
    return np.where(mask)
def selectShell(ref_coords, coords, R, sw):
    """
    Return indices of the particles within the spherical shell of
    inner radius (R-sw) and outer radius R, ie the shell.

    Parameters
    ----------
    ref_coords : array_like (n_atoms, n_dim)
        Reference atoms positions
    coords : array_like (n_atoms, n_dim)
        atoms positions
    R : float
        outer cutout distance
    sw : float
        shell width

    Returns
    -------
    array
        particle indices within shell

    Raises
    ------
    RuntimeError
        if the selection radius R is smaller than the shell width sw
    """
    if R < sw:
        # fixed typo in the error message ("then" -> "than")
        raise RuntimeError("selection radius smaller than shell width")
    # shell = particles inside the full body (R) but not inside the core (R - sw)
    body_query = get_selection(coords, ref_coords, R=R)
    core_query = get_selection(coords, ref_coords, R=R - sw)
    query = np.logical_xor(body_query, core_query)
    return np.where(query)
def selectCore(ref_coords, coords, R, sw):
    """
    Return indices of the particles within the sphere of
    radius (R-sw), ie the core.

    Parameters
    ----------
    ref_coords : array_like (n_atoms, n_dim)
        Reference atoms positions
    coords : array_like (n_atoms, n_dim)
        atoms positions
    R : float
        outer cutout distance
    sw : float
        shell width

    Returns
    -------
    array
        particle indices the inner shell

    Raises
    ------
    RuntimeError
        if the selection radius R is smaller than the shell width sw
    """
    if R < sw:
        # fixed typo in the error message ("then" -> "than")
        raise RuntimeError("selection radius smaller than shell width")
    return selectBody(ref_coords, coords, R=R - sw)
def maxInnerDistance(xyz):
    """Return the maximum pairwise distance between atoms in ``xyz``.

    Parameters
    ----------
    xyz : array_like
        array of atoms positions

    Returns
    -------
    float
        maximal distance
    """
    pairwise = cdist(xyz, xyz)
    return pairwise.max()
# --- cell list implementation by <NAME> below ---
def get_neighbours():
    """Helper function. Return the relative integer offsets of the 26
    neighbouring cells as an ndarray of shape (26, 3); the central cell
    (0, 0, 0) itself is excluded."""
    offsets = [-1, 0, 1]
    neighbours = [[i, j, k]
                  for i in offsets
                  for j in offsets
                  for k in offsets
                  if (i, j, k) != (0, 0, 0)]
    return np.asarray(neighbours)
def get_cell_indices(positions, distance):
    """
    Return the array of cell indices for the particles at 'positions', where
    'distance' is the cell edge length. Indices of a cell are given by three
    integer numbers, e.g., [1,0,-2] denotes a single cell.
    """
    # numpy arrays always divide with true division, so plain '/' is
    # equivalent to the old_div() helper here
    return np.rint(positions / distance).astype(np.int64)
def get_cell_strings(indices):
    """
    Convert lists of indices to strings for use as dictionary keys.
    Indices of a cell are given by three integer numbers, e.g., [1,0,-2]
    denotes a single cell, which results in the string "+1+0-2".
    Update: Use get_cell_idxcodes() instead.
    """
    return ["%+d%+d%+d" % tuple(idx) for idx in indices]
def get_cell_index_from_string(string):
    """
    Convert the string index representation (e.g. "+1+0-2") to an integer
    triple representation (e.g. array([1, 0, -2])).
    Update: Use get_cell_index_from_idxcode() instead.
    """
    # raw string for the regex: '(\d+)' is an invalid escape sequence and
    # raises a DeprecationWarning/SyntaxWarning on modern Pythons
    tokens = re.split(r'(\d+)', string)
    # tokens alternate sign and digits: ['+', '1', '+', '0', '-', '2']
    index = [int(tokens[2 * i] + tokens[2 * i + 1]) for i in range(3)]
    return np.asarray(index)
def get_cell_idxcodes(indices):
    """Convert lists of cell indices (integer triples) to idxcodes for use
    as dictionary keys.
    """
    # NOTE: encoding via idxcode proved faster than using tuple(idx) as key
    return [idxcode.encode_triple(idx) for idx in indices]
def get_cell_index_from_idxcode(idx_code):
    """
    Convert the idxcode index representation back to an integer triple
    representation (ndarray of three ints).
    """
    return np.asarray(idxcode.decode_indices(idx_code))
def get_neighbour_indices(indices, neighbours):
    """
    Return the cell indices of the neighbouring cells of the cell given by
    'indices'. Indices of a cell are given by three integer numbers, e.g.,
    [1,0,-2] denotes a single cell.
    """
    # broadcast the central cell index over all relative neighbour offsets
    return indices[np.newaxis, :] + neighbours
def get_particle_indices(cell_indices_strings, uniq_cell_indices_strings):
    """
    Return a dictionary of lists of particle indices.
    Each key corresponds to one cell; its list contains the indices of the
    particles in that cell. The indices are used to retrieve coordinates
    from the corresponding array (e.g., 'positions').
    """
    particle_indices = {key: [] for key in uniq_cell_indices_strings}
    for idx, key in enumerate(cell_indices_strings):
        particle_indices[key].append(idx)
    return particle_indices
def get_particle_indices_within_neighbours(ref_particle_indices, particle_indices, cell_indices, neighbours):
    """
    For each cell occupied by at least one particle of the reference structure (central cell),
    this function returns the indices of all particles of the full structure,
    which belong either to this central cell or its neighbours.

    Parameters
    ----------
    ref_particle_indices : dict
        cell key -> list of reference-structure particle indices in that cell
    particle_indices : dict
        cell key -> list of full-structure particle indices in that cell
    cell_indices : ndarray
        per-particle integer cell-index triples of the full structure
    neighbours : ndarray (26, 3)
        relative integer offsets of the neighbouring cells

    Returns
    -------
    dict
        cell key -> list of full-structure particle indices located in the
        central cell or any of its 26 neighbour cells
    """
    neigh_particle_indices = {}
    for k in ref_particle_indices:
        # add particle indices of the central cell itself
        if k in particle_indices:
            # deepcopy so appending neighbour indices below cannot alias
            # the lists stored in particle_indices
            neigh_particle_indices[k] = copy.deepcopy(particle_indices[k])
            # recover the integer cell index from the first particle in this cell
            ind = cell_indices[particle_indices[k][0]]
        else:
            # reference cell holds no full-structure particle; decode the
            # cell index from the dictionary key instead
            neigh_particle_indices[k] = []
            # ind = get_cell_index_from_string(k)
            ind = get_cell_index_from_idxcode(k)
        # get indices of the 26 neighbour cells
        neighs = get_neighbour_indices(ind, neighbours)
        # get dictionary keys of the neighbour cells
        #neigh_particle_strings = get_cell_strings(neighs)
        neigh_particle_strings = get_cell_idxcodes(neighs)
        # append particle indices of each occupied neighbour cell
        for kk in neigh_particle_strings:
            # NOTE(review): empty neighbour cells are silently skipped here,
            # although an empty neighbour may indicate the simulation box is
            # too small for the current cutoff distance -- confirm intent.
            if kk in particle_indices:
                neigh_particle_indices[k] = neigh_particle_indices[k] + particle_indices[kk]
    return neigh_particle_indices
def get_observation_volume_particle_indices(ref_positions, positions, ref_particle_indices,
                                            particle_indices_within_neighbours, distance, return_mask=False):
    """
    Returns indices (i_out) of particles within cutout distance of reference structure using cell lists,
    or the index-mask of these particles if return_mask is set to True.

    Removed: unused local 'distanceSqr' and the dead commented-out pure-Python
    double loop that duplicated queryDistance().

    Parameters
    ----------
    ref_positions : array_like
        coordinates of the particles of the reference structure
    positions : array_like
        coordinates of the particles of the full system
    ref_particle_indices : dict
        cell key -> list of reference-structure particle indices in that cell
    particle_indices_within_neighbours : dict
        same keys as ref_particle_indices; each entry lists the indices of all
        full-system particles in the central cell and its neighbours
    distance : float
        cutout distance
    return_mask : bool
        if True, return the 0/1 mask over 'positions' instead of indices

    Returns
    -------
    mask : ndarray, if return_mask is True
    (i_out, num_distances_calc) : tuple, otherwise
        indices of selected particles and the number of pairwise distances
        that were evaluated
    """
    mask = np.zeros(len(positions), dtype=np.int64)
    num_distances_calc = 0
    for k in ref_particle_indices:
        ref = ref_positions[ref_particle_indices[k]]
        xyz = positions[particle_indices_within_neighbours[k]]
        tmp = queryDistance(np.asanyarray(xyz, dtype=np.float64),
                            np.asanyarray(ref, dtype=np.float64), distance)
        num_distances_calc += len(ref) * len(xyz)
        # map local hits back to global particle indices
        dummy_indices = np.asanyarray(particle_indices_within_neighbours[k])[np.where(tmp)[0]]
        if len(dummy_indices) > 0:
            mask[dummy_indices] = 1
    if return_mask:
        return mask
    else:
        i_out = np.where(mask == 1)[0]
        return i_out, num_distances_calc
def cutout_using_cell_lists(positions, ref_positions, distance, return_mask=False):
    """
    Returns indices of observation volume, which is defined by
    all particles with coordinates 'positions' within a distance 'distance' of the
    reference structure with particle coordinates 'ref_positions'.
    In case return_mask is True, only the particle index mask is returned.

    Parameters
    ----------
    positions : array_like (n_atoms, n_dim)
        coordinates of the full structure
    ref_positions : array_like (n_atoms, n_dim)
        coordinates of the reference structure
    distance : float
        cutout distance; also used as the cell edge length
    return_mask : bool
        if True return the 0/1 index mask instead of the index array
    """
    # determine to which cell each particle belongs
    # for the full structure
    cell_indices = get_cell_indices(positions, distance)
    #cell_indices_strings = get_cell_strings(cell_indices)
    cell_indices_strings = get_cell_idxcodes(cell_indices)
    uniq_cell_indices_strings = set(cell_indices_strings)
    # print " number of cells for full structure =", len(uniq_cell_indices_strings)
    # for the reference structure
    ref_cell_indices = get_cell_indices(ref_positions, distance)
    #ref_cell_indices_strings = get_cell_strings(ref_cell_indices)
    ref_cell_indices_strings = get_cell_idxcodes(ref_cell_indices)
    ref_uniq_cell_indices_strings = set(ref_cell_indices_strings)
    # print " number of cells for ref. structure =", len(ref_uniq_cell_indices_strings)
    # collect all particle indices belonging to one cell in a single dictionary entry
    particle_indices = get_particle_indices(cell_indices_strings, uniq_cell_indices_strings)
    ref_particle_indices = get_particle_indices(ref_cell_indices_strings, ref_uniq_cell_indices_strings)
    # 'neighbours' contains the relative locations of the 26 neighbouring cells
    neighbours = get_neighbours()
    # collect all particle indices within a cell and its neighbours in a single dictionary entry
    particle_indices_within_neighbours = get_particle_indices_within_neighbours(
        ref_particle_indices, particle_indices, cell_indices, neighbours)
    # determine indices of particles in the observation volume using the cell lists
    result = get_observation_volume_particle_indices(ref_positions, positions, ref_particle_indices,
                                                    particle_indices_within_neighbours, distance,
                                                    return_mask)
    return result
# --- end of cell lists implementation ---
| [
"re.split",
"numpy.where",
"scipy.spatial.distance.cdist",
"numpy.asarray",
"numpy.linalg.norm",
"numpy.logical_xor",
"numpy.asanyarray",
"past.utils.old_div",
"numpy.zeros",
"builtins.range",
"copy.deepcopy",
"capriqorn.kernel.c_refstruct.queryDistance"
] | [((2169, 2205), 'numpy.asanyarray', 'np.asanyarray', (['xyz'], {'dtype': 'np.float64'}), '(xyz, dtype=np.float64)\n', (2182, 2205), True, 'import numpy as np\n'), ((2216, 2252), 'numpy.asanyarray', 'np.asanyarray', (['ref'], {'dtype': 'np.float64'}), '(ref, dtype=np.float64)\n', (2229, 2252), True, 'import numpy as np\n'), ((2910, 2928), 'numpy.asanyarray', 'np.asanyarray', (['xyz'], {}), '(xyz)\n', (2923, 2928), True, 'import numpy as np\n'), ((2939, 2957), 'numpy.asanyarray', 'np.asanyarray', (['ref'], {}), '(ref)\n', (2952, 2957), True, 'import numpy as np\n'), ((2969, 3003), 'numpy.zeros', 'np.zeros', (['xyz.shape[0]'], {'dtype': 'bool'}), '(xyz.shape[0], dtype=bool)\n', (2977, 3003), True, 'import numpy as np\n'), ((3745, 3763), 'numpy.asanyarray', 'np.asanyarray', (['xyz'], {}), '(xyz)\n', (3758, 3763), True, 'import numpy as np\n'), ((3774, 3792), 'numpy.asanyarray', 'np.asanyarray', (['ref'], {}), '(ref)\n', (3787, 3792), True, 'import numpy as np\n'), ((5018, 5056), 'numpy.logical_xor', 'np.logical_xor', (['body_query', 'core_query'], {}), '(body_query, core_query)\n', (5032, 5056), True, 'import numpy as np\n'), ((5068, 5083), 'numpy.where', 'np.where', (['query'], {}), '(query)\n', (5076, 5083), True, 'import numpy as np\n'), ((6355, 6377), 'numpy.asarray', 'np.asarray', (['neighbours'], {}), '(neighbours)\n', (6365, 6377), True, 'import numpy as np\n'), ((7377, 7403), 're.split', 're.split', (['"""(\\\\d+)"""', 'string'], {}), "('(\\\\d+)', string)\n", (7385, 7403), False, 'import re\n'), ((7431, 7439), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (7436, 7439), False, 'from builtins import range\n'), ((7513, 7530), 'numpy.asarray', 'np.asarray', (['index'], {}), '(index)\n', (7523, 7530), True, 'import numpy as np\n'), ((8070, 8088), 'numpy.asarray', 'np.asarray', (['triple'], {}), '(triple)\n', (8080, 8088), True, 'import numpy as np\n'), ((2293, 2331), 'capriqorn.kernel.c_refstruct.queryDistance', 'c_refstruct.queryDistance', (['xyz', 'ref', 
'R'], {}), '(xyz, ref, R)\n', (2318, 2331), False, 'from capriqorn.kernel import c_refstruct\n'), ((5906, 5921), 'scipy.spatial.distance.cdist', 'cdist', (['xyz', 'xyz'], {}), '(xyz, xyz)\n', (5911, 5921), False, 'from scipy.spatial.distance import cdist\n'), ((6685, 6713), 'past.utils.old_div', 'old_div', (['positions', 'distance'], {}), '(positions, distance)\n', (6692, 6713), False, 'from past.utils import old_div\n'), ((9540, 9574), 'copy.deepcopy', 'copy.deepcopy', (['particle_indices[k]'], {}), '(particle_indices[k])\n', (9553, 9574), False, 'import copy\n'), ((11959, 11995), 'numpy.asanyarray', 'np.asanyarray', (['xyz'], {'dtype': 'np.float64'}), '(xyz, dtype=np.float64)\n', (11972, 11995), True, 'import numpy as np\n'), ((11997, 12033), 'numpy.asanyarray', 'np.asanyarray', (['ref'], {'dtype': 'np.float64'}), '(ref, dtype=np.float64)\n', (12010, 12033), True, 'import numpy as np\n'), ((12119, 12171), 'numpy.asanyarray', 'np.asanyarray', (['particle_indices_within_neighbours[k]'], {}), '(particle_indices_within_neighbours[k])\n', (12132, 12171), True, 'import numpy as np\n'), ((12669, 12688), 'numpy.where', 'np.where', (['(mask == 1)'], {}), '(mask == 1)\n', (12677, 12688), True, 'import numpy as np\n'), ((3082, 3103), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (3096, 3103), True, 'import numpy as np\n'), ((12172, 12185), 'numpy.where', 'np.where', (['tmp'], {}), '(tmp)\n', (12180, 12185), True, 'import numpy as np\n'), ((3805, 3820), 'scipy.spatial.distance.cdist', 'cdist', (['xyz', 'ref'], {}), '(xyz, ref)\n', (3810, 3820), False, 'from scipy.spatial.distance import cdist\n')] |
# Based on https://stackoverflow.com/questions/30376581/save-numpy-array-in-append-mode
import tables
import numpy as np
import h5py
class BigH5Array():
    """Append/random-access wrapper around a PyTables HDF5 array.

    Based on https://stackoverflow.com/questions/30376581/save-numpy-array-in-append-mode
    """

    def __init__(self, filename, shape=None, atom=None):
        """
        Args:
            filename: path of the HDF5 file.
            shape: shape of the stored array; for the expandable variant only
                shape[1:] is used (the first axis grows with append()).
            atom: PyTables atom describing the element type; defaults to
                float32. The default is now created lazily per instance
                instead of as a single shared instance evaluated at import
                time (mutable-default-argument antipattern).
        """
        self.filename = filename
        self.shape = shape
        self.atom = tables.Float32Atom() if atom is None else atom

    def open_for_write(self):
        # fixed-size array; random write access via __call__()
        self.f = tables.open_file(self.filename, mode='w')
        self.array_c = self.f.create_carray(self.f.root, 'carray', self.atom, self.shape)

    def open_for_write_expandable(self):
        # extendable array; rows are added with append()
        self.f = tables.open_file(self.filename, mode='w')
        self.array_e = self.f.create_earray(self.f.root, 'data', self.atom, [0] + list(self.shape[1:]))

    def open_for_read(self):
        self.f = tables.open_file(self.filename, mode='r')

    def data(self):  # for expandable
        # e.g. bigarray.data()[1:10, 2:20]
        return self.f.root.data

    def append(self, row_data):  # for expandable
        self.array_e.append(row_data)

    def __call__(self):  # for random access
        return self.f.root.carray

    def close(self):
        self.f.close()
def big_h5_load(filename):
    """Load the whole HDF5 carray stored in ``filename`` into memory and
    return it as a numpy array."""
    store = BigH5Array(filename)
    store.open_for_read()
    loaded = np.array(store())
    store.close()
    return loaded
class H5VarLenStorage:
    """Variable length HDF5 database storage helper class.

    Wrapped access to write/read variable length data are provided.
    Multi dimentional (ndim > 2) is automatically reshaped to 2 dimentions
    when storing with `put()` and unfolded with `get()`.

    Requirement:
    - Data shall have variable length in the last dimention of its shape.

    Attributes:
        f (h5py.File): HDF5 file object for direct access.
    """

    def __init__(self, file_name, mode='r', verbose=False):
        """
        Args:
            file_name: path of the HDF5 file.
            mode: h5py file mode ('r', 'w', 'a', ...).
            verbose: if True, print dataset shapes when they are defined.
        """
        self.f = h5py.File(file_name, mode)
        self.count = {}  # per-key number of items written so far
        self.verbose = verbose

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def __del__(self):
        # may run after __exit__ already closed the file, or after a failed
        # __init__ (no 'f' attribute yet); close() tolerates both
        self.close()

    @staticmethod
    def _is_str(data):
        # treats str and any non-ndarray object as string-like scalar payload
        return type(data) == str or type(data) != np.ndarray

    def set_dataset(self, key, num_items, example):
        """Define a variable-length dataset for num_items items shaped like
        ``example``. The last axis is the variable-length axis; all leading
        axes are folded into one fixed dimension and remembered in the
        dataset's 'shape' attribute so get() can unfold them again."""
        if self._is_str(example):
            dt = h5py.string_dtype()
            shape = (num_items,)
            attr_shape = []
        else:
            dt = h5py.vlen_dtype(example.dtype)
            shape = (num_items, np.prod(example.shape[:-1]))
            attr_shape = list(example.shape[:-1])
        if self.verbose:
            print(f'key={key} stores data with shape={shape + (-1,)}')
        self.f.create_dataset(key, shape=shape, dtype=dt)
        self.f[key].attrs['shape'] = attr_shape
        self.count[key] = 0

    def set_attr(self, key, data):
        """Store ``data`` as a file-level attribute."""
        self.f.attrs[key] = data

    def shape(self, key):
        """Return the fixed (leading) shape recorded for dataset ``key``."""
        return self.f[key].attrs['shape']

    def put(self, key, data):
        """Append one item to dataset ``key``; non-string data is checked
        against the declared leading shape and flattened to 2-D."""
        if not self._is_str(data):
            shape = data.shape[:-1]
            assert np.all(self.shape(key) == shape), f'putting variable shape {shape} is not compatible with definition {self.shape(key)}.'
            data = data.reshape((np.prod(shape), -1))
        self.f[key][self.count[key]] = data
        self.count[key] += 1

    def close(self):
        """Close the underlying HDF5 file.

        Guarded so that it is safe to call when __init__ failed before
        ``self.f`` was assigned (previously __del__ raised AttributeError
        in that case) and safe to call repeatedly.
        """
        f = getattr(self, 'f', None)
        if f is not None:
            f.close()

    def attr(self, key):
        """Read a file-level attribute."""
        return self.f.attrs[key]

    def get(self, key, index):
        """Read item ``index`` from dataset ``key`` and unfold its shape."""
        var = self.f[key][index]
        if self._is_str(var):
            return var
        var = np.array(list(self.f[key][index]))
        shape = list(self.shape(key)) + [-1]
        return var.reshape(shape)

    def __repr__(self):
        format_string = self.__class__.__name__ + '\n'
        format_string += '\n'.join([f' [{k}] shape={self.shape(k)} count={self.count[k]}' for k in self.count])
        return format_string
| [
"numpy.prod",
"tables.open_file",
"h5py.File",
"h5py.string_dtype",
"h5py.vlen_dtype",
"tables.Float32Atom"
] | [((205, 225), 'tables.Float32Atom', 'tables.Float32Atom', ([], {}), '()\n', (223, 225), False, 'import tables\n'), ((360, 401), 'tables.open_file', 'tables.open_file', (['self.filename'], {'mode': '"""w"""'}), "(self.filename, mode='w')\n", (376, 401), False, 'import tables\n'), ((550, 591), 'tables.open_file', 'tables.open_file', (['self.filename'], {'mode': '"""w"""'}), "(self.filename, mode='w')\n", (566, 591), False, 'import tables\n'), ((742, 783), 'tables.open_file', 'tables.open_file', (['self.filename'], {'mode': '"""r"""'}), "(self.filename, mode='r')\n", (758, 783), False, 'import tables\n'), ((1804, 1830), 'h5py.File', 'h5py.File', (['file_name', 'mode'], {}), '(file_name, mode)\n', (1813, 1830), False, 'import h5py\n'), ((2234, 2253), 'h5py.string_dtype', 'h5py.string_dtype', ([], {}), '()\n', (2251, 2253), False, 'import h5py\n'), ((2346, 2376), 'h5py.vlen_dtype', 'h5py.vlen_dtype', (['example.dtype'], {}), '(example.dtype)\n', (2361, 2376), False, 'import h5py\n'), ((2409, 2436), 'numpy.prod', 'np.prod', (['example.shape[:-1]'], {}), '(example.shape[:-1])\n', (2416, 2436), True, 'import numpy as np\n'), ((3132, 3146), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3139, 3146), True, 'import numpy as np\n')] |
# -*-coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Nov 20, 2014
Configuration file for AlexNet topology with LMDB loader.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import numpy
import os
from veles.config import root
base_lr = 0.01
wd = 0.0005
data_path = os.path.join(root.common.dirs.datasets, "AlexNet/LMDB")
root.common.engine.backend = "cuda"
root.common.engine.precision_type = "float"
root.common.engine.precision_level = 0
root.imagenet.lr_adjuster.lr_parameters = {
"lrs_with_lengths":
[(1, 100000), (0.1, 100000), (0.1, 100000), (0.01, 100000000)]}
root.imagenet.lr_adjuster.bias_lr_parameters = {
"lrs_with_lengths":
[(1, 100000), (0.1, 100000), (0.1, 100000), (0.01, 100000000)]}
root.imagenet.update({
"decision": {"fail_iterations": 10000,
"max_epochs": 10000000},
"snapshotter": {"prefix": "imagenet",
"directory":
os.path.join(root.common.dirs.datasets,
"AlexNet/snapshots"),
"interval": 1, "time_interval": 0},
"add_plotters": True,
"image_saver": {"out_dirs":
[os.path.join(root.common.dirs.datasets,
"AlexNet/image_saver/test"),
os.path.join(root.common.dirs.datasets,
"AlexNet/image_saver/validation"),
os.path.join(root.common.dirs.datasets,
"AlexNet/image_saver/train")]},
"lr_adjuster": {"lr_policy_name": "arbitrary_step",
"bias_lr_policy_name": "arbitrary_step"},
"loss_function": "softmax",
"loader_name": "lmdb",
"loader": {"minibatch_size": 256,
"shuffle_limit": numpy.iinfo(numpy.uint32).max,
"crop": (227, 227), "mirror": "random",
"color_space": "RGB", "normalization_type": "external_mean",
"train_path": os.path.join(data_path, "ilsvrc12_train_lmdb"),
"validation_path": os.path.join(data_path, "ilsvrc12_val_lmdb"),
},
"weights_plotter": {"limit": 256, "split_channels": False},
"layers": [{"name": "conv_str1",
"type": "conv_str",
"->": {"n_kernels": 96, "kx": 11, "ky": 11,
"padding": (0, 0, 0, 0), "sliding": (4, 4),
"weights_filling": "gaussian", "weights_stddev": 0.01,
"bias_filling": "constant", "bias_stddev": 0},
"<-": {"learning_rate": base_lr,
"learning_rate_bias": base_lr * 2,
"weights_decay": wd, "weights_decay_bias": 0,
"gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
{"name": "max_pool1",
"type": "max_pooling",
"->": {"kx": 3, "ky": 3, "sliding": (2, 2)}},
{"name": "norm1",
"type": "norm", "n": 5, "alpha": 0.0001, "beta": 0.75},
{"name": "grouping1",
"type": "zero_filter",
"grouping": 2},
{"name": "conv_str2",
"type": "conv_str",
"->": {"n_kernels": 256, "kx": 5, "ky": 5,
"padding": (2, 2, 2, 2), "sliding": (1, 1),
"weights_filling": "gaussian", "weights_stddev": 0.01,
"bias_filling": "constant", "bias_stddev": 0.1},
"<-": {"learning_rate": base_lr,
"learning_rate_bias": base_lr * 2,
"weights_decay": wd, "weights_decay_bias": 0,
"gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
{"name": "max_pool2",
"type": "max_pooling", "->": {"kx": 3, "ky": 3,
"sliding": (2, 2)}},
{"name": "norm2",
"type": "norm", "n": 5, "alpha": 0.0001, "beta": 0.75},
{"name": "grouping2",
"type": "zero_filter",
"grouping": 2},
{"name": "conv_str3",
"type": "conv_str",
"->": {"n_kernels": 384, "kx": 3, "ky": 3,
"padding": (1, 1, 1, 1), "sliding": (1, 1),
"weights_filling": "gaussian", "weights_stddev": 0.01,
"bias_filling": "constant", "bias_stddev": 0},
"<-": {"learning_rate": base_lr,
"learning_rate_bias": base_lr * 2,
"weights_decay": wd, "weights_decay_bias": 0,
"gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
{"name": "conv_str4",
"type": "conv_str",
"->": {"n_kernels": 384, "kx": 3, "ky": 3,
"padding": (1, 1, 1, 1), "sliding": (1, 1),
"weights_filling": "gaussian", "weights_stddev": 0.01,
"bias_filling": "constant", "bias_stddev": 0.1},
"<-": {"learning_rate": base_lr,
"learning_rate_bias": base_lr * 2,
"weights_decay": wd, "weights_decay_bias": 0,
"gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
{"name": "grouping4",
"type": "zero_filter", "grouping": 2},
{"name": "conv_str5",
"type": "conv_str",
"->": {"n_kernels": 256, "kx": 3, "ky": 3,
"padding": (1, 1, 1, 1), "sliding": (1, 1),
"weights_filling": "gaussian", "weights_stddev": 0.01,
"bias_filling": "constant", "bias_stddev": 0.1},
"<-": {"learning_rate": base_lr,
"learning_rate_bias": base_lr * 2,
"weights_decay": wd, "weights_decay_bias": 0,
"gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
{"name": "max_pool5",
"type": "max_pooling",
"->": {"kx": 3, "ky": 3, "sliding": (2, 2)}},
{"name": "grouping5",
"type": "zero_filter", "grouping": 2},
{"name": "fc_linear6",
"type": "all2all",
"->": {"output_sample_shape": 4096,
"weights_filling": "gaussian", "weights_stddev": 0.005,
"bias_filling": "constant", "bias_stddev": 0.1},
"<-": {"learning_rate": base_lr,
"learning_rate_bias": base_lr * 2,
"weights_decay": wd, "weights_decay_bias": 0,
"gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
{"name": "relu6", "type": "activation_str"},
{"name": "drop6", "type": "dropout", "dropout_ratio": 0.5},
{"name": "fc_linear7",
"type": "all2all",
"->": {"output_sample_shape": 4096,
"weights_filling": "gaussian", "weights_stddev": 0.005,
"bias_filling": "constant", "bias_stddev": 0.1},
"<-": {"learning_rate": base_lr,
"learning_rate_bias": base_lr * 2,
"weights_decay": wd, "weights_decay_bias": 0,
"gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
{"name": "relu7", "type": "activation_str"},
{"name": "drop7", "type": "dropout", "dropout_ratio": 0.5},
{"name": "fc_softmax8",
"type": "softmax",
"->": {"output_sample_shape": 1000,
"weights_filling": "gaussian", "weights_stddev": 0.01,
"bias_filling": "constant", "bias_stddev": 0},
"<-": {"learning_rate": base_lr,
"learning_rate_bias": base_lr * 2,
"weights_decay": wd, "weights_decay_bias": 0,
"gradient_moment": 0.9, "gradient_moment_bias": 0.9}}]})
root.imagenet.loader.normalization_parameters = {
"mean_source": os.path.join(root.common.dirs.datasets,
"AlexNet/mean_image_227.JPEG")}
| [
"os.path.join",
"numpy.iinfo"
] | [((1357, 1412), 'os.path.join', 'os.path.join', (['root.common.dirs.datasets', '"""AlexNet/LMDB"""'], {}), "(root.common.dirs.datasets, 'AlexNet/LMDB')\n", (1369, 1412), False, 'import os\n'), ((9258, 9328), 'os.path.join', 'os.path.join', (['root.common.dirs.datasets', '"""AlexNet/mean_image_227.JPEG"""'], {}), "(root.common.dirs.datasets, 'AlexNet/mean_image_227.JPEG')\n", (9270, 9328), False, 'import os\n'), ((2016, 2076), 'os.path.join', 'os.path.join', (['root.common.dirs.datasets', '"""AlexNet/snapshots"""'], {}), "(root.common.dirs.datasets, 'AlexNet/snapshots')\n", (2028, 2076), False, 'import os\n'), ((3044, 3090), 'os.path.join', 'os.path.join', (['data_path', '"""ilsvrc12_train_lmdb"""'], {}), "(data_path, 'ilsvrc12_train_lmdb')\n", (3056, 3090), False, 'import os\n'), ((3126, 3170), 'os.path.join', 'os.path.join', (['data_path', '"""ilsvrc12_val_lmdb"""'], {}), "(data_path, 'ilsvrc12_val_lmdb')\n", (3138, 3170), False, 'import os\n'), ((2246, 2313), 'os.path.join', 'os.path.join', (['root.common.dirs.datasets', '"""AlexNet/image_saver/test"""'], {}), "(root.common.dirs.datasets, 'AlexNet/image_saver/test')\n", (2258, 2313), False, 'import os\n'), ((2370, 2443), 'os.path.join', 'os.path.join', (['root.common.dirs.datasets', '"""AlexNet/image_saver/validation"""'], {}), "(root.common.dirs.datasets, 'AlexNet/image_saver/validation')\n", (2382, 2443), False, 'import os\n'), ((2500, 2568), 'os.path.join', 'os.path.join', (['root.common.dirs.datasets', '"""AlexNet/image_saver/train"""'], {}), "(root.common.dirs.datasets, 'AlexNet/image_saver/train')\n", (2512, 2568), False, 'import os\n'), ((2853, 2878), 'numpy.iinfo', 'numpy.iinfo', (['numpy.uint32'], {}), '(numpy.uint32)\n', (2864, 2878), False, 'import numpy\n')] |
from common.caching import cached
from . import dataio
import numpy as np
import tkinter as tk
import skimage.io
import os
import glob
import random
import time
class BodyPartLabelerGUI(object):
def __init__(self, master, files, labels):
self.master = master
self.files = files
self.image_width = 512
self.image_height = 660
self.last_preview = None
self.line_stack = []
self.ans = []
self.file_index = 0
self.labels = labels
self.times = []
self.canvas = tk.Canvas(width=self.image_width, height=self.image_height)
self.canvas.pack()
self.label_text = tk.StringVar()
self.set_label_text()
self.label = tk.Label(self.master, textvariable=self.label_text)
self.label.pack()
self.draw_image()
self.canvas.bind('<Motion>', self.preview_line)
self.canvas.bind('<Button-1>', self.create_line)
self.canvas.bind('<Button-3>', self.remove_line)
def set_label_text(self):
labels = self.labels[self.files[self.file_index].split('-')[0]]
labels = [i+1 for i, label in enumerate(labels) if label]
eta = '?'
if len(self.times) >= 2:
eta = (len(self.files)-self.file_index) / \
((len(self.times)-1)/(self.times[-1]-self.times[0]))
eta /= 3600
self.label_text.set('%s/%s | eta: %s | threat zones: %s' % (self.file_index + 1,
len(self.files), eta, labels))
def draw_image(self):
file = self.files[self.file_index]
self.image = tk.PhotoImage(file=file)
self.canvas.create_image(0, 0, anchor=tk.NW, image=self.image)
self.side_view = int(file.split('.')[0].split('-')[-1]) in (4, 12)
def horizontal_line(self, y):
return [self.canvas.create_line(0, y, self.image_width, y, fill='#FF0000')]
def vertical_line(self, x):
return [self.canvas.create_line(x, 0, x, self.image_height, fill='#FF0000')]
def symmetric_line(self, x0, x1):
return [
self.vertical_line(x1),
self.vertical_line(x0 - (x1 - x0))
]
def which_line(self, event):
if len(self.ans) < 8:
return self.horizontal_line(event.y), event.y
else:
if self.side_view or len(self.ans) == 8:
return self.vertical_line(event.x), event.x
else:
return self.symmetric_line(self.ans[8], event.x), event.x
def done(self):
if self.side_view:
return len(self.ans) == 10
else:
return len(self.ans) == 11
def preview_line(self, event):
if self.done():
return
if self.last_preview is not None:
for line in self.last_preview:
self.canvas.delete(line)
self.last_preview, _ = self.which_line(event)
def write_output(self):
out = self.files[self.file_index].replace('.gif', '.npy')
np.save(out, np.array(self.ans))
for line in sum(self.line_stack, []):
self.canvas.delete(line)
self.line_stack = []
if self.last_preview is not None:
for line in self.last_preview:
self.canvas.delete(line)
self.last_preview = None
self.ans = []
self.file_index += 1
if self.file_index == len(self.files):
self.master.quit()
return
self.draw_image()
self.set_label_text()
self.times.append(time.time())
if len(self.times) > 10:
self.times.pop(0)
def create_line(self, event):
if self.done():
self.write_output()
return
lines, ans = self.which_line(event)
self.ans.append(ans)
self.line_stack.append(lines)
def remove_line(self, event):
if len(self.line_stack) != 0:
for line in self.line_stack[-1]:
self.canvas.delete(line)
self.line_stack.pop()
self.ans.pop()
@cached(dataio.get_all_data_generator, version=5)
def get_body_part_labels(mode):
if not os.path.exists('gifs_created'):
for file, data in dataio.get_all_data_generator(mode, 'aps')():
for i in range(0, 16, 4):
out = '%s-%s.gif' % (file, i)
if os.path.exists(out):
continue
image = np.rot90(data[:, :, i])
image /= np.max(image)
if i == 4 or i == 8:
image = np.fliplr(image)
skimage.io.imsave(out, image)
open('gifs_created', 'w').close()
if not os.path.exists('labels_created'):
files = [file for file in glob.glob('*.gif')
if not os.path.exists(file.replace('.gif', '.npy'))]
random.seed(0)
random.shuffle(files)
labels = dataio.get_train_labels()
root = tk.Tk()
gui = BodyPartLabelerGUI(root, files, labels)
root.mainloop()
open('labels_created', 'w').close()
if not os.path.exists('done'):
side_images, side_labels = [], []
front_images, front_labels = [], []
for image_file in glob.glob('*.gif'):
label_file = image_file.replace('.gif', '.npy')
if not os.path.exists(label_file):
continue
side_view = int(image_file.split('.')[0].split('-')[-1]) in (4, 12)
images = side_images if side_view else front_images
labels = side_labels if side_view else front_labels
image = skimage.io.imread(image_file)
images.append(image)
label = np.load(label_file).astype('float32')
if len(label) == 11:
label[9:] = label[8] + np.abs(label[9:] - label[8])
labels.append(label)
side_images, side_labels = np.stack(side_images), np.stack(side_labels)
front_images, front_labels = np.stack(front_images), np.stack(front_labels)
np.save('side_images', side_images)
np.save('side_labels', side_labels)
np.save('front_images', front_images)
np.save('front_labels', front_labels)
open('done', 'w').close()
else:
side_images, side_labels = np.load('side_images.npy'), np.load('side_labels.npy')
front_images, front_labels = np.load('front_images.npy'), np.load('front_labels.npy')
return side_images, side_labels, front_images, front_labels | [
"common.caching.cached",
"tkinter.Canvas",
"numpy.array",
"tkinter.Label",
"numpy.rot90",
"numpy.save",
"os.path.exists",
"numpy.max",
"tkinter.StringVar",
"numpy.stack",
"glob.glob",
"numpy.abs",
"random.shuffle",
"numpy.fliplr",
"tkinter.PhotoImage",
"time.time",
"random.seed",
"... | [((4116, 4164), 'common.caching.cached', 'cached', (['dataio.get_all_data_generator'], {'version': '(5)'}), '(dataio.get_all_data_generator, version=5)\n', (4122, 4164), False, 'from common.caching import cached\n'), ((553, 612), 'tkinter.Canvas', 'tk.Canvas', ([], {'width': 'self.image_width', 'height': 'self.image_height'}), '(width=self.image_width, height=self.image_height)\n', (562, 612), True, 'import tkinter as tk\n'), ((667, 681), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (679, 681), True, 'import tkinter as tk\n'), ((733, 784), 'tkinter.Label', 'tk.Label', (['self.master'], {'textvariable': 'self.label_text'}), '(self.master, textvariable=self.label_text)\n', (741, 784), True, 'import tkinter as tk\n'), ((1658, 1682), 'tkinter.PhotoImage', 'tk.PhotoImage', ([], {'file': 'file'}), '(file=file)\n', (1671, 1682), True, 'import tkinter as tk\n'), ((4208, 4238), 'os.path.exists', 'os.path.exists', (['"""gifs_created"""'], {}), "('gifs_created')\n", (4222, 4238), False, 'import os\n'), ((4734, 4766), 'os.path.exists', 'os.path.exists', (['"""labels_created"""'], {}), "('labels_created')\n", (4748, 4766), False, 'import os\n'), ((4899, 4913), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (4910, 4913), False, 'import random\n'), ((4922, 4943), 'random.shuffle', 'random.shuffle', (['files'], {}), '(files)\n', (4936, 4943), False, 'import random\n'), ((5003, 5010), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (5008, 5010), True, 'import tkinter as tk\n'), ((5146, 5168), 'os.path.exists', 'os.path.exists', (['"""done"""'], {}), "('done')\n", (5160, 5168), False, 'import os\n'), ((5282, 5300), 'glob.glob', 'glob.glob', (['"""*.gif"""'], {}), "('*.gif')\n", (5291, 5300), False, 'import glob\n'), ((6094, 6129), 'numpy.save', 'np.save', (['"""side_images"""', 'side_images'], {}), "('side_images', side_images)\n", (6101, 6129), True, 'import numpy as np\n'), ((6138, 6173), 'numpy.save', 'np.save', (['"""side_labels"""', 'side_labels'], {}), 
"('side_labels', side_labels)\n", (6145, 6173), True, 'import numpy as np\n'), ((6182, 6219), 'numpy.save', 'np.save', (['"""front_images"""', 'front_images'], {}), "('front_images', front_images)\n", (6189, 6219), True, 'import numpy as np\n'), ((6228, 6265), 'numpy.save', 'np.save', (['"""front_labels"""', 'front_labels'], {}), "('front_labels', front_labels)\n", (6235, 6265), True, 'import numpy as np\n'), ((3071, 3089), 'numpy.array', 'np.array', (['self.ans'], {}), '(self.ans)\n', (3079, 3089), True, 'import numpy as np\n'), ((3595, 3606), 'time.time', 'time.time', ([], {}), '()\n', (3604, 3606), False, 'import time\n'), ((5956, 5977), 'numpy.stack', 'np.stack', (['side_images'], {}), '(side_images)\n', (5964, 5977), True, 'import numpy as np\n'), ((5979, 6000), 'numpy.stack', 'np.stack', (['side_labels'], {}), '(side_labels)\n', (5987, 6000), True, 'import numpy as np\n'), ((6038, 6060), 'numpy.stack', 'np.stack', (['front_images'], {}), '(front_images)\n', (6046, 6060), True, 'import numpy as np\n'), ((6062, 6084), 'numpy.stack', 'np.stack', (['front_labels'], {}), '(front_labels)\n', (6070, 6084), True, 'import numpy as np\n'), ((6346, 6372), 'numpy.load', 'np.load', (['"""side_images.npy"""'], {}), "('side_images.npy')\n", (6353, 6372), True, 'import numpy as np\n'), ((6374, 6400), 'numpy.load', 'np.load', (['"""side_labels.npy"""'], {}), "('side_labels.npy')\n", (6381, 6400), True, 'import numpy as np\n'), ((6438, 6465), 'numpy.load', 'np.load', (['"""front_images.npy"""'], {}), "('front_images.npy')\n", (6445, 6465), True, 'import numpy as np\n'), ((6467, 6494), 'numpy.load', 'np.load', (['"""front_labels.npy"""'], {}), "('front_labels.npy')\n", (6474, 6494), True, 'import numpy as np\n'), ((4415, 4434), 'os.path.exists', 'os.path.exists', (['out'], {}), '(out)\n', (4429, 4434), False, 'import os\n'), ((4489, 4512), 'numpy.rot90', 'np.rot90', (['data[:, :, i]'], {}), '(data[:, :, i])\n', (4497, 4512), True, 'import numpy as np\n'), ((4538, 4551), 
'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4544, 4551), True, 'import numpy as np\n'), ((4802, 4820), 'glob.glob', 'glob.glob', (['"""*.gif"""'], {}), "('*.gif')\n", (4811, 4820), False, 'import glob\n'), ((5381, 5407), 'os.path.exists', 'os.path.exists', (['label_file'], {}), '(label_file)\n', (5395, 5407), False, 'import os\n'), ((4617, 4633), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (4626, 4633), True, 'import numpy as np\n'), ((5748, 5767), 'numpy.load', 'np.load', (['label_file'], {}), '(label_file)\n', (5755, 5767), True, 'import numpy as np\n'), ((5858, 5886), 'numpy.abs', 'np.abs', (['(label[9:] - label[8])'], {}), '(label[9:] - label[8])\n', (5864, 5886), True, 'import numpy as np\n')] |
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def resolve_de(Pd,beta,gamma):
n = len(beta)
b = [-beta[i] for i in range(n)]
b.append(Pd)
c = np.zeros((n+1,n+1))
for i in range(n): c[i][i] = gamma[i]*2
c[ :, -1] = -1
c[-1, :] = 1
c[-1, -1] = 0
de = np.linalg.solve(c,b)
return de
def sem_perdas(Pd,beta,gamma,pmin,pmax):
n = len(beta)
pg = resolve_de(Pd,beta,gamma)[:-1]
bet = []
gam = []
state = []
carga = Pd
#print(f'[0] Carga inicial: {carga} (MW)'.format())
for i in range(n):
if(pmin[i] <= pg[i] <= pmax[i]):
#print(f'G{i+1}] gerando {pg[i]} (MW) está dentro dos limites operacionais!'.format())
state.append(0)
bet.append(beta[i])
gam.append(gamma[i])
else:
if(pmin[i]> pg[i]):
#print(f'G{i+1}] gerando {pg[i]} (MW) é menor que {pmin[i]} (MW)!'.format())
#print('Ultrapassando a capacidade minima de geração da unidade!')
pg[i] = pmin[i]
state.append(-1)
bet.append(beta[i])
gam.append(gamma[i])
else:
#print(f'G{i+1}] gerando {pg[i]} (MW) é maior que {pmax[i]} (MW)!'.format())
#print('Ultrapassando a capacidade máxima de geração da unidade!')
pg[i] = pmax[i]
state.append(1)
carga = carga - pg[i]
#print(f'[{i+1}]Carga atual: {carga} (MW)'.format())
b = np.array(bet)
c = np.array(gam)
#print('STATE: ', state)
#print('B:',b, 'C:',c)
de = resolve_de(carga, b, c)
#print('DE: ',de)
k=0
for i in range(n):
if(state[i]>0):
k=k+1
else:
pg[i] = de[i-k]
pg = np.array(pg)
lbd = np.float(de[-1])
#print('PG:',pg,'\n')
#print('lbd:',de[-1],'\n')
return ([pg, lbd])
def despacho(path,Pd):
# ENTRADA DE DADOS:
data = pd.read_csv(path)
data.head()
print(f'Problema:\n{data}\nDemanda: {Pd} (MW)'.format())
u = data['Unid.']
alpha = data['A']
beta = data['B']
gamma = data['C']
custo = data['Custo']
pmin = data['Min']
pmax = data['Max']
n = len(u)
a = [alpha[i]*custo[i] for i in range(n)]
b = [ beta[i]*custo[i] for i in range(n)]
c = [gamma[i]*custo[i] for i in range(n)]
de = sem_perdas(Pd, b,c,pmin,pmax)
print('PG:', de[0])
print('Cin:', de[1])
return de
def simu_de():
print('Iniciar simulação...\n')
def visualize():
import matplotlib
matplotlib.axes.Axes.legend
matplotlib.pyplot.legend
matplotlib.legend.Legend
matplotlib.legend.Legend.get_frame
nt = 24 # horas (1 dia)
ng = 3 # numero de geradoras
p = np.zeros((nt,ng)) # matriz 24x3
for i in range(nt):
Pd = random.randint(450,1000)
de = despacho('input2.csv', Pd)
p[i,:] = de[0]
print(p)
tempo = [i for i in range(len(p))]
ax = plt.subplot(111)
for i in range(3):
ger = f'G{i+1}'.format()
plt.plot(tempo, p[:, i], label = ger)
leg = plt.legend(loc='best', ncol=1, shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.8)
plt.xlabel('Tempo (h)')
plt.ylabel('Potência (MW)')
plt.title('Demanda/Unid. Geradora')
plt.show()
if __name__ == '__main__':
import random
path = 'input2.csv'
visualize()
| [
"numpy.linalg.solve",
"numpy.float",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"random.randint",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.sho... | [((194, 218), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {}), '((n + 1, n + 1))\n', (202, 218), True, 'import numpy as np\n'), ((324, 345), 'numpy.linalg.solve', 'np.linalg.solve', (['c', 'b'], {}), '(c, b)\n', (339, 345), True, 'import numpy as np\n'), ((1554, 1567), 'numpy.array', 'np.array', (['bet'], {}), '(bet)\n', (1562, 1567), True, 'import numpy as np\n'), ((1576, 1589), 'numpy.array', 'np.array', (['gam'], {}), '(gam)\n', (1584, 1589), True, 'import numpy as np\n'), ((2012, 2029), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (2023, 2029), True, 'import pandas as pd\n'), ((2825, 2843), 'numpy.zeros', 'np.zeros', (['(nt, ng)'], {}), '((nt, ng))\n', (2833, 2843), True, 'import numpy as np\n'), ((3052, 3068), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3063, 3068), True, 'import matplotlib.pyplot as plt\n'), ((3183, 3241), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'ncol': '(1)', 'shadow': '(True)', 'fancybox': '(True)'}), "(loc='best', ncol=1, shadow=True, fancybox=True)\n", (3193, 3241), True, 'import matplotlib.pyplot as plt\n'), ((3281, 3304), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tempo (h)"""'], {}), "('Tempo (h)')\n", (3291, 3304), True, 'import matplotlib.pyplot as plt\n'), ((3309, 3336), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Potência (MW)"""'], {}), "('Potência (MW)')\n", (3319, 3336), True, 'import matplotlib.pyplot as plt\n'), ((3341, 3376), 'matplotlib.pyplot.title', 'plt.title', (['"""Demanda/Unid. Geradora"""'], {}), "('Demanda/Unid. 
Geradora')\n", (3350, 3376), True, 'import matplotlib.pyplot as plt\n'), ((3382, 3392), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3390, 3392), True, 'import matplotlib.pyplot as plt\n'), ((1829, 1841), 'numpy.array', 'np.array', (['pg'], {}), '(pg)\n', (1837, 1841), True, 'import numpy as np\n'), ((1856, 1872), 'numpy.float', 'np.float', (['de[-1]'], {}), '(de[-1])\n', (1864, 1872), True, 'import numpy as np\n'), ((2895, 2920), 'random.randint', 'random.randint', (['(450)', '(1000)'], {}), '(450, 1000)\n', (2909, 2920), False, 'import random\n'), ((3134, 3169), 'matplotlib.pyplot.plot', 'plt.plot', (['tempo', 'p[:, i]'], {'label': 'ger'}), '(tempo, p[:, i], label=ger)\n', (3142, 3169), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright (c) ASU GitHub Project.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
################################################################################
import numpy as np
import SimpleITK as sitk
def resample_img(itk_image, out_spacing=[2.0, 2.0, 2.0], is_label=True):
# Resample images to 2mm spacing with SimpleITK
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [
int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
| [
"SimpleITK.ResampleImageFilter",
"numpy.round",
"SimpleITK.Transform"
] | [((802, 828), 'SimpleITK.ResampleImageFilter', 'sitk.ResampleImageFilter', ([], {}), '()\n', (826, 828), True, 'import SimpleITK as sitk\n'), ((1039, 1055), 'SimpleITK.Transform', 'sitk.Transform', ([], {}), '()\n', (1053, 1055), True, 'import SimpleITK as sitk\n'), ((552, 619), 'numpy.round', 'np.round', (['(original_size[0] * (original_spacing[0] / out_spacing[0]))'], {}), '(original_size[0] * (original_spacing[0] / out_spacing[0]))\n', (560, 619), True, 'import numpy as np\n'), ((634, 701), 'numpy.round', 'np.round', (['(original_size[1] * (original_spacing[1] / out_spacing[1]))'], {}), '(original_size[1] * (original_spacing[1] / out_spacing[1]))\n', (642, 701), True, 'import numpy as np\n'), ((716, 783), 'numpy.round', 'np.round', (['(original_size[2] * (original_spacing[2] / out_spacing[2]))'], {}), '(original_size[2] * (original_spacing[2] / out_spacing[2]))\n', (724, 783), True, 'import numpy as np\n')] |
from __future__ import division
import argparse
import matplotlib.pyplot as plt
import pickle
import gzip
import numpy as np
import tensorflow as tf
import matplotlib.gridspec as gridspec
import os
# from tensorflow.examples.tutorials.mnist import input_data
# np.set_printoptions(threshold=np.inf)
f =gzip.open('./screenshot_data2002003.gzip','rb')
save_file='./model/vae.ckpt'
z_dim = 500
X_dim = 200
X_channel = 1
conv_dim = 32
h_dim = 128
VAE=False # VAE if true, else AE
CONV=True # convolution if true, else dense layers only
#lr = 1e-4
def lrelu(x, alpha=0.1):
return tf.nn.relu(x) - alpha * tf.nn.relu(-x)
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
# =============================== Q(z|X) ======================================
X = tf.placeholder(tf.float32, shape=[None,X_dim,X_dim,X_channel])
z = tf.placeholder(tf.float32, shape=[None, z_dim])
lr = tf.placeholder(tf.float32)
if CONV:
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
Q_W1 = tf.Variable(xavier_init([int(X_dim*X_dim/((2*2))*conv_dim), h_dim]))
Q_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
Q_W2_mu = tf.Variable(xavier_init([h_dim, z_dim]))
Q_b2_mu = tf.Variable(tf.zeros(shape=[z_dim]))
Q_W2_sigma = tf.Variable(xavier_init([h_dim, z_dim]))
Q_b2_sigma = tf.Variable(tf.zeros(shape=[z_dim]))
def Q(X):
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
# X = tf.reshape(X, [-1, X_dim, X_dim, 3])
conv = tf.contrib.layers.conv2d(X,
conv_dim,
[5, 5],
(2, 2),
padding='SAME',
activation_fn=lrelu,
normalizer_fn=tf.contrib.layers.batch_norm)
conv = tf.contrib.layers.conv2d(conv,
conv_dim,
[5, 5],
(1, 1),
padding='SAME',
activation_fn=lrelu,
normalizer_fn=tf.contrib.layers.batch_norm)
flat = tf.contrib.layers.flatten(conv)
#print(flat.shape)
h = tf.nn.relu(tf.matmul(flat, Q_W1) + Q_b1)
z_mu = tf.matmul(h, Q_W2_mu) + Q_b2_mu
z_logvar = tf.matmul(h, Q_W2_sigma) + Q_b2_sigma
return z_mu, z_logvar
else: # dense layers only
def Q(X):
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
X=tf.layers.flatten(X)
X=tf.layers.dense(X, h_dim, activation=lrelu)
z_mu=tf.layers.dense(X, z_dim, activation=None)
z_logvar=tf.layers.dense(X, z_dim, activation=None)
return z_mu, z_logvar
def sample_z(mu, log_var):
eps = tf.random_normal(shape=tf.shape(mu))
return mu + tf.math.exp(log_var / 2) * eps
# =============================== P(X|z) ======================================
if CONV:
P_W1 = tf.Variable(xavier_init([z_dim, h_dim]))
P_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
P_W2 = tf.Variable(xavier_init([h_dim, int(X_dim*X_dim/((2*2))*conv_dim)]))
P_b2 = tf.Variable(tf.zeros(shape=[int(X_dim*X_dim/((2*2))*conv_dim)]))
def P(z):
h = tf.nn.relu(tf.matmul(z, P_W1) + P_b1)
logits = tf.matmul(h, P_W2) + P_b2
logits=tf.reshape(logits, [-1,int(X_dim/2),int(X_dim/2),conv_dim])
trans_conv = tf.contrib.layers.conv2d_transpose(logits,
conv_dim,
[5, 5],
(1, 1),
padding='SAME',
activation_fn=lrelu,
normalizer_fn=tf.contrib.layers.batch_norm)
trans_conv = tf.contrib.layers.conv2d_transpose(trans_conv,
X_channel, # output dim, 3 for 3-channel image
[5, 5],
(2, 2),
padding='SAME',
# activation_fn=lrelu,
activation_fn=tf.nn.sigmoid,
normalizer_fn=tf.contrib.layers.batch_norm)
# out = tf.nn.sigmoid(trans_conv)
# out = tf.nn.relu6(trans_conv)/6.
# out = tf.nn.relu(trans_conv)
out = trans_conv
return out, logits
else: # dense layers only
def P(z):
z=tf.layers.dense(z, h_dim, activation=lrelu)
logits=tf.layers.dense(z, X_dim*X_dim*conv_dim, activation=lrelu)
out=tf.nn.sigmoid(logits)
out=tf.reshape(out, [-1, X_dim, X_dim, X_channel])
return out, logits
# =============================== TRAINING ====================================
z_mu, z_logvar = Q(X)
z_sample = sample_z(z_mu, z_logvar)
if VAE:
out, logits = P(z_sample)
else:
out, logits = P(z_mu)
# Sampling from random z
X_samples, _ = P(z)
# E[log P(X|z)]
# recon_loss = tf.reduce_sum(tf.abs(out - X))
recon_loss=tf.reduce_sum(tf.losses.mean_squared_error(out, X))
# D_KL(Q(z|X) || P(z)); calculate in closed form as both dist. are Gaussian
kl_loss = 0.5 * tf.reduce_sum(tf.math.exp(z_logvar) + z_mu**2 - 1. - z_logvar)
#recon_loss=tf.reduce_sum(tf.abs(X - X))
if VAE:
# VAE loss
vae_loss = tf.reduce_mean(recon_loss + kl_loss)
else:
# AE loss
vae_loss = tf.reduce_mean(recon_loss)
solver = tf.train.AdamOptimizer(lr).minimize(vae_loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if not os.path.exists('convae/'):
os.makedirs('convae/')
Loss=[]
It=[]
train_times=1000
batch=[]
data_samples=1
epoch_samples=1
# load data
for i in range(data_samples):
print(i)
if X_channel>1: # channel==3
batch.append(pickle.load(f)/255.) # rgb image value range 0-255
else: # channel==1
batch.append(pickle.load(f)[:,:,1:2]/255.) # rgb image value range 0-255
print(np.array(batch).shape)
# save original img
if X_channel>1:
plt.imshow(batch[0])
else:
plt.imshow(batch[0][:,:,0])
plt.savefig('convae/{}.png'.format(str('origin').zfill(3)), bbox_inches='tight')
# vae training
for it in range(train_times):
for epo in range(data_samples//epoch_samples):
_, loss ,recon_l, kl_l, output = sess.run([solver, vae_loss,recon_loss,kl_loss,out], \
feed_dict={X: batch[epo*epoch_samples:epoch_samples*(epo+1)],lr:1e-3/train_times})
Loss.append(loss)
It.append(it)
print('Iter: {}'.format(it))
#print('Loss: {:.4}'. format(loss),recon_l,kl_l)
print('Loss: {:.4}, KL: {}, Recon: {}'.format(loss, kl_l, recon_l))
sample = sess.run(X_samples, feed_dict={z: np.random.randn(1,z_dim)})
if X_channel>1:
plt.imshow(sample.reshape(X_dim,X_dim,X_channel))
else:
plt.imshow(sample.reshape(X_dim,X_dim))
plt.savefig('convae/{}.png'.format(str(it).zfill(3)), bbox_inches='tight')
saver.save(sess, save_file)
f.close() | [
"tensorflow.contrib.layers.conv2d",
"tensorflow.contrib.layers.flatten",
"tensorflow.layers.flatten",
"tensorflow.shape",
"gzip.open",
"tensorflow.contrib.layers.conv2d_transpose",
"numpy.array",
"tensorflow.math.exp",
"tensorflow.reduce_mean",
"matplotlib.pyplot.imshow",
"os.path.exists",
"te... | [((303, 351), 'gzip.open', 'gzip.open', (['"""./screenshot_data2002003.gzip"""', '"""rb"""'], {}), "('./screenshot_data2002003.gzip', 'rb')\n", (312, 351), False, 'import gzip\n'), ((862, 927), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, X_dim, X_dim, X_channel]'}), '(tf.float32, shape=[None, X_dim, X_dim, X_channel])\n', (876, 927), True, 'import tensorflow as tf\n'), ((929, 976), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, z_dim]'}), '(tf.float32, shape=[None, z_dim])\n', (943, 976), True, 'import tensorflow as tf\n'), ((982, 1008), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (996, 1008), True, 'import tensorflow as tf\n'), ((6123, 6135), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6133, 6135), True, 'import tensorflow as tf\n'), ((6188, 6204), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6202, 6204), True, 'import tensorflow as tf\n'), ((724, 774), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'size', 'stddev': 'xavier_stddev'}), '(shape=size, stddev=xavier_stddev)\n', (740, 774), True, 'import tensorflow as tf\n'), ((5687, 5723), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['out', 'X'], {}), '(out, X)\n', (5715, 5723), True, 'import tensorflow as tf\n'), ((5960, 5996), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(recon_loss + kl_loss)'], {}), '(recon_loss + kl_loss)\n', (5974, 5996), True, 'import tensorflow as tf\n'), ((6032, 6058), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['recon_loss'], {}), '(recon_loss)\n', (6046, 6058), True, 'import tensorflow as tf\n'), ((6145, 6178), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6176, 6178), True, 'import tensorflow as tf\n'), ((6213, 6238), 'os.path.exists', 'os.path.exists', (['"""convae/"""'], {}), "('convae/')\n", (6227, 6238), False, 'import os\n'), ((6244, 6266), 
'os.makedirs', 'os.makedirs', (['"""convae/"""'], {}), "('convae/')\n", (6255, 6266), False, 'import os\n'), ((6677, 6697), 'matplotlib.pyplot.imshow', 'plt.imshow', (['batch[0]'], {}), '(batch[0])\n', (6687, 6697), True, 'import matplotlib.pyplot as plt\n'), ((6708, 6737), 'matplotlib.pyplot.imshow', 'plt.imshow', (['batch[0][:, :, 0]'], {}), '(batch[0][:, :, 0])\n', (6718, 6737), True, 'import matplotlib.pyplot as plt\n'), ((583, 596), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (593, 596), True, 'import tensorflow as tf\n'), ((692, 713), 'tensorflow.sqrt', 'tf.sqrt', (['(in_dim / 2.0)'], {}), '(in_dim / 2.0)\n', (699, 713), True, 'import tensorflow as tf\n'), ((1028, 1077), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {'reuse': 'tf.AUTO_REUSE'}), "('encoder', reuse=tf.AUTO_REUSE)\n", (1045, 1077), True, 'import tensorflow as tf\n'), ((3351, 3374), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[h_dim]'}), '(shape=[h_dim])\n', (3359, 3374), True, 'import tensorflow as tf\n'), ((3738, 3896), 'tensorflow.contrib.layers.conv2d_transpose', 'tf.contrib.layers.conv2d_transpose', (['logits', 'conv_dim', '[5, 5]', '(1, 1)'], {'padding': '"""SAME"""', 'activation_fn': 'lrelu', 'normalizer_fn': 'tf.contrib.layers.batch_norm'}), "(logits, conv_dim, [5, 5], (1, 1),\n padding='SAME', activation_fn=lrelu, normalizer_fn=tf.contrib.layers.\n batch_norm)\n", (3772, 3896), True, 'import tensorflow as tf\n'), ((4247, 4418), 'tensorflow.contrib.layers.conv2d_transpose', 'tf.contrib.layers.conv2d_transpose', (['trans_conv', 'X_channel', '[5, 5]', '(2, 2)'], {'padding': '"""SAME"""', 'activation_fn': 'tf.nn.sigmoid', 'normalizer_fn': 'tf.contrib.layers.batch_norm'}), "(trans_conv, X_channel, [5, 5], (2, 2),\n padding='SAME', activation_fn=tf.nn.sigmoid, normalizer_fn=tf.contrib.\n layers.batch_norm)\n", (4281, 4418), True, 'import tensorflow as tf\n'), ((5100, 5143), 'tensorflow.layers.dense', 'tf.layers.dense', (['z', 'h_dim'], {'activation': 
'lrelu'}), '(z, h_dim, activation=lrelu)\n', (5115, 5143), True, 'import tensorflow as tf\n'), ((5159, 5221), 'tensorflow.layers.dense', 'tf.layers.dense', (['z', '(X_dim * X_dim * conv_dim)'], {'activation': 'lrelu'}), '(z, X_dim * X_dim * conv_dim, activation=lrelu)\n', (5174, 5221), True, 'import tensorflow as tf\n'), ((5230, 5251), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), '(logits)\n', (5243, 5251), True, 'import tensorflow as tf\n'), ((5264, 5310), 'tensorflow.reshape', 'tf.reshape', (['out', '[-1, X_dim, X_dim, X_channel]'], {}), '(out, [-1, X_dim, X_dim, X_channel])\n', (5274, 5310), True, 'import tensorflow as tf\n'), ((6069, 6095), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (6091, 6095), True, 'import tensorflow as tf\n'), ((6613, 6628), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (6621, 6628), True, 'import numpy as np\n'), ((607, 621), 'tensorflow.nn.relu', 'tf.nn.relu', (['(-x)'], {}), '(-x)\n', (617, 621), True, 'import tensorflow as tf\n'), ((1191, 1214), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[h_dim]'}), '(shape=[h_dim])\n', (1199, 1214), True, 'import tensorflow as tf\n'), ((1306, 1329), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[z_dim]'}), '(shape=[z_dim])\n', (1314, 1329), True, 'import tensorflow as tf\n'), ((1427, 1450), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[z_dim]'}), '(shape=[z_dim])\n', (1435, 1450), True, 'import tensorflow as tf\n'), ((1483, 1532), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {'reuse': 'tf.AUTO_REUSE'}), "('encoder', reuse=tf.AUTO_REUSE)\n", (1500, 1532), True, 'import tensorflow as tf\n'), ((1608, 1746), 'tensorflow.contrib.layers.conv2d', 'tf.contrib.layers.conv2d', (['X', 'conv_dim', '[5, 5]', '(2, 2)'], {'padding': '"""SAME"""', 'activation_fn': 'lrelu', 'normalizer_fn': 'tf.contrib.layers.batch_norm'}), "(X, conv_dim, [5, 5], (2, 2), padding='SAME',\n activation_fn=lrelu, 
normalizer_fn=tf.contrib.layers.batch_norm)\n", (1632, 1746), True, 'import tensorflow as tf\n'), ((2026, 2167), 'tensorflow.contrib.layers.conv2d', 'tf.contrib.layers.conv2d', (['conv', 'conv_dim', '[5, 5]', '(1, 1)'], {'padding': '"""SAME"""', 'activation_fn': 'lrelu', 'normalizer_fn': 'tf.contrib.layers.batch_norm'}), "(conv, conv_dim, [5, 5], (1, 1), padding='SAME',\n activation_fn=lrelu, normalizer_fn=tf.contrib.layers.batch_norm)\n", (2050, 2167), True, 'import tensorflow as tf\n'), ((2447, 2478), 'tensorflow.contrib.layers.flatten', 'tf.contrib.layers.flatten', (['conv'], {}), '(conv)\n', (2472, 2478), True, 'import tensorflow as tf\n'), ((2763, 2812), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {'reuse': 'tf.AUTO_REUSE'}), "('encoder', reuse=tf.AUTO_REUSE)\n", (2780, 2812), True, 'import tensorflow as tf\n'), ((2828, 2848), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['X'], {}), '(X)\n', (2845, 2848), True, 'import tensorflow as tf\n'), ((2863, 2906), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'h_dim'], {'activation': 'lrelu'}), '(X, h_dim, activation=lrelu)\n', (2878, 2906), True, 'import tensorflow as tf\n'), ((2924, 2966), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'z_dim'], {'activation': 'None'}), '(X, z_dim, activation=None)\n', (2939, 2966), True, 'import tensorflow as tf\n'), ((2988, 3030), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'z_dim'], {'activation': 'None'}), '(X, z_dim, activation=None)\n', (3003, 3030), True, 'import tensorflow as tf\n'), ((3123, 3135), 'tensorflow.shape', 'tf.shape', (['mu'], {}), '(mu)\n', (3131, 3135), True, 'import tensorflow as tf\n'), ((3153, 3177), 'tensorflow.math.exp', 'tf.math.exp', (['(log_var / 2)'], {}), '(log_var / 2)\n', (3164, 3177), True, 'import tensorflow as tf\n'), ((3616, 3634), 'tensorflow.matmul', 'tf.matmul', (['h', 'P_W2'], {}), '(h, P_W2)\n', (3625, 3634), True, 'import tensorflow as tf\n'), ((2586, 2607), 'tensorflow.matmul', 
'tf.matmul', (['h', 'Q_W2_mu'], {}), '(h, Q_W2_mu)\n', (2595, 2607), True, 'import tensorflow as tf\n'), ((2641, 2665), 'tensorflow.matmul', 'tf.matmul', (['h', 'Q_W2_sigma'], {}), '(h, Q_W2_sigma)\n', (2650, 2665), True, 'import tensorflow as tf\n'), ((3572, 3590), 'tensorflow.matmul', 'tf.matmul', (['z', 'P_W1'], {}), '(z, P_W1)\n', (3581, 3590), True, 'import tensorflow as tf\n'), ((6451, 6465), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6462, 6465), False, 'import pickle\n'), ((7361, 7386), 'numpy.random.randn', 'np.random.randn', (['(1)', 'z_dim'], {}), '(1, z_dim)\n', (7376, 7386), True, 'import numpy as np\n'), ((2537, 2558), 'tensorflow.matmul', 'tf.matmul', (['flat', 'Q_W1'], {}), '(flat, Q_W1)\n', (2546, 2558), True, 'import tensorflow as tf\n'), ((5831, 5852), 'tensorflow.math.exp', 'tf.math.exp', (['z_logvar'], {}), '(z_logvar)\n', (5842, 5852), True, 'import tensorflow as tf\n'), ((6546, 6560), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6557, 6560), False, 'import pickle\n')] |
import numpy as np
DEBUG = False
def overlap(samll_box, big_box):
    """Return 1 when ``samll_box`` lies strictly inside ``big_box``, else 0.

    Boxes are ``[idx, x1, y1, x2, y2]``-style sequences; only elements 1-4
    are compared, and containment is strict (touching edges do not count).
    """
    inside = (samll_box[1] > big_box[1]
              and samll_box[2] > big_box[2]
              and samll_box[3] < big_box[3]
              and samll_box[4] < big_box[4])
    return int(inside)
def local_box_layer(rois, im_info):
    """Pick a local context ("scene") box for every ROI.

    Scanning candidate boxes 1..n-1, the last box that strictly contains
    ROI ``i`` (per ``overlap``) becomes its scene box; an ROI contained by
    no other box falls back to the full-image extent taken from ``im_info``.

    Args:
        rois: tensor of boxes exposing ``.size()`` / ``.tolist()``
            (e.g. a torch tensor); rows are [idx, x1, y1, x2, y2]-style.
        im_info: batch image info; ``im_info[0][0]`` and ``im_info[0][1]``
            are used as the fallback image extent.

    Returns:
        float32 numpy array with one scene box per input ROI.
    """
    n_boxes = rois.size()[0]
    boxes = rois.tolist()
    info = im_info[0]
    scenes = []
    for i in range(n_boxes):
        # NOTE(review): index 0 is never considered as a scene candidate —
        # presumably box 0 is a special/whole-image entry; confirm upstream.
        found = []
        for j in range(1, n_boxes):
            if overlap(boxes[i], boxes[j]) == 1:
                found = boxes[j]
        scenes.append(found if found else [0, 0, 0, info[0], info[1]])
    return np.array(scenes).astype(np.float32)
"numpy.array"
] | [((1284, 1305), 'numpy.array', 'np.array', (['local_boxes'], {}), '(local_boxes)\n', (1292, 1305), True, 'import numpy as np\n')] |
# A simple Psi 4 input script to compute a SCF reference using Psi4's libJK
# Requires numpy 1.7.2+
#
# Created by: <NAME>
# Date: 4/1/15
# License: GPL v3.0
#
import time
import numpy as np
import helper_HF as scf_helper
np.set_printoptions(precision=5, linewidth=200, suppress=True)
import psi4
# Memory for Psi4 in GB
psi4.set_memory('2 GB')
psi4.core.set_output_file('output.dat', False)
# Memory for numpy in GB
numpy_memory = 2
# Triplet O2
mol = psi4.geometry("""
 0 3
 O
 O 1 1.2
symmetry c1
""")
# ROHF reference with conventional (pk) integrals; tight energy convergence.
psi4.set_options({'guess': 'core',
                  'basis': 'aug-cc-pvdz',
                  'scf_type': 'pk',
                  'e_convergence': 1e-8,
                  'reference': 'rohf'})
wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('BASIS'))
# Set occupations
nocc = wfn.nalpha()
ndocc = wfn.nbeta()
nsocc = nocc - ndocc
# Set defaults
maxiter = 10
max_micro = 4
micro_print = True
micro_conv = 1.e-3
E_conv = 1.0E-8
D_conv = 1.0E-4
# Integral generation from Psi4's MintsHelper
t = time.time()
mints = psi4.core.MintsHelper(wfn.basisset())
S = np.asarray(mints.ao_overlap())
nbf = S.shape[0]
jk = psi4.core.JK.build(wfn.basisset())
jk.initialize()
# The full in-core ERI tensor below is O(nbf^4); guard against huge bases.
if nbf > 100:
    raise Exception("This has a N^4 memory overhead, killing if nbf > 100.")
print('\nNumber of doubly occupied orbitals: %d' % ndocc)
print('Number of singly occupied orbitals: %d' % nsocc)
print('Number of basis functions: %d' % nbf)
V = np.asarray(mints.ao_potential())
T = np.asarray(mints.ao_kinetic())
# Build H_core
H = T + V
# ERI's
I = np.asarray(mints.ao_eri())
# Orthogonalizer A = S^(-1/2)
A = mints.ao_overlap()
A.power(-0.5, 1.e-16)
A = np.asarray(A)
print('\nTotal time taken for integrals: %.3f seconds.' % (time.time()-t))
t = time.time()
def transform(I, C1, C2, C3, C4):
    """Transform a 4-index AO-basis tensor into the MO basis, one index at a time.

    Each ``Ci`` is an (nao, nmo_i) coefficient matrix contracted against the
    corresponding axis of ``I``; the result has shape
    (C1.shape[1], C2.shape[1], C3.shape[1], C4.shape[1]).
    """
    nao = I.shape[0]
    # First index via a reshape + matrix multiply (fast GEMM path).
    half = np.dot(C1.T, I.reshape(nao, -1)).reshape(C1.shape[1], nao, nao, nao)
    # Remaining three indices contracted one at a time with einsum.
    half = np.einsum('qB,Aqrs->ABrs', C2, half)
    half = np.einsum('rC,ABrs->ABCs', C3, half)
    return np.einsum('sD,ABCs->ABCD', C4, half)
# Build initial orbitals and density matrices
# Core guess: diagonalize the orthogonalized core Hamiltonian A^T H A.
Hp = A.dot(H).dot(A)
e, Ct = np.linalg.eigh(Hp)
C = A.dot(Ct)
# Alpha density spans all occupied orbitals; beta spans only the doubly occupied.
Cnocc = C[:, :nocc]
Docc = np.dot(Cnocc, Cnocc.T)
Cndocc = C[:, :ndocc]
Ddocc = np.dot(Cndocc, Cndocc.T)
t = time.time()
E = 0.0
Enuc = mol.nuclear_repulsion_energy()
Eold = 0.0
iter_type = 'CORE'
# Build a DIIS helper object
diis = scf_helper.DIIS_helper()
print('\nTotal time taken for setup: %.3f seconds' % (time.time() - t))
print('\nStart SCF iterations:\n')
t = time.time()
# ROHF SCF loop: DIIS-accelerated until the gradient is small, then a
# second-order (SOSCF) Newton step using the explicit orbital Hessian.
for SCF_ITER in range(1, maxiter + 1):
    # Build a and b fock matrices
    Ja = np.einsum('pqrs,rs->pq', I, Docc)
    Ka = np.einsum('prqs,rs->pq', I, Docc)
    Jb = np.einsum('pqrs,rs->pq', I, Ddocc)
    Kb = np.einsum('prqs,rs->pq', I, Ddocc)
    J = Ja + Jb
    Fa = H + J - Ka
    Fb = H + J - Kb
    # Build MO Fock matrix
    moFa = (C.T).dot(Fa).dot(C)
    moFb = (C.T).dot(Fb).dot(C)
    # Special note on the ROHF Fock matrix (taken from psi4)
    # Fo = open-shell fock matrix = 0.5 Fa
    # Fc = closed-shell fock matrix = 0.5 (Fa + Fb)
    #
    # The effective Fock matrix has the following structure
    #          |  closed     open    virtual
    #  ----------------------------------------
    #  closed  |    Fc     2(Fc-Fo)    Fc
    #  open    | 2(Fc-Fo)     Fc      2Fo
    #  virtual |    Fc       2Fo       Fc
    moFeff = 0.5 * (moFa + moFb)
    moFeff[:ndocc, ndocc:nocc] = moFb[:ndocc, ndocc:nocc]
    moFeff[ndocc:nocc, :ndocc] = moFb[ndocc:nocc, :ndocc]
    moFeff[ndocc:nocc, nocc:] = moFa[ndocc:nocc, nocc:]
    moFeff[nocc:, ndocc:nocc] = moFa[nocc:, ndocc:nocc]
    # Back transform to AO Fock
    Feff = (Ct).dot(moFeff).dot(Ct.T)
    # Build gradient (occupied x non-doubly-occupied block of effective Fock;
    # the open-open block is zeroed since those rotations are redundant).
    IFock = moFeff[:nocc, ndocc:].copy()
    IFock[ndocc:, :nsocc] = 0.0
    diis_e = (Ct[:, :nocc]).dot(IFock).dot(Ct[:, ndocc:].T)
    diis.add(Feff, diis_e)
    # SCF energy and update
    SCF_E = np.einsum('pq,pq->', Docc + Ddocc, H)
    SCF_E += np.einsum('pq,pq->', Docc, Fa)
    SCF_E += np.einsum('pq,pq->', Ddocc, Fb)
    SCF_E *= 0.5
    SCF_E += Enuc
    dRMS = np.mean(diis_e**2)**0.5
    print('SCF Iteration %3d: Energy = %4.16f   dE = % 1.5E   dRMS = %1.5E   %s' % \
            (SCF_ITER, SCF_E, (SCF_E - Eold), dRMS, iter_type))
    if (abs(SCF_E - Eold) < E_conv) and (dRMS < D_conv):
        break
    #if SCF_ITER == maxiter:
    #    clean()
    #    raise Exception("Maximum number of SCF cycles exceeded.")
    ediff = abs(SCF_E - Eold)
    Eold = SCF_E
    gradient = -4 * IFock.copy()
    gradient[ndocc:] /= 2
    gradient[:, :nsocc] /= 2
    gradient[ndocc:, :nsocc] = 0.0
    grad_dot = np.linalg.norm(gradient)
    #if True:
    # Far from convergence: plain DIIS step.  Close: second-order update.
    if (np.max(np.abs(gradient)) > 0.1):
        # Conventional update
        Feff = diis.extrapolate()
        e, Ct = np.linalg.eigh(Feff)
        C = A.dot(Ct)
        iter_type = 'DIIS'
    else:
        # Second-order update
        Cocc = C[:, :nocc]
        Cvir = C[:, ndocc:]
        nvir = nbf - ndocc
        # Build an approximate ROHF guess
        eps = np.diag(moFeff)
        precon = -4 * (eps[:nocc].reshape(-1, 1) - eps[ndocc:])
        precon[ndocc:, :nsocc] = 1
        precon[ndocc:] /= 2
        guess_x = gradient / precon
        # Start Hessian: MO integrals in the (occ, vir, occ, vir) rotation space.
        MOovov = transform(I, Cocc, Cvir, Cocc, Cvir)
        MOoovv = transform(I, Cocc, Cocc, Cvir, Cvir)
        IAJB = MOovov.copy()
        IAJB -= 0.5 * np.einsum('pqrs->psrq', MOovov)
        IAJB -= 0.5 * np.einsum('pqrs->qspr', MOoovv)
        iajb = IAJB.copy()
        IAJB += 0.5 * np.einsum('IJ,AB->IAJB', np.diag(np.ones(nocc)), moFa[ndocc:, ndocc:])
        IAJB -= 0.5 * np.einsum('AB,IJ->IAJB', np.diag(np.ones(nvir)), moFa[:nocc, :nocc])
        IAJB[:, :nsocc, :, :] = 0.0
        IAJB[:, :, :, :nsocc] = 0.0
        iajb += 0.5 * np.einsum('IJ,AB->IAJB', np.diag(np.ones(nocc)), moFb[ndocc:, ndocc:])
        iajb -= 0.5 * np.einsum('AB,IJ->IAJB', np.diag(np.ones(nvir)), moFb[:nocc, :nocc])
        iajb[:, :, ndocc:, :] = 0.0
        iajb[ndocc:, :, :, :] = 0.0
        IAjb = MOovov.copy()
        for i in range(nsocc):
            IAjb[ndocc + i, :, :, i] += 0.5 * moFb[ndocc:, :nocc]
        IAjb[:, :, ndocc:, :] = 0.0
        IAjb[:, :nsocc, :, :] = 0.0
        iaJB = np.einsum('pqrs->rspq', IAjb)
        # Build and find x
        Hess = IAJB + IAjb + iaJB + iajb
        Hess *= 4
        ndim = Hess.shape[0] * Hess.shape[1]
        Hess = Hess.reshape(gradient.size, -1) # Make the hessian square
        Hess[np.diag_indices_from(Hess)] += 1.e-14 # Prevent singularities
        x = np.linalg.solve(Hess, gradient.ravel()).reshape(nocc, nvir)
        # Special orbital rotation, some overlap in the middle
        U = np.zeros((C.shape[1], C.shape[1]))
        U[:nocc, ndocc:] = x
        U[ndocc:, :nocc] = -x.T
        U += 0.5 * np.dot(U, U)
        U[np.diag_indices_from(U)] += 1
        # Easy access to Schmidt orthogonalization
        U, r = np.linalg.qr(U.T)
        #print U
        # Rotate and set orbitals
        Ct = Ct.dot(U)
        C = A.dot(Ct)
        iter_type = 'SOSCF'
    # Rebuild densities from the updated orbitals for the next iteration.
    Cnocc = C[:, :nocc]
    Docc = np.dot(Cnocc, Cnocc.T)
    Cndocc = C[:, :ndocc]
    Ddocc = np.dot(Cndocc, Cndocc.T)
print('Total time for SCF iterations: %.3f seconds \n' % (time.time() - t))
print('Final SCF energy: %.8f hartree' % SCF_E)
# Compare to Psi4
SCF_E_psi = psi4.energy('SCF')
psi4.compare_values(SCF_E_psi, SCF_E, 6, 'SCF Energy')
| [
"numpy.einsum",
"numpy.linalg.norm",
"numpy.mean",
"numpy.diag_indices_from",
"psi4.geometry",
"numpy.linalg.qr",
"numpy.asarray",
"numpy.dot",
"psi4.compare_values",
"numpy.linalg.eigh",
"numpy.abs",
"psi4.set_memory",
"numpy.ones",
"psi4.energy",
"time.time",
"numpy.set_printoptions"... | [((223, 285), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(5)', 'linewidth': '(200)', 'suppress': '(True)'}), '(precision=5, linewidth=200, suppress=True)\n', (242, 285), True, 'import numpy as np\n'), ((323, 346), 'psi4.set_memory', 'psi4.set_memory', (['"""2 GB"""'], {}), "('2 GB')\n", (338, 346), False, 'import psi4\n'), ((347, 393), 'psi4.core.set_output_file', 'psi4.core.set_output_file', (['"""output.dat"""', '(False)'], {}), "('output.dat', False)\n", (372, 393), False, 'import psi4\n'), ((457, 517), 'psi4.geometry', 'psi4.geometry', (['"""\n 0 3\n O\n O 1 1.2\nsymmetry c1\n"""'], {}), '("""\n 0 3\n O\n O 1 1.2\nsymmetry c1\n""")\n', (470, 517), False, 'import psi4\n'), ((519, 645), 'psi4.set_options', 'psi4.set_options', (["{'guess': 'core', 'basis': 'aug-cc-pvdz', 'scf_type': 'pk', 'e_convergence':\n 1e-08, 'reference': 'rohf'}"], {}), "({'guess': 'core', 'basis': 'aug-cc-pvdz', 'scf_type': 'pk',\n 'e_convergence': 1e-08, 'reference': 'rohf'})\n", (535, 645), False, 'import psi4\n'), ((1036, 1047), 'time.time', 'time.time', ([], {}), '()\n', (1045, 1047), False, 'import time\n'), ((1683, 1696), 'numpy.asarray', 'np.asarray', (['A'], {}), '(A)\n', (1693, 1696), True, 'import numpy as np\n'), ((1778, 1789), 'time.time', 'time.time', ([], {}), '()\n', (1787, 1789), False, 'import time\n'), ((2192, 2210), 'numpy.linalg.eigh', 'np.linalg.eigh', (['Hp'], {}), '(Hp)\n', (2206, 2210), True, 'import numpy as np\n'), ((2252, 2274), 'numpy.dot', 'np.dot', (['Cnocc', 'Cnocc.T'], {}), '(Cnocc, Cnocc.T)\n', (2258, 2274), True, 'import numpy as np\n'), ((2305, 2329), 'numpy.dot', 'np.dot', (['Cndocc', 'Cndocc.T'], {}), '(Cndocc, Cndocc.T)\n', (2311, 2329), True, 'import numpy as np\n'), ((2335, 2346), 'time.time', 'time.time', ([], {}), '()\n', (2344, 2346), False, 'import time\n'), ((2460, 2484), 'helper_HF.DIIS_helper', 'scf_helper.DIIS_helper', ([], {}), '()\n', (2482, 2484), True, 'import helper_HF as 
scf_helper\n'), ((2598, 2609), 'time.time', 'time.time', ([], {}), '()\n', (2607, 2609), False, 'import time\n'), ((7463, 7481), 'psi4.energy', 'psi4.energy', (['"""SCF"""'], {}), "('SCF')\n", (7474, 7481), False, 'import psi4\n'), ((7482, 7536), 'psi4.compare_values', 'psi4.compare_values', (['SCF_E_psi', 'SCF_E', '(6)', '"""SCF Energy"""'], {}), "(SCF_E_psi, SCF_E, 6, 'SCF Energy')\n", (7501, 7536), False, 'import psi4\n'), ((754, 790), 'psi4.core.get_global_option', 'psi4.core.get_global_option', (['"""BASIS"""'], {}), "('BASIS')\n", (781, 790), False, 'import psi4\n'), ((1979, 2013), 'numpy.einsum', 'np.einsum', (['"""qB,Aqrs->ABrs"""', 'C2', 'MO'], {}), "('qB,Aqrs->ABrs', C2, MO)\n", (1988, 2013), True, 'import numpy as np\n'), ((2023, 2057), 'numpy.einsum', 'np.einsum', (['"""rC,ABrs->ABCs"""', 'C3', 'MO'], {}), "('rC,ABrs->ABCs', C3, MO)\n", (2032, 2057), True, 'import numpy as np\n'), ((2067, 2101), 'numpy.einsum', 'np.einsum', (['"""sD,ABCs->ABCD"""', 'C4', 'MO'], {}), "('sD,ABCs->ABCD', C4, MO)\n", (2076, 2101), True, 'import numpy as np\n'), ((2694, 2727), 'numpy.einsum', 'np.einsum', (['"""pqrs,rs->pq"""', 'I', 'Docc'], {}), "('pqrs,rs->pq', I, Docc)\n", (2703, 2727), True, 'import numpy as np\n'), ((2737, 2770), 'numpy.einsum', 'np.einsum', (['"""prqs,rs->pq"""', 'I', 'Docc'], {}), "('prqs,rs->pq', I, Docc)\n", (2746, 2770), True, 'import numpy as np\n'), ((2780, 2814), 'numpy.einsum', 'np.einsum', (['"""pqrs,rs->pq"""', 'I', 'Ddocc'], {}), "('pqrs,rs->pq', I, Ddocc)\n", (2789, 2814), True, 'import numpy as np\n'), ((2824, 2858), 'numpy.einsum', 'np.einsum', (['"""prqs,rs->pq"""', 'I', 'Ddocc'], {}), "('prqs,rs->pq', I, Ddocc)\n", (2833, 2858), True, 'import numpy as np\n'), ((4006, 4043), 'numpy.einsum', 'np.einsum', (['"""pq,pq->"""', '(Docc + Ddocc)', 'H'], {}), "('pq,pq->', Docc + Ddocc, H)\n", (4015, 4043), True, 'import numpy as np\n'), ((4057, 4087), 'numpy.einsum', 'np.einsum', (['"""pq,pq->"""', 'Docc', 'Fa'], {}), "('pq,pq->', Docc, Fa)\n", 
(4066, 4087), True, 'import numpy as np\n'), ((4101, 4132), 'numpy.einsum', 'np.einsum', (['"""pq,pq->"""', 'Ddocc', 'Fb'], {}), "('pq,pq->', Ddocc, Fb)\n", (4110, 4132), True, 'import numpy as np\n'), ((4726, 4750), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (4740, 4750), True, 'import numpy as np\n'), ((7219, 7241), 'numpy.dot', 'np.dot', (['Cnocc', 'Cnocc.T'], {}), '(Cnocc, Cnocc.T)\n', (7225, 7241), True, 'import numpy as np\n'), ((7280, 7304), 'numpy.dot', 'np.dot', (['Cndocc', 'Cndocc.T'], {}), '(Cndocc, Cndocc.T)\n', (7286, 7304), True, 'import numpy as np\n'), ((4181, 4201), 'numpy.mean', 'np.mean', (['(diis_e ** 2)'], {}), '(diis_e ** 2)\n', (4188, 4201), True, 'import numpy as np\n'), ((4887, 4907), 'numpy.linalg.eigh', 'np.linalg.eigh', (['Feff'], {}), '(Feff)\n', (4901, 4907), True, 'import numpy as np\n'), ((5137, 5152), 'numpy.diag', 'np.diag', (['moFeff'], {}), '(moFeff)\n', (5144, 5152), True, 'import numpy as np\n'), ((6345, 6374), 'numpy.einsum', 'np.einsum', (['"""pqrs->rspq"""', 'IAjb'], {}), "('pqrs->rspq', IAjb)\n", (6354, 6374), True, 'import numpy as np\n'), ((6805, 6839), 'numpy.zeros', 'np.zeros', (['(C.shape[1], C.shape[1])'], {}), '((C.shape[1], C.shape[1]))\n', (6813, 6839), True, 'import numpy as np\n'), ((7039, 7056), 'numpy.linalg.qr', 'np.linalg.qr', (['U.T'], {}), '(U.T)\n', (7051, 7056), True, 'import numpy as np\n'), ((1757, 1768), 'time.time', 'time.time', ([], {}), '()\n', (1766, 1768), False, 'import time\n'), ((2540, 2551), 'time.time', 'time.time', ([], {}), '()\n', (2549, 2551), False, 'import time\n'), ((4781, 4797), 'numpy.abs', 'np.abs', (['gradient'], {}), '(gradient)\n', (4787, 4797), True, 'import numpy as np\n'), ((5502, 5533), 'numpy.einsum', 'np.einsum', (['"""pqrs->psrq"""', 'MOovov'], {}), "('pqrs->psrq', MOovov)\n", (5511, 5533), True, 'import numpy as np\n'), ((5556, 5587), 'numpy.einsum', 'np.einsum', (['"""pqrs->qspr"""', 'MOoovv'], {}), "('pqrs->qspr', MOoovv)\n", (5565, 
5587), True, 'import numpy as np\n'), ((6595, 6621), 'numpy.diag_indices_from', 'np.diag_indices_from', (['Hess'], {}), '(Hess)\n', (6615, 6621), True, 'import numpy as np\n'), ((6921, 6933), 'numpy.dot', 'np.dot', (['U', 'U'], {}), '(U, U)\n', (6927, 6933), True, 'import numpy as np\n'), ((6944, 6967), 'numpy.diag_indices_from', 'np.diag_indices_from', (['U'], {}), '(U)\n', (6964, 6967), True, 'import numpy as np\n'), ((7365, 7376), 'time.time', 'time.time', ([], {}), '()\n', (7374, 7376), False, 'import time\n'), ((5672, 5685), 'numpy.ones', 'np.ones', (['nocc'], {}), '(nocc)\n', (5679, 5685), True, 'import numpy as np\n'), ((5765, 5778), 'numpy.ones', 'np.ones', (['nvir'], {}), '(nvir)\n', (5772, 5778), True, 'import numpy as np\n'), ((5929, 5942), 'numpy.ones', 'np.ones', (['nocc'], {}), '(nocc)\n', (5936, 5942), True, 'import numpy as np\n'), ((6022, 6035), 'numpy.ones', 'np.ones', (['nvir'], {}), '(nvir)\n', (6029, 6035), True, 'import numpy as np\n')] |
import platform
import numpy as np
import pytest
import qtpy
from napari.layers import Labels, Points
from qtpy.QtCore import QCoreApplication
from PartSeg._roi_analysis.image_view import ResultImageView
from PartSeg.common_backend.base_settings import BaseSettings
from PartSeg.common_gui.channel_control import ChannelProperty
from PartSeg.common_gui.napari_viewer_wrap import Viewer
from PartSegCore.project_info import AdditionalLayerDescription
from PartSegCore.roi_info import ROIInfo
from .utils import CI_BUILD
# Skip marker for the PySide2/Linux combination, where these GUI tests are known to fail.
pyside_skip = pytest.mark.skipif(qtpy.API_NAME == "PySide2" and platform.system() == "Linux", reason="PySide2 problem")
class TestResultImageView:
    @pytest.mark.skipif((platform.system() == "Windows") and CI_BUILD, reason="glBindFramebuffer with no OpenGL")
    @pyside_skip
    def test_simple(self, qtbot, part_settings, image):
        """With only an image loaded (no ROI), all ROI-related controls stay hidden."""
        prop = ChannelProperty(part_settings, "test")
        viewer = ResultImageView(part_settings, prop, "test")
        viewer.show()
        qtbot.add_widget(prop)
        qtbot.add_widget(viewer)
        viewer.add_image(image)
        assert not viewer.roi_alternative_select.isVisible()
        assert not viewer.label1.isVisible()
        assert not viewer.label2.isVisible()
        assert not viewer.opacity.isVisible()
        assert not viewer.only_border.isVisible()
        assert not viewer.roi_alternative_select.isVisible()
        assert not viewer.any_roi()
        assert not viewer.available_alternatives()
        viewer.hide()
        # prop.close()
        # viewer.close()
    @pytest.mark.skipif((platform.system() == "Windows") and CI_BUILD, reason="glBindFramebuffer with no OpenGL")
    @pyside_skip
    def test_set_roi(self, qtbot, part_settings, image):
        """Setting a ROI shows the ROI controls but not the alternatives selector."""
        prop = ChannelProperty(part_settings, "test")
        viewer = ResultImageView(part_settings, prop, "test")
        qtbot.add_widget(prop)
        qtbot.add_widget(viewer)
        viewer.show()
        part_settings.image = image
        # Threshold channel 0 to build a simple binary ROI matching the image.
        roi = ROIInfo((image.get_channel(0) > 0).astype(np.uint8))
        roi = roi.fit_to_image(image)
        viewer.set_roi(roi, image)
        QCoreApplication.processEvents()
        assert not viewer.roi_alternative_select.isVisible()
        assert viewer.label1.isVisible()
        assert viewer.label2.isVisible()
        assert viewer.opacity.isVisible()
        assert viewer.only_border.isVisible()
        assert not viewer.roi_alternative_select.isVisible()
        assert viewer.any_roi()
        assert not viewer.available_alternatives()
        viewer.hide()
@pyside_skip
@pytest.mark.skipif((platform.system() == "Windows") and CI_BUILD, reason="glBindFramebuffer with no OpenGL")
class TestNapariViewer:
    def test_base(self, image, analysis_segmentation2, tmp_path):
        """Layer creation follows the current image/ROI/mask state and is idempotent."""
        settings = BaseSettings(tmp_path)
        settings.image = image
        viewer = Viewer(settings, "")
        viewer.create_initial_layers(True, True, True, True)
        assert len(viewer.layers) == 2
        # Calling again must not duplicate layers.
        viewer.create_initial_layers(True, True, True, True)
        assert len(viewer.layers) == 2
        settings.image = analysis_segmentation2.image
        viewer.create_initial_layers(True, True, True, True)
        assert len(viewer.layers) == 1
        settings.roi = analysis_segmentation2.roi_info.roi
        viewer.create_initial_layers(True, True, True, True)
        assert len(viewer.layers) == 2
        settings.mask = analysis_segmentation2.mask
        viewer.create_initial_layers(True, True, True, True)
        assert len(viewer.layers) == 3
        viewer.close()
    def test_points(self, image, tmp_path, qtbot):
        """A Points layer tracks settings.points once point syncing is enabled."""
        settings = BaseSettings(tmp_path)
        settings.image = image
        viewer = Viewer(settings, "")
        viewer.create_initial_layers(True, True, True, True)
        assert len(viewer.layers) == 2
        points = np.array([[0, 1, 1, 1], [0, 7, 10, 10]])
        settings.points = points
        viewer.create_initial_layers(True, True, True, True)
        assert len(viewer.layers) == 3
        assert isinstance(viewer.layers[-1], Points)
        viewer._sync_widget.sync_points_chk.setChecked(True)
        # With sync enabled, clearing and restoring points updates the layer list.
        with qtbot.wait_signal(settings.points_changed):
            settings.points = None
        assert len(viewer.layers) == 2
        with qtbot.wait_signal(settings.points_changed):
            settings.points = points
        assert len(viewer.layers) == 3
        assert isinstance(viewer.layers[-1], Points)
        viewer.close()
    def test_image(self, image, image2, tmp_path, qtbot):
        """Enabling image sync adds new channels when the settings image changes."""
        settings = BaseSettings(tmp_path)
        settings.image = image
        viewer = Viewer(settings, "test")
        with qtbot.waitSignal(viewer._sync_widget.sync_image_chk.stateChanged):
            viewer._sync_widget.sync_image_chk.setChecked(True)
        assert len(viewer.layers) == 2
        with qtbot.waitSignal(settings.image_changed):
            settings.image = image2
        assert len(viewer.layers) == 3
        viewer.close()
    def test_roi(self, image, tmp_path, qtbot):
        """With ROI sync enabled, assigning a ROI (plus alternative) adds layers."""
        settings = BaseSettings(tmp_path)
        settings.image = image
        viewer = Viewer(settings, "test")
        viewer._sync_widget.sync_image()
        assert len(viewer.layers) == 2
        viewer._sync_widget.sync_ROI_chk.setChecked(True)
        roi_info = ROIInfo(image.get_channel(0), {}, {"sample": image.get_channel(1)})
        with qtbot.waitSignal(settings.roi_changed):
            settings.roi = roi_info
        assert len(viewer.layers) == 4
        viewer.close()
    def test_additional(self, image, tmp_path, qtbot):
        """Additional layers sync as their declared types (last one a Labels layer)."""
        settings = BaseSettings(tmp_path)
        settings.image = image
        viewer = Viewer(settings, "test")
        viewer._sync_widget.sync_image()
        assert len(viewer.layers) == 2
        settings._additional_layers = {
            "first": AdditionalLayerDescription(image.get_channel(0), "image", "first"),
            "second": AdditionalLayerDescription(image.get_channel(0), "labels", "second"),
        }
        viewer._sync_widget.sync_additional()
        assert len(viewer.layers) == 4
        assert isinstance(viewer.layers[-1], Labels)
        viewer.close()
| [
"PartSeg.common_gui.napari_viewer_wrap.Viewer",
"PartSeg.common_backend.base_settings.BaseSettings",
"qtpy.QtCore.QCoreApplication.processEvents",
"numpy.array",
"platform.system",
"PartSeg.common_gui.channel_control.ChannelProperty",
"PartSeg._roi_analysis.image_view.ResultImageView"
] | [((874, 912), 'PartSeg.common_gui.channel_control.ChannelProperty', 'ChannelProperty', (['part_settings', '"""test"""'], {}), "(part_settings, 'test')\n", (889, 912), False, 'from PartSeg.common_gui.channel_control import ChannelProperty\n'), ((930, 974), 'PartSeg._roi_analysis.image_view.ResultImageView', 'ResultImageView', (['part_settings', 'prop', '"""test"""'], {}), "(part_settings, prop, 'test')\n", (945, 974), False, 'from PartSeg._roi_analysis.image_view import ResultImageView\n'), ((1762, 1800), 'PartSeg.common_gui.channel_control.ChannelProperty', 'ChannelProperty', (['part_settings', '"""test"""'], {}), "(part_settings, 'test')\n", (1777, 1800), False, 'from PartSeg.common_gui.channel_control import ChannelProperty\n'), ((1818, 1862), 'PartSeg._roi_analysis.image_view.ResultImageView', 'ResultImageView', (['part_settings', 'prop', '"""test"""'], {}), "(part_settings, prop, 'test')\n", (1833, 1862), False, 'from PartSeg._roi_analysis.image_view import ResultImageView\n'), ((2133, 2165), 'qtpy.QtCore.QCoreApplication.processEvents', 'QCoreApplication.processEvents', ([], {}), '()\n', (2163, 2165), False, 'from qtpy.QtCore import QCoreApplication\n'), ((2797, 2819), 'PartSeg.common_backend.base_settings.BaseSettings', 'BaseSettings', (['tmp_path'], {}), '(tmp_path)\n', (2809, 2819), False, 'from PartSeg.common_backend.base_settings import BaseSettings\n'), ((2868, 2888), 'PartSeg.common_gui.napari_viewer_wrap.Viewer', 'Viewer', (['settings', '""""""'], {}), "(settings, '')\n", (2874, 2888), False, 'from PartSeg.common_gui.napari_viewer_wrap import Viewer\n'), ((3648, 3670), 'PartSeg.common_backend.base_settings.BaseSettings', 'BaseSettings', (['tmp_path'], {}), '(tmp_path)\n', (3660, 3670), False, 'from PartSeg.common_backend.base_settings import BaseSettings\n'), ((3719, 3739), 'PartSeg.common_gui.napari_viewer_wrap.Viewer', 'Viewer', (['settings', '""""""'], {}), "(settings, '')\n", (3725, 3739), False, 'from PartSeg.common_gui.napari_viewer_wrap 
import Viewer\n'), ((3857, 3897), 'numpy.array', 'np.array', (['[[0, 1, 1, 1], [0, 7, 10, 10]]'], {}), '([[0, 1, 1, 1], [0, 7, 10, 10]])\n', (3865, 3897), True, 'import numpy as np\n'), ((4563, 4585), 'PartSeg.common_backend.base_settings.BaseSettings', 'BaseSettings', (['tmp_path'], {}), '(tmp_path)\n', (4575, 4585), False, 'from PartSeg.common_backend.base_settings import BaseSettings\n'), ((4634, 4658), 'PartSeg.common_gui.napari_viewer_wrap.Viewer', 'Viewer', (['settings', '"""test"""'], {}), "(settings, 'test')\n", (4640, 4658), False, 'from PartSeg.common_gui.napari_viewer_wrap import Viewer\n'), ((5063, 5085), 'PartSeg.common_backend.base_settings.BaseSettings', 'BaseSettings', (['tmp_path'], {}), '(tmp_path)\n', (5075, 5085), False, 'from PartSeg.common_backend.base_settings import BaseSettings\n'), ((5134, 5158), 'PartSeg.common_gui.napari_viewer_wrap.Viewer', 'Viewer', (['settings', '"""test"""'], {}), "(settings, 'test')\n", (5140, 5158), False, 'from PartSeg.common_gui.napari_viewer_wrap import Viewer\n'), ((5610, 5632), 'PartSeg.common_backend.base_settings.BaseSettings', 'BaseSettings', (['tmp_path'], {}), '(tmp_path)\n', (5622, 5632), False, 'from PartSeg.common_backend.base_settings import BaseSettings\n'), ((5681, 5705), 'PartSeg.common_gui.napari_viewer_wrap.Viewer', 'Viewer', (['settings', '"""test"""'], {}), "(settings, 'test')\n", (5687, 5705), False, 'from PartSeg.common_gui.napari_viewer_wrap import Viewer\n'), ((587, 604), 'platform.system', 'platform.system', ([], {}), '()\n', (602, 604), False, 'import platform\n'), ((2599, 2616), 'platform.system', 'platform.system', ([], {}), '()\n', (2614, 2616), False, 'import platform\n'), ((697, 714), 'platform.system', 'platform.system', ([], {}), '()\n', (712, 714), False, 'import platform\n'), ((1584, 1601), 'platform.system', 'platform.system', ([], {}), '()\n', (1599, 1601), False, 'import platform\n')] |
# The kNN code implemented using NumPy
import numpy as np
import db_func
from os import path
def display_data(v):
    """Print a flattened digit image as ASCII art.

    Entries > 0 are drawn as a solid block and the rest as spaces, wrapping
    to a new line every ``db_func.config["data-width"]`` values.  Returns
    True on success, or None (after printing a notice) when *v* is not a
    1-D numpy array whose length is a multiple of the configured width.
    """
    try:
        width = db_func.config["data-width"]
        assert(type(v) is np.ndarray)
        assert(len(v.shape) == 1)
        assert(v.size % width == 0)
    except:
        print("Invalid v for display_data")
        return None
    # Walk the vector one display row at a time.
    for row_start in range(0, v.size, width):
        for cell in v[row_start:row_start + width]:
            print('█' if cell > 0 else " ", end="")
        print()
    return True
def construct_data_set():
    """Load the labelled data set from the database.

    @return: list of (actual_value, data) tuples, where data is a numpy
             array; returns [] when the database listing cannot be read.
             NOTE(review): the loading Tasks below are not implemented
             yet, so as shipped this always returns [].
    """
    id_list = db_func.read_db_list()
    try:
        assert(id_list is not None)
    except Exception as e:
        # read_db_list signals failure by returning None
        print("construct_data_set failed because read_db_list failed.")
        print(e)
        return []
    # Keep only the data with actual value
    # Task: filter out the data without actual value, because they cannot be used in the model; save the remaining id in a list
    data_set = []
    # Task: try to load the data's actual value and the numpy array data, and save the result as a tuple in data_set
    # You may use "read_one_data" function defined in db_func
    return data_set
def generate_M(data_set):
    """Stack the data vectors of data_set into the matrix M used for MSE.

    @param data_set: list of (actual_value, vector) tuples.
    @return: matrix of row vectors, or None on failure.
             NOTE: the stacking Task is not implemented yet, so this
             currently always returns None.
    """
    try:
        matrix = None
        # Task: from the data_set, you should join all numpy row vectors into a matrix data_set_pure (pure means we do not save the actual value here)
        return matrix
    except Exception as err:
        print("generate_M failed")
        print(err)
        return None
def calculate_mse(v, M):
    """Compute the mean squared error between v and every row of M.

    @param v: 1-D numpy array.
    @param M: 2-D numpy array whose rows are compared against v.
    @return: per-row MSE values, or None on invalid input.
             NOTE: the computation Task is not implemented yet, so this
             currently always returns None.
    """
    # Asserts kept (rather than an if-chain) so the printed exception on
    # bad input is identical to the original behaviour.
    try:
        assert type(v) is np.ndarray
        assert type(M) is np.ndarray
        assert v.ndim == 1
        assert M.ndim == 2
    except Exception as err:
        print("Invalid format for v or M")
        print(err)
        return None
    mse_per_row = None
    # Task: calculate the mean squared error for each row in M versus vector v. Hint: you may take advantage of Numpy's broadcast mechanism.
    # Take advantage of Numpy's broadcast mechanism: https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
    return mse_per_row
def predict(v, k=None, test=False):
    """Predict the digit for vector v using k-nearest-neighbours.

    @param v: 1-D numpy array holding the flattened image to classify.
    @param k: number of neighbours to consider; defaults to
              db_func.config["default-k"]; must be a positive int.
    @param test: when True (and data exists) also return the vote count
                 of the winning digit.
    @return: predicted digit; (digit, count) when test=True and data
             exists; None for an invalid k.
    """
    if k is None:
        k = db_func.config["default-k"]
    else:
        try:
            assert(type(k) is int)
            assert(k > 0)
        except Exception as e:
            print("Invalid argument k for predict")
            print(e)
            return None
    data_set = construct_data_set()
    # Randomly choose one if no data
    if len(data_set) == 0:
        most_indices = np.random.choice(np.arange(0, 10))
    else:
        most_indices = None
        # Task: aggregate the functions you have implemented and come up with the most indices, save it as most_indices.
        # You may follow the 6 steps:
        # 1. calculate the mean squared error vector
        # 2. collect the actual_value vector from the data_set
        # 3. get the indices with the minimal k mean squared errors
        # 4. get the actual values with the smallest k MSE from your actual_value vector
        # 5. count the number of indices
        # 6. find the most indices from the count
    # Provide also the counts for test
    # NOTE(review): `counts` is expected to be produced by the Task code
    # above (step 5); until it is implemented this branch raises
    # NameError when test=True and data exists — confirm after filling in.
    if test and len(data_set)>0:
        return most_indices, counts[most_indices]
    else:
        return most_indices
| [
"db_func.read_db_list",
"numpy.arange"
] | [((900, 922), 'db_func.read_db_list', 'db_func.read_db_list', ([], {}), '()\n', (920, 922), False, 'import db_func\n'), ((3107, 3123), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (3116, 3123), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 7 11:43:28 2021
@author: <NAME>
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
class Panels:
    """Discretisation of a randomly generated airfoil into flat panels.

    Holds the panel node coordinates (x_coords, y_coords, chord-normalised),
    the panel midpoints (control points), outward normals, lengths and
    orientation angles, plus the camber line of the generated section.
    """

    def __init__(self, n_panels):
        """Build a random airfoil discretised into n_panels panels.

        @param n_panels: number of panels; must be even (validated by
                         get_n_panels).
        """
        self.n_panels = self.get_n_panels(n_panels)
        self.x_coords, self.y_coords, self.camber_line = self.get_coords(self.n_panels)
        self.control_x_coords, self.control_y_coords = self.get_control_points(self.x_coords, self.y_coords)
        self.normal = self.get_normal(self.x_coords, self.y_coords)
        self.lengths = self.get_length(self.x_coords, self.y_coords)
        self.theta = self.get_angles(self.x_coords, self.y_coords, self.lengths)

    # Allows user to set y coords of panels
    # @param y coords to be set
    def set_y_coords(self, y_coords):
        """Replace the y coordinates and refresh every derived quantity
        (camber line, control points, normals, lengths, angles)."""
        self.y_coords = y_coords
        self.camber_line = self.get_camber(self.y_coords)
        self.control_x_coords, self.control_y_coords = self.get_control_points(self.x_coords, self.y_coords)
        self.normal = self.get_normal(self.x_coords, self.y_coords)
        self.lengths = self.get_length(self.x_coords, self.y_coords)
        self.theta = self.get_angles(self.x_coords, self.y_coords, self.lengths)

    # Calculates the camberline of given panels coordinates
    # @param Y cooridinates of panels
    def get_camber(self, y_coords):
        """Return the camber line as the mean of the upper and lower
        surfaces (nodes are ordered lower surface then upper, so the
        upper half is flipped to align the two)."""
        bot_surface = y_coords[0:len(y_coords)//2+1]
        top_surface = np.flip(y_coords[len(y_coords)//2:])
        camber_line = (top_surface + bot_surface) / 2.0
        return camber_line

    # Ensures the passed number of panels is valid
    # @param Number of panels to create
    def get_n_panels(self, n_panels):
        """Round n_panels to an int and return it; raise if it is odd."""
        if int(round(n_panels)) % 2 == 0:
            return int(round(n_panels))
        else:
            raise Exception("Invalid number of panels (must be even).")

    # Gets the x/c and y/c normalized coordinates of the panels
    # @param Number of panels
    def get_coords(self, n_panels):
        """Generate node coordinates for a fresh random airfoil.

        @return: (x_coords, y_coords, camber_line)
        """
        x_coords = self.get_x_coords(n_panels)
        y_coords, camber_line = self.get_y_coords(x_coords)
        return x_coords, y_coords, camber_line

    # Gets the x/c normalized coordinates of the panels
    # @param Number of panels
    def get_x_coords(self, n_panels):
        """Return cosine-clustered x/c node locations (denser near the
        leading and trailing edges), traversing trailing edge -> lower
        surface -> leading edge -> upper surface -> trailing edge."""
        n = (n_panels//2)
        j = np.arange(n+1)
        top_coords = 0.5 - 0.5*np.cos(j*np.pi/n)
        bot_coords = 0.5 + 0.5*np.cos(j*np.pi/n)
        x_coords = np.concatenate((bot_coords, top_coords[1:]))
        return x_coords

    # Gets the y/c normalized coordinates of the panels and camber updated x/c normalized coords of the panels
    # @param X cooridinates of panels
    def get_y_coords(self, x_coords):
        """Generate a random thickness and camber distribution and return
        the resulting y/c node coordinates plus the camber line.

        The thickness is an elliptic-like leading-edge piece blended with
        a parabolic trailing-edge piece; the camber is a piecewise
        quadratic through randomly drawn control points.
        NOTE(review): the magic constants appear to bound the random
        thickness/camber parameters to plausible airfoil ranges — confirm
        against the originating report. Results depend on the order of
        the np.random.rand() draws below.
        """
        x_on_c = x_coords[0:len(x_coords)//2+1]
        # random max thickness (yf) at random chordwise position (xf)
        yf = 0.15 * np.random.rand() + 0.10
        xf = 0.30 * np.random.rand() + 0.10
        m0 = (100.0 - 2.0*(yf/xf)) * np.random.rand() + 2.0*(yf/xf)
        a = np.sqrt(xf/(m0*(m0*xf-2.0*yf)))*abs(m0*xf-yf)
        b = abs((m0*xf-yf)*yf)/(m0*xf-2.0*yf)
        h = xf
        k = (-yf*yf)/(m0*xf-2.0*yf)
        LE_thickness = ((b*np.sqrt(a*a-(x_on_c*(x_on_c<=xf)-h)**2.0)+a*k) / a) * (x_on_c<=xf)
        c = -yf/(xf*xf-2.0*xf+1)
        d = (2.0*xf*yf)/(xf*xf-2.0*xf+1)
        e = (yf*(1-2.0*xf))/(xf*xf-2.0*xf+1)
        TE_thickness = (c*x_on_c*x_on_c + d*x_on_c + e) * (x_on_c>xf)
        half_thickness = 0.5*LE_thickness + 0.5*TE_thickness
        # snap near-zero thickness to exactly zero (sharp edges)
        half_thickness[half_thickness<1.0e-4]=0.0
        # random camber control points (x1, y1), crossing point xm, (x2, y2)
        x1 = 0.40 * np.random.rand() + 0.10
        y1 = ((0.08 - 0.0001) * np.random.rand() + 0.0001)*np.sign(-np.random.rand()+0.75)
        xm = 0.30 * np.random.rand() + 0.65
        if xm >= 0.80:
            # degenerate case: single-bump camber all the way to the TE
            xm = 1.0
            x2 = 1.1
            y2 = 0.0
        else:
            x2 = 0.10 * np.random.rand() + 0.85
            y2 = -((0.03 - 0.0001) * np.random.rand() + 0.0001)*np.sign(y1)
        # piecewise quadratic camber segments, masked to their x-ranges
        f1 = (2.0*y1*x_on_c)/x1 - (y1*x_on_c*x_on_c)/(x1*x1)
        f2 = (-y1*x_on_c*x_on_c)/(x1*x1-2.0*x1*xm+xm*xm) + (2.0*x1*y1*x_on_c)/(x1*x1-2.0*x1*xm+xm*xm) - (y1*xm*(2.0*x1-xm))/(x1*x1-2.0*x1*xm+xm*xm)
        f3 = (-y2*x_on_c*x_on_c)/((x2-xm)*(x2-xm)) + (2.0*x2*y2*x_on_c)/((x2-xm)*(x2-xm)) - (y2*xm*(2.0*x2-xm))/((x2-xm)*(x2-xm))
        f4 = (-y2*x_on_c*x_on_c)/(x2*x2-2.0*x2+1.0) + (2.0*x2*y2*x_on_c)/(x2*x2-2.0*x2+1.0) - (y2*(2.0*x2-1.0))/(x2*x2-2.0*x2+1.0)
        f1 = f1 * (x_on_c>=0.0) * (x_on_c<x1)
        f2 = f2 * (x_on_c>=x1) * (x_on_c<=xm)
        f3 = f3 * (x_on_c>xm) * (x_on_c<=x2)
        f4 = f4 * (x_on_c>x2) * (x_on_c<=1.0)
        camber_line = f1+f2+f3+f4
        camber_line[abs(camber_line)<1.0e-4]=0.0
        y_upper = camber_line + half_thickness
        y_lower = camber_line - half_thickness
        # lower surface TE->LE then upper surface LE->TE (skip shared LE node)
        y_coords = np.concatenate((y_lower, np.flip(y_upper)[1:]))
        # force a closed, sharp trailing edge
        y_coords[0] = 0.0
        y_coords[-1] = 0.0
        return y_coords, camber_line

    # Gets the locations of the control points
    # @param X coords of panels
    # @param Y coords of panels
    def get_control_points(self, x_coords, y_coords):
        """Return the panel midpoints (collocation points)."""
        control_x_coords = x_coords[1:]-0.5*np.diff(x_coords)
        control_y_coords = y_coords[1:]-0.5*np.diff(y_coords)
        return control_x_coords, control_y_coords

    # Solve the normal vectors for each panel
    # @param X coords of panels
    # @param Y coords of panels
    def get_normal(self, x_coords, y_coords):
        """Return unit normals (2 x n_panels) obtained by rotating each
        panel's tangent vector 90 degrees counter-clockwise."""
        x_dirn = np.diff(x_coords).reshape(len(x_coords)-1,1)
        y_dirn = np.diff(y_coords).reshape(len(y_coords)-1,1)
        tangent = np.transpose(np.concatenate((x_dirn, y_dirn), axis=1))
        rotation = np.array([[0.0, -1.0],[1.0, 0.0]])
        normal = np.matmul(rotation, tangent)
        normal = normal / np.sqrt(normal[0,:]**2.0 + normal[1,:]**2.0)
        return normal

    # Solve the length of each panel
    # @param X coords of panels
    # @param Y coords of panels
    def get_length(self, x_coords, y_coords):
        """Return the Euclidean length of each panel."""
        lengths = (np.diff(y_coords)**2.0+np.diff(x_coords)**2.0)**0.50
        return lengths

    # Solves the orientation angle between each panel and the x-axis
    # @param X coords of panels
    # @param Y coords of panels
    # @param Length of each panel
    def get_angles(self, x_coords, y_coords, lengths):
        """Return each panel's orientation angle vs the x-axis, in radians.

        NOTE(review): `lengths` is unused here; kept for interface
        stability with existing callers.
        """
        theta = np.arctan2(np.diff(y_coords), np.diff(x_coords))
        return theta

    # Renders and save the panels
    # @param save path
    # @param name of airfoil
    def draw(self, path, airfoil_name=''):
        """Plot the airfoil (nodes, control points, camber line, normals)
        and save it as the next unused airfoil_<num>.png in `path`.

        @param path: output directory (created if missing).
        @param airfoil_name: plot title; if it contains 'rebuilt' the
               image is saved alongside the previous airfoil number as
               airfoil_<num-1>_rebuilt.png.
        """
        if not os.path.isdir(path):
            os.mkdir(path)
        # find the first unused sequential file number
        num = 0
        done = False
        while not done:
            done = not (os.path.exists(path + "/airfoil_" + str(num) + ".png"))
            num = num + 1
        num = num - 1
        if 'rebuilt' in airfoil_name.lower():
            path = path + "/airfoil_" + str(num-1) + "_rebuilt.png"
        else:
            path = path + "/airfoil_" + str(num) + ".png"
        plt.close()
        # arrow segments of length 0.04 along each panel normal
        normal_x_coords_start = self.x_coords[1:]-0.5*np.diff(self.x_coords)
        normal_y_coords_start = self.y_coords[1:]-0.5*np.diff(self.y_coords)
        normal_x_coords_end = normal_x_coords_start + 0.04 * self.normal[0,:]
        normal_y_coords_end = normal_y_coords_start + 0.04 * self.normal[1,:]
        plt.plot(self.x_coords[0:len(self.x_coords)//2+1], self.camber_line, lw=2.0, ls="--", c='r')
        plt.axhline(0.0, lw=2.0, c='r')
        plt.plot(self.x_coords, self.y_coords, color='b', lw=2.0)
        plt.plot(self.control_x_coords, self.control_y_coords, 'd', color='g', markersize=7)
        plt.plot(self.x_coords, self.y_coords, 'o', color='k', markersize=7)
        for i in range(len(normal_x_coords_start)):
            x_points = np.array([normal_x_coords_start[i],normal_x_coords_end[i]])
            y_points = np.array([normal_y_coords_start[i],normal_y_coords_end[i]])
            plt.plot(x_points, y_points, color='k', lw=2.0)
            # orient a triangle marker along the arrow direction, per quadrant
            y_diff = np.diff(y_points)
            x_diff = np.diff(x_points)
            tangent = np.arctan(y_diff / x_diff)[0]*180.0/np.pi
            if x_diff >= 0.0 and y_diff >= 0.0:
                angle = -(90.0 - tangent)
            elif x_diff < 0.0 and y_diff > 0.0:
                angle = (90.0 - abs(tangent))
            elif x_diff < 0.0 and y_diff < 0.0:
                angle = -(90.0 - tangent) + 180.0
            elif x_diff > 0.0 and y_diff < 0.0:
                angle = (90.0 - abs(tangent)) + 180.0
            t = mpl.markers.MarkerStyle(marker='^')
            # NOTE(review): touches a private matplotlib attribute
            # (MarkerStyle._transform); may break on matplotlib upgrades.
            t._transform = t.get_transform().rotate_deg(angle)
            plt.plot(normal_x_coords_end[i], normal_y_coords_end[i], marker=t, color='k', markersize=8)
        plt.xlabel("X/C [-]", fontsize="large")
        plt.ylabel("Y/C [-]", fontsize="large")
        if airfoil_name == '':
            plt.title('Airfoil', fontsize="xx-large")
        else:
            plt.title(airfoil_name, fontsize="xx-large")
        plt.xlim([-0.05, 1.05])
        plt.ylim([-0.25, 0.20])
        plt.xticks([0, 0.2, 0.4, 0.6, 0.8, 1.0],fontsize='large')
        plt.yticks([-0.25, -0.15, -0.05, 0.05, 0.15, 0.25],fontsize='large')
        plt.gcf().set_size_inches(8,2.8)
        plt.savefig(path, dpi=200)
class Solver:
    """Potential-flow panel-method solver for a Panels airfoil.

    Computes surface velocities, pressure coefficients and the integrated
    lift/drag/moment coefficients, plus lift/moment curve fits and plots.
    """

    def __init__(self):
        # results of the most recent solve (0.0 until first use)
        self.panels=0.0
        self.alpha=0.0
        self.v_panels=0.0
        self.cp=0.0
        self.Cl=0.0
        self.Cdp=0.0
        self.Cmc4=0.0

    # Solves the total local velocity at each control point based on linear varying vortex panel method
    # @param Angle of attack
    # @param Panels object that defines airfoil geometry
    def get_velocity_vp(self, alpha, panels):
        """Linear-strength vortex panel method: build the normal- and
        tangential-influence coefficient matrices, solve for the nodal
        vortex strengths, and return the tangential velocity at each
        control point (also stored in self.v_panels).

        @param alpha: angle of attack in degrees.
        @param panels: Panels instance describing the geometry.
        """
        Cn1 = np.zeros((len(panels.control_x_coords),len(panels.control_x_coords)))
        Cn2 = np.zeros((len(panels.control_x_coords),len(panels.control_x_coords)))
        Ct1 = np.zeros((len(panels.control_x_coords),len(panels.control_x_coords)))
        Ct2 = np.zeros((len(panels.control_x_coords),len(panels.control_x_coords)))
        for i in range(len(panels.control_x_coords)):
            xi = panels.control_x_coords[i]
            yi = panels.control_y_coords[i]
            theta_i = panels.theta[i]
            for j in range(len(panels.control_x_coords)):
                theta_j = panels.theta[j]
                Sj = panels.lengths[j]
                Xj = panels.x_coords[j]
                Yj = panels.y_coords[j]
                if i==j:
                    # self-influence of a panel on its own control point
                    Cn2[i,j] = 1.0
                    Cn1[i,j] = -1.0
                    Ct2[i,j] = np.pi/2.0
                    Ct1[i,j] = np.pi/2.0
                else:
                    A = -(xi - Xj)*np.cos(theta_j) - (yi - Yj)*np.sin(theta_j)
                    B = (xi - Xj)**2.0 + (yi - Yj)**2.0
                    C = np.sin(theta_i - theta_j)
                    D = np.cos(theta_i - theta_j)
                    E = (xi - Xj)*np.sin(theta_j) - (yi - Yj)*np.cos(theta_j)
                    F = np.log(1.0 + (Sj**2.0 + 2.0*A*Sj)/B)
                    G = np.arctan2(E*Sj, (B + A*Sj))
                    P = (xi - Xj)*np.sin(theta_i - 2.0*theta_j) + (yi - Yj)*np.cos(theta_i - 2.0*theta_j)
                    Q = (xi - Xj)*np.cos(theta_i - 2.0*theta_j) + (yi - Yj)*np.sin(theta_i - 2.0*theta_j)
                    Cn2[i,j] = D+0.5*Q*F/Sj-(A*C+D*E)*G/Sj
                    Cn1[i,j] = 0.5*D*F+C*G-Cn2[i,j]
                    Ct2[i,j] = C+0.5*P*F/Sj+(A*D-C*E)*G/Sj
                    Ct1[i,j] = 0.5*C*F-D*G-Ct2[i,j]
        # assemble the (n+1)x(n+1) system over nodal strengths
        aerodynamic_matrix = np.zeros((len(panels.x_coords),len(panels.x_coords)))
        tangential_matrix = np.zeros((len(panels.x_coords)-1,len(panels.x_coords)))
        for i in range(len(panels.x_coords)):
            for j in range(len(panels.x_coords)):
                if j == 0 and i != panels.n_panels:
                    aerodynamic_matrix[i,j] = Cn1[i,j]
                    tangential_matrix[i,j] = Ct1[i,j]
                elif j > 0 and j < panels.n_panels and i != panels.n_panels:
                    aerodynamic_matrix[i,j] = Cn1[i,j] + Cn2[i,j-1]
                    tangential_matrix[i,j] = Ct1[i,j] + Ct2[i,j-1]
                elif j == panels.n_panels and i != panels.n_panels:
                    aerodynamic_matrix[i,j] = Cn2[i,j-1]
                    tangential_matrix[i,j] = Ct2[i,j-1]
                elif i == panels.n_panels and (j == 0 or j == panels.n_panels):
                    # closing equation: first + last nodal strength = 0
                    # (trailing-edge/Kutta condition, presumably — the RHS
                    # for this row is the appended 0.0 below)
                    aerodynamic_matrix[i,j] = 1.0
        free_stream_matrix = np.sin(panels.theta - alpha*(np.pi/180.0))
        free_stream_matrix = np.append(free_stream_matrix, 0.0)
        gamma_prime = np.linalg.solve(aerodynamic_matrix,free_stream_matrix)
        self.v_panels = np.matmul(tangential_matrix, gamma_prime) + np.cos(panels.theta - alpha*(np.pi/180.0))
        return self.v_panels

    # Solves the total local velocity at each control point based on vortex source method
    # @param Angle of attack
    # @param Panels object that defines airfoil geometry
    def get_velocity_spvp(self, alpha, panels):
        """Source/vortex panel method: constant-strength sources per panel
        plus a single vortex strength, closed by a trailing-edge
        tangency equation. Returns the tangential velocity at each
        control point (also stored in self.v_panels).

        @param alpha: angle of attack in degrees.
        @param panels: Panels instance describing the geometry.
        """
        Iij = np.zeros((panels.n_panels,panels.n_panels))
        Jij = np.zeros((panels.n_panels,panels.n_panels))
        Kij = np.zeros((panels.n_panels,panels.n_panels))
        Lij = np.zeros((panels.n_panels,panels.n_panels))
        for i in range(panels.n_panels):
            xi = panels.control_x_coords[i]
            yi = panels.control_y_coords[i]
            theta_i = panels.theta[i]
            c_theta_i = np.cos(theta_i)
            s_theta_i = np.sin(theta_i)
            for j in range(panels.n_panels):
                theta_j = panels.theta[j]
                c_theta_j = np.cos(theta_j)
                s_theta_j = np.sin(theta_j)
                Sj = panels.lengths[j]
                Xj = panels.x_coords[j]
                Yj = panels.y_coords[j]
                A = -(xi-Xj)*c_theta_j-(yi-Yj)*s_theta_j
                B = (xi-Xj)**2.0+(yi-Yj)**2.0
                Ci = np.sin(theta_i-theta_j)
                Cj = -np.cos(theta_i-theta_j)
                Cl = np.sin(theta_j-theta_i)
                Di = -(xi-Xj)*s_theta_i+(yi-Yj)*c_theta_i
                Dj = (xi-Xj)*c_theta_i+(yi-Yj)*s_theta_i
                Dl = (xi-Xj)*s_theta_i-(yi-Yj)*c_theta_i
                # guard against negative rounding error under the sqrt
                if B-A*A >= 0.0:
                    E = np.sqrt(B-A*A)
                else:
                    E = 0.0
                if B == 0.0 or E == 0.0:
                    # degenerate geometry: zero out the influence terms
                    Iij[i,j] = 0.0
                    Jij[i,j] = 0.0
                    Kij[i,j] = 0.0
                    Lij[i,j] = 0.0
                else:
                    term1 = np.log((Sj*Sj+2.0*A*Sj+B)/B)/2.0
                    term2 = (np.arctan2((Sj+A),E)-np.arctan2(A,E))/E
                    Iij[i,j] = Ci*term1+(Di-A*Ci)*term2
                    Jij[i,j] = Cj*term1+(Dj-A*Cj)*term2
                    Kij[i,j] = Jij[i,j]
                    Lij[i,j] = Cl*term1+(Dl-A*Cl)*term2
        # n panel-source strengths + 1 vortex strength
        aerodynamic_matrix = np.zeros((panels.n_panels+1,panels.n_panels+1))
        for i in range(panels.n_panels+1):
            for j in range(panels.n_panels+1):
                if i == panels.n_panels:
                    if j == panels.n_panels:
                        aerodynamic_matrix[i,j] = -(np.sum(Lij[0,:]) + np.sum(Lij[panels.n_panels-1,:])) + 2.0*np.pi
                    else:
                        aerodynamic_matrix[i,j] = Jij[0,j] + Jij[panels.n_panels-1,j]
                elif j == panels.n_panels:
                    aerodynamic_matrix[i,j] = -np.sum(Kij[i,:])
                elif i == j:
                    aerodynamic_matrix[i,j] = np.pi
                else:
                    aerodynamic_matrix[i,j] = Iij[i,j]
        # angle between panel normal and free stream, wrapped to [0, 2*pi]
        beta = panels.theta + np.pi/2.0 - alpha*(np.pi/180.0)
        beta[beta > 2.0*np.pi] = beta[beta > 2.0*np.pi] - 2.0*np.pi
        free_stream_matrix = -2.0*np.pi*np.cos(beta)
        free_stream_matrix = np.append(free_stream_matrix, -2.0*np.pi*(np.sin(beta[0]) + np.sin(beta[panels.n_panels-1])))
        source_vortex_soln = np.linalg.solve(aerodynamic_matrix,free_stream_matrix)
        # tangential velocity = free stream + source + vortex contributions
        self.v_panels = np.zeros(panels.n_panels)
        for i in range(panels.n_panels):
            term1 = np.sin(beta[i])
            term2 = 1.0 / (2.0*np.pi) * np.sum(source_vortex_soln[0:-1]*Jij[i,:])
            term3 = source_vortex_soln[-1] / 2.0
            term4 = -(source_vortex_soln[-1] / (2.0*np.pi))*np.sum(Lij[i,:])
            self.v_panels[i] = term1 + term2 + term3 + term4
        return self.v_panels

    # Solves the lift, drag, and moment coefficients
    # @param Angle of attack
    # @param Panels object that defines airfoil geometry
    def get_aerodynamics(self, alpha, panels):
        """Solve the flow (via get_velocity_spvp) and integrate the surface
        pressures into force/moment coefficients.

        @param alpha: angle of attack in degrees.
        @param panels: Panels instance describing the geometry.
        @return: (Cl, Cdp, Cmc4, cp) — lift, pressure drag, quarter-chord
                 moment coefficients and the per-panel cp array. Results
                 are also stored on self.
        """
        self.alpha = alpha
        self.panels = panels
        v_panels = self.get_velocity_spvp(alpha, panels)
        # Bernoulli: cp = 1 - (V/V_inf)^2
        self.cp = 1.0 - v_panels**2.0
        Cf = -self.cp * panels.lengths * panels.normal
        Cfnet = np.sum(Cf, axis=1)
        Ca = Cfnet[0]
        Cn = Cfnet[1]
        # moment of each panel force about the quarter-chord point
        self.Cmc4 = 0.0
        for i in range(len(panels.control_x_coords)):
            ra = panels.control_x_coords[i] - 0.25
            rn = panels.control_y_coords[i]
            dca = Cf[0,i]
            dcn = Cf[1,i]
            self.Cmc4 = self.Cmc4 - (dcn*ra-dca*rn)
        # rotate body-axis (axial, normal) forces into wind axes
        self.Cl = Cn*np.cos(alpha*np.pi/180.0) - Ca*np.sin(alpha*np.pi/180.0)
        self.Cdp = Cn*np.sin(alpha*np.pi/180.0) + Ca*np.cos(alpha*np.pi/180.0)
        return self.Cl, self.Cdp, self.Cmc4, self.cp

    # Calculates the lift and moment curves of a set of panels
    def get_curves(self, panels, n_points):
        """Sweep alpha from -5 to 15 degrees, fit a line to the lift curve
        (least squares) and a quadratic to the moment curve, and record
        the mean chordwise location of the minimum cp on each surface.

        @param panels: Panels instance describing the geometry.
        @param n_points: number of alpha samples.
        @return: (curve_parameters, alpha_curve, lift_curve, moment_curve)
                 where curve_parameters = [lift slope, zero-lift angle
                 (deg), moment quadratic c0/c1/c2, mean min-cp x upper,
                 mean min-cp x lower].
        """
        alpha_curve = np.linspace(-5, 15, n_points)
        # normal equations for the quadratic moment fit (alpha in radians)
        A = np.zeros((3,3))
        A[0,0] = len(alpha_curve)
        A[1,0] = sum((np.array(alpha_curve)*(np.pi/180.0)))
        A[2,0] = sum((np.array(alpha_curve)*(np.pi/180.0))**2.0)
        A[0,1] = sum((np.array(alpha_curve)*(np.pi/180.0)))
        A[1,1] = sum((np.array(alpha_curve)*(np.pi/180.0))**2.0)
        A[2,1] = sum((np.array(alpha_curve)*(np.pi/180.0))**3.0)
        A[0,2] = sum((np.array(alpha_curve)*(np.pi/180.0))**2.0)
        A[1,2] = sum((np.array(alpha_curve)*(np.pi/180.0))**3.0)
        A[2,2] = sum((np.array(alpha_curve)*(np.pi/180.0))**4.0)
        lift_curve = []
        moment_curve = []
        min_upper_cp_loc = []
        min_lower_cp_loc = []
        for j in range(n_points):
            Cl, Cd, Cm_c4, cp = self.get_aerodynamics(alpha_curve[j],panels)
            # cp is ordered lower surface first, then upper surface
            upper_cp = cp[panels.n_panels//2:]
            lower_cp = cp[0:panels.n_panels//2]
            lift_curve.append(Cl)
            moment_curve.append(Cm_c4)
            min_upper_cp_loc.append(panels.control_x_coords[np.argmin(upper_cp)+panels.n_panels//2])
            min_lower_cp_loc.append(panels.control_x_coords[np.argmin(lower_cp)])
        min_upper_cp_loc = np.mean(min_upper_cp_loc)
        min_lower_cp_loc = np.mean(min_lower_cp_loc)
        # least-squares slope and zero-lift angle of the lift curve
        a = len(alpha_curve)*sum(np.array(alpha_curve)*(np.pi/180.0)*np.array(lift_curve))
        b = sum(np.array(alpha_curve)*(np.pi/180.0))*sum(np.array(lift_curve))
        c = len(alpha_curve)*sum((np.array(alpha_curve)*(np.pi/180.0))**2.0)
        d = sum(np.array(alpha_curve)*(np.pi/180.0))**2.0
        lift_slope = (a-b) / (c-d)
        e = sum(np.array(lift_curve))
        f = lift_slope * sum(np.array(alpha_curve)*(np.pi/180.0))
        g = len(alpha_curve)
        zero_lift_angle = 180.0*(f-e) / (g*lift_slope*np.pi)
        B = np.zeros((3))
        B[0] = sum(np.array(moment_curve))
        B[1] = sum(np.array(moment_curve) * np.array(alpha_curve) * (np.pi/180.0))
        B[2] = sum(np.array(moment_curve) * (np.array(alpha_curve) * (np.pi/180.0))**2.0)
        C = np.linalg.solve(A,B)
        curve_parameters = np.zeros(7)
        curve_parameters[0] = lift_slope
        curve_parameters[1] = zero_lift_angle
        curve_parameters[2] = C[0]
        curve_parameters[3] = C[1]
        curve_parameters[4] = C[2]
        curve_parameters[5] = min_upper_cp_loc
        curve_parameters[6] = min_lower_cp_loc
        return curve_parameters, alpha_curve, lift_curve, moment_curve

    # Draws the lift and moment curves
    def draw_curves(self, path, panels, name='', estimated_performance=[], rebuilt_panels=0.0):
        """Plot lift and moment curves and save them under `path` with a
        sequential file number.

        Three variants, chosen by the optional arguments: original-only,
        original vs estimated (from the 7-element estimated_performance
        fit parameters), and original vs rebuilt (from a second Panels
        instance). NOTE(review): estimated_performance defaults to a
        mutable list; it is only read here, so the shared-default hazard
        is dormant — confirm before mutating it.
        """
        real_performance, alpha_curve, lift_curve, moment_curve = self.get_curves(panels, 50)
        plot_rebuilt = False
        if isinstance(rebuilt_panels, Panels):
            rebuilt_performance, _, rebuilt_lift_curve, rebuilt_moment_curve = self.get_curves(rebuilt_panels, 50)
            plot_rebuilt = True
        plot_estimated = False
        if len(estimated_performance)==7:
            # reconstruct curves from the fit parameters (slope/zero-lift
            # angle for lift, quadratic coefficients for moment)
            estimated_lift_curve = estimated_performance[0] * (alpha_curve*(np.pi/180.0) - estimated_performance[1]*(np.pi/180.0))
            estimated_moment_curve = estimated_performance[2] + estimated_performance[3]*(alpha_curve*(np.pi/180.0)) + estimated_performance[4]*(alpha_curve*(np.pi/180.0))**2.0
            plot_estimated = True
        if not os.path.isdir(path):
            os.mkdir(path)
        # find the first unused sequential file number for this variant
        num = 0
        done = False
        while not done:
            if not plot_rebuilt and not plot_estimated:
                done = not (os.path.exists(path + "/lift_" + str(num) + ".png"))
            elif not plot_rebuilt and plot_estimated:
                done = not (os.path.exists(path + "/estimated_lift_" + str(num) + ".png"))
            elif plot_rebuilt and not plot_estimated:
                done = not (os.path.exists(path + "/rebuilt_lift_" + str(num) + ".png"))
            elif plot_rebuilt and plot_estimated:
                done = not (os.path.exists(path + "/estimated_lift_" + str(num) + ".png"))
                done = done and not (os.path.exists(path + "/rebuilt_lift_" + str(num) + ".png"))
            num = num + 1
        num = num - 1
        if not plot_rebuilt and not plot_estimated:
            lift_path = path + "/lift_" + str(num) + ".png"
        if plot_estimated:
            lift_path_estimated = path + "/estimated_lift_" + str(num) + ".png"
        if plot_rebuilt:
            lift_path_rebuilt = path + "/rebuilt_lift_" + str(num) + ".png"
        # --- lift curve, original only ---
        if not plot_rebuilt and not plot_estimated:
            plt.close()
            plt.axhline(0.0, color='k', lw=0.75)
            plt.axvline(0.0, color='k', lw=0.75)
            plt.plot(alpha_curve, lift_curve, c='b', lw=2.5)
            plt.xlabel("Angle of Attack [deg]", fontsize="x-large")
            plt.ylabel(r'$C_{l}$'+' [-]', fontsize="x-large")
            if name != '':
                plt.title("Lift Curve for "+name, fontsize="xx-large")
            else:
                plt.title("Lift Curve", fontsize="xx-large")
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*1.0+np.min(lift_curve), r'$x_{p_{min,u}}$'+' = '+str(round(real_performance[5],2)),fontsize='large')
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*0.9+np.min(lift_curve), r'$x_{p_{min,l}}$'+' = '+str(round(real_performance[6],2)),fontsize='large')
            if np.min(lift_curve) < 0.0:
                plt.ylim([1.1*np.min(lift_curve), 1.1*np.max(lift_curve)])
            else:
                plt.ylim([0.9*np.min(lift_curve), 1.1*np.max(lift_curve)])
            plt.xticks(fontsize='x-large')
            plt.yticks(fontsize='x-large')
            plt.gcf().set_size_inches(8,5.6)
            plt.savefig(lift_path, dpi=200)
        # --- lift curve, original vs estimated ---
        if plot_estimated:
            plt.close()
            plt.axhline(0.0, color='k', lw=0.75)
            plt.axvline(0.0, color='k', lw=0.75)
            plt.plot(alpha_curve, lift_curve, c='b', lw=2.5, label='Original')
            plt.plot(alpha_curve, estimated_lift_curve, c='r', lw=2.5, label='Estimated', ls='--')
            plt.xlabel("Angle of Attack [deg]", fontsize="x-large")
            plt.ylabel(r'$C_{l}$'+' [-]', fontsize="x-large")
            if name != '':
                plt.title("Lift Curve for "+name, fontsize="xx-large")
            else:
                plt.title("Lift Curve", fontsize="xx-large")
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*1.0+np.min(lift_curve), r'$x_{p_{min,u}}$'+' = '+str(round(real_performance[5],2)),fontsize='large')
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*0.8+np.min(lift_curve), r'$x_{p_{min,l}}$'+' = '+str(round(real_performance[6],2)),fontsize='large')
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*0.9+np.min(lift_curve), r'$\overline{x_{p_{min,u}}}$'+' = '+str(round(estimated_performance[5],2)),fontsize='large')
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*0.7+np.min(lift_curve), r'$\overline{x_{p_{min,l}}}$'+' = '+str(round(estimated_performance[6],2)),fontsize='large')
            plt.legend(fontsize='x-large',loc='lower right')
            plt.xticks(fontsize='x-large')
            plt.yticks(fontsize='x-large')
            if np.min(lift_curve) < 0.0:
                plt.ylim([1.1*np.min(lift_curve), 1.1*np.max(lift_curve)])
            else:
                plt.ylim([0.9*np.min(lift_curve), 1.1*np.max(lift_curve)])
            plt.gcf().set_size_inches(8,5.6)
            plt.savefig(lift_path_estimated, dpi=200)
        # --- lift curve, original vs rebuilt ---
        if plot_rebuilt:
            plt.close()
            plt.axhline(0.0, color='k', lw=0.75)
            plt.axvline(0.0, color='k', lw=0.75)
            plt.plot(alpha_curve, lift_curve, c='b', lw=2.5, label='Original')
            plt.plot(alpha_curve, rebuilt_lift_curve, c='r', lw=2.5, label='Rebuilt', ls='--')
            plt.xlabel("Angle of Attack [deg]", fontsize="x-large")
            plt.ylabel(r'$C_{l}$'+' [-]', fontsize="x-large")
            if name != '':
                plt.title("Lift Curve for "+name, fontsize="xx-large")
            else:
                plt.title("Lift Curve", fontsize="xx-large")
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*1.0+np.min(lift_curve), r'$x_{p_{min,u}}$'+' = '+str(round(real_performance[5],2)),fontsize='large')
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*0.8+np.min(lift_curve), r'$x_{p_{min,l}}$'+' = '+str(round(real_performance[6],2)),fontsize='large')
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*0.9+np.min(lift_curve), r'$\overline{x_{p_{min,u}}}$'+' = '+str(round(rebuilt_performance[5],2)),fontsize='large')
            plt.text(-5.2, (np.max(lift_curve)-np.min(lift_curve))*0.7+np.min(lift_curve), r'$\overline{x_{p_{min,l}}}$'+' = '+str(round(rebuilt_performance[6],2)),fontsize='large')
            plt.legend(fontsize='x-large',loc='lower right')
            plt.xticks(fontsize='x-large')
            plt.yticks(fontsize='x-large')
            if np.min(lift_curve) < 0.0:
                plt.ylim([1.1*np.min(lift_curve), 1.1*np.max(lift_curve)])
            else:
                plt.ylim([0.9*np.min(lift_curve), 1.1*np.max(lift_curve)])
            plt.gcf().set_size_inches(8,5.6)
            plt.savefig(lift_path_rebuilt, dpi=200)
        if not plot_rebuilt and not plot_estimated:
            moment_path = path + "/moment_" + str(num) + ".png"
        if plot_estimated:
            moment_path_estimated = path + "/estimated_moment_" + str(num) + ".png"
        if plot_rebuilt:
            moment_path_rebuilt = path + "/rebuilt_moment_" + str(num) + ".png"
        # --- moment curve, original only ---
        if not plot_rebuilt and not plot_estimated:
            plt.close()
            plt.axhline(0.0, color='k', lw=0.75)
            plt.axvline(0.0, color='k', lw=0.75)
            plt.plot(alpha_curve, moment_curve, c='b', lw=2.5)
            plt.xlabel("Angle of Attack [deg]", fontsize="x-large")
            plt.ylabel(r'$C_{m}$'+' [-]', fontsize="x-large")
            if name != '':
                plt.title("Moment Curve for "+name, fontsize="xx-large")
            else:
                plt.title("Moment Curve", fontsize="xx-large")
            plt.xticks(fontsize='x-large')
            plt.yticks(fontsize='x-large')
            plt.gcf().set_size_inches(8,5.6)
            plt.savefig(moment_path, dpi=200)
        # --- moment curve, original vs estimated ---
        if plot_estimated:
            plt.close()
            plt.axhline(0.0, color='k', lw=0.75)
            plt.axvline(0.0, color='k', lw=0.75)
            plt.plot(alpha_curve, moment_curve, c='b', lw=2.5, label='Original')
            plt.plot(alpha_curve, estimated_moment_curve, c='r', lw=2.5, label='Estimated', ls='--')
            plt.xlabel("Angle of Attack [deg]", fontsize="x-large")
            plt.ylabel(r'$C_{m}$'+' [-]', fontsize="x-large")
            if name != '':
                plt.title("Moment Curve for "+name, fontsize="xx-large")
            else:
                plt.title("Moment Curve", fontsize="xx-large")
            plt.legend(fontsize='x-large')
            plt.xticks(fontsize='x-large')
            plt.yticks(fontsize='x-large')
            plt.gcf().set_size_inches(8,5.6)
            plt.savefig(moment_path_estimated, dpi=200)
        # --- moment curve, original vs rebuilt ---
        if plot_rebuilt:
            plt.close()
            plt.axhline(0.0, color='k', lw=0.75)
            plt.axvline(0.0, color='k', lw=0.75)
            plt.plot(alpha_curve, moment_curve, c='b', lw=2.5, label='Original')
            plt.plot(alpha_curve, rebuilt_moment_curve, c='r', lw=2.5, label='Rebuilt', ls='--')
            plt.xlabel("Angle of Attack [deg]", fontsize="x-large")
            plt.ylabel(r'$C_{m}$'+' [-]', fontsize="x-large")
            if name != '':
                plt.title("Moment Curve for "+name, fontsize="xx-large")
            else:
                plt.title("Moment Curve", fontsize="xx-large")
            plt.legend(fontsize='x-large')
            plt.xticks(fontsize='x-large')
            plt.yticks(fontsize='x-large')
            plt.gcf().set_size_inches(8,5.6)
            plt.savefig(moment_path_rebuilt, dpi=200) | [
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"numpy.arange",
"numpy.mean",
"numpy.flip",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.p... | [((2410, 2426), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (2419, 2426), True, 'import numpy as np\n'), ((2542, 2586), 'numpy.concatenate', 'np.concatenate', (['(bot_coords, top_coords[1:])'], {}), '((bot_coords, top_coords[1:]))\n', (2556, 2586), True, 'import numpy as np\n'), ((5820, 5855), 'numpy.array', 'np.array', (['[[0.0, -1.0], [1.0, 0.0]]'], {}), '([[0.0, -1.0], [1.0, 0.0]])\n', (5828, 5855), True, 'import numpy as np\n'), ((5880, 5908), 'numpy.matmul', 'np.matmul', (['rotation', 'tangent'], {}), '(rotation, tangent)\n', (5889, 5908), True, 'import numpy as np\n'), ((7188, 7199), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7197, 7199), True, 'import matplotlib.pyplot as plt\n'), ((7637, 7668), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.0)'], {'lw': '(2.0)', 'c': '"""r"""'}), "(0.0, lw=2.0, c='r')\n", (7648, 7668), True, 'import matplotlib.pyplot as plt\n'), ((7677, 7734), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x_coords', 'self.y_coords'], {'color': '"""b"""', 'lw': '(2.0)'}), "(self.x_coords, self.y_coords, color='b', lw=2.0)\n", (7685, 7734), True, 'import matplotlib.pyplot as plt\n'), ((7743, 7831), 'matplotlib.pyplot.plot', 'plt.plot', (['self.control_x_coords', 'self.control_y_coords', '"""d"""'], {'color': '"""g"""', 'markersize': '(7)'}), "(self.control_x_coords, self.control_y_coords, 'd', color='g',\n markersize=7)\n", (7751, 7831), True, 'import matplotlib.pyplot as plt\n'), ((7836, 7904), 'matplotlib.pyplot.plot', 'plt.plot', (['self.x_coords', 'self.y_coords', '"""o"""'], {'color': '"""k"""', 'markersize': '(7)'}), "(self.x_coords, self.y_coords, 'o', color='k', markersize=7)\n", (7844, 7904), True, 'import matplotlib.pyplot as plt\n'), ((9010, 9049), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X/C [-]"""'], {'fontsize': '"""large"""'}), "('X/C [-]', fontsize='large')\n", (9020, 9049), True, 'import matplotlib.pyplot as plt\n'), ((9058, 9097), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Y/C [-]"""'], {'fontsize': '"""large"""'}), "('Y/C [-]', fontsize='large')\n", (9068, 9097), True, 'import matplotlib.pyplot as plt\n'), ((9271, 9294), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (9279, 9294), True, 'import matplotlib.pyplot as plt\n'), ((9303, 9325), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.25, 0.2]'], {}), '([-0.25, 0.2])\n', (9311, 9325), True, 'import matplotlib.pyplot as plt\n'), ((9335, 9393), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 0.2, 0.4, 0.6, 0.8, 1.0]'], {'fontsize': '"""large"""'}), "([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize='large')\n", (9345, 9393), True, 'import matplotlib.pyplot as plt\n'), ((9401, 9470), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-0.25, -0.15, -0.05, 0.05, 0.15, 0.25]'], {'fontsize': '"""large"""'}), "([-0.25, -0.15, -0.05, 0.05, 0.15, 0.25], fontsize='large')\n", (9411, 9470), True, 'import matplotlib.pyplot as plt\n'), ((9528, 9554), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'dpi': '(200)'}), '(path, dpi=200)\n', (9539, 9554), True, 'import matplotlib.pyplot as plt\n'), ((12877, 12923), 'numpy.sin', 'np.sin', (['(panels.theta - alpha * (np.pi / 180.0))'], {}), '(panels.theta - alpha * (np.pi / 180.0))\n', (12883, 12923), True, 'import numpy as np\n'), ((12949, 12983), 'numpy.append', 'np.append', (['free_stream_matrix', '(0.0)'], {}), '(free_stream_matrix, 0.0)\n', (12958, 12983), True, 'import numpy as np\n'), ((13015, 13070), 'numpy.linalg.solve', 'np.linalg.solve', (['aerodynamic_matrix', 'free_stream_matrix'], {}), '(aerodynamic_matrix, free_stream_matrix)\n', (13030, 13070), True, 'import numpy as np\n'), ((13463, 13507), 'numpy.zeros', 'np.zeros', (['(panels.n_panels, panels.n_panels)'], {}), '((panels.n_panels, panels.n_panels))\n', (13471, 13507), True, 'import numpy as np\n'), ((13521, 13565), 'numpy.zeros', 'np.zeros', (['(panels.n_panels, panels.n_panels)'], {}), '((panels.n_panels, panels.n_panels))\n', (13529, 
13565), True, 'import numpy as np\n'), ((13579, 13623), 'numpy.zeros', 'np.zeros', (['(panels.n_panels, panels.n_panels)'], {}), '((panels.n_panels, panels.n_panels))\n', (13587, 13623), True, 'import numpy as np\n'), ((13637, 13681), 'numpy.zeros', 'np.zeros', (['(panels.n_panels, panels.n_panels)'], {}), '((panels.n_panels, panels.n_panels))\n', (13645, 13681), True, 'import numpy as np\n'), ((15420, 15472), 'numpy.zeros', 'np.zeros', (['(panels.n_panels + 1, panels.n_panels + 1)'], {}), '((panels.n_panels + 1, panels.n_panels + 1))\n', (15428, 15472), True, 'import numpy as np\n'), ((16578, 16633), 'numpy.linalg.solve', 'np.linalg.solve', (['aerodynamic_matrix', 'free_stream_matrix'], {}), '(aerodynamic_matrix, free_stream_matrix)\n', (16593, 16633), True, 'import numpy as np\n'), ((16666, 16691), 'numpy.zeros', 'np.zeros', (['panels.n_panels'], {}), '(panels.n_panels)\n', (16674, 16691), True, 'import numpy as np\n'), ((17529, 17547), 'numpy.sum', 'np.sum', (['Cf'], {'axis': '(1)'}), '(Cf, axis=1)\n', (17535, 17547), True, 'import numpy as np\n'), ((18258, 18287), 'numpy.linspace', 'np.linspace', (['(-5)', '(15)', 'n_points'], {}), '(-5, 15, n_points)\n', (18269, 18287), True, 'import numpy as np\n'), ((18309, 18325), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (18317, 18325), True, 'import numpy as np\n'), ((19486, 19511), 'numpy.mean', 'np.mean', (['min_upper_cp_loc'], {}), '(min_upper_cp_loc)\n', (19493, 19511), True, 'import numpy as np\n'), ((19539, 19564), 'numpy.mean', 'np.mean', (['min_lower_cp_loc'], {}), '(min_lower_cp_loc)\n', (19546, 19564), True, 'import numpy as np\n'), ((20129, 20140), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (20137, 20140), True, 'import numpy as np\n'), ((20371, 20392), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (20386, 20392), True, 'import numpy as np\n'), ((20428, 20439), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (20436, 20439), True, 'import numpy as np\n'), 
((3051, 3092), 'numpy.sqrt', 'np.sqrt', (['(xf / (m0 * (m0 * xf - 2.0 * yf)))'], {}), '(xf / (m0 * (m0 * xf - 2.0 * yf)))\n', (3058, 3092), True, 'import numpy as np\n'), ((5759, 5799), 'numpy.concatenate', 'np.concatenate', (['(x_dirn, y_dirn)'], {'axis': '(1)'}), '((x_dirn, y_dirn), axis=1)\n', (5773, 5799), True, 'import numpy as np\n'), ((5935, 5985), 'numpy.sqrt', 'np.sqrt', (['(normal[0, :] ** 2.0 + normal[1, :] ** 2.0)'], {}), '(normal[0, :] ** 2.0 + normal[1, :] ** 2.0)\n', (5942, 5985), True, 'import numpy as np\n'), ((6521, 6538), 'numpy.diff', 'np.diff', (['y_coords'], {}), '(y_coords)\n', (6528, 6538), True, 'import numpy as np\n'), ((6540, 6557), 'numpy.diff', 'np.diff', (['x_coords'], {}), '(x_coords)\n', (6547, 6557), True, 'import numpy as np\n'), ((6730, 6749), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6743, 6749), False, 'import os\n'), ((6763, 6777), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (6771, 6777), False, 'import os\n'), ((8002, 8062), 'numpy.array', 'np.array', (['[normal_x_coords_start[i], normal_x_coords_end[i]]'], {}), '([normal_x_coords_start[i], normal_x_coords_end[i]])\n', (8010, 8062), True, 'import numpy as np\n'), ((8085, 8145), 'numpy.array', 'np.array', (['[normal_y_coords_start[i], normal_y_coords_end[i]]'], {}), '([normal_y_coords_start[i], normal_y_coords_end[i]])\n', (8093, 8145), True, 'import numpy as np\n'), ((8157, 8204), 'matplotlib.pyplot.plot', 'plt.plot', (['x_points', 'y_points'], {'color': '"""k"""', 'lw': '(2.0)'}), "(x_points, y_points, color='k', lw=2.0)\n", (8165, 8204), True, 'import matplotlib.pyplot as plt\n'), ((8239, 8256), 'numpy.diff', 'np.diff', (['y_points'], {}), '(y_points)\n', (8246, 8256), True, 'import numpy as np\n'), ((8278, 8295), 'numpy.diff', 'np.diff', (['x_points'], {}), '(x_points)\n', (8285, 8295), True, 'import numpy as np\n'), ((8790, 8825), 'matplotlib.markers.MarkerStyle', 'mpl.markers.MarkerStyle', ([], {'marker': '"""^"""'}), "(marker='^')\n", (8813, 
8825), True, 'import matplotlib as mpl\n'), ((8901, 8997), 'matplotlib.pyplot.plot', 'plt.plot', (['normal_x_coords_end[i]', 'normal_y_coords_end[i]'], {'marker': 't', 'color': '"""k"""', 'markersize': '(8)'}), "(normal_x_coords_end[i], normal_y_coords_end[i], marker=t, color=\n 'k', markersize=8)\n", (8909, 8997), True, 'import matplotlib.pyplot as plt\n'), ((9141, 9182), 'matplotlib.pyplot.title', 'plt.title', (['"""Airfoil"""'], {'fontsize': '"""xx-large"""'}), "('Airfoil', fontsize='xx-large')\n", (9150, 9182), True, 'import matplotlib.pyplot as plt\n'), ((9209, 9253), 'matplotlib.pyplot.title', 'plt.title', (['airfoil_name'], {'fontsize': '"""xx-large"""'}), "(airfoil_name, fontsize='xx-large')\n", (9218, 9253), True, 'import matplotlib.pyplot as plt\n'), ((13094, 13135), 'numpy.matmul', 'np.matmul', (['tangential_matrix', 'gamma_prime'], {}), '(tangential_matrix, gamma_prime)\n', (13103, 13135), True, 'import numpy as np\n'), ((13138, 13184), 'numpy.cos', 'np.cos', (['(panels.theta - alpha * (np.pi / 180.0))'], {}), '(panels.theta - alpha * (np.pi / 180.0))\n', (13144, 13184), True, 'import numpy as np\n'), ((13881, 13896), 'numpy.cos', 'np.cos', (['theta_i'], {}), '(theta_i)\n', (13887, 13896), True, 'import numpy as np\n'), ((13921, 13936), 'numpy.sin', 'np.sin', (['theta_i'], {}), '(theta_i)\n', (13927, 13936), True, 'import numpy as np\n'), ((16404, 16416), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (16410, 16416), True, 'import numpy as np\n'), ((16753, 16768), 'numpy.sin', 'np.sin', (['beta[i]'], {}), '(beta[i])\n', (16759, 16768), True, 'import numpy as np\n'), ((19930, 19950), 'numpy.array', 'np.array', (['lift_curve'], {}), '(lift_curve)\n', (19938, 19950), True, 'import numpy as np\n'), ((20162, 20184), 'numpy.array', 'np.array', (['moment_curve'], {}), '(moment_curve)\n', (20170, 20184), True, 'import numpy as np\n'), ((21737, 21756), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (21750, 21756), False, 'import os\n'), 
((21770, 21784), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (21778, 21784), False, 'import os\n'), ((22969, 22980), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22978, 22980), True, 'import matplotlib.pyplot as plt\n'), ((22993, 23029), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (23004, 23029), True, 'import matplotlib.pyplot as plt\n'), ((23042, 23078), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (23053, 23078), True, 'import matplotlib.pyplot as plt\n'), ((23091, 23139), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'lift_curve'], {'c': '"""b"""', 'lw': '(2.5)'}), "(alpha_curve, lift_curve, c='b', lw=2.5)\n", (23099, 23139), True, 'import matplotlib.pyplot as plt\n'), ((23152, 23207), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle of Attack [deg]"""'], {'fontsize': '"""x-large"""'}), "('Angle of Attack [deg]', fontsize='x-large')\n", (23162, 23207), True, 'import matplotlib.pyplot as plt\n'), ((23220, 23270), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('$C_{l}$' + ' [-]')"], {'fontsize': '"""x-large"""'}), "('$C_{l}$' + ' [-]', fontsize='x-large')\n", (23230, 23270), True, 'import matplotlib.pyplot as plt\n'), ((24004, 24034), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (24014, 24034), True, 'import matplotlib.pyplot as plt\n'), ((24047, 24077), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (24057, 24077), True, 'import matplotlib.pyplot as plt\n'), ((24135, 24166), 'matplotlib.pyplot.savefig', 'plt.savefig', (['lift_path'], {'dpi': '(200)'}), '(lift_path, dpi=200)\n', (24146, 24166), True, 'import matplotlib.pyplot as plt\n'), ((24219, 24230), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24228, 24230), True, 'import 
matplotlib.pyplot as plt\n'), ((24243, 24279), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (24254, 24279), True, 'import matplotlib.pyplot as plt\n'), ((24292, 24328), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (24303, 24328), True, 'import matplotlib.pyplot as plt\n'), ((24341, 24407), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'lift_curve'], {'c': '"""b"""', 'lw': '(2.5)', 'label': '"""Original"""'}), "(alpha_curve, lift_curve, c='b', lw=2.5, label='Original')\n", (24349, 24407), True, 'import matplotlib.pyplot as plt\n'), ((24420, 24511), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'estimated_lift_curve'], {'c': '"""r"""', 'lw': '(2.5)', 'label': '"""Estimated"""', 'ls': '"""--"""'}), "(alpha_curve, estimated_lift_curve, c='r', lw=2.5, label=\n 'Estimated', ls='--')\n", (24428, 24511), True, 'import matplotlib.pyplot as plt\n'), ((24519, 24574), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle of Attack [deg]"""'], {'fontsize': '"""x-large"""'}), "('Angle of Attack [deg]', fontsize='x-large')\n", (24529, 24574), True, 'import matplotlib.pyplot as plt\n'), ((24587, 24637), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('$C_{l}$' + ' [-]')"], {'fontsize': '"""x-large"""'}), "('$C_{l}$' + ' [-]', fontsize='x-large')\n", (24597, 24637), True, 'import matplotlib.pyplot as plt\n'), ((25530, 25579), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""x-large"""', 'loc': '"""lower right"""'}), "(fontsize='x-large', loc='lower right')\n", (25540, 25579), True, 'import matplotlib.pyplot as plt\n'), ((25591, 25621), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (25601, 25621), True, 'import matplotlib.pyplot as plt\n'), ((25634, 25664), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '"""x-large"""'}), 
"(fontsize='x-large')\n", (25644, 25664), True, 'import matplotlib.pyplot as plt\n'), ((25931, 25972), 'matplotlib.pyplot.savefig', 'plt.savefig', (['lift_path_estimated'], {'dpi': '(200)'}), '(lift_path_estimated, dpi=200)\n', (25942, 25972), True, 'import matplotlib.pyplot as plt\n'), ((26023, 26034), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (26032, 26034), True, 'import matplotlib.pyplot as plt\n'), ((26047, 26083), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (26058, 26083), True, 'import matplotlib.pyplot as plt\n'), ((26096, 26132), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (26107, 26132), True, 'import matplotlib.pyplot as plt\n'), ((26145, 26211), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'lift_curve'], {'c': '"""b"""', 'lw': '(2.5)', 'label': '"""Original"""'}), "(alpha_curve, lift_curve, c='b', lw=2.5, label='Original')\n", (26153, 26211), True, 'import matplotlib.pyplot as plt\n'), ((26224, 26310), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'rebuilt_lift_curve'], {'c': '"""r"""', 'lw': '(2.5)', 'label': '"""Rebuilt"""', 'ls': '"""--"""'}), "(alpha_curve, rebuilt_lift_curve, c='r', lw=2.5, label='Rebuilt',\n ls='--')\n", (26232, 26310), True, 'import matplotlib.pyplot as plt\n'), ((26319, 26374), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle of Attack [deg]"""'], {'fontsize': '"""x-large"""'}), "('Angle of Attack [deg]', fontsize='x-large')\n", (26329, 26374), True, 'import matplotlib.pyplot as plt\n'), ((26387, 26437), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('$C_{l}$' + ' [-]')"], {'fontsize': '"""x-large"""'}), "('$C_{l}$' + ' [-]', fontsize='x-large')\n", (26397, 26437), True, 'import matplotlib.pyplot as plt\n'), ((27326, 27375), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""x-large"""', 'loc': '"""lower right"""'}), 
"(fontsize='x-large', loc='lower right')\n", (27336, 27375), True, 'import matplotlib.pyplot as plt\n'), ((27387, 27417), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (27397, 27417), True, 'import matplotlib.pyplot as plt\n'), ((27430, 27460), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (27440, 27460), True, 'import matplotlib.pyplot as plt\n'), ((27727, 27766), 'matplotlib.pyplot.savefig', 'plt.savefig', (['lift_path_rebuilt'], {'dpi': '(200)'}), '(lift_path_rebuilt, dpi=200)\n', (27738, 27766), True, 'import matplotlib.pyplot as plt\n'), ((28173, 28184), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28182, 28184), True, 'import matplotlib.pyplot as plt\n'), ((28197, 28233), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (28208, 28233), True, 'import matplotlib.pyplot as plt\n'), ((28246, 28282), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (28257, 28282), True, 'import matplotlib.pyplot as plt\n'), ((28295, 28345), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'moment_curve'], {'c': '"""b"""', 'lw': '(2.5)'}), "(alpha_curve, moment_curve, c='b', lw=2.5)\n", (28303, 28345), True, 'import matplotlib.pyplot as plt\n'), ((28358, 28413), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle of Attack [deg]"""'], {'fontsize': '"""x-large"""'}), "('Angle of Attack [deg]', fontsize='x-large')\n", (28368, 28413), True, 'import matplotlib.pyplot as plt\n'), ((28426, 28476), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('$C_{m}$' + ' [-]')"], {'fontsize': '"""x-large"""'}), "('$C_{m}$' + ' [-]', fontsize='x-large')\n", (28436, 28476), True, 'import matplotlib.pyplot as plt\n'), ((28669, 28699), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '"""x-large"""'}), 
"(fontsize='x-large')\n", (28679, 28699), True, 'import matplotlib.pyplot as plt\n'), ((28712, 28742), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (28722, 28742), True, 'import matplotlib.pyplot as plt\n'), ((28800, 28833), 'matplotlib.pyplot.savefig', 'plt.savefig', (['moment_path'], {'dpi': '(200)'}), '(moment_path, dpi=200)\n', (28811, 28833), True, 'import matplotlib.pyplot as plt\n'), ((28886, 28897), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28895, 28897), True, 'import matplotlib.pyplot as plt\n'), ((28910, 28946), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (28921, 28946), True, 'import matplotlib.pyplot as plt\n'), ((28959, 28995), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (28970, 28995), True, 'import matplotlib.pyplot as plt\n'), ((29008, 29076), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'moment_curve'], {'c': '"""b"""', 'lw': '(2.5)', 'label': '"""Original"""'}), "(alpha_curve, moment_curve, c='b', lw=2.5, label='Original')\n", (29016, 29076), True, 'import matplotlib.pyplot as plt\n'), ((29089, 29182), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'estimated_moment_curve'], {'c': '"""r"""', 'lw': '(2.5)', 'label': '"""Estimated"""', 'ls': '"""--"""'}), "(alpha_curve, estimated_moment_curve, c='r', lw=2.5, label=\n 'Estimated', ls='--')\n", (29097, 29182), True, 'import matplotlib.pyplot as plt\n'), ((29190, 29245), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle of Attack [deg]"""'], {'fontsize': '"""x-large"""'}), "('Angle of Attack [deg]', fontsize='x-large')\n", (29200, 29245), True, 'import matplotlib.pyplot as plt\n'), ((29258, 29308), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('$C_{m}$' + ' [-]')"], {'fontsize': '"""x-large"""'}), "('$C_{m}$' + ' [-]', fontsize='x-large')\n", 
(29268, 29308), True, 'import matplotlib.pyplot as plt\n'), ((29501, 29531), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (29511, 29531), True, 'import matplotlib.pyplot as plt\n'), ((29544, 29574), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (29554, 29574), True, 'import matplotlib.pyplot as plt\n'), ((29587, 29617), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (29597, 29617), True, 'import matplotlib.pyplot as plt\n'), ((29675, 29718), 'matplotlib.pyplot.savefig', 'plt.savefig', (['moment_path_estimated'], {'dpi': '(200)'}), '(moment_path_estimated, dpi=200)\n', (29686, 29718), True, 'import matplotlib.pyplot as plt\n'), ((29769, 29780), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (29778, 29780), True, 'import matplotlib.pyplot as plt\n'), ((29793, 29829), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (29804, 29829), True, 'import matplotlib.pyplot as plt\n'), ((29842, 29878), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0.0)'], {'color': '"""k"""', 'lw': '(0.75)'}), "(0.0, color='k', lw=0.75)\n", (29853, 29878), True, 'import matplotlib.pyplot as plt\n'), ((29891, 29959), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'moment_curve'], {'c': '"""b"""', 'lw': '(2.5)', 'label': '"""Original"""'}), "(alpha_curve, moment_curve, c='b', lw=2.5, label='Original')\n", (29899, 29959), True, 'import matplotlib.pyplot as plt\n'), ((29972, 30060), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_curve', 'rebuilt_moment_curve'], {'c': '"""r"""', 'lw': '(2.5)', 'label': '"""Rebuilt"""', 'ls': '"""--"""'}), "(alpha_curve, rebuilt_moment_curve, c='r', lw=2.5, label='Rebuilt',\n ls='--')\n", (29980, 30060), True, 'import matplotlib.pyplot as plt\n'), ((30069, 30124), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""Angle of Attack [deg]"""'], {'fontsize': '"""x-large"""'}), "('Angle of Attack [deg]', fontsize='x-large')\n", (30079, 30124), True, 'import matplotlib.pyplot as plt\n'), ((30137, 30187), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('$C_{m}$' + ' [-]')"], {'fontsize': '"""x-large"""'}), "('$C_{m}$' + ' [-]', fontsize='x-large')\n", (30147, 30187), True, 'import matplotlib.pyplot as plt\n'), ((30380, 30410), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (30390, 30410), True, 'import matplotlib.pyplot as plt\n'), ((30423, 30453), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (30433, 30453), True, 'import matplotlib.pyplot as plt\n'), ((30466, 30496), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (30476, 30496), True, 'import matplotlib.pyplot as plt\n'), ((30554, 30595), 'matplotlib.pyplot.savefig', 'plt.savefig', (['moment_path_rebuilt'], {'dpi': '(200)'}), '(moment_path_rebuilt, dpi=200)\n', (30565, 30595), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2477), 'numpy.cos', 'np.cos', (['(j * np.pi / n)'], {}), '(j * np.pi / n)\n', (2462, 2477), True, 'import numpy as np\n'), ((2505, 2526), 'numpy.cos', 'np.cos', (['(j * np.pi / n)'], {}), '(j * np.pi / n)\n', (2511, 2526), True, 'import numpy as np\n'), ((2894, 2910), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2908, 2910), True, 'import numpy as np\n'), ((2938, 2954), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2952, 2954), True, 'import numpy as np\n'), ((2999, 3015), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3013, 3015), True, 'import numpy as np\n'), ((3635, 3651), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3649, 3651), True, 'import numpy as np\n'), ((3770, 3786), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3784, 3786), True, 'import numpy 
as np\n'), ((4006, 4017), 'numpy.sign', 'np.sign', (['y1'], {}), '(y1)\n', (4013, 4017), True, 'import numpy as np\n'), ((5286, 5303), 'numpy.diff', 'np.diff', (['x_coords'], {}), '(x_coords)\n', (5293, 5303), True, 'import numpy as np\n'), ((5348, 5365), 'numpy.diff', 'np.diff', (['y_coords'], {}), '(y_coords)\n', (5355, 5365), True, 'import numpy as np\n'), ((5612, 5629), 'numpy.diff', 'np.diff', (['x_coords'], {}), '(x_coords)\n', (5619, 5629), True, 'import numpy as np\n'), ((5674, 5691), 'numpy.diff', 'np.diff', (['y_coords'], {}), '(y_coords)\n', (5681, 5691), True, 'import numpy as np\n'), ((7263, 7285), 'numpy.diff', 'np.diff', (['self.x_coords'], {}), '(self.x_coords)\n', (7270, 7285), True, 'import numpy as np\n'), ((7340, 7362), 'numpy.diff', 'np.diff', (['self.y_coords'], {}), '(self.y_coords)\n', (7347, 7362), True, 'import numpy as np\n'), ((9487, 9496), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9494, 9496), True, 'import matplotlib.pyplot as plt\n'), ((14073, 14088), 'numpy.cos', 'np.cos', (['theta_j'], {}), '(theta_j)\n', (14079, 14088), True, 'import numpy as np\n'), ((14117, 14132), 'numpy.sin', 'np.sin', (['theta_j'], {}), '(theta_j)\n', (14123, 14132), True, 'import numpy as np\n'), ((14393, 14418), 'numpy.sin', 'np.sin', (['(theta_i - theta_j)'], {}), '(theta_i - theta_j)\n', (14399, 14418), True, 'import numpy as np\n'), ((14484, 14509), 'numpy.sin', 'np.sin', (['(theta_j - theta_i)'], {}), '(theta_j - theta_i)\n', (14490, 14509), True, 'import numpy as np\n'), ((16809, 16853), 'numpy.sum', 'np.sum', (['(source_vortex_soln[0:-1] * Jij[i, :])'], {}), '(source_vortex_soln[0:-1] * Jij[i, :])\n', (16815, 16853), True, 'import numpy as np\n'), ((16960, 16977), 'numpy.sum', 'np.sum', (['Lij[i, :]'], {}), '(Lij[i, :])\n', (16966, 16977), True, 'import numpy as np\n'), ((17917, 17946), 'numpy.cos', 'np.cos', (['(alpha * np.pi / 180.0)'], {}), '(alpha * np.pi / 180.0)\n', (17923, 17946), True, 'import numpy as np\n'), ((17948, 17977), 
'numpy.sin', 'np.sin', (['(alpha * np.pi / 180.0)'], {}), '(alpha * np.pi / 180.0)\n', (17954, 17977), True, 'import numpy as np\n'), ((17996, 18025), 'numpy.sin', 'np.sin', (['(alpha * np.pi / 180.0)'], {}), '(alpha * np.pi / 180.0)\n', (18002, 18025), True, 'import numpy as np\n'), ((18027, 18056), 'numpy.cos', 'np.cos', (['(alpha * np.pi / 180.0)'], {}), '(alpha * np.pi / 180.0)\n', (18033, 18056), True, 'import numpy as np\n'), ((18381, 18402), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (18389, 18402), True, 'import numpy as np\n'), ((18506, 18527), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (18514, 18527), True, 'import numpy as np\n'), ((19722, 19742), 'numpy.array', 'np.array', (['lift_curve'], {}), '(lift_curve)\n', (19730, 19742), True, 'import numpy as np\n'), ((20288, 20310), 'numpy.array', 'np.array', (['moment_curve'], {}), '(moment_curve)\n', (20296, 20310), True, 'import numpy as np\n'), ((23313, 23369), 'matplotlib.pyplot.title', 'plt.title', (["('Lift Curve for ' + name)"], {'fontsize': '"""xx-large"""'}), "('Lift Curve for ' + name, fontsize='xx-large')\n", (23322, 23369), True, 'import matplotlib.pyplot as plt\n'), ((23402, 23446), 'matplotlib.pyplot.title', 'plt.title', (['"""Lift Curve"""'], {'fontsize': '"""xx-large"""'}), "('Lift Curve', fontsize='xx-large')\n", (23411, 23446), True, 'import matplotlib.pyplot as plt\n'), ((23798, 23816), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (23804, 23816), True, 'import numpy as np\n'), ((24680, 24736), 'matplotlib.pyplot.title', 'plt.title', (["('Lift Curve for ' + name)"], {'fontsize': '"""xx-large"""'}), "('Lift Curve for ' + name, fontsize='xx-large')\n", (24689, 24736), True, 'import matplotlib.pyplot as plt\n'), ((24769, 24813), 'matplotlib.pyplot.title', 'plt.title', (['"""Lift Curve"""'], {'fontsize': '"""xx-large"""'}), "('Lift Curve', fontsize='xx-large')\n", (24778, 24813), True, 'import matplotlib.pyplot as plt\n'), 
((25680, 25698), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (25686, 25698), True, 'import numpy as np\n'), ((26480, 26536), 'matplotlib.pyplot.title', 'plt.title', (["('Lift Curve for ' + name)"], {'fontsize': '"""xx-large"""'}), "('Lift Curve for ' + name, fontsize='xx-large')\n", (26489, 26536), True, 'import matplotlib.pyplot as plt\n'), ((26569, 26613), 'matplotlib.pyplot.title', 'plt.title', (['"""Lift Curve"""'], {'fontsize': '"""xx-large"""'}), "('Lift Curve', fontsize='xx-large')\n", (26578, 26613), True, 'import matplotlib.pyplot as plt\n'), ((27476, 27494), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (27482, 27494), True, 'import numpy as np\n'), ((28519, 28577), 'matplotlib.pyplot.title', 'plt.title', (["('Moment Curve for ' + name)"], {'fontsize': '"""xx-large"""'}), "('Moment Curve for ' + name, fontsize='xx-large')\n", (28528, 28577), True, 'import matplotlib.pyplot as plt\n'), ((28610, 28656), 'matplotlib.pyplot.title', 'plt.title', (['"""Moment Curve"""'], {'fontsize': '"""xx-large"""'}), "('Moment Curve', fontsize='xx-large')\n", (28619, 28656), True, 'import matplotlib.pyplot as plt\n'), ((29351, 29409), 'matplotlib.pyplot.title', 'plt.title', (["('Moment Curve for ' + name)"], {'fontsize': '"""xx-large"""'}), "('Moment Curve for ' + name, fontsize='xx-large')\n", (29360, 29409), True, 'import matplotlib.pyplot as plt\n'), ((29442, 29488), 'matplotlib.pyplot.title', 'plt.title', (['"""Moment Curve"""'], {'fontsize': '"""xx-large"""'}), "('Moment Curve', fontsize='xx-large')\n", (29451, 29488), True, 'import matplotlib.pyplot as plt\n'), ((30230, 30288), 'matplotlib.pyplot.title', 'plt.title', (["('Moment Curve for ' + name)"], {'fontsize': '"""xx-large"""'}), "('Moment Curve for ' + name, fontsize='xx-large')\n", (30239, 30288), True, 'import matplotlib.pyplot as plt\n'), ((30321, 30367), 'matplotlib.pyplot.title', 'plt.title', (['"""Moment Curve"""'], {'fontsize': '"""xx-large"""'}), "('Moment Curve', 
fontsize='xx-large')\n", (30330, 30367), True, 'import matplotlib.pyplot as plt\n'), ((3691, 3707), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3705, 3707), True, 'import numpy as np\n'), ((3918, 3934), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3932, 3934), True, 'import numpy as np\n'), ((4941, 4957), 'numpy.flip', 'np.flip', (['y_upper'], {}), '(y_upper)\n', (4948, 4957), True, 'import numpy as np\n'), ((6182, 6199), 'numpy.diff', 'np.diff', (['y_coords'], {}), '(y_coords)\n', (6189, 6199), True, 'import numpy as np\n'), ((6205, 6222), 'numpy.diff', 'np.diff', (['x_coords'], {}), '(x_coords)\n', (6212, 6222), True, 'import numpy as np\n'), ((11158, 11183), 'numpy.sin', 'np.sin', (['(theta_i - theta_j)'], {}), '(theta_i - theta_j)\n', (11164, 11183), True, 'import numpy as np\n'), ((11208, 11233), 'numpy.cos', 'np.cos', (['(theta_i - theta_j)'], {}), '(theta_i - theta_j)\n', (11214, 11233), True, 'import numpy as np\n'), ((11336, 11380), 'numpy.log', 'np.log', (['(1.0 + (Sj ** 2.0 + 2.0 * A * Sj) / B)'], {}), '(1.0 + (Sj ** 2.0 + 2.0 * A * Sj) / B)\n', (11342, 11380), True, 'import numpy as np\n'), ((11397, 11427), 'numpy.arctan2', 'np.arctan2', (['(E * Sj)', '(B + A * Sj)'], {}), '(E * Sj, B + A * Sj)\n', (11407, 11427), True, 'import numpy as np\n'), ((14439, 14464), 'numpy.cos', 'np.cos', (['(theta_i - theta_j)'], {}), '(theta_i - theta_j)\n', (14445, 14464), True, 'import numpy as np\n'), ((14737, 14755), 'numpy.sqrt', 'np.sqrt', (['(B - A * A)'], {}), '(B - A * A)\n', (14744, 14755), True, 'import numpy as np\n'), ((16488, 16503), 'numpy.sin', 'np.sin', (['beta[0]'], {}), '(beta[0])\n', (16494, 16503), True, 'import numpy as np\n'), ((16506, 16539), 'numpy.sin', 'np.sin', (['beta[panels.n_panels - 1]'], {}), '(beta[panels.n_panels - 1])\n', (16512, 16539), True, 'import numpy as np\n'), ((18441, 18462), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (18449, 18462), True, 'import numpy as np\n'), ((18566, 
18587), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (18574, 18587), True, 'import numpy as np\n'), ((18631, 18652), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (18639, 18652), True, 'import numpy as np\n'), ((18696, 18717), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (18704, 18717), True, 'import numpy as np\n'), ((18761, 18782), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (18769, 18782), True, 'import numpy as np\n'), ((18826, 18847), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (18834, 18847), True, 'import numpy as np\n'), ((19424, 19443), 'numpy.argmin', 'np.argmin', (['lower_cp'], {}), '(lower_cp)\n', (19433, 19443), True, 'import numpy as np\n'), ((19643, 19663), 'numpy.array', 'np.array', (['lift_curve'], {}), '(lift_curve)\n', (19651, 19663), True, 'import numpy as np\n'), ((19681, 19702), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (19689, 19702), True, 'import numpy as np\n'), ((19837, 19858), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (19845, 19858), True, 'import numpy as np\n'), ((19981, 20002), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (19989, 20002), True, 'import numpy as np\n'), ((20205, 20227), 'numpy.array', 'np.array', (['moment_curve'], {}), '(moment_curve)\n', (20213, 20227), True, 'import numpy as np\n'), ((20230, 20251), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (20238, 20251), True, 'import numpy as np\n'), ((23518, 23536), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (23524, 23536), True, 'import numpy as np\n'), ((23686, 23704), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (23692, 23704), True, 'import numpy as np\n'), ((24090, 24099), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (24097, 24099), True, 'import matplotlib.pyplot as plt\n'), ((24885, 24903), 'numpy.min', 
'np.min', (['lift_curve'], {}), '(lift_curve)\n', (24891, 24903), True, 'import numpy as np\n'), ((25053, 25071), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (25059, 25071), True, 'import numpy as np\n'), ((25221, 25239), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (25227, 25239), True, 'import numpy as np\n'), ((25405, 25423), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (25411, 25423), True, 'import numpy as np\n'), ((25886, 25895), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (25893, 25895), True, 'import matplotlib.pyplot as plt\n'), ((26685, 26703), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (26691, 26703), True, 'import numpy as np\n'), ((26853, 26871), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (26859, 26871), True, 'import numpy as np\n'), ((27021, 27039), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (27027, 27039), True, 'import numpy as np\n'), ((27203, 27221), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (27209, 27221), True, 'import numpy as np\n'), ((27682, 27691), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (27689, 27691), True, 'import matplotlib.pyplot as plt\n'), ((28755, 28764), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (28762, 28764), True, 'import matplotlib.pyplot as plt\n'), ((29630, 29639), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (29637, 29639), True, 'import matplotlib.pyplot as plt\n'), ((30509, 30518), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (30516, 30518), True, 'import matplotlib.pyplot as plt\n'), ((3221, 3274), 'numpy.sqrt', 'np.sqrt', (['(a * a - (x_on_c * (x_on_c <= xf) - h) ** 2.0)'], {}), '(a * a - (x_on_c * (x_on_c <= xf) - h) ** 2.0)\n', (3228, 3274), True, 'import numpy as np\n'), ((3727, 3743), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3741, 3743), True, 'import numpy as np\n'), ((8318, 8344), 'numpy.arctan', 'np.arctan', 
(['(y_diff / x_diff)'], {}), '(y_diff / x_diff)\n', (8327, 8344), True, 'import numpy as np\n'), ((15071, 15111), 'numpy.log', 'np.log', (['((Sj * Sj + 2.0 * A * Sj + B) / B)'], {}), '((Sj * Sj + 2.0 * A * Sj + B) / B)\n', (15077, 15111), True, 'import numpy as np\n'), ((19323, 19342), 'numpy.argmin', 'np.argmin', (['upper_cp'], {}), '(upper_cp)\n', (19332, 19342), True, 'import numpy as np\n'), ((19607, 19628), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (19615, 19628), True, 'import numpy as np\n'), ((19778, 19799), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (19786, 19799), True, 'import numpy as np\n'), ((20314, 20335), 'numpy.array', 'np.array', (['alpha_curve'], {}), '(alpha_curve)\n', (20322, 20335), True, 'import numpy as np\n'), ((3979, 3995), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3993, 3995), True, 'import numpy as np\n'), ((11034, 11049), 'numpy.cos', 'np.cos', (['theta_j'], {}), '(theta_j)\n', (11040, 11049), True, 'import numpy as np\n'), ((11062, 11077), 'numpy.sin', 'np.sin', (['theta_j'], {}), '(theta_j)\n', (11068, 11077), True, 'import numpy as np\n'), ((11268, 11283), 'numpy.sin', 'np.sin', (['theta_j'], {}), '(theta_j)\n', (11274, 11283), True, 'import numpy as np\n'), ((11296, 11311), 'numpy.cos', 'np.cos', (['theta_j'], {}), '(theta_j)\n', (11302, 11311), True, 'import numpy as np\n'), ((11460, 11491), 'numpy.sin', 'np.sin', (['(theta_i - 2.0 * theta_j)'], {}), '(theta_i - 2.0 * theta_j)\n', (11466, 11491), True, 'import numpy as np\n'), ((11502, 11533), 'numpy.cos', 'np.cos', (['(theta_i - 2.0 * theta_j)'], {}), '(theta_i - 2.0 * theta_j)\n', (11508, 11533), True, 'import numpy as np\n'), ((11566, 11597), 'numpy.cos', 'np.cos', (['(theta_i - 2.0 * theta_j)'], {}), '(theta_i - 2.0 * theta_j)\n', (11572, 11597), True, 'import numpy as np\n'), ((11608, 11639), 'numpy.sin', 'np.sin', (['(theta_i - 2.0 * theta_j)'], {}), '(theta_i - 2.0 * theta_j)\n', (11614, 11639), True, 
'import numpy as np\n'), ((15133, 15154), 'numpy.arctan2', 'np.arctan2', (['(Sj + A)', 'E'], {}), '(Sj + A, E)\n', (15143, 15154), True, 'import numpy as np\n'), ((15154, 15170), 'numpy.arctan2', 'np.arctan2', (['A', 'E'], {}), '(A, E)\n', (15164, 15170), True, 'import numpy as np\n'), ((16007, 16024), 'numpy.sum', 'np.sum', (['Kij[i, :]'], {}), '(Kij[i, :])\n', (16013, 16024), True, 'import numpy as np\n'), ((23475, 23493), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (23481, 23493), True, 'import numpy as np\n'), ((23494, 23512), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (23500, 23512), True, 'import numpy as np\n'), ((23643, 23661), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (23649, 23661), True, 'import numpy as np\n'), ((23662, 23680), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (23668, 23680), True, 'import numpy as np\n'), ((23854, 23872), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (23860, 23872), True, 'import numpy as np\n'), ((23878, 23896), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (23884, 23896), True, 'import numpy as np\n'), ((23947, 23965), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (23953, 23965), True, 'import numpy as np\n'), ((23971, 23989), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (23977, 23989), True, 'import numpy as np\n'), ((24842, 24860), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (24848, 24860), True, 'import numpy as np\n'), ((24861, 24879), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (24867, 24879), True, 'import numpy as np\n'), ((25010, 25028), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (25016, 25028), True, 'import numpy as np\n'), ((25029, 25047), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (25035, 25047), True, 'import numpy as np\n'), ((25178, 25196), 'numpy.max', 'np.max', (['lift_curve'], {}), 
'(lift_curve)\n', (25184, 25196), True, 'import numpy as np\n'), ((25197, 25215), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (25203, 25215), True, 'import numpy as np\n'), ((25362, 25380), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (25368, 25380), True, 'import numpy as np\n'), ((25381, 25399), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (25387, 25399), True, 'import numpy as np\n'), ((25736, 25754), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (25742, 25754), True, 'import numpy as np\n'), ((25760, 25778), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (25766, 25778), True, 'import numpy as np\n'), ((25829, 25847), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (25835, 25847), True, 'import numpy as np\n'), ((25853, 25871), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (25859, 25871), True, 'import numpy as np\n'), ((26642, 26660), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (26648, 26660), True, 'import numpy as np\n'), ((26661, 26679), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (26667, 26679), True, 'import numpy as np\n'), ((26810, 26828), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (26816, 26828), True, 'import numpy as np\n'), ((26829, 26847), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (26835, 26847), True, 'import numpy as np\n'), ((26978, 26996), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (26984, 26996), True, 'import numpy as np\n'), ((26997, 27015), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (27003, 27015), True, 'import numpy as np\n'), ((27160, 27178), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (27166, 27178), True, 'import numpy as np\n'), ((27179, 27197), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (27185, 27197), True, 'import numpy as np\n'), ((27532, 27550), 'numpy.min', 
'np.min', (['lift_curve'], {}), '(lift_curve)\n', (27538, 27550), True, 'import numpy as np\n'), ((27556, 27574), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (27562, 27574), True, 'import numpy as np\n'), ((27625, 27643), 'numpy.min', 'np.min', (['lift_curve'], {}), '(lift_curve)\n', (27631, 27643), True, 'import numpy as np\n'), ((27649, 27667), 'numpy.max', 'np.max', (['lift_curve'], {}), '(lift_curve)\n', (27655, 27667), True, 'import numpy as np\n'), ((15718, 15735), 'numpy.sum', 'np.sum', (['Lij[0, :]'], {}), '(Lij[0, :])\n', (15724, 15735), True, 'import numpy as np\n'), ((15737, 15772), 'numpy.sum', 'np.sum', (['Lij[panels.n_panels - 1, :]'], {}), '(Lij[panels.n_panels - 1, :])\n', (15743, 15772), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import io
import logging
import os
import pickle
import subprocess
import sys
import pandas as pd
import numpy as np
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
#import h5py
from ..trajectory.oracle_core import Oracle
from .scatter_to_grid import scatter_value_to_grid_value
def subset_oracle_for_development_analysiis(oracle_object, cluster_column_name, cluster):
    """
    Build a lightweight Oracle restricted to the cells of one cluster.

    Only the attributes needed for development-flow analysis are copied,
    rather than every attribute on ``oracle_object``.
    """
    obs = oracle_object.adata.obs
    cells_of_interest = obs[obs[cluster_column_name] == cluster].index.values

    # Fresh Oracle receiving only the attributes we need.
    subset = Oracle()
    copied_attrs = ["embedding", "delta_embedding", "delta_embedding_random",
                    "corrcoef_random", "adata"]
    for attr in copied_attrs:
        setattr(subset, attr, getattr(oracle_object, attr))

    # Restrict every per-cell array (all copied attributes except "adata")
    # to the rows belonging to the requested cluster.
    index_use = np.where(subset.adata.obs.index.isin(cells_of_interest))[0]
    for attr in copied_attrs[:-1]:
        setattr(subset, attr, getattr(subset, attr)[index_use])
    subset.adata = subset.adata[cells_of_interest, :]

    return subset
from scipy.stats import wilcoxon
def get_stat_for_inner_product(oracle_object, n_bins=10):
    """
    Bin the grid inner-product scores by pseudotime and test each bin.

    Returns a per-grid-point DataFrame and a per-bin summary with the
    median/mean score and a one-sided Wilcoxon p-value (H1: score < 0).
    """
    keep = ~oracle_object.mass_filter
    inner_product_stats = pd.DataFrame(
        {"score": oracle_object.inner_product[keep],
         "pseudotime": oracle_object.new_pseudotime[keep]})

    # Assign each grid point to a pseudotime bin.
    bins = _get_bins(inner_product_stats.pseudotime, n_bins)
    inner_product_stats["pseudotime_id"] = np.digitize(inner_product_stats.pseudotime, bins) - 1

    try:
        inner_product_stats["stage"] = oracle_object.stage_grid[keep]
    except:  # stage_grid is optional; keep the original best-effort behavior
        print("stage_grid not calculated")

    # One-sided Wilcoxon signed-rank test per bin.
    bin_ids = np.sort(inner_product_stats["pseudotime_id"].unique())
    ps = []
    for bin_id in bin_ids:
        scores_in_bin = inner_product_stats[inner_product_stats["pseudotime_id"] == bin_id].score.values
        _, p = wilcoxon(x=scores_in_bin, alternative="less")
        ps.append(p)

    grouped = inner_product_stats.groupby("pseudotime_id")
    inner_product_stats_grouped = pd.DataFrame(
        {"ip_score_median": grouped.median()["score"].values,
         "ip_score_mean": grouped.mean()["score"],
         "p-value_negative": ps},
        index=bin_ids)

    return inner_product_stats, inner_product_stats_grouped
def _get_bins(array, n_bins):
min_ = array.min()
max_ = array.max()
width = (max_ - min_)/(n_bins-1)
return np.arange(min_, max_ + width, width)
class Oracle_development_module():
    """
    Compare a perturbation-simulation vector field with the developmental
    flow (the gradient of pseudotime) on a 2D grid.

    Typical workflow:
      1. ``extract_data_from_oracle``            - copy the needed attributes.
      2. ``transfer_data_into_grid``             - project per-cell values onto the grid.
      3. ``calculate_gradient_and_inner_product``- build the comparison field.
      4. ``calculate_stats``                     - summarize per pseudotime bin.
    """

    def __init__(self):
        pass

    def extract_data_from_oracle(self, oracle_object, min_mass):
        """
        Copy grid-level simulation results and per-cell pseudotime.

        Parameters
        ----------
        oracle_object : Oracle
            Source object holding perturbation-simulation results.
        min_mass : float
            Grid points with cell density below this value are masked out.
        """
        self.oracle_dev = Oracle()

        # 1. Perturbation-simulation results as grid matrices + embedding.
        for attr in ["flow_grid", "flow", "flow_norm_rndm", "embedding"]:
            setattr(self.oracle_dev, attr, getattr(oracle_object, attr))
        # Mask for low-density grid points.
        self.oracle_dev.mass_filter = (oracle_object.total_p_mass < min_mass)

        # 2. Pseudotime data (per cell).
        self.oracle_dev.pseudotime = oracle_object.adata.obs["pseudotime"].values
        try:
            self.oracle_dev.stage = np.array(oracle_object.adata.obs["Stage"].values)
        except:  # "Stage" column is optional
            print("Stage not in data")

    def transfer_data_into_grid(self, args=None):
        """
        Project per-cell pseudotime (and stage, when available) onto the grid.

        Parameters
        ----------
        args : dict, optional
            Options forwarded to ``scatter_value_to_grid_value``.  Defaults
            to ``{"method": "knn", "n_knn": 30}``.  The default is ``None``
            rather than a shared mutable ``{}`` (same behavior, safer idiom;
            passing ``{}`` explicitly still selects the defaults).
        """
        if not args:
            args = {"method": "knn",
                    "n_knn": 30}

        self.oracle_dev.new_pseudotime = scatter_value_to_grid_value(embedding=self.oracle_dev.embedding,
                                                                grid=self.oracle_dev.flow_grid,
                                                                value=self.oracle_dev.pseudotime,
                                                                **args)
        try:
            # Stage is categorical, so use the classification variant.
            self.oracle_dev.stage_grid = scatter_value_to_grid_value(embedding=self.oracle_dev.embedding,
                                                                    grid=self.oracle_dev.flow_grid,
                                                                    value=self.oracle_dev.stage,
                                                                    **{"method": "knn_class",
                                                                       "n_knn": 30})
        except:  # stage attribute is optional
            print("Stage not in data")

    def calculate_gradient_and_inner_product(self, scale_factor="l2_norm_mean", normalization=None):
        """
        Compute the pseudotime gradient and its inner product with the
        simulated flow at every grid point.

        Parameters
        ----------
        scale_factor : "l2_norm_mean" or number
            Either rescale by the inverse mean L2 norm of the gradient, or
            multiply by the given number directly.
        normalization : "sqrt" or None
            Optional sqrt-normalization of the gradient vectors.
        """
        # Gradient of pseudotime on the grid.
        gradient = get_gradient(value_on_grid=self.oracle_dev.new_pseudotime.copy())

        if normalization == "sqrt":
            gradient = normalize_gradient(gradient, method="sqrt")

        if scale_factor == "l2_norm_mean":
            # Divide gradient by the mean of its L2 norms.
            l2_norm = np.linalg.norm(gradient, ord=2, axis=1)
            scale_factor = 1 / l2_norm.mean()

        self.oracle_dev.gradient = gradient * scale_factor

        # Inner product between pseudotime gradient and perturbation flow.
        self.oracle_dev.inner_product = np.array([np.dot(i, j) for i, j in zip(self.oracle_dev.flow, self.oracle_dev.gradient)])

    def calculate_stats(self, n_bins=10):
        """Summarize the inner products per pseudotime bin (module helper)."""
        inner_product_stats, inner_product_stats_grouped, = \
            get_stat_for_inner_product(self.oracle_dev, n_bins)
        self.oracle_dev.inner_product_stats = inner_product_stats
        self.oracle_dev.inner_product_stats_grouped = inner_product_stats_grouped
class Gradient_based_trajecory():
    """
    Pseudotime gradient field ("development flow") on a regular 2D grid.

    Can be built either from an AnnData object (``adata=...``) or by copying
    the whole-embedding state of another instance (``gt=...``).
    """

    def __init__(self, adata=None, obsm_key=None, pseudotime_key="pseudotime", cluster_column_name=None, cluster=None, gt=None):
        if adata is not None:
            self.load_adata(adata=adata, obsm_key=obsm_key,
                            pseudotime_key=pseudotime_key, cluster_column_name=cluster_column_name,
                            cluster=cluster)
        elif gt is not None:
            # Copy the whole-embedding state from another trajectory object.
            self.embedding = gt.embedding_whole.copy()
            self.embedding_whole = gt.embedding_whole.copy()
            self.mass_filter = gt.mass_filter_whole.copy()
            self.mass_filter_whole = gt.mass_filter_whole.copy()
            self.gridpoints_coordinates = gt.gridpoints_coordinates.copy()
            self.pseudotime = gt.pseudotime_whole.copy()

    def load_adata(self, adata, obsm_key, pseudotime_key, cluster_column_name=None, cluster=None):
        """
        Load embedding and pseudotime from an AnnData object, optionally
        restricting the working copies to one cluster (the *_whole copies
        always keep every cell).
        """
        self.embedding = adata.obsm[obsm_key]
        self.pseudotime = adata.obs[pseudotime_key].values
        self.embedding_whole = self.embedding.copy()
        self.pseudotime_whole = self.pseudotime.copy()

        # `and` instead of the previous bitwise `&`: identical result for
        # these boolean operands, but idiomatic and short-circuiting.
        if (cluster_column_name is not None) and (cluster is not None):
            cells_ix = np.where(adata.obs[cluster_column_name] == cluster)[0]
            self.embedding = self.embedding[cells_ix, :]
            self.pseudotime = self.pseudotime[cells_ix]

    def calculate_mass_filter(self, min_mass=0.01, smooth=0.8, steps=(40, 40), n_neighbors=200, n_jobs=4):
        """
        Build grid masks hiding low-density grid points.  The whole
        embedding's bounding box is used for both the subset and whole grids
        so their coordinates line up.
        """
        x_min, y_min = self.embedding_whole.min(axis=0)
        x_max, y_max = self.embedding_whole.max(axis=0)
        xylim = ((x_min, x_max), (y_min, y_max))

        total_p_mass, gridpoints_coordinates = calculate_p_mass(self.embedding, smooth=smooth, steps=steps,
                                          n_neighbors=n_neighbors, n_jobs=n_jobs, xylim=xylim)
        total_p_mass_whole, _ = calculate_p_mass(self.embedding_whole, smooth=smooth, steps=steps,
                                          n_neighbors=n_neighbors, n_jobs=n_jobs, xylim=xylim)

        self.total_p_mass = total_p_mass
        self.mass_filter = (total_p_mass < min_mass)
        self.mass_filter_whole = (total_p_mass_whole < min_mass)
        self.gridpoints_coordinates = gridpoints_coordinates

    def transfer_data_into_grid(self, args=None):
        """
        Project per-cell pseudotime onto the grid.

        ``args`` is forwarded to ``scatter_value_to_grid_value`` and defaults
        to ``{"method": "knn", "n_knn": 30}``.  The default is ``None`` rather
        than a shared mutable ``{}`` (same behavior, safer idiom).
        """
        if not args:
            args = {"method": "knn",
                    "n_knn": 30}

        self.pseudotime_on_grid = scatter_value_to_grid_value(embedding=self.embedding,
                                                                grid=self.gridpoints_coordinates,
                                                                value=self.pseudotime,
                                                                **args)

    def calculate_gradient(self, scale_factor=60, normalization=None):
        """
        Compute the pseudotime gradient on the grid, optionally sqrt-
        normalized and rescaled (fixed factor or inverse mean L2 norm).
        """
        gradient = get_gradient(value_on_grid=self.pseudotime_on_grid.copy())

        if normalization == "sqrt":
            gradient = normalize_gradient(gradient, method="sqrt")

        if scale_factor == "l2_norm_mean":
            # Divide gradient by the mean of its L2 norms.
            l2_norm = np.linalg.norm(gradient, ord=2, axis=1)
            scale_factor = 1 / l2_norm.mean()
        self.gradient = gradient * scale_factor

    def visualize_dev_flow(self, scale_for_pseudotime=30, s=10, s_grid=30):
        """Delegate to the module-level plotting helper."""
        visualize_dev_flow(self, scale_for_pseudotime=scale_for_pseudotime, s=s, s_grid=s_grid)
def aggregate_GT_object(list_GT_object, base_gt=None):
    """
    Merge the grid pseudotime and gradient fields of several trajectory
    objects.  Without ``base_gt`` a fresh object (cloned from the first
    element) holds the merged fields; otherwise ``base_gt`` is updated in
    place on the grid points covered by at least one object.
    """
    merged_pt, merged_grad, merged_mask = _aggregate_gradients(
        pseudotime_stack=[gt_.pseudotime_on_grid for gt_ in list_GT_object],
        gradient_stack=[gt_.gradient for gt_ in list_GT_object],
        mass_filter_stack=[gt_.mass_filter for gt_ in list_GT_object])

    if base_gt is None:
        gt = Gradient_based_trajecory(gt=list_GT_object[0])
        gt.pseudotime_on_grid = merged_pt
        gt.gradient = merged_grad
    else:
        gt = base_gt
        covered = ~merged_mask
        gt.pseudotime_on_grid[covered] = merged_pt[covered]
        gt.gradient[covered, :] = merged_grad[covered, :]
    return gt
def _aggregate_gradients(pseudotime_stack, gradient_stack, mass_filter_stack):
new_pseudotime = np.zeros_like(pseudotime_stack[0])
new_pseudotime_count = np.zeros_like(pseudotime_stack[0])
new_gradient = np.zeros_like(gradient_stack[0])
gradient_count = np.zeros_like(gradient_stack[0])
for fil, pt, gra in zip(mass_filter_stack, pseudotime_stack, gradient_stack):
new_pseudotime[~fil] += pt[~fil]
new_pseudotime_count[~fil] +=1
new_gradient[~fil, :] += gra[~fil, :]
gradient_count[~fil, :] += 1
new_pseudotime[new_pseudotime_count != 0] /= new_pseudotime_count[new_pseudotime_count != 0]
new_gradient[gradient_count != 0] /= gradient_count[gradient_count != 0]
new_mass_filter = (gradient_count.sum(axis=1) == 0)
return new_pseudotime, new_gradient, new_mass_filter
def normalize_gradient(gradient, method="sqrt"):
    """
    Rescale each 2D gradient vector.

    With ``method="sqrt"`` every vector is divided by the square root of its
    own L2 norm, i.e. the resulting length is sqrt(original length).  Zero
    vectors are left unchanged.

    Parameters
    ----------
    gradient : np.ndarray, shape (n, 2)
    method : str
        Only "sqrt" is implemented.

    Raises
    ------
    ValueError
        For an unknown ``method``.  The previous code silently fell off the
        end of the function and returned ``None``.
    """
    if method != "sqrt":
        raise ValueError("Unknown normalization method: {!r}".format(method))
    size = np.sqrt(np.power(gradient, 2).sum(axis=1))
    size_sq = np.sqrt(size)
    size_sq[size_sq == 0] = 1  # avoid division by zero for null vectors
    factor = np.repeat(np.expand_dims(size_sq, axis=1), 2, axis=1)
    return gradient / factor
from scipy.stats import norm as normal
from sklearn.neighbors import NearestNeighbors
def calculate_p_mass(embedding, smooth=0.5, steps=(40, 40),
                     n_neighbors=100, n_jobs=4, xylim=((None, None), (None, None))):
    """Estimate cell density on a regular grid with a gaussian kernel.

    Note: the function should work also for an n-dimensional grid.

    Arguments
    ---------
    embedding: np.ndarray
        2D (cells x dims) coordinates of the cells.
    smooth: float, smooth=0.5
        Higher value corresponds to taking further points into account;
        the standard deviation of the gaussian kernel is smooth * stepsize.
    steps: tuple
        Number of grid steps for each axis.
    n_neighbors: int
        Neighbors used per grid point; larger values change little as long
        as smooth is small, but slow the computation down.
    n_jobs: int
        Number of processes for the neighbor search.
    xylim: tuple
        ((xmin, xmax), (ymin, ymax)); None entries fall back to data range.

    Returns
    -------
    total_p_mass: np.ndarray
        Density at each point of the grid.
    gridpoints_coordinates: np.ndarray
        Coordinates of the grid points.
    """
    # Prepare the grid: one linspace per embedding dimension.
    grs = []
    for dim_i in range(embedding.shape[1]):
        m, M = np.min(embedding[:, dim_i]), np.max(embedding[:, dim_i])

        if xylim[dim_i][0] is not None:
            m = xylim[dim_i][0]
        if xylim[dim_i][1] is not None:
            M = xylim[dim_i][1]

        # NOTE(review): M is padded using the already-padded m, so the upper
        # margin is slightly larger than the lower one.  Kept as-is because
        # it reproduces the original behavior.
        m = m - 0.025 * np.abs(M - m)
        M = M + 0.025 * np.abs(M - m)
        gr = np.linspace(m, M, steps[dim_i])
        grs.append(gr)

    meshes_tuple = np.meshgrid(*grs)
    gridpoints_coordinates = np.vstack([i.flat for i in meshes_tuple]).T

    nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)
    nn.fit(embedding)
    dists, neighs = nn.kneighbors(gridpoints_coordinates)

    # Isotropic gaussian kernel whose width scales with the grid step size.
    std = np.mean([(g[1] - g[0]) for g in grs])
    gaussian_w = normal.pdf(loc=0, scale=smooth * std, x=dists)
    total_p_mass = gaussian_w.sum(1)

    # (Removed a stray no-op expression statement that evaluated
    # ``gridpoints_coordinates`` without using the result.)
    return total_p_mass, gridpoints_coordinates
def get_gradient(value_on_grid):
    """
    Spatial gradient of a flattened square grid of values.

    ``value_on_grid`` holds n*n values in row-major order; the result is an
    (n*n, 2) array whose columns are the x- and y-derivatives at each point.
    """
    side = int(np.sqrt(value_on_grid.shape[0]))
    grid = value_on_grid.reshape(side, side)
    # np.gradient returns derivatives along axis 0 (y) then axis 1 (x).
    dy, dx = np.gradient(grid)
    return np.stack([dx.flatten(), dy.flatten()], axis=1)
def visualize_dev_flow(self, scale_for_pseudotime=30, s=10, s_grid=30):
    """
    Draw a five-panel overview of pseudotime and its gradient field.

    ``self`` is a trajectory-like object exposing the embedding, grid,
    masks, pseudotime and gradient attributes used below.
    """
    whole = self.embedding_whole
    subset = self.embedding
    mask = self.mass_filter
    mask_whole = self.mass_filter_whole
    grid = self.gridpoints_coordinates
    pt_raw = self.pseudotime
    pt_grid = self.pseudotime_on_grid
    grad = self.gradient

    fig, axes = plt.subplots(1, 5, figsize=[25, 5])

    def draw_cells(ax, colored=True):
        # Whole embedding in gray, optionally the subset colored by pseudotime.
        ax.scatter(whole[:, 0], whole[:, 1], c="lightgray", s=s)
        if colored:
            ax.scatter(subset[:, 0], subset[:, 1], c=pt_raw, cmap="rainbow", s=s)

    def draw_grid(ax):
        # Invisible layer for masked points, gray for whole-data grid,
        # pseudotime colors for the points of interest.
        ax.scatter(grid[mask, 0], grid[mask, 1], s=0)
        ax.scatter(grid[~mask_whole, 0], grid[~mask_whole, 1],
                   c="lightgray", s=s_grid)
        ax.scatter(grid[~mask, 0], grid[~mask, 1],
                   c=pt_grid[~mask], cmap="rainbow", s=s_grid)

    def draw_flow(ax):
        ax.quiver(grid[~mask, 0], grid[~mask, 1],
                  grad[~mask, 0], grad[~mask, 1],
                  scale=scale_for_pseudotime)

    # Panel 1: raw pseudotime on the cells.
    draw_cells(axes[0])
    axes[0].set_title("Pseudotime")

    # Panel 2: pseudotime projected onto the grid.
    draw_grid(axes[1])
    axes[1].set_title("Pseudotime on grid")

    # Panel 3: grid pseudotime plus its gradient field.
    draw_grid(axes[2])
    draw_flow(axes[2])
    axes[2].set_title("Gradient of pseudotime \n(=Development flow)")

    # Panel 4: gradient field over the gray embedding.
    draw_cells(axes[3], colored=False)
    draw_flow(axes[3])
    axes[3].set_title("Gradient of pseudotime \n(=Development flow)")

    # Panel 5: colored cells plus the gradient field.
    draw_cells(axes[4])
    draw_flow(axes[4])
    axes[4].set_title("Pseudotime + \nDevelopment flow")

    for ax_ in axes:
        ax_.axis("off")
| [
"numpy.sqrt",
"numpy.array",
"numpy.linalg.norm",
"numpy.gradient",
"numpy.arange",
"numpy.mean",
"numpy.where",
"numpy.max",
"scipy.stats.wilcoxon",
"numpy.linspace",
"numpy.dot",
"numpy.vstack",
"sklearn.neighbors.NearestNeighbors",
"numpy.min",
"pandas.DataFrame",
"numpy.meshgrid",
... | [((1398, 1560), 'pandas.DataFrame', 'pd.DataFrame', (["{'score': oracle_object.inner_product[~oracle_object.mass_filter],\n 'pseudotime': oracle_object.new_pseudotime[~oracle_object.mass_filter]}"], {}), "({'score': oracle_object.inner_product[~oracle_object.\n mass_filter], 'pseudotime': oracle_object.new_pseudotime[~oracle_object\n .mass_filter]})\n", (1410, 1560), True, 'import pandas as pd\n'), ((2779, 2815), 'numpy.arange', 'np.arange', (['min_', '(max_ + width)', 'width'], {}), '(min_, max_ + width, width)\n', (2788, 2815), True, 'import numpy as np\n'), ((10159, 10193), 'numpy.zeros_like', 'np.zeros_like', (['pseudotime_stack[0]'], {}), '(pseudotime_stack[0])\n', (10172, 10193), True, 'import numpy as np\n'), ((10221, 10255), 'numpy.zeros_like', 'np.zeros_like', (['pseudotime_stack[0]'], {}), '(pseudotime_stack[0])\n', (10234, 10255), True, 'import numpy as np\n'), ((10275, 10307), 'numpy.zeros_like', 'np.zeros_like', (['gradient_stack[0]'], {}), '(gradient_stack[0])\n', (10288, 10307), True, 'import numpy as np\n'), ((10329, 10361), 'numpy.zeros_like', 'np.zeros_like', (['gradient_stack[0]'], {}), '(gradient_stack[0])\n', (10342, 10361), True, 'import numpy as np\n'), ((12842, 12859), 'numpy.meshgrid', 'np.meshgrid', (['*grs'], {}), '(*grs)\n', (12853, 12859), True, 'import numpy as np\n'), ((12943, 12999), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'n_neighbors', 'n_jobs': 'n_jobs'}), '(n_neighbors=n_neighbors, n_jobs=n_jobs)\n', (12959, 12999), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((13091, 13128), 'numpy.mean', 'np.mean', (['[(g[1] - g[0]) for g in grs]'], {}), '([(g[1] - g[0]) for g in grs])\n', (13098, 13128), True, 'import numpy as np\n'), ((13178, 13224), 'scipy.stats.norm.pdf', 'normal.pdf', ([], {'loc': '(0)', 'scale': '(smooth * std)', 'x': 'dists'}), '(loc=0, scale=smooth * std, x=dists)\n', (13188, 13224), True, 'from scipy.stats import norm as normal\n'), ((13515, 13551), 
'numpy.gradient', 'np.gradient', (['value_on_grid_as_matrix'], {}), '(value_on_grid_as_matrix)\n', (13526, 13551), True, 'import numpy as np\n'), ((14072, 14107), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {'figsize': '[25, 5]'}), '(1, 5, figsize=[25, 5])\n', (14084, 14107), True, 'import matplotlib.pyplot as plt\n'), ((1697, 1746), 'numpy.digitize', 'np.digitize', (['inner_product_stats.pseudotime', 'bins'], {}), '(inner_product_stats.pseudotime, bins)\n', (1708, 1746), True, 'import numpy as np\n'), ((2122, 2165), 'scipy.stats.wilcoxon', 'wilcoxon', ([], {'x': 'pseudotime_', 'alternative': '"""less"""'}), "(x=pseudotime_, alternative='less')\n", (2130, 2165), False, 'from scipy.stats import wilcoxon\n'), ((11100, 11113), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (11107, 11113), True, 'import numpy as np\n'), ((12767, 12798), 'numpy.linspace', 'np.linspace', (['m', 'M', 'steps[dim_i]'], {}), '(m, M, steps[dim_i])\n', (12778, 12798), True, 'import numpy as np\n'), ((12889, 12930), 'numpy.vstack', 'np.vstack', (['[i.flat for i in meshes_tuple]'], {}), '([i.flat for i in meshes_tuple])\n', (12898, 12930), True, 'import numpy as np\n'), ((13411, 13442), 'numpy.sqrt', 'np.sqrt', (['value_on_grid.shape[0]'], {}), '(value_on_grid.shape[0])\n', (13418, 13442), True, 'import numpy as np\n'), ((3527, 3576), 'numpy.array', 'np.array', (["oracle_object.adata.obs['Stage'].values"], {}), "(oracle_object.adata.obs['Stage'].values)\n", (3535, 3576), True, 'import numpy as np\n'), ((5061, 5100), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {'ord': '(2)', 'axis': '(1)'}), '(gradient, ord=2, axis=1)\n', (5075, 5100), True, 'import numpy as np\n'), ((8845, 8884), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {'ord': '(2)', 'axis': '(1)'}), '(gradient, ord=2, axis=1)\n', (8859, 8884), True, 'import numpy as np\n'), ((11175, 11206), 'numpy.expand_dims', 'np.expand_dims', (['size_sq'], {'axis': '(1)'}), '(size_sq, axis=1)\n', (11189, 
11206), True, 'import numpy as np\n'), ((12475, 12502), 'numpy.min', 'np.min', (['embedding[:, dim_i]'], {}), '(embedding[:, dim_i])\n', (12481, 12502), True, 'import numpy as np\n'), ((12504, 12531), 'numpy.max', 'np.max', (['embedding[:, dim_i]'], {}), '(embedding[:, dim_i])\n', (12510, 12531), True, 'import numpy as np\n'), ((5349, 5361), 'numpy.dot', 'np.dot', (['i', 'j'], {}), '(i, j)\n', (5355, 5361), True, 'import numpy as np\n'), ((6925, 6976), 'numpy.where', 'np.where', (['(adata.obs[cluster_column_name] == cluster)'], {}), '(adata.obs[cluster_column_name] == cluster)\n', (6933, 6976), True, 'import numpy as np\n'), ((12702, 12715), 'numpy.abs', 'np.abs', (['(M - m)'], {}), '(M - m)\n', (12708, 12715), True, 'import numpy as np\n'), ((12740, 12753), 'numpy.abs', 'np.abs', (['(M - m)'], {}), '(M - m)\n', (12746, 12753), True, 'import numpy as np\n'), ((11047, 11068), 'numpy.power', 'np.power', (['gradient', '(2)'], {}), '(gradient, 2)\n', (11055, 11068), True, 'import numpy as np\n')] |
# Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
def run_model(model,
              use_cuda,
              num_iter,
              batch_size,
              seq_len,
              framework_name,
              thread_num=1):
    """
    Time ``num_iter`` calls of ``model`` and print a JSON benchmark record.

    CUDA runs are timed with cuda events (ms, converted to seconds); CPU
    runs use a wall-clock timer.  One warm-up call is made before timing.
    """
    import torch
    import contexttimer
    import json

    # Warm up once so lazy initialization is excluded from timing.
    model()

    if use_cuda:
        start_event = torch.cuda.Event(enable_timing=True)
        end_event = torch.cuda.Event(enable_timing=True)
        start_event.record()

    with contexttimer.Timer() as timer:
        for _ in range(num_iter):
            model()

    if use_cuda:
        end_event.record()
        torch.cuda.synchronize()
        elapsed = start_event.elapsed_time(end_event) / 1e3  # ms -> s
    else:
        elapsed = timer.elapsed

    qps = num_iter / elapsed
    print(
        json.dumps({
            "QPS": qps,
            "elapsed": elapsed,
            "n": num_iter,
            "batch_size": batch_size,
            "seq_len": seq_len,
            "framework": framework_name,
            "thread_num": thread_num,
        }))
def generate_onnx_model(model_name: str, filename: str, seq_len: int,
                        batch_size: int, backend: str):
    """
    Export a randomly initialized transformers model to an ONNX file.

    Parameters
    ----------
    model_name : one of "bert", "albert", "roberta".
    filename : path of the ONNX file to write.
    seq_len, batch_size : shape of the dummy input used for tracing.
    backend : "GPU" traces on cuda:0, anything else on cpu:0.

    Returns
    -------
    The model's vocabulary size (needed by the caller to build inputs).

    Raises
    ------
    ValueError
        For an unsupported ``model_name``.
    """
    import transformers
    import torch

    test_device = torch.device('cuda:0') if backend == "GPU" else torch.device(
        'cpu:0')
    torch.set_grad_enabled(False)
    if model_name == "bert":
        cfg = transformers.BertConfig()
        model = transformers.BertModel(cfg)
    elif model_name == "albert":
        cfg = transformers.AlbertConfig()
        model = transformers.AlbertModel(cfg)
    elif model_name == "roberta":
        cfg = transformers.RobertaConfig()
        model = transformers.RobertaModel(cfg)
    else:
        # BUG FIX: the original `raise (f"...")` raised a plain string, which
        # itself fails with "exceptions must derive from BaseException".
        raise ValueError(f"benchmark does not support {model_name}")
    model.eval()
    model.to(test_device)

    cfg = model.config  # type: transformers.BertConfig
    input_ids = torch.randint(low=0,
                              high=cfg.vocab_size - 1,
                              size=(batch_size, seq_len),
                              dtype=torch.long,
                              device=test_device)
    with open(filename, 'wb') as outf:
        torch.onnx.export(model=model, args=(input_ids, ), f=outf)
        outf.flush()
    return cfg.vocab_size
def onnxruntime_benchmark_creator(backend: str):
    """Return a benchmark function that runs a model on onnxruntime `backend`
    ("CPU" or "GPU") and prints a JSON record with QPS and timing."""
    def _impl_(model_name: str,
               seq_len: int,
               batch_size: int,
               n: int,
               num_threads: int = 1):
        import multiprocessing
        import os
        temp_fn = "/tmp/temp_onnx.model"
        # Export the model in a separate process so torch/transformers (and
        # any GPU memory they hold) are fully released before inference.
        p = multiprocessing.Pool(1)
        vocab_size = p.apply(generate_onnx_model,
                             args=(model_name, temp_fn, seq_len, batch_size,
                                   backend))
        p.close()
        import contexttimer
        import onnxruntime.backend
        import onnx
        import numpy
        import json
        if not onnxruntime.backend.supports_device(backend):
            raise RuntimeError(
                f"onnxruntime does not support {backend}, recompile it!")
        # NOTE(review): these env vars are set after onnxruntime is imported;
        # presumably they are still read at session creation below — confirm.
        os.environ['OMP_NUM_THREADS'] = str(num_threads)
        os.environ['MKL_NUM_THREADS'] = str(num_threads)
        model = onnx.load_model(f=temp_fn)
        model = onnxruntime.backend.prepare(
            model=model,
            device=backend,
            graph_optimization_level=onnxruntime.GraphOptimizationLevel.
            ORT_ENABLE_ALL)
        # Random token ids within the model's vocabulary.
        input_ids = numpy.random.randint(low=0,
                                         high=vocab_size - 1,
                                         size=(batch_size, seq_len),
                                         dtype=numpy.int64)
        # One warm-up run before timing.
        model.run(inputs=[input_ids])
        with contexttimer.Timer() as t:
            for _ in range(n):
                model.run(inputs=[input_ids])
        print(
            json.dumps({
                "QPS": n / t.elapsed,
                "elapsed": t.elapsed,
                "n": n,
                "batch_size": batch_size,
                "seq_len": seq_len,
                "framework": f"onnx_rt_{backend}",
                "n_threads": num_threads
            }))

    return _impl_
| [
"torch.cuda.Event",
"torch.onnx.export",
"onnx.load_model",
"json.dumps",
"transformers.BertModel",
"transformers.AlbertConfig",
"transformers.RobertaModel",
"torch.cuda.synchronize",
"torch.randint",
"numpy.random.randint",
"multiprocessing.Pool",
"transformers.AlbertModel",
"torch.set_grad... | [((2009, 2038), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (2031, 2038), False, 'import torch\n'), ((2584, 2699), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(cfg.vocab_size - 1)', 'size': '(batch_size, seq_len)', 'dtype': 'torch.long', 'device': 'test_device'}), '(low=0, high=cfg.vocab_size - 1, size=(batch_size, seq_len),\n dtype=torch.long, device=test_device)\n', (2597, 2699), False, 'import torch\n'), ((956, 992), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (972, 992), False, 'import torch\n'), ((1007, 1043), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (1023, 1043), False, 'import torch\n'), ((1077, 1097), 'contexttimer.Timer', 'contexttimer.Timer', ([], {}), '()\n', (1095, 1097), False, 'import contexttimer\n'), ((1288, 1312), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1310, 1312), False, 'import torch\n'), ((1462, 1635), 'json.dumps', 'json.dumps', (["{'QPS': qps, 'elapsed': time_consume, 'n': num_iter, 'batch_size':\n batch_size, 'seq_len': seq_len, 'framework': framework_name,\n 'thread_num': thread_num}"], {}), "({'QPS': qps, 'elapsed': time_consume, 'n': num_iter,\n 'batch_size': batch_size, 'seq_len': seq_len, 'framework':\n framework_name, 'thread_num': thread_num})\n", (1472, 1635), False, 'import json\n'), ((1926, 1948), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1938, 1948), False, 'import torch\n'), ((1974, 1995), 'torch.device', 'torch.device', (['"""cpu:0"""'], {}), "('cpu:0')\n", (1986, 1995), False, 'import torch\n'), ((2083, 2108), 'transformers.BertConfig', 'transformers.BertConfig', ([], {}), '()\n', (2106, 2108), False, 'import transformers\n'), ((2125, 2152), 'transformers.BertModel', 'transformers.BertModel', (['cfg'], {}), '(cfg)\n', (2147, 2152), False, 'import transformers\n'), ((2863, 
2920), 'torch.onnx.export', 'torch.onnx.export', ([], {'model': 'model', 'args': '(input_ids,)', 'f': 'outf'}), '(model=model, args=(input_ids,), f=outf)\n', (2880, 2920), False, 'import torch\n'), ((3276, 3299), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(1)'], {}), '(1)\n', (3296, 3299), False, 'import multiprocessing\n'), ((3913, 3939), 'onnx.load_model', 'onnx.load_model', ([], {'f': 'temp_fn'}), '(f=temp_fn)\n', (3928, 3939), False, 'import onnx\n'), ((4159, 4258), 'numpy.random.randint', 'numpy.random.randint', ([], {'low': '(0)', 'high': '(vocab_size - 1)', 'size': '(batch_size, seq_len)', 'dtype': 'numpy.int64'}), '(low=0, high=vocab_size - 1, size=(batch_size, seq_len),\n dtype=numpy.int64)\n', (4179, 4258), False, 'import numpy\n'), ((2200, 2227), 'transformers.AlbertConfig', 'transformers.AlbertConfig', ([], {}), '()\n', (2225, 2227), False, 'import transformers\n'), ((2244, 2273), 'transformers.AlbertModel', 'transformers.AlbertModel', (['cfg'], {}), '(cfg)\n', (2268, 2273), False, 'import transformers\n'), ((4430, 4450), 'contexttimer.Timer', 'contexttimer.Timer', ([], {}), '()\n', (4448, 4450), False, 'import contexttimer\n'), ((4562, 4741), 'json.dumps', 'json.dumps', (["{'QPS': n / t.elapsed, 'elapsed': t.elapsed, 'n': n, 'batch_size':\n batch_size, 'seq_len': seq_len, 'framework': f'onnx_rt_{backend}',\n 'n_threads': num_threads}"], {}), "({'QPS': n / t.elapsed, 'elapsed': t.elapsed, 'n': n,\n 'batch_size': batch_size, 'seq_len': seq_len, 'framework':\n f'onnx_rt_{backend}', 'n_threads': num_threads})\n", (4572, 4741), False, 'import json\n'), ((2322, 2350), 'transformers.RobertaConfig', 'transformers.RobertaConfig', ([], {}), '()\n', (2348, 2350), False, 'import transformers\n'), ((2367, 2397), 'transformers.RobertaModel', 'transformers.RobertaModel', (['cfg'], {}), '(cfg)\n', (2392, 2397), False, 'import transformers\n')] |
import gzip
import os
import unittest
import cPickle
import numpy as np
from convnet.layers import *
from convnet.net import ConvNet
from convnet.utils import arr_2d_to_3d, input_2d_to_3d, input_1d_to_3d, to_3d, y_to_3d, y_1d_to_3d
# noinspection PyPep8Naming
class ConvNetTest(unittest.TestCase):
    def setUp(self):
        """Runs before each test: tighten numpy printing and log the test name."""
        np.set_printoptions(precision=2, linewidth=120)
        # np.random.seed(10)
        print('################### %s ################### ' % self._testMethodName)
    def _test_basic(self):
        """Smoke test (disabled by the leading underscore): wire one of each
        layer type into a tiny net and check that fit() runs on zero inputs."""
        # Two all-zero 3x3x2 samples with 1x1x2 targets.
        X = [np.zeros((3, 3, 2)) for _ in xrange(2)]
        y = [np.zeros((1, 1, 2)) for _ in xrange(2)]
        net = ConvNet()
        net.setup_layers([
            InputLayer(InputLayerSettings(in_shape=X[0].shape)),
            ConvolutionalLayer(ConvolutionalLayerSettings(filter_size=2, filters_count=2, stride=1)),
            PoolingLayer(PoolingLayerSettings(filter_size=2, stride=1)),
            ReluLayer(ReluLayerSettings(activation='max')),
            FullConnectedLayer(FullConnectedLayerSettings(neurons_count=y[0].shape[-1])),
            ReluLayer(ReluLayerSettings(activation='sigmoid')),
        ])
        net.fit(X, y)
    def _test_crossings(self):
        """Disabled experiment: train a minimal conv net to tell full
        diagonal crossings apart from partial ones, then print predictions."""
        # Four 3x3 binary patterns; the first two contain a full or near-full
        # main diagonal, the last two an anti-diagonal fragment.
        X = to_3d([
            np.asarray([
                [1, 0, 0],
                [0, 1, 0],
                [0, 0, 1],
            ]),
            np.asarray([
                [0, 0, 0],
                [0, 1, 0],
                [0, 0, 1],
            ]),
            np.asarray([
                [0, 0, 1],
                [0, 1, 0],
                [1, 0, 0],
            ]),
            np.asarray([
                [0, 0, 1],
                [0, 1, 0],
                [0, 0, 0],
            ]),
        ])
        # One-hot class labels matching the two pattern groups above.
        Y = [
            np.asarray([1, 0]),
            np.asarray([1, 0]),
            np.asarray([0, 1]),
            np.asarray([0, 1]),
        ]
        net = ConvNet(iterations_count=1000, learning_rate=0.01)
        net.setup_layers([
            InputLayer(InputLayerSettings(in_shape=X[0].shape)),
            ConvolutionalLayer(ConvolutionalLayerSettings(filter_size=3, filters_count=2, stride=1)),
            ReluLayer(ReluLayerSettings(activation='max')),
            # PoolingLayer(PoolingLayerSettings(filter_size=1, stride=1)),
            # FullConnectedLayer(FullConnectedLayerSettings(neurons_count=Y[0].shape[-1])),
            # ReluLayer(ReluLayerSettings(activation='sigmoid')),
        ])
        net.fit(X, y_to_3d(Y))
        # net = ConvNet.load_net('/home/igor/Desktop/net.pkl')
        # Visual check only: print predicted vs. real labels.
        for x, y in zip(X, Y):
            h = net.predict(x)
            print("predicted = {}; \nreal = {}\n\n".format(h, y))
        pass
def transform_X(self, X_train):
X = []
for i in xrange(X_train.shape[0]):
x = X_train[i]
x = x.reshape((28, 28))[:, :, np.newaxis]
X.append(x)
return X
def transform_Y(self, Y_train):
Y = []
uniq = np.unique(Y_train)
labels_count = uniq.size
for i in xrange(Y_train.shape[0]):
y = Y_train[i]
y_arr = np.zeros(labels_count)
index = np.where(uniq == y)[0][0]
y_arr[index] = 1
Y.append(y_1d_to_3d(y_arr))
return Y
def get_examples(self, X_train, Y_train, labels=None, count=None):
if isinstance(count, int):
count = [count] * len(labels)
assert len(labels) == len(count)
X = None
Y = None
for i, label in enumerate(labels):
indices = np.where(Y_train == label)[0]
indices = indices[:count[i]]
if X is None:
X = X_train[indices]
else:
X = np.concatenate([X, X_train[indices]])
if Y is None:
Y = Y_train[indices]
else:
Y = np.concatenate([Y, Y_train[indices]])
return X, Y
def shuffle_in_unison_inplace(self, a, b):
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
    def test_mnist(self):
        """End-to-end LeNet-style training on a small MNIST slice.

        Loads mnist.pkl.gz, keeps up to 10 examples each of digits 0-3,
        trains for 10 iterations and prints training-set accuracy.
        """
        script_path = os.path.dirname(os.path.abspath(__file__))
        f = gzip.open(os.path.join(script_path, '../mnist/mnist.pkl.gz'), 'rb')
        # NOTE(review): `cPickle` means this module targets Python 2.
        train_set, valid_set, test_set = cPickle.load(f)
        f.close()
        X_train, Y_train = train_set
        # Restrict to digits 0-3 (10 each), shuffle, then reshape/encode.
        X_train, Y_train = self.get_examples(X_train, Y_train, labels=np.arange(0, 4), count=10)
        X_train, Y_train = self.shuffle_in_unison_inplace(X_train, Y_train)
        X_train = self.transform_X(X_train)
        Y_train = self.transform_Y(Y_train)
        net = ConvNet(iterations_count=10, batch_size=1, learning_rate=0.001, momentum=0.8, weight_decay=0.001)
        net.setup_layers([
            InputLayer(InputLayerSettings(in_shape=X_train[0].shape)),
            ConvolutionalLayer(ConvolutionalLayerSettings(filters_count=8, filter_size=5, stride=1, zero_padding=0)),
            ReluLayer(ReluLayerSettings(activation='max')),
            PoolingLayer(PoolingLayerSettings(filter_size=2, stride=2)),
            ConvolutionalLayer(ConvolutionalLayerSettings(filters_count=16, filter_size=5, stride=1, zero_padding=0)),
            ReluLayer(ReluLayerSettings(activation='max')),
            PoolingLayer(PoolingLayerSettings(filter_size=3, stride=3)),
            FullConnectedLayer(FullConnectedLayerSettings(neurons_count=Y_train[0].shape[-1], activation='sigmoid')),
            # ReluLayer(ReluLayerSettings(activation='sigmoid')),
        ])
        # Cap is far above the ~40 selected examples, so everything is used.
        examples_count = 100000
        net.fit(X_train[:examples_count], Y_train[:examples_count])
        # Accuracy is measured on the training data itself (sanity check only).
        matched = 0
        for x, y in zip(X_train[:examples_count], Y_train[:examples_count]):
            h = net.predict(x)
            h_res = h.argmax()
            y_res = y.argmax()
            print("predicted = {}; max = {}".format(h, h.argmax()))
            print("real = {}; max = {}".format(y, y.argmax()))
            print("\n")
            matched += int(h_res == y_res)
        print("Accuracy {}/{}".format(matched, len(X_train[:examples_count]))) | [
"numpy.unique",
"convnet.net.ConvNet",
"numpy.where",
"numpy.asarray",
"os.path.join",
"convnet.utils.y_1d_to_3d",
"numpy.zeros",
"numpy.concatenate",
"os.path.abspath",
"convnet.utils.y_to_3d",
"cPickle.load",
"numpy.arange",
"numpy.set_printoptions"
] | [((331, 378), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'linewidth': '(120)'}), '(precision=2, linewidth=120)\n', (350, 378), True, 'import numpy as np\n'), ((641, 650), 'convnet.net.ConvNet', 'ConvNet', ([], {}), '()\n', (648, 650), False, 'from convnet.net import ConvNet\n'), ((1884, 1934), 'convnet.net.ConvNet', 'ConvNet', ([], {'iterations_count': '(1000)', 'learning_rate': '(0.01)'}), '(iterations_count=1000, learning_rate=0.01)\n', (1891, 1934), False, 'from convnet.net import ConvNet\n'), ((2955, 2973), 'numpy.unique', 'np.unique', (['Y_train'], {}), '(Y_train)\n', (2964, 2973), True, 'import numpy as np\n'), ((4274, 4289), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (4286, 4289), False, 'import cPickle\n'), ((4623, 4725), 'convnet.net.ConvNet', 'ConvNet', ([], {'iterations_count': '(10)', 'batch_size': '(1)', 'learning_rate': '(0.001)', 'momentum': '(0.8)', 'weight_decay': '(0.001)'}), '(iterations_count=10, batch_size=1, learning_rate=0.001, momentum=\n 0.8, weight_decay=0.001)\n', (4630, 4725), False, 'from convnet.net import ConvNet\n'), ((533, 552), 'numpy.zeros', 'np.zeros', (['(3, 3, 2)'], {}), '((3, 3, 2))\n', (541, 552), True, 'import numpy as np\n'), ((586, 605), 'numpy.zeros', 'np.zeros', (['(1, 1, 2)'], {}), '((1, 1, 2))\n', (594, 605), True, 'import numpy as np\n'), ((1743, 1761), 'numpy.asarray', 'np.asarray', (['[1, 0]'], {}), '([1, 0])\n', (1753, 1761), True, 'import numpy as np\n'), ((1775, 1793), 'numpy.asarray', 'np.asarray', (['[1, 0]'], {}), '([1, 0])\n', (1785, 1793), True, 'import numpy as np\n'), ((1807, 1825), 'numpy.asarray', 'np.asarray', (['[0, 1]'], {}), '([0, 1])\n', (1817, 1825), True, 'import numpy as np\n'), ((1839, 1857), 'numpy.asarray', 'np.asarray', (['[0, 1]'], {}), '([0, 1])\n', (1849, 1857), True, 'import numpy as np\n'), ((2453, 2463), 'convnet.utils.y_to_3d', 'y_to_3d', (['Y'], {}), '(Y)\n', (2460, 2463), False, 'from convnet.utils import arr_2d_to_3d, input_2d_to_3d, 
input_1d_to_3d, to_3d, y_to_3d, y_1d_to_3d\n'), ((3097, 3119), 'numpy.zeros', 'np.zeros', (['labels_count'], {}), '(labels_count)\n', (3105, 3119), True, 'import numpy as np\n'), ((4126, 4151), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4141, 4151), False, 'import os\n'), ((4175, 4225), 'os.path.join', 'os.path.join', (['script_path', '"""../mnist/mnist.pkl.gz"""'], {}), "(script_path, '../mnist/mnist.pkl.gz')\n", (4187, 4225), False, 'import os\n'), ((1230, 1275), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (1240, 1275), True, 'import numpy as np\n'), ((1352, 1397), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[0, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (1362, 1397), True, 'import numpy as np\n'), ((1474, 1519), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1], [0, 1, 0], [1, 0, 0]]'], {}), '([[0, 0, 1], [0, 1, 0], [1, 0, 0]])\n', (1484, 1519), True, 'import numpy as np\n'), ((1596, 1641), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1], [0, 1, 0], [0, 0, 0]]'], {}), '([[0, 0, 1], [0, 1, 0], [0, 0, 0]])\n', (1606, 1641), True, 'import numpy as np\n'), ((3216, 3233), 'convnet.utils.y_1d_to_3d', 'y_1d_to_3d', (['y_arr'], {}), '(y_arr)\n', (3226, 3233), False, 'from convnet.utils import arr_2d_to_3d, input_2d_to_3d, input_1d_to_3d, to_3d, y_to_3d, y_1d_to_3d\n'), ((3542, 3568), 'numpy.where', 'np.where', (['(Y_train == label)'], {}), '(Y_train == label)\n', (3550, 3568), True, 'import numpy as np\n'), ((3714, 3751), 'numpy.concatenate', 'np.concatenate', (['[X, X_train[indices]]'], {}), '([X, X_train[indices]])\n', (3728, 3751), True, 'import numpy as np\n'), ((3854, 3891), 'numpy.concatenate', 'np.concatenate', (['[Y, Y_train[indices]]'], {}), '([Y, Y_train[indices]])\n', (3868, 3891), True, 'import numpy as np\n'), ((4417, 4432), 'numpy.arange', 'np.arange', (['(0)', '(4)'], {}), '(0, 4)\n', (4426, 4432), True, 'import numpy as np\n'), 
((3140, 3159), 'numpy.where', 'np.where', (['(uniq == y)'], {}), '(uniq == y)\n', (3148, 3159), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 16:27:12 2017
@author: xinruyue
"""
import pandas as pd
import numpy as np
import xlrd
import pickle
import os
def get_country():
    """Read the country list from 'country.txt' (one name per line).

    Returns:
        list[str]: country names in file order, without trailing newlines.
    """
    country = []
    # Fix: the original opened the file and never closed it; `with`
    # guarantees the handle is released even on error.
    with open('country.txt', 'r') as f:
        for line in f:
            country.append(line.strip('\n'))
    return country
#get F matrix
def get_f(df, country):
    """Build the trade-flow matrix F where F[i][j] holds the value for
    importer country[i] and exporter country[j] (columns 2, 4 and 8 of df)."""
    size = len(country)
    f_matrix = np.zeros((size, size))
    for _, row in df.iterrows():
        importer_idx = country.index(row[2])
        exporter_idx = country.index(row[4])
        f_matrix[importer_idx][exporter_idx] = row[8]
    return f_matrix
def processing(file1,y):
    """Split the yearly trade workbook `file1` into per-Sitc4 flow matrices.

    For every distinct 4-digit SITC code in the workbook, builds the
    importer/exporter matrix via get_f() and pickles it to '<y>_<code>.pkl'.
    """
    # get all data: concatenate every sheet of the workbook into one frame
    df = pd.DataFrame()
    book = xlrd.open_workbook(file1)
    for sheet in book.sheets():
        # NOTE(review): `sheetname=` is the pre-0.21 pandas spelling
        # (renamed to `sheet_name`) -- confirm the pinned pandas version.
        f = pd.read_excel(file1, sheetname=sheet.name)
        df = pd.concat([df, f], ignore_index=True)
    # remove 'world': drop rows whose importer or exporter is the
    # 'World' aggregate
    ex_list1 = list(df.Importer)
    ex_list2 = list(df.Exporter)
    ex_list1 = list(filter(lambda i: i != 'World', ex_list1))
    ex_list2 = list(filter(lambda i: i != 'World', ex_list2))
    df = df[df.Importer.isin(ex_list1)]
    df = df[df.Exporter.isin(ex_list2)]
    '''
    # get country
    country = df['Importer'].append(df['Exporter'])
    country = country.drop_duplicates()
    country = country.sort_values()
    country = list(country)
    '''
    # The country universe comes from country.txt, not from the data
    # (the derivation above was disabled).
    country = get_country()
    # get each products' Sitc4: bucket the zero-padded 4-digit codes by
    # their leading digit (0-9), keeping each code once per bucket
    sitc = list(df.Sitc4)
    sitc_dic = {}
    for i in range(10):
        i = str(i)
        s_code = []
        for each in sitc:
            each = str(each)
            # each = each.encode('utf-8')
            # print(type(each))
            if len(each) != 4:
                each = '%04d'%(int(each))
            if each[0] == i:
                if each not in s_code:
                    s_code.append(each)
        sitc_dic[i] = s_code
    # One pickled flow matrix per product code: '<year>_<code>.pkl'
    for key,value in sitc_dic.items():
        for e_val in value:
            val = []
            val.append(e_val)
            pro_df = df[df.Sitc4.isin(val)]
            f_mat = get_f(pro_df,country)
            fil = open(y+'_'+str(e_val)+'.pkl','wb')
            pickle.dump(f_mat,fil)
            fil.close()
            print(e_val)
if __name__ == '__main__':
    # Process every yearly workbook under ./data; the year label is
    # recovered from the digits embedded in each file name.
    file_path = './data'
    files = os.listdir(file_path)
    for each in files:
        year = ''.join(list(filter(str.isdigit,each)))
        f = os.path.join(file_path,each)
        processing(f,year)
        print(year)
print(year)
| [
"os.listdir",
"pickle.dump",
"xlrd.open_workbook",
"os.path.join",
"numpy.zeros",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.concat"
] | [((431, 453), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (439, 453), True, 'import numpy as np\n'), ((721, 735), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (733, 735), True, 'import pandas as pd\n'), ((747, 772), 'xlrd.open_workbook', 'xlrd.open_workbook', (['file1'], {}), '(file1)\n', (765, 772), False, 'import xlrd\n'), ((2309, 2330), 'os.listdir', 'os.listdir', (['file_path'], {}), '(file_path)\n', (2319, 2330), False, 'import os\n'), ((817, 859), 'pandas.read_excel', 'pd.read_excel', (['file1'], {'sheetname': 'sheet.name'}), '(file1, sheetname=sheet.name)\n', (830, 859), True, 'import pandas as pd\n'), ((873, 910), 'pandas.concat', 'pd.concat', (['[df, f]'], {'ignore_index': '(True)'}), '([df, f], ignore_index=True)\n', (882, 910), True, 'import pandas as pd\n'), ((2421, 2450), 'os.path.join', 'os.path.join', (['file_path', 'each'], {}), '(file_path, each)\n', (2433, 2450), False, 'import os\n'), ((2172, 2195), 'pickle.dump', 'pickle.dump', (['f_mat', 'fil'], {}), '(f_mat, fil)\n', (2183, 2195), False, 'import pickle\n')] |
import sys
if sys.version_info[0] < 3:
import Tkinter as Tk
else:
import tkinter as Tk
import numpy as np
import matplotlib.colors as colors
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
class DisplayXRay:
    """Tk window showing an X-ray image (top) and its intensity histogram
    (bottom, log-scaled), with a matplotlib toolbar and a Quit button."""

    def __init__(self, root):
        # NOTE(review): the `root` argument is ignored; a new Tk root is
        # created here instead -- confirm this is intentional.
        self.root = Tk.Tk()
        self.root.wm_title("X-ray View")
        self.x_ray_image = 0;
        # Widgets (canvas/toolbar/button) are built lazily on first draw().
        self.has_been_drawn = False;

    def draw(self, x_ray_image):
        """Render `x_ray_image`; on the first call also build the Tk widgets,
        on later calls clear and redraw the existing figure."""
        self.x_ray_image = np.copy(x_ray_image);
        if not self.has_been_drawn:
            self.fig = Figure(figsize=(5, 8), dpi=100)
        #self.x_ray_image = np.multiply(np.divide(np.subtract(x_ray_image, self.x_ray_image.min()),
        # self.x_ray_image.max() - self.x_ray_image.min()), 255);
        if self.has_been_drawn:
            print("redraw")
            self.fig.clear();
        # Map the image's own min/max onto the colormap range.
        norm = colors.Normalize(vmin=self.x_ray_image.min(),vmax=self.x_ray_image.max())
        ax = self.fig.add_subplot(211)
        self.im_plot1 = ax.imshow(self.x_ray_image, norm=norm, cmap="PuBu_r");
        self.fig.colorbar(self.im_plot1, ax=ax, extend='max');
        ax.set_title('X-ray image');
        ax = self.fig.add_subplot(212)
        ax.hist(self.x_ray_image.ravel(), bins=256, density=True, facecolor='g', alpha=0.75)
        ax.set_yscale("log")
        ax.set_title("Intensity histogram")
        ax.set_xlabel("Intensity")
        ax.set_ylabel("Frequency")
        if not self.has_been_drawn:
            #ax.title("Intensity histogram of X-ray image");
            # a tk.DrawingArea
            self.canvas = FigureCanvasTkAgg(self.fig, master=self.root)
            self.toolbar = NavigationToolbar2Tk(self.canvas, self.root)
            self.toolbar.update()
            self.canvas.mpl_connect('key_press_event', self.on_key_event)
            self.button = Tk.Button(master=self.root, text='Quit', command=self._quit)
            self.has_been_drawn = True;
        #else:
        #self.im_plot.set_data(self.x_ray_image)
        # Repaint and (re-)pack widgets on every call.
        self.canvas.draw()
        self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        self.button.pack(side=Tk.BOTTOM)
        #self.fig.canvas.draw()
        #self.fig.canvas.flush_events()

    def on_key_event(self, event):
        # Forward key presses to matplotlib's default key bindings.
        print('you pressed %s' % event.key)
        key_press_handler(event, self.canvas, self.toolbar)

    def _quit(self):
        self.root.quit() # stops mainloop
        self.root.destroy() # this is necessary on Windows to prevent
| [
"numpy.copy",
"matplotlib.backends.backend_tkagg.NavigationToolbar2Tk",
"matplotlib.figure.Figure",
"tkinter.Button",
"tkinter.Tk",
"matplotlib.backend_bases.key_press_handler",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
] | [((445, 452), 'tkinter.Tk', 'Tk.Tk', ([], {}), '()\n', (450, 452), True, 'import tkinter as Tk\n'), ((624, 644), 'numpy.copy', 'np.copy', (['x_ray_image'], {}), '(x_ray_image)\n', (631, 644), True, 'import numpy as np\n'), ((2549, 2600), 'matplotlib.backend_bases.key_press_handler', 'key_press_handler', (['event', 'self.canvas', 'self.toolbar'], {}), '(event, self.canvas, self.toolbar)\n', (2566, 2600), False, 'from matplotlib.backend_bases import key_press_handler\n'), ((706, 737), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(5, 8)', 'dpi': '(100)'}), '(figsize=(5, 8), dpi=100)\n', (712, 737), False, 'from matplotlib.figure import Figure\n'), ((1744, 1789), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['self.fig'], {'master': 'self.root'}), '(self.fig, master=self.root)\n', (1761, 1789), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((1818, 1862), 'matplotlib.backends.backend_tkagg.NavigationToolbar2Tk', 'NavigationToolbar2Tk', (['self.canvas', 'self.root'], {}), '(self.canvas, self.root)\n', (1838, 1862), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((1999, 2059), 'tkinter.Button', 'Tk.Button', ([], {'master': 'self.root', 'text': '"""Quit"""', 'command': 'self._quit'}), "(master=self.root, text='Quit', command=self._quit)\n", (2008, 2059), True, 'import tkinter as Tk\n')] |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TPU Embeddings mid level API on TPU."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.platform import test
from tensorflow.python.tpu.tests import tpu_embedding_v2_correctness_base_test
class TPUEmbeddingCorrectnessTest(
    tpu_embedding_v2_correctness_base_test.TPUEmbeddingCorrectnessBaseTest):
  """Dense-input lookup correctness for the TPU embedding mid level API."""

  @parameterized.parameters([True, False])
  def test_dense_lookup(self, is_high_dimensional):
    """Dequeued activations must match direct numpy gathers from the tables."""
    strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    if is_high_dimensional:
      dataset = self._create_high_dimensional_dense_dataset(strategy)
    else:
      dataset = self._create_dense_dataset(strategy)
    dist = strategy.experimental_distribute_dataset(
        dataset,
        options=distribute_lib.InputOptions(experimental_fetch_to_device=False))
    dist_iter = iter(dist)

    @def_function.function
    def test_fn():
      def step():
        return mid_level_api.dequeue()
      # Inference path: enqueue with training=False, then dequeue per replica.
      mid_level_api.enqueue(next(dist_iter), training=False)
      return strategy.run(step)

    # Run model.
    shard_out_val = test_fn()
    shard0 = (self._unpack(strategy, shard_out_val[0]),
              self._unpack(strategy, shard_out_val[1]),
              self._unpack(strategy, shard_out_val[2]))
    # embedding_values is a linear list, so we reshape to match the correct
    # shape of the corresponding table before performing the lookup.
    numpy_videos = np.reshape(self.embedding_values, (8, 4))
    numpy_users = np.reshape(self.embedding_values, (16, 2))
    repeat_batch_num = strategy.num_replicas_in_sync // 2
    golden = (
        (numpy_videos[self.feature_watched_values[:self.data_batch_size] *
                      repeat_batch_num],
         numpy_videos[self.feature_favorited_values[:self.data_batch_size] *
                      repeat_batch_num],
         numpy_users[self.feature_friends_values[:self.data_batch_size] *
                     repeat_batch_num]))
    if is_high_dimensional:
      # High-dimensional features are (batch, batch)-shaped lookups, so the
      # expected gather is reshaped accordingly.
      dense_size = self.data_batch_size * self.data_batch_size
      golden = ((
          numpy_videos[self.feature_watched_values_high_dimensional[:dense_size]
                       * repeat_batch_num].reshape(
                           self.data_batch_size * repeat_batch_num,
                           self.data_batch_size, -1),
          numpy_videos[
              self.feature_favorited_values_high_dimensional[:dense_size] *
              repeat_batch_num].reshape(self.data_batch_size * repeat_batch_num,
                                        self.data_batch_size, -1),
          numpy_users[self.feature_friends_values_high_dimensional[:dense_size]
                      * repeat_batch_num].reshape(
                          self.data_batch_size * repeat_batch_num,
                          self.data_batch_size, -1)))
    self.assertAllClose(shard0, golden)
if __name__ == '__main__':
  # These tests are written against TF2 semantics.
  v2_compat.enable_v2_behavior()
  test.main()
| [
"tensorflow.python.compat.v2_compat.enable_v2_behavior",
"numpy.reshape",
"tensorflow.python.distribute.distribute_lib.InputOptions",
"absl.testing.parameterized.parameters",
"tensorflow.python.platform.test.main"
] | [((1194, 1233), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['[True, False]'], {}), '([True, False])\n', (1218, 1233), False, 'from absl.testing import parameterized\n'), ((3744, 3774), 'tensorflow.python.compat.v2_compat.enable_v2_behavior', 'v2_compat.enable_v2_behavior', ([], {}), '()\n', (3772, 3774), False, 'from tensorflow.python.compat import v2_compat\n'), ((3777, 3788), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (3786, 3788), False, 'from tensorflow.python.platform import test\n'), ((2282, 2323), 'numpy.reshape', 'np.reshape', (['self.embedding_values', '(8, 4)'], {}), '(self.embedding_values, (8, 4))\n', (2292, 2323), True, 'import numpy as np\n'), ((2342, 2384), 'numpy.reshape', 'np.reshape', (['self.embedding_values', '(16, 2)'], {}), '(self.embedding_values, (16, 2))\n', (2352, 2384), True, 'import numpy as np\n'), ((1610, 1673), 'tensorflow.python.distribute.distribute_lib.InputOptions', 'distribute_lib.InputOptions', ([], {'experimental_fetch_to_device': '(False)'}), '(experimental_fetch_to_device=False)\n', (1637, 1673), False, 'from tensorflow.python.distribute import distribute_lib\n')] |
import numpy as np
import nutszebra_data_augmentation_picture
from functools import wraps
da = nutszebra_data_augmentation_picture.DataAugmentationPicture()
def reset(func):
    """Decorator that invokes the shared ``da`` pipeline's ``__call__``
    (with no arguments) before running ``func``."""
    @wraps(func)
    def inner(self, *args, **kwargs):
        da()
        return func(self, *args, **kwargs)
    return inner
class DataAugmentationCifar10NormalizeSmall(object):
    """32x32 CIFAR-10 policy on the shared `da` pipeline."""

    @staticmethod
    @reset
    def train(img):
        """Random resize/crop/cutout/flip plus normalization for training."""
        step = da(img).convert_to_image_format(1.0)
        step = step.resize_image_randomly(1.0, size_range=(32, 36))
        step = step.crop_picture_randomly(1.0, sizes=(32, 32))
        step = step.cutout(0.5, sizes=(16, 16))
        step = step.normalize_picture(1.0, value=10.)
        step = step.horizontal_flipping(0.5)
        step.convert_to_chainer_format(1.0)
        return da.x, da.info

    @staticmethod
    @reset
    def test(img):
        """Deterministic 32x32 bilinear resize plus normalization."""
        step = da(img).convert_to_image_format(1.0)
        step = step.resize_image_randomly(1.0, size_range=(32, 32), interpolation='bilinear')
        step = step.normalize_picture(1.0, value=10.)
        step.convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DataAugmentationCifar10NormalizeMiddle(object):
    """64x64 CIFAR-10 policy: random resize/crop/cutout/flip for train,
    fixed 64x64 bilinear resize for test; both normalize with value=10."""
    @staticmethod
    @reset
    def train(img):
        da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(64, 68)).crop_picture_randomly(1.0, sizes=(64, 64)).cutout(0.5, sizes=(32, 32)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(64, 64), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DataAugmentationCifar10NormalizeBig(object):
    """128x128 CIFAR-10 policy: random resize/crop/cutout/flip for train,
    fixed 128x128 bilinear resize for test; both normalize with value=10."""
    @staticmethod
    @reset
    def train(img):
        da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(128, 132)).crop_picture_randomly(1.0, sizes=(128, 128)).cutout(0.5, sizes=(64, 64)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(128, 128), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DataAugmentationCifar10NormalizeBigger(object):
    """224x224 policy (ImageNet-style crop from a 256-512 random resize).

    NOTE(review): unlike the other Cifar10 classes this one calls
    `da.convert_to_image_format(img)` instead of `da(img).convert_to_image_format(1.0)`
    -- confirm both entry points are equivalent.
    """
    @staticmethod
    @reset
    def train(img):
        da.convert_to_image_format(img).resize_image_randomly(1.0, size_range=(256, 512)).crop_picture_randomly(1.0, sizes=(224, 224)).cutout(0.5, sizes=(112, 112)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da.convert_to_image_format(img).resize_image_randomly(1.0, size_range=(384, 384), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DataAugmentationCifar10NormalizeHuge(object):
    """299x299 policy (Inception-style input size): random resize/crop/
    cutout/flip for train, fixed 406x406 bilinear resize for test."""
    @staticmethod
    @reset
    def train(img):
        da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(299, 512)).crop_picture_randomly(1.0, sizes=(299, 299)).cutout(0.5, sizes=(114, 114)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da(img).convert_to_image_format(1.0).resize_image_randomly(1.0, size_range=(406, 406), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DataAugmentationNormalizeSmall(object):
    """32x32 file-based policy: loads from disk via `load_picture` and,
    unlike the Cifar10 variants, applies no cutout."""
    @staticmethod
    @reset
    def train(img):
        da.load_picture(img).resize_image_randomly(1.0, size_range=(32, 36)).crop_picture_randomly(1.0, sizes=(32, 32)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da.load_picture(img).resize_image_randomly(1.0, size_range=(32, 32), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DataAugmentationNormalizeMiddle(object):
    """64x64 file-based policy: random resize/crop/flip for train,
    fixed 64x64 bilinear resize for test; no cutout."""
    @staticmethod
    @reset
    def train(img):
        da.load_picture(img).resize_image_randomly(1.0, size_range=(64, 68)).crop_picture_randomly(1.0, sizes=(64, 64)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da.load_picture(img).resize_image_randomly(1.0, size_range=(64, 64), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DataAugmentationNormalizeBig(object):
    """128x128 file-based policy; no cutout.

    NOTE(review): the train resize lower bound is 129 here, while the
    Cifar10 'Big' variant uses 128 -- confirm the asymmetry is intended.
    """
    @staticmethod
    @reset
    def train(img):
        da.load_picture(img).resize_image_randomly(1.0, size_range=(129, 132)).crop_picture_randomly(1.0, sizes=(128, 128)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da.load_picture(img).resize_image_randomly(1.0, size_range=(128, 128), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DataAugmentationNormalizeBigger(object):
    """224x224 file-based policy; converts grayscale inputs to RGB first."""
    @staticmethod
    @reset
    def train(img):
        da.load_picture(img).gray_to_rgb(1.0).resize_image_randomly(1.0, size_range=(256, 512)).crop_picture_randomly(1.0, sizes=(224, 224)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da.load_picture(img).gray_to_rgb(1.0).resize_image_randomly(1.0, size_range=(384, 384), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DataAugmentationNormalizeHuge(object):
    """299x299 file-based policy: random resize/crop/flip for train,
    fixed 406x406 bilinear resize for test; no cutout."""
    @staticmethod
    @reset
    def train(img):
        da.load_picture(img).resize_image_randomly(1.0, size_range=(299, 512)).crop_picture_randomly(1.0, sizes=(299, 299)).normalize_picture(1.0, value=10.).horizontal_flipping(0.5).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da.load_picture(img).resize_image_randomly(1.0, size_range=(406, 406), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
return da.x, da.info
class DoNothing(object):
    """Pass-through policy: returns the input unchanged with info=None.

    Still decorated with @reset so the shared `da` pipeline is re-invoked.
    """
    @staticmethod
    @reset
    def train(img):
        return img, None
    @staticmethod
    @reset
    def test(img):
        return img, None
class Ndim(object):
    """Coerce inputs to numpy arrays with `ndim` dimensions by prepending
    singleton axes (arrays already at or above `ndim` pass through)."""

    def __init__(self, ndim=3):
        # Target dimensionality; default 3 suits H x W x C image tensors.
        self.ndim = ndim

    def _with_ndim(self, img):
        """Shared implementation for train()/test(): returns (array, None)."""
        img = np.array(img)
        if not img.ndim == self.ndim:
            diff = self.ndim - img.ndim
            # For diff <= 0 this reshapes to the existing shape (no-op).
            img = np.reshape(img, (1,) * diff + img.shape)
        return img, None

    def train(self, img):
        """Training-time transform; second element (info) is always None."""
        # Fix: train() and test() previously duplicated identical bodies;
        # both now delegate to one private helper.
        return self._with_ndim(img)

    def test(self, img):
        """Evaluation-time transform: identical to train()."""
        return self._with_ndim(img)
class DataAugmentationNormalizeBigOneChannel(object):
    """128x128 single-channel policy: loads with ndim=2 and applies no
    horizontal flip (unlike the RGB 'Big' variant)."""
    @staticmethod
    @reset
    def train(img):
        da.load_picture(img, ndim=2).resize_image_randomly(1.0, size_range=(129, 132)).crop_picture_randomly(1.0, sizes=(128, 128)).normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
    @staticmethod
    @reset
    def test(img):
        da.load_picture(img, ndim=2).resize_image_randomly(1.0, size_range=(128, 128), interpolation='bilinear').normalize_picture(1.0, value=10.).convert_to_chainer_format(1.0)
        return da.x, da.info
| [
"functools.wraps",
"numpy.array",
"numpy.reshape",
"nutszebra_data_augmentation_picture.DataAugmentationPicture"
] | [((95, 156), 'nutszebra_data_augmentation_picture.DataAugmentationPicture', 'nutszebra_data_augmentation_picture.DataAugmentationPicture', ([], {}), '()\n', (154, 156), False, 'import nutszebra_data_augmentation_picture\n'), ((181, 192), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (186, 192), False, 'from functools import wraps\n'), ((6832, 6845), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (6840, 6845), True, 'import numpy as np\n'), ((7048, 7061), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (7056, 7061), True, 'import numpy as np\n'), ((6942, 6982), 'numpy.reshape', 'np.reshape', (['img', '((1,) * diff + img.shape)'], {}), '(img, (1,) * diff + img.shape)\n', (6952, 6982), True, 'import numpy as np\n'), ((7158, 7198), 'numpy.reshape', 'np.reshape', (['img', '((1,) * diff + img.shape)'], {}), '(img, (1,) * diff + img.shape)\n', (7168, 7198), True, 'import numpy as np\n')] |
from DeepJetCore.TrainData import TrainData, fileTimeOut
from DeepJetCore import SimpleArray
import numpy as np
import uproot3 as uproot
import ROOT
import os
import pickle
import gzip
import pandas as pd
class TrainData_ild(TrainData):
    def __init__(self):
        """Plain constructor: only initializes the DeepJetCore TrainData base."""
        TrainData.__init__(self)
        #don't use for now
        #self.input_names=["input_hits","input_row_splits"]
    ######### helper functions for ragged interface
    ##### might be moved to DJC soon?, for now lives here
    def createSelection(self, jaggedarr):
        # create/read a jagged array
        # with selects for every event
        # Stub: no selection logic implemented yet.
        pass
    def branchToFlatArray(self, b, returnRowSplits=False, selectmask=None, is3d=None):
        """Flatten a uproot branch into a hit-wise column array.

        b: uproot branch (jagged per event unless `is3d`);
        returnRowSplits: also build and return per-event row-split offsets;
        selectmask: optional per-hit boolean selection;
        is3d: events already carry a fixed extra dimension and are
        concatenated along axis 0 instead of using jagged `.content`.
        """
        a = b.array()
        nbatch = a.shape[0]
        if is3d:
            # NOTE(review): the loop variable shadows the branch argument `b`.
            allba=[]
            for b in range(nbatch):
                ba = np.array(a[b])
                allba.append(ba)
            a = np.concatenate(allba,axis=0)
            print(a.shape)
        if selectmask is not None:
            if is3d:
                a = a[selectmask.flatten()]
            else:
                a = a[selectmask]
        #use select(flattened) to select
        contentarr=None
        if is3d is None:
            # Jagged branch: flat hit content plus a trailing feature axis.
            contentarr = a.content
            contentarr = np.expand_dims(contentarr, axis=1)
        else:
            # NOTE(review): is3d=False also lands here (`is None` vs the
            # truthiness test above) -- confirm callers only pass None/True.
            contentarr=a
        if not returnRowSplits:
            return np.array(contentarr,dtype='float32')
        nevents = a.shape[0]
        rowsplits = [0]
        max_per_rs=0
        #not super fast but ok given there aren't many events per file
        for i in range(nevents):
            #make a[i] np array
            #get select[i] -> the truth mask
            #apply, then fill RS
            if selectmask is None:
                rowsplits.append(rowsplits[-1] + a[i].shape[0])
            else:
                select = selectmask[i]
                # Each event contributes as many rows as selected hits.
                nonzero = np.count_nonzero(select)
                if nonzero > max_per_rs:
                    max_per_rs=nonzero
                rowsplits.append(rowsplits[-1] + nonzero)
        rowsplits = np.array(rowsplits, dtype='int64')
        print('mean hits per rs', contentarr.shape[0]/rowsplits.shape[0], ' max hits per rs: ',max_per_rs)
        # NOTE(review): this path reads a.content, which only exists for the
        # jagged (is3d falsy) case -- confirm returnRowSplits is never
        # combined with is3d=True.
        return np.expand_dims(a.content, axis=1),np.array(rowsplits, dtype='int64')
    def fileIsValid(self, filename):
        """Return True if `filename` opens in both uproot and ROOT and its
        SLCIOConverted tree holds at least one entry."""
        try:
            fileTimeOut(filename, 2)
            tree = uproot.open(filename)["SLCIOConverted"]
            f=ROOT.TFile.Open(filename)
            t=f.Get("SLCIOConverted")
            if t.GetEntries() < 1:
                raise ValueError("")
        except Exception as e:
            # Any failure (timeout, unreadable file, missing/empty tree)
            # marks the file invalid instead of propagating.
            print(e)
            return False
        return True
    def convertFromSourceFile(self, filename, weighterobjects, istraining, treename="SLCIOConverted"):
        """Convert one converted-SLCIO ROOT file into DeepJetCore SimpleArrays.

        Returns ([features + truth SimpleArrays], [t_rest], []) in the layout
        interpretAllModelInputs() expects.
        """
        fileTimeOut(filename, 10)#10 seconds for eos to recover
        tree = uproot.open(filename)[treename]
        nevents = tree.numentries
        selection=None
        # Hit features; row splits come from the energy branch.
        hit_energy , rs = self.branchToFlatArray(tree["energy"], True,selection)
        hit_x = self.branchToFlatArray(tree["positionX"], False,selection)
        hit_y = self.branchToFlatArray(tree["positionY"], False,selection)
        hit_z = self.branchToFlatArray(tree["positionZ"], False,selection)
        # Per-hit truth: index and energy of the highest-energy particle.
        hit_ass_truth_idx = self.branchToFlatArray(tree["maxE_particle_index"], False,selection)
        hit_ass_truth_energy = self.branchToFlatArray(tree["maxE_particle_energy"], False,selection)
        #not used right now
        hit_ass_truth_pX = self.branchToFlatArray(tree["maxE_particle_pX"], False,selection)
        hit_ass_truth_pY = self.branchToFlatArray(tree["maxE_particle_pY"], False,selection)
        hit_ass_truth_pZ = self.branchToFlatArray(tree["maxE_particle_pZ"], False,selection)
        features = np.concatenate([
            hit_energy,
            hit_x ,
            hit_y,
            hit_z
            ], axis=-1)
        farr = SimpleArray(features,rs,name="features")
        t_idxarr = SimpleArray(hit_ass_truth_idx,rs,name="t_idx")
        t_energyarr = SimpleArray(hit_ass_truth_energy,rs,name="t_energy")
        # Zero-filled placeholders keep the array layout compatible with
        # interfaces that expect these truth quantities.
        zeros = np.zeros_like(hit_ass_truth_energy)
        #just for compatibility
        t_posarr = SimpleArray(zeros,rs,name="t_pos")
        t_time = SimpleArray(zeros,rs,name="t_time")
        t_pid = SimpleArray(zeros,rs,name="t_pid") #this would need some massaging so we can't use the PID directly
        t_spectator = SimpleArray(zeros,rs,name="t_spectator")
        t_fully_contained = SimpleArray(zeros,rs,name="t_fully_contained")
        t_rest = SimpleArray(zeros,rs,name="t_rest") #breaks with old plotting but needs to be done at some point
        return [farr, t_idxarr, t_energyarr, t_posarr, t_time, t_pid, t_spectator, t_fully_contained],[t_rest], []
def createFullDict(self, f_arraylist):
farr, rs, t_idxarr,_, t_energyarr,_, t_posarr,_, t_time,_, t_pid,_, t_spectator,_, t_fully_contained,_ = f_arraylist
d={
'hit_energy': farr[:,0:1],
'hit_x': farr[:,1:2],
'hit_y': farr[:,2:3],
'hit_z': farr[:,3:4],
't_idx': t_idxarr,
't_energy': t_energyarr
}
return d, rs
    def createPandasDataFrame(self, eventno=-1):
        """Build a flat pandas DataFrame of hit features plus truth columns.

        eventno >= 0 skims to that single event and returns just the frame;
        otherwise returns (frame, row_splits) for all events.
        """
        #since this is only needed occationally
        if self.nElements() <= eventno:
            raise IndexError("Event wrongly selected")
        # Work on a copy so skimming does not mutate this instance.
        tdc = self.copy()
        if eventno>=0:
            tdc.skim(eventno)
        d, rs = self.createFullDict(tdc.transferFeatureListToNumpy(False))
        # log(E+1) keeps zero-energy hits finite.
        d['hit_log_energy'] = np.log(d['hit_energy']+1)
        #and a continuous truth index
        allarr = []
        for k in d:
            allarr.append(d[k])
        allarr = np.concatenate(allarr,axis=1)
        frame = pd.DataFrame (allarr, columns = [k for k in d])
        if eventno>=0:
            return frame
        else:
            return frame, rs
def interpretAllModelInputs(self, ilist):
'''
input: the full list of keras inputs
returns:
- rechit feature array
- t_idxarr
- t_energyarr
- t_posarr
- t_time
- t_pid
- t_spectator
- t_fully_contained
- row_splits
(for copy-paste: feat, t_idx, t_energy, t_pos, t_time, t_pid, t_spectator ,t_fully_contained, row_splits)
'''
return ilist[0], ilist[2], ilist[4], ilist[6], ilist[8], ilist[10], ilist[12], ilist[14], ilist[1]
def writeOutPrediction(self, predicted, features, truth, weights, outfilename, inputfile):
    """Pickle predictions, features and truth into one gzipped file.

    The output path is ``outfilename`` with its extension replaced by
    ``.bin.gz``. ``weights`` and ``inputfile`` are accepted for interface
    compatibility but are not stored.
    """
    base, _ = os.path.splitext(outfilename)
    outfilename = base + '.bin.gz'
    # print("hello", outfilename, inputfile)
    payload = {
        'predicted': predicted,
        'features': features,
        'truth': truth,
    }
    print("Writing to ", outfilename)
    with gzip.open(outfilename, "wb") as fout:
        pickle.dump(payload, fout)
    print("Done")
def readPredicted(self, predfile):
    """Load and return the prediction dict stored in a gzipped pickle file."""
    with gzip.open(predfile) as fin:
        return pickle.load(fin)
| [
"pickle.dump",
"gzip.open",
"uproot3.open",
"DeepJetCore.TrainData.TrainData.__init__",
"numpy.log",
"pickle.load",
"os.path.splitext",
"numpy.count_nonzero",
"numpy.array",
"DeepJetCore.SimpleArray",
"DeepJetCore.TrainData.fileTimeOut",
"numpy.concatenate",
"numpy.expand_dims",
"pandas.Da... | [((276, 300), 'DeepJetCore.TrainData.TrainData.__init__', 'TrainData.__init__', (['self'], {}), '(self)\n', (294, 300), False, 'from DeepJetCore.TrainData import TrainData, fileTimeOut\n'), ((2215, 2249), 'numpy.array', 'np.array', (['rowsplits'], {'dtype': '"""int64"""'}), "(rowsplits, dtype='int64')\n", (2223, 2249), True, 'import numpy as np\n'), ((2967, 2992), 'DeepJetCore.TrainData.fileTimeOut', 'fileTimeOut', (['filename', '(10)'], {}), '(filename, 10)\n', (2978, 2992), False, 'from DeepJetCore.TrainData import TrainData, fileTimeOut\n'), ((4033, 4091), 'numpy.concatenate', 'np.concatenate', (['[hit_energy, hit_x, hit_y, hit_z]'], {'axis': '(-1)'}), '([hit_energy, hit_x, hit_y, hit_z], axis=-1)\n', (4047, 4091), True, 'import numpy as np\n'), ((4183, 4225), 'DeepJetCore.SimpleArray', 'SimpleArray', (['features', 'rs'], {'name': '"""features"""'}), "(features, rs, name='features')\n", (4194, 4225), False, 'from DeepJetCore import SimpleArray\n'), ((4252, 4300), 'DeepJetCore.SimpleArray', 'SimpleArray', (['hit_ass_truth_idx', 'rs'], {'name': '"""t_idx"""'}), "(hit_ass_truth_idx, rs, name='t_idx')\n", (4263, 4300), False, 'from DeepJetCore import SimpleArray\n'), ((4321, 4375), 'DeepJetCore.SimpleArray', 'SimpleArray', (['hit_ass_truth_energy', 'rs'], {'name': '"""t_energy"""'}), "(hit_ass_truth_energy, rs, name='t_energy')\n", (4332, 4375), False, 'from DeepJetCore import SimpleArray\n'), ((4399, 4434), 'numpy.zeros_like', 'np.zeros_like', (['hit_ass_truth_energy'], {}), '(hit_ass_truth_energy)\n', (4412, 4434), True, 'import numpy as np\n'), ((4486, 4522), 'DeepJetCore.SimpleArray', 'SimpleArray', (['zeros', 'rs'], {'name': '"""t_pos"""'}), "(zeros, rs, name='t_pos')\n", (4497, 4522), False, 'from DeepJetCore import SimpleArray\n'), ((4538, 4575), 'DeepJetCore.SimpleArray', 'SimpleArray', (['zeros', 'rs'], {'name': '"""t_time"""'}), "(zeros, rs, name='t_time')\n", (4549, 4575), False, 'from DeepJetCore import SimpleArray\n'), ((4590, 4626), 
'DeepJetCore.SimpleArray', 'SimpleArray', (['zeros', 'rs'], {'name': '"""t_pid"""'}), "(zeros, rs, name='t_pid')\n", (4601, 4626), False, 'from DeepJetCore import SimpleArray\n'), ((4712, 4754), 'DeepJetCore.SimpleArray', 'SimpleArray', (['zeros', 'rs'], {'name': '"""t_spectator"""'}), "(zeros, rs, name='t_spectator')\n", (4723, 4754), False, 'from DeepJetCore import SimpleArray\n'), ((4781, 4829), 'DeepJetCore.SimpleArray', 'SimpleArray', (['zeros', 'rs'], {'name': '"""t_fully_contained"""'}), "(zeros, rs, name='t_fully_contained')\n", (4792, 4829), False, 'from DeepJetCore import SimpleArray\n'), ((4854, 4891), 'DeepJetCore.SimpleArray', 'SimpleArray', (['zeros', 'rs'], {'name': '"""t_rest"""'}), "(zeros, rs, name='t_rest')\n", (4865, 4891), False, 'from DeepJetCore import SimpleArray\n'), ((5968, 5995), 'numpy.log', 'np.log', (["(d['hit_energy'] + 1)"], {}), "(d['hit_energy'] + 1)\n", (5974, 5995), True, 'import numpy as np\n'), ((6148, 6178), 'numpy.concatenate', 'np.concatenate', (['allarr'], {'axis': '(1)'}), '(allarr, axis=1)\n', (6162, 6178), True, 'import numpy as np\n'), ((6203, 6247), 'pandas.DataFrame', 'pd.DataFrame', (['allarr'], {'columns': '[k for k in d]'}), '(allarr, columns=[k for k in d])\n', (6215, 6247), True, 'import pandas as pd\n'), ((983, 1012), 'numpy.concatenate', 'np.concatenate', (['allba'], {'axis': '(0)'}), '(allba, axis=0)\n', (997, 1012), True, 'import numpy as np\n'), ((1356, 1390), 'numpy.expand_dims', 'np.expand_dims', (['contentarr'], {'axis': '(1)'}), '(contentarr, axis=1)\n', (1370, 1390), True, 'import numpy as np\n'), ((1490, 1527), 'numpy.array', 'np.array', (['contentarr'], {'dtype': '"""float32"""'}), "(contentarr, dtype='float32')\n", (1498, 1527), True, 'import numpy as np\n'), ((2372, 2405), 'numpy.expand_dims', 'np.expand_dims', (['a.content'], {'axis': '(1)'}), '(a.content, axis=1)\n', (2386, 2405), True, 'import numpy as np\n'), ((2406, 2440), 'numpy.array', 'np.array', (['rowsplits'], {'dtype': '"""int64"""'}), 
"(rowsplits, dtype='int64')\n", (2414, 2440), True, 'import numpy as np\n'), ((2505, 2529), 'DeepJetCore.TrainData.fileTimeOut', 'fileTimeOut', (['filename', '(2)'], {}), '(filename, 2)\n', (2516, 2529), False, 'from DeepJetCore.TrainData import TrainData, fileTimeOut\n'), ((2603, 2628), 'ROOT.TFile.Open', 'ROOT.TFile.Open', (['filename'], {}), '(filename)\n', (2618, 2628), False, 'import ROOT\n'), ((3048, 3069), 'uproot3.open', 'uproot.open', (['filename'], {}), '(filename)\n', (3059, 3069), True, 'import uproot3 as uproot\n'), ((7341, 7369), 'gzip.open', 'gzip.open', (['outfilename', '"""wb"""'], {}), "(outfilename, 'wb')\n", (7350, 7369), False, 'import gzip\n'), ((7399, 7433), 'pickle.dump', 'pickle.dump', (['outdict', 'mypicklefile'], {}), '(outdict, mypicklefile)\n', (7410, 7433), False, 'import pickle\n'), ((7513, 7532), 'gzip.open', 'gzip.open', (['predfile'], {}), '(predfile)\n', (7522, 7532), False, 'import gzip\n'), ((7569, 7594), 'pickle.load', 'pickle.load', (['mypicklefile'], {}), '(mypicklefile)\n', (7580, 7594), False, 'import pickle\n'), ((906, 920), 'numpy.array', 'np.array', (['a[b]'], {}), '(a[b])\n', (914, 920), True, 'import numpy as np\n'), ((2015, 2039), 'numpy.count_nonzero', 'np.count_nonzero', (['select'], {}), '(select)\n', (2031, 2039), True, 'import numpy as np\n'), ((2549, 2570), 'uproot3.open', 'uproot.open', (['filename'], {}), '(filename)\n', (2560, 2570), True, 'import uproot3 as uproot\n'), ((7052, 7081), 'os.path.splitext', 'os.path.splitext', (['outfilename'], {}), '(outfilename)\n', (7068, 7081), False, 'import os\n')] |
from typing import Dict, Union
import gym
import numpy as np
from stable_baselines3.common.type_aliases import GymObs, GymStepReturn
class TimeFeatureWrapper(gym.Wrapper):
    """
    Add remaining, normalized time to observation space for fixed length episodes.
    See https://arxiv.org/abs/1712.00378 and https://github.com/aravindr93/mjrl/issues/13.

    .. note::
        Only ``gym.spaces.Box`` and ``gym.spaces.Dict`` (``gym.GoalEnv``) 1D observation spaces
        are supported for now.

    :param env: Gym env to wrap.
    :param max_steps: Max number of steps of an episode
        if it is not wrapped in a ``TimeLimit`` object.
    :param test_mode: In test mode, the time feature is constant,
        equal to zero. This allows to check that the agent did not overfit this feature,
        learning a deterministic pre-defined sequence of actions.
    """

    def __init__(self, env: gym.Env, max_steps: int = 1000, test_mode: bool = False):
        assert isinstance(
            env.observation_space, (gym.spaces.Box, gym.spaces.Dict)
        ), "`TimeFeatureWrapper` only supports `gym.spaces.Box` and `gym.spaces.Dict` (`gym.GoalEnv`) observation spaces."

        # Add a time feature to the observation
        if isinstance(env.observation_space, gym.spaces.Dict):
            assert "observation" in env.observation_space.spaces, "No `observation` key in the observation space"
            obs_space = env.observation_space.spaces["observation"]
            assert isinstance(
                obs_space, gym.spaces.Box
            ), "`TimeFeatureWrapper` only supports `gym.spaces.Box` observation space."
        else:
            obs_space = env.observation_space

        assert len(obs_space.shape) == 1, "Only 1D observation spaces are supported"

        # Extend the bounds with one extra slot for the time feature in [0, 1].
        low, high = np.concatenate((obs_space.low, [0.0])), np.concatenate((obs_space.high, [1.0]))
        self.dtype = obs_space.dtype

        if isinstance(env.observation_space, gym.spaces.Dict):
            env.observation_space.spaces["observation"] = gym.spaces.Box(low=low, high=high, dtype=self.dtype)
        else:
            env.observation_space = gym.spaces.Box(low=low, high=high, dtype=self.dtype)

        super().__init__(env)

        # Try to infer the max number of steps per episode
        try:
            self._max_steps = env.spec.max_episode_steps
        except AttributeError:
            self._max_steps = None

        # Fallback to provided value
        if self._max_steps is None:
            self._max_steps = max_steps

        self._current_step = 0
        self._test_mode = test_mode

    def reset(self) -> GymObs:
        """Reset the env and the step counter, returning the augmented observation."""
        self._current_step = 0
        return self._get_obs(self.env.reset())

    def step(self, action: Union[int, np.ndarray]) -> GymStepReturn:
        """Step the env, augmenting the returned observation with the time feature."""
        self._current_step += 1
        obs, reward, done, info = self.env.step(action)
        return self._get_obs(obs), reward, done, info

    def _get_obs(self, obs: Union[np.ndarray, Dict[str, np.ndarray]]) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        """
        Concatenate the time feature to the current observation.

        :param obs: Raw observation from the wrapped env (array, or dict with an
            "observation" key for goal envs).
        :return: Observation with the remaining-time feature appended.
        """
        # Remaining time is more general
        time_feature = 1 - (self._current_step / self._max_steps)
        if self._test_mode:
            time_feature = 1.0
        time_feature = np.array(time_feature, dtype=self.dtype)

        if isinstance(obs, dict):
            obs["observation"] = np.append(obs["observation"], time_feature)
            return obs
        return np.append(obs, time_feature)
| [
"numpy.append",
"numpy.array",
"numpy.concatenate",
"gym.spaces.Box"
] | [((3458, 3498), 'numpy.array', 'np.array', (['time_feature'], {'dtype': 'self.dtype'}), '(time_feature, dtype=self.dtype)\n', (3466, 3498), True, 'import numpy as np\n'), ((3649, 3677), 'numpy.append', 'np.append', (['obs', 'time_feature'], {}), '(obs, time_feature)\n', (3658, 3677), True, 'import numpy as np\n'), ((1920, 1948), 'numpy.concatenate', 'np.concatenate', (['(low, [0.0])'], {}), '((low, [0.0]))\n', (1934, 1948), True, 'import numpy as np\n'), ((1950, 1979), 'numpy.concatenate', 'np.concatenate', (['(high, [1.0])'], {}), '((high, [1.0]))\n', (1964, 1979), True, 'import numpy as np\n'), ((2139, 2191), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'low', 'high': 'high', 'dtype': 'self.dtype'}), '(low=low, high=high, dtype=self.dtype)\n', (2153, 2191), False, 'import gym\n'), ((2242, 2294), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'low', 'high': 'high', 'dtype': 'self.dtype'}), '(low=low, high=high, dtype=self.dtype)\n', (2256, 2294), False, 'import gym\n'), ((3567, 3610), 'numpy.append', 'np.append', (["obs['observation']", 'time_feature'], {}), "(obs['observation'], time_feature)\n", (3576, 3610), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.