code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import os.path
import subprocess
import numpy as np
import nibabel as nib
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.colors
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import ImageGrid
def run(command, env={}):
    """Execute *command* in a shell, draining its output until it exits.

    Parameters
    ----------
    command : str
        Shell command line (executed with ``shell=True``).
    env : dict, optional
        Extra environment variables layered on top of ``os.environ``
        for the child process only.

    Raises
    ------
    Exception
        If the command finishes with a non-zero return code.
    """
    # Bug fix: the original aliased os.environ, so update(env) leaked the
    # caller's extra variables into this process's real environment.
    merged_env = dict(os.environ)
    merged_env.update(env)
    process = subprocess.Popen(command, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT, shell=True,
                               env=merged_env)
    while True:
        line = process.stdout.readline()
        line = str(line, 'utf-8')[:-1]
        # poll() is non-None once the child has terminated; an empty read
        # plus a set return code means the stream is fully drained.
        if line == '' and process.poll() is not None:
            break
    if process.returncode != 0:
        raise Exception("Non zero return code: %d" % process.returncode)
def getImgThirds(img):
    """Return three evenly spaced slice indices per axis of a 3-D volume.

    For each axis (in order: last, middle, first) the indices run from one
    third of the extent minus one up to two thirds of the extent.

    Returns
    -------
    list of three int arrays: [indices_axis2, indices_axis1, indices_axis0].
    """
    def inner_thirds(extent):
        # Three samples spanning the central third of the axis.
        return np.floor(np.linspace(extent / 3 - 1, extent - extent / 3, 3)).astype(int)

    return [inner_thirds(img.shape[2]),
            inner_thirds(img.shape[1]),
            inner_thirds(img.shape[0])]
def normImg(img):
    """Linearly rescale the intensities of *img* to the range [0, 255]."""
    lowest = img.min()
    span = img.max() - lowest
    return 255 * ((img - lowest) / span)
def fourierSharpness(img):
    """Fraction of Fourier coefficients above 1/1000 of the spectral peak.

    A simple sharpness proxy: a blurry volume concentrates its spectrum
    near DC, so fewer coefficients clear the threshold.
    """
    spectrum = np.fft.fftn(img, axes=(0, 1, 2))
    # fftshift rolls each axis by n // 2, exactly centring the DC term
    # (equivalent to the three chained np.roll calls it replaces).
    AF = np.abs(np.fft.fftshift(spectrum, axes=(0, 1, 2)))
    above = np.count_nonzero(AF > np.max(AF) / 1000)
    return float(above) / float(np.prod(img.shape))
def plotFig(img, title, voxSize):
    """Render a 3x3 grid of axial/coronal/sagittal slices of a 3-D/4-D volume.

    Parameters:
        img: 3-D volume or 4-D (x, y, z, channel) array. A 3-D input is
            expanded with a trailing channel axis. Intensities are rescaled
            to [0, 255] in-place below.
        title: figure title placed over the middle column.
        voxSize: per-axis voxel sizes, used to set the display aspect ratio.
    """
    ind=getImgThirds(img)
    fig = plt.figure(figsize=(20,20))
    grid = ImageGrid(fig,111,nrows_ncols=(3,3), axes_pad=0)
    fig.subplots_adjust(wspace=0, hspace=0)
    if len(img.shape) == 3:
        img = np.expand_dims(img, axis=3)
    # Normalize intensities to [0, 255] for imshow with vmin/vmax.
    img = 255 * ((img - img.min()) / (img.max() - img.min()))
    # Zero-pad the shorter of the first two axes so slices display square;
    # the precomputed slice indices must be shifted by the same padding.
    if img.shape[0]<img.shape[1]:
        lr_pad = int((img.shape[1]-img.shape[0]) / 2 + 1)
        img = np.pad(img,[(lr_pad,lr_pad), (0, 0), (0,0), (0,0)],'constant', constant_values=(0, 0))
        ind[2] = ind[2] + lr_pad
    if img.shape[1]<img.shape[0]:
        ap_pad = int((img.shape[0]-img.shape[1]) / 2)
        img = np.pad(img, [(0, 0), (ap_pad, ap_pad), (0, 0), (0, 0)], 'constant', constant_values=(0, 0))
        #ind[0] = ind[0] + ap_pad
        ind[1] = ind[1] + ap_pad
    # Transpose order applied to each extracted slice before display.
    ax = (1, 0, 2)
    cnt=0
    # Rows: i=0 axial, i=1 coronal, i=2 sagittal; columns: the three
    # slice positions computed by getImgThirds.
    for i in range(3):
        for j in range(3):
            if i==0: # axial
                pltimg = img[:,::-1,ind[i][j],:]
                ar = voxSize[1]/voxSize[0]
            elif i==1: # coronal
                pltimg = img[:,ind[i][j],::-1,:]
                ar = voxSize[2]/voxSize[0]
            elif i==2: # sagittal
                pltimg = img[ind[i][j],:,::-1,:]
                ar = voxSize[2]/voxSize[1]
            pltimg = np.transpose(pltimg, axes=ax)
            # Single-channel slices are drawn as grayscale with the fixed
            # [0, 255] range; multi-channel slices as RGB.
            if len(np.squeeze(pltimg).shape) == 2:
                grid[cnt].imshow(np.squeeze(pltimg), cmap='gray', vmin = 0, vmax = 255, interpolation='none', aspect=ar)
            else: # colored
                grid[cnt].imshow(np.squeeze(pltimg), interpolation='none', aspect=ar)
            grid[cnt].axis('off')
            cnt = cnt + 1
    # Row labels: only the y-axis label is shown, all ticks hidden.
    grid[0].set(ylabel='transversal')
    grid[0].axis('on')
    grid[0].xaxis.set_visible(False)
    grid[0].yaxis.set_ticks([])
    grid[0].yaxis.label.set_fontsize(16)
    grid[3].set(ylabel='coronal')
    grid[3].axis('on')
    grid[3].xaxis.set_visible(False)
    grid[3].yaxis.set_ticks([])
    grid[3].yaxis.label.set_fontsize(16)
    grid[6].set(ylabel='sagittal')
    grid[6].axis('on')
    grid[6].xaxis.set_visible(False)
    grid[6].yaxis.set_ticks([])
    grid[6].yaxis.label.set_fontsize(16)
    grid[1].set_title(title, fontsize=16)
def plotTensor(img, ev1, title):
    """Render a 3x3 slice grid with principal-eigenvector line overlays.

    Parameters:
        img: 3-D volume (or 4-D with channel axis) used as the backdrop.
        ev1: 4-D field of per-voxel 3-vectors (e.g. first eigenvectors);
            NOTE: its first component is sign-flipped in place below.
        title: figure title over the middle column.
    """
    # In-place sign flip of the x component; mutates the caller's array.
    # Presumably a radiological/neurological orientation fix — TODO confirm.
    ev1[:,:,:,0] *= -1
    ind = getImgThirds(img)
    fig = plt.figure(figsize=(20,20))
    grid = ImageGrid(fig,111,nrows_ncols=(3,3), axes_pad=0)
    fig.subplots_adjust(wspace=0, hspace=0)
    if len(img.shape) == 3:
        img = np.expand_dims(img, axis=3)
    img = 255 * ((img - img.min()) / (img.max() - img.min()))
    # Pad the shorter of the first two axes to (near-)square; unlike
    # plotFig, an asymmetric pad (pad / pad+1) handles odd differences.
    # The vector field is NOT padded, so separate offsets (off/off2 below)
    # map padded pixel coordinates back into ev1 indices.
    if img.shape[0]<img.shape[1]:
        lr_pad = int((img.shape[1]-img.shape[0]) / 2)
        if img.shape[0] + 2*lr_pad < img.shape[1]:
            lr_pad2 = lr_pad + 1
        else:
            lr_pad2 = lr_pad
        padimg = np.pad(img,[(lr_pad,lr_pad2), (0, 0), (0,0), (0,0)],'constant', constant_values=(0, 0))
        ind[2] = ind[2] + lr_pad
    else:
        lr_pad = 0
        lr_pad2 = 0
        padimg = img.copy()
        vec_ind = ind.copy()
    if img.shape[1]<img.shape[0]:
        ap_pad = int((img.shape[0]-img.shape[1]) / 2)
        if img.shape[1] + 2*ap_pad < img.shape[0]:
            ap_pad2 = ap_pad + 1
        else:
            ap_pad2 = ap_pad
        padimg = np.pad(img, [(0, 0), (ap_pad, ap_pad2), (0, 0), (0, 0)], 'constant', constant_values=(0, 0))
        ind[1] = ind[1] + ap_pad
    else:
        ap_pad = 0
        ap_pad2 = 0
        # NOTE(review): vec_ind is only assigned on the two 'else' branches;
        # if both pad branches are taken it would be undefined — confirm
        # callers always pass volumes where at least one axis needs no pad.
        vec_ind = ind.copy()
    ax = (1, 0, 2)
    cnt = 0
    # res: stride (in pixels) between drawn vectors; also scales length.
    res = 1
    for i in range(3):
        for j in range(3):
            if i==0:
                pltimg = padimg[:,::-1,ind[i][j],:]
                vec = ev1[:,::-1,vec_ind[i][j],:] * res * 1.7
            elif i==1:
                pltimg = padimg[:,ind[i][j],::-1,:]
                vec = ev1[:,vec_ind[i][j],::-1,:] * res * 1.7
            elif i==2:
                pltimg = padimg[ind[i][j],:,::-1,:]
                vec = ev1[vec_ind[i][j],:,::-1,:] * res * 1.7
            pltimg = np.transpose(pltimg, axes=ax)
            vec = np.transpose(vec, axes=ax)
            if len(np.squeeze(pltimg).shape) == 2:
                grid[cnt].imshow(np.squeeze(pltimg), cmap='gray', vmin = 0, vmax = 255, interpolation='none', aspect='equal')
            else: #colored
                grid[cnt].imshow(np.squeeze(pltimg), interpolation='none')
            # Per-view offsets mapping padded image coords back to ev1 coords.
            if i==0: # transversal x/y
                off = lr_pad2
                off2 = ap_pad2
            elif i==1: # coronal x/z
                off = lr_pad2
                off2 = 0
            elif i==2: # sagittal y/z
                off = ap_pad2
                off2 = 0
            limX = pltimg.shape[1]-1
            limY = pltimg.shape[0]-1
            # Draw one line segment per foreground voxel, coloured by the
            # absolute direction (standard DTI RGB convention, apparently).
            for x in range(0, pltimg.shape[1], res):
                for y in range(0, pltimg.shape[0], res):
                    if pltimg[y, x] > 0 :
                        grad = vec[y-off2,x-off,:]
                        col = abs(grad)/np.linalg.norm(grad)
                        # Reorder/flip vector components into this view's
                        # in-plane (x, y) axes.
                        if i==0:
                            # correct as is
                            grad = grad
                            col = col
                        elif i==1:
                            grad[1] = grad[1] * -1
                            grad = grad[[0, 2, 1]]
                        elif i==2:
                            grad[0] = grad[0] * -1
                            grad = grad[[1, 2, 0]]
                        # Segment centred on the voxel, clipped to the axes.
                        myX = [x - grad[0], x + grad[0]]
                        myY = [y - grad[1], y + grad[1]]
                        myX = np.clip(myX, 0, limX)
                        myY = np.clip(myY, 0, limY)
                        grid[cnt].plot(myX, myY, c=(col[0], col[1], col[2]), linewidth=1.5)
            grid[cnt].axis('off')
            cnt = cnt + 1
    # Row labels; spines hidden so only the text remains.
    grid[0].set(ylabel='transversal')
    grid[0].axis('on')
    grid[0].xaxis.set_visible(False)
    grid[0].yaxis.set_ticks([])
    grid[0].yaxis.label.set_fontsize(16)
    for spine in grid[0].spines.values():
        spine.set_visible(False)
    grid[3].set(ylabel='coronal')
    grid[3].axis('on')
    grid[3].xaxis.set_visible(False)
    grid[3].yaxis.set_ticks([])
    grid[3].yaxis.label.set_fontsize(16)
    for spine in grid[3].spines.values():
        spine.set_visible(False)
    grid[6].set(ylabel='sagittal')
    grid[6].axis('on')
    grid[6].xaxis.set_visible(False)
    grid[6].yaxis.set_ticks([])
    grid[6].yaxis.label.set_fontsize(16)
    for spine in grid[6].spines.values():
        spine.set_visible(False)
    grid[1].set_title(title, fontsize=16)
def fixImageHeader(img):
    """Derive a cleaned-up affine for a nibabel image.

    Permutes the affine's columns so each voxel axis maps to its dominant
    world axis, then flips any axis with a negative direction, adjusting the
    translation so the world position of the volume is preserved.

    Returns:
        (M, perm, flip_sign): the adjusted 4x4 affine, the axis permutation
        applied, and the per-axis sign vector before flipping.
    """
    # flip dimensions to clean up Header-Trafo
    dims = img.header.get_data_shape();
    # NOTE(review): this expression is a no-op — possibly intended as
    # `dims = dims[:3]`; harmless as written since only the first three
    # entries are used below.
    dims[:3]
    M = img.affine
    # Rank voxel axes by which world axis dominates each column (weights
    # 1,2,3 break ties); fall back to a row-wise ranking if that yields
    # the identity permutation.
    perm = np.argsort(np.square(np.transpose(M[:3,:3])).dot(np.transpose([1, 2, 3])))
    if (perm == [0, 1, 2]).all():
        perm = np.argsort(np.square(M[:3,:3]).dot([1, 2, 3]))
    # Reorder the rotation/scale columns; the translation column stays last.
    M = M[:,np.insert(perm,3,3)]
    flip_sign = np.sign(M[:3,:3].dot([1, 2, 3]))
    dims = np.array(dims)[perm]
    R = M[:3,:3]
    T = M[:3,3]
    # 1-based voxel coordinate of the world origin.
    orig = np.linalg.inv(R).dot(-T) + 1;
    # For each negatively-oriented axis: mirror the origin within the
    # volume, negate the column, and recompute the translation so the
    # mapped world position is unchanged.
    if flip_sign[0] < 0:
        orig[0] = dims[0] - orig[0] + 1
        M[:,0] = -1 * M[:,0]
        M[:3,3] = -M[:3,:3].dot(orig[:3] - 1)
    if flip_sign[1] < 0:
        orig[1] = dims[1] - orig[1] + 1
        M[:,1] = -1 * M[:,1]
        M[:3,3] = -M[:3,:3].dot(orig[:3] - 1)
    if flip_sign[2] < 0:
        orig[2] = dims[2] - orig[2] + 1
        M[:,2] = -1 * M[:,2]
        M[:3,3] = -M[:3,:3].dot(orig[:3] - 1)
    return (M, perm, flip_sign)
| [
"numpy.clip",
"numpy.prod",
"numpy.array",
"numpy.linalg.norm",
"subprocess.Popen",
"numpy.fft.fftn",
"numpy.max",
"numpy.linspace",
"mpl_toolkits.axes_grid1.ImageGrid",
"matplotlib.use",
"numpy.squeeze",
"numpy.square",
"numpy.transpose",
"numpy.insert",
"numpy.roll",
"matplotlib.pypl... | [((92, 113), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (106, 113), False, 'import matplotlib\n'), ((380, 487), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'shell': '(True)', 'env': 'merged_env'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n shell=True, env=merged_env)\n', (396, 487), False, 'import subprocess\n'), ((1289, 1321), 'numpy.fft.fftn', 'np.fft.fftn', (['img'], {'axes': '(0, 1, 2)'}), '(img, axes=(0, 1, 2))\n', (1300, 1321), True, 'import numpy as np\n'), ((1625, 1653), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (1635, 1653), True, 'import matplotlib.pyplot as plt\n'), ((1664, 1715), 'mpl_toolkits.axes_grid1.ImageGrid', 'ImageGrid', (['fig', '(111)'], {'nrows_ncols': '(3, 3)', 'axes_pad': '(0)'}), '(fig, 111, nrows_ncols=(3, 3), axes_pad=0)\n', (1673, 1715), False, 'from mpl_toolkits.axes_grid1 import ImageGrid\n'), ((3884, 3912), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (3894, 3912), True, 'import matplotlib.pyplot as plt\n'), ((3923, 3974), 'mpl_toolkits.axes_grid1.ImageGrid', 'ImageGrid', (['fig', '(111)'], {'nrows_ncols': '(3, 3)', 'axes_pad': '(0)'}), '(fig, 111, nrows_ncols=(3, 3), axes_pad=0)\n', (3932, 3974), False, 'from mpl_toolkits.axes_grid1 import ImageGrid\n'), ((1800, 1827), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(3)'}), '(img, axis=3)\n', (1814, 1827), True, 'import numpy as np\n'), ((2001, 2096), 'numpy.pad', 'np.pad', (['img', '[(lr_pad, lr_pad), (0, 0), (0, 0), (0, 0)]', '"""constant"""'], {'constant_values': '(0, 0)'}), "(img, [(lr_pad, lr_pad), (0, 0), (0, 0), (0, 0)], 'constant',\n constant_values=(0, 0))\n", (2007, 2096), True, 'import numpy as np\n'), ((2224, 2319), 'numpy.pad', 'np.pad', (['img', '[(0, 0), (ap_pad, ap_pad), (0, 0), (0, 0)]', '"""constant"""'], 
{'constant_values': '(0, 0)'}), "(img, [(0, 0), (ap_pad, ap_pad), (0, 0), (0, 0)], 'constant',\n constant_values=(0, 0))\n", (2230, 2319), True, 'import numpy as np\n'), ((4059, 4086), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(3)'}), '(img, axis=3)\n', (4073, 4086), True, 'import numpy as np\n'), ((4386, 4482), 'numpy.pad', 'np.pad', (['img', '[(lr_pad, lr_pad2), (0, 0), (0, 0), (0, 0)]', '"""constant"""'], {'constant_values': '(0, 0)'}), "(img, [(lr_pad, lr_pad2), (0, 0), (0, 0), (0, 0)], 'constant',\n constant_values=(0, 0))\n", (4392, 4482), True, 'import numpy as np\n'), ((4846, 4942), 'numpy.pad', 'np.pad', (['img', '[(0, 0), (ap_pad, ap_pad2), (0, 0), (0, 0)]', '"""constant"""'], {'constant_values': '(0, 0)'}), "(img, [(0, 0), (ap_pad, ap_pad2), (0, 0), (0, 0)], 'constant',\n constant_values=(0, 0))\n", (4852, 4942), True, 'import numpy as np\n'), ((8560, 8574), 'numpy.array', 'np.array', (['dims'], {}), '(dims)\n', (8568, 8574), True, 'import numpy as np\n'), ((1532, 1550), 'numpy.prod', 'np.prod', (['img.shape'], {}), '(img.shape)\n', (1539, 1550), True, 'import numpy as np\n'), ((2857, 2886), 'numpy.transpose', 'np.transpose', (['pltimg'], {'axes': 'ax'}), '(pltimg, axes=ax)\n', (2869, 2886), True, 'import numpy as np\n'), ((5575, 5604), 'numpy.transpose', 'np.transpose', (['pltimg'], {'axes': 'ax'}), '(pltimg, axes=ax)\n', (5587, 5604), True, 'import numpy as np\n'), ((5623, 5649), 'numpy.transpose', 'np.transpose', (['vec'], {'axes': 'ax'}), '(vec, axes=ax)\n', (5635, 5649), True, 'import numpy as np\n'), ((8343, 8366), 'numpy.transpose', 'np.transpose', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (8355, 8366), True, 'import numpy as np\n'), ((8478, 8499), 'numpy.insert', 'np.insert', (['perm', '(3)', '(3)'], {}), '(perm, 3, 3)\n', (8487, 8499), True, 'import numpy as np\n'), ((878, 947), 'numpy.linspace', 'np.linspace', (['(img.shape[2] / 3 - 1)', '(img.shape[2] - img.shape[2] / 3)', '(3)'], {}), '(img.shape[2] / 3 - 1, img.shape[2] - 
img.shape[2] / 3, 3)\n', (889, 947), True, 'import numpy as np\n'), ((972, 1041), 'numpy.linspace', 'np.linspace', (['(img.shape[1] / 3 - 1)', '(img.shape[1] - img.shape[1] / 3)', '(3)'], {}), '(img.shape[1] / 3 - 1, img.shape[1] - img.shape[1] / 3, 3)\n', (983, 1041), True, 'import numpy as np\n'), ((1066, 1135), 'numpy.linspace', 'np.linspace', (['(img.shape[0] / 3 - 1)', '(img.shape[0] - img.shape[0] / 3)', '(3)'], {}), '(img.shape[0] / 3 - 1, img.shape[0] - img.shape[0] / 3, 3)\n', (1077, 1135), True, 'import numpy as np\n'), ((1335, 1352), 'numpy.array', 'np.array', (['f.shape'], {}), '(f.shape)\n', (1343, 1352), True, 'import numpy as np\n'), ((1397, 1425), 'numpy.roll', 'np.roll', (['f', 'shift[0]'], {'axis': '(0)'}), '(f, shift[0], axis=0)\n', (1404, 1425), True, 'import numpy as np\n'), ((8626, 8642), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (8639, 8642), True, 'import numpy as np\n'), ((2972, 2990), 'numpy.squeeze', 'np.squeeze', (['pltimg'], {}), '(pltimg)\n', (2982, 2990), True, 'import numpy as np\n'), ((3121, 3139), 'numpy.squeeze', 'np.squeeze', (['pltimg'], {}), '(pltimg)\n', (3131, 3139), True, 'import numpy as np\n'), ((5734, 5752), 'numpy.squeeze', 'np.squeeze', (['pltimg'], {}), '(pltimg)\n', (5744, 5752), True, 'import numpy as np\n'), ((5887, 5905), 'numpy.squeeze', 'np.squeeze', (['pltimg'], {}), '(pltimg)\n', (5897, 5905), True, 'import numpy as np\n'), ((8315, 8338), 'numpy.transpose', 'np.transpose', (['M[:3, :3]'], {}), '(M[:3, :3])\n', (8327, 8338), True, 'import numpy as np\n'), ((8429, 8449), 'numpy.square', 'np.square', (['M[:3, :3]'], {}), '(M[:3, :3])\n', (8438, 8449), True, 'import numpy as np\n'), ((1505, 1515), 'numpy.max', 'np.max', (['AF'], {}), '(AF)\n', (1511, 1515), True, 'import numpy as np\n'), ((2907, 2925), 'numpy.squeeze', 'np.squeeze', (['pltimg'], {}), '(pltimg)\n', (2917, 2925), True, 'import numpy as np\n'), ((5669, 5687), 'numpy.squeeze', 'np.squeeze', (['pltimg'], {}), '(pltimg)\n', (5679, 5687), 
True, 'import numpy as np\n'), ((7130, 7151), 'numpy.clip', 'np.clip', (['myX', '(0)', 'limX'], {}), '(myX, 0, limX)\n', (7137, 7151), True, 'import numpy as np\n'), ((7182, 7203), 'numpy.clip', 'np.clip', (['myY', '(0)', 'limY'], {}), '(myY, 0, limY)\n', (7189, 7203), True, 'import numpy as np\n'), ((6534, 6554), 'numpy.linalg.norm', 'np.linalg.norm', (['grad'], {}), '(grad)\n', (6548, 6554), True, 'import numpy as np\n')] |
""" Episode runner refactored to work with petting zoo api
https://www.pettingzoo.ml/api
"""
#from envs import REGISTRY as env_REGISTRY
from math import floor
from shutil import ExecError
from src.envs import REGISTRY as env_REGISTRY
from functools import partial
#from components.episode_buffer import EpisodeBatch
from src.components.episode_buffer import EpisodeBatch
import numpy as np
from src.utils.zoo_utils import update_batch_pre, quadratic_makespan_reward
#from alex_4yp.camas_sim_vis import animate_des
from camas_gym.envs.rendering.zoo_camas_sim_vis import animate_des
import matplotlib.pyplot as plt
class RenderEpisodeRunner:
debug = True
    def __init__(self, args, logger):
        """Build the rendering runner: instantiate the environment from the
        registry and reset counters and statistics.

        Args:
            args: config namespace; must provide batch_size_run (must be 1),
                env, env_args, and (used later) device, test_nepisode,
                runner_log_interval.
            logger: project logger exposing console_logger and log_stat.
        """
        self.args = args
        self.logger = logger
        self.batch_size = self.args.batch_size_run
        # This runner only supports single-episode batches.
        assert self.batch_size == 1
        self.logger.console_logger.debug('Rendering runner initialised')
        self.env = env_REGISTRY[self.args.env](**self.args.env_args)
        self.episode_limit = self.env.episode_limit
        self.t = 0       # step counter within the current episode
        self.t_env = 0   # cumulative environment steps across episodes
        self.train_returns = []
        self.test_returns = []
        self.train_stats = {}
        self.test_stats = {}
        # Log the first run
        self.log_train_stats_t = -1000000
def setup(self, scheme, groups, preprocess, mac):
self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
preprocess=preprocess, device=self.args.device)
self.mac = mac
def get_env_info(self):
info = self.env.get_env_info()
info["n_actions"] = 5 # NOTE to add None action
return info
    def save_replay(self):
        """Delegate replay saving to the environment."""
        self.env.save_replay()
    def close_env(self):
        """Close the underlying environment."""
        self.env.close()
    def reset(self):
        """Start a fresh episode: new batch, reset env, zero the step counter."""
        self.batch = self.new_batch()
        self.env.reset()
        self.t = 0
    def run(self, test_mode=False):
        """Roll out one rendered episode and return the filled EpisodeBatch.

        The test_mode argument is accepted for interface compatibility but
        is overridden to True below — this runner always acts greedily.
        NOTE(review): the method unconditionally raises Exception() after
        rendering (see below), so the stats/logging code and the final
        return are unreachable — presumably a deliberate stop for one-off
        visualisation runs; confirm before reusing.
        """
        test_mode = True
        print('*** reset environment ***')
        self.reset()
        terminated = False
        episode_return = 0
        self.mac.init_hidden(batch_size=self.batch_size) # NOTE not sure what this is
        obs, reward, done, info = self.env.last()
        last_time = self.env.sim_time()
        k = 0
        all_done = False
        while not terminated:
            k += 1
            if self.debug: print(f'-- step {k} \nState: {self.env.state()}, Agent: {self.env.agent_selection}, Time: {last_time}')
            pre_transition_data = self.env.get_pretran_data()
            if self.debug: print(f"Pre transition data: {pre_transition_data}")
            self.batch.update(pre_transition_data, ts=self.t)
            # Select a joint action but apply only the current agent's slot
            # (petting-zoo style turn-based stepping).
            actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
            action = actions[0][self.env.agent_idx()].item()
            if action == 4:
                self.env.step(None) # terminated action to update env correctly
            else:
                self.env.step(action)
            obs, _, done, env_info = self.env.last()
            # Reward: negative elapsed simulation time since the last step.
            reward = -1*(self.env.sim_time() - last_time)
            #reward = 0
            if done: reward += 20
            last_time = self.env.sim_time()
            # Episode ends when the last remaining agent finishes.
            if done and len(self.env.agents) == 1:
                all_done = True
                terminated = True
                reward += quadratic_makespan_reward(last_time)
            reward = reward/100 # NOTE scaled down
            if self.debug: print(f'Actions: {actions}\nReward {reward}, Time {last_time}')
            episode_return += reward
            post_transition_data = {
                "actions": actions,
                "reward": [(reward,)],
                "terminated": [[(all_done),]], # NOTE used to be: [(terminated != env_info.get("episode_limit", False),)] # env info here is info from step()
            }
            self.batch.update(post_transition_data, ts=self.t)
            self.t += 1
            if self.t == self.episode_limit:
                terminated = True
        # Store the final post-episode observation in the extra batch slot.
        pre_transition_data = self.env.get_pretran_data()
        self.batch.update(pre_transition_data, ts=self.t)
        '''last_data = {
            "state": [self.env.state()],
            "avail_actions": [obs["action_mask"]],
            "obs": [obs["observation"]]
        }
        print('last data', last_data)
        self.batch.update(last_data, ts=self.t) '''
        # Select actions in the last stored state
        actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
        self.batch.update({"actions": actions}, ts=self.t)
        #print('last data', pre_transition_data, 'actions', actions)
        #rc = {'agent_0':'blue', 'agent_1':'green', 'agent_2':'yellow'}
        # Build per-agent colour/shape maps and replay the recorded DES
        # events into per-agent node-visit histories for the animation.
        rc = {'agent_'+str(i): self._get_agent_colour(i) for i in range(self.env.agent_count())}
        rs = {agent:'square' for agent in rc.keys()}
        print('events', self.env._events)
        print('len', len(self.env._events))
        aevents = {agent: [('node', self.env._tm.nodes[self.env.inital_state(agent)], 0.0)] for agent in rc.keys()}
        for event in self.env._events:
            for a in aevents.keys():
                if event[2] == a:
                    if event[0] == 'location':
                        if event[3] != "Agent reached goal":
                            aevents[a].append(('node', self.env._tm.nodes[event[3]], event[1]))
                    else:
                        aevents[a].append(event)
                    continue
        print('aevents', aevents)
        animate_des(self.env._tm, aevents, rc, rs, starts_and_ends=self.env.map_param("initial")) # current starts from teh second node visited
        #env._tm.draw()
        plt.show()
        raise Exception()
        # NOTE(review): everything below is unreachable due to the raise above.
        cur_stats = self.test_stats if test_mode else self.train_stats
        cur_returns = self.test_returns if test_mode else self.train_returns
        log_prefix = "test_" if test_mode else ""
        cur_stats.update({k: cur_stats.get(k, 0) + env_info.get(k, 0) for k in set(cur_stats) | set(env_info)})
        cur_stats["n_episodes"] = 1 + cur_stats.get("n_episodes", 0)
        cur_stats["ep_length"] = self.t + cur_stats.get("ep_length", 0)
        if not test_mode:
            self.t_env += self.t
        cur_returns.append(episode_return)
        if test_mode and (len(self.test_returns) == self.args.test_nepisode): #-- can't rectify log_stat
            self._log(cur_returns, cur_stats, log_prefix)
        elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
            self._log(cur_returns, cur_stats, log_prefix)
            if hasattr(self.mac.action_selector, "epsilon"):
                self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
            self.log_train_stats_t = self.t_env
        if self.debug: raise Exception()
        return self.batch
def _log(self, returns, stats, prefix):
self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
returns.clear()
for k, v in stats.items():
if k != "n_episodes":
self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
stats.clear()
def _get_agent_colour(self, idx):
self._colours = ["blue", "green", "red", "cyan", "magenta", "yellow", "black"]
if idx >= len(self._colours):
idx = idx % len(self._colours)
return self._colours[idx] | [
"numpy.mean",
"functools.partial",
"numpy.std",
"src.utils.zoo_utils.quadratic_makespan_reward",
"matplotlib.pyplot.show"
] | [((1365, 1495), 'functools.partial', 'partial', (['EpisodeBatch', 'scheme', 'groups', 'self.batch_size', '(self.episode_limit + 1)'], {'preprocess': 'preprocess', 'device': 'self.args.device'}), '(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit +\n 1, preprocess=preprocess, device=self.args.device)\n', (1372, 1495), False, 'from functools import partial\n'), ((5910, 5920), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5918, 5920), True, 'import matplotlib.pyplot as plt\n'), ((7186, 7202), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (7193, 7202), True, 'import numpy as np\n'), ((7268, 7283), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (7274, 7283), True, 'import numpy as np\n'), ((3467, 3503), 'src.utils.zoo_utils.quadratic_makespan_reward', 'quadratic_makespan_reward', (['last_time'], {}), '(last_time)\n', (3492, 3503), False, 'from src.utils.zoo_utils import update_batch_pre, quadratic_makespan_reward\n')] |
import metrics
import numpy as np
import torch
import revdiff as rd
import unittest
import utils
def get_grad(out, x):
    """Return the reverse-mode gradient node of *out* with respect to *x*."""
    return rd.build_node_grad(out, x)
def val(x):
    """Wrap a constant value *x* as a revdiff graph node."""
    return rd.build_val(x)
def get_arr_len(x):
    """Element count of *x*: its ``size`` for numpy arrays/scalars, else 1."""
    return x.size if isinstance(x, (np.ndarray, np.generic)) else 1
def mse(y_pred, y_true):
    """Sum of squared errors scaled by 1 / rank(y_pred), as a graph node.

    NOTE(review): the scale factor is 1 / len(y_pred.shape) (number of
    dimensions), not 1 / number of elements as a textbook MSE would use —
    confirm this matches the intended loss definition.
    """
    residual = y_true - y_pred
    total = rd.op_sum(residual * residual, axis=0)
    return (1 / len(y_pred.shape)) * total
class RDTestCase(unittest.TestCase):
    """Base test case providing a float-tolerance comparison helper."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.ck_feps = 1e-6  # default per-element tolerance

    def ck_fequals(self, a, b, feps=None):
        """Assert the average element-wise distance between a and b < feps."""
        tolerance = self.ck_feps if feps is None else feps
        avg_dist = metrics.tdist(a, b) / get_arr_len(a)
        self.assertLess(avg_dist, tolerance)
class BasicOps(RDTestCase):
    """Forward (fwd) and backward (bwd) checks for elementary revdiff ops,
    each compared against numpy or a matching torch autograd computation."""
    def test_fdw_val(self):
        """Constant nodes evaluate to their wrapped value."""
        x = rd.build_val(2)
        y = rd.build_val(3)
        self.ck_fequals(x.eval(), 2)
        self.ck_fequals(y.eval(), 3)
    def test_bwd_val(self):
        """d(x)/dx is 1; gradient w.r.t. an unrelated node is 0."""
        x = rd.build_val(2)
        y = rd.build_val(3)
        self.ck_fequals(get_grad(x, x).eval(), 1)
        self.ck_fequals(get_grad(x, y).eval(), 0)
        self.ck_fequals(get_grad(y, y).eval(), 1)
        self.ck_fequals(get_grad(y, x).eval(), 0)
    def test_fwd_vadd(self):
        """Addition matches numpy for scalars and 3-D arrays."""
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) + val(y)).eval(), x + y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) + val(y)).eval(), x + y)
    def test_bwd_vadd(self):
        """d(x+y)/dx = d(x+y)/dy = 1; unrelated node gets 0."""
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx + ty
        tlone = val(10)
        self.ck_fequals(get_grad(tz, tx).eval(), 1)
        self.ck_fequals(get_grad(tz, ty).eval(), 1)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)
    def test_fwd_vsub(self):
        """Subtraction matches numpy for scalars and 3-D arrays."""
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) - val(y)).eval(), x - y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) - val(y)).eval(), x - y)
    def test_bwd_vsub(self):
        """d(x-y)/dx = 1, d(x-y)/dy = -1."""
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx - ty
        tlone = val(10)
        self.ck_fequals(get_grad(tz, tx).eval(), 1)
        self.ck_fequals(get_grad(tz, ty).eval(), -1)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)
    def test_fwd_vmul(self):
        """Multiplication matches numpy for scalars and 3-D arrays."""
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) * val(y)).eval(), x * y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) * val(y)).eval(), x * y)
    def test_bwd_vmul(self):
        """d(x*y)/dx = y, d(x*y)/dy = x."""
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx * ty
        tlone = val(10)
        self.ck_fequals(get_grad(tz, tx).eval(), y)
        self.ck_fequals(get_grad(tz, ty).eval(), x)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)
    def test_fwd_vdiv(self):
        """Division matches numpy (looser tolerance for random arrays)."""
        x = np.array(2)
        y = np.array(3)
        self.ck_fequals((val(x) / val(y)).eval(), x / y)
        x = np.random.randn(3, 12, 7)
        y = np.random.randn(3, 12, 7)
        self.ck_fequals((val(x) / val(y)).eval(), x / y, feps=1e-4)
    def test_bwd_vdiv(self):
        """d(x/y)/dx = 1/y, d(x/y)/dy = -x/y^2."""
        x = np.array(2)
        y = np.array(3)
        tx = val(x)
        ty = val(y)
        tz = tx / ty
        tlone = val(10)
        self.ck_fequals(get_grad(tz, tx).eval(), 1 / y)
        self.ck_fequals(get_grad(tz, ty).eval(), - x/(y**2))
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)
    def test_fwd_dotvv(self):
        """Vector dot product matches numpy's @ operator."""
        x = np.random.randn(7)
        y = np.random.randn(7)
        tx = val(x)
        ty = val(y)
        tz = rd.build_dot_vv(tx, ty)
        self.ck_fequals(tz.eval(), x @ y)
    def test_bwd_dotvv(self):
        """d(x.y)/dx = y and d(x.y)/dy = x."""
        x = np.random.randn(7)
        y = np.random.randn(7)
        tx = val(x)
        ty = val(y)
        tlone = val(10)
        tz = rd.build_dot_vv(tx, ty)
        self.ck_fequals(get_grad(tz, tx).eval(), y)
        self.ck_fequals(get_grad(tz, ty).eval(), x)
        self.ck_fequals(get_grad(tz, tlone).eval(), 0)
    def test_bwd_vsadd(self):
        """Scalar + vector broadcast add: gradients match torch autograd."""
        x = np.random.randn()
        y = np.random.randn(12).astype(np.float32)
        tx = val(x)
        ty = val(y)
        tz = rd.build_vsadd(tx, ty)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dy = torch.tensor(y, requires_grad=True)
        dz = dx + dy
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty).eval(), dy.grad.data.numpy())
    def test_bwd_vsmul(self):
        """Scalar * vector broadcast multiply: gradients match torch."""
        x = np.random.randn()
        y = np.random.randn(12).astype(np.float32)
        tx = val(x)
        ty = val(y)
        tz = rd.build_vsmul(tx, ty)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dy = torch.tensor(y, requires_grad=True)
        dz = dx * dy
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty).eval(), dy.grad.data.numpy())
    def test_bwd_vsdiv(self):
        """Scalar / vector broadcast divide (denominator kept > 0.1)."""
        x = np.random.randn()
        y = np.random.rand(12).astype(np.float32) + 0.1
        tx = val(x)
        ty = val(y)
        tz = rd.build_vsdiv(tx, ty)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dy = torch.tensor(y, requires_grad=True)
        dz = dx / dy
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(te, ty).eval(), dy.grad.data.numpy())
    def test_bwd_vlog(self):
        """Element-wise log gradient matches torch (inputs kept > 0.1)."""
        x = np.random.rand(12).astype(np.float32) + 0.1
        tx = val(x)
        tz = rd.build_vlog(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.log(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())
class LinReg(RDTestCase):
    """End-to-end regression/classification loss tests, comparing revdiff
    graphs (values and gradients) against equivalent torch computations."""
    def test_mse(self):
        """mse() value and gradients vs torch.nn.MSELoss."""
        y_pred = np.random.randn(46)
        y_true = np.random.randn(46)
        dy_pred = val(y_pred)
        dy_true = val(y_true)
        dloss = mse(dy_pred, dy_true)
        ty_pred = torch.tensor(y_pred, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=True)
        criterion = torch.nn.MSELoss()
        tloss = criterion(ty_pred, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(dloss, dy_pred).eval(), ty_pred.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_true).eval(), ty_true.grad.data.numpy())
    def test_sgd_mse(self):
        """Linear model X@w with MSE: loss and all parameter gradients."""
        X = np.random.randn(46, 7)
        w = np.random.randn(7)
        y_true = np.random.randn(46)
        dX = val(X)
        dw = val(w)
        dy_true = val(y_true)
        dy_pred = rd.build_dot_mv(dX, dw)
        dloss = mse(dy_pred, dy_true)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=True)
        ty_pred = torch.matmul(tX, tw)
        # utils.save_grad hooks the intermediate tensor so its gradient
        # can be read back after backward().
        utils.save_grad(ty_pred)
        criterion = torch.nn.MSELoss()
        tloss = criterion(ty_pred, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(get_grad(dloss, dy_pred).eval(),
                        utils.get_grad(ty_pred).data.numpy())
        self.ck_fequals(get_grad(dloss, dy_true).eval(), ty_true.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())
    def test_sgd_logreg_2(self):
        """Binary logistic regression vs torch BCEWithLogitsLoss(sum)."""
        X = np.random.randn(46, 7).astype(np.float32)
        w = np.random.randn(7).astype(np.float32)
        y_true = np.random.randint(0, 2, (46)).astype(np.float32)
        dX = val(X)
        dw = val(w)
        dy_true = val(y_true)
        dy_out = rd.build_dot_mv(dX, dw)
        dy_pred = rd.build_vsigmoid(dy_out)
        dloss = rd.build_bce_loss(dy_out, dy_true)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=False)
        ty_out = torch.matmul(tX, tw)
        utils.save_grad(ty_out)
        ty_pred = torch.sigmoid(ty_out)
        criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')
        tloss = criterion(ty_out, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_out).eval(),
                        utils.get_grad(ty_out).data.numpy())
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())
    def test_sgd_logreg_2_prim(self):
        """Same as test_sgd_logreg_2 but with sigmoid and BCE built from
        primitive ops (exp/log/add/div) rather than fused nodes."""
        X = np.random.randn(46, 7).astype(np.float32)
        w = np.random.randn(7).astype(np.float32)
        y_true = np.random.randint(0, 2, (46)).astype(np.float32)
        dX = val(X)
        dw = val(w)
        dy_true = val(y_true)
        dy_out = rd.build_dot_mv(dX, dw)
        dy_pred = rd.build_vsdiv(1, rd.build_vsadd(1, rd.build_vexp((-dy_out))))
        dloss = - rd.op_sum(dy_true * rd.build_vlog(dy_pred)
                            + (rd.build_vsadd(1, -dy_true))
                            * rd.build_vlog(rd.build_vsadd(1, -dy_pred)), axis=0)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=False)
        ty_out = torch.matmul(tX, tw)
        utils.save_grad(ty_out)
        ty_pred = torch.sigmoid(ty_out)
        criterion = torch.nn.BCEWithLogitsLoss(reduction='sum')
        tloss = criterion(ty_out, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-2)
        self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_out).eval(),
                        utils.get_grad(ty_out).data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy(), feps=1e-3)
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy(), feps=1e-4)
    def test_sgd_logreg_k(self):
        """K-class softmax regression vs torch CrossEntropyLoss(sum)."""
        X = np.random.randn(46, 7).astype(np.float32)
        w = np.random.randn(7, 4).astype(np.float32)
        y_true = np.zeros((46, 4)).astype(np.float32)
        # One-hot labels with a random class per sample.
        for i in range(y_true.shape[0]):
            y_true[i][np.random.randint(0, y_true.shape[1])] = 1
        dX = val(X)
        dw = val(w)
        dy_true = val(y_true)
        dy_out = rd.build_dot_mm(dX, dw)
        dy_pred = rd.build_softmax(dy_out)
        dloss = rd.build_cross_entropy_loss(dy_out, dy_true)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=False)
        # torch CrossEntropyLoss expects class indices, not one-hot rows.
        ty_true = torch.argmax(ty_true, dim=1)
        ty_out = torch.matmul(tX, tw)
        ty_pred = torch.nn.functional.softmax(ty_out, dim=1)
        utils.save_grad(ty_out)
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        tloss = criterion(ty_out, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_out).eval(),
                        utils.get_grad(ty_out).data.numpy())
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())
    def test_sgd_logreg_k_l1_l2(self):
        """Softmax regression with added L1 and L2 weight penalties."""
        X = np.random.randn(46, 7).astype(np.float32)
        w = np.random.randn(7, 4).astype(np.float32)
        y_true = np.zeros((46, 4)).astype(np.float32)
        for i in range(y_true.shape[0]):
            y_true[i][np.random.randint(0, y_true.shape[1])] = 1
        alpha_l1 = 0.53
        alpha_l2 = 0.82
        dX = val(X)
        dw = val(w)
        dw_flat = rd.build_reshape(dw, (dw.shape[0] * dw.shape[1],))
        dy_true = val(y_true)
        dy_out = rd.build_dot_mm(dX, dw)
        dy_pred = rd.build_softmax(dy_out)
        dloss = rd.build_cross_entropy_loss(dy_out, dy_true)
        dloss = dloss + alpha_l1 * rd.build_norm1(dw_flat)
        dloss = dloss + alpha_l2 * rd.build_dot_vv(dw_flat, dw_flat)
        tX = torch.tensor(X, requires_grad=True)
        tw = torch.tensor(w, requires_grad=True)
        tw_flat = tw.view(-1)
        ty_true = torch.tensor(y_true, requires_grad=False)
        ty_true = torch.argmax(ty_true, dim=1)
        ty_out = torch.matmul(tX, tw)
        ty_pred = torch.nn.functional.softmax(ty_out, dim=1)
        utils.save_grad(ty_out)
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        tloss = criterion(ty_out, ty_true)
        tloss = tloss + alpha_l1 * torch.norm(tw_flat, p=1) + alpha_l2 * torch.dot(tw_flat, tw_flat)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(dy_pred.eval(), ty_pred.data.numpy())
        self.ck_fequals(get_grad(dloss, dy_out).eval(),
                        utils.get_grad(ty_out).data.numpy())
        self.ck_fequals(get_grad(dloss, dw).eval(), tw.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())
class MLP(RDTestCase):
    """Dense layer, activation functions, and element-wise losses, each
    checked for forward values and gradients against a PyTorch reference."""

    def test_layer_lin1(self):
        """Linear layer y = X @ W + b under an MSE objective."""
        X = np.random.randn(46, 7)
        y_true = np.random.randn(46, 3)
        W = np.random.randn(7, 3)
        b = np.random.randn(3)
        dX = val(X)
        dy_true = val(y_true)
        dW = val(W)
        db = val(b)
        dy_pred = rd.build_add_bias(rd.build_dot_mm(dX, dW), db)
        # Flatten both matrices so the vector MSE helper can be reused.
        dloss = mse(rd.build_reshape(dy_pred, (y_true.size,)),
                    rd.build_reshape(dy_true, (y_true.size,)))
        tX = torch.tensor(X, requires_grad=True)
        ty_true = torch.tensor(y_true, requires_grad=True)
        tW = torch.tensor(W, requires_grad=True)
        tb = torch.tensor(b, requires_grad=True)
        ty_pred = torch.matmul(tX, tW) + tb
        criterion = torch.nn.MSELoss()
        tloss = criterion(ty_pred, ty_true)
        tloss.backward()
        self.ck_fequals(dloss.eval(), tloss.data.numpy(), feps=1e-3)
        self.ck_fequals(get_grad(dloss, dy_true).eval(), ty_true.grad.data.numpy())
        self.ck_fequals(get_grad(dloss, dW).eval(), tW.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(dloss, db).eval(), tb.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(dloss, dX).eval(), tX.grad.data.numpy())

    def test_act_relu(self):
        """ReLU activation: values and gradient of the sum of squares."""
        x = np.random.randn(43)
        tx = val(x)
        tz = rd.build_vrelu(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.relu(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_softmax(self):
        """Softmax activation (row-wise, over dim 1).

        Bug fix: the PyTorch reference previously applied torch.relu
        instead of softmax, which made the value comparison meaningless
        (it needed feps=1e-1) and forced the gradient check to be
        commented out. With the correct reference both checks hold at
        the default tolerance.
        """
        x = np.random.randn(11, 7)
        tx = val(x)
        tz = rd.build_softmax(tx)
        tz = rd.build_reshape(tz, (11 * 7,))
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.nn.functional.softmax(dx, dim=1).view(-1)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_softplus(self):
        """Softplus activation with beta = 0.7."""
        x = np.random.randn(23)
        tx = val(x)
        tz = rd.build_vsoftplus(tx, 0.7)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.nn.functional.softplus(dx, 0.7)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_tanh(self):
        """Hyperbolic tangent activation."""
        x = np.random.randn(23)
        tx = val(x)
        tz = rd.build_vtanh(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.tanh(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_sigmoid(self):
        """Sigmoid activation."""
        x = np.random.randn(23)
        tx = val(x)
        tz = rd.build_vsigmoid(tx)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.sigmoid(dx)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_leaky_relu(self):
        """Leaky ReLU with negative slope 0.05 (scaled input exercises
        both branches)."""
        x = np.random.randn(43) * 4
        tx = val(x)
        tz = rd.build_vleaky_relu(tx, 0.05)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.nn.functional.leaky_relu(dx, 0.05)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_act_elu(self):
        """ELU with alpha 0.05."""
        x = np.random.randn(43) * 4
        tx = val(x)
        tz = rd.build_velu(tx, 0.05)
        te = rd.build_dot_vv(tz, tz)
        dx = torch.tensor(x, requires_grad=True)
        dz = torch.nn.functional.elu(dx, 0.05)
        de = torch.dot(dz, dz)
        de.backward()
        self.ck_fequals(tz.eval(), dz.data.numpy())
        self.ck_fequals(get_grad(te, tx).eval(), dx.grad.data.numpy())

    def test_mae_loss(self):
        """Mean absolute error: loss value and gradients w.r.t. both
        predictions and targets."""
        y_true = np.random.randn(17)
        y_pred = np.random.randn(17)
        ty_true = val(y_true)
        ty_pred = val(y_pred)
        te = rd.build_mae_loss(ty_pred, ty_true)
        dy_true = torch.tensor(y_true, requires_grad=True)
        dy_pred = torch.tensor(y_pred, requires_grad=True)
        # 'elementwise_mean' was deprecated in PyTorch 1.0 and later
        # removed; 'mean' is the equivalent reduction.
        criterion = torch.nn.L1Loss(reduction='mean')
        de = criterion(dy_pred, dy_true)
        de.backward()
        self.ck_fequals(te.eval(), de.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty_pred).eval(), dy_pred.grad.data.numpy())
        self.ck_fequals(get_grad(te, ty_true).eval(), dy_true.grad.data.numpy())

    def test_mse_loss(self):
        """Mean squared error: loss value and gradients w.r.t. both
        predictions and targets."""
        y_true = np.random.randn(17)
        y_pred = np.random.randn(17)
        ty_true = val(y_true)
        ty_pred = val(y_pred)
        te = rd.build_mse_loss(ty_pred, ty_true)
        dy_true = torch.tensor(y_true, requires_grad=True)
        dy_pred = torch.tensor(y_pred, requires_grad=True)
        # 'elementwise_mean' was deprecated in PyTorch 1.0 and later
        # removed; 'mean' is the equivalent reduction.
        criterion = torch.nn.MSELoss(reduction='mean')
        de = criterion(dy_pred, dy_true)
        de.backward()
        self.ck_fequals(te.eval(), de.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, ty_pred).eval(), dy_pred.grad.data.numpy())
        self.ck_fequals(get_grad(te, ty_true).eval(), dy_true.grad.data.numpy())
class ConvNet(RDTestCase):
    """Conv2d, transposed conv2d, and max-pooling operators, checked for
    forward values and gradients against their PyTorch counterparts."""

    def test_conv2d(self):
        """build_conv2d across several strides/paddings, plus a bias case."""
        # First case: convolution with a bias term, unit stride, no padding.
        X = np.random.randn(2, 3, 17, 23).astype(np.float32)
        K = np.random.randn(4, 3, 5, 8).astype(np.float32)
        b = np.random.randn(4).astype(np.float32)
        tX, tK, tb = val(X), val(K), val(b)
        tY = rd.build_conv2d_bias_add(rd.build_conv2d(tX, tK, 1, 1, 0, 0), tb)
        tYf = rd.build_reshape(tY, (-1,))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        db = torch.tensor(b, requires_grad=True)
        dY = torch.nn.functional.conv2d(dX, dK, bias=db, stride=(1, 1))
        de = torch.dot(dY.view(-1), dY.view(-1))
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tb).eval(), db.grad.data.numpy(), feps=1e-3)
        # Remaining cases (no bias): (input shape, kernel shape,
        # stride rows/cols, padding rows/cols).
        cases = [
            ((2, 3, 17, 24), (4, 3, 5, 8), 3, 4, 0, 0),
            ((2, 3, 17, 24), (4, 3, 5, 8), 3, 4, 6, 8),
            ((2, 3, 16, 26), (4, 3, 6, 8), 3, 4, 7, 11),
        ]
        for x_shape, k_shape, sr, sc, pr, pc in cases:
            X = np.random.randn(*x_shape).astype(np.float32)
            K = np.random.randn(*k_shape).astype(np.float32)
            tX, tK = val(X), val(K)
            tY = rd.build_conv2d(tX, tK, sr, sc, pr, pc)
            tYf = rd.build_reshape(tY, (-1,))
            te = rd.build_dot_vv(tYf, tYf)
            dX = torch.tensor(X, requires_grad=True)
            dK = torch.tensor(K, requires_grad=True)
            dY = torch.nn.functional.conv2d(dX, dK, stride=(sr, sc),
                                            padding=(pr, pc))
            de = torch.dot(dY.view(-1), dY.view(-1))
            de.backward()
            self.ck_fequals(tY.eval(), dY.data.numpy())
            self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(),
                            feps=1e-5)
            self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy())

    def test_conv2d_transpose(self):
        """op_conv2d_transpose across several strides/paddings, plus a
        bias case."""
        # First case: transposed convolution with a bias term.
        X = np.random.randn(2, 4, 13, 16).astype(np.float32)
        K = np.random.randn(4, 3, 5, 8).astype(np.float32)
        b = np.random.randn(3).astype(np.float32)
        tX, tK, tb = val(X), val(K), val(b)
        tY = rd.build_conv2d_bias_add(rd.op_conv2d_transpose(tX, tK, 1, 1, 0, 0), tb)
        tYf = rd.build_reshape(tY, (-1,))
        te = rd.build_dot_vv(tYf, tYf)
        dX = torch.tensor(X, requires_grad=True)
        dK = torch.tensor(K, requires_grad=True)
        db = torch.tensor(b, requires_grad=True)
        dY = torch.nn.functional.conv_transpose2d(dX, dK, bias=db, stride=(1, 1))
        de = torch.dot(dY.view(-1), dY.view(-1))
        de.backward()
        self.ck_fequals(tY.eval(), dY.data.numpy())
        self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(), feps=1e-4)
        self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy(), feps=1e-5)
        self.ck_fequals(get_grad(te, tb).eval(), db.grad.data.numpy(), feps=1e-3)
        # Remaining cases (no bias): (input shape, kernel shape,
        # stride rows/cols, padding rows/cols).
        cases = [
            ((2, 4, 5, 5), (4, 3, 5, 8), 3, 4, 0, 0),
            ((2, 4, 5, 5), (4, 3, 5, 8), 3, 4, 6, 8),
            ((2, 4, 9, 11), (4, 3, 6, 8), 3, 4, 7, 11),
        ]
        for x_shape, k_shape, sr, sc, pr, pc in cases:
            X = np.random.randn(*x_shape).astype(np.float32)
            K = np.random.randn(*k_shape).astype(np.float32)
            tX, tK = val(X), val(K)
            tY = rd.op_conv2d_transpose(tX, tK, sr, sc, pr, pc)
            tYf = rd.build_reshape(tY, (-1,))
            te = rd.build_dot_vv(tYf, tYf)
            dX = torch.tensor(X, requires_grad=True)
            dK = torch.tensor(K, requires_grad=True)
            dY = torch.nn.functional.conv_transpose2d(dX, dK, stride=(sr, sc),
                                                      padding=(pr, pc))
            de = torch.dot(dY.view(-1), dY.view(-1))
            de.backward()
            self.ck_fequals(tY.eval(), dY.data.numpy())
            self.ck_fequals(get_grad(te, tK).eval(), dK.grad.data.numpy(),
                            feps=1e-5)
            self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy(),
                            feps=1e-5)

    def test_maxpooling(self):
        """build_max_pooling for several (input shape, kernel, stride)
        combinations, including overlapping windows."""
        cases = [
            ((1, 1, 6, 6), 2, 2, 2, 2),
            ((1, 1, 5, 5), 2, 2, 1, 1),
            ((2, 3, 9, 7), 3, 2, 2, 1),
        ]
        for x_shape, kr, kc, sr, sc in cases:
            X = np.random.randn(*x_shape)
            tX = val(X)
            tY = rd.build_max_pooling(tX, kr, kc, sr, sc)
            tYf = rd.build_reshape(tY, (-1,))
            te = rd.build_dot_vv(tYf, tYf)
            dX = torch.tensor(X, requires_grad=True)
            dY = torch.nn.functional.max_pool2d(dX, (kr, kc), (sr, sc))
            de = torch.dot(dY.view(-1), dY.view(-1))
            de.backward()
            self.ck_fequals(tY.eval(), dY.data.numpy())
            self.ck_fequals(get_grad(te, tX).eval(), dX.grad.data.numpy())
class ModelsBack(RDTestCase):
    """End-to-end backward checks: a full network is built twice — once
    with torch.nn and once with rd.Network — given identical weights, and
    the loss, logits, and every parameter gradient must agree."""
    def test_dense1(self):
        """3-layer MLP (linear + ReLU) under a summed cross-entropy loss."""
        N = 17             # batch size
        IN_SIZE = 23       # input features
        HIDDEN1_SIZE = 16
        HIDDEN2_SIZE = 9
        OUT_SIZE = 4       # number of classes
        LR = 0.001
        class TNet(torch.nn.Module):
            """PyTorch reference network."""
            def __init__(self):
                super(TNet, self).__init__()
                self.l1 = torch.nn.Linear(IN_SIZE , HIDDEN1_SIZE)
                self.l2 = torch.nn.Linear(HIDDEN1_SIZE, HIDDEN2_SIZE)
                self.l3 = torch.nn.Linear(HIDDEN2_SIZE, OUT_SIZE)
            def forward(self, x):
                x = x.view(-1, IN_SIZE)
                x = torch.relu(self.l1(x))
                x = torch.relu(self.l2(x))
                y_logits = self.l3(x)
                return y_logits
        tnet = TNet()
        criterion = torch.nn.CrossEntropyLoss(reduction='sum')
        class DNet(rd.Network):
            """revdiff network mirroring TNet layer for layer."""
            def __init__(self):
                super().__init__()
                self.l1 = self.dense_layer(IN_SIZE , HIDDEN1_SIZE)
                self.l2 = self.dense_layer(HIDDEN1_SIZE, HIDDEN2_SIZE)
                self.l3 = self.dense_layer(HIDDEN2_SIZE, OUT_SIZE)
            def forward(self, x):
                x = rd.build_reshape(x, (-1, IN_SIZE))
                x = rd.build_vrelu(self.l1(x))
                x = rd.build_vrelu(self.l2(x))
                y_logits = self.l3(x)
                return y_logits
        dnet = DNet()
        X_sample = np.random.randn(N, IN_SIZE).astype(np.float32)
        y_sample = np.random.randint(0, OUT_SIZE, size=N)
        # Copy the torch weights into the revdiff network.  The transpose
        # suggests revdiff stores dense weights as (in, out) while torch
        # uses (out, in); the two params_ lists must be in matching order.
        tparams = list(tnet.parameters())
        for i in range(len(tparams)):
            dnet.params_[i].update(tparams[i].data.numpy().T)
        # Torch forward/backward pass.
        tX = torch.tensor(X_sample)
        ty = torch.tensor(y_sample)
        ty_logits = tnet(tX)
        tloss = criterion(ty_logits, ty)
        tnet.zero_grad()
        tloss.backward()
        # revdiff forward pass; targets are one-hot encoded because
        # build_cross_entropy_loss expects a full target matrix.
        dX = rd.build_val(X_sample)
        dy = rd.build_val(utils.vec2one_hot(y_sample, OUT_SIZE))
        dy_logits = dnet(dX)
        dloss = rd.build_cross_entropy_loss(dy_logits, dy)
        self.ck_fequals(ty_logits.data.numpy(), dy_logits.eval())
        self.ck_fequals(tloss.data.numpy(), dloss.eval(), feps=1e-4)
        # Every parameter gradient must match (transposed back to the
        # torch layout for the comparison).
        tparams = list(tnet.parameters())
        for i in range(len(tparams)):
            grad = rd.build_node_grad(dloss, dnet.params_[i]).eval()
            grad_sol = tparams[i].grad.data.numpy().T
            self.ck_fequals(grad, grad_sol)
    def test_conv1(self):
        """LeNet-style conv net (conv + ReLU + max-pool twice, then a
        dense head) under a summed MSE loss."""
        F = torch.nn.functional
        class TNet(torch.nn.Module):
            """PyTorch reference network."""
            def __init__(self):
                super(TNet, self).__init__()
                self.conv1 = torch.nn.Conv2d(1, 6, 5)
                self.conv2 = torch.nn.Conv2d(6, 16, 5)
                self.fc1 = torch.nn.Linear(16 * 5 * 5, 4)
            def forward(self, x):
                x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
                x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
                x = x.view(x.shape[0], -1)
                x = self.fc1(x)
                return x
        tnet = TNet()
        criterion = torch.nn.MSELoss(reduction='sum')
        class DNet(rd.Network):
            """revdiff network mirroring TNet layer for layer."""
            def __init__(self):
                super().__init__()
                self.conv1 = self.conv2d_layer(1, 6, 5, 5)
                self.conv2 = self.conv2d_layer(6, 16, 5, 5)
                self.fc = self.dense_layer(16 * 5 * 5, 4)
            def forward(self, x):
                x = rd.build_vrelu(self.conv1(x))
                x = rd.build_max_pooling(x, 2, 2, 2, 2)
                x = rd.build_vrelu(self.conv2(x))
                x = rd.build_max_pooling(x, 2, 2, 2, 2)
                x = rd.build_reshape(x, (x.shape[0], -1))
                x = self.fc(x)
                return x
        dnet = DNet()
        X = np.random.randn(3, 1, 32, 32).astype(np.float32)
        y = np.random.randn(3, 4).astype(np.float32)
        # Copy the torch weights into the revdiff network: only 2-D
        # (dense) weight matrices need transposing; conv kernels and
        # bias vectors are copied as-is.
        tparams = list(tnet.parameters())
        for i in range(len(tparams)):
            if len(tparams[i].shape) == 2:
                dnet.params_[i].update(tparams[i].data.numpy().T)
            else:
                dnet.params_[i].update(tparams[i].data.numpy())
        # Torch forward/backward pass.
        tX = torch.tensor(X)
        ty = torch.tensor(y)
        ty_logits = tnet(tX)
        tloss = criterion(ty_logits, ty)
        tnet.zero_grad()
        tloss.backward()
        # revdiff forward pass.
        dX = rd.build_val(X)
        dy = rd.build_val(y)
        dy_logits = dnet(dX)
        dloss = rd.op_mse_loss(dy_logits, dy)
        self.ck_fequals(ty_logits.data.numpy(), dy_logits.eval())
        self.ck_fequals(tloss.data.numpy(), dloss.eval(), feps=1e-5)
        # Every parameter gradient must match (dense weights transposed
        # back to the torch layout for the comparison).
        tparams = list(tnet.parameters())
        for i in range(len(tparams)):
            grad = rd.build_node_grad(dloss, dnet.params_[i]).eval()
            if len(tparams[i].shape) == 2:
                grad_sol = tparams[i].grad.data.numpy().T
            else:
                grad_sol = tparams[i].grad.data.numpy()
            self.ck_fequals(grad, grad_sol)
if __name__ == '__main__':
    # Discover and run all the test cases above when executed directly.
    unittest.main()
| [
"torch.nn.functional.conv2d",
"revdiff.op_sum",
"revdiff.build_vsmul",
"torch.nn.CrossEntropyLoss",
"numpy.random.rand",
"revdiff.build_mae_loss",
"torch.nn.L1Loss",
"numpy.array",
"torch.nn.MSELoss",
"torch.nn.functional.conv_transpose2d",
"revdiff.build_dot_mv",
"unittest.main",
"revdiff.b... | [((131, 157), 'revdiff.build_node_grad', 'rd.build_node_grad', (['out', 'x'], {}), '(out, x)\n', (149, 157), True, 'import revdiff as rd\n'), ((182, 197), 'revdiff.build_val', 'rd.build_val', (['x'], {}), '(x)\n', (194, 197), True, 'import revdiff as rd\n'), ((379, 409), 'revdiff.op_sum', 'rd.op_sum', (['(diff * diff)'], {'axis': '(0)'}), '(diff * diff, axis=0)\n', (388, 409), True, 'import revdiff as rd\n'), ((33337, 33352), 'unittest.main', 'unittest.main', ([], {}), '()\n', (33350, 33352), False, 'import unittest\n'), ((877, 892), 'revdiff.build_val', 'rd.build_val', (['(2)'], {}), '(2)\n', (889, 892), True, 'import revdiff as rd\n'), ((905, 920), 'revdiff.build_val', 'rd.build_val', (['(3)'], {}), '(3)\n', (917, 920), True, 'import revdiff as rd\n'), ((1036, 1051), 'revdiff.build_val', 'rd.build_val', (['(2)'], {}), '(2)\n', (1048, 1051), True, 'import revdiff as rd\n'), ((1064, 1079), 'revdiff.build_val', 'rd.build_val', (['(3)'], {}), '(3)\n', (1076, 1079), True, 'import revdiff as rd\n'), ((1322, 1333), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (1330, 1333), True, 'import numpy as np\n'), ((1346, 1357), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (1354, 1357), True, 'import numpy as np\n'), ((1428, 1453), 'numpy.random.randn', 'np.random.randn', (['(3)', '(12)', '(7)'], {}), '(3, 12, 7)\n', (1443, 1453), True, 'import numpy as np\n'), ((1466, 1491), 'numpy.random.randn', 'np.random.randn', (['(3)', '(12)', '(7)'], {}), '(3, 12, 7)\n', (1481, 1491), True, 'import numpy as np\n'), ((1591, 1602), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (1599, 1602), True, 'import numpy as np\n'), ((1615, 1626), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (1623, 1626), True, 'import numpy as np\n'), ((1913, 1924), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (1921, 1924), True, 'import numpy as np\n'), ((1937, 1948), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (1945, 1948), True, 'import numpy as np\n'), ((2019, 
2044), 'numpy.random.randn', 'np.random.randn', (['(3)', '(12)', '(7)'], {}), '(3, 12, 7)\n', (2034, 2044), True, 'import numpy as np\n'), ((2057, 2082), 'numpy.random.randn', 'np.random.randn', (['(3)', '(12)', '(7)'], {}), '(3, 12, 7)\n', (2072, 2082), True, 'import numpy as np\n'), ((2182, 2193), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (2190, 2193), True, 'import numpy as np\n'), ((2206, 2217), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (2214, 2217), True, 'import numpy as np\n'), ((2505, 2516), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (2513, 2516), True, 'import numpy as np\n'), ((2529, 2540), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (2537, 2540), True, 'import numpy as np\n'), ((2611, 2636), 'numpy.random.randn', 'np.random.randn', (['(3)', '(12)', '(7)'], {}), '(3, 12, 7)\n', (2626, 2636), True, 'import numpy as np\n'), ((2649, 2674), 'numpy.random.randn', 'np.random.randn', (['(3)', '(12)', '(7)'], {}), '(3, 12, 7)\n', (2664, 2674), True, 'import numpy as np\n'), ((2774, 2785), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (2782, 2785), True, 'import numpy as np\n'), ((2798, 2809), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (2806, 2809), True, 'import numpy as np\n'), ((3096, 3107), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (3104, 3107), True, 'import numpy as np\n'), ((3120, 3131), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (3128, 3131), True, 'import numpy as np\n'), ((3202, 3227), 'numpy.random.randn', 'np.random.randn', (['(3)', '(12)', '(7)'], {}), '(3, 12, 7)\n', (3217, 3227), True, 'import numpy as np\n'), ((3240, 3265), 'numpy.random.randn', 'np.random.randn', (['(3)', '(12)', '(7)'], {}), '(3, 12, 7)\n', (3255, 3265), True, 'import numpy as np\n'), ((3376, 3387), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (3384, 3387), True, 'import numpy as np\n'), ((3400, 3411), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (3408, 3411), True, 'import numpy as np\n'), ((3712, 3730), 
'numpy.random.randn', 'np.random.randn', (['(7)'], {}), '(7)\n', (3727, 3730), True, 'import numpy as np\n'), ((3743, 3761), 'numpy.random.randn', 'np.random.randn', (['(7)'], {}), '(7)\n', (3758, 3761), True, 'import numpy as np\n'), ((3815, 3838), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tx', 'ty'], {}), '(tx, ty)\n', (3830, 3838), True, 'import revdiff as rd\n'), ((3924, 3942), 'numpy.random.randn', 'np.random.randn', (['(7)'], {}), '(7)\n', (3939, 3942), True, 'import numpy as np\n'), ((3955, 3973), 'numpy.random.randn', 'np.random.randn', (['(7)'], {}), '(7)\n', (3970, 3973), True, 'import numpy as np\n'), ((4051, 4074), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tx', 'ty'], {}), '(tx, ty)\n', (4066, 4074), True, 'import revdiff as rd\n'), ((4278, 4295), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4293, 4295), True, 'import numpy as np\n'), ((4401, 4423), 'revdiff.build_vsadd', 'rd.build_vsadd', (['tx', 'ty'], {}), '(tx, ty)\n', (4415, 4423), True, 'import revdiff as rd\n'), ((4437, 4460), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (4452, 4460), True, 'import revdiff as rd\n'), ((4475, 4510), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (4487, 4510), False, 'import torch\n'), ((4524, 4559), 'torch.tensor', 'torch.tensor', (['y'], {'requires_grad': '(True)'}), '(y, requires_grad=True)\n', (4536, 4559), False, 'import torch\n'), ((4594, 4611), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (4603, 4611), False, 'import torch\n'), ((4840, 4857), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4855, 4857), True, 'import numpy as np\n'), ((4963, 4985), 'revdiff.build_vsmul', 'rd.build_vsmul', (['tx', 'ty'], {}), '(tx, ty)\n', (4977, 4985), True, 'import revdiff as rd\n'), ((4999, 5022), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (5014, 5022), True, 'import revdiff as rd\n'), ((5037, 5072), 
'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (5049, 5072), False, 'import torch\n'), ((5086, 5121), 'torch.tensor', 'torch.tensor', (['y'], {'requires_grad': '(True)'}), '(y, requires_grad=True)\n', (5098, 5121), False, 'import torch\n'), ((5156, 5173), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (5165, 5173), False, 'import torch\n'), ((5402, 5419), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5417, 5419), True, 'import numpy as np\n'), ((5530, 5552), 'revdiff.build_vsdiv', 'rd.build_vsdiv', (['tx', 'ty'], {}), '(tx, ty)\n', (5544, 5552), True, 'import revdiff as rd\n'), ((5566, 5589), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (5581, 5589), True, 'import revdiff as rd\n'), ((5604, 5639), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (5616, 5639), False, 'import torch\n'), ((5653, 5688), 'torch.tensor', 'torch.tensor', (['y'], {'requires_grad': '(True)'}), '(y, requires_grad=True)\n', (5665, 5688), False, 'import torch\n'), ((5723, 5740), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (5732, 5740), False, 'import torch\n'), ((6038, 6055), 'revdiff.build_vlog', 'rd.build_vlog', (['tx'], {}), '(tx)\n', (6051, 6055), True, 'import revdiff as rd\n'), ((6069, 6092), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (6084, 6092), True, 'import revdiff as rd\n'), ((6107, 6142), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (6119, 6142), False, 'import torch\n'), ((6156, 6169), 'torch.log', 'torch.log', (['dx'], {}), '(dx)\n', (6165, 6169), False, 'import torch\n'), ((6183, 6200), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (6192, 6200), False, 'import torch\n'), ((6374, 6393), 'numpy.random.randn', 'np.random.randn', (['(46)'], {}), '(46)\n', (6389, 6393), True, 'import numpy as np\n'), ((6411, 
6430), 'numpy.random.randn', 'np.random.randn', (['(46)'], {}), '(46)\n', (6426, 6430), True, 'import numpy as np\n'), ((6557, 6597), 'torch.tensor', 'torch.tensor', (['y_pred'], {'requires_grad': '(True)'}), '(y_pred, requires_grad=True)\n', (6569, 6597), False, 'import torch\n'), ((6616, 6656), 'torch.tensor', 'torch.tensor', (['y_true'], {'requires_grad': '(True)'}), '(y_true, requires_grad=True)\n', (6628, 6656), False, 'import torch\n'), ((6677, 6695), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (6693, 6695), False, 'import torch\n'), ((7045, 7067), 'numpy.random.randn', 'np.random.randn', (['(46)', '(7)'], {}), '(46, 7)\n', (7060, 7067), True, 'import numpy as np\n'), ((7080, 7098), 'numpy.random.randn', 'np.random.randn', (['(7)'], {}), '(7)\n', (7095, 7098), True, 'import numpy as np\n'), ((7116, 7135), 'numpy.random.randn', 'np.random.randn', (['(46)'], {}), '(46)\n', (7131, 7135), True, 'import numpy as np\n'), ((7225, 7248), 'revdiff.build_dot_mv', 'rd.build_dot_mv', (['dX', 'dw'], {}), '(dX, dw)\n', (7240, 7248), True, 'import revdiff as rd\n'), ((7301, 7336), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (7313, 7336), False, 'import torch\n'), ((7350, 7385), 'torch.tensor', 'torch.tensor', (['w'], {'requires_grad': '(True)'}), '(w, requires_grad=True)\n', (7362, 7385), False, 'import torch\n'), ((7404, 7444), 'torch.tensor', 'torch.tensor', (['y_true'], {'requires_grad': '(True)'}), '(y_true, requires_grad=True)\n', (7416, 7444), False, 'import torch\n'), ((7463, 7483), 'torch.matmul', 'torch.matmul', (['tX', 'tw'], {}), '(tX, tw)\n', (7475, 7483), False, 'import torch\n'), ((7492, 7516), 'utils.save_grad', 'utils.save_grad', (['ty_pred'], {}), '(ty_pred)\n', (7507, 7516), False, 'import utils\n'), ((7537, 7555), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (7553, 7555), False, 'import torch\n'), ((8349, 8372), 'revdiff.build_dot_mv', 'rd.build_dot_mv', (['dX', 'dw'], {}), 
'(dX, dw)\n', (8364, 8372), True, 'import revdiff as rd\n'), ((8391, 8416), 'revdiff.build_vsigmoid', 'rd.build_vsigmoid', (['dy_out'], {}), '(dy_out)\n', (8408, 8416), True, 'import revdiff as rd\n'), ((8433, 8467), 'revdiff.build_bce_loss', 'rd.build_bce_loss', (['dy_out', 'dy_true'], {}), '(dy_out, dy_true)\n', (8450, 8467), True, 'import revdiff as rd\n'), ((8482, 8517), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (8494, 8517), False, 'import torch\n'), ((8531, 8566), 'torch.tensor', 'torch.tensor', (['w'], {'requires_grad': '(True)'}), '(w, requires_grad=True)\n', (8543, 8566), False, 'import torch\n'), ((8585, 8626), 'torch.tensor', 'torch.tensor', (['y_true'], {'requires_grad': '(False)'}), '(y_true, requires_grad=False)\n', (8597, 8626), False, 'import torch\n'), ((8644, 8664), 'torch.matmul', 'torch.matmul', (['tX', 'tw'], {}), '(tX, tw)\n', (8656, 8664), False, 'import torch\n'), ((8673, 8696), 'utils.save_grad', 'utils.save_grad', (['ty_out'], {}), '(ty_out)\n', (8688, 8696), False, 'import utils\n'), ((8715, 8736), 'torch.sigmoid', 'torch.sigmoid', (['ty_out'], {}), '(ty_out)\n', (8728, 8736), False, 'import torch\n'), ((8757, 8800), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (8783, 8800), False, 'import torch\n'), ((9563, 9586), 'revdiff.build_dot_mv', 'rd.build_dot_mv', (['dX', 'dw'], {}), '(dX, dw)\n', (9578, 9586), True, 'import revdiff as rd\n'), ((9892, 9927), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (9904, 9927), False, 'import torch\n'), ((9941, 9976), 'torch.tensor', 'torch.tensor', (['w'], {'requires_grad': '(True)'}), '(w, requires_grad=True)\n', (9953, 9976), False, 'import torch\n'), ((9995, 10036), 'torch.tensor', 'torch.tensor', (['y_true'], {'requires_grad': '(False)'}), '(y_true, requires_grad=False)\n', (10007, 10036), False, 'import torch\n'), 
((10054, 10074), 'torch.matmul', 'torch.matmul', (['tX', 'tw'], {}), '(tX, tw)\n', (10066, 10074), False, 'import torch\n'), ((10083, 10106), 'utils.save_grad', 'utils.save_grad', (['ty_out'], {}), '(ty_out)\n', (10098, 10106), False, 'import utils\n'), ((10125, 10146), 'torch.sigmoid', 'torch.sigmoid', (['ty_out'], {}), '(ty_out)\n', (10138, 10146), False, 'import torch\n'), ((10167, 10210), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (10193, 10210), False, 'import torch\n'), ((11100, 11123), 'revdiff.build_dot_mm', 'rd.build_dot_mm', (['dX', 'dw'], {}), '(dX, dw)\n', (11115, 11123), True, 'import revdiff as rd\n'), ((11142, 11166), 'revdiff.build_softmax', 'rd.build_softmax', (['dy_out'], {}), '(dy_out)\n', (11158, 11166), True, 'import revdiff as rd\n'), ((11183, 11227), 'revdiff.build_cross_entropy_loss', 'rd.build_cross_entropy_loss', (['dy_out', 'dy_true'], {}), '(dy_out, dy_true)\n', (11210, 11227), True, 'import revdiff as rd\n'), ((11242, 11277), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (11254, 11277), False, 'import torch\n'), ((11291, 11326), 'torch.tensor', 'torch.tensor', (['w'], {'requires_grad': '(True)'}), '(w, requires_grad=True)\n', (11303, 11326), False, 'import torch\n'), ((11345, 11386), 'torch.tensor', 'torch.tensor', (['y_true'], {'requires_grad': '(False)'}), '(y_true, requires_grad=False)\n', (11357, 11386), False, 'import torch\n'), ((11405, 11433), 'torch.argmax', 'torch.argmax', (['ty_true'], {'dim': '(1)'}), '(ty_true, dim=1)\n', (11417, 11433), False, 'import torch\n'), ((11451, 11471), 'torch.matmul', 'torch.matmul', (['tX', 'tw'], {}), '(tX, tw)\n', (11463, 11471), False, 'import torch\n'), ((11490, 11532), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['ty_out'], {'dim': '(1)'}), '(ty_out, dim=1)\n', (11517, 11532), False, 'import torch\n'), ((11541, 11564), 'utils.save_grad', 
'utils.save_grad', (['ty_out'], {}), '(ty_out)\n', (11556, 11564), False, 'import utils\n'), ((11585, 11627), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (11610, 11627), False, 'import torch\n'), ((12509, 12559), 'revdiff.build_reshape', 'rd.build_reshape', (['dw', '(dw.shape[0] * dw.shape[1],)'], {}), '(dw, (dw.shape[0] * dw.shape[1],))\n', (12525, 12559), True, 'import revdiff as rd\n'), ((12607, 12630), 'revdiff.build_dot_mm', 'rd.build_dot_mm', (['dX', 'dw'], {}), '(dX, dw)\n', (12622, 12630), True, 'import revdiff as rd\n'), ((12649, 12673), 'revdiff.build_softmax', 'rd.build_softmax', (['dy_out'], {}), '(dy_out)\n', (12665, 12673), True, 'import revdiff as rd\n'), ((12690, 12734), 'revdiff.build_cross_entropy_loss', 'rd.build_cross_entropy_loss', (['dy_out', 'dy_true'], {}), '(dy_out, dy_true)\n', (12717, 12734), True, 'import revdiff as rd\n'), ((12877, 12912), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (12889, 12912), False, 'import torch\n'), ((12926, 12961), 'torch.tensor', 'torch.tensor', (['w'], {'requires_grad': '(True)'}), '(w, requires_grad=True)\n', (12938, 12961), False, 'import torch\n'), ((13010, 13051), 'torch.tensor', 'torch.tensor', (['y_true'], {'requires_grad': '(False)'}), '(y_true, requires_grad=False)\n', (13022, 13051), False, 'import torch\n'), ((13070, 13098), 'torch.argmax', 'torch.argmax', (['ty_true'], {'dim': '(1)'}), '(ty_true, dim=1)\n', (13082, 13098), False, 'import torch\n'), ((13116, 13136), 'torch.matmul', 'torch.matmul', (['tX', 'tw'], {}), '(tX, tw)\n', (13128, 13136), False, 'import torch\n'), ((13155, 13197), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['ty_out'], {'dim': '(1)'}), '(ty_out, dim=1)\n', (13182, 13197), False, 'import torch\n'), ((13206, 13229), 'utils.save_grad', 'utils.save_grad', (['ty_out'], {}), '(ty_out)\n', (13221, 13229), False, 'import utils\n'), ((13250, 
13292), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (13275, 13292), False, 'import torch\n'), ((13939, 13961), 'numpy.random.randn', 'np.random.randn', (['(46)', '(7)'], {}), '(46, 7)\n', (13954, 13961), True, 'import numpy as np\n'), ((13979, 14001), 'numpy.random.randn', 'np.random.randn', (['(46)', '(3)'], {}), '(46, 3)\n', (13994, 14001), True, 'import numpy as np\n'), ((14014, 14035), 'numpy.random.randn', 'np.random.randn', (['(7)', '(3)'], {}), '(7, 3)\n', (14029, 14035), True, 'import numpy as np\n'), ((14048, 14066), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (14063, 14066), True, 'import numpy as np\n'), ((14376, 14411), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (14388, 14411), False, 'import torch\n'), ((14430, 14470), 'torch.tensor', 'torch.tensor', (['y_true'], {'requires_grad': '(True)'}), '(y_true, requires_grad=True)\n', (14442, 14470), False, 'import torch\n'), ((14484, 14519), 'torch.tensor', 'torch.tensor', (['W'], {'requires_grad': '(True)'}), '(W, requires_grad=True)\n', (14496, 14519), False, 'import torch\n'), ((14533, 14568), 'torch.tensor', 'torch.tensor', (['b'], {'requires_grad': '(True)'}), '(b, requires_grad=True)\n', (14545, 14568), False, 'import torch\n'), ((14633, 14651), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (14649, 14651), False, 'import torch\n'), ((15162, 15181), 'numpy.random.randn', 'np.random.randn', (['(43)'], {}), '(43)\n', (15177, 15181), True, 'import numpy as np\n'), ((15224, 15242), 'revdiff.build_vrelu', 'rd.build_vrelu', (['tx'], {}), '(tx)\n', (15238, 15242), True, 'import revdiff as rd\n'), ((15256, 15279), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (15271, 15279), True, 'import revdiff as rd\n'), ((15294, 15329), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', 
(15306, 15329), False, 'import torch\n'), ((15343, 15357), 'torch.relu', 'torch.relu', (['dx'], {}), '(dx)\n', (15353, 15357), False, 'import torch\n'), ((15371, 15388), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (15380, 15388), False, 'import torch\n'), ((15582, 15604), 'numpy.random.randn', 'np.random.randn', (['(11)', '(7)'], {}), '(11, 7)\n', (15597, 15604), True, 'import numpy as np\n'), ((15648, 15668), 'revdiff.build_softmax', 'rd.build_softmax', (['tx'], {}), '(tx)\n', (15664, 15668), True, 'import revdiff as rd\n'), ((15682, 15713), 'revdiff.build_reshape', 'rd.build_reshape', (['tz', '(11 * 7,)'], {}), '(tz, (11 * 7,))\n', (15698, 15713), True, 'import revdiff as rd\n'), ((15727, 15750), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (15742, 15750), True, 'import revdiff as rd\n'), ((15765, 15800), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (15777, 15800), False, 'import torch\n'), ((15851, 15868), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (15860, 15868), False, 'import torch\n'), ((16075, 16094), 'numpy.random.randn', 'np.random.randn', (['(23)'], {}), '(23)\n', (16090, 16094), True, 'import numpy as np\n'), ((16137, 16164), 'revdiff.build_vsoftplus', 'rd.build_vsoftplus', (['tx', '(0.7)'], {}), '(tx, 0.7)\n', (16155, 16164), True, 'import revdiff as rd\n'), ((16178, 16201), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (16193, 16201), True, 'import revdiff as rd\n'), ((16216, 16251), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (16228, 16251), False, 'import torch\n'), ((16265, 16302), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['dx', '(0.7)'], {}), '(dx, 0.7)\n', (16293, 16302), False, 'import torch\n'), ((16316, 16333), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (16325, 16333), False, 'import torch\n'), 
((16540, 16559), 'numpy.random.randn', 'np.random.randn', (['(23)'], {}), '(23)\n', (16555, 16559), True, 'import numpy as np\n'), ((16602, 16620), 'revdiff.build_vtanh', 'rd.build_vtanh', (['tx'], {}), '(tx)\n', (16616, 16620), True, 'import revdiff as rd\n'), ((16634, 16657), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (16649, 16657), True, 'import revdiff as rd\n'), ((16672, 16707), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (16684, 16707), False, 'import torch\n'), ((16721, 16735), 'torch.tanh', 'torch.tanh', (['dx'], {}), '(dx)\n', (16731, 16735), False, 'import torch\n'), ((16749, 16766), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (16758, 16766), False, 'import torch\n'), ((16967, 16986), 'numpy.random.randn', 'np.random.randn', (['(23)'], {}), '(23)\n', (16982, 16986), True, 'import numpy as np\n'), ((17029, 17050), 'revdiff.build_vsigmoid', 'rd.build_vsigmoid', (['tx'], {}), '(tx)\n', (17046, 17050), True, 'import revdiff as rd\n'), ((17064, 17087), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (17079, 17087), True, 'import revdiff as rd\n'), ((17102, 17137), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (17114, 17137), False, 'import torch\n'), ((17151, 17168), 'torch.sigmoid', 'torch.sigmoid', (['dx'], {}), '(dx)\n', (17164, 17168), False, 'import torch\n'), ((17182, 17199), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (17191, 17199), False, 'import torch\n'), ((17470, 17500), 'revdiff.build_vleaky_relu', 'rd.build_vleaky_relu', (['tx', '(0.05)'], {}), '(tx, 0.05)\n', (17490, 17500), True, 'import revdiff as rd\n'), ((17514, 17537), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (17529, 17537), True, 'import revdiff as rd\n'), ((17552, 17587), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, 
requires_grad=True)\n', (17564, 17587), False, 'import torch\n'), ((17601, 17641), 'torch.nn.functional.leaky_relu', 'torch.nn.functional.leaky_relu', (['dx', '(0.05)'], {}), '(dx, 0.05)\n', (17631, 17641), False, 'import torch\n'), ((17655, 17672), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (17664, 17672), False, 'import torch\n'), ((17935, 17958), 'revdiff.build_velu', 'rd.build_velu', (['tx', '(0.05)'], {}), '(tx, 0.05)\n', (17948, 17958), True, 'import revdiff as rd\n'), ((17972, 17995), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tz', 'tz'], {}), '(tz, tz)\n', (17987, 17995), True, 'import revdiff as rd\n'), ((18010, 18045), 'torch.tensor', 'torch.tensor', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (18022, 18045), False, 'import torch\n'), ((18059, 18092), 'torch.nn.functional.elu', 'torch.nn.functional.elu', (['dx', '(0.05)'], {}), '(dx, 0.05)\n', (18082, 18092), False, 'import torch\n'), ((18106, 18123), 'torch.dot', 'torch.dot', (['dz', 'dz'], {}), '(dz, dz)\n', (18115, 18123), False, 'import torch\n'), ((18334, 18353), 'numpy.random.randn', 'np.random.randn', (['(17)'], {}), '(17)\n', (18349, 18353), True, 'import numpy as np\n'), ((18371, 18390), 'numpy.random.randn', 'np.random.randn', (['(17)'], {}), '(17)\n', (18386, 18390), True, 'import numpy as np\n'), ((18474, 18509), 'revdiff.build_mae_loss', 'rd.build_mae_loss', (['ty_pred', 'ty_true'], {}), '(ty_pred, ty_true)\n', (18491, 18509), True, 'import revdiff as rd\n'), ((18529, 18569), 'torch.tensor', 'torch.tensor', (['y_true'], {'requires_grad': '(True)'}), '(y_true, requires_grad=True)\n', (18541, 18569), False, 'import torch\n'), ((18588, 18628), 'torch.tensor', 'torch.tensor', (['y_pred'], {'requires_grad': '(True)'}), '(y_pred, requires_grad=True)\n', (18600, 18628), False, 'import torch\n'), ((18649, 18694), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {'reduction': '"""elementwise_mean"""'}), "(reduction='elementwise_mean')\n", (18664, 18694), False, 
'import torch\n'), ((19031, 19050), 'numpy.random.randn', 'np.random.randn', (['(17)'], {}), '(17)\n', (19046, 19050), True, 'import numpy as np\n'), ((19068, 19087), 'numpy.random.randn', 'np.random.randn', (['(17)'], {}), '(17)\n', (19083, 19087), True, 'import numpy as np\n'), ((19171, 19206), 'revdiff.build_mse_loss', 'rd.build_mse_loss', (['ty_pred', 'ty_true'], {}), '(ty_pred, ty_true)\n', (19188, 19206), True, 'import revdiff as rd\n'), ((19226, 19266), 'torch.tensor', 'torch.tensor', (['y_true'], {'requires_grad': '(True)'}), '(y_true, requires_grad=True)\n', (19238, 19266), False, 'import torch\n'), ((19285, 19325), 'torch.tensor', 'torch.tensor', (['y_pred'], {'requires_grad': '(True)'}), '(y_pred, requires_grad=True)\n', (19297, 19325), False, 'import torch\n'), ((19346, 19392), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""elementwise_mean"""'}), "(reduction='elementwise_mean')\n", (19362, 19392), False, 'import torch\n'), ((19987, 20022), 'revdiff.build_conv2d', 'rd.build_conv2d', (['tX', 'tK', '(1)', '(1)', '(0)', '(0)'], {}), '(tX, tK, 1, 1, 0, 0)\n', (20002, 20022), True, 'import revdiff as rd\n'), ((20036, 20068), 'revdiff.build_conv2d_bias_add', 'rd.build_conv2d_bias_add', (['tY', 'tb'], {}), '(tY, tb)\n', (20060, 20068), True, 'import revdiff as rd\n'), ((20083, 20110), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (20099, 20110), True, 'import revdiff as rd\n'), ((20126, 20151), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (20141, 20151), True, 'import revdiff as rd\n'), ((20174, 20209), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (20186, 20209), False, 'import torch\n'), ((20223, 20258), 'torch.tensor', 'torch.tensor', (['K'], {'requires_grad': '(True)'}), '(K, requires_grad=True)\n', (20235, 20258), False, 'import torch\n'), ((20272, 20307), 'torch.tensor', 'torch.tensor', (['b'], {'requires_grad': 
'(True)'}), '(b, requires_grad=True)\n', (20284, 20307), False, 'import torch\n'), ((20321, 20379), 'torch.nn.functional.conv2d', 'torch.nn.functional.conv2d', (['dX', 'dK'], {'bias': 'db', 'stride': '(1, 1)'}), '(dX, dK, bias=db, stride=(1, 1))\n', (20347, 20379), False, 'import torch\n'), ((20419, 20438), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (20428, 20438), False, 'import torch\n'), ((20962, 20997), 'revdiff.build_conv2d', 'rd.build_conv2d', (['tX', 'tK', '(3)', '(4)', '(0)', '(0)'], {}), '(tX, tK, 3, 4, 0, 0)\n', (20977, 20997), True, 'import revdiff as rd\n'), ((21012, 21039), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (21028, 21039), True, 'import revdiff as rd\n'), ((21055, 21080), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (21070, 21080), True, 'import revdiff as rd\n'), ((21103, 21138), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (21115, 21138), False, 'import torch\n'), ((21152, 21187), 'torch.tensor', 'torch.tensor', (['K'], {'requires_grad': '(True)'}), '(K, requires_grad=True)\n', (21164, 21187), False, 'import torch\n'), ((21201, 21250), 'torch.nn.functional.conv2d', 'torch.nn.functional.conv2d', (['dX', 'dK'], {'stride': '(3, 4)'}), '(dX, dK, stride=(3, 4))\n', (21227, 21250), False, 'import torch\n'), ((21290, 21309), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (21299, 21309), False, 'import torch\n'), ((21740, 21775), 'revdiff.build_conv2d', 'rd.build_conv2d', (['tX', 'tK', '(3)', '(4)', '(6)', '(8)'], {}), '(tX, tK, 3, 4, 6, 8)\n', (21755, 21775), True, 'import revdiff as rd\n'), ((21790, 21817), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (21806, 21817), True, 'import revdiff as rd\n'), ((21833, 21858), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (21848, 21858), True, 'import revdiff as 
rd\n'), ((21881, 21916), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (21893, 21916), False, 'import torch\n'), ((21930, 21965), 'torch.tensor', 'torch.tensor', (['K'], {'requires_grad': '(True)'}), '(K, requires_grad=True)\n', (21942, 21965), False, 'import torch\n'), ((21979, 22044), 'torch.nn.functional.conv2d', 'torch.nn.functional.conv2d', (['dX', 'dK'], {'stride': '(3, 4)', 'padding': '(6, 8)'}), '(dX, dK, stride=(3, 4), padding=(6, 8))\n', (22005, 22044), False, 'import torch\n'), ((22084, 22103), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (22093, 22103), False, 'import torch\n'), ((22525, 22561), 'revdiff.build_conv2d', 'rd.build_conv2d', (['tX', 'tK', '(3)', '(4)', '(7)', '(11)'], {}), '(tX, tK, 3, 4, 7, 11)\n', (22540, 22561), True, 'import revdiff as rd\n'), ((22576, 22603), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (22592, 22603), True, 'import revdiff as rd\n'), ((22619, 22644), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (22634, 22644), True, 'import revdiff as rd\n'), ((22667, 22702), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (22679, 22702), False, 'import torch\n'), ((22716, 22751), 'torch.tensor', 'torch.tensor', (['K'], {'requires_grad': '(True)'}), '(K, requires_grad=True)\n', (22728, 22751), False, 'import torch\n'), ((22765, 22831), 'torch.nn.functional.conv2d', 'torch.nn.functional.conv2d', (['dX', 'dK'], {'stride': '(3, 4)', 'padding': '(7, 11)'}), '(dX, dK, stride=(3, 4), padding=(7, 11))\n', (22791, 22831), False, 'import torch\n'), ((22871, 22890), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (22880, 22890), False, 'import torch\n'), ((23412, 23454), 'revdiff.op_conv2d_transpose', 'rd.op_conv2d_transpose', (['tX', 'tK', '(1)', '(1)', '(0)', '(0)'], {}), '(tX, tK, 1, 1, 0, 0)\n', (23434, 23454), True, 'import 
revdiff as rd\n'), ((23468, 23500), 'revdiff.build_conv2d_bias_add', 'rd.build_conv2d_bias_add', (['tY', 'tb'], {}), '(tY, tb)\n', (23492, 23500), True, 'import revdiff as rd\n'), ((23515, 23542), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (23531, 23542), True, 'import revdiff as rd\n'), ((23558, 23583), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (23573, 23583), True, 'import revdiff as rd\n'), ((23606, 23641), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (23618, 23641), False, 'import torch\n'), ((23655, 23690), 'torch.tensor', 'torch.tensor', (['K'], {'requires_grad': '(True)'}), '(K, requires_grad=True)\n', (23667, 23690), False, 'import torch\n'), ((23704, 23739), 'torch.tensor', 'torch.tensor', (['b'], {'requires_grad': '(True)'}), '(b, requires_grad=True)\n', (23716, 23739), False, 'import torch\n'), ((23753, 23821), 'torch.nn.functional.conv_transpose2d', 'torch.nn.functional.conv_transpose2d', (['dX', 'dK'], {'bias': 'db', 'stride': '(1, 1)'}), '(dX, dK, bias=db, stride=(1, 1))\n', (23789, 23821), False, 'import torch\n'), ((23861, 23880), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (23870, 23880), False, 'import torch\n'), ((24383, 24425), 'revdiff.op_conv2d_transpose', 'rd.op_conv2d_transpose', (['tX', 'tK', '(3)', '(4)', '(0)', '(0)'], {}), '(tX, tK, 3, 4, 0, 0)\n', (24405, 24425), True, 'import revdiff as rd\n'), ((24440, 24467), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (24456, 24467), True, 'import revdiff as rd\n'), ((24483, 24508), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (24498, 24508), True, 'import revdiff as rd\n'), ((24531, 24566), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (24543, 24566), False, 'import torch\n'), ((24580, 24615), 'torch.tensor', 
'torch.tensor', (['K'], {'requires_grad': '(True)'}), '(K, requires_grad=True)\n', (24592, 24615), False, 'import torch\n'), ((24629, 24688), 'torch.nn.functional.conv_transpose2d', 'torch.nn.functional.conv_transpose2d', (['dX', 'dK'], {'stride': '(3, 4)'}), '(dX, dK, stride=(3, 4))\n', (24665, 24688), False, 'import torch\n'), ((24728, 24747), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (24737, 24747), False, 'import torch\n'), ((25170, 25212), 'revdiff.op_conv2d_transpose', 'rd.op_conv2d_transpose', (['tX', 'tK', '(3)', '(4)', '(6)', '(8)'], {}), '(tX, tK, 3, 4, 6, 8)\n', (25192, 25212), True, 'import revdiff as rd\n'), ((25227, 25254), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (25243, 25254), True, 'import revdiff as rd\n'), ((25270, 25295), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (25285, 25295), True, 'import revdiff as rd\n'), ((25318, 25353), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (25330, 25353), False, 'import torch\n'), ((25367, 25402), 'torch.tensor', 'torch.tensor', (['K'], {'requires_grad': '(True)'}), '(K, requires_grad=True)\n', (25379, 25402), False, 'import torch\n'), ((25416, 25491), 'torch.nn.functional.conv_transpose2d', 'torch.nn.functional.conv_transpose2d', (['dX', 'dK'], {'stride': '(3, 4)', 'padding': '(6, 8)'}), '(dX, dK, stride=(3, 4), padding=(6, 8))\n', (25452, 25491), False, 'import torch\n'), ((25531, 25550), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (25540, 25550), False, 'import torch\n'), ((25975, 26018), 'revdiff.op_conv2d_transpose', 'rd.op_conv2d_transpose', (['tX', 'tK', '(3)', '(4)', '(7)', '(11)'], {}), '(tX, tK, 3, 4, 7, 11)\n', (25997, 26018), True, 'import revdiff as rd\n'), ((26033, 26060), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (26049, 26060), True, 'import revdiff as rd\n'), ((26076, 26101), 
'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (26091, 26101), True, 'import revdiff as rd\n'), ((26124, 26159), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (26136, 26159), False, 'import torch\n'), ((26173, 26208), 'torch.tensor', 'torch.tensor', (['K'], {'requires_grad': '(True)'}), '(K, requires_grad=True)\n', (26185, 26208), False, 'import torch\n'), ((26222, 26298), 'torch.nn.functional.conv_transpose2d', 'torch.nn.functional.conv_transpose2d', (['dX', 'dK'], {'stride': '(3, 4)', 'padding': '(7, 11)'}), '(dX, dK, stride=(3, 4), padding=(7, 11))\n', (26258, 26298), False, 'import torch\n'), ((26338, 26357), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (26347, 26357), False, 'import torch\n'), ((26659, 26686), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)', '(6)', '(6)'], {}), '(1, 1, 6, 6)\n', (26674, 26686), True, 'import numpy as np\n'), ((26721, 26757), 'revdiff.build_max_pooling', 'rd.build_max_pooling', (['tX', '(2)', '(2)', '(2)', '(2)'], {}), '(tX, 2, 2, 2, 2)\n', (26741, 26757), True, 'import revdiff as rd\n'), ((26772, 26799), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (26788, 26799), True, 'import revdiff as rd\n'), ((26813, 26838), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (26828, 26838), True, 'import revdiff as rd\n'), ((26853, 26888), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (26865, 26888), False, 'import torch\n'), ((26902, 26952), 'torch.nn.functional.max_pool2d', 'torch.nn.functional.max_pool2d', (['dX', '(2, 2)', '(2, 2)'], {}), '(dX, (2, 2), (2, 2))\n', (26932, 26952), False, 'import torch\n'), ((26992, 27011), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (27001, 27011), False, 'import torch\n'), ((27174, 27201), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)', 
'(5)', '(5)'], {}), '(1, 1, 5, 5)\n', (27189, 27201), True, 'import numpy as np\n'), ((27236, 27272), 'revdiff.build_max_pooling', 'rd.build_max_pooling', (['tX', '(2)', '(2)', '(1)', '(1)'], {}), '(tX, 2, 2, 1, 1)\n', (27256, 27272), True, 'import revdiff as rd\n'), ((27287, 27314), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (27303, 27314), True, 'import revdiff as rd\n'), ((27328, 27353), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (27343, 27353), True, 'import revdiff as rd\n'), ((27368, 27403), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (27380, 27403), False, 'import torch\n'), ((27417, 27467), 'torch.nn.functional.max_pool2d', 'torch.nn.functional.max_pool2d', (['dX', '(2, 2)', '(1, 1)'], {}), '(dX, (2, 2), (1, 1))\n', (27447, 27467), False, 'import torch\n'), ((27507, 27526), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (27516, 27526), False, 'import torch\n'), ((27697, 27724), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(9)', '(7)'], {}), '(2, 3, 9, 7)\n', (27712, 27724), True, 'import numpy as np\n'), ((27759, 27795), 'revdiff.build_max_pooling', 'rd.build_max_pooling', (['tX', '(3)', '(2)', '(2)', '(1)'], {}), '(tX, 3, 2, 2, 1)\n', (27779, 27795), True, 'import revdiff as rd\n'), ((27810, 27837), 'revdiff.build_reshape', 'rd.build_reshape', (['tY', '(-1,)'], {}), '(tY, (-1,))\n', (27826, 27837), True, 'import revdiff as rd\n'), ((27851, 27876), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['tYf', 'tYf'], {}), '(tYf, tYf)\n', (27866, 27876), True, 'import revdiff as rd\n'), ((27891, 27926), 'torch.tensor', 'torch.tensor', (['X'], {'requires_grad': '(True)'}), '(X, requires_grad=True)\n', (27903, 27926), False, 'import torch\n'), ((27940, 27990), 'torch.nn.functional.max_pool2d', 'torch.nn.functional.max_pool2d', (['dX', '(3, 2)', '(2, 1)'], {}), '(dX, (3, 2), (2, 1))\n', (27970, 27990), False, 
'import torch\n'), ((28030, 28049), 'torch.dot', 'torch.dot', (['dYf', 'dYf'], {}), '(dYf, dYf)\n', (28039, 28049), False, 'import torch\n'), ((28985, 29027), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (29010, 29027), False, 'import torch\n'), ((29725, 29763), 'numpy.random.randint', 'np.random.randint', (['(0)', 'OUT_SIZE'], {'size': 'N'}), '(0, OUT_SIZE, size=N)\n', (29742, 29763), True, 'import numpy as np\n'), ((29929, 29951), 'torch.tensor', 'torch.tensor', (['X_sample'], {}), '(X_sample)\n', (29941, 29951), False, 'import torch\n'), ((29965, 29987), 'torch.tensor', 'torch.tensor', (['y_sample'], {}), '(y_sample)\n', (29977, 29987), False, 'import torch\n'), ((30122, 30144), 'revdiff.build_val', 'rd.build_val', (['X_sample'], {}), '(X_sample)\n', (30134, 30144), True, 'import revdiff as rd\n'), ((30255, 30297), 'revdiff.build_cross_entropy_loss', 'rd.build_cross_entropy_loss', (['dy_logits', 'dy'], {}), '(dy_logits, dy)\n', (30282, 30297), True, 'import revdiff as rd\n'), ((31335, 31368), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (31351, 31368), False, 'import torch\n'), ((32460, 32475), 'torch.tensor', 'torch.tensor', (['X'], {}), '(X)\n', (32472, 32475), False, 'import torch\n'), ((32489, 32504), 'torch.tensor', 'torch.tensor', (['y'], {}), '(y)\n', (32501, 32504), False, 'import torch\n'), ((32639, 32654), 'revdiff.build_val', 'rd.build_val', (['X'], {}), '(X)\n', (32651, 32654), True, 'import revdiff as rd\n'), ((32668, 32683), 'revdiff.build_val', 'rd.build_val', (['y'], {}), '(y)\n', (32680, 32683), True, 'import revdiff as rd\n'), ((32729, 32758), 'revdiff.op_mse_loss', 'rd.op_mse_loss', (['dy_logits', 'dy'], {}), '(dy_logits, dy)\n', (32743, 32758), True, 'import revdiff as rd\n'), ((721, 740), 'metrics.tdist', 'metrics.tdist', (['a', 'b'], {}), '(a, b)\n', (734, 740), False, 'import metrics\n'), ((14198, 14221), 
'revdiff.build_dot_mm', 'rd.build_dot_mm', (['dX', 'dW'], {}), '(dX, dW)\n', (14213, 14221), True, 'import revdiff as rd\n'), ((14247, 14288), 'revdiff.build_reshape', 'rd.build_reshape', (['dy_pred', '(y_true.size,)'], {}), '(dy_pred, (y_true.size,))\n', (14263, 14288), True, 'import revdiff as rd\n'), ((14310, 14351), 'revdiff.build_reshape', 'rd.build_reshape', (['dy_true', '(y_true.size,)'], {}), '(dy_true, (y_true.size,))\n', (14326, 14351), True, 'import revdiff as rd\n'), ((14587, 14607), 'torch.matmul', 'torch.matmul', (['tX', 'tW'], {}), '(tX, tW)\n', (14599, 14607), False, 'import torch\n'), ((17404, 17423), 'numpy.random.randn', 'np.random.randn', (['(43)'], {}), '(43)\n', (17419, 17423), True, 'import numpy as np\n'), ((17869, 17888), 'numpy.random.randn', 'np.random.randn', (['(43)'], {}), '(43)\n', (17884, 17888), True, 'import numpy as np\n'), ((30171, 30208), 'utils.vec2one_hot', 'utils.vec2one_hot', (['y_sample', 'OUT_SIZE'], {}), '(y_sample, OUT_SIZE)\n', (30188, 30208), False, 'import utils\n'), ((4308, 4327), 'numpy.random.randn', 'np.random.randn', (['(12)'], {}), '(12)\n', (4323, 4327), True, 'import numpy as np\n'), ((4870, 4889), 'numpy.random.randn', 'np.random.randn', (['(12)'], {}), '(12)\n', (4885, 4889), True, 'import numpy as np\n'), ((8103, 8125), 'numpy.random.randn', 'np.random.randn', (['(46)', '(7)'], {}), '(46, 7)\n', (8118, 8125), True, 'import numpy as np\n'), ((8157, 8175), 'numpy.random.randn', 'np.random.randn', (['(7)'], {}), '(7)\n', (8172, 8175), True, 'import numpy as np\n'), ((8212, 8239), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(46)'], {}), '(0, 2, 46)\n', (8229, 8239), True, 'import numpy as np\n'), ((9317, 9339), 'numpy.random.randn', 'np.random.randn', (['(46)', '(7)'], {}), '(46, 7)\n', (9332, 9339), True, 'import numpy as np\n'), ((9371, 9389), 'numpy.random.randn', 'np.random.randn', (['(7)'], {}), '(7)\n', (9386, 9389), True, 'import numpy as np\n'), ((9426, 9453), 'numpy.random.randint', 
'np.random.randint', (['(0)', '(2)', '(46)'], {}), '(0, 2, 46)\n', (9443, 9453), True, 'import numpy as np\n'), ((9641, 9663), 'revdiff.build_vexp', 'rd.build_vexp', (['(-dy_out)'], {}), '(-dy_out)\n', (9654, 9663), True, 'import revdiff as rd\n'), ((10757, 10779), 'numpy.random.randn', 'np.random.randn', (['(46)', '(7)'], {}), '(46, 7)\n', (10772, 10779), True, 'import numpy as np\n'), ((10811, 10832), 'numpy.random.randn', 'np.random.randn', (['(7)', '(4)'], {}), '(7, 4)\n', (10826, 10832), True, 'import numpy as np\n'), ((10869, 10886), 'numpy.zeros', 'np.zeros', (['(46, 4)'], {}), '((46, 4))\n', (10877, 10886), True, 'import numpy as np\n'), ((10969, 11006), 'numpy.random.randint', 'np.random.randint', (['(0)', 'y_true.shape[1]'], {}), '(0, y_true.shape[1])\n', (10986, 11006), True, 'import numpy as np\n'), ((12146, 12168), 'numpy.random.randn', 'np.random.randn', (['(46)', '(7)'], {}), '(46, 7)\n', (12161, 12168), True, 'import numpy as np\n'), ((12200, 12221), 'numpy.random.randn', 'np.random.randn', (['(7)', '(4)'], {}), '(7, 4)\n', (12215, 12221), True, 'import numpy as np\n'), ((12258, 12275), 'numpy.zeros', 'np.zeros', (['(46, 4)'], {}), '((46, 4))\n', (12266, 12275), True, 'import numpy as np\n'), ((12358, 12395), 'numpy.random.randint', 'np.random.randint', (['(0)', 'y_true.shape[1]'], {}), '(0, y_true.shape[1])\n', (12375, 12395), True, 'import numpy as np\n'), ((12770, 12793), 'revdiff.build_norm1', 'rd.build_norm1', (['dw_flat'], {}), '(dw_flat)\n', (12784, 12793), True, 'import revdiff as rd\n'), ((12829, 12862), 'revdiff.build_dot_vv', 'rd.build_dot_vv', (['dw_flat', 'dw_flat'], {}), '(dw_flat, dw_flat)\n', (12844, 12862), True, 'import revdiff as rd\n'), ((13409, 13436), 'torch.dot', 'torch.dot', (['tw_flat', 'tw_flat'], {}), '(tw_flat, tw_flat)\n', (13418, 13436), False, 'import torch\n'), ((15814, 15828), 'torch.relu', 'torch.relu', (['dx'], {}), '(dx)\n', (15824, 15828), False, 'import torch\n'), ((19754, 19783), 'numpy.random.randn', 
'np.random.randn', (['(2)', '(3)', '(17)', '(23)'], {}), '(2, 3, 17, 23)\n', (19769, 19783), True, 'import numpy as np\n'), ((19815, 19842), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)', '(5)', '(8)'], {}), '(4, 3, 5, 8)\n', (19830, 19842), True, 'import numpy as np\n'), ((19874, 19892), 'numpy.random.randn', 'np.random.randn', (['(4)'], {}), '(4)\n', (19889, 19892), True, 'import numpy as np\n'), ((20800, 20829), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(17)', '(24)'], {}), '(2, 3, 17, 24)\n', (20815, 20829), True, 'import numpy as np\n'), ((20861, 20888), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)', '(5)', '(8)'], {}), '(4, 3, 5, 8)\n', (20876, 20888), True, 'import numpy as np\n'), ((21578, 21607), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(17)', '(24)'], {}), '(2, 3, 17, 24)\n', (21593, 21607), True, 'import numpy as np\n'), ((21639, 21666), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)', '(5)', '(8)'], {}), '(4, 3, 5, 8)\n', (21654, 21666), True, 'import numpy as np\n'), ((22363, 22392), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(16)', '(26)'], {}), '(2, 3, 16, 26)\n', (22378, 22392), True, 'import numpy as np\n'), ((22424, 22451), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)', '(6)', '(8)'], {}), '(4, 3, 6, 8)\n', (22439, 22451), True, 'import numpy as np\n'), ((23180, 23209), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(13)', '(16)'], {}), '(2, 4, 13, 16)\n', (23195, 23209), True, 'import numpy as np\n'), ((23241, 23268), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)', '(5)', '(8)'], {}), '(4, 3, 5, 8)\n', (23256, 23268), True, 'import numpy as np\n'), ((23300, 23318), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (23315, 23318), True, 'import numpy as np\n'), ((24223, 24250), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(5)', '(5)'], {}), '(2, 4, 5, 5)\n', (24238, 24250), True, 'import numpy as np\n'), ((24282, 
24309), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)', '(5)', '(8)'], {}), '(4, 3, 5, 8)\n', (24297, 24309), True, 'import numpy as np\n'), ((25010, 25037), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(5)', '(5)'], {}), '(2, 4, 5, 5)\n', (25025, 25037), True, 'import numpy as np\n'), ((25069, 25096), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)', '(5)', '(8)'], {}), '(4, 3, 5, 8)\n', (25084, 25096), True, 'import numpy as np\n'), ((25814, 25842), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(9)', '(11)'], {}), '(2, 4, 9, 11)\n', (25829, 25842), True, 'import numpy as np\n'), ((25874, 25901), 'numpy.random.randn', 'np.random.randn', (['(4)', '(3)', '(6)', '(8)'], {}), '(4, 3, 6, 8)\n', (25889, 25901), True, 'import numpy as np\n'), ((28535, 28573), 'torch.nn.Linear', 'torch.nn.Linear', (['IN_SIZE', 'HIDDEN1_SIZE'], {}), '(IN_SIZE, HIDDEN1_SIZE)\n', (28550, 28573), False, 'import torch\n'), ((28601, 28644), 'torch.nn.Linear', 'torch.nn.Linear', (['HIDDEN1_SIZE', 'HIDDEN2_SIZE'], {}), '(HIDDEN1_SIZE, HIDDEN2_SIZE)\n', (28616, 28644), False, 'import torch\n'), ((28671, 28710), 'torch.nn.Linear', 'torch.nn.Linear', (['HIDDEN2_SIZE', 'OUT_SIZE'], {}), '(HIDDEN2_SIZE, OUT_SIZE)\n', (28686, 28710), False, 'import torch\n'), ((29401, 29435), 'revdiff.build_reshape', 'rd.build_reshape', (['x', '(-1, IN_SIZE)'], {}), '(x, (-1, IN_SIZE))\n', (29417, 29435), True, 'import revdiff as rd\n'), ((29659, 29686), 'numpy.random.randn', 'np.random.randn', (['N', 'IN_SIZE'], {}), '(N, IN_SIZE)\n', (29674, 29686), True, 'import numpy as np\n'), ((30890, 30914), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(6)', '(5)'], {}), '(1, 6, 5)\n', (30905, 30914), False, 'import torch\n'), ((30944, 30969), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (30959, 30969), False, 'import torch\n'), ((30997, 31027), 'torch.nn.Linear', 'torch.nn.Linear', (['(16 * 5 * 5)', '(4)'], {}), '(16 * 5 * 5, 4)\n', (31012, 
31027), False, 'import torch\n'), ((31753, 31788), 'revdiff.build_max_pooling', 'rd.build_max_pooling', (['x', '(2)', '(2)', '(2)', '(2)'], {}), '(x, 2, 2, 2, 2)\n', (31773, 31788), True, 'import revdiff as rd\n'), ((31868, 31903), 'revdiff.build_max_pooling', 'rd.build_max_pooling', (['x', '(2)', '(2)', '(2)', '(2)'], {}), '(x, 2, 2, 2, 2)\n', (31888, 31903), True, 'import revdiff as rd\n'), ((31933, 31970), 'revdiff.build_reshape', 'rd.build_reshape', (['x', '(x.shape[0], -1)'], {}), '(x, (x.shape[0], -1))\n', (31949, 31970), True, 'import revdiff as rd\n'), ((32064, 32093), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)', '(32)', '(32)'], {}), '(3, 1, 32, 32)\n', (32079, 32093), True, 'import numpy as np\n'), ((32125, 32146), 'numpy.random.randn', 'np.random.randn', (['(3)', '(4)'], {}), '(3, 4)\n', (32140, 32146), True, 'import numpy as np\n'), ((5432, 5450), 'numpy.random.rand', 'np.random.rand', (['(12)'], {}), '(12)\n', (5446, 5450), True, 'import numpy as np\n'), ((5960, 5978), 'numpy.random.rand', 'np.random.rand', (['(12)'], {}), '(12)\n', (5974, 5978), True, 'import numpy as np\n'), ((13371, 13395), 'torch.norm', 'torch.norm', (['tw_flat'], {'p': '(1)'}), '(tw_flat, p=1)\n', (13381, 13395), False, 'import torch\n'), ((30536, 30578), 'revdiff.build_node_grad', 'rd.build_node_grad', (['dloss', 'dnet.params_[i]'], {}), '(dloss, dnet.params_[i])\n', (30554, 30578), True, 'import revdiff as rd\n'), ((32997, 33039), 'revdiff.build_node_grad', 'rd.build_node_grad', (['dloss', 'dnet.params_[i]'], {}), '(dloss, dnet.params_[i])\n', (33015, 33039), True, 'import revdiff as rd\n'), ((7776, 7799), 'utils.get_grad', 'utils.get_grad', (['ty_pred'], {}), '(ty_pred)\n', (7790, 7799), False, 'import utils\n'), ((9081, 9103), 'utils.get_grad', 'utils.get_grad', (['ty_out'], {}), '(ty_out)\n', (9095, 9103), False, 'import utils\n'), ((9706, 9728), 'revdiff.build_vlog', 'rd.build_vlog', (['dy_pred'], {}), '(dy_pred)\n', (9719, 9728), True, 'import revdiff as rd\n'), 
((9763, 9790), 'revdiff.build_vsadd', 'rd.build_vsadd', (['(1)', '(-dy_true)'], {}), '(1, -dy_true)\n', (9777, 9790), True, 'import revdiff as rd\n'), ((10491, 10513), 'utils.get_grad', 'utils.get_grad', (['ty_out'], {}), '(ty_out)\n', (10505, 10513), False, 'import utils\n'), ((11908, 11930), 'utils.get_grad', 'utils.get_grad', (['ty_out'], {}), '(ty_out)\n', (11922, 11930), False, 'import utils\n'), ((13683, 13705), 'utils.get_grad', 'utils.get_grad', (['ty_out'], {}), '(ty_out)\n', (13697, 13705), False, 'import utils\n'), ((9839, 9866), 'revdiff.build_vsadd', 'rd.build_vsadd', (['(1)', '(-dy_pred)'], {}), '(1, -dy_pred)\n', (9853, 9866), True, 'import revdiff as rd\n')] |
#!/usr/bin/env python3
"""
Train my neural network
Usage:
homework4 autoencoder
homework4 werk <partitions> <sampling>
homework4 test
Arguments:
autoencoder
Run autoencoder
werk
Perform rap1 learning task with cross-validation
test
Classify test data and output to tsv
<partitions>
        Number of partitions to make for cross-validation
<sampling>
Sampling method for NN training input
(slide) Iterate over sequence in 17nt sliding frame
(space) Chop up each sequence into 17nt bits for inputs
"""
def werk():
"""
Train neural network on RAP binding sites
* Input layer with 17*4 (68) nodes + bias
* Hidden layer with 23-35 nodes + bias
* One output layer node
Train against negative and positive binding sites
* Import all negative sequences from .fa file
* For each sequence, iterate every 17 bases and train with
expected out of 0
* Every 137 negative training instances, train against all
positive binding sites with expected out of 1
* Go until negative binding sites have been iterated through...
"""
# Import Positive sequences
positive_sites = [pos_seq.strip() for pos_seq in open('project_files/rap1-lieb-positives.txt')]
# Import Negative sequences
negative_sites = list(SeqIO.parse('project_files/yeast-upstream-1k-negative.fa', 'fasta'))
# Separate into k random sections
# Taken from : http://stackoverflow.com/questions/3352737/python-randomly-partition-a-list-into-n-nearly-equal-parts
partitions = int(args['<partitions>'])
neg_division = len(negative_sites) / float(partitions)
neg_randomly_partitioned_list = [negative_sites[int(round(neg_division * i)): int(round(neg_division * (i + 1)))]
for i in range(partitions)]
pos_division = len(positive_sites) / float(partitions)
pos_randomly_partitioned_list = [positive_sites[int(round(pos_division * i)): int(round(pos_division * (i + 1)))]
for i in range(partitions)]
# Cycle through negative sites subsets for cross-validation
separation = 0
for index in range(int(args['<partitions>'])):
# Set up cross-validation sets
neg_site_list_copy = copy.deepcopy(neg_randomly_partitioned_list)
del neg_site_list_copy[index]
neg_site_training = [seq for partition in neg_site_list_copy for seq in partition]
neg_cross_validation_set = neg_randomly_partitioned_list[index]
pos_site_list_copy = copy.deepcopy(pos_randomly_partitioned_list)
del pos_site_list_copy[index]
pos_site_training = [seq for partition in pos_site_list_copy for seq in partition]
pos_cross_validation_set = pos_randomly_partitioned_list[index]
print("Training on Training Set...")
# Set number of nodes
NN = neural_network(68, 23, 1)
# Initialize values
NN.initialize_values()
pos_counter = 0
counter = 0
if args['<sampling>'] == 'slide':
for site in neg_site_training:
# Iterate over site in 17nt sliding frames in negative sites
for block in range(len(site) - 16):
slice = site[block:(block + 17)].seq
if slice not in positive_sites:
if all([slice[4] == 'C', slice[5] == 'C', slice[9] == 'C']) == False:
NN.set_input_and_expected_values(slice, autoencoder=False, negative=True)
NN.forward_propogation()
NN.backward_propogation()
NN.update_weights_and_bias()
pos_counter += 1
else:
print(slice)
if pos_counter == len(pos_site_training):
for pos_site in pos_site_training:
NN.set_input_and_expected_values(pos_site, autoencoder=False, negative=False)
NN.forward_propogation()
NN.backward_propogation()
NN.update_weights_and_bias()
pos_counter = 0
counter += 1
print("Training set: {}/{} completed...".format(counter, len(neg_cross_validation_set)))
max_change_1 = NN.matrix_1_errors.max()
min_change_1 = NN.matrix_1_errors.min()
max_change_2 = NN.matrix_2_errors.max()
min_change_2 = NN.matrix_2_errors.min()
if any([max_change_1 < 0.00000000001 and max_change_1 > 0,
min_change_1 > -.00000000001 and min_change_1 < 0]) and any(
[max_change_2 < 0.00000000001 and max_change_2 > 0,
min_change_2 > -0.00000000001 and min_change_2 < 0]):
print("Stop criterion met after {} iterations".format(counter))
break
if args['<sampling>'] == 'space':
for site in neg_site_training:
# Chop sequence into 17nt blocks in negative sites
number_of_blocks = int(len(site) / 17)
for block in range(number_of_blocks):
slice = site[(block * 17):((block + 1) * 17)].seq
if slice not in positive_sites:
if all([slice[4] == 'C', slice[5] == 'C', slice[9] == 'C']) == False:
NN.set_input_and_expected_values(slice, autoencoder=False, negative=True)
NN.forward_propogation()
NN.backward_propogation()
NN.update_weights_and_bias()
pos_counter += 1
else:
print(slice)
if pos_counter == len(pos_site_training):
for pos_site in pos_site_training:
NN.set_input_and_expected_values(pos_site, autoencoder=False, negative=False)
NN.forward_propogation()
NN.backward_propogation()
NN.update_weights_and_bias()
pos_counter = 0
counter += 1
max_change_1 = NN.matrix_1_errors.max()
min_change_1 = NN.matrix_1_errors.min()
max_change_2 = NN.matrix_2_errors.max()
min_change_2 = NN.matrix_2_errors.min()
if any([max_change_1 < 0.00000000001 and max_change_1 > 0,
min_change_1 > -.00000000001 and min_change_1 < 0]) and any(
[max_change_2 < 0.00000000001 and max_change_2 > 0,
min_change_2 > -0.00000000001 and min_change_2 < 0]):
print("Stop criterion met after {} iterations".format(counter))
break
print("Performing Cross-validation")
pos_list = []
neg_list = []
print("Negative cross-validation set...")
counter = 0
for site in neg_cross_validation_set:
for slice in range(len(site) - 16):
NN.set_input_and_expected_values(site[slice:slice + 17].seq, autoencoder=False, negative=True)
NN.forward_propogation()
neg_list.append(NN.output_layer_output)
counter += 1
print("Negative cross-validation: {}/{} completed...".format(counter, len(neg_cross_validation_set)))
break
print("Positive cross-validation set...")
for site in pos_cross_validation_set:
NN.set_input_and_expected_values(site, autoencoder=False)
NN.forward_propogation()
pos_list.append(NN.output_layer_output)
print('Positive avg: {}'.format(sum(pos_list) / len(pos_list)))
print('Negative avg: {}'.format(sum(neg_list) / len(neg_list)))
print(NN.matrix_1_bias)
print(NN.matrix_2_bias)
# Output connection matrices with greatest separation between average positive and negative scores
if ((sum(pos_list) / len(pos_list)) - (sum(neg_list) / len(neg_list))) > separation:
np.savetxt('connection_matrix_1.csv', NN.matrix_1_bias, delimiter=',')
np.savetxt('connection_matrix_2.csv', NN.matrix_2_bias, delimiter=',')
separation = (sum(pos_list) / len(pos_list)) - (sum(neg_list) / len(neg_list))
def autoencoder():
NN = neural_network()
NN.set_input_and_expected_values('GA', autoencoder=True)
NN.initialize_values()
# Stop criterion
finished_working = False
while finished_working == False:
NN.forward_propogation()
NN.backward_propogation()
NN.update_weights_and_bias()
max_change_1 = NN.matrix_1_errors.max()
min_change_1 = NN.matrix_1_errors.min()
max_change_2 = NN.matrix_2_errors.max()
min_change_2 = NN.matrix_2_errors.min()
if any([max_change_1 < 0.00001 and max_change_1 > 0,
min_change_1 > -.00001 and min_change_1 < 0]) or any(
[max_change_2 < 0.00001 and max_change_2 > 0,
min_change_2 > -0.00001 and min_change_2 < 0]):
finished_working = True
print(NN.output_layer_output)
def test():
test_sequences = open('project_files/rap1-lieb-test.txt')
NN = neural_network(68, 23, 1)
NN.matrix_1_bias = np.loadtxt('connection_matrix_1.csv', delimiter=',')
NN.matrix_2_bias = np.loadtxt('connection_matrix_2.csv', delimiter=',')
NN_outputs = open('NN_predictions.txt', 'w')
for test_seq in test_sequences:
NN.set_input_and_expected_values(test_seq.strip())
NN.forward_propogation()
NN_outputs.write('{}\t{}\n'.format(test_seq.strip(), NN.output_layer_output[0]))
NN_outputs.close()
if __name__ == '__main__':
import docopt
import numpy as np
from Bio import SeqIO
import copy
from .neural_network import neural_network
args = docopt.docopt(__doc__)
if args['autoencoder']:
autoencoder()
if args['werk']:
werk()
if args['test']:
test()
| [
"Bio.SeqIO.parse",
"numpy.savetxt",
"copy.deepcopy",
"numpy.loadtxt",
"docopt.docopt"
] | [((9591, 9643), 'numpy.loadtxt', 'np.loadtxt', (['"""connection_matrix_1.csv"""'], {'delimiter': '""","""'}), "('connection_matrix_1.csv', delimiter=',')\n", (9601, 9643), True, 'import numpy as np\n'), ((9667, 9719), 'numpy.loadtxt', 'np.loadtxt', (['"""connection_matrix_2.csv"""'], {'delimiter': '""","""'}), "('connection_matrix_2.csv', delimiter=',')\n", (9677, 9719), True, 'import numpy as np\n'), ((10182, 10204), 'docopt.docopt', 'docopt.docopt', (['__doc__'], {}), '(__doc__)\n', (10195, 10204), False, 'import docopt\n'), ((1383, 1450), 'Bio.SeqIO.parse', 'SeqIO.parse', (['"""project_files/yeast-upstream-1k-negative.fa"""', '"""fasta"""'], {}), "('project_files/yeast-upstream-1k-negative.fa', 'fasta')\n", (1394, 1450), False, 'from Bio import SeqIO\n'), ((2343, 2387), 'copy.deepcopy', 'copy.deepcopy', (['neg_randomly_partitioned_list'], {}), '(neg_randomly_partitioned_list)\n', (2356, 2387), False, 'import copy\n'), ((2619, 2663), 'copy.deepcopy', 'copy.deepcopy', (['pos_randomly_partitioned_list'], {}), '(pos_randomly_partitioned_list)\n', (2632, 2663), False, 'import copy\n'), ((8370, 8440), 'numpy.savetxt', 'np.savetxt', (['"""connection_matrix_1.csv"""', 'NN.matrix_1_bias'], {'delimiter': '""","""'}), "('connection_matrix_1.csv', NN.matrix_1_bias, delimiter=',')\n", (8380, 8440), True, 'import numpy as np\n'), ((8453, 8523), 'numpy.savetxt', 'np.savetxt', (['"""connection_matrix_2.csv"""', 'NN.matrix_2_bias'], {'delimiter': '""","""'}), "('connection_matrix_2.csv', NN.matrix_2_bias, delimiter=',')\n", (8463, 8523), True, 'import numpy as np\n')] |
#An example to demonstrate the plotting of 2D pixel plot from array saved as bin file in row major order
#Refer to bin_1.c
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import sys
file_test="bindata_1.bin"
def load_binary_data(filename, dtype=np.float64):
"""
We assume that the data was written
with write_binary_data() (little endian).
"""
f = open(filename, "rb")
data = f.read()
f.close()
_data = np.fromstring(data, dtype)
if sys.byteorder == 'big':
_data = _data.byteswap()
return _data
data=load_binary_data(file_test)
x=range(1,6,1)
y=range(1,3,1)
plt.xscale("log")
plt.yscale("log")
Z=data.reshape((2,5))
print(Z)
plt.imshow(Z,extent=(min(x),max(x),max(y),min(y)),interpolation='nearest',cmap=plt.cm.hot)
plt.colorbar()
plt.show()
| [
"matplotlib.pyplot.colorbar",
"numpy.fromstring",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.show"
] | [((655, 672), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (665, 672), True, 'import matplotlib.pyplot as plt\n'), ((674, 691), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (684, 691), True, 'import matplotlib.pyplot as plt\n'), ((820, 834), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (832, 834), True, 'import matplotlib.pyplot as plt\n'), ((836, 846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (844, 846), True, 'import matplotlib.pyplot as plt\n'), ((480, 506), 'numpy.fromstring', 'np.fromstring', (['data', 'dtype'], {}), '(data, dtype)\n', (493, 506), True, 'import numpy as np\n')] |
import numpy as np
from scipy import signal
def sine_wave(freq, sr, seconds, n_bits=8):
t = np.arange(int(sr*seconds))
samples = np.sin(2*np.pi*t*freq/sr).astype(np.float32)
# Convert to int and return
to_int = 2**n_bits-1
return (samples*to_int).astype(np.int16)
def square_wave(freq, sr, seconds, n_bits=8):
t = np.arange(int(sr*seconds))
samples = signal.square(2*np.pi*t*freq/sr)
# Convert to int and return
to_int = 2**n_bits-1
return (samples*to_int).astype(np.int16)
def triangle_wave(freq, sr, seconds, n_bits=8):
t = np.arange(int(sr*seconds))
samples = signal.sawtooth(2*np.pi*t*freq/sr)
# Convert to int and return
to_int = 2**n_bits-1
return (samples*to_int).astype(np.int16) | [
"scipy.signal.square",
"numpy.sin",
"scipy.signal.sawtooth"
] | [((382, 422), 'scipy.signal.square', 'signal.square', (['(2 * np.pi * t * freq / sr)'], {}), '(2 * np.pi * t * freq / sr)\n', (395, 422), False, 'from scipy import signal\n'), ((616, 658), 'scipy.signal.sawtooth', 'signal.sawtooth', (['(2 * np.pi * t * freq / sr)'], {}), '(2 * np.pi * t * freq / sr)\n', (631, 658), False, 'from scipy import signal\n'), ((138, 171), 'numpy.sin', 'np.sin', (['(2 * np.pi * t * freq / sr)'], {}), '(2 * np.pi * t * freq / sr)\n', (144, 171), True, 'import numpy as np\n')] |
import scipy.signal
import numpy as np
from .cltools import HAVE_PYOPENCL, OpenCL_Helper
if HAVE_PYOPENCL:
import pyopencl
mf = pyopencl.mem_flags
#~ from pyacq.dsp.overlapfiltfilt import SosFiltfilt_Scipy
from .tools import FifoBuffer, median_mad
def offline_signal_preprocessor(sigs, sample_rate, common_ref_removal=True,
highpass_freq=300., lowpass_freq=None, output_dtype='float32', normalize=True, **unused):
#cast
sigs = sigs.astype(output_dtype)
#filter
if highpass_freq is not None:
b, a = scipy.signal.iirfilter(5, highpass_freq/sample_rate*2, analog=False,
btype = 'highpass', ftype = 'butter', output = 'ba')
filtered_sigs = scipy.signal.filtfilt(b, a, sigs, axis=0)
else:
filtered_sigs = sigs.copy()
if lowpass_freq is not None:
b, a = scipy.signal.iirfilter(5, lowpass_freq/sample_rate*2, analog=False,
btype = 'lowpass', ftype = 'butter', output = 'ba')
filtered_sigs = scipy.signal.filtfilt(b, a, filtered_sigs, axis=0)
# common reference removal
if common_ref_removal:
filtered_sigs = filtered_sigs - np.median(filtered_sigs, axis=1)[:, None]
# normalize
if normalize:
#~ med = np.median(filtered_sigs, axis=0)
#~ mad = np.median(np.abs(filtered_sigs-med),axis=0)*1.4826
med, mad = median_mad(filtered_sigs, axis=0)
normed_sigs = (filtered_sigs - med)/mad
else:
normed_sigs = filtered_sigs
return normed_sigs.astype(output_dtype)
def estimate_medians_mads_after_preprocesing(sigs, sample_rate, **params):
params2 = dict(params)
params2['normalize'] = False
filtered_sigs = offline_signal_preprocessor(sigs, sample_rate, **params2)
med, mad = median_mad(filtered_sigs, axis=0)
return med, mad
class SignalPreprocessor_base:
def __init__(self,sample_rate, nb_channel, chunksize, input_dtype):
self.sample_rate = sample_rate
self.nb_channel = nb_channel
self.chunksize = chunksize
self.input_dtype = input_dtype
def change_params(self, common_ref_removal=True,
highpass_freq=300.,
lowpass_freq=None,
smooth_size=0,
output_dtype='float32',
normalize=True,
pad_width = None,
signals_medians=None, signals_mads=None):
self.signals_medians = signals_medians
self.signals_mads = signals_mads
self.common_ref_removal = common_ref_removal
self.highpass_freq = highpass_freq
self.lowpass_freq = lowpass_freq
self.smooth_size = int(smooth_size)
self.output_dtype = np.dtype(output_dtype)
self.normalize = normalize
self.pad_width = pad_width
# set default pad_width if none is provided
if self.pad_width is None or self.pad_width<=0:
assert self.highpass_freq is not None, 'pad_width=None needs a highpass_freq'
self.pad_width = int(self.sample_rate/self.highpass_freq*3)
#~ print('self.pad_width', self.pad_width)
self.chunksize_1pad = self.chunksize + self.pad_width
self.chunksize_2pad = self.chunksize + 2 * self.pad_width
#~ print('self.pad_width', self.pad_width)
#~ print('self.chunksize_1pad', self.chunksize_1pad)
#~ assert self.chunksize_1pad>self.chunksize
self.coefficients = np.zeros((0, 6))
nyquist = self.sample_rate/2.
if self.highpass_freq is not None:
if self.highpass_freq>0 and self.highpass_freq<nyquist:
coeff_hp = scipy.signal.iirfilter(5, highpass_freq/self.sample_rate*2, analog=False,
btype = 'highpass', ftype = 'butter', output = 'sos')
self.coefficients = np.concatenate((self.coefficients, coeff_hp))
if self.lowpass_freq is not None:
if self.lowpass_freq>0 and self.lowpass_freq<nyquist:
#~ if self.lowpass_freq>(self.sample_rate/2.):
#~ self.lowpass_freq=(self.sample_rate/2.01)
coeff_lp = scipy.signal.iirfilter(5, lowpass_freq/self.sample_rate*2, analog=False,
btype = 'lowpass', ftype = 'butter', output = 'sos')
self.coefficients = np.concatenate((self.coefficients, coeff_lp))
if self.smooth_size>0:
b0 = (1./3)**.5
b1 = (1-b0)
b2 = 0.
coeff_smooth = np.array([[b0, b1, b2, 1,0,0]], dtype=self.output_dtype)
coeff_smooth = np.tile(coeff_smooth, (self.smooth_size, 1))
self.coefficients = np.concatenate((self.coefficients, coeff_smooth))
if self.coefficients.shape[0]==0:
#this is the null filter
self.coefficients = np.array([[1, 0, 0, 1,0,0]], dtype=self.output_dtype)
self.nb_section =self. coefficients.shape[0]
self.forward_buffer = FifoBuffer((self.chunksize_1pad, self.nb_channel), self.output_dtype)
self.zi = np.zeros((self.nb_section, 2, self.nb_channel), dtype= self.output_dtype)
#~ print('self.normalize', self.normalize)
if self.normalize:
assert self.signals_medians is not None
assert self.signals_mads is not None
def process_buffer(self, data):
# used for offline processing when parralisation is possible
raise(NotImplmentedError)
def initialize_stream(self):
# must be for each new segment when index
# start back
raise(NotImplmentedError)
def process_buffer_stream(self, pos, data):
# used in real time mode when chunk are given one after another
raise(NotImplmentedError)
class SignalPreprocessor_Numpy(SignalPreprocessor_base):
"""
This apply chunk by chunk on a multi signal:
* baseline removal
* hight pass filtfilt
* normalize (optional)
"""
def process_buffer(self, data):
data = data.astype(self.output_dtype)
processed_data = scipy.signal.sosfiltfilt(self.coefficients, data, axis=0)
# TODO find why sosfiltfilt reverse strides!!!
processed_data = np.ascontiguousarray(processed_data, dtype=self.output_dtype)
# removal ref
if self.common_ref_removal:
processed_data -= np.median(processed_data, axis=1)[:, None]
#normalize
if self.normalize:
processed_data -= self.signals_medians
processed_data /= self.signals_mads
return processed_data
def process_buffer_stream(self, pos, data):
# TODO rewrite this with self.process_buffer()
#Online filtfilt
chunk = data.astype(self.output_dtype)
forward_chunk_filtered, self.zi = scipy.signal.sosfilt(self.coefficients, chunk, zi=self.zi, axis=0)
forward_chunk_filtered = forward_chunk_filtered.astype(self.output_dtype)
self.forward_buffer.new_chunk(forward_chunk_filtered, index=pos)
backward_chunk = self.forward_buffer.buffer
backward_filtered = scipy.signal.sosfilt(self.coefficients, backward_chunk[::-1, :], zi=None, axis=0)
backward_filtered = backward_filtered[::-1, :]
backward_filtered = backward_filtered.astype(self.output_dtype)
pos2 = pos-self.pad_width
if pos2<0:
return None, None
i1 = self.chunksize_1pad-self.pad_width-chunk.shape[0]
i2 = self.chunksize
assert i1<i2
data2 = backward_filtered[i1:i2]
if (pos2-data2.shape[0])<0:
data2 = data2[data2.shape[0]-pos2:]
# removal ref
if self.common_ref_removal:
data2 -= np.median(data2, axis=1)[:, None]
#normalize
if self.normalize:
data2 -= self.signals_medians
data2 /= self.signals_mads
return pos2, data2
def initialize_stream(self):
self.forward_buffer.reset()
self.zi[:] = 0
class SignalPreprocessor_OpenCL(SignalPreprocessor_base, OpenCL_Helper):
"""
Implementation in OpenCL depending on material and nb_channel
this can lead to a smal speed improvement...
"""
def __init__(self,sample_rate, nb_channel, chunksize, input_dtype):
SignalPreprocessor_base.__init__(self,sample_rate, nb_channel, chunksize, input_dtype)
def _check_data(self, data):
if not data.flags['C_CONTIGUOUS'] or data.dtype!=self.output_dtype:
data = np.ascontiguousarray(data, dtype=self.output_dtype)
return data
def process_buffer(self, data):
data = self._check_data(data)
#~ print(data.shape, self.chunksize, self.chunksize_2pad, self.pad_width)
#~ assert data.shape[0] == self.chunksize_2pad
if data.shape[0] == self.chunksize_2pad:
# OK
unpad = 0
elif data.shape[0] < self.chunksize_2pad:
# put some zero
unpad = self.chunksize_2pad - data.shape[0]
data_pad = np.zeros((self.chunksize_2pad, data.shape[1]), dtype=data.dtype)
#~ print('Apply a data pad')
data = data_pad
else:
raise ValueError(f'data have wring shape{data.shape[0]} { self.chunksize_2pad}')
event = pyopencl.enqueue_copy(self.queue, self.input_2pad_cl, data)
event = self.kern_forward_backward_filter(self.queue, (self.nb_channel,), (self.nb_channel,),
self.input_2pad_cl, self.coefficients_cl, self.zi1_cl, self.zi2_cl,
self.signals_medians_cl, self.signals_mads_cl, self.output_2pad_cl)
#~ event.wait()
event = pyopencl.enqueue_copy(self.queue, self.output_2pad, self.output_2pad_cl)
event.wait()
data2 = self.output_2pad.copy()
if self.common_ref_removal:
# at the moment common_ref_removal is done on CPU
# and so to avoid transfer normalize is also done on CPU
#TODO implement OpenCL for removal ref
if self.common_ref_removal:
data2 -= np.median(data2, axis=1)[:, None]
#normalize
if self.normalize:
# OpenCL for this when no common_ref_removal
data2 -= self.signals_medians
data2 /= self.signals_mads
if unpad > 0:
data2 = data2[:-unpad, :]
return data2
def process_buffer_stream(self, pos, data):
assert data.shape[0]==self.chunksize
data = self._check_data(data)
#Online filtfilt
event = pyopencl.enqueue_copy(self.queue, self.input_cl, data)
event = self.kern_stream_forward_backward_filter(self.queue, (self.nb_channel,), (self.nb_channel,),
self.input_cl, self.coefficients_cl, self.zi1_cl, self.zi2_cl,
self.fifo_input_backward_cl, self.signals_medians_cl, self.signals_mads_cl, self.output_backward_cl)
event.wait()
#~ event.wait()
start = pos-self.chunksize_1pad
if start<-self.pad_width:
return None, None
pos2 = pos-self.pad_width
event = pyopencl.enqueue_copy(self.queue, self.output_backward, self.output_backward_cl)
if start>0:
data2 = self.output_backward[:self.chunksize, :]
else:
data2 = self.output_backward[self.pad_width:self.chunksize, :]
data2 = data2.copy()
if self.common_ref_removal:
# at the moment common_ref_removal is done on CPU
# and so to avoid transfer normalize is also done on CPU
#TODO implement OpenCL for removal ref
if self.common_ref_removal:
data2 -= np.median(data2, axis=1)[:, None]
#normalize
if self.normalize:
# OpenCL for this when no common_ref_removal
data2 -= self.signals_medians
data2 /= self.signals_mads
return pos2, data2
def change_params(self, **kargs):
cl_platform_index=kargs.pop('cl_platform_index', None)
cl_device_index=kargs.pop('cl_device_index', None)
ctx=kargs.pop('ctx', None)
queue=kargs.pop('queue', None)
OpenCL_Helper.initialize_opencl(self,cl_platform_index=cl_platform_index, cl_device_index=cl_device_index, ctx=ctx, queue=queue)
SignalPreprocessor_base.change_params(self, **kargs)
assert self.output_dtype=='float32', 'SignalPreprocessor_OpenCL support only float32 at the moment'
assert self.pad_width<self.chunksize, 'OpenCL fifo work only for self.pad_width<self.chunksize'
self.coefficients = np.ascontiguousarray(self.coefficients, dtype=self.output_dtype)
#~ print(self.coefficients.shape)
# this is for stream processing
self.zi1 = np.zeros((self.nb_channel, self.nb_section, 2), dtype= self.output_dtype)
self.zi2 = np.zeros((self.nb_channel, self.nb_section, 2), dtype= self.output_dtype)
self.output_forward = np.zeros((self.chunksize, self.nb_channel), dtype= self.output_dtype)
self.fifo_input_backward = np.zeros((self.chunksize_1pad, self.nb_channel), dtype= self.output_dtype)
self.output_backward = np.zeros((self.chunksize_1pad, self.nb_channel), dtype= self.output_dtype)
#GPU buffers
self.coefficients_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.coefficients)
self.zi1_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi1)
self.zi2_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi2)
self.input_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output_forward.nbytes)
self.output_forward_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output_forward.nbytes)
self.fifo_input_backward_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.fifo_input_backward)
self.output_backward_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output_backward.nbytes)
if self.signals_medians is not None:
self.signals_medians_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.signals_medians)
self.signals_mads_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.signals_mads)
else:
self.signals_medians_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np.zeros(self.nb_channel, dtype= self.output_dtype))
self.signals_mads_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np.zeros(self.nb_channel, dtype= self.output_dtype))
# this is for offline processing
self.input_2pad = np.zeros((self.chunksize_2pad, self.nb_channel), dtype= self.output_dtype)
self.output_2pad = np.zeros((self.chunksize_2pad, self.nb_channel), dtype= self.output_dtype)
self.input_2pad_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.input_2pad)
self.output_2pad_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.output_2pad)
#CL prog
if not self.common_ref_removal and self.normalize:
extra_code_nomalize = _extra_code_nomalize
extra_code_nomalize2 = _extra_code_nomalize2
else:
extra_code_nomalize = ''
extra_code_nomalize2 = ''
kernel_formated = processor_kernel%dict(chunksize=self.chunksize, chunksize_1pad=self.chunksize_1pad,
chunksize_2pad=self.chunksize_2pad,
pad_width=self.pad_width, nb_section=self.nb_section, nb_channel=self.nb_channel,
extra_code_nomalize=extra_code_nomalize, extra_code_nomalize2=extra_code_nomalize2)
#~ print(kernel_formated)
prg = pyopencl.Program(self.ctx, kernel_formated)
self.opencl_prg = prg.build(options='-cl-mad-enable')
self.max_wg_size = self.ctx.devices[0].get_info(pyopencl.device_info.MAX_WORK_GROUP_SIZE)
self.kern_stream_forward_backward_filter = getattr(self.opencl_prg, 'stream_forward_backward_filter')
self.kern_forward_backward_filter = getattr(self.opencl_prg, 'forward_backward_filter')
def initialize_stream(self):
self.output_forward[:] = 0
event = pyopencl.enqueue_copy(self.queue, self.output_backward_cl, self.output_backward)
event.wait()
self.zi1[:] = 0
event = pyopencl.enqueue_copy(self.queue, self.zi1_cl, self.zi1)
event.wait()
self.zi2[:] = 0
event = pyopencl.enqueue_copy(self.queue, self.zi2_cl, self.zi2)
event.wait()
processor_kernel = """
#define chunksize %(chunksize)d
#define chunksize_1pad %(chunksize_1pad)d
#define chunksize_2pad %(chunksize_2pad)d
#define pad_width %(pad_width)d
#define nb_section %(nb_section)d
#define nb_channel %(nb_channel)d
__kernel void sos_filter(__global float *input, __global float *output, __constant float *coefficients,
__global float *zi, int local_chunksize, int direction, int out_offset_index) {
int chan = get_global_id(0); //channel indice
int offset_filt2; //offset channel within section
int offset_zi = chan*nb_section*2;
int idx;
float w0, w1,w2;
float res;
for (int section=0; section<nb_section; section++){
//offset_filt2 = chan*nb_section*6+section*6;
offset_filt2 = section*6;
w1 = zi[offset_zi+section*2+0];
w2 = zi[offset_zi+section*2+1];
for (int s=0; s<local_chunksize;s++){
if (direction==1) {idx = s*nb_channel+chan;}
else if (direction==-1) {idx = (local_chunksize-s-1)*nb_channel+chan;}
if (section==0) {w0 = input[idx];}
else {w0 = output[idx+out_offset_index];}
w0 -= coefficients[offset_filt2+4] * w1;
w0 -= coefficients[offset_filt2+5] * w2;
res = coefficients[offset_filt2+0] * w0 + coefficients[offset_filt2+1] * w1 + coefficients[offset_filt2+2] * w2;
w2 = w1; w1 =w0;
output[idx+out_offset_index] = res;
}
zi[offset_zi+section*2+0] = w1;
zi[offset_zi+section*2+1] = w2;
}
}
__kernel void stream_forward_backward_filter(__global float *input,
__constant float * coefficients,
__global float * zi1,
__global float * zi2,
__global float *fifo_input_backward,
__global float *signals_medians,
__global float *signals_mads,
__global float *output_backward){
int chan = get_global_id(0); //channel indice
//roll
for (int s=0; s<pad_width;s++){
fifo_input_backward[(s)*nb_channel+chan] = fifo_input_backward[(s+chunksize)*nb_channel+chan];
}
int out_offset_index = pad_width*nb_channel;
sos_filter(input, fifo_input_backward, coefficients, zi1, chunksize, 1, out_offset_index);
//set zi2 to zeros
for (int s=0; s<nb_section;s++){
zi2[chan*nb_section*2+s] = 0;
zi2[chan*nb_section*2+s+1] = 0;
}
//filter backward
sos_filter(fifo_input_backward, output_backward, coefficients, zi2, chunksize_1pad, -1, 0);
// nomalize optional
%(extra_code_nomalize)s
}
__kernel void forward_backward_filter(__global float *input,
__constant float * coefficients,
__global float * zi1,
__global float * zi2,
__global float *signals_medians,
__global float *signals_mads,
__global float *output){
int chan = get_global_id(0); //channel indice
sos_filter(input, input, coefficients, zi1, chunksize_2pad, 1, 0);
//filter backward
sos_filter(input, output, coefficients, zi2, chunksize_2pad, -1, 0);
// nomalize optional
%(extra_code_nomalize2)s
}
"""
_extra_code_nomalize = """
float v;
for (int s=0; s<chunksize;s++){
v = output_backward[(s)*nb_channel+chan];
output_backward[(s)*nb_channel+chan] = (v - signals_medians[chan]) / signals_mads[chan];
}
"""
_extra_code_nomalize2 = """
float v;
for (int s=0; s<chunksize_2pad;s++){
v = output[(s)*nb_channel+chan];
output[(s)*nb_channel+chan] = (v - signals_medians[chan]) / signals_mads[chan];
}
"""
signalpreprocessor_engines = { 'numpy' : SignalPreprocessor_Numpy,
'opencl' : SignalPreprocessor_OpenCL}
| [
"pyopencl.Buffer",
"pyopencl.Program",
"numpy.tile",
"numpy.median",
"pyopencl.enqueue_copy",
"numpy.ascontiguousarray",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.dtype"
] | [((3026, 3048), 'numpy.dtype', 'np.dtype', (['output_dtype'], {}), '(output_dtype)\n', (3034, 3048), True, 'import numpy as np\n'), ((3792, 3808), 'numpy.zeros', 'np.zeros', (['(0, 6)'], {}), '((0, 6))\n', (3800, 3808), True, 'import numpy as np\n'), ((5496, 5568), 'numpy.zeros', 'np.zeros', (['(self.nb_section, 2, self.nb_channel)'], {'dtype': 'self.output_dtype'}), '((self.nb_section, 2, self.nb_channel), dtype=self.output_dtype)\n', (5504, 5568), True, 'import numpy as np\n'), ((6662, 6723), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['processed_data'], {'dtype': 'self.output_dtype'}), '(processed_data, dtype=self.output_dtype)\n', (6682, 6723), True, 'import numpy as np\n'), ((9875, 9934), 'pyopencl.enqueue_copy', 'pyopencl.enqueue_copy', (['self.queue', 'self.input_2pad_cl', 'data'], {}), '(self.queue, self.input_2pad_cl, data)\n', (9896, 9934), False, 'import pyopencl\n'), ((10282, 10354), 'pyopencl.enqueue_copy', 'pyopencl.enqueue_copy', (['self.queue', 'self.output_2pad', 'self.output_2pad_cl'], {}), '(self.queue, self.output_2pad, self.output_2pad_cl)\n', (10303, 10354), False, 'import pyopencl\n'), ((11272, 11326), 'pyopencl.enqueue_copy', 'pyopencl.enqueue_copy', (['self.queue', 'self.input_cl', 'data'], {}), '(self.queue, self.input_cl, data)\n', (11293, 11326), False, 'import pyopencl\n'), ((11923, 12008), 'pyopencl.enqueue_copy', 'pyopencl.enqueue_copy', (['self.queue', 'self.output_backward', 'self.output_backward_cl'], {}), '(self.queue, self.output_backward, self.output_backward_cl\n )\n', (11944, 12008), False, 'import pyopencl\n'), ((13522, 13586), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['self.coefficients'], {'dtype': 'self.output_dtype'}), '(self.coefficients, dtype=self.output_dtype)\n', (13542, 13586), True, 'import numpy as np\n'), ((13697, 13769), 'numpy.zeros', 'np.zeros', (['(self.nb_channel, self.nb_section, 2)'], {'dtype': 'self.output_dtype'}), '((self.nb_channel, self.nb_section, 2), 
dtype=self.output_dtype)\n', (13705, 13769), True, 'import numpy as np\n'), ((13790, 13862), 'numpy.zeros', 'np.zeros', (['(self.nb_channel, self.nb_section, 2)'], {'dtype': 'self.output_dtype'}), '((self.nb_channel, self.nb_section, 2), dtype=self.output_dtype)\n', (13798, 13862), True, 'import numpy as np\n'), ((13894, 13962), 'numpy.zeros', 'np.zeros', (['(self.chunksize, self.nb_channel)'], {'dtype': 'self.output_dtype'}), '((self.chunksize, self.nb_channel), dtype=self.output_dtype)\n', (13902, 13962), True, 'import numpy as np\n'), ((13999, 14072), 'numpy.zeros', 'np.zeros', (['(self.chunksize_1pad, self.nb_channel)'], {'dtype': 'self.output_dtype'}), '((self.chunksize_1pad, self.nb_channel), dtype=self.output_dtype)\n', (14007, 14072), True, 'import numpy as np\n'), ((14105, 14178), 'numpy.zeros', 'np.zeros', (['(self.chunksize_1pad, self.nb_channel)'], {'dtype': 'self.output_dtype'}), '((self.chunksize_1pad, self.nb_channel), dtype=self.output_dtype)\n', (14113, 14178), True, 'import numpy as np\n'), ((14241, 14331), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 'self.coefficients'}), '(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.\n coefficients)\n', (14256, 14331), False, 'import pyopencl\n'), ((14349, 14426), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', '(mf.READ_WRITE | mf.COPY_HOST_PTR)'], {'hostbuf': 'self.zi1'}), '(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi1)\n', (14364, 14426), False, 'import pyopencl\n'), ((14449, 14526), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', '(mf.READ_WRITE | mf.COPY_HOST_PTR)'], {'hostbuf': 'self.zi2'}), '(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi2)\n', (14464, 14526), False, 'import pyopencl\n'), ((14551, 14624), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', 'mf.READ_WRITE'], {'size': 'self.output_forward.nbytes'}), '(self.ctx, mf.READ_WRITE, size=self.output_forward.nbytes)\n', (14566, 14624), False, 
'import pyopencl\n'), ((14658, 14731), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', 'mf.READ_WRITE'], {'size': 'self.output_forward.nbytes'}), '(self.ctx, mf.READ_WRITE, size=self.output_forward.nbytes)\n', (14673, 14731), False, 'import pyopencl\n'), ((14770, 14867), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 'self.fifo_input_backward'}), '(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.\n fifo_input_backward)\n', (14785, 14867), False, 'import pyopencl\n'), ((14897, 14971), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', 'mf.READ_WRITE'], {'size': 'self.output_backward.nbytes'}), '(self.ctx, mf.READ_WRITE, size=self.output_backward.nbytes)\n', (14912, 14971), False, 'import pyopencl\n'), ((15668, 15741), 'numpy.zeros', 'np.zeros', (['(self.chunksize_2pad, self.nb_channel)'], {'dtype': 'self.output_dtype'}), '((self.chunksize_2pad, self.nb_channel), dtype=self.output_dtype)\n', (15676, 15741), True, 'import numpy as np\n'), ((15770, 15843), 'numpy.zeros', 'np.zeros', (['(self.chunksize_2pad, self.nb_channel)'], {'dtype': 'self.output_dtype'}), '((self.chunksize_2pad, self.nb_channel), dtype=self.output_dtype)\n', (15778, 15843), True, 'import numpy as np\n'), ((15874, 15963), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', '(mf.READ_WRITE | mf.COPY_HOST_PTR)'], {'hostbuf': 'self.input_2pad'}), '(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.\n input_2pad)\n', (15889, 15963), False, 'import pyopencl\n'), ((15989, 16079), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', '(mf.READ_WRITE | mf.COPY_HOST_PTR)'], {'hostbuf': 'self.output_2pad'}), '(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.\n output_2pad)\n', (16004, 16079), False, 'import pyopencl\n'), ((16797, 16840), 'pyopencl.Program', 'pyopencl.Program', (['self.ctx', 'kernel_formated'], {}), '(self.ctx, kernel_formated)\n', (16813, 16840), False, 'import pyopencl\n'), ((17303, 17388), 
'pyopencl.enqueue_copy', 'pyopencl.enqueue_copy', (['self.queue', 'self.output_backward_cl', 'self.output_backward'], {}), '(self.queue, self.output_backward_cl, self.output_backward\n )\n', (17324, 17388), False, 'import pyopencl\n'), ((17455, 17511), 'pyopencl.enqueue_copy', 'pyopencl.enqueue_copy', (['self.queue', 'self.zi1_cl', 'self.zi1'], {}), '(self.queue, self.zi1_cl, self.zi1)\n', (17476, 17511), False, 'import pyopencl\n'), ((17575, 17631), 'pyopencl.enqueue_copy', 'pyopencl.enqueue_copy', (['self.queue', 'self.zi2_cl', 'self.zi2'], {}), '(self.queue, self.zi2_cl, self.zi2)\n', (17596, 17631), False, 'import pyopencl\n'), ((4904, 4962), 'numpy.array', 'np.array', (['[[b0, b1, b2, 1, 0, 0]]'], {'dtype': 'self.output_dtype'}), '([[b0, b1, b2, 1, 0, 0]], dtype=self.output_dtype)\n', (4912, 4962), True, 'import numpy as np\n'), ((4988, 5032), 'numpy.tile', 'np.tile', (['coeff_smooth', '(self.smooth_size, 1)'], {}), '(coeff_smooth, (self.smooth_size, 1))\n', (4995, 5032), True, 'import numpy as np\n'), ((5065, 5114), 'numpy.concatenate', 'np.concatenate', (['(self.coefficients, coeff_smooth)'], {}), '((self.coefficients, coeff_smooth))\n', (5079, 5114), True, 'import numpy as np\n'), ((5262, 5317), 'numpy.array', 'np.array', (['[[1, 0, 0, 1, 0, 0]]'], {'dtype': 'self.output_dtype'}), '([[1, 0, 0, 1, 0, 0]], dtype=self.output_dtype)\n', (5270, 5317), True, 'import numpy as np\n'), ((9066, 9117), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data'], {'dtype': 'self.output_dtype'}), '(data, dtype=self.output_dtype)\n', (9086, 9117), True, 'import numpy as np\n'), ((15055, 15148), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 'self.signals_medians'}), '(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.\n signals_medians)\n', (15070, 15148), False, 'import pyopencl\n'), ((15179, 15269), 'pyopencl.Buffer', 'pyopencl.Buffer', (['self.ctx', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 
'self.signals_mads'}), '(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.\n signals_mads)\n', (15194, 15269), False, 'import pyopencl\n'), ((1218, 1250), 'numpy.median', 'np.median', (['filtered_sigs'], {'axis': '(1)'}), '(filtered_sigs, axis=1)\n', (1227, 1250), True, 'import numpy as np\n'), ((4207, 4252), 'numpy.concatenate', 'np.concatenate', (['(self.coefficients, coeff_hp)'], {}), '((self.coefficients, coeff_hp))\n', (4221, 4252), True, 'import numpy as np\n'), ((4719, 4764), 'numpy.concatenate', 'np.concatenate', (['(self.coefficients, coeff_lp)'], {}), '((self.coefficients, coeff_lp))\n', (4733, 4764), True, 'import numpy as np\n'), ((6812, 6845), 'numpy.median', 'np.median', (['processed_data'], {'axis': '(1)'}), '(processed_data, axis=1)\n', (6821, 6845), True, 'import numpy as np\n'), ((8228, 8252), 'numpy.median', 'np.median', (['data2'], {'axis': '(1)'}), '(data2, axis=1)\n', (8237, 8252), True, 'import numpy as np\n'), ((9609, 9673), 'numpy.zeros', 'np.zeros', (['(self.chunksize_2pad, data.shape[1])'], {'dtype': 'data.dtype'}), '((self.chunksize_2pad, data.shape[1]), dtype=data.dtype)\n', (9617, 9673), True, 'import numpy as np\n'), ((10718, 10742), 'numpy.median', 'np.median', (['data2'], {'axis': '(1)'}), '(data2, axis=1)\n', (10727, 10742), True, 'import numpy as np\n'), ((12496, 12520), 'numpy.median', 'np.median', (['data2'], {'axis': '(1)'}), '(data2, axis=1)\n', (12505, 12520), True, 'import numpy as np\n'), ((15384, 15434), 'numpy.zeros', 'np.zeros', (['self.nb_channel'], {'dtype': 'self.output_dtype'}), '(self.nb_channel, dtype=self.output_dtype)\n', (15392, 15434), True, 'import numpy as np\n'), ((15539, 15589), 'numpy.zeros', 'np.zeros', (['self.nb_channel'], {'dtype': 'self.output_dtype'}), '(self.nb_channel, dtype=self.output_dtype)\n', (15547, 15589), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import sys
from pathlib import Path
import torch
from glob import glob
sys.path.append('..')
from torchlib.segneuralnet import SegmentationNeuralNet
from torchlib.post_processing_func import MAP_post
# Softmax over dim 1 — presumably the class/channel dimension of the network
# output (get_results() below transposes the result from CHW to HWC).
softmax = torch.nn.Softmax(1)
# Maximum-a-posteriori post-processing callable (project-local helper).
MAP = MAP_post()
def get_full_path(selected_model):
    """Return the checkpoint path for a model selected by loss-function name.

    Args:
        selected_model: one of 'Cross Entropy', 'Dice' or 'J-REG'.

    Returns:
        Relative path to the corresponding ``model_best.pth.tar`` checkpoint.

    Raises:
        AssertionError: if ``selected_model`` is not a known model name
            (same exception type as the original ``assert``).
    """
    model_paths = {
        'Cross Entropy': r'models/selected/Segments_Seg1009_1.3.3_unetpad_cwce__adam_map_none_1_60_0/models/model_best.pth.tar',
        'Dice': r'models/selected/Segments_Seg1009_1.3.3_unetpad_cwdice__adam_map_none_1_60_0/models/model_best.pth.tar',
        'J-REG': r'models/selected/Segments_Seg1009_1.3.3_unetpad_jreg__adam_map_none_1_60_0/models/model_best.pth.tar',
    }
    if selected_model not in model_paths:
        # Explicit raise instead of `assert False`: asserts are stripped under
        # `python -O`, which previously fell through to an unbound-variable
        # NameError.  Also fixes the "NOT FOUD" typo in the message.
        raise AssertionError(
            f"MODEL {selected_model} NOT FOUND, OPT: 'Cross Entropy' 'Dice' 'J-REG'")
    return model_paths[selected_model]
def load_model(full_url, use_cuda=True, gpu_id=0, verbose=False):
    """Load a trained SegmentationNeuralNet from a checkpoint file.

    The project name and root directory are derived from the checkpoint's
    location, which is assumed to follow the layout
    ``<patchproject>/<nameproject>/models/<checkpoint file>``.

    Args:
        full_url: path to the checkpoint file (str or ``Path``).
        use_cuda: move the network to GPU ``gpu_id`` when True.
        gpu_id: CUDA device index used for both construction and ``.cuda()``.
        verbose: forwarded to ``net.load``.

    Returns:
        The ``SegmentationNeuralNet`` wrapper with its inner network in
        eval mode.
    """
    full_url = Path(full_url)
    full_path = str(full_url)
    # Walk up from .../<nameproject>/models/model_best.pth.tar
    nameproject = full_url.parent.parent.name
    patchproject = full_url.parent.parent.parent
    net = SegmentationNeuralNet(
        patchproject=patchproject,
        nameproject=nameproject,
        no_cuda=not use_cuda, parallel=False, seed=2021,
        print_freq=False, gpu=gpu_id
    )
    # Only the side effect of loading matters; the previous code also kept
    # unused locals (ckpt_path, file_name, exp_type) and the return value.
    net.load(full_path, verbose)
    if use_cuda:
        net.net.cuda(gpu_id)
    net.net.eval()
    return net
def get_results(srcs, net):
    """Run the segmentation network on one batch and post-process its output.

    Args:
        srcs: input convertible by ``torch.tensor``; a batch dimension is
            prepended before the forward pass (shape assumed (C, H, W) —
            TODO confirm against caller).
        net: callable network; requires CUDA (inputs are moved with .cuda()).

    Returns:
        Tuple ``(output_soft, post)``: softmax probabilities as an H x W x C
        array (with a zero channel prepended) and the MAP post-processed map.
    """
    def from2to3(src):
        # Prepend an all-zero channel (assumes a 2-channel map -> 3 channels).
        zero = np.zeros(src.shape[:2])[..., None]
        return np.concatenate((zero, src), 2)
    inputs = torch.tensor(srcs)[None].float().cuda()
    outputs = net(inputs)
    # Drop the batch dim and transpose CHW -> HWC for both logits and probs.
    output_soft = softmax(outputs).cpu().numpy()[0].transpose(1,2,0)
    outputs = outputs.cpu().numpy()[0].transpose(1,2,0)
    outputs = from2to3(outputs)
    output_soft = from2to3(output_soft)
    # MAP post-processing on the raw logits; index [1] selects one element of
    # its return — TODO confirm against MAP_post's contract.
    post = MAP(outputs, 2)[1]
    return output_soft, post
def resize_batchh(srcs):
    """Identity placeholder: return the batch unchanged (no resizing done)."""
    return srcs
def load_images(images_path, st_verbose=False):
    """Load all files under ``images_path`` as grayscale images, force the
    count to exactly 60, and scale pixel values into [0, 1].

    Args:
        images_path: directory whose files are read with ``cv2.imread(..., 0)``.
        st_verbose: when True, report counts/warnings through streamlit.

    Returns:
        numpy array of 60 grayscale images with values in [0, 1].
    """
    srcs = np.array([cv2.imread(url, 0) for url in sorted(glob(f"{images_path}/*"))])
    len_srcs = len(srcs)
    if st_verbose:
        # streamlit is imported lazily so the module works outside an app.
        import streamlit as st
        st.text(f"Number of segmentations: {len_srcs}")
    if len_srcs != 60:
        # Fixed seed makes the 60-image subsample reproducible
        # (np.random.choice samples with replacement by default).
        np.random.seed(2021)
        ids = np.random.choice(list(range(len_srcs)), 60)
        if st_verbose:
            # NOTE(review): "Warring!" is a typo for "Warning!" in the
            # displayed text (runtime string, left unchanged here).
            st.text("Warring!: Number of segmentations != 60, selecting 60 randomly")
            st.text(ids)
        srcs = srcs[ids]
    if srcs.max() > 1:
        # Scale 8-bit pixel values down into [0, 1].
        srcs = srcs/srcs.max()
    return srcs | [
"torchlib.segneuralnet.SegmentationNeuralNet",
"torch.nn.Softmax",
"pathlib.Path",
"torchlib.post_processing_func.MAP_post",
"torch.tensor",
"streamlit.text",
"numpy.zeros",
"numpy.random.seed",
"numpy.concatenate",
"sys.path.append",
"glob.glob",
"cv2.imread"
] | [((102, 123), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (117, 123), False, 'import sys\n'), ((241, 260), 'torch.nn.Softmax', 'torch.nn.Softmax', (['(1)'], {}), '(1)\n', (257, 260), False, 'import torch\n'), ((267, 277), 'torchlib.post_processing_func.MAP_post', 'MAP_post', ([], {}), '()\n', (275, 277), False, 'from torchlib.post_processing_func import MAP_post\n'), ((1015, 1029), 'pathlib.Path', 'Path', (['full_url'], {}), '(full_url)\n', (1019, 1029), False, 'from pathlib import Path\n'), ((1315, 1476), 'torchlib.segneuralnet.SegmentationNeuralNet', 'SegmentationNeuralNet', ([], {'patchproject': 'patchproject', 'nameproject': 'nameproject', 'no_cuda': '(not use_cuda)', 'parallel': '(False)', 'seed': '(2021)', 'print_freq': '(False)', 'gpu': 'gpu_id'}), '(patchproject=patchproject, nameproject=nameproject,\n no_cuda=not use_cuda, parallel=False, seed=2021, print_freq=False, gpu=\n gpu_id)\n', (1336, 1476), False, 'from torchlib.segneuralnet import SegmentationNeuralNet\n'), ((1761, 1791), 'numpy.concatenate', 'np.concatenate', (['(zero, src)', '(2)'], {}), '((zero, src), 2)\n', (1775, 1791), True, 'import numpy as np\n'), ((2411, 2458), 'streamlit.text', 'st.text', (['f"""Number of segmentations: {len_srcs}"""'], {}), "(f'Number of segmentations: {len_srcs}')\n", (2418, 2458), True, 'import streamlit as st\n'), ((2491, 2511), 'numpy.random.seed', 'np.random.seed', (['(2021)'], {}), '(2021)\n', (2505, 2511), True, 'import numpy as np\n'), ((1711, 1734), 'numpy.zeros', 'np.zeros', (['src.shape[:2]'], {}), '(src.shape[:2])\n', (1719, 1734), True, 'import numpy as np\n'), ((2263, 2281), 'cv2.imread', 'cv2.imread', (['url', '(0)'], {}), '(url, 0)\n', (2273, 2281), False, 'import cv2\n'), ((2614, 2687), 'streamlit.text', 'st.text', (['"""Warring!: Number of segmentations != 60, selecting 60 randomly"""'], {}), "('Warring!: Number of segmentations != 60, selecting 60 randomly')\n", (2621, 2687), True, 'import streamlit as st\n'), ((2700, 
2712), 'streamlit.text', 'st.text', (['ids'], {}), '(ids)\n', (2707, 2712), True, 'import streamlit as st\n'), ((2300, 2324), 'glob.glob', 'glob', (['f"""{images_path}/*"""'], {}), "(f'{images_path}/*')\n", (2304, 2324), False, 'from glob import glob\n'), ((1806, 1824), 'torch.tensor', 'torch.tensor', (['srcs'], {}), '(srcs)\n', (1818, 1824), False, 'import torch\n')] |
from numpy import degrees
from pose import Pose
from renderer import Renderer
from PyQt4.QtGui import QPainter,QColor,QPolygonF,QPen
from PyQt4.QtCore import QPointF,QLineF,QRectF,Qt
class QtRenderer(Renderer):
    """An implementation of :class:`~renderer.Renderer` for PyQt4.

    This renderer will draw on any `QPaintDevice`.
    """
    def __init__(self, paint_device):
        """Creates a new renderer based on a QPaintDevice pd"""
        # Dashed gray pen reused for every grid redraw.
        self._grid_pen = QPen(QColor(0x808080))
        self._grid_pen.setStyle(Qt.DashLine)
        self._painter = None
        Renderer.__init__(self, paint_device)

    def set_canvas(self, canvas):
        """Tell the renderer to draw on canvas
        The type of canvas is implementation-dependent"""
        if self._painter is not None:
            # NOTE(review): two restores — presumably paired with saves pushed
            # during earlier canvas setup/state handling; confirm against the
            # base Renderer's call sequence before changing.
            self._painter.restore()
            self._painter.restore()
            self._painter.end()
        self._paintdevice = canvas
        self._painter = QPainter(canvas)
        self._painter.setRenderHint(QPainter.Antialiasing)
        # Invert the y axis so that y grows upwards (world coordinates).
        self._painter.scale(1,-1)
        self._painter.translate(0,-canvas.height())
        Renderer.set_canvas(self,canvas)

    def _get_canvas_size(self,pd):
        """Get the canvas size tuple (width,height)"""
        return (pd.width(), pd.height())

    def push_state(self):
        """Store the current state on the stack.
        Current state includes default pose, pen and brush"""
        ### FIXME store things
        self._painter.save()

    def pop_state(self):
        """Restore the last saved state from the stack
        The state includes default pose, pen and brush"""
        ### FIXME store things
        self._painter.restore()

    def _calculate_bounds(self):
        """Compute world-space bounds by mapping the four device corners
        through the inverse of the current world transform."""
        transform = self._painter.worldTransform().inverted()[0]
        xs,ys = zip(
            transform.map(0.0,0.0),
            transform.map(0.0,float(self.size[1])),
            transform.map(float(self.size[0]),float(self.size[1])),
            transform.map(float(self.size[0]),0.0)
            )
        self._bounds = (min(xs), min(ys), max(xs), max(ys))

    def _draw_grid(self):
        """Draw dashed grid lines at multiples of the grid spacing that
        span the currently visible world bounds."""
        self.reset_pose()
        self._painter.setPen(self._grid_pen)
        xmin, ymin, xmax, ymax = self._bounds
        # Determine min/max x & y line indices:
        x_ticks = (int(xmin//self._grid_spacing), int(xmax//self._grid_spacing + 1))
        y_ticks = (int(ymin//self._grid_spacing), int(ymax//self._grid_spacing + 1))
        self._painter.drawLines(
            [QLineF(xmin, i * self._grid_spacing,
                    xmax, i * self._grid_spacing)
             for i in range(*y_ticks)])
        self._painter.drawLines(
            [QLineF(i * self._grid_spacing, ymin,
                    i * self._grid_spacing, ymax)
             for i in range(*x_ticks)])

    def scale(self, factor):
        """Scale drawing operations by factor"""
        self._painter.scale(factor,factor)

    def rotate(self, angle):
        """Rotate canvas by angle (in radians)"""
        # QPainter.rotate expects degrees.
        self._painter.rotate(degrees(angle))

    def translate(self, dx, dy):
        """Translate canvas by dx, dy"""
        self._painter.translate(dx,dy)

    def clear_screen(self):
        """Erases the current screen with a white brush"""
        # Paint the whole device white under an identity transform, then
        # restore whatever transform was active.
        self._painter.save()
        self._painter.resetTransform()
        self.set_pen(0xFFFFFF)
        self.set_brush(0xFFFFFF)
        self.draw_rectangle(0,0,self.size[0],self.size[1])
        self._painter.restore()
        Renderer.clear_screen(self)

    @staticmethod
    def __qcolor(color):
        """Returns qcolor for a given ARGB color"""
        c = QColor(color)
        if color > 0xFFFFFF:
            # Values above 0xFFFFFF carry an alpha channel in the top byte.
            c.setAlpha((color >> 24) & 0xFF)
        return c

    def set_pen(self,color=0, thickness=0):
        """Sets the line color and thickness.
        Color is interpreted as 0xAARRGGBB."""
        if color is None:
            self._painter.setPen(Qt.NoPen)
        else:
            self._painter.setPen(QPen(self.__qcolor(color),thickness))

    def set_brush(self,color):
        """Sets the fill color.
        Color is interpreted as 0xAARRGGBB."""
        if color is None:
            self._painter.setBrush(Qt.NoBrush)
        else:
            self._painter.setBrush(self.__qcolor(color))

    def draw_polygon(self,points):
        """Draws a polygon.
        Expects a list of points as a list of tuples or as a numpy array."""
        self._painter.drawPolygon(QPolygonF([QPointF(*point[:2]) for point in points]))

    def draw_ellipse(self, cx, cy, ra, rb = None):
        """Draws an ellipse centered at (cx, cy) with x-radius ra and
        y-radius rb. If rb is omitted, a circle of radius ra is drawn."""
        if rb is None:
            rb = ra
        # BUGFIX: the bounding rect's top must use rb (was cy-ra), otherwise
        # non-circular ellipses were drawn vertically off-center.
        self._painter.drawEllipse(QRectF(cx-ra,cy-rb,2*ra,2*rb))

    def draw_rectangle(self, x, y, w, h):
        """Draws a rectangle."""
        self._painter.drawRect(QRectF(x,y,w,h))

    def draw_text(self, text, x, y, bgcolor = 0):
        """Draws a text string at the defined position.

        Not implemented: all arguments are currently ignored."""
        pass

    def draw_line(self, x1, y1, x2, y2):
        """Draws a line using the current pen from (x1,y1) to (x2,y2)"""
        self._painter.drawLine(QLineF(x1,y1,x2,y2))

    def draw_point(self,x,y):
        """Draw a single point using the current pen at (x,y)"""
        self._painter.drawPoint(QPointF(x,y))

    def draw_points(self,points):
        """Draw a set of points, given as [(x,y)], using the current pen"""
        self._painter.drawPoints(QPolygonF([QPointF(x,y) for x,y in points]))
| [
"PyQt4.QtGui.QColor",
"renderer.Renderer.set_canvas",
"numpy.degrees",
"PyQt4.QtCore.QPointF",
"PyQt4.QtCore.QLineF",
"PyQt4.QtCore.QRectF",
"renderer.Renderer.clear_screen",
"renderer.Renderer.__init__",
"PyQt4.QtGui.QPainter"
] | [((580, 617), 'renderer.Renderer.__init__', 'Renderer.__init__', (['self', 'paint_device'], {}), '(self, paint_device)\n', (597, 617), False, 'from renderer import Renderer\n'), ((968, 984), 'PyQt4.QtGui.QPainter', 'QPainter', (['canvas'], {}), '(canvas)\n', (976, 984), False, 'from PyQt4.QtGui import QPainter, QColor, QPolygonF, QPen\n'), ((1168, 1201), 'renderer.Renderer.set_canvas', 'Renderer.set_canvas', (['self', 'canvas'], {}), '(self, canvas)\n', (1187, 1201), False, 'from renderer import Renderer\n'), ((3694, 3721), 'renderer.Renderer.clear_screen', 'Renderer.clear_screen', (['self'], {}), '(self)\n', (3715, 3721), False, 'from renderer import Renderer\n'), ((3830, 3843), 'PyQt4.QtGui.QColor', 'QColor', (['color'], {}), '(color)\n', (3836, 3843), False, 'from PyQt4.QtGui import QPainter, QColor, QPolygonF, QPen\n'), ((480, 495), 'PyQt4.QtGui.QColor', 'QColor', (['(8421504)'], {}), '(8421504)\n', (486, 495), False, 'from PyQt4.QtGui import QPainter, QColor, QPolygonF, QPen\n'), ((3204, 3218), 'numpy.degrees', 'degrees', (['angle'], {}), '(angle)\n', (3211, 3218), False, 'from numpy import degrees\n'), ((4872, 4912), 'PyQt4.QtCore.QRectF', 'QRectF', (['(cx - ra)', '(cy - ra)', '(2 * ra)', '(2 * rb)'], {}), '(cx - ra, cy - ra, 2 * ra, 2 * rb)\n', (4878, 4912), False, 'from PyQt4.QtCore import QPointF, QLineF, QRectF, Qt\n'), ((5010, 5028), 'PyQt4.QtCore.QRectF', 'QRectF', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (5016, 5028), False, 'from PyQt4.QtCore import QPointF, QLineF, QRectF, Qt\n'), ((5296, 5318), 'PyQt4.QtCore.QLineF', 'QLineF', (['x1', 'y1', 'x2', 'y2'], {}), '(x1, y1, x2, y2)\n', (5302, 5318), False, 'from PyQt4.QtCore import QPointF, QLineF, QRectF, Qt\n'), ((5453, 5466), 'PyQt4.QtCore.QPointF', 'QPointF', (['x', 'y'], {}), '(x, y)\n', (5460, 5466), False, 'from PyQt4.QtCore import QPointF, QLineF, QRectF, Qt\n'), ((2585, 2651), 'PyQt4.QtCore.QLineF', 'QLineF', (['xmin', '(i * self._grid_spacing)', 'xmax', '(i * self._grid_spacing)'], {}), 
'(xmin, i * self._grid_spacing, xmax, i * self._grid_spacing)\n', (2591, 2651), False, 'from PyQt4.QtCore import QPointF, QLineF, QRectF, Qt\n'), ((2761, 2827), 'PyQt4.QtCore.QLineF', 'QLineF', (['(i * self._grid_spacing)', 'ymin', '(i * self._grid_spacing)', 'ymax'], {}), '(i * self._grid_spacing, ymin, i * self._grid_spacing, ymax)\n', (2767, 2827), False, 'from PyQt4.QtCore import QPointF, QLineF, QRectF, Qt\n'), ((4668, 4687), 'PyQt4.QtCore.QPointF', 'QPointF', (['*point[:2]'], {}), '(*point[:2])\n', (4675, 4687), False, 'from PyQt4.QtCore import QPointF, QLineF, QRectF, Qt\n'), ((5630, 5643), 'PyQt4.QtCore.QPointF', 'QPointF', (['x', 'y'], {}), '(x, y)\n', (5637, 5643), False, 'from PyQt4.QtCore import QPointF, QLineF, QRectF, Qt\n')] |
import numpy

# Read nine whitespace-separated integers from stdin and print them
# arranged as a 3x3 numpy array (reshape requires exactly 9 values).
values = [int(token) for token in input().split()]
grid = numpy.array(values)
print(grid.reshape(3, 3))
| [
"numpy.array"
] | [((57, 73), 'numpy.array', 'numpy.array', (['arr'], {}), '(arr)\n', (68, 73), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 18:47:50 2017
@author: adelpret
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from dynamic_graph.sot.torque_control.hrp2.control_manager_conf import IN_OUT_GAIN
from scipy import ndimage
from identification_utils import solve1stOrderLeastSquare
def identify_motor_static(enc, dq, ctrl, current, tau, JOINT_ID, JOINT_NAME, ZERO_VELOCITY_THRESHOLD,
                          ZERO_VELOCITY_THRESHOLD_SMALL, SHOW_THRESHOLD_EFFECT):
    """Identify static motor parameters — current-sensor gain, dead zone and
    torque constants — from constant-angle phases of an experiment.

    Args:
        enc: encoder (joint angle) samples.
        dq: joint velocity samples.
        ctrl: control input samples (divided by IN_OUT_GAIN for comparison).
        current: measured motor current samples.
        tau: joint torque samples.
        JOINT_ID: joint index, used only for printed output.
        JOINT_NAME: joint name, used in plot titles.
        ZERO_VELOCITY_THRESHOLD: |dq| below this counts as constant angle.
        ZERO_VELOCITY_THRESHOLD_SMALL: threshold separating pos/neg velocity.
        SHOW_THRESHOLD_EFFECT: when True, plot which samples the masks keep.

    Returns:
        Tuple (Ktp, Ktn, Ks, DZ): positive/negative torque constants,
        current-sensor gain and dead zone.
    """
    # remove high velocity
    maskConstAng = (abs(dq) < ZERO_VELOCITY_THRESHOLD)
    # erode to get only steady phases where velocity is small
    maskConstAng = ndimage.morphology.binary_erosion(maskConstAng, None, 100)
    maskPosVel = (dq > ZERO_VELOCITY_THRESHOLD_SMALL)
    maskNegVel = (dq < -ZERO_VELOCITY_THRESHOLD_SMALL)
    maskConstPosAng = np.logical_and(maskConstAng, maskPosVel)
    maskConstNegAng = np.logical_and(maskConstAng, maskNegVel)
    if SHOW_THRESHOLD_EFFECT:
        plt.figure()
        plt.plot(enc, label='q')
        q_const = enc.copy()
        q_const[np.logical_not(maskConstAng)] = np.nan
        plt.plot(q_const, label='q_const')
        plt.legend()
    # identify current sensor gain
    x = current[maskConstAng]
    y = ctrl[maskConstAng] / IN_OUT_GAIN
    # keep only samples with a significant control/current discrepancy
    maskPosErr = np.logical_and(y - x > 0.0, np.abs(x) > 0.5)
    maskNegErr = np.logical_and(y - x < 0.0, np.abs(x) > 0.5)
    print("Number of samples with constant angle:", x.shape[0])
    print("Number of samples with constant angle and pos vel:", x[maskPosErr].shape[0])
    print("Number of samples with constant angle and neg vel:", x[maskNegErr].shape[0])
    if (x[maskPosErr].shape[0] < 10):
        # too few positive-error samples: fit on the negative side only
        (Ks, DZ) = solve1stOrderLeastSquare(x[maskNegErr], y[maskNegErr])
    elif (x[maskNegErr].shape[0] < 10):
        # too few negative-error samples: fit on the positive side only
        (Ks, DZ) = solve1stOrderLeastSquare(x[maskPosErr], y[maskPosErr])
    else:
        # enough data on both sides: fit each side independently
        (Ksn, DZn) = solve1stOrderLeastSquare(x[maskNegErr], y[maskNegErr])
        (Ksp, DZp) = solve1stOrderLeastSquare(x[maskPosErr], y[maskPosErr])
        Ks = 0.5 * (Ksp + Ksn)
        # NOTE(review): the average above is dead code — it is immediately
        # overwritten by the min below; kept as-is to preserve behavior.
        Ks = min([Ksp, Ksn])
        DZ = 0.5 * (DZp - DZn)
        print("Current sensor gains = ", Ksp, Ksn)
        print("Deadzones = ", DZp, -DZn)
        x_neg = x[maskNegErr]
        y_neg = y[maskNegErr]
        plt.figure()
        plt.plot(x_neg, y_neg, '.', lw=3, markersize=1, c='0.5')
        # overlay the identified linear model on the scatter
        plt.plot([min(x_neg), max(x_neg)], [Ksn * min(x_neg) + DZn, Ksn * max(x_neg) + DZn], 'g:', lw=3)
        plt.ylabel(r'$i(t)$')
        plt.xlabel(r'$u(t)$')
        plt.title('Negative current errors - Joint ' + JOINT_NAME)
        x_pos = x[maskPosErr]
        y_pos = y[maskPosErr]
        plt.figure()
        plt.plot(x_pos, y_pos, '.', lw=3, markersize=1, c='0.5')
        plt.plot([min(x_pos), max(x_pos)], [Ksp * min(x_pos) + DZp, Ksp * max(x_pos) + DZp], 'g:', lw=3)
        plt.ylabel(r'$i(t)$')
        plt.xlabel(r'$u(t)$')
        plt.title('Positive current errors - Joint ' + JOINT_NAME)
        plt.show()
    if (Ks < 0.0):
        print("ERROR: estimated Ks is negative! Setting it to 1")
        Ks = 1.0
    # plot dead zone effect ********************************************
    plt.figure()
    plt.plot(Ks * current, label='current')
    plt.plot(ctrl / IN_OUT_GAIN, label='control')
    plt.legend()
    plt.figure()
    y = Ks * current[maskConstAng]
    x = ctrl[maskConstAng] / IN_OUT_GAIN - Ks * current[maskConstAng]
    plt.ylabel(r'$i(t)$')
    plt.xlabel(r'$ctrl(t)-i(t)$')
    plt.plot(x, y, '.', lw=3, markersize=1, c='0.5')
    plt.plot(x[maskPosErr], y[maskPosErr], 'rx', lw=3, markersize=1, label='pos err')
    plt.plot(x[maskNegErr], y[maskNegErr], 'bx', lw=3, markersize=1, label='neg err')
    plt.legend()
    plt.figure()
    y = ctrl[maskConstAng] / IN_OUT_GAIN
    x = ctrl[maskConstAng] / IN_OUT_GAIN - Ks * current[maskConstAng]
    plt.ylabel(r'$ctrl(t)$')
    plt.xlabel(r'$ctrl(t)-i(t)$')
    plt.plot(x, y, '.', lw=3, markersize=1, c='0.5')
    plt.plot(x[maskPosErr], y[maskPosErr], 'rx', lw=3, markersize=1, label='pos err')
    plt.plot(x[maskNegErr], y[maskNegErr], 'bx', lw=3, markersize=1, label='neg err')
    plt.legend()
    plt.figure()
    y = ctrl / IN_OUT_GAIN
    x = Ks * current
    plt.ylabel(r'$ctrl(t)$')
    plt.xlabel(r'$i(t)$')
    plt.plot(x, y, '.', lw=3, markersize=1, c='0.5')
    plt.plot([-3, 3], [-3, 3])
    plt.show()
    # y = a. x + b
    # i = Kt.tau + Kf
    # Identification ***************************************************
    y = current  # *Ks
    x = tau
    # fit the current/torque line separately for each velocity sign
    (Ktp, Kfp) = solve1stOrderLeastSquare(x[maskConstPosAng], y[maskConstPosAng])
    (Ktn, b) = solve1stOrderLeastSquare(x[maskConstNegAng], y[maskConstNegAng])
    Kfn = -b
    # Plot *************************************************************
    plt.figure()
    plt.axhline(0, color='black', lw=1)
    plt.axvline(0, color='black', lw=1)
    plt.plot(x, y, '.', lw=3, markersize=1, c='0.5')
    plt.plot(x[maskConstPosAng], y[maskConstPosAng], 'rx', lw=3, markersize=1)
    plt.plot(x[maskConstNegAng], y[maskConstNegAng], 'bx', lw=3, markersize=1)
    # plot identified lin model
    plt.plot([min(x), max(x)], [Ktp * min(x) + Kfp, Ktp * max(x) + Kfp], 'g:', lw=3)
    plt.plot([min(x), max(x)], [Ktn * min(x) - Kfn, Ktn * max(x) - Kfn], 'g:', lw=3)
    plt.ylabel(r'$i(t)$')
    plt.xlabel(r'$\tau(t)$')
    plt.title('Static experiment - Joint ' + JOINT_NAME)
    print("cur_sens_gain[%d] = %f" % (JOINT_ID, Ks))
    print('deadzone[%d] = %f' % (JOINT_ID, DZ))
    print('Kt_p[%d] = %f' % (JOINT_ID, Ktp))
    print('Kt_n[%d] = %f' % (JOINT_ID, Ktn))
    print('Kf_p[%d] = %f' % (JOINT_ID, Kfp))
    print('Kf_n[%d] = %f' % (JOINT_ID, Kfn))
    print('Kt_m[%d] = %f' % (JOINT_ID, (Ktp + Ktn) / 2.0))
    print('Kf_m[%d] = %f' % (JOINT_ID, (Kfp + Kfn) / 2.0))
    return (Ktp, Ktn, Ks, DZ)
| [
"numpy.abs",
"numpy.logical_and",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.logical_not",
"identification_utils.solve1stOrderLeastSquare",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"scipy.ndimage.morphology.... | [((694, 752), 'scipy.ndimage.morphology.binary_erosion', 'ndimage.morphology.binary_erosion', (['maskConstAng', 'None', '(100)'], {}), '(maskConstAng, None, 100)\n', (727, 752), False, 'from scipy import ndimage\n'), ((884, 924), 'numpy.logical_and', 'np.logical_and', (['maskConstAng', 'maskPosVel'], {}), '(maskConstAng, maskPosVel)\n', (898, 924), True, 'import numpy as np\n'), ((947, 987), 'numpy.logical_and', 'np.logical_and', (['maskConstAng', 'maskNegVel'], {}), '(maskConstAng, maskNegVel)\n', (961, 987), True, 'import numpy as np\n'), ((3231, 3243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3241, 3243), True, 'import matplotlib.pyplot as plt\n'), ((3248, 3287), 'matplotlib.pyplot.plot', 'plt.plot', (['(Ks * current)'], {'label': '"""current"""'}), "(Ks * current, label='current')\n", (3256, 3287), True, 'import matplotlib.pyplot as plt\n'), ((3292, 3337), 'matplotlib.pyplot.plot', 'plt.plot', (['(ctrl / IN_OUT_GAIN)'], {'label': '"""control"""'}), "(ctrl / IN_OUT_GAIN, label='control')\n", (3300, 3337), True, 'import matplotlib.pyplot as plt\n'), ((3342, 3354), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3352, 3354), True, 'import matplotlib.pyplot as plt\n'), ((3360, 3372), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3370, 3372), True, 'import matplotlib.pyplot as plt\n'), ((3482, 3502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$i(t)$"""'], {}), "('$i(t)$')\n", (3492, 3502), True, 'import matplotlib.pyplot as plt\n'), ((3508, 3536), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$ctrl(t)-i(t)$"""'], {}), "('$ctrl(t)-i(t)$')\n", (3518, 3536), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3590), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {'lw': '(3)', 'markersize': '(1)', 'c': '"""0.5"""'}), "(x, y, '.', lw=3, markersize=1, c='0.5')\n", (3550, 3590), True, 'import matplotlib.pyplot as plt\n'), ((3595, 3681), 'matplotlib.pyplot.plot', 
'plt.plot', (['x[maskPosErr]', 'y[maskPosErr]', '"""rx"""'], {'lw': '(3)', 'markersize': '(1)', 'label': '"""pos err"""'}), "(x[maskPosErr], y[maskPosErr], 'rx', lw=3, markersize=1, label=\n 'pos err')\n", (3603, 3681), True, 'import matplotlib.pyplot as plt\n'), ((3681, 3767), 'matplotlib.pyplot.plot', 'plt.plot', (['x[maskNegErr]', 'y[maskNegErr]', '"""bx"""'], {'lw': '(3)', 'markersize': '(1)', 'label': '"""neg err"""'}), "(x[maskNegErr], y[maskNegErr], 'bx', lw=3, markersize=1, label=\n 'neg err')\n", (3689, 3767), True, 'import matplotlib.pyplot as plt\n'), ((3767, 3779), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3777, 3779), True, 'import matplotlib.pyplot as plt\n'), ((3785, 3797), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3795, 3797), True, 'import matplotlib.pyplot as plt\n'), ((3913, 3936), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$ctrl(t)$"""'], {}), "('$ctrl(t)$')\n", (3923, 3936), True, 'import matplotlib.pyplot as plt\n'), ((3942, 3970), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$ctrl(t)-i(t)$"""'], {}), "('$ctrl(t)-i(t)$')\n", (3952, 3970), True, 'import matplotlib.pyplot as plt\n'), ((3976, 4024), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {'lw': '(3)', 'markersize': '(1)', 'c': '"""0.5"""'}), "(x, y, '.', lw=3, markersize=1, c='0.5')\n", (3984, 4024), True, 'import matplotlib.pyplot as plt\n'), ((4029, 4115), 'matplotlib.pyplot.plot', 'plt.plot', (['x[maskPosErr]', 'y[maskPosErr]', '"""rx"""'], {'lw': '(3)', 'markersize': '(1)', 'label': '"""pos err"""'}), "(x[maskPosErr], y[maskPosErr], 'rx', lw=3, markersize=1, label=\n 'pos err')\n", (4037, 4115), True, 'import matplotlib.pyplot as plt\n'), ((4115, 4201), 'matplotlib.pyplot.plot', 'plt.plot', (['x[maskNegErr]', 'y[maskNegErr]', '"""bx"""'], {'lw': '(3)', 'markersize': '(1)', 'label': '"""neg err"""'}), "(x[maskNegErr], y[maskNegErr], 'bx', lw=3, markersize=1, label=\n 'neg err')\n", (4123, 4201), True, 'import 
matplotlib.pyplot as plt\n'), ((4201, 4213), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4211, 4213), True, 'import matplotlib.pyplot as plt\n'), ((4219, 4231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4229, 4231), True, 'import matplotlib.pyplot as plt\n'), ((4284, 4307), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$ctrl(t)$"""'], {}), "('$ctrl(t)$')\n", (4294, 4307), True, 'import matplotlib.pyplot as plt\n'), ((4313, 4333), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$i(t)$"""'], {}), "('$i(t)$')\n", (4323, 4333), True, 'import matplotlib.pyplot as plt\n'), ((4339, 4387), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {'lw': '(3)', 'markersize': '(1)', 'c': '"""0.5"""'}), "(x, y, '.', lw=3, markersize=1, c='0.5')\n", (4347, 4387), True, 'import matplotlib.pyplot as plt\n'), ((4392, 4418), 'matplotlib.pyplot.plot', 'plt.plot', (['[-3, 3]', '[-3, 3]'], {}), '([-3, 3], [-3, 3])\n', (4400, 4418), True, 'import matplotlib.pyplot as plt\n'), ((4424, 4434), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4432, 4434), True, 'import matplotlib.pyplot as plt\n'), ((4611, 4675), 'identification_utils.solve1stOrderLeastSquare', 'solve1stOrderLeastSquare', (['x[maskConstPosAng]', 'y[maskConstPosAng]'], {}), '(x[maskConstPosAng], y[maskConstPosAng])\n', (4635, 4675), False, 'from identification_utils import solve1stOrderLeastSquare\n'), ((4691, 4755), 'identification_utils.solve1stOrderLeastSquare', 'solve1stOrderLeastSquare', (['x[maskConstNegAng]', 'y[maskConstNegAng]'], {}), '(x[maskConstNegAng], y[maskConstNegAng])\n', (4715, 4755), False, 'from identification_utils import solve1stOrderLeastSquare\n'), ((4847, 4859), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4857, 4859), True, 'import matplotlib.pyplot as plt\n'), ((4864, 4899), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""black"""', 'lw': '(1)'}), "(0, color='black', lw=1)\n", (4875, 4899), True, 'import 
matplotlib.pyplot as plt\n'), ((4904, 4939), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'color': '"""black"""', 'lw': '(1)'}), "(0, color='black', lw=1)\n", (4915, 4939), True, 'import matplotlib.pyplot as plt\n'), ((4944, 4992), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {'lw': '(3)', 'markersize': '(1)', 'c': '"""0.5"""'}), "(x, y, '.', lw=3, markersize=1, c='0.5')\n", (4952, 4992), True, 'import matplotlib.pyplot as plt\n'), ((4997, 5071), 'matplotlib.pyplot.plot', 'plt.plot', (['x[maskConstPosAng]', 'y[maskConstPosAng]', '"""rx"""'], {'lw': '(3)', 'markersize': '(1)'}), "(x[maskConstPosAng], y[maskConstPosAng], 'rx', lw=3, markersize=1)\n", (5005, 5071), True, 'import matplotlib.pyplot as plt\n'), ((5076, 5150), 'matplotlib.pyplot.plot', 'plt.plot', (['x[maskConstNegAng]', 'y[maskConstNegAng]', '"""bx"""'], {'lw': '(3)', 'markersize': '(1)'}), "(x[maskConstNegAng], y[maskConstNegAng], 'bx', lw=3, markersize=1)\n", (5084, 5150), True, 'import matplotlib.pyplot as plt\n'), ((5357, 5377), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$i(t)$"""'], {}), "('$i(t)$')\n", (5367, 5377), True, 'import matplotlib.pyplot as plt\n'), ((5383, 5407), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\tau(t)$"""'], {}), "('$\\\\tau(t)$')\n", (5393, 5407), True, 'import matplotlib.pyplot as plt\n'), ((5412, 5464), 'matplotlib.pyplot.title', 'plt.title', (["('Static experiment - Joint ' + JOINT_NAME)"], {}), "('Static experiment - Joint ' + JOINT_NAME)\n", (5421, 5464), True, 'import matplotlib.pyplot as plt\n'), ((1026, 1038), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1036, 1038), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1071), 'matplotlib.pyplot.plot', 'plt.plot', (['enc'], {'label': '"""q"""'}), "(enc, label='q')\n", (1055, 1071), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1198), 'matplotlib.pyplot.plot', 'plt.plot', (['q_const'], {'label': '"""q_const"""'}), "(q_const, label='q_const')\n", (1172, 
1198), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1219), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1217, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1802), 'identification_utils.solve1stOrderLeastSquare', 'solve1stOrderLeastSquare', (['x[maskNegErr]', 'y[maskNegErr]'], {}), '(x[maskNegErr], y[maskNegErr])\n', (1772, 1802), False, 'from identification_utils import solve1stOrderLeastSquare\n'), ((1117, 1145), 'numpy.logical_not', 'np.logical_not', (['maskConstAng'], {}), '(maskConstAng)\n', (1131, 1145), True, 'import numpy as np\n'), ((1372, 1381), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1378, 1381), True, 'import numpy as np\n'), ((1434, 1443), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1440, 1443), True, 'import numpy as np\n'), ((1862, 1916), 'identification_utils.solve1stOrderLeastSquare', 'solve1stOrderLeastSquare', (['x[maskPosErr]', 'y[maskPosErr]'], {}), '(x[maskPosErr], y[maskPosErr])\n', (1886, 1916), False, 'from identification_utils import solve1stOrderLeastSquare\n'), ((1948, 2002), 'identification_utils.solve1stOrderLeastSquare', 'solve1stOrderLeastSquare', (['x[maskNegErr]', 'y[maskNegErr]'], {}), '(x[maskNegErr], y[maskNegErr])\n', (1972, 2002), False, 'from identification_utils import solve1stOrderLeastSquare\n'), ((2024, 2078), 'identification_utils.solve1stOrderLeastSquare', 'solve1stOrderLeastSquare', (['x[maskPosErr]', 'y[maskPosErr]'], {}), '(x[maskPosErr], y[maskPosErr])\n', (2048, 2078), False, 'from identification_utils import solve1stOrderLeastSquare\n'), ((2342, 2354), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2352, 2354), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2419), 'matplotlib.pyplot.plot', 'plt.plot', (['x_neg', 'y_neg', '"""."""'], {'lw': '(3)', 'markersize': '(1)', 'c': '"""0.5"""'}), "(x_neg, y_neg, '.', lw=3, markersize=1, c='0.5')\n", (2371, 2419), True, 'import matplotlib.pyplot as plt\n'), ((2533, 2553), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""$i(t)$"""'], {}), "('$i(t)$')\n", (2543, 2553), True, 'import matplotlib.pyplot as plt\n'), ((2563, 2583), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$u(t)$"""'], {}), "('$u(t)$')\n", (2573, 2583), True, 'import matplotlib.pyplot as plt\n'), ((2593, 2651), 'matplotlib.pyplot.title', 'plt.title', (["('Negative current errors - Joint ' + JOINT_NAME)"], {}), "('Negative current errors - Joint ' + JOINT_NAME)\n", (2602, 2651), True, 'import matplotlib.pyplot as plt\n'), ((2721, 2733), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2731, 2733), True, 'import matplotlib.pyplot as plt\n'), ((2742, 2798), 'matplotlib.pyplot.plot', 'plt.plot', (['x_pos', 'y_pos', '"""."""'], {'lw': '(3)', 'markersize': '(1)', 'c': '"""0.5"""'}), "(x_pos, y_pos, '.', lw=3, markersize=1, c='0.5')\n", (2750, 2798), True, 'import matplotlib.pyplot as plt\n'), ((2912, 2932), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$i(t)$"""'], {}), "('$i(t)$')\n", (2922, 2932), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2962), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$u(t)$"""'], {}), "('$u(t)$')\n", (2952, 2962), True, 'import matplotlib.pyplot as plt\n'), ((2972, 3030), 'matplotlib.pyplot.title', 'plt.title', (["('Positive current errors - Joint ' + JOINT_NAME)"], {}), "('Positive current errors - Joint ' + JOINT_NAME)\n", (2981, 3030), True, 'import matplotlib.pyplot as plt\n'), ((3039, 3049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3047, 3049), True, 'import matplotlib.pyplot as plt\n')] |
# Modified by Seungjae @ 2021. 07. 31
# Copyright (c) 2020 Huawei Technologies Co., Ltd.
# Licensed under CC BY-NC-SA 4.0 (Attribution-NonCommercial-ShareAlike 4.0 International) (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
#
# The code is released for academic research use only. For commercial use, please contact Huawei Technologies Co., Ltd.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file contains content licensed by https://github.com/xinntao/BasicSR/blob/master/LICENSE/LICENSE
import glob
import sys
from collections import OrderedDict
from natsort import natsort
import options.options as option
from Measure import Measure, psnr
from models.SRFlow_model import SRFlowModel
from imresize import imresize
from models import create_model
import torch
from utils.util import opt_get
import numpy as np
import pandas as pd
import os
import cv2
import argparse
from utils import util
import torchvision
from torchvision import transforms
def fiFindByWildcard(wildcard):
    """Return every path matching *wildcard* (recursive), naturally sorted."""
    matches = glob.glob(wildcard, recursive=True)
    return natsort.natsorted(matches)
def load_model(conf_path):
    """Parse the YAML config at *conf_path*, build the network and restore
    its pretrained weights.

    Returns the (model, opt) pair.
    """
    parsed = option.parse(conf_path, is_train=False)
    parsed['gpu_ids'] = None  # force CPU-side loading of the checkpoint
    opt = option.dict_to_nonedict(parsed)
    model = create_model(opt)
    weights_path = opt_get(opt, ['model_path'], None)
    model.load_network(load_path=weights_path, network=model.netG)
    return model, opt
def predict(model, lr):
    """Run the model on a low-resolution image array and return the SR output."""
    model.feed_data({"LQ": t(lr)}, need_GT=False)
    model.test()
    outputs = model.get_current_visuals(need_GT=False)
    # Prefer the 'rlt' key; fall back to 'SR' when it is absent.
    if 'rlt' in outputs:
        return outputs['rlt']
    return outputs.get("SR")
def t(array):
    """Convert an HxWxC uint8 image array to a 1xCxHxW float tensor in [0, 1]."""
    chw = array.transpose([2, 0, 1])
    batched = np.expand_dims(chw, axis=0).astype(np.float32)
    return torch.Tensor(batched) / 255
def rgb(t):
    """Convert a CxHxW (or 1xCxHxW) float tensor in [0, 1] to an HxWxC uint8 array."""
    if len(t.shape) == 4:
        t = t[0]  # drop the batch dimension
    hwc = t.detach().cpu().numpy().transpose([1, 2, 0])
    return (np.clip(hwc, 0, 1) * 255).astype(np.uint8)
def imread(path):
    """Load an image with OpenCV and reorder its channels from BGR to RGB."""
    bgr = cv2.imread(path)
    return bgr[:, :, [2, 1, 0]]
def imwrite(path, img):
    """Write an RGB image to *path* (converted to BGR for OpenCV), creating parent dirs."""
    os.makedirs(os.path.dirname(path), exist_ok=True)
    bgr = img[:, :, [2, 1, 0]]
    cv2.imwrite(path, bgr)
def imCropCenter(img, size):
    """Crop a centred *size* x *size* patch, clipped to the image bounds."""
    h, w, _ = img.shape
    top = max(h // 2 - size // 2, 0)
    left = max(w // 2 - size // 2, 0)
    bottom = min(top + size, h)
    right = min(left + size, w)
    return img[top:bottom, left:right]
def impad(img, top=0, bottom=0, left=0, right=0, color=255):
    """Reflect-pad an HxWxC image by the given per-side margins.

    NOTE(review): *color* is accepted but unused -- 'reflect' mode mirrors
    edge pixels rather than filling with a constant. Confirm whether
    constant-color padding was intended.
    """
    pad_widths = [(top, bottom), (left, right), (0, 0)]
    return np.pad(img, pad_widths, 'reflect')
def sample_data(path, batch_size, image_size):
    """Yield (images, labels) batches from an ImageFolder forever.

    When the underlying loader is exhausted, a fresh one is created so the
    generator never raises StopIteration.
    """
    transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
    ])
    dataset = torchvision.datasets.ImageFolder(path, transform=transform)
    loader = iter(torch.utils.data.DataLoader(dataset, shuffle=False,
                                             batch_size=batch_size))
    while True:
        try:
            yield next(loader)
        except StopIteration:
            # NOTE(review): the restart loader uses num_workers=4 while the
            # initial one uses the default -- presumably unintentional.
            loader = iter(torch.utils.data.DataLoader(
                dataset, shuffle=False, batch_size=batch_size, num_workers=4
            ))
            yield next(loader)
def main():
    """Run SRFlow super-resolution over a CelebA test folder and record metrics.

    For every image: super-resolve the 4x-downscaled version, save the result
    as PNG, measure PSNR/SSIM/LPIPS against the ground truth, and append the
    row to a CSV that is atomically renamed on each update.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--opt', type=str, default='SRFlow/code/confs/SRFlow_CelebA_4X_seungjae_load_for_test.yml',
                        help='Path to option YMAL file.')
    parser.add_argument('--p', type=str, default='/mnt/HDD3_coursework/srdualglow/celeba_small_test',
                        help='Path to celeba_small_test')
    parser.add_argument('--exp_name', type=str,
                        default='SRFlow',
                        help='exp name')
    args = parser.parse_args()
    ### For SRFlow
    opt = option.parse(args.opt, is_train=True)
    opt = option.dict_to_nonedict(opt)
    # NOTE(review): conf_path is hard-coded here even though --opt is
    # configurable above; the 'conf' label may not match the actual config.
    conf_path = 'SRFlow/code/confs/SRFlow_CelebA_4X_seungjae_load_for_test.yml'
    conf = conf_path.split('/')[-1].replace('.yml', '')
    model = SRFlowModel(opt=opt, step=0)
    ### Load dataset
    # Two parallel iterators over the same folder: 32x32 LR and 128x128 HR.
    dataset_lr = iter(sample_data(args.p, 1, 128 // 4))
    dataset_hr = iter(sample_data(args.p, 1, 128))
    dataset = torchvision.datasets.ImageFolder(args.p)
    leng = len(dataset)
    test_dir = f'./{args.exp_name}_results'
    os.makedirs(test_dir, exist_ok=True)
    print(f"Out dir: {test_dir}")
    measure = Measure(use_gpu=False)
    # Results are written to a "_"-suffixed temp file and renamed each
    # iteration so an interrupted run leaves a resumable partial CSV.
    fname = f'measure_full.csv'
    fname_tmp = fname + "_"
    path_out_measures = os.path.join(test_dir, fname_tmp)
    path_out_measures_final = os.path.join(test_dir, fname)
    print(path_out_measures)
    # Resume from a previous (partial or final) measurements file if present.
    if os.path.isfile(path_out_measures_final):
        df = pd.read_csv(path_out_measures_final)
    elif os.path.isfile(path_out_measures):
        df = pd.read_csv(path_out_measures)
    else:
        df = None
    for idx_test in range(leng):
        lr, _ = next(dataset_lr)
        # print(lr.size())
        # lr = lr.cpu()
        hr, _ = next(dataset_hr)
        # print(hr.size())
        ### Inference part (Currently for SRFlow)
        heat = opt['heat']
        sr_t = model.get_sr(lq=lr, heat=heat)
        sr = rgb(torch.clamp(sr_t, 0, 1)) # Return np
        hr = rgb(hr) # To make numpy array
        # IMSAVE
        path_out_sr = f'{test_dir}/{idx_test:06d}.png'
        imwrite(path_out_sr, sr)
        # MEASURE
        meas = OrderedDict(conf=conf, heat=heat, name=idx_test)
        meas['PSNR'], meas['SSIM'], meas['LPIPS'] = measure.measure(sr, hr)
        str_out = format_measurements(meas)
        print(str_out)
        # SAVE CSV
        df = pd.DataFrame([meas]) if df is None else pd.concat([pd.DataFrame([meas]), df])
        df.to_csv(path_out_measures + "_", index=False)
        os.rename(path_out_measures + "_", path_out_measures)
    # Promote the temp CSV to its final name and report the mean metrics.
    # NOTE(review): if the dataset is empty and no prior CSV exists, df is
    # still None here and the calls below would fail.
    df.to_csv(path_out_measures, index=False)
    os.rename(path_out_measures, path_out_measures_final)
    str_out = format_measurements(df.mean())
    print(f"Results in: {path_out_measures_final}")
    print('Mean: ' + str_out)
def format_measurements(meas):
    """Render a mapping as 'key: value, ...', formatting floats to 2 decimals."""
    def _fmt(value):
        # Only floats get the fixed-precision treatment; everything else
        # is interpolated unchanged.
        return f"{value:0.2f}" if isinstance(value, float) else value
    return ", ".join(f"{k}: {_fmt(v)}" for k, v in meas.items())
# Script entry point: run the evaluation loop when invoked directly.
if __name__ == "__main__":
    main()
| [
"Measure.Measure",
"pandas.read_csv",
"models.SRFlow_model.SRFlowModel",
"utils.util.opt_get",
"argparse.ArgumentParser",
"torchvision.datasets.ImageFolder",
"pandas.DataFrame",
"torchvision.transforms.ToTensor",
"glob.glob",
"collections.OrderedDict",
"os.rename",
"os.path.isfile",
"os.path... | [((1521, 1560), 'options.options.parse', 'option.parse', (['conf_path'], {'is_train': '(False)'}), '(conf_path, is_train=False)\n', (1533, 1560), True, 'import options.options as option\n'), ((1597, 1625), 'options.options.dict_to_nonedict', 'option.dict_to_nonedict', (['opt'], {}), '(opt)\n', (1620, 1625), True, 'import options.options as option\n'), ((1638, 1655), 'models.create_model', 'create_model', (['opt'], {}), '(opt)\n', (1650, 1655), False, 'from models import create_model\n'), ((1674, 1708), 'utils.util.opt_get', 'opt_get', (['opt', "['model_path']", 'None'], {}), "(opt, ['model_path'], None)\n", (1681, 1708), False, 'from utils.util import opt_get\n'), ((2409, 2448), 'cv2.imwrite', 'cv2.imwrite', (['path', 'img[:, :, [2, 1, 0]]'], {}), '(path, img[:, :, [2, 1, 0]])\n', (2420, 2448), False, 'import cv2\n'), ((2778, 2840), 'numpy.pad', 'np.pad', (['img', '[(top, bottom), (left, right), (0, 0)]', '"""reflect"""'], {}), "(img, [(top, bottom), (left, right), (0, 0)], 'reflect')\n", (2784, 2840), True, 'import numpy as np\n'), ((3092, 3151), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', (['path'], {'transform': 'transform'}), '(path, transform=transform)\n', (3124, 3151), False, 'import torchvision\n'), ((3165, 3239), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(False)', 'batch_size': 'batch_size'}), '(dataset, shuffle=False, batch_size=batch_size)\n', (3192, 3239), False, 'import torch\n'), ((3591, 3616), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3614, 3616), False, 'import argparse\n'), ((4148, 4185), 'options.options.parse', 'option.parse', (['args.opt'], {'is_train': '(True)'}), '(args.opt, is_train=True)\n', (4160, 4185), True, 'import options.options as option\n'), ((4196, 4224), 'options.options.dict_to_nonedict', 'option.dict_to_nonedict', (['opt'], {}), '(opt)\n', (4219, 4224), True, 'import options.options as option\n'), ((4375, 4403), 
'models.SRFlow_model.SRFlowModel', 'SRFlowModel', ([], {'opt': 'opt', 'step': '(0)'}), '(opt=opt, step=0)\n', (4386, 4403), False, 'from models.SRFlow_model import SRFlowModel\n'), ((4549, 4589), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', (['args.p'], {}), '(args.p)\n', (4581, 4589), False, 'import torchvision\n'), ((4663, 4699), 'os.makedirs', 'os.makedirs', (['test_dir'], {'exist_ok': '(True)'}), '(test_dir, exist_ok=True)\n', (4674, 4699), False, 'import os\n'), ((4749, 4771), 'Measure.Measure', 'Measure', ([], {'use_gpu': '(False)'}), '(use_gpu=False)\n', (4756, 4771), False, 'from Measure import Measure, psnr\n'), ((4857, 4890), 'os.path.join', 'os.path.join', (['test_dir', 'fname_tmp'], {}), '(test_dir, fname_tmp)\n', (4869, 4890), False, 'import os\n'), ((4921, 4950), 'os.path.join', 'os.path.join', (['test_dir', 'fname'], {}), '(test_dir, fname)\n', (4933, 4950), False, 'import os\n'), ((4988, 5027), 'os.path.isfile', 'os.path.isfile', (['path_out_measures_final'], {}), '(path_out_measures_final)\n', (5002, 5027), False, 'import os\n'), ((6214, 6267), 'os.rename', 'os.rename', (['path_out_measures', 'path_out_measures_final'], {}), '(path_out_measures, path_out_measures_final)\n', (6223, 6267), False, 'import os\n'), ((1445, 1480), 'glob.glob', 'glob.glob', (['wildcard'], {'recursive': '(True)'}), '(wildcard, recursive=True)\n', (1454, 1480), False, 'import glob\n'), ((2291, 2307), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (2301, 2307), False, 'import cv2\n'), ((2367, 2388), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2382, 2388), False, 'import os\n'), ((5042, 5078), 'pandas.read_csv', 'pd.read_csv', (['path_out_measures_final'], {}), '(path_out_measures_final)\n', (5053, 5078), True, 'import pandas as pd\n'), ((5088, 5121), 'os.path.isfile', 'os.path.isfile', (['path_out_measures'], {}), '(path_out_measures)\n', (5102, 5121), False, 'import os\n'), ((5740, 5788), 'collections.OrderedDict', 
'OrderedDict', ([], {'conf': 'conf', 'heat': 'heat', 'name': 'idx_test'}), '(conf=conf, heat=heat, name=idx_test)\n', (5751, 5788), False, 'from collections import OrderedDict\n'), ((6109, 6162), 'os.rename', 'os.rename', (["(path_out_measures + '_')", 'path_out_measures'], {}), "(path_out_measures + '_', path_out_measures)\n", (6118, 6162), False, 'import os\n'), ((2948, 2977), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_size'], {}), '(image_size)\n', (2965, 2977), False, 'from torchvision import transforms\n'), ((2991, 3024), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_size'], {}), '(image_size)\n', (3012, 3024), False, 'from torchvision import transforms\n'), ((3038, 3059), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3057, 3059), False, 'from torchvision import transforms\n'), ((5136, 5166), 'pandas.read_csv', 'pd.read_csv', (['path_out_measures'], {}), '(path_out_measures)\n', (5147, 5166), True, 'import pandas as pd\n'), ((5516, 5539), 'torch.clamp', 'torch.clamp', (['sr_t', '(0)', '(1)'], {}), '(sr_t, 0, 1)\n', (5527, 5539), False, 'import torch\n'), ((5966, 5986), 'pandas.DataFrame', 'pd.DataFrame', (['[meas]'], {}), '([meas])\n', (5978, 5986), True, 'import pandas as pd\n'), ((3379, 3472), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'shuffle': '(False)', 'batch_size': 'batch_size', 'num_workers': '(4)'}), '(dataset, shuffle=False, batch_size=batch_size,\n num_workers=4)\n', (3406, 3472), False, 'import torch\n'), ((6017, 6037), 'pandas.DataFrame', 'pd.DataFrame', (['[meas]'], {}), '([meas])\n', (6029, 6037), True, 'import pandas as pd\n')] |
"""NetCDF reader.
Pupynere implements a PUre PYthon NEtcdf REader.
"""
__author__ = "<NAME> <<EMAIL>>"
import struct
import mmap
from numpy import ndarray, zeros, array
# Tag and type constants from the NetCDF "classic" binary header; each is
# the big-endian byte sequence of the corresponding on-disk value.
# NOTE(review): these are `str` literals compared against bytes read from a
# file opened in 'rb' mode, which only matches on Python 2 (str == bytes).
ABSENT = '\x00' * 8
ZERO = '\x00' * 4
# External data types (nc_type codes 1..6).
NC_BYTE = '\x00\x00\x00\x01'
NC_CHAR = '\x00\x00\x00\x02'
NC_SHORT = '\x00\x00\x00\x03'
NC_INT = '\x00\x00\x00\x04'
NC_FLOAT = '\x00\x00\x00\x05'
NC_DOUBLE = '\x00\x00\x00\x06'
# Header list tags (dimension / variable / attribute sections).
NC_DIMENSION = '\x00\x00\x00\n'
NC_VARIABLE = '\x00\x00\x00\x0b'
NC_ATTRIBUTE = '\x00\x00\x00\x0c'
class NetCDFFile(object):
    """A NetCDF file parser.

    Parses the header of a NetCDF "classic" file and exposes its
    ``dimensions``, ``attributes`` and ``variables`` (the latter as
    mmap-backed NetCDFVariable objects).

    NOTE(review): written for Python 2 -- the buffer is opened in binary
    mode but its contents are compared against ``str`` constants, which
    only works where str and bytes are the same type.
    """
    def __init__(self, file):
        self._buffer = open(file, 'rb')
        self._parse()
    def read(self, size=-1):
        """Alias for reading the file buffer."""
        return self._buffer.read(size)
    def _parse(self):
        """Initial parsing of the header."""
        # Check magic bytes.
        assert self.read(3) == 'CDF'
        # Read version byte. (1 = classic 32-bit offsets, 2 = 64-bit offsets;
        # used later to pick the offset width in _read_var.)
        byte = self.read(1)
        self.version_byte = struct.unpack('>b', byte)[0]
        # Read header info.
        self._numrecs()
        self._dim_array()
        self._gatt_array()
        self._var_array()
    def _numrecs(self):
        """Read number of records."""
        self._nrecs = self._unpack_int()
    def _dim_array(self):
        """Read a dict with dimensions names and sizes."""
        assert self.read(4) in [ZERO, NC_DIMENSION]
        count = self._unpack_int()
        self.dimensions = {}
        self._dims = []
        for dim in range(count):
            name = self._read_string()
            length = self._unpack_int()
            if length == 0:
                length = None # record dimension
            self.dimensions[name] = length
            self._dims.append(name) # preserve dim order
    def _gatt_array(self):
        """Read global attributes."""
        self.attributes = self._att_array()
        # Update __dict__ for compatibility with S.IO.N
        self.__dict__.update(self.attributes)
    def _att_array(self):
        """Read a dict with attributes."""
        assert self.read(4) in [ZERO, NC_ATTRIBUTE]
        count = self._unpack_int()
        # Read attributes.
        attributes = {}
        for attribute in range(count):
            name = self._read_string()
            nc_type = self._unpack_int()
            n = self._unpack_int()
            # Read value for attributes.
            attributes[name] = self._read_values(n, nc_type)
        return attributes
    def _var_array(self):
        """Read all variables."""
        assert self.read(4) in [ZERO, NC_VARIABLE]
        # Read size of each record, in bytes.
        self._read_recsize()
        # Read variables.
        self.variables = {}
        count = self._unpack_int()
        for variable in range(count):
            name = self._read_string()
            self.variables[name] = self._read_var()
    def _read_recsize(self):
        """Read all variables and compute record bytes.

        This is a look-ahead pass: it walks the whole variable list to sum
        the vsize of every record variable, then rewinds the buffer so the
        real parse in _var_array starts from the same position.
        """
        pos = self._buffer.tell()
        recsize = 0
        count = self._unpack_int()
        for variable in range(count):
            name = self._read_string()
            n = self._unpack_int()
            isrec = False
            for i in range(n):
                dimid = self._unpack_int()
                name = self._dims[dimid]
                dim = self.dimensions[name]
                # A variable is a record variable when its first dimension
                # is the (unlimited) record dimension.
                if dim is None and i == 0:
                    isrec = True
            vsize = self._unpack_int()
            if isrec:
                recsize += vsize
        self._recsize = recsize
        self._buffer.seek(pos)
    def _read_var(self):
        """Read one variable's metadata and build its mmap-backed array."""
        dimensions = []
        shape = []
        n = self._unpack_int()
        isrec = False
        for i in range(n):
            dimid = self._unpack_int()
            name = self._dims[dimid]
            dimensions.append(name)
            dim = self.dimensions[name]
            # Record dimension: substitute the current number of records.
            if dim is None and i == 0:
                dim = self._nrecs
                isrec = True
            shape.append(dim)
        dimensions = tuple(dimensions)
        shape = tuple(shape)
        attributes = self._att_array()
        nc_type = self._unpack_int()
        vsize = self._unpack_int()
        # Read offset. Width depends on the format version byte:
        # 32-bit for version 1, 64-bit for version 2.
        begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
        return NetCDFVariable(
            self._buffer.fileno(), nc_type, vsize, begin, shape, dimensions,
            attributes, isrec, self._recsize)
    def _read_values(self, n, nc_type):
        """Read *n* values of *nc_type*; returns an array, or a string for chars."""
        bytes = [1, 1, 2, 4, 4, 8]
        typecodes = ['b', 'c', 'h', 'i', 'f', 'd']
        count = n * bytes[nc_type-1]
        values = self.read(count)
        typecode = typecodes[nc_type-1]
        if nc_type != 2: # not char
            values = struct.unpack('>%s' % (typecode * n), values)
            values = array(values, dtype=typecode)
        else:
            # Remove EOL terminator.
            if values.endswith('\x00'):
                values = values[:-1]
        return values
    def _unpack_int(self):
        """Read a big-endian signed 32-bit integer."""
        return struct.unpack('>i', self.read(4))[0]
    _unpack_int32 = _unpack_int
    def _unpack_int64(self):
        """Read a big-endian signed 64-bit integer."""
        return struct.unpack('>q', self.read(8))[0]
    def _read_string(self):
        """Read a length-prefixed string.

        NOTE(review): the NetCDF classic format pads names to a 4-byte
        boundary; this reader does not appear to consume that padding --
        verify against the file-format spec.
        """
        count = struct.unpack('>i', self.read(4))[0]
        s = self.read(count)
        # Remove EOL terminator.
        if s.endswith('\x00'):
            s = s[:-1]
        return s
    def close(self):
        """Close the underlying file buffer."""
        self._buffer.close()
class NetCDFVariable(object):
    """A single NetCDF variable whose data is memory-mapped from the file.

    Exposes ``__array_interface__`` so numpy can consume the data without
    copying it out of the mapped region.
    """
    def __init__(self, fileno, nc_type, vsize, begin, shape, dimensions,
            attributes, isrec=False, recsize=0):
        self._nc_type = nc_type
        self._vsize = vsize
        self._begin = begin
        self.shape = shape
        self.dimensions = dimensions
        self.attributes = attributes # for ``dap.plugins.netcdf``
        self.__dict__.update(attributes)
        self._is_record = isrec
        # Number of bytes and type.
        self._bytes = [1, 1, 2, 4, 4, 8][self._nc_type-1]
        type_ = ['i', 'S', 'i', 'i', 'f', 'f'][self._nc_type-1]
        # Big-endian numpy dtype string, e.g. '>f4'.
        dtype = '>%s%d' % (type_, self._bytes)
        bytes = self._begin + self._vsize
        if isrec:
            # Record variables are not stored contiguosly on disk, so we
            # need to create a separate array for each record.
            self.__array_data__ = zeros(shape, dtype)
            bytes += (shape[0] - 1) * recsize
            for n in range(shape[0]):
                # Each record n lives recsize bytes after the previous one.
                offset = self._begin + (n * recsize)
                mm = mmap.mmap(fileno, bytes, access=mmap.ACCESS_READ)
                # NOTE(review): order=0 is passed where ndarray expects
                # 'C'/'F' -- presumably treated as C order; confirm against
                # the numpy version this targets.
                self.__array_data__[n] = ndarray.__new__(
                        ndarray, shape[1:], dtype=dtype, buffer=mm,
                        offset=offset, order=0)
        else:
            # Create buffer and data.
            mm = mmap.mmap(fileno, bytes, access=mmap.ACCESS_READ)
            self.__array_data__ = ndarray.__new__(
                    ndarray, shape, dtype=dtype, buffer=mm,
                    offset=self._begin, order=0)
        # N-D array interface
        self.__array_interface__ = {'shape': shape,
                'typestr': dtype,
                'data': self.__array_data__,
                'version': 3,
                }
    def __getitem__(self, index):
        # Delegate indexing/slicing straight to the backing array.
        return self.__array_data__.__getitem__(index)
    def getValue(self):
        """For scalars."""
        return self.__array_data__.item()
    def typecode(self):
        """Return the single-character typecode for this variable's nc_type."""
        return ['b', 'c', 'h', 'i', 'f', 'd'][self._nc_type-1]
def _test():
import doctest
doctest.testmod()
| [
"mmap.mmap",
"numpy.ndarray.__new__",
"numpy.array",
"numpy.zeros",
"struct.unpack",
"doctest.testmod"
] | [((7615, 7632), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (7630, 7632), False, 'import doctest\n'), ((980, 1005), 'struct.unpack', 'struct.unpack', (['""">b"""', 'byte'], {}), "('>b', byte)\n", (993, 1005), False, 'import struct\n'), ((4755, 4800), 'struct.unpack', 'struct.unpack', (["('>%s' % (typecode * n))", 'values'], {}), "('>%s' % (typecode * n), values)\n", (4768, 4800), False, 'import struct\n'), ((4822, 4851), 'numpy.array', 'array', (['values'], {'dtype': 'typecode'}), '(values, dtype=typecode)\n', (4827, 4851), False, 'from numpy import ndarray, zeros, array\n'), ((6351, 6370), 'numpy.zeros', 'zeros', (['shape', 'dtype'], {}), '(shape, dtype)\n', (6356, 6370), False, 'from numpy import ndarray, zeros, array\n'), ((6814, 6863), 'mmap.mmap', 'mmap.mmap', (['fileno', 'bytes'], {'access': 'mmap.ACCESS_READ'}), '(fileno, bytes, access=mmap.ACCESS_READ)\n', (6823, 6863), False, 'import mmap\n'), ((6898, 6986), 'numpy.ndarray.__new__', 'ndarray.__new__', (['ndarray', 'shape'], {'dtype': 'dtype', 'buffer': 'mm', 'offset': 'self._begin', 'order': '(0)'}), '(ndarray, shape, dtype=dtype, buffer=mm, offset=self._begin,\n order=0)\n', (6913, 6986), False, 'from numpy import ndarray, zeros, array\n'), ((6529, 6578), 'mmap.mmap', 'mmap.mmap', (['fileno', 'bytes'], {'access': 'mmap.ACCESS_READ'}), '(fileno, bytes, access=mmap.ACCESS_READ)\n', (6538, 6578), False, 'import mmap\n'), ((6620, 6707), 'numpy.ndarray.__new__', 'ndarray.__new__', (['ndarray', 'shape[1:]'], {'dtype': 'dtype', 'buffer': 'mm', 'offset': 'offset', 'order': '(0)'}), '(ndarray, shape[1:], dtype=dtype, buffer=mm, offset=offset,\n order=0)\n', (6635, 6707), False, 'from numpy import ndarray, zeros, array\n')] |
#!/usr/bin/python3
import os
import sys
import argparse
import gensim.models
import pickle
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy
import numpy as np
import os
import sys
import argparse
import gensim.models
import pickle
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy
import numpy as np
from scipy.spatial import procrustes
from scipy.spatial.distance import cosine
# parse command line args
parser = argparse.ArgumentParser(description = "Processes semantic drift over time.")
parser.add_argument("--input", "-i", default = "./preprocessed/", help = "the directory containing the text files", metavar = "input_dir")
parser.add_argument("--output", "-o", default = "./output/", help = "the directory into which to place the embedding and result files", metavar = "output_dir")
parser.add_argument("--smoothing", "-s", type = int, default = 1, help = "the amount of smoothing, in years")
# Fixed copy-pasted help text: --topn controls how many similar words are
# considered per word, not smoothing.
parser.add_argument("--topn", "-t", type = int, default = 10, help = "the number of most-similar words to consider for each word")
# NOTE(review): type=bool is a known argparse pitfall -- bool("False") is
# True, so any non-empty value enables csv output; consider action="store_true".
parser.add_argument("--csv", "-c", type = bool, default = False, help = "output .csv files with detailed information on each word")
parser.add_argument("--dimensionality", "-d", type = int, default = 50, help = "dimensionality to use for embeddings")
parser.add_argument("start_year", type = int, help = "the year from which to start calculating drift")
parser.add_argument("end_year", type = int, help = "the year until which to calculate drift")
ns = parser.parse_args()
# Unpack the parsed arguments into the module-level names used below.
start_year = ns.start_year
end_year = ns.end_year
window_len = ns.smoothing
input_dir = ns.input
output_dir = ns.output
dimensionality = ns.dimensionality
csv = ns.csv
top_n = ns.topn
# map each time window to a sentence list and an embedding model
sentence_sets = {}
models = {}
# Sanity-check the requested year range before doing any work.
if end_year < start_year :
    print("Fatal: End year must be after start year", file = sys.stderr)
    sys.exit(2)
# make models
# Train one Word2Vec model per year over a sliding window of sentences:
# each year's text contributes to window_len consecutive yearly sets.
print("Making models...", end = "\r")
year_range = end_year + 1 - start_year
i = 1
for year in range(start_year, end_year + 1) :
    try :
        # NOTE(review): `input` shadows the builtin and the handle is never
        # closed; the bare `except` below also hides any error in this
        # block, not just a missing file.
        input = open(input_dir + str(year) + ".txt")
        # normalize, split by sentences
        text = input.read()
        text = text.lower()
        sentences = sent_tokenize(text)
        sentences = [word_tokenize(sent) for sent in sentences]
        # add these sentences to every set in the time window
        for y in range(year, year + window_len) :
            if y not in sentence_sets :
                sentence_sets[y] = []
            sentence_sets[y] += sentences
    except :
        print("Could not find data for %d (%d.txt); skipping" % (year, year))
    # make embedding model regardless of whether data for this year was found (use windows)
    # however, there must be something in the set or else this won't work; fail if empty
    # NOTE(review): if no earlier year populated sentence_sets[year], this
    # lookup raises KeyError instead of printing the fatal message.
    if len(sentence_sets[year]) == 0 :
        print("Fatal: No data in window for %d" % (year), file = sys.stderr)
        sys.exit(1)
    else :
        model = gensim.models.Word2Vec(sentence_sets[year], size = dimensionality, window = 5, min_count = 5, workers = 4)
        model.save("%s%d+%dx%d.word2vec" % (output_dir, year, window_len, dimensionality))
        # Keep only the (smaller) word-vector component for later lookups.
        models[year] = model.wv
        # clear sentence set from memory
        del(sentence_sets[year])
    print("Making models (%d/%d)" % (i, year_range), end = "\r")
    i += 1
print()
del(sentence_sets)
# # intermittent load due to errors
# print("Loading models...", end = "\r")
# for year in range(start_year, end_year + 1) :
# try :
# model = gensim.models.Word2Vec.load("%s%d+%dx%d.word2vec" % (output_dir, year, window_len, dimensionality))
# models[year] = model.wv
# del(model)
# print("Loading models (%d - %d)" % (start_year, year), end = "\r")
# except :
# print("Fatal: No model found for %d (%s%d+%dx%d.word2vec)" % (year, output_dir, year, window_len, dimensionality), file = sys.stderr)
# sys.exit(4)
# print()
# consider only words that are in all models
print("Finding overlap...", end = "\r")
# The first model's vocabulary is the candidate pool; keep a word only if
# every yearly model also contains it.
base = list(models.values())[0].vocab
wordset = set()
i = 1
p = 0
for word in base :
    add = True
    for model in models.values() :
        if word not in model :
            add = False
            break
    if add :
        wordset.add(word)
    i += 1
    # Progress display: print only when the integer percentage advances.
    if (100 * i // len(base)) > p :
        p = 100 * i // len(base)
        print("Finding overlap (%d%%; %d words)" % (p, len(wordset)), end = "\r")
print()
# save overlap set
output = open(output_dir + "overlap-%d-%d+%dx%d" % (start_year, end_year, window_len, dimensionality), "wb")
pickle.dump(wordset, output)
output.close()
# For each pair of consecutive years, align the two embedding spaces with a
# Procrustes transformation and report the disparity and mean cosine
# similarity of the shared vocabulary.
for year in range(start_year, end_year):
    model1 = models[year]
    model2 = models[year + 1]
    # Normalisation : same length
    # model1.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
    # model2.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
    i = 1
    p = 0
    # NOTE(review): dict_metric is re-created on every iteration here but is
    # only populated by the per-word loop further down in the script, so
    # only the final (empty at this point) binding survives this loop.
    dict_metric = dict()
    mat1 = list()
    mat2 = list()
    for word in wordset:
        # Aligning the learned embedding spaces using Procrustes transformation
        mat1 += [model1[word]]
        mat2 += [model2[word]]
        i += 1
        if (100 * i // len(wordset)) > p:
            p = (100 * i // len(wordset))
            print("Calculating drift (%d%%)" % (p), end="\r")
    print()
    mat1 = np.array(mat1)
    mat2 = np.array(mat2)
    # Using the distance of the words in the aligned space as a metric of shift
    mtx1, mtx2, disparity = procrustes(mat1, mat2)
    disparity /= dimensionality * len(wordset)
    #similarity = np.sum([cosine(mtx1[row], mtx2[row]) for row in range(len(wordset))]) / len(wordset)
    # Cosine similarity using cosine function from our library
    similarity = np.sum([1 - cosine(mtx1[row], mtx2[row]) for row in range(len(wordset))]) / len(wordset)
    # Cosine similarity using cos_sim function that I implemented
    # similarity = np.sum([cos_sim(mtx1[row], mtx2[row]) for row in range(len(wordset))]) / len(wordset)
    print("cosine SIMILARITY")
    print(disparity, similarity)
def cos_sim(l1, l2):
    """Cosine similarity of two vectors: dot(l1, l2) / (|l1| * |l2|)."""
    return np.dot(l1, l2) / (np.linalg.norm(l1) * np.linalg.norm(l2))
# Per-word drift metric: for each word, collect its top-n neighbours across
# all years, build a year x neighbour similarity matrix, and score drift as
# the summed per-neighbour standard deviation over time.
i = 1
p = 0
for word in wordset:
    union = set()
    rows = dict()
    for year in range(start_year, end_year + 1) :
        similar = models[year].most_similar(positive = [word], topn = top_n)
        union |= set([e[0] for e in similar])
        rows[year] = dict(similar)
    # Fill in similarities for neighbours absent from a year's top-n
    # (0 when the neighbour is missing from that year's vocabulary).
    for year in rows :
        for w in union :
            if w not in rows[year] :
                if w in models[year] :
                    rows[year][w] = models[year].similarity(word, w)
                else :
                    rows[year][w] = 0
    cols = numpy.array([[row[val] for val in sorted(row)] for row in list(rows.values())])
    # rot90 turns year-rows into neighbour-rows, so std is taken over years.
    dict_metric[word] = numpy.sum([numpy.std(row) for row in numpy.rot90(cols)])
    # write exhaustive data to csv
    if csv :
        try :
            # NOTE(review): the inner `for word in union` below clobbers the
            # outer loop variable `word` for the rest of this iteration.
            with open("%s%s-%s-%s+%sx%dt%d.csv" % (output_dir, word, start_year, end_year, window_len, dimensionality, top_n), "w") as output :
                print(",%s" % (",".join(map(str, range(start_year, end_year + 1)))), file = output)
                for word in union :
                    print(word, file = output, end = ",")
                    print(",".join(map(str, [rows[year][word] for year in range(start_year, end_year + 1)])), file = output)
                print("", file = output)
                output.close()
        except :
            print("Error: could not write file %s%s-%s-%s+%sx%dt%d.csv; skipping" % (output_dir, word, start_year, end_year, window_len, dimensionality, top_n), file = sys.stderr)
    i += 1
    if (100 * i // len(wordset)) > p :
        p = (100 * i // len(wordset))
        print("Calculating drift (%d%%)" % (p), end = "\r")
print()
# sort list
print("Sorting...", end = "\r")
# Ascending by drift metric: the most-drifted words end up at the tail.
drifters = sorted(dict_metric, key = dict_metric.get)
print("Sorted ")
# save sorted list
output = open(output_dir + "sorted-%s-%s+%sx%dt%d" % (start_year, end_year, window_len, dimensionality, top_n), "wb")
pickle.dump(drifters, output)
output.close()
# save metric dict
output = open(output_dir + "metric-%s-%s+%sx%dt%d" % (start_year, end_year, window_len, dimensionality, top_n), "wb")
pickle.dump(dict_metric, output)
output.close()
print()
print("Best:")
# Show the 30 highest-drift words with their scores.
for word in drifters[-30:] :
    print("\t%s\t%d" % (word, dict_metric[word]))
print (drifters) | [
"scipy.spatial.distance.cosine",
"pickle.dump",
"argparse.ArgumentParser",
"numpy.std",
"numpy.linalg.norm",
"nltk.tokenize.word_tokenize",
"numpy.array",
"numpy.dot",
"nltk.tokenize.sent_tokenize",
"numpy.rot90",
"sys.exit",
"scipy.spatial.procrustes"
] | [((477, 551), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Processes semantic drift over time."""'}), "(description='Processes semantic drift over time.')\n", (500, 551), False, 'import argparse\n'), ((4495, 4523), 'pickle.dump', 'pickle.dump', (['wordset', 'output'], {}), '(wordset, output)\n', (4506, 4523), False, 'import pickle\n'), ((8085, 8114), 'pickle.dump', 'pickle.dump', (['drifters', 'output'], {}), '(drifters, output)\n', (8096, 8114), False, 'import pickle\n'), ((8273, 8305), 'pickle.dump', 'pickle.dump', (['dict_metric', 'output'], {}), '(dict_metric, output)\n', (8284, 8305), False, 'import pickle\n'), ((1947, 1958), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1955, 1958), False, 'import sys\n'), ((5308, 5322), 'numpy.array', 'np.array', (['mat1'], {}), '(mat1)\n', (5316, 5322), True, 'import numpy as np\n'), ((5335, 5349), 'numpy.array', 'np.array', (['mat2'], {}), '(mat2)\n', (5343, 5349), True, 'import numpy as np\n'), ((5460, 5482), 'scipy.spatial.procrustes', 'procrustes', (['mat1', 'mat2'], {}), '(mat1, mat2)\n', (5470, 5482), False, 'from scipy.spatial import procrustes\n'), ((6225, 6239), 'numpy.dot', 'np.dot', (['l1', 'l2'], {}), '(l1, l2)\n', (6231, 6239), True, 'import numpy as np\n'), ((6255, 6273), 'numpy.linalg.norm', 'np.linalg.norm', (['l1'], {}), '(l1)\n', (6269, 6273), True, 'import numpy as np\n'), ((6289, 6307), 'numpy.linalg.norm', 'np.linalg.norm', (['l2'], {}), '(l2)\n', (6303, 6307), True, 'import numpy as np\n'), ((2265, 2284), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (2278, 2284), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((2921, 2932), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2929, 2932), False, 'import sys\n'), ((2301, 2320), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['sent'], {}), '(sent)\n', (2314, 2320), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((6928, 6942), 'numpy.std', 
'numpy.std', (['row'], {}), '(row)\n', (6937, 6942), False, 'import numpy\n'), ((6954, 6971), 'numpy.rot90', 'numpy.rot90', (['cols'], {}), '(cols)\n', (6965, 6971), False, 'import numpy\n'), ((5733, 5761), 'scipy.spatial.distance.cosine', 'cosine', (['mtx1[row]', 'mtx2[row]'], {}), '(mtx1[row], mtx2[row])\n', (5739, 5761), False, 'from scipy.spatial.distance import cosine\n')] |
#!/usr/bin/env python3
# Copyright 2020 Lawrence Livermore National Security, LLC and other
# DynIm Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
################################################################################
import unittest
from os.path import dirname, realpath, join
import numpy as np
import dynim
# ------------------------------------------------------------------------------
class TestDynim(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.workspace = join(dirname(realpath(__file__)), 'test_workspace')
self.samples = [dynim.HDPoint(i, np.random.rand(2).astype(np.float32))
for i in range(20)]
super(TestDynim, self).__init__(*args, **kwargs)
def test_sampler_random(self):
sampler = dynim.SamplerRandom('test_id', self.workspace,
min_cands_b4_sel=0, buffer_size=100)
sampler.add_candidates(self.samples)
self.assertTrue(len(sampler.select(5)) == 5)
def test_sampler_importance(self):
training_samples = np.random.rand(5000, 2).astype(np.float32)
hdspace = dynim.HDSpace()
hdspace.setup(2)
hdspace.train(training_samples)
sampler = dynim.SamplerImportance('test_id', self.workspace,
min_cands_b4_sel=0, buffer_size=100,
min_rand_b4_importance=10)
sampler.set_hdspace(hdspace)
sampler.add_candidates(self.samples)
self.assertTrue(len(sampler.select(5)) == 5)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
# ------------------------------------------------------------------------------
| [
"numpy.random.rand",
"dynim.SamplerRandom",
"os.path.realpath",
"unittest.main",
"dynim.HDSpace",
"dynim.SamplerImportance"
] | [((1741, 1756), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1754, 1756), False, 'import unittest\n'), ((845, 932), 'dynim.SamplerRandom', 'dynim.SamplerRandom', (['"""test_id"""', 'self.workspace'], {'min_cands_b4_sel': '(0)', 'buffer_size': '(100)'}), "('test_id', self.workspace, min_cands_b4_sel=0,\n buffer_size=100)\n", (864, 932), False, 'import dynim\n'), ((1193, 1208), 'dynim.HDSpace', 'dynim.HDSpace', ([], {}), '()\n', (1206, 1208), False, 'import dynim\n'), ((1293, 1411), 'dynim.SamplerImportance', 'dynim.SamplerImportance', (['"""test_id"""', 'self.workspace'], {'min_cands_b4_sel': '(0)', 'buffer_size': '(100)', 'min_rand_b4_importance': '(10)'}), "('test_id', self.workspace, min_cands_b4_sel=0,\n buffer_size=100, min_rand_b4_importance=10)\n", (1316, 1411), False, 'import dynim\n'), ((572, 590), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (580, 590), False, 'from os.path import dirname, realpath, join\n'), ((1132, 1155), 'numpy.random.rand', 'np.random.rand', (['(5000)', '(2)'], {}), '(5000, 2)\n', (1146, 1155), True, 'import numpy as np\n'), ((652, 669), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (666, 669), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from wildfires.data import GFEDv4
from wildfires.utils import get_land_mask
from empirical_fire_modelling.cache import cache
@cache
def get_gfed4_variogram_data(i):
gfed4 = GFEDv4()
if i == -1:
title = "Mean GFED4 BA"
ba = gfed4.get_mean_dataset().cube
else:
ba = gfed4.cube[i]
title = f"GFED4 BA {ensure_datetime(ba.coord('time').cell(0).point):%Y-%m}"
ba.data.mask = ~get_land_mask()
latitudes = ba.coord("latitude").points
longitudes = ba.coord("longitude").points
coords = []
for lon in longitudes:
for lat in latitudes:
coords.append((lat, lon))
coords = np.array(coords)
ba_flat = ba.data.ravel()
# Choose indices.
valid_indices = np.where(~ba.data.mask.ravel())[0]
# Random subset.
# inds = np.random.default_rng(0).choice(valid_indices, size=(4000,))
# All indices.
inds = valid_indices
assert inds.shape[0] == valid_indices.shape[0], "All samples should have been used"
# print(f"Max N: {valid_indices.shape[0]:>10d}")
# print(f"Chosen N: {inds.shape[0]:>10d}")
return coords[inds], ba_flat.data[inds], title
| [
"numpy.array",
"wildfires.utils.get_land_mask",
"wildfires.data.GFEDv4"
] | [((223, 231), 'wildfires.data.GFEDv4', 'GFEDv4', ([], {}), '()\n', (229, 231), False, 'from wildfires.data import GFEDv4\n'), ((697, 713), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (705, 713), True, 'import numpy as np\n'), ((465, 480), 'wildfires.utils.get_land_mask', 'get_land_mask', ([], {}), '()\n', (478, 480), False, 'from wildfires.utils import get_land_mask\n')] |
from uuid import UUID, uuid4
import numpy as np
from torchdemon.models import InferenceInputData
def uuid_const() -> UUID:
return UUID("009b7240-c7e5-4df3-8722-c1be32390106")
def uuid_rand() -> UUID:
return uuid4()
def ndarray_randint(*args: int) -> np.ndarray:
return np.random.randint(0, 10, size=args)
def ndarray_rand(*args: int) -> np.ndarray:
return np.random.rand(*args)
def inference_input_data() -> InferenceInputData:
return InferenceInputData(
args=[ndarray_randint(2), ndarray_rand(4)], kwargs={"input": ndarray_rand(6)}
)
| [
"numpy.random.randint",
"uuid.UUID",
"numpy.random.rand",
"uuid.uuid4"
] | [((138, 182), 'uuid.UUID', 'UUID', (['"""009b7240-c7e5-4df3-8722-c1be32390106"""'], {}), "('009b7240-c7e5-4df3-8722-c1be32390106')\n", (142, 182), False, 'from uuid import UUID, uuid4\n'), ((221, 228), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (226, 228), False, 'from uuid import UUID, uuid4\n'), ((289, 324), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': 'args'}), '(0, 10, size=args)\n', (306, 324), True, 'import numpy as np\n'), ((382, 403), 'numpy.random.rand', 'np.random.rand', (['*args'], {}), '(*args)\n', (396, 403), True, 'import numpy as np\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00-utilities.ipynb (unless otherwise specified).
__all__ = ['bayes_search_CV_init', 'bayes_search_CV__fit', 'add_series_index', 'PandasRandomForestRegressor',
'convert_file_to_json', 'encode_file_as_utf8', 'convert_nbs_to_md', 'MyHTMLParser', 'convert_df_to_md',
'extract_div_to_md_table', 'extract_div_to_md_tables', 'clean_md_file_tables', 'get_substring_idxs',
'clean_md_file_img_fps', 'AxTransformer', 'set_date_ticks']
# Cell
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import junix
from html.parser import HTMLParser
from nbdev.export2html import convert_md
from joblib import Parallel, delayed
from scipy.stats import rankdata
from skopt import BayesSearchCV
import os
import codecs
from ipypb import track
from warnings import warn
from functools import partial
from distutils.dir_util import copy_tree
from collections.abc import Iterable, Sized
from collections import defaultdict
import sklearn
from sklearn import linear_model
from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.base import is_classifier, clone
from sklearn.utils.validation import indexable
try:
from sklearn.metrics import check_scoring
except ImportError:
from sklearn.metrics.scorer import check_scoring
# Cell
def bayes_search_CV_init(self, estimator, search_spaces, optimizer_kwargs=None,
n_iter=50, scoring=None, fit_params=None, n_jobs=1,
n_points=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', random_state=None,
error_score='raise', return_train_score=False):
self.search_spaces = search_spaces
self.n_iter = n_iter
self.n_points = n_points
self.random_state = random_state
self.optimizer_kwargs = optimizer_kwargs
self._check_search_space(self.search_spaces)
self.fit_params = fit_params
self.iid = None
super(BayesSearchCV, self).__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
BayesSearchCV.__init__ = bayes_search_CV_init
# Cell
def bayes_search_CV__fit(self, X, y, groups, parameter_iterable):
"""
Actual fitting, performing the search over parameters.
Taken from https://github.com/scikit-learn/scikit-learn/blob/0.18.X
.../sklearn/model_selection/_search.py
"""
estimator = self.estimator
cv = sklearn.model_selection._validation.check_cv(
self.cv, y, classifier=is_classifier(estimator))
self.scorer_ = check_scoring(
self.estimator, scoring=self.scoring)
X, y, groups = indexable(X, y, groups)
n_splits = cv.get_n_splits(X, y, groups)
if self.verbose > 0 and isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
cv_iter = list(cv.split(X, y, groups))
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(sklearn.model_selection._validation._fit_and_score)(
clone(base_estimator),
X, y, self.scorer_,
train, test, self.verbose, parameters,
fit_params=self.fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True, return_parameters=True,
error_score=self.error_score
)
for parameters in parameter_iterable
for train, test in cv_iter)
# if one choose to see train score, "out" will contain train score info
if self.return_train_score:
(train_scores, test_scores, n_test_samples,
fit_time, score_time, parameters) = zip(*out)
else:
from warnings import warn
(fit_failed, test_scores, n_test_samples,
fit_time, score_time, parameters) = zip(*[a.values() for a in out])
candidate_params = parameters[::n_splits]
n_candidates = len(candidate_params)
results = dict()
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_i in range(n_splits):
results["split%d_%s"
% (split_i, key_name)] = array[:, split_i]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array -
array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
# Computed the (weighted) mean and std for test scores alone
# NOTE test_sample counts (weights) remain the same for all candidates n_test_samples
n_test_samples = np.array(n_test_samples[:n_splits],
dtype=np.int)
_store('test_score', test_scores, splits=True, rank=True,
weights=n_test_samples if self.iid else None)
if self.return_train_score:
_store('train_score', train_scores, splits=True)
_store('fit_time', fit_time)
_store('score_time', score_time)
best_index = np.flatnonzero(results["rank_test_score"] == 1)[0]
best_parameters = candidate_params[best_index]
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(np.ma.array,
np.empty(n_candidates,),
mask=True,
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at est_sample_counts = np.array(n_test_samples[:n_splits], key 'params'
results['params'] = candidate_params
self.cv_results_ = results
self.best_index_ = best_index
self.n_splits_ = n_splits
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best_parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
BayesSearchCV._fit = bayes_search_CV__fit
# Cell
def add_series_index(idx_arg_pos=0):
def decorator(func):
def decorator_wrapper(*args, **kwargs):
input_s = args[idx_arg_pos]
assert isinstance(input_s, (pd.Series, pd.DataFrame))
result = pd.Series(func(*args, **kwargs), index=input_s.index)
return result
return decorator_wrapper
return decorator
class PandasRandomForestRegressor(RandomForestRegressor):
def __init__(self, n_estimators=100, *, criterion='mse', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, ccp_alpha=0.0, max_samples=None, score_func=None):
super().__init__(n_estimators=n_estimators, criterion=criterion, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, bootstrap=bootstrap, oob_score=oob_score, n_jobs=n_jobs, random_state=random_state, verbose=verbose, warm_start=warm_start, ccp_alpha=ccp_alpha, max_samples=max_samples)
if score_func is None:
self.score_func = r2_score
else:
self.score_func = score_func
@add_series_index(1)
def predict(self, X):
pred = super().predict(X)
return pred
def score(self, X, y, *args, **kwargs):
y_pred = self.predict(X)
score = self.score_func(y, y_pred, *args, **kwargs)
return score
# Cell
def convert_file_to_json(filepath):
with open(filepath, 'r', encoding='utf8') as f:
contents = f.read()
f.close()
return json.loads(contents)
junix.exporter.convert_file_to_json = convert_file_to_json
def encode_file_as_utf8(fp):
with codecs.open(fp, 'r') as file:
contents = file.read(1048576)
file.close()
if not contents:
pass
else:
with codecs.open(fp, 'w', 'utf-8') as file:
file.write(contents)
def convert_nbs_to_md(nbs_dir, docs_nb_img_dir, docs_dir):
nb_files = [f for f in os.listdir(nbs_dir) if f[-6:]=='.ipynb']
for nb_file in track(nb_files):
nb_fp = f'{nbs_dir}/{nb_file}'
junix.export_images(nb_fp, docs_nb_img_dir)
convert_md(nb_fp, docs_dir, img_path=f'{docs_nb_img_dir}/', jekyll=False)
md_fp = docs_dir + '/'+ nb_file.replace('.ipynb', '') + '.md'
encode_file_as_utf8(md_fp)
# Cell
class MyHTMLParser(HTMLParser):
def __init__(self):
super().__init__()
self.tags = []
def handle_starttag(self, tag, attrs):
self.tags.append(self.get_starttag_text())
def handle_endtag(self, tag):
self.tags.append(f"</{tag}>")
get_substring_idxs = lambda string, substring: [num for num in range(len(string)-len(substring)+1) if string[num:num+len(substring)]==substring]
def convert_df_to_md(df):
idx_col = df.columns[0]
df = df.set_index(idx_col)
if idx_col == 'Unnamed: 0':
df.index.name = ''
table_md = df.to_markdown()
return table_md
def extract_div_to_md_table(start_idx, end_idx, table_and_div_tags, file_txt):
n_start_divs_before = table_and_div_tags[:start_idx].count('<div>')
n_end_divs_before = table_and_div_tags[:end_idx].count('</div>')
div_start_idx = get_substring_idxs(file_txt, '<div>')[n_start_divs_before-1]
div_end_idx = get_substring_idxs(file_txt, '</div>')[n_end_divs_before]
div_txt = file_txt[div_start_idx:div_end_idx]
potential_dfs = pd.read_html(div_txt)
assert len(potential_dfs) == 1, 'Multiple tables were found when there should be only one'
df = potential_dfs[0]
md_table = convert_df_to_md(df)
return div_txt, md_table
def extract_div_to_md_tables(md_fp):
with open(md_fp, 'r') as f:
file_txt = f.read()
parser = MyHTMLParser()
parser.feed(file_txt)
table_and_div_tags = [tag for tag in parser.tags if tag in ['<div>', '</div>', '<table border="1" class="dataframe">', '</table>']]
table_start_tag_idxs = [i for i, tag in enumerate(table_and_div_tags) if tag=='<table border="1" class="dataframe">']
table_end_tag_idxs = [table_start_tag_idx+table_and_div_tags[table_start_tag_idx:].index('</table>') for table_start_tag_idx in table_start_tag_idxs]
div_to_md_tables = []
for start_idx, end_idx in zip(table_start_tag_idxs, table_end_tag_idxs):
div_txt, md_table = extract_div_to_md_table(start_idx, end_idx, table_and_div_tags, file_txt)
div_to_md_tables += [(div_txt, md_table)]
return div_to_md_tables
def clean_md_file_tables(md_fp):
div_to_md_tables = extract_div_to_md_tables(md_fp)
with open(md_fp, 'r') as f:
md_file_text = f.read()
for div_txt, md_txt in div_to_md_tables:
md_file_text = md_file_text.replace(div_txt, md_txt)
with open(md_fp, 'w') as f:
f.write(md_file_text)
return
# Cell
def clean_md_file_img_fps(md_fp):
with open(md_fp, 'r') as f:
md_file_text = f.read()
md_file_text = md_file_text.replace('../docs/img/nbs', 'img/nbs')
with open(md_fp, 'w') as f:
f.write(md_file_text)
return
# Cell
class AxTransformer:
def __init__(self, datetime_vals=False):
self.datetime_vals = datetime_vals
self.lr = linear_model.LinearRegression()
return
def process_tick_vals(self, tick_vals):
if not isinstance(tick_vals, Iterable) or isinstance(tick_vals, str):
tick_vals = [tick_vals]
if self.datetime_vals == True:
tick_vals = pd.to_datetime(tick_vals).astype(int).values
tick_vals = np.array(tick_vals)
return tick_vals
def fit(self, ax, axis='x'):
axis = getattr(ax, f'get_{axis}axis')()
tick_locs = axis.get_ticklocs()
tick_vals = self.process_tick_vals([label._text for label in axis.get_ticklabels()])
self.lr.fit(tick_vals.reshape(-1, 1), tick_locs)
return
def transform(self, tick_vals):
tick_vals = self.process_tick_vals(tick_vals)
tick_locs = self.lr.predict(np.array(tick_vals).reshape(-1, 1))
return tick_locs
def set_date_ticks(ax, start_date, end_date, axis='y', date_format='%Y-%m-%d', **date_range_kwargs):
dt_rng = pd.date_range(start_date, end_date, **date_range_kwargs)
ax_transformer = AxTransformer(datetime_vals=True)
ax_transformer.fit(ax, axis=axis)
getattr(ax, f'set_{axis}ticks')(ax_transformer.transform(dt_rng))
getattr(ax, f'set_{axis}ticklabels')(dt_rng.strftime(date_format))
ax.tick_params(axis=axis, which='both', bottom=True, top=False, labelbottom=True)
return ax | [
"numpy.array",
"pandas.read_html",
"pandas.date_range",
"pandas.to_datetime",
"os.listdir",
"sklearn.base.is_classifier",
"numpy.flatnonzero",
"numpy.empty",
"sklearn.metrics.scorer.check_scoring",
"json.loads",
"numpy.average",
"junix.export_images",
"codecs.open",
"sklearn.linear_model.L... | [((2833, 2884), 'sklearn.metrics.scorer.check_scoring', 'check_scoring', (['self.estimator'], {'scoring': 'self.scoring'}), '(self.estimator, scoring=self.scoring)\n', (2846, 2884), False, 'from sklearn.metrics.scorer import check_scoring\n'), ((2914, 2937), 'sklearn.utils.validation.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (2923, 2937), False, 'from sklearn.utils.validation import indexable\n'), ((3307, 3328), 'sklearn.base.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (3312, 3328), False, 'from sklearn.base import is_classifier, clone\n'), ((5711, 5760), 'numpy.array', 'np.array', (['n_test_samples[:n_splits]'], {'dtype': 'np.int'}), '(n_test_samples[:n_splits], dtype=np.int)\n', (5719, 5760), True, 'import numpy as np\n'), ((9616, 9636), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (9626, 9636), False, 'import json\n'), ((10123, 10138), 'ipypb.track', 'track', (['nb_files'], {}), '(nb_files)\n', (10128, 10138), False, 'from ipypb import track\n'), ((11498, 11519), 'pandas.read_html', 'pd.read_html', (['div_txt'], {}), '(div_txt)\n', (11510, 11519), True, 'import pandas as pd\n'), ((14261, 14317), 'pandas.date_range', 'pd.date_range', (['start_date', 'end_date'], {}), '(start_date, end_date, **date_range_kwargs)\n', (14274, 14317), True, 'import pandas as pd\n'), ((3420, 3497), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose', 'pre_dispatch': 'pre_dispatch'}), '(n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=pre_dispatch)\n', (3428, 3497), False, 'from joblib import Parallel, delayed\n'), ((4997, 5039), 'numpy.average', 'np.average', (['array'], {'axis': '(1)', 'weights': 'weights'}), '(array, axis=1, weights=weights)\n', (5007, 5039), True, 'import numpy as np\n'), ((6092, 6139), 'numpy.flatnonzero', 'np.flatnonzero', (["(results['rank_test_score'] == 1)"], {}), "(results['rank_test_score'] == 1)\n", (6106, 6139), 
True, 'import numpy as np\n'), ((9736, 9756), 'codecs.open', 'codecs.open', (['fp', '"""r"""'], {}), "(fp, 'r')\n", (9747, 9756), False, 'import codecs\n'), ((10187, 10230), 'junix.export_images', 'junix.export_images', (['nb_fp', 'docs_nb_img_dir'], {}), '(nb_fp, docs_nb_img_dir)\n', (10206, 10230), False, 'import junix\n'), ((10239, 10312), 'nbdev.export2html.convert_md', 'convert_md', (['nb_fp', 'docs_dir'], {'img_path': 'f"""{docs_nb_img_dir}/"""', 'jekyll': '(False)'}), "(nb_fp, docs_dir, img_path=f'{docs_nb_img_dir}/', jekyll=False)\n", (10249, 10312), False, 'from nbdev.export2html import convert_md\n'), ((13284, 13315), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (13313, 13315), False, 'from sklearn import linear_model\n'), ((13621, 13640), 'numpy.array', 'np.array', (['tick_vals'], {}), '(tick_vals)\n', (13629, 13640), True, 'import numpy as np\n'), ((2788, 2812), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (2801, 2812), False, 'from sklearn.base import is_classifier, clone\n'), ((5179, 5257), 'numpy.average', 'np.average', (['((array - array_means[:, np.newaxis]) ** 2)'], {'axis': '(1)', 'weights': 'weights'}), '((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)\n', (5189, 5257), True, 'import numpy as np\n'), ((6469, 6491), 'numpy.empty', 'np.empty', (['n_candidates'], {}), '(n_candidates)\n', (6477, 6491), True, 'import numpy as np\n'), ((10062, 10081), 'os.listdir', 'os.listdir', (['nbs_dir'], {}), '(nbs_dir)\n', (10072, 10081), False, 'import os\n'), ((3520, 3579), 'joblib.delayed', 'delayed', (['sklearn.model_selection._validation._fit_and_score'], {}), '(sklearn.model_selection._validation._fit_and_score)\n', (3527, 3579), False, 'from joblib import Parallel, delayed\n'), ((3593, 3614), 'sklearn.base.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (3598, 3614), False, 'from sklearn.base import is_classifier, clone\n'), ((4683, 
4716), 'numpy.array', 'np.array', (['array'], {'dtype': 'np.float64'}), '(array, dtype=np.float64)\n', (4691, 4716), True, 'import numpy as np\n'), ((5480, 5516), 'scipy.stats.rankdata', 'rankdata', (['(-array_means)'], {'method': '"""min"""'}), "(-array_means, method='min')\n", (5488, 5516), False, 'from scipy.stats import rankdata\n'), ((7397, 7418), 'sklearn.base.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (7402, 7418), False, 'from sklearn.base import is_classifier, clone\n'), ((9899, 9928), 'codecs.open', 'codecs.open', (['fp', '"""w"""', '"""utf-8"""'], {}), "(fp, 'w', 'utf-8')\n", (9910, 9928), False, 'import codecs\n'), ((14084, 14103), 'numpy.array', 'np.array', (['tick_vals'], {}), '(tick_vals)\n', (14092, 14103), True, 'import numpy as np\n'), ((13555, 13580), 'pandas.to_datetime', 'pd.to_datetime', (['tick_vals'], {}), '(tick_vals)\n', (13569, 13580), True, 'import pandas as pd\n')] |
"""
Optuna example that optimizes multi-layer perceptrons using Tensorflow (Estimator API).
In this example, we optimize the validation accuracy of hand-written digit recognition using
Tensorflow and MNIST. We optimize the neural network architecture as well as the optimizer
configuration. As it is too time consuming to use the whole MNIST dataset, we here use a small
subset of it.
We have the following two ways to execute this example:
(1) Execute this code directly.
$ python tensorflow_estimator_simple.py
(2) Execute through CLI.
$ STUDY_NAME=`optuna create-study --direction maximize --storage sqlite:///example.db`
$ optuna study optimize tensorflow_estimator_simple.py objective --n-trials=100 \
--study $STUDY_NAME --storage sqlite:///example.db
"""
import shutil
import tempfile
import numpy as np
import tensorflow as tf
import optuna
MODEL_DIR = tempfile.mkdtemp()
BATCH_SIZE = 128
TRAIN_STEPS = 1000
def create_network(trial, features):
# We optimize the numbers of layers and their units.
input_layer = tf.reshape(features['x'], [-1, 784])
prev_layer = input_layer
n_layers = trial.suggest_int('n_layers', 1, 3)
for i in range(n_layers):
n_units = trial.suggest_int('n_units_l{}'.format(i), 1, 128)
prev_layer = tf.keras.layers.Dense(
units=n_units, activation=tf.nn.relu)(prev_layer)
logits = tf.keras.layers.Dense(units=10)(prev_layer)
return logits
def create_optimizer(trial):
# We optimize the choice of optimizers as well as their parameters.
optimizer_name = trial.suggest_categorical('optimizer', ['Adam', 'SGD'])
if optimizer_name == 'Adam':
adam_lr = trial.suggest_loguniform('adam_lr', 1e-5, 1e-1)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=adam_lr)
else:
sgd_lr = trial.suggest_loguniform('sgd_lr', 1e-5, 1e-1)
sgd_momentum = trial.suggest_loguniform('sgd_momentum', 1e-5, 1e-1)
optimizer = tf.compat.v1.train.MomentumOptimizer(
learning_rate=sgd_lr, momentum=sgd_momentum)
return optimizer
def model_fn(trial, features, labels, mode):
logits = create_network(trial, features)
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = create_optimizer(trial)
train_op = optimizer.minimize(loss, tf.compat.v1.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
eval_metric_ops = {
"accuracy": tf.compat.v1.metrics.accuracy(labels=labels,
predictions=predictions["classes"])
}
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def objective(trial):
(train_data, train_labels), (eval_data, eval_labels) = tf.keras.datasets.mnist.load_data()
train_data = train_data / np.float32(255)
train_labels = train_labels.astype(np.int32)
eval_data = eval_data / np.float32(255)
eval_labels = eval_labels.astype(np.int32)
model_dir = "{}/{}".format(MODEL_DIR, trial.number)
mnist_classifier = tf.estimator.Estimator(
model_fn=lambda features, labels, mode: model_fn(trial, features, labels, mode),
model_dir=model_dir)
train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={"x": train_data}, y=train_labels, batch_size=BATCH_SIZE, num_epochs=None, shuffle=True)
mnist_classifier.train(input_fn=train_input_fn, steps=TRAIN_STEPS)
eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={"x": eval_data}, y=eval_labels, num_epochs=1, shuffle=False)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
return float(eval_results['accuracy'])
def main(unused_argv):
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=25)
print('Number of finished trials: ', len(study.trials))
print('Best trial:')
trial = study.best_trial
print(' Value: ', trial.value)
print(' Params: ')
for key, value in trial.params.items():
print(' {}: {}'.format(key, value))
shutil.rmtree(MODEL_DIR)
if __name__ == "__main__":
tf.compat.v1.app.run()
| [
"tensorflow.compat.v1.metrics.accuracy",
"numpy.float32",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.estimator.EstimatorSpec",
"shutil.rmtree",
"tensorflo... | [((889, 907), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (905, 907), False, 'import tempfile\n'), ((1058, 1094), 'tensorflow.reshape', 'tf.reshape', (["features['x']", '[-1, 784]'], {}), "(features['x'], [-1, 784])\n", (1068, 1094), True, 'import tensorflow as tf\n'), ((2478, 2556), 'tensorflow.compat.v1.losses.sparse_softmax_cross_entropy', 'tf.compat.v1.losses.sparse_softmax_cross_entropy', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (2526, 2556), True, 'import tensorflow as tf\n'), ((3014, 3100), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'eval_metric_ops': 'eval_metric_ops'}), '(mode=mode, loss=loss, eval_metric_ops=\n eval_metric_ops)\n', (3040, 3100), True, 'import tensorflow as tf\n'), ((3179, 3214), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (3212, 3214), True, 'import tensorflow as tf\n'), ((3647, 3787), 'tensorflow.compat.v1.estimator.inputs.numpy_input_fn', 'tf.compat.v1.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': train_data}", 'y': 'train_labels', 'batch_size': 'BATCH_SIZE', 'num_epochs': 'None', 'shuffle': '(True)'}), "(x={'x': train_data}, y=\n train_labels, batch_size=BATCH_SIZE, num_epochs=None, shuffle=True)\n", (3691, 3787), True, 'import tensorflow as tf\n'), ((3885, 3998), 'tensorflow.compat.v1.estimator.inputs.numpy_input_fn', 'tf.compat.v1.estimator.inputs.numpy_input_fn', ([], {'x': "{'x': eval_data}", 'y': 'eval_labels', 'num_epochs': '(1)', 'shuffle': '(False)'}), "(x={'x': eval_data}, y=\n eval_labels, num_epochs=1, shuffle=False)\n", (3929, 3998), True, 'import tensorflow as tf\n'), ((4153, 4194), 'optuna.create_study', 'optuna.create_study', ([], {'direction': '"""maximize"""'}), "(direction='maximize')\n", (4172, 4194), False, 'import optuna\n'), ((4512, 4536), 'shutil.rmtree', 'shutil.rmtree', (['MODEL_DIR'], {}), '(MODEL_DIR)\n', (4525, 
4536), False, 'import shutil\n'), ((4570, 4592), 'tensorflow.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (4590, 4592), True, 'import tensorflow as tf\n'), ((1395, 1426), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(10)'}), '(units=10)\n', (1416, 1426), True, 'import tensorflow as tf\n'), ((1757, 1812), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', ([], {'learning_rate': 'adam_lr'}), '(learning_rate=adam_lr)\n', (1789, 1812), True, 'import tensorflow as tf\n'), ((1983, 2069), 'tensorflow.compat.v1.train.MomentumOptimizer', 'tf.compat.v1.train.MomentumOptimizer', ([], {'learning_rate': 'sgd_lr', 'momentum': 'sgd_momentum'}), '(learning_rate=sgd_lr, momentum=\n sgd_momentum)\n', (2019, 2069), True, 'import tensorflow as tf\n'), ((2232, 2263), 'tensorflow.argmax', 'tf.argmax', ([], {'input': 'logits', 'axis': '(1)'}), '(input=logits, axis=1)\n', (2241, 2263), True, 'import tensorflow as tf\n'), ((2290, 2334), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': '"""softmax_tensor"""'}), "(logits, name='softmax_tensor')\n", (2303, 2334), True, 'import tensorflow as tf\n'), ((2403, 2465), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), '(mode=mode, predictions=predictions)\n', (2429, 2465), True, 'import tensorflow as tf\n'), ((2753, 2820), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'train_op': 'train_op'}), '(mode=mode, loss=loss, train_op=train_op)\n', (2779, 2820), True, 'import tensorflow as tf\n'), ((2866, 2951), 'tensorflow.compat.v1.metrics.accuracy', 'tf.compat.v1.metrics.accuracy', ([], {'labels': 'labels', 'predictions': "predictions['classes']"}), "(labels=labels, predictions=predictions['classes']\n )\n", (2895, 2951), True, 'import tensorflow as tf\n'), ((3246, 3261), 'numpy.float32', 'np.float32', (['(255)'], {}), '(255)\n', (3256, 
3261), True, 'import numpy as np\n'), ((3340, 3355), 'numpy.float32', 'np.float32', (['(255)'], {}), '(255)\n', (3350, 3355), True, 'import numpy as np\n'), ((1296, 1355), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'n_units', 'activation': 'tf.nn.relu'}), '(units=n_units, activation=tf.nn.relu)\n', (1317, 1355), True, 'import tensorflow as tf\n'), ((2690, 2736), 'tensorflow.compat.v1.train.get_or_create_global_step', 'tf.compat.v1.train.get_or_create_global_step', ([], {}), '()\n', (2734, 2736), True, 'import tensorflow as tf\n')] |
# by TR
from obspy.core import UTCDateTime
try:
from sito.util import dist2gps
except:
pass
import logging
import numpy as np
import matplotlib
from matplotlib.colors import Normalize
from matplotlib import cbook
from numpy import ma
log = logging.getLogger(__name__)
def equi(m, lat, lon, radius, indeg=True):
    """Return projected coordinates of a circle of given radius around (lat, lon).

    :param m: projection callable mapping (lons, lats) -> (X, Y)
    :param lat, lon: circle center in degrees
    :param radius: radius in degrees of arc (if indeg) or kilometers otherwise
    :param indeg: if True, convert radius from degrees to kilometers
    :return: tuple (X, Y) of projected coordinates; the polygon is closed
        (first point repeated at the end)
    """
    if indeg:
        # degrees of arc -> kilometers on a sphere of radius 6371 km
        radius = radius / 360. * 6371. * 2 * np.pi
    points = [dist2gps(radius, azi, lat, lon) for azi in range(0, 360)]
    lons = [p[1] for p in points]
    lats = [p[0] for p in points]
    # close the circle
    lons.append(lons[0])
    lats.append(lats[0])
    #~ m.plot(X,Y,**kwargs) #Should work, but doesn't...
    return m(lons, lats)
def line(m, lat, lon, azi, start, end, indeg=True):
    """Return projected coordinates of a great-circle segment from (lat, lon).

    :param m: projection callable mapping (lons, lats) -> (X, Y)
    :param azi: azimuth of the segment in degrees
    :param start, end: distances along the segment (degrees of arc if indeg,
        kilometers otherwise)
    :return: tuple (X, Y) of 100 projected points
    """
    if indeg:
        # degrees of arc -> kilometers
        start = start / 360. * 6371. * 2 * np.pi
        end = end / 360. * 6371. * 2 * np.pi
    lons = []
    lats = []
    for dist in np.linspace(start, end, 100):
        plat, plon = dist2gps(dist, azi, lat, lon)
        lons.append(plon)
        lats.append(plat)
    return m(lons, lats)
def _getUTCListFromSth(stream, time, raisenumber=False):
    """
    Convert a flexible 'time' specification to a list of UTCDateTimes.

    NOTE(review): Python 2 only — uses ``basestring`` and ``long``.

    :param stream: stream-like object supporting len() and getHI()
    :param time: one of
        - header entry name containing 'onset' or 'time' (e.g. 'ponset')
        - 'middle' (midpoint between starttime and endtime of each trace)
        - a single UTCDateTime (replicated for every trace)
        - an iterable of UTCDateTime/number entries (length must match stream)
        - a plain number (only allowed when raisenumber is False)
    :param raisenumber: if True, a plain number raises ValueError
    :return: list of UTCDateTime with len(stream) entries, or None when the
        caller must interpret 'time' itself (iterable of numbers, or number)
    """
    N = len(stream)
    if isinstance(time, basestring):
        if 'onset' in time or 'time' in time: #isinstance(relative, basestring):
            ret = stream.getHI(time)
        elif time == 'middle':
            starttime = stream.getHI('starttime')
            endtime = stream.getHI('endtime')
            ret = [starttime[i] + (endtime[i] - starttime[i]) / 2
                   for i in range(N)]
        else:
            raise ValueError('time is string but not expected one.')
    elif isinstance(time, UTCDateTime):
        ret = [time] * N
    elif cbook.iterable(time):
        # iterable: validate entry types and length, but leave interpretation
        # (UTCDateTime vs. relative seconds) to the caller
        if np.any([not isinstance(entry, (UTCDateTime, float, int, long)) for entry in time]):
            raise ValueError('time is list, but not of UTCDateTime or float objects.')
        if len(time) != N:
            raise ValueError('time is list, but has not the length of stream.')
        ret = None
    elif isinstance(time, (float, int, long)) and not raisenumber:
        ret = None
    else:
        raise ValueError('time has wrong type.')
    return ret
def getTimeIntervall(stream, start=None, end=None, relative='starttime', ret_rel='utc'):
    """
    Create two lists of UTCDateTimes - start list and end list
    'time' can stand for UTCDateTime, list of UTCDateTimes, header entry out of
    ('ponset', 'sonset', 'startime', 'endtime') or 'middle'
    :param start, end: - None (means start- resp. endtime)
        - time object
        - or seconds relative to param relative
    :param relative: times (if given as seconds=numbers) are taken relative to
        this parameter, is also needed for param ret_rel='relative
        -time object
    :param ret_rel: - 'utc' output in absolute UTCDateTime
        - 'relative': output in seconds relative to param relative
        - time object: output in seconds relative to time
    :return: start and end list of UTCDateTime or None if stream has length 0
    """
    N = len(stream)
    if N == 0:
        return
    # default to the full trace extent
    if start == None:
        start = 'starttime'
    if end == None:
        end = 'endtime'
    start_out = _getUTCListFromSth(stream, start)
    end_out = _getUTCListFromSth(stream, end)
    # get list of UTCDateTimes for relative if needed
    if start_out == None or end_out == None or ret_rel == 'relative':
        relative = _getUTCListFromSth(stream, relative, raisenumber=True)
    # start/end given as seconds: add them to the reference times
    if start_out == None:
        if cbook.iterable(start):
            start_out = [utc + start[i] for i, utc in enumerate(relative)]
        else:
            start_out = [i + start for i in relative]
    if end_out == None:
        # BUG FIX: this branch previously tested cbook.iterable(start)
        # instead of end, so an iterable 'start' combined with a scalar
        # 'end' produced a TypeError (and vice versa gave wrong results).
        if cbook.iterable(end):
            end_out = [utc + end[i] for i, utc in enumerate(relative)]
        else:
            end_out = [i + end for i in relative]
    # convert UTCDateTimes to seconds if ret_rel demands it
    if ret_rel == 'utc':
        return start_out, end_out
    elif ret_rel != 'relative':
        relative = _getUTCListFromSth(stream, ret_rel)
    start_out = [start_out[i] - relative[i] for i in range(N)]
    end_out = [end_out[i] - relative[i] for i in range(N)]
    return start_out, end_out
def getDataWindow(stream, start=None, end=None, relative='starttime'):
    """
    Return array with data in time window (start, end) around relative.

    'time' can stand for UTCDateTime, list of UTCDateTimes, header entry out of
    ('ponset', 'sonset', 'startime', 'endtime') or 'middle'
    :param stream: Stream object with data
    :param start, end: time or float (seconds) relative to param=relative
    :param relative: time, is needed if start or end in seconds (float)
    :return: np.array of shape (N_stream, N_data)
    """
    stream = stream.slice2(start, end, relative=relative)
    ntraces = len(stream)
    if ntraces == 0:
        raise ValueError('Stream has length 0')
    # force a common sampling rate before stacking into one array
    rates = stream.getHI('sampling_rate')
    if min(rates) != max(rates):
        stream.downsample2(min(rates))
        log.warning('Downsampling stream because of differing sampling rate.')
    lengths = stream.getHI('npts')
    if min(lengths) != max(lengths):
        log.warning('Traces in stream have different NPTS. '
                    'Difference: %d samples' % (max(lengths) - min(lengths)))
    # shorter traces are zero-padded at the end
    data = np.zeros((ntraces, max(lengths)))
    for row, trace in enumerate(stream):
        data[row, :len(trace.data)] = trace.data
    return data
# create colormap Blue -> White -> Red for xcorr plots
# Segment data for LinearSegmentedColormap: per channel, tuples of
# (normalized position, value below, value above).  0 -> blue,
# 0.5 -> white, 1 -> red; the commented anchors widen the white band.
cdict = {'red': ((0.0, 0.0, 0.0),
#                 (0.3, 0.5, 0.5),
                 (0.5, 1.0, 1.0),
#                 (0.7, 1.0, 1.0),
                 (1.0, 1.0, 1.0)),
         'green': ((0.0, 0.0, 0.0),
#                 (0.3, 1.0, 1.0),
                 (0.5, 1.0, 1.0),
#                 (0.7, 1.0, 1.0),
                 (1.0, 0.0, 0.0)),
         'blue': ((0.0, 1.0, 1.0),
#                 (0.3, 1.0, 1.0),
                 (0.5, 1.0, 1.0),
#                 (0.7, 0.0, 0.0),
                 (1.0, 0.0, 0.0))}
xcorr_cmap = matplotlib.colors.LinearSegmentedColormap('xcorr_cmap', cdict, 256)
class DLogNorm(Normalize):
    """
    Normalize a given positive or negative value to the 0-1 range on a log scale
    negative values are mapped to 0-0.5
    positive values are mapped to 0.5-1
    Derived from:
    matplotlib.colors.LogNorm
    """
    def __init__(self, vmin=None, vmax=None, cmin=1e-5, cmax=1e-5, clip=False):
        """
        If *vmin* or *vmax* is not given, they are taken from the input's
        minimum and maximum value respectively. If *clip* is *True* and
        the given value falls outside the range, the returned value
        will be 0 or 1, whichever is closer. Returns 0 if::
        vmin==vmax
        Works with scalars or arrays, including masked arrays. If
        *clip* is *True*, masked values are set to 1; otherwise they
        remain masked. Clipping silently defeats the purpose of setting
        the over, under, and masked colors in the colormap, so it is
        likely to lead to surprises; therefore the default is
        *clip* = *False*.
        cmin, cmax gives the range of logarithmic plot for positive (cmax)
        and negative (cmin) values. All values with smaller absolute value
        are mapped to 0.5.
        """
        self.vmin = vmin
        self.vmax = vmax
        self.cmin = cmin
        self.cmax = cmax
        self.clip = clip
    def __call__(self, value, clip=None):
        # Forward map: negative values -> [0, 0.5], positive -> [0.5, 1],
        # log-scaled on either side of a small "dead zone" around zero.
        if clip is None:
            clip = self.clip
        if cbook.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(np.float)
        else:
            # wrap scalars so the array code path below applies uniformly
            vtype = 'scalar'
            val = ma.array([value]).astype(np.float)
        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        # cmin/cmax (fractions of vmin/vmax) delimit the range that maps
        # to the midpoint 0.5
        cmin, cmax = self.cmin * vmin, self.cmax * vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin == vmax:
            result = 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                                mask=mask)
            # default: everything maps to 0.5 ...
            result = 0. * val + 0.5
            # ... then log scaling displaces values outside [cmin, cmax]
            result[val > cmax] = (ma.log10(val[val > cmax]) - ma.log10(cmax)) / (np.log10(vmax) - np.log10(cmax)) / 2. + 0.5
            result[val < cmin] = -(ma.log10(-val[val < cmin]) - ma.log10(-cmin)) / (np.log10(-vmin) - np.log10(-cmin)) / 2. + 0.5
        if vtype == 'scalar':
            result = result[0]
        return result
    def inverse(self, value):
        # Invert the forward mapping: 0.5 -> 0, (0.5, 1] -> positive,
        # [0, 0.5) -> negative.
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        cmin, cmax = self.cmin * vmin, self.cmax * vmax
        if cbook.iterable(value):
            val = np.asarray(value)
            result = 0.0 * val
            result[val > 0.5] = cmax * (vmax / cmax) ** (2. * val[val > 0.5] - 1)
            result[val < 0.5] = cmin * (vmin / cmin) ** (-2. * val[val < 0.5] + 1)
            return result
        else:
            if value == 0.5:
                return 0
            elif value > 0.5:
                return cmax * (vmax / cmax) ** (2. * value - 1)
            elif value < 0.5:
                return cmin * (vmin / cmin) ** (-2. * value + 1)
    def ticks(self):
        # Log-spaced tick positions on both sides of zero, plus zero itself.
        vmin, vmax = self.vmin, self.vmax
        cmin, cmax = self.cmin, self.cmax
        a1 = np.logspace(np.log10(cmax * vmax) + 1, np.log10(vmax), int(-np.log10(cmax)))
        a2 = -np.logspace(np.log10(-cmin * vmin) + 1, np.log10(-vmin), int(-np.log10(cmin)))
        return np.hstack((a1, 0, a2))
| [
"logging.getLogger",
"numpy.ma.asarray",
"numpy.log10",
"matplotlib.cbook.iterable",
"numpy.hstack",
"numpy.ma.array",
"numpy.ma.getmask",
"matplotlib.colors.LinearSegmentedColormap",
"numpy.asarray",
"sito.util.dist2gps",
"numpy.linspace",
"numpy.ma.log10"
] | [((249, 276), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (266, 276), False, 'import logging\n'), ((6169, 6236), 'matplotlib.colors.LinearSegmentedColormap', 'matplotlib.colors.LinearSegmentedColormap', (['"""xcorr_cmap"""', 'cdict', '(256)'], {}), "('xcorr_cmap', cdict, 256)\n", (6210, 6236), False, 'import matplotlib\n'), ((884, 912), 'numpy.linspace', 'np.linspace', (['start', 'end', '(100)'], {}), '(start, end, 100)\n', (895, 912), True, 'import numpy as np\n'), ((467, 502), 'sito.util.dist2gps', 'dist2gps', (['radius', 'azimuth', 'lat', 'lon'], {}), '(radius, azimuth, lat, lon)\n', (475, 502), False, 'from sito.util import dist2gps\n'), ((937, 970), 'sito.util.dist2gps', 'dist2gps', (['distance', 'azi', 'lat', 'lon'], {}), '(distance, azi, lat, lon)\n', (945, 970), False, 'from sito.util import dist2gps\n'), ((3626, 3647), 'matplotlib.cbook.iterable', 'cbook.iterable', (['start'], {}), '(start)\n', (3640, 3647), False, 'from matplotlib import cbook\n'), ((3827, 3848), 'matplotlib.cbook.iterable', 'cbook.iterable', (['start'], {}), '(start)\n', (3841, 3848), False, 'from matplotlib import cbook\n'), ((7674, 7695), 'matplotlib.cbook.iterable', 'cbook.iterable', (['value'], {}), '(value)\n', (7688, 7695), False, 'from matplotlib import cbook\n'), ((8959, 8980), 'matplotlib.cbook.iterable', 'cbook.iterable', (['value'], {}), '(value)\n', (8973, 8980), False, 'from matplotlib import cbook\n'), ((9802, 9824), 'numpy.hstack', 'np.hstack', (['(a1, 0, a2)'], {}), '((a1, 0, a2))\n', (9811, 9824), True, 'import numpy as np\n'), ((1675, 1695), 'matplotlib.cbook.iterable', 'cbook.iterable', (['time'], {}), '(time)\n', (1689, 1695), False, 'from matplotlib import cbook\n'), ((9000, 9017), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (9010, 9017), True, 'import numpy as np\n'), ((9656, 9670), 'numpy.log10', 'np.log10', (['vmax'], {}), '(vmax)\n', (9664, 9670), True, 'import numpy as np\n'), ((9629, 9650), 'numpy.log10', 
'np.log10', (['(cmax * vmax)'], {}), '(cmax * vmax)\n', (9637, 9650), True, 'import numpy as np\n'), ((9748, 9763), 'numpy.log10', 'np.log10', (['(-vmin)'], {}), '(-vmin)\n', (9756, 9763), True, 'import numpy as np\n'), ((7743, 7760), 'numpy.ma.asarray', 'ma.asarray', (['value'], {}), '(value)\n', (7753, 7760), False, 'from numpy import ma\n'), ((7839, 7856), 'numpy.ma.array', 'ma.array', (['[value]'], {}), '([value])\n', (7847, 7856), False, 'from numpy import ma\n'), ((8225, 8240), 'numpy.ma.getmask', 'ma.getmask', (['val'], {}), '(val)\n', (8235, 8240), False, 'from numpy import ma\n'), ((9677, 9691), 'numpy.log10', 'np.log10', (['cmax'], {}), '(cmax)\n', (9685, 9691), True, 'import numpy as np\n'), ((9720, 9742), 'numpy.log10', 'np.log10', (['(-cmin * vmin)'], {}), '(-cmin * vmin)\n', (9728, 9742), True, 'import numpy as np\n'), ((9770, 9784), 'numpy.log10', 'np.log10', (['cmin'], {}), '(cmin)\n', (9778, 9784), True, 'import numpy as np\n'), ((8424, 8449), 'numpy.ma.log10', 'ma.log10', (['val[val > cmax]'], {}), '(val[val > cmax])\n', (8432, 8449), False, 'from numpy import ma\n'), ((8452, 8466), 'numpy.ma.log10', 'ma.log10', (['cmax'], {}), '(cmax)\n', (8460, 8466), False, 'from numpy import ma\n'), ((8471, 8485), 'numpy.log10', 'np.log10', (['vmax'], {}), '(vmax)\n', (8479, 8485), True, 'import numpy as np\n'), ((8488, 8502), 'numpy.log10', 'np.log10', (['cmax'], {}), '(cmax)\n', (8496, 8502), True, 'import numpy as np\n'), ((8599, 8614), 'numpy.log10', 'np.log10', (['(-vmin)'], {}), '(-vmin)\n', (8607, 8614), True, 'import numpy as np\n'), ((8617, 8632), 'numpy.log10', 'np.log10', (['(-cmin)'], {}), '(-cmin)\n', (8625, 8632), True, 'import numpy as np\n'), ((8550, 8576), 'numpy.ma.log10', 'ma.log10', (['(-val[val < cmin])'], {}), '(-val[val < cmin])\n', (8558, 8576), False, 'from numpy import ma\n'), ((8579, 8594), 'numpy.ma.log10', 'ma.log10', (['(-cmin)'], {}), '(-cmin)\n', (8587, 8594), False, 'from numpy import ma\n')] |
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
import cPickle as pickle
import argparse
import logging
from time import time
import numpy as np
class streamer(object):
    """Iterable that yields stripped lines of a text file on every pass."""

    def __init__(self, file_name):
        # Only the path is stored; the file is reopened on each iteration,
        # so the corpus can be streamed repeatedly without loading it whole.
        self.file_name = file_name

    def __iter__(self):
        for raw_line in open(self.file_name):
            yield raw_line.strip()
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# Load some categories from the training set
# (only used by the 20-newsgroups demo branch selected with --news)
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
if __name__ == "__main__":
    # Command line interface: either vectorize a raw text file (default)
    # or run the 20-newsgroups demo (--news).
    parser = argparse.ArgumentParser(description='Computes Cross-Entropy (TFIDF) weights of a raw text dataset and stores the model.')
    parser.add_argument("--dataset", help="The path to the raw text dataset file",
                        required=True)
    parser.add_argument("--cout", help="The path to the cross-entropy output model file",
                        default="output_tfidf.pk")
    parser.add_argument("--minc", help="The minimum word frequency considered to compute CE weight.",
                        default=2, type=int)
    parser.add_argument("--tf", help="TF normalization: none, binary, sublinear (default=none).", default="none")
    parser.add_argument("--stop", help="Toggles stop words stripping.", action="store_true")
    parser.add_argument("--lsa", help="Toggles LSA computation.", default=0, type=int)
    parser.add_argument("--news", help="Toggles making analysis of predefined dataset.", action="store_true")
    # Options for the 20-newsgroups branch; previously that branch referenced
    # an undefined name 'opts' and raised a NameError (BUG FIX below).
    parser.add_argument("--n_features", help="Maximum number of TFIDF features (news mode).",
                        default=None, type=int)
    parser.add_argument("--no_idf", help="Disables IDF weighting (news mode).", action="store_true")
    args = parser.parse_args()

    t0 = time()
    if not args.news:
        corpus = streamer(args.dataset)
        vectorizer = TfidfVectorizer(min_df=1,
                                     encoding="latin-1",
                                     decode_error="replace",
                                     lowercase=False,
                                     binary=True if args.tf.startswith("bin") else False,
                                     sublinear_tf=True if args.tf.startswith("subl") else False,
                                     stop_words="english" if args.stop else None)
        # lsa < 0: fit the vocabulary only; otherwise build the full matrix
        X = vectorizer.fit(corpus) if args.lsa < 0 else vectorizer.fit_transform(corpus)
    else:
        print("Loading 20 newsgroups dataset for categories:")
        print(categories)
        dataset = fetch_20newsgroups(subset='all', categories=categories,
                                     shuffle=True, random_state=42)
        print("%d categories" % len(dataset.target_names))
        print()
        labels = dataset.target
        true_k = np.unique(labels).shape[0]
        print("%d documents" % len(dataset.data))
        # BUG FIX: was 'opts.n_features' / 'opts.use_idf' with no 'opts'
        # object in scope; values now come from the argparse options above.
        vectorizer = TfidfVectorizer(max_df=0.5, max_features=args.n_features,
                                     min_df=2, stop_words='english',
                                     use_idf=not args.no_idf)
        X = vectorizer.fit_transform(dataset.data)

    print("done in %fs" % (time() - t0))
    print("n_samples: %d, n_features: %d" % X.shape)
    print()
    if args.lsa == 0:
        # no LSA requested: pickle the sparse matrix and stop
        with open(args.cout, 'wb') as fout:
            pickle.dump(X, fout)
        print("TF-IDF weights saved...")
        exit()

    # dimensionality reduction: truncated SVD followed by L2 normalization
    from sklearn.decomposition import TruncatedSVD
    from sklearn.preprocessing import Normalizer
    from sklearn.pipeline import make_pipeline
    svd = TruncatedSVD(args.lsa)
    normalizer = Normalizer(copy=False)
    lsa = make_pipeline(svd, normalizer)
    X = lsa.fit_transform(X)
    print("done in %fs" % (time() - t0))
    explained_variance = svd.explained_variance_ratio_.sum()
    print("Explained variance of the SVD step: {}%".format(
        int(explained_variance * 100)))
    print ("Saving vectors to: %s" % args.cout)
    np.savetxt(args.cout, X)
| [
"logging.basicConfig",
"cPickle.dump",
"numpy.unique",
"argparse.ArgumentParser",
"sklearn.datasets.fetch_20newsgroups",
"sklearn.decomposition.TruncatedSVD",
"sklearn.pipeline.make_pipeline",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.savetxt",
"sklearn.preprocessing.Normalizer",
... | [((464, 556), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s %(message)s')\n", (483, 556), False, 'import logging\n'), ((846, 977), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Computes Cross-Entropy (TFIDF) weights of a raw text dataset and stores the model."""'}), "(description=\n 'Computes Cross-Entropy (TFIDF) weights of a raw text dataset and stores the model.'\n )\n", (869, 977), False, 'import argparse\n'), ((1942, 1948), 'time.time', 'time', ([], {}), '()\n', (1946, 1948), False, 'from time import time\n'), ((3675, 3697), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', (['args.lsa'], {}), '(args.lsa)\n', (3687, 3697), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((3715, 3737), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'copy': '(False)'}), '(copy=False)\n', (3725, 3737), False, 'from sklearn.preprocessing import Normalizer\n'), ((3748, 3778), 'sklearn.pipeline.make_pipeline', 'make_pipeline', (['svd', 'normalizer'], {}), '(svd, normalizer)\n', (3761, 3778), False, 'from sklearn.pipeline import make_pipeline\n'), ((4065, 4089), 'numpy.savetxt', 'np.savetxt', (['args.cout', 'X'], {}), '(args.cout, X)\n', (4075, 4089), True, 'import numpy as np\n'), ((2688, 2778), 'sklearn.datasets.fetch_20newsgroups', 'fetch_20newsgroups', ([], {'subset': '"""all"""', 'categories': 'categories', 'shuffle': '(True)', 'random_state': '(42)'}), "(subset='all', categories=categories, shuffle=True,\n random_state=42)\n", (2706, 2778), False, 'from sklearn.datasets import fetch_20newsgroups\n'), ((3028, 3143), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_df': '(0.5)', 'max_features': 'opts.n_features', 'min_df': '(2)', 'stop_words': '"""english"""', 'use_idf': 'opts.use_idf'}), "(max_df=0.5, max_features=opts.n_features, 
min_df=2,\n stop_words='english', use_idf=opts.use_idf)\n", (3043, 3143), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3440, 3459), 'cPickle.dump', 'pickle.dump', (['X', 'fin'], {}), '(X, fin)\n', (3451, 3459), True, 'import cPickle as pickle\n'), ((2929, 2946), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (2938, 2946), True, 'import numpy as np\n'), ((3285, 3291), 'time.time', 'time', ([], {}), '()\n', (3289, 3291), False, 'from time import time\n'), ((3837, 3843), 'time.time', 'time', ([], {}), '()\n', (3841, 3843), False, 'from time import time\n')] |
import numpy as np
import utils
import scipy.stats
from scipy.special import logsumexp
import sys
sys.path.append("../utils/")
import stats
from spotlight_background_model import SpotlightBackground
class Model():
    """Particle-based belief over a hidden goal location.

    Wraps a world model (default SpotlightBackground) and maintains
    ``n_samples`` particles that are re-weighted by observation likelihoods.
    """
    def __init__(self, world_model = SpotlightBackground, n_samples = 1000):
        # world_model is a factory/class; it is instantiated here
        self.world_model = world_model()
        self.n_samples = n_samples
        # initial particle set drawn from the world model's prior
        self.samples = self.world_model.prior_sample(n_samples)
    def resample(self, loc, obs, goal):
        """
        >>> m = Model()
        >>> m.resample([10.25,10.25], 1, None) is not None
        True
        >>> m.resample([10.25,10.25], 1, [10.25,10.25])
        [10.25, 10.25]
        >>> sum(m.resample([10.25,10.25], 1, [100.25,100.25]) == [10.25, 10.25]) == 0
        True
        """
        # Keep the current goal with probability equal to the observation
        # likelihood of that goal; otherwise draw a fresh particle copy.
        if goal is None:
            prob_resample = 1
        else:
            prob_resample = 1 - np.exp(self.world_model.likelihood(loc, obs, [goal])[0])
        if np.random.random() < prob_resample:
            return self.samples[np.random.choice(len(self.samples))].copy()
        else:
            return goal
    def observe(self, loc, obs, others = None):
        """
        >>> m = Model(n_samples = 500)
        >>> x = [m.observe([100,250],1.0) for i in range(10)]
        >>> np.linalg.norm(np.mean(m.samples,0) - np.array([100,250])) < 25
        True
        >>> m = Model(n_samples = 500)
        >>> x = [m.observe([[100,250],[100,250]],[1.0,1.0]) for i in range(10)]
        >>> np.linalg.norm(np.mean(m.samples,0) - np.array([100,250])) < 25 # multi match, TODO: fix
        True
        >>> m = Model(n_samples = 500)
        >>> x = [m.observe([[100,250],[0,0]],[1.0,1.0]) for i in range(10)]
        >>> np.linalg.norm(np.mean(m.samples,0) - np.array([100,250])) < 25 # multi mismatch, TODO: fix
        False
        """
        # samples = []
        # if others is not None and len(others) > 0:
        #     assert False # TODO: fix
        #     samples += list(others)
        samples = self.world_model.transition(self.samples)
        if len(np.array(loc).shape) == 1:
            # single observation: likelihood of every particle
            weights = self.world_model.likelihood(loc, obs, samples)
        else:
            # NOTE(review): the multi-observation branch is disabled (assert
            # False); the code below it never runs as written.
            assert False # TODO: fix
            weights = []
            for i in range(len(samples)):
                weights += np.sum(np.array([self.world_model.likelihood(loc[j], obs[j], samples[i]) for j in range(len(loc))]))
        weights = np.array(weights)
        # normalize log-weights via logsumexp for numerical stability
        norm = logsumexp(weights)
        weights -= norm
        weights = np.exp(weights)
        samples = np.array(samples)
        # importance resampling back to n_samples particles
        inds = np.random.choice(len(samples), size = self.n_samples, p = weights)
        self.samples = samples[inds]
        return weights
    def get_beliefs(self):
        # """
        # >>> m = Model(world_model = lambda: SpotlightBackground(sizes = [3,4]))
        # >>> m.samples = np.array([[0,0.1],[1.3,2.5]])
        # >>> m.get_beliefs()
        # array([[ 0.5,  0. ,  0. ,  0. ],
        #        [ 0. ,  0. ,  0. ,  0.5],
        #        [ 0. ,  0. ,  0. ,  0. ]])
        # """
        return self.samples#self.world_model.get_beliefs(self.samples)
    def get_uncertainty(self):
        """
        >>> m = Model()
        >>> u1 = m.get_uncertainty()
        >>> x = [m.observe([100,250],0.0) for i in range(10)]
        >>> u2 = m.get_uncertainty()
        >>> x = [m.observe([100,250],1.0) for i in range(10)]
        >>> u3 = m.get_uncertainty()
        >>> u3 < u1
        True
        >>> u3 < u2
        True
        """
        return stats.bounding_oval(self.samples)
    def get_normal_fit(self):
        # Parameters of a normal distribution fitted to the particle cloud.
        return stats.get_normal_fit(self.samples)
if __name__ == "__main__":
    # Run the doctests embedded in the methods above.
    import doctest
    doctest.testmod()
| [
"numpy.random.random",
"numpy.exp",
"numpy.array",
"stats.get_normal_fit",
"doctest.testmod",
"sys.path.append",
"scipy.special.logsumexp",
"stats.bounding_oval"
] | [((100, 128), 'sys.path.append', 'sys.path.append', (['"""../utils/"""'], {}), "('../utils/')\n", (115, 128), False, 'import sys\n'), ((3812, 3829), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (3827, 3829), False, 'import doctest\n'), ((2470, 2487), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (2478, 2487), True, 'import numpy as np\n'), ((2503, 2521), 'scipy.special.logsumexp', 'logsumexp', (['weights'], {}), '(weights)\n', (2512, 2521), False, 'from scipy.special import logsumexp\n'), ((2564, 2579), 'numpy.exp', 'np.exp', (['weights'], {}), '(weights)\n', (2570, 2579), True, 'import numpy as np\n'), ((2607, 2624), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (2615, 2624), True, 'import numpy as np\n'), ((3632, 3665), 'stats.bounding_oval', 'stats.bounding_oval', (['self.samples'], {}), '(self.samples)\n', (3651, 3665), False, 'import stats\n'), ((3722, 3756), 'stats.get_normal_fit', 'stats.get_normal_fit', (['self.samples'], {}), '(self.samples)\n', (3742, 3756), False, 'import stats\n'), ((954, 972), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (970, 972), True, 'import numpy as np\n'), ((2101, 2114), 'numpy.array', 'np.array', (['loc'], {}), '(loc)\n', (2109, 2114), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
hyperfine_fit - Fit Gaussian lines to hyperfine components
Functions needed to fit data::
ammonia_data()
model()
error_func()
Here is the basic procedure, assuming that the data are in arrays
x(in MHz) and y::
plot(x,y) # to see what to expect
set_model(line) # see ammonia_data
parameter_guess = [.3,-0.6,width] # Put in appropriate numbers
result = fit_data(x,y,hpf_model,parameter_guess)
best_fit = hpf_model(x,*result) # model y-values
plot(x,best_fit) # overlaid on the data
Problem
=======
Getting the ammonia hyperfine structure from the JPL catalog (v4)
and calculating A_ul gives the wrong relative intensities.
10**logint gives the right ratios.
"""
import numpy as NP
import pylab
import scipy.optimize as opt
import scipy.stats.distributions as dists
import Math
# Element-wise Gaussian profile over an array of x-values; assumed call
# signature Gaussian(x, center, width) from its use in hpf_model — confirm
# against Math.gaussian.
Gaussian = NP.vectorize(Math.gaussian)
import Physics.Radiation.Lines.Molec as M
def hpf_model(x, amp, x_0, w):
    """
    Build a multi-component Gaussian hyperfine profile.

    Sums one Gaussian per hyperfine component using the module-level
    offsets ``df`` and strengths ``s`` (set via set_model()).

    @param x : numpy array of float::
      x-values
    @param amp : float::
      scale factor for line strenths
    @param x_0 : float::
      center position of the model
    @param w : float::
      width of each Gaussian component
    @return: numpy array of float::
      y-values for the model
    """
    global s, df
    profile = NP.zeros(len(x))
    for i in range(len(df)):
        # component i: strength s[i], offset df[i] from the model center
        profile += amp * s[i] * Gaussian(x, x_0 + df[i], w)
    return profile
def error_func(guess, x, data, data_model):
    """
    Residuals between measured data and a model evaluated at x.

    @param guess : parameter list::
      an estimate of the model parameters of the final solution
    @param x : numpy array of float::
      x-values
    @param data : numpy array of y-values
    @param data_model : function to be fitted
    @return: numpy array of float::
      differences between the data and the model
    """
    predicted = data_model(x, *guess)
    return data - predicted
def noise_data(SNR, width, span):
    """
    Create noisy synthetic samples of the hyperfine profile.

    Noise is Gaussian (normal distribution) with a dispersion of 1.
    @param SNR : float::
      SNR of central component
    @param width : float::
      line width in MHz
    @param span : float::
      frquency range MHz for which data will be generated
    @return: numpy array of float::
      noisy y-values.
    """
    global nsamps
    x = NP.linspace(-span / 2, span / 2, nsamps)
    # amplitude SNR/s[0] makes the strongest component reach the given SNR
    clean = hpf_model(x, SNR / s[0], 0, width)
    return x, clean + dists.norm.rvs(size=nsamps)
def ammonia_data(ju,ku,jl,kl):
    """
    Print hyperfine component data for the NH3 inversion transition.

    Reads the JPL catalog file c017002.cat, parses catalog lines 6408-6428
    and prints, per hyperfine component, the upper and lower quantum labels,
    the frequency offset (kHz) from 23694.4955 MHz, and the relative
    intensity 10**logint normalized by 6.4e-6.

    NOTE(review): the ju/ku/jl/kl arguments are currently unused, and the
    function returns None although set_model() unpacks (name, df, s) from
    its return value — confirm intended interface.

    @param ju, ku : upper-state quantum numbers (currently unused)
    @param jl, kl : lower-state quantum numbers (currently unused)
    @return: None
    """
    spec_lines = []
    nh3_file = open("/usr/local/line_cat/jpl/c017002.cat","r")
    data_lines = nh3_file.readlines()
    nh3_file.close()
    name, n_lines, part_fn = M.jpl.get_mol_metadata(17002)
    part_fn_300 = float(part_fn[300])
    first_line = True
    for line in data_lines[6408:6429]:
        #(ju,ku,V,
        line_data = M.jpl.parse_catalog_line(line)
        if first_line:
            # header row: describe the quantum label columns once
            print("Upper -> lower,", end=' ')
            print(M.jpl.quantum_label(17002,line_data['q lower'],
                                      line_data['qn format'],
                                      line_data['deg free'])[0])
            first_line = False
        freq = float(line_data['freq'])
        intens = float(line_data['int'])
        g_up = int(line_data['g upper'])
        E_lo = float(line_data['E lower'])
        A_ul = M.jpl.einstein_a(intens, freq, part_fn_300, g_up, E_lo)
        # BUG FIX: was 'prin(...)[1]),' — a misspelled print with a stray
        # trailing comma left over from a Python 2 'print x,' conversion,
        # which raised a NameError at runtime.
        print(M.jpl.quantum_label(17002,line_data['q upper'],
                                  line_data['qn format'],
                                  line_data['deg free'])[1], end=' ')
        print(M.jpl.quantum_label(17002,line_data['q lower'],
                                  line_data['qn format'],
                                  line_data['deg free'])[1], end=' ')
        print("%7.1f %5.2f" % ((freq-23694.4955)*1e3, (10.**intens)/6.4e-6))
    return
def fit_data(x, data, model, parameter_guess):
    """
    Least-squares fit of noisy data to the model.

    @type x : numpy array of float
    @param x : array of abcissa values
    @type data : numpy array of float
    @param data : array of ordinate values
    @type model : name of a function
    @param model : function to be fitted to data
    @type parameter_guess : list
    @param parameter_guess : [amplitude, center_x, width]
    @return: tuple
      output from scipy.optimize.leastsq
    """
    best_params, status = opt.leastsq(error_func,
                                      x0=parameter_guess,
                                      args=(x, data, model))
    # status is leastsq's integer exit flag
    print(status)
    return best_params
def set_model(line):
    """
    Make the model parameters global
    @type line : int
    @param line : line identifier; see ammonia_data()
    @return: True if it worked

    NOTE(review): ammonia_data() is defined with four arguments
    (ju, ku, jl, kl) and currently returns None, so this one-argument call
    always raises, is caught below, and this function returns False —
    confirm the intended ammonia_data interface.
    """
    global name, df, s
    try:
        name, df, s = ammonia_data(line)
        return True
    except Exception as details:
        # best-effort: report the failure instead of propagating it
        print(Exception, details)
        return False
def test():
"""
Test with simulated and real data.
"""
global nsamps
width = 0.1 # MHz
pylab.ion()
o = input("Simulation (s) or real data (d)? ")
if o.lower()[0] == 's':
# make a set of noisy data samples
SNR = 6 # for central component
nsamps = 200
set_model(1)
x, y = noise_data(SNR,width,10)
parameter_guess = [1,0,width]
else:
x = NP.array([-1.84357808, -1.83374336, -1.82390864, -1.81407392, -1.8042392 ,
-1.79440448, -1.78456976, -1.77473504, -1.76490032, -1.7550656 ,
-1.74523088, -1.73539616, -1.72556144, -1.71572672, -1.70589201,
-1.69605729, -1.68622257, -1.67638785, -1.66655313, -1.65671841,
-1.64688369, -1.63704897, -1.62721425, -1.61737953, -1.60754481,
-1.59771009, -1.58787537, -1.57804065, -1.56820593, -1.55837121,
-1.54853649, -1.53870177, -1.52886705, -1.51903233, -1.50919761,
-1.49936289, -1.48952817, -1.47969345, -1.46985874, -1.46002402,
-1.4501893 , -1.44035458, -1.43051986, -1.42068514, -1.41085042,
-1.4010157 , -1.39118098, -1.38134626, -1.37151154, -1.36167682,
-1.3518421 , -1.34200738, -1.33217266, -1.32233794, -1.31250322,
-1.3026685 , -1.29283378, -1.28299906, -1.27316434, -1.26332962,
-1.2534949 , -1.24366018, -1.23382546, -1.22399075, -1.21415603,
-1.20432131, -1.19448659, -1.18465187, -1.17481715, -1.16498243,
-1.15514771, -1.14531299, -1.13547827, -1.12564355, -1.11580883,
-1.10597411, -1.09613939, -1.08630467, -1.07646995, -1.06663523,
-1.05680051, -1.04696579, -1.03713107, -1.02729635, -1.01746163,
-1.00762691, -0.99779219, -0.98795748, -0.97812276, -0.96828804,
-0.95845332, -0.9486186 , -0.93878388, -0.92894916, -0.91911444,
-0.90927972, -0.899445 , -0.88961028, -0.87977556, -0.86994084,
-0.86010612, -0.8502714 , -0.84043668, -0.83060196, -0.82076724,
-0.81093252, -0.8010978 , -0.79126308, -0.78142836, -0.77159364,
-0.76175892, -0.75192421, -0.74208949, -0.73225477, -0.72242005,
-0.71258533, -0.70275061, -0.69291589, -0.68308117, -0.67324645,
-0.66341173, -0.65357701, -0.64374229, -0.63390757, -0.62407285,
-0.61423813, -0.60440341, -0.59456869, -0.58473397, -0.57489925,
-0.56506453, -0.55522981, -0.54539509, -0.53556037, -0.52572565,
-0.51589093, -0.50605622, -0.4962215 , -0.48638678, -0.47655206,
-0.46671734, -0.45688262, -0.4470479 , -0.43721318, -0.42737846,
-0.41754374, -0.40770902, -0.3978743 , -0.38803958, -0.37820486,
-0.36837014, -0.35853542, -0.3487007 , -0.33886598, -0.32903126,
-0.31919654, -0.30936182, -0.2995271 , -0.28969238, -0.27985766,
-0.27002295, -0.26018823, -0.25035351, -0.24051879, -0.23068407,
-0.22084935, -0.21101463, -0.20117991, -0.19134519, -0.18151047,
-0.17167575, -0.16184103, -0.15200631, -0.14217159, -0.13233687,
-0.12250215, -0.11266743, -0.10283271, -0.09299799, -0.08316327,
-0.07332855, -0.06349383, -0.05365911, -0.04382439, -0.03398968,
-0.02415496, -0.01432024, -0.00448552, 0.0053492 , 0.01518392,
0.02501864, 0.03485336, 0.04468808, 0.0545228 , 0.06435752,
0.07419224, 0.08402696, 0.09386168, 0.1036964 , 0.11353112,
0.12336584, 0.13320056, 0.14303528, 0.15287 , 0.16270472,
0.17253944, 0.18237416, 0.19220888, 0.2020436 , 0.21187831,
0.22171303, 0.23154775, 0.24138247, 0.25121719, 0.26105191,
0.27088663, 0.28072135, 0.29055607, 0.30039079, 0.31022551,
0.32006023, 0.32989495, 0.33972967, 0.34956439, 0.35939911,
0.36923383, 0.37906855, 0.38890327, 0.39873799, 0.40857271,
0.41840743, 0.42824215, 0.43807687, 0.44791158, 0.4577463 ,
0.46758102, 0.47741574, 0.48725046, 0.49708518, 0.5069199 ,
0.51675462, 0.52658934, 0.53642406, 0.54625878, 0.5560935 ,
0.56592822, 0.57576294, 0.58559766, 0.59543238, 0.6052671 ,
0.61510182, 0.62493654, 0.63477126, 0.64460598, 0.6544407 ,
0.66427542])
y = NP.array([1.55753875e-02, 9.20142978e-03, -4.12695818e-02,
-2.41837688e-02, -5.67525066e-02, -1.33656085e-01,
-3.92932482e-02, -9.93828475e-02, -6.49584830e-02,
-1.87308770e-02, -8.49718973e-02, -5.02231643e-02,
-7.46907890e-02, -1.06046379e-01, -1.18749186e-01,
-1.13565601e-01, -4.63223010e-02, -1.12377331e-01,
-1.07838847e-01, -7.19103441e-02, -9.69987586e-02,
-4.05682698e-02, -5.52870296e-02, -6.34647682e-02,
-1.25110462e-01, -1.33551285e-01, -1.00283086e-01,
-8.78261775e-02, -1.22366831e-01, 3.73410434e-02,
-2.16346290e-02, -3.54011096e-02, -5.69699146e-02,
-1.50996730e-01, -8.64437893e-02, -1.06978618e-01,
-7.91596845e-02, -2.85348874e-02, -4.85092625e-02,
-7.40978718e-02, -5.35184331e-03, -1.41892433e-01,
-1.09891705e-01, -7.00225532e-02, 2.23670322e-02,
-6.42345473e-02, -1.14513963e-01, -1.77867692e-02,
-1.15476929e-01, -7.50609040e-02, -7.59665146e-02,
-3.89640033e-02, -6.71256706e-02, -1.46708071e-01,
-1.61539745e-02, -5.59726842e-02, -9.45299864e-02,
-9.83446389e-02, -9.92954075e-02, -1.03172548e-01,
-7.10593835e-02, -5.33846729e-02, 3.38634439e-02,
4.46895629e-01, 6.62468433e-01, 8.12499002e-02,
3.69865060e-01, 9.81505096e-01, 5.99039435e-01,
8.50967616e-02, -2.67737526e-02, -4.09759879e-02,
-1.44490153e-01, -2.30287295e-02, -2.02018376e-02,
-1.77040650e-03, -8.88574719e-02, -5.32815233e-02,
-6.93681315e-02, -7.17790946e-02, -1.42520413e-01,
1.50613356e-02, -8.37516487e-02, -9.93321687e-02,
-2.07297225e-02, -8.25655535e-02, 1.20367259e-02,
-1.47362985e-02, -4.14445363e-02, -5.36099076e-02,
-1.19483069e-01, -8.75750855e-02, -6.97698891e-02,
-9.45113301e-02, -5.86897917e-02, -5.38971759e-02,
-5.95922321e-02, 2.01958697e-02, -5.67614287e-03,
-4.83865663e-02, -7.87640661e-02, -1.30915985e-01,
-1.45986080e-01, -7.94370472e-02, -5.61923422e-02,
-9.50986519e-02, -9.20939595e-02, -4.11376543e-02,
-1.29739568e-01, -1.22105666e-01, -8.70440751e-02,
-8.68988112e-02, -1.08260494e-02, -7.90288299e-02,
-5.83453253e-02, -9.31360647e-02, -6.06538579e-02,
-3.26795094e-02, -1.24720916e-01, 2.33035088e-02,
4.42986703e-03, -1.70680247e-02, -1.65755842e-02,
2.17673182e-01, 4.91183043e-01, 9.86441195e-01,
1.60616803e+00, 9.89029050e-01, 1.42567885e+00,
1.86783028e+00, 8.98995638e-01, 1.65190771e-01,
-3.36427465e-02, -9.43350866e-02, -1.05553396e-01,
-5.37899788e-03, 6.19346742e-03, -7.22183101e-03,
-6.04815148e-02, -5.96636757e-02, -6.51778141e-03,
-8.12485069e-02, -2.17945613e-02, -6.93192706e-02,
-1.69927523e-01, -6.54176772e-02, -6.80938214e-02,
-1.08961679e-01, -2.78380569e-02, -6.92696646e-02,
-7.72257894e-02, -3.58553343e-02, -8.55760425e-02,
-5.15287071e-02, -3.54854837e-02, -1.05648793e-01,
-1.01979360e-01, -1.13662310e-01, -5.91211058e-02,
-4.10607755e-02, 3.95612381e-02, -3.21216823e-04,
-8.15489069e-02, -7.26812184e-02, 3.38813802e-03,
3.18101384e-02, 8.27607699e-03, -7.05176294e-02,
-1.20289661e-01, -5.37291467e-02, -4.78893109e-02,
-8.00910443e-02, -3.42484415e-02, -9.23061371e-02,
-6.11467026e-02, -5.12490347e-02, 5.45026129e-03,
-3.40601653e-02, -5.34633473e-02, -7.67978132e-02,
-5.27321585e-02, -7.51329362e-02, -9.02341753e-02,
-2.85653155e-02, -2.81812195e-02, 1.88794062e-01,
6.09963775e-01, 5.01888454e-01, 8.58971104e-02,
9.57417712e-02, 5.89331269e-01, 9.81935441e-01,
3.07364285e-01, -3.65563519e-02, -3.49376574e-02,
-1.34642854e-01, -1.64245758e-02, 6.07715966e-03,
-3.28341946e-02, -4.43529859e-02, -5.27672656e-02,
1.77064128e-02, 4.18064697e-03, 1.35755716e-02,
-3.81845832e-02, -4.23189811e-02, 2.52703223e-02,
-7.12039247e-02, 5.16605303e-02, 7.01981178e-03,
-6.71181753e-02, -2.03371570e-02, -1.20013859e-02,
-1.14060365e-01, -7.40282461e-02, -3.78084294e-02,
-1.20527424e-01, -6.82442710e-02, -1.02835357e-01,
2.08887681e-02, -1.96327586e-02, -4.01197970e-02,
-4.00166288e-02, 3.49126421e-02, -3.74765843e-02,
-3.12900096e-02, -1.17622502e-02, 7.02238753e-02,
1.81287788e-02, -6.88833147e-02, -8.13086852e-02,
-8.02919865e-02, -7.93092176e-02, -8.38449318e-03,
-1.22420341e-01, -1.62812844e-02, -9.11864787e-02,
-1.47517873e-02, 2.48801224e-02, -4.70457412e-02,
-8.15037489e-02, -6.75613731e-02, -8.35428163e-02,
-1.02822810e-01, -4.38780636e-02, -1.20214887e-01,
-5.27682826e-02, -1.31174894e-02, -1.30414739e-01,
-1.57103818e-02, -4.95527051e-02, 2.20772102e-02,
-1.40918205e-02, -5.67496903e-02, 1.55445077e-02,
-1.82226207e-02])
parameter_guess = [.3,-0.6,width]
pylab.plot(x,y)
pylab.draw() # time.sleep(0.1)
line = int(input("Main lines (1) or all lines (-1)? "))
set_model(line)
result = fit_data(x,y,hpf_model,parameter_guess)
best_fit = hpf_model(x,*result) # model y-values
pylab.plot(x,best_fit)
pylab.title(name)
pylab.xlabel("Relative frequency (MHz)")
pylab.ylabel("Amplitude (r.m.s = 1)")
print("Amplitude: %6.3f" % (result[0]*s[0]))
print("Position: %6.3f" % result[1])
print("Width: %6.3f" % result[2])
if __name__ == "__main__":
test()
| [
"pylab.title",
"pylab.ion",
"pylab.draw",
"Physics.Radiation.Lines.Molec.jpl.get_mol_metadata",
"pylab.plot",
"scipy.stats.distributions.norm.rvs",
"pylab.xlabel",
"Physics.Radiation.Lines.Molec.jpl.einstein_a",
"scipy.optimize.leastsq",
"numpy.linspace",
"numpy.array",
"Physics.Radiation.Line... | [((906, 933), 'numpy.vectorize', 'NP.vectorize', (['Math.gaussian'], {}), '(Math.gaussian)\n', (918, 933), True, 'import numpy as NP\n'), ((2292, 2332), 'numpy.linspace', 'NP.linspace', (['(-span / 2)', '(span / 2)', 'nsamps'], {}), '(-span / 2, span / 2, nsamps)\n', (2303, 2332), True, 'import numpy as NP\n'), ((2621, 2650), 'Physics.Radiation.Lines.Molec.jpl.get_mol_metadata', 'M.jpl.get_mol_metadata', (['(17002)'], {}), '(17002)\n', (2643, 2650), True, 'import Physics.Radiation.Lines.Molec as M\n'), ((4234, 4300), 'scipy.optimize.leastsq', 'opt.leastsq', (['error_func'], {'x0': 'parameter_guess', 'args': '(x, data, model)'}), '(error_func, x0=parameter_guess, args=(x, data, model))\n', (4245, 4300), True, 'import scipy.optimize as opt\n'), ((4805, 4816), 'pylab.ion', 'pylab.ion', ([], {}), '()\n', (4814, 4816), False, 'import pylab\n'), ((14048, 14064), 'pylab.plot', 'pylab.plot', (['x', 'y'], {}), '(x, y)\n', (14058, 14064), False, 'import pylab\n'), ((14066, 14078), 'pylab.draw', 'pylab.draw', ([], {}), '()\n', (14076, 14078), False, 'import pylab\n'), ((14280, 14303), 'pylab.plot', 'pylab.plot', (['x', 'best_fit'], {}), '(x, best_fit)\n', (14290, 14303), False, 'import pylab\n'), ((14305, 14322), 'pylab.title', 'pylab.title', (['name'], {}), '(name)\n', (14316, 14322), False, 'import pylab\n'), ((14325, 14365), 'pylab.xlabel', 'pylab.xlabel', (['"""Relative frequency (MHz)"""'], {}), "('Relative frequency (MHz)')\n", (14337, 14365), False, 'import pylab\n'), ((14368, 14405), 'pylab.ylabel', 'pylab.ylabel', (['"""Amplitude (r.m.s = 1)"""'], {}), "('Amplitude (r.m.s = 1)')\n", (14380, 14405), False, 'import pylab\n'), ((2377, 2404), 'scipy.stats.distributions.norm.rvs', 'dists.norm.rvs', ([], {'size': 'nsamps'}), '(size=nsamps)\n', (2391, 2404), True, 'import scipy.stats.distributions as dists\n'), ((2775, 2805), 'Physics.Radiation.Lines.Molec.jpl.parse_catalog_line', 'M.jpl.parse_catalog_line', (['line'], {}), '(line)\n', (2799, 
2805), True, 'import Physics.Radiation.Lines.Molec as M\n'), ((3243, 3298), 'Physics.Radiation.Lines.Molec.jpl.einstein_a', 'M.jpl.einstein_a', (['intens', 'freq', 'part_fn_300', 'g_up', 'E_lo'], {}), '(intens, freq, part_fn_300, g_up, E_lo)\n', (3259, 3298), True, 'import Physics.Radiation.Lines.Molec as M\n'), ((5092, 8568), 'numpy.array', 'NP.array', (['[-1.84357808, -1.83374336, -1.82390864, -1.81407392, -1.8042392, -\n 1.79440448, -1.78456976, -1.77473504, -1.76490032, -1.7550656, -\n 1.74523088, -1.73539616, -1.72556144, -1.71572672, -1.70589201, -\n 1.69605729, -1.68622257, -1.67638785, -1.66655313, -1.65671841, -\n 1.64688369, -1.63704897, -1.62721425, -1.61737953, -1.60754481, -\n 1.59771009, -1.58787537, -1.57804065, -1.56820593, -1.55837121, -\n 1.54853649, -1.53870177, -1.52886705, -1.51903233, -1.50919761, -\n 1.49936289, -1.48952817, -1.47969345, -1.46985874, -1.46002402, -\n 1.4501893, -1.44035458, -1.43051986, -1.42068514, -1.41085042, -\n 1.4010157, -1.39118098, -1.38134626, -1.37151154, -1.36167682, -\n 1.3518421, -1.34200738, -1.33217266, -1.32233794, -1.31250322, -\n 1.3026685, -1.29283378, -1.28299906, -1.27316434, -1.26332962, -\n 1.2534949, -1.24366018, -1.23382546, -1.22399075, -1.21415603, -\n 1.20432131, -1.19448659, -1.18465187, -1.17481715, -1.16498243, -\n 1.15514771, -1.14531299, -1.13547827, -1.12564355, -1.11580883, -\n 1.10597411, -1.09613939, -1.08630467, -1.07646995, -1.06663523, -\n 1.05680051, -1.04696579, -1.03713107, -1.02729635, -1.01746163, -\n 1.00762691, -0.99779219, -0.98795748, -0.97812276, -0.96828804, -\n 0.95845332, -0.9486186, -0.93878388, -0.92894916, -0.91911444, -\n 0.90927972, -0.899445, -0.88961028, -0.87977556, -0.86994084, -\n 0.86010612, -0.8502714, -0.84043668, -0.83060196, -0.82076724, -\n 0.81093252, -0.8010978, -0.79126308, -0.78142836, -0.77159364, -\n 0.76175892, -0.75192421, -0.74208949, -0.73225477, -0.72242005, -\n 0.71258533, -0.70275061, -0.69291589, -0.68308117, -0.67324645, -\n 0.66341173, 
-0.65357701, -0.64374229, -0.63390757, -0.62407285, -\n 0.61423813, -0.60440341, -0.59456869, -0.58473397, -0.57489925, -\n 0.56506453, -0.55522981, -0.54539509, -0.53556037, -0.52572565, -\n 0.51589093, -0.50605622, -0.4962215, -0.48638678, -0.47655206, -\n 0.46671734, -0.45688262, -0.4470479, -0.43721318, -0.42737846, -\n 0.41754374, -0.40770902, -0.3978743, -0.38803958, -0.37820486, -\n 0.36837014, -0.35853542, -0.3487007, -0.33886598, -0.32903126, -\n 0.31919654, -0.30936182, -0.2995271, -0.28969238, -0.27985766, -\n 0.27002295, -0.26018823, -0.25035351, -0.24051879, -0.23068407, -\n 0.22084935, -0.21101463, -0.20117991, -0.19134519, -0.18151047, -\n 0.17167575, -0.16184103, -0.15200631, -0.14217159, -0.13233687, -\n 0.12250215, -0.11266743, -0.10283271, -0.09299799, -0.08316327, -\n 0.07332855, -0.06349383, -0.05365911, -0.04382439, -0.03398968, -\n 0.02415496, -0.01432024, -0.00448552, 0.0053492, 0.01518392, 0.02501864,\n 0.03485336, 0.04468808, 0.0545228, 0.06435752, 0.07419224, 0.08402696, \n 0.09386168, 0.1036964, 0.11353112, 0.12336584, 0.13320056, 0.14303528, \n 0.15287, 0.16270472, 0.17253944, 0.18237416, 0.19220888, 0.2020436, \n 0.21187831, 0.22171303, 0.23154775, 0.24138247, 0.25121719, 0.26105191,\n 0.27088663, 0.28072135, 0.29055607, 0.30039079, 0.31022551, 0.32006023,\n 0.32989495, 0.33972967, 0.34956439, 0.35939911, 0.36923383, 0.37906855,\n 0.38890327, 0.39873799, 0.40857271, 0.41840743, 0.42824215, 0.43807687,\n 0.44791158, 0.4577463, 0.46758102, 0.47741574, 0.48725046, 0.49708518, \n 0.5069199, 0.51675462, 0.52658934, 0.53642406, 0.54625878, 0.5560935, \n 0.56592822, 0.57576294, 0.58559766, 0.59543238, 0.6052671, 0.61510182, \n 0.62493654, 0.63477126, 0.64460598, 0.6544407, 0.66427542]'], {}), '([-1.84357808, -1.83374336, -1.82390864, -1.81407392, -1.8042392, -\n 1.79440448, -1.78456976, -1.77473504, -1.76490032, -1.7550656, -\n 1.74523088, -1.73539616, -1.72556144, -1.71572672, -1.70589201, -\n 1.69605729, -1.68622257, -1.67638785, 
-1.66655313, -1.65671841, -\n 1.64688369, -1.63704897, -1.62721425, -1.61737953, -1.60754481, -\n 1.59771009, -1.58787537, -1.57804065, -1.56820593, -1.55837121, -\n 1.54853649, -1.53870177, -1.52886705, -1.51903233, -1.50919761, -\n 1.49936289, -1.48952817, -1.47969345, -1.46985874, -1.46002402, -\n 1.4501893, -1.44035458, -1.43051986, -1.42068514, -1.41085042, -\n 1.4010157, -1.39118098, -1.38134626, -1.37151154, -1.36167682, -\n 1.3518421, -1.34200738, -1.33217266, -1.32233794, -1.31250322, -\n 1.3026685, -1.29283378, -1.28299906, -1.27316434, -1.26332962, -\n 1.2534949, -1.24366018, -1.23382546, -1.22399075, -1.21415603, -\n 1.20432131, -1.19448659, -1.18465187, -1.17481715, -1.16498243, -\n 1.15514771, -1.14531299, -1.13547827, -1.12564355, -1.11580883, -\n 1.10597411, -1.09613939, -1.08630467, -1.07646995, -1.06663523, -\n 1.05680051, -1.04696579, -1.03713107, -1.02729635, -1.01746163, -\n 1.00762691, -0.99779219, -0.98795748, -0.97812276, -0.96828804, -\n 0.95845332, -0.9486186, -0.93878388, -0.92894916, -0.91911444, -\n 0.90927972, -0.899445, -0.88961028, -0.87977556, -0.86994084, -\n 0.86010612, -0.8502714, -0.84043668, -0.83060196, -0.82076724, -\n 0.81093252, -0.8010978, -0.79126308, -0.78142836, -0.77159364, -\n 0.76175892, -0.75192421, -0.74208949, -0.73225477, -0.72242005, -\n 0.71258533, -0.70275061, -0.69291589, -0.68308117, -0.67324645, -\n 0.66341173, -0.65357701, -0.64374229, -0.63390757, -0.62407285, -\n 0.61423813, -0.60440341, -0.59456869, -0.58473397, -0.57489925, -\n 0.56506453, -0.55522981, -0.54539509, -0.53556037, -0.52572565, -\n 0.51589093, -0.50605622, -0.4962215, -0.48638678, -0.47655206, -\n 0.46671734, -0.45688262, -0.4470479, -0.43721318, -0.42737846, -\n 0.41754374, -0.40770902, -0.3978743, -0.38803958, -0.37820486, -\n 0.36837014, -0.35853542, -0.3487007, -0.33886598, -0.32903126, -\n 0.31919654, -0.30936182, -0.2995271, -0.28969238, -0.27985766, -\n 0.27002295, -0.26018823, -0.25035351, -0.24051879, -0.23068407, -\n 0.22084935, 
-0.21101463, -0.20117991, -0.19134519, -0.18151047, -\n 0.17167575, -0.16184103, -0.15200631, -0.14217159, -0.13233687, -\n 0.12250215, -0.11266743, -0.10283271, -0.09299799, -0.08316327, -\n 0.07332855, -0.06349383, -0.05365911, -0.04382439, -0.03398968, -\n 0.02415496, -0.01432024, -0.00448552, 0.0053492, 0.01518392, 0.02501864,\n 0.03485336, 0.04468808, 0.0545228, 0.06435752, 0.07419224, 0.08402696, \n 0.09386168, 0.1036964, 0.11353112, 0.12336584, 0.13320056, 0.14303528, \n 0.15287, 0.16270472, 0.17253944, 0.18237416, 0.19220888, 0.2020436, \n 0.21187831, 0.22171303, 0.23154775, 0.24138247, 0.25121719, 0.26105191,\n 0.27088663, 0.28072135, 0.29055607, 0.30039079, 0.31022551, 0.32006023,\n 0.32989495, 0.33972967, 0.34956439, 0.35939911, 0.36923383, 0.37906855,\n 0.38890327, 0.39873799, 0.40857271, 0.41840743, 0.42824215, 0.43807687,\n 0.44791158, 0.4577463, 0.46758102, 0.47741574, 0.48725046, 0.49708518, \n 0.5069199, 0.51675462, 0.52658934, 0.53642406, 0.54625878, 0.5560935, \n 0.56592822, 0.57576294, 0.58559766, 0.59543238, 0.6052671, 0.61510182, \n 0.62493654, 0.63477126, 0.64460598, 0.6544407, 0.66427542])\n', (5100, 8568), True, 'import numpy as NP\n'), ((8796, 12780), 'numpy.array', 'NP.array', (['[0.0155753875, 0.00920142978, -0.0412695818, -0.0241837688, -0.0567525066, \n -0.133656085, -0.0392932482, -0.0993828475, -0.064958483, -0.018730877,\n -0.0849718973, -0.0502231643, -0.074690789, -0.106046379, -0.118749186,\n -0.113565601, -0.046322301, -0.112377331, -0.107838847, -0.0719103441, \n -0.0969987586, -0.0405682698, -0.0552870296, -0.0634647682, -\n 0.125110462, -0.133551285, -0.100283086, -0.0878261775, -0.122366831, \n 0.0373410434, -0.021634629, -0.0354011096, -0.0569699146, -0.15099673, \n -0.0864437893, -0.106978618, -0.0791596845, -0.0285348874, -\n 0.0485092625, -0.0740978718, -0.00535184331, -0.141892433, -0.109891705,\n -0.0700225532, 0.0223670322, -0.0642345473, -0.114513963, -0.0177867692,\n -0.115476929, -0.075060904, -0.0759665146, 
-0.0389640033, -0.0671256706,\n -0.146708071, -0.0161539745, -0.0559726842, -0.0945299864, -\n 0.0983446389, -0.0992954075, -0.103172548, -0.0710593835, -0.0533846729,\n 0.0338634439, 0.446895629, 0.662468433, 0.0812499002, 0.36986506, \n 0.981505096, 0.599039435, 0.0850967616, -0.0267737526, -0.0409759879, -\n 0.144490153, -0.0230287295, -0.0202018376, -0.0017704065, -0.0888574719,\n -0.0532815233, -0.0693681315, -0.0717790946, -0.142520413, 0.0150613356,\n -0.0837516487, -0.0993321687, -0.0207297225, -0.0825655535, \n 0.0120367259, -0.0147362985, -0.0414445363, -0.0536099076, -0.119483069,\n -0.0875750855, -0.0697698891, -0.0945113301, -0.0586897917, -\n 0.0538971759, -0.0595922321, 0.0201958697, -0.00567614287, -\n 0.0483865663, -0.0787640661, -0.130915985, -0.14598608, -0.0794370472, \n -0.0561923422, -0.0950986519, -0.0920939595, -0.0411376543, -\n 0.129739568, -0.122105666, -0.0870440751, -0.0868988112, -0.0108260494,\n -0.0790288299, -0.0583453253, -0.0931360647, -0.0606538579, -\n 0.0326795094, -0.124720916, 0.0233035088, 0.00442986703, -0.0170680247,\n -0.0165755842, 0.217673182, 0.491183043, 0.986441195, 1.60616803, \n 0.98902905, 1.42567885, 1.86783028, 0.898995638, 0.165190771, -\n 0.0336427465, -0.0943350866, -0.105553396, -0.00537899788, \n 0.00619346742, -0.00722183101, -0.0604815148, -0.0596636757, -\n 0.00651778141, -0.0812485069, -0.0217945613, -0.0693192706, -\n 0.169927523, -0.0654176772, -0.0680938214, -0.108961679, -0.0278380569,\n -0.0692696646, -0.0772257894, -0.0358553343, -0.0855760425, -\n 0.0515287071, -0.0354854837, -0.105648793, -0.10197936, -0.11366231, -\n 0.0591211058, -0.0410607755, 0.0395612381, -0.000321216823, -\n 0.0815489069, -0.0726812184, 0.00338813802, 0.0318101384, 0.00827607699,\n -0.0705176294, -0.120289661, -0.0537291467, -0.0478893109, -\n 0.0800910443, -0.0342484415, -0.0923061371, -0.0611467026, -\n 0.0512490347, 0.00545026129, -0.0340601653, -0.0534633473, -\n 0.0767978132, -0.0527321585, -0.0751329362, 
-0.0902341753, -\n 0.0285653155, -0.0281812195, 0.188794062, 0.609963775, 0.501888454, \n 0.0858971104, 0.0957417712, 0.589331269, 0.981935441, 0.307364285, -\n 0.0365563519, -0.0349376574, -0.134642854, -0.0164245758, 0.00607715966,\n -0.0328341946, -0.0443529859, -0.0527672656, 0.0177064128, \n 0.00418064697, 0.0135755716, -0.0381845832, -0.0423189811, 0.0252703223,\n -0.0712039247, 0.0516605303, 0.00701981178, -0.0671181753, -0.020337157,\n -0.0120013859, -0.114060365, -0.0740282461, -0.0378084294, -0.120527424,\n -0.068244271, -0.102835357, 0.0208887681, -0.0196327586, -0.040119797, \n -0.0400166288, 0.0349126421, -0.0374765843, -0.0312900096, -\n 0.0117622502, 0.0702238753, 0.0181287788, -0.0688833147, -0.0813086852,\n -0.0802919865, -0.0793092176, -0.00838449318, -0.122420341, -\n 0.0162812844, -0.0911864787, -0.0147517873, 0.0248801224, -0.0470457412,\n -0.0815037489, -0.0675613731, -0.0835428163, -0.10282281, -0.0438780636,\n -0.120214887, -0.0527682826, -0.0131174894, -0.130414739, -0.0157103818,\n -0.0495527051, 0.0220772102, -0.0140918205, -0.0567496903, 0.0155445077,\n -0.0182226207]'], {}), '([0.0155753875, 0.00920142978, -0.0412695818, -0.0241837688, -\n 0.0567525066, -0.133656085, -0.0392932482, -0.0993828475, -0.064958483,\n -0.018730877, -0.0849718973, -0.0502231643, -0.074690789, -0.106046379,\n -0.118749186, -0.113565601, -0.046322301, -0.112377331, -0.107838847, -\n 0.0719103441, -0.0969987586, -0.0405682698, -0.0552870296, -\n 0.0634647682, -0.125110462, -0.133551285, -0.100283086, -0.0878261775, \n -0.122366831, 0.0373410434, -0.021634629, -0.0354011096, -0.0569699146,\n -0.15099673, -0.0864437893, -0.106978618, -0.0791596845, -0.0285348874,\n -0.0485092625, -0.0740978718, -0.00535184331, -0.141892433, -\n 0.109891705, -0.0700225532, 0.0223670322, -0.0642345473, -0.114513963, \n -0.0177867692, -0.115476929, -0.075060904, -0.0759665146, -0.0389640033,\n -0.0671256706, -0.146708071, -0.0161539745, -0.0559726842, -\n 0.0945299864, -0.0983446389, 
-0.0992954075, -0.103172548, -0.0710593835,\n -0.0533846729, 0.0338634439, 0.446895629, 0.662468433, 0.0812499002, \n 0.36986506, 0.981505096, 0.599039435, 0.0850967616, -0.0267737526, -\n 0.0409759879, -0.144490153, -0.0230287295, -0.0202018376, -0.0017704065,\n -0.0888574719, -0.0532815233, -0.0693681315, -0.0717790946, -\n 0.142520413, 0.0150613356, -0.0837516487, -0.0993321687, -0.0207297225,\n -0.0825655535, 0.0120367259, -0.0147362985, -0.0414445363, -\n 0.0536099076, -0.119483069, -0.0875750855, -0.0697698891, -0.0945113301,\n -0.0586897917, -0.0538971759, -0.0595922321, 0.0201958697, -\n 0.00567614287, -0.0483865663, -0.0787640661, -0.130915985, -0.14598608,\n -0.0794370472, -0.0561923422, -0.0950986519, -0.0920939595, -\n 0.0411376543, -0.129739568, -0.122105666, -0.0870440751, -0.0868988112,\n -0.0108260494, -0.0790288299, -0.0583453253, -0.0931360647, -\n 0.0606538579, -0.0326795094, -0.124720916, 0.0233035088, 0.00442986703,\n -0.0170680247, -0.0165755842, 0.217673182, 0.491183043, 0.986441195, \n 1.60616803, 0.98902905, 1.42567885, 1.86783028, 0.898995638, \n 0.165190771, -0.0336427465, -0.0943350866, -0.105553396, -0.00537899788,\n 0.00619346742, -0.00722183101, -0.0604815148, -0.0596636757, -\n 0.00651778141, -0.0812485069, -0.0217945613, -0.0693192706, -\n 0.169927523, -0.0654176772, -0.0680938214, -0.108961679, -0.0278380569,\n -0.0692696646, -0.0772257894, -0.0358553343, -0.0855760425, -\n 0.0515287071, -0.0354854837, -0.105648793, -0.10197936, -0.11366231, -\n 0.0591211058, -0.0410607755, 0.0395612381, -0.000321216823, -\n 0.0815489069, -0.0726812184, 0.00338813802, 0.0318101384, 0.00827607699,\n -0.0705176294, -0.120289661, -0.0537291467, -0.0478893109, -\n 0.0800910443, -0.0342484415, -0.0923061371, -0.0611467026, -\n 0.0512490347, 0.00545026129, -0.0340601653, -0.0534633473, -\n 0.0767978132, -0.0527321585, -0.0751329362, -0.0902341753, -\n 0.0285653155, -0.0281812195, 0.188794062, 0.609963775, 0.501888454, \n 0.0858971104, 0.0957417712, 
0.589331269, 0.981935441, 0.307364285, -\n 0.0365563519, -0.0349376574, -0.134642854, -0.0164245758, 0.00607715966,\n -0.0328341946, -0.0443529859, -0.0527672656, 0.0177064128, \n 0.00418064697, 0.0135755716, -0.0381845832, -0.0423189811, 0.0252703223,\n -0.0712039247, 0.0516605303, 0.00701981178, -0.0671181753, -0.020337157,\n -0.0120013859, -0.114060365, -0.0740282461, -0.0378084294, -0.120527424,\n -0.068244271, -0.102835357, 0.0208887681, -0.0196327586, -0.040119797, \n -0.0400166288, 0.0349126421, -0.0374765843, -0.0312900096, -\n 0.0117622502, 0.0702238753, 0.0181287788, -0.0688833147, -0.0813086852,\n -0.0802919865, -0.0793092176, -0.00838449318, -0.122420341, -\n 0.0162812844, -0.0911864787, -0.0147517873, 0.0248801224, -0.0470457412,\n -0.0815037489, -0.0675613731, -0.0835428163, -0.10282281, -0.0438780636,\n -0.120214887, -0.0527682826, -0.0131174894, -0.130414739, -0.0157103818,\n -0.0495527051, 0.0220772102, -0.0140918205, -0.0567496903, 0.0155445077,\n -0.0182226207])\n', (8804, 12780), True, 'import numpy as NP\n'), ((3490, 3589), 'Physics.Radiation.Lines.Molec.jpl.quantum_label', 'M.jpl.quantum_label', (['(17002)', "line_data['q lower']", "line_data['qn format']", "line_data['deg free']"], {}), "(17002, line_data['q lower'], line_data['qn format'],\n line_data['deg free'])\n", (3509, 3589), True, 'import Physics.Radiation.Lines.Molec as M\n'), ((2877, 2976), 'Physics.Radiation.Lines.Molec.jpl.quantum_label', 'M.jpl.quantum_label', (['(17002)', "line_data['q lower']", "line_data['qn format']", "line_data['deg free']"], {}), "(17002, line_data['q lower'], line_data['qn format'],\n line_data['deg free'])\n", (2896, 2976), True, 'import Physics.Radiation.Lines.Molec as M\n'), ((3308, 3407), 'Physics.Radiation.Lines.Molec.jpl.quantum_label', 'M.jpl.quantum_label', (['(17002)', "line_data['q upper']", "line_data['qn format']", "line_data['deg free']"], {}), "(17002, line_data['q upper'], line_data['qn format'],\n line_data['deg free'])\n", (3327, 3407), 
True, 'import Physics.Radiation.Lines.Molec as M\n')] |
#Importing Python Libraries
from flask import Flask, render_template, request, jsonify, abort
import cf_deployment_tracker
import os
import json
import requests
import numpy as np
#Importing Tensorflow
import tensorflow as tf
#Importing the Watson Machine Learning Client API and the libraries for preprocessing the uploaded images
from watson_machine_learning_client import WatsonMachineLearningAPIClient
from keras.preprocessing import image
from keras.applications.inception_v3 import decode_predictions, preprocess_input
from io import BytesIO
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
# Base directory for bundled assets (not referenced in this chunk).
BASE = './assets/'
# File extensions accepted by the upload endpoint.
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
# Inception v3 initial parameters.
INPUT_LAYER = 'Mul'
INPUT_HEIGHT = 299
INPUT_WIDTH = 299
# Load your WML Credentials here.
# NOTE(review): as written, the braces below form a SET literal containing one
# placeholder string, not a dict — replace with the real credentials mapping
# (url/username/password/instance_id) before deploying.
wml_credentials={
    "Insert your WML credentials here"
}
#Creating an instance to run the WML API Client with the Tensorflow model
client = WatsonMachineLearningAPIClient(wml_credentials)
client._refresh_token()
#The REST API URL provided by your WML instance
scoring_url = "insert your scoring url here"
# On Bluemix, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8000
port = int(os.getenv('PORT', 8000))
@app.route('/')
def home():
    """Serve the single-page front end."""
    return render_template('index.html')
#Function which receives the image and post to the rest api
@app.route('/api/classify', methods=['POST'])
def upload_image():
    """Classify an image supplied as a JSON ``url`` or an uploaded ``file``.

    Returns the WML scoring result as JSON on success; aborts with 400 on
    any malformed or inaccessible input.
    """
    if request.json:
        # TODO validation.
        print(request.json['url'])
        # Spoof User-Agent as some websites don't like non-browser requests.
        headers = {'User-Agent':
                   'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/64.0.3282.140 Safari/537.36'}
        # BUG FIX: the original passed the undefined name `scoring_eader`,
        # which raised NameError on every URL request; use the headers dict
        # built above.
        resp = requests.get(request.json['url'], headers=headers)
        if resp.status_code == 200:
            scores = run_model(resp.content)
            return jsonify(scores)
        else:
            abort(400, 'Server could not access image at given url.')
    elif request.files:
        if 'file' not in request.files:
            abort(400, '"file" key not in part.')
        file = request.files['file']
        if not file.filename:
            abort(400, 'No selected file.')
        if file and allowed_file(file.filename):
            image_data = file.read()
            scores = run_model(image_data)
            return jsonify(scores)
        # Reject unsupported extensions explicitly instead of falling
        # through and returning None (which Flask would turn into a 500).
        abort(400, 'File type not allowed.')
    else:
        abort(400)
def allowed_file(filename):
    """Return True iff *filename* carries an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
#Function to preprocess the images, which includes sizing to the Inception V3 required dimensions and normalization
def adjust_image(image_contents, input_height=299, input_width=299,
                 input_mean=128, input_std=128):
    """Decode raw image bytes and normalize them for Inception V3.

    The image is decoded to 3 channels, resized to
    (input_height, input_width) with bilinear interpolation, and normalized
    as (pixel - input_mean) / input_std. Returns the resulting array with a
    leading batch dimension of 1.

    NOTE(review): this uses the TensorFlow 1.x graph/session API
    (tf.Session, tf.image.resize_bilinear) and will not run under TF 2.x
    without tf.compat.v1. Also, run_model() below preprocesses with the
    Keras helpers instead of calling this function — confirm whether this
    is still needed.
    """
    image_reader = tf.image.decode_image(image_contents, channels=3)
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    # Execute the small preprocessing graph in a throwaway session.
    with tf.Session() as ses:
        result = ses.run(normalized)
    return result
# Preprocess raw image bytes with the Keras helpers and score them against
# the deployed WML model.
def run_model(image_data):
    """Return the WML scoring result for the given raw image bytes."""
    buffer = BytesIO(image_data)
    pil_img = image.load_img(buffer, target_size=(299, 299))
    arr = image.img_to_array(pil_img)
    arr = np.expand_dims(arr, axis=0)
    # Inception V3 normalization; .tolist() makes the payload JSON-serializable.
    payload_values = preprocess_input(arr).tolist()
    scoring_data = {'values': payload_values}
    # Post the vectorized image to the scoring REST endpoint.
    return client.deployments.score(scoring_url, scoring_data)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger — disable in production.
    app.run(host='0.0.0.0', port=port, debug=True)
| [
"flask.render_template",
"keras.preprocessing.image.img_to_array",
"flask.Flask",
"keras.applications.inception_v3.preprocess_input",
"io.BytesIO",
"tensorflow.cast",
"cf_deployment_tracker.track",
"flask.jsonify",
"tensorflow.Session",
"tensorflow.image.resize_bilinear",
"watson_machine_learnin... | [((584, 613), 'cf_deployment_tracker.track', 'cf_deployment_tracker.track', ([], {}), '()\n', (611, 613), False, 'import cf_deployment_tracker\n'), ((621, 636), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (626, 636), False, 'from flask import Flask, render_template, request, jsonify, abort\n'), ((984, 1031), 'watson_machine_learning_client.WatsonMachineLearningAPIClient', 'WatsonMachineLearningAPIClient', (['wml_credentials'], {}), '(wml_credentials)\n', (1014, 1031), False, 'from watson_machine_learning_client import WatsonMachineLearningAPIClient\n'), ((1302, 1325), 'os.getenv', 'os.getenv', (['"""PORT"""', '(8000)'], {}), "('PORT', 8000)\n", (1311, 1325), False, 'import os\n'), ((1368, 1397), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1383, 1397), False, 'from flask import Flask, render_template, request, jsonify, abort\n'), ((2978, 3027), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['image_contents'], {'channels': '(3)'}), '(image_contents, channels=3)\n', (2999, 3027), True, 'import tensorflow as tf\n'), ((3047, 3080), 'tensorflow.cast', 'tf.cast', (['image_reader', 'tf.float32'], {}), '(image_reader, tf.float32)\n', (3054, 3080), True, 'import tensorflow as tf\n'), ((3101, 3132), 'tensorflow.expand_dims', 'tf.expand_dims', (['float_caster', '(0)'], {}), '(float_caster, 0)\n', (3115, 3132), True, 'import tensorflow as tf\n'), ((3147, 3215), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['dims_expander', '[input_height, input_width]'], {}), '(dims_expander, [input_height, input_width])\n', (3171, 3215), True, 'import tensorflow as tf\n'), ((3469, 3488), 'io.BytesIO', 'BytesIO', (['image_data'], {}), '(image_data)\n', (3476, 3488), False, 'from io import BytesIO\n'), ((3499, 3549), 'keras.preprocessing.image.load_img', 'image.load_img', (['image_data'], {'target_size': '(299, 299)'}), '(image_data, target_size=(299, 299))\n', (3513, 
3549), False, 'from keras.preprocessing import image\n'), ((3566, 3589), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (3584, 3589), False, 'from keras.preprocessing import image\n'), ((3608, 3643), 'numpy.expand_dims', 'np.expand_dims', (['input_image'], {'axis': '(0)'}), '(input_image, axis=0)\n', (3622, 3643), True, 'import numpy as np\n'), ((1922, 1978), 'requests.get', 'requests.get', (["request.json['url']"], {'headers': 'scoring_eader'}), "(request.json['url'], headers=scoring_eader)\n", (1934, 1978), False, 'import requests\n'), ((3243, 3277), 'tensorflow.subtract', 'tf.subtract', (['resized', '[input_mean]'], {}), '(resized, [input_mean])\n', (3254, 3277), True, 'import tensorflow as tf\n'), ((3301, 3313), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3311, 3313), True, 'import tensorflow as tf\n'), ((2079, 2094), 'flask.jsonify', 'jsonify', (['scores'], {}), '(scores)\n', (2086, 2094), False, 'from flask import Flask, render_template, request, jsonify, abort\n'), ((2121, 2178), 'flask.abort', 'abort', (['(400)', '"""Server could not access image at given url."""'], {}), "(400, 'Server could not access image at given url.')\n", (2126, 2178), False, 'from flask import Flask, render_template, request, jsonify, abort\n'), ((2586, 2596), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (2591, 2596), False, 'from flask import Flask, render_template, request, jsonify, abort\n'), ((3662, 3691), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['input_image'], {}), '(input_image)\n', (3678, 3691), False, 'from keras.applications.inception_v3 import decode_predictions, preprocess_input\n'), ((2255, 2292), 'flask.abort', 'abort', (['(400)', '""""file" key not in part."""'], {}), '(400, \'"file" key not in part.\')\n', (2260, 2292), False, 'from flask import Flask, render_template, request, jsonify, abort\n'), ((2372, 2403), 'flask.abort', 'abort', (['(400)', '"""No selected file."""'], 
{}), "(400, 'No selected file.')\n", (2377, 2403), False, 'from flask import Flask, render_template, request, jsonify, abort\n'), ((2552, 2567), 'flask.jsonify', 'jsonify', (['scores'], {}), '(scores)\n', (2559, 2567), False, 'from flask import Flask, render_template, request, jsonify, abort\n')] |
# pylint: disable=missing-function-docstring, missing-module-docstring/
def array_int32_1d_scalar_add( x:'int32[:]', a:'int32' ):
    """Add scalar *a* to every element of the 1-D int32 array *x*, in place."""
    x[:] = x + a

def array_int32_2d_C_scalar_add( x:'int32[:,:]', a:'int32' ):
    """Add scalar *a* to every element of the C-ordered 2-D int32 array *x*, in place."""
    x[:,:] = x + a

def array_int32_2d_F_add( x:'int32[:,:](order=F)', y:'int32[:,:](order=F)' ):
    """Element-wise in-place addition of the Fortran-ordered int32 array *y* into *x*."""
    x[:,:] = x + y

def array_int_1d_scalar_add( x:'int[:]', a:'int' ):
    """Add scalar *a* to every element of the 1-D int array *x*, in place."""
    x[:] = x + a

def array_real_1d_scalar_add( x:'real[:]', a:'real' ):
    """Add scalar *a* to every element of the 1-D real array *x*, in place."""
    x[:] = x + a

def array_real_2d_F_scalar_add( x:'real[:,:](order=F)', a:'real' ):
    """Add scalar *a* to every element of the Fortran-ordered 2-D real array *x*, in place."""
    x[:,:] = x + a

def array_real_2d_F_add( x:'real[:,:](order=F)', y:'real[:,:](order=F)' ):
    """Element-wise in-place addition of the Fortran-ordered real array *y* into *x*."""
    x[:,:] = x + y
def array_int32_2d_F_complex_3d_expr( x:'int32[:,:](order=F)', y:'int32[:,:](order=F)' ):
    """Overwrite the (2, 3) int32 array *x* with (x // y) * x + 5, in place."""
    from numpy import full, int32
    bias = full((2,3),5,order='F', dtype=int32)
    x[:] = (x // y) * x + bias
def array_real_1d_complex_3d_expr( x:'real[:]', y:'real[:]' ):
    """Overwrite the length-3 real array *x* with (x // y) * x + 5, in place."""
    from numpy import full
    bias = full(3,5)
    x[:] = (x // y) * x + bias
def fib(n: int) -> int:
    """Return the n-th term of this shifted Fibonacci sequence.

    Preserves the original mapping — fib(n) == 0 for n <= 1, fib(2) == 1,
    fib(n) == fib(n-1) + fib(n-2) otherwise — but computes it iteratively
    in O(n) time and O(1) space instead of the original O(2**n) double
    recursion (which also risked hitting the recursion limit).
    """
    if n <= 1:
        return 0
    prev, curr = 0, 1  # fib(1), fib(2)
    for _ in range(n - 2):
        prev, curr = curr, prev + curr
    return curr
| [
"numpy.full"
] | [((767, 806), 'numpy.full', 'full', (['(2, 3)', '(5)'], {'order': '"""F"""', 'dtype': 'int32'}), "((2, 3), 5, order='F', dtype=int32)\n", (771, 806), False, 'from numpy import full\n'), ((931, 941), 'numpy.full', 'full', (['(3)', '(5)'], {}), '(3, 5)\n', (935, 941), False, 'from numpy import full\n')] |
"""
This file defines a logger object to keep everything. This object is like a
dictionary. To use the logger, use
from lib.utils.vis_logger import logger
and then log any data using
logger.update(name0=value, name1=value, ...)
Various getters should also be defined here. These are config-dependent objects,
and should wrap the logger object. It performs config-dependent operations based
on data stored in logger.
To make a getter from a configuration object, use
getter = make_getter(cfg)
To use the matterport getter to get data for visualization with Tensorboard,
call
tb_data = getter.get_tensorboard_data()
"""
import torch
from torch.nn import functional as F
import numpy as np
from lib.utils.convert import tonumpy, totensor, unnormalize
from lib.utils.visualize import desc2RGB, draw_kps_torch, draw_corr, draw_corr_torch, draw_paired_img_desc_torch, \
draw_paired_desc_torch, cls2RGB
from lib.utils.convert import warp_torch, color_scale
class Logger:
    """
    Dict-like container for logging arbitrary named values.

    Any torch.Tensor stored through update() is detached and moved to the
    CPU first, so logging never keeps a computation graph or GPU memory
    alive.
    """

    def __init__(self):
        self.things = {}

    def __getitem__(self, key):
        return self.things[key]

    def update(self, **kargs):
        # detach and move to CPU any tensor before storing it
        for name, value in kargs.items():
            if isinstance(value, torch.Tensor):
                kargs[name] = value.detach().cpu()
        self.things.update(kargs)
# Module-level singleton: one shared Logger instance that the whole project
# imports ("from lib.utils.vis_logger import logger") and writes into.
logger = Logger()
# getter maker
def make_getter(cfg):
    """Return the getter instance named by cfg.GETTER.NAME, or None if unknown."""
    registry = {
        'Matterport': MatterportGetter,
        'MSNet': MSNetGetter,
        'MSNetV9': MSNetV9Getter,
        # 'MSNetV5': MSNetV5Getter,  # currently disabled
    }
    factory = registry.get(cfg.GETTER.NAME)
    return factory() if factory is not None else None
class MatterportGetter:
    """
    TensorBoard-data getter for the Matterport setup.

    Reads from the shared logger:
      - image0 / image1: (C, H, W) normalized tensors
      - desc0 / desc1: (C, H, W) descriptor maps
      - kps0 / kps1: (N, 2) matched keypoints, (x, y)
      - kps2: (N, 2) negative keypoints
    """

    def __init__(self, logger=logger):
        self.logger = logger

    def get_tensorboard_data(self, num_kps=20):
        """
        Build a dict of visualizations:
          - img0 / img1: unnormalized images, (3, H, W)
          - desc0 / desc1: descriptors rendered as RGB, (3, H, W)
          - keypoints: both images marked with num_kps matched keypoints
          - neg_keypoints: image pair marked with negative keypoints
          - corr: ground-truth correspondences drawn between the images
          - corr_false: false correspondences drawn between the images
        """
        log = self.logger
        # undo input normalization for display
        img0 = unnormalize(log['image0'])
        img1 = unnormalize(log['image1'])
        # render descriptors as RGB tensors
        d0, d1 = [totensor(desc2RGB(tonumpy(d)))
                  for d in (log['desc0'], log['desc1'])]
        kps0, kps1, kps2 = log['kps0'], log['kps1'], log['kps2']
        # sample a subset of keypoints to keep the plots readable
        sel = np.random.choice(kps0.shape[0], size=num_kps, replace=False)
        return {
            'img0': img0,
            'img1': img1,
            'desc0': d0,
            'desc1': d1,
            'keypoints': draw_kps_torch(img0, kps0[sel], img1, kps1[sel]),
            'neg_keypoints': draw_kps_torch(img0, kps0[sel], img1, kps2[sel]),
            'corr': draw_corr_torch(img0, kps0[sel], img1, kps1[sel]),
            'corr_false': draw_corr_torch(img0, kps0[sel], img1, kps2[sel]),
        }
class MSNetGetter:
    """
    TensorBoard-data getter for MSNet.

    Reads from the shared logger:
      - image0 / image1: (C, H, W) normalized tensors
      - descs0 / descs1: (D, H, W) descriptor maps
      - kps0 / kps1: (N, 2) matched keypoints; kps2: (N, 2) negatives
    """

    def __init__(self, logger=logger):
        self.logger = logger

    def get_tensorboard_data(self, num_kps=10):
        """
        Build a dict of visualizations:
          - imgs: the two unnormalized images concatenated side by side
          - descs: the two RGB-rendered descriptors concatenated side by side
          - corr: ground-truth correspondences
          - corr_false: false correspondences
        """
        log = self.logger
        # undo input normalization for display
        img0 = unnormalize(log['image0'])
        img1 = unnormalize(log['image1'])
        # render descriptors as RGB tensors
        d0 = totensor(desc2RGB(tonumpy(log['descs0'])))
        d1 = totensor(desc2RGB(tonumpy(log['descs1'])))
        kps0, kps1, kps2 = log['kps0'], log['kps1'], log['kps2']
        # sample a subset of keypoints to keep the plots readable
        sel = np.random.choice(kps0.shape[0], size=num_kps, replace=False)
        return {
            'imgs': torch.cat([img0, img1], dim=2),
            'descs': torch.cat([d0, d1], dim=2),
            'corr': draw_corr_torch(img0, kps0[sel], img1, kps1[sel]),
            'corr_false': draw_corr_torch(img0, kps0[sel], img1, kps2[sel]),
        }
# class MSNetV5Getter(MSNetGetter):
# def get_tensorboard_data(self, num_kps=20):
# """
# In addition to descritors and images, we also visualize
# - attention map
# - attention map, scale version
# - scale map
# """
# data = MSNetGetter.get_tensorboard_data(self)
#
# # map (C, H, W)
# map_left = self.logger['map_left']
# map_right = self.logger['map_right']
#
# # scale map (H, W), value range (0, infty)
# scale_map = self.logger['scale']
#
# # scales, a list of numbers
# scales = self.logger['scales']
#
# # mask (H, W)
# mask = self.logger['mask']
#
# # H, from first to second
# H = self.logger['H'].cpu().detach().numpy()
#
# # interpolate predicted scale map to full size
# h, w = self.logger['image0'].size()[-2:]
# # this weird thing is needed because interpolation accepts batch data
# [map_left, map_right] = [F.interpolate(x[None], size=(w, h), mode='bilinear')[0] for x in [map_left, map_right]]
# [map_left_color, map_right_color] = [color_scale(x) for x in [map_left, map_right]]
#
# # warp and devide, and mask
# # map_left_warped = warp_torch(map_left_pred, H)
# # scale_map_pred = map_right_pred / (map_left_warped + 1e-6)
#
# # mask not mapped regions
# # scale_map_pred *= mask
#
# # scale everything to (0, 1)
# for x in [scale_map]:
# x /= x.max()
#
# data['map_left'] = map_left
# data['map_left0'] = toheapmap_torch(map_left[0])
# data['map_left1'] = toheapmap_torch(map_left[1])
# data['map_left2'] = toheapmap_torch(map_left[2])
# data['map_left3'] = toheapmap_torch(map_left[3])
# data['map_right'] = map_right
# data['map_left_color'] = map_left_color
# data['map_right_color'] = map_right_color
# data['scale'] = scale_map
# # data['scale_map_pred'] = scale_map_pred
# data['mask'] = mask
#
# return data
class MSNetV9Getter:
    """
    TensorBoard-data getter for MSNetV9 (scale classification).

    Reads from the shared logger:
      - image0 / image1: (C, H, W) normalized tensors
      - scale: (H, W) ground-truth scale-ratio map
      - scale_pred: (num_cls, H', W') predicted per-class scores
      - msk: mask of pixels that have a correspondence
    """

    def __init__(self, logger=logger):
        self.logger = logger

    def _quantize_scale(self, scale):
        # Quantize the continuous scale ratio into 3 classes, in place.
        # Order matters: the > 1.5 bucket must be assigned before the
        # middle bucket so already-relabeled values are not re-bucketed.
        scale[scale > 1.5] = 2
        scale[scale < 0.75] = 0
        scale[(scale >= 0.75) * (scale <= 1.5)] = 1
        return scale.long()

    def get_tensorboard_data(self, num_kps=20):
        """
        Build a dict of visualizations:
          - img0 / img1: unnormalized images, (3, H, W)
          - scale_pred: predicted scale classes rendered as RGB
          - scale: quantized ground-truth scale classes rendered as RGB
          - msk: correspondence mask
        """
        log = self.logger
        pred = log['scale_pred']
        num_cls = pred.shape[0]
        # argmax over the class dimension, then colorize
        pred_rgb = cls2RGB(torch.argmax(pred, dim=0).long(), num_cls)
        gt_rgb = cls2RGB(self._quantize_scale(log['scale']), num_cls)
        return {
            'img0': unnormalize(log['image0']),
            'img1': unnormalize(log['image1']),
            'scale_pred': pred_rgb,
            'msk': log['msk'],
            'scale': gt_rgb,
        }
| [
"lib.utils.visualize.cls2RGB",
"numpy.random.choice",
"torch.argmax",
"lib.utils.visualize.draw_kps_torch",
"lib.utils.convert.tonumpy",
"lib.utils.convert.totensor",
"lib.utils.visualize.draw_corr_torch",
"torch.cat",
"lib.utils.convert.unnormalize"
] | [((3220, 3239), 'lib.utils.convert.unnormalize', 'unnormalize', (['image0'], {}), '(image0)\n', (3231, 3239), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((3257, 3276), 'lib.utils.convert.unnormalize', 'unnormalize', (['image1'], {}), '(image1)\n', (3268, 3276), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((3530, 3578), 'numpy.random.choice', 'np.random.choice', (['N'], {'size': 'num_kps', 'replace': '(False)'}), '(N, size=num_kps, replace=False)\n', (3546, 3578), True, 'import numpy as np\n'), ((3627, 3687), 'lib.utils.visualize.draw_kps_torch', 'draw_kps_torch', (['image0', 'kps0[indices]', 'image1', 'kps1[indices]'], {}), '(image0, kps0[indices], image1, kps1[indices])\n', (3641, 3687), False, 'from lib.utils.visualize import desc2RGB, draw_kps_torch, draw_corr, draw_corr_torch, draw_paired_img_desc_torch, draw_paired_desc_torch, cls2RGB\n'), ((3749, 3809), 'lib.utils.visualize.draw_kps_torch', 'draw_kps_torch', (['image0', 'kps0[indices]', 'image1', 'kps2[indices]'], {}), '(image0, kps0[indices], image1, kps2[indices])\n', (3763, 3809), False, 'from lib.utils.visualize import desc2RGB, draw_kps_torch, draw_corr, draw_corr_torch, draw_paired_img_desc_torch, draw_paired_desc_torch, cls2RGB\n'), ((3868, 3929), 'lib.utils.visualize.draw_corr_torch', 'draw_corr_torch', (['image0', 'kps0[indices]', 'image1', 'kps1[indices]'], {}), '(image0, kps0[indices], image1, kps1[indices])\n', (3883, 3929), False, 'from lib.utils.visualize import desc2RGB, draw_kps_torch, draw_corr, draw_corr_torch, draw_paired_img_desc_torch, draw_paired_desc_torch, cls2RGB\n'), ((3991, 4052), 'lib.utils.visualize.draw_corr_torch', 'draw_corr_torch', (['image0', 'kps0[indices]', 'image1', 'kps2[indices]'], {}), '(image0, kps0[indices], image1, kps2[indices])\n', (4006, 4052), False, 'from lib.utils.visualize import desc2RGB, draw_kps_torch, draw_corr, draw_corr_torch, draw_paired_img_desc_torch, draw_paired_desc_torch, cls2RGB\n'), 
((5484, 5503), 'lib.utils.convert.unnormalize', 'unnormalize', (['image0'], {}), '(image0)\n', (5495, 5503), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((5521, 5540), 'lib.utils.convert.unnormalize', 'unnormalize', (['image1'], {}), '(image1)\n', (5532, 5540), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((5555, 5589), 'torch.cat', 'torch.cat', (['[image0, image1]'], {'dim': '(2)'}), '([image0, image1], dim=2)\n', (5564, 5589), False, 'import torch\n'), ((5753, 5787), 'torch.cat', 'torch.cat', (['[descs0, descs1]'], {'dim': '(2)'}), '([descs0, descs1], dim=2)\n', (5762, 5787), False, 'import torch\n'), ((5868, 5916), 'numpy.random.choice', 'np.random.choice', (['N'], {'size': 'num_kps', 'replace': '(False)'}), '(N, size=num_kps, replace=False)\n', (5884, 5916), True, 'import numpy as np\n'), ((5975, 6036), 'lib.utils.visualize.draw_corr_torch', 'draw_corr_torch', (['image0', 'kps0[indices]', 'image1', 'kps1[indices]'], {}), '(image0, kps0[indices], image1, kps1[indices])\n', (5990, 6036), False, 'from lib.utils.visualize import desc2RGB, draw_kps_torch, draw_corr, draw_corr_torch, draw_paired_img_desc_torch, draw_paired_desc_torch, cls2RGB\n'), ((6098, 6159), 'lib.utils.visualize.draw_corr_torch', 'draw_corr_torch', (['image0', 'kps0[indices]', 'image1', 'kps2[indices]'], {}), '(image0, kps0[indices], image1, kps2[indices])\n', (6113, 6159), False, 'from lib.utils.visualize import desc2RGB, draw_kps_torch, draw_corr, draw_corr_torch, draw_paired_img_desc_torch, draw_paired_desc_torch, cls2RGB\n'), ((9387, 9415), 'lib.utils.visualize.cls2RGB', 'cls2RGB', (['scale_pred', 'num_cls'], {}), '(scale_pred, num_cls)\n', (9394, 9415), False, 'from lib.utils.visualize import desc2RGB, draw_kps_torch, draw_corr, draw_corr_torch, draw_paired_img_desc_torch, draw_paired_desc_torch, cls2RGB\n'), ((9622, 9645), 'lib.utils.visualize.cls2RGB', 'cls2RGB', (['scale', 'num_cls'], {}), '(scale, num_cls)\n', (9629, 9645), 
False, 'from lib.utils.visualize import desc2RGB, draw_kps_torch, draw_corr, draw_corr_torch, draw_paired_img_desc_torch, draw_paired_desc_torch, cls2RGB\n'), ((9790, 9809), 'lib.utils.convert.unnormalize', 'unnormalize', (['image0'], {}), '(image0)\n', (9801, 9809), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((9827, 9846), 'lib.utils.convert.unnormalize', 'unnormalize', (['image1'], {}), '(image1)\n', (9838, 9846), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((3413, 3424), 'lib.utils.convert.totensor', 'totensor', (['d'], {}), '(d)\n', (3421, 3424), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((3352, 3362), 'lib.utils.convert.tonumpy', 'tonumpy', (['x'], {}), '(x)\n', (3359, 3362), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((5667, 5682), 'lib.utils.convert.tonumpy', 'tonumpy', (['descs0'], {}), '(descs0)\n', (5674, 5682), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((5720, 5735), 'lib.utils.convert.tonumpy', 'tonumpy', (['descs1'], {}), '(descs1)\n', (5727, 5735), False, 'from lib.utils.convert import tonumpy, totensor, unnormalize\n'), ((9327, 9358), 'torch.argmax', 'torch.argmax', (['scale_pred'], {'dim': '(0)'}), '(scale_pred, dim=0)\n', (9339, 9358), False, 'import torch\n')] |
# Get imports
import numpy as np
import cv2
import glob
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def get_original_perspective(image, top_margin, bottom_margin):
    """
    Unwarp a bird's-eye-view image back to the original camera perspective.

    The trapezoid (src) spans `top_margin` pixels either side of center at
    two-thirds image height and `bottom_margin` pixels either side at the
    bottom; the rectangle (dst) is the bottom span extended to the full
    height. The inverse transform (dst -> src) is applied.
    """
    warped = np.copy(image)
    height, width = warped.shape[0], warped.shape[1]
    center = width // 2
    horizon = 2 * height // 3
    trapezoid = np.float32([
        (center - top_margin, horizon),
        (center + top_margin, horizon),
        (center + bottom_margin, height),
        (center - bottom_margin, height),
    ])
    rectangle = np.float32([
        (center - bottom_margin, 0),
        (center + bottom_margin, 0),
        (center + bottom_margin, height),
        (center - bottom_margin, height),
    ])
    # map from the rectified (dst) space back to the camera (src) space
    Minv = cv2.getPerspectiveTransform(rectangle, trapezoid)
    return cv2.warpPerspective(warped, Minv, (width, height),
                               flags=cv2.INTER_LINEAR)
| [
"numpy.copy",
"cv2.warpPerspective",
"numpy.float32",
"cv2.getPerspectiveTransform"
] | [((239, 253), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (246, 253), True, 'import numpy as np\n'), ((393, 532), 'numpy.float32', 'np.float32', (['[(mid_x - top_margin, top_y), (mid_x + top_margin, top_y), (mid_x +\n bottom_margin, y_dir), (mid_x - bottom_margin, y_dir)]'], {}), '([(mid_x - top_margin, top_y), (mid_x + top_margin, top_y), (\n mid_x + bottom_margin, y_dir), (mid_x - bottom_margin, y_dir)])\n', (403, 532), True, 'import numpy as np\n'), ((576, 712), 'numpy.float32', 'np.float32', (['[(mid_x - bottom_margin, 0), (mid_x + bottom_margin, 0), (mid_x +\n bottom_margin, y_dir), (mid_x - bottom_margin, y_dir)]'], {}), '([(mid_x - bottom_margin, 0), (mid_x + bottom_margin, 0), (mid_x +\n bottom_margin, y_dir), (mid_x - bottom_margin, y_dir)])\n', (586, 712), True, 'import numpy as np\n'), ((759, 796), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (786, 796), False, 'import cv2\n'), ((830, 900), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'Minv', '(x_dir, y_dir)'], {'flags': 'cv2.INTER_LINEAR'}), '(img, Minv, (x_dir, y_dir), flags=cv2.INTER_LINEAR)\n', (849, 900), False, 'import cv2\n')] |
#
# GtkHelp.py -- customized Gtk3 widgets
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import os.path
import math
import random
import time
import numpy as np
from ginga.misc import Bunch, Callback
from ginga.fonts import font_asst
import ginga.icons
import ginga.toolkit
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk # noqa
from gi.repository import Gdk # noqa
from gi.repository import GdkPixbuf # noqa
from gi.repository import GObject # noqa
from gi.repository import Pango # noqa
import cairo
# tell ginga which widget toolkit backend is active
ginga.toolkit.use('gtk3')
# path to our icons (directory containing the ginga.icons package)
icondir = os.path.split(ginga.icons.__file__)[0]
# target-type ids used when registering drag-and-drop targets
DND_TARGET_TYPE_TEXT = 0
DND_TARGET_TYPE_URIS = 1
class WidgetMask(object):
    """
    Mixin that lets a widget suppress the callback for one programmatic
    value change.

    sconnect() registers the real callback behind our cb() shim; change()
    arms the mask so the next signal delivered to cb() is swallowed.
    """

    def __init__(self, *args):
        self.cb_fn = None
        self.cb_args = []
        self.cb_kwdargs = {}
        self.connected = False
        self.changed = False

    def sconnect(self, signal, cb_fn, *args, **kwdargs):
        # remember the user callback and hook our shim to the signal
        self.cb_fn = cb_fn
        self.cb_args = args
        self.cb_kwdargs = kwdargs
        self.connect(signal, self.cb)
        self.connected = True

    def change(self):
        # arm the mask only if a callback is actually connected
        if self.connected:
            self.changed = True

    def cb(self, *args):
        # swallow exactly one event after change() was called
        if self.changed:
            self.changed = False
            return
        call_args = list(args) + list(self.cb_args)
        return self.cb_fn(*call_args, **dict(self.cb_kwdargs))
class TopLevel(Gtk.Window):
    # Thin subclass of Gtk.Window; gives the toolkit wrapper a dedicated
    # top-level window type to extend later.
    def __init__(self):
        Gtk.Window.__init__(self)
class CheckButton(WidgetMask, Gtk.CheckButton):
    """Gtk.CheckButton whose programmatic set_active() is masked."""

    def __init__(self, *args, **kwdargs):
        WidgetMask.__init__(self)
        Gtk.CheckButton.__init__(self, *args, **kwdargs)

    def set_active(self, newval):
        # arm the mask so the resulting "toggled" signal is swallowed
        if self.get_active() != newval:
            self.change()
        super(CheckButton, self).set_active(newval)
class ToggleButton(WidgetMask, Gtk.ToggleButton):
    """Gtk.ToggleButton whose programmatic set_active() is masked."""

    def __init__(self, *args, **kwdargs):
        WidgetMask.__init__(self)
        Gtk.ToggleButton.__init__(self, *args, **kwdargs)

    def set_active(self, newval):
        # arm the mask so the resulting "toggled" signal is swallowed
        if self.get_active() != newval:
            self.change()
        super(ToggleButton, self).set_active(newval)

    def toggle(self):
        # flip the state without arming the mask
        super(ToggleButton, self).set_active(not self.get_active())
class RadioButton(WidgetMask, Gtk.RadioButton):
    """Gtk.RadioButton whose programmatic set_active() is masked."""

    def __init__(self, *args, **kwdargs):
        WidgetMask.__init__(self)
        Gtk.RadioButton.__init__(self, *args, **kwdargs)

    def set_active(self, newval):
        # arm the mask so the resulting "toggled" signal is swallowed
        if self.get_active() != newval:
            self.change()
        super(RadioButton, self).set_active(newval)

    def toggle(self):
        # flip the state without arming the mask
        super(RadioButton, self).set_active(not self.get_active())
class CheckMenuItem(WidgetMask, Gtk.CheckMenuItem):
    """Gtk.CheckMenuItem whose programmatic set_active() is masked."""

    def __init__(self, *args, **kwdargs):
        WidgetMask.__init__(self)
        Gtk.CheckMenuItem.__init__(self, *args, **kwdargs)

    def set_active(self, newval):
        # arm the mask so the resulting "toggled" signal is swallowed
        if self.get_active() != newval:
            self.change()
        super(CheckMenuItem, self).set_active(newval)
class SpinButton(WidgetMask, Gtk.SpinButton):
    """Gtk.SpinButton whose programmatic set_value() is masked."""

    def __init__(self, *args, **kwdargs):
        WidgetMask.__init__(self)
        Gtk.SpinButton.__init__(self, *args, **kwdargs)

    def set_value(self, newval):
        # arm the mask so the resulting "value-changed" signal is swallowed
        if self.get_value() != newval:
            self.change()
        super(SpinButton, self).set_value(newval)
class HScale(WidgetMask, Gtk.HScale):
    """Horizontal Gtk scale whose programmatic set_value() is masked."""

    def __init__(self, *args, **kwdargs):
        WidgetMask.__init__(self)
        Gtk.HScale.__init__(self, *args, **kwdargs)

    def set_value(self, newval):
        # arm the mask so the resulting "value-changed" signal is swallowed
        if self.get_value() != newval:
            self.change()
        super(HScale, self).set_value(newval)
class VScale(WidgetMask, Gtk.VScale):
    """Vertical Gtk scale whose programmatic set_value() is masked."""

    def __init__(self, *args, **kwdargs):
        WidgetMask.__init__(self)
        Gtk.VScale.__init__(self, *args, **kwdargs)

    def set_value(self, newval):
        # arm the mask so the resulting "value-changed" signal is swallowed
        if self.get_value() != newval:
            self.change()
        super(VScale, self).set_value(newval)
class ComboBox(WidgetMask, Gtk.ComboBox):
    """
    Gtk.ComboBox with masked set_active() plus convenience helpers for
    manipulating a single-text-column list model.
    """

    def __init__(self, *args, **kwdargs):
        WidgetMask.__init__(self)
        Gtk.ComboBox.__init__(self, *args, **kwdargs)

    def set_active(self, newval):
        # arm the mask so the resulting "changed" signal is swallowed
        if self.get_active() != newval:
            self.change()
        super(ComboBox, self).set_active(newval)

    def insert_alpha(self, text):
        # insert text keeping the model in alphabetical order
        model = self.get_model()
        row = (text, )
        idx = 0
        for idx in range(len(model)):
            if model[idx][0] > text:
                model.insert(idx, row)
                return
        # text sorts after every existing entry (or the model is empty)
        model.insert(idx + 1, row)

    def insert_text(self, idx, text):
        self.get_model().insert(idx, (text, ))

    def delete_alpha(self, text):
        # remove the first row whose text matches
        model = self.get_model()
        for i in range(len(model)):
            if model[i][0] == text:
                del model[i]
                return

    def clear(self):
        self.get_model().clear()

    def show_text(self, text):
        # select the first row whose text matches
        model = self.get_model()
        for i in range(len(model)):
            if model[i][0] == text:
                self.set_active(i)
                return
class Notebook(WidgetMask, Gtk.Notebook):
    """Gtk.Notebook whose programmatic set_current_page() is masked."""

    def __init__(self, *args, **kwdargs):
        WidgetMask.__init__(self)
        Gtk.Notebook.__init__(self, *args, **kwdargs)

    def set_group_id(self, id):
        # Gtk3 replaced integer group ids with string group names
        super(Notebook, self).set_group_name(str(id))

    def set_current_page(self, new_idx):
        # arm the mask so the resulting "switch-page" signal is swallowed
        if self.get_current_page() != new_idx:
            self.change()
        super(Notebook, self).set_current_page(new_idx)
class MultiDragDropTreeView(Gtk.TreeView):
    '''TreeView that captures mouse events to make drag and drop work
    properly

    See: https://gist.github.com/kevinmehall/278480#file-multiple-selection-dnd-class-py
    '''

    def __init__(self):
        super(MultiDragDropTreeView, self).__init__()
        self.connect('button_press_event', self.on_button_press)
        self.connect('button_release_event', self.on_button_release)
        self.defer_select = False

    def on_button_press(self, widget, event):
        # When the press lands on an already-selected row (no modifier
        # keys held), defer the selection change so a multi-row drag
        # keeps the full selection instead of collapsing to one row.
        target = self.get_path_at_pos(int(event.x), int(event.y))
        if not target:
            return
        if event.type != Gdk.EventType.BUTTON_PRESS:
            return
        if event.state & (Gdk.ModifierType.CONTROL_MASK |
                          Gdk.ModifierType.SHIFT_MASK):
            return
        if not self.get_selection().path_is_selected(target[0]):
            return
        # temporarily disable selection updates
        self.get_selection().set_select_function(lambda *ignore: False)
        self.defer_select = target[0]

    def on_button_release(self, widget, event):
        # re-enable selection; apply the deferred click only if it was a
        # plain click on the same row, not a drag
        self.get_selection().set_select_function(lambda *ignore: True)
        target = self.get_path_at_pos(int(event.x), int(event.y))
        if (self.defer_select and target and
                self.defer_select == target[0] and
                not (event.x == 0 and event.y == 0)):
            self.set_cursor(target[0], target[1], False)
        self.defer_select = False
class MDISubWindow(Callback.Callbacks):
    # A movable sub-window used inside MDIWidget.  Wraps a client `widget`
    # with a title bar (close / minimize / maximize buttons plus `label`)
    # inside an EventBox frame, and exposes 'close', 'maximize' and
    # 'minimize' callbacks for the container to act on.
    def __init__(self, widget, label):
        super(MDISubWindow, self).__init__()
        self.widget = widget
        vbox = Gtk.VBox()
        vbox.set_border_width(4)
        # title bar: close ("x"), minimize ("v"), maximize ("^") buttons
        hbox = Gtk.HBox()
        close = Gtk.Button("x")
        maxim = Gtk.Button("^")
        minim = Gtk.Button("v")
        hbox.pack_start(close, False, False, 0)
        hbox.pack_start(minim, False, False, 0)
        hbox.pack_start(maxim, False, False, 0)
        # EventBox around the label gives a paintable background and lets
        # the container catch button presses on the title for dragging
        evbox = Gtk.EventBox()
        evbox.add(label)
        modify_bg(evbox, "gray90")
        self.label = label
        self.evbox = evbox
        hbox.pack_start(evbox, True, True, 2)
        vbox.pack_start(hbox, False, False, 0)
        vbox.pack_start(widget, True, True, 4)
        # what size does the widget want to be?
        rect = widget.get_allocation()
        self.x, self.y, wd, ht = rect.x, rect.y, rect.width, rect.height
        ## wd = widget.get_preferred_width()
        ## ht = widget.get_preferred_height()
        ## wd, ht = widget.get_size_request()
        # enforce a minimum starting size of 300x300
        self.width, self.height = max(wd, 300), max(ht, 300)
        frame = Gtk.EventBox()
        frame.set_size_request(self.width, self.height)
        frame.props.visible_window = True
        frame.set_border_width(0)
        modify_bg(frame, "gray70")
        self.frame = frame
        frame.add(vbox)
        frame.show_all()
        for name in ('close', 'maximize', 'minimize'):
            self.enable_callback(name)
        maxim.connect('clicked', lambda *args: self.make_callback('maximize'))
        minim.connect('clicked', lambda *args: self.make_callback('minimize'))
        close.connect('clicked', lambda *args: self.make_callback('close'))
    def raise_(self):
        # raise this sub-window's GDK window to the top of the stacking order
        window = self.frame.get_window()
        if window is not None:
            window.raise_()
    def lower(self):
        # drop this sub-window's GDK window to the bottom of the stacking order
        window = self.frame.get_window()
        if window is not None:
            window.lower()
    def focus(self):
        self.frame.grab_focus()
class MDIWidget(Gtk.Layout):
"""
Multiple Document Interface type widget for Gtk.
"""
def __init__(self):
Gtk.Layout.__init__(self)
self.children = []
self.cur_index = -1
self.selected_child = None
self.kbdmouse_mask = 0
self.cascade_offset = 50
self.minimized_width = 150
self.delta_px = 50
mask = self.get_events()
self.set_events(mask |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.FOCUS_CHANGE_MASK |
Gdk.EventMask.STRUCTURE_MASK |
Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK |
Gdk.EventMask.SCROLL_MASK)
self.connect("motion_notify_event", self.motion_notify_event)
self.connect("button_press_event", self.button_press_event)
self.connect("button_release_event", self.button_release_event)
modify_bg(self, "gray50")
def add_subwin(self, subwin):
self.children.append(subwin)
subwin.evbox.connect("button_press_event", self.select_child_cb, subwin)
subwin.frame.connect("button_press_event", self.start_resize_cb, subwin)
subwin.add_callback('maximize', lambda *args: self.maximize_page(subwin))
subwin.add_callback('minimize', lambda *args: self.minimize_page(subwin))
self.put(subwin.frame, subwin.x, subwin.y)
# note: seem to need a slight delay to let the widget be mapped
# in order to accurately determine its position and size
#self.update_subwin_position(subwin)
#self.update_subwin_size(subwin)
GObject.timeout_add(1000, self.update_subwin_position, subwin)
GObject.timeout_add(1500, self.update_subwin_size, subwin)
self._update_area_size()
def append_page(self, widget, label):
subwin = MDISubWindow(widget, label)
# pick a random spot to place the window initially
rect = self.get_allocation()
wd, ht = rect.width, rect.height
x = random.randint(self.cascade_offset, # nosec
max(self.cascade_offset + 10, wd // 2))
y = random.randint(self.cascade_offset, # nosec
max(self.cascade_offset + 10, ht // 2))
subwin.x, subwin.y = x, y
self.add_subwin(subwin)
return subwin
def set_tab_reorderable(self, w, tf):
pass
def set_tab_detachable(self, w, tf):
pass
def get_tab_label(self, w):
return None
def page_num(self, widget):
index, subwin = self._widget_to_index(widget)
return index
def get_nth_page(self, idx):
if 0 <= idx < len(self.children):
subwin = self.children[idx]
return subwin.widget
return None
def set_current_page(self, idx):
subwin = self.children[idx]
subwin.raise_()
self.cur_index = idx
def get_current_page(self):
return self.cur_index
def _widget_to_index(self, widget):
index = 0
for subwin in self.children:
if subwin.widget == widget:
return index, subwin
index += 1
return -1, None
def remove_page(self, idx):
subwin = self.children[idx]
self.remove(subwin.widget)
def remove(self, widget):
idx, subwin = self._widget_to_index(widget)
if subwin is not None:
self.children.remove(subwin)
self.cur_index = -1
frame = subwin.frame
super(MDIWidget, self).remove(frame)
widget.unparent()
self._update_area_size()
def get_widget_position(self, widget):
rect = widget.get_allocation()
x, y = rect.x, rect.y
return x, y
def get_widget_size(self, widget):
rect = widget.get_allocation()
width, height = rect.width, rect.height
return width, height
def update_subwin_position(self, subwin):
rect = subwin.frame.get_allocation()
x, y, = rect.x, rect.y
subwin.x, subwin.y = x, y
def update_subwin_size(self, subwin):
rect = subwin.frame.get_allocation()
wd, ht = rect.width, rect.height
subwin.width, subwin.height = wd, ht
def raise_widget(self, subwin):
subwin.raise_()
def select_child_cb(self, layout, event, subwin):
x_root, y_root = event.x_root, event.y_root
x, y = self.get_widget_position(subwin.frame)
subwin.x, subwin.y = x, y
# make this the selected widget
idx = self.page_num(subwin.widget)
if idx >= 0:
self.set_current_page(idx)
self.selected_child = Bunch.Bunch(subwin=subwin, action='move',
x_origin=x, y_origin=y,
x_root=x_root, y_root=y_root)
return True
def start_resize_cb(self, widget, event, subwin):
self.update_subwin_size(subwin)
x_root, y_root = event.x_root, event.y_root
x, y = widget.translate_coordinates(self, event.x, event.y)
rect = subwin.frame.get_allocation()
x1, y1, wd, ht = rect.x, rect.y, rect.width, rect.height
x2, y2 = x1 + wd, y1 + ht
subwin.x, subwin.y = x1, y1
subwin.width, subwin.height = wd, ht
updates = set([])
if abs(x - x2) < self.delta_px:
# right side
if abs(y - y2) < self.delta_px:
# lower right corner
origin = 'lr'
updates = set(['w', 'h'])
elif abs(y - y1) < self.delta_px:
origin = 'ur'
updates = set(['w', 'h', 'y'])
else:
origin = 'r'
updates = set(['w'])
elif abs(x - x1) < self.delta_px:
# left side
if abs(y - y2) < self.delta_px:
# lower left corner
origin = 'll'
updates = set(['w', 'h', 'x'])
elif abs(y - y1) < self.delta_px:
origin = 'ul'
updates = set(['w', 'h', 'x', 'y'])
else:
origin = 'l'
updates = set(['w', 'x'])
elif abs(y - y2) < self.delta_px:
# bottom
origin = 'b'
updates = set(['h'])
else:
origin = 't'
updates = set(['h', 'y'])
self.selected_child = Bunch.Bunch(subwin=subwin, action='resize',
x_origin=x1, y_origin=y1,
wd=wd, ht=ht,
x_root=x_root, y_root=y_root,
origin=origin, updates=updates)
return True
def button_press_event(self, widget, event):
button = self.kbdmouse_mask
if event.button != 0:
button |= 0x1 << (event.button - 1)
return True
def _update_area_size(self):
rect = self.get_allocation()
mx_wd, mx_ht = rect.width, rect.height
for subwin in self.children:
rect = subwin.frame.get_allocation()
x, y, wd, ht = rect.x, rect.y, rect.width, rect.height
mx_wd, mx_ht = max(mx_wd, x + wd), max(mx_ht, y + ht)
self.set_size(mx_wd, mx_ht)
def _resize(self, bnch, x_root, y_root):
subwin = bnch.subwin
updates = bnch.updates
dx, dy = x_root - bnch.x_root, y_root - bnch.y_root
wd = bnch.wd
if 'w' in updates:
wd = int(wd + dx)
ht = bnch.ht
if 'h' in updates:
ht = int(ht + dy)
if 'x' in updates or 'y' in updates:
x = bnch.x_origin
if 'x' in updates:
x = int(x + dx)
if x < bnch.x_origin:
wd = bnch.wd + abs(dx)
else:
wd = bnch.wd + -abs(dx)
y = bnch.y_origin
if 'y' in updates:
y = int(y + dy)
if y < bnch.y_origin:
ht = bnch.ht + abs(dy)
else:
ht = bnch.ht + -abs(dy)
# this works better if it is not self.move_page()
self.move(subwin.frame, x, y)
if 'w' in updates or 'h' in updates:
# this works better if it is not self.resize_page()
subwin.frame.set_size_request(wd, ht)
self._update_area_size()
def button_release_event(self, widget, event):
x_root, y_root = event.x_root, event.y_root
button = self.kbdmouse_mask
if event.button != 0:
button |= 0x1 << (event.button - 1)
if self.selected_child is not None:
bnch = self.selected_child
subwin = bnch.subwin
if bnch.action == 'move':
x = int(subwin.x + (x_root - bnch.x_root))
y = int(subwin.y + (y_root - bnch.y_root))
self.move_page(subwin, x, y)
elif bnch.action == 'resize':
self._resize(bnch, x_root, y_root)
self.update_subwin_position(subwin)
# NOTE: necessary for wrapped widget to remember position
self.move_page(subwin, subwin.x, subwin.y)
self.update_subwin_size(subwin)
# NOTE: necessary for wrapped widget to remember size
self.resize_page(subwin, subwin.width, subwin.height)
self.selected_child = None
self._update_area_size()
return True
def motion_notify_event(self, widget, event):
button = self.kbdmouse_mask
x_root, y_root, state = event.x_root, event.y_root, event.state
if state & Gdk.ModifierType.BUTTON1_MASK:
button |= 0x1
elif state & Gdk.ModifierType.BUTTON2_MASK:
button |= 0x2
elif state & Gdk.ModifierType.BUTTON3_MASK:
button |= 0x4
if (button & 0x1) and (self.selected_child is not None):
bnch = self.selected_child
subwin = bnch.subwin
if bnch.action == 'move':
x = int(subwin.x + (x_root - bnch.x_root))
y = int(subwin.y + (y_root - bnch.y_root))
# this works better if it is not self.move_page()
self.move(subwin.frame, x, y)
elif bnch.action == 'resize':
self._resize(bnch, x_root, y_root)
self._update_area_size()
return True
def tile_pages(self):
# calculate number of rows and cols, try to maintain a square
# TODO: take into account the window geometry
num_widgets = len(self.children)
rows = int(round(math.sqrt(num_widgets)))
cols = rows
if rows**2 < num_widgets:
cols += 1
# find out how big each window should be
rect = self.get_allocation()
width, height = rect.width, rect.height
wd, ht = width // cols, height // rows
# and move and resize them into place
for i in range(0, rows):
for j in range(0, cols):
index = i * cols + j
if index < num_widgets:
subwin = self.children[index]
self.resize_page(subwin, wd, ht)
x, y = j * wd, i * ht
self.move_page(subwin, x, y)
subwin.raise_()
self._update_area_size()
def cascade_pages(self):
x, y = 0, 0
for subwin in self.children:
self.move_page(subwin, x, y)
subwin.raise_()
x += self.cascade_offset
y += self.cascade_offset
self._update_area_size()
def use_tabs(self, tf):
pass
def move_page(self, subwin, x, y):
self.move(subwin.frame, x, y)
subwin.x, subwin.y = x, y
def resize_page(self, subwin, wd, ht):
subwin.frame.set_size_request(wd, ht)
subwin.width, subwin.height = wd, ht
def maximize_page(self, subwin):
rect = self.get_allocation()
wd, ht = rect.width, rect.height
subwin.raise_()
self.resize_page(subwin, wd, ht)
self.move_page(subwin, 0, 0)
self._update_area_size()
def minimize_page(self, subwin):
rect = self.get_allocation()
height = rect.height
rect = subwin.frame.get_allocation()
x = rect.x
rect = subwin.label.get_allocation()
ht = rect.height
self.resize_page(subwin, self.minimized_width, ht)
self.move_page(subwin, x, height - ht)
subwin.lower()
self._update_area_size()
    def close_page(self, subwin):
        # The actual widget teardown happens elsewhere; here we only
        # refresh the scrollable area extents.
        self._update_area_size()
class Splitter(Gtk.Layout):
    """
    Splitter type widget for Gtk.

    Children are laid out along one axis ('horizontal' or 'vertical')
    with a small draggable "thumb" grip between each adjacent pair;
    dragging a thumb resizes the child before it.
    """
    def __init__(self, orientation='horizontal', thumb_px=8):
        # orientation: 'horizontal' lays children left-to-right; any
        #   other value stacks them top-to-bottom
        # thumb_px: thickness in pixels of the draggable grip
        Gtk.Layout.__init__(self)
        self.orientation = orientation
        # per-child extents along the split axis
        self._sizes = []
        # last seen (width, height); used to skip redundant relayouts
        self._dims = (0, 0)
        self.children = []
        # grip widgets; thumbs[i] sits between children[i] and children[i+1]
        self.thumbs = []
        self.thumb_px = thumb_px
        # length-to-thickness ratio of the grip image
        self.thumb_aspect = 3.25
        self.kbdmouse_mask = 0
        mask = self.get_events()
        self.set_events(mask |
                        Gdk.EventMask.ENTER_NOTIFY_MASK |
                        Gdk.EventMask.LEAVE_NOTIFY_MASK
                        )
        self.connect("size-allocate", self._size_allocate_cb)
        modify_bg(self, "gray50")

    def add_widget(self, widget):
        """Append `widget` as the last child, creating a grip between it
        and the previous child (no grip is needed for the first child).
        """
        rect = self.get_allocation()
        wd, ht = rect.width, rect.height
        self.children.append(widget)
        if len(self.children) == 1:
            # first child simply fills the whole area
            widget.set_size_request(wd, ht)
            self.put(widget, 0, 0)
            sizes = self._sizes
            if len(sizes) == 0:
                pos = wd if self.orientation == 'horizontal' else ht
                sizes = [pos]
            self.set_sizes(sizes)
        else:
            # build the grip: a dotted image inside an event box
            if self.orientation == 'horizontal':
                thumbfile, _w, _h = ('vdots.png', self.thumb_px,
                                     int(self.thumb_px * self.thumb_aspect))
            else:
                thumbfile, _w, _h = ('hdots.png',
                                     int(self.thumb_px * self.thumb_aspect),
                                     self.thumb_px)
            iconfile = os.path.join(icondir, thumbfile)
            pixbuf = pixbuf_new_from_file_at_size(iconfile, _w, _h)
            image = Gtk.Image.new_from_pixbuf(pixbuf)
            thumb = Gtk.EventBox()
            thumb.set_visible_window(True)
            thumb.add(image)
            modify_bg(thumb, "gray90")
            # the thumb index is passed to the drag callbacks so they
            # know which child boundary is being adjusted
            i = len(self.thumbs)
            self.thumbs.append(thumb)
            thumb.connect("button_press_event", self._start_resize_cb, i)
            thumb.connect("button_release_event", self._stop_resize_cb, i)
            thumb.connect("motion_notify_event", self._do_resize_cb, i)
            thumb.connect("enter_notify_event", self._thumb_enter_cb)
            thumb.connect("leave_notify_event", self._thumb_leave_cb)
            self.put(thumb, 0, 0)
            self.put(widget, 0, 0)
            sizes = self._sizes
            if len(sizes) < len(self.children):
                pos = wd if self.orientation == 'horizontal' else ht
                sizes.append(pos)
            self.set_sizes(sizes)
        self.show_all()

    def _thumb_enter_cb(self, widget, event):
        # change the cursor to a resize one when we enter the thumb area
        display = self.get_display()
        cur_name = ('ew-resize' if self.orientation == 'horizontal'
                    else 'ns-resize')
        cursor = Gdk.Cursor.new_from_name(display, cur_name)
        win = self.get_window()
        if win is not None:
            win.set_cursor(cursor)

    def _thumb_leave_cb(self, widget, event):
        # change the cursor to the normal one when we leave the thumb area
        display = self.get_display()
        cursor = Gdk.Cursor.new_from_name(display, 'default')
        win = self.get_window()
        if win is not None:
            win.set_cursor(cursor)

    def get_sizes(self):
        """Return a copy of the per-child sizes along the split axis."""
        return list(self._sizes)

    def set_sizes(self, sizes):
        """Lay out children and thumbs according to `sizes` (one entry
        per child, extents along the split axis).  Sizes are clamped so
        the total fits the current allocation; the last child absorbs
        any leftover space.
        """
        sizes = list(sizes)
        ## if sizes == self._sizes:
        ##     return
        if self.get_realized():
            rect = self.get_allocation()
            wd, ht = rect.width, rect.height
        else:
            # not yet realized: fall back to the natural size request
            min_req, nat_req = self.get_preferred_size()
            wd, ht = nat_req.width, nat_req.height
        x, y = 0, 0
        # calc space needed by all necessary thumbs
        remaining_thumb_space = max(0, len(self.children) - 1) * self.thumb_px
        new_sizes = []
        thumbs, widgets = [], []
        for num, child in enumerate(self.children):
            off = sizes[num]
            if self.orientation == 'horizontal':
                if num == 0:
                    widgets.append((child, 0, 0, off, ht))
                    new_sizes.append(off)
                    x += off
                else:
                    # place the grip before this child
                    thumb = self.thumbs[num - 1]
                    thumbs.append((thumb, x, y, self.thumb_px, ht))
                    x += self.thumb_px
                    remaining_thumb_space -= self.thumb_px
                    # clamp so later children and grips still fit
                    rest = max(0, wd - (x + remaining_thumb_space))
                    if num < len(self.children) - 1:
                        rest = min(off, rest)
                    widgets.append((child, x, y, rest, ht))
                    new_sizes.append(rest)
                    x += rest
            else:
                if num == 0:
                    widgets.append((child, 0, 0, wd, off))
                    new_sizes.append(off)
                    y += off
                else:
                    thumb = self.thumbs[num - 1]
                    thumbs.append((thumb, x, y, wd, self.thumb_px))
                    y += self.thumb_px
                    remaining_thumb_space -= self.thumb_px
                    rest = max(0, ht - (y + remaining_thumb_space))
                    if num < len(self.children) - 1:
                        rest = min(off, rest)
                    widgets.append((child, x, y, wd, rest))
                    new_sizes.append(rest)
                    y += rest
        self._sizes = new_sizes
        assert len(self._sizes) == len(self.children)
        # apply the computed geometry in a second pass
        for child, x, y, wd, ht in widgets:
            self._move_resize_child(child, x, y, wd, ht)
        for thumb, x, y, wd, ht in thumbs:
            self._move_resize_child(thumb, x, y, wd, ht)

    def remove(self, child):
        """Remove `child` and its associated grip, then relayout.

        Raises ValueError if `child` is not one of our children.
        """
        if child not in self.children:
            raise ValueError("widget is not one of our children")
        idx = self.children.index(child)
        if len(self.children) > 1:
            if idx > 0:
                # not first child
                thumb = self.thumbs.pop(idx - 1)
            else:
                # first child: drop the grip that followed it
                thumb = self.thumbs.pop(0)
            super(Splitter, self).remove(thumb)
        self._sizes.pop(idx)
        self.children.remove(child)
        super(Splitter, self).remove(child)
        self.set_sizes(self._sizes)

    def _move_resize_child(self, child, x, y, wd, ht):
        # Move/resize `child` only if its geometry actually changed,
        # then force a redraw of its window.
        rect = child.get_allocation()
        modified = False
        if (rect.x, rect.y) != (x, y):
            modified = True
            self.move(child, x, y)
        if (rect.width, rect.height) != (wd, ht):
            modified = True
            child.set_size_request(wd, ht)
            alloc = Gdk.Rectangle()
            alloc.x, alloc.y, alloc.width, alloc.height = x, y, wd, ht
            child.size_allocate(alloc)
            #child.set_clip(alloc)
            win = child.get_window()
            if win is not None:
                win.invalidate_rect(None, True)
                win.resize(wd, ht)
        if modified:
            # don't think this should be necessary, but just in case
            child.queue_draw()
            #child.queue_resize()
            child.queue_allocate()

    def _calc_size(self, i, pos):
        # Convert an absolute drag position `pos` into the new size of
        # child `i` by subtracting the extents of everything before it.
        # NOTE(review): for i >= 1 the thumb count looks under-counted
        # by one (max(0, i - 1) vs i preceding thumbs) — verify.
        sizes = list(self._sizes)
        n = sum([sizes[j] for j in range(0, i)])
        n += max(0, i - 1) * self.thumb_px
        return max(0, pos - n)

    def _start_resize_cb(self, widget, event, i):
        # begin dragging thumb `i`: resize child `i` to the press point
        x_root, y_root = event.x_root, event.y_root
        x, y = widget.translate_coordinates(self, event.x, event.y)
        pos = x if self.orientation == 'horizontal' else y
        sizes = list(self._sizes)
        sizes[i] = self._calc_size(i, pos)
        self.set_sizes(sizes)
        return True

    def _stop_resize_cb(self, widget, event, i):
        # finish dragging thumb `i`: apply the final size
        x_root, y_root = event.x_root, event.y_root
        x, y = widget.translate_coordinates(self, event.x, event.y)
        pos = x if self.orientation == 'horizontal' else y
        sizes = list(self._sizes)
        sizes[i] = self._calc_size(i, pos)
        self.set_sizes(sizes)
        return True

    def _do_resize_cb(self, widget, event, i):
        # live-resize while button 1 is held down over thumb `i`
        button = self.kbdmouse_mask
        x_root, y_root, state = event.x_root, event.y_root, event.state
        x, y = widget.translate_coordinates(self, event.x, event.y)
        if state & Gdk.ModifierType.BUTTON1_MASK:
            button |= 0x1
        elif state & Gdk.ModifierType.BUTTON2_MASK:
            button |= 0x2
        elif state & Gdk.ModifierType.BUTTON3_MASK:
            button |= 0x4
        if button == 0x1:
            pos = x if self.orientation == 'horizontal' else y
            sizes = list(self._sizes)
            sizes[i] = self._calc_size(i, pos)
            self.set_sizes(sizes)
        return True

    def _size_allocate_cb(self, widget, rect):
        # re-layout children when our own allocation changes size
        x, y, wd, ht = rect.x, rect.y, rect.width, rect.height
        dims = (wd, ht)
        if dims == self._dims:
            # same size as before; nothing to do
            return
        super(Splitter, self).set_size(wd, ht)
        self._dims = dims
        self.set_sizes(self._sizes)
        return True
class Dial(Gtk.DrawingArea):
    """A rotary dial widget drawn with cairo.

    The dial shows a circular face with an arrow knob; the user turns
    it by dragging or scrolling.  Emits "value-changed" when the value
    changes (subject to `tracking`).
    """
    __gtype_name__ = "Dial"
    __gsignals__ = {
        "value-changed": (GObject.SignalFlags.RUN_FIRST, GObject.TYPE_NONE,
                          (GObject.TYPE_FLOAT,)),
    }
    def __init__(self):
        Gtk.DrawingArea.__init__(self)
        self.set_can_focus(True)
        # widget size and face center, updated on configure events
        self.dims = np.array((0, 0))
        self.center = np.array((0.0, 0.0))
        # colors (r, g, b) in the 0..1 range
        self.bg = (0.94, 0.94, 0.94)
        self.fg = (0.4, 0.4, 0.4)
        self.knob_fg = (1.0, 1.0, 1.0)
        self.knob_fill = (0.2, 0.2, 0.2)
        self.focus_fg = (0.2, 0.6, 0.9)
        self.fontname = 'Sans Serif'
        self.fontsize = 10.0
        self._has_focus = False
        # off-screen cairo surface; None until first configure event
        self.surface = None
        # draw labels
        self.draw_scale = True
        # how to rotate the labels
        self.label_style = 1
        # list of (value, angle) or (label, value, angle) tuples
        self.values = []
        # 0: no value text; 1: at center; 2: above the face
        self.draw_value_pos = 0
        self.value = 0.0
        self.value_text = str(self.value)
        # internal state
        self._dragging = False
        # if True, emit "value-changed" continuously while dragging
        self.tracking = False
        self.wrap = False
        self.angle = 0.0
        # coordinate-system tweaks used by subclasses
        self.ang_offset = 0.0
        self.ang_invert = False
        # degrees moved per scroll click
        self.turn_delta = 6.0
        self.min_ang_deg = 0.0
        self.max_ang_deg = 360.0
        self.connect("draw", self.draw_event)
        self.connect("configure-event", self.configure_event)
        self.set_app_paintable(True)
        # prevents extra redraws, because we manually redraw on a size
        # change
        self.set_redraw_on_allocate(False)
        self.connect('button-press-event', self.button_press_event)
        self.connect('button-release-event', self.button_release_event)
        self.connect('motion-notify-event', self.motion_notify_event)
        self.connect('scroll-event', self.scroll_event)
        self.connect('focus_in_event', self.focus_event, True)
        self.connect('focus_out_event', self.focus_event, False)
        mask = self.get_events()
        self.set_events(mask |
                        Gdk.EventMask.BUTTON_PRESS_MASK |
                        Gdk.EventMask.BUTTON_RELEASE_MASK |
                        Gdk.EventMask.POINTER_MOTION_MASK |
                        Gdk.EventMask.SCROLL_MASK |
                        Gdk.EventMask.FOCUS_CHANGE_MASK |
                        Gdk.EventMask.EXPOSURE_MASK)
    def button_press_event(self, widget, event):
        # left button starts a drag and immediately snaps to the cursor
        if event.button == 1:
            self._dragging = True
            self._calc_action(event.x, event.y)
        return True
    def button_release_event(self, widget, event):
        self._dragging = False
        self._calc_action(event.x, event.y)
        return True
    def motion_notify_event(self, widget, event):
        # Are we holding down the left mouse button?
        if not self._dragging:
            return False
        self._calc_action(event.x, event.y)
        return True
    def scroll_event(self, widget, event):
        # NOTE(review): get_scroll_info() can return direction=None,
        # which would make this comparison raise TypeError on Python 3
        # — verify against the event sources in use.
        degrees, direction = get_scroll_info(event)
        if direction < 180.0:
            self.turn_ccw()
        else:
            self.turn_cw()
        self.draw()
        return True
    def focus_event(self, widget, event, tf):
        # redraw so the focus ring color updates
        self._has_focus = tf
        self.draw()
        return True
    def _calc_action(self, x, y):
        # translate a pointer position into a dial angle and act on it
        ang_deg = np.degrees(np.arctan2(x - self.center[0],
                                       y - self.center[1]) + np.pi * 1.5)
        ang_deg = self.normalize_angle(ang_deg + self.ang_offset)
        if self.ang_invert:
            ang_deg = 360.0 - ang_deg
        self.angle_action(x, y, ang_deg)
    def draw(self):
        """Render the dial face, scale labels, knob and value text onto
        the backing surface, then queue a widget redraw.
        """
        if self.surface is None:
            # not configured yet
            return
        cr = cairo.Context(self.surface)
        cr.select_font_face(self.fontname)
        cr.set_font_size(self.fontsize)
        # fill background
        wd, ht = self.dims
        cr.rectangle(0, 0, wd, ht)
        r, g, b = self.bg
        cr.set_source_rgba(r, g, b)
        cr.fill()
        r, g, b = self.fg
        cr.set_source_rgba(r, g, b)
        cr.set_line_width(2.0)
        cr.save()
        # work in coordinates centered on the dial face
        cx, cy = self.center
        cr.translate(cx, cy)
        cr.move_to(0, 0)
        # draw circle
        cradius = min(cx, cy)
        cradius *= 0.66
        cr.arc(0, 0, cradius, 0, 2 * np.pi)
        cr.fill()
        # outline color signals keyboard focus
        if self._has_focus:
            r, g, b = self.focus_fg
        else:
            r, g, b = (0.0, 0.0, 0.0)
        cr.set_source_rgba(r, g, b)
        cr.new_path()
        cr.set_line_width(2)
        cr.arc(0, 0, cradius, 0, 2 * np.pi)
        cr.stroke()
        cr.new_path()
        cr.set_line_width(1)
        if self.draw_scale:
            # draw tick markers and labels around the face
            cr.new_path()
            cr.set_source_rgba(0.0, 0.0, 0.0)
            for tup in self.values:
                if len(tup) == 3:
                    label, value, theta = tup
                else:
                    value, theta = tup
                    label = str(value)
                if self.ang_invert:
                    theta = 360.0 - theta
                theta_pos = self.normalize_angle(theta + 90.0 - self.ang_offset)
                theta_rad = np.radians(theta_pos)
                a, b, wd, ht, i, j = cr.text_extents(label)
                crad2 = cradius + ht / 2.0
                if self.label_style == 0:
                    crad2 += wd
                # draw small filled dot as position marker
                cx, cy = (np.sin(theta_rad) * cradius,
                          np.cos(theta_rad) * cradius)
                cr.move_to(cx, cy)
                r, g, b = self.knob_fill
                cr.set_source_rgba(r, g, b)
                cr.arc(cx, cy, 2, 0, 2 * np.pi)
                cr.stroke_preserve()
                cr.fill()
                # draw label
                cx, cy = np.sin(theta_rad) * crad2, np.cos(theta_rad) * crad2
                cr.move_to(cx, cy)
                text_rad = np.arctan2(cx, cy)
                # label_style: 0=upright, 1=facing center, 2=tangential
                if self.label_style == 0:
                    text_rad = 0.0
                elif self.label_style == 1:
                    text_rad += np.pi
                elif self.label_style == 2:
                    text_rad += - np.pi / 2
                cr.save()
                cr.translate(cx, cy)
                cr.rotate(-text_rad)
                if self.label_style == 1:
                    cr.move_to(-wd / 2, 0)
                cr.show_text(label)
                #cr.rotate(text_rad)
                cr.restore()
        cr.new_path()
        cr.move_to(0, 0)
        # rotate the context so the knob points at the current angle
        theta = self.angle
        if self.ang_invert:
            theta = 360.0 - theta
        theta = self.normalize_angle(theta - self.ang_offset)
        cr.rotate(-np.radians(theta))
        # draw knob (pointer)
        r, g, b = self.knob_fg
        cr.set_source_rgba(r, g, b)
        crad2 = cradius
        cr.new_path()
        x1, y1, x2, y2 = -crad2, 0, crad2, 0
        cx1, cy1, cx2, cy2 = self.calc_vertexes(x1, y1, x2, y2,
                                                arrow_length=crad2)
        cr.move_to(x2, y2)
        cr.line_to(cx1, cy1)
        #cr.line_to(0, 0)
        cr.line_to(cx2, cy2)
        cr.close_path()
        r, g, b = self.knob_fg
        cr.set_source_rgba(r, g, b)
        cr.stroke_preserve()
        r, g, b = self.knob_fill
        cr.set_source_rgba(r, g, b)
        cr.fill()
        # small hub circle at the center of the face
        cr.move_to(0, 0)
        cr.arc(0, 0, abs(cx1 + cx2) * 2.1, 0, 2 * np.pi)
        cr.stroke_preserve()
        cr.fill()
        text = self.value_text
        if self.draw_value_pos == 1:
            # value text at the center, in the background color
            r, g, b = self.bg
            cr.set_source_rgba(r, g, b)
            cr.move_to(0, 0)
            cr.show_text(text)
        cr.restore()
        if self.draw_value_pos == 2:
            # value text centered above the face
            a, b, wd, ht, i, j = cr.text_extents(text)
            r, g, b = self.fg
            cr.set_source_rgba(r, g, b)
            x, y = self.center
            cr.move_to(x - wd / 2, (y - cradius) * 0.5 + ht)
            cr.show_text(text)
        cr.move_to(0, 0)
        self.update_widget()
    def normalize_angle(self, ang_deg):
        # map any angle into the [0, 360) range
        ang_deg = np.fmod(ang_deg + 360.0, 360.0)
        return ang_deg
    def finalize_angle(self, ang_deg):
        # commit the new angle and redraw
        self.angle = ang_deg
        self.draw()
    def get_angle(self):
        return self.angle
    def set_labels(self, val_ang_pairs):
        """Set the scale markers: a list of (value, angle) or
        (label, value, angle) tuples; angles in degrees.
        """
        self.values = val_ang_pairs
        self.draw()
    def set_tracking(self, tf):
        # tracking=True emits "value-changed" continuously while dragging
        self.tracking = tf
    def configure_event(self, widget, event):
        # (re)create the backing surface at the new widget size
        rect = widget.get_allocation()
        x, y, width, height = rect.x, rect.y, rect.width, rect.height
        self.dims = np.array((width, height))
        self.center = np.array((width / 2, height / 2))
        self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
        self.draw()
        return True
    def update_widget(self):
        # queue a repaint of the whole widget from the backing surface
        if self.surface is None:
            # window is not mapped/configured yet
            return
        win = self.get_window()
        if win is not None and self.surface is not None:
            wd, ht = self.dims
            self.queue_draw_area(0, 0, wd, ht)
    def draw_event(self, widget, cr):
        # redraw the screen from backing surface
        cr.set_source_surface(self.surface, 0, 0)
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()
        return False
    def calc_vertexes(self, start_cx, start_cy, end_cx, end_cy,
                      arrow_length=10, arrow_degrees=0.35):
        # Compute the two barb points of an arrowhead ending at
        # (end_cx, end_cy) on the line from (start_cx, start_cy).
        angle = np.arctan2(end_cy - start_cy, end_cx - start_cx) + np.pi
        cx1 = end_cx + arrow_length * np.cos(angle - arrow_degrees)
        cy1 = end_cy + arrow_length * np.sin(angle - arrow_degrees)
        cx2 = end_cx + arrow_length * np.cos(angle + arrow_degrees)
        cy2 = end_cy + arrow_length * np.sin(angle + arrow_degrees)
        return (cx1, cy1, cx2, cy2)
    def angle_action(self, x, y, ang_deg):
        """Subclass overrides to provide custom behavior"""
        self._set_value(ang_deg)
    def turn_ccw(self):
        """Subclass overrides to provide custom behavior"""
        self._set_value(self.angle + self.turn_delta)
    def turn_cw(self):
        """Subclass overrides to provide custom behavior"""
        self._set_value(self.angle - self.turn_delta)
    def _set_value(self, ang_deg):
        """Subclass overrides to provide custom behavior"""
        ang_deg = self.normalize_angle(ang_deg)
        ang_deg = np.clip(ang_deg, self.min_ang_deg, self.max_ang_deg)
        self.value = ang_deg
        self.finalize_angle(ang_deg)
        # suppress intermediate signals while dragging unless tracking
        if not self._dragging or self.tracking:
            self.emit("value-changed", self.value)
    def set_value(self, ang_deg):
        """Subclass overrides to provide custom behavior"""
        self._set_value(ang_deg)
    def get_value(self):
        """Subclass overrides to provide custom behavior"""
        return self.value
class ValueDial(Dial):
    """A Dial that maps a continuous value range [min_val, max_val]
    onto a 260-degree arc.  Call `set_limits()` before use.
    """
    __gtype_name__ = "ValueDial"
    def __init__(self):
        Dial.__init__(self)
        # for drawing value
        self.label_style = 1
        self.draw_value_pos = 2
        # setup axis orientation to match value
        self.ang_offset = 140.0
        self.ang_invert = True
        self.min_ang_deg = 0.0
        self.max_ang_deg = 260.0
        self.set_labels([("min", 0.0), ("max", 260.0)])
        # NOTE: min_val == max_val until set_limits() is called;
        # the value<->angle conversions divide by (max_val - min_val)
        # and would raise ZeroDivisionError before that.
        self.min_val = 0.0
        self.max_val = 0.0
        self.inc_val = 0.0
    def angle_action(self, x, y, ang_deg):
        # pointer drag: convert the pointer angle to a value
        value = self._angle_to_value(ang_deg)
        self._set_value(value)
    def turn_ccw(self):
        # one scroll click up, clamped to the dial's arc
        ang_deg = np.clip(self.angle + self.turn_delta,
                          0.0, self.max_ang_deg)
        value = self._angle_to_value(ang_deg)
        self._set_value(value)
    def turn_cw(self):
        # one scroll click down, clamped to the dial's arc
        ang_deg = np.clip(self.angle - self.turn_delta,
                          0.0, self.max_ang_deg)
        value = self._angle_to_value(ang_deg)
        self._set_value(value)
    def _set_value(self, value):
        # Raises ValueError if `value` falls outside [min_val, max_val].
        if value < self.min_val or value > self.max_val:
            raise ValueError("value '{}' is out of range".format(value))
        self.value = value
        self.value_text = "%.2f" % self.value
        ang_deg = self._value_to_angle(value)
        self.finalize_angle(ang_deg)
        if not self._dragging or self.tracking:
            self.emit("value-changed", self.value)
    def get_value(self):
        return self.value
    def _value_to_angle(self, value):
        # make angle match value
        rng = self.max_val - self.min_val
        pct = (value - self.min_val) / rng
        ang_deg = pct * self.max_ang_deg
        ang_deg = np.clip(ang_deg, 0.0, self.max_ang_deg)
        return ang_deg
    def _angle_to_value(self, ang_deg):
        # make value match angle
        pct = ang_deg / self.max_ang_deg
        rng = self.max_val - self.min_val
        value = self.min_val + pct * rng
        value = np.clip(value, self.min_val, self.max_val)
        return value
    def set_limits(self, min_val, max_val, inc_val):
        """Configure the value range and the per-click increment.
        `max_val` must differ from `min_val` (division below).
        """
        self.min_val = min_val
        self.max_val = max_val
        self.inc_val = inc_val
        # scale the scroll step so one click moves by inc_val
        pct = inc_val / (max_val - min_val)
        self.turn_delta = pct * self.max_ang_deg
class IndexDial(Dial):
    """A Dial that snaps to a discrete set of positions (indices into
    `self.values`), like a multi-position rotary switch.
    """
    __gtype_name__ = "IndexDial"
    def __init__(self):
        Dial.__init__(self)
        # currently selected position in self.values
        self.idx = 0
        self.label_style = 1
    def angle_action(self, x, y, ang_deg):
        # snap a pointer drag to the nearest defined position
        idx = self.best_index(ang_deg)
        self.set_index(idx)
    def turn_ccw(self):
        # step to the previous position, wrapping only if self.wrap
        idx = self.idx - 1
        if idx < 0:
            if self.wrap:
                self.set_index(len(self.values) - 1)
        else:
            self.set_index(idx)
    def turn_cw(self):
        # step to the next position, wrapping only if self.wrap
        idx = self.idx + 1
        if idx >= len(self.values):
            if self.wrap:
                self.set_index(0)
        else:
            self.set_index(idx)
    def set_index(self, idx):
        """Select position `idx`, update value/angle and emit
        "value-changed" (with the index).  Raises ValueError for an
        out-of-range index.
        """
        idx = int(idx)
        if idx < 0 or idx >= len(self.values):
            raise ValueError("index '{}' is outside range 0-{}".format(idx,
                             len(self.values)))
        self.idx = idx
        tup = self.values[idx]
        # tuples are (value, angle) or (label, value, angle)
        self.value = tup[0] if len(tup) == 2 else tup[1]
        self.value_text = str(self.value)
        self.angle = tup[-1]
        self.draw()
        if not self._dragging or self.tracking:
            self.emit("value-changed", idx)
    def get_index(self):
        return self.idx
    def get_value(self):
        return self.value
    def best_index(self, ang_deg):
        # find the index that is closest to the angle ang_deg
        angles = np.array([tup[-1] for tup in self.values])
        ang_deg = self.normalize_angle(ang_deg)
        angles = np.abs(angles - ang_deg)
        idx = np.argmin(angles)
        return idx
class FileSelection(object):
    """Wrapper around Gtk.FileChooserDialog for opening/saving files.

    The chosen path(s) are delivered to a callback supplied to
    `popup()`: as a list if all_at_once=True, otherwise one call per
    selected path.
    """
    def __init__(self, parent_w, action=Gtk.FileChooserAction.OPEN,
                 title="Select a file", all_at_once=False):
        # TODO: deprecate the functionality when all_at_once == False
        # and make the default to be True
        self.parent = parent_w
        self.all_at_once = all_at_once
        # Create a new file selection widget
        self.filew = Gtk.FileChooserDialog(title=title, action=action)
        self.filew.connect("destroy", self.close)
        # response id 1 == accept, 0 == cancel (see file_ok_sel)
        if action == Gtk.FileChooserAction.SAVE:
            self.filew.add_buttons(Gtk.STOCK_SAVE, 1, Gtk.STOCK_CANCEL, 0)
        else:
            self.filew.add_buttons(Gtk.STOCK_OPEN, 1, Gtk.STOCK_CANCEL, 0)
        self.filew.set_default_response(1)
        self.filew.set_select_multiple(True)
        self.filew.connect("response", self.file_ok_sel)
        # Connect the cancel_button to destroy the widget
        #self.filew.cancel_button.connect("clicked", self.close)
    def popup(self, title, callfn, initialdir=None, filename=None):
        """Let user select and load file."""
        self.cb = callfn
        self.filew.set_title(title)
        if initialdir:
            self.filew.set_current_folder(initialdir)
        if filename:
            #self.filew.set_filename(filename)
            self.filew.set_current_name(filename)
        self.filew.show()
        # default size can be enormous
        self.filew.resize(800, 600)
    # Get the selected filename
    def file_ok_sel(self, w, rsp):
        # "response" handler: rsp == 0 means the dialog was cancelled
        self.close(w)
        if rsp == 0:
            return
        paths = self.filew.get_filenames()
        if self.all_at_once:
            self.cb(paths)
        else:
            for path in paths:
                self.cb(path)
    def close(self, widget):
        # hide (not destroy) so the dialog can be reused
        self.filew.hide()
class DirectorySelection(FileSelection):
    """Handle directory selection dialog."""
    def __init__(self, parent_w):
        # same machinery as FileSelection, but in folder-select mode
        super(DirectorySelection, self).__init__(
            parent_w, action=Gtk.FileChooserAction.SELECT_FOLDER,
            title="Select a directory")
    def popup(self, title, callfn, initialdir=None):
        """Let user select a directory."""
        super(DirectorySelection, self).popup(title, callfn, initialdir)
class Timer(Callback.Callbacks):
    """Abstraction of a GUI-toolkit implemented timer."""
    def __init__(self, duration=0.0):
        """Create a timer set to expire after `duration` sec.
        """
        super(Timer, self).__init__()
        self.duration = duration
        # For storing aritrary data with timers
        self.data = Bunch.Bunch()
        # GLib source id of the pending timeout, or None if not running
        self._timer = None
        self.start_time = 0.0
        self.deadline = 0.0
        for name in ('expired', 'canceled'):
            self.enable_callback(name)
    def start(self, duration=None):
        """Start the timer.  If `duration` is not None, it should
        specify the time to expiration in seconds.
        """
        if duration is None:
            duration = self.duration
        self.set(duration)
    def set(self, duration):
        # (re)arm the timer, replacing any pending timeout
        self.stop()
        self.start_time = time.time()
        self.deadline = self.start_time + duration
        # Gtk timer set in milliseconds
        time_ms = int(duration * 1000.0)
        self._timer = GObject.timeout_add(time_ms, self._redirect_cb)
    def _redirect_cb(self):
        # GLib timeout handler; returning None (falsy) makes it one-shot
        self._timer = None
        self.make_callback('expired')
    def is_set(self):
        """Return True if the timer is currently armed."""
        return self._timer is not None
    def cond_set(self, time_sec):
        # arm the timer only if it is not already running
        if not self.is_set():
            # TODO: probably a race condition here
            self.set(time_sec)
    def elapsed_time(self):
        """Seconds since the timer was last armed."""
        return time.time() - self.start_time
    def time_left(self):
        """Seconds until expiration (never negative)."""
        return max(0.0, self.deadline - time.time())
    def get_deadline(self):
        return self.deadline
    def stop(self):
        # silently remove the pending timeout, if any
        try:
            if self._timer is not None:
                GObject.source_remove(self._timer)
        except Exception:
            # source may already have fired/been removed; best-effort
            pass
        self._timer = None
    def cancel(self):
        """Cancel this timer.  If the timer is not running, there
        is no error.
        """
        self.stop()
        self.make_callback('canceled')
    # alias kept for API compatibility
    clear = cancel
def combo_box_new_text():
    """Build a ComboBox backed by a single-column string model,
    mirroring the old Gtk 2 ``gtk.combo_box_new_text()`` helper.
    """
    model = Gtk.ListStore(GObject.TYPE_STRING)
    cbox = ComboBox()
    cbox.set_model(model)
    renderer = Gtk.CellRendererText()
    cbox.pack_start(renderer, True)
    # render column 0 of the model as the item text
    cbox.add_attribute(renderer, 'text', 0)
    return cbox
def get_scroll_info(event):
    """
    Returns the (degrees, direction) of a scroll motion Gtk event.
    """
    valid, dx, dy = event.get_scroll_deltas()
    if valid:
        # trackpad or other device reporting smooth pixel deltas
        magnitude = math.sqrt(dx ** 2 + dy ** 2)
        if dy < 0:
            magnitude = -magnitude
        heading = math.degrees(math.atan2(dy, dx)) - 90.0
        direction = math.fmod(heading + 360.0, 360.0)
        # TODO: is this accurate?--NOT TESTED
        num_degrees = magnitude / 8.0
        return (num_degrees, direction)
    # discrete wheel clicks
    valid, direction = event.get_scroll_direction()
    if valid:
        compass = {Gdk.ScrollDirection.UP: 0.0,
                   Gdk.ScrollDirection.DOWN: 180.0,
                   Gdk.ScrollDirection.LEFT: 270.0,
                   Gdk.ScrollDirection.RIGHT: 90.0}
        # unknown directions fall through unchanged
        direction = compass.get(event.direction, direction)
    else:
        direction = None
    # TODO: does Gtk encode the amount of scroll?
    # 15 deg is standard 1-click turn for a wheel mouse
    num_degrees = 15.0
    return (num_degrees, direction)
def get_icon(iconpath, size=None):
    """Load the image at `iconpath` as a pixbuf scaled to `size`
    (a (width, height) pair); defaults to 24x24 when size is None.
    """
    wd, ht = (24, 24) if size is None else size
    return pixbuf_new_from_file_at_size(iconpath, wd, ht)
def get_font(font_family, point_size):
    """Return a Pango.FontDescription for the given family (resolved
    through the font alias table) and point size.
    """
    family = font_asst.resolve_alias(font_family, font_family)
    return Pango.FontDescription('%s %d' % (family, point_size))
def load_font(font_name, font_file):
    # Stub: dynamic font loading is not implemented for the gtk3
    # back end; the name is returned unchanged so callers can proceed.
    # TODO!
    ## raise ValueError("Loading fonts dynamically is an unimplemented"
    ##                  " feature for gtk3 back end")
    return font_name
def pixbuf_new_from_xpm_data(xpm_data):
    """Create a GdkPixbuf.Pixbuf from a sequence of XPM data strings.

    Fix: the previous code called ``bytes(str)`` without an encoding,
    which raises TypeError on Python 3; encode explicitly instead.
    NOTE(review): PyGObject's ``new_from_xpm_data`` is introspected as
    taking a list of str — confirm whether the joined-bytes form is
    actually accepted by the installed GdkPixbuf bindings.
    """
    xpm_data = '\n'.join(xpm_data).encode('utf-8')
    return GdkPixbuf.Pixbuf.new_from_xpm_data(xpm_data)
def pixbuf_new_from_array(data, rgbtype, bpp):
    """Create a pixbuf from a (height, width, depth) array by streaming
    it through a PNM loader, working around a gtk3 binding bug.

    `data` is assumed to be uint8 RGB with depth 3 — TODO confirm at
    the call sites; `rgbtype` and `bpp` are accepted for signature
    compatibility but unused by this workaround.
    """
    # NOTE: there is a bug in gtk3 with pixbuf_new_from_array()
    # See: http://stackoverflow.com/questions/24062779/how-to-correctly-covert-3d-array-into-continguous-rgb-bytes/24070152#24070152
    #return GdkPixbuf.Pixbuf.new_from_array(data, rgbtype, bpp)
    height, width, depth = data.shape
    pixl = GdkPixbuf.PixbufLoader.new_with_type('pnm')
    # P6 is the magic number of PNM format,
    # and 255 is the max color allowed
    pixl.write((b"P6 %d %d 255 " % (width, height)) + data.tobytes(order='C'))
    pix = pixl.get_pixbuf()
    pixl.close()
    return pix
def pixbuf_new_from_data(rgb_buf, rgbtype, hasAlpha, bpp, dawd, daht, stride):
    # Thin wrapper over GdkPixbuf.Pixbuf.new_from_data; the two
    # trailing None arguments are the destroy function and its closure.
    return GdkPixbuf.Pixbuf.new_from_data(rgb_buf, rgbtype, hasAlpha, bpp,
                                          dawd, daht, stride, None, None)
def pixbuf_new_from_file_at_size(foldericon, width, height):
    # Load an image file as a pixbuf scaled to (width, height).
    return GdkPixbuf.Pixbuf.new_from_file_at_size(foldericon, width, height)
def pixbuf_new_from_file(file_path):
    # Load an image file as a pixbuf at its natural size.
    return GdkPixbuf.Pixbuf.new_from_file(file_path)
def make_cursor(widget, iconpath, x, y):
    # Build a custom cursor from an image file; (x, y) is the hotspot.
    image = Gtk.Image()
    image.set_from_file(iconpath)
    pixbuf = image.get_pixbuf()
    screen = widget.get_screen()
    display = screen.get_display()
    # NOTE(review): in Gtk3/PyGObject the pixbuf-based constructor is
    # normally Gdk.Cursor.new_from_pixbuf(display, pixbuf, x, y);
    # verify that this positional Gdk.Cursor(...) call works with the
    # bindings in use.
    return Gdk.Cursor(display, pixbuf, x, y)
def modify_bg(widget, color):
    """Give `widget` a flat background color via a generated CSS
    provider, or remove the custom background when `color` is None.
    """
    context = widget.get_style_context()
    if color is None:
        context.remove_class("custom_bg")
        return
    context.add_class("custom_bg")
    css_data = ("*.custom_bg { background-image: none; "
                "background-color: %s; }" % (color))
    provider = Gtk.CssProvider()
    provider.load_from_data(css_data.encode())
    context.add_provider(provider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
def set_default_style():
    # Load the package's 'gtk_css' stylesheet (shipped next to this
    # module) and install it application-wide on the default screen.
    style_provider = Gtk.CssProvider()
    module_home = os.path.split(sys.modules[__name__].__file__)[0]
    gtk_css = os.path.join(module_home, 'gtk_css')
    with open(gtk_css, 'rb') as css_f:
        css_data = css_f.read()
    try:
        style_provider.load_from_data(css_data)
        Gtk.StyleContext.add_provider_for_screen(
            Gdk.Screen.get_default(), style_provider,
            Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
        )
    except Exception:
        # best-effort: a bad stylesheet or headless display should not
        # prevent the application from starting
        pass
# END
| [
"numpy.clip",
"numpy.radians",
"gi.repository.Gtk.Image",
"gi.repository.Gtk.Notebook.__init__",
"gi.repository.Gdk.Screen.get_default",
"gi.repository.Gtk.HBox",
"ginga.fonts.font_asst.resolve_alias",
"gi.repository.Gtk.CheckMenuItem.__init__",
"math.sqrt",
"numpy.array",
"numpy.arctan2",
"gi... | [((363, 395), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (381, 395), False, 'import gi\n'), ((51236, 51270), 'gi.repository.Gtk.ListStore', 'Gtk.ListStore', (['GObject.TYPE_STRING'], {}), '(GObject.TYPE_STRING)\n', (51249, 51270), False, 'from gi.repository import Gtk\n'), ((51342, 51364), 'gi.repository.Gtk.CellRendererText', 'Gtk.CellRendererText', ([], {}), '()\n', (51362, 51364), False, 'from gi.repository import Gtk\n'), ((52964, 53013), 'ginga.fonts.font_asst.resolve_alias', 'font_asst.resolve_alias', (['font_family', 'font_family'], {}), '(font_family, font_family)\n', (52987, 53013), False, 'from ginga.fonts import font_asst\n'), ((53025, 53083), 'gi.repository.Pango.FontDescription', 'Pango.FontDescription', (["('%s %d' % (font_family, point_size))"], {}), "('%s %d' % (font_family, point_size))\n", (53046, 53083), False, 'from gi.repository import Pango\n'), ((53393, 53437), 'gi.repository.GdkPixbuf.Pixbuf.new_from_xpm_data', 'GdkPixbuf.Pixbuf.new_from_xpm_data', (['xpm_data'], {}), '(xpm_data)\n', (53427, 53437), False, 'from gi.repository import GdkPixbuf\n'), ((53798, 53841), 'gi.repository.GdkPixbuf.PixbufLoader.new_with_type', 'GdkPixbuf.PixbufLoader.new_with_type', (['"""pnm"""'], {}), "('pnm')\n", (53834, 53841), False, 'from gi.repository import GdkPixbuf\n'), ((54156, 54255), 'gi.repository.GdkPixbuf.Pixbuf.new_from_data', 'GdkPixbuf.Pixbuf.new_from_data', (['rgb_buf', 'rgbtype', 'hasAlpha', 'bpp', 'dawd', 'daht', 'stride', 'None', 'None'], {}), '(rgb_buf, rgbtype, hasAlpha, bpp, dawd, daht,\n stride, None, None)\n', (54186, 54255), False, 'from gi.repository import GdkPixbuf\n'), ((54368, 54433), 'gi.repository.GdkPixbuf.Pixbuf.new_from_file_at_size', 'GdkPixbuf.Pixbuf.new_from_file_at_size', (['foldericon', 'width', 'height'], {}), '(foldericon, width, height)\n', (54406, 54433), False, 'from gi.repository import GdkPixbuf\n'), ((54484, 54525), 
'gi.repository.GdkPixbuf.Pixbuf.new_from_file', 'GdkPixbuf.Pixbuf.new_from_file', (['file_path'], {}), '(file_path)\n', (54514, 54525), False, 'from gi.repository import GdkPixbuf\n'), ((54581, 54592), 'gi.repository.Gtk.Image', 'Gtk.Image', ([], {}), '()\n', (54590, 54592), False, 'from gi.repository import Gtk\n'), ((54738, 54771), 'gi.repository.Gdk.Cursor', 'Gdk.Cursor', (['display', 'pixbuf', 'x', 'y'], {}), '(display, pixbuf, x, y)\n', (54748, 54771), False, 'from gi.repository import Gdk\n'), ((55276, 55293), 'gi.repository.Gtk.CssProvider', 'Gtk.CssProvider', ([], {}), '()\n', (55291, 55293), False, 'from gi.repository import Gtk\n'), ((1578, 1603), 'gi.repository.Gtk.Window.__init__', 'Gtk.Window.__init__', (['self'], {}), '(self)\n', (1597, 1603), False, 'from gi.repository import Gtk\n'), ((1738, 1786), 'gi.repository.Gtk.CheckButton.__init__', 'Gtk.CheckButton.__init__', (['self', '*args'], {}), '(self, *args, **kwdargs)\n', (1762, 1786), False, 'from gi.repository import Gtk\n'), ((2101, 2150), 'gi.repository.Gtk.ToggleButton.__init__', 'Gtk.ToggleButton.__init__', (['self', '*args'], {}), '(self, *args, **kwdargs)\n', (2126, 2150), False, 'from gi.repository import Gtk\n'), ((2603, 2651), 'gi.repository.Gtk.RadioButton.__init__', 'Gtk.RadioButton.__init__', (['self', '*args'], {}), '(self, *args, **kwdargs)\n', (2627, 2651), False, 'from gi.repository import Gtk\n'), ((3106, 3156), 'gi.repository.Gtk.CheckMenuItem.__init__', 'Gtk.CheckMenuItem.__init__', (['self', '*args'], {}), '(self, *args, **kwdargs)\n', (3132, 3156), False, 'from gi.repository import Gtk\n'), ((3469, 3516), 'gi.repository.Gtk.SpinButton.__init__', 'Gtk.SpinButton.__init__', (['self', '*args'], {}), '(self, *args, **kwdargs)\n', (3492, 3516), False, 'from gi.repository import Gtk\n'), ((3815, 3858), 'gi.repository.Gtk.HScale.__init__', 'Gtk.HScale.__init__', (['self', '*args'], {}), '(self, *args, **kwdargs)\n', (3834, 3858), False, 'from gi.repository import Gtk\n'), ((4153, 
4196), 'gi.repository.Gtk.VScale.__init__', 'Gtk.VScale.__init__', (['self', '*args'], {}), '(self, *args, **kwdargs)\n', (4172, 4196), False, 'from gi.repository import Gtk\n'), ((4495, 4540), 'gi.repository.Gtk.ComboBox.__init__', 'Gtk.ComboBox.__init__', (['self', '*args'], {}), '(self, *args, **kwdargs)\n', (4516, 4540), False, 'from gi.repository import Gtk\n'), ((5721, 5766), 'gi.repository.Gtk.Notebook.__init__', 'Gtk.Notebook.__init__', (['self', '*args'], {}), '(self, *args, **kwdargs)\n', (5742, 5766), False, 'from gi.repository import Gtk\n'), ((7882, 7892), 'gi.repository.Gtk.VBox', 'Gtk.VBox', ([], {}), '()\n', (7890, 7892), False, 'from gi.repository import Gtk\n'), ((7941, 7951), 'gi.repository.Gtk.HBox', 'Gtk.HBox', ([], {}), '()\n', (7949, 7951), False, 'from gi.repository import Gtk\n'), ((7968, 7983), 'gi.repository.Gtk.Button', 'Gtk.Button', (['"""x"""'], {}), "('x')\n", (7978, 7983), False, 'from gi.repository import Gtk\n'), ((8000, 8015), 'gi.repository.Gtk.Button', 'Gtk.Button', (['"""^"""'], {}), "('^')\n", (8010, 8015), False, 'from gi.repository import Gtk\n'), ((8032, 8047), 'gi.repository.Gtk.Button', 'Gtk.Button', (['"""v"""'], {}), "('v')\n", (8042, 8047), False, 'from gi.repository import Gtk\n'), ((8209, 8223), 'gi.repository.Gtk.EventBox', 'Gtk.EventBox', ([], {}), '()\n', (8221, 8223), False, 'from gi.repository import Gtk\n'), ((8855, 8869), 'gi.repository.Gtk.EventBox', 'Gtk.EventBox', ([], {}), '()\n', (8867, 8869), False, 'from gi.repository import Gtk\n'), ((9874, 9899), 'gi.repository.Gtk.Layout.__init__', 'Gtk.Layout.__init__', (['self'], {}), '(self)\n', (9893, 9899), False, 'from gi.repository import Gtk\n'), ((11747, 11809), 'gi.repository.GObject.timeout_add', 'GObject.timeout_add', (['(1000)', 'self.update_subwin_position', 'subwin'], {}), '(1000, self.update_subwin_position, subwin)\n', (11766, 11809), False, 'from gi.repository import GObject\n'), ((11818, 11876), 'gi.repository.GObject.timeout_add', 
'GObject.timeout_add', (['(1500)', 'self.update_subwin_size', 'subwin'], {}), '(1500, self.update_subwin_size, subwin)\n', (11837, 11876), False, 'from gi.repository import GObject\n'), ((14814, 14914), 'ginga.misc.Bunch.Bunch', 'Bunch.Bunch', ([], {'subwin': 'subwin', 'action': '"""move"""', 'x_origin': 'x', 'y_origin': 'y', 'x_root': 'x_root', 'y_root': 'y_root'}), "(subwin=subwin, action='move', x_origin=x, y_origin=y, x_root=\n x_root, y_root=y_root)\n", (14825, 14914), False, 'from ginga.misc import Bunch, Callback\n'), ((16577, 16726), 'ginga.misc.Bunch.Bunch', 'Bunch.Bunch', ([], {'subwin': 'subwin', 'action': '"""resize"""', 'x_origin': 'x1', 'y_origin': 'y1', 'wd': 'wd', 'ht': 'ht', 'x_root': 'x_root', 'y_root': 'y_root', 'origin': 'origin', 'updates': 'updates'}), "(subwin=subwin, action='resize', x_origin=x1, y_origin=y1, wd=wd,\n ht=ht, x_root=x_root, y_root=y_root, origin=origin, updates=updates)\n", (16588, 16726), False, 'from ginga.misc import Bunch, Callback\n'), ((23055, 23080), 'gi.repository.Gtk.Layout.__init__', 'Gtk.Layout.__init__', (['self'], {}), '(self)\n', (23074, 23080), False, 'from gi.repository import Gtk\n'), ((25842, 25885), 'gi.repository.Gdk.Cursor.new_from_name', 'Gdk.Cursor.new_from_name', (['display', 'cur_name'], {}), '(display, cur_name)\n', (25866, 25885), False, 'from gi.repository import Gdk\n'), ((26157, 26201), 'gi.repository.Gdk.Cursor.new_from_name', 'Gdk.Cursor.new_from_name', (['display', '"""default"""'], {}), "(display, 'default')\n", (26181, 26201), False, 'from gi.repository import Gdk\n'), ((32361, 32391), 'gi.repository.Gtk.DrawingArea.__init__', 'Gtk.DrawingArea.__init__', (['self'], {}), '(self)\n', (32385, 32391), False, 'from gi.repository import Gtk\n'), ((32445, 32461), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (32453, 32461), True, 'import numpy as np\n'), ((32484, 32504), 'numpy.array', 'np.array', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (32492, 32504), True, 'import numpy as np\n'), 
((35776, 35803), 'cairo.Context', 'cairo.Context', (['self.surface'], {}), '(self.surface)\n', (35789, 35803), False, 'import cairo\n'), ((40172, 40203), 'numpy.fmod', 'np.fmod', (['(ang_deg + 360.0)', '(360.0)'], {}), '(ang_deg + 360.0, 360.0)\n', (40179, 40203), True, 'import numpy as np\n'), ((40704, 40729), 'numpy.array', 'np.array', (['(width, height)'], {}), '((width, height))\n', (40712, 40729), True, 'import numpy as np\n'), ((40752, 40785), 'numpy.array', 'np.array', (['(width / 2, height / 2)'], {}), '((width / 2, height / 2))\n', (40760, 40785), True, 'import numpy as np\n'), ((40810, 40864), 'cairo.ImageSurface', 'cairo.ImageSurface', (['cairo.FORMAT_ARGB32', 'width', 'height'], {}), '(cairo.FORMAT_ARGB32, width, height)\n', (40828, 40864), False, 'import cairo\n'), ((42518, 42570), 'numpy.clip', 'np.clip', (['ang_deg', 'self.min_ang_deg', 'self.max_ang_deg'], {}), '(ang_deg, self.min_ang_deg, self.max_ang_deg)\n', (42525, 42570), True, 'import numpy as np\n'), ((43657, 43717), 'numpy.clip', 'np.clip', (['(self.angle + self.turn_delta)', '(0.0)', 'self.max_ang_deg'], {}), '(self.angle + self.turn_delta, 0.0, self.max_ang_deg)\n', (43664, 43717), True, 'import numpy as np\n'), ((43863, 43923), 'numpy.clip', 'np.clip', (['(self.angle - self.turn_delta)', '(0.0)', 'self.max_ang_deg'], {}), '(self.angle - self.turn_delta, 0.0, self.max_ang_deg)\n', (43870, 43923), True, 'import numpy as np\n'), ((44717, 44756), 'numpy.clip', 'np.clip', (['ang_deg', '(0.0)', 'self.max_ang_deg'], {}), '(ang_deg, 0.0, self.max_ang_deg)\n', (44724, 44756), True, 'import numpy as np\n'), ((44994, 45036), 'numpy.clip', 'np.clip', (['value', 'self.min_val', 'self.max_val'], {}), '(value, self.min_val, self.max_val)\n', (45001, 45036), True, 'import numpy as np\n'), ((46743, 46785), 'numpy.array', 'np.array', (['[tup[-1] for tup in self.values]'], {}), '([tup[-1] for tup in self.values])\n', (46751, 46785), True, 'import numpy as np\n'), ((46851, 46875), 'numpy.abs', 'np.abs', 
(['(angles - ang_deg)'], {}), '(angles - ang_deg)\n', (46857, 46875), True, 'import numpy as np\n'), ((46890, 46907), 'numpy.argmin', 'np.argmin', (['angles'], {}), '(angles)\n', (46899, 46907), True, 'import numpy as np\n'), ((47335, 47384), 'gi.repository.Gtk.FileChooserDialog', 'Gtk.FileChooserDialog', ([], {'title': 'title', 'action': 'action'}), '(title=title, action=action)\n', (47356, 47384), False, 'from gi.repository import Gtk\n'), ((49545, 49558), 'ginga.misc.Bunch.Bunch', 'Bunch.Bunch', ([], {}), '()\n', (49556, 49558), False, 'from ginga.misc import Bunch, Callback\n'), ((50068, 50079), 'time.time', 'time.time', ([], {}), '()\n', (50077, 50079), False, 'import time\n'), ((50234, 50281), 'gi.repository.GObject.timeout_add', 'GObject.timeout_add', (['time_ms', 'self._redirect_cb'], {}), '(time_ms, self._redirect_cb)\n', (50253, 50281), False, 'from gi.repository import GObject\n'), ((51724, 51752), 'math.sqrt', 'math.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (51733, 51752), False, 'import math\n'), ((51818, 51836), 'math.atan2', 'math.atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (51828, 51836), False, 'import math\n'), ((51906, 51941), 'math.fmod', 'math.fmod', (['(direction + 360.0)', '(360.0)'], {}), '(direction + 360.0, 360.0)\n', (51915, 51941), False, 'import math\n'), ((55026, 55043), 'gi.repository.Gtk.CssProvider', 'Gtk.CssProvider', ([], {}), '()\n', (55041, 55043), False, 'from gi.repository import Gtk\n'), ((24636, 24669), 'gi.repository.Gtk.Image.new_from_pixbuf', 'Gtk.Image.new_from_pixbuf', (['pixbuf'], {}), '(pixbuf)\n', (24661, 24669), False, 'from gi.repository import Gtk\n'), ((24690, 24704), 'gi.repository.Gtk.EventBox', 'Gtk.EventBox', ([], {}), '()\n', (24702, 24704), False, 'from gi.repository import Gtk\n'), ((29698, 29713), 'gi.repository.Gdk.Rectangle', 'Gdk.Rectangle', ([], {}), '()\n', (29711, 29713), False, 'from gi.repository import Gdk\n'), ((41575, 41623), 'numpy.arctan2', 'np.arctan2', (['(end_cy - 
start_cy)', '(end_cx - start_cx)'], {}), '(end_cy - start_cy, end_cx - start_cx)\n', (41585, 41623), True, 'import numpy as np\n'), ((50629, 50640), 'time.time', 'time.time', ([], {}), '()\n', (50638, 50640), False, 'import time\n'), ((51857, 51878), 'math.degrees', 'math.degrees', (['ang_rad'], {}), '(ang_rad)\n', (51869, 51878), False, 'import math\n'), ((55605, 55629), 'gi.repository.Gdk.Screen.get_default', 'Gdk.Screen.get_default', ([], {}), '()\n', (55627, 55629), False, 'from gi.repository import Gdk\n'), ((20883, 20905), 'math.sqrt', 'math.sqrt', (['num_widgets'], {}), '(num_widgets)\n', (20892, 20905), False, 'import math\n'), ((35410, 35460), 'numpy.arctan2', 'np.arctan2', (['(x - self.center[0])', '(y - self.center[1])'], {}), '(x - self.center[0], y - self.center[1])\n', (35420, 35460), True, 'import numpy as np\n'), ((37215, 37236), 'numpy.radians', 'np.radians', (['theta_pos'], {}), '(theta_pos)\n', (37225, 37236), True, 'import numpy as np\n'), ((37986, 38004), 'numpy.arctan2', 'np.arctan2', (['cx', 'cy'], {}), '(cx, cy)\n', (37996, 38004), True, 'import numpy as np\n'), ((38763, 38780), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (38773, 38780), True, 'import numpy as np\n'), ((41671, 41700), 'numpy.cos', 'np.cos', (['(angle - arrow_degrees)'], {}), '(angle - arrow_degrees)\n', (41677, 41700), True, 'import numpy as np\n'), ((41739, 41768), 'numpy.sin', 'np.sin', (['(angle - arrow_degrees)'], {}), '(angle - arrow_degrees)\n', (41745, 41768), True, 'import numpy as np\n'), ((41807, 41836), 'numpy.cos', 'np.cos', (['(angle + arrow_degrees)'], {}), '(angle + arrow_degrees)\n', (41813, 41836), True, 'import numpy as np\n'), ((41875, 41904), 'numpy.sin', 'np.sin', (['(angle + arrow_degrees)'], {}), '(angle + arrow_degrees)\n', (41881, 41904), True, 'import numpy as np\n'), ((50725, 50736), 'time.time', 'time.time', ([], {}), '()\n', (50734, 50736), False, 'import time\n'), ((50886, 50920), 'gi.repository.GObject.source_remove', 
'GObject.source_remove', (['self._timer'], {}), '(self._timer)\n', (50907, 50920), False, 'from gi.repository import GObject\n'), ((37500, 37517), 'numpy.sin', 'np.sin', (['theta_rad'], {}), '(theta_rad)\n', (37506, 37517), True, 'import numpy as np\n'), ((37555, 37572), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (37561, 37572), True, 'import numpy as np\n'), ((37870, 37887), 'numpy.sin', 'np.sin', (['theta_rad'], {}), '(theta_rad)\n', (37876, 37887), True, 'import numpy as np\n'), ((37897, 37914), 'numpy.cos', 'np.cos', (['theta_rad'], {}), '(theta_rad)\n', (37903, 37914), True, 'import numpy as np\n')] |
# Brute-force search: for every ordered triple of "4 of 12" bit patterns,
# count the 36-bit arrangements whose dot product with every row of the
# stacked seed/candidate matrix equals 4 (i.e. an exact 4-bit overlap).
from itertools import combinations
import numpy as np
import networkx as nx
print('imports complete')
# Candidate vectors produced by an earlier regeneration step.
regen = np.load('regenerate.npy')
# Three fixed 36-bit seed patterns: a 4-hot block in positions 0-3, 4-7
# and 8-11 respectively, repeated three times across the vector.
seeds = np.asarray((
    ((1,) * 4 + (0,) * 8) * 3,
    ((0,) * 4 + (1,) * 4 + (0,) * 4) * 3,
    ((0,) * 8 + (1,) * 4) * 3#,
    #(0,)*8+(1,)*4+(0,)*4+(1,)*4+(0,)*4+(1,)*4+(0,)*8,
    #(0,)*4+(1,)*4+(0,)*4+(1,)*4+(0,)*16+(1,)*4,
    #(1,)*4+(0,)*16+(1,)*4+(0,)*4+(1,)*4+(0,)*4
), 'uint8')
print('seeds generated')
# Every C(12, 4) way of picking 4 positions out of 12, encoded as a
# 4-hot row of 12 bits.
four_of_twelve = list(combinations(range(12), 4))
num_combos = len(four_of_twelve)
choice_rows = np.zeros((num_combos, 12), 'uint8')
for row, combo in enumerate(four_of_twelve):
    choice_rows[row, list(combo)] = 1
print('combinations generated')
# Cartesian product of three independent choices -> num_combos**3 rows.
# NOTE(review): this materialises ~495**3 x 36 bytes and is very memory
# hungry; a streamed/chunked product would be far cheaper.
first = np.repeat(choice_rows, num_combos ** 2, axis=0)
second = np.tile(np.repeat(choice_rows, num_combos, axis=0), (num_combos, 1))
third = np.tile(choice_rows, (num_combos ** 2, 1))
print('arrangements generated')
vecs = np.block([first, second, third])
print('arrangement complete')
# Free the intermediates before the large dot products below.
del first
del second
del third
print('cleaned up')
for idx in range(len(regen)):
    stacked = np.block([[seeds], [regen[idx]]])
    comp = np.dot(vecs, stacked.T)
    print('comparison generated')
    # A row matches when its dot product with every seed/candidate is 4.
    hits = np.all(np.isin(comp, 4), axis=1)
    print('mask generated')
    #np.save('output', vecs[hits])
    print(len(vecs[hits]), idx)
print(len(vecs[mask]), i)
print('done') | [
"numpy.tile",
"numpy.block",
"numpy.repeat",
"numpy.asarray",
"numpy.isin",
"numpy.zeros",
"numpy.dot",
"numpy.load"
] | [((108, 133), 'numpy.load', 'np.load', (['"""regenerate.npy"""'], {}), "('regenerate.npy')\n", (115, 133), True, 'import numpy as np\n'), ((142, 259), 'numpy.asarray', 'np.asarray', (['(((1,) * 4 + (0,) * 8) * 3, ((0,) * 4 + (1,) * 4 + (0,) * 4) * 3, ((0,) * 8 +\n (1,) * 4) * 3)', '"""uint8"""'], {}), "((((1,) * 4 + (0,) * 8) * 3, ((0,) * 4 + (1,) * 4 + (0,) * 4) * 3,\n ((0,) * 8 + (1,) * 4) * 3), 'uint8')\n", (152, 259), True, 'import numpy as np\n'), ((462, 488), 'numpy.zeros', 'np.zeros', (['(l, 12)', '"""uint8"""'], {}), "((l, 12), 'uint8')\n", (470, 488), True, 'import numpy as np\n'), ((592, 628), 'numpy.repeat', 'np.repeat', (['comb_vecs', '(l ** 2)'], {'axis': '(0)'}), '(comb_vecs, l ** 2, axis=0)\n', (601, 628), True, 'import numpy as np\n'), ((693, 724), 'numpy.tile', 'np.tile', (['comb_vecs', '(l ** 2, 1)'], {}), '(comb_vecs, (l ** 2, 1))\n', (700, 724), True, 'import numpy as np\n'), ((762, 794), 'numpy.block', 'np.block', (['[first, second, third]'], {}), '([first, second, third])\n', (770, 794), True, 'import numpy as np\n'), ((645, 676), 'numpy.repeat', 'np.repeat', (['comb_vecs', 'l'], {'axis': '(0)'}), '(comb_vecs, l, axis=0)\n', (654, 676), True, 'import numpy as np\n'), ((911, 942), 'numpy.block', 'np.block', (['[[seeds], [regen[i]]]'], {}), '([[seeds], [regen[i]]])\n', (919, 942), True, 'import numpy as np\n'), ((950, 967), 'numpy.dot', 'np.dot', (['vecs', 's.T'], {}), '(vecs, s.T)\n', (956, 967), True, 'import numpy as np\n'), ((1014, 1030), 'numpy.isin', 'np.isin', (['comp', '(4)'], {}), '(comp, 4)\n', (1021, 1030), True, 'import numpy as np\n')] |
from collections import defaultdict
from datetime import timedelta
from time import time
import numpy as np
from scipy import sparse as sp
from menpo.visualize import bytes_str, print_progress
from .base import gradient_xy, camera_parameters_update
from .hessian import (initialize_hessian_and_JTe, insert_frame_to_H,
insert_frame_to_JTe)
from .jacobian import jacobians
def increment_parameters(images, mm, id_indices, exp_indices, template_camera,
                         p, qs, cs,
                         c_f=1, c_l=1, c_id=1, c_exp=1, c_sm=1,
                         lm_group=None, n_samples=1000,
                         compute_costs=True):
    """Perform one Hessian-based update of the fitting parameters.

    Builds the (sparse) Hessian and J^T e over all frames, solves for the
    parameter delta, and returns the incremented parameters.

    Parameters
    ----------
    images : list
        One landmarked image per frame.
    mm : morphable model
        Must expose ``shape_model`` and ``model_landmarks_index``.
    id_indices, exp_indices : array-like
        Indices of the identity / expression components inside the
        shape model's component vector.
    template_camera : camera
        Reference camera passed through to the Jacobian computation.
    p : ndarray
        Current identity parameters (shared across frames).
    qs : ndarray, shape (n_frames, n_q)
        Per-frame expression parameters.
    cs : ndarray, shape (n_frames, n_c + 2)
        Per-frame camera parameters (first two entries are the focal
        length and the degenerate quaternion component, which are held
        fixed).
    c_f, c_l, c_id, c_exp, c_sm : float
        Weights for the feature, landmark, identity-prior,
        expression-prior and temporal-smoothness terms.
    lm_group : str, optional
        Landmark group to read from each image.
    n_samples : int
        Number of sample points used in the feature term.
    compute_costs : bool
        If True, also accumulate and print the cost breakdown.

    Returns
    -------
    dict
        Keys ``p``, ``qs``, ``cs`` (updated values), ``dp``, ``dqs``,
        ``dcs`` (the deltas), and ``costs`` when ``compute_costs``.
    """
    n_frames = len(images)
    n_points = mm.shape_model.template_instance.n_points
    n_p = len(id_indices)
    n_q = len(exp_indices)
    n_c = cs.shape[1] - 2  # sub one for quaternion, one for focal length
    print('Precomputing....')
    # Rescale shape components to have size:
    # n_points x (n_components * n_dims)
    # and to be scaled by the relevant standard deviation.
    shape_pc = (
        mm.shape_model.components.T *
        np.sqrt(mm.shape_model.eigenvalues)
    ).reshape([n_points, -1])
    # include std.dev in principal components
    shape_pc_lms = shape_pc.reshape([n_points, 3, -1])[mm.model_landmarks_index]
    print('Initializing Hessian/JTe for frame...')
    # H and JTe carry the prior/smoothness terms; per-frame data terms are
    # accumulated into them below.
    H, JTe = initialize_hessian_and_JTe(c_id, c_exp, c_sm, n_p, n_q, n_c, p, qs,
                                        n_frames)
    print('H: {} ({})'.format(H.shape, bytes_str(H.nbytes)))
    if compute_costs:
        costs = defaultdict(list)
    for (f, image), c, q in zip(enumerate(print_progress(
            images, prefix='Incrementing H/JTe')), cs, qs):
        # Form the overall shape parameter: [p, q]
        s = np.zeros(mm.shape_model.n_active_components)
        s[id_indices] = p
        s[exp_indices] = q
        # In our error we consider landmarks stored [x, y] - so flip here.
        lms_points_xy = image.landmarks[lm_group].points[:, [1, 0]]
        # Compute input image gradient
        grad_x, grad_y = gradient_xy(image)
        # Per-frame Jacobians (and, optionally, per-frame cost terms).
        j = jacobians(s, c, image, lms_points_xy, mm, id_indices, exp_indices,
                      template_camera, grad_x, grad_y, shape_pc, shape_pc_lms,
                      n_samples, compute_costs=compute_costs)
        insert_frame_to_H(H, j, f, n_p, n_q, n_c, c_f, c_l, n_frames)
        insert_frame_to_JTe(JTe, j, f, n_p, n_q, n_c, c_f, c_l, n_frames)
        if compute_costs:
            for cost, val in j['costs'].items():
                costs[cost].append(val)
    print('Converting Hessian to sparse format')
    H = sp.csr_matrix(H)
    print("Sparsity (prop. 0's) of H: {:.2%}".format(
        1 - (H.count_nonzero() / np.prod(np.array(H.shape)))))
    print('Solving for parameter update')
    d = sp.linalg.spsolve(H, JTe)
    # Unpack the stacked solution vector: [dp | dq per frame | dc per frame].
    dp = d[:n_p]
    dqs = d[n_p:(n_p + (n_frames * n_q))].reshape([n_frames, n_q])
    dcs = d[-(n_frames * n_c):].reshape([n_frames, n_c])
    # Add the focal length and degenerate quaternion parameters back on as
    # null delta updates
    dcs = np.hstack([np.tile(np.array([0, 1]), (n_frames, 1)), dcs])
    new_p = p + dp
    new_qs = qs + dqs
    new_cs = np.array([camera_parameters_update(c, dc)
                       for c, dc in zip(cs, dcs)])
    params = {
        'p': new_p,
        'qs': new_qs,
        'cs': new_cs,
        'dp': dp,
        'dqs': dqs,
        'dcs': dcs,
    }
    if compute_costs:
        c = {k: np.array(v) for k, v in costs.items()}
        # Prior energies on identity/expression plus a second-difference
        # smoothness term over the expression trajectory.
        err_s_id = (p ** 2).sum()
        err_s_exp = (qs ** 2).sum()
        err_sm = ((qs[:-2] - 2 * qs[1:-1] + qs[2:]) ** 2).sum()
        err_f_tot = c['err_f'].sum() * c_f / (n_c * n_samples)
        err_l_tot = c['err_l'].sum()
        total_energy = (err_f_tot +
                        c_l * err_l_tot +
                        c_id * err_s_id +
                        c_exp * err_s_exp +
                        c_sm * err_sm)
        c['total_energy'] = total_energy
        c['err_s_id'] = (c_id, err_s_id)
        c['err_s_exp'] = (c_exp, err_s_exp)
        c['err_sm'] = (c_sm, err_sm)
        c['err_f_tot'] = err_f_tot
        c['err_l_tot'] = (c_l, err_l_tot)
        print_cost_dict(c)
        params['costs'] = c
    return params
def fit_video(images, mm, id_indices, exp_indices, template_camera,
              p, qs, cs, c_f=1, c_l=1, c_id=1, c_exp=1, c_sm=1, lm_group=None,
              n_samples=1000, n_iters=10, compute_costs=True):
    """Iteratively refine the fitting parameters over a whole video.

    Runs ``n_iters`` increments, feeding each iteration the parameters
    produced by the previous one, and returns the complete parameter
    history with the initial state first.
    """
    history = [{"p": p, "qs": qs, "cs": cs}]
    for iteration in range(1, n_iters + 1):
        print('{} / {}'.format(iteration, n_iters))
        # The most recent parameters seed the next increment.
        latest = history[-1]
        started = time()
        history.append(
            increment_parameters(images, mm, id_indices, exp_indices,
                                 template_camera, latest['p'], latest['qs'],
                                 latest['cs'], c_f=c_f, c_l=c_l, c_id=c_id,
                                 c_exp=c_exp, c_sm=c_sm, lm_group=lm_group,
                                 n_samples=n_samples,
                                 compute_costs=compute_costs))
        # Report the wall-clock time the iteration took.
        elapsed = int(time() - started)
        print('Iteration {} complete in {}\n'.format(
            iteration, timedelta(seconds=elapsed)))
    return history
def fit_image(image, mm, id_indices, exp_indices, template_camera,
              p, q, c, c_f=1, c_l=1, c_id=1, c_exp=1, lm_group=None,
              n_samples=1000, n_iters=10, compute_costs=True):
    """Fit a single image by treating it as a one-frame video.

    The per-frame parameters gain a leading frame axis and the temporal
    smoothness weight is forced to zero (there are no neighbouring
    frames to smooth against).
    """
    qs = q[None, :]
    cs = c[None, :]
    return fit_video([image], mm, id_indices, exp_indices, template_camera,
                     p, qs, cs, c_f=c_f, c_l=c_l, c_id=c_id, c_exp=c_exp,
                     c_sm=0, lm_group=lm_group, n_samples=n_samples,
                     n_iters=n_iters, compute_costs=compute_costs)
def print_single_cost(k, c, tot):
    """Print one cost entry with its share of the total energy.

    ``c`` is either a ``(weight, raw)`` tuple (a weighted term) or a
    plain scalar that is already the final value.
    """
    if isinstance(c, tuple):
        weight, raw = c
        weighted = weight * raw
        val = '{:>12.2f} x {:>12.2f} = {:.2f}'.format(weight, raw, weighted)
    else:
        weighted = c
        val = '{:.2f}'.format(c)
    # Leading column: percentage of the total, then the term's name.
    key = '{:03.0%} | {:>12}'.format(weighted / tot, k)
    print('{:>20}: {}'.format(key, val))
def print_cost_dict(d):
    """Pretty-print the cost breakdown produced by increment_parameters."""
    divider = '------------------------------------------------------------------'
    print(divider)
    print_single_cost('total_energy', d['total_energy'], d['total_energy'])
    print(divider)
    # Each energy term, expressed as a share of the total energy.
    for name in ('err_f_tot', 'err_l_tot', 'err_s_id', 'err_s_exp', 'err_sm'):
        print_single_cost(name, d[name], d['total_energy'])
    print(divider)
    # Per-frame residual arrays are summarised by their medians.
    for name in ('err_f', 'err_l'):
        print('{} (median over frames): {:.2f}'.format(name, np.median(d[name])))
    print(divider)
| [
"scipy.sparse.linalg.spsolve",
"numpy.median",
"numpy.sqrt",
"menpo.visualize.bytes_str",
"datetime.timedelta",
"numpy.array",
"numpy.zeros",
"collections.defaultdict",
"scipy.sparse.csr_matrix",
"time.time",
"menpo.visualize.print_progress"
] | [((2680, 2696), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['H'], {}), '(H)\n', (2693, 2696), True, 'from scipy import sparse as sp\n'), ((2864, 2889), 'scipy.sparse.linalg.spsolve', 'sp.linalg.spsolve', (['H', 'JTe'], {}), '(H, JTe)\n', (2881, 2889), True, 'from scipy import sparse as sp\n'), ((1614, 1631), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1625, 1631), False, 'from collections import defaultdict\n'), ((1815, 1859), 'numpy.zeros', 'np.zeros', (['mm.shape_model.n_active_components'], {}), '(mm.shape_model.n_active_components)\n', (1823, 1859), True, 'import numpy as np\n'), ((4831, 4837), 'time.time', 'time', ([], {}), '()\n', (4835, 4837), False, 'from time import time\n'), ((1553, 1572), 'menpo.visualize.bytes_str', 'bytes_str', (['H.nbytes'], {}), '(H.nbytes)\n', (1562, 1572), False, 'from menpo.visualize import bytes_str, print_progress\n'), ((1675, 1726), 'menpo.visualize.print_progress', 'print_progress', (['images'], {'prefix': '"""Incrementing H/JTe"""'}), "(images, prefix='Incrementing H/JTe')\n", (1689, 1726), False, 'from menpo.visualize import bytes_str, print_progress\n'), ((3531, 3542), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (3539, 3542), True, 'import numpy as np\n'), ((1138, 1173), 'numpy.sqrt', 'np.sqrt', (['mm.shape_model.eigenvalues'], {}), '(mm.shape_model.eigenvalues)\n', (1145, 1173), True, 'import numpy as np\n'), ((3160, 3176), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (3168, 3176), True, 'import numpy as np\n'), ((5392, 5398), 'time.time', 'time', ([], {}), '()\n', (5396, 5398), False, 'from time import time\n'), ((5461, 5482), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'dt'}), '(seconds=dt)\n', (5470, 5482), False, 'from datetime import timedelta\n'), ((6978, 6993), 'numpy.median', 'np.median', (['d[k]'], {}), '(d[k])\n', (6987, 6993), True, 'import numpy as np\n'), ((2792, 2809), 'numpy.array', 'np.array', (['H.shape'], {}), '(H.shape)\n', (2800, 2809), True, 
'import numpy as np\n')] |
# Load the connect configuration object from the emBRICK serial module.
from emBRICK.modbus_rtu import connect
# Available settings:
#   connect.port = "COM*" or "/dev/ttySC0"  - the serial port to use
#   connect.unit_id = [1, 2]                - the connected node addresses
#   connect.timeout = 0.1                   - Modbus connection timeout
#   connect.baudrate = 460800               - serial baudrate
#   connect.updateRate = 0.005              - +5 ms slows the update cycle
#                                             between LWCS and RemoteMaster
# Load the brick-bus accessor object from the emBRICK serial module.
from emBRICK.modbus_rtu import bB
# Accessor functions (node, module, byte position [, bit position, value]):
#   bB.getShort(node, module, bytePos)
#   bB.getByte(node, module, bytePos)
#   bB.getBit(node, module, bytePos, bitPos)
#   bB.putShort(node, module, bytePos, value)
#   bB.putByte(node, module, bytePos, value)
#   bB.putBit(node, module, bytePos, bitPos, value)
# os.name tells us which operating system we are running on.
import os
# "nt" means Windows.
if os.name == "nt":
    # Windows: check the COM port in the Device Manager and adjust it
    # here if your adapter enumerates differently.
    connect.port = "COM5"
# "posix" means Linux.
elif os.name == "posix":
    # Raspberry Pi: the port is fixed when using the PiBrick driver.
    connect.port = "/dev/ttySC0"
# The baudrate defaults to 460800; uncomment to override.
# connect.baudrate = 460800
# Node 1: register the Modbus address(es) the program should connect to.
connect.unit_id.append(1)
# Node 2 (disabled):
# connect.unit_id.append(2)
##### Example Code #####
# Standard-library threading for the two polling workers.
import threading
import time
from tabulate import tabulate
import numpy as np
# Stagger the start of the two node threads (seconds).
startTime_n1 = 0.1
startTime_n2 = 0.2
class create_variables:
    """Shared mutable state exchanged between the two node poller threads."""

    def __init__(self):
        # Per-phase energy accumulators (node 1 / node 2, phases L1-L3)
        # and the result-counter trackers, all starting from zero. The
        # *_last_loop counters are compared against the live counters to
        # detect when fresh sensor data has arrived.
        for name in ('energy_n1_l1', 'energy_n1_l2', 'energy_n1_l3',
                     'energy_n2_l1', 'energy_n2_l2', 'energy_n2_l3',
                     'counter_check', 'counter_check2',
                     'counter_check_last_loop', 'counter_check2_last_loop'):
            setattr(self, name, 0)
        # Row buffers for each node's tabulated status print-out.
        self.table_n1 = []
        self.table_n2 = []
# Single shared state instance used by both node poller threads below.
var = create_variables()
# Poller for node 1, run forever as a thread (started in __main__).
def node1():
    """Poll the 4-602 brick on node 1, accumulate energy, print a table."""
    while True:
        ##### Node 1 #####
        # Result counter of brick 4-602 (byte 2): it increments from 0 to
        # 255 (then wraps to 0) each time a new averaged measurement is
        # available.
        node1_res_counter = bB.getByte(1, 1, 2)
        # Current command/page of the brick (byte 0); read but unused here.
        node1_page = bB.getByte(1, 1, 0)
        # A new mean result is available whenever the counter changed.
        # The averaging depth is preconfigured to 10; it can be changed on
        # page 4, byte 11 (valid range 1..50).
        if node1_res_counter != var.counter_check:
            # RMS voltages U_A .. U_C (raw digits).
            node1_UA_eff = bB.getShort(1, 1, 22)
            node1_UB_eff = bB.getShort(1, 1, 24)
            node1_UC_eff = bB.getShort(1, 1, 26)
            # Convert digits to volts (13.603 mV per digit).
            node1_UA_in_V = (node1_UA_eff * 13.603 / 1000)
            node1_UB_in_V = (node1_UB_eff * 13.603 / 1000)
            node1_UC_in_V = (node1_UC_eff * 13.603 / 1000)
            # Current phase angles I_A .. I_C (raw digits).
            node1_phase_IA = bB.getShort(1, 1, 10)
            node1_phase_IB = bB.getShort(1, 1, 12)
            node1_phase_IC = bB.getShort(1, 1, 14)
            # Convert digits to degrees (0.017578125 deg per digit).
            node1_phase_IA_ = 0.017578125 * node1_phase_IA
            node1_phase_IB_ = 0.017578125 * node1_phase_IB
            node1_phase_IC_ = 0.017578125 * node1_phase_IC
            # Instantaneous power on L1-L3; reinterpret the raw value as a
            # signed 16-bit integer so negative power is handled correctly.
            node1_power_L1 = bB.getShort(1, 1, 4)
            node1_power_L1 = np.int16(node1_power_L1)
            node1_power_L2 = bB.getShort(1, 1, 6)
            node1_power_L2 = np.int16(node1_power_L2)
            node1_power_L3 = bB.getShort(1, 1, 8)
            node1_power_L3 = np.int16(node1_power_L3)
            # Convert digits to watts (11.1449 W per digit).
            node1_power_L1_W = (node1_power_L1 * 11.1449)
            node1_power_L2_W = (node1_power_L2 * 11.1449)
            node1_power_L3_W = (node1_power_L3 * 11.1449)
            # RMS currents I_A .. I_C (raw digits).
            node1_IA_eff = bB.getShort(1, 1, 16)
            node1_IB_eff = bB.getShort(1, 1, 18)
            node1_IC_eff = bB.getShort(1, 1, 20)
            # Convert digits to amperes (6.24503 mA per digit).
            node1_IA_A = (node1_IA_eff * 6.24503 / 1000)
            node1_IB_A = (node1_IB_eff * 6.24503 / 1000)
            node1_IC_A = (node1_IC_eff * 6.24503 / 1000)
            # Remember the counter value we just processed.
            var.counter_check = node1_res_counter
            if var.counter_check_last_loop != var.counter_check:
                var.counter_check_last_loop = var.counter_check
                # Integrate power into energy (Ws); 0.1 s per sample —
                # presumably the measurement period, TODO confirm.
                var.energy_n1_l1 += node1_power_L1_W * 0.1
                var.energy_n1_l2 += node1_power_L2_W * 0.1
                var.energy_n1_l3 += node1_power_L3_W * 0.1
            # NOTE(review): this prints whenever the counter is NOT a
            # multiple of 10 (truthy remainder); node2 prints only at
            # counter == 255. Possibly `% 10 == 0` was intended — confirm.
            if var.counter_check % 10:
                var.table_n1.clear()
                # Print the current voltage, power, energy, current and
                # phase readings for node 1 as a grid table.
                headers = ["Spannung in V", "Leistung in W", "Energie Ws", "Strom in A", "Phase in ° ", "Counter"]
                var.table_n1.append(["Node1 L1", "{:.2f}".format(node1_UA_in_V), "{:.2f}".format(node1_power_L1_W),
                                     "{:.2f}".format(var.energy_n1_l1),
                                     "{:.2f}".format(node1_IA_A), "{:.2f}".format(node1_phase_IA_), node1_res_counter])
                var.table_n1.append(["Node1 L2", "{:.2f}".format(node1_UB_in_V), "{:.2f}".format(node1_power_L2_W),
                                     "{:.2f}".format(var.energy_n1_l2),
                                     "{:.2f}".format(node1_IB_A), "{:.2f}".format(node1_phase_IB_), node1_res_counter])
                var.table_n1.append(["Node1 L3", "{:.2f}".format(node1_UC_in_V), "{:.2f}".format(node1_power_L3_W),
                                     "{:.2f}".format(var.energy_n1_l3),
                                     "{:.2f}".format(node1_IC_A), "{:.2f}".format(node1_phase_IC_), node1_res_counter])
                print(tabulate(var.table_n1, headers, tablefmt="grid"))
# Poller for node 2, run forever as a thread (started in __main__).
def node2():
    """Poll the 4-602 brick on node 2, accumulate energy, print a table."""
    while True:
        #### Node 2 #######
        # Result counter of brick 4-602 (byte 2): it increments from 0 to
        # 255 (then wraps to 0) each time a new averaged measurement is
        # available.
        node2_res_counter = bB.getByte(2, 1, 2)
        # Current command/page of the brick (byte 0); read but unused here.
        node2_page = bB.getByte(2, 1, 0)
        # A new mean result is available whenever the counter changed.
        # The averaging depth is preconfigured to 10; it can be changed on
        # page 4, byte 11 (valid range 1..50).
        if node2_res_counter != var.counter_check2:
            # RMS voltages U_A .. U_C (raw digits).
            node2_UA_eff = bB.getShort(2, 1, 22)
            node2_UB_eff = bB.getShort(2, 1, 24)
            node2_UC_eff = bB.getShort(2, 1, 26)
            # Convert digits to volts (13.603 mV per digit).
            node2_UA_in_V = (node2_UA_eff * 13.603 / 1000)
            node2_UB_in_V = (node2_UB_eff * 13.603 / 1000)
            node2_UC_in_V = (node2_UC_eff * 13.603 / 1000)
            # Current phase angles I_A .. I_C (raw digits).
            node2_phase_IA = bB.getShort(2, 1, 10)
            node2_phase_IB = bB.getShort(2, 1, 12)
            node2_phase_IC = bB.getShort(2, 1, 14)
            # Convert digits to degrees (0.017578125 deg per digit).
            node2_phase_IA_ = 0.017578125 * node2_phase_IA
            node2_phase_IB_ = 0.017578125 * node2_phase_IB
            node2_phase_IC_ = 0.017578125 * node2_phase_IC
            # Instantaneous power on L1-L3; reinterpret the raw value as a
            # signed 16-bit integer so negative power is handled correctly.
            node2_power_L1 = bB.getShort(2, 1, 4)
            node2_power_L1 = np.int16(node2_power_L1)
            node2_power_L2 = bB.getShort(2, 1, 6)
            node2_power_L2 = np.int16(node2_power_L2)
            node2_power_L3 = bB.getShort(2, 1, 8)
            node2_power_L3 = np.int16(node2_power_L3)
            # Convert digits to watts (11.1449 W per digit).
            node2_power_L1_W = (node2_power_L1 * 11.1449)
            node2_power_L2_W = (node2_power_L2 * 11.1449)
            node2_power_L3_W = (node2_power_L3 * 11.1449)
            # RMS currents I_A .. I_C (raw digits).
            node2_IA_eff = bB.getShort(2, 1, 16)
            node2_IB_eff = bB.getShort(2, 1, 18)
            node2_IC_eff = bB.getShort(2, 1, 20)
            # Convert digits to amperes (6.24503 mA per digit).
            node2_IA_A = (node2_IA_eff * 6.24503 / 1000)
            node2_IB_A = (node2_IB_eff * 6.24503 / 1000)
            node2_IC_A = (node2_IC_eff * 6.24503 / 1000)
            # Remember the counter value we just processed.
            var.counter_check2 = node2_res_counter
            if var.counter_check2_last_loop != var.counter_check2:
                var.counter_check2_last_loop = var.counter_check2
                # Integrate power into energy (Ws); 0.1 s per sample —
                # presumably the measurement period, TODO confirm.
                var.energy_n2_l1 += node2_power_L1_W * 0.1
                var.energy_n2_l2 += node2_power_L2_W * 0.1
                var.energy_n2_l3 += node2_power_L3_W * 0.1
            # NOTE(review): node 2 prints only once per counter wrap
            # (== 255), unlike node1's `% 10` — confirm which is intended.
            if var.counter_check2 == 255:
                var.table_n2.clear()
                # Print the current voltage, power, energy, current and
                # phase readings for node 2 as a grid table.
                headers = ["Spannung in V", "Leistung in W", "Energie Ws", "Strom in A", "Phase in ° ", "Counter"]
                var.table_n2.append(["Node2 L1", "{:.2f}".format(node2_UA_in_V), "{:.2f}".format(node2_power_L1_W),
                                     "{:.2f}".format(var.energy_n2_l1),
                                     "{:.2f}".format(node2_IA_A), "{:.2f}".format(node2_phase_IA_), node2_res_counter])
                var.table_n2.append(["Node2 L2", "{:.2f}".format(node2_UB_in_V), "{:.2f}".format(node2_power_L2_W),
                                     "{:.2f}".format(var.energy_n2_l2),
                                     "{:.2f}".format(node2_IB_A), "{:.2f}".format(node2_phase_IB_), node2_res_counter])
                var.table_n2.append(["Node2 L3", "{:.2f}".format(node2_UC_in_V), "{:.2f}".format(node2_power_L3_W),
                                     "{:.2f}".format(var.energy_n2_l3),
                                     "{:.2f}".format(node2_IC_A), "{:.2f}".format(node2_phase_IC_), node2_res_counter])
                print(tabulate(var.table_n2, headers, tablefmt="grid"))
if __name__ == "__main__":
    # Open the connection to the LWCS and start the brick-bus update cycle.
    connect.start()
    # Each node reader runs in its own timer thread, staggered slightly so
    # the two pollers do not kick off at the same instant.
    readers = [threading.Timer(startTime_n1, node1),
               threading.Timer(startTime_n2, node2)]
    for reader in readers:
        reader.start()
| [
"tabulate.tabulate",
"emBRICK.modbus_rtu.connect.unit_id.append",
"emBRICK.modbus_rtu.bB.getShort",
"threading.Timer",
"emBRICK.modbus_rtu.bB.getByte",
"numpy.int16",
"emBRICK.modbus_rtu.connect.start"
] | [((1446, 1471), 'emBRICK.modbus_rtu.connect.unit_id.append', 'connect.unit_id.append', (['(1)'], {}), '(1)\n', (1468, 1471), False, 'from emBRICK.modbus_rtu import connect\n'), ((12019, 12034), 'emBRICK.modbus_rtu.connect.start', 'connect.start', ([], {}), '()\n', (12032, 12034), False, 'from emBRICK.modbus_rtu import connect\n'), ((12083, 12119), 'threading.Timer', 'threading.Timer', (['startTime_n1', 'node1'], {}), '(startTime_n1, node1)\n', (12098, 12119), False, 'import threading\n'), ((12129, 12165), 'threading.Timer', 'threading.Timer', (['startTime_n2', 'node2'], {}), '(startTime_n2, node2)\n', (12144, 12165), False, 'import threading\n'), ((3414, 3433), 'emBRICK.modbus_rtu.bB.getByte', 'bB.getByte', (['(1)', '(1)', '(2)'], {}), '(1, 1, 2)\n', (3424, 3433), False, 'from emBRICK.modbus_rtu import bB\n'), ((3532, 3551), 'emBRICK.modbus_rtu.bB.getByte', 'bB.getByte', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (3542, 3551), False, 'from emBRICK.modbus_rtu import bB\n'), ((7815, 7834), 'emBRICK.modbus_rtu.bB.getByte', 'bB.getByte', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (7825, 7834), False, 'from emBRICK.modbus_rtu import bB\n'), ((7933, 7952), 'emBRICK.modbus_rtu.bB.getByte', 'bB.getByte', (['(2)', '(1)', '(0)'], {}), '(2, 1, 0)\n', (7943, 7952), False, 'from emBRICK.modbus_rtu import bB\n'), ((3998, 4019), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(22)'], {}), '(1, 1, 22)\n', (4009, 4019), False, 'from emBRICK.modbus_rtu import bB\n'), ((4047, 4068), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(24)'], {}), '(1, 1, 24)\n', (4058, 4068), False, 'from emBRICK.modbus_rtu import bB\n'), ((4096, 4117), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(26)'], {}), '(1, 1, 26)\n', (4107, 4117), False, 'from emBRICK.modbus_rtu import bB\n'), ((4404, 4425), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(10)'], {}), '(1, 1, 10)\n', (4415, 4425), False, 'from emBRICK.modbus_rtu 
import bB\n'), ((4455, 4476), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(12)'], {}), '(1, 1, 12)\n', (4466, 4476), False, 'from emBRICK.modbus_rtu import bB\n'), ((4506, 4527), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(14)'], {}), '(1, 1, 14)\n', (4517, 4527), False, 'from emBRICK.modbus_rtu import bB\n'), ((4823, 4843), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(4)'], {}), '(1, 1, 4)\n', (4834, 4843), False, 'from emBRICK.modbus_rtu import bB\n'), ((4873, 4897), 'numpy.int16', 'np.int16', (['node1_power_L1'], {}), '(node1_power_L1)\n', (4881, 4897), True, 'import numpy as np\n'), ((4927, 4947), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(6)'], {}), '(1, 1, 6)\n', (4938, 4947), False, 'from emBRICK.modbus_rtu import bB\n'), ((4977, 5001), 'numpy.int16', 'np.int16', (['node1_power_L2'], {}), '(node1_power_L2)\n', (4985, 5001), True, 'import numpy as np\n'), ((5031, 5051), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(8)'], {}), '(1, 1, 8)\n', (5042, 5051), False, 'from emBRICK.modbus_rtu import bB\n'), ((5081, 5105), 'numpy.int16', 'np.int16', (['node1_power_L3'], {}), '(node1_power_L3)\n', (5089, 5105), True, 'import numpy as np\n'), ((5396, 5417), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(16)'], {}), '(1, 1, 16)\n', (5407, 5417), False, 'from emBRICK.modbus_rtu import bB\n'), ((5445, 5466), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(18)'], {}), '(1, 1, 18)\n', (5456, 5466), False, 'from emBRICK.modbus_rtu import bB\n'), ((5494, 5515), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(1)', '(1)', '(20)'], {}), '(1, 1, 20)\n', (5505, 5515), False, 'from emBRICK.modbus_rtu import bB\n'), ((8400, 8421), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(22)'], {}), '(2, 1, 22)\n', (8411, 8421), False, 'from emBRICK.modbus_rtu import bB\n'), ((8449, 8470), 
'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(24)'], {}), '(2, 1, 24)\n', (8460, 8470), False, 'from emBRICK.modbus_rtu import bB\n'), ((8498, 8519), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(26)'], {}), '(2, 1, 26)\n', (8509, 8519), False, 'from emBRICK.modbus_rtu import bB\n'), ((8806, 8827), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(10)'], {}), '(2, 1, 10)\n', (8817, 8827), False, 'from emBRICK.modbus_rtu import bB\n'), ((8857, 8878), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(12)'], {}), '(2, 1, 12)\n', (8868, 8878), False, 'from emBRICK.modbus_rtu import bB\n'), ((8908, 8929), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(14)'], {}), '(2, 1, 14)\n', (8919, 8929), False, 'from emBRICK.modbus_rtu import bB\n'), ((9222, 9242), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(4)'], {}), '(2, 1, 4)\n', (9233, 9242), False, 'from emBRICK.modbus_rtu import bB\n'), ((9272, 9296), 'numpy.int16', 'np.int16', (['node2_power_L1'], {}), '(node2_power_L1)\n', (9280, 9296), True, 'import numpy as np\n'), ((9326, 9346), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(6)'], {}), '(2, 1, 6)\n', (9337, 9346), False, 'from emBRICK.modbus_rtu import bB\n'), ((9376, 9400), 'numpy.int16', 'np.int16', (['node2_power_L2'], {}), '(node2_power_L2)\n', (9384, 9400), True, 'import numpy as np\n'), ((9430, 9450), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(8)'], {}), '(2, 1, 8)\n', (9441, 9450), False, 'from emBRICK.modbus_rtu import bB\n'), ((9480, 9504), 'numpy.int16', 'np.int16', (['node2_power_L3'], {}), '(node2_power_L3)\n', (9488, 9504), True, 'import numpy as np\n'), ((9795, 9816), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(16)'], {}), '(2, 1, 16)\n', (9806, 9816), False, 'from emBRICK.modbus_rtu import bB\n'), ((9844, 9865), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', 
'(1)', '(18)'], {}), '(2, 1, 18)\n', (9855, 9865), False, 'from emBRICK.modbus_rtu import bB\n'), ((9893, 9914), 'emBRICK.modbus_rtu.bB.getShort', 'bB.getShort', (['(2)', '(1)', '(20)'], {}), '(2, 1, 20)\n', (9904, 9914), False, 'from emBRICK.modbus_rtu import bB\n'), ((7428, 7476), 'tabulate.tabulate', 'tabulate', (['var.table_n1', 'headers'], {'tablefmt': '"""grid"""'}), "(var.table_n1, headers, tablefmt='grid')\n", (7436, 7476), False, 'from tabulate import tabulate\n'), ((11837, 11885), 'tabulate.tabulate', 'tabulate', (['var.table_n2', 'headers'], {'tablefmt': '"""grid"""'}), "(var.table_n2, headers, tablefmt='grid')\n", (11845, 11885), False, 'from tabulate import tabulate\n')] |
""" ============================================================================
Copyright 2021 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The Curb Dataset's license is different, and you can find it in ReadMe.md.
============================================================================"""
import argparse
import os
import numpy as np
import cv2
# Mapping from integer semantic label id to the BGR color used when
# rendering curb instances (OpenCV images are BGR, not RGB).
color_map = { # bgr
    0: [0, 0, 0],
    1: [255, 255, 255],
    2: [245, 150, 150],
    3: [245, 230, 100],
    4: [250, 80, 150],
    5: [150, 60, 30],
    6: [255, 150, 0],
    7: [180, 30, 80],
    8: [255, 0, 150],
    9: [30, 150, 255],
    10: [200, 150, 255],
    11: [90, 150, 150],
    12: [255, 0, 255],
    13: [255, 150, 255],
    14: [75, 0, 75],
    15: [75, 0, 175],
    16: [0, 200, 255],
    17: [50, 120, 255],
    18: [0, 150, 255],
    19: [170, 255, 150],
    20: [0, 175, 0],
    21: [0, 60, 135],
    22: [80, 240, 150],
    23: [150, 240, 255],
    24: [0, 0, 255],
    25: [255, 255, 50],
    26: [245, 150, 100],
    27: [255, 0, 0],
    28: [200, 40, 255],
    29: [30, 30, 255],
    30: [90, 30, 150],
    31: [250, 80, 100],
    32: [30, 30, 30],
    33: [90, 120, 180],
}
def get_args():
    """Parse the command-line options of the curb visualizer.

    Returns an ``argparse.Namespace`` with the attributes ``curb_path``
    (required), ``kernel_size`` (default 7), ``semantic_kitti_path``
    (optional) and ``pass_frames`` (default 0).
    """
    arg_parser = argparse.ArgumentParser(
        "./vis_data.py", description='vis curb annotations with LiDAR point cloud')
    arg_parser.add_argument('-c', '--curb_path', type=str, required=True,
                            help='Data-set path of curb annotations')
    arg_parser.add_argument('-k', '--kernel_size', type=int, required=False, default=7,
                            help='kernel size in curb dilating')
    arg_parser.add_argument('-d', '--semantic_kitti_path', type=str, required=False,
                            help='vis point cloud')
    arg_parser.add_argument('-p', '--pass_frames', type=int, required=False, default=0,
                            help='start frame in sequence')
    return arg_parser.parse_args()
def filter_point(pc_in, roi=None):
    """
    @ params: pc_in: input pointcloud (N, >=3), x/y/z in the first three columns
    @ params: roi: optional [x_min, y_min, z_min, x_max, y_max, z_max] bounds;
              defaults to the module-level ``roi_range`` (backward compatible)
    @ return: filtered pointcloud containing only the in-range rows
    @ description : boolean-mask the rows whose x, y and z all lie inside
                    the axis-aligned ROI box
    """
    if roi is None:
        roi = roi_range
    idx = (pc_in[:, 0] >= roi[0]) & (pc_in[:, 0] <= roi[3]) & \
          (pc_in[:, 1] >= roi[1]) & (pc_in[:, 1] <= roi[4]) & \
          (pc_in[:, 2] >= roi[2]) & (pc_in[:, 2] <= roi[5])
    pc_out = pc_in[idx, :]
    return pc_out
def vis_image(grid_curb_in, image_size_in, kernel_in=7):
    """Rasterise curb grid points into a binary mask and an instance image.

    @ params: grid_curb_in: (N, 3) int array of [row, col, instance_id];
              the row/col columns are clipped in place to the image bounds
    @ params: image_size_in: [rows, cols] of the output images
    @ params: kernel_in: dilating kernel diameter, default=7
    @ return: the binary image & the instance image
    @ description : each curb instance is dilated with an elliptical kernel
                    and painted with a color from ``color_map_array``
    """
    rows, cols = image_size_in[0], image_size_in[1]
    grid_curb_in[:, 0] = np.clip(grid_curb_in[:, 0], 0, rows - 1)
    grid_curb_in[:, 1] = np.clip(grid_curb_in[:, 1], 0, cols - 1)
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_in, kernel_in))
    img_binary_out = np.zeros((rows, cols, 1), dtype=np.uint8)
    img_instance_out = np.zeros((rows, cols, 3), dtype=np.uint8)
    for instance_id in np.unique(grid_curb_in[:, 2]):
        # Rasterise this single instance, then thicken it by dilation.
        points = grid_curb_in[grid_curb_in[:, 2] == instance_id, 0:2]
        instance_mask = np.zeros((rows, cols, 1), dtype=np.uint8)
        instance_mask[points[:, 0], points[:, 1]] = 255
        dilated = cv2.dilate(instance_mask, ellipse)
        hit = np.array(np.where(dilated), dtype=int)
        img_binary_out[hit[0], hit[1]] = 255
        color = color_map_array[(instance_id + 1) % color_map_array.shape[0]]
        img_instance_out[hit[0], hit[1]] = color
    return img_binary_out, img_instance_out
if __name__ == '__main__':
    args = get_args()
    # ROI box in metres: [x_min, y_min, z_min, x_max, y_max, z_max]
    roi_range = [0.0, -19.2, -3, 51.2, 19.2, 0]
    grid_res = 0.1
    image_size = [int((roi_range[3]-roi_range[0]) / grid_res), int((roi_range[4]-roi_range[1]) / grid_res)]
    # Bug fix: in Python 3 ``color_map.values()`` is a dict view, and
    # ``np.array`` on a view builds a useless 0-d object array; materialise
    # the values first so the (34, 3) lookup table indexes correctly.
    color_map_array = np.array(list(color_map.values()))
    print('---------------------------------------------------------------------------------------------------------')
    print('curb_dataset : %s' % args.curb_path)
    print('SemanticKITTI_dataset : %s' % args.semantic_kitti_path)
    print('kernel_size : %d' % args.kernel_size)
    print('ROI range : %s' % roi_range)
    print('grid resolution : %.2f' % grid_res)
    print('image size : %s' % image_size)
    print('Tab space to go to the next frame !')
    print('---------------------------------------------------------------------------------------------------------')
    curb_data_path = args.curb_path
    pc_path = args.semantic_kitti_path
    kernel = args.kernel_size
    curb_files = [f for f in sorted(os.listdir(os.path.join(curb_data_path, "curb"))) if f.endswith(".txt")]
    for i, f in enumerate(curb_files):
        if i < args.pass_frames:
            continue
        curb_file = os.path.join(curb_data_path, "curb", f)
        curb = np.loadtxt(curb_file)
        # Convert metric x/y coordinates into image row/col indices.
        grid_curb = curb.copy()
        grid_curb[:, 0] = np.floor((curb[:, 0] - roi_range[0]) / grid_res)
        grid_curb[:, 1] = np.floor((grid_curb[:, 1] - roi_range[1]) / grid_res)
        grid_curb = grid_curb.astype(int)
        binary_img, instance_img = vis_image(grid_curb_in=grid_curb, image_size_in=image_size, kernel_in=kernel)
        # vis with raw point cloud if you like
        if pc_path is not None:
            pc_file = os.path.join(pc_path, "velodyne", os.path.splitext(f)[0] + ".bin")
            scan = np.fromfile(pc_file, dtype=np.float32)
            scan = scan.reshape((-1, 4))
            scan_in_roi = filter_point(scan)
            scan_grid = np.zeros((scan_in_roi.shape[0], 2))
            scan_grid[:, 0] = np.floor((scan_in_roi[:, 0] - roi_range[0]) / grid_res)  # ROW
            scan_grid[:, 1] = np.floor((scan_in_roi[:, 1] - roi_range[1]) / grid_res)  # COL
            scan_grid[:, 0] = np.clip(scan_grid[:, 0], 0, image_size[0] - 1)
            scan_grid[:, 1] = np.clip(scan_grid[:, 1], 0, image_size[1] - 1)
            scan_grid = scan_grid.astype(int)
            scan_img = instance_img.copy()
            scan_img[scan_grid[:, 0], scan_grid[:, 1]] = [120, 120, 120]
            cv2.imshow('curb&scan', scan_img)
        cv2.imshow('binary_curb', binary_img)
        cv2.imshow('instance_curb', instance_img)
        cv2.waitKey(0)
| [
"numpy.clip",
"numpy.fromfile",
"numpy.unique",
"argparse.ArgumentParser",
"numpy.where",
"os.path.join",
"numpy.floor",
"os.path.splitext",
"cv2.imshow",
"numpy.zeros",
"numpy.loadtxt",
"cv2.waitKey",
"cv2.dilate",
"cv2.getStructuringElement"
] | [((1816, 1920), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""./vis_data.py"""'], {'description': '"""vis curb annotations with LiDAR point cloud"""'}), "('./vis_data.py', description=\n 'vis curb annotations with LiDAR point cloud')\n", (1839, 1920), False, 'import argparse\n'), ((3172, 3224), 'numpy.clip', 'np.clip', (['grid_curb_in[:, 0]', '(0)', '(image_size_in[0] - 1)'], {}), '(grid_curb_in[:, 0], 0, image_size_in[0] - 1)\n', (3179, 3224), True, 'import numpy as np\n'), ((3250, 3302), 'numpy.clip', 'np.clip', (['grid_curb_in[:, 1]', '(0)', '(image_size_in[1] - 1)'], {}), '(grid_curb_in[:, 1], 0, image_size_in[1] - 1)\n', (3257, 3302), True, 'import numpy as np\n'), ((3318, 3347), 'numpy.unique', 'np.unique', (['grid_curb_in[:, 2]'], {}), '(grid_curb_in[:, 2])\n', (3327, 3347), True, 'import numpy as np\n'), ((3358, 3426), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(kernel_in, kernel_in)'], {}), '(cv2.MORPH_ELLIPSE, (kernel_in, kernel_in))\n', (3383, 3426), False, 'import cv2\n'), ((3449, 3514), 'numpy.zeros', 'np.zeros', (['(image_size_in[0], image_size_in[1], 1)'], {'dtype': 'np.uint8'}), '((image_size_in[0], image_size_in[1], 1), dtype=np.uint8)\n', (3457, 3514), True, 'import numpy as np\n'), ((3538, 3603), 'numpy.zeros', 'np.zeros', (['(image_size_in[0], image_size_in[1], 3)'], {'dtype': 'np.uint8'}), '((image_size_in[0], image_size_in[1], 3), dtype=np.uint8)\n', (3546, 3603), True, 'import numpy as np\n'), ((3746, 3811), 'numpy.zeros', 'np.zeros', (['(image_size_in[0], image_size_in[1], 1)'], {'dtype': 'np.uint8'}), '((image_size_in[0], image_size_in[1], 1), dtype=np.uint8)\n', (3754, 3811), True, 'import numpy as np\n'), ((3904, 3931), 'cv2.dilate', 'cv2.dilate', (['img_dilate', 'ker'], {}), '(img_dilate, ker)\n', (3914, 3931), False, 'import cv2\n'), ((5455, 5494), 'os.path.join', 'os.path.join', (['curb_data_path', '"""curb"""', 'f'], {}), "(curb_data_path, 'curb', f)\n", (5467, 5494), False, 
'import os\n'), ((5510, 5531), 'numpy.loadtxt', 'np.loadtxt', (['curb_file'], {}), '(curb_file)\n', (5520, 5531), True, 'import numpy as np\n'), ((5590, 5638), 'numpy.floor', 'np.floor', (['((curb[:, 0] - roi_range[0]) / grid_res)'], {}), '((curb[:, 0] - roi_range[0]) / grid_res)\n', (5598, 5638), True, 'import numpy as np\n'), ((5665, 5718), 'numpy.floor', 'np.floor', (['((grid_curb[:, 1] - roi_range[1]) / grid_res)'], {}), '((grid_curb[:, 1] - roi_range[1]) / grid_res)\n', (5673, 5718), True, 'import numpy as np\n'), ((6803, 6840), 'cv2.imshow', 'cv2.imshow', (['"""binary_curb"""', 'binary_img'], {}), "('binary_curb', binary_img)\n", (6813, 6840), False, 'import cv2\n'), ((6849, 6890), 'cv2.imshow', 'cv2.imshow', (['"""instance_curb"""', 'instance_img'], {}), "('instance_curb', instance_img)\n", (6859, 6890), False, 'import cv2\n'), ((6899, 6913), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6910, 6913), False, 'import cv2\n'), ((3968, 3992), 'numpy.where', 'np.where', (['img_dilate_dst'], {}), '(img_dilate_dst)\n', (3976, 3992), True, 'import numpy as np\n'), ((6061, 6099), 'numpy.fromfile', 'np.fromfile', (['pc_file'], {'dtype': 'np.float32'}), '(pc_file, dtype=np.float32)\n', (6072, 6099), True, 'import numpy as np\n'), ((6210, 6245), 'numpy.zeros', 'np.zeros', (['(scan_in_roi.shape[0], 2)'], {}), '((scan_in_roi.shape[0], 2))\n', (6218, 6245), True, 'import numpy as np\n'), ((6276, 6331), 'numpy.floor', 'np.floor', (['((scan_in_roi[:, 0] - roi_range[0]) / grid_res)'], {}), '((scan_in_roi[:, 0] - roi_range[0]) / grid_res)\n', (6284, 6331), True, 'import numpy as np\n'), ((6369, 6424), 'numpy.floor', 'np.floor', (['((scan_in_roi[:, 1] - roi_range[1]) / grid_res)'], {}), '((scan_in_roi[:, 1] - roi_range[1]) / grid_res)\n', (6377, 6424), True, 'import numpy as np\n'), ((6462, 6508), 'numpy.clip', 'np.clip', (['scan_grid[:, 0]', '(0)', '(image_size[0] - 1)'], {}), '(scan_grid[:, 0], 0, image_size[0] - 1)\n', (6469, 6508), True, 'import numpy as np\n'), 
((6539, 6585), 'numpy.clip', 'np.clip', (['scan_grid[:, 1]', '(0)', '(image_size[1] - 1)'], {}), '(scan_grid[:, 1], 0, image_size[1] - 1)\n', (6546, 6585), True, 'import numpy as np\n'), ((6760, 6793), 'cv2.imshow', 'cv2.imshow', (['"""curb&scan"""', 'scan_img'], {}), "('curb&scan', scan_img)\n", (6770, 6793), False, 'import cv2\n'), ((5280, 5316), 'os.path.join', 'os.path.join', (['curb_data_path', '"""curb"""'], {}), "(curb_data_path, 'curb')\n", (5292, 5316), False, 'import os\n'), ((6009, 6028), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (6025, 6028), False, 'import os\n')] |
import sys
sys.path.append('schemes')
from SysModel import Simulator, PID
from Classes import ClosedLoopData, LMPCprediction
from LTVMPC import LTV_MPC
from LTIMPC import LTI_MPC
from Track import Map, unityTestChangeOfCoordinates
from LMPC import LMPCplus
from Utilities import Regression
from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost
import numpy as np
import matplotlib.pyplot as plt
import pdb
import pickle
def main():
    """Run the racing-controller pipeline: PID -> LTI-MPC -> LTV-MPC -> LMPC.

    Each stage simulates the vehicle on the track and pickles its
    closed-loop data under ``<sys.path[0]>\\data``; the first three stages
    seed the LMPC safe set.  Plots and animations are produced at the end.
    """
    # Parameter initialization
    dt = 1.0 / 10.0         # Controller discretization time
    Time = 100              # Simulation time for PID
    TimeMPC = 100           # Time for LTI-MPC
    TimeMPC_tv = 100        # Time for LTV-MPC
    TimeLMPC = 400          # Time for LMPC
    vt = 0.8                # Reference velocity for path controllers
    v0 = 0.5                # Initial velocity at lap 0
    N = 12                  # Horizon
    dim_state = 6           # State dimension
    dim_input = 2           # Input dimension
    Q = np.diag([1.0, 1.0, 1, 1, 0.0, 100.0])  # vx, vy, wz, epsi, s, ey
    R = np.diag([1.0, 10.0])  # delta, a

    Q_lmpc = np.diag([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) * 0  # vx, vy, wz, epsi, s, ey
    R_lmpc = np.diag([1.0, 1.0]) * 0  # delta, a
    Qf = np.array([0, 10]) * 1
    QterminalSlack = np.diag([10, 1, 1, 1, 10, 1]) * 20
    dR_LMPC = np.array([1.0, 10.0]) * 10
    # Input bounds: row 0 = steering limits, row 1 = acceleration limits
    inputConstr = np.array([[0.5, 0.5],
                            [10.0, 10.0]])

    LMPC_Solver = "CVX"  # Can pick CVX for cvxopt or OSQP. For OSQP uncomment line 14 in LMPC.py
    numSS_it = 4         # Number of trajectories used at each iteration to build the safe set
    numSS_Points = 40    # Number of points to select from each trajectory to build the safe set
    Laps = 46 + numSS_it # Total LMPC laps (50 laps)

    map = Map(0.4)                    # Initialize the map (NOTE: shadows the builtin `map`)
    model = Simulator(map)            # Initialize the MPC model
    LMPCmodel = Simulator(map, 1, 1)  # Initialize the LMPC model

    # State constraints for LTI-MPC and LTV-MPC
    Fx_MPC = np.array([[1., 0., 0., 0., 0., 0.],
                       [0., 0., 0., 0., 0., 1.],
                       [0., 0., 0., 0., 0., -1.]])
    bx_MPC = np.array([[10],
                       [2.],
                       [2.]])
    # Input constraints for LTI-MPC and LTV-MPC
    Fu_MPC = np.array([[1., 0.],
                       [-1., 0.],
                       [0., 1.],
                       [0., -1.]])
    bu_MPC = np.array([[inputConstr[0, 0]],
                       [inputConstr[0, 1]],
                       [inputConstr[1, 0]],
                       [inputConstr[1, 1]]])
    # State constraints for LMPC (lateral error bounded by the track half width)
    Fx = np.array([[0., 0., 0., 0., 0., 1.],
                   [0., 0., 0., 0., 0., -1.]])
    bx = np.array([[map.halfWidth],
                   [map.halfWidth]])
    # Input constraints for LMPC
    Fu = np.array([[1., 0.],
                   [-1., 0.],
                   [0., 1.],
                   [0., -1.]])
    bu = np.array([[inputConstr[0, 0]],
                   [inputConstr[0, 1]],
                   [inputConstr[1, 0]],
                   [inputConstr[1, 1]]])

    print("Starting PID")
    ClosedLoopDataPID = ClosedLoopData(dt, Time, v0)
    PIDController = PID(vt)
    model.Sim(ClosedLoopDataPID, PIDController)
    file_data = open(sys.path[0]+'\data\ClosedLoopDataPID.obj', 'wb')
    pickle.dump(ClosedLoopDataPID, file_data)
    file_data.close()
    print("===== PID terminated")

    print("Starting LTI-MPC")
    lamb = 0.0000001
    # Identify one linear model from the PID data via regularized regression
    A, B, Error = Regression(ClosedLoopDataPID.x, ClosedLoopDataPID.u, lamb)
    ClosedLoopDataLTI_MPC = ClosedLoopData(dt, TimeMPC, v0)
    LTIMPC = LTI_MPC(A, B, Q, R, N, vt, Fx_MPC, bx_MPC, Fu_MPC, bu_MPC)
    model.Sim(ClosedLoopDataLTI_MPC, LTIMPC)
    file_data = open(sys.path[0] + '\data\ClosedLoopDataLTI_MPC.obj', 'wb')
    pickle.dump(ClosedLoopDataLTI_MPC, file_data)
    file_data.close()
    print("===== LTI-MPC terminated")

    print("Starting LTV-MPC")
    ClosedLoopDataLTV_MPC = ClosedLoopData(dt, TimeMPC_tv, v0)
    LTVMPC = LTV_MPC(Q, R, N, vt, dim_state, dim_input, ClosedLoopDataPID.x, ClosedLoopDataPID.u, dt, map, Fx_MPC, bx_MPC, Fu_MPC, bu_MPC)
    model.Sim(ClosedLoopDataLTV_MPC, LTVMPC)
    # Bug fix: this path was missing the leading '\' the other save paths
    # use, so the pickle landed next to -- not inside -- the data directory.
    file_data = open(sys.path[0] + '\data\ClosedLoopDataLTV_MPC.obj', 'wb')
    pickle.dump(ClosedLoopDataLTV_MPC, file_data)
    file_data.close()
    print("===== LTV-MPC terminated")

    print("Starting LMPC")
    ClosedLoopLMPC = ClosedLoopData(dt, TimeLMPC, v0)
    LMPCOpenLoopData = LMPCprediction(N, dim_state, dim_input, TimeLMPC, numSS_Points, Laps)
    LMPC = LMPCplus(numSS_Points, numSS_it, N, QterminalSlack, Qf, Q_lmpc, R_lmpc, dR_LMPC, dt, map, Laps, TimeLMPC, LMPC_Solver, Fx, bx, Fu, bu)
    # Seed the safe set with numSS_it (= 4) previously driven trajectories.
    # NOTE(review): ClosedLoopDataPID is added twice -- confirm intentional.
    LMPC.addTrajectory(ClosedLoopDataPID)
    LMPC.addTrajectory(ClosedLoopDataLTV_MPC)
    LMPC.addTrajectory(ClosedLoopDataPID)
    LMPC.addTrajectory(ClosedLoopDataLTI_MPC)

    x0 = np.zeros((1, dim_state))
    x0_glob = np.zeros((1, dim_state))
    x0[0, :] = ClosedLoopLMPC.x[0, :]
    x0_glob[0, :] = ClosedLoopLMPC.x_glob[0, :]
    for it in range(numSS_it, Laps):
        ClosedLoopLMPC.updateInitialConditions(x0, x0_glob)
        LMPCmodel.Sim(ClosedLoopLMPC, LMPC, LMPCOpenLoopData)
        LMPC.addTrajectory(ClosedLoopLMPC)
        if LMPC.feasible == 0:
            break
        else:
            # Reset Initial Conditions: wrap the arc length s back by one lap
            x0[0, :] = ClosedLoopLMPC.x[ClosedLoopLMPC.SimTime, :] - np.array([0, 0, 0, 0, map.TrackLength, 0])
            x0_glob[0, :] = ClosedLoopLMPC.x_glob[ClosedLoopLMPC.SimTime, :]

    file_data = open(sys.path[0] + '\data\LMPController.obj', 'wb')
    pickle.dump(ClosedLoopLMPC, file_data)
    pickle.dump(LMPC, file_data)
    pickle.dump(LMPCOpenLoopData, file_data)
    file_data.close()
    print("===== LMPC terminated")

    laptimes = np.zeros((50, 2))
    # Laptime Plot
    for i in range(0, LMPC.it):
        print("Lap time at iteration ", i, " is ", LMPC.Qfun[0, i] * dt, "s")
        laptimes[i, 0] = LMPC.Qfun[0, i] * dt
        laptimes[i, 1] = i
    plt.figure(3)
    plt.plot(laptimes[:, 1], laptimes[:, 0], '-o')
    plt.ylabel('Lap Time (sec)')
    plt.xlabel('Lap Number')

    print("===== Start Plotting")
    plotTrajectory(map, ClosedLoopDataPID.x, ClosedLoopDataPID.x_glob, ClosedLoopDataPID.u)
    plotTrajectory(map, ClosedLoopDataLTI_MPC.x, ClosedLoopDataLTI_MPC.x_glob, ClosedLoopDataLTI_MPC.u)
    plotTrajectory(map, ClosedLoopDataLTV_MPC.x, ClosedLoopDataLTV_MPC.x_glob, ClosedLoopDataLTV_MPC.u)
    plotCost(LMPC.Qfun, int(TimeLMPC / dt) + 1)
    plotC(LMPC.Qfun_SelectedTot, numSS_it)
    plotQC(LMPC.Qcost, numSS_Points)
    plotQt(LMPC.qq)
    plotQcost(LMPC.costSolved)
    plotClosedLoopLMPC(LMPC, map)
    animation_xy(map, LMPCOpenLoopData, LMPC, Laps - 2)
    animation_states(map, LMPCOpenLoopData, LMPC, 10)
    unityTestChangeOfCoordinates(map, ClosedLoopDataPID)
    unityTestChangeOfCoordinates(map, ClosedLoopDataLTI_MPC)
    unityTestChangeOfCoordinates(map, ClosedLoopLMPC)
    saveGif_xyResults(map, LMPCOpenLoopData, LMPC, Laps-1)
    Save_statesAnimation(map, LMPCOpenLoopData, LMPC, 5)
    plt.show()
if __name__ == "__main__":
main() | [
"Classes.LMPCprediction",
"Track.Map",
"matplotlib.pyplot.ylabel",
"plot.animation_xy",
"Classes.ClosedLoopData",
"numpy.array",
"sys.path.append",
"plot.Save_statesAnimation",
"plot.plotClosedLoopLMPC",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"SysModel.Simulator",
"Utilities.R... | [((11, 37), 'sys.path.append', 'sys.path.append', (['"""schemes"""'], {}), "('schemes')\n", (26, 37), False, 'import sys\n'), ((1039, 1076), 'numpy.diag', 'np.diag', (['[1.0, 1.0, 1, 1, 0.0, 100.0]'], {}), '([1.0, 1.0, 1, 1, 0.0, 100.0])\n', (1046, 1076), True, 'import numpy as np\n'), ((1123, 1143), 'numpy.diag', 'np.diag', (['[1.0, 10.0]'], {}), '([1.0, 10.0])\n', (1130, 1143), True, 'import numpy as np\n'), ((1484, 1520), 'numpy.array', 'np.array', (['[[0.5, 0.5], [10.0, 10.0]]'], {}), '([[0.5, 0.5], [10.0, 10.0]])\n', (1492, 1520), True, 'import numpy as np\n'), ((1909, 1917), 'Track.Map', 'Map', (['(0.4)'], {}), '(0.4)\n', (1912, 1917), False, 'from Track import Map, unityTestChangeOfCoordinates\n'), ((1995, 2009), 'SysModel.Simulator', 'Simulator', (['map'], {}), '(map)\n', (2004, 2009), False, 'from SysModel import Simulator, PID\n'), ((2089, 2109), 'SysModel.Simulator', 'Simulator', (['map', '(1)', '(1)'], {}), '(map, 1, 1)\n', (2098, 2109), False, 'from SysModel import Simulator, PID\n'), ((2226, 2338), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0,\n 0.0, 0.0, 0.0, -1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [\n 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]])\n', (2234, 2338), True, 'import numpy as np\n'), ((2368, 2398), 'numpy.array', 'np.array', (['[[10], [2.0], [2.0]]'], {}), '([[10], [2.0], [2.0]])\n', (2376, 2398), True, 'import numpy as np\n'), ((2497, 2557), 'numpy.array', 'np.array', (['[[1.0, 0.0], [-1.0, 0.0], [0.0, 1.0], [0.0, -1.0]]'], {}), '([[1.0, 0.0], [-1.0, 0.0], [0.0, 1.0], [0.0, -1.0]])\n', (2505, 2557), True, 'import numpy as np\n'), ((2633, 2732), 'numpy.array', 'np.array', (['[[inputConstr[0, 0]], [inputConstr[0, 1]], [inputConstr[1, 0]], [\n inputConstr[1, 1]]]'], {}), '([[inputConstr[0, 0]], [inputConstr[0, 1]], [inputConstr[1, 0]], [\n inputConstr[1, 1]]])\n', (2641, 2732), True, 'import numpy as np\n'), ((2840, 2915), 
'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, -1.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, -1.0]])\n', (2848, 2915), True, 'import numpy as np\n'), ((2932, 2976), 'numpy.array', 'np.array', (['[[map.halfWidth], [map.halfWidth]]'], {}), '([[map.halfWidth], [map.halfWidth]])\n', (2940, 2976), True, 'import numpy as np\n'), ((3039, 3099), 'numpy.array', 'np.array', (['[[1.0, 0.0], [-1.0, 0.0], [0.0, 1.0], [0.0, -1.0]]'], {}), '([[1.0, 0.0], [-1.0, 0.0], [0.0, 1.0], [0.0, -1.0]])\n', (3047, 3099), True, 'import numpy as np\n'), ((3158, 3257), 'numpy.array', 'np.array', (['[[inputConstr[0, 0]], [inputConstr[0, 1]], [inputConstr[1, 0]], [\n inputConstr[1, 1]]]'], {}), '([[inputConstr[0, 0]], [inputConstr[0, 1]], [inputConstr[1, 0]], [\n inputConstr[1, 1]]])\n', (3166, 3257), True, 'import numpy as np\n'), ((3357, 3385), 'Classes.ClosedLoopData', 'ClosedLoopData', (['dt', 'Time', 'v0'], {}), '(dt, Time, v0)\n', (3371, 3385), False, 'from Classes import ClosedLoopData, LMPCprediction\n'), ((3406, 3413), 'SysModel.PID', 'PID', (['vt'], {}), '(vt)\n', (3409, 3413), False, 'from SysModel import Simulator, PID\n'), ((3537, 3578), 'pickle.dump', 'pickle.dump', (['ClosedLoopDataPID', 'file_data'], {}), '(ClosedLoopDataPID, file_data)\n', (3548, 3578), False, 'import pickle\n'), ((3705, 3763), 'Utilities.Regression', 'Regression', (['ClosedLoopDataPID.x', 'ClosedLoopDataPID.u', 'lamb'], {}), '(ClosedLoopDataPID.x, ClosedLoopDataPID.u, lamb)\n', (3715, 3763), False, 'from Utilities import Regression\n'), ((3792, 3823), 'Classes.ClosedLoopData', 'ClosedLoopData', (['dt', 'TimeMPC', 'v0'], {}), '(dt, TimeMPC, v0)\n', (3806, 3823), False, 'from Classes import ClosedLoopData, LMPCprediction\n'), ((3837, 3895), 'LTIMPC.LTI_MPC', 'LTI_MPC', (['A', 'B', 'Q', 'R', 'N', 'vt', 'Fx_MPC', 'bx_MPC', 'Fu_MPC', 'bu_MPC'], {}), '(A, B, Q, R, N, vt, Fx_MPC, bx_MPC, Fu_MPC, bu_MPC)\n', (3844, 3895), False, 'from LTIMPC import 
LTI_MPC\n'), ((4022, 4067), 'pickle.dump', 'pickle.dump', (['ClosedLoopDataLTI_MPC', 'file_data'], {}), '(ClosedLoopDataLTI_MPC, file_data)\n', (4033, 4067), False, 'import pickle\n'), ((4187, 4221), 'Classes.ClosedLoopData', 'ClosedLoopData', (['dt', 'TimeMPC_tv', 'v0'], {}), '(dt, TimeMPC_tv, v0)\n', (4201, 4221), False, 'from Classes import ClosedLoopData, LMPCprediction\n'), ((4235, 4364), 'LTVMPC.LTV_MPC', 'LTV_MPC', (['Q', 'R', 'N', 'vt', 'dim_state', 'dim_input', 'ClosedLoopDataPID.x', 'ClosedLoopDataPID.u', 'dt', 'map', 'Fx_MPC', 'bx_MPC', 'Fu_MPC', 'bu_MPC'], {}), '(Q, R, N, vt, dim_state, dim_input, ClosedLoopDataPID.x,\n ClosedLoopDataPID.u, dt, map, Fx_MPC, bx_MPC, Fu_MPC, bu_MPC)\n', (4242, 4364), False, 'from LTVMPC import LTV_MPC\n'), ((4486, 4531), 'pickle.dump', 'pickle.dump', (['ClosedLoopDataLTV_MPC', 'file_data'], {}), '(ClosedLoopDataLTV_MPC, file_data)\n', (4497, 4531), False, 'import pickle\n'), ((4642, 4674), 'Classes.ClosedLoopData', 'ClosedLoopData', (['dt', 'TimeLMPC', 'v0'], {}), '(dt, TimeLMPC, v0)\n', (4656, 4674), False, 'from Classes import ClosedLoopData, LMPCprediction\n'), ((4698, 4767), 'Classes.LMPCprediction', 'LMPCprediction', (['N', 'dim_state', 'dim_input', 'TimeLMPC', 'numSS_Points', 'Laps'], {}), '(N, dim_state, dim_input, TimeLMPC, numSS_Points, Laps)\n', (4712, 4767), False, 'from Classes import ClosedLoopData, LMPCprediction\n'), ((4780, 4918), 'LMPC.LMPCplus', 'LMPCplus', (['numSS_Points', 'numSS_it', 'N', 'QterminalSlack', 'Qf', 'Q_lmpc', 'R_lmpc', 'dR_LMPC', 'dt', 'map', 'Laps', 'TimeLMPC', 'LMPC_Solver', 'Fx', 'bx', 'Fu', 'bu'], {}), '(numSS_Points, numSS_it, N, QterminalSlack, Qf, Q_lmpc, R_lmpc,\n dR_LMPC, dt, map, Laps, TimeLMPC, LMPC_Solver, Fx, bx, Fu, bu)\n', (4788, 4918), False, 'from LMPC import LMPCplus\n'), ((5101, 5125), 'numpy.zeros', 'np.zeros', (['(1, dim_state)'], {}), '((1, dim_state))\n', (5109, 5125), True, 'import numpy as np\n'), ((5140, 5164), 'numpy.zeros', 'np.zeros', (['(1, dim_state)'], {}), 
'((1, dim_state))\n', (5148, 5164), True, 'import numpy as np\n'), ((5819, 5857), 'pickle.dump', 'pickle.dump', (['ClosedLoopLMPC', 'file_data'], {}), '(ClosedLoopLMPC, file_data)\n', (5830, 5857), False, 'import pickle\n'), ((5862, 5890), 'pickle.dump', 'pickle.dump', (['LMPC', 'file_data'], {}), '(LMPC, file_data)\n', (5873, 5890), False, 'import pickle\n'), ((5895, 5935), 'pickle.dump', 'pickle.dump', (['LMPCOpenLoopData', 'file_data'], {}), '(LMPCOpenLoopData, file_data)\n', (5906, 5935), False, 'import pickle\n'), ((6009, 6026), 'numpy.zeros', 'np.zeros', (['(50, 2)'], {}), '((50, 2))\n', (6017, 6026), True, 'import numpy as np\n'), ((6234, 6247), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (6244, 6247), True, 'import matplotlib.pyplot as plt\n'), ((6252, 6298), 'matplotlib.pyplot.plot', 'plt.plot', (['laptimes[:, 1]', 'laptimes[:, 0]', '"""-o"""'], {}), "(laptimes[:, 1], laptimes[:, 0], '-o')\n", (6260, 6298), True, 'import matplotlib.pyplot as plt\n'), ((6303, 6331), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Lap Time (sec)"""'], {}), "('Lap Time (sec)')\n", (6313, 6331), True, 'import matplotlib.pyplot as plt\n'), ((6336, 6360), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lap Number"""'], {}), "('Lap Number')\n", (6346, 6360), True, 'import matplotlib.pyplot as plt\n'), ((6401, 6492), 'plot.plotTrajectory', 'plotTrajectory', (['map', 'ClosedLoopDataPID.x', 'ClosedLoopDataPID.x_glob', 'ClosedLoopDataPID.u'], {}), '(map, ClosedLoopDataPID.x, ClosedLoopDataPID.x_glob,\n ClosedLoopDataPID.u)\n', (6415, 6492), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((6494, 6597), 'plot.plotTrajectory', 'plotTrajectory', (['map', 'ClosedLoopDataLTI_MPC.x', 'ClosedLoopDataLTI_MPC.x_glob', 'ClosedLoopDataLTI_MPC.u'], {}), '(map, ClosedLoopDataLTI_MPC.x, ClosedLoopDataLTI_MPC.x_glob,\n 
ClosedLoopDataLTI_MPC.u)\n', (6508, 6597), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((6599, 6702), 'plot.plotTrajectory', 'plotTrajectory', (['map', 'ClosedLoopDataLTV_MPC.x', 'ClosedLoopDataLTV_MPC.x_glob', 'ClosedLoopDataLTV_MPC.u'], {}), '(map, ClosedLoopDataLTV_MPC.x, ClosedLoopDataLTV_MPC.x_glob,\n ClosedLoopDataLTV_MPC.u)\n', (6613, 6702), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((6753, 6791), 'plot.plotC', 'plotC', (['LMPC.Qfun_SelectedTot', 'numSS_it'], {}), '(LMPC.Qfun_SelectedTot, numSS_it)\n', (6758, 6791), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((6797, 6829), 'plot.plotQC', 'plotQC', (['LMPC.Qcost', 'numSS_Points'], {}), '(LMPC.Qcost, numSS_Points)\n', (6803, 6829), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((6835, 6850), 'plot.plotQt', 'plotQt', (['LMPC.qq'], {}), '(LMPC.qq)\n', (6841, 6850), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((6856, 6882), 'plot.plotQcost', 'plotQcost', (['LMPC.costSolved'], {}), '(LMPC.costSolved)\n', (6865, 6882), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((6888, 6917), 'plot.plotClosedLoopLMPC', 'plotClosedLoopLMPC', (['LMPC', 'map'], {}), '(LMPC, map)\n', (6906, 6917), False, 'from plot import 
plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((6923, 6974), 'plot.animation_xy', 'animation_xy', (['map', 'LMPCOpenLoopData', 'LMPC', '(Laps - 2)'], {}), '(map, LMPCOpenLoopData, LMPC, Laps - 2)\n', (6935, 6974), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((6980, 7029), 'plot.animation_states', 'animation_states', (['map', 'LMPCOpenLoopData', 'LMPC', '(10)'], {}), '(map, LMPCOpenLoopData, LMPC, 10)\n', (6996, 7029), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((7035, 7087), 'Track.unityTestChangeOfCoordinates', 'unityTestChangeOfCoordinates', (['map', 'ClosedLoopDataPID'], {}), '(map, ClosedLoopDataPID)\n', (7063, 7087), False, 'from Track import Map, unityTestChangeOfCoordinates\n'), ((7092, 7148), 'Track.unityTestChangeOfCoordinates', 'unityTestChangeOfCoordinates', (['map', 'ClosedLoopDataLTI_MPC'], {}), '(map, ClosedLoopDataLTI_MPC)\n', (7120, 7148), False, 'from Track import Map, unityTestChangeOfCoordinates\n'), ((7153, 7202), 'Track.unityTestChangeOfCoordinates', 'unityTestChangeOfCoordinates', (['map', 'ClosedLoopLMPC'], {}), '(map, ClosedLoopLMPC)\n', (7181, 7202), False, 'from Track import Map, unityTestChangeOfCoordinates\n'), ((7208, 7264), 'plot.saveGif_xyResults', 'saveGif_xyResults', (['map', 'LMPCOpenLoopData', 'LMPC', '(Laps - 1)'], {}), '(map, LMPCOpenLoopData, LMPC, Laps - 1)\n', (7225, 7264), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((7267, 7319), 'plot.Save_statesAnimation', 'Save_statesAnimation', (['map', 
'LMPCOpenLoopData', 'LMPC', '(5)'], {}), '(map, LMPCOpenLoopData, LMPC, 5)\n', (7287, 7319), False, 'from plot import plotTrajectory, plotClosedLoopLMPC, animation_xy, animation_states, saveGif_xyResults, Save_statesAnimation, plotCost, plotC, plotQC, plotQt, plotQcost\n'), ((7325, 7335), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7333, 7335), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1236), 'numpy.diag', 'np.diag', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (1204, 1236), True, 'import numpy as np\n'), ((1281, 1300), 'numpy.diag', 'np.diag', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (1288, 1300), True, 'import numpy as np\n'), ((1346, 1363), 'numpy.array', 'np.array', (['[0, 10]'], {}), '([0, 10])\n', (1354, 1363), True, 'import numpy as np\n'), ((1389, 1418), 'numpy.diag', 'np.diag', (['[10, 1, 1, 1, 10, 1]'], {}), '([10, 1, 1, 1, 10, 1])\n', (1396, 1418), True, 'import numpy as np\n'), ((1438, 1459), 'numpy.array', 'np.array', (['[1.0, 10.0]'], {}), '([1.0, 10.0])\n', (1446, 1459), True, 'import numpy as np\n'), ((5626, 5668), 'numpy.array', 'np.array', (['[0, 0, 0, 0, map.TrackLength, 0]'], {}), '([0, 0, 0, 0, map.TrackLength, 0])\n', (5634, 5668), True, 'import numpy as np\n')] |
import torch
from torch.autograd import Variable
import os
import argparse
from datetime import datetime
from lib.DPRAEdgeNet import DPRAEdgeNet,EdgeNet
from lib.segnet import SegNet
from lib.pranet import PraNet
from utils.utils import EarlyStopping
from utils.dataloader import get_loader,test_dataset
from utils.utils import clip_gradient, adjust_lr, AvgMeter
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from torchstat import stat
import glob
import wandb
def structure_loss(pred, mask):
    """Weighted BCE + weighted IoU loss (the PraNet "structure loss").

    Pixels whose 31x31 local mask average differs from the mask itself
    (i.e. pixels near object boundaries) receive up to 5x extra weight in
    both the BCE and the IoU terms.

    Args:
        pred: raw (pre-sigmoid) prediction logits, shape (N, C, H, W).
        mask: binary ground-truth mask, same shape as `pred`.

    Returns:
        Scalar tensor: batch mean of (weighted BCE + weighted IoU).
    """
    # Boundary-aware weight map: 1 in flat regions, up to 6 near edges.
    weit = 1 + 5*torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask)
    # BUG FIX: the original passed the deprecated `reduce='none'`. The legacy
    # argument handling treats any truthy `reduce` as True and resolves the
    # reduction to 'mean', collapsing wbce to a scalar so the per-pixel
    # boundary weights never applied. `reduction='none'` keeps the pixel map.
    wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')
    wbce = (weit*wbce).sum(dim=(2, 3)) / weit.sum(dim=(2, 3))

    pred = torch.sigmoid(pred)
    inter = ((pred * mask)*weit).sum(dim=(2, 3))
    union = ((pred + mask)*weit).sum(dim=(2, 3))
    # Smoothed weighted IoU; +1 in numerator/denominator avoids div-by-zero.
    wiou = 1 - (inter + 1)/(union - inter+1)
    return (wbce + wiou).mean()
def test(model, path,model_name):
    """Evaluate `model` on the validation set rooted at `path`.

    Expects `path` to contain `image/` and `mask/` subfolders. For each
    sample, runs the model, accumulates the structure loss and a Dice
    coefficient between the normalised prediction and the ground truth.

    Returns:
        (mean Dice over all samples, mean validation loss over all samples).

    NOTE(review): relies on the module-level `opt` (argparse namespace) and
    on `structure_loss`; `total_loss` is unbound if `model_name`/`opt.model`
    matches none of the handled branches — confirm callers only pass
    "PraNet", "DPRAEdgeNet", "EdgeNet" or "SegNet".
    """
    ##### put ur data_path of TestDataSet/Kvasir here #####
    data_path = path
    ##### #####
    model.eval()
    image_root = '{}/image/'.format(data_path)
    gt_root = '{}/mask/'.format(data_path)
    # Count ground-truth files; fall back to .jpg if no .png masks exist.
    total_vals = len(glob.glob(gt_root + "/*png"))
    if total_vals == 0 :
        total_vals = len(glob.glob(gt_root + "/*jpg"))
    test_loader = test_dataset(image_root, gt_root, opt.trainsize)
    b=0.0          # running sum of per-sample Dice scores
    val_loss=0.0   # running sum of per-sample losses
    for i in range(total_vals):
        image, gt, gt_edge,name,_ = test_loader.load_data()
        gt = gt.unsqueeze(0)
        gt_edge = gt_edge.unsqueeze(0)
        image = image.cuda()
        if model_name == "PraNet":
            # PraNet returns four lateral maps; loss is summed over all.
            res,t1,t2,t3= model(image)
            loss = structure_loss(res, gt.cuda())
            loss1 = structure_loss(t1, gt.cuda())
            loss2 = structure_loss(t2, gt.cuda())
            loss3 = structure_loss(t3, gt.cuda())
            total_loss = loss + loss1 + loss2 + loss3
        elif opt.model == "DPRAEdgeNet" or opt.model == "EdgeNet":
            # Edge-aware models return paired segmentation/edge map pyramids;
            # the first segmentation map is used for the Dice score.
            seg_map_array, edge_map_array = model(image)
            res = seg_map_array[0]
            total_loss = 0
            for seg_map,edge_map in zip(seg_map_array,edge_map_array):
                total_loss = total_loss + structure_loss(seg_map, gt.cuda()) + structure_loss(edge_map, gt_edge.cuda())
        elif model_name == "SegNet":
            res,_ = model(image)
            total_loss = structure_loss(res, gt.cuda())
        # Normalise ground truth and prediction to [0, 1] before Dice.
        gt = np.asarray(gt, np.float32)
        gt /= (gt.max() + 1e-8)
        #res = torch.squeeze(res)
        #gt = np.squeeze(gt)
        #res = F.upsample(res, size=gt.shape, mode='bilinear', align_corners=False)
        res = res.sigmoid().data.cpu().numpy().squeeze()
        res = (res - res.min()) / (res.max() - res.min() + 1e-8)
        input = res
        target = np.array(gt)
        N = gt.shape
        smooth = 1
        input_flat = np.reshape(input,(-1))
        target_flat = np.reshape(target,(-1))
        intersection = (input_flat*target_flat)
        # Soft Dice coefficient with smoothing constant 1.
        loss = (2 * intersection.sum() + smooth) / (input.sum() + target.sum() + smooth)
        val_loss = val_loss + total_loss.item()
        # Round the Dice score to 4 decimals before accumulating.
        a = '{:.4f}'.format(loss)
        a = float(a)
        b = b + a
    return b/total_vals,val_loss/total_vals
best = 0  # NOTE(review): appears unused — `train` tracks `best_meandice` instead; confirm before removing.
def train(train_loader, model, optimizer, scheduler,epoch, test_path,best_meandice,ckpt_folder="harddnet_exp",model_name="Ours"):
    """Run one training epoch, then validate and checkpoint the best model.

    Logs learning rate / losses to wandb, steps the (plateau) scheduler on
    validation loss, appends the epoch Dice to `dice_per_epoch.txt`, and
    saves `<model_name>-best.pth` whenever the validation mean Dice improves.

    Returns:
        (updated best mean Dice, validation loss of this epoch).

    NOTE(review): reads module-level `opt` and `total_step`; `val_loss` would
    be unbound if the `(epoch+1) % 1` guard ever became false (it is always
    true as written). `loss_record2/3/4` are created but never updated.
    """
    model.train()
    # ---- multi-scale training ----
    size_rates = [1]  # single scale; extra rates would enable multi-scale.
    loss_record2, loss_record3, loss_record4, loss_record5 = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
    for i, pack in enumerate(train_loader, start=1):
        for rate in size_rates:
            # Log the current learning rate (last param group wins).
            for param_group in optimizer.param_groups:
                lr = param_group['lr']
            wandb.log({'Learning Rate': lr})
            optimizer.zero_grad()
            # ---- data prepare ----
            images, gts,gts_edge = pack
            images = Variable(images).cuda()
            gts = Variable(gts).cuda()
            gts_edge = Variable(gts_edge).cuda()
            # ---- rescale ----
            # Snap the rescaled size to a multiple of 32.
            trainsize = int(round(opt.trainsize*rate/32)*32)
            if rate != 1:
                images = F.upsample(images, size=(trainsize, trainsize), mode='bilinear', align_corners=True)
                gts = F.upsample(gts, size=(trainsize, trainsize), mode='bilinear', align_corners=True)
                gts_edge = F.upsample(gts_edge, size=(trainsize, trainsize), mode='bilinear', align_corners=True)
            if opt.model == "DPRAEdgeNet" or opt.model == "EdgeNet" :
                # Edge-aware models: sum structure loss over every
                # segmentation/edge map pair in the pyramid.
                seg_map_array, edge_map_array = model(images)
                total_loss = 0
                for seg_map,edge_map in zip(seg_map_array,edge_map_array):
                    total_loss = total_loss + structure_loss(seg_map, gts) + structure_loss(edge_map, gts_edge)
                wandb.log({'Structure loss': total_loss.item()})
            elif model_name == "SegNet":
                lateral_map_5,_ = model(images)
                total_loss = structure_loss(lateral_map_5, gts)
            elif model_name == "PraNet":
                lateral_map_5, lateral_map_4, lateral_map_3, lateral_map_2 = model(images)
                # ---- loss function ----
                loss5 = structure_loss(lateral_map_5, gts)
                loss4 = structure_loss(lateral_map_4, gts)
                loss3 = structure_loss(lateral_map_3, gts)
                loss2 = structure_loss(lateral_map_2, gts)
                total_loss = loss2 + loss3 + loss4 + loss5
            # ---- backward ----
            total_loss.backward()
            optimizer.step()
            # ---- recording loss ----
            if rate == 1:
                loss_record5.update(total_loss.data, opt.batchsize)
        # ---- train visualization ----
        if i % 20 == 0 or i == total_step:
            print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], '
                  ' lateral-5: {:0.4f}]'.
                  format(datetime.now(), epoch, opt.epoch, i, total_step,
                         loss_record5.show()))
    save_path = '/home/debayan/Desktop/MedAI_Project/saved_models/{0}/'.format(ckpt_folder)
    os.makedirs(save_path, exist_ok=True)
    # Validate every epoch (the modulus guard is always true as written).
    if (epoch+1) % 1 == 0:
        meandice,val_loss = test(model,test_path,model_name)
        wandb.log({'Validation loss': val_loss})
        wandb.log({'Validation mean dice': meandice})
        scheduler.step(val_loss)
        with open(save_path + 'dice_per_epoch.txt','a+') as f:
            f.write("{} {}\n".format(epoch + 1,meandice))
        if meandice > best_meandice:
            best_meandice = meandice
            with open(save_path + 'best_dice.txt','w') as f:
                f.write("{} {}\n".format(epoch + 1,best_meandice))
            torch.save(model.state_dict(), save_path + '{}-best.pth'.format(model_name))
            print('[Saving Snapshot:]', save_path + '{}-best.pth'.format(model_name),meandice)
    return best_meandice,val_loss
if __name__ == '__main__':
    # Parse CLI options, build the selected model, then train until the
    # configured epoch count or early stopping on validation loss.
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', type=int,
                        default=100, help='epoch number')
    parser.add_argument('--lr', type=float,
                        default=3e-4, help='learning rate')
    parser.add_argument('--optimizer', type=str,
                        default='SGD', help='choosing optimizer Adam or SGD')
    parser.add_argument('--augmentation',
                        default="False", help='choose to do random flip rotation')
    parser.add_argument('--batchsize', type=int,
                        default=16, help='training batch size')
    parser.add_argument('--trainsize', type=int,
                        default=256, help='training dataset size')
    parser.add_argument('--clip', type=float,
                        default=0.5, help='gradient clipping margin')
    parser.add_argument('--train_path', type=str,
                        default='/home/debayan/Desktop/Kvasir-SEG/datasets/HDNetDataset/Kvasir_SEG_Training_880', help='path to train dataset')
    parser.add_argument('--val_path', type=str,
                        default='/home/debayan/Desktop/Kvasir-SEG/datasets/HDNetDataset/Kvasir_SEG_Validation_120' , help='path to testing Kvasir dataset')
    # NOTE(review): the help text below says 'type of loss function' but the
    # flag actually names the checkpoint folder — confirm and correct upstream.
    parser.add_argument('--ckpt_folder', type=str,
                        default='expt_1', help='type of loss function')
    parser.add_argument('--feature_channels', type=int,default=128)
    parser.add_argument('--patience_early_stopping', type=int,default=10)
    parser.add_argument('--patience_scheduler', type=int,default=5)
    parser.add_argument('--model', type=str,default="Ours")
    opt = parser.parse_args()
    # ---- build models ----
    # NOTE(review): `model` stays unbound if --model is none of the four
    # handled names (e.g. the default "Ours") — confirm intended usage.
    if opt.model == "DPRAEdgeNet":
        print("Choosing DPRAEdgeNet")
        model = DPRAEdgeNet(opt.feature_channels).cuda()
    elif opt.model == "EdgeNet":
        print("Choosing EdgeNet")
        model = EdgeNet(opt.feature_channels).cuda()
    elif opt.model == "SegNet":
        print("Choosing SegNet")
        model = SegNet(input_channels=3, output_channels=1).cuda()
    elif opt.model == "PraNet":
        print("Using PraNet")
        model = PraNet().cuda()
    es = EarlyStopping(patience=opt.patience_early_stopping)
    params = model.parameters()
    if opt.optimizer == 'Adam':
        optimizer = torch.optim.Adam(params, opt.lr)
    else:
        optimizer = torch.optim.SGD(params, opt.lr, weight_decay = 1e-5, momentum = 0.9)
    wandb.init(project="NORA_final",name=opt.ckpt_folder,reinit=True)
    # Reduce LR when validation loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', min_lr = 1e-5,patience=opt.patience_scheduler, verbose=True)
    image_root = '{}/image/'.format(opt.train_path)
    gt_root = '{}/mask/'.format(opt.train_path)
    train_loader = get_loader(image_root, gt_root, batchsize=opt.batchsize, trainsize=opt.trainsize, augmentation = opt.augmentation)
    total_step = len(train_loader)
    print("#"*20, "Start Training", "#"*20)
    mean_dice = 0
    for epoch in range(1, opt.epoch):
        mean_dice,val_loss = train(train_loader, model, optimizer, scheduler,epoch, opt.val_path,mean_dice,opt.ckpt_folder,opt.model)
        # Early stopping monitors the validation loss.
        metric = torch.tensor(val_loss)
        if es.step(metric):
            print("Early Stopping")
            break
| [
"torch.nn.functional.upsample",
"wandb.log",
"utils.utils.AvgMeter",
"wandb.init",
"numpy.array",
"utils.utils.EarlyStopping",
"numpy.reshape",
"argparse.ArgumentParser",
"numpy.asarray",
"torch.nn.functional.avg_pool2d",
"lib.DPRAEdgeNet.EdgeNet",
"torch.autograd.Variable",
"glob.glob",
"... | [((633, 694), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['pred', 'mask'], {'reduce': '"""none"""'}), "(pred, mask, reduce='none')\n", (667, 694), True, 'import torch.nn.functional as F\n'), ((769, 788), 'torch.sigmoid', 'torch.sigmoid', (['pred'], {}), '(pred)\n', (782, 788), False, 'import torch\n'), ((1416, 1464), 'utils.dataloader.test_dataset', 'test_dataset', (['image_root', 'gt_root', 'opt.trainsize'], {}), '(image_root, gt_root, opt.trainsize)\n', (1428, 1464), False, 'from utils.dataloader import get_loader, test_dataset\n'), ((6612, 6649), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (6623, 6649), False, 'import os\n'), ((7485, 7510), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7508, 7510), False, 'import argparse\n'), ((9736, 9787), 'utils.utils.EarlyStopping', 'EarlyStopping', ([], {'patience': 'opt.patience_early_stopping'}), '(patience=opt.patience_early_stopping)\n', (9749, 9787), False, 'from utils.utils import EarlyStopping\n'), ((10014, 10081), 'wandb.init', 'wandb.init', ([], {'project': '"""NORA_final"""', 'name': 'opt.ckpt_folder', 'reinit': '(True)'}), "(project='NORA_final', name=opt.ckpt_folder, reinit=True)\n", (10024, 10081), False, 'import wandb\n'), ((10097, 10222), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer', '"""min"""'], {'min_lr': '(1e-05)', 'patience': 'opt.patience_scheduler', 'verbose': '(True)'}), "(optimizer, 'min', min_lr=1e-05,\n patience=opt.patience_scheduler, verbose=True)\n", (10139, 10222), False, 'import torch\n'), ((10340, 10457), 'utils.dataloader.get_loader', 'get_loader', (['image_root', 'gt_root'], {'batchsize': 'opt.batchsize', 'trainsize': 'opt.trainsize', 'augmentation': 'opt.augmentation'}), '(image_root, gt_root, batchsize=opt.batchsize, trainsize=opt.\n trainsize, augmentation=opt.augmentation)\n', (10350, 
10457), False, 'from utils.dataloader import get_loader, test_dataset\n'), ((1286, 1314), 'glob.glob', 'glob.glob', (["(gt_root + '/*png')"], {}), "(gt_root + '/*png')\n", (1295, 1314), False, 'import glob\n'), ((2609, 2635), 'numpy.asarray', 'np.asarray', (['gt', 'np.float32'], {}), '(gt, np.float32)\n', (2619, 2635), True, 'import numpy as np\n'), ((2988, 3000), 'numpy.array', 'np.array', (['gt'], {}), '(gt)\n', (2996, 3000), True, 'import numpy as np\n'), ((3062, 3083), 'numpy.reshape', 'np.reshape', (['input', '(-1)'], {}), '(input, -1)\n', (3072, 3083), True, 'import numpy as np\n'), ((3107, 3129), 'numpy.reshape', 'np.reshape', (['target', '(-1)'], {}), '(target, -1)\n', (3117, 3129), True, 'import numpy as np\n'), ((3739, 3749), 'utils.utils.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3747, 3749), False, 'from utils.utils import clip_gradient, adjust_lr, AvgMeter\n'), ((3751, 3761), 'utils.utils.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3759, 3761), False, 'from utils.utils import clip_gradient, adjust_lr, AvgMeter\n'), ((3763, 3773), 'utils.utils.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3771, 3773), False, 'from utils.utils import clip_gradient, adjust_lr, AvgMeter\n'), ((3775, 3785), 'utils.utils.AvgMeter', 'AvgMeter', ([], {}), '()\n', (3783, 3785), False, 'from utils.utils import clip_gradient, adjust_lr, AvgMeter\n'), ((6757, 6797), 'wandb.log', 'wandb.log', (["{'Validation loss': val_loss}"], {}), "({'Validation loss': val_loss})\n", (6766, 6797), False, 'import wandb\n'), ((6806, 6851), 'wandb.log', 'wandb.log', (["{'Validation mean dice': meandice}"], {}), "({'Validation mean dice': meandice})\n", (6815, 6851), False, 'import wandb\n'), ((9877, 9909), 'torch.optim.Adam', 'torch.optim.Adam', (['params', 'opt.lr'], {}), '(params, opt.lr)\n', (9893, 9909), False, 'import torch\n'), ((9940, 10005), 'torch.optim.SGD', 'torch.optim.SGD', (['params', 'opt.lr'], {'weight_decay': '(1e-05)', 'momentum': '(0.9)'}), '(params, opt.lr, weight_decay=1e-05, 
momentum=0.9)\n', (9955, 10005), False, 'import torch\n'), ((10748, 10770), 'torch.tensor', 'torch.tensor', (['val_loss'], {}), '(val_loss)\n', (10760, 10770), False, 'import torch\n'), ((1367, 1395), 'glob.glob', 'glob.glob', (["(gt_root + '/*jpg')"], {}), "(gt_root + '/*jpg')\n", (1376, 1395), False, 'import glob\n'), ((3995, 4027), 'wandb.log', 'wandb.log', (["{'Learning Rate': lr}"], {}), "({'Learning Rate': lr})\n", (4004, 4027), False, 'import wandb\n'), ((4430, 4518), 'torch.nn.functional.upsample', 'F.upsample', (['images'], {'size': '(trainsize, trainsize)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(images, size=(trainsize, trainsize), mode='bilinear',\n align_corners=True)\n", (4440, 4518), True, 'import torch.nn.functional as F\n'), ((4537, 4623), 'torch.nn.functional.upsample', 'F.upsample', (['gts'], {'size': '(trainsize, trainsize)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(gts, size=(trainsize, trainsize), mode='bilinear', align_corners\n =True)\n", (4547, 4623), True, 'import torch.nn.functional as F\n'), ((4646, 4736), 'torch.nn.functional.upsample', 'F.upsample', (['gts_edge'], {'size': '(trainsize, trainsize)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(gts_edge, size=(trainsize, trainsize), mode='bilinear',\n align_corners=True)\n", (4656, 4736), True, 'import torch.nn.functional as F\n'), ((9324, 9357), 'lib.DPRAEdgeNet.DPRAEdgeNet', 'DPRAEdgeNet', (['opt.feature_channels'], {}), '(opt.feature_channels)\n', (9335, 9357), False, 'from lib.DPRAEdgeNet import DPRAEdgeNet, EdgeNet\n'), ((557, 613), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['mask'], {'kernel_size': '(31)', 'stride': '(1)', 'padding': '(15)'}), '(mask, kernel_size=31, stride=1, padding=15)\n', (569, 613), True, 'import torch.nn.functional as F\n'), ((4173, 4189), 'torch.autograd.Variable', 'Variable', (['images'], {}), '(images)\n', (4181, 4189), False, 'from torch.autograd import Variable\n'), ((4215, 4228), 
'torch.autograd.Variable', 'Variable', (['gts'], {}), '(gts)\n', (4223, 4228), False, 'from torch.autograd import Variable\n'), ((4259, 4277), 'torch.autograd.Variable', 'Variable', (['gts_edge'], {}), '(gts_edge)\n', (4267, 4277), False, 'from torch.autograd import Variable\n'), ((6413, 6427), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6425, 6427), False, 'from datetime import datetime\n'), ((9459, 9488), 'lib.DPRAEdgeNet.EdgeNet', 'EdgeNet', (['opt.feature_channels'], {}), '(opt.feature_channels)\n', (9466, 9488), False, 'from lib.DPRAEdgeNet import DPRAEdgeNet, EdgeNet\n'), ((9579, 9622), 'lib.segnet.SegNet', 'SegNet', ([], {'input_channels': '(3)', 'output_channels': '(1)'}), '(input_channels=3, output_channels=1)\n', (9585, 9622), False, 'from lib.segnet import SegNet\n'), ((9709, 9717), 'lib.pranet.PraNet', 'PraNet', ([], {}), '()\n', (9715, 9717), False, 'from lib.pranet import PraNet\n')] |
import PIL.ImageOps
from PIL import Image
from IPython.core.interactiveshell import InteractiveShell
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from IPython import get_ipython
import pickle as cPickle
# Load a pickled digit classifier and predict the class of one image file,
# visualising each preprocessing step (grayscale -> resize -> invert ->
# contrast-scale -> flatten to a 1x784 MNIST-style sample).
get_ipython().run_line_magic('matplotlib', 'inline')
InteractiveShell.ast_node_interactivity = "all"
# load the model from disk (pickled scikit-learn-style estimator, presumably)
model_name = 'finalized_model2.sav'
loaded_model = cPickle.load(open(model_name, 'rb'))
filename = '2.png'
image = Image.open(filename)
p = plt.imshow(np.asarray(image), cmap=plt.cm.gray,)
p = plt.title('Shape: ' + str(np.asarray(image).shape))
# convert to grayscale image - 'L' format means each pixel is
# represented by a single value from 0 to 255
image_bw = image.convert('L')
p = plt.imshow(np.asarray(image_bw), cmap=plt.cm.gray,)
p = plt.title('Shape: ' + str(np.asarray(image_bw).shape))
# resize image to the 28x28 grid the model was trained on
# NOTE(review): Image.ANTIALIAS was removed in Pillow 10 — Image.LANCZOS is
# the modern equivalent; confirm the pinned Pillow version before changing.
image_bw_resized = image_bw.resize((28, 28), Image.ANTIALIAS)
p = plt.imshow(np.asarray(image_bw_resized), cmap=plt.cm.gray,)
p = plt.title('Shape: ' + str(np.asarray(image_bw_resized).shape))
# invert image, to match training data (white digit on black background)
image_bw_resized_inverted = PIL.ImageOps.invert(image_bw_resized)
p = plt.imshow(np.asarray(image_bw_resized_inverted), cmap=plt.cm.gray,)
p = plt.title('Shape: ' + str(np.asarray(image_bw_resized_inverted).shape))
# adjust contrast and scale: clip away the darkest `pixel_filter` percent,
# then normalise by the image maximum so values land in [0, 1]
pixel_filter = 20  # value from 0 to 100 - may need to adjust this manually
min_pixel = np.percentile(image_bw_resized_inverted, pixel_filter)
image_bw_resized_inverted_scaled = np.clip(
    image_bw_resized_inverted-min_pixel, 0, 255)
max_pixel = np.max(image_bw_resized_inverted)
image_bw_resized_inverted_scaled = np.asarray(
    image_bw_resized_inverted_scaled)/max_pixel
p = plt.imshow(np.asarray(image_bw_resized_inverted_scaled), cmap=plt.cm.gray,)
p = plt.title(
    'Shape: ' + str(np.asarray(image_bw_resized_inverted_scaled).shape))
# finally, reshape to (1, 784) - 1 sample, 784 features
test_sample = np.array(image_bw_resized_inverted_scaled).reshape(1, 784)
p = plt.imshow(np.reshape(test_sample, (28, 28)), cmap=plt.cm.gray,)
p = plt.title('Shape: ' + str(test_sample.shape))
p = plt.imshow(np.reshape(test_sample, (28, 28)), cmap=plt.cm.gray,)
p = plt.title('Shape: ' + str(test_sample.shape))
test_pred = loaded_model.predict(test_sample)
print("Predicted class is: ", test_pred)
| [
"numpy.clip",
"IPython.get_ipython",
"PIL.Image.open",
"numpy.reshape",
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.percentile"
] | [((499, 519), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (509, 519), False, 'from PIL import Image\n'), ((1464, 1518), 'numpy.percentile', 'np.percentile', (['image_bw_resized_inverted', 'pixel_filter'], {}), '(image_bw_resized_inverted, pixel_filter)\n', (1477, 1518), True, 'import numpy as np\n'), ((1554, 1608), 'numpy.clip', 'np.clip', (['(image_bw_resized_inverted - min_pixel)', '(0)', '(255)'], {}), '(image_bw_resized_inverted - min_pixel, 0, 255)\n', (1561, 1608), True, 'import numpy as np\n'), ((1624, 1657), 'numpy.max', 'np.max', (['image_bw_resized_inverted'], {}), '(image_bw_resized_inverted)\n', (1630, 1657), True, 'import numpy as np\n'), ((535, 552), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (545, 552), True, 'import numpy as np\n'), ((783, 803), 'numpy.asarray', 'np.asarray', (['image_bw'], {}), '(image_bw)\n', (793, 803), True, 'import numpy as np\n'), ((975, 1003), 'numpy.asarray', 'np.asarray', (['image_bw_resized'], {}), '(image_bw_resized)\n', (985, 1003), True, 'import numpy as np\n'), ((1213, 1250), 'numpy.asarray', 'np.asarray', (['image_bw_resized_inverted'], {}), '(image_bw_resized_inverted)\n', (1223, 1250), True, 'import numpy as np\n'), ((1693, 1737), 'numpy.asarray', 'np.asarray', (['image_bw_resized_inverted_scaled'], {}), '(image_bw_resized_inverted_scaled)\n', (1703, 1737), True, 'import numpy as np\n'), ((1768, 1812), 'numpy.asarray', 'np.asarray', (['image_bw_resized_inverted_scaled'], {}), '(image_bw_resized_inverted_scaled)\n', (1778, 1812), True, 'import numpy as np\n'), ((2066, 2099), 'numpy.reshape', 'np.reshape', (['test_sample', '(28, 28)'], {}), '(test_sample, (28, 28))\n', (2076, 2099), True, 'import numpy as np\n'), ((2185, 2218), 'numpy.reshape', 'np.reshape', (['test_sample', '(28, 28)'], {}), '(test_sample, (28, 28))\n', (2195, 2218), True, 'import numpy as np\n'), ((252, 265), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (263, 265), False, 'from IPython import 
get_ipython\n'), ((1992, 2034), 'numpy.array', 'np.array', (['image_bw_resized_inverted_scaled'], {}), '(image_bw_resized_inverted_scaled)\n', (2000, 2034), True, 'import numpy as np\n'), ((603, 620), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (613, 620), True, 'import numpy as np\n'), ((854, 874), 'numpy.asarray', 'np.asarray', (['image_bw'], {}), '(image_bw)\n', (864, 874), True, 'import numpy as np\n'), ((1054, 1082), 'numpy.asarray', 'np.asarray', (['image_bw_resized'], {}), '(image_bw_resized)\n', (1064, 1082), True, 'import numpy as np\n'), ((1301, 1338), 'numpy.asarray', 'np.asarray', (['image_bw_resized_inverted'], {}), '(image_bw_resized_inverted)\n', (1311, 1338), True, 'import numpy as np\n'), ((1868, 1912), 'numpy.asarray', 'np.asarray', (['image_bw_resized_inverted_scaled'], {}), '(image_bw_resized_inverted_scaled)\n', (1878, 1912), True, 'import numpy as np\n')] |
"""Tests for module `transform_reorder.py`."""
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from hypermodern_screening.transform_reorder import (
ee_corr_reorder_sample,
ee_uncorr_reorder_sample,
reorder_cov,
reorder_mu,
reverse_ee_corr_reorder_sample,
reverse_ee_uncorr_reorder_sample,
reverse_reorder_cov,
reverse_reorder_mu,
)
@pytest.fixture
def traj() -> np.ndarray:
    """Provide the 4x3 trajectory sample shared by the next two tests."""
    return np.array([[0, 0, 0], [1, 0, 0], [2, 3, 0], [4, 5, 6]])
def test_ee_uncorr_reorder_sample(traj: np.ndarray) -> None:
    """Test the position juggling for the uncorrelated Elementary Effects.

    See Also
    --------
    `ee_uncorr_reorder_sample` and `reverse_ee_uncorr_reorder_sample`.
    """
    reordered = ee_uncorr_reorder_sample(traj)
    expected = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 3], [4, 5, 6]])
    assert_array_equal(reordered, expected)
    # Reordering must be exactly invertible.
    assert_array_equal(traj, reverse_ee_uncorr_reorder_sample(reordered))

    reordered_shifted = ee_uncorr_reorder_sample(traj, row_plus_one=False)
    expected_shifted = np.array([[0, 0, 0], [0, 1, 0], [2, 3, 0], [5, 6, 4]])
    assert_array_equal(reordered_shifted, expected_shifted)
    assert_array_equal(
        traj,
        reverse_ee_uncorr_reorder_sample(reordered_shifted, row_plus_one=False),
    )
def test_ee_corr_reorder_sample(traj: np.ndarray) -> None:
    """Test the position juggling for the correlated Elementary Effects.

    See Also
    --------
    `ee_corr_reorder_sample` and `reverse_ee_corr_reorder_sample`.
    """
    reordered = ee_corr_reorder_sample(traj)
    expected = np.array([[0, 0, 0], [1, 0, 0], [3, 0, 2], [6, 4, 5]])
    assert_array_equal(reordered, expected)
    # The round trip must recover the original sample exactly.
    assert_array_equal(traj, reverse_ee_corr_reorder_sample(reordered))
@pytest.fixture
def mu() -> np.ndarray:
    """Provide the vector of expectation values 0..9 for the next test."""
    return np.arange(10)
def test_reorder_mu(mu: np.ndarray) -> None:
    """Unit tests for `reorder_mu` and `reverse_reorder_mu`."""
    rolled_left = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
    rolled_right = np.array([9, 0, 1, 2, 3, 4, 5, 6, 7, 8])
    assert_array_equal(rolled_left, reorder_mu(mu))
    assert_array_equal(rolled_right, reverse_reorder_mu(mu))
    # The two reorderings are inverses of each other.
    assert_array_equal(mu, reverse_reorder_mu(reorder_mu(mu)))
@pytest.fixture
def cov() -> np.ndarray:
    """Provide a symmetric 5x5 covariance matrix for the next test."""
    return np.array(
        [
            [10, 2, 3, 4, 5],
            [2, 20, 6, 7, 8],
            [3, 6, 30, 9, 10],
            [4, 7, 9, 40, 11],
            [5, 8, 10, 11, 50],
        ]
    )
def test_reorder_cov(cov: np.ndarray) -> None:
    """Unit tests for `reorder_cov` and `reverse_reorder_cov`."""
    rolled_forward = np.array(
        [
            [20, 6, 7, 8, 2],
            [6, 30, 9, 10, 3],
            [7, 9, 40, 11, 4],
            [8, 10, 11, 50, 5],
            [2, 3, 4, 5, 10],
        ]
    )
    assert_array_equal(rolled_forward, reorder_cov(cov))

    rolled_backward = np.array(
        [
            [50, 5, 8, 10, 11],
            [5, 10, 2, 3, 4],
            [8, 2, 20, 6, 7],
            [10, 3, 6, 30, 9],
            [11, 4, 7, 9, 40],
        ]
    )
    assert_array_equal(rolled_backward, reverse_reorder_cov(cov))
    # Forward followed by backward reordering restores the input matrix.
    assert_array_equal(cov, reverse_reorder_cov(reorder_cov(cov)))
| [
"hypermodern_screening.transform_reorder.reorder_mu",
"hypermodern_screening.transform_reorder.ee_corr_reorder_sample",
"hypermodern_screening.transform_reorder.reverse_reorder_mu",
"numpy.array",
"hypermodern_screening.transform_reorder.ee_uncorr_reorder_sample",
"hypermodern_screening.transform_reorder.... | [((493, 547), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [2, 3, 0], [4, 5, 6]]'], {}), '([[0, 0, 0], [1, 0, 0], [2, 3, 0], [4, 5, 6]])\n', (501, 547), True, 'import numpy as np\n'), ((1963, 1976), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1972, 1976), True, 'import numpy as np\n'), ((2117, 2157), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])\n', (2125, 2157), True, 'import numpy as np\n'), ((2223, 2263), 'numpy.array', 'np.array', (['[9, 0, 1, 2, 3, 4, 5, 6, 7, 8]'], {}), '([9, 0, 1, 2, 3, 4, 5, 6, 7, 8])\n', (2231, 2263), True, 'import numpy as np\n'), ((2485, 2594), 'numpy.array', 'np.array', (['[[10, 2, 3, 4, 5], [2, 20, 6, 7, 8], [3, 6, 30, 9, 10], [4, 7, 9, 40, 11],\n [5, 8, 10, 11, 50]]'], {}), '([[10, 2, 3, 4, 5], [2, 20, 6, 7, 8], [3, 6, 30, 9, 10], [4, 7, 9, \n 40, 11], [5, 8, 10, 11, 50]])\n', (2493, 2594), True, 'import numpy as np\n'), ((2819, 2928), 'numpy.array', 'np.array', (['[[20, 6, 7, 8, 2], [6, 30, 9, 10, 3], [7, 9, 40, 11, 4], [8, 10, 11, 50, 5],\n [2, 3, 4, 5, 10]]'], {}), '([[20, 6, 7, 8, 2], [6, 30, 9, 10, 3], [7, 9, 40, 11, 4], [8, 10, \n 11, 50, 5], [2, 3, 4, 5, 10]])\n', (2827, 2928), True, 'import numpy as np\n'), ((3075, 3183), 'numpy.array', 'np.array', (['[[50, 5, 8, 10, 11], [5, 10, 2, 3, 4], [8, 2, 20, 6, 7], [10, 3, 6, 30, 9],\n [11, 4, 7, 9, 40]]'], {}), '([[50, 5, 8, 10, 11], [5, 10, 2, 3, 4], [8, 2, 20, 6, 7], [10, 3, 6,\n 30, 9], [11, 4, 7, 9, 40]])\n', (3083, 3183), True, 'import numpy as np\n'), ((841, 871), 'hypermodern_screening.transform_reorder.ee_uncorr_reorder_sample', 'ee_uncorr_reorder_sample', (['traj'], {}), '(traj)\n', (865, 871), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((881, 935), 
'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 1], [0, 2, 3], [4, 5, 6]]'], {}), '([[0, 0, 0], [0, 0, 1], [0, 2, 3], [4, 5, 6]])\n', (889, 935), True, 'import numpy as np\n'), ((1086, 1136), 'hypermodern_screening.transform_reorder.ee_uncorr_reorder_sample', 'ee_uncorr_reorder_sample', (['traj'], {'row_plus_one': '(False)'}), '(traj, row_plus_one=False)\n', (1110, 1136), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((1146, 1200), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 0], [2, 3, 0], [5, 6, 4]]'], {}), '([[0, 0, 0], [0, 1, 0], [2, 3, 0], [5, 6, 4]])\n', (1154, 1200), True, 'import numpy as np\n'), ((1658, 1686), 'hypermodern_screening.transform_reorder.ee_corr_reorder_sample', 'ee_corr_reorder_sample', (['traj'], {}), '(traj)\n', (1680, 1686), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((1696, 1750), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [3, 0, 2], [6, 4, 5]]'], {}), '([[0, 0, 0], [1, 0, 0], [3, 0, 2], [6, 4, 5]])\n', (1704, 1750), True, 'import numpy as np\n'), ((2191, 2205), 'hypermodern_screening.transform_reorder.reorder_mu', 'reorder_mu', (['mu'], {}), '(mu)\n', (2201, 2205), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((2297, 2319), 'hypermodern_screening.transform_reorder.reverse_reorder_mu', 'reverse_reorder_mu', (['mu'], {}), '(mu)\n', (2315, 2319), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, 
ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((3042, 3058), 'hypermodern_screening.transform_reorder.reorder_cov', 'reorder_cov', (['cov'], {}), '(cov)\n', (3053, 3058), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((3298, 3322), 'hypermodern_screening.transform_reorder.reverse_reorder_cov', 'reverse_reorder_cov', (['cov'], {}), '(cov)\n', (3317, 3322), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((1015, 1045), 'hypermodern_screening.transform_reorder.ee_uncorr_reorder_sample', 'ee_uncorr_reorder_sample', (['traj'], {}), '(traj)\n', (1039, 1045), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((1301, 1351), 'hypermodern_screening.transform_reorder.ee_uncorr_reorder_sample', 'ee_uncorr_reorder_sample', (['traj'], {'row_plus_one': '(False)'}), '(traj, row_plus_one=False)\n', (1325, 1351), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((1828, 1856), 'hypermodern_screening.transform_reorder.ee_corr_reorder_sample', 'ee_corr_reorder_sample', (['traj'], {}), '(traj)\n', (1850, 1856), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, 
ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((2368, 2382), 'hypermodern_screening.transform_reorder.reorder_mu', 'reorder_mu', (['mu'], {}), '(mu)\n', (2378, 2382), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n'), ((3373, 3389), 'hypermodern_screening.transform_reorder.reorder_cov', 'reorder_cov', (['cov'], {}), '(cov)\n', (3384, 3389), False, 'from hypermodern_screening.transform_reorder import ee_corr_reorder_sample, ee_uncorr_reorder_sample, reorder_cov, reorder_mu, reverse_ee_corr_reorder_sample, reverse_ee_uncorr_reorder_sample, reverse_reorder_cov, reverse_reorder_mu\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Kumagai group.
import numpy as np
from pydefect.analyzer.defect_charge_distribution import RadialDist
from pymatgen import Structure, Lattice, Spin
from pymatgen.io.vasp import Chgcar
def test():
    """Smoke-test RadialDist on a uniform 3x3x3 CHGCAR for H in a cubic cell."""
    # Cubic lattice (a = 3) with a single H atom at the origin.
    structure = Structure(Lattice.cubic(3), ["H"], [[0, 0, 0]])
    # Uniform charge data on a 3x3x3 grid: total = 3 and diff = -1 everywhere.
    data = {"total": np.array([[[3]*3]*3]*3), "diff": np.array([[[-1]*3]*3]*3)}
    chgcar = Chgcar(structure, data)
    rad = RadialDist(chgcar, [0, 0, 0])
    # 1 + 6 + 12 grid points are collected around the center
    # (presumably on-site, face-neighbor and edge-neighbor shells — confirm).
    assert len(rad.distances_data) == 1 + 6 + 12
    # First entry: fractional coordinates, Cartesian distance, grid index,
    # and Cartesian coordinates of one grid point (assumed layout — verify
    # against RadialDist's definition of distances_data).
    np.testing.assert_almost_equal(rad.distances_data[0][0], np.array([-1/3, -1./3, 0.0]))
    assert rad.distances_data[0][1] == np.sqrt(2)
    assert rad.distances_data[0][2] == 24
    np.testing.assert_almost_equal(rad.distances_data[0][3], np.array([-1, -1, 0.0]))
    hist_data, half_point, summed = rad.histogram(Spin.up)
    np.testing.assert_almost_equal(hist_data[0], [5.00000000e-02, 1.16355283e-03])
| [
"pydefect.analyzer.defect_charge_distribution.RadialDist",
"numpy.sqrt",
"numpy.testing.assert_almost_equal",
"numpy.array",
"pymatgen.Lattice.cubic",
"pymatgen.io.vasp.Chgcar"
] | [((402, 425), 'pymatgen.io.vasp.Chgcar', 'Chgcar', (['structure', 'data'], {}), '(structure, data)\n', (408, 425), False, 'from pymatgen.io.vasp import Chgcar\n'), ((437, 466), 'pydefect.analyzer.defect_charge_distribution.RadialDist', 'RadialDist', (['chgcar', '[0, 0, 0]'], {}), '(chgcar, [0, 0, 0])\n', (447, 466), False, 'from pydefect.analyzer.defect_charge_distribution import RadialDist\n'), ((849, 916), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['hist_data[0]', '[0.05, 0.00116355283]'], {}), '(hist_data[0], [0.05, 0.00116355283])\n', (879, 916), True, 'import numpy as np\n'), ((271, 287), 'pymatgen.Lattice.cubic', 'Lattice.cubic', (['(3)'], {}), '(3)\n', (284, 287), False, 'from pymatgen import Structure, Lattice, Spin\n'), ((330, 359), 'numpy.array', 'np.array', (['([[[3] * 3] * 3] * 3)'], {}), '([[[3] * 3] * 3] * 3)\n', (338, 359), True, 'import numpy as np\n'), ((363, 393), 'numpy.array', 'np.array', (['([[[-1] * 3] * 3] * 3)'], {}), '([[[-1] * 3] * 3] * 3)\n', (371, 393), True, 'import numpy as np\n'), ((577, 610), 'numpy.array', 'np.array', (['[-1 / 3, -1.0 / 3, 0.0]'], {}), '([-1 / 3, -1.0 / 3, 0.0])\n', (585, 610), True, 'import numpy as np\n'), ((646, 656), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (653, 656), True, 'import numpy as np\n'), ((760, 783), 'numpy.array', 'np.array', (['[-1, -1, 0.0]'], {}), '([-1, -1, 0.0])\n', (768, 783), True, 'import numpy as np\n')] |
# libraries imported
import numpy as np
from scipy import signal
from datetime import datetime
# functions that apply to both simulated and real tremor
import functions.feature_handler as fh
import functions.data_handler as dh
import functions.evaluator as eva
import functions.optimiser as op
import functions.plotter as plt
# Truncate printed NumPy arrays so the long feature/prediction dumps below stay readable.
np.set_printoptions(threshold=50) # shortens long arrays in the console window
def main(FILE_NAME, MODEL_TYPE):
    """End-to-end tremor-suppression pipeline.

    Reads real tremor data, trains one regression model per axis (X, Y, Z)
    that predicts the voluntary component of motion, evaluates on a held-out
    split, plots the results, and prints timing/accuracy diagnostics.

    Args:
        FILE_NAME: path to the tremor CSV (columns: t, x, y, z, grip force).
        MODEL_TYPE: regressor passed to the optimiser, e.g. "SVM".
    """
    training_testing_ratio = 0.6  # fraction of samples used for training
    TIME_PERIOD = 1 / 250  # sensor sampling interval (250Hz)
    start_time = datetime.now()
    # reads data into memory and filters it
    data = dh.read_data(FILE_NAME, 200, 5000) # real tremor data (t, x, y, z, grip force)
    filtered_data = dh.filter_data(data, TIME_PERIOD) # filters data to get an estimate of intended movement (label)
    # first 60% of data (training_testing_ratio) is used for training
    training_data = data[:, :int(training_testing_ratio * len(data[0]))] # first 60% of data for training
    filtered_training_data = np.array(filtered_data)[:, :int(training_testing_ratio * len(filtered_data[0]))] # training labels
    time = np.array(data[0], dtype='f') * TIME_PERIOD # samples are measured at a rate of 250Hz
    end_time = datetime.now()
    # time taken to read and split the data
    data_reading_time = (end_time - start_time).total_seconds()
    start_time = datetime.now()
    # training data is assigned
    training_motion = [training_data[1], training_data[2], training_data[3]] # [X, Y, Z]
    training_label = [filtered_training_data[1], filtered_training_data[2], filtered_training_data[3]] # [X, Y, Z]
    # training data and labels are normalised
    for i in range(len(training_motion)):
        training_motion[i] = fh.normalise(training_motion[i])
        training_label[i] = fh.normalise(training_label[i])
    end_time = datetime.now()
    # time taken to select and normalise useful training data
    selecting_training_time = (end_time - start_time).total_seconds()
    start_time = datetime.now()
    # calculates the features in a separate function
    [training_features, horizon] = fh.gen_all_features(training_motion, training_label)
    # prints the features and the average horizon
    print("\nTraining features (x, y, z):\n" + str(np.array(training_features)))
    print("Horizon values [x, y, z]:", horizon)
    end_time = datetime.now()
    # time taken to create training features
    training_features_time = (end_time - start_time).total_seconds()
    # one regression model per axis (e.g. SVM with rbf kernel)
    regression = []
    hyperparameters = []
    tuned_training_time = [] # time taken to tune and train model (for each axis)
    print("Tuning...")
    for i in range(len(training_features)):
        start_time = datetime.now()
        # reformats the features for fitting the model (numpy array)
        axis_features = np.vstack(training_features[i]).T
        # tunes and trains the regression model
        [temp_reg, temp_params] = op.tune_model(axis_features, training_label[i], MODEL_TYPE)
        regression.append(temp_reg)
        hyperparameters.append(temp_params)
        end_time = datetime.now()
        tuned_training_time.append((end_time - start_time).total_seconds())
    print("Done!")
    print("\nHyperparameters (x, y, z):\n" + str(hyperparameters))
    start_time = datetime.now()
    # remaining 40% of the data is separated and used for testing
    test_data = data[:, int(training_testing_ratio * len(data[0])):] # last 40% of data for testing
    filtered_test_data = np.array(filtered_data)[:, int(training_testing_ratio * len(filtered_data[0])):] # testing labels
    # test data is assigned
    test_motion = [test_data[1], test_data[2], test_data[3]] # [X, Y, Z]
    test_label = [filtered_test_data[1], filtered_test_data[2], filtered_test_data[3]] # [X, Y, Z]
    norm_motion = []
    norm_label = []
    # test data is normalised but the original values are kept
    for i in range(len(test_motion)):
        norm_motion.append(fh.normalise(test_motion[i]))
        norm_label.append(fh.normalise(test_label[i]))
    end_time = datetime.now()
    # time taken to select and normalise useful test data
    selecting_test_time = (end_time - start_time).total_seconds()
    start_time = datetime.now()
    # calculates the features in a separate function
    test_features = fh.gen_all_features(norm_motion, norm_label, horizon)
    print("\nTest features (x):\n", np.array(test_features[0]))
    print("Test features (y):\n", np.array(test_features[1]))
    print("Test features (z):\n", np.array(test_features[2]))
    end_time = datetime.now()
    # time taken to create test data features
    test_features_time = (end_time - start_time).total_seconds()
    # predicts intended motion using the original data as an input (scaled to intended motion)
    prediction = []
    predicting_time = [] # time taken to predict voluntary motion (for each axis)
    for i in range(len(test_features)):
        start_time = datetime.now()
        axis_features = np.vstack(test_features[i]).T # reformats the features for fitting the model (numpy array)
        # rescales the output to match the actual value
        prediction.append(fh.match_scale(test_label[i], regression[i].predict(axis_features)))
        end_time = datetime.now()
        predicting_time.append((end_time - start_time).total_seconds())
    print("\nPredicted output (x):\n", np.array(prediction[0]), "\nActual output (x):\n", np.array(test_label[0]))
    print("\nPredicted output (y):\n", np.array(prediction[1]), "\nActual output (y):\n", np.array(test_label[1]))
    print("\nPredicted output (z):\n", np.array(prediction[2]), "\nActual output (z):\n", np.array(test_label[2]))
    # calculates and prints the R2 score and normalised RMSE of the model
    accuracy = []
    for i in range(len(test_label)):
        accuracy.append(eva.calc_accuracy(test_label[i], prediction[i]))
    print("\nX Accuracy [R2, RMSE]: " + "[" + str(accuracy[0][0]) + "%" + ", " + str(accuracy[0][1]) + "mm]")
    print("Y Accuracy [R2, RMSE]: " + "[" + str(accuracy[1][0]) + "%" + ", " + str(accuracy[1][1]) + "mm]")
    print("Z Accuracy [R2, RMSE]: " + "[" + str(accuracy[2][0]) + "%" + ", " + str(accuracy[2][1]) + "mm]")
    # gets the tremor component by subtracting from the voluntary motion
    actual_tremor = []
    predicted_tremor = []
    tremor_accuracy = []
    for i in range(len(test_motion)):
        actual_tremor.append(np.subtract(test_motion[i], test_label[i]))
        predicted_tremor.append(np.subtract(test_motion[i], prediction[i]))
        # calculates the normalised RMSE of the tremor component
        tremor_accuracy.append(eva.calc_accuracy(actual_tremor[i], predicted_tremor[i]))
    # converts and prints the NRMSE in a percentage form
    print("X Tremor accuracy [R2, RMSE]: " +
          "[" + str(tremor_accuracy[0][0]) + "%" + ", " + str(tremor_accuracy[0][1]) + "mm]")
    print("Y Tremor accuracy [R2, RMSE]: " +
          "[" + str(tremor_accuracy[1][0]) + "%" + ", " + str(tremor_accuracy[1][1]) + "mm]")
    print("Z Tremor accuracy [R2, RMSE]: " +
          "[" + str(tremor_accuracy[2][0]) + "%" + ", " + str(tremor_accuracy[2][1]) + "mm]")
    # shortens data list length to show more detail in graphs (keeps last 20% of test samples)
    for i in range(len(test_motion)):
        test_motion[i] = test_motion[i][round(0.8 * len(test_motion[i])):]
        test_label[i] = test_label[i][round(0.8 * len(test_label[i])):]
        actual_tremor[i] = actual_tremor[i][round(0.8 * len(actual_tremor[i])):]
        predicted_tremor[i] = predicted_tremor[i][round(0.8 * len(predicted_tremor[i])):]
        prediction[i] = prediction[i][round(0.8 * len(prediction[i])):]
    tremor_error = np.subtract(actual_tremor, predicted_tremor)
    # puts regression model data in a list
    model_data = [
        [test_motion[0], test_label[0], prediction[0], "X motion (mm)"],
        [test_motion[1], test_label[1], prediction[1], "Y motion (mm)"],
        [test_motion[2], test_label[2], prediction[2], "Z motion (mm)"]
    ]
    model_axes_labels = ["Original signal", "Filtered output", "Predicted output"]
    model_data_title = "Graph showing voluntary motion of model"
    # puts the tremor component data in a list
    tremor_data = [
        [actual_tremor[0], predicted_tremor[0], tremor_error[0], "X motion (mm)"],
        [actual_tremor[1], predicted_tremor[1], tremor_error[1], "Y motion (mm)"],
        [actual_tremor[2], predicted_tremor[2], tremor_error[2], "Z motion (mm)"]
    ]
    tremor_axes_labels = ["Actual tremor", "Predicted tremor", "Tremor error"]
    tremor_data_title = "Graph showing tremor component of model"
    # puts all features in a list for passing to the plot function (feature | legend)
    features_data = [
        [
            [test_features[0][0], "Motion (x)"],
            [test_features[0][1], "Velocity (x)"],
            [test_features[0][2], "Acceleration (x)"],
            [test_features[0][3], "Past motion (x)"],
            [test_features[0][4], "Average motion (x)"]
        ],
        [
            [test_features[1][0], "Motion (y)"],
            [test_features[1][1], "Velocity (y)"],
            [test_features[1][2], "Acceleration (y)"],
            [test_features[1][3], "Past motion (y)"],
            [test_features[1][4], "Average motion (y)"]
        ],
        [
            [test_features[2][0], "Motion (z)"],
            [test_features[2][1], "Velocity (z)"],
            [test_features[2][2], "Acceleration (z)"],
            [test_features[2][3], "Past motion (z)"],
            [test_features[2][4], "Average motion (z)"]
        ]
    ]
    # time is shortened to match the length of the test data
    time = time[int(training_testing_ratio * len(data[0])):]
    plt.plot_model(time[round(0.8 * len(time)):], model_data, model_axes_labels, model_data_title) # plots SVR model
    plt.plot_model(time[round(0.8 * len(time)):], tremor_data, tremor_axes_labels, tremor_data_title) # plots the tremor components
    # plots the features
    for axis in features_data:
        plt.plot_data(time, axis, "Time (s)", "N-motion")
    # prints performance of the program
    print(
        "\nPerformance:\n==================================",
        "\nTime taken to read data:", str(data_reading_time) + "s",
        "\nTime taken to select and normalise data for creating training features:", str(selecting_training_time) + "s",
        "\nTime taken to generate features for training:", str(training_features_time) + "s",
        "\nTime taken to tune and train regression model:",
        "\n\tX axis =", str(tuned_training_time[0]) + "s",
        "\n\tY axis =", str(tuned_training_time[1]) + "s",
        "\n\tZ axis =", str(tuned_training_time[2]) + "s",
        "\nTime taken to select and normalise data for creating test features:", str(selecting_test_time) + "s",
        "\nTime taken to generate features for testing/predicting:", str(test_features_time) + "s",
        "\nTime taken to predict voluntary motion:",
        "\n\tX axis =", str(predicting_time[0]) + "s",
        "\n\tY axis =", str(predicting_time[1]) + "s",
        "\n\tZ axis =", str(predicting_time[2]) + "s",
        "\nTotal time taken:",
        str(
            data_reading_time + selecting_training_time + training_features_time + tuned_training_time[0] +
            tuned_training_time[1] + tuned_training_time[2] + selecting_test_time + test_features_time +
            predicting_time[0] + predicting_time[1] + predicting_time[2]
        ) + "s"
    )
if __name__ == '__main__':
    # Entry point: run the full tremor-prediction pipeline on the real dataset.
    # Swap "SVM" for "Random Forest" to try the alternative regressor.
    main("data/real_tremor_data.csv", "SVM")
| [
"functions.feature_handler.gen_all_features",
"functions.plotter.plot_data",
"numpy.subtract",
"functions.feature_handler.normalise",
"functions.evaluator.calc_accuracy",
"datetime.datetime.now",
"numpy.array",
"functions.data_handler.read_data",
"numpy.vstack",
"functions.data_handler.filter_data... | [((329, 362), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '(50)'}), '(threshold=50)\n', (348, 362), True, 'import numpy as np\n'), ((521, 535), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (533, 535), False, 'from datetime import datetime\n'), ((591, 625), 'functions.data_handler.read_data', 'dh.read_data', (['FILE_NAME', '(200)', '(5000)'], {}), '(FILE_NAME, 200, 5000)\n', (603, 625), True, 'import functions.data_handler as dh\n'), ((691, 724), 'functions.data_handler.filter_data', 'dh.filter_data', (['data', 'TIME_PERIOD'], {}), '(data, TIME_PERIOD)\n', (705, 724), True, 'import functions.data_handler as dh\n'), ((1176, 1190), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1188, 1190), False, 'from datetime import datetime\n'), ((1317, 1331), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1329, 1331), False, 'from datetime import datetime\n'), ((1797, 1811), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1809, 1811), False, 'from datetime import datetime\n'), ((1962, 1976), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1974, 1976), False, 'from datetime import datetime\n'), ((2066, 2118), 'functions.feature_handler.gen_all_features', 'fh.gen_all_features', (['training_motion', 'training_label'], {}), '(training_motion, training_label)\n', (2085, 2118), True, 'import functions.feature_handler as fh\n'), ((2314, 2328), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2326, 2328), False, 'from datetime import datetime\n'), ((3266, 3280), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3278, 3280), False, 'from datetime import datetime\n'), ((4035, 4049), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4047, 4049), False, 'from datetime import datetime\n'), ((4192, 4206), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4204, 4206), False, 'from datetime import datetime\n'), ((4281, 4334), 
'functions.feature_handler.gen_all_features', 'fh.gen_all_features', (['norm_motion', 'norm_label', 'horizon'], {}), '(norm_motion, norm_label, horizon)\n', (4300, 4334), True, 'import functions.feature_handler as fh\n'), ((4539, 4553), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4551, 4553), False, 'from datetime import datetime\n'), ((7664, 7708), 'numpy.subtract', 'np.subtract', (['actual_tremor', 'predicted_tremor'], {}), '(actual_tremor, predicted_tremor)\n', (7675, 7708), True, 'import numpy as np\n'), ((964, 987), 'numpy.array', 'np.array', (['filtered_data'], {}), '(filtered_data)\n', (972, 987), True, 'import numpy as np\n'), ((1075, 1103), 'numpy.array', 'np.array', (['data[0]'], {'dtype': '"""f"""'}), "(data[0], dtype='f')\n", (1083, 1103), True, 'import numpy as np\n'), ((1688, 1720), 'functions.feature_handler.normalise', 'fh.normalise', (['training_motion[i]'], {}), '(training_motion[i])\n', (1700, 1720), True, 'import functions.feature_handler as fh\n'), ((1749, 1780), 'functions.feature_handler.normalise', 'fh.normalise', (['training_label[i]'], {}), '(training_label[i])\n', (1761, 1780), True, 'import functions.feature_handler as fh\n'), ((2686, 2700), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2698, 2700), False, 'from datetime import datetime\n'), ((2911, 2970), 'functions.optimiser.tune_model', 'op.tune_model', (['axis_features', 'training_label[i]', 'MODEL_TYPE'], {}), '(axis_features, training_label[i], MODEL_TYPE)\n', (2924, 2970), True, 'import functions.optimiser as op\n'), ((3071, 3085), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3083, 3085), False, 'from datetime import datetime\n'), ((3464, 3487), 'numpy.array', 'np.array', (['filtered_data'], {}), '(filtered_data)\n', (3472, 3487), True, 'import numpy as np\n'), ((4371, 4397), 'numpy.array', 'np.array', (['test_features[0]'], {}), '(test_features[0])\n', (4379, 4397), True, 'import numpy as np\n'), ((4433, 4459), 'numpy.array', 
'np.array', (['test_features[1]'], {}), '(test_features[1])\n', (4441, 4459), True, 'import numpy as np\n'), ((4495, 4521), 'numpy.array', 'np.array', (['test_features[2]'], {}), '(test_features[2])\n', (4503, 4521), True, 'import numpy as np\n'), ((4925, 4939), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4937, 4939), False, 'from datetime import datetime\n'), ((5228, 5242), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5240, 5242), False, 'from datetime import datetime\n'), ((5354, 5377), 'numpy.array', 'np.array', (['prediction[0]'], {}), '(prediction[0])\n', (5362, 5377), True, 'import numpy as np\n'), ((5405, 5428), 'numpy.array', 'np.array', (['test_label[0]'], {}), '(test_label[0])\n', (5413, 5428), True, 'import numpy as np\n'), ((5469, 5492), 'numpy.array', 'np.array', (['prediction[1]'], {}), '(prediction[1])\n', (5477, 5492), True, 'import numpy as np\n'), ((5520, 5543), 'numpy.array', 'np.array', (['test_label[1]'], {}), '(test_label[1])\n', (5528, 5543), True, 'import numpy as np\n'), ((5584, 5607), 'numpy.array', 'np.array', (['prediction[2]'], {}), '(prediction[2])\n', (5592, 5607), True, 'import numpy as np\n'), ((5635, 5658), 'numpy.array', 'np.array', (['test_label[2]'], {}), '(test_label[2])\n', (5643, 5658), True, 'import numpy as np\n'), ((10019, 10068), 'functions.plotter.plot_data', 'plt.plot_data', (['time', 'axis', '"""Time (s)"""', '"""N-motion"""'], {}), "(time, axis, 'Time (s)', 'N-motion')\n", (10032, 10068), True, 'import functions.plotter as plt\n'), ((2795, 2826), 'numpy.vstack', 'np.vstack', (['training_features[i]'], {}), '(training_features[i])\n', (2804, 2826), True, 'import numpy as np\n'), ((3934, 3962), 'functions.feature_handler.normalise', 'fh.normalise', (['test_motion[i]'], {}), '(test_motion[i])\n', (3946, 3962), True, 'import functions.feature_handler as fh\n'), ((3990, 4017), 'functions.feature_handler.normalise', 'fh.normalise', (['test_label[i]'], {}), '(test_label[i])\n', (4002, 4017), 
True, 'import functions.feature_handler as fh\n'), ((4965, 4992), 'numpy.vstack', 'np.vstack', (['test_features[i]'], {}), '(test_features[i])\n', (4974, 4992), True, 'import numpy as np\n'), ((5814, 5861), 'functions.evaluator.calc_accuracy', 'eva.calc_accuracy', (['test_label[i]', 'prediction[i]'], {}), '(test_label[i], prediction[i])\n', (5831, 5861), True, 'import functions.evaluator as eva\n'), ((6404, 6446), 'numpy.subtract', 'np.subtract', (['test_motion[i]', 'test_label[i]'], {}), '(test_motion[i], test_label[i])\n', (6415, 6446), True, 'import numpy as np\n'), ((6480, 6522), 'numpy.subtract', 'np.subtract', (['test_motion[i]', 'prediction[i]'], {}), '(test_motion[i], prediction[i])\n', (6491, 6522), True, 'import numpy as np\n'), ((6620, 6676), 'functions.evaluator.calc_accuracy', 'eva.calc_accuracy', (['actual_tremor[i]', 'predicted_tremor[i]'], {}), '(actual_tremor[i], predicted_tremor[i])\n', (6637, 6676), True, 'import functions.evaluator as eva\n'), ((2220, 2247), 'numpy.array', 'np.array', (['training_features'], {}), '(training_features)\n', (2228, 2247), True, 'import numpy as np\n')] |
import torch
from torch import nn
import numpy as np
from tqdm.notebook import tqdm
class Lis2Img(nn.Module):
    """Conv1d encoder/decoder mapping a 64-channel signal to a 64-channel signal.

    An LSTM sits between encoder and decoder but is currently bypassed in
    forward(); its parameters are still registered so that optimiser state
    and checkpoints remain compatible.
    """

    def __init__(self):
        super().__init__()
        # Two strided convolutions: 64 -> 32 -> 32 channels, time axis halved twice.
        self.encoder = nn.Sequential(
            nn.Conv1d(64, 32, 4, stride=2, padding=2),
            nn.ReLU(),
            nn.Conv1d(32, 32, 4, stride=2, padding=2),
            nn.ReLU(),
        )
        # Registered but not invoked in forward() at the moment.
        self.lstm = nn.LSTM(input_size=64, hidden_size=64, num_layers=2, batch_first=True)
        # Transposed convolutions mirror the encoder and restore 64 channels.
        self.decoder = nn.Sequential(
            nn.ConvTranspose1d(32, 32, 4, stride=2, padding=2),
            nn.ReLU(),
            nn.ConvTranspose1d(32, 64, 4, stride=2, padding=2),
        )

    def forward(self, x):
        # Conv1d input layout: [batch size, # channels, signal length].
        hidden = self.encoder(x)
        # The LSTM stage is disabled; the transpose round-trip is kept so the
        # layout matches what the (bypassed) batch-first LSTM would consume.
        hidden = hidden.transpose(1, 2).transpose(2, 1)
        return self.decoder(hidden)
# Force CPU execution for now; restore the CUDA check in the trailing
# comment to run on GPU when available.
device = "cpu"#"cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
def reset_weights(model):
    """Re-initialise every direct child module of *model* that supports it.

    Children without a reset_parameters method (e.g. activations) are skipped.
    """
    for child in model.children():
        reset = getattr(child, 'reset_parameters', None)
        if reset is not None:
            reset()
def R_value(a, b):  # Inputs shape [4, 64, 1803]
    """Pearson correlation between a and b along the final (time) axis.

    Returns one R value per channel per trial, e.g. [4, 64] for the
    shapes noted above. A constant channel yields NaN (zero variance).
    """
    t = -1  # correlate over the time-points dimension
    a_centred = a - a.mean(axis=t, keepdim=True)
    b_centred = b - b.mean(axis=t, keepdim=True)
    covariance = (a_centred * b_centred).mean(t)
    scale_a = (a_centred ** 2).mean(t) ** 0.5
    scale_b = (b_centred ** 2).mean(t) ** 0.5
    return covariance / (scale_a * scale_b)
def train(x_dataloader, y_dataloader, model, loss_fn, optimizer):
    """Run one optimisation pass over paired input/target dataloaders.

    Args:
        x_dataloader: yields input batches.
        y_dataloader: yields target batches, aligned 1:1 with x_dataloader.
        model: network to optimise.
        loss_fn: callable mapping (prediction, target) to a scalar loss.
        optimizer: torch optimiser over the model's parameters.
    """
    assert (len(x_dataloader.dataset) == len(y_dataloader.dataset)), \
        "Input, output dataloaders have different lengths! :O"
    # test() switches the model to eval mode and never switches it back;
    # make sure training always runs in train mode.
    model.train()
    for X, Y in zip(x_dataloader, y_dataloader):
        X, Y = X.to(device), Y.to(device)
        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, Y)
        # Backpropagation: clear stale gradients, differentiate, update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def test(x_dataloader, y_dataloader, model, loss_fn, print_result=False):
    """Evaluate the model over paired input/target dataloaders.

    Computes the mean loss per batch and the mean per-channel Pearson R
    (via the module-level R_value helper) across all batches.

    Returns:
        (avg_loss, avg_R) so callers can log or early-stop on them;
        existing callers that ignore the return value are unaffected.
    """
    assert (len(x_dataloader) == len(y_dataloader)), \
        "Input, output dataloaders have different lengths! :O"
    num_batches = len(x_dataloader)
    model.eval()
    avg_loss = 0
    R_values = []
    with torch.no_grad():  # no gradients needed during evaluation
        for X, Y in zip(x_dataloader, y_dataloader):
            X, Y = X.to(device), Y.to(device)
            pred = model(X)
            avg_loss += loss_fn(pred, Y).item()
            R = R_value(pred, Y)
            R_values.extend(R.flatten().tolist())
    avg_loss /= num_batches
    avg_R = np.asarray(R_values).mean()
    if print_result:
        print("Test:")
        print(f"\tAvg loss: {avg_loss}")
        print(f"\tAvg R value: {avg_R}\n")
    return avg_loss, avg_R
print(f"\tAvg R value: {avg_R}\n") | [
"torch.nn.ReLU",
"torch.nn.ConvTranspose1d",
"torch.nn.LSTM",
"numpy.asarray",
"torch.no_grad",
"torch.nn.Conv1d"
] | [((387, 457), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(64)', 'hidden_size': '(64)', 'num_layers': '(2)', 'batch_first': '(True)'}), '(input_size=64, hidden_size=64, num_layers=2, batch_first=True)\n', (394, 457), False, 'from torch import nn\n'), ((3169, 3184), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3182, 3184), False, 'import torch\n'), ((213, 254), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(32)', '(4)'], {'stride': '(2)', 'padding': '(2)'}), '(64, 32, 4, stride=2, padding=2)\n', (222, 254), False, 'from torch import nn\n'), ((268, 277), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (275, 277), False, 'from torch import nn\n'), ((291, 332), 'torch.nn.Conv1d', 'nn.Conv1d', (['(32)', '(32)', '(4)'], {'stride': '(2)', 'padding': '(2)'}), '(32, 32, 4, stride=2, padding=2)\n', (300, 332), False, 'from torch import nn\n'), ((345, 354), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (352, 354), False, 'from torch import nn\n'), ((508, 558), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(32)', '(32)', '(4)'], {'stride': '(2)', 'padding': '(2)'}), '(32, 32, 4, stride=2, padding=2)\n', (526, 558), False, 'from torch import nn\n'), ((572, 581), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (579, 581), False, 'from torch import nn\n'), ((595, 645), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(32)', '(64)', '(4)'], {'stride': '(2)', 'padding': '(2)'}), '(32, 64, 4, stride=2, padding=2)\n', (613, 645), False, 'from torch import nn\n'), ((3487, 3507), 'numpy.asarray', 'np.asarray', (['R_values'], {}), '(R_values)\n', (3497, 3507), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
# Import and store dataset (assumes creditcard.csv is in the working directory)
credit_card_data = pd.read_csv('creditcard.csv')
# print(credit_card_data)
# Splitting data into 4 sets
# 1. Shuffle/randomize data
# 2. One-hot encoding
# 3. Normalize
# 4. Splitting up X/Y values
# 5. Convert data_frames to numpy arrays (float32)
# 6. Splitting the final data into X/Y train/test
# 1. Shuffle/randomize data (frac=1 returns a full shuffled copy)
shuffle_data = credit_card_data.sample(frac=1)
# 2. One-hot encoding: Class column becomes Class_0 / Class_1 indicator columns
one_hot_data = pd.get_dummies(shuffle_data, columns=['Class'])
# 3. Normalize: min-max scale every column into [0, 1]
normalized_data = (one_hot_data - one_hot_data.min()) / (one_hot_data.max() - one_hot_data.min())
# 4. Splitting up X/Y values
df_X = normalized_data.drop(['Class_0', 'Class_1'], axis=1)
df_y = normalized_data[['Class_0', 'Class_1']]
# 5. Convert data_frames to numpy arrays (float32)
ar_X, ar_y = np.asarray(df_X.values, dtype='float32'), np.asarray(df_y.values, dtype='float32')
# 6. Splitting the final data into X/Y train/test (80/20 split)
train_size = int(0.8 * len(ar_X))
(raw_X_train, raw_y_train) = (ar_X[:train_size], ar_y[:train_size])
(raw_X_test, raw_y_test) = (ar_X[train_size:], ar_y[train_size:])
# np.unique on the binary Class column returns counts ordered [0, 1].
count_legit, count_fraud = np.unique(credit_card_data['Class'], return_counts=True)[1]
fraud_ratio = float(count_fraud / (count_legit + count_fraud))
print("Percent of fraudulent transactions: ", fraud_ratio)
# Up-weight the rare fraud class so the loss does not ignore it.
weighting = 1 / fraud_ratio
raw_y_train[:, 1] = raw_y_train[:, 1] * weighting
import tensorflow as tf
# Graph dimensions: one input per feature column, one output per class.
input_dimensions = ar_X.shape[1]
output_dimensions = ar_y.shape[1]
num_layer_1_cells = 100
num_layer_2_cells = 150
# Placeholders receive training batches; constants hold the fixed test split.
X_train_node = tf.placeholder(tf.float32, [None, input_dimensions], name='X_train')
y_train_node = tf.placeholder(tf.float32, [None, output_dimensions], name='y_train')
X_test_node = tf.constant(raw_X_test, name='X_test')
y_test_node = tf.constant(raw_y_test, name='y_test')
# NOTE(review): all weights are zero-initialised, which keeps hidden units
# symmetric under gradient descent -- random initialisation is the usual choice.
weight_1_node = tf.Variable(tf.zeros([input_dimensions, num_layer_1_cells]), name='weight_1')
biases_1_node = tf.Variable(tf.zeros([num_layer_1_cells]), name='biases_1')
weight_2_node = tf.Variable(tf.zeros([num_layer_1_cells, num_layer_2_cells]), name='weight_2')
biases_2_node = tf.Variable(tf.zeros([num_layer_2_cells]), name='biases_2')
weight_3_node = tf.Variable(tf.zeros([num_layer_2_cells, output_dimensions]), name='weight_3')
biases_3_node = tf.Variable(tf.zeros([output_dimensions]), name='biases_3')
def network(input_tensor):
    """Three-layer feed-forward graph: sigmoid -> sigmoid+dropout -> softmax.

    Uses the module-level weight/bias variables; returns per-class
    probabilities (legit vs fraud).
    """
    hidden_1 = tf.nn.sigmoid(tf.matmul(input_tensor, weight_1_node) + biases_1_node)
    hidden_2 = tf.nn.dropout(tf.nn.sigmoid(tf.matmul(hidden_1, weight_2_node) + biases_2_node), 0.85)
    return tf.nn.softmax(tf.matmul(hidden_2, weight_3_node) + biases_3_node)
# Build the graph twice: once over the training placeholders, once over the
# fixed test constants.
y_train_prediction = network(X_train_node)
y_test_prediction = network(X_test_node)
# NOTE(review): softmax_cross_entropy expects logits, but network() already
# applies softmax -- confirm this double-softmax is intended.
cross_entropy = tf.losses.softmax_cross_entropy(y_train_node, y_train_prediction)
optimizer = tf.train.AdadeltaOptimizer(0.005).minimize(cross_entropy)
def calculate_accuracy(actual, predicted):
    """Percentage of rows whose arg-max class matches between actual and predicted.

    Both arguments are 2-D arrays of per-class scores (one row per sample).
    """
    actual_classes = np.argmax(actual, 1)
    predicted_classes = np.argmax(predicted, 1)
    matches = np.equal(predicted_classes, actual_classes)
    return 100 * np.sum(matches) / predicted_classes.shape[0]
num_epochs = 100
import time
with tf.Session() as session:
    tf.global_variables_initializer().run()
    # Full-batch training: every epoch feeds the entire training split at once.
    for epoch in range(num_epochs):
        start_time = time.time()
        _, cross_entropy_score = session.run([optimizer, cross_entropy],
                                             feed_dict={X_train_node: raw_X_train, y_train_node: raw_y_train})
        if epoch % 10 == 0:
            timer = time.time() - start_time
            print('Epoch: {}'.format(epoch), 'Current loss: {0:.4f}'.format(cross_entropy_score),
                  'Elapsed time: {0:.2f}seconds'.format(timer))
    final_y_test = y_test_node.eval()
    final_y_test_prediction = y_test_prediction.eval()
    # Overall accuracy across both classes on the held-out split.
    final_accuracy = calculate_accuracy(final_y_test, final_y_test_prediction)
    print("Final accuracy: {0: .2f}%".format(final_accuracy))
    # Fraud-only accuracy: restrict to rows whose one-hot fraud column is set.
    final_fraud_y_test = final_y_test[final_y_test[:, 1] == 1]
    final_fraud_y_test_prediction = final_y_test_prediction[final_y_test[:, 1] == 1]
    final_fraud_accuracy = calculate_accuracy(final_fraud_y_test, final_fraud_y_test_prediction)
    print("Final fraud specific accuracy: {0: .2f}".format(final_fraud_accuracy))
| [
"numpy.unique",
"pandas.read_csv",
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.asarray",
"numpy.argmax",
"numpy.equal",
"tensorflow.global_variables_initializer",
"tensorflow.train.AdadeltaOptimizer",
"tensorflow.constant",
"tensorflow.matm... | [((86, 115), 'pandas.read_csv', 'pd.read_csv', (['"""creditcard.csv"""'], {}), "('creditcard.csv')\n", (97, 115), True, 'import pandas as pd\n'), ((480, 527), 'pandas.get_dummies', 'pd.get_dummies', (['shuffle_data'], {'columns': "['Class']"}), "(shuffle_data, columns=['Class'])\n", (494, 527), True, 'import pandas as pd\n'), ((1589, 1657), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, input_dimensions]'], {'name': '"""X_train"""'}), "(tf.float32, [None, input_dimensions], name='X_train')\n", (1603, 1657), True, 'import tensorflow as tf\n'), ((1673, 1742), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, output_dimensions]'], {'name': '"""y_train"""'}), "(tf.float32, [None, output_dimensions], name='y_train')\n", (1687, 1742), True, 'import tensorflow as tf\n'), ((1758, 1796), 'tensorflow.constant', 'tf.constant', (['raw_X_test'], {'name': '"""X_test"""'}), "(raw_X_test, name='X_test')\n", (1769, 1796), True, 'import tensorflow as tf\n'), ((1811, 1849), 'tensorflow.constant', 'tf.constant', (['raw_y_test'], {'name': '"""y_test"""'}), "(raw_y_test, name='y_test')\n", (1822, 1849), True, 'import tensorflow as tf\n'), ((2773, 2838), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['y_train_node', 'y_train_prediction'], {}), '(y_train_node, y_train_prediction)\n', (2804, 2838), True, 'import tensorflow as tf\n'), ((841, 881), 'numpy.asarray', 'np.asarray', (['df_X.values'], {'dtype': '"""float32"""'}), "(df_X.values, dtype='float32')\n", (851, 881), True, 'import numpy as np\n'), ((883, 923), 'numpy.asarray', 'np.asarray', (['df_y.values'], {'dtype': '"""float32"""'}), "(df_y.values, dtype='float32')\n", (893, 923), True, 'import numpy as np\n'), ((1170, 1226), 'numpy.unique', 'np.unique', (["credit_card_data['Class']"], {'return_counts': '(True)'}), "(credit_card_data['Class'], return_counts=True)\n", (1179, 1226), True, 'import numpy as np\n'), ((1879, 1926), 
'tensorflow.zeros', 'tf.zeros', (['[input_dimensions, num_layer_1_cells]'], {}), '([input_dimensions, num_layer_1_cells])\n', (1887, 1926), True, 'import tensorflow as tf\n'), ((1973, 2002), 'tensorflow.zeros', 'tf.zeros', (['[num_layer_1_cells]'], {}), '([num_layer_1_cells])\n', (1981, 2002), True, 'import tensorflow as tf\n'), ((2050, 2098), 'tensorflow.zeros', 'tf.zeros', (['[num_layer_1_cells, num_layer_2_cells]'], {}), '([num_layer_1_cells, num_layer_2_cells])\n', (2058, 2098), True, 'import tensorflow as tf\n'), ((2145, 2174), 'tensorflow.zeros', 'tf.zeros', (['[num_layer_2_cells]'], {}), '([num_layer_2_cells])\n', (2153, 2174), True, 'import tensorflow as tf\n'), ((2222, 2270), 'tensorflow.zeros', 'tf.zeros', (['[num_layer_2_cells, output_dimensions]'], {}), '([num_layer_2_cells, output_dimensions])\n', (2230, 2270), True, 'import tensorflow as tf\n'), ((2317, 2346), 'tensorflow.zeros', 'tf.zeros', (['[output_dimensions]'], {}), '([output_dimensions])\n', (2325, 2346), True, 'import tensorflow as tf\n'), ((2968, 2988), 'numpy.argmax', 'np.argmax', (['actual', '(1)'], {}), '(actual, 1)\n', (2977, 2988), True, 'import numpy as np\n'), ((3005, 3028), 'numpy.argmax', 'np.argmax', (['predicted', '(1)'], {}), '(predicted, 1)\n', (3014, 3028), True, 'import numpy as np\n'), ((3143, 3155), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3153, 3155), True, 'import tensorflow as tf\n'), ((2852, 2885), 'tensorflow.train.AdadeltaOptimizer', 'tf.train.AdadeltaOptimizer', (['(0.005)'], {}), '(0.005)\n', (2878, 2885), True, 'import tensorflow as tf\n'), ((3270, 3281), 'time.time', 'time.time', ([], {}), '()\n', (3279, 3281), False, 'import time\n'), ((2421, 2459), 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'weight_1_node'], {}), '(input_tensor, weight_1_node)\n', (2430, 2459), True, 'import tensorflow as tf\n'), ((2602, 2634), 'tensorflow.matmul', 'tf.matmul', (['layer2', 'weight_3_node'], {}), '(layer2, weight_3_node)\n', (2611, 2634), True, 'import 
tensorflow as tf\n'), ((3172, 3205), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3203, 3205), True, 'import tensorflow as tf\n'), ((2518, 2550), 'tensorflow.matmul', 'tf.matmul', (['layer1', 'weight_2_node'], {}), '(layer1, weight_2_node)\n', (2527, 2550), True, 'import tensorflow as tf\n'), ((3053, 3080), 'numpy.equal', 'np.equal', (['predicted', 'actual'], {}), '(predicted, actual)\n', (3061, 3080), True, 'import numpy as np\n'), ((3515, 3526), 'time.time', 'time.time', ([], {}), '()\n', (3524, 3526), False, 'import time\n')] |
import random
import math
from typing import List
import numpy as np
import torch
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import manifold, decomposition
from scipy.stats import pearsonr
from model import LSTMModel
from joint_model import JointModel
from irt.irt_training import get_model_for_testing, get_processed_dataset, get_all_problem_qids
from training import get_data, get_labels
from data_processing import load_type_mappings, get_problem_qids
from data_loading import Dataset, Collator
from per_question_data_loading import PerQuestionDataset, PerQuestionCollator
from constants import TrainOptions, Mode, Correctness
from utils import device
def reduce_latent_states(latent_states: np.array, sample_rate: float, perplexity: int = 15):
    """
    Embed latent state vectors into 2D and randomly downsample the result.

    latent_states - (num_samples x dim) array of latent vectors
    sample_rate - fraction of embedded points to keep (reduces plot clutter)
    perplexity - t-SNE perplexity (only used by the "tsne" algorithm)

    Returns the downsampled 2D embedding and the indices of the kept points.
    """
    print("Performing Dimension Reduction")
    algo = "tsne"
    # Dispatch table over the supported reducers; "tsne" is the active choice.
    make_reducer = {
        "pca": lambda: decomposition.PCA(2),
        "mds": lambda: manifold.MDS(2),
        "tsne": lambda: manifold.TSNE(2, perplexity=perplexity, learning_rate="auto", n_iter=1000, init="pca", random_state=221),
    }[algo]
    embedded = make_reducer().fit_transform(latent_states)
    # Randomly downsample rendered points to reduce clutter
    print("Downsampling")
    keep_count = int(sample_rate * len(embedded))
    sample_idxs = random.sample(range(len(embedded)), keep_count)
    return embedded[sample_idxs], sample_idxs
def render_scatter_plots(data: np.array, reduced_states: np.array, plot_configs: list, labels: List[int] = None):
    """
    Render one scatter plot per entry of plot_configs over the 2D-reduced latent states.

    data - per-point metadata; data[i]['student_id'] is printed when point i is clicked
    reduced_states - (num_points x 2) array of dimension-reduced latent states
    plot_configs - list of (title, cmap, c_vec, legend_map) tuples; c_vec colors the
        points and legend_map optionally renames the auto-generated legend labels
    labels - will render check and x marks for 1 and 0 values respectively. note that
        this causes the click callback results to be incorrect.
    """
    # Lay the plots out on a grid at most 2 columns wide
    num_cols = 2 if len(plot_configs) > 1 else 1
    num_rows = math.ceil(len(plot_configs) / num_cols)
    fig, axes = plt.subplots(num_rows, num_cols)
    fig.suptitle("Dimension-Reduced Latent State Vectors")
    if labels is not None:
        # Split points by label so each group can be drawn with its own marker
        correct_states = reduced_states[labels == 1]
        incorrect_states = reduced_states[labels == 0]
    for plot_idx, (title, cmap, c_vec, legend_map) in enumerate(plot_configs):
        # plt.subplots returns a bare Axes, a 1D array, or a 2D array depending on the grid shape
        ax = axes if len(plot_configs) == 1 else axes[plot_idx] if num_rows == 1 else axes[plot_idx // num_cols, plot_idx % num_cols]
        # For discrete (ListedColormap) color maps, pin the color range to the number of colors
        add_args = {} if isinstance(cmap, str) else {"vmin": 0, "vmax": len(cmap.colors)}
        if labels is not None:
            scatter = ax.scatter(correct_states[:,0], correct_states[:,1], s=100, c=c_vec[labels == 1], cmap=cmap, picker=True, pickradius=5, marker="$\u2713$", **add_args)
            scatter = ax.scatter(incorrect_states[:,0], incorrect_states[:,1], s=100, c=c_vec[labels == 0], cmap=cmap, picker=True, pickradius=5, marker="x", **add_args)
        else:
            scatter = ax.scatter(reduced_states[:,0], reduced_states[:,1], s=15, c=c_vec, cmap=cmap, picker=True, pickradius=5, **add_args)
        artists, legend_labels = scatter.legend_elements()
        if legend_map:
            legend_labels = [legend_map[label] for label in legend_labels]
        ax.legend(artists, legend_labels, loc='lower left', title=title)#, bbox_to_anchor=(1, 1))
    # Define click handler - print information associated with clicked point
    def onpick(event):
        ind = event.ind
        print(data[ind[0]]['student_id'])
    fig.canvas.mpl_connect('pick_event', onpick)
    # Render the plots
    plt.show()
def cluster(model_name: str, data_file: str, options: TrainOptions):
    """
    Visualize dimension-reduced latent states of a trained sequence model.

    Loads the checkpoint at "<model_name>.pt", runs every sequence through the
    model to collect latent states and thresholded predictions, reduces the
    states to 2D, and renders the configured scatter plots.

    model_name - checkpoint name without the ".pt" extension
    data_file - data file path; falls back to "data/train_data.json" when falsy
    options - run options; per_q_arch selects between the per-question
        (JointModel) and plain LSTM architectures
    """
    type_mappings = load_type_mappings()
    # Load data
    data = get_data(data_file or "data/train_data.json")
    data = [seq for seq in data if max(seq["time_deltas"]) < 2000] # Remove time outliers
    labels = get_labels(options.task, True)
    if options.per_q_arch:
        block_a_qids = {qid for _, qid in get_problem_qids("A", type_mappings)}
        data_loader = torch.utils.data.DataLoader(
            PerQuestionDataset(data, labels, block_a_qids, False, True, options.concat_visits, options.use_correctness),
            collate_fn=PerQuestionCollator(block_a_qids),
            batch_size=len(data)
        )
        num_labels = len(get_problem_qids("B", type_mappings)) if options.task == "q_stats" else 1
        model = JointModel(Mode.CLUSTER, type_mappings, options, num_labels=num_labels, num_input_qids=len(block_a_qids)).to(device)
        model.load_params(torch.load(f"{model_name}.pt", map_location=device))
    else:
        data_loader = torch.utils.data.DataLoader(
            Dataset(data, type_mappings, labels=labels),
            collate_fn=Collator(),
            batch_size=len(data)
        )
        model = LSTMModel(Mode.CLUSTER, load_type_mappings(), options).to(device)
        model.load_state_dict(torch.load(f"{model_name}.pt", map_location=device))
    # Extract latent state with label and prediction for each sequence in the dataset
    print("Extracting latent states")
    model.eval()
    with torch.no_grad():
        # batch_size=len(data) above means this loop body runs exactly once
        for batch in data_loader:
            latent_states, predictions = model(batch)
            latent_states = latent_states.detach().cpu().numpy()
            predictions = predictions.detach().cpu().numpy()
            # Threshold the raw model outputs into binary predictions
            predictions[predictions > 0] = 1
            predictions[predictions < 0] = 0
            labels = batch["labels"].detach().cpu().numpy()
    # Get dimension-reduced latent states
    reduced_states, sample_idxs = reduce_latent_states(latent_states, 1, perplexity=30)
    # Keep metadata/labels/predictions aligned with the downsampled states
    data = np.array(data)[sample_idxs]
    labels = labels[sample_idxs]
    predictions = predictions[sample_idxs]
    # Define the plots to be shown
    bin_cmap = ListedColormap(["red", "blue"])
    bin_label_map = {
        "$\\mathdefault{0}$": "Below Average",
        "$\\mathdefault{1}$": "Above Average"
    }
    num_visited_questions = [len([qs for qs in seq["q_stats"].values() if qs["visits"]]) for seq in data]
    # Alternative colorings are kept below (commented out) for exploration
    plots = [
        ("Label", bin_cmap, labels, bin_label_map), # Good
        # ("Prediction", bin_cmap, predictions, None), # Good
        # ("Block A Score", "viridis", [seq["block_a_score"] for seq in data], None), # Good
        # ("Num Events", "viridis", [math.log10(len(seq["event_types"])) for seq in data], None), # Good
        # ("Questions Attempted", "viridis", [sum(qs["correct"] != Correctness.INCOMPLETE.value for qs in seq["q_stats"].values()) for seq in data], None), # Good
        # ("Questions Visited", "viridis", num_visited_questions, None), # Very similar to questions attempted
        # ("Total Time", "viridis", [min(max(seq["time_deltas"]), 1800) for seq in data], None), # Good
        # ("Avg. Visits", "viridis", [sum(qs["visits"] for qs in seq["q_stats"].values()) / len([qs for qs in seq["q_stats"].values() if qs["visits"]]) for seq in data], None), # Not Great
        # ("Num Visits", "viridis", [sum(qs["visits"] for qs in seq["q_stats"].values()) for seq in data], None), # Good
        # ("Avg. Time Spent", "viridis", [
        #     min(
        #         sum(qs["time"] for qs in seq["q_stats"].values()) / num_visited_questions[seq_idx],
        #         1800 / num_visited_questions[seq_idx] # Min since some sequences have messed up timestamps and we don't want outliers
        #     )
        #     for seq_idx, seq in enumerate(data)
        # ], None), # Not Great
        # ("Std. Time Spent", "viridis", [np.array([qs["time"] for qs in seq["q_stats"].values()]).std() for seq in data], None), # Not Great
    ]
    # Render the plots
    render_scatter_plots(data, reduced_states, plots)
def cluster_irt(model_name: str, data_file: str, options: TrainOptions):
    """
    Visualize dimension-reduced latent states of a trained IRT model.

    Runs the per-entry dataset built from the selected problems through the
    model, reduces the latent states to 2D, and renders scatter plots colored
    by label, behavior scalar quadrant, and several activity statistics.

    model_name - checkpoint name of the IRT model to load
    data_file - path to the source data file
    options - options controlling batching and dataset processing
    """
    # Get dataset
    type_mappings = load_type_mappings()
    src_data = get_data(data_file)
    problem_qids = {2} # MCSS
    # problem_qids = {3} # MCSS
    # problem_qids = {4} # MatchMS
    # problem_qids = {7} # MultipleFillInBlank
    # problem_qids = {13} # CompositeCR - FillInBlank
    # problem_qids = {14} # FillInBlank
    # problem_qids = {27} # GridMS
    # problem_qids = {30} # MCSS
    # problem_qids = {36} # ZonesMS
    # problem_qids = {37} # CompositeCR - ZonesMS, MultipleFillInBlank
    # problem_qids = {qid for _, qid in get_problem_qids("A", type_mappings) + get_problem_qids("B", type_mappings)}
    data = get_processed_dataset(src_data, type_mappings, False, problem_qids, options.concat_visits)
    data_loader = torch.utils.data.DataLoader(
        data,
        collate_fn=Collator(),
        batch_size=options.batch_size
    )
    # Get model
    model = get_model_for_testing(Mode.CLUSTER, model_name, type_mappings, True, False, None, None, options)
    # Extract latent state with label, behavior scalar and prediction for each entry in the dataset
    print("Extracting latent states")
    ls_batches = []
    bv_batches = []
    pred_batches = []
    label_batches = []
    with torch.no_grad():
        for batch in data_loader:
            latent_states, behavior, predictions = model(batch)
            ls_batches.append(latent_states.detach().cpu().numpy())
            bv_batches.append(behavior.detach().cpu().numpy())
            pred_batches.append(predictions.detach().cpu().numpy())
            label_batches.append(batch["labels"].detach().cpu().numpy())
    latent_states = np.concatenate(ls_batches, axis=0)
    behavior = np.concatenate(bv_batches, axis=0)
    predictions = np.concatenate(pred_batches, axis=0)
    labels = np.concatenate(label_batches, axis=0)
    # Middle element of each sorted sign group: used to split the negative and
    # positive behavior scalars in half for the quadrant coloring below
    neg_bv = sorted(behavior[behavior <= 0])
    pos_bv = sorted(behavior[behavior > 0])
    neg_bv_cutoff = neg_bv[len(neg_bv) // 2] if neg_bv else 0
    pos_bv_cutoff = pos_bv[len(pos_bv) // 2] if pos_bv else 0
    # Threshold the raw model outputs into binary predictions
    predictions[predictions < 0] = 0
    predictions[predictions > 0] = 1
    # Get dimension-reduced latent states
    reduced_states, sample_idxs = reduce_latent_states(latent_states, 0.1, perplexity=30)
    # Keep metadata/labels/predictions/behavior aligned with the downsampled states
    data = torch.utils.data.Subset(data, sample_idxs)
    labels = labels[sample_idxs]
    predictions = predictions[sample_idxs]
    behavior = behavior[sample_idxs]
    # Define the plots to be shown
    bin_cmap = ListedColormap(["red", "blue"])
    quad_cmap = ListedColormap([(1, 0, 0), (1, .5, .5), (.5, .5, 1), (0, 0, 1)])
    quad_label_map = {
        "$\\mathdefault{0}$": "< 50% Neg.",
        "$\\mathdefault{1}$": "> 50% Neg.",
        "$\\mathdefault{2}$": "< 50% Pos.",
        "$\\mathdefault{3}$": "> 50% Pos."
    }
    plots = [
        ("Label", bin_cmap, labels, None),
        # ("Prediction", bin_cmap, predictions, None),
        ("Behavior Scalar", quad_cmap, np.array([0 if bv < neg_bv_cutoff else 1 if bv < 0 else 2 if bv < pos_bv_cutoff else 3 for bv in behavior]), quad_label_map),
        # ("Behavior Scalar", "viridis", behavior, None),
        ("Visits", "viridis", [entry["num_visits"] for entry in data], None),
        ("Time Spent", "viridis", [entry["total_time"] for entry in data], None),
        ("Num Events", "viridis", [len(entry["event_types"]) for entry in data], None),
        ("Max Time Gap", "viridis", [entry["max_gap"] for entry in data], None)
    ]
    # Render the plots
    render_scatter_plots(data, reduced_states, plots)#, labels=labels)
def visualize_irt(model_name: str, data_file: str, use_behavior_model: bool, options: TrainOptions):
    """
    Plot learned IRT parameters against observed student performance.

    First plots each student's learned parameter (Softplus-transformed
    ability, optionally summed with per-entry behavior scalars) against their
    total score over both blocks, then plots each question's learned
    difficulty against its average correctness. Prints the Pearson
    correlation for both relationships.

    model_name - checkpoint name of the IRT model to load
    data_file - path to the source data file
    use_behavior_model - whether the checkpoint includes the behavior component
    options - options controlling dataset processing
    """
    # Get dataset
    type_mappings = load_type_mappings()
    src_data = get_data(data_file)
    problem_qids = get_all_problem_qids(type_mappings)
    data = get_processed_dataset(src_data, type_mappings, False, problem_qids, options.concat_visits)
    data_loader = torch.utils.data.DataLoader(
        data,
        collate_fn=Collator(),
        batch_size=1000
    )
    # Get model
    model = get_model_for_testing(Mode.CLUSTER, model_name, type_mappings, use_behavior_model, False, None, None, options)
    # Get abilities and behavior scalars for each student
    # Manual toggle; only takes effect when the behavior model is in use
    include_behavior = True
    include_behavior = include_behavior and use_behavior_model
    if include_behavior:
        bv_batches = []
        with torch.no_grad():
            for batch_idx, batch in enumerate(data_loader):
                print(batch_idx)
                _, behavior, _ = model(batch)
                bv_batches.append(behavior.detach().cpu().numpy())
        behavioral_values = np.concatenate(bv_batches, axis=0)
    # Apply Softplus to the raw ability parameters before comparison
    abilities = torch.nn.Softplus()(model.ability).detach().cpu().numpy()
    student_to_param = {}
    for idx, entry in enumerate(data):
        # Without behavior: one ability value per student. With behavior:
        # accumulate ability + behavior scalar over all of the student's entries.
        student_to_param.setdefault(entry["student_id"], 0 if include_behavior else abilities[entry["sid"]])
        if include_behavior:
            student_to_param[entry["student_id"]] += abilities[entry["sid"]] + behavioral_values[idx]
    student_params = [param for param in student_to_param.values()]
    student_to_score = {seq["student_id"]: seq["block_a_score"] + seq["block_b_score"] for seq in src_data}
    scores = [student_to_score[student_id] for student_id in student_to_param.keys()]
    # Equivalent calculation for non-behavior model for verification
    # student_idxs = [type_mappings["student_ids"][str(seq["student_id"])] for seq in src_data]
    # student_params = torch.nn.Softplus()(model.ability).detach().cpu().numpy()[student_idxs]
    # scores = [seq["block_a_score"] + seq["block_b_score"] for seq in src_data]
    print("Student to score correlation", pearsonr(student_params, scores))
    # Plot learned ability vs. student score over both blocks
    plt.plot(student_params, scores, "bo")
    plt.xlabel("Learned Ability")
    plt.ylabel("Total Score")
    plt.show()
    # Plot question difficulty vs. avg correctness
    qid_to_score = {}
    qid_to_num = {}
    for entry in data:
        qid = entry["question_id"]
        qid_to_score.setdefault(qid, 0)
        qid_to_score[qid] += 1 if entry["correct"] else 0
        qid_to_num.setdefault(qid, 0)
        qid_to_num[qid] += 1
    difficulties = torch.nn.Softplus()(model.difficulty).detach().cpu().numpy()[list(qid_to_score.keys())]
    avg_score = [qid_to_score[qid] / qid_to_num[qid] for qid in qid_to_score]
    print("Difficulty to score correlation", pearsonr(difficulties, avg_score))
    plt.plot(difficulties, avg_score, "ro")
    plt.xlabel("Learned Difficulty")
    plt.ylabel("Average Score")
    plt.show()
def block_scores(data_file: str):
    """
    Scatter-plot block A vs. block B scores, sizing each point by how many
    students achieved that score pair, and print the A/B score correlation.

    data_file - path to the source data file
    """
    src_data = get_data(data_file)
    a_scores = [seq["block_a_score"] for seq in src_data]
    b_scores = [seq["block_b_score"] for seq in src_data]
    print("Block A/B score correlation", pearsonr(a_scores, b_scores))
    # Count how many students share each (block A, block B) score pair
    pair_counts = {}
    for pair in zip(a_scores, b_scores):
        pair_counts[pair] = pair_counts.get(pair, 0) + 1
    unique_a = [a for a, _ in pair_counts]
    unique_b = [b for _, b in pair_counts]
    point_sizes = list(pair_counts.values())
    plt.scatter(unique_a, unique_b, s=point_sizes, c="blue")
    plt.xlabel("Block A Score")
    plt.ylabel("Block B Score")
    plt.show()
| [
"matplotlib.pyplot.ylabel",
"training.get_data",
"data_processing.load_type_mappings",
"numpy.array",
"scipy.stats.pearsonr",
"sklearn.decomposition.PCA",
"data_processing.get_problem_qids",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"data_loading.Collator",
"sklearn.manifold.TSNE",
... | [((1956, 1988), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_rows', 'num_cols'], {}), '(num_rows, num_cols)\n', (1968, 1988), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3523), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3521, 3523), True, 'import matplotlib.pyplot as plt\n'), ((3614, 3634), 'data_processing.load_type_mappings', 'load_type_mappings', ([], {}), '()\n', (3632, 3634), False, 'from data_processing import load_type_mappings, get_problem_qids\n'), ((3663, 3708), 'training.get_data', 'get_data', (["(data_file or 'data/train_data.json')"], {}), "(data_file or 'data/train_data.json')\n", (3671, 3708), False, 'from training import get_data, get_labels\n'), ((3812, 3842), 'training.get_labels', 'get_labels', (['options.task', '(True)'], {}), '(options.task, True)\n', (3822, 3842), False, 'from training import get_data, get_labels\n'), ((5725, 5756), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['red', 'blue']"], {}), "(['red', 'blue'])\n", (5739, 5756), False, 'from matplotlib.colors import ListedColormap\n'), ((7741, 7761), 'data_processing.load_type_mappings', 'load_type_mappings', ([], {}), '()\n', (7759, 7761), False, 'from data_processing import load_type_mappings, get_problem_qids\n'), ((7777, 7796), 'training.get_data', 'get_data', (['data_file'], {}), '(data_file)\n', (7785, 7796), False, 'from training import get_data, get_labels\n'), ((8338, 8433), 'irt.irt_training.get_processed_dataset', 'get_processed_dataset', (['src_data', 'type_mappings', '(False)', 'problem_qids', 'options.concat_visits'], {}), '(src_data, type_mappings, False, problem_qids, options\n .concat_visits)\n', (8359, 8433), False, 'from irt.irt_training import get_model_for_testing, get_processed_dataset, get_all_problem_qids\n'), ((8594, 8694), 'irt.irt_training.get_model_for_testing', 'get_model_for_testing', (['Mode.CLUSTER', 'model_name', 'type_mappings', '(True)', '(False)', 'None', 'None', 'options'], {}), '(Mode.CLUSTER, 
model_name, type_mappings, True, False,\n None, None, options)\n', (8615, 8694), False, 'from irt.irt_training import get_model_for_testing, get_processed_dataset, get_all_problem_qids\n'), ((9331, 9365), 'numpy.concatenate', 'np.concatenate', (['ls_batches'], {'axis': '(0)'}), '(ls_batches, axis=0)\n', (9345, 9365), True, 'import numpy as np\n'), ((9381, 9415), 'numpy.concatenate', 'np.concatenate', (['bv_batches'], {'axis': '(0)'}), '(bv_batches, axis=0)\n', (9395, 9415), True, 'import numpy as np\n'), ((9434, 9470), 'numpy.concatenate', 'np.concatenate', (['pred_batches'], {'axis': '(0)'}), '(pred_batches, axis=0)\n', (9448, 9470), True, 'import numpy as np\n'), ((9484, 9521), 'numpy.concatenate', 'np.concatenate', (['label_batches'], {'axis': '(0)'}), '(label_batches, axis=0)\n', (9498, 9521), True, 'import numpy as np\n'), ((9953, 9995), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['data', 'sample_idxs'], {}), '(data, sample_idxs)\n', (9976, 9995), False, 'import torch\n'), ((10160, 10191), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['red', 'blue']"], {}), "(['red', 'blue'])\n", (10174, 10191), False, 'from matplotlib.colors import ListedColormap\n'), ((10208, 10276), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['[(1, 0, 0), (1, 0.5, 0.5), (0.5, 0.5, 1), (0, 0, 1)]'], {}), '([(1, 0, 0), (1, 0.5, 0.5), (0.5, 0.5, 1), (0, 0, 1)])\n', (10222, 10276), False, 'from matplotlib.colors import ListedColormap\n'), ((11381, 11401), 'data_processing.load_type_mappings', 'load_type_mappings', ([], {}), '()\n', (11399, 11401), False, 'from data_processing import load_type_mappings, get_problem_qids\n'), ((11417, 11436), 'training.get_data', 'get_data', (['data_file'], {}), '(data_file)\n', (11425, 11436), False, 'from training import get_data, get_labels\n'), ((11456, 11491), 'irt.irt_training.get_all_problem_qids', 'get_all_problem_qids', (['type_mappings'], {}), '(type_mappings)\n', (11476, 11491), False, 'from irt.irt_training import 
get_model_for_testing, get_processed_dataset, get_all_problem_qids\n'), ((11503, 11598), 'irt.irt_training.get_processed_dataset', 'get_processed_dataset', (['src_data', 'type_mappings', '(False)', 'problem_qids', 'options.concat_visits'], {}), '(src_data, type_mappings, False, problem_qids, options\n .concat_visits)\n', (11524, 11598), False, 'from irt.irt_training import get_model_for_testing, get_processed_dataset, get_all_problem_qids\n'), ((11745, 11859), 'irt.irt_training.get_model_for_testing', 'get_model_for_testing', (['Mode.CLUSTER', 'model_name', 'type_mappings', 'use_behavior_model', '(False)', 'None', 'None', 'options'], {}), '(Mode.CLUSTER, model_name, type_mappings,\n use_behavior_model, False, None, None, options)\n', (11766, 11859), False, 'from irt.irt_training import get_model_for_testing, get_processed_dataset, get_all_problem_qids\n'), ((13482, 13520), 'matplotlib.pyplot.plot', 'plt.plot', (['student_params', 'scores', '"""bo"""'], {}), "(student_params, scores, 'bo')\n", (13490, 13520), True, 'import matplotlib.pyplot as plt\n'), ((13525, 13554), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Learned Ability"""'], {}), "('Learned Ability')\n", (13535, 13554), True, 'import matplotlib.pyplot as plt\n'), ((13559, 13584), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Score"""'], {}), "('Total Score')\n", (13569, 13584), True, 'import matplotlib.pyplot as plt\n'), ((13589, 13599), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13597, 13599), True, 'import matplotlib.pyplot as plt\n'), ((14186, 14225), 'matplotlib.pyplot.plot', 'plt.plot', (['difficulties', 'avg_score', '"""ro"""'], {}), "(difficulties, avg_score, 'ro')\n", (14194, 14225), True, 'import matplotlib.pyplot as plt\n'), ((14230, 14262), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Learned Difficulty"""'], {}), "('Learned Difficulty')\n", (14240, 14262), True, 'import matplotlib.pyplot as plt\n'), ((14267, 14294), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""Average Score"""'], {}), "('Average Score')\n", (14277, 14294), True, 'import matplotlib.pyplot as plt\n'), ((14299, 14309), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14307, 14309), True, 'import matplotlib.pyplot as plt\n'), ((14360, 14379), 'training.get_data', 'get_data', (['data_file'], {}), '(data_file)\n', (14368, 14379), False, 'from training import get_data, get_labels\n'), ((14954, 15014), 'matplotlib.pyplot.scatter', 'plt.scatter', (['block_a_score', 'block_b_score'], {'s': 'sizes', 'c': '"""blue"""'}), "(block_a_score, block_b_score, s=sizes, c='blue')\n", (14965, 15014), True, 'import matplotlib.pyplot as plt\n'), ((15019, 15046), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Block A Score"""'], {}), "('Block A Score')\n", (15029, 15046), True, 'import matplotlib.pyplot as plt\n'), ((15051, 15078), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Block B Score"""'], {}), "('Block B Score')\n", (15061, 15078), True, 'import matplotlib.pyplot as plt\n'), ((15083, 15093), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15091, 15093), True, 'import matplotlib.pyplot as plt\n'), ((940, 960), 'sklearn.decomposition.PCA', 'decomposition.PCA', (['(2)'], {}), '(2)\n', (957, 960), False, 'from sklearn import manifold, decomposition\n'), ((5047, 5062), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5060, 5062), False, 'import torch\n'), ((5570, 5584), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5578, 5584), True, 'import numpy as np\n'), ((8924, 8939), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8937, 8939), False, 'import torch\n'), ((12319, 12353), 'numpy.concatenate', 'np.concatenate', (['bv_batches'], {'axis': '(0)'}), '(bv_batches, axis=0)\n', (12333, 12353), True, 'import numpy as np\n'), ((13381, 13413), 'scipy.stats.pearsonr', 'pearsonr', (['student_params', 'scores'], {}), '(student_params, scores)\n', (13389, 13413), False, 'from scipy.stats import pearsonr\n'), ((14147, 14180), 
'scipy.stats.pearsonr', 'pearsonr', (['difficulties', 'avg_score'], {}), '(difficulties, avg_score)\n', (14155, 14180), False, 'from scipy.stats import pearsonr\n'), ((14738, 14776), 'scipy.stats.pearsonr', 'pearsonr', (['block_a_score', 'block_b_score'], {}), '(block_a_score, block_b_score)\n', (14746, 14776), False, 'from scipy.stats import pearsonr\n'), ((1007, 1022), 'sklearn.manifold.MDS', 'manifold.MDS', (['(2)'], {}), '(2)\n', (1019, 1022), False, 'from sklearn import manifold, decomposition\n'), ((4014, 4126), 'per_question_data_loading.PerQuestionDataset', 'PerQuestionDataset', (['data', 'labels', 'block_a_qids', '(False)', '(True)', 'options.concat_visits', 'options.use_correctness'], {}), '(data, labels, block_a_qids, False, True, options.\n concat_visits, options.use_correctness)\n', (4032, 4126), False, 'from per_question_data_loading import PerQuestionDataset, PerQuestionCollator\n'), ((4482, 4533), 'torch.load', 'torch.load', (['f"""{model_name}.pt"""'], {'map_location': 'device'}), "(f'{model_name}.pt', map_location=device)\n", (4492, 4533), False, 'import torch\n'), ((4608, 4651), 'data_loading.Dataset', 'Dataset', (['data', 'type_mappings'], {'labels': 'labels'}), '(data, type_mappings, labels=labels)\n', (4615, 4651), False, 'from data_loading import Dataset, Collator\n'), ((4843, 4894), 'torch.load', 'torch.load', (['f"""{model_name}.pt"""'], {'map_location': 'device'}), "(f'{model_name}.pt', map_location=device)\n", (4853, 4894), False, 'import torch\n'), ((8509, 8519), 'data_loading.Collator', 'Collator', ([], {}), '()\n', (8517, 8519), False, 'from data_loading import Dataset, Collator\n'), ((10628, 10741), 'numpy.array', 'np.array', (['[(0 if bv < neg_bv_cutoff else 1 if bv < 0 else 2 if bv < pos_bv_cutoff else\n 3) for bv in behavior]'], {}), '([(0 if bv < neg_bv_cutoff else 1 if bv < 0 else 2 if bv <\n pos_bv_cutoff else 3) for bv in behavior])\n', (10636, 10741), True, 'import numpy as np\n'), ((11674, 11684), 'data_loading.Collator', 
'Collator', ([], {}), '()\n', (11682, 11684), False, 'from data_loading import Dataset, Collator\n'), ((12068, 12083), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12081, 12083), False, 'import torch\n'), ((1070, 1178), 'sklearn.manifold.TSNE', 'manifold.TSNE', (['(2)'], {'perplexity': 'perplexity', 'learning_rate': '"""auto"""', 'n_iter': '(1000)', 'init': '"""pca"""', 'random_state': '(221)'}), "(2, perplexity=perplexity, learning_rate='auto', n_iter=1000,\n init='pca', random_state=221)\n", (1083, 1178), False, 'from sklearn import manifold, decomposition\n'), ((3913, 3949), 'data_processing.get_problem_qids', 'get_problem_qids', (['"""A"""', 'type_mappings'], {}), "('A', type_mappings)\n", (3929, 3949), False, 'from data_processing import load_type_mappings, get_problem_qids\n'), ((4146, 4179), 'per_question_data_loading.PerQuestionCollator', 'PerQuestionCollator', (['block_a_qids'], {}), '(block_a_qids)\n', (4165, 4179), False, 'from per_question_data_loading import PerQuestionDataset, PerQuestionCollator\n'), ((4249, 4285), 'data_processing.get_problem_qids', 'get_problem_qids', (['"""B"""', 'type_mappings'], {}), "('B', type_mappings)\n", (4265, 4285), False, 'from data_processing import load_type_mappings, get_problem_qids\n'), ((4676, 4686), 'data_loading.Collator', 'Collator', ([], {}), '()\n', (4684, 4686), False, 'from data_loading import Dataset, Collator\n'), ((4771, 4791), 'data_processing.load_type_mappings', 'load_type_mappings', ([], {}), '()\n', (4789, 4791), False, 'from data_processing import load_type_mappings, get_problem_qids\n'), ((12370, 12389), 'torch.nn.Softplus', 'torch.nn.Softplus', ([], {}), '()\n', (12387, 12389), False, 'import torch\n'), ((13936, 13955), 'torch.nn.Softplus', 'torch.nn.Softplus', ([], {}), '()\n', (13953, 13955), False, 'import torch\n')] |
#!/usr/bin/env python
# coding=utf-8
from typing import List, Union, Tuple, Any, Optional
import os.path
from os.path import join as pjoin
from pathlib import Path
import hashlib
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import torch
import torchvision.transforms as T
# Crop tightly around the content when figures are saved via matplotlib.
plt.rcParams["savefig.bbox"] = 'tight'
# A single image: a PIL Image or a (CHANNELS x HEIGHT x WIDTH) torch tensor.
_img_t = Union[Image.Image, torch.Tensor]
# A list of images, or a list of rows of images (used for grid plotting).
_imgs_t = List[Union[_img_t, List[_img_t]]]
# Public API of this module.
__all__ = ['IMG_EXTENSIONS', 'is_valid_extension', 'default_image_loader',
           'denormalize', 'plot', 'load_standard_test_imgs', 'mkdir',
           'check_integrity']
# File extensions recognised as images (lowercase; matched case-insensitively).
IMG_EXTENSIONS = (
    '.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.tif', '.tiff', '.webp', '.pgm'
)
def is_valid_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
    """
    Check whether a file name carries one of the given extensions.

    Parameters
    ----------
    filename : str
        Path to a file
    extensions : Tuple[str, ...]
        Extensions to consider (lowercase)

    Returns
    -------
    return : bool
        True if the lowercased filename ends with one of given extensions
    """
    # str.endswith accepts a tuple of suffixes, so no explicit loop is needed
    return filename.lower().endswith(tuple(extensions))
def is_valid_image(filename: str) -> bool:
    """
    Check whether a file name carries one of the recognised image extensions.

    Parameters
    ----------
    filename : str
        Path to a file

    Returns
    -------
    return : bool
        True if the filename ends with an entry of IMG_EXTENSIONS
    """
    has_image_ext = is_valid_extension(filename, IMG_EXTENSIONS)
    return has_image_ext
def default_image_loader(path: str) -> Image.Image:
    """
    Load the image at *path* and return it as an RGB PIL Image.

    The file handle is opened explicitly (instead of handing the path to
    Image.open) to avoid a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).

    Parameters
    ----------
    path : str
        Image file path

    Returns
    -------
    return : Image.Image
        RGB PIL Image
    """
    with open(path, 'rb') as file_handle:
        loaded = Image.open(file_handle)
        return loaded.convert('RGB')
def denormalize(tensor: torch.Tensor, mean: Tuple[float, ...] = None,
                std: Tuple[float, ...] = None):
    """
    Undoes mean/standard deviation normalization, zero to one scaling, and
    channel rearrangement for a batch of images.

    Parameters
    ----------
    tensor : torch.Tensor
        A (CHANNELS x HEIGHT x WIDTH) float tensor
    mean: Tuple[float, ...]
        A tuple of mean values to be subtracted from each image channel.
        Defaults to the inverse of a 0.5/0.5 normalization.
    std: Tuple[float, ...]
        A tuple of standard deviation values to be devided from each image
        channel. Defaults to the inverse of a 0.5/0.5 normalization.

    Returns
    ----------
    array : numpy.ndarray
        A (HEIGHT x WIDTH x CHANNELS) numpy array of uint8 values
    """
    if not mean:
        if tensor.shape[0] == 1:
            mean = (-0.5 / 0.5,)
        else:
            mean = (-0.5 / 0.5, -0.5 / 0.5, -0.5 / 0.5)
    if not std:
        if tensor.shape[0] == 1:
            std = (1 / 0.5,)
        else:
            std = (1 / 0.5, 1 / 0.5, 1 / 0.5)
    # Inline equivalent of torchvision.transforms.Normalize(mean, std):
    # (x - mean) / std applied per channel. Removes the torchvision call
    # for what is a two-line computation.
    mean_t = torch.as_tensor(mean, device=tensor.device).view(-1, 1, 1)
    std_t = torch.as_tensor(std, device=tensor.device).view(-1, 1, 1)
    denormalized_tensor = ((tensor - mean_t) / std_t * 255.).type(torch.uint8)
    # .cpu() before .numpy() so the conversion also works for CUDA tensors
    # (Tensor.numpy() raises on non-CPU tensors; the original crashed there).
    array = denormalized_tensor.cpu().permute(1, 2, 0).numpy().squeeze()
    return array
def plot(imgs: _imgs_t, baseline_imgs: Union[_img_t, _imgs_t] = None,
         row_title: List[str] = None, title: str = None,
         **imshow_kwargs):
    """
    Plot images in a 2D grid.

    Arguments
    ---------
    imgs : _imgs_t
        List of images to be plotted. `imgs` is either a list of
        `_img_t` images or a list of lists of `_img_t` images. Either way,
        each element of `imgs` holds a row of the image grid to be plotted.
    baseline_imgs : Union[_img_t, _imgs_t]
        List of baseline images. If not None, the first column of the grid will
        be filled with the baseline images. `baseline_imgs` is either
        a single `_img_t` image, or a list of `_img_t` images of the same
        length as `imgs`.
    row_title : List[str]
        List of row titles. If not None, `len(row_title)` must be equal to
        `len(imgs)`.
    title : str
        Optional figure title.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure holding the grid.

    Types
    -----
    _img_t = Union[PIL.Image.Image, torch.Tensor]
    _imgs_t = List[Union[_img_t, List[_img_t]]]
    """
    # Make a 2d grid even if there's just 1 row
    if not isinstance(imgs[0], list):
        imgs = [imgs]
    num_rows = len(imgs)
    num_cols = len(imgs[0])
    # `if not baseline_imgs:` would raise for a multi-element torch.Tensor
    # (its truth value is ambiguous) even though _img_t allows tensors, so
    # check for None / empty list explicitly instead.
    if baseline_imgs is None or (isinstance(baseline_imgs, list) and len(baseline_imgs) == 0):
        with_baseline = False
    else:
        if not isinstance(baseline_imgs, list):
            # A single baseline image is repeated for every row
            baseline_imgs = [baseline_imgs for i in range(0, num_rows)]
        else:
            if len(baseline_imgs) == 1:
                baseline_imgs = [baseline_imgs[0] for i in range(0, num_rows)]
            elif len(baseline_imgs) != num_rows:
                # Adjacent string literals concatenate; the original had
                # trailing commas, accidentally raising a tuple as the message
                msg = ("Number of elements in `baseline_imgs` "
                       "must match the number of elements in `imgs`")
                raise ValueError(msg)
            if isinstance(baseline_imgs[0], list):
                msg = ("Elements of `baseline_imgs` must be PIL Images "
                       "or Torch Tensors")
                raise TypeError(msg)
        with_baseline = True
        num_cols += 1  # First column is now the baseline images
    if row_title:
        if len(row_title) != num_rows:
            msg = ("Number of elements in `row_title` "
                   "must match the number of elements in `imgs`")
            raise ValueError(msg)
    fig, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
    for row_idx, row in enumerate(imgs):
        # Prepend the row's baseline image when one was supplied
        row = [baseline_imgs[row_idx]] + row if with_baseline else row
        for col_idx, img in enumerate(row):
            ax = axs[row_idx, col_idx]
            if isinstance(img, torch.Tensor):
                img = denormalize(img)
            else:
                img = np.asarray(img)
            if len(img.shape) == 2:
                # Single-channel image: render as grayscale
                ax.imshow(img, cmap='gray', vmin=0, vmax=255)
            else:
                ax.imshow(img, **imshow_kwargs)
            ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
    if with_baseline:
        plt.sca(axs[0, 0])
        plt.title(label='Baseline images', size=15)
    if row_title is not None:
        for row_idx in range(num_rows):
            plt.sca(axs[row_idx, 0])
            plt.ylabel(row_title[row_idx], rotation=0, labelpad=50, size=15)
        plt.tight_layout()
    if title:
        fig.suptitle(t=title, size=16)
    fig.tight_layout()
    return fig
def load_standard_test_imgs(directory: str = './imgs'):
    """
    Collect every valid image under *directory* (searched recursively).

    Parameters
    ----------
    directory : str
        Root directory to search; "~" is expanded.

    Returns
    -------
    (images, names) : Tuple[List[Image.Image], List[str]]
        Opened PIL images and the corresponding file-name stems.
    """
    directory = os.path.expanduser(directory)
    images, names = [], []
    for root, _, fnames in sorted(os.walk(directory, followlinks=True)):
        for fname in sorted(fnames):
            if not is_valid_image(fname):
                continue
            fpath = pjoin(root, fname)
            images.append(Image.open(fpath))
            names.append(Path(fpath).stem)
    return images, names
def mkdir(path):
    """
    Create a directory (with any missing parents) if it does not exist.

    Uses ``exist_ok=True`` instead of an ``exists()``-then-create sequence,
    which avoids the race where another process creates the directory
    between the check and the ``makedirs`` call (TOCTOU).

    Parameters
    ----------
    path : str
        A single directory path
    """
    os.makedirs(path, exist_ok=True)
def calculate_md5_dir(dirpath: str, chunk_size: int = 1024 * 1024,
                      verbose: bool = False) -> str:
    """
    Compute a single MD5 digest over the contents of every file under *dirpath*.

    Both the directory tuples and the file names within each directory are
    visited in sorted order so the digest is deterministic: ``os.walk`` yields
    file names in an arbitrary, filesystem-dependent order, and the original
    code only sorted the directory tuples, which made the hash unstable
    across systems.

    Parameters
    ----------
    dirpath : str
        Root directory to hash.
    chunk_size : int
        Bytes read per hash update (default 1 MiB).
    verbose : bool
        If True, print each file name as it is hashed.

    Returns
    -------
    str or int
        Hex digest string on success; -2 on failure (kept for backward
        compatibility with callers such as check_md5).
    """
    md5 = hashlib.md5()
    try:
        for root, _, files in sorted(os.walk(dirpath)):
            for name in sorted(files):  # sorted for a deterministic digest
                if verbose:
                    print('Hashing', name)
                fpath = pjoin(root, name)
                with open(fpath, "rb") as f:
                    for chunk in iter(lambda: f.read(chunk_size), b""):
                        md5.update(chunk)
    except Exception:
        # Was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        import traceback
        # Print the stack traceback
        traceback.print_exc()
        return -2
    return md5.hexdigest()
def calculate_md5_file(fpath: str, chunk_size: int = 1024 * 1024) -> str:
    """
    Compute the MD5 digest of a single file, read in chunks.

    Parameters
    ----------
    fpath : str
        File to hash.
    chunk_size : int
        Bytes read per hash update (default 1 MiB).

    Returns
    -------
    str or int
        Hex digest string on success; -2 on failure (kept for backward
        compatibility with callers such as check_md5).
    """
    md5 = hashlib.md5()
    try:
        with open(fpath, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                md5.update(chunk)
    except Exception:
        # Was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        import traceback
        # Print the stack traceback
        traceback.print_exc()
        return -2
    return md5.hexdigest()
def check_md5(path: str, md5: str, **kwargs: Any) -> bool:
    """
    Return True when the MD5 digest of *path* equals *md5*.

    *path* may be a file or a directory; the matching digest helper is
    selected accordingly and extra keyword arguments are forwarded to it.
    """
    hasher = calculate_md5_dir if os.path.isdir(path) else calculate_md5_file
    return md5 == hasher(path, **kwargs)
def check_integrity(path: str, md5: Optional[str] = None) -> bool:
    """
    Return True when *path* exists and, if *md5* is given, matches its digest.
    """
    if not os.path.exists(path):
        return False
    return md5 is None or check_md5(path, md5)
| [
"PIL.Image.open",
"hashlib.md5",
"matplotlib.pyplot.ylabel",
"pathlib.Path",
"numpy.asarray",
"matplotlib.pyplot.sca",
"os.path.join",
"torchvision.transforms.Normalize",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"traceback.print_exc",
"matplotlib.pyplot.subplots"
] | [((3014, 3045), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (3025, 3045), True, 'import torchvision.transforms as T\n'), ((5466, 5525), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'num_rows', 'ncols': 'num_cols', 'squeeze': '(False)'}), '(nrows=num_rows, ncols=num_cols, squeeze=False)\n', (5478, 5525), True, 'import matplotlib.pyplot as plt\n'), ((7325, 7338), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (7336, 7338), False, 'import hashlib\n'), ((7957, 7970), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (7968, 7970), False, 'import hashlib\n'), ((1948, 1961), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (1958, 1961), False, 'from PIL import Image\n'), ((6130, 6148), 'matplotlib.pyplot.sca', 'plt.sca', (['axs[0, 0]'], {}), '(axs[0, 0])\n', (6137, 6148), True, 'import matplotlib.pyplot as plt\n'), ((6157, 6200), 'matplotlib.pyplot.title', 'plt.title', ([], {'label': '"""Baseline images"""', 'size': '(15)'}), "(label='Baseline images', size=15)\n", (6166, 6200), True, 'import matplotlib.pyplot as plt\n'), ((6284, 6308), 'matplotlib.pyplot.sca', 'plt.sca', (['axs[row_idx, 0]'], {}), '(axs[row_idx, 0])\n', (6291, 6308), True, 'import matplotlib.pyplot as plt\n'), ((6321, 6385), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['row_title[row_idx]'], {'rotation': '(0)', 'labelpad': '(50)', 'size': '(15)'}), '(row_title[row_idx], rotation=0, labelpad=50, size=15)\n', (6331, 6385), True, 'import matplotlib.pyplot as plt\n'), ((6398, 6416), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6414, 6416), True, 'import matplotlib.pyplot as plt\n'), ((7803, 7824), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7822, 7824), False, 'import traceback\n'), ((8210, 8231), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8229, 8231), False, 'import traceback\n'), ((5846, 5861), 'numpy.asarray', 'np.asarray', (['img'], {}), 
'(img)\n', (5856, 5861), True, 'import numpy as np\n'), ((6819, 6837), 'os.path.join', 'pjoin', (['root', 'fname'], {}), '(root, fname)\n', (6824, 6837), True, 'from os.path import join as pjoin\n'), ((7530, 7547), 'os.path.join', 'pjoin', (['root', 'name'], {}), '(root, name)\n', (7535, 7547), True, 'from os.path import join as pjoin\n'), ((6872, 6888), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (6882, 6888), False, 'from PIL import Image\n'), ((6920, 6930), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6924, 6930), False, 'from pathlib import Path\n')] |
import pyaudio
import wave
from scipy.fftpack import fft, ifft
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import signal
from swan import pycwt
# Recording parameters
CHUNK = 1024                          # frames read per buffer
FORMAT = pyaudio.paInt16              # 16-bit integer samples
CHANNELS = 1                          # 1: mono, 2: stereo
RATE = 22100                          # sampling rate in Hz (22.1 kHz)
RECORD_SECONDS = 5                    # length of each recording
WAVE_OUTPUT_FILENAME = "output2.wav"
s = 1
while True:
    # --- Record RECORD_SECONDS of audio from the default input device ---
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    print("* recording")
    frames = []
    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)
    print("* done recording")
    stream.stop_stream()
    stream.close()
    p.terminate()
    # --- Save the recording as a WAV file ---
    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
    # --- Read the recording back for analysis ---
    wavfile = WAVE_OUTPUT_FILENAME
    wr = wave.open(wavfile, "rb")
    ch = CHANNELS  # wr.getnchannels()
    width = p.get_sample_size(FORMAT)  # wr.getsampwidth()
    fr = RATE  # wr.getframerate()
    fn = wr.getnframes()
    fs = fn / fr  # duration of the recording in seconds
    origin = wr.readframes(wr.getnframes())
    # NOTE(review): origin holds fn * 2 bytes of int16; keeping only fn bytes
    # yields fn // 2 samples (half the recording) — confirm this is intended.
    data = origin[:fn]
    wr.close()
    plt.figure(figsize=(12, 10))
    # Convert raw bytes to normalized float samples in [-1, 1)
    sig = np.frombuffer(data, dtype="int16") / 32768.0
    # BUG FIX: np.linspace requires an integer sample count (fn // 2, not fn / 2)
    t = np.linspace(0, fs, fn // 2, endpoint=False)
    plt.subplot(311)
    plt.xlim([0, 5])
    plt.plot(t, sig)
    plt.pause(1)
    # --- Short-time Fourier transform spectrogram ---
    nperseg = 256
    f, t, Zxx = signal.stft(sig, fs=fs * fn / 50, nperseg=nperseg)
    plt.subplot(312)
    plt.pcolormesh(t, 5 * f, np.abs(Zxx), cmap='hsv')
    plt.ylim([10 * f[1], 10 * f[-1]])
    plt.xlim([0, 5])
    plt.title('STFT Magnitude')
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
    plt.yscale('log')
    plt.pause(1)
    # --- Power spectrum via FFT ---
    freq = fft(sig, int(fn / 2))
    Pyy = np.sqrt(freq * freq.conj()) * 2 / fn  # amplitude spectrum
    f = np.arange(int(fn / 2))
    plt.subplot(313)
    plt.plot(f, Pyy)
    plt.axis([100, max(f) / 2, 0, 0.00005])
    plt.xscale('log')
    plt.pause(1)
    plt.savefig('figure' + str(s) + '.png')
    # --- Continuous wavelet transform (Morlet) scalogram ---
    Fs = 1 / 0.0002
    omega0 = 5  # Morlet wavelet parameter
    # BUG FIX: integer sample count here as well
    x = np.linspace(0, fs, fn // 2, endpoint=False)
    freqs = np.arange(10, 2000, 2.5)
    r = pycwt.cwt_f(sig, freqs, Fs, pycwt.Morlet(omega0))
    rr = np.abs(r)
    plt.rcParams['figure.figsize'] = (10, 6)
    fig = plt.figure()
    ax1 = fig.add_axes([0.1, 0.75, 0.7, 0.2])
    ax2 = fig.add_axes([0.1, 0.1, 0.7, 0.60], sharex=ax1)
    ax3 = fig.add_axes([0.83, 0.1, 0.03, 0.6])
    ax1.plot(x, sig, 'k')
    img = ax2.imshow(np.flipud(rr), extent=[0, 5, 100, 20000], aspect='auto', cmap='hsv')
    twin_ax = ax2
    twin_ax.set_yscale('log')
    twin_ax.set_xlim(0, 5)
    twin_ax.set_ylim(100, 20000)
    ax2.tick_params(which='both', labelleft=False, left=False)
    twin_ax.tick_params(which='both', labelleft=True, left=True, labelright=False)
    fig.colorbar(img, cax=ax3)
    plt.pause(1)
    plt.savefig('figure_' + str(s) + '.png')
    # Close all figures to avoid unbounded memory growth in the endless loop
    plt.close('all')
    s += 1
| [
"matplotlib.pyplot.ylabel",
"numpy.arange",
"wave.open",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.frombuffer",
"swan.pycwt.Morlet",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.ylim",
"numpy.abs",
"numpy.flipud",
"matplotlib.pyplot.pause",
"matplotli... | [((402, 419), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (417, 419), False, 'import pyaudio\n'), ((858, 895), 'wave.open', 'wave.open', (['WAVE_OUTPUT_FILENAME', '"""wb"""'], {}), "(WAVE_OUTPUT_FILENAME, 'wb')\n", (867, 895), False, 'import wave\n'), ((1096, 1120), 'wave.open', 'wave.open', (['wavfile', '"""rb"""'], {}), "(wavfile, 'rb')\n", (1105, 1120), False, 'import wave\n'), ((1399, 1427), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (1409, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1514, 1556), 'numpy.linspace', 'np.linspace', (['(0)', 'fs', '(fn / 2)'], {'endpoint': '(False)'}), '(0, fs, fn / 2, endpoint=False)\n', (1525, 1556), True, 'import numpy as np\n'), ((1558, 1574), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (1569, 1574), True, 'import matplotlib.pyplot as plt\n'), ((1579, 1595), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 5]'], {}), '([0, 5])\n', (1587, 1595), True, 'import matplotlib.pyplot as plt\n'), ((1600, 1616), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sig'], {}), '(t, sig)\n', (1608, 1616), True, 'import matplotlib.pyplot as plt\n'), ((1621, 1633), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (1630, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1814), 'scipy.signal.stft', 'signal.stft', (['sig'], {'fs': '(fs * fn / 50)', 'nperseg': 'nperseg'}), '(sig, fs=fs * fn / 50, nperseg=nperseg)\n', (1775, 1814), False, 'from scipy import signal\n'), ((1815, 1831), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (1826, 1831), True, 'import matplotlib.pyplot as plt\n'), ((1888, 1921), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[10 * f[1], 10 * f[-1]]'], {}), '([10 * f[1], 10 * f[-1]])\n', (1896, 1921), True, 'import matplotlib.pyplot as plt\n'), ((1922, 1938), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 5]'], {}), '([0, 5])\n', (1930, 1938), True, 'import matplotlib.pyplot as 
plt\n'), ((1943, 1970), 'matplotlib.pyplot.title', 'plt.title', (['"""STFT Magnitude"""'], {}), "('STFT Magnitude')\n", (1952, 1970), True, 'import matplotlib.pyplot as plt\n'), ((1975, 2003), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (1985, 2003), True, 'import matplotlib.pyplot as plt\n'), ((2008, 2032), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [sec]"""'], {}), "('Time [sec]')\n", (2018, 2032), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2054), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2047, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2059, 2071), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (2068, 2071), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2262), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (2257, 2262), True, 'import matplotlib.pyplot as plt\n'), ((2267, 2283), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'Pyy'], {}), '(f, Pyy)\n', (2275, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2358), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2351, 2358), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2375), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (2372, 2375), True, 'import matplotlib.pyplot as plt\n'), ((2523, 2565), 'numpy.linspace', 'np.linspace', (['(0)', 'fs', '(fn / 2)'], {'endpoint': '(False)'}), '(0, fs, fn / 2, endpoint=False)\n', (2534, 2565), True, 'import numpy as np\n'), ((2573, 2597), 'numpy.arange', 'np.arange', (['(10)', '(2000)', '(2.5)'], {}), '(10, 2000, 2.5)\n', (2582, 2597), True, 'import numpy as np\n'), ((2658, 2667), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (2664, 2667), True, 'import numpy as np\n'), ((2749, 2761), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2759, 2761), True, 'import matplotlib.pyplot as plt\n'), ((3320, 3332), 'matplotlib.pyplot.pause', 'plt.pause', 
(['(1)'], {}), '(1)\n', (3329, 3332), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1495), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': '"""int16"""'}), "(data, dtype='int16')\n", (1474, 1495), True, 'import numpy as np\n'), ((1859, 1870), 'numpy.abs', 'np.abs', (['Zxx'], {}), '(Zxx)\n', (1865, 1870), True, 'import numpy as np\n'), ((2627, 2647), 'swan.pycwt.Morlet', 'pycwt.Morlet', (['omega0'], {}), '(omega0)\n', (2639, 2647), False, 'from swan import pycwt\n'), ((2962, 2975), 'numpy.flipud', 'np.flipud', (['rr'], {}), '(rr)\n', (2971, 2975), True, 'import numpy as np\n')] |
# Author: <NAME>
# Email: <EMAIL>
# License: MIT License
import numpy as np
from .init_position import InitSearchPosition
class InitMLSearchPosition(InitSearchPosition):
    """Builds initial search positions for ML optimizers from a warm-start configuration."""

    def __init__(self, space, model, warm_start, scatter_init):
        super().__init__(space, model, warm_start, scatter_init)

    def _create_warm_start(self, nth_process):
        """Return the position vector for the nth process.

        Each hyperparameter index is taken from the warm-start point when
        present there, otherwise a random scalar position is drawn.
        """
        warm_start_key = list(self.warm_start.keys())[nth_process]
        warm_start_point = self.warm_start[warm_start_key]
        position = []
        for para_name in self._space_.para_space.keys():
            if para_name in list(warm_start_point.keys()):
                idx = self._space_.para_space[para_name].index(
                    *warm_start_point[para_name]
                )
            else:
                # Hyperparameter absent from the warm start: pick a random scalar
                idx = self._space_.get_random_pos_scalar(para_name)
            # what if warm start not in search_config range?
            position.append(idx)
        return np.array(position)
| [
"numpy.array"
] | [((1066, 1079), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (1074, 1079), True, 'import numpy as np\n')] |
"""
Skinet (Segmentation of the Kidney through a Neural nETwork) Project
Dataset tools
Copyright (c) 2021 Skinet Team
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import os
import shutil
from time import time
import cv2
import numpy as np
from skimage.io import imread
from common_utils import progressBar, formatTime
from datasetTools import AnnotationAdapter as adapt
from datasetTools.AnnotationAdapter import AnnotationAdapter
from datasetTools.datasetDivider import getBWCount, CV2_IMWRITE_PARAM
from mrcnn.Config import Config
from mrcnn.utils import extract_bboxes, expand_mask, minimize_mask, get_bboxes_intersection, shift_bbox
# Class catalogs shared by the dataset wrappers below. Each entry maps a class
# id to its display name, its annotation color (hex), and whether the class is
# ignored when building masks (only the Background entry is ignored).
# Classes of the main nephrology segmentation task.
NEPHRO_CLASSES = [
    {"id": 0, "name": "Background", "color": "", "ignore": True},
    {"id": 1, "name": "tubule_sain", "color": "#ff007f", "ignore": False},
    {"id": 2, "name": "tubule_atrophique", "color": "#55557f", "ignore": False},
    {"id": 3, "name": "nsg_complet", "color": "#ff557f", "ignore": False},
    {"id": 4, "name": "nsg_partiel", "color": "#55aa7f", "ignore": False},
    {"id": 5, "name": "pac", "color": "#ffaa7f", "ignore": False},
    {"id": 6, "name": "vaisseau", "color": "#55ff7f", "ignore": False},
    {"id": 7, "name": "artefact", "color": "#000000", "ignore": False},
    {"id": 8, "name": "veine", "color": "#0000ff", "ignore": False},
    {"id": 9, "name": "nsg", "color": "#55007f", "ignore": False},
    {"id": 10, "name": "intima", "color": "#aa0000", "ignore": False},
    {"id": 11, "name": "media", "color": "#aa5500", "ignore": False}
]
# Classes of the cortex segmentation task.
CORTICES_CLASSES = [
    {"id": 0, "name": "Background", "color": "", "ignore": True},
    {"id": 1, "name": "cortex", "color": "#ffaa00", "ignore": False},
    {"id": 2, "name": "medullaire", "color": "#ff0000", "ignore": False},
    {"id": 3, "name": "capsule", "color": "#ff00ff", "ignore": False}
]
# Classes of the MEST-C glomeruli scoring task.
MESTC_GLOM_CLASSES = [
    {"id": 0, "name": "Background", "color": "", "ignore": True},
    {"id": 1, "name": "hile", "color": "#64FE2E", "ignore": False},
    {"id": 2, "name": "M", "color": "#55007f", "ignore": False},
    {"id": 3, "name": "E", "color": "#ff007f", "ignore": False},
    {"id": 4, "name": "S", "color": "#55557f", "ignore": False},
    {"id": 5, "name": "C", "color": "#ff557f", "ignore": False},
    {"id": 6, "name": "necrose_fib", "color": "#55aa7f", "ignore": False},
    {"id": 7, "name": "artefact", "color": "#ffaa7f", "ignore": False}
]
# Classes of the inflammation detection task.
INFLAMMATION_CLASSES = [
    {"id": 0, "name": "Background", "color": "", "ignore": True},
    {"id": 1, "name": "inflammation", "color": "#ffff00", "ignore": False},
    {"id": 2, "name": "cpt", "color": "#64FE2E", "ignore": False}
]
def get_bbox_from_points(pts):
    """
    Compute the bounding box that encloses a set of 2D points.
    :param pts: iterable of [x, y] coordinates
    :return: y1, x1, y2, x2
    """
    coords = np.array(pts)
    xs, ys = coords[:, 0], coords[:, 1]
    return ys.min(), xs.min(), ys.max(), xs.max()
return y1, x1, y2, x2
def createMask(imgName: str, imgShape, idMask: int, ptsMask, datasetName: str = 'dataset_train',
               maskClass: str = 'masks', imageFormat="jpg", config: Config = None):
    """
    Create the mask image based on its polygon points
    :param imgName: name w/o extension of the base image
    :param imgShape: shape of the image (height, width[, channels])
    :param idMask: the ID of the mask, a number not already used for that image
    :param ptsMask: array of [x, y] coordinates which are all the polygon points representing the mask
    :param datasetName: name of the output dataset
    :param maskClass: name of the associated class of the current mask
    :param imageFormat: output format of the masks' images
    :param config: config object; when mini-masks are enabled, the mask is saved minimized
                   with its bbox coordinates encoded in the file name
    :return: None
    """
    # https://www.programcreek.com/python/example/89415/cv2.fillPoly
    # Formatting coordinates matrix to get int
    ptsMask = np.double(ptsMask)
    ptsMask = np.matrix.round(ptsMask)
    ptsMask = np.int32(ptsMask)
    bbox_coordinates = ""
    if config is not None and config.is_using_mini_mask():
        # Mini-mask mode: draw the polygon only inside its own bbox, then minimize it
        bbox = get_bbox_from_points(ptsMask)
        # Skip masks whose bbox lies entirely outside the image
        if get_bboxes_intersection(bbox, [0, 0, *imgShape[:2]]) <= 0:
            return
        # Clip the bbox to the image borders; the kept coordinates are stored in the file name
        kept_bbox = [0, 0, 0, 0]
        for i in range(4):
            kept_bbox[i] = min(max(0, bbox[i]), imgShape[i % 2])
        y1, x1, y2, x2 = kept_bbox
        bbox_coordinates = f"_{y1}_{x1}_{y2}_{x2}"
        # Draw the polygon in a bbox-sized canvas (points shifted to the bbox origin)
        shiftedBbox = shift_bbox(bbox)
        shift = bbox[:2]
        mask = np.uint8(np.zeros((shiftedBbox[2], shiftedBbox[3])))
        cv2.fillPoly(mask, [ptsMask - shift[::-1]], 255)
        # Crop away the part of the mask that fell outside the image
        shifted_kept_bbox = shift_bbox(kept_bbox, customShift=shift)
        y1, x1, y2, x2 = shifted_kept_bbox
        mask = mask[y1:y2, x1:x2]
        # Minimize the cropped mask to the configured mini-mask shape
        mask = minimize_mask(shiftedBbox, mask, config.get_mini_mask_shape())
        mask = mask.astype(np.uint8) * 255
    else:
        # Creating black matrix with same size than original image and then drawing the mask
        mask = np.uint8(np.zeros((imgShape[0], imgShape[1])))
        cv2.fillPoly(mask, [ptsMask], 255)
    # Saving result image under <dataset>/<image>/<class>/
    maskClass = maskClass.lower().strip(' ').replace(" ", "_")
    output_directory = os.path.join(datasetName, imgName, maskClass)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    output_name = f"{imgName}_{idMask:03d}{bbox_coordinates}.{imageFormat}"
    cv2.imwrite(os.path.join(output_directory, output_name), mask, CV2_IMWRITE_PARAM)
def getBboxFromName(imageName):
    """
    Extract the bbox coordinates encoded at the end of an image file name.
    :param imageName: the image name from which you want the bbox
    :return: the bbox as [y1, x1, y2, x2]
    """
    stem = os.path.splitext(os.path.basename(imageName))[0]
    return np.array([int(part) for part in stem.split('_')[-4:]])
def resizeMasks(baseMasks, xRatio: float, yRatio: float):
    """
    Scale mask polygon points to fit a resized image.
    :param baseMasks: array of [x, y] coordinates representing the mask polygon
    :param xRatio: width ratio applied to the x coordinates
    :param yRatio: height ratio applied to the y coordinates
    :return: new list of scaled [x, y] float coordinates
    """
    return [[float(pt[0]) * xRatio, float(pt[1]) * yRatio] for pt in baseMasks]
def createMasksOfImage(rawDatasetPath: str, imgName: str, datasetName: str = 'dataset_train',
                       adapter: AnnotationAdapter = None, classesInfo: dict = None, imageFormat="jpg", resize=None,
                       config: Config = None):
    """
    Create all the masks of a given image by parsing its annotation file
    :param rawDatasetPath: path to the folder containing images and associated annotations
    :param imgName: name w/o extension of an image
    :param datasetName: name of the output dataset
    :param adapter: the annotation adapter to use to create masks, if None looking for an adapter that can read the file
    :param classesInfo: Information about all classes that are used, by default will be nephrology classes Info
    :param imageFormat: output format of the image and masks
    :param resize: if not None, (height, width) the image and masks are resized to
    :param config: config object
    :return: None
    """
    # Getting shape of original image (same for all this masks)
    if classesInfo is None:
        classesInfo = NEPHRO_CLASSES if config is None else config.get_classes_info()
    img = cv2.imread(os.path.join(rawDatasetPath, f"{imgName}.{imageFormat}"))
    if img is None:
        print(f'Problem with {imgName} image')
        return
    shape = img.shape
    if resize is not None:
        # Ratios used later to scale the annotation polygons to the resized image
        yRatio = resize[0] / shape[0]
        xRatio = resize[1] / shape[1]
        assert yRatio > 0 and xRatio > 0, f"Error resize ratio not correct ({yRatio:3.2f}, {xRatio:3.2f})"
        img = cv2.resize(img, resize, interpolation=cv2.INTER_CUBIC)
        shape = img.shape
    # Copying the original image in the dataset
    targetDirectoryPath = os.path.join(datasetName, imgName, 'images')
    if not os.path.exists(targetDirectoryPath):
        os.makedirs(targetDirectoryPath)
    # TODO use file copy if unchanged else cv2
    cv2.imwrite(os.path.join(targetDirectoryPath, f"{imgName}.{imageFormat}"), img, CV2_IMWRITE_PARAM)
    # Finding annotation files whose name contains the image name
    formats = adapt.ANNOTATION_FORMAT
    fileList = os.listdir(rawDatasetPath)
    imageFiles = []
    for file in fileList:
        if imgName in file:
            if file.split('.')[-1] in formats:
                imageFiles.append(file)
    # Choosing the adapter to use (parameters to force it ?)
    file = None
    assert len(imageFiles) > 0
    if adapter is None:
        # No adapter given, we are looking for the adapter with highest priority level that can read an/the annotation
        # file
        adapters = list(adapt.ANNOTATION_ADAPTERS.values())
        adapterPriority = -1
        for f in imageFiles:
            for a in adapters:
                if a.canRead(os.path.join(rawDatasetPath, f)):
                    if a.getPriorityLevel() > adapterPriority:
                        adapterPriority = a.getPriorityLevel()
                        adapter = a
                        file = f
    else:
        # Using given adapter, we are looking for a file that can be read
        file = None
        for f in imageFiles:
            if adapter.canRead(os.path.join(rawDatasetPath, f)) and file is None:
                file = f
    # Getting the masks data: iterable of (class, polygon points) pairs
    masks = adapter.readFile(os.path.join(rawDatasetPath, file))
    # Creating masks
    for noMask, (datasetClass, maskPoints) in enumerate(masks):
        # Converting class id to class name if needed
        if type(datasetClass) is int:
            # Fast path: classesInfo indexed by id; fallback: linear search
            if datasetClass < len(classesInfo) and classesInfo[datasetClass]["id"] == datasetClass:
                maskClass = classesInfo[datasetClass]["name"]
            else:
                for classInfo in classesInfo:
                    if classInfo["id"] == datasetClass:
                        maskClass = classInfo["name"]
                        break
        else:
            maskClass = datasetClass
            if maskClass == "None":
                print(f" /!\\ {imgName} : None class present /!\\ ")
        if resize is not None:
            resizedMasks = resizeMasks(maskPoints, xRatio, yRatio)
        createMask(imgName, shape, noMask, maskPoints if resize is None else resizedMasks, datasetName, maskClass,
                   imageFormat, config=config)
def fuseClassMasks(datasetPath: str, imageName: str, targetedClass: [str, list], imageFormat="jpg",
                   deleteBaseMasks=False, silent=False):
    """
    Fuse each targeted-class masks into one
    :param datasetPath: the dataset that have been wrapped
    :param imageName: the image you want its targeted-class to be fused
    :param targetedClass: the class (or list of classes) of the masks that have to be fused
    :param imageFormat: format to use to save the final targeted-class masks
    :param deleteBaseMasks: delete the base masks images after fusion
    :param silent: if True will not print
    :return: None
    """
    if type(targetedClass) is str:
        targetedClasses = [targetedClass]
    else:
        targetedClasses = targetedClass
    # Getting the image directory path; the base image gives the full-resolution shape
    imageDir = os.path.join(datasetPath, imageName)
    imagePath = os.path.join(datasetPath, imageName, "images")
    imagePath = os.path.join(imagePath, os.listdir(imagePath)[0])
    image = cv2.imread(imagePath)
    for aClass in targetedClasses:
        classDir = os.path.join(imageDir, aClass)
        if os.path.exists(classDir):
            listClassImages = os.listdir(classDir)
            if not silent:
                print(f"Fusing {imageName} {aClass} class masks")
            # Start from the first mask, then OR-add the others into it
            fusion = loadSameResImage(os.path.join(classDir, listClassImages[0]), imageShape=image.shape)
            listClassImages.remove(listClassImages[0])
            for maskName in listClassImages:  # Adding each mask to the same image
                maskPath = os.path.join(classDir, maskName)
                mask = loadSameResImage(maskPath, imageShape=image.shape)
                fusion = cv2.add(fusion, mask)
            # Saving the fused mask image as <image>_<class>.<format>
            cv2.imwrite(os.path.join(classDir, f"{imageName}_{aClass}.{imageFormat}"), fusion, CV2_IMWRITE_PARAM)
            if deleteBaseMasks:
                for maskName in os.listdir(classDir):  # Deleting each targeted-class mask except the fused one
                    if f'_{aClass}.{imageFormat}' not in maskName:
                        maskPath = os.path.join(classDir, maskName)
                        os.remove(maskPath)
def cleanFusedClassDir(datasetPath: str, fusedClass):
    """
    Clean every fused-class directory of the dataset, keeping only a unique file or the fused one.
    :param datasetPath: the dataset that has been wrapped
    :param fusedClass: the class whose directories must be cleaned
    :return: None
    """
    fusedTag = f'_{fusedClass}'
    for imageDir in os.listdir(datasetPath):
        classDirPath = os.path.join(datasetPath, imageDir, fusedClass)
        if not os.path.exists(classDirPath):
            continue
        maskFiles = os.listdir(classDirPath)
        # Clean only when several masks exist and the fused one is among them
        if len(maskFiles) > 1 and any(fusedTag in name for name in maskFiles):
            # Delete every fused-class mask except the fused one
            for name in os.listdir(classDirPath):
                if fusedTag not in name:
                    os.remove(os.path.join(classDirPath, name))
def cleanImage(datasetPath: str, imageName: str, cleaningClasses: str, excludeClasses=None, imageFormat="jpg",
               cleanMasks=False, minAreaThreshold=300, config: Config = None):
    """
    Creating the full_images directory and cleaning the base image by removing non-cleaning-class areas
    :param datasetPath: the dataset that have been wrapped
    :param imageName: the image name
    :param cleaningClasses: the class(es) to use to clean the image
    :param excludeClasses: class(es) whose areas are removed from the kept area (may be None)
    :param imageFormat: the image format to use to save the image
    :param cleanMasks: if true, will clean masks based on the cleaning-class-mask
    :param minAreaThreshold: remove mask if its area is smaller than this threshold
    :param config: config object
    :return: None
    """
    assert cleaningClasses is not None and cleaningClasses != "", "Cleaning class is required."
    if type(cleaningClasses) is str:
        cleaningClasses = [cleaningClasses]
    if type(excludeClasses) is str:
        excludeClasses = [excludeClasses]
    # Getting the base image
    path = os.path.join(datasetPath, imageName, '{folder}', f"{imageName}.{imageFormat}")
    imagePath = path.format(folder='images')
    fullImagePath = path.format(folder='full_images')
    image = cv2.imread(imagePath)
    # Fusing all the cleaning-class masks and then cleaning the image and if needed the masks
    cleaningClassMasks = gatherClassesMasks(datasetPath, imageName, image.shape, cleaningClasses)
    if excludeClasses is None:
        excludedClassMasks = None
    else:
        excludedClassMasks = gatherClassesMasks(datasetPath, imageName, image.shape, excludeClasses)
    if cleaningClassMasks is not None or excludedClassMasks is not None:
        if cleaningClassMasks is None:
            # No cleaning mask found: start from a full-white mask (keep everything)
            cleaningClassMasks = np.ones_like(image)[..., 0] * 255
        if excludedClassMasks is not None:
            # Remove the excluded-class areas from the kept area
            excludedClassMasks = cv2.bitwise_not(excludedClassMasks)
            cleaningClassMasks = cv2.bitwise_and(cleaningClassMasks, excludedClassMasks)
        # Copying the full image into the correct directory
        os.makedirs(os.path.dirname(fullImagePath), exist_ok=True)
        shutil.copy2(imagePath, fullImagePath)
        # Cleaning the image and saving it
        image = cv2.bitwise_and(image, np.repeat(cleaningClassMasks[:, :, np.newaxis], 3, axis=2))
        cv2.imwrite(imagePath, image, CV2_IMWRITE_PARAM)
        # Cleaning masks so that they cannot exist elsewhere
        if cleanMasks:
            folderToRemove = []
            for folder in os.listdir(os.path.join(datasetPath, imageName)):
                folderPath = os.path.join(datasetPath, imageName, folder)
                # Checking only for the other classes folder
                skipClasses = ["images", "full_images"]
                skipClasses.extend(cleaningClasses)
                # BUG FIX: excludeClasses may be None; extend(None) raised a TypeError
                skipClasses.extend(excludeClasses or [])
                if os.path.isdir(folderPath) and folder not in skipClasses:
                    # For each mask of the folder
                    for maskImageFileName in os.listdir(folderPath):
                        maskImagePath = os.path.join(folderPath, maskImageFileName)
                        mask = loadSameResImage(maskImagePath, image.shape)
                        areaBefore = getBWCount(mask)[1]
                        # If mask is not empty
                        if areaBefore > 0:
                            # Cleaning it with the cleaning-class masks
                            mask = cv2.bitwise_and(mask, cleaningClassMasks)
                            areaAfter = getBWCount(mask)[1]
                        else:
                            areaAfter = areaBefore
                        # If mask was empty or too small after cleaning, we remove it
                        if areaBefore == 0 or areaAfter < minAreaThreshold:
                            os.remove(maskImagePath)
                        elif areaBefore != areaAfter:
                            # If mask is different after cleaning, we replace the original one
                            try:
                                try:
                                    idMask = int(maskImageFileName.split('.')[0].split('_')[1])
                                except ValueError:
                                    # If we could not retrieve the original mask ID, give it a unique one
                                    idMask = int(time())
                                # If mini-masks are enabled, we minimize it before saving it
                                bbox_coordinates = ""
                                if config is not None and config.is_using_mini_mask():
                                    bbox = extract_bboxes(mask)
                                    mask = minimize_mask(bbox, mask, config.get_mini_mask_shape())
                                    mask = mask.astype(np.uint8) * 255
                                    y1, x1, y2, x2 = bbox
                                    bbox_coordinates = f"_{y1}_{x1}_{y2}_{x2}"
                                # Saving cleaned mask
                                outputName = f"{imageName}_{idMask:03d}{bbox_coordinates}.{imageFormat}"
                                cv2.imwrite(os.path.join(folderPath, outputName), mask, CV2_IMWRITE_PARAM)
                                if outputName != maskImageFileName:  # Remove former mask if not the same name
                                    os.remove(maskImagePath)
                            except Exception:
                                print(f"Error on {maskImagePath} update")
                    if len(os.listdir(folderPath)) == 0:
                        folderToRemove.append(folderPath)
            for folderPath in folderToRemove:
                shutil.rmtree(folderPath, ignore_errors=True)
def gatherClassesMasks(datasetPath, imageName, img_shape, gatheredClasses):
    """
    Merge all masks of the given classes into a single full-resolution mask image.
    :param datasetPath: the dataset that has been wrapped
    :param imageName: the image whose class masks are gathered
    :param img_shape: shape of the base image, used to expand minimized masks
    :param gatheredClasses: class name or list of class names to merge
    :return: the merged mask image, or None if no mask was found
    """
    if type(gatheredClasses) is str:
        gatheredClasses = [gatheredClasses]
    gatheredMaskFound = False
    gatheredClassMasks = None
    for gatheredClass_ in gatheredClasses:
        gatheredClassDirPath = os.path.join(datasetPath, imageName, gatheredClass_)
        gatheredClassExists = os.path.exists(gatheredClassDirPath)
        if gatheredClassExists and len(os.listdir(gatheredClassDirPath)) > 0:
            for gatheredClassMaskName in os.listdir(gatheredClassDirPath):
                gatheredClassMaskPath = os.path.join(gatheredClassDirPath, gatheredClassMaskName)
                if not gatheredMaskFound:
                    # First mask found is loaded at full resolution and used as accumulator
                    gatheredClassMasks = loadSameResImage(gatheredClassMaskPath, img_shape)
                    gatheredMaskFound = True
                else:  # Adding additional masks: OR-merge only the bbox area to save memory
                    temp, tempBbox = loadOnlyMask(gatheredClassMaskPath, img_shape)
                    y1, x1, y2, x2 = tempBbox
                    gatheredClassMasks[y1:y2, x1:x2] = cv2.bitwise_or(gatheredClassMasks[y1:y2, x1:x2], temp)
                    del temp
    return gatheredClassMasks
def loadSameResImage(imagePath, imageShape):
    """
    Load a mask image, expanding it to the full image resolution if it was stored minimized.
    :param imagePath: path to the mask image
    :param imageShape: shape of the reference image
    :return: the mask as a full-resolution uint8 image
    """
    loaded = cv2.imread(imagePath, cv2.IMREAD_UNCHANGED)
    sameRes = loaded.shape[0] == imageShape[0] and loaded.shape[1] == imageShape[1]
    if not sameRes:
        # Mini-mask: its bbox is encoded in the file name, expand back to full size
        bbox = getBboxFromName(imagePath)
        loaded = expand_mask(bbox, loaded, image_shape=imageShape)
        loaded = loaded.astype(np.uint8) * 255
    return loaded
def loadOnlyMask(imagePath, imageShape):
    """
    Load a mask and return only its bbox-cropped area together with the bbox.
    :param imagePath: path to the mask image
    :param imageShape: shape of the reference image
    :return: (cropped mask, bbox as [y1, x1, y2, x2])
    """
    mask = cv2.imread(imagePath, cv2.IMREAD_UNCHANGED)
    isMini = mask.shape[0] != imageShape[0] or mask.shape[1] != imageShape[1]
    if isMini:
        # Minimized mask: its bbox is encoded in the file name
        bbox = getBboxFromName(imagePath)
        shifted = shift_bbox(bbox)
        # Expanding mask to its real (bbox-sized) resolution
        mask = expand_mask(shifted, mask, image_shape=shifted[2:])
        mask = mask.astype(np.uint8) * 255
        y1, x1, y2, x2 = shifted
    else:
        # Full-resolution mask: compute its bbox directly
        bbox = extract_bboxes(mask)
        y1, x1, y2, x2 = bbox
    return mask[y1:y2, x1:x2, ...], bbox
def convertImage(inputImagePath: str, outputImagePath: str):
    """
    Convert an image file from one format to another.
    :param inputImagePath: path to the initial image
    :param outputImagePath: path to the output image
    :return: None
    """
    # skimage reads RGB while OpenCV writes BGR, hence the channel swap
    bgrImage = cv2.cvtColor(imread(inputImagePath), cv2.COLOR_RGB2BGR)
    cv2.imwrite(outputImagePath, bgrImage, CV2_IMWRITE_PARAM)
def getInfoRawDataset(rawDatasetPath: str, verbose=False, adapter: AnnotationAdapter = None, mainFormat="jpg"):
    """
    Listing all available images, those with missing information
    :param rawDatasetPath: path to the raw dataset folder
    :param verbose: whether or not print should be executed
    :param adapter: if given, only that adapter's annotation format is searched for
    :param mainFormat: the format to use for the dataset; other formats get converted to it
    :return: list of unique files names, list of available images names, list of missing images names,
             list of missing annotations names
    """
    names = []
    images = []  # list of image that can be used to compute masks
    missingImages = []  # list of missing images
    missingAnnotations = []  # list of missing annotations
    inputFormats = ["jpg", "jp2", "png"]
    if adapter is None:
        annotationFormats = adapt.ANNOTATION_FORMAT
    else:
        annotationFormats = [adapter.getAnnotationFormat()]
    fileList = os.listdir(rawDatasetPath)
    if verbose:
        progressBar(0, len(fileList), "Listing files")
    for idx, file in enumerate(fileList):
        # Progress is refreshed every 10 files and on the last one
        if verbose and (idx % 10 == 0 or idx + 1 == len(fileList)):
            lastIdx = idx
            progressBar(idx + 1, len(fileList), "Listing files")
        name = file.split('.')[0]
        if name not in names:  # We want to do this only once per unique file name (without extension)
            names.append(name)
            # Collecting all image formats present for that name
            availableFormat = []
            for format in inputFormats:
                imgPath = os.path.join(rawDatasetPath, f"{name}.{format}")
                if os.path.exists(imgPath):
                    availableFormat.append(format)
            # Testing if annotation file exists for that name
            annotationsExist = False
            for ext in annotationFormats:
                annotationsExist = annotationsExist or os.path.exists(os.path.join(rawDatasetPath, f"{name}.{ext}"))
            if len(availableFormat) > 0:  # At least one image exists
                if not annotationsExist:  # Annotations are missing
                    missingAnnotations.append(name)
                else:
                    if mainFormat not in availableFormat:
                        # Convert the first available format to the main one
                        for format in inputFormats:
                            if format in availableFormat:
                                sourcePath = os.path.join(rawDatasetPath, f"{name}.{format}")
                                outputPath = os.path.join(rawDatasetPath, f"{name}.{mainFormat}")
                                convertImage(sourcePath, outputPath)
                                break
                    images.append(name)  # Adding this image to the list
            elif annotationsExist:  # There is no image file but xml found
                missingImages.append(name)
    if verbose:
        if lastIdx + 1 != len(fileList):
            progressBar(1, 1, "Listing files")
    # Displaying missing image files
    problem = False
    nbMissingImg = len(missingImages)
    if nbMissingImg > 0:
        problem = True
        print('Missing {} image{} : {}'.format(nbMissingImg, 's' if nbMissingImg > 1 else '', missingImages))
    # Displaying missing annotations files
    nbMissingAnnotations = len(missingAnnotations)
    if nbMissingAnnotations > 0:
        problem = True
        print('Missing {} annotation{} : {}'.format(nbMissingAnnotations, 's' if nbMissingAnnotations > 1 else '',
                                                    missingAnnotations))
    # Checking if there is file that is not image nor annotation
    nbImages = len(images)
    if len(names) - nbMissingImg - nbMissingAnnotations - nbImages != 0:
        problem = True
        print('Be careful, there are not only required dataset files in this folder')
    if not problem:
        print("Raw Dataset has no problem. Number of Images : {}".format(nbImages))
    return names, images, missingImages, missingAnnotations
def startWrapper(rawDatasetPath: str, datasetName: str = 'dataset_train', deleteBaseMasks=False,
                 adapter: AnnotationAdapter = None, resize=None, mode="main", classesInfo=None, imageFormat="jpg"):
    """
    Start wrapping the raw dataset into the wanted format
    :param rawDatasetPath: path to the folder containing images and associated annotations
    :param datasetName: name of the output dataset
    :param deleteBaseMasks: delete the base masks images after fusion
    :param adapter: Adapter to use to read annotations, if None compatible adapter will be searched
    :param resize: If tuple given, the images and their masks will be resized to the tuple value
    :param mode: Mode to use
    :param classesInfo: Information about the classes that will be used
    :param imageFormat: the image format to use in the dataset
    :return: None
    """
    names, images, missingImages, missingAnnotations = getInfoRawDataset(
        rawDatasetPath, verbose=True, adapter=adapter, mainFormat=imageFormat
    )
    # No explicit classes description: fall back to the default set for this mode
    # (an unknown mode leaves classesInfo as None, like the original elif chain).
    if classesInfo is None:
        defaultClassesByMode = {
            'main': NEPHRO_CLASSES,
            'cortex': CORTICES_CLASSES,
            'mest_glom': MESTC_GLOM_CLASSES,
            'inflammation': INFLAMMATION_CLASSES,
        }
        classesInfo = defaultClassesByMode.get(mode)
    # Creating masks for any image which has all required files and displaying progress
    startTime = time()
    for idx, imgFile in enumerate(images):
        progressBar(idx, len(images), prefix='Creating masks',
                    suffix=f" {formatTime(round(time() - startTime))} Current : {imgFile}")
        createMasksOfImage(rawDatasetPath, imgFile, datasetName, adapter, classesInfo=classesInfo,
                           resize=resize, imageFormat=imageFormat)
        # Mode-specific post-processing of the generated masks
        if mode == "main":
            fuseClassMasks(datasetName, imgFile, "cortex", deleteBaseMasks=deleteBaseMasks, silent=True)
            cleanImage(datasetName, imgFile, cleaningClasses='cortex')
        elif mode == "mest_glom":
            cleanImage(datasetName, imgFile, cleaningClasses='nsg', cleanMasks=True)
        elif mode == "inflammation":
            cleanImage(datasetName, imgFile, cleaningClasses='cortex', cleanMasks=True,
                       excludeClasses=[c["name"] for c in NEPHRO_CLASSES if not c["ignore"]])
    progressBar(1, 1, prefix='Creating masks', suffix=f"{formatTime(round(time() - startTime))}" + " " * 25)
| [
"numpy.int32",
"datasetTools.datasetDivider.getBWCount",
"numpy.array",
"cv2.bitwise_or",
"os.remove",
"os.path.exists",
"mrcnn.utils.extract_bboxes",
"os.listdir",
"numpy.repeat",
"shutil.copy2",
"mrcnn.utils.expand_mask",
"os.path.isdir",
"common_utils.progressBar",
"cv2.add",
"cv2.fil... | [((2824, 2837), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (2832, 2837), True, 'import numpy as np\n'), ((2847, 2866), 'numpy.amin', 'np.amin', (['temp[:, 0]'], {}), '(temp[:, 0])\n', (2854, 2866), True, 'import numpy as np\n'), ((2876, 2895), 'numpy.amax', 'np.amax', (['temp[:, 0]'], {}), '(temp[:, 0])\n', (2883, 2895), True, 'import numpy as np\n'), ((2905, 2924), 'numpy.amin', 'np.amin', (['temp[:, 1]'], {}), '(temp[:, 1])\n', (2912, 2924), True, 'import numpy as np\n'), ((2934, 2953), 'numpy.amax', 'np.amax', (['temp[:, 1]'], {}), '(temp[:, 1])\n', (2941, 2953), True, 'import numpy as np\n'), ((3875, 3893), 'numpy.double', 'np.double', (['ptsMask'], {}), '(ptsMask)\n', (3884, 3893), True, 'import numpy as np\n'), ((3908, 3932), 'numpy.matrix.round', 'np.matrix.round', (['ptsMask'], {}), '(ptsMask)\n', (3923, 3932), True, 'import numpy as np\n'), ((3947, 3964), 'numpy.int32', 'np.int32', (['ptsMask'], {}), '(ptsMask)\n', (3955, 3964), True, 'import numpy as np\n'), ((5269, 5314), 'os.path.join', 'os.path.join', (['datasetName', 'imgName', 'maskClass'], {}), '(datasetName, imgName, maskClass)\n', (5281, 5314), False, 'import os\n'), ((8127, 8171), 'os.path.join', 'os.path.join', (['datasetName', 'imgName', '"""images"""'], {}), "(datasetName, imgName, 'images')\n", (8139, 8171), False, 'import os\n'), ((8504, 8530), 'os.listdir', 'os.listdir', (['rawDatasetPath'], {}), '(rawDatasetPath)\n', (8514, 8530), False, 'import os\n'), ((11470, 11506), 'os.path.join', 'os.path.join', (['datasetPath', 'imageName'], {}), '(datasetPath, imageName)\n', (11482, 11506), False, 'import os\n'), ((11523, 11569), 'os.path.join', 'os.path.join', (['datasetPath', 'imageName', '"""images"""'], {}), "(datasetPath, imageName, 'images')\n", (11535, 11569), False, 'import os\n'), ((11648, 11669), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (11658, 11669), False, 'import cv2\n'), ((13139, 13162), 'os.listdir', 'os.listdir', (['datasetPath'], 
{}), '(datasetPath)\n', (13149, 13162), False, 'import os\n'), ((15190, 15268), 'os.path.join', 'os.path.join', (['datasetPath', 'imageName', '"""{folder}"""', 'f"""{imageName}.{imageFormat}"""'], {}), "(datasetPath, imageName, '{folder}', f'{imageName}.{imageFormat}')\n", (15202, 15268), False, 'import os\n'), ((15380, 15401), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (15390, 15401), False, 'import cv2\n'), ((21209, 21252), 'cv2.imread', 'cv2.imread', (['imagePath', 'cv2.IMREAD_UNCHANGED'], {}), '(imagePath, cv2.IMREAD_UNCHANGED)\n', (21219, 21252), False, 'import cv2\n'), ((21544, 21587), 'cv2.imread', 'cv2.imread', (['imagePath', 'cv2.IMREAD_UNCHANGED'], {}), '(imagePath, cv2.IMREAD_UNCHANGED)\n', (21554, 21587), False, 'import cv2\n'), ((22386, 22408), 'skimage.io.imread', 'imread', (['inputImagePath'], {}), '(inputImagePath)\n', (22392, 22408), False, 'from skimage.io import imread\n'), ((22421, 22459), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (22433, 22459), False, 'import cv2\n'), ((22464, 22518), 'cv2.imwrite', 'cv2.imwrite', (['outputImagePath', 'image', 'CV2_IMWRITE_PARAM'], {}), '(outputImagePath, image, CV2_IMWRITE_PARAM)\n', (22475, 22518), False, 'import cv2\n'), ((23442, 23468), 'os.listdir', 'os.listdir', (['rawDatasetPath'], {}), '(rawDatasetPath)\n', (23452, 23468), False, 'import os\n'), ((28029, 28035), 'time.time', 'time', ([], {}), '()\n', (28033, 28035), False, 'from time import time\n'), ((4419, 4435), 'mrcnn.utils.shift_bbox', 'shift_bbox', (['bbox'], {}), '(bbox)\n', (4429, 4435), False, 'from mrcnn.utils import extract_bboxes, expand_mask, minimize_mask, get_bboxes_intersection, shift_bbox\n'), ((4537, 4585), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', '[ptsMask - shift[::-1]]', '(255)'], {}), '(mask, [ptsMask - shift[::-1]], 255)\n', (4549, 4585), False, 'import cv2\n'), ((4615, 4655), 'mrcnn.utils.shift_bbox', 'shift_bbox', (['kept_bbox'], {'customShift': 
'shift'}), '(kept_bbox, customShift=shift)\n', (4625, 4655), False, 'from mrcnn.utils import extract_bboxes, expand_mask, minimize_mask, get_bboxes_intersection, shift_bbox\n'), ((5121, 5155), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', '[ptsMask]', '(255)'], {}), '(mask, [ptsMask], 255)\n', (5133, 5155), False, 'import cv2\n'), ((5326, 5358), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (5340, 5358), False, 'import os\n'), ((5368, 5397), 'os.makedirs', 'os.makedirs', (['output_directory'], {}), '(output_directory)\n', (5379, 5397), False, 'import os\n'), ((5490, 5533), 'os.path.join', 'os.path.join', (['output_directory', 'output_name'], {}), '(output_directory, output_name)\n', (5502, 5533), False, 'import os\n'), ((7585, 7641), 'os.path.join', 'os.path.join', (['rawDatasetPath', 'f"""{imgName}.{imageFormat}"""'], {}), "(rawDatasetPath, f'{imgName}.{imageFormat}')\n", (7597, 7641), False, 'import os\n'), ((7971, 8025), 'cv2.resize', 'cv2.resize', (['img', 'resize'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, resize, interpolation=cv2.INTER_CUBIC)\n', (7981, 8025), False, 'import cv2\n'), ((8183, 8218), 'os.path.exists', 'os.path.exists', (['targetDirectoryPath'], {}), '(targetDirectoryPath)\n', (8197, 8218), False, 'import os\n'), ((8228, 8260), 'os.makedirs', 'os.makedirs', (['targetDirectoryPath'], {}), '(targetDirectoryPath)\n', (8239, 8260), False, 'import os\n'), ((9665, 9699), 'os.path.join', 'os.path.join', (['rawDatasetPath', 'file'], {}), '(rawDatasetPath, file)\n', (9677, 9699), False, 'import os\n'), ((11724, 11754), 'os.path.join', 'os.path.join', (['imageDir', 'aClass'], {}), '(imageDir, aClass)\n', (11736, 11754), False, 'import os\n'), ((11766, 11790), 'os.path.exists', 'os.path.exists', (['classDir'], {}), '(classDir)\n', (11780, 11790), False, 'import os\n'), ((13187, 13222), 'os.path.join', 'os.path.join', (['datasetPath', 'imageDir'], {}), '(datasetPath, imageDir)\n', (13199, 13222), False, 'import 
os\n'), ((13251, 13289), 'os.path.join', 'os.path.join', (['imageDirPath', 'fusedClass'], {}), '(imageDirPath, fusedClass)\n', (13263, 13289), False, 'import os\n'), ((13301, 13334), 'os.path.exists', 'os.path.exists', (['fusedClassDirPath'], {}), '(fusedClassDirPath)\n', (13315, 13334), False, 'import os\n'), ((16287, 16325), 'shutil.copy2', 'shutil.copy2', (['imagePath', 'fullImagePath'], {}), '(imagePath, fullImagePath)\n', (16299, 16325), False, 'import shutil\n'), ((16477, 16525), 'cv2.imwrite', 'cv2.imwrite', (['imagePath', 'image', 'CV2_IMWRITE_PARAM'], {}), '(imagePath, image, CV2_IMWRITE_PARAM)\n', (16488, 16525), False, 'import cv2\n'), ((20253, 20305), 'os.path.join', 'os.path.join', (['datasetPath', 'imageName', 'gatheredClass_'], {}), '(datasetPath, imageName, gatheredClass_)\n', (20265, 20305), False, 'import os\n'), ((20336, 20372), 'os.path.exists', 'os.path.exists', (['gatheredClassDirPath'], {}), '(gatheredClassDirPath)\n', (20350, 20372), False, 'import os\n'), ((21383, 21430), 'mrcnn.utils.expand_mask', 'expand_mask', (['bbox', 'mask'], {'image_shape': 'imageShape'}), '(bbox, mask, image_shape=imageShape)\n', (21394, 21430), False, 'from mrcnn.utils import extract_bboxes, expand_mask, minimize_mask, get_bboxes_intersection, shift_bbox\n'), ((21772, 21788), 'mrcnn.utils.shift_bbox', 'shift_bbox', (['bbox'], {}), '(bbox)\n', (21782, 21788), False, 'from mrcnn.utils import extract_bboxes, expand_mask, minimize_mask, get_bboxes_intersection, shift_bbox\n'), ((21880, 21931), 'mrcnn.utils.expand_mask', 'expand_mask', (['shifted', 'mask'], {'image_shape': 'shifted[2:]'}), '(shifted, mask, image_shape=shifted[2:])\n', (21891, 21931), False, 'from mrcnn.utils import extract_bboxes, expand_mask, minimize_mask, get_bboxes_intersection, shift_bbox\n'), ((22029, 22049), 'mrcnn.utils.extract_bboxes', 'extract_bboxes', (['mask'], {}), '(mask)\n', (22043, 22049), False, 'from mrcnn.utils import extract_bboxes, expand_mask, minimize_mask, 
get_bboxes_intersection, shift_bbox\n'), ((4107, 4159), 'mrcnn.utils.get_bboxes_intersection', 'get_bboxes_intersection', (['bbox', '[0, 0, *imgShape[:2]]'], {}), '(bbox, [0, 0, *imgShape[:2]])\n', (4130, 4159), False, 'from mrcnn.utils import extract_bboxes, expand_mask, minimize_mask, get_bboxes_intersection, shift_bbox\n'), ((4485, 4527), 'numpy.zeros', 'np.zeros', (['(shiftedBbox[2], shiftedBbox[3])'], {}), '((shiftedBbox[2], shiftedBbox[3]))\n', (4493, 4527), True, 'import numpy as np\n'), ((5075, 5111), 'numpy.zeros', 'np.zeros', (['(imgShape[0], imgShape[1])'], {}), '((imgShape[0], imgShape[1]))\n', (5083, 5111), True, 'import numpy as np\n'), ((8332, 8393), 'os.path.join', 'os.path.join', (['targetDirectoryPath', 'f"""{imgName}.{imageFormat}"""'], {}), "(targetDirectoryPath, f'{imgName}.{imageFormat}')\n", (8344, 8393), False, 'import os\n'), ((8983, 9017), 'datasetTools.AnnotationAdapter.ANNOTATION_ADAPTERS.values', 'adapt.ANNOTATION_ADAPTERS.values', ([], {}), '()\n', (9015, 9017), True, 'from datasetTools import AnnotationAdapter as adapt\n'), ((11610, 11631), 'os.listdir', 'os.listdir', (['imagePath'], {}), '(imagePath)\n', (11620, 11631), False, 'import os\n'), ((11822, 11842), 'os.listdir', 'os.listdir', (['classDir'], {}), '(classDir)\n', (11832, 11842), False, 'import os\n'), ((13371, 13400), 'os.listdir', 'os.listdir', (['fusedClassDirPath'], {}), '(fusedClassDirPath)\n', (13381, 13400), False, 'import os\n'), ((16027, 16062), 'cv2.bitwise_not', 'cv2.bitwise_not', (['excludedClassMasks'], {}), '(excludedClassMasks)\n', (16042, 16062), False, 'import cv2\n'), ((16096, 16151), 'cv2.bitwise_and', 'cv2.bitwise_and', (['cleaningClassMasks', 'excludedClassMasks'], {}), '(cleaningClassMasks, excludedClassMasks)\n', (16111, 16151), False, 'import cv2\n'), ((16232, 16262), 'os.path.dirname', 'os.path.dirname', (['fullImagePath'], {}), '(fullImagePath)\n', (16247, 16262), False, 'import os\n'), ((16409, 16467), 'numpy.repeat', 'np.repeat', 
(['cleaningClassMasks[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(cleaningClassMasks[:, :, np.newaxis], 3, axis=2)\n', (16418, 16467), True, 'import numpy as np\n'), ((20492, 20524), 'os.listdir', 'os.listdir', (['gatheredClassDirPath'], {}), '(gatheredClassDirPath)\n', (20502, 20524), False, 'import os\n'), ((25351, 25385), 'common_utils.progressBar', 'progressBar', (['(1)', '(1)', '"""Listing files"""'], {}), "(1, 1, 'Listing files')\n", (25362, 25385), False, 'from common_utils import progressBar, formatTime\n'), ((11974, 12016), 'os.path.join', 'os.path.join', (['classDir', 'listClassImages[0]'], {}), '(classDir, listClassImages[0])\n', (11986, 12016), False, 'import os\n'), ((12207, 12239), 'os.path.join', 'os.path.join', (['classDir', 'maskName'], {}), '(classDir, maskName)\n', (12219, 12239), False, 'import os\n'), ((12339, 12360), 'cv2.add', 'cv2.add', (['fusion', 'mask'], {}), '(fusion, mask)\n', (12346, 12360), False, 'import cv2\n'), ((12427, 12488), 'os.path.join', 'os.path.join', (['classDir', 'f"""{imageName}_{aClass}.{imageFormat}"""'], {}), "(classDir, f'{imageName}_{aClass}.{imageFormat}')\n", (12439, 12488), False, 'import os\n'), ((12581, 12601), 'os.listdir', 'os.listdir', (['classDir'], {}), '(classDir)\n', (12591, 12601), False, 'import os\n'), ((16680, 16716), 'os.path.join', 'os.path.join', (['datasetPath', 'imageName'], {}), '(datasetPath, imageName)\n', (16692, 16716), False, 'import os\n'), ((16748, 16792), 'os.path.join', 'os.path.join', (['datasetPath', 'imageName', 'folder'], {}), '(datasetPath, imageName, folder)\n', (16760, 16792), False, 'import os\n'), ((19897, 19942), 'shutil.rmtree', 'shutil.rmtree', (['folderPath'], {'ignore_errors': '(True)'}), '(folderPath, ignore_errors=True)\n', (19910, 19942), False, 'import shutil\n'), ((20566, 20623), 'os.path.join', 'os.path.join', (['gatheredClassDirPath', 'gatheredClassMaskName'], {}), '(gatheredClassDirPath, gatheredClassMaskName)\n', (20578, 20623), False, 'import os\n'), 
((24009, 24057), 'os.path.join', 'os.path.join', (['rawDatasetPath', 'f"""{name}.{format}"""'], {}), "(rawDatasetPath, f'{name}.{format}')\n", (24021, 24057), False, 'import os\n'), ((24077, 24100), 'os.path.exists', 'os.path.exists', (['imgPath'], {}), '(imgPath)\n', (24091, 24100), False, 'import os\n'), ((9137, 9168), 'os.path.join', 'os.path.join', (['rawDatasetPath', 'f'], {}), '(rawDatasetPath, f)\n', (9149, 9168), False, 'import os\n'), ((9530, 9561), 'os.path.join', 'os.path.join', (['rawDatasetPath', 'f'], {}), '(rawDatasetPath, f)\n', (9542, 9561), False, 'import os\n'), ((13891, 13920), 'os.listdir', 'os.listdir', (['fusedClassDirPath'], {}), '(fusedClassDirPath)\n', (13901, 13920), False, 'import os\n'), ((15917, 15936), 'numpy.ones_like', 'np.ones_like', (['image'], {}), '(image)\n', (15929, 15936), True, 'import numpy as np\n'), ((17032, 17057), 'os.path.isdir', 'os.path.isdir', (['folderPath'], {}), '(folderPath)\n', (17045, 17057), False, 'import os\n'), ((17184, 17206), 'os.listdir', 'os.listdir', (['folderPath'], {}), '(folderPath)\n', (17194, 17206), False, 'import os\n'), ((20412, 20444), 'os.listdir', 'os.listdir', (['gatheredClassDirPath'], {}), '(gatheredClassDirPath)\n', (20422, 20444), False, 'import os\n'), ((21037, 21091), 'cv2.bitwise_or', 'cv2.bitwise_or', (['gatheredClassMasks[y1:y2, x1:x2]', 'temp'], {}), '(gatheredClassMasks[y1:y2, x1:x2], temp)\n', (21051, 21091), False, 'import cv2\n'), ((5808, 5835), 'os.path.basename', 'os.path.basename', (['imageName'], {}), '(imageName)\n', (5824, 5835), False, 'import os\n'), ((12763, 12795), 'os.path.join', 'os.path.join', (['classDir', 'maskName'], {}), '(classDir, maskName)\n', (12775, 12795), False, 'import os\n'), ((12820, 12839), 'os.remove', 'os.remove', (['maskPath'], {}), '(maskPath)\n', (12829, 12839), False, 'import os\n'), ((17248, 17291), 'os.path.join', 'os.path.join', (['folderPath', 'maskImageFileName'], {}), '(folderPath, maskImageFileName)\n', (17260, 17291), False, 'import 
os\n'), ((24365, 24410), 'os.path.join', 'os.path.join', (['rawDatasetPath', 'f"""{name}.{ext}"""'], {}), "(rawDatasetPath, f'{name}.{ext}')\n", (24377, 24410), False, 'import os\n'), ((14022, 14063), 'os.path.join', 'os.path.join', (['fusedClassDirPath', 'maskName'], {}), '(fusedClassDirPath, maskName)\n', (14034, 14063), False, 'import os\n'), ((14092, 14111), 'os.remove', 'os.remove', (['maskPath'], {}), '(maskPath)\n', (14101, 14111), False, 'import os\n'), ((17405, 17421), 'datasetTools.datasetDivider.getBWCount', 'getBWCount', (['mask'], {}), '(mask)\n', (17415, 17421), False, 'from datasetTools.datasetDivider import getBWCount, CV2_IMWRITE_PARAM\n'), ((17623, 17664), 'cv2.bitwise_and', 'cv2.bitwise_and', (['mask', 'cleaningClassMasks'], {}), '(mask, cleaningClassMasks)\n', (17638, 17664), False, 'import cv2\n'), ((17997, 18021), 'os.remove', 'os.remove', (['maskImagePath'], {}), '(maskImagePath)\n', (18006, 18021), False, 'import os\n'), ((19747, 19769), 'os.listdir', 'os.listdir', (['folderPath'], {}), '(folderPath)\n', (19757, 19769), False, 'import os\n'), ((17705, 17721), 'datasetTools.datasetDivider.getBWCount', 'getBWCount', (['mask'], {}), '(mask)\n', (17715, 17721), False, 'from datasetTools.datasetDivider import getBWCount, CV2_IMWRITE_PARAM\n'), ((24837, 24885), 'os.path.join', 'os.path.join', (['rawDatasetPath', 'f"""{name}.{format}"""'], {}), "(rawDatasetPath, f'{name}.{format}')\n", (24849, 24885), False, 'import os\n'), ((24931, 24983), 'os.path.join', 'os.path.join', (['rawDatasetPath', 'f"""{name}.{mainFormat}"""'], {}), "(rawDatasetPath, f'{name}.{mainFormat}')\n", (24943, 24983), False, 'import os\n'), ((28191, 28197), 'time.time', 'time', ([], {}), '()\n', (28195, 28197), False, 'from time import time\n'), ((28999, 29005), 'time.time', 'time', ([], {}), '()\n', (29003, 29005), False, 'from time import time\n'), ((18832, 18852), 'mrcnn.utils.extract_bboxes', 'extract_bboxes', (['mask'], {}), '(mask)\n', (18846, 18852), False, 'from 
mrcnn.utils import extract_bboxes, expand_mask, minimize_mask, get_bboxes_intersection, shift_bbox\n'), ((19364, 19400), 'os.path.join', 'os.path.join', (['folderPath', 'outputName'], {}), '(folderPath, outputName)\n', (19376, 19400), False, 'import os\n'), ((19574, 19598), 'os.remove', 'os.remove', (['maskImagePath'], {}), '(maskImagePath)\n', (19583, 19598), False, 'import os\n'), ((18547, 18553), 'time.time', 'time', ([], {}), '()\n', (18551, 18553), False, 'from time import time\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import unittest
import topologic as tc
import numpy as np
import networkx as nx
class TestTSne(unittest.TestCase):
    def test_tsne_reduces_to_expected_dimensionality(self):
        # Build a small weighted barbell graph to embed.
        barbell = nx.barbell_graph(10, 2)
        for u, v in barbell.edges():
            barbell.add_edge(u, v, weight=1)
        target_dims = 3
        container = tc.embedding.adjacency_embedding(
            barbell,
            maximum_dimensions=5,
            svd_seed=1234
        )
        reduced = tc.embedding.tsne(
            embedding=container.embedding,
            num_components=target_dims,
            num_iterations=250
        )
        # The t-SNE-reduced embedding keeps one row per input row, while the
        # number of columns must match the requested number of components.
        self.assertIsNotNone(reduced)
        self.assertEqual(container.embedding.shape[0], reduced.shape[0])
        self.assertEqual(reduced.shape[1], target_dims)

    def test_tsne_num_components_not_specified_error_raised(self):
        # Passing None for num_components must be rejected.
        with self.assertRaises(ValueError):
            tc.embedding.tsne(np.array([1]), None)

    def test_tsne_embedding_not_specified_error_raised(self):
        # Passing None for the embedding must be rejected.
        with self.assertRaises(ValueError):
            tc.embedding.tsne(None, 1)
| [
"topologic.embedding.adjacency_embedding",
"numpy.array",
"topologic.embedding.tsne",
"networkx.barbell_graph"
] | [((267, 290), 'networkx.barbell_graph', 'nx.barbell_graph', (['(10)', '(2)'], {}), '(10, 2)\n', (283, 290), True, 'import networkx as nx\n'), ((435, 511), 'topologic.embedding.adjacency_embedding', 'tc.embedding.adjacency_embedding', (['graph'], {'maximum_dimensions': '(5)', 'svd_seed': '(1234)'}), '(graph, maximum_dimensions=5, svd_seed=1234)\n', (467, 511), True, 'import topologic as tc\n'), ((587, 695), 'topologic.embedding.tsne', 'tc.embedding.tsne', ([], {'embedding': 'container.embedding', 'num_components': 'expected_dimension', 'num_iterations': '(250)'}), '(embedding=container.embedding, num_components=\n expected_dimension, num_iterations=250)\n', (604, 695), True, 'import topologic as tc\n'), ((1408, 1434), 'topologic.embedding.tsne', 'tc.embedding.tsne', (['None', '(1)'], {}), '(None, 1)\n', (1425, 1434), True, 'import topologic as tc\n'), ((1268, 1281), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1276, 1281), True, 'import numpy as np\n')] |
import numpy as np
from numpy import ndarray
from notebooks.thresholders import BaseThresholder
class SimpleThresholder(BaseThresholder):
    """
    Tests each observation for the best cutoff point and then picks the median of that
    observation and the one before it.
    """

    def __init__(self, score_func):
        """
        :param score_func: The benchmarking function to use for choosing the cutoff.
        """
        self._score_func = score_func

    def _threshold(self, distances: ndarray, labels: ndarray) -> float:
        # Candidate cutoffs: every observed distance in ascending order, with a
        # -1.0 sentinel prepended so "flag everything" is also a candidate.
        candidates = np.insert(np.sort(distances), 0, -1.0)
        # flags[i, j] is True when distance j would be flagged under candidate i.
        flags = distances[None, :] > candidates[:, None]
        winner = np.argmax(self._score_func(flags, labels[None, :]))
        if winner == 0:
            # The -1.0 sentinel won: flag every observation.
            return -1.0
        if winner == len(candidates) - 1:
            # The greatest distance won: flag nothing.
            return float("inf")
        # Average the winning cutoff with its successor, since this should
        # generalize a bit better.
        return (candidates[winner] + candidates[winner + 1]) / 2
| [
"numpy.insert",
"numpy.sort",
"numpy.argmax"
] | [((566, 584), 'numpy.sort', 'np.sort', (['distances'], {}), '(distances)\n', (573, 584), True, 'import numpy as np\n'), ((678, 714), 'numpy.insert', 'np.insert', (['sorted_distances', '(0)', '(-1.0)'], {}), '(sorted_distances, 0, -1.0)\n', (687, 714), True, 'import numpy as np\n'), ((967, 984), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (976, 984), True, 'import numpy as np\n')] |
import torch
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from torch.autograd import Variable
from torch.autograd import grad
from nash_advreg import *
if __name__ == '__main__':
    # Experiment driver: compares plain ridge regression ("raw") against
    # Nash- and Bayes-based adversarially robust variants (train_* / attack /
    # predict / rmse come from nash_advreg) on clean vs. attacked test data.
    # Read wine dataset
    data = pd.read_csv("data/winequality-white.csv", sep = ";")
    X = data.loc[:, data.columns != "quality"]
    y = data.quality
    ## Full PCA: dimensionality unchanged, features decorrelated
    pca = PCA(n_components=X.shape[1], svd_solver='full')
    pca.fit(X)
    X = pca.fit_transform(X)
    ## Grid setup for the attacker-cost Gamma prior (only a fixed MEAN and a
    ## one-element VAR grid are active; the logspace grids are kept for reference)
    start = 0.01
    stop = 1.0
    grid_size = 10
    #MEAN_GRID = np.logspace(np.log10(start), np.log10(stop), num=grid_size)
    #MEAN_GRID = [0.8, 0.9, 1.0]
    MEAN = 0.5
    #VAR = 0.5
    #VAR_GRID = np.logspace(np.log10(start), np.log10(stop), num=grid_size)
    VAR_GRID = [0.01]
    ##
    N_EXP = 10 # For hold-out validation
    ## RMSE accumulators: one entry per hold-out repetition, for each of the
    ## three models on clean ("clean") and attacked ("at") test data
    rmse_raw_clean = np.zeros(N_EXP)
    rmse_nash_clean = np.zeros(N_EXP)
    rmse_bayes_clean = np.zeros(N_EXP)
    rmse_raw_at = np.zeros(N_EXP)
    rmse_nash_at = np.zeros(N_EXP)
    rmse_bayes_at = np.zeros(N_EXP)
    ##
    #for MEAN in MEAN_GRID:
    for VAR in VAR_GRID:
        for i in range(N_EXP):
            status = "MEAN: " + str(MEAN) + " VAR: " + str(VAR) + " EXP: " + str(i)
            print(status)
            X_train, y_train, X_test, y_test = create_train_test(X,y)
            # Gamma prior parameterized to have the requested MEAN and VAR:
            # shape = mean^2/var, rate = mean/var
            m = torch.distributions.Gamma(torch.tensor([MEAN**2/VAR]), torch.tensor([MEAN/VAR])) ## shape, rate
            ## Parameters
            # NOTE(review): the learning rates 10e-6 / 10e-4 equal 1e-5 / 1e-3 —
            # confirm these are the intended values and not typos for 1e-6 / 1e-4.
            params = {
                "epochs_rr"     : 350,
                "lr_rr"         : 0.01,
                "lmb"           : 0.0,
                "c_d_train"     : torch.ones([len(y_train), 1]) * MEAN,
                "z_train"       : torch.zeros([len(y_train),1]),
                "c_d_test"      : torch.ones([len(y_test), 1]) * MEAN,
                "z_test"        : torch.zeros([len(y_test),1]),
                "outer_lr"      : 10e-6,
                "inner_lr"      : 10e-4,
                "outer_epochs"  : 350,
                "inner_epochs"  : 200,
                "n_samples"     : 20,
                "prior"         : m
            }
            ## Train the three models, timing each one
            with timer(tag='raw'):
                w_rr = train_rr(X_train, y_train, params)
            ##
            with timer(tag='nash'):
                w_nash = train_nash_rr(X_train, y_train, params)
            ## Bayes variant: sample attacker costs from the prior for training
            c_d_train_bayes = params["prior"].sample(torch.Size([params["n_samples"], len(y_train)]))#.to("cuda")
            with timer(tag='bayes'):
                w_bayes = train_bayes_rr_test(X_train, y_train, c_d_train_bayes, params, verbose = False)
            ############################################################################################
            ######################  RMSE CALCULATION
            ############################################################################################
            # One fresh attacker-cost sample per repetition, shared by all models
            c_d_test = params["prior"].sample(torch.Size([1, len(y_test)]))[0]
            X_test_attacked = attack(X_test, w_rr, c_d_test, params["z_test"])
            pred_attacked = predict(X_test_attacked, w_rr)
            pred_clean = predict(X_test, w_rr)
            #
            rmse_raw_clean[i] = rmse( y_test, pred_clean )
            rmse_raw_at[i] = rmse( y_test, pred_attacked )
            #
            ## Same evaluation for the Nash model
            X_test_attacked = attack(X_test, w_nash, c_d_test, params["z_test"])
            pred_attacked = predict(X_test_attacked, w_nash)
            pred_clean = predict(X_test, w_nash)
            #
            rmse_nash_clean[i] = rmse( y_test, pred_clean )
            rmse_nash_at[i] = rmse( y_test, pred_attacked )
            ## Same evaluation for the Bayes model
            X_test_attacked = attack(X_test, w_bayes, c_d_test, params["z_test"])
            pred_attacked = predict(X_test_attacked, w_bayes)
            pred_clean = predict(X_test, w_bayes)
            #
            rmse_bayes_clean[i] = rmse( y_test, pred_clean )
            rmse_bayes_at[i] = rmse( y_test, pred_attacked )
        ##### Persist per-repetition results for this (MEAN, VAR) setting
        df = pd.DataFrame({"EXP":range(N_EXP), "raw_cleandata":rmse_raw_clean, "raw_atdata":rmse_raw_at,
            "nash_rawdata":rmse_nash_clean, "nash_atdata":rmse_nash_at, "bayes_rawdata":rmse_bayes_clean,
            "bayes_atdata":rmse_bayes_at})
        name = "results/"+"mean"+str(MEAN)+"var"+str(VAR)+".csv"
        df.to_csv(name, index=False)
| [
"sklearn.decomposition.PCA",
"torch.tensor",
"numpy.zeros",
"pandas.read_csv"
] | [((336, 386), 'pandas.read_csv', 'pd.read_csv', (['"""data/winequality-white.csv"""'], {'sep': '""";"""'}), "('data/winequality-white.csv', sep=';')\n", (347, 386), True, 'import pandas as pd\n'), ((474, 521), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'X.shape[1]', 'svd_solver': '"""full"""'}), "(n_components=X.shape[1], svd_solver='full')\n", (477, 521), False, 'from sklearn.decomposition import PCA\n'), ((938, 953), 'numpy.zeros', 'np.zeros', (['N_EXP'], {}), '(N_EXP)\n', (946, 953), True, 'import numpy as np\n'), ((976, 991), 'numpy.zeros', 'np.zeros', (['N_EXP'], {}), '(N_EXP)\n', (984, 991), True, 'import numpy as np\n'), ((1015, 1030), 'numpy.zeros', 'np.zeros', (['N_EXP'], {}), '(N_EXP)\n', (1023, 1030), True, 'import numpy as np\n'), ((1049, 1064), 'numpy.zeros', 'np.zeros', (['N_EXP'], {}), '(N_EXP)\n', (1057, 1064), True, 'import numpy as np\n'), ((1084, 1099), 'numpy.zeros', 'np.zeros', (['N_EXP'], {}), '(N_EXP)\n', (1092, 1099), True, 'import numpy as np\n'), ((1120, 1135), 'numpy.zeros', 'np.zeros', (['N_EXP'], {}), '(N_EXP)\n', (1128, 1135), True, 'import numpy as np\n'), ((1449, 1480), 'torch.tensor', 'torch.tensor', (['[MEAN ** 2 / VAR]'], {}), '([MEAN ** 2 / VAR])\n', (1461, 1480), False, 'import torch\n'), ((1478, 1504), 'torch.tensor', 'torch.tensor', (['[MEAN / VAR]'], {}), '([MEAN / VAR])\n', (1490, 1504), False, 'import torch\n')] |
# ********************************
#
# This file is used for a package, and will be imported in __init__.py
# By tangbin@2019-11-17
#
# ********************************
import copy
import os
import re
from pyltp import Segmentor
import numpy as np
import tensorflow as tf
from constants import EXP_DIR, LTP_DIR, BPE_CODEC_FILE, BPE_VOCAB_FILE, BPE_VOCAB_THRESHOLD
from model import MultiGPUModel
from run import predict_batch
from subword_nmt.apply_bpe import BPE, read_vocabulary
from utils import BertConfig
from utils.Batcher import PredictBatcher
from utils.Saver import Saver
from utils.data_loader import load_vocab
from constants import CLS_TOKEN, SEP_TOKEN, UNK_TOKEN, PAD_TOKEN
class Preprocessor:
    """
    Preprocessing utility for prediction. Intended scenario: when exposing a
    Python summarization API, the input text is preprocessed here before the
    model generates a summary.

    In this scenario the input is usually text that has already been read in.
    Since the Batcher is strongly coupled to the preprocessing results and not
    worth decoupling, the Preprocessor directly outputs a Batcher.

    Core method: preprocess
        Input:  list of str, texts to preprocess
        Output: Batcher object used to iterate over batches of data
    """

    def __init__(self, word2id, seq_length, codecs_file, bpe_vocab_file,
                 vocab_threshold=30, do_lower=False, batch_size=32):
        """
        Provide the resources used for preprocessing and initialize the
        resources needed by the BPE algorithm.
        :param word2id: vocabulary mapping token -> id
        :param seq_length: maximum sequence length
        :param codecs_file: codec file used by the BPE algorithm
        :param bpe_vocab_file: vocabulary file used by the BPE algorithm
        :param vocab_threshold: frequency threshold for the BPE vocabulary
        :param do_lower: whether to lowercase the input
        :param batch_size: batch size forwarded to the Batcher
        """
        print('Loading Segmentor Model...')
        self.segmentor = Segmentor()
        self.segmentor.load(os.path.join(LTP_DIR, 'cws.model'))
        with open(bpe_vocab_file, 'r', encoding='utf8') as f:
            vocabulary = read_vocabulary(f, vocab_threshold)
        with open(codecs_file, 'r', encoding='utf8') as f:
            self.bpe = BPE(f, -1, '@@', vocabulary)
        self.do_lower = do_lower
        self.word2id = word2id
        self.seq_length = seq_length
        self.batch_size = batch_size

    def segment_text(self, text, append_cls=False, append_sep=False):
        """
        Word segmentation.
        :param text: str, text to segment
        :param append_cls: whether to insert a <cls> tag at the beginning
        :param append_sep: whether to append a <sep> tag at the end
        :return: list, tokens after segmentation; paragraph breaks are encoded
                 as '<ln>' markers between paragraphs
        """
        text = re.sub('\n+', '\n', text)
        paragraphs = text.split('\n')
        result = []
        for p in paragraphs:
            shorts = p.split(' ')
            for short in shorts:
                result.extend(list(self.segmentor.segment(short)))
            result.append('<ln>')
        # Strip leading/trailing paragraph markers. The `result and` guard
        # prevents an IndexError on empty or whitespace-only input.
        while result and result[0] == '<ln>':
            result.pop(0)
        while result and result[-1] == '<ln>':
            result.pop(-1)
        if append_cls:
            result.insert(0, '<cls>')
        if append_sep:
            result.append('<sep>')
        return result

    def transform(self, subwords, use_wordpiece=False, substr_prefix='##'):
        """
        Convert the segmented texts into numpy arrays — tokens, token ids,
        masks, OOV bookkeeping, etc. — used as the Batcher's input.
        :param subwords: list of list of str(token), segmented texts
        :param use_wordpiece: use BERT's WordPiece algorithm to split the text
                              (currently not supported; kept for compatibility)
        :param substr_prefix: WordPiece separator, only effective when
                              use_wordpiece=True
        :return: tuple of numpy arrays
                 (tokens, ids, ids_extend, mask, oov_size, oovs)
        """
        vocab = self.word2id
        seq_length = self.seq_length
        tokens = []
        mask = []
        ids = []
        ids_extend = []
        oovs = []
        oov_size = []
        for l in subwords:
            oov = []
            tmp_token = [CLS_TOKEN]
            tmp_extend = [vocab[CLS_TOKEN]]
            tmp = [vocab[CLS_TOKEN]]
            # Reserve two slots for the CLS/SEP wrappers
            for w in l[:seq_length - 2]:
                tmp_token.append(w)
                if w in vocab:
                    tmp.append(vocab[w])
                    tmp_extend.append(vocab[w])
                elif w in oov:
                    # Already-seen OOV of this example: reuse its extended id
                    tmp.append(vocab[UNK_TOKEN])
                    tmp_extend.append(len(vocab) + oov.index(w))
                else:
                    # New OOV: register it and assign the next extended id
                    oov.append(w)
                    tmp.append(vocab[UNK_TOKEN])
                    tmp_extend.append(len(vocab) + oov.index(w))
            tmp_token.append(SEP_TOKEN)
            tmp_extend.append(vocab[SEP_TOKEN])
            tmp.append(vocab[SEP_TOKEN])
            # 1 for real tokens, 0 for padding, truncated to seq_length
            mask.append(([1] * len(tmp_extend) + [0] * (seq_length - len(tmp_extend)))[:seq_length])
            tmp_extend += [0] * (seq_length - len(tmp_extend))
            tmp += [0] * (seq_length - len(tmp))
            tokens.append(tmp_token)
            ids_extend.append(tmp_extend[:seq_length])
            ids.append(tmp[:seq_length])
            oovs.append(oov)
            oov_size.append(len(oov))
        # tokens and oovs are ragged (per-example lengths differ), so they need
        # an explicit object dtype — numpy >= 1.24 raises ValueError otherwise.
        return (np.array(tokens, dtype=object), np.array(ids), np.array(ids_extend),
                np.array(mask), np.array(oov_size), np.array(oovs, dtype=object))

    def apply_bpe(self, tokens):
        """
        Further split the segmented tokens into subwords with the BPE algorithm.
        :param tokens: tokens after word segmentation
        :return: subwords produced by the BPE algorithm
        """
        result = self.bpe.segment_tokens(tokens=tokens)
        return result

    def preprocess(self, texts):
        """
        Run the full preprocessing pipeline. Batch processing is recommended.
        :param texts: str or list of str, texts to process
        :return: a PredictBatcher used to iterate over the data batch by batch
        """
        if type(texts) == str:
            texts = [texts]
        # Normalize half-width commas to full-width before segmentation
        texts = [self.apply_bpe(self.segment_text(t.replace(',', ','))) for t in texts]
        src_tokens, src_ids, src_ids_extend, src_mask, src_oov_size, src_oovs = self.transform(texts)
        batcher = PredictBatcher(x_token=src_tokens,
                                x_ids=src_ids,
                                x_ids_extend=src_ids_extend,
                                x_mask=src_mask,
                                oov_size=src_oov_size,
                                oovs=src_oovs,
                                batch_size=self.batch_size)
        return batcher
class LongTextSummarizer:
    """
    Long-text summarization class exposing a Python interface.

    Dependencies: hyper-parameter ``config`` including the checkpoint path
    (must be a ckpt file path or a directory containing a valid checkpoint
    file), plus everything Preprocessor needs.  Instances are usually
    created via :meth:`get_long_summarizer`.
    Input: list of str, source texts.
    Output: list of str, generated summaries.
    """
    def __init__(self, config, ckpt_path, ckpt_file=None):
        # deep-copy so later mutation of the caller's config cannot leak in
        self.config = copy.deepcopy(config)
        self.model = None
        self.saver = None
        self.session = None
        self.ckpt_path = ckpt_path
        self.ckpt_file = ckpt_file
        self.word2id, self.id2word = None, None
        self.preprocessor = None
        # raw string: '\[' in a plain literal is an invalid escape sequence
        # (DeprecationWarning/SyntaxWarning in modern Python)
        self.tag_pattern = re.compile(r'\[(SEP|CLS|UNK|PAD)\]')
        self.ready = False
    def initialize(self):
        """Load vocabulary and preprocessor, build the model and open a TF
        session.  Heavy one-time setup; predict() calls it lazily."""
        print('[INFO] Initialization Started.')
        print('[INFO] Vocabulary Loading.')
        self.word2id, self.id2word = load_vocab(self.config.vocab_file, do_lower=self.config.do_lower)
        print('[INFO] Preprocessor Loading.')
        self.preprocessor = Preprocessor(word2id=self.word2id,
                                         seq_length=self.config.encoder_seq_length,
                                         codecs_file=BPE_CODEC_FILE,
                                         bpe_vocab_file=BPE_VOCAB_FILE,
                                         vocab_threshold=BPE_VOCAB_THRESHOLD,
                                         do_lower=False,
                                         batch_size=self.config.batch_size)
        print('[INFO] MultiGPUModel building.')
        self.saver = Saver(ckpt_dir=self.ckpt_path)
        self.model = MultiGPUModel(config=self.config, num_gpus=1, copy_config=False)
        self.model.build(is_training=False)
        print('[INFO] Variables Initializing.')
        self.session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        self.saver.initialize_variables(ckpt_path=self.ckpt_file)
        # NOTE(review): global_variables_initializer() runs AFTER the saver
        # restores the checkpoint — confirm it does not clobber restored
        # weights (Saver.initialize_variables semantics not visible here).
        self.session.run(tf.global_variables_initializer())
        print('[INFO] Initialization Finished.')
        self.ready = True
    def _preprocess_get_batcher(self, texts):
        # thin wrapper kept as a seam for testing/overriding
        return self.preprocessor.preprocess(texts=texts)
    def predict(self, texts, clean_tags=True):
        """
        Summarize *texts*.

        :param texts: str or list of str, source documents
        :param clean_tags: strip [SEP]/[CLS]/[UNK]/[PAD] markers from output
        :return: list of str, generated summaries
        """
        if not self.ready:
            self.initialize()
        batcher = self._preprocess_get_batcher(texts)
        print('[Info] Start Generating...')
        hyp = predict_batch(
            model=self.model, sess=self.session, eval_batcher=batcher,
            seq_length=self.config.decoder_seq_length, word2id=self.word2id, id2word=self.id2word,
            use_pointer=self.config.use_pointer, substr_prefix=self.config.substr_prefix,
        )
        if clean_tags:
            for i, cand in enumerate(hyp['cand']):
                hyp['cand'][i] = self.tag_pattern.sub('', cand).strip()
        print('[Info] Finished.')
        return hyp['cand']
    @staticmethod
    def get_long_summarizer(ckpt_dir='checkpoint_2019-11-24-22-10-14', ckpt_file=None, init_now=False):
        """
        Factory: build a LongTextSummarizer from a checkpoint under EXP_DIR.

        :param ckpt_dir: checkpoint directory name below EXP_DIR
        :param ckpt_file: specific checkpoint file, or None for the latest
        :param init_now: eagerly run initialize() instead of lazily
        """
        ckpt_path = os.path.join(EXP_DIR, ckpt_dir)
        assert os.path.exists(ckpt_path) and tf.train.latest_checkpoint(ckpt_path) is not None \
            or os.path.isfile(ckpt_path)
        hyper_parameter_filepath = Saver.parse_hyper_parameter_filepath(ckpt_path=ckpt_path)
        config = BertConfig.from_json_file(hyper_parameter_filepath)
        summarizer = LongTextSummarizer(config=config, ckpt_path=ckpt_path, ckpt_file=ckpt_file)
        if init_now:
            summarizer.initialize()
        return summarizer
def test():
    """Smoke test: summarize bundled news files plus two inline articles."""
    def get_texts():
        # Read ./news_text/0.txt, 1.txt, ... until the next file is missing.
        # The original never advanced `x`/`fp`, so it looped forever as soon
        # as 0.txt existed.
        text = []
        x = 0
        fp = './news_text/%d.txt' % x
        while os.path.exists(fp):
            with open(fp, 'r', encoding='utf8') as f:
                t = ''.join(f.readlines())
            text.append(t)
            x += 1
            fp = './news_text/%d.txt' % x
        return text
    summarizer = LongTextSummarizer.get_long_summarizer(ckpt_file='best-3')
    texts = get_texts()
    texts += [
        '''央视网消息(新闻联播):中共中央总书记、国家主席习近平11月25日在人民大会堂会见由党的最高委员会主席格雷兹洛夫率领的俄罗斯统一俄罗斯党代表团。
习近平祝贺统一俄罗斯党成功举行第十九次代表大会,欢迎格雷兹洛夫率团来华参加中俄执政党对话机制第七次会议。习近平表示,今年是中俄建交70周年,我和普京总统共同宣布发展中俄新时代全面战略协作伙伴关系,共同引领两国关系朝着守望相助、深度融通、开拓创新、普惠共赢的目标和方向迈进。当前中俄双方形成有力战略支撑,对维护世界和平与发展具有重大战略意义。
习近平表示,中国共产党和统一俄罗斯党作为各自国家的执政党,肩负着推动新时代中俄关系取得更大发展的责任和使命。希望双方与会代表充分交流沟通,为深化中俄新时代全面战略协作、促进世界和平与繁荣贡献智慧和力量。
格雷兹洛夫祝贺中共十九届四中全会成功举行,感谢习近平会见统一俄罗斯党代表团,表示近年来俄中关系达到了前所未有的高水平,两国各领域互利合作蓬勃发展,在解决国际及地区热点问题中协同努力,取得良好成效。统一俄罗斯党愿与中国共产党加强交流合作,推动俄中新时代全面战略协作伙伴关系进一步发展。''',
        '''美国国会近日通过所谓“2019年香港人权与民主法案”,粗暴干涉中国内政,公然为激进暴力犯罪分子打气,企图再次通过煽动暴乱来祸害他国。“天下苦美久矣”,美方一些人长期以来实行霸权主义与强权政治,令国际社会深恶痛绝。他们此次把黑手伸向香港、为暴行开“绿灯”的恶劣行径,遭到国际社会强烈谴责。
大量事实表明,美国插手香港事务不是一天两天了。据统计,自1984至2014年,美国国会提出过60多项涉及香港法案。2011年维基解密披露美国驻港总领馆电文显示,美方多次就香港事务发表干涉性意见,并频繁会见反对派人士。此次修例风波中,美国的黑手更是从幕后伸向台前。从美国民主基金会给反中乱港分子提供各种支持,到多位美国政客公然会见“港独”头目;从美国一些媒体混淆黑白颠倒是非,到通过所谓涉港法案妄图实施“长臂管辖”,美方一些人搞乱香港、趁火打劫、牵制中国的险恶用心昭然若揭,国际社会对此洞若观火,普遍予以抨击谴责。
比如,美国库恩基金会主席罗伯特·库恩指出,任何国家都不能允许暴力破坏社会,扰乱经济,美国这一法案对美国、对中国、对世界都是有害的。伊朗外交部发言人穆萨维表示,美国通过此类措施违反国际规范的进程,必将对全球稳定造成严重危害。阿富汗中国友好协会会长苏丹·艾哈迈德·巴辛指出,阿富汗人深知暴力活动会带来怎样的后果,也深受其苦,香港事务纯属中国内政,别国无权干涉。这充分证明,美国为一己之私煽动暴力的行径已成众矢之的,其插手香港事务的图谋不得人心。
然而,在美国一些政客的观念里,“暴乱”是有两套外衣的。如果发生在本国,那绝对不容姑息,必须进行强力打压。于是人们看到,美国警方对2011年“占领华尔街”运动、2015年美国马里兰州巴尔的摩骚乱等都进行了强硬处置。一旦涉及对外事务,这帮政客就立马换了一副嘴脸,将“暴乱”美化为“美丽的风景线”,到处煽风点火,策动骚乱、发动战争、挑起“颜色革命”,唯恐天下不乱,以谋取政治利益、维护美国的全球霸主地位。
过去几十年间,从伊拉克到叙利亚,从阿富汗到利比亚,但凡美国插手之地,都深陷动荡、贫瘠、混乱的泥淖!更严重的是,美国借反恐之名以暴易暴,给人类社会制造了巨大风险隐患。正是因为看透了这一点,当美国一些政客发出所谓“香港人,我们与你在一起”的论调时,网民纷纷嘲讽说,“拜托别了。上次你与利比亚、叙利亚、伊拉克、也门等站在一起的时候……它们都烧了个精光。”这表明,美方一些人的恶言恶行犹如“过街老鼠”,遭到人们的厌恶与鄙夷。
得道多助,失道寡助。美方一些人为暴力“开绿灯”,严重违反国际法与国际关系基本准则,违背人类共同价值观,挑战人类道德与文明底线,实则为本国的未来“亮红灯”,不仅自毁信誉和形象,也将遭到暴力的反噬。比如美国在中东多地挑动战乱酿成大规模难民危机,对美西方社会秩序造成巨大冲击,到头来损害了自身利益。
香港是中国的香港,不是美国某些人手中的风筝,想怎么扯就怎么扯,不会任由美方一些人胡作非为。中国政府维护国家主权、安全、发展利益的决心坚定不移,贯彻“一国两制”方针的决心坚定不移,反对任何外部势力干涉香港事务的决心坚定不移。企图煽动暴力来牵制中国,根本行不通!必将遭到坚决反制!(国际锐评评论员)'''
    ]
    # predict() returns the list of summaries itself, so iterate it directly
    # (the original indexed hyps['cand'] and would raise TypeError).
    hyps = summarizer.predict(texts)
    for i, summary in enumerate(hyps):
        print('{}: {}'.format(i, summary))
if __name__ == '__main__':
    test()
| [
"pyltp.Segmentor",
"re.compile",
"run.predict_batch",
"numpy.array",
"copy.deepcopy",
"os.path.exists",
"utils.Saver.Saver",
"tensorflow.ConfigProto",
"subword_nmt.apply_bpe.BPE",
"model.MultiGPUModel",
"os.path.isfile",
"utils.Saver.Saver.parse_hyper_parameter_filepath",
"re.sub",
"tensor... | [((1507, 1518), 'pyltp.Segmentor', 'Segmentor', ([], {}), '()\n', (1516, 1518), False, 'from pyltp import Segmentor\n'), ((2228, 2253), 're.sub', 're.sub', (['"""\n+"""', '"""\n"""', 'text'], {}), "('\\n+', '\\n', text)\n", (2234, 2253), False, 'import re\n'), ((5860, 6030), 'utils.Batcher.PredictBatcher', 'PredictBatcher', ([], {'x_token': 'src_tokens', 'x_ids': 'src_ids', 'x_ids_extend': 'src_ids_extend', 'x_mask': 'src_mask', 'oov_size': 'src_oov_size', 'oovs': 'src_oovs', 'batch_size': 'self.batch_size'}), '(x_token=src_tokens, x_ids=src_ids, x_ids_extend=\n src_ids_extend, x_mask=src_mask, oov_size=src_oov_size, oovs=src_oovs,\n batch_size=self.batch_size)\n', (5874, 6030), False, 'from utils.Batcher import PredictBatcher\n'), ((6632, 6653), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (6645, 6653), False, 'import copy\n'), ((6913, 6950), 're.compile', 're.compile', (['"""\\\\[(SEP|CLS|UNK|PAD)\\\\]"""'], {}), "('\\\\[(SEP|CLS|UNK|PAD)\\\\]')\n", (6923, 6950), False, 'import re\n'), ((7134, 7199), 'utils.data_loader.load_vocab', 'load_vocab', (['self.config.vocab_file'], {'do_lower': 'self.config.do_lower'}), '(self.config.vocab_file, do_lower=self.config.do_lower)\n', (7144, 7199), False, 'from utils.data_loader import load_vocab\n'), ((7816, 7846), 'utils.Saver.Saver', 'Saver', ([], {'ckpt_dir': 'self.ckpt_path'}), '(ckpt_dir=self.ckpt_path)\n', (7821, 7846), False, 'from utils.Saver import Saver\n'), ((7868, 7932), 'model.MultiGPUModel', 'MultiGPUModel', ([], {'config': 'self.config', 'num_gpus': '(1)', 'copy_config': '(False)'}), '(config=self.config, num_gpus=1, copy_config=False)\n', (7881, 7932), False, 'from model import MultiGPUModel\n'), ((8633, 8882), 'run.predict_batch', 'predict_batch', ([], {'model': 'self.model', 'sess': 'self.session', 'eval_batcher': 'batcher', 'seq_length': 'self.config.decoder_seq_length', 'word2id': 'self.word2id', 'id2word': 'self.id2word', 'use_pointer': 'self.config.use_pointer', 
'substr_prefix': 'self.config.substr_prefix'}), '(model=self.model, sess=self.session, eval_batcher=batcher,\n seq_length=self.config.decoder_seq_length, word2id=self.word2id,\n id2word=self.id2word, use_pointer=self.config.use_pointer,\n substr_prefix=self.config.substr_prefix)\n', (8646, 8882), False, 'from run import predict_batch\n'), ((9268, 9299), 'os.path.join', 'os.path.join', (['EXP_DIR', 'ckpt_dir'], {}), '(EXP_DIR, ckpt_dir)\n', (9280, 9299), False, 'import os\n'), ((9473, 9530), 'utils.Saver.Saver.parse_hyper_parameter_filepath', 'Saver.parse_hyper_parameter_filepath', ([], {'ckpt_path': 'ckpt_path'}), '(ckpt_path=ckpt_path)\n', (9509, 9530), False, 'from utils.Saver import Saver\n'), ((9548, 9599), 'utils.BertConfig.from_json_file', 'BertConfig.from_json_file', (['hyper_parameter_filepath'], {}), '(hyper_parameter_filepath)\n', (9573, 9599), False, 'from utils import BertConfig\n'), ((9900, 9918), 'os.path.exists', 'os.path.exists', (['fp'], {}), '(fp)\n', (9914, 9918), False, 'import os\n'), ((1547, 1581), 'os.path.join', 'os.path.join', (['LTP_DIR', '"""cws.model"""'], {}), "(LTP_DIR, 'cws.model')\n", (1559, 1581), False, 'import os\n'), ((1670, 1705), 'subword_nmt.apply_bpe.read_vocabulary', 'read_vocabulary', (['f', 'vocab_threshold'], {}), '(f, vocab_threshold)\n', (1685, 1705), False, 'from subword_nmt.apply_bpe import BPE, read_vocabulary\n'), ((1788, 1816), 'subword_nmt.apply_bpe.BPE', 'BPE', (['f', '(-1)', '"""@@"""', 'vocabulary'], {}), "(f, -1, '@@', vocabulary)\n", (1791, 1816), False, 'from subword_nmt.apply_bpe import BPE, read_vocabulary\n'), ((5057, 5073), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (5065, 5073), True, 'import numpy as np\n'), ((5075, 5088), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (5083, 5088), True, 'import numpy as np\n'), ((5090, 5110), 'numpy.array', 'np.array', (['ids_extend'], {}), '(ids_extend)\n', (5098, 5110), True, 'import numpy as np\n'), ((5112, 5126), 'numpy.array', 'np.array', 
(['mask'], {}), '(mask)\n', (5120, 5126), True, 'import numpy as np\n'), ((5128, 5146), 'numpy.array', 'np.array', (['oov_size'], {}), '(oov_size)\n', (5136, 5146), True, 'import numpy as np\n'), ((5148, 5162), 'numpy.array', 'np.array', (['oovs'], {}), '(oovs)\n', (5156, 5162), True, 'import numpy as np\n'), ((8201, 8234), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8232, 8234), True, 'import tensorflow as tf\n'), ((9412, 9437), 'os.path.isfile', 'os.path.isfile', (['ckpt_path'], {}), '(ckpt_path)\n', (9426, 9437), False, 'import os\n'), ((8067, 8108), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (8081, 8108), True, 'import tensorflow as tf\n'), ((9315, 9340), 'os.path.exists', 'os.path.exists', (['ckpt_path'], {}), '(ckpt_path)\n', (9329, 9340), False, 'import os\n'), ((9345, 9382), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['ckpt_path'], {}), '(ckpt_path)\n', (9371, 9382), True, 'import tensorflow as tf\n')] |
#!/usr/bin/python
#-*- coding: UTF-8 -*-
from __future__ import unicode_literals
from flask import (Flask, render_template, redirect, url_for, request, flash)
from flask_bootstrap import Bootstrap
from flask_login import login_required, login_user, logout_user, current_user
from flask import send_file, send_from_directory
import os
# try:
# from utils import unzip_func
# from forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, \
# Data_Select_Form, All_Set_Form, UploadForm,\
# ML_Set_Form
# from ext import db, login_manager
# from models import TodoList, User, DataList, TrainSettingList
#
# from dl_function.utils import get_loss
# from dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path
# from dl_function.get_device_info import get_Memo_rate_mome, cpu_core_rate_num, get_available_gpus, get_gpu_used
# from dl_function.data_analysis import fft
# from dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu
#
# from model_setting import net_model, Activation_Function, optimizer, ml_model, batch_size, Whether_data_augment, \
# Net_losses
#
# from bearing.deep_learning.models_mse_loss.train import trian_function, pb_generation_full, get_pred_result
#
# except:
# from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, \
# Data_Select_Form, All_Set_Form, UploadForm, \
# ML_Set_Form
# from bearing_master.ext import db, login_manager
# from bearing_master.models import TodoList, User, DataList, TrainSettingList
#
# from bearing_master.dl_function.utils import get_loss
# from bearing_master.dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path
# from bearing_master.dl_function.get_device_info import get_Memo_rate_mome, cpu_core_rate_num, get_available_gpus, get_gpu_used
# from bearing_master.dl_function.data_analysis import fft
# from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu
#
# from bearing_master.model_setting import net_model, Activation_Function, optimizer, ml_model, batch_size, Whether_data_augment, \
# Net_losses
#
# from deep_learning.models_mse_loss.train import trian_function, pb_generation_full, get_pred_result
# from bearing_master.utils import unzip_func
from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, \
Data_Select_Form, All_Set_Form, UploadForm, \
ML_Set_Form
from bearing_master.ext import db, login_manager
from bearing_master.models import TodoList, User, DataList, TrainSettingList
from bearing_master.dl_function.utils import get_loss
from bearing_master.dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path
from bearing_master.dl_function.get_device_info import get_Memo_rate_mome, cpu_core_rate_num, get_available_gpus, get_gpu_used
from bearing_master.dl_function.data_analysis import fft
from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu
from bearing_master.model_setting import net_model, Activation_Function, optimizer, ml_model, batch_size, Whether_data_augment, \
Net_losses
from deep_learning.models_mse_loss.train import trian_function, pb_generation_full, get_pred_result
from bearing_master.utils import unzip_func
import re
import os
import time
import pymysql
import threading
# Route MySQLdb imports to the pure-Python pymysql driver.
pymysql.install_as_MySQLdb()
SECRET_KEY = 'This is my key'  # NOTE(review): hard-coded secret — move to env/config for production
app = Flask(__name__)
bootstrap = Bootstrap(app)
app.secret_key = SECRET_KEY
# NOTE(review): DB credentials are hard-coded in the URI — confirm acceptable.
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://test:test12345678@localhost:3306/test"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
import os
# File extensions accepted by the upload endpoints (see allowed_file()).
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif','csv','xml','zip','py'])
# Uploaded datasets and per-user training artifacts live below the CWD.
app.config['UPLOAD_FOLDER'] = os.path.realpath('.') + '/bearing_master/data/user_datas/'
app.config['TRAIN_INFO'] = os.path.realpath('.') + '/bearing_master/data/user_info/'
from multiprocessing import Process
# with app.app_context():
#     db.init_app(app)
db.init_app(app)
login_manager.init_app(app)
login_manager.login_view = "login"
DEBUG = True  # enables extra request logging inside the route handlers
@app.route('/jump', methods=['GET', 'POST'])
def select():
    """Model-family selection page.

    GET renders the selection form; POST echoes the chosen family name
    ('cnn' for the deep-learning branch, otherwise the ML label).
    """
    print('jump')
    if request.method == 'POST':
        if DEBUG:
            print('jump POST')
        name = request.form['submit']
        # BUG FIX: the original used `name is 'cnn'`, which compares object
        # identity — undefined for runtime strings. Use equality.
        if name == 'cnn':
            return 'cnn'
        return '机器学习'
    form = [CNNForm(), MLForm()]
    return render_template('select.html', form=form)
@app.route('/view/<int:id>')
@login_required
def view_todo_example(id):
    """Render the training-detail page for todo entry *id*.

    Loads the TrainSettingList bound to the todo's create_time, rebuilds the
    model-configuration summary and the loss curve, gathers basic host
    stats, and renders the ML or deep-learning template accordingly.
    """
    print('view_todo_example')
    # select_list = [(1, "1星"), (2, "2星"), (3, "3星"), (4, "4星"), (5, "5星")]
    # option tables shared with the settings pages (indexed by stored ints)
    select_list = [net_model, Activation_Function, optimizer, batch_size, ml_model,Whether_data_augment,Net_losses]
    if request.method == 'POST':
        if DEBUG:
            print('jump POST')
        # AJAX helper: translate a 1-based option index into its label
        label = request.form['tag_id']
        return select_list[int(label) - 1][1]
    form = []
    form.append(CNNSetting(select_list))
    # todolist = TodoList.query.filter_by(id=id).first_or_404()
    # db.session.delete(todolist)
    # db.session.commit()
    # flash('You are viewing a todo list')
    # return redirect(url_for('show_todo_list'))
    # return redirect(url_for('model_setting'))
    user_name_now = current_user.username
    user_id_now = User.query.filter_by(username=user_name_now).first().id
    todolist = TodoList.query.filter_by(id=id).first_or_404()
    # training settings are keyed by the todo entry's create_time
    create_time_ = todolist.create_time
    train_list = TrainSettingList.query.filter_by(todolist_id=create_time_).first_or_404()
    ml_class = train_list.ml_model_class
    train_data_list = train_list.data_paths
    train_net_class = train_list.deep_model_class
    # db.session.delete(todolist)
    # db.session.commit()
    # data_paths is a '/'-separated string of dataset create_time ids
    train_data_list_split = train_data_list.split('/')
    train_data_str = list(filter(None, train_data_list_split))
    train_data_int = []
    for s in train_data_str:
        train_data_int.append(int(s))
    print(train_data_int)
    datalists = DataList.query.filter(DataList.user_id==user_id_now,DataList.create_time.in_(train_data_int))
    dataform = DataListForm()
    # loss.csv lives under TRAIN_INFO/<username>/<create_time>/
    current_user_id = str(current_user.username)
    filepath = os.path.join(app.config['TRAIN_INFO'], current_user_id)
    model_creat_time = str(create_time_) #create_time_id
    filepath = os.path.join(filepath, model_creat_time)
    filename = os.path.join(filepath, 'loss.csv')
    loss = get_loss(filename)
    x_axis = [i for i in range(len(loss[0]))]
    # network model types
    # net_model = ['CNN', 'MobileNet', 'ResNet', 'FCNet', 'VAE', 'Auto_encoder']
    # activation functions
    # Activation_Function = ['sigmod', 'tanh', 'ReLU', 'ELU', 'PReLU', 'Leaky ReLU']
    # optimizers
    # optimizer = ['Adam', 'SGD', 'BGD', 'Adagrad', 'Adadelta', 'RMSprop']
    # loss functions
    # net_losses = ['mse', 'cross_entropy', 'combin_loss', 'exponential_Loss', 'hinge_loss']
    # machine-learning models
    # ml_model = ['SVM', 'DT', 'Gauss']
    # model class: one of two families
    # model_class = ['Deep Learning', 'Machine Learning']
    # select_list = [GPU_setting,whether_data_augment,deep_model_class,
    #                ml_model_class,input_dim,output_dim,weight_decay,
    #                learning_rate,activation_class,layers_num]
    train_activation_class = train_list.activation_class
    train_input_dim_class = train_list.input_dim
    train_output_dim_class = train_list.output_dim
    train_weight_decay_class = train_list.weight_decay
    train_learning_rate_class = train_list.learning_rate
    train_layers_num_class = train_list.layers_num
    train_GPU_setting_class = train_list.GPU_setting
    train_output_dim_class = train_list.output_dim
    trian_dataAug = train_list.whether_data_augment
    trian_loss = train_list.net_losses
    trian_optimizer = train_list.optimizer
    train_ml_model_class = train_list.ml_model_class
    # ml_model_class >= 0 marks an ML configuration; -1 marks deep learning
    if train_ml_model_class>=0:
        model_classes='机器学习模型'
    else:
        model_classes = '深度模型'
    model_setting=[
        ['模型类别:', model_classes],
        ['模型名称:', select_list[0][train_net_class]],
        ['激活函数:',select_list[1][train_activation_class]],
        ['优化器:',select_list[2][trian_optimizer]],
        ['损失函数:',select_list[6][trian_loss]],
        ['模型名称:', select_list[4][train_ml_model_class]],#
        ['输入维度:', train_input_dim_class],
        ['输出维度:', train_output_dim_class],
        ['权重衰减:', train_weight_decay_class],
        ['学习率:', train_learning_rate_class],
        ['GPU设置:',train_GPU_setting_class],
        ['网络层数:',train_layers_num_class ],
        ['是否数据增强:', select_list[5][trian_dataAug]],
    ] # model configuration summary rendered by the template
    # host statistics (GPU queries are disabled below)
    memory_rate, memory_total = get_Memo_rate_mome()
    cpu_rate, num_core = cpu_core_rate_num()
    # gpu_nums = get_available_gpus()
    gpu_nums = 0
    # gpu_memo_useds = []
    # for i in range(gpu_nums):
    #     gpu_memo_used = get_gpu_used(i)
    #     gpu_memo_useds.append(gpu_memo_used)
    # print('gpu_memo_useds',gpu_memo_useds)
    gpu_memo_useds = 0
    gpu_memo_totals=0
    # for i in range(gpu_nums):
    #     get_gpu_one_used,get_gpu_one_total= get_gpu_used(i)
    #     gpu_memo_useds += get_gpu_one_used
    #     gpu_memo_totals += get_gpu_one_total
    print('gpu_memo_useds',gpu_memo_useds)
    if gpu_nums == 0:
        gpu_nums = 1
    gpu_num_used = train_list.GPU_setting
    # memory_total is reported in bytes; convert to GiB for display
    device_info = [memory_rate,memory_total/1024/1024/1024,
                   int(cpu_rate), num_core,
                   # gpu_nums,gpu_num_used,gpu_memo_useds
                   # gpu_memo_totals, gpu_num_used, gpu_memo_totals,gpu_memo_useds
                   ]
    # print('device_info',device_info)
    user_name_now = current_user.username
    user_id_now = User.query.filter_by(username=user_name_now).first().id
    # todolists = TodoList.query.filter_by(user_id=user_id_now)
    all_datalists = DataList.query.filter_by(user_id=user_id_now)
    # ml_class != -1 -> machine-learning template, else deep-learning one
    if ml_class!=-1:
        return render_template('train_mlinfo.html',device_info=device_info,
                               model_setting=model_setting,
                               x_axis=x_axis, dataform=dataform, datalists=datalists)
    else:
        return render_template('train_info.html',device_info=device_info,
                               model_setting=model_setting, u_data=loss,
                               x_axis=x_axis, dataform=dataform, datalists=datalists,
                               all_datalists=all_datalists,model_id=id)
@app.route('/view_data/<int:id>')
@login_required
def view_data_example(id):
    """Render the signal-analysis page for dataset *id* (a create_time).

    Loads the newest CSV of the dataset, computes its spectrum and basic
    statistics and renders data_info.html.
    """
    print('view_data_example')
    select_list = [(2, "2星")]
    if request.method == 'POST':
        # if DEBUG:
        #     print('jump POST')
        label = request.form['tag_id']
        return select_list[int(label) - 1][1]
    form = []
    form.append(DataSetting(select_list))
    # todolist = TodoList.query.filter_by(id=id).first_or_404()
    # db.session.delete(todolist)
    # db.session.commit()
    # flash('You are viewing a todo list')
    # return redirect(url_for('show_todo_list'))
    # return redirect(url_for('model_setting'))
    # loss = get_loss()
    # datasets are addressed by their create_time, stored under UPLOAD_FOLDER
    datalist = DataList.query.filter_by(create_time=id).first_or_404()
    file_name = str(datalist.create_time)
    filepath = os.path.join(app.config['UPLOAD_FOLDER'], file_name)
    # print(filepath)
    filepath = get_csv_path(filepath)
    print(filepath)
    # pick the lexicographically last CSV (presumably the newest — verify)
    name_lists = sorted(os.listdir(filepath))
    print(name_lists[-1])
    # filepath_name = os.path.join(filepath, name_lists[0])
    filepath_name = os.path.join(filepath, name_lists[-1])
    print(filepath,filepath_name)
    # signal = get_signal_csv(file_name='data/aaaa/Bearing1_1/acc_00001.csv')
    signal = get_signal_csv(file_name=filepath_name)
    # signal = get_all_csv(path='data/aaaa/Bearing1_1/')
    x_axis = [i for i in range(len(signal))]
    # keep only the first half of the FFT (one-sided spectrum)
    frequency = fft(signal)[0:len(x_axis)//2].tolist()
    x_axis_f = x_axis[0:len(signal)//2]
    # get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu
    s_mean = get_mean(signal)
    s_var = get_var(signal)
    s_abs_mean = get_abs_mean(signal)
    s_max = get_max(signal)
    s_min = get_min(signal)
    s_qiaodu = get_qiaodu(signal)
    data_analysis=[
        ['均值:', round(s_mean,5)],
        ['方差:',round(s_var,5)],
        ['绝对值均值:',round(s_abs_mean,5)],
        ['最大值:',round(s_max,5)],
        ['最小值:',round(s_min,5)],
        ['峭度:', round(s_qiaodu,5)]
        # [':', ],
        # [':', ],
        # [':', ],
        # [':', ],
    ] # per-signal statistics table rendered by the template
    return render_template('data_info.html',data_analysis=data_analysis,x_axis=x_axis, u_data=signal,x_axis_f=x_axis_f,frequency=frequency)
import numpy as np
def liner_regre(predict_list):
    """Extrapolate a series with a linear least-squares fit.

    Fits a straight line to (at most) the last 500 points of *predict_list*
    and returns the fitted value at the last index as a one-element numpy
    array.  A single point is returned unchanged.

    :param predict_list: sequence of numeric values
    :return: np.ndarray of length 1 with the fitted/last value
    """
    if len(predict_list) > 500:
        # only the most recent window matters for the trend
        predict_list = predict_list[-500:]
    if len(predict_list) > 1:
        x = np.arange(len(predict_list))
        # Closed-form degree-1 least squares replaces the original
        # sklearn LinearRegression round-trip (same fitted value); the
        # dead `pass` after `return` was also removed.
        slope, intercept = np.polyfit(x, predict_list, 1)
        result = np.array([slope * x[-1] + intercept])
    else:
        result = np.array([predict_list[-1]])
    return result
@app.route('/view_data_prediction/<int:id>/<int:model_id>')
@login_required
def view_data_prediction(id,model_id):
    """Run model *model_id* over dataset *id* and render the prediction page.

    Loads the trained model for the todo entry, predicts over all CSVs of
    the dataset, computes RMSE against a linear count-down reference and a
    running linear extrapolation, then renders data_pred_info.html.
    """
    print('model_id:',model_id)
    print('view_data_prediction')
    select_list = [(2, "2星")]
    if request.method == 'POST':
        if DEBUG:
            print('jump POST')
        label = request.form['tag_id']
        return select_list[int(label) - 1][1]
    form = []
    form.append(DataSetting(select_list))
    # todolist = TodoList.query.filter_by(id=id).first_or_404()
    # db.session.delete(todolist)
    # db.session.commit()
    # flash('You are viewing a todo list')
    # return redirect(url_for('show_todo_list'))
    # return redirect(url_for('model_setting'))
    # loss = get_loss()
    # datasets are addressed by create_time under UPLOAD_FOLDER
    datalist = DataList.query.filter_by(create_time=id).first_or_404()
    file_name = str(datalist.create_time)
    filepath = os.path.join(app.config['UPLOAD_FOLDER'], file_name)
    filename_list = get_csv_path(filepath)
    filename_list = sorted(os.listdir(filename_list))
    # filepath = os.path.join(filepath, name_lists[0])
    # signal = get_signal_csv(file_name='data/aaaa/Bearing1_1/acc_00001.csv')
    # signal = get_signal_csv(file_name=filepath)
    #################################
    user_name_now = current_user.username
    user_id_now = User.query.filter_by(username=user_name_now).first().id
    todolist = TodoList.query.filter_by(id=model_id).first_or_404()
    # training settings are keyed by the todo entry's create_time
    create_time_ = todolist.create_time
    train_list = TrainSettingList.query.filter_by(todolist_id=create_time_).first_or_404()
    current_user_name = str(current_user.username)
    # datalist = DataList.query.filter_by(create_time=id).first_or_404()
    file_name = str(datalist.create_time)
    id_V = model_id
    todolist_v = TodoList.query.filter_by(id=id_V).first_or_404()
    trained_model_path = os.path.join(filepath, 'trained_model')
    # is_finished = trian_function(TodoList, id_V,trained_model_path)
    train_setting_list = TrainSettingList.query.filter_by(todolist_id=create_time_).first_or_404()
    ############################################
    # the actual trained model lives under TRAIN_INFO/<username>/<create_time>/
    modelpath = os.path.join(app.config['TRAIN_INFO'],current_user_name)
    modelpath = os.path.join(modelpath, str(create_time_))
    trained_model_path = os.path.join(modelpath, 'trained_model')
    print(trained_model_path)
    signal = get_pred_result(filepath=filepath,filename_list=filename_list,
                             todolist=todolist,train_setting_list=train_setting_list,trained_model_path = trained_model_path)
    # reference: a linear count-down len-1 .. 0 (presumably remaining-life
    # ground truth — confirm against get_pred_result's semantics)
    real_result = [i for i in range(len(signal)-1,-1,-1)]
    err = np.sqrt(np.average(np.square(np.subtract(signal,real_result))))
    # print(err)
    # signal = get_all_csv(path='data/aaaa/Bearing1_1/')
    x_axis = [i for i in range(len(signal))]
    frequency = []
    # running linear extrapolation over growing prefixes of the prediction
    for ii in range(len(signal)):
        fre = liner_regre(signal[0:ii+1]).tolist()
        # print(fre)
        frequency.append(fre[0])
    # print(frequency)
    x_axis_f = [i for i in range(len(frequency))]
    # get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu
    s_mean = get_mean(signal)
    s_var = get_var(signal)
    s_abs_mean = get_abs_mean(signal)
    s_max = get_max(signal)
    s_min = get_min(signal)
    s_qiaodu = get_qiaodu(signal)
    data_analysis=[
        ['均方根误差:', round(err,5)],
        ['方差:',round(s_var,5)],
        # ['绝对值均值:',round(s_abs_mean,5)],
        # ['最大值:',round(s_max,5)],
        # ['最小值:',round(s_min,5)],
        # ['峭度:', round(s_qiaodu,5)]
    ] # statistics table rendered by the template
    return render_template('data_pred_info.html',data_analysis=data_analysis,x_axis=x_axis, u_data=signal,x_axis_f=x_axis_f,frequency=frequency)
from sklearn import linear_model
@app.route('/register', methods=['GET', 'POST'])
def register():
    """User registration.

    On a valid POST, creates the account and redirects to login; otherwise
    re-renders the registration form.  Uses a single form instance so
    validation errors survive into the re-rendered page (the original
    rebuilt the form and discarded them).
    """
    print('register')
    form = RegistorForm()
    if request.method == 'POST':
        if DEBUG:
            print('POST')
        if form.validate_on_submit():
            user = User(username=request.form['username'], password=request.form['password'])
            db.session.add(user)
            db.session.commit()
            return redirect(url_for('login'))
        flash('Invalid username or password')
    return render_template('register.html', form=form)
@app.route('/model_setting/<int:id>', methods=['GET', 'POST'])
def model_setting(id):
    """Deep-learning training-settings page for todo entry *id*.

    GET renders the settings form; POST collects the selected datasets and
    hyper-parameters, replaces any existing TrainSettingList rows for this
    todo entry, and redirects to the training-detail view.
    """
    print('modelsetting')
    # select_list = [(1, "DNN"), (2, "CNN"), (3, "MobileNet"), (4, "ResNet"), (5, "RNN")]
    # net_model, Activation_Function, optimizer, ml_model,batch_size
    select_list = [net_model, Activation_Function, optimizer, batch_size, Whether_data_augment,Net_losses]
    user_name_now = current_user.username
    user_id_now = User.query.filter_by(username=user_name_now).first().id
    datalists = DataList.query.filter_by(user_id = user_id_now)
    todolist = TodoList.query.filter_by(id = id).first()
    # count the user's datasets to size the dynamic form
    counter = 0
    for data in datalists:
        counter += 1
    allform= All_Set_Form([counter,select_list])()
    # form = []
    # form.append(CNNSetting(select_list))
    if request.method == 'POST':
        if DEBUG:
            print('jump POST')
        # label = request.form['tag_id']
        # hobby = flask.request.form.getlist('dataset')
        # selected_dataset = request.form['tag_id18']
        forms = request.form
        # collect checked dataset ids into a '/'-separated create_time string
        file_names = ''
        for out in forms:
            if 'tag_id_dym' in out:
                print(out)
                count = int(out[10:])
                datalist = datalists[count]
                file_name = '/' + str(datalist.create_time)
                file_names += file_name
        # If TrainSettingList rows keyed by this todolist's create_time
        # already exist, delete them and recreate from the submitted form.
        exit_trainsettings = TrainSettingList.query.filter_by(todolist_id=todolist.create_time).all()
        [db.session.delete(exit_trainsetting) for exit_trainsetting in exit_trainsettings]
        db.session.commit()
        # hyper-parameters from the fixed-name form fields
        whether_data_augment = int(forms['tag_id04'])
        deep_model_class = int(forms['tag_id00'])
        ml_model_class = -1
        input_dim = int(forms['tag_id013'])
        # output_dim = int(forms['tag_id014'])
        output_dim = 1
        weight_decay = float(forms['tag_id017'])
        learning_rate = float(forms['tag_id016'])
        activation_class = int(forms['tag_id01'])
        layers_num = int(forms['tag_id015'])
        optimizer_class = int(forms['tag_id02'])
        batch_size_num = int(forms['tag_id03'])
        net_losses = int(forms['tag_id05'])
        trainsettinglist = TrainSettingList(
            user_id=current_user.id, create_time=time.time(), model_class=0,
            GPU_setting=0, data_paths=file_names, todolist_id=todolist.create_time,
            whether_data_augment=whether_data_augment, deep_model_class=deep_model_class,
            ml_model_class=ml_model_class, input_dim=input_dim, output_dim=output_dim, weight_decay=weight_decay,
            learning_rate=learning_rate, activation_class=activation_class, layers_num=layers_num,batch_size=batch_size_num,
            optimizer=optimizer_class, net_losses=net_losses
        )
        db.session.add(trainsettinglist)
        db.session.commit()
        return redirect(url_for('view_todo_example', id=id))
    user_name_now = current_user.username
    user_id_now = User.query.filter_by(username=user_name_now).first().id
    todolists = TodoList.query.filter_by(user_id=user_id_now)
    datalists = DataList.query.filter_by(user_id=user_id_now)
    return render_template('modelsetting.html', allform=allform,datalists=datalists)
@app.route('/ml_setting/<int:id>', methods=['GET', 'POST'])
def ml_setting(id):
    """Machine-learning training-settings page for todo entry *id*.

    Same flow as model_setting(), but stores an ML model class and fills
    the deep-learning-only fields with -1 sentinels.
    """
    print('ml_setting')
    # select_list = [(1, "DNN"), (2, "CNN"), (3, "MobileNet"), (4, "ResNet"), (5, "RNN")]
    # net_model, Activation_Function, optimizer, ml_model,batch_size
    select_list = [ml_model, Activation_Function, optimizer, batch_size,ml_model]
    user_name_now = current_user.username
    user_id_now = User.query.filter_by(username=user_name_now).first().id
    datalists = DataList.query.filter_by(user_id = user_id_now)
    todolist = TodoList.query.filter_by(id = id).first()
    # count the user's datasets to size the dynamic form
    counter = 0
    for data in datalists:
        counter += 1
    allform= ML_Set_Form([counter,select_list])()
    # form = []
    # form.append(CNNSetting(select_list))
    if request.method == 'POST':
        if DEBUG:
            print('jump POST')
        # label = request.form['tag_id']
        # hobby = flask.request.form.getlist('dataset')
        # selected_dataset = request.form['tag_id18']
        forms = request.form
        # collect checked dataset ids into a '/'-separated create_time string
        file_names = ''
        for out in forms:
            if 'tag_id_dym' in out:
                print(out)
                count = int(out[10:])
                datalist = datalists[count]
                file_name = '/' + str(datalist.create_time)
                file_names += file_name
        # If TrainSettingList rows keyed by this todolist's create_time
        # already exist, delete them and recreate from the submitted form.
        exit_trainsettings = TrainSettingList.query.filter_by(todolist_id=todolist.create_time).all()
        [db.session.delete(exit_trainsetting) for exit_trainsetting in exit_trainsettings]
        db.session.commit()
        # -1 sentinels mark fields that only apply to deep-learning runs
        deep_model_class = -1
        ml_model_class = int(forms['tag_id00'])
        input_dim = int(forms['tag_id013'])
        # output_dim = int(forms['tag_id014'])
        output_dim = 1
        weight_decay = float(forms['tag_id017'])
        learning_rate = float(forms['tag_id016'])
        activation_class = -1
        whether_data_augment = -1
        layers_num = -1
        optimizer_class = -1
        batch_size_num = -1
        # whether_data_augment = int(forms['tag_id04'])
        # layers_num = int(forms['tag_id015'])
        # optimizer_class = int(forms['tag_id02'])
        # batch_size_num = int(forms['tag_id03'])
        trainsettinglist = TrainSettingList(
            user_id=current_user.id, create_time=time.time(), model_class=0,
            GPU_setting=0, data_paths=file_names, todolist_id=todolist.create_time,
            whether_data_augment=whether_data_augment, deep_model_class=deep_model_class,
            ml_model_class=ml_model_class, input_dim=input_dim, output_dim=output_dim, weight_decay=weight_decay,
            learning_rate=learning_rate, activation_class=activation_class, layers_num=layers_num,batch_size=batch_size_num,
            optimizer=-1, net_losses=-1)
        # trainsettinglist = TrainSettingList(
        #     user_id=current_user.id, create_time=time.time(), model_class=0,
        #     GPU_setting=0, data_paths=file_names, todolist_id=todolist.create_time)
        db.session.add(trainsettinglist)
        db.session.commit()
        return redirect(url_for('view_todo_example', id=id))
    user_name_now = current_user.username
    user_id_now = User.query.filter_by(username=user_name_now).first().id
    todolists = TodoList.query.filter_by(user_id=user_id_now)
    datalists = DataList.query.filter_by(user_id=user_id_now)
    return render_template('mlsetting.html', allform=allform,datalists=datalists)
@app.route('/data_selecting', methods=['GET', 'POST'])
@login_required
def data_select():
    """Show the current user's datasets (GET) and collect a selection (POST).

    On POST, checked form fields named ``tag…`` are parsed: the integer after
    the 6-character prefix indexes into the user's dataset query, and the
    selected datasets' ``create_time`` values are joined into a '/'-separated
    string. The result is currently only printed (placeholder endpoint).
    """
    print('data_select')
    # todolists = TodoList.query.all()
    user_name_now = current_user.username
    user_id_now = User.query.filter_by(username=user_name_now).first().id
    datalists = DataList.query.filter_by(user_id = user_id_now)
    # Count rows by iterating the query; the count sizes the dynamic form.
    counter = 0
    for data in datalists:
        counter += 1
    # Data_Select_Form([counter]) returns a form *class*; call it to instantiate.
    dataform = Data_Select_Form([counter])()
    if request.method == 'GET':
        return render_template('dataselecting.html', datalists=datalists, dataform=dataform)
    else:
        # if dataform.validate_on_submit():
        # NOTE(review): validation is bypassed with `if True:`, which makes the
        # `else` branch below unreachable dead code.
        if True:
            # datalist = DataList(current_user.id, dataform.title.data, 0)
            # db.session.add(datalist)
            # db.session.commit()
            # for i in range(counter):
            #     name = 'tag_id'+str(i)
            #     print(name)
            get_outs = request.form
            # selected_hold = []
            file_names = ''
            for out in get_outs:
                if 'tag' in out:
                    print(out)
                    # Field names look like 'tag_idN'; chars 6+ hold the index N.
                    count = int(out[6:])
                    datalist = datalists[count]
                    file_name = '/' + str(datalist.create_time)
                    file_names += file_name
                    # selected_hold.append(count)
            print(file_names)
            # NOTE(review): 'hellow' looks like a placeholder/typo response.
            return 'hellow'
            # flash('You have selected the datalists')
            # return render_template('data_uploading.html')
        else:
            flash(dataform.errors)
            return redirect(url_for('model_setting'))
def allowed_file(filename):
    """Return True when *filename* carries an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.lower().rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
@app.route('/data_uploading/<int:id>', methods=['GET', 'POST'])
def data_uploading(id):
    """Render the upload form for dataset *id* and handle a submitted archive.

    On a valid submission, the uploaded file is saved under
    UPLOAD_FOLDER/<id>/, unpacked with unzip_func, and the DataList row whose
    create_time equals *id* is marked uploaded (status = 1).
    """
    uploadform = UploadForm()
    if uploadform.validate_on_submit():
        # name = uploadform.name.data
        # price = uploadform.price.data
        image = request.files['image']
        file_name = image.filename
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], str(id))
        if image and allowed_file(image.filename):
            if not os.path.exists(file_path):
                os.makedirs(file_path)
            # Save the archive, then unpack it in place next to the original.
            image.save(os.path.join(app.config['UPLOAD_FOLDER'], str(id), file_name))
            full_file_name = os.path.join(app.config['UPLOAD_FOLDER'], str(id), file_name)
            unzip_path = os.path.join(app.config['UPLOAD_FOLDER'], str(id))
            unzip_func(full_file_name,unzip_path)
            # The route's <id> doubles as the dataset's create_time key.
            datalist = DataList.query.filter_by(create_time=id).first_or_404()
            datalist.status = 1
            db.session.add(datalist)
            db.session.commit()
            # product = Product(name, price, category, filename)
            # db.session.add(product)
            # db.session.commit()
            flash('The dataset has been uploaded!')
            return redirect(url_for('view_data_example',id=id))
    if uploadform.errors:
        flash(uploadform.errors, 'danger')
    print('data_uploading.html')
    return render_template('data_uploading.html', uploadform=uploadform)
@app.route('/edge_uploading/<int:id>/<string:user_name>', methods=['GET', 'POST'])
def edge_uploading(id,user_name):
    """Receive a CSV pushed from an edge device and attach it to dataset *id*.

    The uploaded file's client-side path encodes ``.../<dir>/<time_id>``
    (either '/'- or '\\'-separated); the file is stored under
    UPLOAD_FOLDER/<id>/<dir>/. Unknown ids get a DataList row created on the
    fly for *user_name*, and the dataset is auto-marked finished when the
    newest CSV contains a sample above 20. Always returns 'saved!'.
    """
    # Instantiated for its construction side effects (form/CSRF setup); the
    # original code never validated it, so neither do we.
    uploadform = UploadForm()
    image = request.files['image']
    file_name = image.filename
    # Normalise both separator styles and keep the last two path components:
    # .../<file_uper_dir>/<file_name_time_id>.
    file_name_split = file_name.split('/')
    file_name_split = file_name_split[-1].split('\\')
    file_name_time_id = file_name_split[-1]
    file_uper_dir = file_name_split[-2]
    file_path = os.path.join(app.config['UPLOAD_FOLDER'], str(id), file_uper_dir)
    if image and allowed_file(image.filename):
        if not os.path.exists(file_path):
            os.makedirs(file_path)
        try:
            datalist = DataList.query.filter_by(create_time=id).first_or_404()
        except Exception:
            # Unknown id: register a new dataset for this user on the fly.
            # (Narrowed from a bare `except:`; first_or_404 aborts with an
            # HTTPException, which Exception still catches.)
            # NOTE(review): 'creat_time' (sic) must match the DataList
            # constructor's keyword — confirm against the model definition.
            user_id_now = User.query.filter_by(username=user_name).first().id
            datalist = DataList(user_id_now, file_uper_dir+'_from_edge', 0,creat_time=id)
            db.session.add(datalist)
            db.session.commit()
        final_save_path = os.path.join(file_path, file_name_time_id)
        image.save(final_save_path)
        # Inspect the newest CSV of the dataset; a sample above 20 is treated
        # as evidence the recording is complete.
        file_name = str(datalist.create_time)
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], file_name)
        filepath = get_csv_path(filepath)
        name_lists = sorted(os.listdir(filepath))
        filepath = os.path.join(filepath, name_lists[-1])
        signal = get_signal_csv(file_name=filepath)
        # Renamed from `max`, which shadowed the builtin.
        max_value = get_max(signal)
        if max_value > 20:
            datalist.status = 1
            db.session.add(datalist)
            db.session.commit()
    print('The dataset has been uploaded!')
    return 'saved!'
##################################################################
import json
@app.route('/', methods=['GET', 'POST'])
@login_required
def show_todo_list():
    """Main index: list the user's tasks and datasets; create new ones on POST.

    GET renders the index. POST either creates a TodoList (then seeds its
    per-task bookkeeping files loss.csv and is_training.json and redirects to
    the matching settings page) or creates a DataList and redirects to the
    upload page, depending on which form validates.
    """
    print('show_todo_list')
    form = TodoListForm()
    dataform= DataListForm()
    if request.method == 'GET':
        print('GET')
        # todolists = TodoList.query.all()
        user_name_now = current_user.username
        user_id_now = User.query.filter_by(username=user_name_now).first().id
        todolists = TodoList.query.filter_by(user_id = user_id_now)
        datalists = DataList.query.filter_by(user_id = user_id_now)
        return render_template('index.html', todolists=todolists, datalists=datalists, form=form, dataform=dataform)
    else:
        if form.validate_on_submit():
            # todolist = TodoList(current_user.id, form.title.data, form.status.data)
            todolist = TodoList(current_user.id, form.title.data, 0)
            # The status field doubles as the model-class selector here.
            model_class = form.status.data
            db.session.add(todolist)
            db.session.commit()
            id = todolist.id
            ## add new train file loss.csv
            # Bookkeeping directory: TRAIN_INFO/<username>/<create_time>/
            current_user_name = str(current_user.username)
            filepath = os.path.join(app.config['TRAIN_INFO'], current_user_name)
            model_creat_time = str(todolist.create_time) # create_time_id
            filepath = os.path.join(filepath, model_creat_time)
            if not os.path.exists(filepath):
                os.makedirs(filepath)
                print('创建路径:', filepath)
            # Create an empty loss.csv for the training loop to append to.
            filename = os.path.join(filepath, 'loss.csv')
            with open(filename, 'w') as f:
                pass
            # NOTE(review): redundant — the `with` block already closed f.
            f.close()
            # Seed the is_training flag file with 0 (not training).
            filename2 = os.path.join(filepath, 'is_training.json')
            with open(filename2, "w") as f:
                json.dump([0], f)
            flash('You have add a new task to list')
            # '1' routes to deep-learning settings; anything else to classic ML.
            if model_class == '1':
                return redirect(url_for('model_setting', id=id))
            else:
                return redirect(url_for('ml_setting', id=id))
        elif dataform.validate_on_submit():
            # todolist = TodoList(current_user.id, form.title.data, form.status.data)
            datalist = DataList(current_user.id, dataform.title.data, 0)
            db.session.add(datalist)
            db.session.commit()
            create_time = datalist.create_time
            flash('You have add a new data to datalist')
            return redirect(url_for('data_uploading', id=create_time))
        # uploadform = UploadForm()
        # return render_template('data_uploading.html',uploadform=uploadform)
        else:
            flash(form.errors)
            return redirect(url_for('model_setting'))
# def show_todo_list():
# print('show_todo_list')
# form = TodoListForm()
# if request.method == 'GET':
# todolists = TodoList.query.all()
# return render_template('index.html', todolists=todolists, form=form)
# else:
# if form.validate_on_submit():
# todolist = TodoList(current_user.id, form.title.data, form.status.data)
# db.session.add(todolist)
# db.session.commit()
# flash('You have add a new todo list')
# else:
# flash(form.errors)
# return redirect(url_for('show_todo_list'))
@app.route('/delete/<int:id>')
@login_required
def delete_todo_list(id):
    """Remove the todo list with primary key *id*, then return to the index."""
    print('delete_todo_list')
    entry = TodoList.query.filter_by(id=id).first_or_404()
    db.session.delete(entry)
    db.session.commit()
    flash('You have delete a todo list')
    return redirect(url_for('show_todo_list'))
@app.route('/delete_data/<int:id>')
@login_required
def delete_data_list(id):
    """Remove the data list with primary key *id*, then return to the index."""
    print('delete_data_list')
    record = DataList.query.filter_by(id=id).first_or_404()
    db.session.delete(record)
    db.session.commit()
    flash('You have delete a data list')
    return redirect(url_for('show_todo_list'))
@app.route('/finished_data_example/<int:id>')
@login_required
def finished_data_example(id):
    """Mark dataset *id* finished when its newest CSV holds an out-of-range sample.

    Reads the most recent CSV under the dataset's upload directory; if any
    sample exceeds +20 or falls below -20 the DataList row is flagged
    status = 1, otherwise the dataset stays unfinished.
    """
    print('finish_data_list')
    # datalist = DataList.query.filter_by(id=id).first_or_404()
    record = DataList.query.filter_by(create_time=id).first_or_404()
    csv_dir = get_csv_path(os.path.join(app.config['UPLOAD_FOLDER'], str(record.create_time)))
    newest = sorted(os.listdir(csv_dir))[-1]
    samples = get_signal_csv(file_name=os.path.join(csv_dir, newest))
    # Amplitude beyond +/-20 is taken as evidence the recording is complete.
    if not (max(samples) > 20 or min(samples) < -20):
        flash('The data list unfinished')
        return redirect(url_for('show_todo_list'))
    record.status = 1
    db.session.add(record)
    db.session.commit()
    flash('You have set a data list finished')
    return redirect(url_for('show_todo_list'))
@app.route('/finished_set/<int:id>')
@login_required
def finished_set(id):
    """Force-mark the dataset keyed by create_time *id* as finished (status 1)."""
    print('finish_data_list')
    # datalist = DataList.query.filter_by(id=id).first_or_404()
    record = DataList.query.filter_by(create_time=id).first_or_404()
    record.status = 1
    db.session.add(record)
    db.session.commit()
    flash('You have set a data list finished')
    return redirect(url_for('show_todo_list'))
@app.route('/unfinished_set/<int:id>')
@login_required
def unfinished_set(id):
    """Force-mark the dataset keyed by create_time *id* as unfinished (status 0)."""
    print('unfinished_set')
    # datalist = DataList.query.filter_by(id=id).first_or_404()
    datalist = DataList.query.filter_by(create_time=id).first_or_404()
    datalist.status = 0
    db.session.add(datalist)
    db.session.commit()
    # Fixed flash text: this handler marks the dataset UNfinished, but the
    # message previously claimed it had been set finished.
    flash('You have set a data list unfinished')
    return redirect(url_for('show_todo_list'))
@app.route('/change/<int:id>', methods=['GET', 'POST'])
@login_required
def change_todo_list(id):
    """Show (GET) or apply (POST) the edit form for the todo list *id*."""
    print('change_todo_list')
    if request.method == 'GET':
        # Pre-populate the form from the existing row.
        entry = TodoList.query.filter_by(id=id).first_or_404()
        form = TodoListForm()
        form.title.data = entry.title
        form.status.data = str(entry.status)
        return render_template('modify.html', form=form)
    form = TodoListForm()
    if form.validate_on_submit():
        entry = TodoList.query.filter_by(id=id).first_or_404()
        entry.title = form.title.data
        entry.status = form.status.data
        db.session.commit()
        flash('You have modify a todolist')
    else:
        flash(form.errors)
    return redirect(url_for('show_todo_list'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user from the posted credentials, or render the form.

    On a matching username/password pair the user is logged in and sent to
    the index; otherwise the login form is shown (again).
    """
    print('login')
    if request.method == 'POST':
        if DEBUG:
            print('POST')
        # NOTE(review): credentials are matched as plaintext equality against
        # the password column — passwords appear to be stored unhashed.
        # Confirm against the User model; a salted hash is strongly advised.
        user = User.query.filter_by(username=request.form['username'], password=request.form['password']).first()
        if user:
            login_user(user)
            flash('you have logged in!')
            return redirect(url_for('show_todo_list'))
        else:
            flash('Invalid username or password')
    form = LoginForm()
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
    """End the current session and send the user back to the login page."""
    print('logout')
    logout_user()
    flash('you have logout!')
    return redirect(url_for('login'))
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session's user id to a User row (or None)."""
    print('load_user')
    numeric_id = int(user_id)
    return User.query.filter_by(id=numeric_id).first()
#################################################################
@app.route("/getinputdim/<username>/<task_id>", methods=['GET'])
def get_input_dim(username,task_id):
    """Return the configured input dimension for *task_id* as a plain string.

    Looks up the TrainSettingList row keyed by todolist_id == task_id
    (404 when absent).
    """
    directory = app.config['TRAIN_INFO']+'/'+username+'/'+task_id+'/trained_model/output/'
    # The listing result was never used; the call is kept only for its side
    # effect of raising (-> HTTP 500) when the output directory is missing.
    os.listdir(directory)
    train_list = TrainSettingList.query.filter_by(todolist_id=task_id).first_or_404()
    return str(train_list.input_dim)
@app.route("/download/<username>/<task_id>/<filename>", methods=['GET'])
def download_file(username,task_id,filename):
    """Send a trained-model output file whose name contains *filename*.

    Scans TRAIN_INFO/<username>/<task_id>/trained_model/output/ for the first
    entry containing *filename*; falls back to the literal name when nothing
    matches. 404s when the task has no training configuration.
    """
    directory = app.config['TRAIN_INFO']+'/'+username+'/'+task_id+'/trained_model/output/'
    print(directory)
    filename_list = os.listdir(directory)
    # Kept for its abort-with-404 side effect; the previously assigned
    # input_dim local was never used and has been removed.
    TrainSettingList.query.filter_by(todolist_id=task_id).first_or_404()
    # First directory entry containing the requested name wins.
    selected_filename = next((f_name for f_name in filename_list if filename in f_name), filename)
    print(selected_filename)
    return send_from_directory(directory, selected_filename, as_attachment=True)
@app.route("/train_task/<username>", methods=['Post'])
def train_task(username):
    """Return the user's ready tasks as '///'-joined titles, '@@', '///'-joined times."""
    account = User.query.filter_by(username=username).first()
    ready_tasks = TodoList.query.filter_by(user_id=account.id, status=1).all()
    # Each field keeps a leading '///' separator, matching the edge protocol.
    names = ''.join('///' + task.title for task in ready_tasks)
    times = ''.join('///' + str(task.create_time) for task in ready_tasks)
    payload = names + '@@' + times
    print('url get:', payload)
    return payload
@app.route("/return_list", methods=['Post'])
def return_list():
    """Placeholder endpoint: return a fixed demo list rendered with str().

    The hard-coded user-info path previously assigned here was never used
    and has been removed.
    """
    demo_values = [11111, 2, 3, 2, 1, '1212']
    return str(demo_values)
@app.route('/begin_train/<int:id>')
@login_required
def begin_train_todo_list(id):
    """Start training for todo list *id* in a background process.

    Ensures the task's bookkeeping directory exists, checks the is_training
    state, and — when the task is idle — writes [1] to is_training.json,
    flags the row, and spawns a daemonic child process running the training
    loop. Redirects back to the index either way.
    """
    print('begin_train_todo_list')
    todolist = TodoList.query.filter_by(id=id).first_or_404()
    # id = todolist.id
    ## add new train file loss.csv
    # Bookkeeping directory: TRAIN_INFO/<username>/<create_time>/
    current_user_name = str(current_user.username)
    filepath = os.path.join(app.config['TRAIN_INFO'], current_user_name)
    model_creat_time = str(todolist.create_time) # create_time_id
    filepath = os.path.join(filepath, model_creat_time)
    if not os.path.exists(filepath):
        os.makedirs(filepath)
        print('创建路径:', filepath)
    filename = os.path.join(filepath, 'is_training.json')
    with open(filename, 'r') as load_f:
        load_dict = json.load(load_f)
    is_training = load_dict[0]
    # NOTE(review): the flag read from the JSON file is immediately
    # overwritten by the database column — the DB value is authoritative.
    is_training = int(todolist.is_training)
    if is_training == 0:
        # Flip the file flag and the DB flag to "training" before spawning.
        with open(filename, "w") as f:
            json.dump([1], f)
        todolist.is_training = 1
        db.session.commit()
        # return redirect(url_for('model_setting', id=id))
        def train_thread(id,filename):
            # Spawn the training loop in a daemonic child process so this
            # request handler can return immediately.
            # threads = []
            # t1 = Process(target=begin_train, args=(10101,id,filename))
            # threads.append(t1)  # append the child to the thread list
            # for t in threads:  # iterate the thread list
            #     t.setDaemon(True)  # declare as daemon
            #     t.start()  # start the child thread
            # t1 = threading.Thread(target=begin_train, args=(10101,id,filename))
            t1 = Process(target=begin_train, args=(10101, id, filename))
            t1.daemon = True
            # t1.setDaemon(True)  # declare as daemon
            t1.start()  # start the child process
        def begin_train(input,id_V,filename):
            # Runs inside the child process. NOTE(review): the `input`
            # parameter shadows the builtin and is never used; also note the
            # query below uses `id` from the enclosing view's closure, not
            # the id_V parameter.
            user_name_now = current_user.username
            user_id_now = User.query.filter_by(username=user_name_now).first().id
            todolist = TodoList.query.filter_by(id=id).first_or_404()
            create_time_ = todolist.create_time
            train_list = TrainSettingList.query.filter_by(todolist_id=create_time_).first_or_404()
            # data_paths is a '/'-separated list of dataset create_time keys;
            # split and drop the empty leading piece, then parse to ints.
            train_data_list = train_list.data_paths
            train_data_list_split = train_data_list.split('/')
            train_data_str = list(filter(None, train_data_list_split))
            train_data_int = []
            for s in train_data_str:
                train_data_int.append(int(s))
            print(train_data_int)
            #######################################################################
            datalists = DataList.query.filter(DataList.user_id == user_id_now,
                                              DataList.create_time.in_(train_data_int))
            # datalists = DataList.query.filter(DataList.user_id == user_id_now)
            ############################################################################
            current_user_id = str(current_user.username)
            filepath = os.path.join(app.config['TRAIN_INFO'], current_user_id)
            model_creat_time = str(create_time_) # create_time_id
            filepath = os.path.join(filepath, model_creat_time)
            loss_filename = os.path.join(filepath, 'loss.csv')
            todolist_v = TodoList.query.filter_by(id=id_V).first_or_404()
            is_training_v = 1
            is_training_filename = filename
            trained_model_path = os.path.join(filepath, 'trained_model')
            # Blocking call: runs the full training loop; returns 1 when done.
            is_finished = trian_function(is_training_v,is_training_filename,
                                         TodoList,id_V,datalists,loss_filename,
                                         trained_model_path,train_setting_list=train_list
                                         )
            db.session.commit()
            ################################################
            # Re-derive the bookkeeping path and clear the training flags
            # once the loop reports completion.
            current_user_name = str(current_user.username)
            filepath = os.path.join(app.config['TRAIN_INFO'], current_user_name)
            model_creat_time = str(todolist.create_time) # create_time_id
            filepath = os.path.join(filepath, model_creat_time)
            if not os.path.exists(filepath):
                os.makedirs(filepath)
                print('创建路径:', filepath)
            json_filename = os.path.join(filepath, 'is_training.json')
            with open(json_filename, 'r') as load_f:
                load_dict = json.load(load_f)
            # if int(todolist.is_training) == 1:
            #     with open(json_filename, "w") as f:
            #         json.dump([0], f)
            #     todolist.is_training = 0
            #     db.session.commit()
            #     flash('You have finish the task')
            print('is_finished:',is_finished)
            if is_finished == 1:
                with open(json_filename, "w") as f:
                    json.dump([0], f)
                todolist.is_training = 0
                db.session.commit()
                flash('You have finish the task')
            pass
        train_thread(id,filename)
        flash('You have begin the task')
    else:
        flash('You have begin the task before')
    return redirect(url_for('show_todo_list'))
@app.route('/stop_train/<int:id>')
@login_required
def stop_train_todo_list(id):
    """Clear the is_training flag (file and DB) for todo list *id*.

    Writes [0] to the task's is_training.json and resets the row flag when
    the task was training; otherwise just reports it was already stopped.
    """
    print('stop_train_todo_list')
    task = TodoList.query.filter_by(id=id).first_or_404()
    id = task.id
    # Per-user, per-task bookkeeping directory under TRAIN_INFO.
    info_dir = os.path.join(app.config['TRAIN_INFO'], str(current_user.username))
    info_dir = os.path.join(info_dir, str(task.create_time))
    if not os.path.exists(info_dir):
        os.makedirs(info_dir)
        print('创建路径:', info_dir)
    flag_file = os.path.join(info_dir, 'is_training.json')
    with open(flag_file, 'r') as handle:
        flag_state = json.load(handle)[0]
    # The DB column, not the file, decides whether the task is running.
    if int(task.is_training) == 1:
        with open(flag_file, "w") as handle:
            json.dump([0], handle)
        task.is_training = 0
        db.session.commit()
        flash('You have stop the task')
    else:
        flash('You have stop the task before!')
    # return redirect(url_for('model_setting', id=id))
    return redirect(url_for('show_todo_list'))
@app.route('/generate_pb/<int:id>')
@login_required
def generate_pb(id):
    """Export the trained model for todo list *id* and mark the task done.

    When the task's setting row has ml_model_class == -1 (presumably the
    deep-learning case — confirm against the settings forms), the frozen
    graph is produced by pb_generation_full in a joined child process.
    The duplicate TodoList/TrainSettingList queries of the original have
    been collapsed — each row is now fetched once.
    """
    print('generate_pb')
    todolist = TodoList.query.filter_by(id=id).first_or_404()
    ## add new train file loss.csv
    current_user_name = str(current_user.username)
    filepath = os.path.join(app.config['TRAIN_INFO'], current_user_name)
    model_creat_time = str(todolist.create_time) # create_time_id
    create_time_ = todolist.create_time
    train_list = TrainSettingList.query.filter_by(todolist_id=create_time_).first_or_404()
    ml_class = train_list.ml_model_class
    input_dim = train_list.input_dim
    filepath = os.path.join(filepath, model_creat_time)
    if ml_class==-1:
        # Run the export in a daemonic child process so framework state
        # cannot leak into the web worker; join() preserves the original
        # blocking behaviour.
        t1 = Process(target=pb_generation_full, args=(filepath, input_dim, train_list))
        t1.daemon = True
        t1.start()
        t1.join()
    todolist.status =1
    db.session.commit()
    flash('You have generated the pb file')
    return redirect(url_for('show_todo_list'))
def run_app(argv):
    """Start the Flask development server on all interfaces, port 5000.

    *argv* is accepted (e.g. for absl-style launchers) but not used.
    """
    app.run(host='0.0.0.0', port=5000, debug=True)
if __name__ == '__main__':
    # Run the development server directly when this module is executed.
    app.run(host='0.0.0.0', port=5000, debug=True)
| [
"flask.render_template",
"bearing_master.forms.DataListForm",
"bearing_master.forms.UploadForm",
"flask.Flask",
"multiprocessing.Process",
"numpy.array",
"bearing_master.forms.CNNSetting",
"bearing_master.dl_function.feature_extractor.get_var",
"bearing_master.models.TodoList.query.filter_by",
"be... | [((3640, 3668), 'pymysql.install_as_MySQLdb', 'pymysql.install_as_MySQLdb', ([], {}), '()\n', (3666, 3668), False, 'import pymysql\n'), ((3707, 3722), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (3712, 3722), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((3735, 3749), 'flask_bootstrap.Bootstrap', 'Bootstrap', (['app'], {}), '(app)\n', (3744, 3749), False, 'from flask_bootstrap import Bootstrap\n'), ((4285, 4301), 'bearing_master.ext.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (4296, 4301), False, 'from bearing_master.ext import db, login_manager\n'), ((4302, 4329), 'bearing_master.ext.login_manager.init_app', 'login_manager.init_app', (['app'], {}), '(app)\n', (4324, 4329), False, 'from bearing_master.ext import db, login_manager\n'), ((4054, 4075), 'os.path.realpath', 'os.path.realpath', (['"""."""'], {}), "('.')\n", (4070, 4075), False, 'import os\n'), ((4140, 4161), 'os.path.realpath', 'os.path.realpath', (['"""."""'], {}), "('.')\n", (4156, 4161), False, 'import os\n'), ((4748, 4789), 'flask.render_template', 'render_template', (['"""select.html"""'], {'form': 'form'}), "('select.html', form=form)\n", (4763, 4789), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((6483, 6497), 'bearing_master.forms.DataListForm', 'DataListForm', ([], {}), '()\n', (6495, 6497), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((6566, 6621), 'os.path.join', 'os.path.join', (["app.config['TRAIN_INFO']", 'current_user_id'], {}), "(app.config['TRAIN_INFO'], current_user_id)\n", (6578, 6621), False, 'import os\n'), ((6697, 6737), 'os.path.join', 'os.path.join', (['filepath', 'model_creat_time'], {}), '(filepath, model_creat_time)\n', (6709, 6737), False, 'import os\n'), ((6754, 6788), 'os.path.join', 'os.path.join', 
(['filepath', '"""loss.csv"""'], {}), "(filepath, 'loss.csv')\n", (6766, 6788), False, 'import os\n'), ((6802, 6820), 'bearing_master.dl_function.utils.get_loss', 'get_loss', (['filename'], {}), '(filename)\n', (6810, 6820), False, 'from bearing_master.dl_function.utils import get_loss\n'), ((9022, 9042), 'bearing_master.dl_function.get_device_info.get_Memo_rate_mome', 'get_Memo_rate_mome', ([], {}), '()\n', (9040, 9042), False, 'from bearing_master.dl_function.get_device_info import get_Memo_rate_mome, cpu_core_rate_num, get_available_gpus, get_gpu_used\n'), ((9069, 9088), 'bearing_master.dl_function.get_device_info.cpu_core_rate_num', 'cpu_core_rate_num', ([], {}), '()\n', (9086, 9088), False, 'from bearing_master.dl_function.get_device_info import get_Memo_rate_mome, cpu_core_rate_num, get_available_gpus, get_gpu_used\n'), ((10233, 10278), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (10257, 10278), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((11652, 11704), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'file_name'], {}), "(app.config['UPLOAD_FOLDER'], file_name)\n", (11664, 11704), False, 'import os\n'), ((11744, 11766), 'bearing_master.dl_function.read_utils.get_csv_path', 'get_csv_path', (['filepath'], {}), '(filepath)\n', (11756, 11766), False, 'from bearing_master.dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path\n'), ((11944, 11982), 'os.path.join', 'os.path.join', (['filepath', 'name_lists[-1]'], {}), '(filepath, name_lists[-1])\n', (11956, 11982), False, 'import os\n'), ((12112, 12151), 'bearing_master.dl_function.read_utils.get_signal_csv', 'get_signal_csv', ([], {'file_name': 'filepath_name'}), '(file_name=filepath_name)\n', (12126, 12151), False, 'from bearing_master.dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path\n'), ((12438, 12454), 
'bearing_master.dl_function.feature_extractor.get_mean', 'get_mean', (['signal'], {}), '(signal)\n', (12446, 12454), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((12468, 12483), 'bearing_master.dl_function.feature_extractor.get_var', 'get_var', (['signal'], {}), '(signal)\n', (12475, 12483), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((12502, 12522), 'bearing_master.dl_function.feature_extractor.get_abs_mean', 'get_abs_mean', (['signal'], {}), '(signal)\n', (12514, 12522), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((12536, 12551), 'bearing_master.dl_function.feature_extractor.get_max', 'get_max', (['signal'], {}), '(signal)\n', (12543, 12551), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((12565, 12580), 'bearing_master.dl_function.feature_extractor.get_min', 'get_min', (['signal'], {}), '(signal)\n', (12572, 12580), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((12597, 12615), 'bearing_master.dl_function.feature_extractor.get_qiaodu', 'get_qiaodu', (['signal'], {}), '(signal)\n', (12607, 12615), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((12959, 13096), 'flask.render_template', 'render_template', (['"""data_info.html"""'], {'data_analysis': 'data_analysis', 'x_axis': 'x_axis', 'u_data': 'signal', 'x_axis_f': 'x_axis_f', 'frequency': 'frequency'}), "('data_info.html', data_analysis=data_analysis, x_axis=\n x_axis, u_data=signal, x_axis_f=x_axis_f, frequency=frequency)\n", (12974, 13096), False, 'from flask import Flask, render_template, 
redirect, url_for, request, flash\n'), ((14373, 14425), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'file_name'], {}), "(app.config['UPLOAD_FOLDER'], file_name)\n", (14385, 14425), False, 'import os\n'), ((14448, 14470), 'bearing_master.dl_function.read_utils.get_csv_path', 'get_csv_path', (['filepath'], {}), '(filepath)\n', (14460, 14470), False, 'from bearing_master.dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path\n'), ((15358, 15397), 'os.path.join', 'os.path.join', (['filepath', '"""trained_model"""'], {}), "(filepath, 'trained_model')\n", (15370, 15397), False, 'import os\n'), ((15637, 15694), 'os.path.join', 'os.path.join', (["app.config['TRAIN_INFO']", 'current_user_name'], {}), "(app.config['TRAIN_INFO'], current_user_name)\n", (15649, 15694), False, 'import os\n'), ((15782, 15822), 'os.path.join', 'os.path.join', (['modelpath', '"""trained_model"""'], {}), "(modelpath, 'trained_model')\n", (15794, 15822), False, 'import os\n'), ((15869, 16039), 'deep_learning.models_mse_loss.train.get_pred_result', 'get_pred_result', ([], {'filepath': 'filepath', 'filename_list': 'filename_list', 'todolist': 'todolist', 'train_setting_list': 'train_setting_list', 'trained_model_path': 'trained_model_path'}), '(filepath=filepath, filename_list=filename_list, todolist=\n todolist, train_setting_list=train_setting_list, trained_model_path=\n trained_model_path)\n', (15884, 16039), False, 'from deep_learning.models_mse_loss.train import trian_function, pb_generation_full, get_pred_result\n'), ((16639, 16655), 'bearing_master.dl_function.feature_extractor.get_mean', 'get_mean', (['signal'], {}), '(signal)\n', (16647, 16655), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((16669, 16684), 'bearing_master.dl_function.feature_extractor.get_var', 'get_var', (['signal'], {}), '(signal)\n', (16676, 16684), False, 'from 
bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((16703, 16723), 'bearing_master.dl_function.feature_extractor.get_abs_mean', 'get_abs_mean', (['signal'], {}), '(signal)\n', (16715, 16723), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((16737, 16752), 'bearing_master.dl_function.feature_extractor.get_max', 'get_max', (['signal'], {}), '(signal)\n', (16744, 16752), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((16766, 16781), 'bearing_master.dl_function.feature_extractor.get_min', 'get_min', (['signal'], {}), '(signal)\n', (16773, 16781), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((16798, 16816), 'bearing_master.dl_function.feature_extractor.get_qiaodu', 'get_qiaodu', (['signal'], {}), '(signal)\n', (16808, 16816), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((17087, 17229), 'flask.render_template', 'render_template', (['"""data_pred_info.html"""'], {'data_analysis': 'data_analysis', 'x_axis': 'x_axis', 'u_data': 'signal', 'x_axis_f': 'x_axis_f', 'frequency': 'frequency'}), "('data_pred_info.html', data_analysis=data_analysis, x_axis=\n x_axis, u_data=signal, x_axis_f=x_axis_f, frequency=frequency)\n", (17102, 17229), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((17885, 17899), 'bearing_master.forms.RegistorForm', 'RegistorForm', ([], {}), '()\n', (17897, 17899), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((17911, 17954), 'flask.render_template', 
'render_template', (['"""register.html"""'], {'form': 'form'}), "('register.html', form=form)\n", (17926, 17954), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((18469, 18514), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (18493, 18514), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((21054, 21099), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (21078, 21099), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((21116, 21161), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (21140, 21161), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((21174, 21248), 'flask.render_template', 'render_template', (['"""modelsetting.html"""'], {'allform': 'allform', 'datalists': 'datalists'}), "('modelsetting.html', allform=allform, datalists=datalists)\n", (21189, 21248), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((21731, 21776), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (21755, 21776), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((24550, 24595), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (24574, 24595), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((24612, 24657), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (24636, 24657), 
False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((24670, 24741), 'flask.render_template', 'render_template', (['"""mlsetting.html"""'], {'allform': 'allform', 'datalists': 'datalists'}), "('mlsetting.html', allform=allform, datalists=datalists)\n", (24685, 24741), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((25035, 25080), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (25059, 25080), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((26601, 26613), 'bearing_master.forms.UploadForm', 'UploadForm', ([], {}), '()\n', (26611, 26613), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((27822, 27883), 'flask.render_template', 'render_template', (['"""data_uploading.html"""'], {'uploadform': 'uploadform'}), "('data_uploading.html', uploadform=uploadform)\n", (27837, 27883), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((28021, 28033), 'bearing_master.forms.UploadForm', 'UploadForm', ([], {}), '()\n', (28031, 28033), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((30485, 30499), 'bearing_master.forms.TodoListForm', 'TodoListForm', ([], {}), '()\n', (30497, 30499), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((30514, 30528), 'bearing_master.forms.DataListForm', 'DataListForm', ([], {}), '()\n', (30526, 30528), False, 'from bearing_master.forms import TodoListForm, 
LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((33778, 33805), 'bearing_master.ext.db.session.delete', 'db.session.delete', (['todolist'], {}), '(todolist)\n', (33795, 33805), False, 'from bearing_master.ext import db, login_manager\n'), ((33811, 33830), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (33828, 33830), False, 'from bearing_master.ext import db, login_manager\n'), ((33836, 33872), 'flask.flash', 'flash', (['"""You have delete a todo list"""'], {}), "('You have delete a todo list')\n", (33841, 33872), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((34099, 34126), 'bearing_master.ext.db.session.delete', 'db.session.delete', (['datalist'], {}), '(datalist)\n', (34116, 34126), False, 'from bearing_master.ext import db, login_manager\n'), ((34132, 34151), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (34149, 34151), False, 'from bearing_master.ext import db, login_manager\n'), ((34157, 34193), 'flask.flash', 'flash', (['"""You have delete a data list"""'], {}), "('You have delete a data list')\n", (34162, 34193), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((34565, 34617), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'file_name'], {}), "(app.config['UPLOAD_FOLDER'], file_name)\n", (34577, 34617), False, 'import os\n'), ((34634, 34656), 'bearing_master.dl_function.read_utils.get_csv_path', 'get_csv_path', (['filepath'], {}), '(filepath)\n', (34646, 34656), False, 'from bearing_master.dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path\n'), ((34725, 34763), 'os.path.join', 'os.path.join', (['filepath', 'name_lists[-1]'], {}), '(filepath, name_lists[-1])\n', (34737, 34763), False, 'import os\n'), ((34778, 34817), 'bearing_master.dl_function.read_utils.get_signal_csv', 
'get_signal_csv', ([], {'file_name': 'filepath_name'}), '(file_name=filepath_name)\n', (34792, 34817), False, 'from bearing_master.dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path\n'), ((35475, 35499), 'bearing_master.ext.db.session.add', 'db.session.add', (['datalist'], {}), '(datalist)\n', (35489, 35499), False, 'from bearing_master.ext import db, login_manager\n'), ((35505, 35524), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (35522, 35524), False, 'from bearing_master.ext import db, login_manager\n'), ((35530, 35572), 'flask.flash', 'flash', (['"""You have set a data list finished"""'], {}), "('You have set a data list finished')\n", (35535, 35572), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((35898, 35922), 'bearing_master.ext.db.session.add', 'db.session.add', (['datalist'], {}), '(datalist)\n', (35912, 35922), False, 'from bearing_master.ext import db, login_manager\n'), ((35928, 35947), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (35945, 35947), False, 'from bearing_master.ext import db, login_manager\n'), ((35953, 35995), 'flask.flash', 'flash', (['"""You have set a data list finished"""'], {}), "('You have set a data list finished')\n", (35958, 35995), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((37350, 37361), 'bearing_master.forms.LoginForm', 'LoginForm', ([], {}), '()\n', (37359, 37361), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((37373, 37413), 'flask.render_template', 'render_template', (['"""login.html"""'], {'form': 'form'}), "('login.html', form=form)\n", (37388, 37413), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((37493, 37506), 'flask_login.logout_user', 
'logout_user', ([], {}), '()\n', (37504, 37506), False, 'from flask_login import login_required, login_user, logout_user, current_user\n'), ((37511, 37536), 'flask.flash', 'flash', (['"""you have logout!"""'], {}), "('you have logout!')\n", (37516, 37536), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((38081, 38102), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (38091, 38102), False, 'import os\n'), ((38594, 38615), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (38604, 38615), False, 'import os\n'), ((38934, 39003), 'flask.send_from_directory', 'send_from_directory', (['directory', 'selected_filename'], {'as_attachment': '(True)'}), '(directory, selected_filename, as_attachment=True)\n', (38953, 39003), False, 'from flask import send_file, send_from_directory\n'), ((40144, 40201), 'os.path.join', 'os.path.join', (["app.config['TRAIN_INFO']", 'current_user_name'], {}), "(app.config['TRAIN_INFO'], current_user_name)\n", (40156, 40201), False, 'import os\n'), ((40288, 40328), 'os.path.join', 'os.path.join', (['filepath', 'model_creat_time'], {}), '(filepath, model_creat_time)\n', (40300, 40328), False, 'import os\n'), ((40449, 40491), 'os.path.join', 'os.path.join', (['filepath', '"""is_training.json"""'], {}), "(filepath, 'is_training.json')\n", (40461, 40491), False, 'import os\n'), ((45215, 45272), 'os.path.join', 'os.path.join', (["app.config['TRAIN_INFO']", 'current_user_name'], {}), "(app.config['TRAIN_INFO'], current_user_name)\n", (45227, 45272), False, 'import os\n'), ((45357, 45397), 'os.path.join', 'os.path.join', (['filepath', 'model_creat_time'], {}), '(filepath, model_creat_time)\n', (45369, 45397), False, 'import os\n'), ((45515, 45557), 'os.path.join', 'os.path.join', (['filepath', '"""is_training.json"""'], {}), "(filepath, 'is_training.json')\n", (45527, 45557), False, 'import os\n'), ((46353, 46410), 'os.path.join', 'os.path.join', (["app.config['TRAIN_INFO']", 
'current_user_name'], {}), "(app.config['TRAIN_INFO'], current_user_name)\n", (46365, 46410), False, 'import os\n'), ((46710, 46750), 'os.path.join', 'os.path.join', (['filepath', 'model_creat_time'], {}), '(filepath, model_creat_time)\n', (46722, 46750), False, 'import os\n'), ((47871, 47890), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (47888, 47890), False, 'from bearing_master.ext import db, login_manager\n'), ((47896, 47935), 'flask.flash', 'flash', (['"""You have generated the pb file"""'], {}), "('You have generated the pb file')\n", (47901, 47935), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((4700, 4709), 'bearing_master.forms.CNNForm', 'CNNForm', ([], {}), '()\n', (4707, 4709), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((4727, 4735), 'bearing_master.forms.MLForm', 'MLForm', ([], {}), '()\n', (4733, 4735), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((5297, 5320), 'bearing_master.forms.CNNSetting', 'CNNSetting', (['select_list'], {}), '(select_list)\n', (5307, 5320), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((6425, 6465), 'bearing_master.models.DataList.create_time.in_', 'DataList.create_time.in_', (['train_data_int'], {}), '(train_data_int)\n', (6449, 6465), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((10319, 10469), 'flask.render_template', 'render_template', (['"""train_mlinfo.html"""'], {'device_info': 'device_info', 'model_setting': 'model_setting', 'x_axis': 'x_axis', 
'dataform': 'dataform', 'datalists': 'datalists'}), "('train_mlinfo.html', device_info=device_info, model_setting\n =model_setting, x_axis=x_axis, dataform=dataform, datalists=datalists)\n", (10334, 10469), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((10555, 10763), 'flask.render_template', 'render_template', (['"""train_info.html"""'], {'device_info': 'device_info', 'model_setting': 'model_setting', 'u_data': 'loss', 'x_axis': 'x_axis', 'dataform': 'dataform', 'datalists': 'datalists', 'all_datalists': 'all_datalists', 'model_id': 'id'}), "('train_info.html', device_info=device_info, model_setting=\n model_setting, u_data=loss, x_axis=x_axis, dataform=dataform, datalists\n =datalists, all_datalists=all_datalists, model_id=id)\n", (10570, 10763), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((11198, 11222), 'bearing_master.forms.DataSetting', 'DataSetting', (['select_list'], {}), '(select_list)\n', (11209, 11222), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((11813, 11833), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (11823, 11833), False, 'import os\n'), ((13310, 13341), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (13339, 13341), False, 'from sklearn import linear_model\n'), ((13445, 13473), 'numpy.array', 'np.array', (['[predict_list[-1]]'], {}), '([predict_list[-1]])\n', (13453, 13473), True, 'import numpy as np\n'), ((13919, 13943), 'bearing_master.forms.DataSetting', 'DataSetting', (['select_list'], {}), '(select_list)\n', (13930, 13943), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((14499, 14524), 
'os.listdir', 'os.listdir', (['filename_list'], {}), '(filename_list)\n', (14509, 14524), False, 'import os\n'), ((17552, 17566), 'bearing_master.forms.RegistorForm', 'RegistorForm', ([], {}), '()\n', (17564, 17566), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((18653, 18689), 'bearing_master.forms.All_Set_Form', 'All_Set_Form', (['[counter, select_list]'], {}), '([counter, select_list])\n', (18665, 18689), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((19576, 19595), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (19593, 19595), False, 'from bearing_master.ext import db, login_manager\n'), ((20799, 20831), 'bearing_master.ext.db.session.add', 'db.session.add', (['trainsettinglist'], {}), '(trainsettinglist)\n', (20813, 20831), False, 'from bearing_master.ext import db, login_manager\n'), ((20840, 20859), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (20857, 20859), False, 'from bearing_master.ext import db, login_manager\n'), ((21916, 21951), 'bearing_master.forms.ML_Set_Form', 'ML_Set_Form', (['[counter, select_list]'], {}), '([counter, select_list])\n', (21927, 21951), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((22835, 22854), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (22852, 22854), False, 'from bearing_master.ext import db, login_manager\n'), ((24295, 24327), 'bearing_master.ext.db.session.add', 'db.session.add', (['trainsettinglist'], {}), '(trainsettinglist)\n', (24309, 24327), False, 'from 
bearing_master.ext import db, login_manager\n'), ((24336, 24355), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (24353, 24355), False, 'from bearing_master.ext import db, login_manager\n'), ((25163, 25190), 'bearing_master.forms.Data_Select_Form', 'Data_Select_Form', (['[counter]'], {}), '([counter])\n', (25179, 25190), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((25241, 25318), 'flask.render_template', 'render_template', (['"""dataselecting.html"""'], {'datalists': 'datalists', 'dataform': 'dataform'}), "('dataselecting.html', datalists=datalists, dataform=dataform)\n", (25256, 25318), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((27269, 27307), 'bearing_master.utils.unzip_func', 'unzip_func', (['full_file_name', 'unzip_path'], {}), '(full_file_name, unzip_path)\n', (27279, 27307), False, 'from bearing_master.utils import unzip_func\n'), ((27419, 27443), 'bearing_master.ext.db.session.add', 'db.session.add', (['datalist'], {}), '(datalist)\n', (27433, 27443), False, 'from bearing_master.ext import db, login_manager\n'), ((27452, 27471), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (27469, 27471), False, 'from bearing_master.ext import db, login_manager\n'), ((27606, 27645), 'flask.flash', 'flash', (['"""The dataset has been uploaded!"""'], {}), "('The dataset has been uploaded!')\n", (27611, 27645), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((27741, 27775), 'flask.flash', 'flash', (['uploadform.errors', '"""danger"""'], {}), "(uploadform.errors, 'danger')\n", (27746, 27775), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((30769, 30814), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', 
([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (30793, 30814), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((30837, 30882), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'user_id': 'user_id_now'}), '(user_id=user_id_now)\n', (30861, 30882), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((30901, 31006), 'flask.render_template', 'render_template', (['"""index.html"""'], {'todolists': 'todolists', 'datalists': 'datalists', 'form': 'form', 'dataform': 'dataform'}), "('index.html', todolists=todolists, datalists=datalists,\n form=form, dataform=dataform)\n", (30916, 31006), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((33894, 33919), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (33901, 33919), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((34215, 34240), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (34222, 34240), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((34682, 34702), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (34692, 34702), False, 'import os\n'), ((34937, 34961), 'bearing_master.ext.db.session.add', 'db.session.add', (['datalist'], {}), '(datalist)\n', (34951, 34961), False, 'from bearing_master.ext import db, login_manager\n'), ((34971, 34990), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (34988, 34990), False, 'from bearing_master.ext import db, login_manager\n'), ((35000, 35042), 'flask.flash', 'flash', (['"""You have set a data list finished"""'], {}), "('You have set a data list finished')\n", (35005, 35042), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((35115, 35148), 'flask.flash', 'flash', (['"""The data list 
unfinished"""'], {}), "('The data list unfinished')\n", (35120, 35148), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((35594, 35619), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (35601, 35619), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((36017, 36042), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (36024, 36042), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((36285, 36299), 'bearing_master.forms.TodoListForm', 'TodoListForm', ([], {}), '()\n', (36297, 36299), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((36404, 36445), 'flask.render_template', 'render_template', (['"""modify.html"""'], {'form': 'form'}), "('modify.html', form=form)\n", (36419, 36445), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((36471, 36485), 'bearing_master.forms.TodoListForm', 'TodoListForm', ([], {}), '()\n', (36483, 36485), False, 'from bearing_master.forms import TodoListForm, LoginForm, CNNForm, MLForm, CNNSetting, RegistorForm, DataListForm, DataSetting, Data_Select_Form, All_Set_Form, UploadForm, ML_Set_Form\n'), ((37557, 37573), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (37564, 37573), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((40342, 40366), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (40356, 40366), False, 'import os\n'), ((40377, 40398), 'os.makedirs', 'os.makedirs', (['filepath'], {}), '(filepath)\n', (40388, 40398), False, 'import os\n'), ((40554, 40571), 'json.load', 'json.load', (['load_f'], {}), '(load_f)\n', (40563, 40571), False, 'import json\n'), ((40789, 40808), 
'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (40806, 40808), False, 'from bearing_master.ext import db, login_manager\n'), ((44770, 44802), 'flask.flash', 'flash', (['"""You have begin the task"""'], {}), "('You have begin the task')\n", (44775, 44802), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((44823, 44862), 'flask.flash', 'flash', (['"""You have begin the task before"""'], {}), "('You have begin the task before')\n", (44828, 44862), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((44884, 44909), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (44891, 44909), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((45410, 45434), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (45424, 45434), False, 'import os\n'), ((45444, 45465), 'os.makedirs', 'os.makedirs', (['filepath'], {}), '(filepath)\n', (45455, 45465), False, 'import os\n'), ((45618, 45635), 'json.load', 'json.load', (['load_f'], {}), '(load_f)\n', (45627, 45635), False, 'import json\n'), ((45819, 45838), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (45836, 45838), False, 'from bearing_master.ext import db, login_manager\n'), ((45847, 45878), 'flask.flash', 'flash', (['"""You have stop the task"""'], {}), "('You have stop the task')\n", (45852, 45878), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((45897, 45936), 'flask.flash', 'flash', (['"""You have stop the task before!"""'], {}), "('You have stop the task before!')\n", (45902, 45936), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((46056, 46081), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (46063, 46081), False, 'from flask import Flask, render_template, redirect, url_for, 
request, flash\n'), ((47491, 47577), 'multiprocessing.Process', 'Process', ([], {'target': 'pb_generation_full', 'args': '(filepath, input_dim, train_setting_list)'}), '(target=pb_generation_full, args=(filepath, input_dim,\n train_setting_list))\n', (47498, 47577), False, 'from multiprocessing import Process\n'), ((47957, 47982), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (47964, 47982), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((5730, 5761), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (5754, 5761), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((5836, 5894), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], {'todolist_id': 'create_time_'}), '(todolist_id=create_time_)\n', (5868, 5894), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((11536, 11576), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'create_time': 'id'}), '(create_time=id)\n', (11560, 11576), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((14257, 14297), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'create_time': 'id'}), '(create_time=id)\n', (14281, 14297), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((14888, 14925), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'model_id'}), '(id=model_id)\n', (14912, 14925), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((15000, 15058), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], {'todolist_id': 'create_time_'}), '(todolist_id=create_time_)\n', (15032, 15058), False, 'from 
bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((15283, 15316), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id_V'}), '(id=id_V)\n', (15307, 15316), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((15495, 15553), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], {'todolist_id': 'create_time_'}), '(todolist_id=create_time_)\n', (15527, 15553), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((17624, 17698), 'bearing_master.models.User', 'User', ([], {'username': "request.form['username']", 'password': "request.form['password']"}), "(username=request.form['username'], password=request.form['password'])\n", (17628, 17698), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((17711, 17731), 'bearing_master.ext.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (17725, 17731), False, 'from bearing_master.ext import db, login_manager\n'), ((17744, 17763), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (17761, 17763), False, 'from bearing_master.ext import db, login_manager\n'), ((17836, 17873), 'flask.flash', 'flash', (['"""Invalid username or password"""'], {}), "('Invalid username or password')\n", (17841, 17873), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((18533, 18564), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (18557, 18564), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((19486, 19522), 'bearing_master.ext.db.session.delete', 'db.session.delete', (['exit_trainsetting'], {}), '(exit_trainsetting)\n', (19503, 19522), False, 'from bearing_master.ext import db, login_manager\n'), ((20885, 20920), 'flask.url_for', 'url_for', 
(['"""view_todo_example"""'], {'id': 'id'}), "('view_todo_example', id=id)\n", (20892, 20920), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((21795, 21826), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (21819, 21826), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((22745, 22781), 'bearing_master.ext.db.session.delete', 'db.session.delete', (['exit_trainsetting'], {}), '(exit_trainsetting)\n', (22762, 22781), False, 'from bearing_master.ext import db, login_manager\n'), ((24381, 24416), 'flask.url_for', 'url_for', (['"""view_todo_example"""'], {'id': 'id'}), "('view_todo_example', id=id)\n", (24388, 24416), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((26295, 26317), 'flask.flash', 'flash', (['dataform.errors'], {}), '(dataform.errors)\n', (26300, 26317), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((27670, 27705), 'flask.url_for', 'url_for', (['"""view_data_example"""'], {'id': 'id'}), "('view_data_example', id=id)\n", (27677, 27705), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((29022, 29064), 'os.path.join', 'os.path.join', (['file_path', 'file_name_time_id'], {}), '(file_path, file_name_time_id)\n', (29034, 29064), False, 'import os\n'), ((29179, 29231), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'file_name'], {}), "(app.config['UPLOAD_FOLDER'], file_name)\n", (29191, 29231), False, 'import os\n'), ((29255, 29277), 'bearing_master.dl_function.read_utils.get_csv_path', 'get_csv_path', (['filepath'], {}), '(filepath)\n', (29267, 29277), False, 'from bearing_master.dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path\n'), ((29356, 29394), 'os.path.join', 'os.path.join', (['filepath', 'name_lists[-1]'], {}), '(filepath, name_lists[-1])\n', 
(29368, 29394), False, 'import os\n'), ((29416, 29450), 'bearing_master.dl_function.read_utils.get_signal_csv', 'get_signal_csv', ([], {'file_name': 'filepath'}), '(file_name=filepath)\n', (29430, 29450), False, 'from bearing_master.dl_function.read_utils import get_signal_csv, get_all_csv, get_csv_path\n'), ((29469, 29484), 'bearing_master.dl_function.feature_extractor.get_max', 'get_max', (['signal'], {}), '(signal)\n', (29476, 29484), False, 'from bearing_master.dl_function.feature_extractor import get_mean, get_var, get_abs_mean, get_max, get_min, get_qiaodu\n'), ((31160, 31205), 'bearing_master.models.TodoList', 'TodoList', (['current_user.id', 'form.title.data', '(0)'], {}), '(current_user.id, form.title.data, 0)\n', (31168, 31205), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((31263, 31287), 'bearing_master.ext.db.session.add', 'db.session.add', (['todolist'], {}), '(todolist)\n', (31277, 31287), False, 'from bearing_master.ext import db, login_manager\n'), ((31300, 31319), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (31317, 31319), False, 'from bearing_master.ext import db, login_manager\n'), ((31476, 31533), 'os.path.join', 'os.path.join', (["app.config['TRAIN_INFO']", 'current_user_name'], {}), "(app.config['TRAIN_INFO'], current_user_name)\n", (31488, 31533), False, 'import os\n'), ((31634, 31674), 'os.path.join', 'os.path.join', (['filepath', 'model_creat_time'], {}), '(filepath, model_creat_time)\n', (31646, 31674), False, 'import os\n'), ((31824, 31858), 'os.path.join', 'os.path.join', (['filepath', '"""loss.csv"""'], {}), "(filepath, 'loss.csv')\n", (31836, 31858), False, 'import os\n'), ((31974, 32016), 'os.path.join', 'os.path.join', (['filepath', '"""is_training.json"""'], {}), "(filepath, 'is_training.json')\n", (31986, 32016), False, 'import os\n'), ((32107, 32147), 'flask.flash', 'flash', (['"""You have add a new task to list"""'], {}), "('You have add a new task 
to list')\n", (32112, 32147), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((33726, 33757), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (33750, 33757), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((34047, 34078), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (34071, 34078), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((34449, 34489), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'create_time': 'id'}), '(create_time=id)\n', (34473, 34489), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((35068, 35093), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (35075, 35093), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((35174, 35199), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (35181, 35199), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((35389, 35429), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'create_time': 'id'}), '(create_time=id)\n', (35413, 35429), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((35812, 35852), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'create_time': 'id'}), '(create_time=id)\n', (35836, 35852), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((36698, 36717), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (36715, 36717), False, 'from bearing_master.ext import db, login_manager\n'), ((36730, 36765), 'flask.flash', 'flash', (['"""You have modify a 
todolist"""'], {}), "('You have modify a todolist')\n", (36735, 36765), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((36792, 36810), 'flask.flash', 'flash', (['form.errors'], {}), '(form.errors)\n', (36797, 36810), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((36835, 36860), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (36842, 36860), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((37162, 37178), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (37172, 37178), False, 'from flask_login import login_required, login_user, logout_user, current_user\n'), ((37191, 37219), 'flask.flash', 'flash', (['"""you have logged in!"""'], {}), "('you have logged in!')\n", (37196, 37219), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((37301, 37338), 'flask.flash', 'flash', (['"""Invalid username or password"""'], {}), "('Invalid username or password')\n", (37306, 37338), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((38120, 38173), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], {'todolist_id': 'task_id'}), '(todolist_id=task_id)\n', (38152, 38173), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((38634, 38687), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], {'todolist_id': 'task_id'}), '(todolist_id=task_id)\n', (38666, 38687), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((39097, 39136), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username'}), '(username=username)\n', (39117, 39136), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), 
((39184, 39235), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'user_id': 'user_id', 'status': '(1)'}), '(user_id=user_id, status=1)\n', (39208, 39235), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((39965, 39996), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (39989, 39996), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((40728, 40745), 'json.dump', 'json.dump', (['[1]', 'f'], {}), '([1], f)\n', (40737, 40745), False, 'import json\n'), ((41293, 41348), 'multiprocessing.Process', 'Process', ([], {'target': 'begin_train', 'args': '(10101, id, filename)'}), '(target=begin_train, args=(10101, id, filename))\n', (41300, 41348), False, 'from multiprocessing import Process\n'), ((42708, 42763), 'os.path.join', 'os.path.join', (["app.config['TRAIN_INFO']", 'current_user_id'], {}), "(app.config['TRAIN_INFO'], current_user_id)\n", (42720, 42763), False, 'import os\n'), ((42857, 42897), 'os.path.join', 'os.path.join', (['filepath', 'model_creat_time'], {}), '(filepath, model_creat_time)\n', (42869, 42897), False, 'import os\n'), ((42927, 42961), 'os.path.join', 'os.path.join', (['filepath', '"""loss.csv"""'], {}), "(filepath, 'loss.csv')\n", (42939, 42961), False, 'import os\n'), ((43149, 43188), 'os.path.join', 'os.path.join', (['filepath', '"""trained_model"""'], {}), "(filepath, 'trained_model')\n", (43161, 43188), False, 'import os\n'), ((43216, 43369), 'deep_learning.models_mse_loss.train.trian_function', 'trian_function', (['is_training_v', 'is_training_filename', 'TodoList', 'id_V', 'datalists', 'loss_filename', 'trained_model_path'], {'train_setting_list': 'train_list'}), '(is_training_v, is_training_filename, TodoList, id_V,\n datalists, loss_filename, trained_model_path, train_setting_list=train_list\n )\n', (43230, 43369), False, 'from deep_learning.models_mse_loss.train 
import trian_function, pb_generation_full, get_pred_result\n'), ((43454, 43473), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (43471, 43473), False, 'from bearing_master.ext import db, login_manager\n'), ((43620, 43677), 'os.path.join', 'os.path.join', (["app.config['TRAIN_INFO']", 'current_user_name'], {}), "(app.config['TRAIN_INFO'], current_user_name)\n", (43632, 43677), False, 'import os\n'), ((43778, 43818), 'os.path.join', 'os.path.join', (['filepath', 'model_creat_time'], {}), '(filepath, model_creat_time)\n', (43790, 43818), False, 'import os\n'), ((43975, 44017), 'os.path.join', 'os.path.join', (['filepath', '"""is_training.json"""'], {}), "(filepath, 'is_training.json')\n", (43987, 44017), False, 'import os\n'), ((45043, 45074), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (45067, 45074), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((45760, 45777), 'json.dump', 'json.dump', (['[0]', 'f'], {}), '([0], f)\n', (45769, 45777), False, 'import json\n'), ((46200, 46231), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (46224, 46231), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((46539, 46597), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], {'todolist_id': 'create_time_'}), '(todolist_id=create_time_)\n', (46571, 46597), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((46912, 46943), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (46936, 46943), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((47026, 47084), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], 
{'todolist_id': 'create_time_'}), '(todolist_id=create_time_)\n', (47058, 47084), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((5656, 5700), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (5676, 5700), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((10091, 10135), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (10111, 10135), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((12274, 12285), 'bearing_master.dl_function.data_analysis.fft', 'fft', (['signal'], {}), '(signal)\n', (12277, 12285), False, 'from bearing_master.dl_function.data_analysis import fft\n'), ((14816, 14860), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (14836, 14860), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((16158, 16190), 'numpy.subtract', 'np.subtract', (['signal', 'real_result'], {}), '(signal, real_result)\n', (16169, 16190), True, 'import numpy as np\n'), ((17792, 17808), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (17799, 17808), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((18397, 18441), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (18417, 18441), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((19404, 19470), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], {'todolist_id': 'todolist.create_time'}), '(todolist_id=todolist.create_time)\n', (19436, 19470), False, 'from bearing_master.models import 
TodoList, User, DataList, TrainSettingList\n'), ((20279, 20290), 'time.time', 'time.time', ([], {}), '()\n', (20288, 20290), False, 'import time\n'), ((20982, 21026), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (21002, 21026), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((21659, 21703), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (21679, 21703), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((22663, 22729), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], {'todolist_id': 'todolist.create_time'}), '(todolist_id=todolist.create_time)\n', (22695, 22729), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((23593, 23604), 'time.time', 'time.time', ([], {}), '()\n', (23602, 23604), False, 'import time\n'), ((24478, 24522), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (24498, 24522), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((24963, 25007), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (24983, 25007), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((26346, 26370), 'flask.url_for', 'url_for', (['"""model_setting"""'], {}), "('model_setting')\n", (26353, 26370), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((26950, 26975), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (26964, 26975), False, 'import os\n'), ((26993, 27015), 'os.makedirs', 'os.makedirs', (['file_path'], {}), 
'(file_path)\n', (27004, 27015), False, 'import os\n'), ((27327, 27367), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'create_time': 'id'}), '(create_time=id)\n', (27351, 27367), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((28553, 28578), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (28567, 28578), False, 'import os\n'), ((28596, 28618), 'os.makedirs', 'os.makedirs', (['file_path'], {}), '(file_path)\n', (28607, 28618), False, 'import os\n'), ((29311, 29331), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (29321, 29331), False, 'import os\n'), ((29562, 29586), 'bearing_master.ext.db.session.add', 'db.session.add', (['datalist'], {}), '(datalist)\n', (29576, 29586), False, 'from bearing_master.ext import db, login_manager\n'), ((29603, 29622), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (29620, 29622), False, 'from bearing_master.ext import db, login_manager\n'), ((31695, 31719), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (31709, 31719), False, 'import os\n'), ((31737, 31758), 'os.makedirs', 'os.makedirs', (['filepath'], {}), '(filepath)\n', (31748, 31758), False, 'import os\n'), ((32077, 32094), 'json.dump', 'json.dump', (['[0]', 'f'], {}), '([0], f)\n', (32086, 32094), False, 'import json\n'), ((32482, 32531), 'bearing_master.models.DataList', 'DataList', (['current_user.id', 'dataform.title.data', '(0)'], {}), '(current_user.id, dataform.title.data, 0)\n', (32490, 32531), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((32544, 32568), 'bearing_master.ext.db.session.add', 'db.session.add', (['datalist'], {}), '(datalist)\n', (32558, 32568), False, 'from bearing_master.ext import db, login_manager\n'), ((32581, 32600), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (32598, 32600), False, 'from 
bearing_master.ext import db, login_manager\n'), ((32660, 32704), 'flask.flash', 'flash', (['"""You have add a new data to datalist"""'], {}), "('You have add a new data to datalist')\n", (32665, 32704), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((32926, 32944), 'flask.flash', 'flash', (['form.errors'], {}), '(form.errors)\n', (32931, 32944), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((36223, 36254), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (36247, 36254), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((37034, 37129), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': "request.form['username']", 'password': "request.form['password']"}), "(username=request.form['username'], password=request.\n form['password'])\n", (37054, 37129), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((37248, 37273), 'flask.url_for', 'url_for', (['"""show_todo_list"""'], {}), "('show_todo_list')\n", (37255, 37273), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((42412, 42452), 'bearing_master.models.DataList.create_time.in_', 'DataList.create_time.in_', (['train_data_int'], {}), '(train_data_int)\n', (42436, 42452), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((43839, 43863), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (43853, 43863), False, 'import os\n'), ((43882, 43903), 'os.makedirs', 'os.makedirs', (['filepath'], {}), '(filepath)\n', (43893, 43903), False, 'import os\n'), ((44101, 44118), 'json.load', 'json.load', (['load_f'], {}), '(load_f)\n', (44110, 44118), False, 'import json\n'), ((44636, 44655), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (44653, 
44655), False, 'from bearing_master.ext import db, login_manager\n'), ((44673, 44706), 'flask.flash', 'flash', (['"""You have finish the task"""'], {}), "('You have finish the task')\n", (44678, 44706), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((28848, 28917), 'bearing_master.models.DataList', 'DataList', (['user_id_now', "(file_uper_dir + '_from_edge')", '(0)'], {'creat_time': 'id'}), "(user_id_now, file_uper_dir + '_from_edge', 0, creat_time=id)\n", (28856, 28917), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((28931, 28955), 'bearing_master.ext.db.session.add', 'db.session.add', (['datalist'], {}), '(datalist)\n', (28945, 28955), False, 'from bearing_master.ext import db, login_manager\n'), ((28972, 28991), 'bearing_master.ext.db.session.commit', 'db.session.commit', ([], {}), '()\n', (28989, 28991), False, 'from bearing_master.ext import db, login_manager\n'), ((30693, 30737), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (30713, 30737), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((32215, 32246), 'flask.url_for', 'url_for', (['"""model_setting"""'], {'id': 'id'}), "('model_setting', id=id)\n", (32222, 32246), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((32298, 32326), 'flask.url_for', 'url_for', (['"""ml_setting"""'], {'id': 'id'}), "('ml_setting', id=id)\n", (32305, 32326), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((32734, 32775), 'flask.url_for', 'url_for', (['"""data_uploading"""'], {'id': 'create_time'}), "('data_uploading', id=create_time)\n", (32741, 32775), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((32973, 32997), 'flask.url_for', 'url_for', (['"""model_setting"""'], {}), 
"('model_setting')\n", (32980, 32997), False, 'from flask import Flask, render_template, redirect, url_for, request, flash\n'), ((36547, 36578), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (36571, 36578), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((41661, 41692), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (41685, 41692), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((41783, 41841), 'bearing_master.models.TrainSettingList.query.filter_by', 'TrainSettingList.query.filter_by', ([], {'todolist_id': 'create_time_'}), '(todolist_id=create_time_)\n', (41815, 41841), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((42990, 43023), 'bearing_master.models.TodoList.query.filter_by', 'TodoList.query.filter_by', ([], {'id': 'id_V'}), '(id=id_V)\n', (43014, 43023), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((44559, 44576), 'json.dump', 'json.dump', (['[0]', 'f'], {}), '([0], f)\n', (44568, 44576), False, 'import json\n'), ((28663, 28703), 'bearing_master.models.DataList.query.filter_by', 'DataList.query.filter_by', ([], {'create_time': 'id'}), '(create_time=id)\n', (28687, 28703), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((41581, 41625), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name_now'}), '(username=user_name_now)\n', (41601, 41625), False, 'from bearing_master.models import TodoList, User, DataList, TrainSettingList\n'), ((28769, 28809), 'bearing_master.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'user_name'}), '(username=user_name)\n', (28789, 28809), False, 'from bearing_master.models import TodoList, User, DataList, 
TrainSettingList\n')] |
import numpy as np
def trade_quote_processing(t, dates_quotes, q_ask, p_ask, q_bid, p_bid, t_n,
                           dates_trades, p_last, delta_q, delta_sgn,
                           match, varargin=None):
    """Clean and align raw quote and trade records of a limit order book.

    Three processing steps are performed:
      1) QUOTES: duplicated quote timestamps are collapsed, keeping only the
         last stored observation for each unique time.
      2) TRADES: missing trade signs are inferred from the nearest quote:
         price closer to best ask -> buy (+1), closer to best bid -> sell (-1).
      3) TRADES: trades belonging to the same "match" event are aggregated
         (cumulative volume, last price, last sign), and duplicated trade
         timestamps are collapsed the same way.

    Parameters
    ----------
        t : array, shape (k1_,)          quote times (may contain duplicates)
        dates_quotes : array, shape (k1_,)
        q_ask : array, shape (k1_,)      ask sizes
        p_ask : array, shape (k1_,)      ask prices
        q_bid : array, shape (k1_,)      bid sizes
        p_bid : array, shape (k1_,)      bid prices
        t_n : array, shape (k3_,)        trade times
        dates_trades : array, shape (k3_,)
        p_last : array, shape (k3_,)     traded prices
        delta_q : array, shape (k3_,)    traded volumes
        delta_sgn : array, shape (k3_,)  trade signs (NaN where unknown)
        match : array, shape (k3_,)      match-event markers (NaN elsewhere)
        varargin{1} : array, shape (k1_,)
        varargin{2} : array, shape (k1_,)

    Returns
    -------
        t_p : array, shape (k1_,)
        dates_quotes_p : dict of length k1_
        q_ask_p : array, shape (k1_,)
        p_ask_p : array, shape (k1_,)
        q_bid_p : array, shape (k1_,)
        p_bid_p : array, shape (k1_,)
        t_n_p : array, shape (k2_,)
        dates_trades_p : dict of length k2_
        p_last_p : array, shape (k2_,)
        delta_q_p : array, shape (k2_,)
        delta_sgn_p : array, shape (k2_,)
        varargout : dict with keys 1 and 2, arrays of shape (k1_,)

    Notes
    -----
    NOTE(review): varargin/varargout use MATLAB-style 1-based keys; varargin
    is presumably a dict-like object with keys 1 and 2 -- confirm against
    callers before changing this convention.
    This function mutates ``delta_sgn`` in place where signs were NaN.
    """
    ## QUOTES: if there are repeated times, consider the last stored observation and delete the others
    t_unique = np.unique(t)
    k1_ = len(t_unique)
    dates_quotes_p = {}
    t_p = np.zeros(k1_)
    p_bid_p = np.zeros(k1_)
    p_ask_p = np.zeros(k1_)
    q_bid_p = np.zeros(k1_)
    q_ask_p = np.zeros(k1_)
    varargout = {1: np.zeros(k1_), 2: np.zeros(k1_)}
    for k in range(k1_):
        # index[-1]: the last stored observation for this timestamp wins
        index = np.where(t == t_unique[k])[0]
        dates_quotes_p[k] = dates_quotes[index[-1]]
        t_p[k] = t[index[-1]]
        p_bid_p[k] = p_bid[index[-1]]
        p_ask_p[k] = p_ask[index[-1]]
        q_bid_p[k] = q_bid[index[-1]]
        q_ask_p[k] = q_ask[index[-1]]
        if varargin is not None:
            varargout[2][k] = varargin[2][index[-1]]
            varargout[1][k] = varargin[1][index[-1]]
    ## TRADES: set the sign of the transaction in delta_sgn where it is not defined: -if traded price is closer to best ask the sign is "BUY", i.e. "+1" -if traded price is closer to best bid the sign is "SELL", i.e. "-1"
    index = np.where(np.isnan(delta_sgn))[0]
    for i in range(len(index)):
        # i_min: index of the quote closest in time to this trade
        i_min = np.argmin(abs(t-t_n[index[i]]))
        if abs(p_last[index[i]]-p_ask[i_min]) < abs(p_last[index[i]] -\
               p_bid[i_min]):
            delta_sgn[index[i]] = +1
        else:
            delta_sgn[index[i]] = -1
    ## TRADES: concatenate the "match" events wheres the indices of elements NOT equal to NAN in vector match
    index = np.where(~np.isnan(match))[0]
    dates_trades_tmp = dates_trades[index]
    t_n_tmp = t_n[index]
    p_last_tmp = p_last[index]
    dv_tmp = np.zeros(len(index))
    # cumulative volume of all trades up to (and including) each match event
    dv_tmp[0] = np.sum(delta_q[:index[0]+1])
    for k in range(1, len(index)):
        dv_tmp[k] = np.sum(delta_q[index[k-1]+1:index[k]+1])
    dzeta_tmp = delta_sgn[index]
    ## TRADES: if there are repeated times, consider the cumulative volume, the last price and the last sign
    t_n_unique = np.unique(t_n_tmp)
    k2_ = len(t_n_unique)
    dates_trades_p = {}
    t_n_p = np.zeros(k2_)
    p_last_p = np.zeros(k2_)
    delta_q_p = np.zeros(k2_)
    delta_sgn_p = np.zeros(k2_)
    for k in range(k2_):
        index = np.where(t_n_tmp == t_n_unique[k])[0]
        dates_trades_p[k] = dates_trades_tmp[index[-1]]
        t_n_p[k] = t_n_tmp[index[-1]]
        p_last_p[k] = p_last_tmp[index[-1]]
        # volumes accumulate over duplicates; price and sign take the last
        delta_q_p[k] = np.sum(dv_tmp[index])
        delta_sgn_p[k] = dzeta_tmp[index[-1]]
    return t_p, dates_quotes_p, q_ask_p, p_ask_p, q_bid_p, p_bid_p, t_n_p, dates_trades_p, p_last_p, delta_q_p, delta_sgn_p, varargout
| [
"numpy.unique",
"numpy.where",
"numpy.sum",
"numpy.zeros",
"numpy.isnan"
] | [((1482, 1494), 'numpy.unique', 'np.unique', (['t'], {}), '(t)\n', (1491, 1494), True, 'import numpy as np\n'), ((1553, 1566), 'numpy.zeros', 'np.zeros', (['k1_'], {}), '(k1_)\n', (1561, 1566), True, 'import numpy as np\n'), ((1581, 1594), 'numpy.zeros', 'np.zeros', (['k1_'], {}), '(k1_)\n', (1589, 1594), True, 'import numpy as np\n'), ((1609, 1622), 'numpy.zeros', 'np.zeros', (['k1_'], {}), '(k1_)\n', (1617, 1622), True, 'import numpy as np\n'), ((1637, 1650), 'numpy.zeros', 'np.zeros', (['k1_'], {}), '(k1_)\n', (1645, 1650), True, 'import numpy as np\n'), ((1665, 1678), 'numpy.zeros', 'np.zeros', (['k1_'], {}), '(k1_)\n', (1673, 1678), True, 'import numpy as np\n'), ((3055, 3085), 'numpy.sum', 'np.sum', (['delta_q[:index[0] + 1]'], {}), '(delta_q[:index[0] + 1])\n', (3061, 3085), True, 'import numpy as np\n'), ((3341, 3359), 'numpy.unique', 'np.unique', (['t_n_tmp'], {}), '(t_n_tmp)\n', (3350, 3359), True, 'import numpy as np\n'), ((3422, 3435), 'numpy.zeros', 'np.zeros', (['k2_'], {}), '(k2_)\n', (3430, 3435), True, 'import numpy as np\n'), ((3451, 3464), 'numpy.zeros', 'np.zeros', (['k2_'], {}), '(k2_)\n', (3459, 3464), True, 'import numpy as np\n'), ((3481, 3494), 'numpy.zeros', 'np.zeros', (['k2_'], {}), '(k2_)\n', (3489, 3494), True, 'import numpy as np\n'), ((3513, 3526), 'numpy.zeros', 'np.zeros', (['k2_'], {}), '(k2_)\n', (3521, 3526), True, 'import numpy as np\n'), ((1699, 1712), 'numpy.zeros', 'np.zeros', (['k1_'], {}), '(k1_)\n', (1707, 1712), True, 'import numpy as np\n'), ((1717, 1730), 'numpy.zeros', 'np.zeros', (['k1_'], {}), '(k1_)\n', (1725, 1730), True, 'import numpy as np\n'), ((3139, 3185), 'numpy.sum', 'np.sum', (['delta_q[index[k - 1] + 1:index[k] + 1]'], {}), '(delta_q[index[k - 1] + 1:index[k] + 1])\n', (3145, 3185), True, 'import numpy as np\n'), ((3767, 3788), 'numpy.sum', 'np.sum', (['dv_tmp[index]'], {}), '(dv_tmp[index])\n', (3773, 3788), True, 'import numpy as np\n'), ((1773, 1799), 'numpy.where', 'np.where', (['(t == 
t_unique[k])'], {}), '(t == t_unique[k])\n', (1781, 1799), True, 'import numpy as np\n'), ((2421, 2440), 'numpy.isnan', 'np.isnan', (['delta_sgn'], {}), '(delta_sgn)\n', (2429, 2440), True, 'import numpy as np\n'), ((3568, 3602), 'numpy.where', 'np.where', (['(t_n_tmp == t_n_unique[k])'], {}), '(t_n_tmp == t_n_unique[k])\n', (3576, 3602), True, 'import numpy as np\n'), ((2886, 2901), 'numpy.isnan', 'np.isnan', (['match'], {}), '(match)\n', (2894, 2901), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 2 01:50:25 2019
@author: qiushili
"""
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import numpy as np
# Serial port of the spectrum source; adjust for your machine.
serialCom = serial.Serial('/dev/cu.usbmodem14201')

style.use('fivethirtyeight')

fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)

# Rolling buffer of log-scaled spectrum rows; newest at the end, capped at 128.
pic = []

def animate(i):
    """Read one comma-separated 64-bin spectrum line from the serial port
    and redraw the rolling spectrogram (called by FuncAnimation).

    Parameters
    ----------
    i : int
        Frame counter supplied by FuncAnimation (unused).
    """
    line = serialCom.readline().decode('utf-8').split(',')
    line.pop()  # drop the trailing empty field after the final comma
    # BUG FIX: the comprehension variable used to be `i`, shadowing the
    # frame-index parameter; renamed to `v`.
    row = np.array([float(v) for v in line])
    if len(row) == 64:
        row = np.log10(row)  # log scale for display
        pic.append(row)
        if len(pic) > 128:
            pic.pop(0)  # keep only the most recent 128 rows
        ax1.clear()
        ax1.imshow(np.transpose(pic))
        ax1.grid(False)

ani = animation.FuncAnimation(fig, animate, interval=1)
plt.show()
| [
"numpy.log10",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"matplotlib.style.use",
"serial.Serial",
"numpy.transpose",
"matplotlib.pyplot.show"
] | [((259, 297), 'serial.Serial', 'serial.Serial', (['"""/dev/cu.usbmodem14201"""'], {}), "('/dev/cu.usbmodem14201')\n", (272, 297), False, 'import serial\n'), ((299, 327), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (308, 327), False, 'from matplotlib import style\n'), ((334, 346), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (344, 346), True, 'import matplotlib.pyplot as plt\n'), ((861, 910), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'interval': '(1)'}), '(fig, animate, interval=1)\n', (884, 910), True, 'import matplotlib.animation as animation\n'), ((911, 921), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (919, 921), True, 'import matplotlib.pyplot as plt\n'), ((618, 631), 'numpy.log10', 'np.log10', (['row'], {}), '(row)\n', (626, 631), True, 'import numpy as np\n'), ((743, 760), 'numpy.transpose', 'np.transpose', (['pic'], {}), '(pic)\n', (755, 760), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
import os
import numpy as np
from numpy import pi,cos,sin
import pandas as pd
import logging
from plotnine import *
from scipy.stats.mstats import winsorize
from plotnine.stats.stat_summary import bootstrap_statistics
#%% put PUPIL LABS data into PANDAS DF
def gaze_to_pandas(gaze):
    """Convert a list of Pupil Labs gaze dictionaries into a pandas DataFrame.

    Columns: gx, gy (normalised gaze position), confidence, smpl_time
    (timestamp), diameter (mean pupil diameter over all base data of a
    sample) and pa (pupil area computed from the ellipse axes; note that
    only the area of the *last* base datum is kept). Falsy entries in
    ``gaze`` are skipped entirely.
    """
    mean_diameters = []
    pupil_areas = []
    for sample in gaze:
        if not sample:
            continue
        if 'surface' in gaze[0]['topic']:
            # surface-mapped dictionaries nest twice:
            # surface-mapped -> world-mapped -> pupil base data
            base = sample['base_data']['base_data']
        else:
            base = sample['base_data']
        diam_sum = 0.0
        area = 0.0
        n_base = 0
        for bd in base:
            # NOTE: area is overwritten each iteration, so only the last
            # base datum's ellipse area survives (matches original code)
            area = convert_diam_to_pa(bd['ellipse']['axes'][0],
                                      bd['ellipse']['axes'][1])
            diam_sum += bd['diameter']
            n_base += 1
        mean_diameters.append(diam_sum / max(n_base, 1))
        pupil_areas.append(area)

    valid = [s for s in gaze if s]
    return pd.DataFrame({'gx': [s['norm_pos'][0] for s in valid],
                         'gy': [s['norm_pos'][1] for s in valid],
                         'confidence': [s['confidence'] for s in valid],
                         'smpl_time': [s['timestamp'] for s in valid],
                         'diameter': mean_diameters,
                         'pa': pupil_areas
                         })
def convert_diam_to_pa(axes1, axes2):
    """Return the area of an ellipse with full axis lengths *axes1* and
    *axes2* (area = pi * a * b / 4)."""
    return 0.25 * math.pi * float(axes1) * float(axes2)
#%% adding information to dfs
def add_msg_to_event(etevents, etmsgs, timefield='start_time', direction='backward'):
    """Attach to every eye-tracking event the message closest in time.

    Both frames are sorted (events by 'start_time', messages by 'msg_time')
    and combined with ``pd.merge_asof`` so each event row picks up the
    nearest message in the given *direction*.
    """
    events_sorted = etevents.sort_values('start_time')
    msgs_sorted = etmsgs.sort_values('msg_time')
    merged = pd.merge_asof(events_sorted, msgs_sorted,
                           left_on='start_time', right_on='msg_time',
                           direction=direction)
    return merged
def add_events_to_samples(etsamples, etevents):
    """Annotate each sample with the event type it falls into.

    For every event type present in *etevents*, samples within such events
    get that type written into their 'type' column (via
    ``append_eventtype_to_sample``). Blink samples additionally receive a
    running 'blink_id'.
    """
    logger = logging.getLogger(__name__)
    logger.info(etevents.type.unique())
    for event_type in etevents.type.unique():
        etsamples = append_eventtype_to_sample(etsamples, etevents,
                                                eventtype=event_type)
        if event_type == 'blink':
            # Number consecutive blink stretches: a 0/1 blink indicator is
            # differenced so each 0->1 step (blink onset) increments a
            # cumulative counter; multiplying by the indicator again zeroes
            # the id outside blinks.
            is_blink = 1 * (etsamples['type'] == 'blink')
            etsamples.loc[:, 'blink_id'] = is_blink * ((is_blink.diff() == 1).cumsum())
    return (etsamples)
def append_eventtype_to_sample(etsamples, etevents, eventtype, timemargin=None):
    """Mark all samples that fall within events of *eventtype*.

    Event windows may be widened by *timemargin* ([pre, post] in seconds);
    blinks default to [-0.1, 0.1], all other event types to [0, 0]. The
    matching samples get *eventtype* written into their 'type' column.
    """
    logger = logging.getLogger(__name__)
    logger.debug('Appending eventtype: %s to samples', eventtype)

    if timemargin is None:
        if eventtype == 'blink':
            logger.info('Taking Default value for timemargin (blink = -0.1s/0.1s)')
            timemargin = [-.1, .1]
        else:
            logger.info('Taking Default value for timemargin (fix/saccade = 0s)')
            timemargin = [0, 0]

    # event rows of the requested type, with margins applied to their window
    selected = etevents['type'] == eventtype
    starts = etevents.loc[selected, 'start_time'] + float(timemargin[0])
    ends = etevents.loc[selected, 'end_time'] + float(timemargin[1])

    sample_ix = eventtime_to_sampletime(etsamples, starts, ends)
    # stamp the event type onto all samples inside the selected windows
    if len(sample_ix) > 0:
        etsamples.loc[etsamples.index[sample_ix], 'type'] = eventtype
    return etsamples
def eventtime_to_sampletime(etsamples, eventstart, eventend):
    """Map event start/end times to positional sample indices.

    Parameters
    ----------
    etsamples : DataFrame with a time-sorted 'smpl_time' column
    eventstart, eventend : pd.Series of event window boundaries (seconds);
        they are clipped in place to the sample time range.

    Returns
    -------
    array of positional indices into *etsamples* covering all event windows.

    Raises
    ------
    ValueError
        If *eventstart* and *eventend* differ in length. (The original code
        had ``raise error``, which is a NameError — `error` is undefined.)
    """
    # due to timemargin strange effects can occur and we need to clip
    mintime = etsamples.smpl_time.iloc[0]
    maxtime = etsamples.smpl_time.iloc[-1]

    eventstart.loc[eventstart < mintime] = mintime
    eventstart.loc[eventstart > maxtime] = maxtime
    eventend.loc[eventend < mintime] = mintime
    eventend.loc[eventend > maxtime] = maxtime

    if len(eventstart) != len(eventend):
        raise ValueError('eventstart and eventend must have the same length')

    startix = np.searchsorted(etsamples.smpl_time, eventstart)
    endix = np.searchsorted(etsamples.smpl_time, eventend)

    # expand every [startix, endix) window into individual sample indices
    ranges = [list(range(s, e)) for s, e in zip(startix, endix)]
    flat_ranges = [item for sublist in ranges for item in sublist]
    # keep only indices that actually exist in etsamples
    flat_ranges = np.intersect1d(flat_ranges, range(etsamples.shape[0]))
    return (flat_ranges)
#%% last fixation (e.g. for large GRID)
def only_last_fix(merged_etevents, next_stim = ['condition','block', 'element']):
    """Keep only the final fixation of each stimulus presentation.

    Fixation events are grouped by the columns in *next_stim* and the last
    row of each group is returned, with the grouping columns restored as
    ordinary columns. (The mutable default mirrors the original signature;
    the list is never mutated.)
    """
    fixations = merged_etevents[merged_etevents.type == 'fixation']
    last_per_stim = fixations.groupby(next_stim).last()
    last_per_stim.reset_index(level=next_stim, inplace=True)
    return last_per_stim
#%% function to make groupby easier
def group_to_level_and_take_mean(raw_condition_df, lowestlevel):
    """Group the accuracy/precision dataframe down to *lowestlevel* and
    average all remaining (numeric) columns within each group.

    Supported levels and their grouping columns:
      'subject'           -> et, subject (mean over all blocks)
      'block'             -> et, subject, block
      'element_positions' -> et, subject, block, posx, posy
      'condition'         -> et, subject, condition (GRID condition)

    Raises ValueError for any other level.
    """
    level_columns = {
        'subject': ['et', 'subject'],
        'block': ['et', 'subject', 'block'],
        'element_positions': ['et', 'subject', 'block', 'posx', 'posy'],
        'condition': ['et', 'subject', 'condition'],
    }
    if lowestlevel not in level_columns:
        raise ValueError('This level is unknown / not implemented')

    cols = level_columns[lowestlevel]
    return raw_condition_df.groupby(cols).mean().reset_index(level=cols)
#%% set dtypes of dataframe and make the labes ready to get plotted
def set_dtypes(df):
    """Cast object columns to 'category' and normalise id-like columns.

    The columns 'block', 'trial' and 'pic_id' are rounded to integers,
    rendered as strings (so they label plots nicely) and missing values are
    restored to NaN afterwards.
    """
    # every object column becomes categorical (easier/prettier plotting)
    object_cols = df.select_dtypes(['object']).columns
    df[object_cols] = df.select_dtypes(['object']).apply(lambda col: col.astype('category'))

    # id-like columns arrive as floats and need integer-style string labels
    for id_col in ("block", "trial", "pic_id"):
        if id_col in df:
            # temporarily encode missing values as -1 so the int cast works
            df[id_col] = df[id_col].fillna(-1)
            df[id_col] = pd.to_numeric(df[id_col], downcast='integer')
            df[id_col] = df[id_col].round(0).astype(int)
            # string labels; the -1 placeholder goes back to NaN
            df[id_col] = df[id_col].astype(str)
            df[id_col] = df[id_col].replace('-1', np.nan)
    return df
def set_to_full_names(df):
    """Replace eye-tracker shorthand values in the 'et' column with their
    full display names ('el' -> 'EyeLink', 'pl' -> 'Pupil Labs')."""
    full_names = {'el': 'EyeLink', 'pl': 'Pupil Labs'}
    df.loc[:, 'et'] = df['et'].map(full_names)
    return df
#%% everything related to VISUAL DEGREES
def size_px2deg(px, mm_per_px=0.276, distance=600):
    """Convert an on-screen size (pixels) to visual angle (degrees).

    Uses the exact relation 2*atan((size/2 * mm_per_px) / distance) for an
    observer at *distance* millimetres from the screen.
    """
    half_size_mm = px / 2 * mm_per_px
    return 2 * np.arctan2(half_size_mm, distance) * 180 / np.pi
def px2deg(px, orientation, mm_per_px=0.276, distance=600):
    """Convert a screen coordinate (pixels) into visual degrees relative to
    the centre of a 1920x1080 display.

    Parameters
    ----------
    px : pixel coordinate(s)
    orientation : 'horizontal' or 'vertical' — selects which screen centre
        (960 or 540 px) to subtract before the conversion
    mm_per_px : monitor pixel pitch in millimetres
    distance : viewing distance in millimetres

    Raises
    ------
    ValueError
        For an unknown *orientation*. (The original ``raise('unknown
        option')`` raised a TypeError, since strings are not exceptions.)
    """
    if orientation == 'horizontal':
        center_x = 1920 / 2
        px = px - center_x
    elif orientation == 'vertical':
        center_y = 1080 / 2
        px = px - center_y
    else:
        raise ValueError('unknown option')
    deg = np.arctan2(px * mm_per_px, distance) * 180 / np.pi
    return deg
def sph2cart(theta_sph, phi_sph, rho_sph=1):
    """Convert spherical coordinates (theta = polar angle from +z,
    phi = azimuth, rho = radius) to a Cartesian [x, y, z] array."""
    sin_theta = sin(theta_sph)
    return np.asarray([rho_sph * sin_theta * cos(phi_sph),
                       rho_sph * sin_theta * sin(phi_sph),
                       rho_sph * cos(theta_sph)])
#%% LOAD & SAVE & FIND file
def load_file(et, subject, datapath='/net/store/nbp/projects/etcomp/', outputprefix='', cleaned=True):
    """Load preprocessed samples, messages and events CSVs for one subject.

    Files are read from ``<datapath>/<subject>/preprocessed/`` using the
    naming scheme ``<outputprefix><et>_{[cleaned_]samples,msgs,events}.csv``.
    Returns (etsamples, etmsgs, etevents); a FileNotFoundError is printed
    and re-raised if any file is missing.
    """
    preprocessed_path = os.path.join(datapath, subject, 'preprocessed')
    prefixed_et = str(outputprefix + et)
    samples_name = prefixed_et + ('_cleaned_samples.csv' if cleaned else '_samples.csv')
    try:
        etsamples = pd.read_csv(os.path.join(preprocessed_path, samples_name))
        etmsgs = pd.read_csv(os.path.join(preprocessed_path, prefixed_et + '_msgs.csv'))
        etevents = pd.read_csv(os.path.join(preprocessed_path, prefixed_et + '_events.csv'))
    except FileNotFoundError as e:
        print(e)
        raise e
    return etsamples, etmsgs, etevents
def save_file(data, et, subject, datapath, outputprefix=''):
    """Write (samples, cleaned_samples, msgs, events) dataframes as CSVs.

    *data* is a 4-sequence of DataFrames; files go into
    ``<datapath>/<subject>/preprocessed/`` (created if missing) using the
    naming scheme ``<outputprefix><et>_<kind>.csv``.
    """
    preprocessed_path = os.path.join(datapath, subject, 'preprocessed')
    if not os.path.exists(preprocessed_path):
        os.makedirs(preprocessed_path)

    prefixed_et = str(outputprefix + et)
    filenames = [prefixed_et + '_samples.csv',
                 prefixed_et + '_cleaned_samples.csv',
                 prefixed_et + '_msgs.csv',
                 prefixed_et + '_events.csv']
    # one CSV per dataframe, in the fixed order above
    for frame, fname in zip(data, filenames):
        frame.to_csv(os.path.join(preprocessed_path, fname), index=False)
def findFile(path, ftype):
    """Return the names of all files in *path* whose name ends with *ftype*
    (e.g. '.edf' for EyeLink recordings)."""
    return [name for name in os.listdir(path) if name.endswith(ftype)]
def get_subjectnames(datapath='/net/store/nbp/projects/etcomp/'):
    """List the entries (subject folders) directly inside *datapath*."""
    entries = os.listdir(datapath)
    return entries
#%% Tic Toc Matlab equivalent to time things
import time
def TicTocGenerator():
    """Generator yielding the wall-clock seconds elapsed since it was last advanced."""
    previous = 0  # initial reference time
    current = time.time()  # final time
    while True:
        previous = current
        current = time.time()
        yield current - previous  # seconds between the last two advances
TicToc = TicTocGenerator()  # shared module-level instance used by tic()/toc()
# This will be the main function through which we define both tic() and toc()
def toc(tempBool=True):
    """Advance the shared TicToc generator; print the elapsed time when tempBool."""
    tempTimeInterval = next(TicToc)
    if tempBool:
        print( "Elapsed time: %f seconds.\n" %tempTimeInterval )
def tic():
    """Mark the beginning of a time interval (advance TicToc without printing)."""
    toc(False)
def plot_around_event(etsamples,etmsgs,etevents,single_eventormsg,plusminus=(-1,1),bothET=True,plotevents=True):
    """Plot gaze samples, messages and (optionally) events in a time window
    around a single event or message row.

    Parameters
    ----------
    etsamples, etmsgs, etevents : pd.DataFrame
        preprocessed sample / message / event tables
    single_eventormsg : pd.Series
        one row from etevents (has .start_time) or from etmsgs (has .msg_time)
    plusminus : tuple
        window offsets (seconds) added to the event/message time
    bothET : bool
        if False, restrict the plot to the row's eyetracker
    plotevents : bool
        if True, draw events overlapping the window as horizontal segments

    Returns a plotnine ggplot object.
    """
    import re
    assert(type(single_eventormsg)==pd.Series)
    # Distinguish an event row from a message row by which time field it carries.
    try:
        t0 = single_eventormsg.start_time
        eventtype = 'event'
    except:
        t0 = single_eventormsg.msg_time
        eventtype = 'msg'
    tstart = t0 + plusminus[0]
    tend = t0 + plusminus[1]
    # Build a pandas query string; '1==1' is a neutral seed so '&'-clauses can
    # be appended unconditionally below.
    query = '1==1'
    if ("subject" in etsamples.columns) & ("subject" in single_eventormsg.index):
        query = query+"& subject == @single_eventormsg.subject"
    if not bothET:
        query = query+"& eyetracker==@single_eventormsg.eyetracker"
    samples_query = "smpl_time>=@tstart & smpl_time <=@tend & "+query
    msg_query = "msg_time >=@tstart & msg_time <=@tend & "+query
    event_query = "end_time >=@tstart & start_time <=@tend & "+query
    etmsgs = etmsgs.query(msg_query)
    # Render the exp_event column to one text label per message; collapse
    # repeated spaces so the labels stay short.
    longstring = etmsgs.to_string(columns=['exp_event'],na_rep='',float_format='%.1f',index=False,header=False,col_space=0)
    longstring = re.sub(' +',' ',longstring)
    splitstring = longstring.split(sep="\n")
    if len(splitstring) == etmsgs.shape[0]-1:
        # last element was a Nan blank and got removed
        splitstring.append(' ')
    etmsgs.loc[:,'label'] = splitstring
    # Base plot: gaze samples as points, message labels as text, message times
    # as vertical lines.
    p = (ggplot()
         + geom_point(aes(x='smpl_time',y='gx',color='type',shape='eyetracker'),data=etsamples.query(samples_query)) # samples
         + geom_text(aes(x='msg_time',y=2,label="label"),color='black',position=position_jitter(width=0),data=etmsgs)# label msg/trigger
         + geom_vline(aes(xintercept='msg_time'),color='black',data=etmsgs)  # triggers/msgs
        )
    if etevents.query(event_query).shape[0]>0:
        pass
    if plotevents:
        # Overlapping events as horizontal segments at y=0.
        p = p + geom_segment(aes(x="start_time",y=0,xend="end_time",yend=0,color='type'),alpha=0.5,size=2,data=etevents.query(event_query))
    if eventtype == 'event':
        # Highlight the selected event itself in black.
        p = (p + annotate("line",x=[single_eventormsg.start_time,single_eventormsg.end_time],y=0,color='black')
               + annotate("point",x=[single_eventormsg.start_time,single_eventormsg.end_time],y=0,color='black'))
    if eventtype=='msg':
        if single_eventormsg.condition == 'GRID':
            # For GRID messages also show the accuracy next to the target position.
            p = (p + annotate("text",x=single_eventormsg.end_time,y=single_eventormsg.posx+5,label=single_eventormsg.accuracy)
                   + geom_hline(yintercept=single_eventormsg.posx))
    return(p)
# define 20% winsorized means
def winmean(x,perc = 0.2,axis=0):
    """Mean of *x* after winsorizing the lowest/highest *perc* fraction along *axis*."""
    clipped = winsorize(x, perc, axis=axis)
    return np.mean(clipped, axis=axis)
def winmean_cl_boot(series, n_samples=10000, confidence_interval=0.95,
                    random_state=None):
    """Bootstrapped confidence interval of the winsorized mean of *series*
    (thin wrapper around plotnine's bootstrap_statistics with winmean as statistic)."""
    boot_kwargs = dict(n_samples=n_samples,
                       confidence_interval=confidence_interval,
                       random_state=random_state)
    return bootstrap_statistics(series, winmean, **boot_kwargs)
def mad(arr):
    """ Median Absolute Deviation: a "Robust" version of standard deviation.
        Indices variabililty of the sample.
        https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    # Drop masked entries (if any) before computing deviations.
    values = np.ma.array(arr).compressed()  # should be faster to not use masked arrays.
    center = np.median(values)
    deviations = np.abs(values - center)
    return np.median(deviations)
| [
"logging.getLogger",
"pandas.merge_asof",
"numpy.arctan2",
"scipy.stats.mstats.winsorize",
"numpy.sin",
"os.path.exists",
"os.listdir",
"numpy.searchsorted",
"pandas.DataFrame",
"numpy.abs",
"numpy.ma.array",
"numpy.cos",
"re.sub",
"time.time",
"numpy.median",
"os.makedirs",
"os.path... | [((1504, 1761), 'pandas.DataFrame', 'pd.DataFrame', (["{'gx': [p['norm_pos'][0] for p in gaze if p], 'gy': [p['norm_pos'][1] for p in\n gaze if p], 'confidence': [p['confidence'] for p in gaze if p],\n 'smpl_time': [p['timestamp'] for p in gaze if p], 'diameter': list_diam,\n 'pa': list_pa}"], {}), "({'gx': [p['norm_pos'][0] for p in gaze if p], 'gy': [p[\n 'norm_pos'][1] for p in gaze if p], 'confidence': [p['confidence'] for\n p in gaze if p], 'smpl_time': [p['timestamp'] for p in gaze if p],\n 'diameter': list_diam, 'pa': list_pa})\n", (1516, 1761), True, 'import pandas as pd\n'), ((2396, 2495), 'pandas.merge_asof', 'pd.merge_asof', (['etevents', 'etmsgs'], {'left_on': '"""start_time"""', 'right_on': '"""msg_time"""', 'direction': 'direction'}), "(etevents, etmsgs, left_on='start_time', right_on='msg_time',\n direction=direction)\n", (2409, 2495), True, 'import pandas as pd\n'), ((2683, 2710), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2700, 2710), False, 'import logging\n'), ((3284, 3311), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3301, 3311), False, 'import logging\n'), ((4814, 4862), 'numpy.searchsorted', 'np.searchsorted', (['etsamples.smpl_time', 'eventstart'], {}), '(etsamples.smpl_time, eventstart)\n', (4829, 4862), True, 'import numpy as np\n'), ((4874, 4920), 'numpy.searchsorted', 'np.searchsorted', (['etsamples.smpl_time', 'eventend'], {}), '(etsamples.smpl_time, eventend)\n', (4889, 4920), True, 'import numpy as np\n'), ((10429, 10476), 'os.path.join', 'os.path.join', (['datapath', 'subject', '"""preprocessed"""'], {}), "(datapath, subject, 'preprocessed')\n", (10441, 10476), False, 'import os\n'), ((11255, 11302), 'os.path.join', 'os.path.join', (['datapath', 'subject', '"""preprocessed"""'], {}), "(datapath, subject, 'preprocessed')\n", (11267, 11302), False, 'import os\n'), ((12310, 12330), 'os.listdir', 'os.listdir', (['datapath'], {}), '(datapath)\n', (12320, 
12330), False, 'import os\n'), ((12512, 12523), 'time.time', 'time.time', ([], {}), '()\n', (12521, 12523), False, 'import time\n'), ((14177, 14206), 're.sub', 're.sub', (['""" +"""', '""" """', 'longstring'], {}), "(' +', ' ', longstring)\n", (14183, 14206), False, 'import re\n'), ((15823, 15953), 'plotnine.stats.stat_summary.bootstrap_statistics', 'bootstrap_statistics', (['series', 'winmean'], {'n_samples': 'n_samples', 'confidence_interval': 'confidence_interval', 'random_state': 'random_state'}), '(series, winmean, n_samples=n_samples,\n confidence_interval=confidence_interval, random_state=random_state)\n', (15843, 15953), False, 'from plotnine.stats.stat_summary import bootstrap_statistics\n'), ((16350, 16364), 'numpy.median', 'np.median', (['arr'], {}), '(arr)\n', (16359, 16364), True, 'import numpy as np\n'), ((11360, 11393), 'os.path.exists', 'os.path.exists', (['preprocessed_path'], {}), '(preprocessed_path)\n', (11374, 11393), False, 'import os\n'), ((11403, 11433), 'os.makedirs', 'os.makedirs', (['preprocessed_path'], {}), '(preprocessed_path)\n', (11414, 11433), False, 'import os\n'), ((11756, 11805), 'os.path.join', 'os.path.join', (['preprocessed_path', 'filename_samples'], {}), '(preprocessed_path, filename_samples)\n', (11768, 11805), False, 'import os\n'), ((11839, 11896), 'os.path.join', 'os.path.join', (['preprocessed_path', 'filename_cleaned_samples'], {}), '(preprocessed_path, filename_cleaned_samples)\n', (11851, 11896), False, 'import os\n'), ((11930, 11976), 'os.path.join', 'os.path.join', (['preprocessed_path', 'filename_msgs'], {}), '(preprocessed_path, filename_msgs)\n', (11942, 11976), False, 'import os\n'), ((12010, 12058), 'os.path.join', 'os.path.join', (['preprocessed_path', 'filename_events'], {}), '(preprocessed_path, filename_events)\n', (12022, 12058), False, 'import os\n'), ((12582, 12593), 'time.time', 'time.time', ([], {}), '()\n', (12591, 12593), False, 'import time\n'), ((15663, 15692), 'scipy.stats.mstats.winsorize', 
'winsorize', (['x', 'perc'], {'axis': 'axis'}), '(x, perc, axis=axis)\n', (15672, 15692), False, 'from scipy.stats.mstats import winsorize\n'), ((16386, 16403), 'numpy.abs', 'np.abs', (['(arr - med)'], {}), '(arr - med)\n', (16392, 16403), True, 'import numpy as np\n'), ((8318, 8363), 'pandas.to_numeric', 'pd.to_numeric', (['df[column]'], {'downcast': '"""integer"""'}), "(df[column], downcast='integer')\n", (8331, 8363), True, 'import pandas as pd\n'), ((9940, 9976), 'numpy.arctan2', 'np.arctan2', (['(px * mm_per_px)', 'distance'], {}), '(px * mm_per_px, distance)\n', (9950, 9976), True, 'import numpy as np\n'), ((10810, 10859), 'os.path.join', 'os.path.join', (['preprocessed_path', 'filename_samples'], {}), '(preprocessed_path, filename_samples)\n', (10822, 10859), False, 'import os\n'), ((10892, 10938), 'os.path.join', 'os.path.join', (['preprocessed_path', 'filename_msgs'], {}), '(preprocessed_path, filename_msgs)\n', (10904, 10938), False, 'import os\n'), ((10971, 11019), 'os.path.join', 'os.path.join', (['preprocessed_path', 'filename_events'], {}), '(preprocessed_path, filename_events)\n', (10983, 11019), False, 'import os\n'), ((12161, 12177), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (12171, 12177), False, 'import os\n'), ((16265, 16281), 'numpy.ma.array', 'np.ma.array', (['arr'], {}), '(arr)\n', (16276, 16281), True, 'import numpy as np\n'), ((9491, 9531), 'numpy.arctan2', 'np.arctan2', (['(px / 2 * mm_per_px)', 'distance'], {}), '(px / 2 * mm_per_px, distance)\n', (9501, 9531), True, 'import numpy as np\n'), ((10099, 10111), 'numpy.cos', 'cos', (['phi_sph'], {}), '(phi_sph)\n', (10102, 10111), False, 'from numpy import pi, cos, sin\n'), ((10152, 10164), 'numpy.sin', 'sin', (['phi_sph'], {}), '(phi_sph)\n', (10155, 10164), False, 'from numpy import pi, cos, sin\n'), ((10188, 10202), 'numpy.cos', 'cos', (['theta_sph'], {}), '(theta_sph)\n', (10191, 10202), False, 'from numpy import pi, cos, sin\n'), ((10082, 10096), 'numpy.sin', 'sin', 
(['theta_sph'], {}), '(theta_sph)\n', (10085, 10096), False, 'from numpy import pi, cos, sin\n'), ((10135, 10149), 'numpy.sin', 'sin', (['theta_sph'], {}), '(theta_sph)\n', (10138, 10149), False, 'from numpy import pi, cos, sin\n')] |
# Author: <NAME>
# Time: 2020-5-21
import numpy as np
import cv2 as cv
from PIL import Image
import random
import math
def imwrite(image, filename):
    """Save *image* to *filename* via cv.imencode + ndarray.tofile so paths with
    non-ASCII (e.g. Chinese) characters work, which cv.imwrite cannot handle.
    Returns the encode success flag."""
    ext = '.' + filename.rsplit('.', 1)[1]
    retval, arr = cv.imencode(ext, image)  # retval: whether encoding succeeded
    if retval is True:
        arr.tofile(filename)
    return retval
def imread(filename):
    """Read an image from a path that may contain non-ASCII characters
    (which cv.imread cannot): load the raw bytes, then decode with cv.imdecode."""
    raw = np.fromfile(filename, dtype=np.uint8)
    return cv.imdecode(raw, -1)
def pil_to_cv(img):
    """Convert a PIL.Image to an OpenCV ndarray (Turn PIL.Image to CV(BGR)).

    :param img: PIL.Image. RGB, RGBA, L. const
    :return: ndarray. BGR, BGRA, L (H, W, C{1, 3, 4})
    """
    arr = np.asarray(img)
    mode = img.mode
    if mode == "RGB":
        return cv.cvtColor(arr, cv.COLOR_RGB2BGR)
    if mode == "RGBA":
        return cv.cvtColor(arr, cv.COLOR_RGBA2BGRA)
    if mode in ("L",):
        return arr  # single channel: nothing to convert
    raise ValueError("img.mode nonsupport")
def cv_to_pil(arr):
    """Convert an OpenCV ndarray to a PIL.Image (Turn CV(BGR) to PIL.Image).

    :param arr: ndarray. BGR, BGRA, L. const
    :return: PIL.Image. RGB, RGBA, L
    """
    if arr.ndim == 2:
        # single channel (L): nothing to convert
        pass
    elif arr.shape[2] == 3:
        # BUGFIX: a BGRA image also has ndim == 3 (shape (H, W, 4)), so the old
        # `arr.ndim == 3` test routed 4-channel arrays through COLOR_BGR2RGB and
        # made the BGRA branch unreachable. Dispatch on the channel count instead.
        arr = cv.cvtColor(arr, cv.COLOR_BGR2RGB)
    else:  # 4 channels
        arr = cv.cvtColor(arr, cv.COLOR_BGRA2RGBA)
    return Image.fromarray(arr)
def resize_max(image, max_height=None, max_width=None):
    """Resize *image* (bilinear) so it fits within max_height x max_width while
    keeping the aspect ratio; a falsy limit means "no limit beyond current size".

    :param image: ndarray[H, W, C]. BGR. const
    :param max_height: int
    :param max_width: int
    :return: ndarray[H, W, C]. BGR"""
    src_h, src_w = image.shape[:2]
    # `or` keeps the original fallback: a missing (or 0) limit uses the source size.
    limit_w = max_width or src_w
    limit_h = max_height or src_h
    scale = min(limit_h / src_h, limit_w / src_w)
    target = int(round(src_w * scale)), int(round(src_h * scale))
    return cv.resize(image, target, interpolation=cv.INTER_LINEAR)
def get_scale_pad(img_shape, new_shape, rect=True, stride=32, only_pad=False):
    """Compute the resize ratio, the unpadded resized size and the per-side
    padding needed to letterbox *img_shape* into *new_shape*.

    :param img_shape: Tuple[W, H] of the source image
    :param new_shape: Union[int, Tuple[W, H]] target canvas
    :param rect: True: rectangle (pad only up to a multiple of *stride*), False: full square padding
    :param stride: pad granularity used in rect mode
    :param only_pad: keep the source size (ratio 1) and only pad
    :return: ratio: float, new_unpad: Tuple[W, H], (pad_w, pad_h)
    """
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    if only_pad:
        ratio = 1
    else:
        # Scale ratio (new / old): the tighter side wins so aspect is preserved.
        ratio = min(new_shape[0] / img_shape[0], new_shape[1] / img_shape[1])
    new_unpad = tuple(int(round(side * ratio)) for side in img_shape)  # new image unpad shape
    pad_w = new_shape[0] - new_unpad[0]
    pad_h = new_shape[1] - new_unpad[1]
    if rect:  # detect-style rectangle: only pad up to a multiple of stride
        pad_w %= stride
        pad_h %= stride
    # divide the padding evenly between the two sides
    return ratio, new_unpad, (pad_w / 2, pad_h / 2)
def resize_pad(img, new_shape=640, rect=True, stride=32, only_pad=False, fill_value=114):
    """Resize *img* to *new_shape* and pad the border (yolov5-style letterbox;
    copy from official yolov5 letterbox()).

    :param img: ndarray[H, W, C]
    :param new_shape: Union[int, Tuple[W, H]]
    :param rect: bool. pad only up to a multiple of *stride* instead of the full square
    :param stride: int
    :param only_pad: do not resize, only pad
    :param fill_value: border color (a scalar is expanded to 3 channels)
    :return: img: ndarray[H, W, C], ratio: float, pad: Tuple[W, H]
    """
    if isinstance(fill_value, (int, float)):
        fill_value = (fill_value, fill_value, fill_value)
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    src_shape = img.shape[1], img.shape[0]  # Tuple[W, H]
    ratio, new_unpad, (pad_w, pad_h) = get_scale_pad(src_shape, new_shape, rect, stride, only_pad)
    if ratio != 1:  # resize before padding
        img = cv.resize(img, new_unpad, interpolation=cv.INTER_LINEAR)
    # The +-0.1 nudge splits a half-pixel pad as (side, side+1) instead of rounding both up.
    top = int(round(pad_h - 0.1))
    bottom = int(round(pad_h + 0.1))
    left = int(round(pad_w - 0.1))
    right = int(round(pad_w + 0.1))
    img = cv.copyMakeBorder(img, top, bottom, left, right, cv.BORDER_CONSTANT, value=fill_value)  # add border(grey)
    return img, ratio, (pad_w, pad_h)  # processed image, ratio, padding in pixels
def random_perspective(img, degrees=10, translate=.1, scale=.1, shear=10, perspective=0, fill_value=114):
    """torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))

    Apply a random affine/perspective warp built as T @ S @ R @ P @ C.

    :param img: ndarray[H, W, C]. BGR
    :param degrees: max rotation (deg)
    :param translate: max relative translation
    :param scale: max relative scaling
    :param shear: max shear (deg)
    :param perspective: max perspective coefficient
    :param fill_value: border fill (scalar is expanded to 3 channels)
    :return: ndarray[H, W, C]. BGR
    """
    #
    fill_value = (fill_value, fill_value, fill_value) if isinstance(fill_value, (int, float)) else fill_value
    height, width = img.shape[:2]
    # Center: move the image center to the origin so rotation/shear act around it.
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)
    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)
    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)
    # Translation: the 0.5 +- translate range re-centers the image in the output.
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)
    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv.warpPerspective(img, M, dsize=(width, height), flags=cv.INTER_LINEAR,
                                     borderValue=fill_value)
        else:  # affine: a full perspective warp is unnecessary, use the 2x3 part
            img = cv.warpAffine(img, M[:2], dsize=(width, height), flags=cv.INTER_LINEAR,
                                borderValue=fill_value)
    return img
def random_crop(image, scale_range, fill_value=114):
    """Crop a random sub-region of *image* and paste it centered on a
    fill_value canvas of the original size.

    :param image: ndarray[H, W, C]. BGR
    :param scale_range: (hw_scale_min, hw_scale_max) fraction of H/W to keep
    :param fill_value: value filling the uncovered border
    :return: ndarray[H, W, C]. BGR
    """
    src_h, src_w = image.shape[:2]
    # Draw crop height, crop width, then the source position (this exact call
    # order keeps results reproducible under a fixed random seed).
    crop_h = int(random.uniform(scale_range[0], scale_range[1]) * src_h)
    crop_w = int(random.uniform(scale_range[0], scale_range[1]) * src_w)
    src_left = int(random.uniform(0, src_w - crop_w))
    src_top = int(random.uniform(0, src_h - crop_h))
    # Paste location: centered in the output canvas.
    dst_left = (src_w - crop_w) // 2
    dst_top = (src_h - crop_h) // 2
    canvas = np.full_like(image, fill_value=fill_value)
    canvas[dst_top:dst_top + crop_h, dst_left:dst_left + crop_w] = \
        image[src_top:src_top + crop_h, src_left:src_left + crop_w]
    return canvas
def augment_hsv(img, h=0.015, s=0.7, v=0.4):
    """Randomly jitter hue / saturation / value of a BGR image.

    :param img: ndarray[H, W, C]. BGR
    :param h: max relative hue gain
    :param s: max relative saturation gain
    :param v: max relative value (brightness) gain
    :return: ndarray[H, W, C]. BGR
    """
    gains = np.random.uniform(-1, 1, 3) * [h, s, v] + 1  # random gains
    hue, sat, val = cv.split(cv.cvtColor(img, cv.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8
    # Build one 256-entry lookup table per channel instead of scaling every pixel.
    base = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((base * gains[0]) % 180).astype(dtype)  # OpenCV hue range wraps at 180
    lut_sat = np.clip(base * gains[1], 0, 255).astype(dtype)
    lut_val = np.clip(base * gains[2], 0, 255).astype(dtype)
    remapped = (cv.LUT(hue, lut_hue), cv.LUT(sat, lut_sat), cv.LUT(val, lut_val))
    img_hsv = cv.merge(remapped).astype(dtype)
    return cv.cvtColor(img_hsv, cv.COLOR_HSV2BGR)  # no return needed
def draw_box(image, box, color):
    """Draw a box on a given image.

    :param image: shape(H, W, C) BGR. modified in place
    :param box: len(4), (ltrb)
    :param color: len(3). BGR
    """
    image = np.asarray(image, np.uint8)
    # BUGFIX: the deprecated alias np.int was removed in NumPy 1.24; use the
    # explicit np.int32 (same semantics for pixel coordinates).
    box = np.asarray(box, dtype=np.int32)
    cv.rectangle(image, (box[0], box[1]), (box[2], box[3]), color, 2, cv.LINE_4)
def draw_text(image, box, text, rect_color):
    """Draw text above the box of the image (inside a filled label background).

    :param image: shape(H, W, C) BGR. modified in place
    :param box: len(4), (ltrb)
    :param text: str
    :param rect_color: BGR
    """
    image = np.asarray(image, np.uint8)
    # BUGFIX: the deprecated alias np.int was removed in NumPy 1.24; use the
    # explicit np.int32 (same semantics for pixel coordinates).
    box = np.asarray(box, dtype=np.int32)
    cv.rectangle(image, (box[0] - 1, box[1] - 16), (box[0] + len(text) * 9, box[1]), rect_color, -1, cv.LINE_4)
    cv.putText(image, text, (box[0], box[1] - 4), cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 0), 1, cv.LINE_8)
def draw_target_in_image(image, boxes, labels, scores, color=(0, 252, 124)):
    """Draw detection boxes and their label texts on *image*.

    :param image: ndarray[H, W, C]. BGR. not const
    :param boxes: ndarray[X, 4]. ltrb, not normalized
    :param labels: List[str]. Len[X]. may be None
    :param scores: ndarray[X]. sorted descending. may be None
    :param color: List -> tuple(B, G, R)  # [0, 256).
    :return: None
    """
    boxes = np.round(boxes).astype(np.int32)
    # draw boxes first, texts second (below, in reverse order): the
    # highest-score text is written last so it is not covered by later boxes.
    for box in boxes:
        draw_box(image, box, color=color)
    if labels is None:
        return
    if scores is None:
        # BUGFIX: labels is documented as List[str], which has no .shape
        # attribute; len() works for both lists and ndarrays.
        scores = [None] * len(labels)
    for box, label, score in reversed(list(zip(boxes, labels, scores))):
        text = "%s %.2f" % (label, score) if score else "%s" % label
        draw_text(image, box, text, color)
| [
"cv2.rectangle",
"numpy.clip",
"numpy.fromfile",
"cv2.warpPerspective",
"cv2.imdecode",
"numpy.arange",
"numpy.full_like",
"numpy.asarray",
"cv2.LUT",
"numpy.round",
"numpy.eye",
"random.uniform",
"cv2.warpAffine",
"cv2.putText",
"cv2.cvtColor",
"cv2.getRotationMatrix2D",
"cv2.resize... | [((459, 496), 'numpy.fromfile', 'np.fromfile', (['filename'], {'dtype': 'np.uint8'}), '(filename, dtype=np.uint8)\n', (470, 496), True, 'import numpy as np\n'), ((508, 528), 'cv2.imdecode', 'cv.imdecode', (['arr', '(-1)'], {}), '(arr, -1)\n', (519, 528), True, 'import cv2 as cv\n'), ((741, 756), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (751, 756), True, 'import numpy as np\n'), ((1367, 1387), 'PIL.Image.fromarray', 'Image.fromarray', (['arr'], {}), '(arr)\n', (1382, 1387), False, 'from PIL import Image\n'), ((1921, 1979), 'cv2.resize', 'cv.resize', (['image', 'new_shape'], {'interpolation': 'cv.INTER_LINEAR'}), '(image, new_shape, interpolation=cv.INTER_LINEAR)\n', (1930, 1979), True, 'import cv2 as cv\n'), ((3963, 4054), 'cv2.copyMakeBorder', 'cv.copyMakeBorder', (['img', 'top', 'bottom', 'left', 'right', 'cv.BORDER_CONSTANT'], {'value': 'fill_value'}), '(img, top, bottom, left, right, cv.BORDER_CONSTANT, value=\n fill_value)\n', (3980, 4054), True, 'import cv2 as cv\n'), ((4729, 4738), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4735, 4738), True, 'import numpy as np\n'), ((4885, 4894), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4891, 4894), True, 'import numpy as np\n'), ((4909, 4950), 'random.uniform', 'random.uniform', (['(-perspective)', 'perspective'], {}), '(-perspective, perspective)\n', (4923, 4950), False, 'import random\n'), ((4992, 5033), 'random.uniform', 'random.uniform', (['(-perspective)', 'perspective'], {}), '(-perspective, perspective)\n', (5006, 5033), False, 'import random\n'), ((5102, 5111), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5108, 5111), True, 'import numpy as np\n'), ((5120, 5153), 'random.uniform', 'random.uniform', (['(-degrees)', 'degrees'], {}), '(-degrees, degrees)\n', (5134, 5153), False, 'import random\n'), ((5249, 5285), 'random.uniform', 'random.uniform', (['(1 - scale)', '(1 + scale)'], {}), '(1 - scale, 1 + scale)\n', (5263, 5285), False, 'import random\n'), ((5343, 5398), 
'cv2.getRotationMatrix2D', 'cv.getRotationMatrix2D', ([], {'angle': 'a', 'center': '(0, 0)', 'scale': 's'}), '(angle=a, center=(0, 0), scale=s)\n', (5365, 5398), True, 'import cv2 as cv\n'), ((5423, 5432), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5429, 5432), True, 'import numpy as np\n'), ((5637, 5646), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5643, 5646), True, 'import numpy as np\n'), ((6876, 6918), 'numpy.full_like', 'np.full_like', (['image'], {'fill_value': 'fill_value'}), '(image, fill_value=fill_value)\n', (6888, 6918), True, 'import numpy as np\n'), ((7352, 7385), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {'dtype': 'np.int16'}), '(0, 256, dtype=np.int16)\n', (7361, 7385), True, 'import numpy as np\n'), ((7657, 7695), 'cv2.cvtColor', 'cv.cvtColor', (['img_hsv', 'cv.COLOR_HSV2BGR'], {}), '(img_hsv, cv.COLOR_HSV2BGR)\n', (7668, 7695), True, 'import cv2 as cv\n'), ((7939, 7966), 'numpy.asarray', 'np.asarray', (['image', 'np.uint8'], {}), '(image, np.uint8)\n', (7949, 7966), True, 'import numpy as np\n'), ((7977, 8006), 'numpy.asarray', 'np.asarray', (['box'], {'dtype': 'np.int'}), '(box, dtype=np.int)\n', (7987, 8006), True, 'import numpy as np\n'), ((8011, 8087), 'cv2.rectangle', 'cv.rectangle', (['image', '(box[0], box[1])', '(box[2], box[3])', 'color', '(2)', 'cv.LINE_4'], {}), '(image, (box[0], box[1]), (box[2], box[3]), color, 2, cv.LINE_4)\n', (8023, 8087), True, 'import cv2 as cv\n'), ((8334, 8361), 'numpy.asarray', 'np.asarray', (['image', 'np.uint8'], {}), '(image, np.uint8)\n', (8344, 8361), True, 'import numpy as np\n'), ((8372, 8401), 'numpy.asarray', 'np.asarray', (['box'], {'dtype': 'np.int'}), '(box, dtype=np.int)\n', (8382, 8401), True, 'import numpy as np\n'), ((8518, 8621), 'cv2.putText', 'cv.putText', (['image', 'text', '(box[0], box[1] - 4)', 'cv.FONT_HERSHEY_DUPLEX', '(0.5)', '(0, 0, 0)', '(1)', 'cv.LINE_8'], {}), '(image, text, (box[0], box[1] - 4), cv.FONT_HERSHEY_DUPLEX, 0.5,\n (0, 0, 0), 1, cv.LINE_8)\n', (8528, 
8621), True, 'import cv2 as cv\n'), ((793, 827), 'cv2.cvtColor', 'cv.cvtColor', (['arr', 'cv.COLOR_RGB2BGR'], {}), '(arr, cv.COLOR_RGB2BGR)\n', (804, 827), True, 'import cv2 as cv\n'), ((3748, 3804), 'cv2.resize', 'cv.resize', (['img', 'new_unpad'], {'interpolation': 'cv.INTER_LINEAR'}), '(img, new_unpad, interpolation=cv.INTER_LINEAR)\n', (3757, 3804), True, 'import cv2 as cv\n'), ((5661, 5709), 'random.uniform', 'random.uniform', (['(0.5 - translate)', '(0.5 + translate)'], {}), '(0.5 - translate, 0.5 + translate)\n', (5675, 5709), False, 'import random\n'), ((5758, 5806), 'random.uniform', 'random.uniform', (['(0.5 - translate)', '(0.5 + translate)'], {}), '(0.5 - translate, 0.5 + translate)\n', (5772, 5806), False, 'import random\n'), ((7276, 7310), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2HSV'], {}), '(img, cv.COLOR_BGR2HSV)\n', (7287, 7310), True, 'import cv2 as cv\n'), ((867, 903), 'cv2.cvtColor', 'cv.cvtColor', (['arr', 'cv.COLOR_RGBA2BGRA'], {}), '(arr, cv.COLOR_RGBA2BGRA)\n', (878, 903), True, 'import cv2 as cv\n'), ((1255, 1289), 'cv2.cvtColor', 'cv.cvtColor', (['arr', 'cv.COLOR_BGR2RGB'], {}), '(arr, cv.COLOR_BGR2RGB)\n', (1266, 1289), True, 'import cv2 as cv\n'), ((1319, 1355), 'cv2.cvtColor', 'cv.cvtColor', (['arr', 'cv.COLOR_BGRA2RGBA'], {}), '(arr, cv.COLOR_BGRA2RGBA)\n', (1330, 1355), True, 'import cv2 as cv\n'), ((6042, 6142), 'cv2.warpPerspective', 'cv.warpPerspective', (['img', 'M'], {'dsize': '(width, height)', 'flags': 'cv.INTER_LINEAR', 'borderValue': 'fill_value'}), '(img, M, dsize=(width, height), flags=cv.INTER_LINEAR,\n borderValue=fill_value)\n', (6060, 6142), True, 'import cv2 as cv\n'), ((6218, 6317), 'cv2.warpAffine', 'cv.warpAffine', (['img', 'M[:2]'], {'dsize': '(width, height)', 'flags': 'cv.INTER_LINEAR', 'borderValue': 'fill_value'}), '(img, M[:2], dsize=(width, height), flags=cv.INTER_LINEAR,\n borderValue=fill_value)\n', (6231, 6317), True, 'import cv2 as cv\n'), ((6615, 6661), 'random.uniform', 'random.uniform', 
(['scale_range[0]', 'scale_range[1]'], {}), '(scale_range[0], scale_range[1])\n', (6629, 6661), False, 'import random\n'), ((6680, 6726), 'random.uniform', 'random.uniform', (['scale_range[0]', 'scale_range[1]'], {}), '(scale_range[0], scale_range[1])\n', (6694, 6726), False, 'import random\n'), ((6755, 6780), 'random.uniform', 'random.uniform', (['(0)', '(w0 - w)'], {}), '(0, w0 - w)\n', (6769, 6780), False, 'import random\n'), ((6787, 6812), 'random.uniform', 'random.uniform', (['(0)', '(h0 - h)'], {}), '(0, h0 - h)\n', (6801, 6812), False, 'import random\n'), ((7187, 7214), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (7204, 7214), True, 'import numpy as np\n'), ((7447, 7472), 'numpy.clip', 'np.clip', (['(x * r[1])', '(0)', '(255)'], {}), '(x * r[1], 0, 255)\n', (7454, 7472), True, 'import numpy as np\n'), ((7501, 7526), 'numpy.clip', 'np.clip', (['(x * r[2])', '(0)', '(255)'], {}), '(x * r[2], 0, 255)\n', (7508, 7526), True, 'import numpy as np\n'), ((9009, 9024), 'numpy.round', 'np.round', (['boxes'], {}), '(boxes)\n', (9017, 9024), True, 'import numpy as np\n'), ((5456, 5485), 'random.uniform', 'random.uniform', (['(-shear)', 'shear'], {}), '(-shear, shear)\n', (5470, 5485), False, 'import random\n'), ((5543, 5572), 'random.uniform', 'random.uniform', (['(-shear)', 'shear'], {}), '(-shear, shear)\n', (5557, 5572), False, 'import random\n'), ((5965, 5974), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5971, 5974), True, 'import numpy as np\n'), ((7566, 7586), 'cv2.LUT', 'cv.LUT', (['hue', 'lut_hue'], {}), '(hue, lut_hue)\n', (7572, 7586), True, 'import cv2 as cv\n'), ((7588, 7608), 'cv2.LUT', 'cv.LUT', (['sat', 'lut_sat'], {}), '(sat, lut_sat)\n', (7594, 7608), True, 'import cv2 as cv\n'), ((7610, 7630), 'cv2.LUT', 'cv.LUT', (['val', 'lut_val'], {}), '(val, lut_val)\n', (7616, 7630), True, 'import cv2 as cv\n')] |
import numpy as np
from tqdm.notebook import tqdm
import nltk
import pandas as pd
def calculate_path_weight(Graph, path):
    """Sum the edge weights along *path* (a sequence of node tuples, first
    element used as the node label) looked up in Graph.Edges."""
    edges = Graph.Edges
    total = 0
    # Walk consecutive node pairs and add each connecting edge's weight.
    for prev, cur in zip(path, path[1:]):
        mask = (edges['in'] == prev[0]) & (edges.out == cur[0])
        total += edges[mask].weight.values[0]
    return total
def calculate_cycle_density(Graph, cycle):
    """Sum sqrt(out_degree + in_degree) over the nodes of *cycle*
    (node tuples, first element used as the node label)."""
    return sum(np.sqrt(Graph.Graph.out_degree(node[0]) + Graph.Graph.in_degree(node[0]))
               for node in cycle)
def calculate_path_density(Graph, path):
    """Sum sqrt(total degree) over the nodes of *path* (node tuples).

    When a degree call does not return an int (presumably a networkx degree
    view for an absent node -- TODO confirm), fall back to the other direction
    alone, mirroring the original branch order.
    """
    density = 0
    for node in path:
        deg_out = Graph.Graph.out_degree(node[0])
        deg_in = Graph.Graph.in_degree(node[0])
        if type(deg_out) != int:
            density += np.sqrt(deg_in)
        elif type(deg_in) != int:
            density += np.sqrt(deg_out)
        else:
            density += np.sqrt(deg_out + deg_in)
    return density
class ChromaticRandomWalker:
    """
    A class used to transform a corpus given as a numpy array into a graph form of the
    2-gram representation.
    ...
    Methods
    ----------
    generate : return a randomly generated sentence based on given arguments
    """
    def __init__(self, Graph):
        """
        Arguments
        ----------
        Graph : BiGramGraph the bigram graph based on which sentences will be generated
        """
        self.Graph = Graph
        # Chromatic number of the graph: upper bound for the random color vector.
        self.max_xi = Graph.get_Xi()
    def __repr__(self):
        # Delegate the representation to the underlying bigram graph.
        return self.Graph.__repr__()
    def generate_chromatic_vector(self, max_xi, size):
        """Draw `size` color indices in [0, max_xi) with no two consecutive
        repeats and store them in self.Random_Chromatic_Vec."""
        chromatic_nums = list(range(max_xi))
        last_num = -1
        chrom_vec = []
        for i in range(size):
            # Beta(1.5, 1.5)-distributed index scaled onto the color range.
            index = np.floor((np.random.beta(1.5, 1.5, 1) * max_xi)[0])
            cur_choice = chromatic_nums[int(index)]
            while cur_choice == last_num:
                # NOTE(review): the retry draws from Beta(6, 2), not
                # Beta(1.5, 1.5) -- presumably intentional to bias away from
                # the repeated color; confirm.
                index = np.floor((np.random.beta(6, 2, 1) * max_xi)[0])
                cur_choice = chromatic_nums[int(index)]
            if cur_choice != last_num:
                last_num = cur_choice
                chrom_vec.append(cur_choice)
            else:
                continue
        self.Random_Chromatic_Vec = chrom_vec
    def calculate_path_weight(self, path):
        """Sum the edge weights along `path` (a sequence of bare node labels,
        unlike the module-level helper which expects node tuples)."""
        weight = 0
        start = path[0]
        for i in path[1:]:
            weight += self.Graph.Edges[(self.Graph.Edges['in'] == start) & (self.Graph.Edges.out == i)].weight.values[0]
            start = i
        return weight
    def generate(self, method='heaviest', vec_size=5, depth=10):
        """
        Arguments
        ----------
        method : str
            the protocol of path scoring via which the walker will choose its course available methods include:
            1) heaviest -> the max weighted path
            2) lightest -> the min weighted path
            3) density_max -> the max density path
            4) density_min -> the min density path
        vec_size : int
            the size of the randomly generated chromatic vector
        depth : int
            the maximum depth of search the walker will consider when choosing its next step
        """
        self.vec_size = vec_size
        self.generate_chromatic_vector(self.max_xi, self.vec_size)
        result = ' '
        # Start from a random word of the first color in the chromatic vector.
        first_word = self.Graph.Data[self.Graph.Data.color == self.Random_Chromatic_Vec[0]].sample(1).word.values[0]
        for n in tqdm(self.Random_Chromatic_Vec[1:]):
            # Calculate Best Path
            paths = []
            # Sample up to `depth` candidate target words of the next color.
            targets = self.Graph.Data[self.Graph.Data.color == n]
            targets = targets.sample(depth if len(targets) >= depth else len(targets))
            for target in tqdm(targets.word, leave=False):
                gen = self.Graph.get_Shortest_Simple_Path(first_word, target)
                paths.append(next(gen))
            weights = np.array([self.calculate_path_weight(i) for i in paths])
            # Pick the candidate path according to the scoring protocol; the
            # chosen target becomes the start word for the next hop.
            if method == 'heaviest':
                best_walk = paths[np.argmax(weights)]
                first_word = targets.word.values[np.argmax(weights)]
            elif method == 'lightest':
                best_walk = paths[np.argmin(weights)]
                first_word = targets.word.values[np.argmin(weights)]
            elif method == 'density_max':
                weights = [calculate_path_density(self.Graph, nltk.ngrams(i, 2)) for i in paths]
                best_walk = paths[np.argmax(weights)]
                first_word = targets.word.values[np.argmax(weights)]
            elif method == 'density_min':
                weights = [calculate_path_density(self.Graph, nltk.ngrams(i, 2)) for i in paths]
                best_walk = paths[np.argmin(weights)]
                first_word = targets.word.values[np.argmin(weights)]
            del weights
            # Drop the last node: it is re-emitted as the start of the next hop.
            result += ' '.join(best_walk[:-1]) + ' '
        return result
def chromatic_distance(graph_1, graph_2):
    """
    Args
    ----------
    graph_1 : BiGramGraph
        the first graph to be compared against
    graph_2 : BiGramGraph
        the second graph to be compared against
    Returns the psi similarity coefficient as presented in the paper
    return: int : psi similarity coefficient
    """
    if 'pos' not in graph_1.Data.columns or 'pos' not in graph_2.Data.columns:
        raise NotImplementedError('Please Calculate PartofSpeech for Each Graph')
    # BUGFIX: pandas >= 1.0 rejects a set as a .loc indexer, so materialise the
    # word intersection as a (sorted, hence deterministic) list up front.
    overlapping_words = sorted(set(graph_1.Data['word']) & set(graph_2.Data['word']))
    I = len(overlapping_words)
    chrom_ds = pd.DataFrame(index=overlapping_words)
    chrom_ds['chrom1'] = graph_1.Data.set_index('word').loc[overlapping_words].color
    chrom_ds['chrom2'] = graph_2.Data.set_index('word').loc[overlapping_words].color
    # Keep only words whose chromatic number agrees in both graphs
    # (mean == first element <=> both values equal); .iloc avoids the
    # deprecated positional Series[0] lookup.
    same_chrom_num = chrom_ds.apply(lambda x: np.mean(x) == x.iloc[0], axis=1)
    chrom_ds = chrom_ds[same_chrom_num].rename(columns={'chrom1': 'color'}).drop(columns=['chrom2'])
    # Epsilon
    E = 0
    chrom_ds['weight1'] = chrom_ds.index.to_series().apply(lambda x: graph_1.Graph.degree(x))
    # graph_1.Data.set_index('word').loc[overlapping_words].pos
    chrom_ds['weight2'] = chrom_ds.index.to_series().apply(lambda x: graph_2.Graph.degree(x))
    same_weight = chrom_ds[['weight1', 'weight2']].apply(lambda x: np.mean(x) == x.iloc[0], axis=1)
    same_weight = chrom_ds[same_weight]
    # NOTE(review): E, ICW and the weight comparison above are computed but do
    # not affect the returned value; kept for parity with the original code.
    ICW = len(same_weight)
    IC = len(chrom_ds)
    # psi = |same-color overlap| / |overlap|
    return IC / I
| [
"numpy.mean",
"numpy.sqrt",
"numpy.random.beta",
"numpy.argmax",
"numpy.argmin",
"tqdm.notebook.tqdm",
"nltk.ngrams"
] | [((3328, 3363), 'tqdm.notebook.tqdm', 'tqdm', (['self.Random_Chromatic_Vec[1:]'], {}), '(self.Random_Chromatic_Vec[1:])\n', (3332, 3363), False, 'from tqdm.notebook import tqdm\n'), ((721, 733), 'numpy.sqrt', 'np.sqrt', (['OUT'], {}), '(OUT)\n', (728, 733), True, 'import numpy as np\n'), ((3601, 3632), 'tqdm.notebook.tqdm', 'tqdm', (['targets.word'], {'leave': '(False)'}), '(targets.word, leave=False)\n', (3605, 3632), False, 'from tqdm.notebook import tqdm\n'), ((787, 798), 'numpy.sqrt', 'np.sqrt', (['IN'], {}), '(IN)\n', (794, 798), True, 'import numpy as np\n'), ((835, 852), 'numpy.sqrt', 'np.sqrt', (['(IN + OUT)'], {}), '(IN + OUT)\n', (842, 852), True, 'import numpy as np\n'), ((5744, 5754), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (5751, 5754), True, 'import numpy as np\n'), ((6293, 6303), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (6300, 6303), True, 'import numpy as np\n'), ((3902, 3920), 'numpy.argmax', 'np.argmax', (['weights'], {}), '(weights)\n', (3911, 3920), True, 'import numpy as np\n'), ((3971, 3989), 'numpy.argmax', 'np.argmax', (['weights'], {}), '(weights)\n', (3980, 3989), True, 'import numpy as np\n'), ((1656, 1683), 'numpy.random.beta', 'np.random.beta', (['(1.5)', '(1.5)', '(1)'], {}), '(1.5, 1.5, 1)\n', (1670, 1683), True, 'import numpy as np\n'), ((4064, 4082), 'numpy.argmin', 'np.argmin', (['weights'], {}), '(weights)\n', (4073, 4082), True, 'import numpy as np\n'), ((4133, 4151), 'numpy.argmin', 'np.argmin', (['weights'], {}), '(weights)\n', (4142, 4151), True, 'import numpy as np\n'), ((1826, 1849), 'numpy.random.beta', 'np.random.beta', (['(6)', '(2)', '(1)'], {}), '(6, 2, 1)\n', (1840, 1849), True, 'import numpy as np\n'), ((4326, 4344), 'numpy.argmax', 'np.argmax', (['weights'], {}), '(weights)\n', (4335, 4344), True, 'import numpy as np\n'), ((4395, 4413), 'numpy.argmax', 'np.argmax', (['weights'], {}), '(weights)\n', (4404, 4413), True, 'import numpy as np\n'), ((4257, 4274), 'nltk.ngrams', 'nltk.ngrams', (['i', 
'(2)'], {}), '(i, 2)\n', (4268, 4274), False, 'import nltk\n'), ((4588, 4606), 'numpy.argmin', 'np.argmin', (['weights'], {}), '(weights)\n', (4597, 4606), True, 'import numpy as np\n'), ((4657, 4675), 'numpy.argmin', 'np.argmin', (['weights'], {}), '(weights)\n', (4666, 4675), True, 'import numpy as np\n'), ((4519, 4536), 'nltk.ngrams', 'nltk.ngrams', (['i', '(2)'], {}), '(i, 2)\n', (4530, 4536), False, 'import nltk\n')] |
# coding: utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import os
import numpy as np
import pandas as pd
from scipy.io import loadmat
# Resolve all data paths relative to this module's directory.
BASE_DIR = os.path.dirname(__file__)
# Raw IMDB-WIKI metadata in MATLAB .mat format.
WIKI_MAT_PATH = os.path.join(BASE_DIR, 'data/wiki.mat')
# Flattened per-photo metadata, written by mat_data_to_csv().
WIKI_CSV_PATH = os.path.join(BASE_DIR, 'data/wiki.csv')
# (name, gender) pairs extracted for gender-classification testing.
WIKI_GENDER_TEST_CSV_PATH = os.path.join(BASE_DIR, 'data/wiki_gender_test.csv')
def mat_data_to_csv():
    """Flatten the IMDB-WIKI 'wiki' MATLAB struct into two CSV files.

    Writes WIKI_CSV_PATH with one row per photo, then
    WIKI_GENDER_TEST_CSV_PATH with (name, gender) pairs for rows that have
    both a non-empty name and a definite 0/1 gender label.
    """
    mat_data = loadmat(WIKI_MAT_PATH)
    # Column names for the flattened table, in the same order as the
    # struct fields unpacked below.
    fields = [
        'dob',
        'photo_taken',
        'full_path',
        'gender',
        'name',
        'face_location',
        'face_score',
        'second_face_score',
    ]
    wiki_data = mat_data['wiki']
    # wiki_data[0][0][k][0] is the k-th field of the (1x1) MATLAB struct;
    # string-valued fields come back as per-element arrays and are unwrapped.
    array_data = [
        wiki_data[0][0][0][0],
        wiki_data[0][0][1][0],
        [i[0] for i in wiki_data[0][0][2][0]],
        wiki_data[0][0][3][0],
        [i[0] if i.size > 0 else '' for i in wiki_data[0][0][4][0]],  # missing names -> ''
        wiki_data[0][0][5][0],
        wiki_data[0][0][6][0],
        wiki_data[0][0][7][0],
    ]
    # Stack the fields as rows, then transpose so each photo is one row.
    np_data = np.vstack(array_data).T
    df = pd.DataFrame(np_data)
    df.columns = fields
    df.to_csv(WIKI_CSV_PATH, index=False)
    # gender test
    items = []
    for index, row in df.iterrows():
        # Keep only rows with a name and a 0/1 gender; rows with a missing
        # (NaN) gender fail the membership test and are skipped.
        if row['name'] and row['gender'] in [0, 1]:
            items.append(dict(name=row['name'], gender='male' if row['gender'] == 1 else 'female'))
    pd.DataFrame.from_records(items).to_csv(WIKI_GENDER_TEST_CSV_PATH, index=False)
if __name__ == '__main__':
    # Run the full .mat -> CSV conversion when executed as a script.
    mat_data_to_csv()
| [
"pandas.DataFrame.from_records",
"scipy.io.loadmat",
"os.path.join",
"os.path.dirname",
"numpy.vstack",
"pandas.DataFrame"
] | [((191, 216), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (206, 216), False, 'import os\n'), ((233, 272), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""data/wiki.mat"""'], {}), "(BASE_DIR, 'data/wiki.mat')\n", (245, 272), False, 'import os\n'), ((289, 328), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""data/wiki.csv"""'], {}), "(BASE_DIR, 'data/wiki.csv')\n", (301, 328), False, 'import os\n'), ((357, 408), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""data/wiki_gender_test.csv"""'], {}), "(BASE_DIR, 'data/wiki_gender_test.csv')\n", (369, 408), False, 'import os\n'), ((449, 471), 'scipy.io.loadmat', 'loadmat', (['WIKI_MAT_PATH'], {}), '(WIKI_MAT_PATH)\n', (456, 471), False, 'from scipy.io import loadmat\n'), ((1071, 1092), 'pandas.DataFrame', 'pd.DataFrame', (['np_data'], {}), '(np_data)\n', (1083, 1092), True, 'import pandas as pd\n'), ((1038, 1059), 'numpy.vstack', 'np.vstack', (['array_data'], {}), '(array_data)\n', (1047, 1059), True, 'import numpy as np\n'), ((1386, 1418), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['items'], {}), '(items)\n', (1411, 1418), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 00:21:22 2020
@author: lankuohsing
"""
"""
https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer.fit_transform
https://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction
"""
import math
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
import json
import string
from collections import defaultdict
# In[]
# In[]
"""" sklearn implementation """
def tf_idf_sklearn(corpus):
    """Vectorize *corpus* with scikit-learn's TfidfVectorizer.

    Dumps the learned word -> idf mapping to ./models/dict_word_idf.json
    and returns the dense TF-IDF matrix (rows L2-normalized by default).
    """
    tfidf = TfidfVectorizer()
    sparse_tfidf = tfidf.fit_transform(corpus)  # L2 regularization by default
    dense_tfidf = sparse_tfidf.todense()  # sentence embeddings of corpus
    # NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2
    # in favor of get_feature_names_out(); kept as-is for the version this
    # script was written against.
    print(tfidf.get_feature_names())
    print(tfidf.vocabulary_)
    print(tfidf.idf_)
    print(dense_tfidf)
    # Pair every vocabulary word with its learned inverse document frequency.
    word_to_idf = {word: tfidf.idf_[idx] for word, idx in tfidf.vocabulary_.items()}
    with open('./models/dict_word_idf.json', 'w') as fp:
        json.dump(word_to_idf, fp)
    return dense_tfidf
# In[]
""" implementation by numpy and math """
def tf_idf_numpy(corpus):
    """Compute a smoothed, L2-row-normalized TF-IDF matrix with numpy.

    Mirrors scikit-learn's TfidfVectorizer defaults:
    idf(w) = ln((1 + N) / (1 + df(w))) + 1, with each row scaled to unit
    Euclidean norm.

    Parameters
    ----------
    corpus : list[str]
        Sentences whose words are separated by single spaces.

    Returns
    -------
    numpy.ndarray of shape (len(corpus), vocabulary size); columns follow
    the sorted vocabulary order.
    """
    # One tokenizer for BOTH passes.  The original code chopped the last
    # character of each sentence in the idf pass (corrupting the final word
    # when a sentence had no trailing punctuation, and keeping internal
    # punctuation attached) while the tf pass stripped all punctuation --
    # the mismatch raised KeyError for tokens missing from the vocabulary.
    punct_table = str.maketrans('', '', string.punctuation)

    def _tokenize(sentence):
        # Lower-case, strip every punctuation character, drop empty tokens.
        return [w for w in sentence.lower().translate(punct_table).split(" ") if w]

    # Pass 1: vocabulary and document frequency of every word.
    doc_freq = defaultdict(int)
    vocab = set()
    for sentence in corpus:
        tokens = _tokenize(sentence)
        vocab.update(tokens)
        for word in set(tokens):  # count each word once per document
            doc_freq[word] += 1
    vocab = sorted(vocab)

    # Smoothed idf, identical to sklearn's smooth_idf=True formula.
    dict_word_idf = {w: math.log((1 + len(corpus)) / (1 + doc_freq[w])) + 1
                     for w in vocab}
    print(dict_word_idf)
    word_index = {w: i for i, w in enumerate(vocab)}

    # Pass 2: raw term counts scaled by idf.
    sentence_vector = np.zeros((len(corpus), len(vocab)))
    for row, sentence in enumerate(corpus):
        counts = defaultdict(int)
        for word in _tokenize(sentence):
            counts[word] += 1
        for word, tf in counts.items():
            sentence_vector[row][word_index[word]] = tf * dict_word_idf[word]

    # L2-normalize each row (sentence embeddings of the corpus).
    norms = np.sqrt(np.sum(np.square(sentence_vector), 1)).reshape(len(corpus), 1)
    return np.divide(sentence_vector, norms)
# In[]
if __name__=="__main__":
    # Toy corpus used to cross-check the two implementations.
    corpus = [
        'This is the first document.',
        'This document is the second document.',
        'And this is the third one.',
        'Is this the first document?',
    ]
    X_dense_1=tf_idf_sklearn(corpus)
    X_dense_2=tf_idf_numpy(corpus)
    # The two matrices should agree element-wise for this corpus; the
    # hand-rolled tokenizer only matches sklearn's default token pattern
    # for simple space-separated words like these.
    print(X_dense_1-X_dense_2)# should be all zeros
| [
"sklearn.feature_extraction.text.TfidfVectorizer",
"collections.defaultdict",
"json.dump",
"numpy.square"
] | [((596, 613), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (611, 613), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1282, 1298), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1293, 1298), False, 'from collections import defaultdict\n'), ((1117, 1147), 'json.dump', 'json.dump', (['dict_word_idf_sk', 'f'], {}), '(dict_word_idf_sk, f)\n', (1126, 1147), False, 'import json\n'), ((2650, 2676), 'numpy.square', 'np.square', (['sentence_vector'], {}), '(sentence_vector)\n', (2659, 2676), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import time
import glob
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
import models
from util.losses import CrossEntropyLoss, DeepSupervision, CrossEntropyLabelSmooth, TripletLossAlignedReID
from util import data_manager# 此为我们的data_manager工具类
from util import transforms as T
from util.dataset_loader import ImageDataset
from util.utils import AverageMeter, Logger, save_checkpoint
from util.eval_metrics import evaluate
from util.optimizers import init_optim
from util.save_json import SaveJson# 导入json转换类
from IPython import embed
def main():
    """Build the Market-1501 test pipeline, run evaluation, and dump the
    results (person count, CMC, mAP, elapsed time) to
    ./output/test_results.json.

    Returns 0 on completion.
    """
    batch_time_total = AverageMeter()
    start = time.time()
    # Use the GPU automatically whenever one is available.
    use_gpu = torch.cuda.is_available()
    # if args.use_cpu: use_gpu = False
    pin_memory = True if use_gpu else False
    # Dataset wrapper that resolves the query/gallery image paths.
    dataset = data_manager.Market1501(root='data')
    # data augmentation
    transform_test = T.Compose([
        # T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    # Loader over the query set.
    queryloader = DataLoader(
        # dataset.query comes from data_manager (self.query = query);
        # it is essentially a list of image paths.
        ImageDataset(dataset.query, transform=transform_test),
        batch_size=32, shuffle=False, num_workers=4,
        pin_memory=pin_memory, drop_last=False,
    )
    # Loader over the gallery set.
    galleryloader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform_test),
        batch_size=32, shuffle=False, num_workers=4,
        pin_memory=pin_memory, drop_last=False,
    )
    model = models.init_model(name='resnet50', num_classes=8, loss={'softmax', 'metric'},
                              aligned=True, use_gpu=use_gpu)
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))
    # Losses / optimizer / scheduler are set up but unused here -- this
    # entry point only evaluates; the training loop lives in train().
    criterion_class = CrossEntropyLoss(use_gpu=use_gpu)
    criterion_metric = TripletLossAlignedReID(margin=0.3)
    optimizer = init_optim('adam', model.parameters(), 0.0002, 0.0005)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=150, gamma=0.1)
    start_epoch = 0
    if use_gpu:
        model = nn.DataParallel(model).cuda()
    # embed()
    num,cmc,mAP = test(model, queryloader, galleryloader, use_gpu)
    end = time.time()
    time_stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # Bundle the evaluation results for the JSON report.
    item_to_json = {
        "time_stamp":time_stamp,
        "test_results": {
            "object_num":num,
            "cmc":cmc,
            "mAP":mAP,
            "time_consumption(s)":end - start
        }
    }
    path = "./output/" + "test_results" + ".json"
    s = SaveJson()
    s.save_file(path,item_to_json)
    # print("==> test time: {:.3f} s".format(end - start))
    print(" test time(s) | {:.3f}".format(end - start))
    print(" ------------------------------")
    print("")
    # print('------ testing finished ------')
    return 0
def train(epoch, model , criterion_class, criterion_metric, optimizer, trainloader, use_gpu):
    """Run one training epoch of AlignedReID (cross-entropy + global/local
    triplet losses) over *trainloader*.

    NOTE(review): this function reads the module-global ``args``
    (``args.htri_only``, ``args.print_freq``), which is never defined in
    this script -- calling train() as-is raises NameError.  It is also
    never invoked from main().
    """
    model.train()
    # Running averages for timing and each loss component.
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    xent_losses = AverageMeter()
    global_losses = AverageMeter()
    local_losses = AverageMeter()
    end = time.time()
    for batch_idx, (imgs, pids, _) in enumerate(trainloader):
        if use_gpu:
            imgs, pids = imgs.cuda(), pids.cuda()
        # measure data loading time
        data_time.update(time.time() - end)
        outputs, features, local_features = model(imgs)
        if args.htri_only:
            # Triplet (metric) losses only.
            # NOTE(review): this branch never assigns xent_loss, so the
            # ``loss = xent_loss + ...`` line below would raise NameError
            # whenever args.htri_only is true.
            if isinstance(features, tuple):
                global_loss, local_loss = DeepSupervision(criterion_metric, features, pids, local_features)
            else:
                global_loss, local_loss = criterion_metric(features, pids, local_features)
        else:
            # Cross-entropy plus both triplet losses.
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(criterion_class, outputs, pids)
            else:
                xent_loss = criterion_class(outputs, pids)
            if isinstance(features, tuple):
                global_loss, local_loss = DeepSupervision(criterion_metric, features, pids, local_features)
            else:
                global_loss, local_loss = criterion_metric(features, pids, local_features)
        loss = xent_loss + global_loss + local_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update(time.time() - end)
        end = time.time()
        losses.update(loss.item(), pids.size(0))
        xent_losses.update(xent_loss.item(), pids.size(0))
        global_losses.update(global_loss.item(), pids.size(0))
        local_losses.update(local_loss.item(), pids.size(0))
        if (batch_idx + 1) % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'CLoss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t'
                  'GLoss {global_loss.val:.4f} ({global_loss.avg:.4f})\t'
                  'LLoss {local_loss.val:.4f} ({local_loss.avg:.4f})\t'.format(
                epoch + 1, batch_idx + 1, len(trainloader), batch_time=batch_time, data_time=data_time,
                loss=losses, xent_loss=xent_losses, global_loss=global_losses, local_loss=local_losses))
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 8]):
    """Extract query/gallery features, build the pairwise distance matrix,
    count distinct persons across views, and evaluate CMC/mAP.

    Returns (num, cmc, mAP).

    NOTE(review): ``ranks`` is a mutable default argument (harmless here
    since it is never mutated, and in fact never read).
    """
    print('------start testing------')
    # NOTE(review): cmc1/cmc2 are never used.
    cmc1 = []
    cmc2 = []
    batch_time = AverageMeter()
    # try out the model.eval() method
    # embed()
    model.eval()
    with torch.no_grad():
        # Compute features for the query set.
        qf,lqf = [], []#qf:query feature lqf:local query feature
        i = 0
        # embed()
        for batch_idx, imgs in enumerate(queryloader):
            i += 1
            if use_gpu: imgs = imgs.cuda()
            end = time.time()
            # Extract image features with the model.
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            local_features = local_features.data.cpu()
            ### append the query features to the lists
            qf.append(features)
            lqf.append(local_features)
        # print("BarchSize:",i)
        # Concatenate the per-batch tensors vertically (dim 0).
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))
        # Compute features for the gallery set.
        gf,lgf = [], [],
        end = time.time()
        for batch_idx, imgs in enumerate(galleryloader):
            if use_gpu: imgs = imgs.cuda()
            end = time.time()
            # Extract image features with the resnet50 backbone.
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)
            features = features.data.cpu()
            local_features = local_features.data.cpu()
            gf.append(features)
            lgf.append(local_features)
        # (useful breakpoint to inspect gf)
        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf, 0)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
    # embed()
    # print("==> BatchTime(s) {:.3f}".format(batch_time.sum))
    # The key post-processing steps follow.
    # feature normlization (L2, with epsilon for numerical safety)
    qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) + 1e-12)
    # number of query rows / gallery rows
    m, n = qf.size(0), gf.size(0)
    # NOTE(review): the next line computes a value that is discarded (no-op).
    torch.pow(qf, 2).sum(dim=1, keepdim=True)
    # Global distance matrix: ||q||^2 + ||g||^2 - 2*q.g (squared Euclidean).
    # torch.pow(qf,2): element-wise square of the matrix
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # NOTE(review): positional (beta, alpha) arguments to addmm_ are
    # deprecated in modern PyTorch; keyword form is addmm_(qf, gf.t(),
    # beta=1, alpha=-2).
    distmat.addmm_(1, -2, qf, gf.t()) # matrix multiply
    distmat = distmat.numpy()
    # used for testing
    mm, nn = distmat.shape[0], distmat.shape[1]
    min = [1] * mm # the min array must have mm entries (shadows builtin min)
    num = 0
    for i in range(mm):
        for j in range(nn):
            if distmat[i][j] < min[i]:
                min[i] = distmat[i][j]
        # The distance threshold for deciding whether two objects are the
        # same person still needs tuning.
        if min[i] < 1:
            num += 1
    # print('pairwise similarities:\n',distmat)
    # print('person_num after multi-view recognition:', num)
    ### compute cmc and mAP below
    q_pids = process_dir("./data/market1501/view1")
    g_pids = process_dir("./data/market1501/view2")
    q_camids = [1] * mm
    g_camids = [1] * nn
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=False)
    # NOTE(review): shadows the builtin len for the rest of this function.
    len = max(mm,nn)
    # embed()
    x = np.linspace(1,len,len)
    # embed()
    # NOTE(review): the CMC computed by evaluate() above is thrown away and
    # replaced with this hardcoded list, which is both plotted and returned.
    cmc = [0.439, 0.43, 0.442, 0.448, 0.418]
    plt.title("CMC curve of test")
    plt.xlabel("test times")
    plt.ylabel("cmc")
    plt.plot(x,cmc)
    plt.show()
    ### summary of the test results
    print("")
    print(" 本次测试 结果如下")
    print(" ------------------------------")
    print(" person num | {}".format(num))
    print(" ------------------------------")
    print(" CMC | {}".format(cmc))
    print(" mAP | {:.3f}".format(mAP))
    print(" ------------------------------")
    # print("all_cmc:", cmc)
    # print("mAP{:.3f}:".format(mAP))
    return num,cmc,mAP
def process_dir(dir_path):
    """Return the base names (without the ``.png`` suffix) of all PNG files
    directly inside *dir_path*."""
    pattern = osp.join(dir_path, '*.png')
    names = []
    for path in glob.glob(pattern):
        # Drop everything from the first ".png" onward, then keep only the
        # final path component.
        stem = path.split(".png", 1)[0]
        names.append(osp.basename(osp.normpath(stem)))
    return names
if __name__ == '__main__':
# dir_path = "./data/market1501/view2"
# process_dir(dir_path)
# print(process_dir(dir_path))
main() | [
"matplotlib.pyplot.ylabel",
"torch.pow",
"util.save_json.SaveJson",
"util.dataset_loader.ImageDataset",
"torch.cuda.is_available",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.normpath",
"numpy.linspace",
"util.transforms.ToTensor",
"time.localtime",
"util.losses.DeepSupervis... | [((750, 764), 'util.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (762, 764), False, 'from util.utils import AverageMeter, Logger, save_checkpoint\n'), ((777, 788), 'time.time', 'time.time', ([], {}), '()\n', (786, 788), False, 'import time\n'), ((832, 857), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (855, 857), False, 'import torch\n'), ((972, 1008), 'util.data_manager.Market1501', 'data_manager.Market1501', ([], {'root': '"""data"""'}), "(root='data')\n", (995, 1008), False, 'from util import data_manager\n'), ((1785, 1898), 'models.init_model', 'models.init_model', ([], {'name': '"""resnet50"""', 'num_classes': '(8)', 'loss': "{'softmax', 'metric'}", 'aligned': '(True)', 'use_gpu': 'use_gpu'}), "(name='resnet50', num_classes=8, loss={'softmax', 'metric'\n }, aligned=True, use_gpu=use_gpu)\n", (1802, 1898), False, 'import models\n'), ((2044, 2077), 'util.losses.CrossEntropyLoss', 'CrossEntropyLoss', ([], {'use_gpu': 'use_gpu'}), '(use_gpu=use_gpu)\n', (2060, 2077), False, 'from util.losses import CrossEntropyLoss, DeepSupervision, CrossEntropyLabelSmooth, TripletLossAlignedReID\n'), ((2101, 2135), 'util.losses.TripletLossAlignedReID', 'TripletLossAlignedReID', ([], {'margin': '(0.3)'}), '(margin=0.3)\n', (2123, 2135), False, 'from util.losses import CrossEntropyLoss, DeepSupervision, CrossEntropyLabelSmooth, TripletLossAlignedReID\n'), ((2225, 2281), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': '(150)', 'gamma': '(0.1)'}), '(optimizer, step_size=150, gamma=0.1)\n', (2244, 2281), False, 'from torch.optim import lr_scheduler\n'), ((2457, 2468), 'time.time', 'time.time', ([], {}), '()\n', (2466, 2468), False, 'import time\n'), ((2852, 2862), 'util.save_json.SaveJson', 'SaveJson', ([], {}), '()\n', (2860, 2862), False, 'from util.save_json import SaveJson\n'), ((3248, 3262), 'util.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3260, 3262), False, 
'from util.utils import AverageMeter, Logger, save_checkpoint\n'), ((3280, 3294), 'util.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3292, 3294), False, 'from util.utils import AverageMeter, Logger, save_checkpoint\n'), ((3311, 3325), 'util.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3323, 3325), False, 'from util.utils import AverageMeter, Logger, save_checkpoint\n'), ((3344, 3358), 'util.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3356, 3358), False, 'from util.utils import AverageMeter, Logger, save_checkpoint\n'), ((3379, 3393), 'util.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3391, 3393), False, 'from util.utils import AverageMeter, Logger, save_checkpoint\n'), ((3413, 3427), 'util.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3425, 3427), False, 'from util.utils import AverageMeter, Logger, save_checkpoint\n'), ((3439, 3450), 'time.time', 'time.time', ([], {}), '()\n', (3448, 3450), False, 'import time\n'), ((5800, 5814), 'util.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5812, 5814), False, 'from util.utils import AverageMeter, Logger, save_checkpoint\n'), ((8733, 8811), 'util.eval_metrics.evaluate', 'evaluate', (['distmat', 'q_pids', 'g_pids', 'q_camids', 'g_camids'], {'use_metric_cuhk03': '(False)'}), '(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=False)\n', (8741, 8811), False, 'from util.eval_metrics import evaluate\n'), ((8855, 8879), 'numpy.linspace', 'np.linspace', (['(1)', 'len', 'len'], {}), '(1, len, len)\n', (8866, 8879), True, 'import numpy as np\n'), ((8941, 8971), 'matplotlib.pyplot.title', 'plt.title', (['"""CMC curve of test"""'], {}), "('CMC curve of test')\n", (8950, 8971), True, 'import matplotlib.pyplot as plt\n'), ((8976, 9000), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""test times"""'], {}), "('test times')\n", (8986, 9000), True, 'import matplotlib.pyplot as plt\n'), ((9005, 9022), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cmc"""'], {}), 
"('cmc')\n", (9015, 9022), True, 'import matplotlib.pyplot as plt\n'), ((9027, 9043), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'cmc'], {}), '(x, cmc)\n', (9035, 9043), True, 'import matplotlib.pyplot as plt\n'), ((9047, 9057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9055, 9057), True, 'import matplotlib.pyplot as plt\n'), ((1380, 1433), 'util.dataset_loader.ImageDataset', 'ImageDataset', (['dataset.query'], {'transform': 'transform_test'}), '(dataset.query, transform=transform_test)\n', (1392, 1433), False, 'from util.dataset_loader import ImageDataset\n'), ((1608, 1663), 'util.dataset_loader.ImageDataset', 'ImageDataset', (['dataset.gallery'], {'transform': 'transform_test'}), '(dataset.gallery, transform=transform_test)\n', (1620, 1663), False, 'from util.dataset_loader import ImageDataset\n'), ((2521, 2537), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2535, 2537), False, 'import time\n'), ((4671, 4682), 'time.time', 'time.time', ([], {}), '()\n', (4680, 4682), False, 'import time\n'), ((5881, 5896), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5894, 5896), False, 'import torch\n'), ((6591, 6607), 'torch.cat', 'torch.cat', (['qf', '(0)'], {}), '(qf, 0)\n', (6600, 6607), False, 'import torch\n'), ((6622, 6639), 'torch.cat', 'torch.cat', (['lqf', '(0)'], {}), '(lqf, 0)\n', (6631, 6639), False, 'import torch\n'), ((6819, 6830), 'time.time', 'time.time', ([], {}), '()\n', (6828, 6830), False, 'import time\n'), ((7300, 7316), 'torch.cat', 'torch.cat', (['gf', '(0)'], {}), '(gf, 0)\n', (7309, 7316), False, 'import torch\n'), ((7331, 7348), 'torch.cat', 'torch.cat', (['lgf', '(0)'], {}), '(lgf, 0)\n', (7340, 7348), False, 'import torch\n'), ((9525, 9552), 'os.path.join', 'osp.join', (['dir_path', '"""*.png"""'], {}), "(dir_path, '*.png')\n", (9533, 9552), True, 'import os.path as osp\n'), ((1122, 1134), 'util.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1132, 1134), True, 'from util import transforms as T\n'), ((1144, 
1210), 'util.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1155, 1210), True, 'from util import transforms as T\n'), ((6159, 6170), 'time.time', 'time.time', ([], {}), '()\n', (6168, 6170), False, 'import time\n'), ((6950, 6961), 'time.time', 'time.time', ([], {}), '()\n', (6959, 6961), False, 'import time\n'), ((7804, 7820), 'torch.pow', 'torch.pow', (['qf', '(2)'], {}), '(qf, 2)\n', (7813, 7820), False, 'import torch\n'), ((9683, 9705), 'os.path.normpath', 'osp.normpath', (['img_name'], {}), '(img_name)\n', (9695, 9705), True, 'import os.path as osp\n'), ((2335, 2357), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (2350, 2357), True, 'import torch.nn as nn\n'), ((3645, 3656), 'time.time', 'time.time', ([], {}), '()\n', (3654, 3656), False, 'import time\n'), ((3833, 3898), 'util.losses.DeepSupervision', 'DeepSupervision', (['criterion_metric', 'features', 'pids', 'local_features'], {}), '(criterion_metric, features, pids, local_features)\n', (3848, 3898), False, 'from util.losses import CrossEntropyLoss, DeepSupervision, CrossEntropyLabelSmooth, TripletLossAlignedReID\n'), ((4093, 4140), 'util.losses.DeepSupervision', 'DeepSupervision', (['criterion_class', 'outputs', 'pids'], {}), '(criterion_class, outputs, pids)\n', (4108, 4140), False, 'from util.losses import CrossEntropyLoss, DeepSupervision, CrossEntropyLabelSmooth, TripletLossAlignedReID\n'), ((4305, 4370), 'util.losses.DeepSupervision', 'DeepSupervision', (['criterion_metric', 'features', 'pids', 'local_features'], {}), '(criterion_metric, features, pids, local_features)\n', (4320, 4370), False, 'from util.losses import CrossEntropyLoss, DeepSupervision, CrossEntropyLabelSmooth, TripletLossAlignedReID\n'), ((4638, 4649), 'time.time', 'time.time', ([], {}), '()\n', (4647, 4649), False, 'import time\n'), ((6283, 6294), 'time.time', 'time.time', ([], {}), 
'()\n', (6292, 6294), False, 'import time\n'), ((7076, 7087), 'time.time', 'time.time', ([], {}), '()\n', (7085, 7087), False, 'import time\n'), ((7606, 7645), 'torch.norm', 'torch.norm', (['qf', '(2)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(qf, 2, dim=-1, keepdim=True)\n', (7616, 7645), False, 'import torch\n'), ((7689, 7728), 'torch.norm', 'torch.norm', (['gf', '(2)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(gf, 2, dim=-1, keepdim=True)\n', (7699, 7728), False, 'import torch\n'), ((7923, 7939), 'torch.pow', 'torch.pow', (['qf', '(2)'], {}), '(qf, 2)\n', (7932, 7939), False, 'import torch\n'), ((7996, 8012), 'torch.pow', 'torch.pow', (['gf', '(2)'], {}), '(gf, 2)\n', (8005, 8012), False, 'import torch\n')] |
from generators.generator import *
import numpy as np
import math
class SineGenerator(SoundGenerator):
    """Two-oscillator additive sine generator.

    Produces ``length * sample_rate`` samples of
    0.5 * (a1*sin(pi*f1*t) + a2*sin(pi*f2*t)) where t = i / sample_rate.
    """

    def generate(self, parameters: dict, filename: str, length: float, sample_rate: int) -> np.ndarray:
        """Render the waveform described by *parameters*.

        ``filename`` is accepted for interface compatibility but unused
        here (the caller handles writing).  Returns a float64 array of
        ``int(length * sample_rate)`` samples.
        """
        samples = int(length * sample_rate)
        params = dict(parameters)
        print("Sine Params: " + str(params))
        # Vectorized replacement for the original per-sample Python loop:
        # one numpy expression over the whole time axis.
        t = np.arange(samples, dtype=float) / sample_rate
        # NOTE(review): the phase is t*f*pi (not 2*pi*f*t), so 'f1'/'f2'
        # are in units of 2 Hz -- kept as-is to preserve the original
        # output.
        return (params['a1'] * np.sin(t * params['f1'] * np.pi)
                + params['a2'] * np.sin(t * params['f2'] * np.pi)) * 0.5
if __name__ == "__main__":
    gen = SineGenerator()
    # Sweep space for the two oscillators' frequencies and amplitudes.
    parameters=ParameterSet([
        Parameter("f1",[100,200,400]),
        Parameter("a1",[0.5,0.7,1.0]),
        Parameter("f2",[800,1200,1600]),
        Parameter("a2",[0.5,0.7,1.0])
    ])
    # Amplitudes pinned for this run; presumably overrides the swept
    # values above -- TODO confirm against DatasetGenerator's semantics.
    fixed_parameters = {
        "a1":1.0,
        "a2":1.0
    }
    g = DatasetGenerator("sine_generator",
        dataset_dir="test_datasets",
        wave_file_dir="test_waves/sine/",
        parameters=parameters,
        fixed_parameters=fixed_parameters
        )
    # Randomly sample up to 100 one-second clips at 16384 Hz.
    g.generate(sound_generator=gen,length=1,sample_rate=16384,method="random",max=100)
| [
"numpy.zeros",
"math.sin"
] | [((287, 304), 'numpy.zeros', 'np.zeros', (['samples'], {}), '(samples)\n', (295, 304), True, 'import numpy as np\n'), ((508, 544), 'math.sin', 'math.sin', (["(t * params['f1'] * math.pi)"], {}), "(t * params['f1'] * math.pi)\n", (516, 544), False, 'import math\n'), ((578, 614), 'math.sin', 'math.sin', (["(t * params['f2'] * math.pi)"], {}), "(t * params['f2'] * math.pi)\n", (586, 614), False, 'import math\n')] |
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import argparse
import math
import numpy as np
import random
from job import Job
from job_table import JobTable
import utils
def generate_interarrival_time(rng, lam):
    """Draw an exponentially distributed interarrival time via inverse-CDF
    sampling; *lam* scales the unit-mean exponential draw."""
    u = rng.random()
    return lam * -math.log(1.0 - u)
def generate_duration(durations, rng):
    """Pick one duration (in hours) uniformly from *durations* and return
    it converted to seconds."""
    hours = rng.choice(durations)
    return hours * 3600
def generate_scale_factor(rng):
    """Sample a job scale factor: ~70% of draws -> 1 GPU, ~10% -> 2 GPUs,
    ~20% -> 4 GPUs."""
    draw = rng.uniform(0, 1)
    if draw < 0.7:
        return 1
    if draw <= 0.8:
        return 2
    return 4
return scale_factor
def main(args):
    """Generate a synthetic job trace and write it to args.output_file.

    Each output line is '<job>\\t<arrival time>'.  Arrival times follow a
    Poisson process with mean interarrival time args.lam seconds (all
    arrivals are at time 0 when args.lam <= 0).
    """
    # Independent, seeded RNG streams so each random quantity is
    # reproducible on its own regardless of the others.
    job_generator = random.Random()
    job_generator.seed(args.seed)
    interarrival_time_generator = random.Random()
    interarrival_time_generator.seed(args.seed + 1)
    duration_generator = random.Random()
    duration_generator.seed(args.seed + 2)
    scale_factor_generator = random.Random()
    scale_factor_generator.seed(args.seed + 3)
    throughputs = utils.read_all_throughputs_json_v2(args.throughputs_file)
    # Evenly spaced candidate durations in hours (converted to seconds by
    # generate_duration).
    durations = np.linspace(args.min_duration, args.max_duration,
                            args.num_durations)
    duration_generator_func = lambda rng: generate_duration(durations, rng)
    prev_arrival_time = None
    with open(args.output_file, 'w') as f:
        for i in range(args.num_jobs):
            job = utils.generate_job(
                throughputs=throughputs,
                reference_worker_type='v100',
                rng=job_generator,
                job_id=None,
                fixed_job_duration=None,
                generate_multi_gpu_jobs=args.generate_multi_gpu_jobs,
                generate_multi_priority_jobs=args.generate_multi_priority_jobs,
                scale_factor_generator_func=generate_scale_factor,
                duration_generator_func=duration_generator_func,
                scale_factor_rng=scale_factor_generator,
                duration_rng=duration_generator,
                always_generate_scale_factor=False)
            if prev_arrival_time is None:
                arrival_time = 0
            elif args.lam > 0:
                interarrival_time = \
                    generate_interarrival_time(interarrival_time_generator,
                                               args.lam)
                arrival_time = prev_arrival_time + interarrival_time
            # When args.lam <= 0, arrival_time keeps its value from the
            # previous iteration (0 after the first job), so every job
            # arrives at time 0.
            prev_arrival_time = arrival_time
            f.write('%s\t%d\n' % (str(job), arrival_time))
if __name__=='__main__':
    # Command-line interface for the synthetic trace generator.
    parser = argparse.ArgumentParser(description='Generate synthetic trace')
    parser.add_argument('--num_jobs', type=int, required=True,
                        help='Number of jobs to generate')
    parser.add_argument('-l', '--lam', type=float, default=0.0,
                        help='Lambda for Poisson arrival rate')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed')
    parser.add_argument('--throughputs_file', type=str,
                        default=('simulation_throughputs.json'),
                        help='Oracle throughputs file')
    parser.add_argument('-a', '--min_duration', type=float, default=1,
                        help='Minimum job duration in hours')
    parser.add_argument('-b', '--max_duration', type=float, default=4,
                        help='Maximum job duration in hours')
    parser.add_argument('-n', '--num_durations', type=int, default=4,
                        help='Number of possible job durations')
    parser.add_argument('-m', '--generate-multi-gpu-jobs', action='store_true',
                        default=False,
                        help=('If set, generates multi-GPU jobs according to '
                              'a pre-defined distribution'))
    parser.add_argument('--generate-multi-priority-jobs', action='store_true',
                        default=False,
                        help=('If set, generates some jobs with higher priority'))
    parser.add_argument('--output_file', type=str, required=True,
                        help='Output file name')
    args = parser.parse_args()
    main(args)
| [
"utils.read_all_throughputs_json_v2",
"argparse.ArgumentParser",
"random.Random",
"os.path.realpath",
"numpy.linspace",
"utils.generate_job"
] | [((641, 656), 'random.Random', 'random.Random', ([], {}), '()\n', (654, 656), False, 'import random\n'), ((726, 741), 'random.Random', 'random.Random', ([], {}), '()\n', (739, 741), False, 'import random\n'), ((820, 835), 'random.Random', 'random.Random', ([], {}), '()\n', (833, 835), False, 'import random\n'), ((909, 924), 'random.Random', 'random.Random', ([], {}), '()\n', (922, 924), False, 'import random\n'), ((991, 1048), 'utils.read_all_throughputs_json_v2', 'utils.read_all_throughputs_json_v2', (['args.throughputs_file'], {}), '(args.throughputs_file)\n', (1025, 1048), False, 'import utils\n'), ((1066, 1135), 'numpy.linspace', 'np.linspace', (['args.min_duration', 'args.max_duration', 'args.num_durations'], {}), '(args.min_duration, args.max_duration, args.num_durations)\n', (1077, 1135), True, 'import numpy as np\n'), ((2559, 2622), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate synthetic trace"""'}), "(description='Generate synthetic trace')\n", (2582, 2622), False, 'import argparse\n'), ((1370, 1857), 'utils.generate_job', 'utils.generate_job', ([], {'throughputs': 'throughputs', 'reference_worker_type': '"""v100"""', 'rng': 'job_generator', 'job_id': 'None', 'fixed_job_duration': 'None', 'generate_multi_gpu_jobs': 'args.generate_multi_gpu_jobs', 'generate_multi_priority_jobs': 'args.generate_multi_priority_jobs', 'scale_factor_generator_func': 'generate_scale_factor', 'duration_generator_func': 'duration_generator_func', 'scale_factor_rng': 'scale_factor_generator', 'duration_rng': 'duration_generator', 'always_generate_scale_factor': '(False)'}), "(throughputs=throughputs, reference_worker_type='v100',\n rng=job_generator, job_id=None, fixed_job_duration=None,\n generate_multi_gpu_jobs=args.generate_multi_gpu_jobs,\n generate_multi_priority_jobs=args.generate_multi_priority_jobs,\n scale_factor_generator_func=generate_scale_factor,\n duration_generator_func=duration_generator_func, scale_factor_rng=\n 
scale_factor_generator, duration_rng=duration_generator,\n always_generate_scale_factor=False)\n", (1388, 1857), False, 'import utils\n'), ((79, 105), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (95, 105), False, 'import os, sys\n')] |
# -*- coding: utf-8 -*-
"""Customer_Segmentation_Using_K_Means_Clustering_pynb.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1ocFey3admM9xMlBWaC8YeTXBE0R7UWRD

### Problem statement:Mall Customer Segmentation Using KMeans Clustering
"""

# Commented out IPython magic to ensure Python compatibility.
# Importing the necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('ggplot')
from mpl_toolkits.mplot3d import Axes3D
# %matplotlib inline

# Reading the data file
data = pd.read_csv("Mall_Customers.csv")

# Exploring the dataset
data.head()
data.shape
data.info()
data.dtypes

# Checking the null values in dataset
data.isnull().sum()

# Relationship between variables
data.corr()

# Distribution of Annual Income
# NOTE: sns.distplot is deprecated in seaborn >= 0.11; kept to preserve output.
plt.figure(figsize=(10, 6))
sns.distplot(data['Annual Income (k$)'])
plt.title('Distribution of Annual Income (k$)', fontsize = 20)
plt.xlabel('Range of Annual Income (k$)')
plt.ylabel('Count')
plt.savefig('Distribution of Annual Income (k$)',bbox_inches='tight',dpi=400)

"""Most of the annual income falls between 50K to 85K."""

# Bucket the customers into age groups and count each bucket.
age18_25 = data.Age[(data.Age <= 25) & (data.Age >= 18)]
age26_35 = data.Age[(data.Age <= 35) & (data.Age >= 26)]
age36_45 = data.Age[(data.Age <= 45) & (data.Age >= 36)]
age46_55 = data.Age[(data.Age <= 55) & (data.Age >= 46)]
age55above = data.Age[data.Age >= 56]
x = ["18-25","26-35","36-45","46-55","55+"]
y = [len(age18_25.values),len(age26_35.values),len(age36_45.values),len(age46_55.values),len(age55above.values)]
plt.figure(figsize=(15,6))
sns.barplot(x=x, y=y, palette="rocket")
plt.title("Number of Customer and Ages")
plt.xlabel("Age")
plt.ylabel("Number of Customer")
plt.savefig("Number of Customer and Ages",bbox_inches='tight',dpi=400)
plt.show()

"""There are customers of a wide variety of ages."""

# Distribution of spending score
plt.figure(figsize=(10, 6))
sns.distplot(data['Spending Score (1-100)'])
plt.title('Distribution of Spending Score (1-100)', fontsize = 20)
plt.xlabel('Range of Spending Score (1-100)')
plt.ylabel('Count')
plt.savefig('Distribution of Spending Score (1-100)',bbox_inches='tight',dpi=400)

"""The maximum spending score is in the range of 40 to 60."""

# Gender split of the customer base.
genders = data.Gender.value_counts()
sns.set_style("darkgrid")
plt.figure(figsize=(10,4))
sns.barplot(x=genders.index, y=genders.values)
plt.show()

"""### Clustering based on 3 features"""

# We take just the Age, Annual Income and Spending score as features.
X = data[['Age',"Annual Income (k$)","Spending Score (1-100)"]]
X.head()

# Scatterplot of Annual Income vs Spending Score
plt.figure(figsize=(10,6))
sns.scatterplot(x = 'Annual Income (k$)',y = 'Spending Score (1-100)', data = X ,s = 60 )
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.title('Spending Score (1-100) vs Annual Income (k$)')
plt.show()

# Scatterplot of Age vs Spending Score
plt.figure(figsize=(10,6))
sns.scatterplot(x = 'Age',y = 'Spending Score (1-100)', data = X )
plt.xlabel('Age')
plt.ylabel('Spending Score (1-100)')
plt.title('Spending Score (1-100) vs Age')
plt.show()

# Standardize the features so that each one contributes equally to the
# Euclidean distances used by KMeans.
from sklearn.preprocessing import StandardScaler
X1 = StandardScaler().fit_transform(X.values)
X1 = np.nan_to_num(X1)
X1[:5]

"""#### Calculation the K Value"""

# Importing KMeans from sklearn
from sklearn.cluster import KMeans

# Elbow method: sum of squared errors (inertia) for k = 1..10.
# BUGFIX: the elbow curve must be computed on the *scaled* data (X1) that
# the final model is fitted on; the original fitted the raw features X
# here, so the chosen K came from a different feature space.
sse = []
for i in range(1,11):
    km = KMeans(n_clusters=i)
    km.fit(X1)
    sse.append(km.inertia_)

# The elbow curve
plt.figure(figsize=(12,6))
plt.plot(range(1,11),sse, linewidth=2, color="red", marker ="8")
plt.xlabel("K Value")
plt.xticks(np.arange(1,11,1))
plt.ylabel("SSE")
plt.savefig('Calculation K Value',bbox_inches='tight',dpi=400)
plt.show()

"""Here in the graph, after 5 the drop is minimal, so we take 5 to be the number of clusters."""

# Fit the final model with 5 clusters on the scaled features.
km1 = KMeans(n_clusters=5)
km1.fit(X1)
# Predict the cluster label of every customer.
y = km1.predict(X1)
# Attach the labels to the original dataframe.
data["label"] = y
# The new dataframe with the clustering done
data.head()

"""### Result"""

# Plot the final clusters in 3D.
# (Duplicate re-imports of Axes3D/plt/np/pd removed — all of these are
# already imported at the top of the script.)
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(data.Age[data.label == 0], data["Annual Income (k$)"][data.label == 0], data["Spending Score (1-100)"][data.label == 0], c='blue', s=60)
ax.scatter(data.Age[data.label == 1], data["Annual Income (k$)"][data.label == 1], data["Spending Score (1-100)"][data.label == 1], c='red', s=60)
ax.scatter(data.Age[data.label == 2], data["Annual Income (k$)"][data.label == 2], data["Spending Score (1-100)"][data.label == 2], c='green', s=60)
ax.scatter(data.Age[data.label == 3], data["Annual Income (k$)"][data.label == 3], data["Spending Score (1-100)"][data.label == 3], c='orange', s=60)
ax.scatter(data.Age[data.label == 4], data["Annual Income (k$)"][data.label == 4], data["Spending Score (1-100)"][data.label == 4], c='purple', s=60)
ax.view_init(30, 185)
plt.xlabel("Age")
plt.ylabel("Annual Income (k$)")
ax.set_zlabel('Spending Score (1-100)')
plt.savefig('Final clustering result',bbox_inches='tight',dpi=400)
plt.show()
| [
"sklearn.cluster.KMeans",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"seaborn.distplot",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"seaborn.set_style",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.figure",
"seaborn... | [((503, 526), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (516, 526), True, 'import matplotlib.pyplot as plt\n'), ((617, 650), 'pandas.read_csv', 'pd.read_csv', (['"""Mall_Customers.csv"""'], {}), "('Mall_Customers.csv')\n", (628, 650), True, 'import pandas as pd\n'), ((859, 886), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (869, 886), True, 'import matplotlib.pyplot as plt\n'), ((887, 927), 'seaborn.distplot', 'sns.distplot', (["data['Annual Income (k$)']"], {}), "(data['Annual Income (k$)'])\n", (899, 927), True, 'import seaborn as sns\n'), ((928, 988), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Annual Income (k$)"""'], {'fontsize': '(20)'}), "('Distribution of Annual Income (k$)', fontsize=20)\n", (937, 988), True, 'import matplotlib.pyplot as plt\n'), ((991, 1032), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Range of Annual Income (k$)"""'], {}), "('Range of Annual Income (k$)')\n", (1001, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1033, 1052), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (1043, 1052), True, 'import matplotlib.pyplot as plt\n'), ((1053, 1132), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Distribution of Annual Income (k$)"""'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "('Distribution of Annual Income (k$)', bbox_inches='tight', dpi=400)\n", (1064, 1132), True, 'import matplotlib.pyplot as plt\n'), ((1616, 1643), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (1626, 1643), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1682), 'seaborn.barplot', 'sns.barplot', ([], {'x': 'x', 'y': 'y', 'palette': '"""rocket"""'}), "(x=x, y=y, palette='rocket')\n", (1654, 1682), True, 'import seaborn as sns\n'), ((1683, 1723), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of Customer and Ages"""'], {}), "('Number of 
Customer and Ages')\n", (1692, 1723), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1741), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Age"""'], {}), "('Age')\n", (1734, 1741), True, 'import matplotlib.pyplot as plt\n'), ((1742, 1774), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Customer"""'], {}), "('Number of Customer')\n", (1752, 1774), True, 'import matplotlib.pyplot as plt\n'), ((1775, 1847), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Number of Customer and Ages"""'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "('Number of Customer and Ages', bbox_inches='tight', dpi=400)\n", (1786, 1847), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1854, 1856), True, 'import matplotlib.pyplot as plt\n'), ((1944, 1971), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1954, 1971), True, 'import matplotlib.pyplot as plt\n'), ((1972, 2016), 'seaborn.distplot', 'sns.distplot', (["data['Spending Score (1-100)']"], {}), "(data['Spending Score (1-100)'])\n", (1984, 2016), True, 'import seaborn as sns\n'), ((2017, 2081), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Spending Score (1-100)"""'], {'fontsize': '(20)'}), "('Distribution of Spending Score (1-100)', fontsize=20)\n", (2026, 2081), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2129), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Range of Spending Score (1-100)"""'], {}), "('Range of Spending Score (1-100)')\n", (2094, 2129), True, 'import matplotlib.pyplot as plt\n'), ((2130, 2149), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (2140, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2150, 2237), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Distribution of Spending Score (1-100)"""'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "('Distribution of Spending Score (1-100)', bbox_inches='tight',\n 
dpi=400)\n", (2161, 2237), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2358), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (2346, 2358), True, 'import seaborn as sns\n'), ((2359, 2386), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (2369, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2386, 2432), 'seaborn.barplot', 'sns.barplot', ([], {'x': 'genders.index', 'y': 'genders.values'}), '(x=genders.index, y=genders.values)\n', (2397, 2432), True, 'import seaborn as sns\n'), ((2433, 2443), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2441, 2443), True, 'import matplotlib.pyplot as plt\n'), ((2663, 2690), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2673, 2690), True, 'import matplotlib.pyplot as plt\n'), ((2690, 2775), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""Annual Income (k$)"""', 'y': '"""Spending Score (1-100)"""', 'data': 'X', 's': '(60)'}), "(x='Annual Income (k$)', y='Spending Score (1-100)', data=X,\n s=60)\n", (2705, 2775), True, 'import seaborn as sns\n'), ((2782, 2814), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Annual Income (k$)"""'], {}), "('Annual Income (k$)')\n", (2792, 2814), True, 'import matplotlib.pyplot as plt\n'), ((2815, 2851), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Spending Score (1-100)"""'], {}), "('Spending Score (1-100)')\n", (2825, 2851), True, 'import matplotlib.pyplot as plt\n'), ((2853, 2910), 'matplotlib.pyplot.title', 'plt.title', (['"""Spending Score (1-100) vs Annual Income (k$)"""'], {}), "('Spending Score (1-100) vs Annual Income (k$)')\n", (2862, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2911, 2921), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2919, 2921), True, 'import matplotlib.pyplot as plt\n'), ((2954, 2981), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2964, 
2981), True, 'import matplotlib.pyplot as plt\n'), ((2981, 3041), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""Age"""', 'y': '"""Spending Score (1-100)"""', 'data': 'X'}), "(x='Age', y='Spending Score (1-100)', data=X)\n", (2996, 3041), True, 'import seaborn as sns\n'), ((3049, 3066), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Age"""'], {}), "('Age')\n", (3059, 3066), True, 'import matplotlib.pyplot as plt\n'), ((3067, 3103), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Spending Score (1-100)"""'], {}), "('Spending Score (1-100)')\n", (3077, 3103), True, 'import matplotlib.pyplot as plt\n'), ((3105, 3147), 'matplotlib.pyplot.title', 'plt.title', (['"""Spending Score (1-100) vs Age"""'], {}), "('Spending Score (1-100) vs Age')\n", (3114, 3147), True, 'import matplotlib.pyplot as plt\n'), ((3148, 3158), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3156, 3158), True, 'import matplotlib.pyplot as plt\n'), ((3283, 3300), 'numpy.nan_to_num', 'np.nan_to_num', (['X1'], {}), '(X1)\n', (3296, 3300), True, 'import numpy as np\n'), ((3558, 3585), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (3568, 3585), True, 'import matplotlib.pyplot as plt\n'), ((3650, 3671), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""K Value"""'], {}), "('K Value')\n", (3660, 3671), True, 'import matplotlib.pyplot as plt\n'), ((3702, 3719), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SSE"""'], {}), "('SSE')\n", (3712, 3719), True, 'import matplotlib.pyplot as plt\n'), ((3720, 3784), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Calculation K Value"""'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "('Calculation K Value', bbox_inches='tight', dpi=400)\n", (3731, 3784), True, 'import matplotlib.pyplot as plt\n'), ((3783, 3793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3791, 3793), True, 'import matplotlib.pyplot as plt\n'), ((3916, 3936), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 
'(5)'}), '(n_clusters=5)\n', (3922, 3936), False, 'from sklearn.cluster import KMeans\n'), ((4316, 4344), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (4326, 4344), True, 'import matplotlib.pyplot as plt\n'), ((5153, 5170), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Age"""'], {}), "('Age')\n", (5163, 5170), True, 'import matplotlib.pyplot as plt\n'), ((5171, 5203), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Annual Income (k$)"""'], {}), "('Annual Income (k$)')\n", (5181, 5203), True, 'import matplotlib.pyplot as plt\n'), ((5244, 5312), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Final clustering result"""'], {'bbox_inches': '"""tight"""', 'dpi': '(400)'}), "('Final clustering result', bbox_inches='tight', dpi=400)\n", (5255, 5312), True, 'import matplotlib.pyplot as plt\n'), ((5311, 5321), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5319, 5321), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3497), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'i'}), '(n_clusters=i)\n', (3483, 3497), False, 'from sklearn.cluster import KMeans\n'), ((3683, 3702), 'numpy.arange', 'np.arange', (['(1)', '(11)', '(1)'], {}), '(1, 11, 1)\n', (3692, 3702), True, 'import numpy as np\n'), ((3237, 3253), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3251, 3253), False, 'from sklearn.preprocessing import StandardScaler\n')] |
"""
Contains some defintions and helper functions
"""
# pylint: disable=invalid-name
import numpy as np
# NOTE(review): looks like a table of gate thresholds indexed by degrees of
# freedom (monotonically increasing, entry 0 unused) — presumably used for
# innovation/fault testing; confirm against the estimator code that reads it.
BETA_TABLE = [
    0,
    8.82050518214,
    12.094592431,
    13.9876612368,
    16.0875642296,
    17.8797700658,
    19.6465647819,
    21.3802576894,
    23.0806434845,
    24.6673803845,
    26.1487953661,
    27.6350821245,
    29.6565383703,
    31.2211113844,
    32.7673547211,
    34.2967756977,
    35.6906782236,
    37.0724753352,
    38.4549693067,
    39.836592699,
]

# Sensor noise densities for the estimator; per-entry units are noted below.
params = {
    'GYRO': 5e-3,  # rad/s / sqrt(Hz)
    'BARO_Z': 1.0,  # m / sqrt(Hz)
    'GPS_XY': 0.3,  # m / sqrt(Hz)
    'GPS_Z': 0.3,  # m / sqrt(Hz)
    'GPS_VXY': 0.03,  # m/s / sqrt(Hz)
    'GPS_VZ': 0.03,  # m/s / sqrt(Hz)
    'LDR_Z': 0.02,  # m / sqrt(Hz)
    'ACC': 5e-2,  # m/s^2 / sqrt(Hz)
    'ACC_C': 1,  # m/s^2 / sqrt(Hz), weight for correction of acc dir
    'MAG_NE': 1e-1,  # 1 / sqrt(Hz)
    'MAG_D': 1e6,  # 1 / sqrt(Hz), large value prevents roll/pitch correction
}
class X(object):
    """
    Full-state index enumeration.

    Indices into the estimator's state vector. These do not line up with
    the error state space (``Xe``) because attitude is stored here as a
    4-component quaternion instead of the infinitesimal Euler angles used
    in the error state.
    """
    # attitude quaternion components (q_nb)
    q_nb_0 = 0
    q_nb_1 = 1
    q_nb_2 = 2
    q_nb_3 = 3
    # velocity components (N/E/D naming)
    vel_N = 4
    vel_E = 5
    vel_D = 6
    # gyro bias, body axes
    gyro_bias_bx = 7
    gyro_bias_by = 8
    gyro_bias_bz = 9
    # accelerometer scale factor
    accel_scale = 10
    # position components (N/E/D naming)
    pos_N = 11
    pos_E = 12
    pos_D = 13
    # terrain altitude
    terrain_alt = 14
    # barometer bias
    baro_bias = 15
    # total number of states
    n = 16
class Xe(object):
    """
    Error-state index enumeration, used as the state space for the
    Kalman filter. Attitude error uses 3 rotation components here
    (vs the 4-component quaternion in ``X``), hence n = 15 vs 16.
    """
    # attitude error, body axes
    rot_bx = 0
    rot_by = 1
    rot_bz = 2
    # velocity error (N/E/D naming)
    vel_N = 3
    vel_E = 4
    vel_D = 5
    # gyro bias error, body axes
    gyro_bias_bx = 6
    gyro_bias_by = 7
    gyro_bias_bz = 8
    # accelerometer scale factor error
    accel_scale = 9
    # position error (N/E/D naming)
    pos_N = 10
    pos_E = 11
    pos_D = 12
    # terrain altitude error
    terrain_alt = 13
    # barometer bias error
    baro_bias = 14
    # total number of error states
    n = 15
class U(object):
    """
    Input index enumeration: gyro (angular rate) and accelerometer
    measurements, body axes.
    """
    # angular velocity of body relative to nav frame, body axes
    omega_nb_bx = 0
    omega_nb_by = 1
    omega_nb_bz = 2
    # specific acceleration, body axes
    a_bx = 3
    a_by = 4
    a_bz = 5
    # total number of inputs
    n = 6
class Y_accel(object):
    """
    Acceleration measurement in body frame (index enumeration).
    """
    bx = 0
    by = 1
    bz = 2
    # total number of measurement components
    n = 3
class Y_gps(object):
    """
    GPS measurement (index enumeration): position and velocity,
    N/E/D naming.
    """
    pos_N = 0
    pos_E = 1
    pos_D = 2
    vel_N = 3
    vel_E = 4
    vel_D = 5
    # total number of measurement components
    n = 6
class Y_baro(object):
    """
    Barometer measurement (index enumeration).
    """
    # altitude above sea level
    asl = 0
    # total number of measurement components
    n = 1
class Y_mag(object):
    """
    Magnetometer measurement (index enumeration).
    """
    nx = 0
    ny = 1
    nz = 2
    # total number of measurement components
    n = 3
class Y_lidar(object):
    """
    Lidar measurement (index enumeration).
    """
    # measured distance
    d = 0
    # total number of measurement components
    n = 1
class Timer(object):
    """
    Periodic event timer.

    Remembers the time of the last firing and reports readiness once
    more than ``period`` time units have elapsed since then.
    """
    def __init__(self, t0, period):
        # t0 is the reference (last firing) time; period is the minimum
        # spacing between firings, in the same time units as t0.
        self.t0 = t0
        self.period = period
    def ready(self, t):
        """
        Return True and reset the reference time when more than one
        period has elapsed since the last firing; otherwise False.
        """
        elapsed = t - self.t0
        if elapsed <= self.period:
            return False
        self.t0 = t
        return True
def skew(v):
    """
    Build the 3x3 skew-symmetric (cross-product) matrix of v, so that
    the matrix product skew(v) @ w equals the cross product v x w.
    """
    vx, vy, vz = v[0], v[1], v[2]
    return np.array([
        [0, -vz, vy],
        [vz, 0, -vx],
        [-vy, vx, 0]])
def vect2quat(v):
    """
    Embed the 3-vector v as a pure quaternion [0, v0, v1, v2]
    (scalar part q0 set to zero).
    """
    return np.concatenate(([0], v))
# vim: set et fenc=utf-8 ff=unix sts=0 sw=4 ts=4 :
| [
"numpy.array",
"numpy.hstack"
] | [((2964, 3028), 'numpy.array', 'np.array', (['[[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]'], {}), '([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n', (2972, 3028), True, 'import numpy as np\n'), ((3151, 3170), 'numpy.hstack', 'np.hstack', (['[[0], v]'], {}), '([[0], v])\n', (3160, 3170), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from pandas.tseries.offsets import BDay
from functools import partial
from pandas.tseries.frequencies import to_offset
class Metrics():
    """
    Backtest performance metrics.

    Computes market-level and instrument-level trading statistics (pnl,
    max drawdown, return on capital, profit/loss ratio, trade accuracy,
    annualized return/vol, sharpe, ...) from a market features dataframe
    and formats them as one-line summaries.
    """

    def __init__(self, marketFeaturesDf):
        # Per-timestamp market/portfolio features; calculateMarketMetrics
        # expects columns such as 'pnl', 'capitalUsage', 'maxDrawdown',
        # 'total_profit', 'total_loss', 'count_profit', 'count_loss'.
        self.__marketFeaturesDf = marketFeaturesDf
        # Populated by the calculate* methods below.
        self.__stats = {}

    def getMarketMetricsString(self):
        """Return a one-line human-readable summary of the market metrics."""
        # TODO add the snippet back once benchmark is fixed:
        #   + ' Benchmark: %0.2f%% ' % (100 * self.__stats['Base Return(%)'])
        # (local renamed from `str`, which shadowed the builtin)
        summary = \
            ' Total Pnl: %0.2f%% ' % (100 * self.__stats['Total Pnl(%)']) \
            + ' Max Drawdown: %0.2f%% ' % (100 * self.__stats['Max Drawdown(%)']) \
            + ' RoC: %0.2f%% ' % (100 * self.__stats['RoC(%)']) \
            + ' P/L Ratio: %0.2f ' % self.__stats['Profit/Loss Ratio'] \
            + ' Trade Accuracy: %0.2f ' % self.__stats['Accuracy']
        if 'Score' in self.__stats:
            summary = summary + ' Score: %0.2f ' % self.__stats['Score']
        # Annualized figures are only computed beyond one trading year.
        if self.__stats['Trading Days'] > 252:
            summary = summary \
                + ' Ann. Return: %0.2f%% ' % (100 * self.__stats['Annual Return(%)']) \
                + ' Ann. Vol: %0.2f%% ' % (100 * self.__stats['Annual Vol(%)']) \
                + ' Sharpe Ratio: %0.2f ' % self.__stats['Sharpe Ratio']
        return summary

    def getInstrumentMetricsString(self):
        """Return a one-line human-readable summary of instrument metrics."""
        # TODO add back once benchmark is supported:
        #   + ' Benchmark: %0.2f%% ' % (100 * self.__stats['Base Return(%)'])
        summary = \
            ' Pnl: %0.2f ' % self.__stats['Pnl'] \
            + ' Total Pnl: %0.2f%% ' % (100 * self.__stats['Total Pnl(%)']) \
            + ' Profit/Loss Ratio: %0.2f ' % self.__stats['Profit/Loss Ratio'] \
            + ' Trade Accuracy: %0.2f ' % self.__stats['Accuracy']
        if 'Score' in self.__stats:
            summary = summary + ' Score: %0.2f ' % self.__stats['Score']
        if 'Normalized Score' in self.__stats:
            summary = summary + ' Normalized Score: %0.2f ' % self.__stats['Normalized Score']
        return summary

    def getMetrics(self):
        """Return the dict of computed statistics."""
        return self.__stats

    def getDf(self):
        """Return the underlying market features dataframe."""
        return self.__marketFeaturesDf

    def round(self, t, freq):
        """Floor timestamp ``t`` down to the nearest multiple of ``freq``."""
        freq = to_offset(freq)
        return pd.Timestamp((t.value // freq.delta.value) * freq.delta.value)

    def resampleData(self, series, period):
        """Group ``series`` by its timestamps floored to ``period`` buckets."""
        return series.groupby(partial(self.round, freq=period))

    def calculateMarketMetrics(self, baseSymbol, priceFeature, startingCapital, dateBounds):
        """
        Compute portfolio-level statistics from the last row of the
        features dataframe and store them in the internal stats dict.

        baseSymbol/priceFeature feed the (currently disabled) benchmark;
        startingCapital normalizes pnl and drawdown; dateBounds is the
        (start, end) pair used to count business days.
        """
        stats = {}
        df = self.__marketFeaturesDf
        # TODO: resample 'pnl' / 'portfolio_value' daily via resampleData
        # once those features are finalized.
        total_days = len(pd.date_range(dateBounds[0], dateBounds[1], freq=BDay()))
        total_return = df['pnl'].iloc[- 1] / float(startingCapital)
        # Benchmark is disabled for now (baseSymbol deliberately forced to None).
        benchmark = self.getBenchmarkData(None, priceFeature, '')
        stats['Trading Days'] = total_days
        stats['Total Pnl(%)'] = total_return
        stats['RoC(%)'] = self.roc(df['pnl'].iloc[- 1], df['capitalUsage'].iloc[-1])
        stats['Max Drawdown(%)'] = self.max_drawdown(df['maxDrawdown'].iloc[-1], startingCapital)
        stats['Profit/Loss Ratio'] = self.profit_factor(df['total_profit'].iloc[-1], df['total_loss'].iloc[-1])
        stats['Accuracy'] = self.accuracy(df['count_profit'].iloc[-1], df['count_loss'].iloc[-1])
        # Annualized stats only make sense beyond one trading year.
        if total_days > 252:
            stats['Annual Return(%)'] = self.annualized_return(
                total_return, total_days)
            if benchmark is not None:
                stats['Base Return(%)'] = self.annualized_return(
                    benchmark['total_return'], total_days)
            stats['Annual Vol(%)'] = self.annual_vol(df['variance'].iloc[-1], startingCapital)
            stats['Sharpe Ratio'] = self.sharpe_ratio(stats['Annual Return(%)'], stats['Annual Vol(%)'])
        # TODO change reference to score
        if 'score' in df.columns:
            stats['Score'] = df['score'].iloc[-1]
        self.__stats = stats

    def calculateInstrumentFeatureMetrics(self, instrumentId, priceFeature, startingCapital, instrumentLookbackData):
        """
        Compute per-instrument statistics from the latest values in the
        instrument lookback data and store them in the stats dict.
        """
        stats = {}
        pnl = instrumentLookbackData.getFeatureDf('pnl').iloc[-1]
        total_profit = instrumentLookbackData.getFeatureDf('total_profit').iloc[-1]
        total_loss = instrumentLookbackData.getFeatureDf('total_loss').iloc[-1]
        count_profit = instrumentLookbackData.getFeatureDf('count_profit').iloc[-1]
        count_loss = instrumentLookbackData.getFeatureDf('count_loss').iloc[-1]
        totalReturn = pnl / float(startingCapital)
        stats['Pnl'] = pnl.loc[instrumentId]
        stats['Total Pnl(%)'] = totalReturn.loc[instrumentId]
        stats['Profit/Loss Ratio'] = self.profit_factor(total_profit.loc[instrumentId], total_loss.loc[instrumentId])
        stats['Accuracy'] = self.accuracy(count_profit.loc[instrumentId], count_loss.loc[instrumentId])
        # Score features are optional; deliberately skip them when absent.
        try:
            score = instrumentLookbackData.getFeatureDf('score').iloc[-1]
            stats['Score'] = score.loc[instrumentId]
            try:
                benchmarkScore = instrumentLookbackData.getFeatureDf('benchmark_score').iloc[-1]
                stats['Normalized Score'] = 1000 * score.loc[instrumentId] / benchmarkScore.loc[instrumentId]
            except KeyError:
                pass
        except KeyError:
            pass
        self.__stats = stats

    def annualized_return(self, total_return, total_days):
        """Annualize ``total_return`` earned over ``total_days`` (252-day year)."""
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        annualized_return = ((1 + total_return) **
                             (252.0 / float(total_days)) - 1)
        return annualized_return

    def annualized_std(self, variance, startingCapital):
        """Annualize a daily pnl variance and normalize by starting capital."""
        return (np.sqrt(252) * np.sqrt(variance)) / float(startingCapital)

    def annualized_downside_std(self, daily_return):
        """Annualized std of only the negative daily returns (downside risk)."""
        downside_return = daily_return.copy()
        downside_return[downside_return > 0] = 0
        return np.sqrt(252) * np.std(downside_return)

    def annual_vol(self, variance, startingCapital):
        """Annualized volatility; alias for annualized_std."""
        return self.annualized_std(variance, startingCapital)

    def sharpe_ratio(self, annual_return, annual_vol):
        """Sharpe ratio (risk-free rate assumed 0); NaN when vol is 0."""
        if annual_vol == 0:
            return np.nan
        else:
            return annual_return / float(annual_vol)

    def sortino_ratio(self, total_return, total_days, daily_return):
        """Sortino ratio: annualized return over downside deviation; NaN when flat."""
        stdev = self.annualized_downside_std(daily_return)
        if stdev == 0:
            return np.nan
        else:
            return self.annualized_return(total_return, total_days) / stdev

    def max_drawdown(self, maxDrawdown, startingCapital):
        """Max drawdown as a fraction of starting capital.

        ``maxDrawdown`` is the last row of the 'maxDrawdown' feature and is
        expected to itself expose a 'maxDrawdown' entry.
        """
        return maxDrawdown['maxDrawdown'] / float(startingCapital)

    def roc(self, total_pnl, capitalUsage):
        """Return on capital; NaN when no capital was used."""
        if capitalUsage > 0:
            return total_pnl / float(capitalUsage)
        else:
            return np.nan

    def beta(self, daily_return, baseline_daily_return):
        """Beta of the strategy returns vs the baseline; NaN for a flat baseline."""
        stdev = np.std(baseline_daily_return)
        if stdev == 0:
            return np.nan
        else:
            return np.corrcoef(daily_return, baseline_daily_return)[0, 1] * np.std(daily_return) / stdev

    def alpha(self, daily_return, baseline_daily_return, beta):
        """Jensen's alpha vs the baseline.

        NOTE(review): annualized_return requires (total_return, total_days);
        calling it with a single argument here raises TypeError, so this
        method appears broken/unused upstream — left unchanged on purpose.
        """
        return self.annualized_return(daily_return) - beta * self.annualized_return(baseline_daily_return)

    def profit_factor(self, total_profit, total_loss):
        """Gross profit over gross loss; NaN when there are no losses."""
        if total_loss == 0:
            return float('nan')
        return total_profit / float(total_loss)

    def profitability(self, total_profit, total_pnl):
        """Share of total pnl coming from winning trades; 0 when pnl is 0."""
        if total_pnl == 0:
            return 0
        return total_profit / float(total_pnl)

    def profit_factor_avg(self, total_profit, total_loss, count_profit, count_loss):
        """Average win over average loss; NaN when either side has no trades."""
        # BUGFIX: also guard count_profit == 0, which previously raised
        # ZeroDivisionError when there were losses but no winning trades.
        if total_loss == 0 or count_loss == 0 or count_profit == 0:
            return float('nan')
        return (total_profit / float(count_profit))\
            / (total_loss / float(count_loss))

    def accuracy(self, count_profit, count_loss):
        """Fraction of trades that were profitable; 0 when there are no trades."""
        total_count = count_profit + count_loss
        if total_count == 0:
            return 0
        return count_profit / float(total_count)

    def getBenchmarkData(self, baseSymbol, priceFeature, folderName):
        """
        Load the benchmark symbol's features CSV and compute its price
        series, daily returns and total return. Returns None when no
        baseSymbol is given.
        """
        if (baseSymbol is None):
            return None
        baseline_data = {}
        path = folderName + '/' + baseSymbol + '_features.csv'
        csv = pd.read_csv(path, engine='python')
        csv.set_index(csv['time'], inplace=True)
        csv.index = pd.to_datetime(csv.index)
        # Daily closing price via the floored-timestamp grouping.
        baseline_data['price'] = self.resampleData(
            csv[priceFeature], '1D').last()
        start = baseline_data['price'][0]
        baseline_data['returns'] = baseline_data['price'] / start - 1
        baseline_data['total_return'] = baseline_data['returns'][len(
            baseline_data['price']) - 1]
        baseline_data['daily_returns'] = baseline_data['price'] / \
            baseline_data['price'].shift(1) - 1
        baseline_data['daily_returns'].dropna(inplace=True)
        return baseline_data
| [
"numpy.sqrt",
"numpy.float",
"pandas.read_csv",
"numpy.corrcoef",
"pandas.tseries.frequencies.to_offset",
"functools.partial",
"pandas.tseries.offsets.BDay",
"numpy.std",
"pandas.Timestamp",
"pandas.to_datetime"
] | [((2169, 2184), 'pandas.tseries.frequencies.to_offset', 'to_offset', (['freq'], {}), '(freq)\n', (2178, 2184), False, 'from pandas.tseries.frequencies import to_offset\n'), ((2200, 2260), 'pandas.Timestamp', 'pd.Timestamp', (['(t.value // freq.delta.value * freq.delta.value)'], {}), '(t.value // freq.delta.value * freq.delta.value)\n', (2212, 2260), True, 'import pandas as pd\n'), ((7237, 7266), 'numpy.std', 'np.std', (['baseline_daily_return'], {}), '(baseline_daily_return)\n', (7243, 7266), True, 'import numpy as np\n'), ((8617, 8651), 'pandas.read_csv', 'pd.read_csv', (['path'], {'engine': '"""python"""'}), "(path, engine='python')\n", (8628, 8651), True, 'import pandas as pd\n'), ((8721, 8746), 'pandas.to_datetime', 'pd.to_datetime', (['csv.index'], {}), '(csv.index)\n', (8735, 8746), True, 'import pandas as pd\n'), ((2338, 2370), 'functools.partial', 'partial', (['self.round'], {'freq': 'period'}), '(self.round, freq=period)\n', (2345, 2370), False, 'from functools import partial\n'), ((6172, 6184), 'numpy.sqrt', 'np.sqrt', (['(252)'], {}), '(252)\n', (6179, 6184), True, 'import numpy as np\n'), ((6187, 6210), 'numpy.std', 'np.std', (['downside_return'], {}), '(downside_return)\n', (6193, 6210), True, 'import numpy as np\n'), ((5949, 5961), 'numpy.sqrt', 'np.sqrt', (['(252)'], {}), '(252)\n', (5956, 5961), True, 'import numpy as np\n'), ((5964, 5981), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (5971, 5981), True, 'import numpy as np\n'), ((2988, 2994), 'pandas.tseries.offsets.BDay', 'BDay', ([], {}), '()\n', (2992, 2994), False, 'from pandas.tseries.offsets import BDay\n'), ((5814, 5834), 'numpy.float', 'np.float', (['total_days'], {}), '(total_days)\n', (5822, 5834), True, 'import numpy as np\n'), ((7406, 7426), 'numpy.std', 'np.std', (['daily_return'], {}), '(daily_return)\n', (7412, 7426), True, 'import numpy as np\n'), ((7349, 7397), 'numpy.corrcoef', 'np.corrcoef', (['daily_return', 'baseline_daily_return'], {}), '(daily_return, 
baseline_daily_return)\n', (7360, 7397), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from hyperion.util.integrate import integrate_subset
from scipy import stats
from .units import ConvertUnits
class MakePlots(object):
'''
Plots slices of the val cube from SyntheticCube a the closest slice
to wav_interest or val images of SyntheticImage. The boundaries of
the slice or image is ploted as well. The plot has 4 different cut
levels at 100, 99, 95 and 90 percent.
Parameters
----------
input_array : SyntheticCube, SyntheticImage, optional
input_array also reads arrays with SyntheticCube and
SyntheticImage properties.
wav_interest : float, ``None``
* float : wavelength close to slice in microns.
* ``None`` : Only if input_array is SyntheticImage like
prefix : str
Name of the image. Default naming chain is switched off.
name : str
Name of image within the default naming chain to distinguish the
plot files. E. g. 'PSF_gaussian'
mulit_cut : ``True``, ``None``
* ``True`` : plots chosen image slice at cuts of [100, 99, 95, 90]%.
* ``None`` : no mulit-plot is returned.
Default is ``None``.
single_cut : float, ``None``
* float : cut level for single plot of image slice between 0 and 100.
* ``None`` : no single plot is returned.
set_cut : tuple, ``None``
* tuple : set_cut(v_min, v_max)
Minimal and Maximal physical value presented in the colorbars.
* ``None`` : no plot with minimal and maximal cut is returned.
Default is ``None``.
dpi : ``None``, scalar > 0
The resolution in dots per inch.
``None`` is default and will use the value savefig.dpi
in the matplotlibrc file.
Returns
-------
cube : SyntheticCube
3D val array with SyntheticCube properties.
image : SyntheticImage
2D val array with SyntheticImage properties.
'''
    def __init__(self, input_array, wav_interest=None, prefix=None, name=None, multi_cut=None, single_cut=None, set_cut=None, dpi=None):
        """
        Extract the 2D slice (cube) or image closest to wav_interest,
        compute DS9-like percentile cut levels and write the requested
        figure(s) (multi_cut / single_cut / set_cut) to PNG files.
        """
        # At least one plotting mode must be requested.
        if multi_cut == None and single_cut == None and set_cut == None:
            raise Exception('At least one plotting routine (multi_cut, single_cut or set_cut == None) has to be chosen.')
        self.prefix = prefix
        # Need either an explicit file prefix or a name for the default naming chain.
        if self.prefix is None and name is None:
            raise Exception('If prefix name is not given, you need to give the a name to enable the default naming chain.')
        if input_array.val.ndim in (2, 3):
            # Copy over the SyntheticCube / SyntheticImage properties.
            self.input_name = name
            self.name = input_array.name
            self.unit_out = input_array.unit_out
            self.val = input_array.val
            self.wav = input_array.wav
            self.wav_interest = wav_interest
            self.filter = input_array.filter
            #print self.filter
            self.grid_unit = input_array.grid_unit
            self.grid_unit_name = input_array.grid_unit_name
            # Physical extent of the image.
            self.FOV = input_array.FOV
            self.x_min = input_array.x_min
            self.x_max = input_array.x_max
            self.y_min = input_array.y_min
            self.y_max = input_array.y_max
            self.pixel = input_array.pixel
            self.pixel_2D = self.pixel[0] * self.pixel[1]
            #print self.val.shape
            # 3D cube: pick the wavelength slice closest to wav_interest.
            if input_array.val.ndim == 3:
                if self.wav_interest is None and self.filter['waf_min'] is None:
                    raise Exception('WARNING: wav_interest or waf_0 need to be defined if 3D cube is pasted.')
                find_minimum = np.abs(self.wav - self.wav_interest)
                num = np.arange(len(self.wav))
                index = num[find_minimum == np.min(find_minimum)][0]
                # Slice edges, assuming logarithmically spaced wavelength bins.
                wav_min = 10. ** (np.log10(self.wav[index]) - input_array.spacing_wav / 2.)
                wav_max = 10. ** (np.log10(self.wav[index]) + input_array.spacing_wav / 2.)
                self.val_2D = self.val[:, :, index]
                self.wav_real = (round(wav_min, 2), round(wav_max, 2))
            # 2D image: use it directly, bounded by the filter band.
            if input_array.val.ndim == 2:
                self.val_2D = self.val.copy()
                self.wav_real = (round(self.filter['waf_min'], 2), round(self.filter['waf_max'], 2))
        else:
            raise Exception('WARNING: MakePlots only can use SyntheticCube or SyntheticImage.')
        # Sorted pixel values used to create the percentile cut levels.
        self.val_sort = np.sort(self.val_2D.ravel())
        self.xx = np.linspace(0, len(self.val_sort), len(self.val_sort))
        # Statistics of the pixel-value distribution.
        self.median = stats.scoreatpercentile(self.val_sort, 50)
        self.min_0 = stats.scoreatpercentile(self.val_sort, 0)
        self.min_5 = stats.scoreatpercentile(self.val_sort, 5)
        self.max_95 = stats.scoreatpercentile(self.val_sort, 95)
        self.max_100 = stats.scoreatpercentile(self.val_sort, 100)
        # Coordinate grid for pcolormesh, in grid_unit units.
        x = np.linspace(self.x_min / self.grid_unit, self.x_max / self.grid_unit, self.pixel[0])
        y = np.linspace(self.y_min / self.grid_unit, self.y_max / self.grid_unit, self.pixel[1])
        X, Y = np.meshgrid(y,x)
        label = 'Flux [' + self.unit_out + ']'
        # Title of the figure(s).
        titel = self.name + ' ' + str(self.wav_real) + ' micron'
        # Multi-cut figure: the same slice at the 100/99/95/90 % cut levels.
        if multi_cut is not None:
            fig2 = plt.figure()
            fig2.suptitle(titel, fontsize=10.)
            fig2.subplots_adjust(hspace=0.3, wspace=0.3)
            font = {'size': 6}
            mpl.rc('font', **font)
            a = np.array([100, 99, 95, 90])
            b = np.array([1, 2])  # NOTE(review): b and this c are unused / overwritten below
            c = np.array([1, 1])
            for l in range(len(a)):
                k = l + 1
                ax = fig2.add_subplot(2, 2, k)
                title = str(int(a[l])) + ' %'
                plt.title(title)
                self.percentage = a[l]
                self.percent = self.percentage / 2.
                # Symmetric cut: drop (100 - a[l]) / 2 percent at each tail.
                lower_cut = (100 - a[l]) / 2.
                upper_cut = lower_cut + a[l]
                self.min = stats.scoreatpercentile(self.val_sort, lower_cut)
                self.max = stats.scoreatpercentile(self.val_sort, upper_cut)
                vmin = self.min
                vmax = self.max
                self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
                c = ax.pcolormesh(X, Y, self.val_2D, cmap=plt.cm.gist_heat, norm=self.norm)
                ax.set_xlim(x[0], x[-1])
                ax.set_ylim(y[0], y[-1])
                ax.set_xlabel('x [' + self.grid_unit_name + ']')
                ax.set_ylabel('y [' + self.grid_unit_name + ']')
                cb = fig2.colorbar(c)
                cb.set_label(label)
            # Default naming chain vs explicit prefix.
            if self.prefix is None:
                self.plot_name = self.name + '_image_' + self.input_name + '_multi_cut_' + str(self.wav_real[0]) + '_' + str(self.wav_real[1]) + '.png'
            if self.prefix is not None:
                self.plot_name = self.prefix + '.png'
            fig2.savefig(self.plot_name, bbox_inches='tight', dpi=dpi)
        # Single plot at one user-chosen percentage cut level.
        if single_cut is not None:
            fig3 = plt.figure()
            fig3.suptitle(titel, fontsize=10.)
            fig3.subplots_adjust(hspace=0.3, wspace=0.3)
            font = {'size': 6}
            mpl.rc('font', **font)
            a = np.array([single_cut])
            ax = fig3.add_subplot(1, 1, 1)
            title = str(int(a[0])) + ' %'
            plt.title(title)
            # Symmetric cut: drop (100 - single_cut) / 2 percent at each tail.
            lower_cut = (100 - single_cut) / 2.
            upper_cut = lower_cut + single_cut
            self.min = stats.scoreatpercentile(self.val_sort, lower_cut)
            self.max = stats.scoreatpercentile(self.val_sort, upper_cut)
            vmin = self.min
            vmax = self.max
            self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
            c = ax.pcolormesh(X, Y, self.val_2D, cmap=plt.cm.gist_heat, norm=self.norm)
            ax.set_xlim(x[0], x[-1])
            ax.set_ylim(y[0], y[-1])
            ax.set_xlabel('x [' + self.grid_unit_name + ']')
            ax.set_ylabel('y [' + self.grid_unit_name + ']')
            cb = fig3.colorbar(c)
            cb.set_label(label)
            if self.prefix is None:
                self.plot_name = self.name + '_image_' + self.input_name + '_single_cut_' + str(single_cut) + '%_' + str(self.wav_real[0]) + '_' + str(self.wav_real[1]) + '.png'
            if self.prefix is not None:
                self.plot_name = self.prefix + '.png'
            fig3.savefig(self.plot_name, bbox_inches='tight', dpi=dpi)
        # Single plot with explicit (vmin, vmax) colorbar bounds.
        if set_cut is not None:
            fig4 = plt.figure()
            fig4.suptitle(titel, fontsize=10.)
            fig4.subplots_adjust(hspace=0.3, wspace=0.3)
            font = {'size': 6}
            mpl.rc('font', **font)
            # Physical min/max values requested by the caller.
            vmin = set_cut[0]
            vmax = set_cut[1]
            ax = fig4.add_subplot(1, 1, 1)
            title = '[' + str("%0.2e" % vmin) + ', ' + str("%0.2e" % vmax) + ']'
            title2 = 'flux values within ' + title + ' ' + self.unit_out
            plt.title(title2)
            self.norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
            #print X.shape, Y.shape, self.val_2D.shape
            c = ax.pcolormesh(X, Y, self.val_2D, cmap=plt.cm.gist_heat, norm=self.norm)
            ax.set_xlim(x[0], x[-1])
            ax.set_ylim(y[0], y[-1])
            ax.set_xlabel('x [' + self.grid_unit_name + ']')
            ax.set_ylabel('y [' + self.grid_unit_name + ']')
            cb = fig4.colorbar(c)
            cb.set_label(label)
            if self.prefix is None:
                self.plot_name = self.name + '_image_' + self.input_name + '_set_cut_' + str("%0.2e" % vmin) + '_' + str("%0.2e" % vmax) + '_' + str(self.wav_real[0]) + '_' + str(self.wav_real[1]) + '.png'
            if self.prefix is not None:
                self.plot_name = self.prefix + '.png'
            fig4.savefig(self.plot_name, bbox_inches='tight', dpi=dpi)
    def histogram_cuts(self, dpi=None):
        '''
        DS9 like histograms of the cuts can be checked here.

        Top panel: the reverse-sorted flux values against pixel count on a
        logarithmic y-axis, with vertical markers for the median (red), the
        0th/100th percentiles (green) and the 5th/95th percentiles (yellow).
        Bottom panel: the sorted value distribution itself.
        The figure is written to ``self.plot_name``.

        Parameters
        ----------
        dpi : None, scalar > 0
            The resolution in dots per inch.
            ``None`` is default and will use the val savefig.dpi
            in the matplotlibrc file.
        '''
        # Histograms of cut levels
        fig = plt.figure()
        fig.suptitle(self.name, fontsize=10.)
        fig.subplots_adjust(hspace=0.3)
        ax1 = fig.add_subplot(2, 1, 1)
        plt.semilogy(self.val_sort[::-1], self.xx, 'b-')
        # vertical marker lines span the y-range; the `self.xx[0] + 1.` lower
        # bound keeps the endpoint strictly positive on the log axis
        # (presumably xx can start at 0 -- verify against where xx is built)
        plt.semilogy([self.median, self.median], [self.xx[0] + 1., self.xx[-1]], 'r-')
        # min max
        plt.semilogy([self.min_0, self.min_0], [self.xx[0] + 1., self.xx[-1]], 'g-')
        plt.semilogy([self.min_5, self.min_5], [self.xx[0] + 1., self.xx[-1]], 'y-')
        plt.semilogy([self.max_95, self.max_95], [self.xx[0] + 1., self.xx[-1]], 'y-')
        plt.semilogy([self.max_100, self.max_100], [self.xx[0] + 1., self.xx[-1]], 'g-')
        ax1.set_xlabel('val distribution')
        ax1.set_ylabel('Number of pixels')
        ax2 = fig.add_subplot(2, 1, 2)
        plt.plot(self.val_sort)
        ax2.set_xlabel('Number of pixels')
        ax2.set_ylabel('val distribution')
        ax2.set_xlim(self.xx[0], self.xx[-1])
        ax2.set_ylim(self.val_sort[0], self.val_sort[-1])
        # output file name: an explicit prefix takes precedence over the
        # auto-generated name built from instance/input names and wavelengths
        if self.prefix is None:
            self.plot_name = self.name + '_image_' + self.input_name + '_' + str(self.wav_real[0]) + '_' + str(self.wav_real[1]) + '_histogram.png'
        if self.prefix is not None:
            self.plot_name = self.prefix + '_histogram.png'
        fig.savefig(self.plot_name, bbox_inches='tight', dpi=dpi)
| [
"matplotlib.pyplot.semilogy",
"numpy.abs",
"numpy.log10",
"scipy.stats.scoreatpercentile",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.rc",
"matplotlib.colors.Normalize",
"numpy.min",
"numpy.meshgrid"
] | [((4960, 5002), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['self.val_sort', '(50)'], {}), '(self.val_sort, 50)\n', (4983, 5002), False, 'from scipy import stats\n'), ((5024, 5065), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['self.val_sort', '(0)'], {}), '(self.val_sort, 0)\n', (5047, 5065), False, 'from scipy import stats\n'), ((5087, 5128), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['self.val_sort', '(5)'], {}), '(self.val_sort, 5)\n', (5110, 5128), False, 'from scipy import stats\n'), ((5151, 5193), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['self.val_sort', '(95)'], {}), '(self.val_sort, 95)\n', (5174, 5193), False, 'from scipy import stats\n'), ((5217, 5260), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['self.val_sort', '(100)'], {}), '(self.val_sort, 100)\n', (5240, 5260), False, 'from scipy import stats\n'), ((5302, 5391), 'numpy.linspace', 'np.linspace', (['(self.x_min / self.grid_unit)', '(self.x_max / self.grid_unit)', 'self.pixel[0]'], {}), '(self.x_min / self.grid_unit, self.x_max / self.grid_unit, self.\n pixel[0])\n', (5313, 5391), True, 'import numpy as np\n'), ((5399, 5488), 'numpy.linspace', 'np.linspace', (['(self.y_min / self.grid_unit)', '(self.y_max / self.grid_unit)', 'self.pixel[1]'], {}), '(self.y_min / self.grid_unit, self.y_max / self.grid_unit, self.\n pixel[1])\n', (5410, 5488), True, 'import numpy as np\n'), ((5499, 5516), 'numpy.meshgrid', 'np.meshgrid', (['y', 'x'], {}), '(y, x)\n', (5510, 5516), True, 'import numpy as np\n'), ((10901, 10913), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10911, 10913), True, 'import matplotlib.pyplot as plt\n'), ((11048, 11096), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['self.val_sort[::-1]', 'self.xx', '"""b-"""'], {}), "(self.val_sort[::-1], self.xx, 'b-')\n", (11060, 11096), True, 'import matplotlib.pyplot as plt\n'), ((11105, 11184), 'matplotlib.pyplot.semilogy', 'plt.semilogy', 
(['[self.median, self.median]', '[self.xx[0] + 1.0, self.xx[-1]]', '"""r-"""'], {}), "([self.median, self.median], [self.xx[0] + 1.0, self.xx[-1]], 'r-')\n", (11117, 11184), True, 'import matplotlib.pyplot as plt\n'), ((11210, 11287), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['[self.min_0, self.min_0]', '[self.xx[0] + 1.0, self.xx[-1]]', '"""g-"""'], {}), "([self.min_0, self.min_0], [self.xx[0] + 1.0, self.xx[-1]], 'g-')\n", (11222, 11287), True, 'import matplotlib.pyplot as plt\n'), ((11295, 11372), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['[self.min_5, self.min_5]', '[self.xx[0] + 1.0, self.xx[-1]]', '"""y-"""'], {}), "([self.min_5, self.min_5], [self.xx[0] + 1.0, self.xx[-1]], 'y-')\n", (11307, 11372), True, 'import matplotlib.pyplot as plt\n'), ((11380, 11459), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['[self.max_95, self.max_95]', '[self.xx[0] + 1.0, self.xx[-1]]', '"""y-"""'], {}), "([self.max_95, self.max_95], [self.xx[0] + 1.0, self.xx[-1]], 'y-')\n", (11392, 11459), True, 'import matplotlib.pyplot as plt\n'), ((11467, 11552), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['[self.max_100, self.max_100]', '[self.xx[0] + 1.0, self.xx[-1]]', '"""g-"""'], {}), "([self.max_100, self.max_100], [self.xx[0] + 1.0, self.xx[-1]],\n 'g-')\n", (11479, 11552), True, 'import matplotlib.pyplot as plt\n'), ((11682, 11705), 'matplotlib.pyplot.plot', 'plt.plot', (['self.val_sort'], {}), '(self.val_sort)\n', (11690, 11705), True, 'import matplotlib.pyplot as plt\n'), ((5739, 5751), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5749, 5751), True, 'import matplotlib.pyplot as plt\n'), ((5900, 5922), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **font)\n", (5906, 5922), True, 'import matplotlib as mpl\n'), ((5940, 5967), 'numpy.array', 'np.array', (['[100, 99, 95, 90]'], {}), '([100, 99, 95, 90])\n', (5948, 5967), True, 'import numpy as np\n'), ((5984, 6000), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (5992, 6000), 
True, 'import numpy as np\n'), ((6017, 6033), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (6025, 6033), True, 'import numpy as np\n'), ((7542, 7554), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7552, 7554), True, 'import matplotlib.pyplot as plt\n'), ((7703, 7725), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **font)\n", (7709, 7725), True, 'import matplotlib as mpl\n'), ((7743, 7765), 'numpy.array', 'np.array', (['[single_cut]'], {}), '([single_cut])\n', (7751, 7765), True, 'import numpy as np\n'), ((7864, 7880), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7873, 7880), True, 'import matplotlib.pyplot as plt\n'), ((8000, 8049), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['self.val_sort', 'lower_cut'], {}), '(self.val_sort, lower_cut)\n', (8023, 8049), False, 'from scipy import stats\n'), ((8073, 8122), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['self.val_sort', 'upper_cut'], {}), '(self.val_sort, upper_cut)\n', (8096, 8122), False, 'from scipy import stats\n'), ((8204, 8246), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (8224, 8246), True, 'import matplotlib as mpl\n'), ((9089, 9101), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9099, 9101), True, 'import matplotlib.pyplot as plt\n'), ((9250, 9272), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **font)\n", (9256, 9272), True, 'import matplotlib as mpl\n'), ((9566, 9583), 'matplotlib.pyplot.title', 'plt.title', (['title2'], {}), '(title2)\n', (9575, 9583), True, 'import matplotlib.pyplot as plt\n'), ((9609, 9651), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (9629, 9651), True, 'import matplotlib as mpl\n'), ((3935, 3971), 'numpy.abs', 'np.abs', (['(self.wav - self.wav_interest)'], {}), '(self.wav - self.wav_interest)\n', 
(3941, 3971), True, 'import numpy as np\n'), ((6206, 6222), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6215, 6222), True, 'import matplotlib.pyplot as plt\n'), ((6433, 6482), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['self.val_sort', 'lower_cut'], {}), '(self.val_sort, lower_cut)\n', (6456, 6482), False, 'from scipy import stats\n'), ((6510, 6559), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['self.val_sort', 'upper_cut'], {}), '(self.val_sort, upper_cut)\n', (6533, 6559), False, 'from scipy import stats\n'), ((6653, 6695), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (6673, 6695), True, 'import matplotlib as mpl\n'), ((4139, 4164), 'numpy.log10', 'np.log10', (['self.wav[index]'], {}), '(self.wav[index])\n', (4147, 4164), True, 'import numpy as np\n'), ((4231, 4256), 'numpy.log10', 'np.log10', (['self.wav[index]'], {}), '(self.wav[index])\n', (4239, 4256), True, 'import numpy as np\n'), ((4080, 4100), 'numpy.min', 'np.min', (['find_minimum'], {}), '(find_minimum)\n', (4086, 4100), True, 'import numpy as np\n')] |
"""
Create stairs dataset.
Creates a matrix of n x d^2 dimensions which, reshaped to (n, d, d),
contains n grayscale images of stairs.
Stairs and background also contain some noise in each pixel,
and stair pixels are darker than background pixels.
Along with the dataset, a sample image
with m^2 samples arranged in a square grid is generated.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
default_folder = 'stairs'
labels = True  # this dataset comes along with per-image labels
# Defaults (overridable from the command line):
_n = 1000  # number of samples
_d = 5  # images are d x d pixels
_stairs_noise = .05  # scale of gaussian noise applied to stairs pixels
_background_noise = .01  # scale of gaussian noise applied to background pixels
def _generate_masks(d):
    """Generate one boolean mask per stair type/offset, plus label rows.

    For a ``d x d`` image there are ``d - 1`` right-down-to-left-up stairs
    (type 1, one per offset ``k``) followed by their horizontal mirrors
    (type 0, left-down to right-up).

    Parameters
    ----------
    d : int
        Side length of the square image.

    Returns
    -------
    masks : np.ndarray of bool, shape (2 * (d - 1), d ** 2)
        Flattened pixel masks; True marks a stair pixel.
    mask_labels : np.ndarray of int, shape (2 * (d - 1), d)
        One row per mask: stair type (0 or 1) followed by a one-hot
        encoding of the offset ``k``.
    """
    a = np.arange(d)
    # right-down to left-up stairs (type 1): pixel (i, j) is set iff i - j >= k
    masks = [a[:, None] - a[None, :] >= k for k in range(d - 1)]
    # left-down to right-up stairs (type 0): horizontal mirrors of the above
    masks += [m[:, ::-1] for m in masks[:d - 1]]
    masks = np.stack(masks, 0).reshape(len(masks), d ** 2)
    # labels: type (0 or 1) + one-hot k.
    # Renamed from `labels` to avoid shadowing the module-level `labels` flag.
    mask_labels = np.array([
        [t] + [int(i == k) for i in range(d - 1)]
        for t in range(2)
        for k in range(d - 1)
    ])
    return masks, mask_labels
def generate_samples(
    n=_n, d=_d,
    stairs_noise=_stairs_noise, background_noise=_background_noise,
    return_labels=False
):
    """Draw n random stair images of size d x d.

    Each image is a randomly chosen stair mask filled with bright noisy
    stair pixels (values in [0.5, 1]) over a dark noisy background
    (values in [0, 0.1]).  Returns an (n, d**2) array, plus the per-image
    label rows when ``return_labels`` is True.
    """
    all_masks, all_labels = _generate_masks(d)
    num_pixels = d ** 2
    # stair pixels: base brightness in [.5, 1) plus gaussian noise,
    # clipped back into [.5, 1]
    stair_base = np.random.uniform(size=(n, 1)) * .5 + .5
    stair_vals = (stair_base + np.random.normal(scale=stairs_noise, size=(n, num_pixels))).clip(0.5, 1.)
    # background pixels: base brightness in [0, .1) plus gaussian noise,
    # clipped back into [0, .1]
    bg_base = np.random.uniform(size=(n, 1)) * .1
    bg_vals = (bg_base + np.random.normal(scale=background_noise, size=(n, num_pixels))).clip(0., .1)
    # pick one mask per sample and composite stairs over background
    chosen = np.random.choice(range(len(all_masks)), size=(n,))
    pixel_mask = all_masks.reshape(len(all_masks), num_pixels)[chosen]
    samples = pixel_mask * stair_vals + (1 - pixel_mask) * bg_vals
    if return_labels:
        return samples, all_labels[chosen]
    return samples
def plot_samples(samples, nr=None, nc=None, figure_width=10):
    """Plot samples in a squared grid and return the resulting axes.

    Parameters
    ----------
    samples : np.ndarray, shape (n, d ** 2)
        Flattened square grayscale images with values in [0, 1].
    nr, nc : int or None
        Number of grid rows / columns; whichever is missing is derived
        from the sample count.  If both are given, ``nr * nc`` must be
        at least ``n``.
    figure_width : scalar
        Width (and height) of the figure in inches.

    Returns
    -------
    The axes object/array created by ``plt.subplots``.
    """
    n, d2 = samples.shape
    # derive a (near-)square grid for whichever dimensions the caller omitted
    if nr is None and nc is None:
        nc = int(np.sqrt(n))
        nr = (n - 1) // nc + 1
    elif nr is None:
        nr = (n - 1) // nc + 1
    elif nc is None:
        nc = (n - 1) // nr + 1
    else:
        assert nr * nc >= n
    d = int(np.sqrt(d2))
    assert d ** 2 == d2, 'Needs to be a square image'
    fig, axes_mat = plt.subplots(nr, nc, figsize=(figure_width, figure_width))
    # BUG FIX: for a 1x1 grid plt.subplots returns a bare Axes object that
    # has no .flatten(); np.atleast_1d handles scalar, 1-D and 2-D returns
    # uniformly without changing the returned axes_mat.
    axes = np.atleast_1d(axes_mat).ravel()
    for ax, x in zip(axes, samples):
        ax.imshow(x.reshape(d, d), cmap='gray_r', vmin=0, vmax=1)
    for ax in axes:
        ax.axis('off')  # axis off for ALL cells, even the ones without an image
    return axes_mat
if __name__ == '__main__':
    # Command-line interface: generate the dataset plus a preview image.
    parser = argparse.ArgumentParser()
    parser.add_argument('--n', type=int, default=_n, help='#samples')
    parser.add_argument('--d', type=int, default=_d, help='#width pixels')
    parser.add_argument(
        '--m', type=int, default=10,
        help='m^2 is the number of images in the sample image'
    )
    parser.add_argument(
        '--output_folder', type=str, default=default_folder,
        help='Output folder where to store the result'
    )
    parser.add_argument('--seed', type=int, default=123, help='Random seed')
    parser.add_argument(
        '--stairs-noise', dest='stairs_noise', type=float,
        default=_stairs_noise,
        help='Scale for the normal noise added to stair pixels'
    )
    parser.add_argument(
        '--background-noise', dest='background_noise', type=float,
        default=_background_noise,
        help='Scale for the normal noise added to background pixels'
    )
    args = parser.parse_args()
    # Create folder if it doesn't exist.
    # BUG FIX: the previous bare `try: os.mkdir(...) except: pass` silently
    # swallowed *every* failure (permissions, invalid path, ...), not just
    # "already exists"; makedirs(exist_ok=True) only tolerates the latter.
    os.makedirs(args.output_folder, exist_ok=True)
    # Generate dataset
    np.random.seed(args.seed)
    dataset, labels = generate_samples(
        args.n, args.d,
        args.stairs_noise, args.background_noise,
        return_labels=True
    )
    np.save(os.path.join(args.output_folder, 'data.npy'), dataset)
    np.save(os.path.join(args.output_folder, 'labels.npy'), labels)
    # Generate samples image (an m x m preview grid)
    samples = generate_samples(
        args.m ** 2, args.d,
        args.stairs_noise, args.background_noise,
    )
    axes = plot_samples(samples)
    plt.savefig(os.path.join(args.output_folder, 'sample.png'))
"numpy.random.normal",
"numpy.sqrt",
"argparse.ArgumentParser",
"os.path.join",
"numpy.stack",
"numpy.random.seed",
"numpy.expand_dims",
"numpy.random.uniform",
"os.mkdir",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((854, 866), 'numpy.arange', 'np.arange', (['d'], {}), '(d)\n', (863, 866), True, 'import numpy as np\n'), ((2746, 2804), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nr', 'nc'], {'figsize': '(figure_width, figure_width)'}), '(nr, nc, figsize=(figure_width, figure_width))\n', (2758, 2804), True, 'import matplotlib.pyplot as plt\n'), ((3103, 3128), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3126, 3128), False, 'import argparse\n'), ((4213, 4238), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4227, 4238), True, 'import numpy as np\n'), ((893, 915), 'numpy.expand_dims', 'np.expand_dims', (['x', 'dim'], {}), '(x, dim)\n', (907, 915), True, 'import numpy as np\n'), ((1754, 1804), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'stairs_noise', 'size': '(n, d2)'}), '(scale=stairs_noise, size=(n, d2))\n', (1770, 1804), True, 'import numpy as np\n'), ((1847, 1877), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, 1)'}), '(size=(n, 1))\n', (1864, 1877), True, 'import numpy as np\n'), ((1897, 1951), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'background_noise', 'size': '(n, d2)'}), '(scale=background_noise, size=(n, d2))\n', (1913, 1951), True, 'import numpy as np\n'), ((2658, 2669), 'numpy.sqrt', 'np.sqrt', (['d2'], {}), '(d2)\n', (2665, 2669), True, 'import numpy as np\n'), ((4114, 4142), 'os.mkdir', 'os.mkdir', (['args.output_folder'], {}), '(args.output_folder)\n', (4122, 4142), False, 'import os\n'), ((4401, 4445), 'os.path.join', 'os.path.join', (['args.output_folder', '"""data.npy"""'], {}), "(args.output_folder, 'data.npy')\n", (4413, 4445), False, 'import os\n'), ((4468, 4514), 'os.path.join', 'os.path.join', (['args.output_folder', '"""labels.npy"""'], {}), "(args.output_folder, 'labels.npy')\n", (4480, 4514), False, 'import os\n'), ((4722, 4768), 'os.path.join', 'os.path.join', (['args.output_folder', '"""sample.png"""'], {}), "(args.output_folder, 
'sample.png')\n", (4734, 4768), False, 'import os\n'), ((1185, 1203), 'numpy.stack', 'np.stack', (['masks', '(0)'], {}), '(masks, 0)\n', (1193, 1203), True, 'import numpy as np\n'), ((1693, 1723), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, 1)'}), '(size=(n, 1))\n', (1710, 1723), True, 'import numpy as np\n'), ((2460, 2470), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2467, 2470), True, 'import numpy as np\n')] |
import importlib
import sys
import typing
import numpy as np
import pandas as pd
import tornado.gen
import tornado.httpserver
import tornado.ioloop
import tornado.web
from pysyncobj import SyncObjConsumer, replicated
from pysyncobj.batteries import ReplList
from tornado.routing import Router
from pushpy.batteries import ReplEventDict, ReplVersionedDict, ReplTaskManager
from pushpy.code_store import load_src, CodeStoreLoader
from pushpy.push_manager import PushManager
from pushpy.task_manager import TaskManager
class ReplTimeseries(SyncObjConsumer):
    """Replicated, append-only column store keyed by a shared index.

    ``reset`` and ``append`` are @replicated, so pysyncobj applies them on
    every node of the cluster; ``flatten`` is a local read that
    materialises the columns into a pandas DataFrame.
    """

    def __init__(self, on_append=None):
        # keep the callback assignment ahead of the consumer init, matching
        # the original initialisation order
        self.__on_append = on_append
        super(ReplTimeseries, self).__init__()
        self.__data = dict()        # column name -> list of per-append value lists
        self.__index_data = list()  # shared index, one entry per append()

    @replicated
    def reset(self):
        """Drop all columns and index entries (cluster-wide)."""
        self.__data = dict()
        self.__index_data = list()

    @replicated
    def append(self, idx_data, keys, data):
        """Record one index entry plus one value (or value list) per key."""
        self.__index_data.append(idx_data)
        for key, values in zip(keys, data):
            column = self.__data.setdefault(key, list())
            # normalise scalars to single-element lists so every column is a
            # list of lists and can be np.concatenate'd in flatten()
            column.append(values if isinstance(values, list) else [values])
        if self.__on_append is not None:
            self.__on_append(idx_data, keys, data)

    def flatten(self, keys=None):
        """Materialise the requested columns (default: all) as a DataFrame."""
        keys = keys or list(self.__data.keys())
        frame = pd.DataFrame(columns=keys, index=self.__index_data)
        for key in keys:
            frame[key] = np.concatenate(self.__data[key])
        return frame
class Handle404(tornado.web.RequestHandler):
    """Fallback handler used when no stored handler matches: replies 404."""

    def get(self):
        self.set_status(404)
        self.write('404 Not Found')
# https://stackoverflow.com/questions/47970574/tornado-routing-to-a-base-handler
class MyRouter(Router):
    """Dynamic tornado router backed by the code store.

    The handler class for each request is loaded from the store under the
    key ``<prefix>/<Host header><request path>``; missing or failing
    lookups fall back to :class:`Handle404`.
    """

    def __init__(self, store, app, prefix=None):
        self.store = store
        self.app = app
        self.prefix = prefix or '/web'

    def find_handler(self, request, **kwargs):
        host = request.headers.get("Host")
        try:
            # a falsy lookup result (nothing stored) also falls back to 404
            handler = load_src(self.store, f"{self.prefix}/{host}{request.path}") or Handle404
        except Exception as exc:
            import traceback
            traceback.print_exc()
            print(exc)
            handler = Handle404
        return self.app.get_handler_delegate(request, handler)
def make_app(kvstore):
    # Build the tornado application: all routing is delegated to MyRouter,
    # which resolves handlers out of the given (replicated) code store.
    return MyRouter(kvstore, tornado.web.Application())
class DoRegister:
    """Instantiate a stored callable and expose it through PushManager.

    TODO: this could be a replicated command (ReplLambda / ReplCommand)
    that runs on all hosts.
    """

    def __init__(self, store):
        self.store = store

    def apply(self, name, src):
        # resolve the source reference against the code store, build one
        # instance, and register a provider that always hands back that same
        # instance (captured via the lambda's default argument)
        factory = load_src(self.store, src)
        instance = factory()
        PushManager.register(name, callable=lambda l=instance: l)
def main() -> typing.Tuple[typing.Dict[str, object], object]:
    """Wire up all replicated state objects and the web application.

    Returns
    -------
    (boot_globals, router) : the dict of named replicated/local objects to
        expose at boot time, and the tornado router built over the
        replicated code store.
    """
    repl_code_store = ReplVersionedDict()
    tm = TaskManager(repl_code_store)
    repl_ver_store = ReplVersionedDict()
    # mutations of these stores trigger the corresponding local task handlers
    repl_kvstore = ReplEventDict(on_set=tm.on_event_handler("process_kv_updates"))
    repl_ts = ReplTimeseries(on_append=tm.on_event_handler("process_ts_updates"))
    repl_strategies = ReplList()
    repl_task_manager = ReplTaskManager(repl_kvstore, tm)
    # the code store will be directly used for imports, where the keys are the resolvable package names
    finder = CodeStoreLoader.install(repl_code_store)
    def invalidate_caches(head):
        # called whenever the code store's head version moves: drop the import
        # finder's caches and hot-reload every already-imported push module
        print(f"reloading push modules: head={head}")
        finder.invalidate_caches()
        repl_packages = set(finder.cache_store.keys())
        # TODO: reloading modules that may run against modules that are still old has to have problems at some point
        # do we just flush them out of sys.modules and reload on demand?
        for key in list(sys.modules.keys()):
            pkg = key.split(".")[0]
            if pkg in repl_packages:
                importlib.reload(sys.modules[key])
                print(f"reloading module: {key}")
    repl_code_store.on_head_change = invalidate_caches
    # names exposed to boot code / interactive sessions
    boot_globals = dict()
    boot_globals['repl_kvstore'] = repl_kvstore
    boot_globals['repl_ver_store'] = repl_ver_store
    boot_globals['repl_code_store'] = repl_code_store
    boot_globals['repl_tasks'] = repl_task_manager
    boot_globals['local_tasks'] = tm
    boot_globals['local_register'] = DoRegister(repl_code_store)
    boot_globals['repl_ts'] = repl_ts
    boot_globals['repl_strategies'] = repl_strategies
    tm.start_event_handlers()
    return boot_globals, make_app(repl_code_store)
| [
"pushpy.batteries.ReplTaskManager",
"pushpy.code_store.CodeStoreLoader.install",
"pandas.DataFrame",
"pushpy.push_manager.PushManager.register",
"sys.modules.keys",
"pushpy.task_manager.TaskManager",
"numpy.concatenate",
"importlib.reload",
"pysyncobj.batteries.ReplList",
"traceback.print_exc",
... | [((2857, 2876), 'pushpy.batteries.ReplVersionedDict', 'ReplVersionedDict', ([], {}), '()\n', (2874, 2876), False, 'from pushpy.batteries import ReplEventDict, ReplVersionedDict, ReplTaskManager\n'), ((2886, 2914), 'pushpy.task_manager.TaskManager', 'TaskManager', (['repl_code_store'], {}), '(repl_code_store)\n', (2897, 2914), False, 'from pushpy.task_manager import TaskManager\n'), ((2936, 2955), 'pushpy.batteries.ReplVersionedDict', 'ReplVersionedDict', ([], {}), '()\n', (2953, 2955), False, 'from pushpy.batteries import ReplEventDict, ReplVersionedDict, ReplTaskManager\n'), ((3143, 3153), 'pysyncobj.batteries.ReplList', 'ReplList', ([], {}), '()\n', (3151, 3153), False, 'from pysyncobj.batteries import ReplList\n'), ((3178, 3211), 'pushpy.batteries.ReplTaskManager', 'ReplTaskManager', (['repl_kvstore', 'tm'], {}), '(repl_kvstore, tm)\n', (3193, 3211), False, 'from pushpy.batteries import ReplEventDict, ReplVersionedDict, ReplTaskManager\n'), ((3330, 3370), 'pushpy.code_store.CodeStoreLoader.install', 'CodeStoreLoader.install', (['repl_code_store'], {}), '(repl_code_store)\n', (3353, 3370), False, 'from pushpy.code_store import load_src, CodeStoreLoader\n'), ((1433, 1484), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'keys', 'index': 'self.__index_data'}), '(columns=keys, index=self.__index_data)\n', (1445, 1484), True, 'import pandas as pd\n'), ((2667, 2692), 'pushpy.code_store.load_src', 'load_src', (['self.store', 'src'], {}), '(self.store, src)\n', (2675, 2692), False, 'from pushpy.code_store import load_src, CodeStoreLoader\n'), ((2719, 2769), 'pushpy.push_manager.PushManager.register', 'PushManager.register', (['name'], {'callable': '(lambda l=q: l)'}), '(name, callable=lambda l=q: l)\n', (2739, 2769), False, 'from pushpy.push_manager import PushManager\n'), ((1532, 1564), 'numpy.concatenate', 'np.concatenate', (['self.__data[key]'], {}), '(self.__data[key])\n', (1546, 1564), True, 'import numpy as np\n'), ((3769, 3787), 'sys.modules.keys', 
'sys.modules.keys', ([], {}), '()\n', (3785, 3787), False, 'import sys\n'), ((2086, 2145), 'pushpy.code_store.load_src', 'load_src', (['self.store', 'f"""{self.prefix}/{host}{request.path}"""'], {}), "(self.store, f'{self.prefix}/{host}{request.path}')\n", (2094, 2145), False, 'from pushpy.code_store import load_src, CodeStoreLoader\n'), ((2231, 2252), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2250, 2252), False, 'import traceback\n'), ((3879, 3913), 'importlib.reload', 'importlib.reload', (['sys.modules[key]'], {}), '(sys.modules[key])\n', (3895, 3913), False, 'import importlib\n')] |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import collections as cl
import pandas as pd
from .crop import Crop
from .scenario import Scenario
import json
from .util import *
class District():
  def __init__(self, df, name, key, scenario_file = 'baseline'):
    '''Load district properties from JSON, then zero-initialize all state.

    Parameters
    ----------
    df : pandas DataFrame with a DatetimeIndex (daily model input)
    name : str, long district name
    key : str, short district key; also selects
        cord/districts/<key>_properties.json
    scenario_file : str, 'baseline' or a path to a JSON file of attribute
        overrides applied after the district properties
    '''
    self.T = len(df)
    self.starting_year = df.index.year[0]
    self.number_years = df.index.year[-1]-df.index.year[0]
    self.key = key
    self.name = name
    # calendar helpers over the simulation period (util functions)
    self.leap = leap(np.arange(min(df.index.year), max(df.index.year) + 2))
    year_list = np.arange(min(df.index.year), max(df.index.year) + 2)
    self.days_in_month = days_in_month(year_list, self.leap)
    self.dowy_eom = dowy_eom(year_list, self.leap)
    self.non_leap_year = first_non_leap_year(self.dowy_eom)
    self.turnback_use = True
    # district-specific attributes (acreage, zone, contracts, ...) come from JSON
    for k, v in json.load(open('cord/districts/%s_properties.json' % key)).items():
        setattr(self, k, v)
    # scenario overrides, applied on top of the baseline properties
    if ((scenario_file == 'baseline') == False):
      for k,v in json.load(open(scenario_file)).items():
        setattr(self,k,v)
    #initialize crop acreages and et demands for crops
    self.irrdemand = Crop(self.zone)
    #initialize dictionary to hold different delivery types
    self.deliveries = {}
    self.contract_list_all = ['tableA', 'cvpdelta', 'exchange', 'cvc', 'friant1', 'friant2','kaweah', 'tule', 'kern','kings']
    self.non_contract_delivery_list = ['recover_banked','inleiu_irrigation','inleiu_recharge','leiupumping','recharged','exchanged_GW','exchanged_SW','undelivered_trades']
    for x in self.contract_list_all:
      #normal contract deliveries
      self.deliveries[x] = np.zeros(self.number_years)
      #uncontrolled deliveries from contract
      self.deliveries[x + '_flood'] = np.zeros(self.number_years)
      # contract-specific recharge (same as 'recharged', but disaggregated)
      self.deliveries[x + '_recharged'] = np.zeros(self.number_years)
      #deliveries from a groundwater bank (recorded by banking partner receiving recovery water)
      self.deliveries[x+ '_flood_irrigation'] = np.zeros(self.number_years)
    #deliveries from a groundwater bank (recorded by banking partner receiving recovery water)
    self.deliveries['recover_banked'] = np.zeros(self.number_years)
    #deliveries to a in-leiu bank from a banking partner (recorded by the district acting as a bank)
    self.deliveries['inleiu_irrigation'] = np.zeros(self.number_years)
    self.deliveries['inleiu_recharge'] = np.zeros(self.number_years)
    #deliveries from an in leiu bank to a banking partner (recorded by the district acting as a bank)
    self.deliveries['leiupumping'] = np.zeros(self.number_years)
    #water delivered from a contract to a recharge basin (direct or in leiu, recorded by the banking partner who owned the water)
    self.deliveries['recharged'] = np.zeros(self.number_years)
    #deliveries made from a districts bank to third-party district (district receives a surface water 'paper' credit)
    self.deliveries['exchanged_GW'] = np.zeros(self.number_years)
    #recorded when a district receives water from a bank owned by another district (district gives a surface water 'paper' credit)
    self.deliveries['exchanged_SW'] = np.zeros(self.number_years)
    self.deliveries['undelivered_trades'] = np.zeros(self.number_years)
    #set dictionaries to keep track of different 'color' water for each contract
    self.current_balance = {}#contract water currently available in surface water storage
    self.paper_balance = {}#balance (positive) or negative of paper trades made from groundwater banks
    self.turnback_pool = {}#water purchased from intra-contract markets (June 1st)
    self.projected_supply = {}#projected annual allocation to each contract
    self.carryover = {}#water 'carried over' in surface water storage from previous year's contract
    self.recharge_carryover = {}#amount of water that the district wants to request contract deliveries for recharge
    self.delivery_carryover = {}#amount of water to deliver immediately because of surface storage spillage
    self.contract_carryover_list = {}#maximum carryover storage on contract
    self.carryover['tot'] = 0.0
    self.projected_supply['tot'] = 0.0
    #initialize values for all contracts in dictionaries
    for y in self.contract_list_all:
      self.current_balance[y] = 0.0
      self.paper_balance[y] = 0.0
      self.turnback_pool[y] = 0.0
      self.projected_supply[y] = 0.0
      self.carryover[y] = 0.0
      self.recharge_carryover[y] = 0.0
      self.delivery_carryover[y] = 0.0
      self.contract_carryover_list[y] = 0.0
    #initialize dictionaries to 'store' daily state variables (for export to csv)
    self.daily_supplies = {}
    supply_list = ['paper', 'carryover', 'allocation', 'delivery', 'flood_irrigation', 'leiu_applied', 'leiu_recharged', 'banked', 'pumping', 'leiu_delivered', 'recharge_delivery', 'recharge_uncontrolled']
    for x in supply_list:
      self.daily_supplies[x] = np.zeros(self.T)
    #initialize dictionaries to 'store' annual change in state variables (for export to csv)
    self.annual_supplies = {}
    supply_list = ['delivery', 'flood_irrigation', 'leiu_applied','leiu_recharged', 'leiu_delivered', 'banked_accepted']
    for x in supply_list:
      self.annual_supplies[x] = np.zeros(self.number_years)
    # hold all output
    self.daily_supplies_full = {}
    # delivery_list = ['tableA', 'cvpdelta', 'exchange', 'cvc', 'friant1', 'friant2','kaweah', 'tule', 'kern']
    for x in self.contract_list_all:
      self.daily_supplies_full[x + '_delivery'] = np.zeros(self.T)
      self.daily_supplies_full[x + '_flood'] = np.zeros(self.T)
      self.daily_supplies_full[x + '_flood_irrigation'] = np.zeros(self.T)
      self.daily_supplies_full[x + '_recharged'] = np.zeros(self.T)
      self.daily_supplies_full[x + '_projected'] = np.zeros(self.T)
      self.daily_supplies_full[x + '_paper'] = np.zeros(self.T)
      self.daily_supplies_full[x + '_carryover'] = np.zeros(self.T)
      self.daily_supplies_full[x + '_turnback'] = np.zeros(self.T)
    for x in self.non_contract_delivery_list:
      self.daily_supplies_full[x] = np.zeros(self.T)
    for x in ['recover_banked', 'inleiu_irrigation', 'inleiu_recharge', 'leiupumping', 'recharged', 'exchanged_GW', 'exchanged_SW', 'pumping', 'irr_demand']:
      self.daily_supplies_full[x] = np.zeros(self.T)
    # ['recover_banked', 'inleiu', 'leiupumping', 'recharged', 'exchanged_GW', 'exchanged_SW', 'undelivered_trades']
    #Initialize demands
    self.annualdemand = 0.0
    self.dailydemand = 0.0
    #recovery and pumping variables
    #self.recovery_fraction = 0.5
    self.annual_pumping = 0.0
    self.use_recharge = 0.0
    self.use_recovery = 0.0
    self.extra_leiu_recovery = 0.0
    self.max_recovery = 0.0
    self.max_leiu_exchange = 0.0
    self.direct_recovery_delivery = 0.0
    #for in-district recharge & counters (for keeping track of how long a basin has been continuously 'wet'
    self.recharge_rate = self.in_district_direct_recharge*cfs_tafd
    self.thismonthuse = 0
    self.monthusecounter = 0
    self.monthemptycounter = 0
    self.current_recharge_storage = 0.0
    self.private_fraction = 0.0
    self.has_private = False
    self.has_pesticide = False
    self.has_pmp = False
    #banking dictionaries to keep track of individual member use & accounts
    if self.in_leiu_banking:
      self.recovery_use = {}
      self.inleiubanked = {}
      self.contract_exchange = {}
      self.leiu_additional_supplies = {}
      self.bank_deliveries = {}
      self.tot_leiu_recovery_use = 0.0
      self.direct_storage = {}
      self.bank_timeseries = {}
      self.annual_timeseries = {}
      self.recharge_rate_series = np.zeros(self.T)
      self.use_recovery = 0.0
      self.leiu_trade_cap = 0.5
      # per-participant banking accounts and daily time series
      for x in self.participant_list:
        self.recovery_use[x] = 0.0
        self.inleiubanked[x] = 0.0
        self.leiu_additional_supplies[x] = 0.0
        self.bank_deliveries[x] = 0.0
        self.direct_storage[x] = 0.0
        self.bank_timeseries[x] = np.zeros(self.T)
        self.annual_timeseries[x] = np.zeros(self.T)
        self.contract_exchange[x] = np.zeros(self.T)
  def object_equals(self, other):
    ##This function compares two instances of an object, returns True if all attributes are identical.
    # NOTE(review): when the attribute *sets* differ, this returns the string
    # 'Different Attributes' (which is truthy) instead of False -- callers
    # should be aware of the mixed return type.
    # The repeated `type(a == b) is bool` test distinguishes plain scalars
    # (== yields a bool) from numpy arrays (== yields an array, so .all()
    # is needed to reduce it to a single bool).
    equality = {}
    if (self.__dict__.keys() != other.__dict__.keys()):
      return ('Different Attributes')
    else:
      differences = 0
      for i in self.__dict__.keys():
        if type(self.__getattribute__(i)) is dict:
          # compare dict-valued attributes entry by entry
          equality[i] = True
          for j in self.__getattribute__(i).keys():
            if (type(self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) is bool):
              if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]) == False):
                equality[i] = False
                differences += 1
            else:
              if ((self.__getattribute__(i)[j] == other.__getattribute__(i)[j]).all() == False):
                equality[i] = False
                differences += 1
        else:
          if (type(self.__getattribute__(i) == other.__getattribute__(i)) is bool):
            equality[i] = (self.__getattribute__(i) == other.__getattribute__(i))
            if equality[i] == False:
              differences += 1
          else:
            equality[i] = (self.__getattribute__(i) == other.__getattribute__(i)).all()
            if equality[i] == False:
              differences += 1
    return (differences == 0)
##################################SENSITIVITY ANALYSIS#################################################################
def set_sensitivity_factors(self, et_factor, acreage_factor, irr_eff_factor, recharge_decline_factor):
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
for wyt in wyt_list:
for i,v in enumerate(self.crop_list):
self.acreage[wyt][i] = self.acreage[wyt][i]*acreage_factor
for monthloop in range(0,12):
self.irrdemand.etM[v][wyt][monthloop] = self.irrdemand.etM[v][wyt][monthloop]*et_factor
self.seepage = 1.0 + irr_eff_factor
for recharge_count in range(0, len(self.recharge_decline)):
self.recharge_decline[recharge_count] = 1.0 - recharge_decline_factor*(1.0 - self.recharge_decline[recharge_count])
#####################################################################################################################
##################################DEMAND CALCULATION#################################################################
#####################################################################################################################
  def find_baseline_demands(self,wateryear):
    #Build self.monthlydemand: per water-year-type, per-month daily demand rates
    #for the district (urban deliveries plus net crop irrigation demand).
    #Consumed later by calc_demand. NOTE(review): units appear to be a per-day
    #volume rate (annual quantities spread by 12.0*days-in-month) - confirm.
    self.monthlydemand = {}
    wyt_list = ['W', 'AN', 'BN', 'D', 'C']
    for wyt in wyt_list:
      self.monthlydemand[wyt] = np.zeros(12)
      for monthloop in range(0,12):
        #urban component: monthly demand profile scaled by MDD
        #(presumably max/mean daily demand - TODO confirm), expressed per-day
        self.monthlydemand[wyt][monthloop] += self.urban_profile[monthloop]*self.MDD/self.days_in_month[self.non_leap_year][monthloop]
        if self.has_pesticide:
          #acreage varies by year; private (non-district) acreage is netted out
          #(index i from enumerate is unused here - iteration is over dict keys)
          for i,v in enumerate(self.acreage_by_year):
            self.monthlydemand[wyt][monthloop] += max(self.irrdemand.etM[v][wyt][monthloop] - self.irrdemand.etM['precip'][wyt][monthloop],0.0)*(self.acreage_by_year[v][wateryear]-self.private_acreage[v][wateryear])/(12.0*self.days_in_month[self.non_leap_year][monthloop])
        elif self.has_pmp:
          #acreage comes from the positive-mathematical-programming crop model
          for crop in self.pmp_acreage:
            self.monthlydemand[wyt][monthloop] += max(self.irrdemand.etM[crop][wyt][monthloop] - self.irrdemand.etM['precip'][wyt][monthloop],0.0)*max(self.pmp_acreage[crop]-self.private_acreage[crop], 0.0)/(12.0*self.days_in_month[self.non_leap_year][monthloop])
        else:
          #static crop list: ET net of effective precip, over district-held acreage
          for i,v in enumerate(self.crop_list):
            self.monthlydemand[wyt][monthloop] += max(self.irrdemand.etM[v][wyt][monthloop] - self.irrdemand.etM['precip'][wyt][monthloop],0.0)*(self.acreage[wyt][i]-self.private_acreage[v])/(12.0*self.days_in_month[self.non_leap_year][monthloop])
            #self.monthlydemand[wyt][monthloop] += max(self.irrdemand.etM[v][wyt][monthloop] ,0.0)*self.acreage[wyt][i]/(12.0*self.days_in_month[self.non_leap_year][monthloop])
def calc_demand(self, wateryear, year, da, m, m1, wyt):
#from the monthlydemand dictionary (calculated at the beginning of each wateryear based on ag acreage and urban demands), calculate the daily demand and the remaining annual demand
monthday = self.days_in_month[year][m-1]
self.dailydemand = self.monthlydemand[wyt][m-1]*(monthday-da)/monthday + self.monthlydemand[wyt][m1-1]*da/monthday
if self.dailydemand < 0.0:
self.dailydemand = 0.0
#calculate that days 'starting' demand (b/c demand is filled @multiple times, and if we only want to fill a certain fraction of that demand (based on projections of supply & demand for the rest of the year), we want to base that fraction on that day's total demand, not the demand left after other deliveries are made
self.dailydemand_start = self.monthlydemand[wyt][m-1]*(monthday-da)/monthday + self.monthlydemand[wyt][m1-1]*da/monthday
#pro-rate this month's demand based on the day of the month when calculating remaining annual demand
self.annualdemand = max(self.monthlydemand[wyt][m-1]*(monthday-da), 0.0)
self.irrseasondemand = 0.0
for irrseason in range(6,9):
self.irrseasondemand += max(self.monthlydemand[wyt][irrseason]*self.days_in_month[year][irrseason], 0.0)
if m > 9:
for monthloop in range(m, 12):
self.annualdemand += max(self.monthlydemand[wyt][monthloop]*self.days_in_month[year][monthloop],0.0)
for monthloop in range(0,9):
self.annualdemand += max(self.monthlydemand[wyt][monthloop]*self.days_in_month[year+1][monthloop], 0.0)
else:
for monthloop in range(m, 9):
self.annualdemand += max(self.monthlydemand[wyt][monthloop]*self.days_in_month[year][monthloop], 0.0)
def find_pre_flood_demand(self, wyt, year):
#calculates an estimate for water use in the Oct-Dec period (for use in recharge_carryover calculations), happens Oct 1
self.pre_flood_demand = self.monthlydemand[wyt][9]*self.days_in_month[year][9] + self.monthlydemand[wyt][10]*self.days_in_month[year][10] + self.monthlydemand[wyt][11]*self.days_in_month[year][11]
  def get_urban_demand(self, t, m, da, wateryear, year, sri, dowy, total_delta_pumping):
    #this function finds demands for the 'branch pumping' urban nodes - Socal, South Bay, & Central Coast
    #demand is equal to pumping of the main california aqueduct and into the branches that services these areas
    #cal aqueduct urban demand comes from pumping data, calc seperately
    #frac_to_district: share of the node's deliveries that belongs to the
    #district itself (the remainder belongs to private entities within it)
    if self.has_private:
      if self.has_pesticide:
        frac_to_district = 1.0 - self.private_fraction[wateryear]
      else:
        frac_to_district = 1.0 - self.private_fraction
    else:
      frac_to_district = 1.0
    #today's demand equals observed aqueduct pumping
    #NOTE(review): /1000.0 presumably converts AF to tAF - confirm units
    self.dailydemand = self.pumping[t]/1000.0
    self.dailydemand_start = self.dailydemand
    ##Keep track of ytd pumping to Cal Aqueduct Branches
    self.ytd_pumping[wateryear] += self.dailydemand
    #regression estimate of annual deliveries from the SRI index
    #(per-dowy linear coefficients), scaled by projected delta pumping
    sri_estimate = (sri*self.delivery_percent_coefficient[dowy][0] + self.delivery_percent_coefficient[dowy][1])*total_delta_pumping*frac_to_district
    #remaining annual demand: blend of observed annual pumping and the
    #regression estimate, weighted by how far into the water year we are,
    #minus what has already been pumped
    self.annualdemand = max(0.0, (self.annual_pumping[wateryear]*dowy + sri_estimate*(364.0 - dowy))/364.0 - self.ytd_pumping[wateryear])
    #on Oct 1 (calendar month 10, day 1), rebuild the monthly demand profile
    #from the upcoming year's pumping timeseries
    if m == 10 and da == 1:
      start_of_month = 0
      cross_counter_y = 0
      ###Divide aqueduct branch pumping into 'monthly demands'
      for monthloop in range(0,12):
        #monthloop counts from the start of the water year (Oct); monthcounter
        #is the corresponding calendar-month index, wrapping into the next year
        monthcounter = monthloop + 9
        if monthcounter > 11:
          monthcounter -= 12
          cross_counter_y = 1
        start_next_month = self.dowy_eom[year+cross_counter_y][monthcounter] + 1
        #monthly demand is the mean daily pumping over that month
        #(same value for every water-year type)
        for wyt in ['W', 'AN', 'BN', 'D', 'C']:
          self.monthlydemand[wyt][monthcounter] = np.mean(self.pumping[(t + start_of_month):(t + start_next_month)])/1000.0
        start_of_month = start_next_month
def set_pmp_acreage(self, water_constraint_by_source, land_constraint, x0):
self.acreage_by_pmp_crop_type = self.irrdemand.find_pmp_acreage(water_constraint_by_source,land_constraint, x0)
self.pmp_acreage = {}
i = 0
for crop in self.irrdemand.crop_list:
district_crops = self.irrdemand.crop_keys[crop]
if district_crops in self.pmp_acreage:
self.pmp_acreage[district_crops] += self.acreage_by_pmp_crop_type[i]/1000.0
else:
self.pmp_acreage[district_crops] = self.acreage_by_pmp_crop_type[i]/1000.0
i += 1
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################PROJECT CONTRACTS#################################################################
#####################################################################################################################
  def update_balance(self, t, wateryear, water_available, projected_allocation, current_water, key, tot_carryover, balance_type):
    ###This function takes input from the contract class (water_available, projected_allocation, tot_carryover) to determine how much of their allocation remains
    ##water_available is the current contract storage in the reservoir *plus* all deliveries from the given year. The deliveries, paper trades, and turnback pool accounts for each district
    ##are used to get a running tally of the surface water that is currently available to them. (tot_carryover is subtracted from the current balance - districts only get access to *their*
    ##carryover storage - which is added to their individual current balance (self.carryover[key])
    ##projected_allocation is the water that is forecasted to be available on each contract through the end of the water year *plus* water that has already been delivered on that contract
    ##individual deliveries are then subtracted from this total to determine the individual district's projected contract allocation remaining in that year
    #Updates self.current_balance[key] and self.projected_supply[key]; returns
    #(projected supply in excess of remaining annual demand, undelivered carryover).
    #frac_to_district: district's share of the node after private entities
    if self.has_private:
      if self.has_pesticide:
        frac_to_district = 1.0 - self.private_fraction[wateryear]
      else:
        frac_to_district = 1.0 - self.private_fraction
    else:
      frac_to_district = 1.0
    #NOTE(review): balance_type is expected to be 'contract' or 'right'; any
    #other value leaves district_storage/annual_allocation/storage_balance
    #unbound and raises below
    if balance_type == 'contract':
      #district_storage - district's water that is currently available (in storage at reservoir)
      #(water_available - tot_carryover)*self.project_contract[key] - individual district share of the existing (in storage) contract balance, this includes contract water that has already been delivered to all contractors
      #self.deliveries[key][wateryear] - individual district deliveries (how much of 'their' contract has already been delivered)
      #self.carryover[key] - individual district share of contract carryover
      #paper_balance[key] - keeps track of 'paper' groundwater trades (negative means they have accepted GW deliveries in exchange for trading some of their water stored in reservoir, positive means they sent their banked GW to another district in exchage for SW storage
      #turnback_pool[key] - how much water was bought/sold on the turnback pool(negative is sold, positive is bought)
      #(district_storage is computed for reference but not used below)
      district_storage = (water_available-tot_carryover)*self.project_contract[key]*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
      #annual allocation - remaining (undelivered) district share of expected total contract allocation
      #same as above, but projected_allocation*self.project_contract[key] - individual share of expected total contract allocation, this includes contract water that has already been delivered to all contractors
      annual_allocation = projected_allocation*self.project_contract[key]*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
      storage_balance = current_water*self.project_contract[key]*frac_to_district + max(self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key] - self.deliveries[key][wateryear], 0.0)
    elif balance_type == 'right':
      #same as above, but for contracts that are expressed as 'rights' instead of allocations
      district_storage = (water_available-tot_carryover)*self.rights[key]['capacity']*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
      annual_allocation = projected_allocation*self.rights[key]['capacity']*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
      storage_balance = current_water*self.rights[key]['capacity']*frac_to_district + max(self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key] - self.deliveries[key][wateryear], 0.0)
    #usable balance today is capped by both what is physically in storage and
    #what the district is projected to be entitled to this year
    self.current_balance[key] = max(min(storage_balance,annual_allocation), 0.0)
    self.projected_supply[key] = max(annual_allocation,0.0)
    #NOTE(review): the two branches below are debugging scaffolding - the 'xxx'
    #placeholder keys make them dead code unless a contract key is literally 'xxx'
    if key == 'xxx' or key == 'xxx':
      if self.rights[key]['capacity'] > 0.0:
        #print(wateryear, end = " ")
        #print(t, end = " ")
        #print(self.key, end = " ")
        #print(key, end = " ")
        #print("%.2f" % projected_allocation, end = " ")
        #print("%.2f" % annual_allocation, end = " ")
        #print("%.2f" % frac_to_district, end = " ")
        #print("%.2f" % current_water, end = " ")
        #print("%.2f" % tot_carryover, end = " ")
        #print("%.2f" % self.deliveries[key][wateryear], end = " ")
        #print("%.2f" % self.carryover[key], end = " ")
        #print("%.2f" % self.paper_balance[key], end = " ")
        #print("%.2f" % self.turnback_pool[key], end = " ")
        #print("%.2f" % self.current_balance[key], end = " ")
        #print("%.2f" % self.projected_supply[key], end = " ")
        #print("%.2f" % self.annualdemand, end = " ")
        #print("%.2f" % self.dailydemand, end = " ")
        #print("%.2f" % self.recharge_carryover[key], end = " ")
        print("%.2f" % self.use_recovery)
    elif key == 'xxx' or key == 'xxx':
      if self.project_contract[key] > 0.0:
        #print(wateryear, end = " ")
        #print(t, end = " ")
        #print(self.key, end = " ")
        #print(key, end = " ")
        #print("%.2f" % projected_allocation, end = " ")
        #print("%.2f" % annual_allocation, end = " ")
        #print("%.2f" % frac_to_district, end = " ")
        #print("%.2f" % current_water, end = " ")
        #print("%.2f" % tot_carryover, end = " ")
        #print("%.2f" % self.deliveries[key][wateryear], end = " ")
        #print("%.2f" % self.carryover[key], end = " ")
        #print("%.2f" % self.paper_balance[key], end = " ")
        #print("%.2f" % self.turnback_pool[key], end = " ")
        #print("%.2f" % self.current_balance[key], end = " ")
        #print("%.2f" % self.projected_supply[key], end = " ")
        #print("%.2f" % self.annualdemand, end = " ")
        #print("%.2f" % self.dailydemand, end = " ")
        #print("%.2f" % self.recharge_carryover[key], end = " ")
        print("%.2f" % self.use_recovery)
    return max(self.projected_supply[key] - self.annualdemand, 0.0) , max(self.carryover[key] - self.deliveries[key][wateryear], 0.0)
def calc_carryover(self, existing_balance, wateryear, balance_type, key):
#at the end of each wateryear, we tally up the full allocation to the contract, how much was used (and moved around in other balances - carryover, 'paper balance' and turnback_pools) to figure out how much each district can 'carryover' to the next year
if self.has_private:
if self.has_pesticide:
frac_to_district = 1.0 - self.private_fraction[wateryear]
else:
frac_to_district = 1.0 - self.private_fraction
else:
frac_to_district = 1.0
if balance_type == 'contract':
annual_allocation = existing_balance*self.project_contract[key]*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
max_carryover = self.contract_carryover_list[key]
elif balance_type == 'right':
annual_allocation = existing_balance*self.rights[key]['capacity']*frac_to_district - self.deliveries[key][wateryear] + self.carryover[key] + self.paper_balance[key] + self.turnback_pool[key]
max_carryover = self.contract_carryover_list[key]
reallocated_water = max(annual_allocation - max_carryover, 0.0)
self.carryover[key] = min(max_carryover, annual_allocation)
self.paper_balance[key] = 0.0
self.turnback_pool[key] = 0.0
return reallocated_water, self.carryover[key]
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################RECHARGE/RECOVERY TRIGGERS#########################################################
#####################################################################################################################
def open_recovery(self,t, dowy, wateryear):
#this function determines if a district wants to recover banked water
#based on their demands and existing supplies
total_balance = 0.0
total_recovery = (366-dowy)*self.max_recovery + self.extra_leiu_recovery
existing_carryover = 0.0
for key in self.contract_list:
total_balance += self.projected_supply[key]
existing_carryover += max(self.carryover[key] - self.deliveries[key][wateryear], 0.0)
total_needs = self.annualdemand*self.seepage*self.surface_water_sa*self.recovery_fraction
if (total_balance + total_recovery) < total_needs:
if existing_carryover > 0.0:
self.use_recovery = 0.0
else:
self.use_recovery = 1.0
else:
self.use_recovery = 0.0
self.min_direct_recovery = max(self.annualdemand - total_balance,0.0)/(366-dowy)
  def open_recharge(self,t,m,da,wateryear,year,numdays_fillup, numdays_fillup2, contract_carryover, key, wyt, reachable_turnouts, additional_carryover, contract_allocation):
    #for a given contract owned by the district (key), how much recharge can they expect to be able to use
    #before the reservoir associated w/ that contract fills to the point where it needs to begin spilling water
    #(numdays_fillup) - i.e., how much surface water storage can we keep before start losing it
    #self.recharge_carryover is the district variable that shows how much 'excess' allocation there is on a particular
    #contract - i.e., how much of the allocation will not be able to be recharged before the reservoir spills
    #NOTE(review): numdays_fillup2 appears to be a second fill-up horizon used
    #with the 'lookahead' demand window - confirm against the caller.
    #(contract_carryover, wyt, t and contract_allocation are currently unused
    #except in commented-out logic)
    total_recharge = 0.0
    total_recharge2 = 0.0
    carryover_storage_proj = 0.0
    spill_release_carryover = 0.0
    #the district can only take recharge water if one of its turnouts is on a
    #canal reach that can deliver this contract's water
    is_reachable = 0
    for x in reachable_turnouts:
      for y in self.turnout_list:
        if y == x:
          is_reachable = 1
          break
      if is_reachable == 1:
        break
    if is_reachable == 0:
      service_area_adjust = 0.0
    else:
      service_area_adjust = 1.0
    adjusted_sw_sa = self.surface_water_sa*service_area_adjust
    if numdays_fillup < 365.0:
      #total_recharge_available = 0.0
      #for y in self.contract_list:
        #total_recharge_available += self.projected_supply[y]
      #total_recharge_available -= self.annualdemand*adjusted_sw_sa*self.seepage
      ###Find projected recharge available to district
      #if total_recharge_available > 0.0:
      #total_recharge_capacity = (self.max_direct_recharge[0] + self.max_leiu_recharge[m])*(self.days_in_month[year][m]-da)
      ##calculate both direct & in leiu recharge available to the district through the end of this water year
      #if m < 8:
        #for future_month in range(m+1,9):
          #total_recharge_capacity += self.max_direct_recharge[future_month - m]*self.days_in_month[year][future_month] + self.max_leiu_recharge[future_month]*self.days_in_month[year][future_month]
      #elif m > 8:
        #for future_month in range(m+1,12):
          #total_recharge_capacity += self.max_direct_recharge[future_month - m]*self.days_in_month[year][future_month] + self.max_leiu_recharge[future_month]*self.days_in_month[year][future_month]
        #for future_month in range(0,9):
          #total_recharge_capacity += self.max_direct_recharge[future_month - m]*self.days_in_month[year+1][future_month] + self.max_leiu_recharge[future_month]*self.days_in_month[year][future_month]
      #else:
        #total_recharge_capacity = 0.0
      #spill_release_carryover = max(total_recharge_available - total_recharge_capacity - additional_carryover, 0.0)
      ##how many days remain before the reservoir fills?
      days_left = numdays_fillup
      days_left2 = numdays_fillup2
      #tabulate how much water can be recharged between now & reservoir fillup (current month)
      this_month_recharge = (self.max_direct_recharge[0] + self.max_leiu_recharge[0])*min(self.days_in_month[year][m] - da,days_left)
      this_month_recharge2 = (self.max_direct_recharge[0] + self.max_leiu_recharge[0])*min(self.days_in_month[year][m] - da,days_left2)
      total_recharge += this_month_recharge
      total_recharge2 += this_month_recharge2
      #days before fillup remaining after current month
      days_left -= (self.days_in_month[year][m] - da)
      days_left2 -= (self.days_in_month[year][m] - da)
      ###if days_left remains positive (i.e., reservoir fills after the end of the current month)
      ###loop through future months to determine how much water can be recharged before reservoir fills
      monthcounter = 0
      monthcounter_loop = 0
      next_year_counter = 0
      while (monthcounter + monthcounter_loop) < 11 and days_left > 0.0:
        monthcounter += 1
        if (monthcounter + m) > 11:
          #wrap into the next calendar year (monthcounter_loop keeps the while
          #condition advancing after the wrap)
          monthcounter -= 12
          monthcounter_loop = 12
          next_year_counter = 1
        # continue to tabulate how much water can be recharged between now & reservoir fillup (future months)
        this_month_recharge = (self.max_direct_recharge[monthcounter+monthcounter_loop] + self.max_leiu_recharge[monthcounter+monthcounter_loop])*min(self.days_in_month[year+next_year_counter][m+monthcounter],days_left)
        total_recharge += this_month_recharge
        days_left -= self.days_in_month[year+next_year_counter][m+monthcounter]
      ###Uses the projected supply calculation to determine when to recharge water.  There are a number of conditions under which a
      ###district will recharge water.  Projected allocations are compared to total demand, recharge capacity, and the probability of
      ###surface water storage spilling carryover water.  If any of these conditions triggers recharge, the district will release water
      ##for recharge
      #same accumulation as above, but over the longer days_left2 horizon
      monthcounter = 0
      monthcounter_loop = 0
      next_year_counter = 0
      while (monthcounter + monthcounter_loop) < 11 and days_left2 > 0.0:
        monthcounter += 1
        if (monthcounter + m) > 11:
          monthcounter -= 12
          monthcounter_loop = 12
          next_year_counter = 1
        # continue to tabulate how much water can be recharged between now & reservoir fillup (future months)
        this_month_recharge2 = (self.max_direct_recharge[monthcounter+monthcounter_loop] + self.max_leiu_recharge[monthcounter+monthcounter_loop])*min(self.days_in_month[year+next_year_counter][m+monthcounter],days_left2)
        total_recharge2 += this_month_recharge2
        days_left2 -= self.days_in_month[year+next_year_counter][m+monthcounter]
      ###Uses the projected supply calculation to determine when to recharge water.  There are a number of conditions under which a
      ###district will recharge water.  Projected allocations are compared to total demand, recharge capacity, and the probability of
      ###surface water storage spilling carryover water.  If any of these conditions triggers recharge, the district will release water
      ##for recharge
      #carryover_storage_proj = max(self.projected_supply[key] - self.annualdemand*adjusted_sw_sa - total_recharge*service_area_adjust - self.contract_carryover_list[key]*adjusted_sw_sa, 0.0)
      #spill_release_carryover: projected supply (across all contracts) that
      #cannot be used by demand or recharged before spill, over the long horizon
      spill_release_carryover = 0.0
      for y in self.contract_list:
        spill_release_carryover += max(self.projected_supply[y] - self.carryover_rights[y], 0.0)
      spill_release_carryover -= (self.annualdemand*adjusted_sw_sa + total_recharge2*service_area_adjust + self.demand_days['lookahead'][key])
      spill_release_carryover = max(spill_release_carryover, 0.0)
      #carryover_storage_proj: undelivered carryover that cannot be recharged
      #before spill, over the near-term horizon
      carryover_storage_proj = 0.0
      for y in self.contract_list:
        carryover_storage_proj += max(self.carryover[y] - self.deliveries[y][wateryear] - self.carryover_rights[y], 0.0)
      carryover_storage_proj -= (total_recharge*service_area_adjust + self.demand_days['current'][key])
      carryover_storage_proj = max(carryover_storage_proj, 0.0)
      #carryover_release_proj = min(carryover_storage_proj, max(total_recharge_available - total_recharge_capacity,0.0))
      #carryover_release_current = max(self.carryover[key] - self.deliveries[key][wateryear] - total_recharge_carryover, 0.0)
      #if contract_carryover > 0.0:
        #spill_release_carryover = max(self.carryover[key] - self.deliveries[key][wateryear] - total_recharge, 0.0)
      #else:
        #spill_release_carryover = max(self.projected_supply[key] - self.annualdemand*adjusted_sw_sa - total_recharge*service_area_adjust - self.contract_carryover_list[key]*adjusted_sw_sa, 0.0)
      ##The amount of recharge a district wants is then saved and sent to the canal class where it 'looks' for an available spot to recharge the water
      #self.recharge_carryover[key] = max(carryover_release_proj, carryover_release_current, spill_release_carryover, spill_release_storage)
      #whichever trigger is larger drives the request; the request for this
      #contract is its pro-rata share of the district-wide excess
      if spill_release_carryover > carryover_storage_proj:
        total_available_for_recharge = 0.0
        for y in self.contract_list:
          total_available_for_recharge += max(self.projected_supply[y], 0.0)
        if total_available_for_recharge > 0.0:
          self.recharge_carryover[key] = max(spill_release_carryover, 0.0)*max(self.projected_supply[key], 0.0)/total_available_for_recharge
        else:
          self.recharge_carryover[key] = 0.0
      else:
        total_available_for_recharge = 0.0
        for y in self.contract_list:
          total_available_for_recharge += max(self.carryover[y] - self.deliveries[y][wateryear], 0.0)
        if total_available_for_recharge > 0.0:
          self.recharge_carryover[key] = max(carryover_storage_proj, 0.0)*max(self.carryover[key] - self.deliveries[key][wateryear], 0.0)/total_available_for_recharge
        else:
          self.recharge_carryover[key] = 0.0
      #if contract_allocation == 0:
        #self.recharge_carryover[key] = max(self.recharge_carryover[key], self.projected_supply[key] - total_recharge*service_area_adjust - self.demand_days['current'][key], 0.0)
      #NOTE(review): debugging scaffolding - dead unless a contract key is
      #literally 'xxx'
      if key == 'xxx' or key == 'xxx' or key == 'xxx' or key == 'xxx':
        #print(carryover_storage_proj, end = " ")
        #print(spill_release_carryover, end = " ")
        #print(total_recharge, end = " ")
        #print(self.demand_days['current'][key], end = " ")
        #print(total_recharge2, end = " ")
        #print(self.demand_days['lookahead'][key], end = " ")
        #print(total_available_for_recharge, end = " ")
        print(self.recharge_carryover[key])
      ##Similar conditions also calculate the amount of regular tableA deliveries for direct irrigation to request
    else:
      #reservoir not projected to fill within a year - no recharge pressure
      self.delivery_carryover[key] = 0.0
      self.recharge_carryover[key] = 0.0
def get_urban_recovery_target(self, pumping, project_contract, wateryear, dowy, year, wyt, demand_days, t, start_month):
max_pumping_shortfall = 0.0
pumping_shortfall = 0.0
if self.has_private:
if self.has_pesticide:
frac_to_district = 1.0 - self.private_fraction[wateryear]
else:
frac_to_district = 1.0 - self.private_fraction
else:
frac_to_district = 1.0
monthcounter = start_month
daycounter = 0
tot_days = 0
if demand_days > 365.0:
max_pumping_shortfall = 9999.9
else:
while tot_days < demand_days:
pumping_shortfall += np.sum(self.pumping[(t-dowy+tot_days):(t-dowy+tot_days+min(demand_days -tot_days, 30))]/1000.0) - pumping['swp']['gains'][monthcounter]*project_contract*frac_to_district
tot_days += 30
monthcounter += 1
if monthcounter == 12:
monthcounter = 0
max_pumping_shortfall = max(pumping_shortfall, max_pumping_shortfall)
return max(max_pumping_shortfall, 0.0)
def set_turnback_pool(self, key, year):
##This function creates the 'turnback pool' (note: only for SWP contracts now, can be used for others)
##finding contractors with 'extra' contract water that they would like to sell, and contractors who would
##like to purchase that water.
self.turnback_sales = 0.0
self.turnback_purchases = 0.0
total_recharge_ability = 0.0
total_projected_supply = 0.0
for y in self.contract_list:
total_projected_supply += self.projected_supply[y]
for month_count in range(0, 4):
# total recharge Jun,Jul,Aug,Sep
total_recharge_ability += self.max_direct_recharge[month_count]*self.days_in_month[year][month_count + 5]
if total_projected_supply > 0.0:
contract_fraction = max(min(self.projected_supply[key]/total_projected_supply, 1.0), 0.0)
else:
contract_fraction = 0.0
#districts sell water if their projected contracts are greater than their remaining annual demand, plus their remaining recharge capacity in this water year, plus their recharge capacity in the next water year (through January)
self.turnback_sales = max(self.projected_supply[key] - self.carryover_rights[key] - (self.annualdemand + total_recharge_ability + self.pre_flood_demand)*contract_fraction, 0.0)
if self.in_leiu_banking:
self.turnback_purchases = 0.0
else:
##districts buy turnback water if their annual demands are greater than their projected supply plus their capacity to recover banked groundwater
self.turnback_purchases = max(self.annualdemand*contract_fraction + self.carryover_rights[key] - self.projected_supply[key] - self.max_recovery*122*contract_fraction, 0.0)
return self.turnback_sales, self.turnback_purchases
def make_turnback_purchases(self, turnback_sellers, turnback_buyers, key):
#once we know how much water is in the 'selling' pool and the 'buying' pool, we can determine the total turnback pool - min(buying,selling), then
#determine what % of each request is filled (i.e., if the selling pool is only 1/2 of the buying pool, then buyers only get 1/2 of their request, or visa versa
if min(turnback_sellers, turnback_buyers) > 0.0:
sellers_frac = -1*min(turnback_sellers, turnback_buyers)/turnback_sellers
buyers_frac = min(turnback_sellers, turnback_buyers)/turnback_buyers
total_projected_supply = 0.0
for y in self.contract_list:
#the buying/selling fractiosn are applied to the same calculations above (about buying or selling needs), and then turnback pools are added/subtracted to the districts contract
total_projected_supply += self.projected_supply[y]
if self.turnback_sales > 0.0:
self.turnback_pool[key] = max(self.turnback_sales, 0.0)*sellers_frac
elif self.turnback_purchases > 0.0:
if self.in_leiu_banking:
self.turnback_pool[key] = 0.0
else:
self.turnback_pool[key] = max(self.turnback_purchases, 0.0)*buyers_frac
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################DETERMINE DELIVERIES ON CANAL######################################################
#####################################################################################################################
def find_node_demand(self,contract_list, search_type, partial_demand_toggle, toggle_recharge):
#this function is used to calculate the current demand at each 'district' node
access_mult = self.surface_water_sa*self.seepage#this accounts for water seepage & the total district area that can be reached by SW canals (seepage is >= 1.0; surface_water_sa <= 1.0)
total_projected_allocation = 0.0
private_add = 0.0
if self.has_private:
for xx in self.private_demand:
private_add += min(self.private_demand[xx], self.private_delivery[xx])
for y in contract_list:
total_projected_allocation += max(self.projected_supply[y.name], 0.0)#projected allocation
#percentage of demand filled in the day is equal to the total projected allocation as a percent of annual demand
#(i.e., if allocations are projected to be 1/2 of annual demand, then they try to fill 50% of daily irrigation demands with surface water
if self.annualdemand*access_mult > 0.0 and partial_demand_toggle == 1:
if self.must_fill == 1:
#pumping to urban branches of the Cal Aqueduct is 'must fill', (i.e., demand is always met)
total_demand_met = 1.0
else:
if self.annualdemand*access_mult > 0.0:
#total_demand_met = min(max(total_projected_allocation/(self.annualdemand*access_mult), 0.0), 1.0)
total_demand_met = 1.0
else:
total_demand_met = 0.0
#total_demand_met = min(max(total_projected_allocation/(self.annualdemand*access_mult), 0.0), 1.0)
#elif self.annualdemand*access_mult > 0.0:
#total_demand_met = 1.0 - min(max(total_projected_allocation/(self.annualdemand*access_mult), 0.0), 1.0)
else:
total_demand_met = 1.0
#self.dailydemand_start is the initial daily district demand (self.dailydemand is updated as deliveries are made) - we try to fill the total_demand_met fraction of dailydemand_start, or what remains of demand in self.dailydemand, whichever is smaller
if search_type == 'flood':
if self.annualdemand > 0.0 and total_projected_allocation > self.annualdemand:
#demand_constraint = (1.0 - min(total_projected_allocation/self.annualdemand, 1.0))*max(min(self.dailydemand_start*access_mult*total_demand_met, self.dailydemand*access_mult),0.0)
demand_constraint = max(min(self.dailydemand_start*access_mult*total_demand_met, self.dailydemand*access_mult),0.0)
else:
demand_constraint = max(min(self.dailydemand_start*access_mult*total_demand_met, self.dailydemand*access_mult),0.0)
else:
demand_constraint = max(min(self.dailydemand_start*access_mult*total_demand_met, self.dailydemand*access_mult),0.0)
#if we want to include recharge demands in the demand calculations, add available recharge space
if toggle_recharge == 1:
demand_constraint += max(self.in_district_storage - self.current_recharge_storage, 0.0)
return demand_constraint + private_add
def find_node_output(self):
#this function calculates the total recovery capacity that is contained in each district node
#(i.e. in leiu banks)
if self.in_leiu_banking:
current_recovery_use = 0.0
for x in self.recovery_use:
current_recovery_use += self.recovery_use[x]
output_constraint = self.leiu_recovery - current_recovery_use
else:
output_constraint = 0.0
return output_constraint
def find_leiu_output(self, contract_list, ownership, member_name, wateryear):
member_constraint = 0.0
total_contract = np.zeros(len(self.contract_list))
if self.in_leiu_banking:
bank_counter = 0
for bank_contracts in self.contract_list:
for exchange_contracts in contract_list:
if bank_contracts == exchange_contracts.name:
#member_constraint += max(min(self.current_balance[bank_contracts]*ownership, self.projected_supply[bank_contracts]*ownership, (self.projected_supply[bank_contracts] - self.paper_balance[bank_contracts])*ownership - self.contract_exchange[member_name][wateryear]), 0.0)
#total_contract[bank_counter] += max(min(self.current_balance[bank_contracts]*ownership, self.projected_supply[bank_contracts]*ownership, (self.projected_supply[bank_contracts] - self.paper_balance[bank_contracts])*ownership - self.contract_exchange[member_name][wateryear]), 0.0)
member_constraint += max(min(self.current_balance[bank_contracts], self.projected_supply[bank_contracts]), 0.0)
total_contract[bank_counter] += max(min(self.current_balance[bank_contracts], self.projected_supply[bank_contracts]), 0.0)
bank_counter += 1
if member_constraint > 0.0:
for bank_contract_counter in range(0, len(total_contract)):
total_contract[bank_contract_counter] = total_contract[bank_contract_counter]/member_constraint
return member_constraint, total_contract
def set_request_constraints(self, demand, search_type, contract_list, bank_space, bank_capacity, dowy, wateryear):
#this function is used to determine if a district node 'wants' to make a request
#under the different usage types (flood, delievery, banking, or recovery) under a given contract
#(contract_list)
self.projected_supply['tot'] = 0.0
total_recharge = 0.0
for y in self.contract_list:
self.projected_supply['tot'] += self.projected_supply[y]
total_recharge += self.recharge_carryover[y]
#for flood deliveries, a district requests water if they don't have
#excess contract water that they don't think they can recharge (i.e. they don't purchase
#flood water if they can't use all their contract water
if search_type == "flood":
if self.projected_supply['tot'] > self.annualdemand:
return demand
else:
return demand
#for y in contract_list:
#tot_recharge += self.delivery_carryover[y.name]
#if tot_recharge <= 0.0:
#return demand
#else:
#return 0.0
#for normal irrigation deliveries, a district requests water if they have enough water currently
#in surface water storage under the given contract
if search_type == "delivery":
private_add = 0.0
if self.has_private:
for xx in self.private_demand:
private_add += min(self.private_demand[xx], self.private_delivery[xx])
total_current_balance = 0.0
total_projected_supply = 0.0
total_carryover = 0.0
friant_toggle = 0
delta_toggle = 0
for y in contract_list:
total_current_balance += max(self.current_balance[y.name], 0.0)
total_projected_supply += max(self.projected_supply[y.name], 0.0)
total_carryover += max(self.carryover[y.name] - self.deliveries[y.name][wateryear], 0.0)
if self.project_contract['cvpdelta'] > 0.0 or self.project_contract['exchange'] > 0.0:
delta_toggle = 1
if self.seasonal_connection == 1:
if self.must_fill == 1:
return max(min(demand, total_current_balance), 0.0) + private_add
elif total_carryover > 0.0 or total_projected_supply > self.annualdemand:
return max(min(demand, total_current_balance), 0.0) + private_add
elif delta_toggle == 1:
return max(min(demand, total_current_balance, total_projected_supply), 0.0) + private_add
#elif dowy < 273:
#if total_projected_supply > self.irrseasondemand:
#demand_fraction = min(max((total_projected_supply - self.irrseasondemand)/(self.annualdemand - self.irrseasondemand), 0.0), 1.0)
#return max(min(demand_fraction*demand,total_current_balance), 0.0) + private_add
#if self.annualdemand > 0.0:
#return max(min(demand*min(total_projected_supply/self.annualdemand, 1.0),total_current_balance), 0.0) + private_add
#else:
#return max(min(demand,total_current_balance), 0.0) + private_add
else:
conservative_estimate = max(min((dowy- 211.0)/(273.0 - 211.0), 1.0), 0.0)
if self.annualdemand > 0.0:
return max(min(demand*min(conservative_estimate*total_projected_supply/self.annualdemand, 1.0),total_current_balance), 0.0) + private_add
else:
return max(min(demand,total_current_balance), 0.0) + private_add
else:
return private_add
#for banking, a district requests water if they have enough contract water currently in surface water storage and they have 'excess' water for banking (calculated in self.open_recharge)
if search_type == "banking":
total_carryover_recharge = 0.0
total_current_balance = 0.0
for y in contract_list:
total_carryover_recharge += max(self.recharge_carryover[y.name], 0.0)
total_current_balance += max(self.current_balance[y.name], 0.0)
return min(total_carryover_recharge, total_current_balance, max(bank_capacity - bank_space, 0.0))
#for recovery, a district requests recovery water from a bank if they have contracts under the current contract being searched (i.e., so they aren't requesting water that will be sent to another district that can't make 'paper' trades with them) and if they have their 'recovery threshold' triggered (self.use_recovery, calculated in self.open_recovery)
if search_type == "recovery":
member_trades = 0
for member_contracts in self.contract_list:
for exchange_contracts in contract_list:
if member_contracts == exchange_contracts.name:
member_trades = 1
if member_trades == 1:
if self.use_recovery == 1.0:
total_request = min(max(self.dailydemand*self.surface_water_sa*self.seepage, 0.0), max(bank_space, 0.0))
else:
total_request = 0.0
else:
total_request = 0.0
return total_request
def set_demand_priority(self, priority_list, contract_list, demand, delivery, demand_constraint, search_type, contract_canal):
#this function takes a the calculated demand at each district node and classifies those demands by 'priority' - the priority classes and rules change for each delivery type
demand_dict = {}
#for flood deliveries, the priority structure is based on if you have a contract with the reservoir that is being spilled, if you have a turnout on a canal that is a 'priority canal' for the spilling reservoir, and then finally if you are not on a priority canal for spilling
if search_type == 'flood':
contractor_toggle = 0
priority_toggle = 0
for yy in priority_list:#canals that have 'priority' from the given reservoir
if yy.name == contract_canal:#current canal
priority_toggle = 1
if priority_toggle == 1:
for y in contract_list:#contracts that are being spilled (b/c they are held at the spilling reservoir)
for yx in self.contract_list:
if y.name == yx:
contractor_toggle = 1
if contractor_toggle == 1:
demand_dict['contractor'] = max(min(demand,delivery), 0.0)
demand_dict['alternate'] = min(delivery - max(min(demand,delivery),0.0),demand_constraint-demand_dict['contractor'])
demand_dict['turnout'] = 0.0
demand_dict['excess'] = 0.0
else:
demand_dict['contractor'] = 0.0
demand_dict['alternate'] = 0.0
demand_dict['turnout'] = max(min(demand,delivery), 0.0)
demand_dict['excess'] = 0.0
else:
demand_dict['contractor'] = 0.0
demand_dict['alternate'] = 0.0
demand_dict['turnout'] = 0.0
demand_dict['excess'] = max(min(demand,delivery), 0.0)
#irrigation deliveries have only one type of priority (the contract that is currently being deliveried)
elif search_type == 'delivery':
demand_dict[contract_canal] = max(min(demand,delivery), 0.0)
#in-leiu banks have demands that are either priority (capacity that the district has direct ownership over) or secondary (excess capacity that isn't being used by the owner)
elif search_type == 'banking':
priority_toggle = 0
for yy in priority_list:#canals that have 'priority' from the given reservoir
if yy.name == contract_canal:#current canal
priority_toggle = 1
if priority_toggle == 1:
demand_dict['priority'] = max(min(demand,delivery), 0.0)
demand_dict['secondary'] = min(delivery - max(min(demand,delivery),0.0),demand_constraint-demand_dict['priority'])
else:
demand_dict['priority'] = 0.0
demand_dict['secondary'] = max(min(delivery, demand_constraint), 0.0)
#recovery is the same priority structure as banking, but we use different names (initial & supplemental) to keep things straight)
elif search_type == 'recovery':
if self.in_leiu_banking:
demand_dict['initial'] = max(min(demand,delivery), 0.0)
demand_dict['supplemental'] = min(delivery - max(min(demand,delivery), 0.0), demand_constraint - demand_dict['initial'])
else:
demand_dict['initial'] = 0.0
demand_dict['supplemental'] = 0.0
return demand_dict
def find_leiu_priority_space(self, demand_constraint, num_members, member_name, toggle_recharge, search_type):
#this function finds how much 'priority' space in the recharge/recovery capacity is owned by a member (member_name) in a given in-leiu bank (i.e. this function is attached to the district that owns the bank - and the banking member is represented by 'member_name' input variable)
if search_type == "recovery":
priority_space = max(min(self.leiu_recovery*self.leiu_ownership[member_name] - self.recovery_use[member_name], demand_constraint), 0.0)
available_banked = self.inleiubanked[member_name]
return min(priority_space, available_banked)
else:
initial_capacity = self.dailydemand_start*self.surface_water_sa*self.seepage
if toggle_recharge == 1:
initial_capacity += self.in_district_storage
priority_space = max(min((self.leiu_ownership[member_name]*initial_capacity - self.bank_deliveries[member_name]), demand_constraint)/num_members, 0.0)
return priority_space
def set_deliveries(self, priorities,type_fractions,type_list,search_type,toggle_district_recharge,member_name, wateryear):
#this function takes the deliveries, seperated by priority, and updates the district's daily demand and/or recharge storage
final_deliveries = 0.0
total_direct_deliveries = 0.0
total_recharge_deliveries = 0.0
for zz in type_list:
total_deliveries = priorities[zz]*type_fractions[zz]
final_deliveries += total_deliveries
if self.has_private:
private = 0.0
for xx in self.private_demand:
private += min(self.private_demand[xx], self.private_delivery[xx])
if search_type == 'flood':
total_recharge_deliveries = min(max(final_deliveries - private, 0.0), self.in_district_storage - self.current_recharge_storage)
total_direct_deliveries = min(max(final_deliveries - private - total_recharge_deliveries, 0.0)/self.seepage, self.dailydemand*self.surface_water_sa)
else:
total_direct_deliveries = min(max(final_deliveries - private, 0.0)/self.seepage, self.dailydemand*self.surface_water_sa)
if toggle_district_recharge == 1:
total_recharge_deliveries = min(max((final_deliveries - private)/self.seepage - total_direct_deliveries, 0.0), self.in_district_storage - self.current_recharge_storage)
else:
total_recharge_deliveries = 0.0
self.dailydemand -= total_direct_deliveries
self.current_recharge_storage += total_recharge_deliveries
#final_deliveries += total_recharge_deliveries
else:
if search_type == 'flood':
total_recharge_deliveries = min(max(final_deliveries, 0.0), self.in_district_storage - self.current_recharge_storage)
total_direct_deliveries = min(max(final_deliveries - total_recharge_deliveries, 0.0)/self.seepage, self.dailydemand*self.surface_water_sa)
else:
total_direct_deliveries = min(max(final_deliveries, 0.0)/self.seepage, self.dailydemand*self.surface_water_sa)
if toggle_district_recharge == 1:
total_recharge_deliveries = min(max((final_deliveries)/self.seepage - total_direct_deliveries, 0.0), self.in_district_storage - self.current_recharge_storage)
else:
total_recharge_deliveries = 0.0
self.dailydemand -= total_direct_deliveries
self.current_recharge_storage += total_recharge_deliveries
return total_direct_deliveries, total_recharge_deliveries, final_deliveries - total_direct_deliveries - total_recharge_deliveries
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################ADJUSST ACCOUNTS AFTER DELIVERY####################################################
#####################################################################################################################
def give_paper_trade(self, trade_amount, contract_list, wateryear, district_name):
#this function accepts a delivery of recovered groundwater, and makes a 'paper'
#trade, giving up a surface water contract allocation (contract_list) to the district
#that owned the groundwater that was recovered
if self.seepage > 0.0:
total_alloc = 0.0
for y in contract_list:
total_alloc += self.projected_supply[y.name]
actual_delivery = min(trade_amount, total_alloc, self.dailydemand*self.seepage*self.surface_water_sa)
self.dailydemand -= actual_delivery/self.seepage
if total_alloc > 0.0:
for y in contract_list:
self.paper_balance[y.name] -= actual_delivery*self.projected_supply[y.name]/total_alloc
self.deliveries['exchanged_SW'][wateryear] += actual_delivery
return actual_delivery
def give_paper_exchange(self, trade_amount, contract_list, trade_frac, wateryear, district_name):
#this function accepts a delivery of recovered groundwater, and makes a 'paper'
#trade, giving up a surface water contract allocation (contract_list) to the district
#that owned the groundwater that was recovered
contract_counter = 0
for y in contract_list:
self.paper_balance[y] -= trade_amount*trade_frac[contract_counter]
contract_counter += 1
self.deliveries['exchanged_SW'][wateryear] += trade_amount
def get_paper_trade(self, trade_amount, contract_list, wateryear):
#this function takes a 'paper' credit on a contract and allocates it to a district
#the paper credit is in exchange for delivering recovered groundwater to another party (district)
total_alloc = 0.0
contract_frac = 0.0
for y in contract_list:
total_alloc += self.projected_supply[y.name]
if total_alloc > 0.0:
for y in contract_list:
self.paper_balance[y.name] += trade_amount*self.projected_supply[y.name]/total_alloc
else:
contract_frac = 1.0
for y in contract_list:
self.paper_balance[y.name] += trade_amount*contract_frac
contract_frac = 0.0
self.deliveries['exchanged_GW'][wateryear] += trade_amount
def get_paper_exchange(self, trade_amount, contract_list, trade_frac, wateryear):
#this function takes a 'paper' credit on a contract and allocates it to a district
#the paper credit is in exchange for delivering recovered groundwater to another party (district)
total_alloc = 0.0
contract_frac = 0.0
contract_counter = 0
for y in contract_list:
self.paper_balance[y] += trade_amount*trade_frac[contract_counter]
contract_counter += 1
self.deliveries['exchanged_GW'][wateryear] += trade_amount
def record_direct_delivery(self, delivery, wateryear):
actual_delivery = min(delivery, self.dailydemand*self.seepage*self.surface_water_sa)
self.deliveries['recover_banked'][wateryear] += actual_delivery
self.dailydemand -= actual_delivery/(self.seepage*self.surface_water_sa)
self.direct_recovery_delivery = 0.0
return actual_delivery
def direct_delivery_bank(self, delivery, wateryear):
#this function takes a delivery of recoverd groundwater and applies it to irrigation demand in a district
#the recovered groundwater is delivered to the district that originally owned the water, so no 'paper' trade is needed
actual_delivery = min(delivery, self.dailydemand*self.seepage*self.surface_water_sa - self.direct_recovery_delivery)
#self.deliveries['recover_banked'][wateryear] += actual_delivery
self.direct_recovery_delivery += actual_delivery
#self.dailydemand -= actual_delivery/self.seepage*self.surface_water_sa
return actual_delivery
def adjust_accounts(self, direct_deliveries, recharge_deliveries, contract_list, search_type, wateryear):
#this function accepts water under a specific condition (flood, irrigation delivery, banking), and
#adjusts the proper accounting balances
total_carryover_recharge = 0.0
total_current_balance = 0.0
delivery_by_contract = {}
for y in contract_list:
if search_type == 'flood':
total_current_balance += 1.0
elif search_type == 'delivery':
total_current_balance += max(self.projected_supply[y.name], 0.0)
elif search_type == 'banking':
total_current_balance += max(self.recharge_carryover[y.name], 0.0)
elif search_type == 'recovery':
total_current_balance += max(self.current_balance[y.name], 0.0)
delivery_by_contract[y.name] = 0.0
flood_counter = 0
for y in contract_list:
#find the percentage of total deliveries that come from each contract
if search_type == 'flood':
if flood_counter == 0:
contract_deliveries = (direct_deliveries + recharge_deliveries)
flood_counter = 1
else:
contract_deliveries = 0.0
elif total_current_balance > 0.0:
if search_type == 'delivery':
contract_deliveries = (direct_deliveries + recharge_deliveries)*max(self.projected_supply[y.name], 0.0)/total_current_balance
elif search_type == 'banking':
contract_deliveries = (direct_deliveries + recharge_deliveries)*max(self.recharge_carryover[y.name], 0.0)/total_current_balance
elif search_type == 'recovery':
contract_deliveries = (direct_deliveries + recharge_deliveries)*max(self.current_balance[y.name], 0.0)/total_current_balance
else:
contract_deliveries = 0.0
delivery_by_contract[y.name] = contract_deliveries
#flood deliveries do not count against a district's contract allocation, so the deliveries are recorded as 'flood'
if search_type == "flood":
if contract_deliveries > 0.0:
self.deliveries[y.name + '_flood'][wateryear] += recharge_deliveries
self.deliveries[y.name + '_flood_irrigation'][wateryear] += direct_deliveries
else:
#irrigation/banking deliveries are recorded under the contract name so they are included in the
#contract balance calculations
#update the individual district accounts
self.deliveries[y.name][wateryear] += contract_deliveries
self.current_balance[y.name] -= contract_deliveries
if search_type == 'banking':
#if deliveries ar for banking, update banking accounts
self.deliveries['recharged'][wateryear] += contract_deliveries
self.deliveries[y.name+'_recharged'][wateryear] += contract_deliveries
self.recharge_carryover[y.name] -= min(contract_deliveries, self.recharge_carryover[y.name])
return delivery_by_contract
def adjust_bank_accounts(self, member_name, direct_deliveries, recharge_deliveries, wateryear):
#when deliveries are made for banking, keep track of the member's individual accounts
self.bank_deliveries[member_name] += direct_deliveries + recharge_deliveries#keeps track of how much of the capacity is being used in the current timestep
self.deliveries['inleiu_irrigation'][wateryear] += direct_deliveries#if deliveries being made 'inleiu', then count as inleiu deliveries
self.deliveries['inleiu_recharge'][wateryear] += recharge_deliveries#if deliveries being made 'inleiu', then count as inleiu deliveries
self.inleiubanked[member_name] += (direct_deliveries + recharge_deliveries) * self.inleiuhaircut#this is the running account of the member's banking storage
def adjust_recovery(self, deliveries, member_name, wateryear):
#if recovery deliveries are made, adjust the banking accounts and account for the recovery capacity use
self.inleiubanked[member_name] -= deliveries#this is the running account of the member's banking storage
self.deliveries['leiupumping'][wateryear] += deliveries
self.recovery_use[member_name] += deliveries#keeps track of how much of the capacity is being used in the current timestep
def adjust_exchange(self, deliveries, member_name, wateryear):
#if recovery deliveries are made, adjust the banking accounts and account for the recovery capacity use
self.inleiubanked[member_name] -= deliveries#this is the running account of the member's banking storage
self.deliveries['leiupumping'][wateryear] += deliveries
self.contract_exchange[member_name][wateryear] += deliveries
  def absorb_storage(self):
    """Absorb surface water ponded at a recharge bank into the ground.

    Water delivered to a bank as surface 'storage' percolates away,
    clearing up spreading-pool space for the next timestep.  Heavy use also
    sets self.thismonthuse, which tracks how many consecutive months the
    bank has been used (continuous use affects the spreading pool's
    recharge rate elsewhere in the model).
    """
    if self.in_leiu_banking:
      #in-leiu banks absorb based on the district's direct recharge capacity
      #(cfs_tafd appears to be a module-level unit-conversion constant -- defined outside this block)
      if self.current_recharge_storage > self.recharge_rate*0.75:
        self.thismonthuse = 1
      if self.current_recharge_storage > 0.0:
        absorb_fraction = min(self.in_district_direct_recharge*cfs_tafd/self.current_recharge_storage,1.0)
        #NOTE(review): the reduction compounds once per participant, so the total
        #absorbed grows with the number of banking partners -- confirm intended
        for x in self.participant_list:
          self.current_recharge_storage -= self.current_recharge_storage*absorb_fraction
    else:
      #stand-alone recharge facilities absorb at the facility recharge rate
      if self.current_recharge_storage > self.recharge_rate*0.75:
        self.thismonthuse = 1
      if self.current_recharge_storage > 0.0:
        absorb_fraction = min(self.recharge_rate/self.current_recharge_storage,1.0)
        self.current_recharge_storage -= self.current_recharge_storage*absorb_fraction
    #guard against small negative residuals from the subtractions above
    self.current_recharge_storage = max(self.current_recharge_storage, 0.0)
#####################################################################################################################
#####################################################################################################################
#####################################################################################################################
##################################RECORD STATE VARIABLES###############################################################
#####################################################################################################################
def reset_recharge_recovery(self):
self.max_direct_recharge = np.zeros(12)
self.max_leiu_recharge = np.zeros(12)
self.total_banked_storage = 0.0
self.max_leiu_exchange = 0.0
def accounting_full(self, t, wateryear):
# keep track of all contract amounts
for x in self.contract_list_all:
self.daily_supplies_full[x + '_delivery'][t] = self.deliveries[x][wateryear]
self.daily_supplies_full[x + '_flood'][t] = self.deliveries[x + '_flood'][wateryear]
self.daily_supplies_full[x + '_flood_irrigation'][t] = self.deliveries[x + '_flood_irrigation'][wateryear]
self.daily_supplies_full[x + '_recharged'][t] = self.deliveries[x + '_recharged'][wateryear]
self.daily_supplies_full[x + '_projected'][t] = self.projected_supply[x]
self.daily_supplies_full[x + '_paper'][t] = self.paper_balance[x]
self.daily_supplies_full[x + '_carryover'][t] = self.carryover[x]
self.daily_supplies_full[x + '_turnback'][t] = self.turnback_pool[x]
for x in self.non_contract_delivery_list:
self.daily_supplies_full[x][t] = self.deliveries[x][wateryear]
self.daily_supplies_full['pumping'][t] = self.annual_private_pumping
self.daily_supplies_full['irr_demand'][t] = self.dailydemand_start
self.daily_supplies_full['recover_banked'][t] = self.deliveries['recover_banked'][wateryear]
self.daily_supplies_full['inleiu_irrigation'][t] = self.deliveries['inleiu_irrigation'][wateryear]
self.daily_supplies_full['inleiu_recharge'][t] = self.deliveries['inleiu_recharge'][wateryear]
self.daily_supplies_full['leiupumping'][t] = self.deliveries['leiupumping'][wateryear]
self.daily_supplies_full['recharged'][t] = self.deliveries['recharged'][wateryear]
self.daily_supplies_full['exchanged_GW'][t] = self.deliveries['exchanged_GW'][wateryear]
self.daily_supplies_full['exchanged_SW'][t] = self.deliveries['exchanged_SW'][wateryear]
  def accounting(self,t, da, m, wateryear,key):
    """Record daily allocation/delivery stacks for contract `key` at timestep t.

    Builds cumulative timeseries showing what water was used for (recharge,
    banking, irrigation, etc.).  Values are accumulated so they 'stack' in an
    area plot: allocations are positive (above the x-axis), deliveries are
    negative (below); the delivery-side stacking adjustments are finished in
    self.accounting_banking_activity().

    t -- timestep index; da/m -- day and month (Sep 30 triggers the annual
    roll-up); wateryear -- water year index; key -- contract key.
    """
    #allocations stack above the x-axis in a plot
    self.daily_supplies['paper'][t] += self.projected_supply[key]
    self.daily_supplies['carryover'][t] += max(self.projected_supply[key] - self.paper_balance[key], 0.0)
    self.daily_supplies['allocation'][t] += max(self.projected_supply[key] - self.paper_balance[key] - self.carryover[key], 0.0)
    #deliveries stack below the x-axis (negative) - the stacking adjustments come in self.accounting_banking_activity()
    self.daily_supplies['delivery'][t] -= self.deliveries[key][wateryear]
    self.daily_supplies['flood_irrigation'][t] -= (self.deliveries[key][wateryear] + self.deliveries[key + '_flood_irrigation'][wateryear])
    self.daily_supplies['recharge_uncontrolled'][t] -= self.deliveries[key + '_flood'][wateryear]
    if m == 9 and da == 30:
      #end of the water year (Sep 30): roll the daily records into annual totals
      self.annual_supplies['delivery'][wateryear] += self.deliveries[key][wateryear]
      self.annual_supplies['flood_irrigation'][wateryear] += self.deliveries[key + '_flood_irrigation'][wateryear]
      #any paper balance that was never actually delivered counts as an undelivered trade
      self.deliveries['undelivered_trades'][wateryear] += max(self.paper_balance[key] - self.deliveries[key][wateryear], 0.0)
  def accounting_banking_activity(self, t, da, m, wateryear):
    """Adjust the stacked daily-supply records for banking/exchange activity.

    Called after self.accounting(); converts the raw cumulative delivery
    counts into mutually-exclusive stacked categories for area plotting, and
    rolls up annual totals on Sep 30.

    Accounting notes:
    * 'recharged' and 'exchanged_GW' are added back to the (negative)
      delivery stacks because only irrigation deliveries should count there.
    * exchanged_GW is groundwater pumped from a bank and 'delivered' to
      another district - the district gets reservoir credit, so the later
      surface deliveries would otherwise be double counted here.
    * exchanged_SW is the other side of that trade: groundwater delivered to
      a district that did not own it.  It should count as an irrigation
      delivery but is not recorded that way on delivery, so it is added back
      here (exchanged_GW/exchanged_SW are counters that square the records
      from groundwater exchanges).
    """
    self.daily_supplies['delivery'][t] += self.deliveries['recharged'][wateryear] + self.deliveries['exchanged_GW'][wateryear] - self.deliveries['exchanged_SW'][wateryear]
    self.daily_supplies['flood_irrigation'][t] += self.deliveries['recharged'][wateryear] + self.deliveries['exchanged_GW'][wateryear] - self.deliveries['exchanged_SW'][wateryear]
    #leiu_applied: irrigation deliveries that came from the in-leiu banking district's
    #banking partners (the bank uses the water and records a balance for the deliverer)
    self.daily_supplies['leiu_applied'][t] += self.daily_supplies['flood_irrigation'][t] - self.deliveries['inleiu_irrigation'][wateryear]
    self.daily_supplies['leiu_recharged'][t] += self.daily_supplies['leiu_applied'][t] - self.deliveries['inleiu_recharge'][wateryear]
    #banked: uncontrolled (flood) water banked by the district in-district
    self.daily_supplies['banked'][t] += self.daily_supplies['leiu_recharged'][t] + self.deliveries['exchanged_SW'][wateryear] - self.deliveries['exchanged_GW'][wateryear] - self.deliveries['recover_banked'][wateryear]
    #pumping: private pumping for irrigation
    self.daily_supplies['pumping'][t] += self.daily_supplies['banked'][t] - self.annual_private_pumping
    #leiu_delivered: water from an in-leiu banking district recovered by its banking partners
    self.daily_supplies['leiu_delivered'][t] += self.daily_supplies['pumping'][t] - self.deliveries['leiupumping'][wateryear]
    #recharge_delivery: water recharged at a bank from the district's own contract
    #amount (instead of flood/uncontrolled water)
    self.daily_supplies['recharge_delivery'][t] += self.daily_supplies['leiu_delivered'][t] - self.deliveries['recharged'][wateryear]
    #recharge_uncontrolled: recharge from flood flows (flood volumes were subtracted in
    #self.accounting() - this only completes the stacked-plot adjustment)
    self.daily_supplies['recharge_uncontrolled'][t] += self.daily_supplies['recharge_delivery'][t]
    if m == 9 and da == 30:
      #end of the water year (Sep 30): net out exchanges from the annual delivery total
      self.annual_supplies['delivery'][wateryear] += self.deliveries['exchanged_SW'][wateryear] - self.deliveries['recharged'][wateryear] - (self.deliveries['exchanged_GW'][wateryear] - self.deliveries['undelivered_trades'][wateryear])
      recharged_recovery = 0.0
      if self.annual_supplies['delivery'][wateryear] < 0.0:
        #negative net delivery means recovered water was recharged - reclassify it
        recharged_recovery = self.annual_supplies['delivery'][wateryear]
        self.annual_supplies['delivery'][wateryear] = 0.0
      self.annual_supplies['banked_accepted'][wateryear] = self.deliveries['recover_banked'][wateryear] + (self.deliveries['exchanged_GW'][wateryear] - self.deliveries['undelivered_trades'][wateryear]) - self.deliveries['exchanged_SW'][wateryear] + recharged_recovery
      self.annual_supplies['leiu_applied'][wateryear] = self.deliveries['inleiu_irrigation'][wateryear]
      self.annual_supplies['leiu_recharged'][wateryear] = self.deliveries['inleiu_recharge'][wateryear]
      self.annual_supplies['leiu_delivered'][wateryear] = self.deliveries['leiupumping'][wateryear]
def accounting_leiubank(self,t, m, da, wateryear):
#takes banked storage (in in-leiu banks) and builds timeseries of member accounts
stacked_amount = 0.0
self.recharge_rate_series[t] = self.recharge_rate
for x in self.participant_list:
self.bank_timeseries[x][t] = self.inleiubanked[x] + stacked_amount
stacked_amount += self.inleiubanked[x]
if m == 9 and da == 30:
for x in self.participant_list:
sum_total = 0.0
for year_counter in range(0, wateryear):
sum_total += self.annual_timeseries[x][year_counter]
self.annual_timeseries[x][wateryear] = self.inleiubanked[x] - sum_total
def accounting_as_df(self, index):
#wirte district accounts and deliveries into a data fram
df = pd.DataFrame()
for n in self.daily_supplies:
df['%s_%s' % (self.key,n)] = pd.Series(self.daily_supplies[n], index = index)
return df
def accounting_as_df_full(self, index):
#wirte district accounts and deliveries into a data fram
df = pd.DataFrame()
for n in self.daily_supplies_full:
df['%s_%s' % (self.key,n)] = pd.Series(self.daily_supplies_full[n], index = index)
return df
def annual_results_as_df(self):
#wite annual district deliveries into a data frame
df = pd.DataFrame()
for n in self.annual_supplies:
df['%s_%s' % (self.key,n)] = pd.Series(self.annual_supplies[n])
return df
def bank_as_df(self, index):
#write leiubanking accounts (plus bank recharge rates) into a dataframe
df = pd.DataFrame()
for n in self.participant_list:
df['%s_%s_leiu' % (self.key,n)] = pd.Series(self.bank_timeseries[n], index = index)
df['%s_rate' % self.key] = pd.Series(self.recharge_rate_series, index = index)
return df
def annual_bank_as_df(self):
#write anmual banking changes into a data frame
df = pd.DataFrame()
for n in self.participant_list:
df['%s_%s_leiu' % (self.key,n)] = pd.Series(self.annual_timeseries[n])
return df
def get_iterable(self, x):
if isinstance(x, cl.Iterable):
return x
else:
return (x,)
| [
"pandas.DataFrame",
"numpy.mean",
"numpy.zeros",
"pandas.Series"
] | [((2213, 2240), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (2221, 2240), True, 'import numpy as np\n'), ((2382, 2409), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (2390, 2409), True, 'import numpy as np\n'), ((2451, 2478), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (2459, 2478), True, 'import numpy as np\n'), ((2616, 2643), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (2624, 2643), True, 'import numpy as np\n'), ((2806, 2833), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (2814, 2833), True, 'import numpy as np\n'), ((2990, 3017), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (2998, 3017), True, 'import numpy as np\n'), ((3187, 3214), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (3195, 3214), True, 'import numpy as np\n'), ((3259, 3286), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (3267, 3286), True, 'import numpy as np\n'), ((68069, 68081), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (68077, 68081), True, 'import numpy as np\n'), ((68111, 68123), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (68119, 68123), True, 'import numpy as np\n'), ((76276, 76290), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (76288, 76290), True, 'import pandas as pd\n'), ((76540, 76554), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (76552, 76554), True, 'import pandas as pd\n'), ((76796, 76810), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (76808, 76810), True, 'import pandas as pd\n'), ((77047, 77061), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (77059, 77061), True, 'import pandas as pd\n'), ((77219, 77268), 'pandas.Series', 'pd.Series', (['self.recharge_rate_series'], {'index': 'index'}), '(self.recharge_rate_series, index=index)\n', (77228, 
77268), True, 'import pandas as pd\n'), ((77379, 77393), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (77391, 77393), True, 'import pandas as pd\n'), ((1626, 1653), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (1634, 1653), True, 'import numpy as np\n'), ((1736, 1763), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (1744, 1763), True, 'import numpy as np\n'), ((1882, 1909), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (1890, 1909), True, 'import numpy as np\n'), ((2053, 2080), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (2061, 2080), True, 'import numpy as np\n'), ((4960, 4976), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (4968, 4976), True, 'import numpy as np\n'), ((5280, 5307), 'numpy.zeros', 'np.zeros', (['self.number_years'], {}), '(self.number_years)\n', (5288, 5307), True, 'import numpy as np\n'), ((5563, 5579), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (5571, 5579), True, 'import numpy as np\n'), ((5627, 5643), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (5635, 5643), True, 'import numpy as np\n'), ((5702, 5718), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (5710, 5718), True, 'import numpy as np\n'), ((5770, 5786), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (5778, 5786), True, 'import numpy as np\n'), ((5838, 5854), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (5846, 5854), True, 'import numpy as np\n'), ((5902, 5918), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (5910, 5918), True, 'import numpy as np\n'), ((5970, 5986), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (5978, 5986), True, 'import numpy as np\n'), ((6037, 6053), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (6045, 6053), True, 'import numpy as np\n'), ((6136, 6152), 'numpy.zeros', 'np.zeros', (['self.T'], {}), 
'(self.T)\n', (6144, 6152), True, 'import numpy as np\n'), ((6347, 6363), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (6355, 6363), True, 'import numpy as np\n'), ((7720, 7736), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (7728, 7736), True, 'import numpy as np\n'), ((10834, 10846), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (10842, 10846), True, 'import numpy as np\n'), ((76364, 76410), 'pandas.Series', 'pd.Series', (['self.daily_supplies[n]'], {'index': 'index'}), '(self.daily_supplies[n], index=index)\n', (76373, 76410), True, 'import pandas as pd\n'), ((76629, 76680), 'pandas.Series', 'pd.Series', (['self.daily_supplies_full[n]'], {'index': 'index'}), '(self.daily_supplies_full[n], index=index)\n', (76638, 76680), True, 'import pandas as pd\n'), ((76881, 76915), 'pandas.Series', 'pd.Series', (['self.annual_supplies[n]'], {}), '(self.annual_supplies[n])\n', (76890, 76915), True, 'import pandas as pd\n'), ((77138, 77185), 'pandas.Series', 'pd.Series', (['self.bank_timeseries[n]'], {'index': 'index'}), '(self.bank_timeseries[n], index=index)\n', (77147, 77185), True, 'import pandas as pd\n'), ((77470, 77506), 'pandas.Series', 'pd.Series', (['self.annual_timeseries[n]'], {}), '(self.annual_timeseries[n])\n', (77479, 77506), True, 'import pandas as pd\n'), ((8063, 8079), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (8071, 8079), True, 'import numpy as np\n'), ((8116, 8132), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (8124, 8132), True, 'import numpy as np\n'), ((8169, 8185), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (8177, 8185), True, 'import numpy as np\n'), ((15889, 15951), 'numpy.mean', 'np.mean', (['self.pumping[t + start_of_month:t + start_next_month]'], {}), '(self.pumping[t + start_of_month:t + start_next_month])\n', (15896, 15951), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import logging
import swifter
import click
import gzip
import json
import os
from functools import partial, reduce
from src.models import ndcg_score
from collections import Counter
from datetime import datetime
def predict_simple(row:pd.Series, cols_domain:list,
                   cols_item:list, df_most_bought:pd.DataFrame,
                   available_domains:list,
                   most_bought_items:list)->list:
    """Recommend 10 items without ranking the user's domains.

    `cols_item` and `cols_domain` must be aligned: if the first element of
    `cols_item` is `last_viewed_1`, the first element of `cols_domain`
    should be `domain_id_last_viewed_1`.

    The candidate list is built in three stages:
      1. the user's own (non-NaN, deduplicated) item features;
      2. the top-10 sellers of each viewed/searched domain, interleaved;
      3. the globally most bought items, until 10 items are reached.
    """
    viewed_domains = [d for d in row[cols_domain].unique()
                      if d in available_domains]
    # Stage 1: the user's own items, NaNs dropped, duplicates removed.
    seen_items = [row[col] for col in cols_item if not np.isnan(row[col])]
    preds = list(set(seen_items))
    # Stage 2: interleave the per-domain best sellers so every domain
    # contributes one item before any domain contributes a second one.
    per_domain_tops = [df_most_bought.loc[dom].head(10).index.values
                       for dom in viewed_domains]
    interleaved = []
    for rank_group in zip(*per_domain_tops):
        interleaved.extend(rank_group)
    preds.extend(interleaved[:10 - len(preds)])
    # Stage 3: pad with the overall best sellers.
    preds.extend(most_bought_items[:10 - len(preds)])
    return [int(item) for item in preds]
def predict_vote(row:pd.Series, cols_domain:list,
                 cols_item:list, df_most_bought:pd.DataFrame,
                 available_domains:list,
                 most_bought_items:list)->list:
    """
    No ordering on the domains/items;
    With voting: the single most frequent domain among the user's domain
    features supplies all of the backfill items.
    It's important that `cols_item` and `cols_domain` are aligned.
    i.e. if the first elem from `cols_item` is `last_viewed_1`
    then the first elem from `cols_domain` should be `domain_id_last_viewed_1`.
    """
    # Unlike predict_simple, duplicates are kept on purpose: a domain that
    # appears in several feature columns gets that many votes.
    valid_domains = [d for d in row[cols_domain]
                     if d in available_domains]
    try:
        # Ties in most_common() are broken by first-seen insertion order.
        top_domain = Counter(valid_domains).most_common(1)[0][0]
    except IndexError as e:
        # NOTE(review): hard-coded fallback used when the user has no valid
        # domain features -- verify this default domain is intended.
        top_domain = 'MLB-CELLPHONES'
    # The user's own item features, NaNs dropped, deduplicated via set.
    pred_list = list(set(reduce(lambda x, y: x + [row[y]]
                                if not np.isnan(row[y]) else x,
                                cols_item, [])))
    # Backfill with the top-10 selling items of the single winning domain
    # (no interleaving here, unlike predict_simple/predict_ordered).
    top_items = (df_most_bought.loc[top_domain]
                 .head(10).index.values)
    num_missing_items = 10 - len(pred_list)
    pred_list.extend(top_items[:num_missing_items])
    # In case we have not reached 10 items in our recommendation
    # list, we just return the top bought items overall.
    num_missing_items = 10 - len(pred_list)
    pred_list.extend(most_bought_items[:num_missing_items])
    pred_list = [int(x) for x in pred_list]
    return pred_list
def predict_ordered(row:pd.Series, cols_domain:list,
                    cols_item:list, df_most_bought:pd.DataFrame,
                    available_domains:list,
                    most_bought_items:list)->list:
    """
    Order domains/items by domains with most sold items.
    It's important that `cols_item` and `cols_domain` are aligned.
    i.e. if the first elem from `cols_item` is `last_viewed_1`
    then the first elem from `cols_domain` should be `domain_id_last_viewed_1`.
    Backfill items come first from the user's domains (best-selling domain
    first), then from the globally most bought items.
    """
    valid_domains = [d for d in row[cols_domain].unique()
                     if d in available_domains]
    # Total purchases per domain ('index_sum' holds the per-domain count).
    num_bought_domain = [df_most_bought.loc[v,'index_sum'].values[0]
                         for v in valid_domains]
    # NOTE(review): zip truncates -- row[cols_item] is aligned with
    # cols_domain, not with the deduplicated valid_domains, so item/count
    # pairs can be misaligned when domains repeat or are filtered out.
    sorted_items = sorted(zip(row[cols_item],num_bought_domain),
                          key=lambda t: t[1], reverse=True)
    pred_list = list(filter(lambda i: not np.isnan(i),
                            set([x[0] for x in sorted_items])))
    # The user's domains ranked by total purchase count, best first.
    sorted_domains = [d[0]
                      for d in
                      sorted(zip(valid_domains, num_bought_domain),
                             key=lambda t: t[1], reverse=True)]
    # Interleave top 10 items from each viewed/searched domain and then
    # flatten. BUG FIX: this previously iterated over `valid_domains`,
    # leaving `sorted_domains` unused, so the backfill ignored the domain
    # ranking computed above.
    top_items = [i
                 for items in
                 zip(*[df_most_bought.loc[c]
                       .head(10).index.values
                       for c in sorted_domains])
                 for i in items]
    num_missing_items = 10 - len(pred_list)
    pred_list.extend(top_items[:num_missing_items])
    # In case we have not reached 10 items in our recommendation
    # list, we just return the top bought items overall.
    num_missing_items = 10 - len(pred_list)
    pred_list.extend(most_bought_items[:num_missing_items])
    pred_list = [int(x) for x in pred_list]
    return pred_list
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
@click.argument('prediction_policy', type=click.STRING, default='vote')
def make_prediction(input_filepath:str,
                    output_filepath:str,
                    prediction_policy:str):
    """CLI entry point: load the engineered train/test feature parquets,
    apply the heuristic named by `prediction_policy` ('ordered', 'simple'
    or 'vote') to every test user, and write a timestamped headerless CSV
    submission into `output_filepath`."""
    logger = logging.getLogger(__name__)
    logger.info('Loading data...')
    train_filename = 'train_dataset_features.parquet'
    test_filename = 'test_dataset_features.parquet'
    # `cols_load` is everything read from parquet; `cols_feat_domain` is the
    # subset of domain columns the heuristics scan for a user's domains.
    cols_load = ['item_bought', 'domain_id_item_bought']
    cols_feat_domain = []
    cols_item = [f'{i}_viewed_item_{j}' for i in ['most','last'] for j in range(1,3)]
    cols_load.extend(cols_item)
    cols_item_domain = [f'domain_id_{c}' for c in cols_item]
    cols_load.extend(cols_item_domain)
    cols_feat_domain.extend(cols_item_domain)
    cols_domain = [f'most_viewed_domain_{i}' for i in range(1,3)]
    cols_load.extend(cols_domain)
    cols_feat_domain.extend(cols_domain)
    cols_ngram_domain = [f'domain_id_most_searched_ngram_{i}' for i in range(1,3)]
    cols_load.extend(cols_ngram_domain)
    cols_feat_domain.extend(cols_ngram_domain)
    cols_searched_domain = [f'domain_id_last_searched_{i}' for i in range(1,3)]
    cols_load.extend(cols_searched_domain)
    cols_feat_domain.extend(cols_searched_domain)
    cols_feat_domain.extend(['domain_id_forest'])
    df_train = pd.read_parquet(os.path.join(input_filepath, train_filename),
                               columns=cols_load)
    # The test set has no purchase columns; load user ids instead.
    df_test = pd.read_parquet(os.path.join(input_filepath, test_filename),
                              columns=cols_load[2:]+['user_id', 'domain_id_forest'])
    logger.info('Creating helper intermediate results...')
    # Purchase counts per (domain, item), sorted so that `.loc[domain]
    # .head(10)` yields that domain's top-selling items.
    df_most_bought = (df_train[['domain_id_item_bought','item_bought']]
                      .reset_index()
                      .groupby(by=['domain_id_item_bought','item_bought'])
                      .count()
                      .sort_values(by=['domain_id_item_bought','index'], ascending=False))
    # Add information about the number of items bought per domain
    df_most_bought = df_most_bought.join(df_most_bought
                                         .reset_index()[['domain_id_item_bought','index']]
                                         .groupby(by='domain_id_item_bought')
                                         .sum()
                                         .sort_values(by='index', ascending=False),
                                         how='left', rsuffix='_sum')
    # NOTE(review): `.values` holds the count columns ('index', 'index_sum'),
    # not the (domain, item) MultiIndex -- `i[0]` looks like a purchase count
    # rather than an item id. Verify this is the intended backfill content.
    most_bought_items = [i[0]
                         for i in
                         (df_most_bought
                          .sort_values(by='index', ascending=False)
                          .head(10).values)]
    available_domains = (df_most_bought
                         .reset_index()
                         ['domain_id_item_bought']
                         .unique())
    # Dispatch table from CLI policy name to heuristic implementation.
    pred_dict = {'ordered': predict_ordered,
                 'simple': predict_simple,
                 'vote': predict_vote}
    predict_ = partial(pred_dict[prediction_policy],
                       cols_domain=cols_feat_domain,
                       cols_item=cols_item,
                       df_most_bought=df_most_bought,
                       available_domains=available_domains,
                       most_bought_items=most_bought_items)
    df_test = df_test.set_index('user_id').sort_index()
    logger.info("Predicting with '%s' heuristic...", prediction_policy)
    # swifter parallelises the row-wise apply where possible.
    y_pred = df_test.swifter.apply(predict_, axis=1).values
    df_y_pred = pd.DataFrame(list(y_pred))
    now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    submission_filename = f'{prediction_policy}_{now}.csv'
    logger.info('Saving results...')
    df_y_pred.to_csv(os.path.join(output_filepath, submission_filename),
                     index=False, header=False)
if __name__ == '__main__':
    # Configure root logging before handing control to the click CLI.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    make_prediction()
| [
"logging.getLogger",
"logging.basicConfig",
"click.argument",
"os.path.join",
"collections.Counter",
"datetime.datetime.now",
"functools.partial",
"click.Path",
"numpy.isnan",
"click.command"
] | [((5287, 5302), 'click.command', 'click.command', ([], {}), '()\n', (5300, 5302), False, 'import click\n'), ((5422, 5492), 'click.argument', 'click.argument', (['"""prediction_policy"""'], {'type': 'click.STRING', 'default': '"""vote"""'}), "('prediction_policy', type=click.STRING, default='vote')\n", (5436, 5492), False, 'import click\n'), ((5632, 5659), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5649, 5659), False, 'import logging\n'), ((8368, 8570), 'functools.partial', 'partial', (['pred_dict[prediction_policy]'], {'cols_domain': 'cols_feat_domain', 'cols_item': 'cols_item', 'df_most_bought': 'df_most_bought', 'available_domains': 'available_domains', 'most_bought_items': 'most_bought_items'}), '(pred_dict[prediction_policy], cols_domain=cols_feat_domain,\n cols_item=cols_item, df_most_bought=df_most_bought, available_domains=\n available_domains, most_bought_items=most_bought_items)\n', (8375, 8570), False, 'from functools import partial, reduce\n'), ((9285, 9340), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (9304, 9340), False, 'import logging\n'), ((6722, 6766), 'os.path.join', 'os.path.join', (['input_filepath', 'train_filename'], {}), '(input_filepath, train_filename)\n', (6734, 6766), False, 'import os\n'), ((6848, 6891), 'os.path.join', 'os.path.join', (['input_filepath', 'test_filename'], {}), '(input_filepath, test_filename)\n', (6860, 6891), False, 'import os\n'), ((9083, 9133), 'os.path.join', 'os.path.join', (['output_filepath', 'submission_filename'], {}), '(output_filepath, submission_filename)\n', (9095, 9133), False, 'import os\n'), ((5342, 5365), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (5352, 5365), False, 'import click\n'), ((5407, 5419), 'click.Path', 'click.Path', ([], {}), '()\n', (5417, 5419), False, 'import click\n'), ((8920, 8934), 'datetime.datetime.now', 
'datetime.now', ([], {}), '()\n', (8932, 8934), False, 'from datetime import datetime\n'), ((4230, 4241), 'numpy.isnan', 'np.isnan', (['i'], {}), '(i)\n', (4238, 4241), True, 'import numpy as np\n'), ((2403, 2425), 'collections.Counter', 'Counter', (['valid_domains'], {}), '(valid_domains)\n', (2410, 2425), False, 'from collections import Counter\n'), ((931, 947), 'numpy.isnan', 'np.isnan', (['row[y]'], {}), '(row[y])\n', (939, 947), True, 'import numpy as np\n'), ((2611, 2627), 'numpy.isnan', 'np.isnan', (['row[y]'], {}), '(row[y])\n', (2619, 2627), True, 'import numpy as np\n')] |
import argparse
import logging
import sys
# DGB
import glob
import os
import shutil
# end DGB
import numpy as np
import tensorflow as tf
from vad.training.estimator import VadEstimator
from vad.training.input_pipeline import FEAT_SIZE
def main():
    """Export a trained VAD TensorFlow estimator as a SavedModel.

    Rebuilds the estimator from the checkpoints under
    ``<model-dir>/<data-set>/<model>/``, exports it with a raw serving
    input receiver, and renames the timestamped export folder that
    ``export_savedmodel`` creates to a stable ``exported/`` folder.
    """
    parser = argparse.ArgumentParser(
        description="export trained TensorFlow model for inference"
    )
    parser.add_argument(
        "--model-dir", type=str, default="", help="pretrained model directory"
    )
    # DGB
    parser.add_argument(
        "--data-set", type=str, default="", help="name of data set"
    )
    # DGB -- not sure this is even used?
    # parser.add_argument(
    #     "--ckpt", type=str, default="", help="pretrained checkpoint directory"
    # )
    parser.add_argument("--model", type=str, default="resnet1d", help="model name")
    parser.add_argument("--n-filters", type=str, default="32-64-128")
    parser.add_argument("--n-kernels", type=str, default="8-5-3")
    parser.add_argument("--n-fc-units", type=str, default="2048-2048")
    parser.add_argument(
        "--n-classes", "-n", type=int, default=1, help="number of classes"
    )
    args = parser.parse_args()
    # NOTE: asserts are stripped under `python -O`; kept as-is so the CLI
    # error behavior is unchanged.
    assert args.model in ["resnet1d"], "Wrong model name"
    assert len(args.n_filters.split("-")) == 3, "3 values required for --n-filters"
    assert len(args.n_kernels.split("-")) == 3, "3 values required for --n-kernels"
    assert len(args.n_fc_units.split("-")) == 2, "2 values required --n-fc-units"
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    logger = logging.getLogger(__name__)
    # Seed NumPy and TensorFlow once for reproducibility. (The original
    # called both seed functions twice in a row with no randomness in
    # between; the redundant second pair of calls was removed.)
    np.random.seed(0)
    tf.set_random_seed(0)
    tf.logging.set_verbosity(tf.logging.INFO)
    args.model_dir = args.model_dir + args.data_set + "/" + args.model + "/"  # DGB
    save_dir = args.model_dir
    params = {
        "model": args.model,
        "n_classes": args.n_classes,
        "n_cnn_filters": [int(x) for x in args.n_filters.split("-")],
        "n_cnn_kernels": [int(x) for x in args.n_kernels.split("-")],
        "n_fc_units": [int(x) for x in args.n_fc_units.split("-")],
    }
    train_config = tf.estimator.RunConfig(
        save_summary_steps=10,
        save_checkpoints_steps=1000,
        keep_checkpoint_max=20,
        log_step_count_steps=10,
    )
    estimator_obj = VadEstimator(params)
    estimator = tf.estimator.Estimator(
        model_fn=estimator_obj.model_fn,
        model_dir=save_dir,
        config=train_config,
        params=params,
    )
    # Serving receiver accepts a single pre-extracted feature tensor.
    feature_spec = {
        "features_input": tf.placeholder(
            dtype=tf.float32, shape=[1, FEAT_SIZE[1], FEAT_SIZE[0]]
        )
    }
    logger.info("Exporting TensorFlow trained model ...")
    raw_serving_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(
        feature_spec, default_batch_size=1
    )
    estimator.export_savedmodel(save_dir, raw_serving_fn, strip_default_attrs=True)
    # DGB
    # `export_savedmodel` creates a folder named with a timestamp and gives
    # no way to control that. Rename it to a stable "exported/" folder
    # since we only ever keep one export of the model.
    # (1) If exported folder already exists, delete it.
    export_dir = save_dir + "exported/"
    if os.path.exists(export_dir) and os.path.isdir(export_dir):
        print("Deleting old folder", export_dir)
        shutil.rmtree(export_dir)
    # (2) Find latest folder in the save_dir.
    # NOTE(review): assumes the newest entry under save_dir is the export
    # just written -- racy if anything else touches the directory.
    list_of_files = glob.glob(save_dir + "*")
    newest_folder = max(list_of_files, key=os.path.getctime)
    # (3) Rename it to "exported."
    print("Renaming", newest_folder, "to", export_dir)
    os.rename(newest_folder, export_dir)
    # end DGB
if __name__ == "__main__":
    # Script entry point.
    main()
| [
"logging.basicConfig",
"logging.getLogger",
"os.path.exists",
"tensorflow.estimator.RunConfig",
"argparse.ArgumentParser",
"os.rename",
"tensorflow.placeholder",
"tensorflow.estimator.Estimator",
"tensorflow.logging.set_verbosity",
"tensorflow.estimator.export.build_raw_serving_input_receiver_fn",... | [((265, 354), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""export trained TensorFlow model for inference"""'}), "(description=\n 'export trained TensorFlow model for inference')\n", (288, 354), False, 'import argparse\n'), ((1483, 1542), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'stream': 'sys.stdout'}), '(level=logging.DEBUG, stream=sys.stdout)\n', (1502, 1542), False, 'import logging\n'), ((1556, 1583), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1573, 1583), False, 'import logging\n'), ((1588, 1605), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1602, 1605), True, 'import numpy as np\n'), ((1610, 1631), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(0)'], {}), '(0)\n', (1628, 1631), True, 'import tensorflow as tf\n'), ((1636, 1677), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (1660, 1677), True, 'import tensorflow as tf\n'), ((1798, 1815), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1812, 1815), True, 'import numpy as np\n'), ((1820, 1841), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(0)'], {}), '(0)\n', (1838, 1841), True, 'import tensorflow as tf\n'), ((2158, 2285), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'save_summary_steps': '(10)', 'save_checkpoints_steps': '(1000)', 'keep_checkpoint_max': '(20)', 'log_step_count_steps': '(10)'}), '(save_summary_steps=10, save_checkpoints_steps=1000,\n keep_checkpoint_max=20, log_step_count_steps=10)\n', (2180, 2285), True, 'import tensorflow as tf\n'), ((2342, 2362), 'vad.training.estimator.VadEstimator', 'VadEstimator', (['params'], {}), '(params)\n', (2354, 2362), False, 'from vad.training.estimator import VadEstimator\n'), ((2379, 2494), 'tensorflow.estimator.Estimator', 
'tf.estimator.Estimator', ([], {'model_fn': 'estimator_obj.model_fn', 'model_dir': 'save_dir', 'config': 'train_config', 'params': 'params'}), '(model_fn=estimator_obj.model_fn, model_dir=save_dir,\n config=train_config, params=params)\n', (2401, 2494), True, 'import tensorflow as tf\n'), ((2758, 2853), 'tensorflow.estimator.export.build_raw_serving_input_receiver_fn', 'tf.estimator.export.build_raw_serving_input_receiver_fn', (['feature_spec'], {'default_batch_size': '(1)'}), '(feature_spec,\n default_batch_size=1)\n', (2813, 2853), True, 'import tensorflow as tf\n'), ((3503, 3528), 'glob.glob', 'glob.glob', (["(save_dir + '*')"], {}), "(save_dir + '*')\n", (3512, 3528), False, 'import glob\n'), ((3691, 3727), 'os.rename', 'os.rename', (['newest_folder', 'export_dir'], {}), '(newest_folder, export_dir)\n', (3700, 3727), False, 'import os\n'), ((2578, 2649), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[1, FEAT_SIZE[1], FEAT_SIZE[0]]'}), '(dtype=tf.float32, shape=[1, FEAT_SIZE[1], FEAT_SIZE[0]])\n', (2592, 2649), True, 'import tensorflow as tf\n'), ((3295, 3321), 'os.path.exists', 'os.path.exists', (['export_dir'], {}), '(export_dir)\n', (3309, 3321), False, 'import os\n'), ((3326, 3351), 'os.path.isdir', 'os.path.isdir', (['export_dir'], {}), '(export_dir)\n', (3339, 3351), False, 'import os\n'), ((3410, 3435), 'shutil.rmtree', 'shutil.rmtree', (['export_dir'], {}), '(export_dir)\n', (3423, 3435), False, 'import shutil\n')] |
#!/usr/bin/env python
# coding: utf-8
# # notebook: segmenting nuclei with the cellpose napari plugin
#
# ## Overview
#
# Plugins extend the functionality of napari and can be combined together to build workflows. Many plugins exist for common analysis tasks such as segmentation and filtering. In this activity, we will segment nuclei using the [cellpose napari plugin](https://github.com/MouseLand/cellpose-napari). Please visit the [napari hub](https://www.napari-hub.org/) for a listing of the available plugins.
#
# ### Data source
#
# The data were downloaded from the [OpticalPooledScreens github repository](https://github.com/feldman4/OpticalPooledScreens).
#
# ## Loading the data
#
# We will start by loading an image of DAPI stained nuclei. We can use `scikit-image`'s `imread()` function to download the data from the link below and load it into a numpy array called `nuclei`.
# In[1]:
from skimage import io
# The nuclei image is hosted in the tutorial repository; imread() fetches
# it over the network, so this cell needs internet access.
url = 'https://raw.githubusercontent.com/kevinyamauchi/napari-spot-detection-tutorial/main/data/nuclei_cropped.tif'
nuclei = io.imread(url)
# ## Viewing the image
#
# As we did in the previous notebooks, we can view the image in napari using the `napari.view_image()` function. Here we set the colormap to `magma`.
# In[2]:
import napari
# Opens the napari viewer GUI with the image as its only layer.
viewer = napari.view_image(nuclei, colormap='magma')
# In[3]:
from napari.utils import nbscreenshot
# Capture the current viewer canvas as an inline notebook image.
nbscreenshot(viewer)
# ## Segment nuclei
#
# To segment the nuclei, we will use the [cellpose napari plugin](https://github.com/MouseLand/cellpose-napari). Please perform the segmentation using the instructions below. For more information on cellpose, please see the [paper](https://www.nature.com/articles/s41592-020-01018-x) and [repository](https://github.com/MouseLand/cellpose).
#
# 1. Start the cellpose plugin. From the menu bar, click Plugins->cellpose-napari: cellpose. You should see the plugin added to the right side of the viewer.
#
# <img src="https://raw.githubusercontent.com/alisterburt/napari-workshops/main/napari-workshops/notebooks/resources/cellpose_plugin.png" alt="cellpose plugin" width="80%">
#
#
# 2. Select the "nuclei" image layer.
#
# <img src="https://raw.githubusercontent.com/alisterburt/napari-workshops/main/napari-workshops/notebooks/resources/cellpose_screenshots_image_selection.png" alt="select the image layer" width="80%">
#
#
# 3. Set the model type to "nuclei"
#
# <img src="https://raw.githubusercontent.com/alisterburt/napari-workshops/main/napari-workshops/notebooks/resources/cellpose_screenshots_model_selection.png" alt="select the nuclei model" width="80%">
#
#
# 4. We need to give cellpose an estimate of the size of the nuclei so it can properly scale the data. We can do so using a napari Shapes layer. With the Shapes layer, we will outline some nuclei and then cellpose will use those annotations to estimate the size of the nuclei.
# 1. Click the "add Shapes" layer button in the viewer. This will create and select a new layer called "Shapes".
#
# <img src="https://raw.githubusercontent.com/alisterburt/napari-workshops/main/napari-workshops/notebooks/resources/cellpose_screenshots_add_shape.png" alt="add a shapes layer to measure the diameter" width="80%">
#
# 2. Set the mode to "Ellipse" by clicking the button in the layer controls.
# 3. In the canvas, click and drag to add an ellipse that around a "representative" nucleus. For the purpose of this demo, this is enough, but for other data you may need to give more examples to make a better estimate of the cell diameter. If you need to pan/zoom while adding an ellipse, holding the spacebar will allow you to pan/zoom using your mouse (pan via click/drag, zoom by scrolling).
# 4. If you would like to edit or move an ellipse, you can switch to "Select shapes" mode in the viewer. Shapes can now be moved by clicking on them and then dragging. They can be resized by selecting them and then dragging the control points.
#
# <img src="https://raw.githubusercontent.com/alisterburt/napari-workshops/main/napari-workshops/notebooks/resources/cellpose_screenshots_select_shape.png" alt="use selection mode to edit shapes" width="80%">
#
# 5. Once you are happy with your annotations, you can click the "compute diameter from shape layer" button and you will see the "diameter" value populated. For this demo, the value is typically around 10 pixels.
#
# <img src="https://raw.githubusercontent.com/alisterburt/napari-workshops/main/napari-workshops/notebooks/resources/cellpose_screenshots_diameter.png" alt="estimate the cell diameters" width="80%">
#
#
# 5. For this demo, we recommend de-selecting "average 4 nets"(potentially less accurate, but faster segmentation) and otherwise using the default settings. If you would like to learn more about the cellpose settings, please see the [cellpose plugin documentation](https://cellpose-napari.readthedocs.io/en/latest/settings.html).
#
# <img src="https://raw.githubusercontent.com/alisterburt/napari-workshops/main/napari-workshops/notebooks/resources/cellpose_screenshots_settings.png" alt="select the segmentation settings" width="80%">
#
#
# 6. Now you are ready to run the segmentation! Click the "run segmentation" button. Segmentation for this demo typically takes ~1.5 minutes. Note that there is not currently a progress bar, so please just be patient.
#
# <img src="https://raw.githubusercontent.com/alisterburt/napari-workshops/main/napari-workshops/notebooks/resources/cellpose_screenshots_run.png" alt="start the segmentation" width="80%">
#
#
# 7. When the segmentation is completed, you will see some new layers added to the layer list. Of particular interest is "nuclei_cp_masks_000", which contains our segmentation mask added as a Labels layer.
#
# <img src="https://raw.githubusercontent.com/alisterburt/napari-workshops/main/napari-workshops/notebooks/resources/cellpose_screenshots_results.png" alt="completed segmentation" width="80%">
#
# ## Quantify nuclei shape
#
# In this next section, we will compute and display some basic properties of the segmented cells (e.g., area) using scikit-image and matplotlib.
#
# ### Measure area and perimeter
#
# We can use the scikit-image [`regionprops_table`](https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.regionprops_table) function to measure the area and perimeter of the detected nuclei. `regionprops_table` outputs a dictionary where each key is a name of a measurement (e.g., `'area'`) and the value is the measurement value for each detected object (nucleus in the case).
# In[4]:
from skimage.io import imread
# Load a previously saved cellpose label mask and add it to the viewer as
# a Labels layer.
label_im = imread('./data/nuclei_cp_masks_000.png')
viewer.add_labels(label_im, name='nuclei_cp_masks_000')
# In[5]:
from skimage.measure import regionprops_table
# measure the area and perimeter for each nucleus
rp_table = regionprops_table(
    viewer.layers['nuclei_cp_masks_000'].data,
    properties=('area', 'perimeter')
)
# In[6]:
from matplotlib import pyplot as plt
import numpy as np
# print the median area
median_area = np.median(rp_table['area'])
print(f'median area: {median_area} px')
# plot a histogram of the areas
plt.hist(rp_table['area']);
plt.xlabel('nucleus area', fontsize=20);
plt.ylabel('count', fontsize=20);
plt.show();
# Finally, we can calculate the circularity from the area and perimeter measurements we made above. The circularity is a shape factor that is 1 when the object is a circle and less than one for shapes that are more "starfish-like". The circularity is defined as
#
# $$f_{circ} = \frac{4 \pi A}{P^2}$$
#
# where A is the area and P is the perimeter of the object. We plot the circularity vs. the area and see that the circularity of the nuclei does not appear to depend on the area.
# In[7]:
# calculate the circularity of the nuclei: 4*pi*A / P**2
# (1 for a perfect circle, < 1 for more irregular shapes)
circularity = (4 * np.pi * rp_table['area']) / np.square(rp_table['perimeter'])
# use matplotlib to visualize the relationship between nucleus circularity and area
plt.scatter(rp_table['area'], circularity);
plt.xlabel('nucleus area', fontsize=20);
plt.ylabel('circularity', fontsize=20);
plt.ylim((0, 2))
plt.show()
# # Conclusions
#
# In this notebook, we have used the cellpose-napari plugin to perform nucleus segmentation. We then used the results of the segmentation to inspect the relationship between nucleus area and circularity. This demonstration highlights how one can combine napari plugins and python libraries to make measurements on microscopy data.
| [
"numpy.median",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"skimage.measure.regionprops_table",
"napari.view_image",
"numpy.square",
"skimage.io.imread",
"matplotlib.pyplot.scatter",
"napari.utils.nbscreenshot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplo... | [((1058, 1072), 'skimage.io.imread', 'io.imread', (['url'], {}), '(url)\n', (1067, 1072), False, 'from skimage import io\n'), ((1287, 1330), 'napari.view_image', 'napari.view_image', (['nuclei'], {'colormap': '"""magma"""'}), "(nuclei, colormap='magma')\n", (1304, 1330), False, 'import napari\n'), ((1383, 1403), 'napari.utils.nbscreenshot', 'nbscreenshot', (['viewer'], {}), '(viewer)\n', (1395, 1403), False, 'from napari.utils import nbscreenshot\n'), ((6665, 6705), 'skimage.io.imread', 'imread', (['"""./data/nuclei_cp_masks_000.png"""'], {}), "('./data/nuclei_cp_masks_000.png')\n", (6671, 6705), False, 'from skimage.io import imread\n'), ((6881, 6980), 'skimage.measure.regionprops_table', 'regionprops_table', (["viewer.layers['nuclei_cp_masks_000'].data"], {'properties': "('area', 'perimeter')"}), "(viewer.layers['nuclei_cp_masks_000'].data, properties=(\n 'area', 'perimeter'))\n", (6898, 6980), False, 'from skimage.measure import regionprops_table\n'), ((7095, 7122), 'numpy.median', 'np.median', (["rp_table['area']"], {}), "(rp_table['area'])\n", (7104, 7122), True, 'import numpy as np\n'), ((7196, 7222), 'matplotlib.pyplot.hist', 'plt.hist', (["rp_table['area']"], {}), "(rp_table['area'])\n", (7204, 7222), True, 'from matplotlib import pyplot as plt\n'), ((7224, 7263), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""nucleus area"""'], {'fontsize': '(20)'}), "('nucleus area', fontsize=20)\n", (7234, 7263), True, 'from matplotlib import pyplot as plt\n'), ((7265, 7297), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count"""'], {'fontsize': '(20)'}), "('count', fontsize=20)\n", (7275, 7297), True, 'from matplotlib import pyplot as plt\n'), ((7299, 7309), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7307, 7309), True, 'from matplotlib import pyplot as plt\n'), ((8017, 8059), 'matplotlib.pyplot.scatter', 'plt.scatter', (["rp_table['area']", 'circularity'], {}), "(rp_table['area'], circularity)\n", (8028, 8059), True, 'from 
matplotlib import pyplot as plt\n'), ((8061, 8100), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""nucleus area"""'], {'fontsize': '(20)'}), "('nucleus area', fontsize=20)\n", (8071, 8100), True, 'from matplotlib import pyplot as plt\n'), ((8102, 8140), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""circularity"""'], {'fontsize': '(20)'}), "('circularity', fontsize=20)\n", (8112, 8140), True, 'from matplotlib import pyplot as plt\n'), ((8142, 8158), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 2)'], {}), '((0, 2))\n', (8150, 8158), True, 'from matplotlib import pyplot as plt\n'), ((8159, 8169), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8167, 8169), True, 'from matplotlib import pyplot as plt\n'), ((7898, 7930), 'numpy.square', 'np.square', (["rp_table['perimeter']"], {}), "(rp_table['perimeter'])\n", (7907, 7930), True, 'import numpy as np\n')] |
#!/usr/bin/python
import numpy as np
from matplotlib import pyplot as plt
import networkx as nx
from names import names
from draw_graph import draw_tree
# Sample a 2-state label z per tree node via a Markov chain over tree edges
# (theta rows: parent state, cols: child state), then emit one Gaussian
# observation per node, y ~ N(mu[z], sigma), and save everything to disk.
theta = np.asarray([[.65, .35], [.1, .9]])
mu = np.asarray([50., 100.])
sigma = 15. # std-deviation of both normal distributions
K = 2  # number of discrete states
use_random_tree = True
use_random_z0_init = False
if use_random_tree:
    N = 100
    T = nx.random_tree(N)
else:
    T = nx.balanced_tree(2,6)
    N = len(T.nodes())
D = max([d for n, d in T.degree()])  # max degree bounds children per node
A = -np.ones([N, D], dtype='int') # adjacency (children table, -1 = empty slot)
C = np.zeros(N, dtype='int') # edge count (number of children recorded per node)
O = np.zeros(N, dtype='int') # visit order
edges = T.edges()
nodes = T.nodes()
# Build an undirected adjacency map: node -> list of neighbours.
E = dict()
for (u, v) in edges:
    if u not in E:
        E[u] = list()
    if v not in E:
        E[v] = list()
    E[u].append(v)
    E[v].append(u)
visited = set()
# sample z: BFS from node 0 (the root), drawing each child's state from
# the theta row indexed by its parent's state
z = np.empty(N, dtype='int')
if use_random_z0_init:
    z[0] = np.random.choice(np.arange(K), size=1, replace=True, p=theta[0])
else:
    z[0] = 1
q = list([0])  # BFS queue seeded with the root
i = 0
while len(q) != 0:
    s = q[0]
    del q[0]  # pop front (O(n) list delete; fine at this scale)
    O[i] = s
    i += 1
    if s in visited:
        continue
    visited.add(s)
    for ngb in E[s]:
        if ngb not in visited:
            q.append(ngb)
            z[ngb] = np.random.choice(
                np.arange(K), size=1, replace=True, p=theta[z[s]])
            A[s, C[s]] = ngb  # record ngb as the next child of s
            C[s] += 1
# Gaussian emission conditioned on each node's sampled state.
y = np.random.randn(N)*sigma + mu[z]
# Node labels: "<name>\n<value truncated to 1 decimal> (G|B)", with the
# root additionally marked; state 1 -> 'G', state 0 -> 'B'.
labels = {n: (str(names[n]) + "\n" + str(int(10*v)/10.) + " ({type})".format(type=('G' if z[n] else 'B')) + ("" if n != 0 else "\n==ROOT=="))
          for n, v in zip(nodes, y)}
draw_tree(edges, labels)
np.savez_compressed('tree_data', D=D, A=A, C=C, y=y, z=z, sigma=sigma, O=O)
| [
"draw_graph.draw_tree",
"numpy.ones",
"numpy.asarray",
"networkx.balanced_tree",
"numpy.zeros",
"numpy.empty",
"numpy.savez_compressed",
"numpy.random.randn",
"networkx.random_tree",
"numpy.arange"
] | [((163, 201), 'numpy.asarray', 'np.asarray', (['[[0.65, 0.35], [0.1, 0.9]]'], {}), '([[0.65, 0.35], [0.1, 0.9]])\n', (173, 201), True, 'import numpy as np\n'), ((203, 228), 'numpy.asarray', 'np.asarray', (['[50.0, 100.0]'], {}), '([50.0, 100.0])\n', (213, 228), True, 'import numpy as np\n'), ((549, 573), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': '"""int"""'}), "(N, dtype='int')\n", (557, 573), True, 'import numpy as np\n'), ((592, 616), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': '"""int"""'}), "(N, dtype='int')\n", (600, 616), True, 'import numpy as np\n'), ((855, 879), 'numpy.empty', 'np.empty', (['N'], {'dtype': '"""int"""'}), "(N, dtype='int')\n", (863, 879), True, 'import numpy as np\n'), ((1599, 1623), 'draw_graph.draw_tree', 'draw_tree', (['edges', 'labels'], {}), '(edges, labels)\n', (1608, 1623), False, 'from draw_graph import draw_tree\n'), ((1625, 1700), 'numpy.savez_compressed', 'np.savez_compressed', (['"""tree_data"""'], {'D': 'D', 'A': 'A', 'C': 'C', 'y': 'y', 'z': 'z', 'sigma': 'sigma', 'O': 'O'}), "('tree_data', D=D, A=A, C=C, y=y, z=z, sigma=sigma, O=O)\n", (1644, 1700), True, 'import numpy as np\n'), ((384, 401), 'networkx.random_tree', 'nx.random_tree', (['N'], {}), '(N)\n', (398, 401), True, 'import networkx as nx\n'), ((416, 438), 'networkx.balanced_tree', 'nx.balanced_tree', (['(2)', '(6)'], {}), '(2, 6)\n', (432, 438), True, 'import networkx as nx\n'), ((503, 531), 'numpy.ones', 'np.ones', (['[N, D]'], {'dtype': '"""int"""'}), "([N, D], dtype='int')\n", (510, 531), True, 'import numpy as np\n'), ((931, 943), 'numpy.arange', 'np.arange', (['K'], {}), '(K)\n', (940, 943), True, 'import numpy as np\n'), ((1385, 1403), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (1400, 1403), True, 'import numpy as np\n'), ((1278, 1290), 'numpy.arange', 'np.arange', (['K'], {}), '(K)\n', (1287, 1290), True, 'import numpy as np\n')] |
import numpy
import theano
import theano.tensor as T
numpy.set_printoptions(linewidth=230, threshold=10000, edgeitems=18)
from codec import Codec
from visualize_weights import VisualizeWeights
class RBM:
    """Restricted Boltzmann machine with binary units, built on Theano.

    *venabledp* / *henabledp* are the keep-probabilities used for dropout
    on the visible and hidden layers; activations are rescaled by these
    factors in propup()/propdown(), and binary masks are drawn by
    dropout_enabled_units().
    """
    def __init__(self, num_visible, num_hidden, minibatch_size,
                 venabledp=1, henabledp=1):
        self.num_visible = num_visible
        self.num_hidden = num_hidden
        self.minibatch_size = minibatch_size;
        self.venabledp = venabledp
        self.henabledp = henabledp
        # Weights and hidden biases start at zero; the visible bias is
        # offset to -3 (presumably to bias visibles towards 'off' for
        # sparse one-hot inputs -- TODO confirm).
        initial_weights = numpy.zeros((num_visible, num_hidden),
                                      dtype='float32')
        initial_hbias = numpy.zeros(num_hidden,
                                    dtype='float32')
        initial_vbias = numpy.zeros(num_visible,
                                    dtype='float32') - 3
        self.weights = theano.shared(value=initial_weights,
                                     name='weights')
        self.vbias = theano.shared(value=initial_vbias,
                                   name='vbias')
        self.hbias = theano.shared(value=initial_hbias,
                                  name='hbias')
        # Fixed seed so the stochastic sampling is reproducible.
        numpy_rng = numpy.random.RandomState(17)
        self.theano_rng = T.shared_randomstreams.RandomStreams(
            numpy_rng.randint(2**30))
    def propup(self, vis):
        '''Determine the activation probabilities of
        the hiddens given the states of the visibles.'''
        # Division by venabledp rescales for visible-unit dropout.
        return T.nnet.sigmoid(T.dot(vis, self.weights)
                              / self.venabledp
                              + self.hbias)
    def sample_h_given_v(self, vis, henabled):
        '''Sample the state of the hiddens given the
        states of the visibles.'''
        h_probs = self.propup(vis)
        # Bernoulli sample, then zero out dropped hidden units.
        return (self.theano_rng.binomial(size=h_probs.shape,
                                         n=1,
                                         p=h_probs,
                                         dtype='float32')
                * henabled)
    def propdown(self, hid):
        '''Determine the activation probabilities of
        the visibles given the states of the hiddens.'''
        # Division by henabledp rescales for hidden-unit dropout.
        return T.nnet.sigmoid(T.dot(hid, self.weights.T)
                              / self.henabledp
                              + self.vbias)
    def sample_v_given_h(self, hid, venabled):
        '''Sample the state of the visibles given the
        states of the hidden.'''
        v_probs = self.propdown(hid)
        # Bernoulli sample, then zero out dropped visible units.
        return (self.theano_rng.binomial(size=v_probs.shape,
                                         n=1,
                                         p=v_probs,
                                         dtype='float32')
                * venabled)
    def contrastive_divergence_1(self, v1, venabled, henabled):
        '''Determine the weight updates according to CD-1'''
        # NOTE(review): v1do is computed but never used below.
        v1do = v1 * venabled
        h1 = self.sample_h_given_v(v1, henabled)
        v2 = self.sample_v_given_h(h1, venabled)
        # Negative phase uses hidden *probabilities*, not samples.
        h2p = self.propup(v2) * henabled
        # Positive-phase minus negative-phase correlations.
        updates = T.tensordot(v1, h1, [[0],[0]]) - T.tensordot(v2, h2p, [[0],[0]])
        f = 1.0 / self.minibatch_size
        # (weight update, visible-bias update, hidden-bias update),
        # each averaged over the minibatch.
        return (updates * f,
                T.sum(v1 - v2, axis=0) * f,
                T.sum(h1 - h2p, axis=0) * f)
    def dropout_enabled_units(self):
        # Draw fresh binary dropout masks for both layers.
        venabled = self.theano_rng.binomial(size=self.vbias.shape,
                                            n=1,
                                            p=self.venabledp,
                                            dtype='float32')
        henabled = self.theano_rng.binomial(size=self.hbias.shape,
                                            n=1,
                                            p=self.henabledp,
                                            dtype='float32')
        return (venabled, henabled)
    def cd1_fun(self, vis, learning_rate=0.5):
        '''Compile a Theano function performing one CD-1 step: it maps a
        visible minibatch to (visible-bias update, visible mask, hidden
        mask) and updates the shared parameters in place.'''
        (venabled, henabled) = self.dropout_enabled_units()
        (W, V, H) = self.contrastive_divergence_1(vis, venabled, henabled)
        return theano.function(
            [vis],
            (V, venabled, henabled),
            updates=[(self.weights,
                      T.cast(self.weights + W*learning_rate,
                             'float32')),
                     (self.vbias,
                      T.cast(self.vbias + V*learning_rate,
                             'float32')),
                     (self.hbias,
                      T.cast(self.hbias + H*learning_rate,
                             'float32'))])
def tuples_to_matrix(tuplesize, tuples, num_words):
    """One-hot encode word tuples as rows of a float32 matrix.

    Slot t of a tuple sets column ``t * num_words + word`` of its row,
    so the row width is ``tuplesize * num_words`` (float32 to match the
    RBM's 'float32' parameters).

    :param tuplesize: number of word slots reserved per tuple
    :param tuples: sequence of tuples of word indices
    :param num_words: vocabulary size
    :return: numpy array of shape (len(tuples), tuplesize * num_words)
    """
    res = numpy.zeros((len(tuples), tuplesize * num_words),
                      dtype=numpy.float32)
    # enumerate() replaces the index-based range(len(...)) double loop.
    for row, tup in enumerate(tuples):
        for slot, word in enumerate(tup):
            res[row, slot * num_words + word] = 1.0
    return res
def test():
    """Train the RBM on word tuples produced by Codec and report progress.

    Runs 1000 epochs of minibatch CD-1 training with hidden-unit dropout
    (keep-prob 0.7), printing example reconstructions and a per-epoch
    reconstruction error, and drawing weights via VisualizeWeights.
    """
    minibatch_size = 100
    num_words = 40
    tuplesize = 5
    num_visible = tuplesize*num_words
    num_hidden = 140
    # Codec supplies the corpus and its (tuple -> one-hot row) encoding.
    codec = Codec(tuplesize, num_words)
    tuples = codec.tuples
    words = codec.words
    encoded = codec.tuples_to_matrix()
    (num_data, _) = encoded.shape
    print(words)
    print('data count: ', num_data)
    rbm = RBM(num_visible = num_visible,
              num_hidden = num_hidden,
              minibatch_size = minibatch_size,
              venabledp=1.0,
              henabledp=0.7)
    # NOTE(review): id_indices/input_data are never used before
    # input_data is reassigned inside the training loop below.
    id_indices = numpy.random.randint(low=0, high=num_data, size=minibatch_size)
    input_data = T.constant(encoded[id_indices])
    #print(input_data)
    #print(rbm.propup(input_data).eval())
    #h1samples = rbm.sample_h_given_v(input_data).eval()
    #print(h1samples)
    #print(rbm.propdown(h1samples).eval())
    #v2samples = rbm.sample_v_given_h(h1samples).eval()
    #print(v2samples)
    #(W,H,V) = rbm.contrastive_divergence_1(input_data)
    #print(W.eval())
    #print(H.eval())
    #print(V.eval())
    # Reconstruction pass with all dropout masks forced on (reporting only).
    all_h_enabled = numpy.ones(num_hidden)
    all_v_enabled = numpy.ones(num_visible)
    xvis = T.fmatrix('xvis')
    h1samples = rbm.sample_h_given_v(xvis, all_h_enabled)
    v2samples = rbm.sample_v_given_h(h1samples, all_v_enabled)
    sample_vhv = theano.function([xvis], v2samples)
    example_indices = numpy.random.randint(low=0, high=num_data, size=minibatch_size)
    example_input_data = encoded[example_indices]
    num_examples = min(10, minibatch_size)
    def show_examples():
        # Print a few inputs alongside their v->h->v reconstructions.
        rec = sample_vhv(example_input_data)
        for example in range(num_examples):
            print('input words:',
                  [(t+1, words[idx])
                   for t in range(tuplesize)
                   for idx in range(num_words)
                   if example_input_data[example, t*num_words + idx]])
            print('reconstructed words:',
                  [(t+1, words[idx])
                   for t in range(tuplesize)
                   for idx in range(num_words)
                   if rec[example, t*num_words + idx]])
    def report_hidden():
        # For each hidden unit, list the words it connects to strongly.
        weights = rbm.weights.get_value()
        for h in range(num_hidden):
            print('hidden ', h)
            for block in range(tuplesize):
                for word in range(num_words):
                    w = weights[block*num_words+word, h]
                    if w > 0.5:
                        print('  %2i %8s %4.1f' % (block, words[word], w))
    vis = T.fmatrix('vis')
    train = rbm.cd1_fun(vis)
    draw = VisualizeWeights('Dropout (vp:%4.2f, hp:%4.2f)' % (rbm.venabledp, rbm.henabledp),
                            rbm, tuplesize, words, num_hidden,
                            num_visible)
    for epoch in range(1000):
        show_examples()
        all_vdiffs = numpy.zeros(num_visible)
        print('epoch ', epoch)
        # Reshuffle every epoch before slicing out contiguous minibatches.
        numpy.random.shuffle(encoded)
        for minibatch in range(num_data // minibatch_size):
            mb_start = minibatch * minibatch_size;
            mb_end = mb_start + minibatch_size;
            input_data_indices = numpy.arange(mb_start, mb_end)
            encoded_input = encoded[input_data_indices]
            input_data = encoded_input
            (vdiffs, venabled, henabled) = train(input_data)
            all_vdiffs = all_vdiffs + numpy.abs(vdiffs)
            #print('venabled', venabled)
            #print('henabled', henabled)
        print('reconstruction error: ', numpy.sum(all_vdiffs) * minibatch_size)
        #print(numpy.ndarray.astype(rbm.weights.get_value()*100, numpy.int32))
        #print(numpy.ndarray.astype(rbm.vbias.get_value()*100, numpy.int32))
        #print(numpy.ndarray.astype(rbm.hbias.get_value()*100, numpy.int32))
        draw.epoch_finished(epoch)
    report_hidden()
test()
| [
"numpy.arange",
"theano.tensor.dot",
"numpy.random.RandomState",
"theano.tensor.constant",
"theano.shared",
"theano.function",
"visualize_weights.VisualizeWeights",
"numpy.abs",
"numpy.ones",
"codec.Codec",
"theano.tensor.sum",
"theano.tensor.fmatrix",
"numpy.set_printoptions",
"theano.ten... | [((53, 121), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'linewidth': '(230)', 'threshold': '(10000)', 'edgeitems': '(18)'}), '(linewidth=230, threshold=10000, edgeitems=18)\n', (75, 121), False, 'import numpy\n'), ((4997, 5024), 'codec.Codec', 'Codec', (['tuplesize', 'num_words'], {}), '(tuplesize, num_words)\n', (5002, 5024), False, 'from codec import Codec\n'), ((5405, 5468), 'numpy.random.randint', 'numpy.random.randint', ([], {'low': '(0)', 'high': 'num_data', 'size': 'minibatch_size'}), '(low=0, high=num_data, size=minibatch_size)\n', (5425, 5468), False, 'import numpy\n'), ((5486, 5517), 'theano.tensor.constant', 'T.constant', (['encoded[id_indices]'], {}), '(encoded[id_indices])\n', (5496, 5517), True, 'import theano.tensor as T\n'), ((5934, 5956), 'numpy.ones', 'numpy.ones', (['num_hidden'], {}), '(num_hidden)\n', (5944, 5956), False, 'import numpy\n'), ((5977, 6000), 'numpy.ones', 'numpy.ones', (['num_visible'], {}), '(num_visible)\n', (5987, 6000), False, 'import numpy\n'), ((6013, 6030), 'theano.tensor.fmatrix', 'T.fmatrix', (['"""xvis"""'], {}), "('xvis')\n", (6022, 6030), True, 'import theano.tensor as T\n'), ((6169, 6203), 'theano.function', 'theano.function', (['[xvis]', 'v2samples'], {}), '([xvis], v2samples)\n', (6184, 6203), False, 'import theano\n'), ((6227, 6290), 'numpy.random.randint', 'numpy.random.randint', ([], {'low': '(0)', 'high': 'num_data', 'size': 'minibatch_size'}), '(low=0, high=num_data, size=minibatch_size)\n', (6247, 6290), False, 'import numpy\n'), ((7369, 7385), 'theano.tensor.fmatrix', 'T.fmatrix', (['"""vis"""'], {}), "('vis')\n", (7378, 7385), True, 'import theano.tensor as T\n'), ((7427, 7561), 'visualize_weights.VisualizeWeights', 'VisualizeWeights', (["('Dropout (vp:%4.2f, hp:%4.2f)' % (rbm.venabledp, rbm.henabledp))", 'rbm', 'tuplesize', 'words', 'num_hidden', 'num_visible'], {}), "('Dropout (vp:%4.2f, hp:%4.2f)' % (rbm.venabledp, rbm.\n henabledp), rbm, tuplesize, words, num_hidden, 
num_visible)\n", (7443, 7561), False, 'from visualize_weights import VisualizeWeights\n'), ((532, 587), 'numpy.zeros', 'numpy.zeros', (['(num_visible, num_hidden)'], {'dtype': '"""float32"""'}), "((num_visible, num_hidden), dtype='float32')\n", (543, 587), False, 'import numpy\n'), ((650, 690), 'numpy.zeros', 'numpy.zeros', (['num_hidden'], {'dtype': '"""float32"""'}), "(num_hidden, dtype='float32')\n", (661, 690), False, 'import numpy\n'), ((856, 908), 'theano.shared', 'theano.shared', ([], {'value': 'initial_weights', 'name': '"""weights"""'}), "(value=initial_weights, name='weights')\n", (869, 908), False, 'import theano\n'), ((967, 1015), 'theano.shared', 'theano.shared', ([], {'value': 'initial_vbias', 'name': '"""vbias"""'}), "(value=initial_vbias, name='vbias')\n", (980, 1015), False, 'import theano\n'), ((1072, 1120), 'theano.shared', 'theano.shared', ([], {'value': 'initial_hbias', 'name': '"""hbias"""'}), "(value=initial_hbias, name='hbias')\n", (1085, 1120), False, 'import theano\n'), ((1176, 1204), 'numpy.random.RandomState', 'numpy.random.RandomState', (['(17)'], {}), '(17)\n', (1200, 1204), False, 'import numpy\n'), ((7688, 7712), 'numpy.zeros', 'numpy.zeros', (['num_visible'], {}), '(num_visible)\n', (7699, 7712), False, 'import numpy\n'), ((7752, 7781), 'numpy.random.shuffle', 'numpy.random.shuffle', (['encoded'], {}), '(encoded)\n', (7772, 7781), False, 'import numpy\n'), ((751, 792), 'numpy.zeros', 'numpy.zeros', (['num_visible'], {'dtype': '"""float32"""'}), "(num_visible, dtype='float32')\n", (762, 792), False, 'import numpy\n'), ((3045, 3076), 'theano.tensor.tensordot', 'T.tensordot', (['v1', 'h1', '[[0], [0]]'], {}), '(v1, h1, [[0], [0]])\n', (3056, 3076), True, 'import theano.tensor as T\n'), ((3078, 3110), 'theano.tensor.tensordot', 'T.tensordot', (['v2', 'h2p', '[[0], [0]]'], {}), '(v2, h2p, [[0], [0]])\n', (3089, 3110), True, 'import theano.tensor as T\n'), ((7974, 8004), 'numpy.arange', 'numpy.arange', (['mb_start', 'mb_end'], {}), 
'(mb_start, mb_end)\n', (7986, 8004), False, 'import numpy\n'), ((3193, 3215), 'theano.tensor.sum', 'T.sum', (['(v1 - v2)'], {'axis': '(0)'}), '(v1 - v2, axis=0)\n', (3198, 3215), True, 'import theano.tensor as T\n'), ((3237, 3260), 'theano.tensor.sum', 'T.sum', (['(h1 - h2p)'], {'axis': '(0)'}), '(h1 - h2p, axis=0)\n', (3242, 3260), True, 'import theano.tensor as T\n'), ((8199, 8216), 'numpy.abs', 'numpy.abs', (['vdiffs'], {}), '(vdiffs)\n', (8208, 8216), False, 'import numpy\n'), ((8339, 8360), 'numpy.sum', 'numpy.sum', (['all_vdiffs'], {}), '(all_vdiffs)\n', (8348, 8360), False, 'import numpy\n'), ((1496, 1520), 'theano.tensor.dot', 'T.dot', (['vis', 'self.weights'], {}), '(vis, self.weights)\n', (1501, 1520), True, 'import theano.tensor as T\n'), ((2198, 2224), 'theano.tensor.dot', 'T.dot', (['hid', 'self.weights.T'], {}), '(hid, self.weights.T)\n', (2203, 2224), True, 'import theano.tensor as T\n'), ((4156, 4207), 'theano.tensor.cast', 'T.cast', (['(self.weights + W * learning_rate)', '"""float32"""'], {}), "(self.weights + W * learning_rate, 'float32')\n", (4162, 4207), True, 'import theano.tensor as T\n'), ((4293, 4342), 'theano.tensor.cast', 'T.cast', (['(self.vbias + V * learning_rate)', '"""float32"""'], {}), "(self.vbias + V * learning_rate, 'float32')\n", (4299, 4342), True, 'import theano.tensor as T\n'), ((4428, 4477), 'theano.tensor.cast', 'T.cast', (['(self.hbias + H * learning_rate)', '"""float32"""'], {}), "(self.hbias + H * learning_rate, 'float32')\n", (4434, 4477), True, 'import theano.tensor as T\n')] |
from __future__ import absolute_import
import numpy as np
from pyti import catch_errors
from pyti.function_helper import fill_for_noncomputable_vals
from six.moves import range
def vertical_horizontal_filter(data, period):
    """
    Vertical Horizontal Filter.

    Formula:
    ABS(pHIGH - pLOW) / SUM(ABS(Pi - Pi-1))

    The numerator is the total price range of the trailing *period*
    window; the denominator is the sum of the absolute one-step price
    changes inside that window.
    """
    catch_errors.check_for_period_error(data, period)
    vhf = []
    for idx in range(period - 1, len(data)):
        window = data[idx + 1 - period:idx + 1]
        price_range = abs(np.max(window) - np.min(window))
        # Start at i=1: the original comprehension started at i=0, so
        # window[i-1] wrapped around to window[-1] and added a spurious
        # |first - last| term to the denominator.
        movement = sum(abs(window[i] - window[i - 1])
                       for i in range(1, len(window)))
        vhf.append(price_range / movement)
    # Pad the warm-up region (first period-1 entries) as non-computable.
    vhf = fill_for_noncomputable_vals(data, vhf)
    return vhf
| [
"pyti.function_helper.fill_for_noncomputable_vals",
"pyti.catch_errors.check_for_period_error",
"numpy.min",
"numpy.max"
] | [((335, 384), 'pyti.catch_errors.check_for_period_error', 'catch_errors.check_for_period_error', (['data', 'period'], {}), '(data, period)\n', (370, 384), False, 'from pyti import catch_errors\n'), ((665, 703), 'pyti.function_helper.fill_for_noncomputable_vals', 'fill_for_noncomputable_vals', (['data', 'vhf'], {}), '(data, vhf)\n', (692, 703), False, 'from pyti.function_helper import fill_for_noncomputable_vals\n'), ((401, 439), 'numpy.max', 'np.max', (['data[idx + 1 - period:idx + 1]'], {}), '(data[idx + 1 - period:idx + 1])\n', (407, 439), True, 'import numpy as np\n'), ((448, 486), 'numpy.min', 'np.min', (['data[idx + 1 - period:idx + 1]'], {}), '(data[idx + 1 - period:idx + 1])\n', (454, 486), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy as sch
from .metrics import get_corr_dist
from .quasi import get_quasi_diag
def get_rec_bipart(cov, sort_idx):
    """Allocate portfolio weights by recursive bisection.

    Params
    ------
    cov: pd.DataFrame
    sort_idx: pd.Series
        Index ordered by quasi diagonalization

    Returns
    -------
    pd.Series
    """
    weight = pd.Series(1, index=sort_idx)
    # Begin with a single cluster holding every asset.
    clusters = [sort_idx]
    while clusters:
        # Cut each remaining cluster in half; singletons drop out.
        halves = []
        for items in clusters:
            if len(items) >= 2:
                mid = len(items) // 2
                halves.append(items[0:mid])
                halves.append(items[mid:len(items)])
        clusters = halves
        # Consecutive halves share a parent: split the parent's weight
        # between them in inverse proportion to their cluster variance.
        for left, right in zip(clusters[0::2], clusters[1::2]):
            var_left = get_cluster_var(cov, left)
            var_right = get_cluster_var(cov, right)
            alpha = var_right / (var_left + var_right)
            weight[left] *= alpha
            weight[right] *= 1 - alpha
    return weight
def get_ivp(cov):
    """Compute the inverse-variance portfolio weights.

    Params
    ------
    cov: pd.DataFrame

    Returns
    -------
    np.array
    """
    # Weight each asset by 1/variance, then normalize to sum to one.
    inv_var = 1. / np.diag(cov)
    return inv_var / inv_var.sum()
def get_cluster_var(cov, cl_items):
    """Compute the variance of one cluster's inverse-variance portfolio.

    Params
    ------
    cov: pd.DataFrame
    cl_items: pd.Series

    Returns
    -------
    float
    """
    sub_cov = cov.loc[cl_items, cl_items]
    w = get_ivp(sub_cov).reshape(-1, 1)
    # w' . Sigma . w, extracted as a scalar.
    return np.dot(np.dot(w.T, sub_cov), w)[0, 0]
def get_hrp(cov, corr):
    """Construct a hierarchical risk parity portfolio.

    Params
    ------
    cov: pd.DataFrame
    corr: pd.DataFrame

    Returns
    -------
    pd.Series
    """
    # Single-linkage tree over the correlation-distance matrix.
    link = sch.linkage(get_corr_dist(corr), 'single')
    # Quasi-diagonal ordering comes back as positions; map to labels.
    order = get_quasi_diag(link)
    labels = corr.index[order].tolist()
    return get_rec_bipart(cov, labels).sort_index()
"pandas.Series",
"numpy.dot",
"scipy.cluster.hierarchy.linkage",
"numpy.diag"
] | [((422, 450), 'pandas.Series', 'pd.Series', (['(1)'], {'index': 'sort_idx'}), '(1, index=sort_idx)\n', (431, 450), True, 'import pandas as pd\n'), ((1962, 1989), 'scipy.cluster.hierarchy.linkage', 'sch.linkage', (['dist', '"""single"""'], {}), "(dist, 'single')\n", (1973, 1989), True, 'import scipy.cluster.hierarchy as sch\n'), ((1338, 1350), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (1345, 1350), True, 'import numpy as np\n'), ((1681, 1700), 'numpy.dot', 'np.dot', (['w.T', 'cov_cl'], {}), '(w.T, cov_cl)\n', (1687, 1700), True, 'import numpy as np\n')] |
# Reproduction from Payan and Montana
import torch
import torch.nn as nn
import torch.nn.functional as F
from os import path
import os
import numpy as np
import pandas as pd
from copy import copy, deepcopy
class LargeAutoEncoder(nn.Module):
    """Sparse 3D convolutional autoencoder for transfer learning.

    The input volume is max-pooled once, encoded by a single Conv3d
    with 150 filters, and reconstructed with the matching transposed
    convolution; forward() returns (reconstruction, hidden, target).
    """
    def __init__(self):
        super(LargeAutoEncoder, self).__init__()
        self.downsample = nn.MaxPool3d(2, 2)
        self.encode = nn.Conv3d(1, 150, 5)
        self.decode = nn.ConvTranspose3d(150, 1, 5)

    def forward(self, x):
        pooled = self.downsample(x)
        hidden = F.relu(self.encode(pooled))
        reconstruction = F.relu(self.decode(hidden))
        # The pooled input is the reconstruction target for the loss.
        return reconstruction, hidden, pooled
class LargeConvolutionalNetwork(nn.Module):
    """Classifier sharing the 150-filter encoder of LargeAutoEncoder.

    Pipeline: max-pool /2, Conv3d(1->150, k=5) + ReLU, max-pool /5,
    flatten to 150*11*13*11 features, then two fully connected layers.
    """
    def __init__(self, n_classes=2):
        super(LargeConvolutionalNetwork, self).__init__()
        self.downsample = nn.MaxPool3d(2, 2)
        self.encode = nn.Conv3d(1, 150, 5)
        self.pool = nn.MaxPool3d(5, 5)
        self.fc1 = nn.Linear(150 * 11 * 13 * 11, 800)
        self.fc2 = nn.Linear(800, n_classes)

    def forward(self, x):
        features = self.pool(F.relu(self.encode(self.downsample(x))))
        flat = features.view(-1, 150 * 11 * 13 * 11)
        return self.fc2(F.relu(self.fc1(flat)))
class AdaptativeAutoEncoder(nn.Module):
    """Sparse 3D convolutional autoencoder with a configurable encoder
    width (n_filters); otherwise identical to LargeAutoEncoder."""

    def __init__(self, n_filters):
        super(AdaptativeAutoEncoder, self).__init__()
        self.downsample = nn.MaxPool3d(2, 2)
        self.encode = nn.Conv3d(1, n_filters, 5)
        self.decode = nn.ConvTranspose3d(n_filters, 1, 5)

    def forward(self, x):
        pooled = self.downsample(x)
        hidden = F.relu(self.encode(pooled))
        reconstruction = F.relu(self.decode(hidden))
        # The pooled input is the reconstruction target for the loss.
        return reconstruction, hidden, pooled
class AdaptativeConvolutionalNetwork(nn.Module):
    """Classifier whose encoder width (n_filters) is configurable.

    Same pipeline as LargeConvolutionalNetwork, plus optional dropout
    before the fully connected layers (applied only when forward() is
    called with train=True).
    """
    def __init__(self, n_filters, dropout=0, n_classes=2):
        super(AdaptativeConvolutionalNetwork, self).__init__()
        self.downsample = nn.MaxPool3d(2, 2)
        self.encode = nn.Conv3d(1, n_filters, 5)
        self.pool = nn.MaxPool3d(5, 5)
        self.fc1 = nn.Linear(n_filters * 11 * 13 * 11, 800)
        self.fc2 = nn.Linear(800, n_classes)
        self.n_filters = n_filters
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, train=False):
        features = self.pool(F.relu(self.encode(self.downsample(x))))
        flat = features.view(-1, self.n_filters * 11 * 13 * 11)
        if train:
            # Dropout is gated on the caller's flag, not Module.training.
            flat = self.dropout(flat)
        return self.fc2(F.relu(self.fc1(flat)))
def l1_penalty(var):
    """Return the L1 norm (sum of absolute values) of tensor *var*."""
    return var.abs().sum()
def test_autoencoder(model, dataloader, criterion=nn.MSELoss(), gpu=False):
total_loss = 0
with torch.no_grad():
for sample in dataloader:
if gpu:
images, diagnoses = sample['image'].cuda(), sample['diagnosis'].cuda()
else:
images, diagnoses = sample['image'], sample['diagnosis']
outputs, hidden_layer, downsample = model(images)
loss = criterion(outputs, downsample)
total_loss += loss
print('Loss of the model: ' + str(total_loss))
return total_loss
def save_results(best_params, validloader, test_method, results_path, name, denomination='Accuracy', testloader=None,
                 gpu=False):
    """Evaluate the best model and write a summary plus its parameters.

    Fixes two defects of the original implementation:
    * it evaluated on the module-level globals ``trainloader`` /
      ``trainset`` / ``testset`` instead of the ``validloader``
      argument, so the reported "validation" figures were actually
      training figures (and the function only worked when run as the
      training script);
    * the summary file was only closed when ``testloader`` was given.

    Parameters
    ----------
    best_params: dict with 'best_model', 'fold', 'best_epoch' and
        'training_time' entries.
    validloader: validation DataLoader (must expose ``.dataset``).
    test_method: callable(model, loader, gpu=...) returning the metric.
    results_path: directory receiving 'best_<name>.txt' and
        'best_parameters_<name>.tar'.
    name: stem used in the output file names.
    denomination: 'Accuracy' (reported as percentages) or a loss-like
        metric (reported normalized by the dataset size).
    testloader: optional test DataLoader (must expose ``.dataset``).
    gpu: forwarded to *test_method*.
    """
    if testloader is not None:
        n_test = len(testloader.dataset)
        acc_test = test_method(best_params['best_model'], testloader, gpu=gpu)
    else:
        n_test = 0
        acc_test = 0
    n_valid = len(validloader.dataset)
    acc_valid = test_method(best_params['best_model'], validloader, gpu=gpu)
    output_name = 'best_' + name + '.txt'
    # 'with' guarantees the summary file is closed on every path.
    with open(path.join(results_path, output_name), 'w') as text_file:
        text_file.write('Best fold: %i \n' % best_params['fold'])
        text_file.write('Best epoch: %i \n' % (best_params['best_epoch'] + 1))
        text_file.write('Time of training: %d s \n' % best_params['training_time'])
        if denomination == 'Accuracy':
            text_file.write(denomination + ' on validation set: %.2f %% \n' % acc_valid)
            if testloader is not None:
                text_file.write(denomination + ' on test set: %.2f %% \n' % acc_test)
        else:
            text_file.write(denomination + ' on validation set: %.3E \n' % (acc_valid / n_valid))
            if testloader is not None:
                text_file.write(denomination + ' on test set: %.3E \n' % (acc_test / n_test))
    if denomination == 'Accuracy':
        print(denomination + ' of the network on the %i validation images: %.2f %%' % (n_valid, acc_valid))
        print(denomination + ' of the network on the %i test images: %.2f %%' % (n_test, acc_test))
    else:
        print(denomination + ' of the network on the %i validation images: %.3E' % (n_valid, acc_valid))
        print(denomination + ' of the network on the %i test images: %.3E' % (n_test, acc_test))
    parameters_name = 'best_parameters_' + name + '.tar'
    torch.save(best_params['best_model'].state_dict(), path.join(results_path, parameters_name))
def load_state_dict(self, state_dict):
    """
    Copy every entry of *state_dict* whose name exists in *self* into
    the corresponding parameter of *self*; unknown names are silently
    ignored (this is what lets a pretrained layer be loaded into a
    larger network).
    :param self: the Module instance
    :param state_dict: The dictionary of pretrained parameters
    """
    target_state = self.state_dict()
    for key, value in state_dict.items():
        if key not in target_state:
            continue
        # backwards compatibility for serialized parameters
        target_state[key].copy_(value.data)
if __name__ == '__main__':
    # Entry point: pre-train the sparse autoencoder on MRI volumes, then
    # train a classifier that reuses (and freezes) the learned encoder.
    from data_loader import MriBrainDataset, ToTensor, GaussianSmoothing
    from training_functions import CrossValidationSplit, cross_validation, test
    import torch.optim as optim
    from torch.utils.data import DataLoader
    from time import time
    import argparse
    import torchvision
    parser = argparse.ArgumentParser()
    # Mandatory arguments
    parser.add_argument("train_path", type=str,
                        help='path to your list of subjects for training')
    parser.add_argument("results_path", type=str,
                        help="where the outputs are stored")
    parser.add_argument("caps_path", type=str,
                        help="path to your caps folder")
    # Network structure
    parser.add_argument('-filters', '--n_filters', type=int, default=150,
                        help='number of filters used in the encoding convolutional layer')
    parser.add_argument('--n_classes', type=int, default=2,
                        help='Number of classes in the dataset')
    # Dataset management
    parser.add_argument('--bids', action='store_true', default=False)
    parser.add_argument('--sigma', type=float, default=0,
                        help='Size of the Gaussian smoothing kernel (preprocessing)')
    # Training arguments
    parser.add_argument("-e", "--epochs", type=int, default=2,
                        help="number of loops on the whole dataset")
    parser.add_argument('-lra', '--learning_rate_auto', type=float, default=1,
                        help='the learning rate of the optimizer of the sparse autoencoder ( * 0.0005)')
    parser.add_argument('-lrc', '--learning_rate_class', type=float, default=1,
                        help='the learning rate of the optimizer of the classifier ( * 0.0005)')
    parser.add_argument("-l1", "--lambda1", type=float, default=1,
                        help="coefficient of the L1 regularization for the sparsity of the autoencoder")
    parser.add_argument('-cv', '--cross_validation', type=int, default=10,
                        help='cross validation parameter')
    parser.add_argument('--dropout', '-d', type=float, default=0.5,
                        help='Dropout rate before FC layers')
    parser.add_argument('--batch_size', '-batch', type=int, default=4,
                        help="The size of the batches to train the network")
    # Managing output
    parser.add_argument("-n", "--name", type=str, default='network',
                        help="name given to the outputs and checkpoints of the parameters")
    parser.add_argument("-save", "--save_interval", type=int, default=1,
                        help="the number of epochs done between the tests and saving")
    # Managing device
    parser.add_argument('--gpu', action='store_true', default=False,
                        help='Uses gpu instead of cpu if cuda is available')
    parser.add_argument('--on_cluster', action='store_true', default=False,
                        help='to work on the cluster of the ICM')
    args = parser.parse_args()
    # Create the per-run output directory.
    results_path = path.join(args.results_path, args.name)
    if not path.exists(results_path):
        os.makedirs(results_path)
    if args.gpu and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device("cpu")
    # Autoencoder training
    autoencoder = AdaptativeAutoEncoder(args.n_filters).to(device=device)
    lr_autoencoder = 0.00005 * args.learning_rate_auto
    lr_classifier = 0.00005 * args.learning_rate_class
    batch_size = args.batch_size
    train_prop = 0.85
    val_prop = 0.15
    tol = 1e-2
    # Gaussian smoothing then tensor conversion, applied to every volume.
    composed = torchvision.transforms.Compose([GaussianSmoothing(sigma=args.sigma), ToTensor(gpu=args.gpu)])
    optimizer = optim.Adam(autoencoder.parameters(), lr=lr_autoencoder)
    dataset = MriBrainDataset(args.train_path, args.caps_path, transform=composed, on_cluster=args.on_cluster)
    cross_val = CrossValidationSplit(dataset, cv=train_prop, stratified=True, shuffle_diagnosis=True, val_prop=val_prop)
    trainset, validset, testset = cross_val(dataset)
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=4)
    validloader = DataLoader(validset, batch_size=batch_size, shuffle=False, num_workers=4)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=4)
    # Training-state bookkeeping.
    epoch = 0
    loss_train = np.inf
    loss_valid_min = np.inf
    best_model = None
    best_epoch = 0
    t0 = time()
    name = 'autoencoder_' + args.name
    filename = path.join(results_path, name + '.tsv')
    criterion = nn.MSELoss()
    # NOTE(review): the header names say acc_* but the rows appended
    # below carry loss_* values.
    results_df = pd.DataFrame(columns=['epoch', 'training_time', 'acc_train', 'acc_validation'])
    with open(filename, 'w') as f:
        results_df.to_csv(f, index=False, sep='\t')
    flag = True
    # Train until the epoch budget is reached or the relative change in
    # training loss falls below tol.
    while flag:
        prev_loss_train = loss_train
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            if args.gpu:
                inputs = data['image'].cuda()
            else:
                inputs = data['image']
            outputs, hidden_layer, downsample = autoencoder(inputs)
            MSEloss = criterion(outputs, downsample)
            # L1 penalty on the hidden activations enforces sparsity.
            l1_regularization = args.lambda1 * l1_penalty(hidden_layer)
            loss = MSEloss + l1_regularization
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 10 == 9:  # print every 10 mini-batches
                print('[%d, %d] loss: %f' %
                      (epoch + 1, i + 1, running_loss))
                running_loss = 0.0
        print('Finished Epoch: %d' % (epoch + 1))
        if epoch % args.save_interval == args.save_interval - 1:
            # Periodic evaluation + checkpoint of the best model so far.
            training_time = time() - t0
            loss_train = test_autoencoder(autoencoder, trainloader, gpu=args.gpu)
            loss_valid = test_autoencoder(autoencoder, validloader, gpu=args.gpu)
            row = np.array([epoch + 1, training_time, loss_train, loss_valid]).reshape(1, -1)
            row_df = pd.DataFrame(row, columns=['epoch', 'training_time', 'loss_train', 'loss_validation'])
            with open(filename, 'a') as f:
                row_df.to_csv(f, header=False, index=False, sep='\t')
            if loss_valid < loss_valid_min:
                loss_valid_min = copy(loss_valid)
                best_epoch = copy(epoch)
                best_model = deepcopy(autoencoder)
        epoch += 1
        print('Convergence criterion: ', torch.abs((prev_loss_train - loss_train)/loss_train))
        flag = epoch < args.epochs and torch.abs(prev_loss_train - loss_train)/loss_train > tol
    training_time = time() - t0
    best_params = {'training_time': time() - t0,
                   'best_epoch': best_epoch,
                   'best_model': best_model,
                   'loss_valid_min': loss_valid_min,
                   'fold': -1}
    save_results(best_params, validloader, test_autoencoder, results_path, name, testloader=testloader,
                 denomination='Loss', gpu=args.gpu)
    # Classifier training: reuse the pretrained encoder and freeze it.
    classifier = AdaptativeConvolutionalNetwork(args.n_filters, args.dropout,
                                                n_classes=args.n_classes).to(device=device)
    # Load pretrained layer in classifier
    load_state_dict(classifier, best_model.state_dict())
    classifier.encode.bias.requires_grad = False
    classifier.encode.weight.requires_grad = False
    name = 'classifier_' + args.name
    best_params = cross_validation(classifier, trainset, batch_size=batch_size, folds=args.cross_validation,
                                   epochs=args.epochs, results_path=results_path, model_name=name,
                                   save_interval=args.save_interval, gpu=args.gpu, lr=lr_classifier,
                                   tol=1.0)
| [
"torch.nn.Dropout",
"data_loader.ToTensor",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"copy.deepcopy",
"copy.copy",
"data_loader.GaussianSmoothing",
"os.path.exists",
"argparse.ArgumentParser",
"torch.nn.MaxPool3d",
"pandas.DataFrame",
"training_functions.CrossValidationS... | [((2813, 2825), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2823, 2825), True, 'import torch.nn as nn\n'), ((6034, 6059), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6057, 6059), False, 'import argparse\n'), ((8789, 8828), 'os.path.join', 'path.join', (['args.results_path', 'args.name'], {}), '(args.results_path, args.name)\n', (8798, 8828), False, 'from os import path\n'), ((9533, 9633), 'data_loader.MriBrainDataset', 'MriBrainDataset', (['args.train_path', 'args.caps_path'], {'transform': 'composed', 'on_cluster': 'args.on_cluster'}), '(args.train_path, args.caps_path, transform=composed,\n on_cluster=args.on_cluster)\n', (9548, 9633), False, 'from data_loader import MriBrainDataset, ToTensor, GaussianSmoothing\n'), ((9647, 9755), 'training_functions.CrossValidationSplit', 'CrossValidationSplit', (['dataset'], {'cv': 'train_prop', 'stratified': '(True)', 'shuffle_diagnosis': '(True)', 'val_prop': 'val_prop'}), '(dataset, cv=train_prop, stratified=True,\n shuffle_diagnosis=True, val_prop=val_prop)\n', (9667, 9755), False, 'from training_functions import CrossValidationSplit, cross_validation, test\n'), ((9825, 9897), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(4)'}), '(trainset, batch_size=batch_size, shuffle=True, num_workers=4)\n', (9835, 9897), False, 'from torch.utils.data import DataLoader\n'), ((9916, 9989), 'torch.utils.data.DataLoader', 'DataLoader', (['validset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(validset, batch_size=batch_size, shuffle=False, num_workers=4)\n', (9926, 9989), False, 'from torch.utils.data import DataLoader\n'), ((10007, 10079), 'torch.utils.data.DataLoader', 'DataLoader', (['testset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(testset, batch_size=batch_size, shuffle=False, num_workers=4)\n', (10017, 
10079), False, 'from torch.utils.data import DataLoader\n'), ((10197, 10203), 'time.time', 'time', ([], {}), '()\n', (10201, 10203), False, 'from time import time\n'), ((10257, 10295), 'os.path.join', 'path.join', (['results_path', "(name + '.tsv')"], {}), "(results_path, name + '.tsv')\n", (10266, 10295), False, 'from os import path\n'), ((10312, 10324), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (10322, 10324), True, 'import torch.nn as nn\n'), ((10343, 10422), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['epoch', 'training_time', 'acc_train', 'acc_validation']"}), "(columns=['epoch', 'training_time', 'acc_train', 'acc_validation'])\n", (10355, 10422), True, 'import pandas as pd\n'), ((13244, 13487), 'training_functions.cross_validation', 'cross_validation', (['classifier', 'trainset'], {'batch_size': 'batch_size', 'folds': 'args.cross_validation', 'epochs': 'args.epochs', 'results_path': 'results_path', 'model_name': 'name', 'save_interval': 'args.save_interval', 'gpu': 'args.gpu', 'lr': 'lr_classifier', 'tol': '(1.0)'}), '(classifier, trainset, batch_size=batch_size, folds=args.\n cross_validation, epochs=args.epochs, results_path=results_path,\n model_name=name, save_interval=args.save_interval, gpu=args.gpu, lr=\n lr_classifier, tol=1.0)\n', (13260, 13487), False, 'from training_functions import CrossValidationSplit, cross_validation, test\n'), ((405, 423), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(2)', '(2)'], {}), '(2, 2)\n', (417, 423), True, 'import torch.nn as nn\n'), ((446, 466), 'torch.nn.Conv3d', 'nn.Conv3d', (['(1)', '(150)', '(5)'], {}), '(1, 150, 5)\n', (455, 466), True, 'import torch.nn as nn\n'), ((489, 518), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['(150)', '(1)', '(5)'], {}), '(150, 1, 5)\n', (507, 518), True, 'import torch.nn as nn\n'), ((903, 921), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(2)', '(2)'], {}), '(2, 2)\n', (915, 921), True, 'import torch.nn as nn\n'), ((944, 964), 'torch.nn.Conv3d', 'nn.Conv3d', 
(['(1)', '(150)', '(5)'], {}), '(1, 150, 5)\n', (953, 964), True, 'import torch.nn as nn\n'), ((985, 1003), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(5)', '(5)'], {}), '(5, 5)\n', (997, 1003), True, 'import torch.nn as nn\n'), ((1023, 1057), 'torch.nn.Linear', 'nn.Linear', (['(150 * 11 * 13 * 11)', '(800)'], {}), '(150 * 11 * 13 * 11, 800)\n', (1032, 1057), True, 'import torch.nn as nn\n'), ((1077, 1102), 'torch.nn.Linear', 'nn.Linear', (['(800)', 'n_classes'], {}), '(800, n_classes)\n', (1086, 1102), True, 'import torch.nn as nn\n'), ((1560, 1578), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(2)', '(2)'], {}), '(2, 2)\n', (1572, 1578), True, 'import torch.nn as nn\n'), ((1601, 1627), 'torch.nn.Conv3d', 'nn.Conv3d', (['(1)', 'n_filters', '(5)'], {}), '(1, n_filters, 5)\n', (1610, 1627), True, 'import torch.nn as nn\n'), ((1650, 1685), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['n_filters', '(1)', '(5)'], {}), '(n_filters, 1, 5)\n', (1668, 1685), True, 'import torch.nn as nn\n'), ((2102, 2120), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(2)', '(2)'], {}), '(2, 2)\n', (2114, 2120), True, 'import torch.nn as nn\n'), ((2143, 2169), 'torch.nn.Conv3d', 'nn.Conv3d', (['(1)', 'n_filters', '(5)'], {}), '(1, n_filters, 5)\n', (2152, 2169), True, 'import torch.nn as nn\n'), ((2190, 2208), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['(5)', '(5)'], {}), '(5, 5)\n', (2202, 2208), True, 'import torch.nn as nn\n'), ((2228, 2268), 'torch.nn.Linear', 'nn.Linear', (['(n_filters * 11 * 13 * 11)', '(800)'], {}), '(n_filters * 11 * 13 * 11, 800)\n', (2237, 2268), True, 'import torch.nn as nn\n'), ((2288, 2313), 'torch.nn.Linear', 'nn.Linear', (['(800)', 'n_classes'], {}), '(800, n_classes)\n', (2297, 2313), True, 'import torch.nn as nn\n'), ((2372, 2393), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (2382, 2393), True, 'import torch.nn as nn\n'), ((2869, 2884), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2882, 2884), False, 'import torch\n'), 
((3825, 3861), 'os.path.join', 'path.join', (['results_path', 'output_name'], {}), '(results_path, output_name)\n', (3834, 3861), False, 'from os import path\n'), ((5200, 5240), 'os.path.join', 'path.join', (['results_path', 'parameters_name'], {}), '(results_path, parameters_name)\n', (5209, 5240), False, 'from os import path\n'), ((8840, 8865), 'os.path.exists', 'path.exists', (['results_path'], {}), '(results_path)\n', (8851, 8865), False, 'from os import path\n'), ((8875, 8900), 'os.makedirs', 'os.makedirs', (['results_path'], {}), '(results_path)\n', (8886, 8900), False, 'import os\n'), ((8922, 8947), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8945, 8947), False, 'import torch\n'), ((8966, 8986), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (8978, 8986), False, 'import torch\n'), ((9014, 9033), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9026, 9033), False, 'import torch\n'), ((12424, 12430), 'time.time', 'time', ([], {}), '()\n', (12428, 12430), False, 'from time import time\n'), ((2740, 2754), 'torch.abs', 'torch.abs', (['var'], {}), '(var)\n', (2749, 2754), False, 'import torch\n'), ((9384, 9419), 'data_loader.GaussianSmoothing', 'GaussianSmoothing', ([], {'sigma': 'args.sigma'}), '(sigma=args.sigma)\n', (9401, 9419), False, 'from data_loader import MriBrainDataset, ToTensor, GaussianSmoothing\n'), ((9421, 9443), 'data_loader.ToTensor', 'ToTensor', ([], {'gpu': 'args.gpu'}), '(gpu=args.gpu)\n', (9429, 9443), False, 'from data_loader import MriBrainDataset, ToTensor, GaussianSmoothing\n'), ((11793, 11883), 'pandas.DataFrame', 'pd.DataFrame', (['row'], {'columns': "['epoch', 'training_time', 'loss_train', 'loss_validation']"}), "(row, columns=['epoch', 'training_time', 'loss_train',\n 'loss_validation'])\n", (11805, 11883), True, 'import pandas as pd\n'), ((12473, 12479), 'time.time', 'time', ([], {}), '()\n', (12477, 12479), False, 'from time import time\n'), ((11500, 11506), 
'time.time', 'time', ([], {}), '()\n', (11504, 11506), False, 'from time import time\n'), ((12071, 12087), 'copy.copy', 'copy', (['loss_valid'], {}), '(loss_valid)\n', (12075, 12087), False, 'from copy import copy, deepcopy\n'), ((12117, 12128), 'copy.copy', 'copy', (['epoch'], {}), '(epoch)\n', (12121, 12128), False, 'from copy import copy, deepcopy\n'), ((12158, 12179), 'copy.deepcopy', 'deepcopy', (['autoencoder'], {}), '(autoencoder)\n', (12166, 12179), False, 'from copy import copy, deepcopy\n'), ((12249, 12303), 'torch.abs', 'torch.abs', (['((prev_loss_train - loss_train) / loss_train)'], {}), '((prev_loss_train - loss_train) / loss_train)\n', (12258, 12303), False, 'import torch\n'), ((11695, 11755), 'numpy.array', 'np.array', (['[epoch + 1, training_time, loss_train, loss_valid]'], {}), '([epoch + 1, training_time, loss_train, loss_valid])\n', (11703, 11755), True, 'import numpy as np\n'), ((12346, 12385), 'torch.abs', 'torch.abs', (['(prev_loss_train - loss_train)'], {}), '(prev_loss_train - loss_train)\n', (12355, 12385), False, 'import torch\n')] |
# coding: utf-8
# # Leave-One-Patient-Out classification of individual volumes
#
# Here, we train a classifier for each patient, based on the data of all the other patients except the current one (Leave One Out Cross-Validation). To this end, we treat each volume as an independent observation, so we have a very large sample of volumes which are used for training; and later, we do not classify the patient as a whole, but the classifier makes a decision for each of the held-out patient's 200 volumes. Therefore, at this stage, we have not made a decision on the patient level, but only at the volume-as-unit-of-observation level.
# ### import modules
# In[1]:
import os
import pickle
import numpy as np
import pandas as pd
from sklearn import svm, preprocessing, metrics
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns
# seaborn styling applied to every figure produced below
sns.set_style('ticks')
sns.set_context('poster')
# In[2]:
# duplicated notebook cell: re-applies the same plotting context (harmless no-op)
sns.set_context('poster')
# In[3]:

# after conversion to .py, we can use __file__ to get the module folder
try:
    thisDir = os.path.realpath(__file__)
# in notebook form, __file__ is undefined (raises NameError) and we take the
# current working directory (we need to be in 'notebooks/' for this!)
except NameError:
    thisDir = '.'

# convert relative path into absolute path, so this will work with notebooks and py modules
supDir = os.path.abspath(os.path.join(os.path.dirname(thisDir), '..'))

# notebook-style echo of the resolved project directory (no-op in .py form)
supDir
# ### get meta df
# We need this e.g. to get information about conclusiveness
#
## In[4]:
#
#
#data_df = pd.read_csv(
# '../data/interim/csv/info_epi_zscored_zdiff_summarymaps_2dpredclean_corr_df.csv',
# index_col=[0, 1],
# header=0)
#
#
#
## In[5]:
#
#
#data_df.tail()
#
#
# #### conclusiveness filters
#
## In[6]:
#
#
#is_conclusive = data_df.loc[:, 'pred'] != 'inconclusive'
#
#
#
## In[7]:
#
#
#is_conclusive.sum()
#
#
# ### get data
#
## In[8]:
#
#
#def make_group_df(data_df,metric='corr_df'):
# '''load correlation data of all patients'''
#
# group_df = pd.DataFrame()
#
# for p in data_df.index:
# # get data
# filename = data_df.loc[p, metric]
# this_df = pd.read_csv(filename, index_col=[0], header=0)
# # add patient infos to index
# this_df.index = [[p[0]], [p[1]]]
#
# group_df = pd.concat([group_df, this_df])
#
# # reorder the colums and make sure volumes are integer values
# group_df.columns = group_df.columns.astype(int)
#
# # sort across rows, then across columns, to make sure that volumes
# # are in the right order
# group_df = group_df.sort_index(axis=0)
# group_df = group_df.sort_index(axis=1)
#
# assert all(group_df.columns == range(200)), 'wrong order of volumes'
#
# return group_df
#
#
#
## In[9]:
#
#
#group_df = make_group_df(data_df)
#
#
#
## In[10]:
#
#
#group_df.tail()
#
#
# #### filter data
#
## In[11]:
#
#
## only conclusive cases
#conclusive_df = group_df[is_conclusive]
## only inconclusive cases
#inconclusive_df = group_df[is_conclusive == False]
## all cases unfiltered
#withinconclusive_df = group_df.copy()
#
#
#
## In[12]:
#
#
#print(conclusive_df.shape, inconclusive_df.shape, withinconclusive_df.shape)
#
#
# ### get design
# In[13]:
# per-volume condition vector stored with the models; multiplying the data by
# it sign-flips the resting timepoints (see the '*conds' usages below)
conds_file = os.path.join(supDir, 'models', 'conds.p')
with open(conds_file, 'rb') as pickle_file:
    conds = pickle.load(pickle_file)
#
## In[14]:
#
#
#print(conds)
#
#
# ### get colors
#
## In[15]:
#
#
#with open('../models/colors.p', 'rb') as f:
# color_dict = pickle.load(f)
#
#my_cols = {}
#for i, j in zip(['red', 'blue', 'yellow'], ['left', 'right', 'bilateral']):
# my_cols[j] = color_dict[i]
#
#
# ### invert the resting timepoints
#
## In[16]:
#
#
#inv_df = conclusive_df*conds
#
#
#
## In[17]:
#
#
#inv_df.tail()
#
#
# ### train the classifier
#
## In[18]:
#
#
#stack_df = pd.DataFrame(inv_df.stack())
#stack_df.tail()
#
#
#
## In[19]:
#
#
#stack_df.shape
#
#
#
## In[20]:
#
#
#my_groups = ['left','bilateral','right']
#
#
#
## In[21]:
#
#
#dynamite_df = stack_df.copy()
#dynamite_df.columns = ['correlation']
#dynamite_df['group'] = dynamite_df.index.get_level_values(0)
#sns.catplot(data=dynamite_df,y='group',x='correlation',kind='bar',orient='h',palette=my_cols,order=my_groups,aspect=1)
#plt.axvline(0,color='k',linewidth=3)
#plt.xlim(0.05,-0.05,-0.01)
#sns.despine(left=True,trim=True)
#plt.ylabel('')
#plt.savefig('../reports/figures/10-dynamite-plot.png',dpi=300,bbox_inches='tight')
#plt.show()
#
#
#
## In[22]:
#
#
#from scipy import stats
#
#
#
## In[23]:
#
#
#t,p = stats.ttest_ind(dynamite_df.loc['bilateral','correlation'],dynamite_df.loc['left','correlation'])
#print('\nt=%.2f,p=%.64f'%(t,p))
#t,p = stats.ttest_ind(dynamite_df.loc['bilateral','correlation'],dynamite_df.loc['right','correlation'])
#print('\nt=%.2f,p=%.38f'%(t,p))
#t,p = stats.ttest_ind(dynamite_df.loc['left','correlation'],dynamite_df.loc['right','correlation'])
#print('\nt=%.2f,p=%.248f'%(t,p))
#
#
# ### as histogram
#
## In[24]:
#
#
#fig,ax = plt.subplots(1,1,figsize=(8,5))
#for group in my_groups:
# sns.distplot(stack_df.loc[group,:],color=my_cols[group],label=group,ax=ax)
#plt.legend()
#plt.xlim(0.4,-0.4,-0.2)
#sns.despine()
#plt.show()
#
#
# ### set up the classifier
#
## In[25]:
#
#
#clf = svm.SVC(kernel='linear',C=1.0,probability=False,class_weight='balanced')
#
#
# In[26]:
def scale_features(X):
    """Standardize (z-transform) the feature matrix for the SVC.

    Returns the scaled features together with the fitted scaler, so the
    identical transform can later be replayed on held-out test data.
    """
    scaler = preprocessing.StandardScaler()
    scaler.fit(X)
    return scaler.transform(X), scaler
# In[27]:
def encode_labels(y):
    """Map string class labels to integer codes.

    Returns the integer-coded labels plus the fitted LabelEncoder, so the
    codes can later be translated back to the original strings.
    """
    labeler = preprocessing.LabelEncoder()
    labeler.fit(np.unique(y))
    return labeler.transform(y), labeler
# In[28]:
def train_classifier(df):
    """Fit the module-level classifier on one training fold.

    Features come from the frame's values, labels from the first index
    level.  Both are preprocessed (z-scored / integer-encoded) before
    fitting.  Returns the fitted classifier together with the scaler and
    labeler needed to preprocess unseen data the same way.
    """
    raw_labels = df.index.get_level_values(0)
    features, scaler = scale_features(df.values)
    labels, labeler = encode_labels(raw_labels)
    # NOTE: `clf` is the module-level classifier object, refit in place here
    clf.fit(features, labels)
    return clf, scaler, labeler
#
## In[29]:
#
#
#example_clf, example_scaler, example_labeler = train_classifier(stack_df)
#
#
#
## In[30]:
#
#
#example_clf
#
#
#
## In[31]:
#
#
#example_scaler
#
#
#
## In[32]:
#
#
#example_labeler.classes_
#
#
#
## In[33]:
#
#
#def get_boundaries(clf,my_scaler):
# '''find the point where the classifier changes its prediction;
# this is an ugly brute-force approach and probably there is a much
# easier way to do this
# '''
#
# d = {}
# for i in np.linspace(-1,1,10000):
# this_val = my_scaler.transform(np.array([i]).reshape(1,-1))
# this_predict = clf.predict(this_val)
# d[i] = this_predict[-1]
# df = pd.DataFrame(d,index=['pred']).T
# return df[(df-df.shift(1))!=0].dropna().index[1:]
#
#
#
## In[34]:
#
#
#from datetime import datetime
#
#
# ### get class boundaries of all folds
#
## In[35]:
#
#
#import tqdm
#
#
#
## In[36]:
#
#
#def get_all_boundaries(stack_df):
# '''for each fold, get the boundaries, by
# training on everybody but the held-out patient
# and storing the boundaries'''
#
# all_boundaries = {}
#
# conclusive_pats = np.unique(stack_df.index.get_level_values(1))
#
# for p in tqdm.tqdm(conclusive_pats):
#
# # in the current fold, we drop one patient
# df = stack_df.drop(p,level=1)
#
# # train on this fold's data
# clf,my_scaler,my_labeler = train_classifier(df)
#
# # get the classifier boundaries
# boundaries = get_boundaries(clf,my_scaler)
# all_boundaries[p] = boundaries
#
# return all_boundaries
#
#
# Compute the boundaries and store them for later re-use:
#
## In[37]:
#
#
#all_boundaries = get_all_boundaries(stack_df)
#bound_df = pd.DataFrame(all_boundaries).T
#bound_df.tail()
#
#
#
## In[38]:
#
#
#bound_df.to_csv('../data/processed/csv/bound_df.csv')
#
#
# To make things faster, we can re-load the computed boundaries here:
#
## In[39]:
#
#
#bound_df = pd.read_csv('../data/processed/csv/bound_df.csv',index_col=[0],header=0)
#bound_df.tail()
#
#
# rename so boundaries have meaningful descriptions:
#
## In[40]:
#
#
#bound_df = bound_df.rename(columns={'0':'B/R','1':'L/B'})
#bound_df.tail()
#
#
#
## In[41]:
#
#
#bound_df.describe()
#
#
# #### show the class boundaries overlaid on the data distribution
#
## In[42]:
#
#
#fig,ax = plt.subplots(1,1,figsize=(8,5))
#for group in my_groups:
# sns.distplot(stack_df.loc[group,:],color=my_cols[group],label=group,ax=ax)
#
#for b in bound_df.values.flatten():
# plt.axvline(b,alpha=0.1,color=color_dict['black'])
#
#plt.legend()
#plt.xlabel('correlation')
#plt.ylabel('density')
#plt.xlim(0.4,-0.4,-0.2)
#plt.ylim(0,8)
#plt.legend(loc=(0.65,0.65))
#sns.despine(trim=True,offset=5)
#plt.savefig('../reports/figures/10-distribution-plot.png',dpi=300,bbox_inches='tight')
#plt.show()
#
#
# #### make swarm/factorplot with boundary values
#
## In[43]:
#
#
#sns_df = pd.DataFrame(bound_df.stack())
#sns_df.columns = ['correlation']
#sns_df.loc[:,'boundary'] = sns_df.index.get_level_values(1)
#sns_df.loc[:,'dummy'] = 0
#
#
#
## In[44]:
#
#
#sns_df.tail()
#
#
#
## In[45]:
#
#
#fig,ax = plt.subplots(1,1,figsize=(4,5))
#sns.swarmplot(data=sns_df,
# x='correlation',
# y='dummy',
# hue='boundary',
# orient='h',
# palette={'L/B':my_cols['left'],'B/R':my_cols['right']},
# size=4,
# alpha=0.9,
# ax=ax
# )
#plt.xlim(0.04,-0.02,-0.02)
#ax.set_ylabel('')
#ax.set_yticks([])
#sns.despine(left=True,trim=True)
#plt.savefig('../reports/figures/10-boundary-swarm-plot.png',dpi=300,bbox_inches='tight')
#
#plt.show()
#
#
# ### combine above into one plot
#
## In[46]:
#
#
#sns.set_style('dark')
#
#
#
## In[47]:
#
#
#fig = plt.figure(figsize=(16,6))
#
#ax1 = fig.add_axes([0.36, .999, 1, .7], xticklabels=[], yticklabels=[])
#ax1.imshow(Image.open('../reports/figures/10-dynamite-plot.png'))
#
#ax2 = fig.add_axes([0, 1, 1, 0.8], xticklabels=[], yticklabels=[])
#ax2.imshow(Image.open('../reports/figures/10-distribution-plot.png'))
#
#ax3 = fig.add_axes([0.65, 1, 1, 0.8], xticklabels=[], yticklabels=[])
#ax3.imshow(Image.open('../reports/figures/10-boundary-swarm-plot.png'))
#
#plt.text(0,1, 'A',transform=ax2.transAxes, fontsize=32)
#plt.text(1.04,1, 'B',transform=ax2.transAxes, fontsize=32)
#plt.text(1.63,1, 'C',transform=ax2.transAxes, fontsize=32)
#
#plt.savefig('../reports/figures/10-training-overview.png',dpi=300,bbox_inches='tight')
#plt.show()
#
#
# ### make predictions for all patients (conc and inconc)
# #### invert
#
## In[48]:
#
#
#all_inv_df = group_df*conds
#
#
#
## In[49]:
#
#
#all_inv_df.tail()
#
#
# In[50]:
def make_preds(this_df, clf, my_scaler, my_labeler):
    """Classify every volume of one held-out patient.

    Re-applies the scaler, classifier and label encoder fitted on the
    training fold, then tallies how often each class label was predicted.
    Returns a one-row DataFrame of per-class counts whose (Multi)index
    mirrors the input frame's index.
    """
    features = my_scaler.transform(this_df.T)
    class_names = my_labeler.inverse_transform(clf.predict(features))
    per_class = pd.Series(class_names).value_counts()
    counts_df = pd.DataFrame(per_class).T
    counts_df.index = pd.MultiIndex.from_tuples(this_df.index)
    return counts_df
# Example:
#
## In[51]:
#
#
#make_preds(all_inv_df.iloc[[-1]],example_clf, example_scaler, example_labeler)
#
#
#
## In[52]:
#
#
#import warnings
## this is necessary to get rid of https://github.com/scikit-learn/scikit-learn/issues/10449
#with warnings.catch_warnings():
# warnings.filterwarnings("ignore",category=DeprecationWarning)
#
# for p in tqdm.tqdm(all_inv_df.index):
#
# # get data in leave-one-out fashion
# this_df = all_inv_df.loc[[p],:]
# other_df = stack_df.drop(p[-1],level=1)
#
# # train on this fold's data
# clf,my_scaler,my_labeler = train_classifier(other_df)
# # make predictions
# p_df = make_preds(this_df,clf,my_scaler,my_labeler)
#
# out_name = '../data/processed/csv/%s_counts_df.csv' % p[-1]
# p_df.to_csv(out_name)
# data_df.loc[p,'counts_df'] = out_name
#
#data_df.to_csv('../data/processed/csv/info_epi_zscored_zdiff_summarymaps_2dpredclean_corr_counts_df.csv')
#
#
# ### train classifier once on all data and store
#
# We store a classifer trained on all data as a pickle file so we can re-use it in the future on new data
#
## In[53]:
#
#
#clf,my_scaler,my_labeler = train_classifier(stack_df)
#d = {'clf':clf,'scaler':my_scaler,'labeler':my_labeler}
#
#
#
## In[54]:
#
#
#with open('../models/volume_clf.p','wb') as f:
# pickle.dump(d,f)
#
#
# #### toolbox model
#
# The toolbox assumes that a dataset used as input is a new dataset and was not part of this study
# Pre-trained volume classifier shipped with the toolbox: the pickle bundles
# the fitted classifier plus the scaler/labeler it was trained with.
clf_file = os.path.join(supDir, 'models', 'volume_clf.p')
with open(clf_file, 'rb') as pickle_file:
    clf_dict = pickle.load(pickle_file)

clf = clf_dict['clf']
my_scaler = clf_dict['scaler']
my_labeler = clf_dict['labeler']
def make_p(pFolder, pName, clf=clf, my_scaler=my_scaler, my_labeler=my_labeler):
    """Toolbox entry point: classify all volumes of one new patient.

    Reads '<pName>_corr_df.csv' from pFolder, sign-flips the resting
    timepoints via the stored condition vector, predicts a class per
    volume and writes the per-class counts next to the input file.
    Returns the path of the written counts csv.
    """
    in_file = os.path.join(pFolder, pName + '_corr_df.csv')
    this_df = pd.read_csv(in_file, index_col=[0], header=0)
    this_df.index = [['correlations'], [pName]]
    # invert the resting timepoints, as was done for the training data
    counts_df = make_preds(this_df * conds, clf, my_scaler, my_labeler)
    out_name = os.path.join(pFolder, pName + '_counts_df.csv')
    counts_df.to_csv(out_name)
    return out_name
# ### summary
#
# For each patient, a classfier has been developed based on all the other patient (Leave-One-Out) and applied to the 200 volumes of that patient. There are now 200 decisions for each patient, as many as there are volumes. These data are stored in csv files which we can now access to make a prediction on the level of the patient.
#
#
# **************
#
# < [Previous](09-mw-correlations-with-template.ipynb) | [Contents](00-mw-overview-notebook.ipynb) | [Next >](11-mw-logistic-regression.ipynb)
| [
"pandas.Series",
"sklearn.preprocessing.LabelEncoder",
"numpy.unique",
"pandas.read_csv",
"seaborn.set_context",
"os.path.join",
"pickle.load",
"seaborn.set_style",
"os.path.realpath",
"sklearn.preprocessing.StandardScaler",
"os.path.dirname",
"pandas.DataFrame",
"pandas.MultiIndex.from_tupl... | [((861, 883), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (874, 883), True, 'import seaborn as sns\n'), ((884, 909), 'seaborn.set_context', 'sns.set_context', (['"""poster"""'], {}), "('poster')\n", (899, 909), True, 'import seaborn as sns\n'), ((923, 948), 'seaborn.set_context', 'sns.set_context', (['"""poster"""'], {}), "('poster')\n", (938, 948), True, 'import seaborn as sns\n'), ((3201, 3242), 'os.path.join', 'os.path.join', (['supDir', '"""models"""', '"""conds.p"""'], {}), "(supDir, 'models', 'conds.p')\n", (3213, 3242), False, 'import os\n'), ((13068, 13114), 'os.path.join', 'os.path.join', (['supDir', '"""models"""', '"""volume_clf.p"""'], {}), "(supDir, 'models', 'volume_clf.p')\n", (13080, 13114), False, 'import os\n'), ((1054, 1080), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1070, 1080), False, 'import os\n'), ((3287, 3301), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3298, 3301), False, 'import pickle\n'), ((5444, 5474), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (5472, 5474), False, 'from sklearn import svm, preprocessing, metrics\n'), ((5670, 5698), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (5696, 5698), False, 'from sklearn import svm, preprocessing, metrics\n'), ((11503, 11543), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['this_df.index'], {}), '(this_df.index)\n', (11528, 11543), True, 'import pandas as pd\n'), ((13159, 13173), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (13170, 13173), False, 'import pickle\n'), ((13437, 13483), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'index_col': '[0]', 'header': '(0)'}), '(filename, index_col=[0], header=0)\n', (13448, 13483), True, 'import pandas as pd\n'), ((1337, 1361), 'os.path.dirname', 'os.path.dirname', (['thisDir'], {}), '(thisDir)\n', (1352, 1361), False, 'import 
os\n'), ((5718, 5730), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5727, 5730), True, 'import numpy as np\n'), ((11458, 11478), 'pandas.DataFrame', 'pd.DataFrame', (['counts'], {}), '(counts)\n', (11470, 11478), True, 'import pandas as pd\n'), ((11396, 11426), 'pandas.Series', 'pd.Series', (['labeled_predictions'], {}), '(labeled_predictions)\n', (11405, 11426), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
import os
import shutil
import unittest
import numpy as np
import tensorflow as tf
from nnef_tools import convert
from tests.activation.file_downloader import download_and_untar_once
# When executed from inside the tests directory, hop up to the repository
# root so relative paths like 'out/' and '_models/' resolve as expected.
if os.path.exists('../../nnef_tools') and not os.path.exists('nnef_tools'):
    os.chdir('../..')
# This test only tests the outputs of the networks
def load_graph_from_pb(frozen_graph_filename):
    """Load a frozen GraphDef from *frozen_graph_filename* and import it
    into the current default TensorFlow graph (no name scope prefix).

    Uses the module-level ``tf`` import; the previous function-local
    ``import tensorflow as tf`` merely shadowed it and has been removed.
    """
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')
def get_placeholders():
    """Return every output tensor of Placeholder-type ops in the default graph."""
    found = []
    for op in tf.get_default_graph().get_operations():
        if 'Placeholder' in op.node_def.op:
            found.extend(op.values())
    return found
def get_tensors_with_no_consumers():
    """Return the output tensors of graph sinks.

    An op counts as a sink only when *none* of its output tensors is
    consumed by another op; all outputs of such ops are returned.
    """
    sinks = []
    for op in tf.get_default_graph().get_operations():
        if any(t.consumers() for t in op.values()):
            continue
        sinks.extend(op.values())
    return sinks
# From: https://www.tensorflow.org/lite/guide/hosted_models
class TFPbNetworkTestCases(unittest.TestCase):
    """Round-trips hosted TensorFlow models through NNEF and back.

    Each test converts a frozen .pb graph to NNEF, converts the NNEF
    model back to a .pb graph, runs both graphs on the same random
    input and asserts that all output activations match.
    """

    @classmethod
    def setUpClass(cls):
        # silence TensorFlow info/warning spam unless the caller overrode it
        if 'TF_CPP_MIN_LOG_LEVEL' not in os.environ:
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        # deterministic random input / initializers across runs
        np.random.seed(0)
        tf.set_random_seed(0)
        # start from a clean conversion output directory
        if os.path.exists('out'):
            shutil.rmtree('out')

    def _test_network(self, path, size):
        """Convert the .pb at *path* to NNEF and back, then compare the
        activations of the original and round-tripped graphs on a random
        batch of two size x size RGB images."""
        network = os.path.basename(path.rsplit('.', 1)[0])
        input_shape = "(float32, [2, {size}, {size}, 3])".format(size=size)

        command = """
./nnef_tools/convert.py --input-framework=tensorflow-pb \\
                        --input-model={} \\
                        --input-shape="{}" \\
                        --output-framework=nnef \\
                        --output-directory=out/nnef/{} \\
                        --compress""".format(path, input_shape, network)
        print(command)
        convert.convert_using_command(command)

        command = """
./nnef_tools/convert.py --input-framework=nnef \\
                        --input-model=out/nnef/{}/model.nnef.tgz \\
                        --output-framework=tensorflow-pb \\
                        --output-directory=out/tensorflow-pb/{}""".format(network, network)
        print(command)
        convert.convert_using_command(command)

        # activations of the original graph
        tf.reset_default_graph()
        load_graph_from_pb(path)
        [input] = get_placeholders()
        outputs = get_tensors_with_no_consumers()
        feed = np.random.random([2, size, size, 3])
        with tf.Session() as sess:
            activations = sess.run(outputs, feed_dict={input: feed})

        # activations of the round-tripped graph, on the same feed
        tf.reset_default_graph()
        load_graph_from_pb('out/tensorflow-pb/{}/model.pb'.format(network))
        [input] = get_placeholders()
        outputs = get_tensors_with_no_consumers()
        with tf.Session() as sess:
            activations2 = sess.run(outputs, feed_dict={input: feed})

        for a1, a2 in zip(activations, activations2):
            self.assertTrue(np.allclose(a1, a2))

    def _test_hosted_network(self, url, size):
        """Download a hosted model archive once, extract its frozen .pb
        and run the round-trip test on it with inputs of size x size."""
        # archive basename without the .tgz suffix, e.g. densenet_2018_04_27;
        # every hosted archive's .pb is cached under that same stem
        stem = url.rsplit('/', 1)[-1].rsplit('.', 1)[0]
        path = download_and_untar_once(
            url=url,
            member="*.pb",
            path="_models/tensorflow-pb/{}.pb".format(stem))
        self._test_network(path, size)

    def test_densenet(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/densenet_2018_04_27.tgz", 224)

    def test_squeezenet(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz", 224)

    def test_nasnet_mobile(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz", 224)

    def test_nasnet_large(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_large_2018_04_27.tgz", 331)

    def test_inception_v3(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz", 299)

    def test_inception_v4(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz", 299)

    def test_inception_resnet_v2(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz", 299)

    def test_mobilenet_v1_0_25_128(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz", 128)

    def test_mobilenet_v1_1_0_128(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_128.tgz", 128)

    def test_mobilenet_v2_1_0_224(self):
        self._test_hosted_network(
            "http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz", 224)
# allow running this test module directly, outside a test runner
if __name__ == '__main__':
    unittest.main()
| [
"os.path.exists",
"numpy.allclose",
"tensorflow.reset_default_graph",
"numpy.random.random",
"tensorflow.Session",
"tests.activation.file_downloader.download_and_untar_once",
"unittest.main",
"tensorflow.GraphDef",
"os.chdir",
"nnef_tools.convert.convert_using_command",
"numpy.random.seed",
"t... | [((906, 940), 'os.path.exists', 'os.path.exists', (['"""../../nnef_tools"""'], {}), "('../../nnef_tools')\n", (920, 940), False, 'import os\n'), ((946, 963), 'os.chdir', 'os.chdir', (['"""../.."""'], {}), "('../..')\n", (954, 963), False, 'import os\n'), ((1234, 1273), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (1253, 1273), True, 'import tensorflow as tf\n'), ((7113, 7128), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7126, 7128), False, 'import unittest\n'), ((873, 901), 'os.path.exists', 'os.path.exists', (['"""nnef_tools"""'], {}), "('nnef_tools')\n", (887, 901), False, 'import os\n'), ((1102, 1145), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['frozen_graph_filename', '"""rb"""'], {}), "(frozen_graph_filename, 'rb')\n", (1116, 1145), True, 'import tensorflow as tf\n'), ((1172, 1185), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1183, 1185), True, 'import tensorflow as tf\n'), ((1961, 1978), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1975, 1978), True, 'import numpy as np\n'), ((1987, 2008), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(0)'], {}), '(0)\n', (2005, 2008), True, 'import tensorflow as tf\n'), ((2021, 2042), 'os.path.exists', 'os.path.exists', (['"""out"""'], {}), "('out')\n", (2035, 2042), False, 'import os\n'), ((2686, 2724), 'nnef_tools.convert.convert_using_command', 'convert.convert_using_command', (['command'], {}), '(command)\n', (2715, 2724), False, 'from nnef_tools import convert\n'), ((3081, 3119), 'nnef_tools.convert.convert_using_command', 'convert.convert_using_command', (['command'], {}), '(command)\n', (3110, 3119), False, 'from nnef_tools import convert\n'), ((3129, 3153), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3151, 3153), True, 'import tensorflow as tf\n'), ((3290, 3326), 'numpy.random.random', 'np.random.random', (['[2, size, size, 3]'], {}), '([2, size, size, 
3])\n', (3306, 3326), True, 'import numpy as np\n'), ((3441, 3465), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3463, 3465), True, 'import tensorflow as tf\n'), ((3885, 4088), 'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': '"""http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/densenet_2018_04_27.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/densenet_2018_04_27.pb"""'}), "(url=\n 'http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/densenet_2018_04_27.tgz'\n , member='*.pb', path='_models/tensorflow-pb/densenet_2018_04_27.pb')\n", (3908, 4088), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((4201, 4408), 'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': '"""http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/squeezenet_2018_04_27.pb"""'}), "(url=\n 'http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz'\n , member='*.pb', path='_models/tensorflow-pb/squeezenet_2018_04_27.pb')\n", (4224, 4408), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((4524, 4737), 'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': '"""http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/nasnet_mobile_2018_04_27.pb"""'}), "(url=\n 'http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz'\n , member='*.pb', path='_models/tensorflow-pb/nasnet_mobile_2018_04_27.pb')\n", (4547, 4737), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((4852, 5063), 
'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': '"""http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_large_2018_04_27.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/nasnet_large_2018_04_27.pb"""'}), "(url=\n 'http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_large_2018_04_27.tgz'\n , member='*.pb', path='_models/tensorflow-pb/nasnet_large_2018_04_27.pb')\n", (4875, 5063), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((5178, 5389), 'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': '"""http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/inception_v3_2018_04_27.pb"""'}), "(url=\n 'http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz'\n , member='*.pb', path='_models/tensorflow-pb/inception_v3_2018_04_27.pb')\n", (5201, 5389), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((5504, 5715), 'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': '"""http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/inception_v4_2018_04_27.pb"""'}), "(url=\n 'http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz'\n , member='*.pb', path='_models/tensorflow-pb/inception_v4_2018_04_27.pb')\n", (5527, 5715), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((5837, 6067), 'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': 
'"""http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/inception_resnet_v2_2018_04_27.pb"""'}), "(url=\n 'http://download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz'\n , member='*.pb', path=\n '_models/tensorflow-pb/inception_resnet_v2_2018_04_27.pb')\n", (5860, 6067), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((6186, 6384), 'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': '"""http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/mobilenet_v1_0.25_128.pb"""'}), "(url=\n 'http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz'\n , member='*.pb', path='_models/tensorflow-pb/mobilenet_v1_0.25_128.pb')\n", (6209, 6384), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((6507, 6703), 'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': '"""http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_128.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/mobilenet_v1_1.0_128.pb"""'}), "(url=\n 'http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_128.tgz'\n , member='*.pb', path='_models/tensorflow-pb/mobilenet_v1_1.0_128.pb')\n", (6530, 6703), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((6826, 7014), 'tests.activation.file_downloader.download_and_untar_once', 'download_and_untar_once', ([], {'url': '"""http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz"""', 'member': '"""*.pb"""', 'path': '"""_models/tensorflow-pb/mobilenet_v2_1.0_224.pb"""'}), "(url=\n 
'http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz'\n , member='*.pb', path='_models/tensorflow-pb/mobilenet_v2_1.0_224.pb')\n", (6849, 7014), False, 'from tests.activation.file_downloader import download_and_untar_once\n'), ((2056, 2076), 'shutil.rmtree', 'shutil.rmtree', (['"""out"""'], {}), "('out')\n", (2069, 2076), False, 'import shutil\n'), ((3341, 3353), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3351, 3353), True, 'import tensorflow as tf\n'), ((3644, 3656), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3654, 3656), True, 'import tensorflow as tf\n'), ((3819, 3838), 'numpy.allclose', 'np.allclose', (['a1', 'a2'], {}), '(a1, a2)\n', (3830, 3838), True, 'import numpy as np\n'), ((1341, 1363), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1361, 1363), True, 'import tensorflow as tf\n'), ((1547, 1569), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1567, 1569), True, 'import tensorflow as tf\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Audio Spectrum analyser
v0.7.0
- summed given fft in n bands, but re normalized between 0 - 70?
- Peaks L and R
- amplitude for given target frequency and PEAK frequency
- "music note" to given frequency
- Real FFT, Imaginary FFT, Real + imaginary FFT
- threshold detection
todo :
by <NAME>
from /team/laser
for python 2 & 3
Stereo : CHANNELS = 2
mono : CHANNELS = 1
"""
import time
from math import log, pow

import numpy as np
import pyaudio
#import matplotlib.pyplot as plt
#from scipy.interpolate import Akima1DInterpolator
#import matplotlib.pyplot as plt
# --- Capture configuration -------------------------------------------------
DEVICE = 3       # PyAudio input-device index; see list_devices() for choices
CHANNELS = 2
START = 0
RATE = 44100 # time resolution of the recording device (Hz)
CHUNK = 4096 # number of data points to read at a time. Almost 10 update/second
TARGET = 2100 # show only this one frequency
# Equal-temperament pitch reference: A4 = 440 Hz, and C0 sits 57 semitones
# (4.75 octaves) below it.
A4 = 440
C0 = A4*pow(2, -4.75)
# The 12 pitch-class names within one octave, starting from C.
name = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
data = []
# Open the capture stream: 16-bit samples, CHUNK frames per read.
p = pyaudio.PyAudio() # start the PyAudio class
stream = p.open(format = pyaudio.paInt16, channels = CHANNELS, input_device_index = DEVICE, rate=RATE, input=True,
                frames_per_buffer=CHUNK) #uses default input device
#
# Audio devices & audiogen functions
#
def list_devices():
    """Print the index and name of every audio device that can record.

    A device qualifies when its 'maxInputChannels' is positive.
    """
    audio = pyaudio.PyAudio()
    device_count = audio.get_device_count()
    print((device_count, "devices found"))
    for index in range(device_count):
        info = audio.get_device_info_by_index(index)
        if info['maxInputChannels'] > 0:
            print((str(index)+'. '+info['name']))
def valid_input_devices(self):
    """
    See which devices can be opened for microphone input.
    call this when no PyAudio object is loaded.

    Returns the list of device indices that pass self.valid_test.
    """
    mics = [device for device in range(self.p.get_device_count())
            if self.valid_test(device)]
    if not mics:
        print("no microphone devices found!")
    else:
        print(("found %d microphone devices: %s"%(len(mics),mics)))
    return mics
def loop():
    """Live FFT display: read CHUNK-sized frames and redraw the spectrum.

    Performs one initial plot, then 50 update cycles (pausing 10 ms each),
    then blocks in plt.show().  On Ctrl-C the stream and the PyAudio
    instance are torn down before returning.
    """
    # The module-level matplotlib import is commented out, so the original
    # body raised NameError on every plt.* call; import it locally here.
    import matplotlib.pyplot as plt
    try:
        plt.xlabel('frequencies')
        plt.ylabel('amplitude')
        data = audioinput()
        drawfreq, fft = allfft(data)
        line, = plt.plot(drawfreq, fft)
        for _ in range(50):
            data = audioinput()
            # A stereo frame interleaves samples: even = left, odd = right.
            dataL = data[0::2]
            dataR = data[1::2]
            # Peak-to-peak level per channel (kept for debugging; the
            # original computed these too but never displayed them).
            peakL = np.abs(np.max(dataL) - np.min(dataL)) / CHUNK
            peakR = np.abs(np.max(dataR) - np.min(dataR)) / CHUNK
            drawfreq, fft = allfft(data)
            line.set_ydata(fft)
            plt.pause(0.01)
        plt.show()
    except KeyboardInterrupt:
        stream.stop_stream()
        stream.close()
        p.terminate()
        print("End...")
# Close properly
def close():
    """Release the audio stream and terminate PyAudio.

    The order matters: stop capture first, then free the stream object,
    then tear down the PortAudio instance.
    """
    stream.stop_stream()
    stream.close()
    p.terminate()
# Return "music note" to given frequency
def pitch(freq, a4=440.0):
    """Return the equal-temperament note name nearest to *freq*, e.g. 'A4'.

    Args:
        freq: frequency in Hz; must be positive.
        a4: reference tuning for A4 (defaults to concert pitch, 440 Hz).
    Returns:
        Note name plus octave number, e.g. 'C4'.
    Raises:
        ValueError: if freq is not positive.
    """
    if freq <= 0:
        raise ValueError("frequency must be positive")
    names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
    c0 = a4 * pow(2, -4.75)  # frequency of C0, 4.75 octaves below A4
    # int() is required: on Python 2 round() returns a float, and the
    # original code then crashed using it as a list index.
    half_steps = int(round(12 * (log(freq / c0) / log(2))))
    octave = half_steps // 12
    return names[half_steps % 12] + str(octave)
# Return summed given fft in n bands, but re normalized 0 - 70
def levels(fourier, bands):
    """Downsample *fourier* into *bands* bins of summed, width-normalized power.

    Args:
        fourier: sequence of spectral magnitudes (may contain -inf values
            produced upstream by 10*log10(0)).
        bands: number of output bins (must be >= 1).
    Returns:
        List of length *bands*; any -inf bin is replaced by 0 so plots
        don't break.
    """
    size = len(fourier)
    step = int(size / bands)       # samples per bin
    normalizer = size / bands      # bin width used to keep bins comparable
    binned = [sum(fourier[i:i + step]) / normalizer
              for i in range(0, size, step)][:bands]
    # The original compared against np.NINF, which was removed in NumPy 2.0;
    # a plain float comparison is equivalent and dependency-free.
    return [0 if value == float("-inf") else value for value in binned]
# read CHUNK size in audio buffer
def audioinput():
    """Read one CHUNK of frames from the global stream as int16 samples.

    For 16-bit stereo capture the returned array holds interleaved
    left/right samples (2 * CHUNK values per read).
    """
    # np.fromstring was deprecated and then removed from NumPy; frombuffer
    # is the supported replacement.  .copy() keeps the result writable,
    # matching fromstring's old behaviour.
    return np.frombuffer(stream.read(CHUNK), dtype=np.int16).copy()
# power for given TARGET frequency and PEAK frequency
# do fft first. No conversion in 'powers'
def basicfft(data):
    """Return (amplitude at TARGET Hz, peak frequency in Hz) of *data*."""
    spectrum = abs(np.fft.fft(data).real)
    spectrum = spectrum[:int(len(spectrum) / 2)]  # positive-frequency half
    freq = np.fft.fftfreq(CHUNK, 1.0 / RATE)
    freq = freq[:int(len(freq) / 2)]  # matching half of the frequency axis
    assert freq[-1] > TARGET, "ERROR: increase chunk size"
    target_idx = np.where(freq > TARGET)[0][0]
    peak_idx = np.where(spectrum == np.max(spectrum))[0][0]
    return spectrum[target_idx], freq[peak_idx] + 1
# todo : Try if data = 1024 ?
def allfft(data):
    """Return (drawfreq, fftb): kHz frequency axis and dB magnitude spectrum.

    Only the combined real+imaginary magnitude is computed; the separate
    real-only and imaginary-only spectra built by the original were never
    used (dead work), so they have been dropped.
    """
    half = int(len(data) / 2)
    transform = np.fft.fft(data)
    # 10*log10 of the complex magnitude, positive-frequency half only.
    fftb = 10 * np.log10(np.sqrt(transform.imag ** 2 + transform.real ** 2))[:half]
    # np.fft.fftfreq(n) directly; the original built an arange only to
    # read its length back via .shape[-1].
    drawfreq = np.fft.fftfreq(len(data))[:half]
    drawfreq = drawfreq * RATE / 1000  # convert to kHz for display
    return drawfreq, fftb
# Draw Original datas
# X : np.arange(len(data))/float(rate)*1000
# Y : data
# Draw real FFT
# X : drawfreq
# Y : fftr
# Draw imaginary
# X : drawfreq
# Y : ffti
# Draw Real + imaginary
# X : drawfreq
# Y : fftb
# True if any value in the data is greater than threshold and after a certain delay
def ding(right, threshold, min_delay=0.5):
    """Return True when max(right) exceeds *threshold* and the debounce passed.

    Args:
        right: sequence of sample amplitudes.
        threshold: trigger level.
        min_delay: debounce window in seconds since `last_run` (the original
            read an undefined global ``min_delay``, raising NameError).
    """
    return max(right) > threshold and time.time() - last_run > min_delay

# Timestamp of the last trigger; `import time` is added at the top of the
# file (the original never imported it, crashing this line at import time).
last_run = time.time()
# Script entry point: run the live spectrum display until interrupted.
if __name__ == "__main__":
    loop()
# Dead example code kept for reference: animates a travelling cosine wave
# with the same line.set_ydata / plt.pause pattern used in loop().
'''
x = np.linspace(0, 3, 100)
k = 2*np.pi
w = 2*np.pi
dt = 0.01
t = 0
for i in range(50):
    y = np.cos(k*x - w*t)
    if i == 0:
        line, = plt.plot(x, y)
    else:
        line.set_ydata(y)
    plt.pause(0.01)  # pause avec duree en secondes
    t = t + dt
plt.show()
'''
| [
"numpy.sqrt",
"math.pow",
"numpy.where",
"numpy.fft.fftfreq",
"numpy.fft.fft",
"math.log",
"numpy.max",
"numpy.min",
"pyaudio.PyAudio"
] | [((951, 968), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (966, 968), False, 'import pyaudio\n'), ((849, 862), 'math.pow', 'pow', (['(2)', '(-4.75)'], {}), '(2, -4.75)\n', (852, 862), False, 'from math import log, pow\n'), ((1283, 1300), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (1298, 1300), False, 'import pyaudio\n'), ((5706, 5739), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['CHUNK', '(1.0 / RATE)'], {}), '(CHUNK, 1.0 / RATE)\n', (5720, 5739), True, 'import numpy as np\n'), ((6190, 6206), 'numpy.fft.fft', 'np.fft.fft', (['data'], {}), '(data)\n', (6200, 6206), True, 'import numpy as np\n'), ((5579, 5595), 'numpy.fft.fft', 'np.fft.fft', (['data'], {}), '(data)\n', (5589, 5595), True, 'import numpy as np\n'), ((4358, 4372), 'math.log', 'log', (['(freq / C0)'], {}), '(freq / C0)\n', (4361, 4372), False, 'from math import log, pow\n'), ((4371, 4377), 'math.log', 'log', (['(2)'], {}), '(2)\n', (4374, 4377), False, 'from math import log, pow\n'), ((6371, 6409), 'numpy.sqrt', 'np.sqrt', (['(fft.imag ** 2 + fft.real ** 2)'], {}), '(fft.imag ** 2 + fft.real ** 2)\n', (6378, 6409), True, 'import numpy as np\n'), ((5945, 5968), 'numpy.where', 'np.where', (['(freq > TARGET)'], {}), '(freq > TARGET)\n', (5953, 5968), True, 'import numpy as np\n'), ((3058, 3071), 'numpy.max', 'np.max', (['dataL'], {}), '(dataL)\n', (3064, 3071), True, 'import numpy as np\n'), ((3072, 3085), 'numpy.min', 'np.min', (['dataL'], {}), '(dataL)\n', (3078, 3085), True, 'import numpy as np\n'), ((3120, 3133), 'numpy.max', 'np.max', (['dataR'], {}), '(dataR)\n', (3126, 3133), True, 'import numpy as np\n'), ((3134, 3147), 'numpy.min', 'np.min', (['dataR'], {}), '(dataR)\n', (3140, 3147), True, 'import numpy as np\n'), ((5999, 6010), 'numpy.max', 'np.max', (['fft'], {}), '(fft)\n', (6005, 6010), True, 'import numpy as np\n')] |
"""
Class for high-level Bayesian optimization.
"""
import time, os
from copy import deepcopy
import pickle as pkl
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import models
import acquisitions
from calculate_distance import DistanceCalculator
import util
class BayesOpt(object):
"""
Runs Bayesian optimization on provided simulator using provided
acquisition functions. Requires an initial GP model, a bandwidth h
for kernel density estimation, a pymatlab session.
==================================================================================
Args:
sim: Simulator.
acqs: Acquisition functions.
Note: if this does multiple runs of BOpt using
these acquisitions,it resets them to this state
after each run.
init_model: Initial GP model (that all acquisitions start with).
h: Bandwidth for KDE.
session: pymatlab Matlab session.
verbose: Whether to print information while running.
==================================================================================
Keeps copies of the final state of each acquisition in dict final_acqs:
bopt.final_acqs: One entry for each acquisition. Each entry has shape (n_runs, ).
==================================================================================
If Hedge algorithm is one of the provided acquisitions, tracks information
about Hedge. The information is stored in lists for convenience, and
if you convert the lists to numpy arrays you get the shapes:
-hedge_gains (n_runs, n_iter, n_acqs): Gains for each base acquisition, at
each iteration, for all runs.
-hedge_rewards (n_runs, n_iter, n_acqs): Rewards for each base acq, each iter,
all runs.
The gains/rewards are ordered the same way you pass 'acqs' to the constructor, i.e.
if you pass acqs=[lcb, ei, hedge] to the constructor, hedge_rewards[0][5][1]
contains EI's reward at run 1, iteration 6.
==================================================================================
Also tracks TV distance between the approximate posterior calculated with
each acquisition's GP model. Stores them in dictionary; one entry for each
acquisition. Each entry is a list with shape (n_runs, n_iter). Dictionary
is called 'dists'.
"""
# TODO: spaghetti code
def __init__(self, sim, acqs, init_model, h, session, verbose=False):
self.sim = sim
self.acq_inits = acqs
self.input_dim = init_model.input_dim
self.bounds = init_model.bounds
self.obs_noise = init_model.obs_noise
self.h = float(h)
self.verbose = verbose
self.session = session # pymatlab session
assert isinstance(init_model, models.GP)
# For tracking information ===============================================
# dict storing list of TV/KL dists for each acquistion
# each list's shape: (n_runs, n_iter)
self.dists = {a.acq_name: [] for a in self.acq_inits}
# track Hedge information if it's being used
for acq in self.acq_inits:
if acq.acq_name == 'Hedge':
self.hedge_gains = []
self.hedge_rewards = []
self.hedge_probs = {a.acq_name: [] for a in acq.acqs}
if acq.acq_name == 'Exp3':
self.exp3_probs = {a.acq_name: [] for a in acq.acqs}
# keep final acquisitions from each run (and their corresponding GP models)
self.final_acqs = {a.acq_name: [] for a in self.acq_inits}
# track approximate PDFs (only for 1D sims; too much memory otherwise)
if self.input_dim == 1:
self.pdfs = {a.acq_name: [] for a in self.acq_inits}
# true pdf of the simulator function (estimated using KDE)
self.true_pdf = None
# =========================================================================
# for plotting
self.clr = {'EI': 'blue', 'Bad': 'orange', 'LCB': 'green', \
'Rand': 'red', 'MPI': 'purple', 'Exp3': 'black', \
'Hedge': 'cyan'}
    def run_optimization(self, n_iter=30, n_runs=1, show_discr=False, \
            show_acq=False, show_posterior=False, show_hedge_probs=False):
        """
        Run Bayesian optimization for 'n_iter' iterations on given simulator
        function, using given acquisition functions (and corresponding GP models).
        If n_runs > 1, runs BO multiple times so averaged results can be shown.
        show_discr: Whether to plot posterior estimate of discrepancy for each
                    acquisition, at each iteration.
        show_acq: Whether to also show acquisition on discrepancy plot.
        show_posterior: whether to plot approximate posterior distribution for
                        each model (for each acquisition), at each iteration.
        Returns:
            self.dists -- dict mapping acquisition name to its (n_runs, n_iter)
            list of TV distances.
        NOTE(review): the show_hedge_probs parameter is accepted but never
        read inside this method.
        """
        for j in range(n_runs):
            if self.verbose:
                print('Run #{} ============='.format(j))
            begin_time = time.time()
            # create copies of acquisitions (so that they're not reusing
            # GP models from previous runs)
            acqs = [deepcopy(acq) for acq in self.acq_inits]
            # For tracking Hedge information
            if 'Hedge' in [a.acq_name for a in acqs]:
                self.hedge_gains.append([])
                self.hedge_rewards.append([])
            # create fresh distance calculator
            d_calc = DistanceCalculator(self.input_dim, self.bounds, self.obs_noise, \
                self.sim, self.h, self.session, acqs)
            self.true_pdf = d_calc.true_pdf
            for i in range(n_iter):
                if self.verbose:
                    iter_time = time.time()
                # Use 4 starting points to avoid local minima
                x0s = []
                for _ in range(4):
                    init_pt = np.array(
                        [np.random.uniform(self.bounds[d][0], self.bounds[d][1])
                         for d in range(self.input_dim)])
                    x0s.append(init_pt)
                # Each acquisition now picks a point and updates accordingly.
                for acq in acqs:
                    # Hedge/Exp3 self-update (so they can calculate rewards),
                    # so we only need to manually update other acquisitions
                    if acq.acq_name == 'Hedge':
                        next_theta, rewards = acq.select_next_theta(x0s=x0s)
                        next_discr = self.sim.f(next_theta)
                        # track info on base acquisitions
                        self.hedge_gains[j].append(acq.gains)
                        self.hedge_rewards[j].append(rewards)
                    elif acq.acq_name == 'Exp3':
                        next_theta = acq.select_next_theta(x0s=x0s)
                        next_discr = self.sim.f(next_theta)
                    else:
                        # Plain acquisitions don't self-update their model.
                        next_theta = acq.select_next_theta(x0s=x0s)
                        next_discr = self.sim.f(next_theta)
                        acq.model.update(next_theta, next_discr)
                    # Calculate and store distance
                    d_calc.tv_distance(acq.model, acq.acq_name)
                    # PLOT IF REQUESTED =========================================
                    if show_discr:
                        if acq.acq_name in ['Hedge', 'Exp3']:
                            # NOTE(review): the format string has one
                            # placeholder but two arguments; the chosen base
                            # acquisition (acq.choices[-1]) is never printed.
                            print('acq chose {}'.format(acq.acq_name, acq.choices[-1]))
                        if self.input_dim == 1:
                            f, ax = plt.subplots(figsize=(10, 3))
                            ax = acq.model.plot(
                                ax, point=(next_theta, next_discr), sim=self.sim, \
                                acq=acq.acq_name
                            )
                            if show_acq:
                                acq.plot(ax)
                            # NOTE(review): raw_input is Python 2-only; use
                            # input() on Python 3.
                            plt.show(); raw_input()
                        elif self.input_dim == 2:
                            f, axarr = plt.subplots(1, 2, sharex=True, sharey=True, \
                                figsize=(10, 10))
                            acq.model.plot(axarr, point=(next_theta, next_discr), \
                                sim=self.sim, acq=acq.acq_name)
                            plt.legend(); plt.show(); raw_input()
                        else:
                            # no visualization for 3D+
                            pass
                    if show_posterior:
                        if acq.acq_name in ['Hedge', 'Exp3']:
                            print('{} chose {}'.format(acq.acq_name, acq.choices[-1]))
                        d_calc.show_last_posteriors(acq.acq_name, theta=next_theta)
                    # ==============================================================
                    # this acquisition has finished, on to next one
                if self.verbose:
                    print('iter %d took %.3f sec' % (i, time.time() - iter_time))
            # finished the run; update results dict; update Hedge choices, probs
            for acq in acqs:
                self.dists[acq.acq_name].append(d_calc.dists[acq.acq_name])
                self.final_acqs[acq.acq_name].append(acq)
                if acq.acq_name == 'Hedge':
                    for name, probs in acq.probs.items():
                        self.hedge_probs[name].append(probs)
                elif acq.acq_name == 'Exp3':
                    for name, probs in acq.probs.items():
                        self.exp3_probs[name].append(probs)
            # retrieve approximate pdfs
            if self.input_dim == 1:
                for acq_name in self.pdfs.keys():
                    self.pdfs[acq_name].append(d_calc.m_pdfs[acq_name])
            if self.verbose:
                iter_time = time.time() - begin_time
                print('Run %d took %.3f seconds' % (j, iter_time))
        return self.dists
def plot_distances(self, ax):
"""
Plots TV distance versus iteration for all acquisition functions.
If multiple runs were made, plots averaged results from those.
"""
n_runs = len(self.dists.values()[0])
for name, dists in self.dists.items():
to_plt = np.sum(np.array(dists), axis=0) / len(dists)
ax.plot(range(len(to_plt)), to_plt, label=name, color=self.clr[name])
if n_runs == 1:
ax.set_title('TV distance vs. iteration')
else:
ax.set_title(
'TV distance vs. iteration; averaged over {} runs'.format(n_runs)
)
ax.set_xlabel('Iteration')
ax.set_ylabel('TV distance')
return ax
def hedge_choices(self):
"""
Return number of times Hedge chose each acquisition
over all runs.
"""
counts = {acq.acq_name: 0 for acq in self.final_acqs['Hedge'][0].acqs}
for hedge in self.final_acqs['Hedge']:
for acq_name in counts.keys():
counts[acq_name] += len([c for c in hedge.choices if c == acq_name])
return counts
def show_hedge_probs(self, ax):
"""
Does same as Hedge.show_base_probs(), but for averaged probabilities over all
runs.
"""
# average results
avgs = {k: None for k in self.hedge_probs.keys()}
for acq in self.hedge_probs.keys():
avgs[acq] = np.sum(np.array(self.hedge_probs[acq]), axis=0)
avgs[acq] /= len(self.hedge_probs[acq])
# plot probabilities on area chart
n_iter = len(self.hedge_probs.values()[0])
x = range(n_iter)
last_plt = np.zeros(n_iter)
for acq, probs in avgs.items():
a_plt = np.array(probs) + last_plt
ax.plot(x, a_plt, label='P({})'.format(acq), color=self.clr[acq])
ax.fill_between(x, a_plt, last_plt, color=self.clr[acq])
last_plt = a_plt
ax.set_xlabel('Iteration')
ax.set_ylabel('Probability of choosing acquisition')
return ax
def show_hedge_rewards(self):
"""
Show distribution of rewards (negative posterior estimates of
the discrepancy at chosen point) for ALL runs.
"""
rewards = np.array(self.hedge_rewards)
rewards = rewards.reshape(rewards.shape[0]*rewards.shape[1], rewards.shape[2])
f, axarr = plt.subplots(rewards.shape[1], 1, sharex=True, \
figsize=(10, rewards.shape[1]*3))
to_plot = []
mn, mx = np.inf, -np.inf
for i, acq in enumerate(self.final_acqs['Hedge'][-1].acqs):
rwd = rewards[:, i]
if rwd.min() < mn:
mn = rwd.min()
if rwd.max() > mx:
mx = rwd.max()
to_plot.append((rwd, acq.acq_name))
f.suptitle('Distributions of rewards for each base acquisition)')
bins = np.arange(mn, mx, (mx-mn)/100.0)
for i, (rwd, name) in enumerate(to_plot):
axarr[i].hist(rwd, bins=bins)
axarr[i].set_title(name)
plt.tight_layout()
plt.show()
def show_hedge_gains(self, ax):
"""
Show each base acquisition's gain at each iteration. Normalized each
iteration's gains so area chart can be used.
If multiple runs were made, this averages the gains from each run
and shows the averaged results.
"""
raise NotImplementedError
def show_final_models_discrepancies(self):
if input_dim == 1:
b1, b2 = self.bounds[0]
x = np.arange(b1, b2, (b2-b1)/100.)
true_pdf = self.true_pdf
true_discr = self.sim.noiseless_f(x).reshape(100, )
for acq in self.pdfs.keys():
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10, 6))
pdfs = np.array(self.pdfs[acq])[:, -1, :]
pdfs = np.sum(pdfs, axis=0) / len(pdfs)
ax1.plot(x, true_pdf, label='True')
ax1.plot(x, pdfs, label='Approximate')
ax1.set_title('{}-approx vs true posterior pdf'.format(acq))
final_acqs = self.final_acqs[acq]
discrs = np.zeros((len(final_acqs), 100))
varis = np.zeros_like(discrs)
ev_theta = []
ev_discr = []
for i, model in enumerate([a.model for a in final_acqs]):
discrs[i] = model.mu(x).reshape(100, )
varis[i] = np.array([model.v(t) for t in x]).reshape(100, )
ev_theta.append(model.thetas)
ev_discr.append(model.discrs)
discrs = np.sum(discrs, axis=0) / len(final_acqs)
varis = np.sum(varis, axis=0) / len(final_acqs)
n_runs, n_ev = len(ev_theta), len(ev_theta[0])
ev_theta = np.array(ev_theta).reshape(n_runs*n_ev, )
ev_discr = np.array(ev_discr).reshape(n_runs*n_ev, )
ax2.plot(x, true_discr, label='True')
ax2.errorbar(x, discrs, yerr=varis, label='Approximate')
ax2.scatter(ev_theta, ev_discr, label='Evidence')
ax2.set_title('{}-approx vs true discrepancy'.format(acq))
plt.legend()
plt.show()
else:
(b11, b12), (b21, b22) = self.bounds
| [
"calculate_distance.DistanceCalculator",
"matplotlib.pyplot.legend",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.uniform",
"matplotlib.pyplot.tight_layout",
"copy.deepcopy",
"time.time",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((12681, 12697), 'numpy.zeros', 'np.zeros', (['n_iter'], {}), '(n_iter)\n', (12689, 12697), True, 'import numpy as np\n'), ((13330, 13358), 'numpy.array', 'np.array', (['self.hedge_rewards'], {}), '(self.hedge_rewards)\n', (13338, 13358), True, 'import numpy as np\n'), ((13474, 13561), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rewards.shape[1]', '(1)'], {'sharex': '(True)', 'figsize': '(10, rewards.shape[1] * 3)'}), '(rewards.shape[1], 1, sharex=True, figsize=(10, rewards.shape[1\n ] * 3))\n', (13486, 13561), True, 'import matplotlib.pyplot as plt\n'), ((14043, 14079), 'numpy.arange', 'np.arange', (['mn', 'mx', '((mx - mn) / 100.0)'], {}), '(mn, mx, (mx - mn) / 100.0)\n', (14052, 14079), True, 'import numpy as np\n'), ((14226, 14244), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14242, 14244), True, 'import matplotlib.pyplot as plt\n'), ((14253, 14263), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14261, 14263), True, 'import matplotlib.pyplot as plt\n'), ((5960, 6065), 'calculate_distance.DistanceCalculator', 'DistanceCalculator', (['self.input_dim', 'self.bounds', 'self.obs_noise', 'self.sim', 'self.h', 'self.session', 'acqs'], {}), '(self.input_dim, self.bounds, self.obs_noise, self.sim,\n self.h, self.session, acqs)\n', (5978, 6065), False, 'from calculate_distance import DistanceCalculator\n'), ((14768, 14804), 'numpy.arange', 'np.arange', (['b1', 'b2', '((b2 - b1) / 100.0)'], {}), '(b1, b2, (b2 - b1) / 100.0)\n', (14777, 14804), True, 'import numpy as np\n'), ((16496, 16506), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16504, 16506), True, 'import matplotlib.pyplot as plt\n'), ((5474, 5485), 'time.time', 'time.time', ([], {}), '()\n', (5483, 5485), False, 'import time, os\n'), ((5636, 5649), 'copy.deepcopy', 'deepcopy', (['acq'], {}), '(acq)\n', (5644, 5649), False, 'from copy import deepcopy\n'), ((12440, 12471), 'numpy.array', 'np.array', (['self.hedge_probs[acq]'], {}), 
'(self.hedge_probs[acq])\n', (12448, 12471), True, 'import numpy as np\n'), ((12758, 12773), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (12766, 12773), True, 'import numpy as np\n'), ((14976, 15024), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 6)'}), '(2, 1, sharex=True, figsize=(10, 6))\n', (14988, 15024), True, 'import matplotlib.pyplot as plt\n'), ((15457, 15478), 'numpy.zeros_like', 'np.zeros_like', (['discrs'], {}), '(discrs)\n', (15470, 15478), True, 'import numpy as np\n'), ((16471, 16483), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16481, 16483), True, 'import matplotlib.pyplot as plt\n'), ((6262, 6273), 'time.time', 'time.time', ([], {}), '()\n', (6271, 6273), False, 'import time, os\n'), ((10713, 10724), 'time.time', 'time.time', ([], {}), '()\n', (10722, 10724), False, 'import time, os\n'), ((11196, 11211), 'numpy.array', 'np.array', (['dists'], {}), '(dists)\n', (11204, 11211), True, 'import numpy as np\n'), ((15049, 15073), 'numpy.array', 'np.array', (['self.pdfs[acq]'], {}), '(self.pdfs[acq])\n', (15057, 15073), True, 'import numpy as np\n'), ((15107, 15127), 'numpy.sum', 'np.sum', (['pdfs'], {'axis': '(0)'}), '(pdfs, axis=0)\n', (15113, 15127), True, 'import numpy as np\n'), ((15878, 15900), 'numpy.sum', 'np.sum', (['discrs'], {'axis': '(0)'}), '(discrs, axis=0)\n', (15884, 15900), True, 'import numpy as np\n'), ((15943, 15964), 'numpy.sum', 'np.sum', (['varis'], {'axis': '(0)'}), '(varis, axis=0)\n', (15949, 15964), True, 'import numpy as np\n'), ((16074, 16092), 'numpy.array', 'np.array', (['ev_theta'], {}), '(ev_theta)\n', (16082, 16092), True, 'import numpy as np\n'), ((16143, 16161), 'numpy.array', 'np.array', (['ev_discr'], {}), '(ev_discr)\n', (16151, 16161), True, 'import numpy as np\n'), ((6478, 6533), 'numpy.random.uniform', 'np.random.uniform', (['self.bounds[d][0]', 'self.bounds[d][1]'], {}), '(self.bounds[d][0], self.bounds[d][1])\n', (6495, 6533), 
True, 'import numpy as np\n'), ((8256, 8285), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (8268, 8285), True, 'import matplotlib.pyplot as plt\n'), ((8612, 8622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8620, 8622), True, 'import matplotlib.pyplot as plt\n'), ((8725, 8787), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '(True)', 'sharey': '(True)', 'figsize': '(10, 10)'}), '(1, 2, sharex=True, sharey=True, figsize=(10, 10))\n', (8737, 8787), True, 'import matplotlib.pyplot as plt\n'), ((9087, 9099), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9097, 9099), True, 'import matplotlib.pyplot as plt\n'), ((9101, 9111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9109, 9111), True, 'import matplotlib.pyplot as plt\n'), ((9803, 9814), 'time.time', 'time.time', ([], {}), '()\n', (9812, 9814), False, 'import time, os\n')] |
import math
from collections import deque
import numpy as np
import pytest
from diff_evolution.algo import (
ConstantDE,
ConstantSuccessRuleDE,
RandomSuccessRuleDE,
init_population_uniform,
)
from diff_evolution.algo_control import AlgorithmControl
from diff_evolution.cec17_functions import cec17_test_func
# Absolute tolerance used when comparing found optima to known optima.
EPSILON = 1e-01
# Fixed RNG seed so the stochastic DE runs are reproducible.
SEED = 44
# Every DE variant below is exercised by each test via the fixture.
tested_algos = [
    ConstantDE,
    ConstantSuccessRuleDE,
    RandomSuccessRuleDE,
]
@pytest.fixture(params=tested_algos)
def algo_version(request):
    """Parametrized fixture: runs each test once per DE variant."""
    return request.param
@pytest.mark.parametrize(
    "func, bounds, expected",
    [(lambda x: x ** 2, [(-5, 5)], 0), (lambda x: (x - 1) ** 2 + 3, [(-5, 5)], 1.0)],
)
def test_basic_functions(algo_version, func, bounds, expected):
    """Each DE variant should locate the minimum of simple 1-D parabolas."""
    algorithm = algo_version(seed=SEED)
    control = AlgorithmControl(func, 1000, 1, expected)
    best = algorithm.run(control, bounds, init_population_uniform)
    assert np.allclose(best, np.array(expected), atol=EPSILON)
def test_alpine(algo_version):
    """Minimize the 2-D Alpine N.1 function; its optimum is 0 at the origin."""
    search_space = [(-5, 5)] * 2

    def alpine_one(point):
        return sum(abs(coord * np.sin(coord) + 0.1 * coord) for coord in point)

    algorithm = algo_version(seed=SEED)
    control = AlgorithmControl(alpine_one, 1000, 1, 0)
    best = algorithm.run(control, search_space, init_population_uniform, population_size=20)
    assert np.allclose(best, np.array([0, 0]), atol=0.2)
def test_cec(algo_version):
    """Minimize CEC-2017 F1 in 2-D and check the optimizer's location."""
    n_dim = 2
    search_space = [(-100, 100)] * n_dim

    def evaluate(point):
        # cec17_test_func returns a one-element fitness array.
        return cec17_test_func(point, dims=len(search_space), func_num=1)[0]

    algorithm = algo_version(seed=SEED)
    control = AlgorithmControl(evaluate, 100000, 1, 100)
    best = algorithm.run(control, search_space, init_population_uniform, population_size=20)
    # optimum location taken from shift_data_1.txt
    assert np.allclose(
        best,
        np.array([-5.5276398498228005e01, -7.0429559718086182e01]),
        atol=EPSILON,
    )
| [
"numpy.sin",
"pytest.mark.parametrize",
"diff_evolution.algo_control.AlgorithmControl",
"numpy.array",
"pytest.fixture"
] | [((444, 479), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'tested_algos'}), '(params=tested_algos)\n', (458, 479), False, 'import pytest\n'), ((535, 670), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func, bounds, expected"""', '[(lambda x: x ** 2, [(-5, 5)], 0), (lambda x: (x - 1) ** 2 + 3, [(-5, 5)], 1.0)\n ]'], {}), "('func, bounds, expected', [(lambda x: x ** 2, [(-5,\n 5)], 0), (lambda x: (x - 1) ** 2 + 3, [(-5, 5)], 1.0)])\n", (558, 670), False, 'import pytest\n'), ((797, 838), 'diff_evolution.algo_control.AlgorithmControl', 'AlgorithmControl', (['func', '(1000)', '(1)', 'expected'], {}), '(func, 1000, 1, expected)\n', (813, 838), False, 'from diff_evolution.algo_control import AlgorithmControl\n'), ((1277, 1317), 'diff_evolution.algo_control.AlgorithmControl', 'AlgorithmControl', (['alpine_one', '(1000)', '(1)', '(0)'], {}), '(alpine_one, 1000, 1, 0)\n', (1293, 1317), False, 'from diff_evolution.algo_control import AlgorithmControl\n'), ((1715, 1757), 'diff_evolution.algo_control.AlgorithmControl', 'AlgorithmControl', (['call_cec', '(100000)', '(1)', '(100)'], {}), '(call_cec, 100000, 1, 100)\n', (1731, 1757), False, 'from diff_evolution.algo_control import AlgorithmControl\n'), ((1042, 1060), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (1050, 1060), True, 'import numpy as np\n'), ((1440, 1456), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1448, 1456), True, 'import numpy as np\n'), ((1938, 1989), 'numpy.array', 'np.array', (['[-55.276398498228005, -70.42955971808618]'], {}), '([-55.276398498228005, -70.42955971808618])\n', (1946, 1989), True, 'import numpy as np\n'), ((1223, 1232), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1229, 1232), True, 'import numpy as np\n')] |
import math
import numpy as np
from random import randrange
from union_find import union, find, connected
from utils import get_possible_next_steps, get_non_connected_next_steps
from mock import get_maze
"""
Task: Try to find the route in the provided maze from origin (0,0) to destination (N-1,M-1).
N-number of rows, M-number of columns.
The maze is represented as a matrix of bits, where 0 represents an empty slot and 1 represents a wall.
# 0 -> empty
# 1 -> wall
Find the connected coordinates with value of 0 that connect from start to destination.
To solve the problem we will use the Disjoint Set (Union Find) algorithm.
"""
# Load the maze (matrix of bits: 0 = empty slot, 1 = wall).
maze = get_maze()
rows = np.shape(maze)[0]
columns = np.shape(maze)[1]
# start = maze[0][0]
# end = maze[rows-1][columns-1]
# The number of elements in this union find
size = rows * columns
if size <= 0:
    raise Exception("Size <= 0 is not allowed")
# Step 1
# construct a bijection (a mapping) between the coordinates of the matrix and integers in range [0, n).
# this will allow an array based union find, easy to work with.
# hashTable[i] holds the (row, column) coordinate for union-find index i.
hashTable = []
# data[i] points to the parent of i, if data[i] = i then i is a root node
data = []
hashIndex = 0
# Row-major enumeration: cell (r, c) maps to index r * columns + c, and
# every index starts as its own root (a singleton set).
for row in range(0, rows):
    for column in range(0, columns):
        hashTable.append((row, column))
        data.append(hashIndex)
        hashIndex += 1
# ------------------------------------------------------------------------
def find_next_steps(currect_index):
    """Return union-find indices of acceptable next steps from a cell.

    A step is acceptable when the neighbouring cell is reachable (not a
    wall, per get_possible_next_steps) and not already connected to the
    current cell in the union-find structure.
    """
    coordinate = hashTable[currect_index]
    candidates = get_possible_next_steps(maze, hashTable, coordinate)
    return get_non_connected_next_steps(data, currect_index, candidates)
# ------------------------------------------------------------------------
def run_union_find(onStepUpdate=None):
    """Random walk with backtracking until maze start and end are connected.

    Repeatedly unions the current cell with a randomly chosen acceptable
    neighbour; on a dead end it backtracks through the recorded steps to
    the most recent cell that still has unexplored neighbours.

    onStepUpdate: optional callback receiving {"status", "value"} dicts
    ("NEXT_STEP" or "DEAD_END" with the cell coordinate).
    """
    # start from the start of the maze and look for the next connection
    currect_index = 0 # index in the data array
    # while the start and end of the maze are not connected
    # try to find the next connected item of the path
    steps = []
    while not connected(data, 0, size - 1):
        # for currect cell get all surrounding coordinates
        # from these coordinates randomly select one as the next step,
        # but with the condition that this coordinate is not connected to the currect cell and is not a "WALL"
        # for every loop save the steps
        steps.append(currect_index)
        next_steps = find_next_steps(currect_index)
        if len(next_steps) == 0:
            """
            Dead end reached. Need to get back and look at previous connections next steps.
            """
            print(
                "Dead end at index:",
                currect_index,
                "and coordinate:",
                hashTable[currect_index],
            )
            if onStepUpdate:
                onStepUpdate(
                    {"status": "DEAD_END", "value": hashTable[currect_index]}
                )
            # Backtrack: scan earlier recorded steps (most recent first)
            # for one that still offers an unexplored, unconnected neighbour.
            prev_step = steps.index(currect_index) - 1
            while (
                prev_step >= 0 and len(find_next_steps(steps[prev_step])) == 0
            ):
                # go check for a new route starting from one step before the current one
                # loop until a node with possible next steps to be folowed
                prev_step -= 1
            if prev_step >= 0:
                print("Loogin for new route at index", steps[prev_step])
                currect_index = steps[prev_step]
                continue
            else:
                # No earlier cell has options left: the maze is unsolvable.
                print("Could not find a route from start to end... :(")
                break
        # get a random item from the array
        next_index = next_steps[randrange(len(next_steps))]
        union(data, currect_index, next_index)
        print("Iteration at index", currect_index)
        if onStepUpdate:
            onStepUpdate(
                {"status": "NEXT_STEP", "value": hashTable[currect_index]}
            )
        # prepare for next loop
        currect_index = next_index
    print("Iteration at last index", size - 1)
    print("--------------------------------------------------------")
    # append last index of the array
    steps.append(size - 1)
    # NOTE(review): `steps` records every visited cell, including dead-end
    # detours, so this is the exploration trace rather than the final path.
    step_coordinates = list(map(lambda item: hashTable[item], steps))
    print("Iteration traversed the following coordinates:")
    print(step_coordinates)
| [
"utils.get_non_connected_next_steps",
"union_find.union",
"utils.get_possible_next_steps",
"union_find.connected",
"mock.get_maze",
"numpy.shape"
] | [((644, 654), 'mock.get_maze', 'get_maze', ([], {}), '()\n', (652, 654), False, 'from mock import get_maze\n'), ((662, 676), 'numpy.shape', 'np.shape', (['maze'], {}), '(maze)\n', (670, 676), True, 'import numpy as np\n'), ((690, 704), 'numpy.shape', 'np.shape', (['maze'], {}), '(maze)\n', (698, 704), True, 'import numpy as np\n'), ((1603, 1656), 'utils.get_possible_next_steps', 'get_possible_next_steps', (['maze', 'hashTable', 'matrixCoord'], {}), '(maze, hashTable, matrixCoord)\n', (1626, 1656), False, 'from utils import get_possible_next_steps, get_non_connected_next_steps\n'), ((1674, 1744), 'utils.get_non_connected_next_steps', 'get_non_connected_next_steps', (['data', 'currect_index', 'possible_next_steps'], {}), '(data, currect_index, possible_next_steps)\n', (1702, 1744), False, 'from utils import get_possible_next_steps, get_non_connected_next_steps\n'), ((2164, 2192), 'union_find.connected', 'connected', (['data', '(0)', '(size - 1)'], {}), '(data, 0, size - 1)\n', (2173, 2192), False, 'from union_find import union, find, connected\n'), ((3819, 3857), 'union_find.union', 'union', (['data', 'currect_index', 'next_index'], {}), '(data, currect_index, next_index)\n', (3824, 3857), False, 'from union_find import union, find, connected\n')] |
from sklearn.neighbors import KDTree
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import os
import cv2
from obj_utils import save_obj_v
from obj_utils import OBJLoader
class kdtree(object):
    """Thin convenience wrapper around sklearn's KDTree for nearest-neighbour queries."""
    def __init__(self):
        # defaults: small leaves, single nearest neighbour per query
        self.leaf_size = 2
        self.k = 1
    def build(self, X):
        """Index the reference points X (n_samples x n_dims)."""
        self.tree = KDTree(X, leaf_size=self.leaf_size)
    def find(self, Y):
        """Return (dist, ind) of the k nearest indexed points for each row of Y."""
        dist, ind = self.tree.query(Y, k=self.k)
        return np.squeeze(np.array(dist)), np.squeeze(np.array(ind))
    def test(self):
        """Smoke test: query the index with a slice of its own points."""
        np.random.seed(0)
        pts = np.random.random((1000, 3))
        self.build(pts)
        dist, ind = self.find(pts[1:20])
        print(dist)
        print(ind)
def sym_problem(vnum,lr):
    """Build a TF1 graph that fits a reflection plane (A, B, C, D) to a mesh.

    Each input vertex p is reflected through the plane A*x+B*y+C*z+D=0 and
    the weighted L1 distance between the reflected vertices and externally
    supplied correspondences is minimized with Adam.

    :param vnum: number of vertices fed through the graph
    :param lr: Adam learning rate
    :return: dict of graph nodes keyed 'opt', 'loss', 'output', 'input',
             'corres', 'weight', 'param'
    """
    # plane parameters, initialized to the x = 0 mirror plane
    param=tf.Variable(initial_value=np.array([1,0,0,1]),trainable=True,dtype=tf.float32,name='param')
    A,B,C,D=tf.unstack(param,axis=-1)
    input=tf.placeholder(dtype=tf.float32,shape=[vnum,3],name='input')
    x,y,z=tf.unstack(input,axis=-1)
    # s = (n.p + D) / |n|^2, clamped away from zero for numerical stability
    norm=tf.maximum(tf.reduce_sum(A*A+B*B+C*C),1e-8)
    s=(A*x+B*y+C*z+D)/norm
    # reflection of p through the plane: p' = p - 2*s*n
    X = x - 2 * s * A
    Y = y - 2 * s * B
    Z = z - 2 * s * C
    output=tf.stack([X,Y,Z],axis=-1)
    corres = tf.placeholder(dtype=tf.float32, shape=[vnum, 3], name='corres')
    # bug fix: this placeholder was previously (mis)named 'input', which TF
    # silently uniquified to 'input_1'; name it after what it holds
    weight = tf.placeholder(dtype=tf.float32, shape=[vnum], name='weight')
    # per-vertex L1 distance, weighted and normalized by the total weight
    loss_all = tf.reduce_sum(tf.abs(output-corres),axis=-1)*weight
    loss = tf.reduce_sum(loss_all)/tf.maximum(tf.reduce_sum(weight),1.0)
    tvar=tf.trainable_variables()
    optimizer = tf.train.AdamOptimizer(lr)
    opt = optimizer.minimize(loss, var_list=tvar)
    print('===='*5)
    for v in tvar:
        print(v.op.name,v.shape)
    print('===='*5)
    print(input)
    print(output)
    print(corres)
    print(weight)
    nodes={}
    nodes['opt']=opt
    nodes['loss'] = loss
    nodes['output'] = output
    nodes['input'] = input
    nodes['corres'] = corres
    nodes['weight'] = weight
    nodes['param'] = param
    return nodes
def get_context():
    """Create a TF session with a fixed graph seed, on-demand GPU memory growth,
    and all global/local variables initialized."""
    tf.set_random_seed(0)
    cfg = tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    sess = tf.Session(config=cfg)
    init_ops = [tf.global_variables_initializer(), tf.local_variables_initializer()]
    sess.run(init_ops)
    return sess
def save_sym_pair():
    """Match each vertex of 99_out.obj to its nearest vertex in ff.obj and
    write the (index, nearest-index) pairs to testdata/sym.txt."""
    ref = OBJLoader()
    ref.load_obj("testdata/ff.obj")
    query = OBJLoader()
    query.load_obj("testdata/99_out.obj")
    finder = kdtree()
    finder.build(ref.v)
    dist, ind = finder.find(query.v)
    n = ind.shape[0]
    with open('testdata/sym.txt', 'w') as f:
        f.write('%d\n' % (n))
        for i in range(n):
            f.write('%d %d\n' % (i, ind[i]))
def save_neis():
    """Write the 4 nearest neighbours of every ff.obj vertex to testdata/nei.txt
    (the nearest hit is the vertex itself, so columns 1..4 are the true neighbours)."""
    mesh = OBJLoader()
    mesh.load_obj("testdata/ff.obj")
    finder = kdtree()
    finder.k = 5  # self + 4 neighbours
    finder.build(mesh.v)
    dist, ind = finder.find(mesh.v)
    n = ind.shape[0]
    with open('testdata/nei.txt', 'w') as f:
        f.write('%d\n' % (n))
        for i in range(n):
            f.write('%d %d %d %d\n' % (ind[i, 1], ind[i, 2], ind[i, 3], ind[i, 4]))
if __name__=='__main__':
    # save_sym_pair()
    # Precompute the vertex-neighbour table, then bail out: the symmetry-plane
    # optimization below is currently disabled by the assert and kept for reference.
    save_neis()
    assert (0)  # intentional early exit — code below is not executed
    np.random.seed(0)
    # load the mesh whose symmetry plane we want to fit
    mobj=OBJLoader()
    mobj.load_obj("testdata/ff.obj")
    v=mobj.v
    vnum=v.shape[0]
    # KD-tree over the original vertices, used to find correspondences
    mkd=kdtree()
    mkd.build(v)
    # build the reflection-plane optimization graph
    nodes=sym_problem(vnum=vnum,lr=0.1)
    opt=nodes['opt']
    loss=nodes['loss']
    output=nodes['output']
    inputs=nodes['input']
    corres=nodes['corres']
    weight=nodes['weight']
    param=nodes['param']
    sess=get_context()
    savedir='testdata/save/'
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    Nt=100
    for i in range(Nt):
        # reflect the mesh through the current plane estimate
        [out]=sess.run([output],feed_dict={inputs:v})
        # nearest original vertex for each reflected vertex = correspondence
        dist,ind=mkd.find(out)
        # down-weight outliers: only pairs closer than 2x the mean distance count
        meandist=np.mean(dist)*2.0
        w=(dist<meandist)*1.0
        nearv=v[ind,:]
        # one Adam step on the weighted reflection loss
        [_,ls,p] = sess.run([opt,loss,param], feed_dict={inputs: v,corres:nearv, weight:w } )
        #print('meandist:%.1f'%(meandist))
        print(i,'validNum:%d, ls:%.2f ,p:[%.3f,%.3f,%.3f,%.3f]'%(np.sum(w),ls,p[0],p[1],p[2],p[3]))
        if i==0 or i==Nt-1:
            # snapshot first/last iterations for visual inspection
            save_obj_v(out,savedir+str(i)+"_out.obj")
            save_obj_v(nearv,savedir+str(i)+"_near.obj")
| [
"tensorflow.unstack",
"tensorflow.local_variables_initializer",
"tensorflow.reduce_sum",
"numpy.array",
"tensorflow.set_random_seed",
"os.path.exists",
"numpy.mean",
"numpy.random.random",
"tensorflow.placeholder",
"tensorflow.Session",
"sklearn.neighbors.KDTree",
"numpy.random.seed",
"os.mk... | [((881, 907), 'tensorflow.unstack', 'tf.unstack', (['param'], {'axis': '(-1)'}), '(param, axis=-1)\n', (891, 907), True, 'import tensorflow as tf\n'), ((918, 981), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[vnum, 3]', 'name': '"""input"""'}), "(dtype=tf.float32, shape=[vnum, 3], name='input')\n", (932, 981), True, 'import tensorflow as tf\n'), ((989, 1015), 'tensorflow.unstack', 'tf.unstack', (['input'], {'axis': '(-1)'}), '(input, axis=-1)\n', (999, 1015), True, 'import tensorflow as tf\n'), ((1173, 1201), 'tensorflow.stack', 'tf.stack', (['[X, Y, Z]'], {'axis': '(-1)'}), '([X, Y, Z], axis=-1)\n', (1181, 1201), True, 'import tensorflow as tf\n'), ((1213, 1277), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[vnum, 3]', 'name': '"""corres"""'}), "(dtype=tf.float32, shape=[vnum, 3], name='corres')\n", (1227, 1277), True, 'import tensorflow as tf\n'), ((1291, 1351), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[vnum]', 'name': '"""input"""'}), "(dtype=tf.float32, shape=[vnum], name='input')\n", (1305, 1351), True, 'import tensorflow as tf\n'), ((1503, 1527), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1525, 1527), True, 'import tensorflow as tf\n'), ((1544, 1570), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (1566, 1570), True, 'import tensorflow as tf\n'), ((2028, 2049), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(0)'], {}), '(0)\n', (2046, 2049), True, 'import tensorflow as tf\n'), ((2063, 2079), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2077, 2079), True, 'import tensorflow as tf\n'), ((2134, 2159), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2144, 2159), True, 'import tensorflow as tf\n'), ((2290, 2301), 'obj_utils.OBJLoader', 'OBJLoader', ([], {}), '()\n', (2299, 2301), False, 'from 
obj_utils import OBJLoader\n'), ((2363, 2374), 'obj_utils.OBJLoader', 'OBJLoader', ([], {}), '()\n', (2372, 2374), False, 'from obj_utils import OBJLoader\n'), ((2687, 2698), 'obj_utils.OBJLoader', 'OBJLoader', ([], {}), '()\n', (2696, 2698), False, 'from obj_utils import OBJLoader\n'), ((3109, 3126), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3123, 3126), True, 'import numpy as np\n'), ((3136, 3147), 'obj_utils.OBJLoader', 'OBJLoader', ([], {}), '()\n', (3145, 3147), False, 'from obj_utils import OBJLoader\n'), ((345, 380), 'sklearn.neighbors.KDTree', 'KDTree', (['X'], {'leaf_size': 'self.leaf_size'}), '(X, leaf_size=self.leaf_size)\n', (351, 380), False, 'from sklearn.neighbors import KDTree\n'), ((585, 602), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (599, 602), True, 'import numpy as np\n'), ((615, 642), 'numpy.random.random', 'np.random.random', (['(1000, 3)'], {}), '((1000, 3))\n', (631, 642), True, 'import numpy as np\n'), ((1036, 1072), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(A * A + B * B + C * C)'], {}), '(A * A + B * B + C * C)\n', (1049, 1072), True, 'import tensorflow as tf\n'), ((1431, 1454), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss_all'], {}), '(loss_all)\n', (1444, 1454), True, 'import tensorflow as tf\n'), ((3534, 3557), 'os.path.exists', 'os.path.exists', (['savedir'], {}), '(savedir)\n', (3548, 3557), False, 'import os\n'), ((3567, 3584), 'os.mkdir', 'os.mkdir', (['savedir'], {}), '(savedir)\n', (3575, 3584), False, 'import os\n'), ((477, 490), 'numpy.array', 'np.array', (['ind'], {}), '(ind)\n', (485, 490), True, 'import numpy as np\n'), ((516, 530), 'numpy.array', 'np.array', (['dist'], {}), '(dist)\n', (524, 530), True, 'import numpy as np\n'), ((803, 825), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (811, 825), True, 'import numpy as np\n'), ((1382, 1405), 'tensorflow.abs', 'tf.abs', (['(output - corres)'], {}), '(output - corres)\n', (1388, 1405), True, 
'import tensorflow as tf\n'), ((1466, 1487), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weight'], {}), '(weight)\n', (1479, 1487), True, 'import tensorflow as tf\n'), ((2174, 2207), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2205, 2207), True, 'import tensorflow as tf\n'), ((2208, 2240), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2238, 2240), True, 'import tensorflow as tf\n'), ((3722, 3735), 'numpy.mean', 'np.mean', (['dist'], {}), '(dist)\n', (3729, 3735), True, 'import numpy as np\n'), ((4002, 4011), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (4008, 4011), True, 'import numpy as np\n')] |
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from metrics import accuracy_score
from model_slection import train_test_splice
from preprocessing import StandardScaler
class LogisticRegression:
    """Binary logistic regression trained by batch or stochastic gradient descent."""
    def __init__(self):
        self.coef_ = None       # theta[1..n]: feature weights
        self.intercept_ = None  # theta[0]: bias term
        self._theta = None      # full parameter vector [intercept, *coef]

    def _sigmoid(self, t):
        """Logistic function 1 / (1 + exp(-t))."""
        return 1. / (1. + np.exp(-t))

    def fit(self, X_train, y_train, eta=0.01, n_iters=1e4):
        """Train by batch gradient descent on the log-loss.

        :param X_train: (n_samples, n_features) training data
        :param y_train: (n_samples,) binary labels in {0, 1}
        :param eta: learning rate
        :param n_iters: maximum number of gradient steps
        :return: self
        """
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"

        def J(theta, X_b, y):
            # mean negative log-likelihood; inf if evaluation overflows
            y_hat = self._sigmoid(X_b.dot(theta))
            try:
                return - np.sum(y*np.log(y_hat) + (1-y)*np.log(1-y_hat)) / len(X_b)
            except Exception:
                return float('inf')

        def DJ(theta, X_b, y):
            # gradient of the mean log-loss
            return X_b.T.dot(self._sigmoid(X_b.dot(theta)) - y) / len(X_b)

        def gradient_descent(X_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = DJ(theta, X_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                # stop once the loss no longer improves measurably
                if (abs(J(theta, X_b, y) - J(last_theta, X_b, y)) < epsilon):
                    break
                cur_iter += 1
            return theta

        X_b = np.hstack([np.ones([len(X_train), 1]), X_train])
        initial_theta = np.zeros(X_b.shape[1])
        self._theta = gradient_descent(X_b, y_train, initial_theta, eta, n_iters)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def fit_sgd(self, X_train, y_train, n_iters=5, t0=5, t1=50):
        """Train by stochastic gradient descent on the log-loss.

        :param n_iters: number of full passes (epochs) over the training set
        :param t0, t1: learning-rate schedule eta(t) = t0 / (t + t1)
        :return: self
        """
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train"
        assert n_iters >= 1

        def dj_sgd(theta, X_b_i, y_i):
            # single-sample log-loss gradient
            # (bug fix: previously used the linear-regression gradient
            #  X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2., i.e. no sigmoid)
            return X_b_i * (self._sigmoid(X_b_i.dot(theta)) - y_i)

        def sgd(X_b, y, initial_theta, n_iters, t0=5, t1=50):
            def learning_rate(t):
                return t0 / (t + t1)
            theta = initial_theta
            m = len(X_b)
            for cur_iter in range(n_iters):
                # re-shuffle once per epoch
                indexes = np.random.permutation(m)
                X_b_new = X_b[indexes]
                y_new = y[indexes]
                for i in range(m):
                    gradient = dj_sgd(theta, X_b_new[i], y_new[i])
                    # bug fix: the global step is cur_iter*m + i (not n_iters*m + i),
                    # so the learning rate actually decays across epochs
                    theta = theta - learning_rate(cur_iter * m + i) * gradient
            return theta

        X_b = np.hstack([np.ones((len(X_train), 1)), X_train])
        initial_theta = np.random.rand(X_b.shape[1])
        self._theta = sgd(X_b, y_train, initial_theta, n_iters, t0, t1)
        self.intercept_ = self._theta[0]
        self.coef_ = self._theta[1:]
        return self

    def predict_proba(self, X_predict):
        """Return P(y=1 | x) for each row of X_predict."""
        X_b = np.hstack([np.ones((len(X_predict), 1)), X_predict])
        return self._sigmoid(X_b.dot(self._theta))

    def predict(self, X_predict):
        """Return hard 0/1 predictions for X_predict (threshold 0.5)."""
        assert self.intercept_ is not None and self.coef_ is not None, \
            "must fit before predict!"
        assert X_predict.shape[1] == len(self.coef_), \
            "the feature number of X_predict must be equal to X_train"
        proba = self.predict_proba(X_predict)
        return np.array(proba >= 0.5, dtype='int')

    def score(self, X_test, y_test):
        """Return classification accuracy on (X_test, y_test)."""
        y_predict = self.predict(X_test)
        return accuracy_score(y_test, y_predict)
def plot(X, y):
    """Scatter the two classes of 2-d points: class 0 in red, class 1 in blue."""
    for label, colour in ((0, 'red'), (1, 'blue')):
        mask = y == label
        plt.scatter(X[mask, 0], X[mask, 1], color=colour)
    plt.show()
def main():
    """Fit logistic regression on the first two iris classes/features and report accuracy."""
    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    # keep only classes 0/1 and the first two features (binary, 2-d problem)
    X = X[y < 2, :2]
    y = y[y < 2]
    plot(X, y)
    # NOTE: this project's splitter returns (X_train, y_train, X_test, y_test)
    X_train, y_train, X_test, y_test = train_test_splice(X, y, seed=666)
    clf = LogisticRegression()
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train_std = scaler.transform(X_train)
    X_test_std = scaler.transform(X_test)
    clf.fit(X_train_std, y_train)
    score = clf.score(X_test_std, y_test)
    print("intercept_:", clf.intercept_)
    print("coef_:", clf.coef_)
    print("score: ", score)


if __name__ == "__main__":
    main()
| [
"sklearn.datasets.load_iris",
"numpy.random.rand",
"matplotlib.pyplot.show",
"numpy.random.permutation",
"numpy.log",
"metrics.accuracy_score",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"preprocessing.StandardScaler",
"matplotlib.pyplot.scatter",
"model_slection.train_test_splice"
] | [((4118, 4170), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[y == 0, 0]', 'X[y == 0, 1]'], {'color': '"""red"""'}), "(X[y == 0, 0], X[y == 0, 1], color='red')\n", (4129, 4170), True, 'import matplotlib.pyplot as plt\n'), ((4171, 4224), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[y == 1, 0]', 'X[y == 1, 1]'], {'color': '"""blue"""'}), "(X[y == 1, 0], X[y == 1, 1], color='blue')\n", (4182, 4224), True, 'import matplotlib.pyplot as plt\n'), ((4225, 4235), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4233, 4235), True, 'import matplotlib.pyplot as plt\n'), ((4262, 4282), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (4280, 4282), False, 'from sklearn import datasets\n'), ((4413, 4446), 'model_slection.train_test_splice', 'train_test_splice', (['X', 'y'], {'seed': '(666)'}), '(X, y, seed=666)\n', (4430, 4446), False, 'from model_slection import train_test_splice\n'), ((4490, 4506), 'preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4504, 4506), False, 'from preprocessing import StandardScaler\n'), ((1645, 1667), 'numpy.zeros', 'np.zeros', (['X_b.shape[1]'], {}), '(X_b.shape[1])\n', (1653, 1667), True, 'import numpy as np\n'), ((3070, 3098), 'numpy.random.rand', 'np.random.rand', (['X_b.shape[1]'], {}), '(X_b.shape[1])\n', (3084, 3098), True, 'import numpy as np\n'), ((3883, 3918), 'numpy.array', 'np.array', (['(proba >= 0.5)'], {'dtype': '"""int"""'}), "(proba >= 0.5, dtype='int')\n", (3891, 3918), True, 'import numpy as np\n'), ((4063, 4096), 'metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_predict'], {}), '(y_test, y_predict)\n', (4077, 4096), False, 'from metrics import accuracy_score\n'), ((460, 470), 'numpy.exp', 'np.exp', (['(-t)'], {}), '(-t)\n', (466, 470), True, 'import numpy as np\n'), ((2611, 2635), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (2632, 2635), True, 'import numpy as np\n'), ((866, 879), 'numpy.log', 'np.log', (['y_hat'], {}), '(y_hat)\n', 
(872, 879), True, 'import numpy as np\n'), ((888, 905), 'numpy.log', 'np.log', (['(1 - y_hat)'], {}), '(1 - y_hat)\n', (894, 905), True, 'import numpy as np\n')] |
import dask.array
import logging
import numpy as np
import scipy.sparse
import sparse
from typing import List
try:
import anndata
try:
from anndata.base import Raw
except ImportError:
from anndata import Raw
except ImportError:
anndata = None
Raw = None
logger = logging.getLogger(__name__)
class InputDataBase:
    """
    Base class for all input data types.

    Normalizes several input containers (ndarray, scipy CSR, dask array,
    AnnData/Raw, another InputDataBase) into a single matrix ``self.x`` of
    shape (observations, features), optionally wrapped as a chunked dask array.
    """
    features: List[str]
    observations: List[str]
    chunk_size_cells: int
    chunk_size_genes: int

    def __init__(
            self,
            data,
            observation_names=None,
            feature_names=None,
            chunk_size_cells: int = 100000,
            chunk_size_genes: int = 100,
            as_dask: bool = True,
            cast_dtype=None
    ):
        """
        Create a new InputData object.

        :param data: Some data object.
            Can be either:

                - np.ndarray: NumPy array containing the raw data
                - scipy.sparse.csr_matrix or dask array of the raw data
                - anndata.AnnData / anndata Raw: count data is taken from ``.X``
                - InputDataBase: its ``.x`` matrix is reused
        :param observation_names: (optional) names of the observations.
        :param feature_names: (optional) names of the features.
        :param chunk_size_cells: dask chunk size along the observation axis
        :param chunk_size_genes: dask chunk size along the feature axis
        :param as_dask: if True, wrap the data as a chunked dask array
        :param cast_dtype: data type of all data; should be either float32 or float64
        :return: InputData object
        """
        self.observations = observation_names
        self.features = feature_names
        # unwrap the supported container types into a raw matrix
        if isinstance(data, np.ndarray) or \
                isinstance(data, scipy.sparse.csr_matrix) or \
                isinstance(data, dask.array.core.Array):
            self.x = data
        elif isinstance(data, anndata.AnnData) or isinstance(data, Raw):
            self.x = data.X
        elif isinstance(data, InputDataBase):
            self.x = data.x
        else:
            raise ValueError("type of data %s not recognized" % type(data))
        if as_dask:
            # materialize first so re-wrapping below uses the requested chunking
            if isinstance(self.x, dask.array.core.Array):
                self.x = self.x.compute()
            # Need to wrap dask around the COO matrix version of the sparse package if matrix is sparse.
            if isinstance(self.x, scipy.sparse.spmatrix):
                self.x = dask.array.from_array(
                    sparse.COO.from_scipy_sparse(
                        self.x.astype(cast_dtype if cast_dtype is not None else self.x.dtype)
                    ),
                    chunks=(chunk_size_cells, chunk_size_genes),
                    asarray=False
                )
            else:
                self.x = dask.array.from_array(
                    self.x.astype(cast_dtype if cast_dtype is not None else self.x.dtype),
                    chunks=(chunk_size_cells, chunk_size_genes),
                )
        else:
            # non-dask path: materialize dask input and cast if requested
            if isinstance(self.x, dask.array.core.Array):
                self.x = self.x.compute()
            if cast_dtype is not None:
                self.x = self.x.astype(cast_dtype)

        # boolean mask over features whose column sums to zero
        self._feature_allzero = np.sum(self.x, axis=0) == 0
        self.chunk_size_cells = chunk_size_cells
        self.chunk_size_genes = chunk_size_genes

    @property
    def num_observations(self):
        # number of rows (cells/observations)
        return self.x.shape[0]

    @property
    def num_features(self):
        # number of columns (genes/features)
        return self.x.shape[1]

    @property
    def feature_isnonzero(self):
        # mask of features with at least one nonzero entry
        return ~self._feature_allzero

    @property
    def feature_isallzero(self):
        # mask of features whose entire column is zero
        return self._feature_allzero

    def fetch_x_dense(self, idx):
        """Return the dense rows selected by idx; requires self.x to be a plain ndarray."""
        assert isinstance(self.x, np.ndarray), "tried to fetch dense from non ndarray"

        return self.x[idx, :]

    def fetch_x_sparse(self, idx):
        """Return the rows selected by idx in COO-style pieces:
        (nonzero indices, nonzero values, selection shape); requires CSR input."""
        assert isinstance(self.x, scipy.sparse.csr_matrix), "tried to fetch sparse from non csr_matrix"

        data = self.x[idx, :]

        data_idx = np.asarray(np.vstack(data.nonzero()).T, np.int64)
        data_val = np.asarray(data.data, np.float64)
        data_shape = np.asarray(data.shape, np.int64)

        if idx.shape[0] == 1:
            # single-row selection: drop the leading axis
            data_val = np.squeeze(data_val, axis=0)
            data_idx = np.squeeze(data_idx, axis=0)

        return data_idx, data_val, data_shape
| [
"logging.getLogger",
"numpy.sum",
"numpy.asarray",
"numpy.squeeze"
] | [((301, 328), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (318, 328), False, 'import logging\n'), ((3909, 3942), 'numpy.asarray', 'np.asarray', (['data.data', 'np.float64'], {}), '(data.data, np.float64)\n', (3919, 3942), True, 'import numpy as np\n'), ((3964, 3996), 'numpy.asarray', 'np.asarray', (['data.shape', 'np.int64'], {}), '(data.shape, np.int64)\n', (3974, 3996), True, 'import numpy as np\n'), ((3047, 3069), 'numpy.sum', 'np.sum', (['self.x'], {'axis': '(0)'}), '(self.x, axis=0)\n', (3053, 3069), True, 'import numpy as np\n'), ((4051, 4079), 'numpy.squeeze', 'np.squeeze', (['data_val'], {'axis': '(0)'}), '(data_val, axis=0)\n', (4061, 4079), True, 'import numpy as np\n'), ((4103, 4131), 'numpy.squeeze', 'np.squeeze', (['data_idx'], {'axis': '(0)'}), '(data_idx, axis=0)\n', (4113, 4131), True, 'import numpy as np\n')] |
import numpy as np
from astropy import wcs
def makeGaussian(size, fwhm=3, center=None):
    """Return a size x size array sampling a 2-d Gaussian with the given FWHM.

    The peak (value 1.0) sits at ``center`` = (x, y), or at the middle pixel
    (size // 2, size // 2) when ``center`` is None.
    """
    if center is None:
        cx = cy = size // 2
    else:
        cx, cy = center[0], center[1]
    xs = np.arange(size, dtype=float)
    ys = xs[:, np.newaxis]
    r2 = (xs - cx) ** 2 + (ys - cy) ** 2
    # exp(-4 ln2 r^2 / fwhm^2) halves exactly at r = fwhm / 2
    return np.exp(-4 * np.log(2) * r2 / fwhm ** 2)
def makeWCS(coords=[0.,0.],coord_types=["RA---TAN","DEC--TAN"],xsize=1,ysize=1,pa=0.,scale=[1.,1.],sip=None):
    """Build a 2-axis astropy WCS with the given centre coordinates,
    pixel scale (arcsec) and position angle (degrees)."""
    pa_rad = np.radians(pa % 360.)
    cpa, spa = np.cos(pa_rad), np.sin(pa_rad)
    w = wcs.WCS(naxis=2)
    w.wcs.ctype = coord_types
    # reference pixel at the (floored) image centre
    w.wcs.crpix = [int(np.floor(xsize / 2.)), int(np.floor(ysize / 2.))]
    w.wcs.crval = coords
    w.wcs.cdelt = np.array(scale) / 3600.  # arcsec -> degrees
    # rotation matrix for the position angle
    w.wcs.pc = np.array([[cpa, -spa], [spa, cpa]])
    if sip is not None:
        w.sip = sip
    return w
def verifyData(dat1, dat2):
    """Assert two 2-d arrays have identical shape and agree pixel-by-pixel."""
    assert dat1.shape[0] == dat2.shape[0]
    assert dat1.shape[1] == dat2.shape[1]
    # compare element-wise so each offending pixel gets reported by verifyPoint
    for row in range(dat1.shape[0]):
        for col in range(dat1.shape[1]):
            verifyPoint(dat1, dat2, col, row)
def verifyPoint(dat1, dat2, x, y, threshold=1.e-3):
    """Assert dat1 and dat2 agree at pixel (x, y) to within threshold,
    printing the offending values on mismatch before failing."""
    diff = abs(dat1[y, x] - dat2[y, x])
    if diff > threshold:
        print(x, y, dat1[y, x], dat2[y, x], diff)
    assert diff <= threshold
def verifyImage(im1, im2):
    """Assert two image objects agree in bookkeeping, geometry, pointing and header."""
    # bookkeeping attributes, compared exactly
    assert im1.out_path == im2.out_path
    assert im1.name == im2.name
    assert im1.xsize == im2.xsize
    assert im1.ysize == im2.ysize
    # geometry and pointing, compared to 1e-3 tolerance
    np.testing.assert_allclose((im1.xscale, im1.yscale),
                               (im2.xscale, im2.yscale), atol=1e-3)
    assert im1.distorted == im2.distorted
    np.testing.assert_allclose((im1.ra, im1.dec, im1.pa),
                               (im2.ra, im2.dec, im2.pa), atol=1e-3)
    assert im1.history == im2.history
    assert im1.zeropoint == im2.zeropoint
    # header cards: numeric pointing keys get a tolerance, the rest are exact
    for key in ('EQUINOX', 'VAFACTOR', 'NAXIS1', 'NAXIS2'):
        assert im1.header[key] == im2.header[key]
    for key in ('PA_APER', 'ORIENTAT', 'RA_APER', 'DEC_APER'):
        np.testing.assert_allclose(im1.header[key], im2.header[key], atol=1e-3)
def verifyParameters(image, results):
    """Assert an image object's attributes and header match a dict of expected values."""
    # bookkeeping attributes, compared exactly
    assert image.out_path == results['out_path']
    assert image.name == results['name']
    assert image.xsize == results['xsize']
    assert image.ysize == results['ysize']
    # geometry and pointing, compared to 1e-3 tolerance
    np.testing.assert_allclose(
        (image.xscale, image.yscale),
        (results['xscale'], results['yscale']), atol=1e-3)
    assert image.distorted == results['distorted']
    np.testing.assert_allclose(
        (image.ra, image.dec, image.pa),
        (results['ra'], results['dec'], results['pa']), atol=1e-3)
    assert image.history == results['history']
    assert image.zeropoint == results['zeropoint']
    # header cards: numeric pointing keys get a tolerance, the rest are exact
    assert image.header['EQUINOX'] == results['equinox']
    assert image.header['VAFACTOR'] == results['vafactor']
    for hkey, rkey in (('PA_APER', 'pa_aper'), ('ORIENTAT', 'orientat'),
                       ('RA_APER', 'ra_aper'), ('DEC_APER', 'dec_aper')):
        np.testing.assert_allclose(image.header[hkey], results[rkey], atol=1e-3)
    assert image.header['NAXIS1'] == results['naxis1']
    assert image.header['NAXIS2'] == results['naxis2']
| [
"numpy.radians",
"numpy.testing.assert_allclose",
"numpy.log",
"numpy.floor",
"numpy.array",
"astropy.wcs.WCS",
"numpy.arange"
] | [((98, 126), 'numpy.arange', 'np.arange', (['(0)', 'size', '(1)', 'float'], {}), '(0, size, 1, float)\n', (107, 126), True, 'import numpy as np\n'), ((442, 458), 'astropy.wcs.WCS', 'wcs.WCS', ([], {'naxis': '(2)'}), '(naxis=2)\n', (449, 458), False, 'from astropy import wcs\n'), ((715, 750), 'numpy.array', 'np.array', (['[[cpa, -spa], [spa, cpa]]'], {}), '([[cpa, -spa], [spa, cpa]])\n', (723, 750), True, 'import numpy as np\n'), ((1479, 1574), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(im1.xscale, im1.yscale)', '(im2.xscale, im2.yscale)'], {'atol': '(0.001)'}), '((im1.xscale, im1.yscale), (im2.xscale, im2.\n yscale), atol=0.001)\n', (1505, 1574), True, 'import numpy as np\n'), ((1611, 1708), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(im1.ra, im1.dec, im1.pa)', '(im2.ra, im2.dec, im2.pa)'], {'atol': '(0.001)'}), '((im1.ra, im1.dec, im1.pa), (im2.ra, im2.dec, im2\n .pa), atol=0.001)\n', (1637, 1708), True, 'import numpy as np\n'), ((1839, 1927), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["im1.header['PA_APER']", "im2.header['PA_APER']"], {'atol': '(0.001)'}), "(im1.header['PA_APER'], im2.header['PA_APER'],\n atol=0.001)\n", (1865, 1927), True, 'import numpy as np\n'), ((1989, 2079), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["im1.header['ORIENTAT']", "im2.header['ORIENTAT']"], {'atol': '(0.001)'}), "(im1.header['ORIENTAT'], im2.header['ORIENTAT'],\n atol=0.001)\n", (2015, 2079), True, 'import numpy as np\n'), ((2081, 2169), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["im1.header['RA_APER']", "im2.header['RA_APER']"], {'atol': '(0.001)'}), "(im1.header['RA_APER'], im2.header['RA_APER'],\n atol=0.001)\n", (2107, 2169), True, 'import numpy as np\n'), ((2171, 2261), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["im1.header['DEC_APER']", "im2.header['DEC_APER']"], {'atol': '(0.001)'}), "(im1.header['DEC_APER'], im2.header['DEC_APER'],\n 
atol=0.001)\n", (2197, 2261), True, 'import numpy as np\n'), ((2590, 2702), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(image.xscale, image.yscale)', "(results['xscale'], results['yscale'])"], {'atol': '(0.001)'}), "((image.xscale, image.yscale), (results['xscale'],\n results['yscale']), atol=0.001)\n", (2616, 2702), True, 'import numpy as np\n'), ((2749, 2872), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(image.ra, image.dec, image.pa)', "(results['ra'], results['dec'], results['pa'])"], {'atol': '(0.001)'}), "((image.ra, image.dec, image.pa), (results['ra'],\n results['dec'], results['pa']), atol=0.001)\n", (2775, 2872), True, 'import numpy as np\n'), ((3021, 3108), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["image.header['PA_APER']", "results['pa_aper']"], {'atol': '(0.001)'}), "(image.header['PA_APER'], results['pa_aper'],\n atol=0.001)\n", (3047, 3108), True, 'import numpy as np\n'), ((3169, 3258), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["image.header['ORIENTAT']", "results['orientat']"], {'atol': '(0.001)'}), "(image.header['ORIENTAT'], results['orientat'],\n atol=0.001)\n", (3195, 3258), True, 'import numpy as np\n'), ((3260, 3347), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["image.header['RA_APER']", "results['ra_aper']"], {'atol': '(0.001)'}), "(image.header['RA_APER'], results['ra_aper'],\n atol=0.001)\n", (3286, 3347), True, 'import numpy as np\n'), ((3349, 3438), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["image.header['DEC_APER']", "results['dec_aper']"], {'atol': '(0.001)'}), "(image.header['DEC_APER'], results['dec_aper'],\n atol=0.001)\n", (3375, 3438), True, 'import numpy as np\n'), ((600, 615), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (608, 615), True, 'import numpy as np\n'), ((641, 663), 'numpy.radians', 'np.radians', (['(pa % 360.0)'], {}), '(pa % 360.0)\n', (651, 663), True, 'import numpy as 
np\n'), ((679, 701), 'numpy.radians', 'np.radians', (['(pa % 360.0)'], {}), '(pa % 360.0)\n', (689, 701), True, 'import numpy as np\n'), ((512, 533), 'numpy.floor', 'np.floor', (['(xsize / 2.0)'], {}), '(xsize / 2.0)\n', (520, 533), True, 'import numpy as np\n'), ((536, 557), 'numpy.floor', 'np.floor', (['(ysize / 2.0)'], {}), '(ysize / 2.0)\n', (544, 557), True, 'import numpy as np\n'), ((279, 288), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (285, 288), True, 'import numpy as np\n')] |
import re
import copy
import numpy as np
import lmfit
from .noise import NoiseModel
from .population import Population
from .. import definitions as xrsdefs
from ..tools import compute_chi2
from ..tools.profiler import profile_keys, profile_pattern
# TODO: when params, settings, etc are changed,
# ensure all attributes remain valid,
# wrt constraints as well as wrt supported options.
class System(object):
    """Container for a scattering sample: a noise model plus named populations.

    Also carries fit settings/results (``fit_report``), 1-d profile features
    (``features``), and sample metadata such as the source wavelength.
    """
    # TODO: use caching to speed up repeated compute_intensity() evaluations

    def __init__(self,**kwargs):
        """Build a System; kwargs are forwarded to update_from_dict().

        A 'source_wavelength' kwarg, if present, is popped into the sample
        metadata before the remaining entries are processed.
        """
        self.populations = {}
        # fit settings and results; q_range limits objective evaluation
        self.fit_report = dict(
            error_weighted=True,
            logI_weighted=True,
            good_fit=False,
            q_range=[0.,float('inf')]
            )
        # 1-d profile features, keyed per xrsdkit.tools.profiler.profile_keys
        self.features = dict.fromkeys(profile_keys)
        src_wl = 1.
        # deep-copy so popping/updating does not mutate the caller's dict
        kwargs = copy.deepcopy(kwargs)
        if 'source_wavelength' in kwargs: src_wl = kwargs.pop('source_wavelength')
        # TODO: any other sample metadata items that should be handled from kwargs?
        self.sample_metadata = dict(
            experiment_id='',
            sample_id='',
            data_file='',
            source_wavelength=src_wl,
            time=0.,
            notes=''
            )
        self.noise_model = NoiseModel('flat')
        self.update_from_dict(kwargs)

    def to_dict(self):
        """Return a dict representation of this System
        (populations, noise, fit_report, sample_metadata, features)."""
        sd = {}
        for pop_nm,pop in self.populations.items():
            sd[pop_nm] = pop.to_dict()
        sd['noise'] = self.noise_model.to_dict()
        sd['fit_report'] = copy.deepcopy(self.fit_report)
        sd['sample_metadata'] = self.sample_metadata
        sd['features'] = self.features
        return sd

    def clone(self):
        """Return a new System built from this one's dict representation."""
        return System(**self.to_dict())

    def update_from_dict(self,d):
        """Update self from a dict keyed like the output of to_dict().

        Keys other than 'noise', 'features', 'fit_report' and
        'sample_metadata' are treated as population definitions:
        new names are added, existing ones are updated in place.
        """
        for pop_name,pd in d.items():
            if pop_name == 'noise':
                if not isinstance(pd,dict): pd = pd.to_dict()
                self.update_noise_model(pd)
            elif pop_name == 'features':
                self.features.update(pd)
            elif pop_name == 'fit_report':
                self.fit_report.update(pd)
            elif pop_name == 'sample_metadata':
                self.sample_metadata.update(pd)
            elif not pop_name in self.populations:
                if not isinstance(pd,dict): pd = pd.to_dict()
                self.populations[pop_name] = Population.from_dict(pd)
            else:
                if not isinstance(pd,dict): pd = pd.to_dict()
                self.populations[pop_name].update_from_dict(pd)

    def update_noise_model(self,noise_dict):
        """Update the noise model's type ('model') and/or its 'parameters' from a dict."""
        if 'model' in noise_dict:
            self.noise_model.set_model(noise_dict['model'])
        if 'parameters' in noise_dict:
            self.noise_model.update_parameters(noise_dict['parameters'])

    def update_params_from_dict(self,pd):
        """Update only parameter values (noise and per-population) from a nested dict."""
        for pop_name, popd in pd.items():
            if pop_name == 'noise':
                self.update_noise_model(popd)
            else:
                if 'parameters' in popd:
                    for param_name, paramd in popd['parameters'].items():
                        self.populations[pop_name].parameters[param_name].update(popd['parameters'][param_name])

    def set_q_range(self,q_min,q_max):
        """Set the [q_min, q_max] window used when evaluating the fit objective."""
        self.fit_report['q_range'] = [float(q_min),float(q_max)]

    def set_error_weighted(self,err_wtd):
        """Toggle weighting of the fit objective by intensity error estimates."""
        self.fit_report['error_weighted'] = bool(err_wtd)

    def set_logI_weighted(self,logI_wtd):
        """Toggle evaluation of the fit objective on log(I) instead of I."""
        self.fit_report['logI_weighted'] = bool(logI_wtd)

    def remove_population(self,pop_nm):
        """Remove the named population from the System."""
        # TODO: check for violated constraints
        # in absence of this population
        self.populations.pop(pop_nm)

    def add_population(self,pop_nm,structure,form,settings={},parameters={}):
        """Add a new population under `pop_nm` with the given structure, form,
        settings and parameters."""
        self.populations[pop_nm] = Population(structure,form,settings,parameters)

    @classmethod
    def from_dict(cls,d):
        """Build a System from a dict, as produced by to_dict()."""
        return cls(**d)

    def compute_intensity(self,q):
        """Computes scattering/diffraction intensity for some `q` values.

        The total intensity is the noise model's contribution plus the sum
        of each population's intensity at the sample's source wavelength.

        TODO: Document the equations.

        Parameters
        ----------
        q : array
            Array of q values at which intensities will be computed

        Returns
        -------
        I : array
            Array of scattering intensities for each of the input q values
        """
        I = self.noise_model.compute_intensity(q)
        for pop_name,pop in self.populations.items():
            I += pop.compute_intensity(q,self.sample_metadata['source_wavelength'])
        return I

    # TODO: take logI_weighted and error_weighted as optional inputs.
    # If values are provided, update the fit_report.
    def evaluate_residual(self,q,I,dI=None,I_comp=None):
        """Evaluate the fit residual against a measured pattern.

        The weighting options (error_weighted, logI_weighted) and the q_range
        window are read from self.fit_report, not passed as arguments.
        Only strictly positive intensities within q_range contribute.

        Parameters
        ----------
        q : array of float
            1d array of scattering vector magnitudes (1/Angstrom)
        I : array of float
            1d array of intensities corresponding to `q` values
        dI : array of float
            1d array of intensity error estimates for each `I` value;
            if None and error weighting is on, sqrt(I) is used as the estimate
        I_comp : array
            Optional array of computed intensity (for efficiency)-
            if provided, intensity is not re-computed

        Returns
        -------
        res : float
            Value of the residual
        """
        q_range = self.fit_report['q_range']
        if I_comp is None:
            I_comp = self.compute_intensity(q)
        idx_nz = (I>0)
        idx_fit = (idx_nz) & (q>=q_range[0]) & (q<=q_range[1])
        wts = np.ones(len(q))
        if self.fit_report['error_weighted']:
            if dI is None:
                # no error estimates given: assume Poisson-like dI = sqrt(I)
                dI = np.empty(I.shape)
                dI.fill(np.nan)
                dI[idx_fit] = np.sqrt(I[idx_fit])
            # NOTE(review): weights carry dI**2; how they enter the objective is
            # defined by tools.compute_chi2 (presumably inverse-variance) - confirm
            wts *= dI**2
        if self.fit_report['logI_weighted']:
            # log-space comparison also requires the computed intensity to be positive
            idx_fit = idx_fit & (I_comp>0)
            # NOTE: returning float('inf') raises a NaN exception within the minimization.
            #if not any(idx_fit):
            #    return float('inf')
            res = compute_chi2(
                np.log(I_comp[idx_fit]),
                np.log(I[idx_fit]),
                wts[idx_fit])
        else:
            res = compute_chi2(
                I_comp[idx_fit],
                I[idx_fit],
                wts[idx_fit])
        return res

    def lmf_evaluate(self,lmf_params,q,I,dI=None):
        """Objective callback for lmfit: load parameter values from an
        lmfit.Parameters object into this System, then return the residual."""
        new_params = unpack_lmfit_params(lmf_params)
        old_params = self.flatten_params()
        old_params.update(new_params)
        new_pd = unflatten_params(old_params)
        self.update_params_from_dict(new_pd)
        return self.evaluate_residual(q,I,dI)

    def pack_lmfit_params(self):
        """Flatten this System's parameters into an lmfit.Parameters object,
        carrying over values, bounds, fixed flags and constraint expressions."""
        p = self.flatten_params()
        lmfp = lmfit.Parameters()
        for pkey,pd in p.items():
            ks = pkey.split('__')
            vary_flag = bool(not pd['fixed'])
            p_bounds = copy.deepcopy(pd['bounds'])
            p_expr = copy.copy(pd['constraint_expr'])
            lmfp.add(pkey,value=pd['value'],vary=vary_flag,min=p_bounds[0],max=p_bounds[1])
            if p_expr:
                # constrained parameters are driven by their expression, not varied freely
                lmfp[pkey].set(vary=False)
                lmfp[pkey].set(expr=p_expr)
        return lmfp

    def flatten_params(self):
        """Return all parameter dicts keyed as '<population>__<param>'
        (noise parameters under 'noise__<param>')."""
        pd = {}
        for param_name,paramd in self.noise_model.parameters.items():
            pd['noise__'+param_name] = paramd
        for pop_name,pop in self.populations.items():
            for param_name,paramd in pop.parameters.items():
                pd[pop_name+'__'+param_name] = paramd
        return pd
def fit(sys,q,I,dI=None,
    error_weighted=None,logI_weighted=None,q_range=None):
    """Fit the I(q) pattern and return a System with optimized parameters.

    Parameters
    ----------
    sys : xrsdkit.system.System
        System object defining populations and species,
        as well as settings and bounds/constraints for parameters.
    q : array of float
        1d array of scattering vector magnitudes (1/Angstrom)
    I : array of float
        1d array of intensities corresponding to `q` values
    dI : array of float
        1d array of intensity error estimates for each `I` value
    error_weighted : bool
        Flag for weighting the objective with the I(q) error estimates.
    logI_weighted : bool
        Flag for evaluating the objective on log(I(q)) instead of I(q)
    q_range : list
        Two floats indicating the lower and
        upper q-limits for objective evaluation

    Returns
    -------
    sys_opt : xrsdkit.system.System
        Similar to input `sys`, but with fit-optimized parameters.
    """
    # the System to optimize starts as a copy of the input System
    sys_opt = System.from_dict(sys.to_dict())
    # if inputs were given to control the fit objective,
    # update sys_opt.fit_report with the new settings
    if error_weighted is not None:
        sys_opt.fit_report.update(error_weighted=error_weighted)
    if logI_weighted is not None:
        # BUGFIX: this previously assigned error_weighted, silently
        # discarding the caller's logI_weighted input
        sys_opt.fit_report.update(logI_weighted=logI_weighted)
    if q_range is not None:
        sys_opt.fit_report.update(q_range=q_range)
    obj_init = sys_opt.evaluate_residual(q,I,dI)
    lmf_params = sys_opt.pack_lmfit_params()
    lmf_res = lmfit.minimize(
        sys_opt.lmf_evaluate,
        lmf_params,method='nelder-mead',
        kws={'q':q,'I':I,'dI':dI}
        )
    # evaluate the optimized objective and a crude signal-to-noise metric
    fit_obj = sys_opt.evaluate_residual(q,I,dI)
    I_opt = sys_opt.compute_intensity(q)
    I_bg = I - I_opt
    snr = np.mean(I_opt)/np.std(I_bg)
    sys_opt.fit_report['converged'] = lmf_res.success
    sys_opt.fit_report['initial_objective'] = obj_init
    sys_opt.fit_report['final_objective'] = fit_obj
    sys_opt.fit_report['fit_snr'] = snr
    sys_opt.features = profile_pattern(q,I)
    return sys_opt
def unpack_lmfit_params(lmfit_params):
    """Convert an lmfit.Parameters object back into a flat parameter dict.

    Each entry becomes {'value', 'bounds', 'fixed', 'constraint_expr'}.
    A parameter driven by a constraint expression is never marked fixed,
    even though lmfit reports it as non-varying.
    """
    unpacked = {}
    for name, par in lmfit_params.items():
        fixed = (not par.vary) and not par._expr
        unpacked[name] = {
            'value': par.value,
            'bounds': [par.min, par.max],
            'fixed': fixed,
            'constraint_expr': par._expr,
            }
    return unpacked
def unflatten_params(flat_params):
    """Regroup a flat '<pop>__<param>' dict into a nested populations dict.

    Output shape: {pop_name: {'parameters': {param_name: param_dict}}}.
    """
    nested = {}
    for pkey, pdef in flat_params.items():
        parts = pkey.split('__')
        pop_name = parts[0]
        param_name = parts[1]
        pop_entry = nested.setdefault(pop_name, {})
        pop_entry.setdefault('parameters', {})[param_name] = pdef
    return nested
| [
"numpy.mean",
"copy.deepcopy",
"numpy.sqrt",
"numpy.log",
"numpy.empty",
"numpy.std",
"copy.copy",
"lmfit.Parameters",
"lmfit.minimize"
] | [((9552, 9659), 'lmfit.minimize', 'lmfit.minimize', (['sys_opt.lmf_evaluate', 'lmf_params'], {'method': '"""nelder-mead"""', 'kws': "{'q': q, 'I': I, 'dI': dI}"}), "(sys_opt.lmf_evaluate, lmf_params, method='nelder-mead', kws=\n {'q': q, 'I': I, 'dI': dI})\n", (9566, 9659), False, 'import lmfit\n'), ((827, 848), 'copy.deepcopy', 'copy.deepcopy', (['kwargs'], {}), '(kwargs)\n', (840, 848), False, 'import copy\n'), ((1521, 1551), 'copy.deepcopy', 'copy.deepcopy', (['self.fit_report'], {}), '(self.fit_report)\n', (1534, 1551), False, 'import copy\n'), ((7079, 7097), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (7095, 7097), False, 'import lmfit\n'), ((9804, 9818), 'numpy.mean', 'np.mean', (['I_opt'], {}), '(I_opt)\n', (9811, 9818), True, 'import numpy as np\n'), ((9819, 9831), 'numpy.std', 'np.std', (['I_bg'], {}), '(I_bg)\n', (9825, 9831), True, 'import numpy as np\n'), ((7235, 7262), 'copy.deepcopy', 'copy.deepcopy', (["pd['bounds']"], {}), "(pd['bounds'])\n", (7248, 7262), False, 'import copy\n'), ((7284, 7316), 'copy.copy', 'copy.copy', (["pd['constraint_expr']"], {}), "(pd['constraint_expr'])\n", (7293, 7316), False, 'import copy\n'), ((6001, 6018), 'numpy.empty', 'np.empty', (['I.shape'], {}), '(I.shape)\n', (6009, 6018), True, 'import numpy as np\n'), ((6081, 6100), 'numpy.sqrt', 'np.sqrt', (['I[idx_fit]'], {}), '(I[idx_fit])\n', (6088, 6100), True, 'import numpy as np\n'), ((6424, 6447), 'numpy.log', 'np.log', (['I_comp[idx_fit]'], {}), '(I_comp[idx_fit])\n', (6430, 6447), True, 'import numpy as np\n'), ((6465, 6483), 'numpy.log', 'np.log', (['I[idx_fit]'], {}), '(I[idx_fit])\n', (6471, 6483), True, 'import numpy as np\n')] |
#
# record.py
# Simple recorder and viewer element
#
from atom import Element
from atom.messages import Response, LogLevel
from threading import Thread
import time
import msgpack
import os
import matplotlib.pyplot as plt
import numpy as np
import math
# Where to store temporary recordings
TEMP_RECORDING_LOC = "/shared"
# Where to store permanent recordings
PERM_RECORDING_LOC = "/recordings"
# Recording extension
RECORDING_EXTENSION = ".atomrec"
# Default number of seconds to record for
DEFAULT_N_SEC = 10
# Interval at which we will poll the stream for entries, in seconds
POLL_INTERVAL = 0.1
# Max time to block for data
BLOCK_MS = 1000
# Active recording threads
active_recordings = {}
ATOM_HOST=os.getenv("ATOM_HOST", None)
def record_fn(name, n_entries, n_sec, perm, element, stream):
    '''
    Mainloop for a recording thread.

    Creates a dedicated element named "record_<name>" and appends entries
    from (element, stream) to the recording file as a concatenation of
    msgpack objects until one of: the requested entry count is reached,
    the requested duration elapses, the stream stops producing data, or
    the recording is cancelled (its name removed from active_recordings).

    Args:
        name: recording name; also the key in active_recordings
        n_entries: number of entries to record, or None to record by time
        n_sec: seconds to record for (used only when n_entries is None)
        perm: if True, store the file in the permanent location
        element: name of the element whose stream is being recorded
        stream: name of the stream to record
    '''
    global active_recordings
    # Make an element from the name
    record_elem = Element("record_" + name, host=ATOM_HOST)
    # Open the file for the recording
    filename = os.path.join(
        PERM_RECORDING_LOC if perm else TEMP_RECORDING_LOC, name + RECORDING_EXTENSION)
    try:
        record_file = open(filename, 'wb')
    except:
        record_elem.log(
            LogLevel.ERR, "Unable to open file {}".format(filename))
        del active_recordings[name]
        return
    # At the outer loop, we want to loop until we've been cancelled
    last_id = "$"
    intervals = 0
    entries_read = 0
    while name in active_recordings:
        # Read any new entries since the last one we processed
        data = record_elem.entry_read_since(element, stream, last_id, n=n_entries, block=BLOCK_MS)
        # If we got no data, then we should finish up
        if len(data) == 0:
            record_elem.log(
                LogLevel.ERR,
                "Recording {}: no data after {} entries read!".format(
                    name,
                    entries_read))
            break
        entries_read += len(data)
        # Pack each entry as an individual msgpack object and append it to
        # the file. Already-msgpack'd values simply nest as binary data.
        for entry in data:
            packed_data = msgpack.packb(entry, use_bin_type=True)
            # Write the packed data to file
            record_file.write(packed_data)
        if n_entries is not None:
            # Counting entries: stop once we've recorded enough.
            # BUGFIX: this previously decremented a misspelled name
            # ("n_enties"), raising UnboundLocalError in entry-count mode.
            n_entries -= len(data)
            if (n_entries <= 0):
                break
        else:
            # Counting time: stop once the polling intervals cover n_sec
            intervals += 1
            if (intervals * POLL_INTERVAL) >= n_sec:
                break
        # Sleep for the poll interval before the next read
        time.sleep(POLL_INTERVAL)
        # And update the last ID
        last_id = data[-1]["id"]
    # Deregister ourselves, unless the "stop" command already popped us
    if name in active_recordings:
        active_recordings.pop(name)
    # And we want to close the file
    record_file.close()
    # And log that we completed the recording
    record_elem.log(LogLevel.INFO, "Finished recording {} with {} entries read".format(
        name, entries_read))
def start_recording(data):
    '''
    Handler for the "start" command. data is a deserialized dict with:
        name: required string, name of the recording
        e:    required string, element whose stream is recorded
        s:    required string, stream to record
        t:    optional int, seconds to record for (default DEFAULT_N_SEC)
        n:    optional int, number of entries to record; takes precedence
              over t when both are given
        perm: optional bool, store the recording in the permanent location
    '''
    global active_recordings
    # Validate the required string fields
    if ("name" not in data) or (type(data["name"]) is not str):
        return Response(err_code=1, err_str="name must be in data", serialize=True)
    if ("e" not in data) or (type(data["e"]) is not str):
        return Response(err_code=2, err_str="element must be in data", serialize=True)
    if ("s" not in data) or (type(data["s"]) is not str):
        return Response(err_code=3, err_str="stream must be in data", serialize=True)
    name = data["name"]
    element = data["e"]
    stream = data["s"]
    # Refuse duplicate recording names
    if name in active_recordings:
        return Response(err_code=4, err_str="Name {} already in use".format(name), serialize=True)
    # Optional arguments with their defaults
    n_entries = data["n"] if ("n" in data) and (type(data["n"]) is int) else None
    n_sec = data["t"] if ("t" in data) and (type(data["t"]) is int) else DEFAULT_N_SEC
    perm = data["perm"] if ("perm" in data) and (type(data["perm"]) is bool) else False
    # A permanent recording requires the permanent location to be mounted
    if perm and not os.path.exists(PERM_RECORDING_LOC):
        return Response(err_code=5, err_str="Please mount {} in your docker-compose file".format(PERM_RECORDING_LOC), serialize=True)
    # Spawn the recording thread and register it before starting it
    thread = Thread(target=record_fn, args=(name, n_entries, n_sec, perm, element, stream,), daemon=True)
    active_recordings[name] = thread
    thread.start()
    # Make the response
    duration = "{} entries".format(n_entries) if n_entries != None else "{} seconds".format(n_sec)
    location = PERM_RECORDING_LOC if perm else TEMP_RECORDING_LOC
    return Response(
        "Started recording {} for {} and storing in {}".format(
            name, duration, location),
        serialize=True)
def stop_recording(data):
    '''
    Handler for the "stop" command. data is the recording name (string).

    Removes the recording from the active set, which signals its worker
    thread to exit, then joins the thread so the file is flushed/closed.
    '''
    # Active recordings
    global active_recordings
    # Make sure the recording is active
    if data not in active_recordings:
        return Response(err_code=1, err_str="Recording {} not active".format(data), serialize=True)
    # Popping the name tells the recording thread to stop...
    worker = active_recordings.pop(data)
    # ...and joining waits until it has finished writing
    worker.join()
    return Response("Success", serialize=True)
def wait_recording(data):
    '''
    Handler for the "wait" command. data is the recording name (string).

    Blocks until the named recording completes and reports how long the
    wait took.
    '''
    # Active recordings
    global active_recordings
    # Make sure the recording is active
    if data not in active_recordings:
        return Response(err_code=1, err_str="Recording {} not active".format(data), serialize=True)
    t_start = time.time()
    active_recordings[data].join()
    elapsed = time.time() - t_start
    return Response("Returned after {} seconds".format(elapsed), serialize=True)
def list_recordings(data):
    '''
    Handler for the "list" command: return the base names of all recording
    files found in the permanent and temporary storage locations.
    '''
    found = []
    for folder in (PERM_RECORDING_LOC, TEMP_RECORDING_LOC):
        # Skip locations that are not mounted / do not exist
        if not os.path.exists(folder):
            continue
        # Anything with the recording extension counts as a recording
        for fname in os.listdir(folder):
            if fname.endswith(RECORDING_EXTENSION):
                found.append(os.path.splitext(fname)[0])
    return Response(found, serialize=True)
def _get_recording(data):
    '''
    Returns the contents of a recording. Takes a msgpack serialized
    request object with the following fields:
        name: required recording name
        start: start entry index
        stop: stop entry index
        msgpack: if we should use msgpack to deserialize values, assumed false
    Will return a Response() type on error, else a list of
    (redis_id, {key: value}) tuples, one per recorded entry.
    '''
    if (("name" not in data) or (type(data["name"]) is not str)):
        return Response(err_code=1, err_str="Name is required", serialize=True)
    name = data["name"]
    file = None
    # Look for the recording in both storage locations
    for folder in [PERM_RECORDING_LOC, TEMP_RECORDING_LOC]:
        filename = os.path.join(folder, name + RECORDING_EXTENSION)
        if os.path.exists(filename):
            try:
                file = open(filename, 'rb', buffering=0)
                break
            except:
                return Response(err_code=2, err_str="Failed to open file {}".format(filename), serialize=True)
    # Make sure we found the file
    if file is None:
        return Response(err_code=3, err_str="No recording {}".format(name), serialize=True)
    start_idx = 0
    stop_idx = -1
    use_msgpack = False
    if ("start" in data) and (type(data["start"]) is int):
        start_idx = data["start"]
    if ("stop" in data) and (type(data["stop"]) is int):
        stop_idx = data["stop"]
    if ("msgpack" in data) and (type(data["msgpack"]) is bool):
        use_msgpack = data["msgpack"]
    # The recording was written as a bare concatenation of msgpack objects,
    # so stream them back with the msgpack streaming API. Close the file
    # when done -- it was previously leaked on every call.
    response_items = []
    try:
        unpacker = msgpack.Unpacker(file, raw=False)
        for i, unpacked in enumerate(unpacker):
            if (i >= start_idx):
                # Rebuild the (id, key->value) entry tuple
                repacked = (unpacked["id"], {})
                # If we should use msgpack to deserialize
                for k in unpacked:
                    if k != "id":
                        if use_msgpack:
                            repacked[1][k] = msgpack.unpackb(unpacked[k], raw=False)
                        else:
                            repacked[1][k] = unpacked[k]
                response_items.append(repacked)
            if ((stop_idx != -1) and (i >= stop_idx)):
                break
    finally:
        file.close()
    return response_items
def get_recording(data):
    '''
    Handler for the "get" command. Takes a msgpack serialized request
    object with the following fields:
        name: required recording name
        start: start entry index
        stop: stop entry index
        msgpack: if we should use msgpack to deserialize values, assumed false
    '''
    # _get_recording returns either an error Response or the entry list
    loaded = _get_recording(data)
    if type(loaded) is list:
        return Response(loaded, serialize=True)
    return loaded
def plot_recording(data):
    '''
    Makes a plot of the recording. Takes a msgpack-serialized JSON
    object with the following fields
        name : required recording name
        plots: list of plots to make, where each item in the list is a list as well.
            Each item in the plots list is a tuple, with values:
                - 0 : lambda function to perform on the data. The data will be
                        passed to the lambda as a dictionary named `x`
                - 1 : list of keys on which to perform the lambda function
                - 2 : optional label
            An example plots field would look like:
                "plots": [
                    {
                        "data": [
                            ["x[0]", ["joint_0", "joint_1"], "label0"],
                        ],
                        "title": "Some Title",
                        "y_label": "Some Y Label",
                        "x_label": "Some X Label",
                        "legend": true/false,
                    },
                    {
                        "data": [
                            ["x[1]", ["joint_0", "joint_1"], "label1"],
                            ["x[2]", ["joint_0", "joint_1"], "label2"],
                        ],
                        ...
                    }
                ]
        start: Entry index to start the plot at
        stop: Entry index to stop the plot at
        msgpack: Whether or not to use msgpack to deserialize each key on
            readback from the recording. Default false
        save: Optional, if true will save an image of each plot, default false
        show: Optional, default true, will show the plots in an interactive
            fashion
        perm: Optional, default false. If true will save in the permanent
            file location, else temporary
        x: Optional lambda for converting an entry into a timestamp. If not
            passed, will use the redis timestamp. If passed, will be a
            lambda for an entry lambda entry: ... where the user supplies ...
            to convert the entry into an x-label
    '''
    # Load the recording. If we failed to load it just return that error
    result = _get_recording(data)
    if type(result) is not list:
        return result
    # Get the number of results
    n_results = len(result)
    if (n_results == 0):
        return Response(err_code=4, err_str="0 results for recording", serialize=True)
    # We should have a list of all of the entries that we care about seeing
    # and now for each entry need to go ahead and run all of the lambdas
    if ("plots" not in data) or (type(data["plots"]) is not list):
        return Response(err_code=5, err_str="Plots must be specified", serialize=True)
    # Note the plots
    plots = data["plots"]
    # NOTE: eval() of caller-supplied code -- only safe on a trusted network
    if ("x" in data):
        try:
            x_lambda = eval("lambda entry: " + data["x"])
            x_data = [x_lambda(entry[1]) for entry in result]
            x_label = str(data["x"])
        except:
            # BUGFIX: this Response previously lacked serialize=True,
            # unlike every other response in this module
            return Response(err_code=6, err_str="Unable to convert {} to x data lambda".format(data["x"]), serialize=True)
    else:
        x_data = [int(entry[0].split('-')[0]) for entry in result]
        x_label = "Redis Timestamp (ms)"
    # Turn the x data into a numpy array and subtract off the first item
    # so that the scale is reasonable
    x_data = np.array(x_data)
    x_data -= x_data[0]
    # Convert the input data to lambdas
    figures = []
    for plot_n, plot in enumerate(plots):
        # List of lambdas to run
        lambdas = []
        total_lines = 0
        # Get the plot data
        if ("data" not in plot) or (type(plot["data"]) is not list):
            return Response(err_code=7, err_str="Each plot must have a data list", serialize=True)
        plot_data = plot["data"]
        # Make the lambda
        for val in plot_data:
            # Make sure the length of the array is proper
            if ((len(val) < 2) or (len(val) > 3)):
                return Response(err_code=8, err_str="plot value {} does not have 2 or 3 items".format(val), serialize=True)
            # Try to make the lambda from the first one
            try:
                lamb = eval("lambda x: " + val[0])
            except:
                return Response(err_code=9, err_str="Unable to make lambda from {}".format(val[0]), serialize=True)
            # Make sure each key exists in the first data item
            for key in val[1]:
                if key not in result[0][1]:
                    return Response(err_code=10, err_str="Key {} not in data".format(key), serialize=True)
            # Add the number of keys in this lambda to the total number of lines
            total_lines += len(val[1])
            # Get the label
            if len(val) == 3:
                label = str(val[2])
            else:
                label = str(val[0])
            lambdas.append((lamb, val[1], label))
        # Now we want to preallocate the data for the plot. It should be a
        # matrix that's n-dimensional by lambda-key pair and entry
        to_plot = np.zeros((total_lines, n_results))
        # And finally we want to loop over all of the data
        for i, val in enumerate(result):
            idx = 0
            for (l, keys, label) in lambdas:
                for key in keys:
                    to_plot[idx][i] = l(val[1][key])
                    idx += 1
        # Now, we can go ahead and make the figure
        fig = plt.figure()
        figures.append(fig)
        # Plot all of the lines
        idx = 0
        for (l, keys, label) in lambdas:
            for key in keys:
                plt.plot(x_data, to_plot[idx,:], label=label + "-" + key)
                idx += 1
        # Make the title, x label, y label and legend
        title = "Recording-{}-{}".format(data["name"], plot.get("title", "Plot {}".format(plot_n)))
        plt.title(title)
        # Make the x label
        plt.xlabel(plot.get("x_label", x_label))
        # Make the y label
        plt.ylabel(plot.get("y_label", "Value"))
        # Make the legend
        if plot.get("legend", True):
            plt.legend()
        # If we are supposed to save the figures, do so
        if data.get("save", False):
            fig.savefig(os.path.join(
                PERM_RECORDING_LOC if data.get("perm", False) else TEMP_RECORDING_LOC,
                title))
    # Draw the new plot
    if data.get("show", True):
        plt.show()
    return Response("Success", serialize=True)
def csv_recording(data):
    '''
    Converts a recording to CSV. Takes a msgpack'd object with the following
    parameters
        name: required. Recording name
        perm: Optional, default false. Whether to save the files in the permanent
            or temporary location.
        lambdas: Optional. Multi-typed, either dictionary or string.
            If dictionary, Dictionary of key : lambda values to convert keys
            into an iterable. Each key will get its own CSV sheet and each
            column will be an iterable from the value returned. Lambda will be
            lambda: val
            If string, same as above but applied to all keys
        msgpack: Optional, default false. Will use msgpack to deserialize the
            key data before passing it to the lambda or attempting to iterate
            over it.
        x: Optional, default uses redis ID. Specify a lambda on the entry
            to generate the "x" column (column 0) of the CSV file
        desc: Optional. Description to add to filename s.t. it doesn't overwrite
            pre-existing data
    '''
    result = _get_recording(data)
    if type(result) is not list:
        return result
    # If we got a result then we want to go ahead and make a CSV file for
    # each key. Every file we open is closed in the finally block below
    # (previously all handles were leaked on every code path).
    files = {}
    desc = data.get("desc", "")
    try:
        for key in result[0][1]:
            filename = os.path.join(
                PERM_RECORDING_LOC if data.get("perm", False) else TEMP_RECORDING_LOC,
                "{}-{}-{}.csv".format(data["name"], desc, key))
            try:
                files[key] = open(filename, "w")
            except:
                return Response(err_code=4, err_str="Failed to open file {}".format(filename), serialize=True)
        # Build the optional x-column lambda
        # NOTE: eval() of caller-supplied code -- only safe on a trusted network
        x_lambda = data.get("x", None)
        if x_lambda is not None:
            try:
                x_lambda = eval("lambda entry: " + x_lambda)
            except:
                return Response(err_code=5, err_str="Failed to convert {} to lambda".format(x_lambda), serialize=True)
        # Get the general list of lambdas
        lambdas = data.get("lambdas", None)
        if lambdas is not None:
            if type(lambdas) is dict:
                for key in lambdas:
                    try:
                        lambdas[key] = eval("lambda x: " + lambdas[key])
                    except:
                        return Response(err_code=6, err_str="Failed to convert {} to lambda".format(lambdas[key]), serialize=True)
            elif type(lambdas) is str:
                try:
                    l_val = eval("lambda x: " + lambdas)
                except:
                    return Response(err_code=6, err_str="Failed to convert {} to lambda".format(lambdas), serialize=True)
                # Make a dictionary with the same lambda for each key
                lambdas = {}
                for key in result[0][1]:
                    lambdas[key] = l_val
            else:
                return Response(err_code=7, err_str="Lambdas argument must be dict or string", serialize=True)
        # Loop over the data
        for (redis_id, entry) in result:
            # Get the x value to write to the file
            if x_lambda is not None:
                x_val = x_lambda(entry)
            else:
                x_val = redis_id.split('-')[0]
            # For each key, write the key and its data to the file
            for key in entry:
                # Value by default is just entry[key]
                val = entry[key]
                # If we have some lambdas then we need to perhaps transform the
                # value in that manner
                if lambdas is not None and key in lambdas:
                    val = lambdas[key](val)
                # Make the line for the CSV, starting off with the x value
                buff = "{},".format(x_val)
                # And add each item from the iterable; non-iterables are
                # written as a single value
                try:
                    for v in val:
                        buff += "{},".format(v)
                except:
                    buff += "{}".format(val)
                # Finish off with a newline and write to file
                buff += "\n"
                files[key].write(buff)
    finally:
        # Close every CSV we managed to open
        for f in files.values():
            f.close()
    # And note the success
    return Response("Success", serialize=True)
if __name__ == '__main__':
    # Create the "record" element, connected to the configured atom host
    elem = Element("record", host=ATOM_HOST)
    # Register the command handlers. All handlers except "list" take a
    # msgpack-serialized request payload, hence deserialize=True.
    elem.command_add("start", start_recording, timeout=1000, deserialize=True)
    elem.command_add("stop", stop_recording, timeout=1000, deserialize=True)
    elem.command_add("wait", wait_recording, timeout=60000, deserialize=True)
    elem.command_add("list", list_recordings, timeout=1000)
    elem.command_add("get", get_recording, timeout=1000, deserialize=True)
    elem.command_add("plot", plot_recording, timeout=1000000, deserialize=True)
    elem.command_add("csv", csv_recording, timeout=10000, deserialize=True)
    # Want to launch the plot thread s.t. our plot API can return quickly
    elem.command_loop()
| [
"time.sleep",
"msgpack.unpackb",
"atom.Element",
"msgpack.Unpacker",
"numpy.array",
"os.path.exists",
"os.listdir",
"msgpack.packb",
"matplotlib.pyplot.plot",
"os.path.splitext",
"atom.messages.Response",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyp... | [((713, 741), 'os.getenv', 'os.getenv', (['"""ATOM_HOST"""', 'None'], {}), "('ATOM_HOST', None)\n", (722, 741), False, 'import os\n'), ((1056, 1097), 'atom.Element', 'Element', (["('record_' + name)"], {'host': 'ATOM_HOST'}), "('record_' + name, host=ATOM_HOST)\n", (1063, 1097), False, 'from atom import Element\n'), ((1152, 1248), 'os.path.join', 'os.path.join', (['(PERM_RECORDING_LOC if perm else TEMP_RECORDING_LOC)', '(name + RECORDING_EXTENSION)'], {}), '(PERM_RECORDING_LOC if perm else TEMP_RECORDING_LOC, name +\n RECORDING_EXTENSION)\n', (1164, 1248), False, 'import os\n'), ((5851, 5946), 'threading.Thread', 'Thread', ([], {'target': 'record_fn', 'args': '(name, n_entries, n_sec, perm, element, stream)', 'daemon': '(True)'}), '(target=record_fn, args=(name, n_entries, n_sec, perm, element,\n stream), daemon=True)\n', (5857, 5946), False, 'from threading import Thread\n'), ((6911, 6946), 'atom.messages.Response', 'Response', (['"""Success"""'], {'serialize': '(True)'}), "('Success', serialize=True)\n", (6919, 6946), False, 'from atom.messages import Response, LogLevel\n'), ((7276, 7287), 'time.time', 'time.time', ([], {}), '()\n', (7285, 7287), False, 'import time\n'), ((7339, 7350), 'time.time', 'time.time', ([], {}), '()\n', (7348, 7350), False, 'import time\n'), ((8050, 8086), 'atom.messages.Response', 'Response', (['recordings'], {'serialize': '(True)'}), '(recordings, serialize=True)\n', (8058, 8086), False, 'from atom.messages import Response, LogLevel\n'), ((9812, 9845), 'msgpack.Unpacker', 'msgpack.Unpacker', (['file'], {'raw': '(False)'}), '(file, raw=False)\n', (9828, 9845), False, 'import msgpack\n'), ((14237, 14253), 'numpy.array', 'np.array', (['x_data'], {}), '(x_data)\n', (14245, 14253), True, 'import numpy as np\n'), ((17364, 17399), 'atom.messages.Response', 'Response', (['"""Success"""'], {'serialize': '(True)'}), "('Success', serialize=True)\n", (17372, 17399), False, 'from atom.messages import Response, LogLevel\n'), 
((21417, 21452), 'atom.messages.Response', 'Response', (['"""Success"""'], {'serialize': '(True)'}), "('Success', serialize=True)\n", (21425, 21452), False, 'from atom.messages import Response, LogLevel\n'), ((21492, 21525), 'atom.Element', 'Element', (['"""record"""'], {'host': 'ATOM_HOST'}), "('record', host=ATOM_HOST)\n", (21499, 21525), False, 'from atom import Element\n'), ((3023, 3048), 'time.sleep', 'time.sleep', (['POLL_INTERVAL'], {}), '(POLL_INTERVAL)\n', (3033, 3048), False, 'import time\n'), ((4396, 4464), 'atom.messages.Response', 'Response', ([], {'err_code': '(1)', 'err_str': '"""name must be in data"""', 'serialize': '(True)'}), "(err_code=1, err_str='name must be in data', serialize=True)\n", (4404, 4464), False, 'from atom.messages import Response, LogLevel\n'), ((4573, 4644), 'atom.messages.Response', 'Response', ([], {'err_code': '(2)', 'err_str': '"""element must be in data"""', 'serialize': '(True)'}), "(err_code=2, err_str='element must be in data', serialize=True)\n", (4581, 4644), False, 'from atom.messages import Response, LogLevel\n'), ((4751, 4821), 'atom.messages.Response', 'Response', ([], {'err_code': '(3)', 'err_str': '"""stream must be in data"""', 'serialize': '(True)'}), "(err_code=3, err_str='stream must be in data', serialize=True)\n", (4759, 4821), False, 'from atom.messages import Response, LogLevel\n'), ((7840, 7858), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (7850, 7858), False, 'import os\n'), ((8583, 8647), 'atom.messages.Response', 'Response', ([], {'err_code': '(1)', 'err_str': '"""Name is required"""', 'serialize': '(True)'}), "(err_code=1, err_str='Name is required', serialize=True)\n", (8591, 8647), False, 'from atom.messages import Response, LogLevel\n'), ((8769, 8817), 'os.path.join', 'os.path.join', (['folder', '(name + RECORDING_EXTENSION)'], {}), '(folder, name + RECORDING_EXTENSION)\n', (8781, 8817), False, 'import os\n'), ((8829, 8853), 'os.path.exists', 'os.path.exists', (['filename'], {}), 
'(filename)\n', (8843, 8853), False, 'import os\n'), ((10975, 11007), 'atom.messages.Response', 'Response', (['result'], {'serialize': '(True)'}), '(result, serialize=True)\n', (10983, 11007), False, 'from atom.messages import Response, LogLevel\n'), ((13250, 13321), 'atom.messages.Response', 'Response', ([], {'err_code': '(4)', 'err_str': '"""0 results for recording"""', 'serialize': '(True)'}), "(err_code=4, err_str='0 results for recording', serialize=True)\n", (13258, 13321), False, 'from atom.messages import Response, LogLevel\n'), ((13556, 13627), 'atom.messages.Response', 'Response', ([], {'err_code': '(5)', 'err_str': '"""Plots must be specified"""', 'serialize': '(True)'}), "(err_code=5, err_str='Plots must be specified', serialize=True)\n", (13564, 13627), False, 'from atom.messages import Response, LogLevel\n'), ((15970, 16004), 'numpy.zeros', 'np.zeros', (['(total_lines, n_results)'], {}), '((total_lines, n_results))\n', (15978, 16004), True, 'import numpy as np\n'), ((16353, 16365), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16363, 16365), True, 'import matplotlib.pyplot as plt\n'), ((16775, 16791), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (16784, 16791), True, 'import matplotlib.pyplot as plt\n'), ((17341, 17351), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17349, 17351), True, 'import matplotlib.pyplot as plt\n'), ((2335, 2374), 'msgpack.packb', 'msgpack.packb', (['entry'], {'use_bin_type': '(True)'}), '(entry, use_bin_type=True)\n', (2348, 2374), False, 'import msgpack\n'), ((7722, 7744), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (7736, 7744), False, 'import os\n'), ((14574, 14653), 'atom.messages.Response', 'Response', ([], {'err_code': '(7)', 'err_str': '"""Each plot must have a data list"""', 'serialize': '(True)'}), "(err_code=7, err_str='Each plot must have a data list', serialize=True)\n", (14582, 14653), False, 'from atom.messages import Response, 
LogLevel\n'), ((17022, 17034), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (17032, 17034), True, 'import matplotlib.pyplot as plt\n'), ((5598, 5632), 'os.path.exists', 'os.path.exists', (['PERM_RECORDING_LOC'], {}), '(PERM_RECORDING_LOC)\n', (5612, 5632), False, 'import os\n'), ((16529, 16587), 'matplotlib.pyplot.plot', 'plt.plot', (['x_data', 'to_plot[idx, :]'], {'label': "(label + '-' + key)"}), "(x_data, to_plot[idx, :], label=label + '-' + key)\n", (16537, 16587), True, 'import matplotlib.pyplot as plt\n'), ((20241, 20332), 'atom.messages.Response', 'Response', ([], {'err_code': '(7)', 'err_str': '"""Lambdas argument must be dict or string"""', 'serialize': '(True)'}), "(err_code=7, err_str='Lambdas argument must be dict or string',\n serialize=True)\n", (20249, 20332), False, 'from atom.messages import Response, LogLevel\n'), ((8007, 8033), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (8023, 8033), False, 'import os\n'), ((10204, 10243), 'msgpack.unpackb', 'msgpack.unpackb', (['unpacked[k]'], {'raw': '(False)'}), '(unpacked[k], raw=False)\n', (10219, 10243), False, 'import msgpack\n')] |
########################################################################################################################
# Module: tests/test_resampling.py
# Description: Tests for resampling schemes.
#
# Web: https://github.com/SamDuffield/bayesian-traffic
########################################################################################################################
import unittest
import numpy as np
import numpy.testing as npt
from bmm.src.inference.particles import MMParticles
from bmm.src.inference import resampling
class TestMultinomial(unittest.TestCase):
    """Tests for resampling.multinomial across supported input types."""

    def test_array(self):
        # All weight on index 0 -> every resampled element equals array[0] == 0
        array = np.arange(10)
        weights = np.zeros(10)
        weights[0] = 1
        npt.assert_array_equal(resampling.multinomial(array, weights), np.zeros(10))

    def test_list(self):
        # Same degenerate weights, with a plain Python list input.
        # Local renamed from `list`, which shadowed the builtin.
        values = [a for a in range(10)]
        weights = np.zeros(10)
        weights[0] = 1
        self.assertEqual(resampling.multinomial(values, weights), [0 for _ in range(10)])

    def test_mmparticles(self):
        # Three particles valued 0, 1, 2 per column; all weight on particle 1
        # means every resampled particle must match particle 1
        init_array = np.zeros((3, 6))
        init_array += np.arange(3).reshape(3, 1)
        mmp = MMParticles(init_array)
        weights = np.array([0, 1, 0])
        mmp_resampled = resampling.multinomial(mmp, weights)
        for i in range(3):
            npt.assert_array_equal(mmp_resampled[i], np.array([[0, 1, 1, 1, 1, 1, 1, 0]]))
# Run the test suite when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"bmm.src.inference.particles.MMParticles",
"numpy.array",
"numpy.zeros",
"unittest.main",
"numpy.arange",
"bmm.src.inference.resampling.multinomial"
] | [((1391, 1406), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1404, 1406), False, 'import unittest\n'), ((624, 637), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (633, 637), True, 'import numpy as np\n'), ((656, 668), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (664, 668), True, 'import numpy as np\n'), ((859, 871), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (867, 871), True, 'import numpy as np\n'), ((1037, 1053), 'numpy.zeros', 'np.zeros', (['(3, 6)'], {}), '((3, 6))\n', (1045, 1053), True, 'import numpy as np\n'), ((1117, 1140), 'bmm.src.inference.particles.MMParticles', 'MMParticles', (['init_array'], {}), '(init_array)\n', (1128, 1140), False, 'from bmm.src.inference.particles import MMParticles\n'), ((1159, 1178), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (1167, 1178), True, 'import numpy as np\n'), ((1203, 1239), 'bmm.src.inference.resampling.multinomial', 'resampling.multinomial', (['mmp', 'weights'], {}), '(mmp, weights)\n', (1225, 1239), False, 'from bmm.src.inference import resampling\n'), ((723, 761), 'bmm.src.inference.resampling.multinomial', 'resampling.multinomial', (['array', 'weights'], {}), '(array, weights)\n', (745, 761), False, 'from bmm.src.inference import resampling\n'), ((763, 775), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (771, 775), True, 'import numpy as np\n'), ((920, 957), 'bmm.src.inference.resampling.multinomial', 'resampling.multinomial', (['list', 'weights'], {}), '(list, weights)\n', (942, 957), False, 'from bmm.src.inference import resampling\n'), ((1076, 1088), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1085, 1088), True, 'import numpy as np\n'), ((1320, 1356), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 1, 1, 1, 0]]'], {}), '([[0, 1, 1, 1, 1, 1, 1, 0]])\n', (1328, 1356), True, 'import numpy as np\n')] |
# Value Function Iteration with IID Income
# <NAME> 2017
# Translated by <NAME> Dec 2020
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from discrete_normal import discrete_normal
# PARAMETERS
## preferences
risk_aver = 2    # CRRA risk-aversion coefficient (1 => log utility, see below)
beta = 0.95      # subjective discount factor
## returns
r = 0.03         # net interest rate on assets
R = 1+r          # gross return
## income risk: discretized N(mu,sigma^2)
mu_y = 1         # mean income
sd_y = 0.2       # income standard deviation
ny = 5           # number of discrete income states
## asset grids
na = 500         # number of asset grid points
amax = 20        # upper bound of asset grid
borrow_lim = 0   # borrowing limit (lower bound of asset grid)
agrid_par = 1 # 1 for linear, 0 for L-shaped
## computation
max_iter = 1000      # maximum value-function iterations
tol_iter = 1.0e-6    # sup-norm convergence tolerance
Nsim = 50000         # number of simulated households
Tsim = 500           # number of simulated time periods
# OPTIONS
Display = 1      # print iteration/simulation progress
DoSimulate = 1   # run the panel simulation after solving
MakePlots = 1    # produce diagnostic figures
# DRAW RANDOM NUMBERS
np.random.seed(2020)
yrand = np.random.rand(Nsim,Tsim)  # uniform draws mapped to income states via ycumdist
# SET UP GRIDS
## assets
agrid = np.linspace(0,1,na).reshape(na,1)
agrid = agrid**(1/agrid_par)   # agrid_par < 1 concentrates points near the borrowing limit
agrid = borrow_lim + (amax-borrow_lim)*agrid
## income: discretize normal distribution
# width is chosen so the discrete grid matches the target moments (fsolve on the
# first output of discrete_normal)
width = fsolve(lambda x: discrete_normal(ny,mu_y,sd_y,x)[0],2)
temp, ygrid, ydist = discrete_normal(ny,mu_y,sd_y,width)
ycumdist = np.cumsum(ydist)   # CDF over income states, used for simulation draws
# UTILITY FUNCTION
if risk_aver==1:
    u = lambda c: np.log(c)   # log utility is the CRRA limit as risk_aver -> 1
else:
    u = lambda c: (c**(1-risk_aver)-1)/(1-risk_aver)
# INITIALIZE VALUE FUNCTION
# Guess: discounted value of consuming interest on the lowest asset level plus income forever.
Vguess = np.zeros((na,ny))
for iy in range(0,ny):
    Vguess[:,iy] = u(r*agrid[0]+ygrid[iy])/(1-beta)
### Vguess = np.ones((na,ny))
# ITERATE ON VALUE FUNCTION
V = Vguess.copy()
Vdiff = 1
Iter = 0
while Iter <= max_iter and Vdiff > tol_iter:
    Iter = Iter + 1
    Vlast = V.copy()
    V = np.zeros((na,ny))
    sav = np.zeros((na,ny))             # savings policy (asset levels)
    savind = np.zeros((na,ny), dtype=int)  # savings policy (grid indices)
    con = np.zeros((na,ny))             # consumption policy
    ## loop over assets
    for ia in range(0,na):
        ## loop over income
        for iy in range(0,ny):
            cash = R*agrid[ia] + ygrid[iy]   # cash on hand
            # Value of every feasible savings choice; consumption is floored
            # at a tiny positive number so u() stays finite.
            Vchoice = u(np.maximum(cash-agrid,1.0e-10)) + beta*(Vlast @ ydist)
            V[ia,iy] = np.max(Vchoice)
            savind[ia,iy] = np.argmax(Vchoice)
            sav[ia,iy] = agrid[savind[ia,iy]]
            con[ia,iy] = cash - sav[ia,iy]
    Vdiff = np.max(abs(V-Vlast))   # sup-norm distance between successive value functions
    if Display >= 1:
        print('Iteration no. ' + str(Iter), ' max val fn diff is ' + str(Vdiff))
# SIMULATE
if DoSimulate == 1:
    yindsim = np.zeros((Nsim,Tsim), dtype=int)   # simulated income-state indices
    aindsim = np.zeros((Nsim,Tsim), dtype=int)   # simulated asset-grid indices
    ## initial assets: everyone starts at the bottom of the asset grid
    aindsim[:,0] = 0
    ## loop over time periods
    for it in range(0,Tsim):
        if Display >= 1 and (it+1)%100 == 0:
            print(' Simulating, time period ' + str(it+1))
        ### income realization: note we vectorize simulations at once because
        ### of matlab, in other languages we would loop over individuals
        # Invert the income CDF: uniform draw below ycumdist[iy] maps to state iy.
        yindsim[yrand[:,it]<=ycumdist[0],it] = 0
        for iy in range(1,ny):
            yindsim[np.logical_and(yrand[:,it]>ycumdist[iy-1], yrand[:,it]<=ycumdist[iy]),it] = iy
        ## asset choice: apply the savings policy index-by-index
        if it < Tsim-1:
            for iy in range(0,ny):
                aindsim[yindsim[:,it]==iy,it+1] = savind[aindsim[yindsim[:,it]==iy,it],iy]
    ## assign actual asset and income values
    asim = agrid[aindsim]
    ysim = ygrid[yindsim]
# MAKE PLOTS
if MakePlots==1:
    ## consumption policy function
    plt.plot(agrid,con[:,0],'b-',label = 'Lowest income state')
    plt.plot(agrid,con[:,ny-1],'r-', label = 'Highest income state')
    plt.grid()
    plt.xlim((0,amax))
    ### plt.title('Consumption Policy Function')
    plt.title('Consumption')
    plt.legend()
    plt.show()
    ## savings policy function (plotted as net saving a' - a)
    plt.plot(agrid,sav[:,0]-agrid[:,0],'b-')
    plt.plot(agrid,sav[:,ny-1]-agrid[:,0],'r-')
    plt.plot(agrid,np.zeros((na,1)),'k',linewidth=0.5)
    plt.grid()
    plt.xlim((0,amax))
    ### plt.title('Savings Policy Function (a''-a)')
    plt.title('Savings')
    plt.show()
    ## nice zoom: build a boolean mask for the asset range in xlimits
    xlimits = (0,1)
    xlimind = np.ones(na, dtype=bool)
    if np.min(agrid) < xlimits[0]:
        xlimind = np.logical_and(xlimind,(agrid[:,0]>=np.max(agrid[agrid<xlimits[0]])))
    elif np.min(agrid) > xlimits[1]:
        xlimind = 0
    if np.max(agrid) > xlimits[1]:
        xlimind = np.logical_and(xlimind,(agrid[:,0]<=np.min(agrid[agrid>xlimits[1]])))
    elif np.max(agrid) < xlimits[0]:
        xlimind = 0
    ## consumption policy function: zoomed in
    plt.plot(agrid[xlimind],con[xlimind,0],'b-o',linewidth=2)
    plt.plot(agrid[xlimind],con[xlimind,ny-1],'r-o',linewidth=2)
    plt.grid()
    plt.xlim(xlimits)
    plt.title('Consumption: Zoomed')
    plt.show()
    ## savings policy function: zoomed in
    plt.plot(agrid[xlimind],sav[xlimind,0]-agrid[xlimind,0],'b-o',linewidth=2)
    plt.plot(agrid[xlimind],sav[xlimind,ny-1]-agrid[xlimind,0],'r-o',linewidth=2)
    plt.plot(agrid,np.zeros((na,1)),'k',linewidth =0.5)
    plt.grid()
    plt.xlim(xlimits)
    plt.title('Savings: Zoomed (a\'-a)')
    plt.show()
    ## income distribution in the final simulated period
    plt.hist(ysim[:,Tsim-1],len(ygrid),facecolor=(0,0.5,0.5),edgecolor='blue')
    plt.ylabel('')
    plt.title('Income distribution')
    plt.show()
    ## asset distribution in the final simulated period
    plt.hist(asim[:,Tsim-1],40,facecolor=(.7,.7,.7),edgecolor='black')
    plt.ylabel('')
    plt.title('Asset distribution')
    plt.show()
    ## convergence check: cross-sectional mean assets over time
    plt.plot(range(0,Tsim),np.mean(asim,0),'k-',linewidth=1.5)
    plt.xlabel('Time Period')
    plt.title('Mean Asset Convergence')
    plt.show()
    ## asset distribution statistics (assets normalized by mean income)
    aysim = asim[:,Tsim-1]/np.mean(ysim[:,Tsim-1])
    print('Mean assets: ' + str(np.mean(aysim)))
    print('Fraction borrowing constrained: ' + str(np.sum(aysim==borrow_lim)/Nsim * 100) + '%')
    print('10th Percentile: ' + str(np.quantile(aysim,.1)))
    print('50th Percentile: ' + str(np.quantile(aysim,.5)))
    print('90th Percentile: ' + str(np.quantile(aysim,.9)))
print('99th Percentile: ' + str(np.quantile(aysim,.99))) | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.linspace",
"numpy.random.seed",
"numpy.min",
"numpy.maximum",
"numpy.ones",
"numpy.arg... | [((620, 640), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (634, 640), True, 'import numpy as np\n'), ((649, 675), 'numpy.random.rand', 'np.random.rand', (['Nsim', 'Tsim'], {}), '(Nsim, Tsim)\n', (663, 675), True, 'import numpy as np\n'), ((944, 982), 'discrete_normal.discrete_normal', 'discrete_normal', (['ny', 'mu_y', 'sd_y', 'width'], {}), '(ny, mu_y, sd_y, width)\n', (959, 982), False, 'from discrete_normal import discrete_normal\n'), ((991, 1007), 'numpy.cumsum', 'np.cumsum', (['ydist'], {}), '(ydist)\n', (1000, 1007), True, 'import numpy as np\n'), ((1173, 1191), 'numpy.zeros', 'np.zeros', (['(na, ny)'], {}), '((na, ny))\n', (1181, 1191), True, 'import numpy as np\n'), ((1460, 1478), 'numpy.zeros', 'np.zeros', (['(na, ny)'], {}), '((na, ny))\n', (1468, 1478), True, 'import numpy as np\n'), ((1488, 1506), 'numpy.zeros', 'np.zeros', (['(na, ny)'], {}), '((na, ny))\n', (1496, 1506), True, 'import numpy as np\n'), ((1519, 1548), 'numpy.zeros', 'np.zeros', (['(na, ny)'], {'dtype': 'int'}), '((na, ny), dtype=int)\n', (1527, 1548), True, 'import numpy as np\n'), ((1558, 1576), 'numpy.zeros', 'np.zeros', (['(na, ny)'], {}), '((na, ny))\n', (1566, 1576), True, 'import numpy as np\n'), ((2202, 2235), 'numpy.zeros', 'np.zeros', (['(Nsim, Tsim)'], {'dtype': 'int'}), '((Nsim, Tsim), dtype=int)\n', (2210, 2235), True, 'import numpy as np\n'), ((2249, 2282), 'numpy.zeros', 'np.zeros', (['(Nsim, Tsim)'], {'dtype': 'int'}), '((Nsim, Tsim), dtype=int)\n', (2257, 2282), True, 'import numpy as np\n'), ((3193, 3254), 'matplotlib.pyplot.plot', 'plt.plot', (['agrid', 'con[:, 0]', '"""b-"""'], {'label': '"""Lowest income state"""'}), "(agrid, con[:, 0], 'b-', label='Lowest income state')\n", (3201, 3254), True, 'import matplotlib.pyplot as plt\n'), ((3257, 3324), 'matplotlib.pyplot.plot', 'plt.plot', (['agrid', 'con[:, ny - 1]', '"""r-"""'], {'label': '"""Highest income state"""'}), "(agrid, con[:, ny - 1], 'r-', label='Highest income 
state')\n", (3265, 3324), True, 'import matplotlib.pyplot as plt\n'), ((3326, 3336), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3334, 3336), True, 'import matplotlib.pyplot as plt\n'), ((3341, 3360), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, amax)'], {}), '((0, amax))\n', (3349, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3413, 3437), 'matplotlib.pyplot.title', 'plt.title', (['"""Consumption"""'], {}), "('Consumption')\n", (3422, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3442, 3454), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3452, 3454), True, 'import matplotlib.pyplot as plt\n'), ((3459, 3469), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3467, 3469), True, 'import matplotlib.pyplot as plt\n'), ((3506, 3552), 'matplotlib.pyplot.plot', 'plt.plot', (['agrid', '(sav[:, 0] - agrid[:, 0])', '"""b-"""'], {}), "(agrid, sav[:, 0] - agrid[:, 0], 'b-')\n", (3514, 3552), True, 'import matplotlib.pyplot as plt\n'), ((3551, 3602), 'matplotlib.pyplot.plot', 'plt.plot', (['agrid', '(sav[:, ny - 1] - agrid[:, 0])', '"""r-"""'], {}), "(agrid, sav[:, ny - 1] - agrid[:, 0], 'r-')\n", (3559, 3602), True, 'import matplotlib.pyplot as plt\n'), ((3654, 3664), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3662, 3664), True, 'import matplotlib.pyplot as plt\n'), ((3669, 3688), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, amax)'], {}), '((0, amax))\n', (3677, 3688), True, 'import matplotlib.pyplot as plt\n'), ((3745, 3765), 'matplotlib.pyplot.title', 'plt.title', (['"""Savings"""'], {}), "('Savings')\n", (3754, 3765), True, 'import matplotlib.pyplot as plt\n'), ((3770, 3780), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3778, 3780), True, 'import matplotlib.pyplot as plt\n'), ((3837, 3860), 'numpy.ones', 'np.ones', (['na'], {'dtype': 'bool'}), '(na, dtype=bool)\n', (3844, 3860), True, 'import numpy as np\n'), ((4272, 4333), 'matplotlib.pyplot.plot', 'plt.plot', (['agrid[xlimind]', 'con[xlimind, 0]', 
'"""b-o"""'], {'linewidth': '(2)'}), "(agrid[xlimind], con[xlimind, 0], 'b-o', linewidth=2)\n", (4280, 4333), True, 'import matplotlib.pyplot as plt\n'), ((4334, 4400), 'matplotlib.pyplot.plot', 'plt.plot', (['agrid[xlimind]', 'con[xlimind, ny - 1]', '"""r-o"""'], {'linewidth': '(2)'}), "(agrid[xlimind], con[xlimind, ny - 1], 'r-o', linewidth=2)\n", (4342, 4400), True, 'import matplotlib.pyplot as plt\n'), ((4399, 4409), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4407, 4409), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4431), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlimits'], {}), '(xlimits)\n', (4422, 4431), True, 'import matplotlib.pyplot as plt\n'), ((4436, 4468), 'matplotlib.pyplot.title', 'plt.title', (['"""Consumption: Zoomed"""'], {}), "('Consumption: Zoomed')\n", (4445, 4468), True, 'import matplotlib.pyplot as plt\n'), ((4473, 4483), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4481, 4483), True, 'import matplotlib.pyplot as plt\n'), ((4531, 4616), 'matplotlib.pyplot.plot', 'plt.plot', (['agrid[xlimind]', '(sav[xlimind, 0] - agrid[xlimind, 0])', '"""b-o"""'], {'linewidth': '(2)'}), "(agrid[xlimind], sav[xlimind, 0] - agrid[xlimind, 0], 'b-o',\n linewidth=2)\n", (4539, 4616), True, 'import matplotlib.pyplot as plt\n'), ((4610, 4700), 'matplotlib.pyplot.plot', 'plt.plot', (['agrid[xlimind]', '(sav[xlimind, ny - 1] - agrid[xlimind, 0])', '"""r-o"""'], {'linewidth': '(2)'}), "(agrid[xlimind], sav[xlimind, ny - 1] - agrid[xlimind, 0], 'r-o',\n linewidth=2)\n", (4618, 4700), True, 'import matplotlib.pyplot as plt\n'), ((4748, 4758), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4756, 4758), True, 'import matplotlib.pyplot as plt\n'), ((4763, 4780), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlimits'], {}), '(xlimits)\n', (4771, 4780), True, 'import matplotlib.pyplot as plt\n'), ((4785, 4820), 'matplotlib.pyplot.title', 'plt.title', (['"""Savings: Zoomed (a\'-a)"""'], {}), '("Savings: Zoomed (a\'-a)")\n', (4794, 4820), 
True, 'import matplotlib.pyplot as plt\n'), ((4826, 4836), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4834, 4836), True, 'import matplotlib.pyplot as plt\n'), ((4948, 4962), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (4958, 4962), True, 'import matplotlib.pyplot as plt\n'), ((4967, 4999), 'matplotlib.pyplot.title', 'plt.title', (['"""Income distribution"""'], {}), "('Income distribution')\n", (4976, 4999), True, 'import matplotlib.pyplot as plt\n'), ((5004, 5014), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5012, 5014), True, 'import matplotlib.pyplot as plt\n'), ((5046, 5123), 'matplotlib.pyplot.hist', 'plt.hist', (['asim[:, Tsim - 1]', '(40)'], {'facecolor': '(0.7, 0.7, 0.7)', 'edgecolor': '"""black"""'}), "(asim[:, Tsim - 1], 40, facecolor=(0.7, 0.7, 0.7), edgecolor='black')\n", (5054, 5123), True, 'import matplotlib.pyplot as plt\n'), ((5117, 5131), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (5127, 5131), True, 'import matplotlib.pyplot as plt\n'), ((5136, 5167), 'matplotlib.pyplot.title', 'plt.title', (['"""Asset distribution"""'], {}), "('Asset distribution')\n", (5145, 5167), True, 'import matplotlib.pyplot as plt\n'), ((5172, 5182), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5180, 5182), True, 'import matplotlib.pyplot as plt\n'), ((5276, 5301), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time Period"""'], {}), "('Time Period')\n", (5286, 5301), True, 'import matplotlib.pyplot as plt\n'), ((5306, 5341), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Asset Convergence"""'], {}), "('Mean Asset Convergence')\n", (5315, 5341), True, 'import matplotlib.pyplot as plt\n'), ((5346, 5356), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5354, 5356), True, 'import matplotlib.pyplot as plt\n'), ((710, 731), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'na'], {}), '(0, 1, na)\n', (721, 731), True, 'import numpy as np\n'), ((1064, 1073), 'numpy.log', 
'np.log', (['c'], {}), '(c)\n', (1070, 1073), True, 'import numpy as np\n'), ((3614, 3631), 'numpy.zeros', 'np.zeros', (['(na, 1)'], {}), '((na, 1))\n', (3622, 3631), True, 'import numpy as np\n'), ((3868, 3881), 'numpy.min', 'np.min', (['agrid'], {}), '(agrid)\n', (3874, 3881), True, 'import numpy as np\n'), ((4048, 4061), 'numpy.max', 'np.max', (['agrid'], {}), '(agrid)\n', (4054, 4061), True, 'import numpy as np\n'), ((4707, 4724), 'numpy.zeros', 'np.zeros', (['(na, 1)'], {}), '((na, 1))\n', (4715, 4724), True, 'import numpy as np\n'), ((5236, 5252), 'numpy.mean', 'np.mean', (['asim', '(0)'], {}), '(asim, 0)\n', (5243, 5252), True, 'import numpy as np\n'), ((5422, 5448), 'numpy.mean', 'np.mean', (['ysim[:, Tsim - 1]'], {}), '(ysim[:, Tsim - 1])\n', (5429, 5448), True, 'import numpy as np\n'), ((885, 919), 'discrete_normal.discrete_normal', 'discrete_normal', (['ny', 'mu_y', 'sd_y', 'x'], {}), '(ny, mu_y, sd_y, x)\n', (900, 919), False, 'from discrete_normal import discrete_normal\n'), ((1856, 1871), 'numpy.max', 'np.max', (['Vchoice'], {}), '(Vchoice)\n', (1862, 1871), True, 'import numpy as np\n'), ((1900, 1918), 'numpy.argmax', 'np.argmax', (['Vchoice'], {}), '(Vchoice)\n', (1909, 1918), True, 'import numpy as np\n'), ((3993, 4006), 'numpy.min', 'np.min', (['agrid'], {}), '(agrid)\n', (3999, 4006), True, 'import numpy as np\n'), ((4173, 4186), 'numpy.max', 'np.max', (['agrid'], {}), '(agrid)\n', (4179, 4186), True, 'import numpy as np\n'), ((3950, 3983), 'numpy.max', 'np.max', (['agrid[agrid < xlimits[0]]'], {}), '(agrid[agrid < xlimits[0]])\n', (3956, 3983), True, 'import numpy as np\n'), ((4130, 4163), 'numpy.min', 'np.min', (['agrid[agrid > xlimits[1]]'], {}), '(agrid[agrid > xlimits[1]])\n', (4136, 4163), True, 'import numpy as np\n'), ((5478, 5492), 'numpy.mean', 'np.mean', (['aysim'], {}), '(aysim)\n', (5485, 5492), True, 'import numpy as np\n'), ((5627, 5650), 'numpy.quantile', 'np.quantile', (['aysim', '(0.1)'], {}), '(aysim, 0.1)\n', (5638, 5650), 
True, 'import numpy as np\n'), ((5687, 5710), 'numpy.quantile', 'np.quantile', (['aysim', '(0.5)'], {}), '(aysim, 0.5)\n', (5698, 5710), True, 'import numpy as np\n'), ((5747, 5770), 'numpy.quantile', 'np.quantile', (['aysim', '(0.9)'], {}), '(aysim, 0.9)\n', (5758, 5770), True, 'import numpy as np\n'), ((5807, 5831), 'numpy.quantile', 'np.quantile', (['aysim', '(0.99)'], {}), '(aysim, 0.99)\n', (5818, 5831), True, 'import numpy as np\n'), ((1767, 1798), 'numpy.maximum', 'np.maximum', (['(cash - agrid)', '(1e-10)'], {}), '(cash - agrid, 1e-10)\n', (1777, 1798), True, 'import numpy as np\n'), ((2758, 2835), 'numpy.logical_and', 'np.logical_and', (['(yrand[:, it] > ycumdist[iy - 1])', '(yrand[:, it] <= ycumdist[iy])'], {}), '(yrand[:, it] > ycumdist[iy - 1], yrand[:, it] <= ycumdist[iy])\n', (2772, 2835), True, 'import numpy as np\n'), ((5546, 5573), 'numpy.sum', 'np.sum', (['(aysim == borrow_lim)'], {}), '(aysim == borrow_lim)\n', (5552, 5573), True, 'import numpy as np\n')] |
#encoding: utf-8
"""
remapping -- Remapping figure showing orthogonalization from initial phase reset
Created by <NAME> on 2010-10-12.
Copyright (c) 2009-2011 Johns Hopkins University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
# Library imports
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
# Package imports
from ..core.analysis import BaseAnalysis
from ..vmo import VMOModel
from ..session import VMOSession
from ..compare import (correlation_matrix, correlation_diagonals,
population_spatial_correlation)
from ..tools.images import array_to_image
from ..tools.radians import circle_diff_vec
class RemappingFigure(BaseAnalysis):
    """
    Run complete remapping experiment based on random initial reset

    NOTE(review): this class uses Python 2 constructs (`xrange`, `file(...)`)
    and will not run unmodified under Python 3 — confirm the intended runtime.
    """
    label = "remapping"
    def collect_data(self, N_samples=2, **kwargs):
        """Run basic VMOModel remapping experiment by randomly initializing
        the phase code of a network of oscillators and place units.
        Keyword arguments:
        N_samples -- total number of simulations to run (N-1 remapped from 1st)
        Additional keyword arguments are passed on to VMOModel.
        """
        self.results['N_samples'] = N_samples
        # Set up model parameters (kwargs override these defaults)
        pdict = dict( N_outputs=500,
                        N_theta=1000,
                        N_cues=1,
                        C_W=0.05,
                        gamma_local=0,
                        gamma_distal=0,
                        num_trials=N_samples,
                        refresh_fixed_points=False )
        pdict.update(kwargs)
        # Set up and run the path integration model
        self.out('Running remapping simulations...')
        model = VMOModel(**pdict)
        model.advance_all()
        sessions = VMOSession.get_session_list(model)
        VMOSession.save_session_list(sessions,
            os.path.join(self.datadir, 'samples'))
        # Get unit ordering based on first environment; append the remaining
        # (unsorted) unit indices so every unit appears exactly once
        sortix = list(sessions[0].sortix)
        sortix += list(set(range(sessions[0].num_units)) - set(sortix))
        self.results['sortix'] = np.array(sortix)
        # Save multi-session population responses and activity patterns
        self.out('Computing and storing population responses...')
        R = [SD.get_population_matrix(clusters=sortix) for SD in sessions]
        np.save(os.path.join(self.datadir, 'R.npy'), np.asarray(R))
        # Good-bye
        self.out('All done!')
    def create_plots(self, N_examples=4, examples=None):
        """Create figure(s) with basic data panels
        """
        # Change to data directory and start logging
        os.chdir(self.datadir)
        # NOTE(review): `file()` is Python 2 only; would be `open()` in Python 3.
        self.out.outfd = file('figure.log', 'w')
        # Set up main figure for plotting
        self.figure = {}
        figsize = 9, 12
        plt.rcParams['figure.figsize'] = figsize
        self.figure['remapping'] = f = plt.figure(figsize=figsize)
        f.suptitle(self.label.title())
        # Load the data
        R = np.load(os.path.join(self.datadir, 'R.npy'))
        N = self.results['N_samples']
        # Example active unit responses across environments: pick N_examples
        # units that are active (peak >= 1) in at least one environment
        if examples is None:
            active = set()
            for j in xrange(N):
                active = active.union(set((R[j].max(axis=1)>=1).nonzero()[0]))
            active = list(active)
            active.sort()
            examples = np.random.permutation(len(active))[:N_examples]
            examples = np.array(active)[examples]
        self.out('Plotting example responses: %s'%repr(examples))
        for i,ex in enumerate(examples):
            self.out('Unit %d max response = %.2f Hz'%(ex, R[:,ex].max()))
            for j in xrange(N):
                # One row per example unit, one column per environment
                ax = plt.subplot(2*N_examples, N, N*i+j+1)
                ax.plot(R[j,ex], c='k', lw=1.5)
                ax.set_xlim(0, 360)
                ax.set_ylim(-0.1*R[:,ex].max(), 1.1*R[:,ex].max())
                ax.set_axis_off()
        # Population responses: one image panel per environment, plus a PNG dump
        for j in xrange(N):
            self.out('Environment %d population max = %.2f Hz'%(j+1, R[j].max()))
            ax = plt.subplot(2, N, j+1+N)
            ax.imshow(R[j], aspect='auto', interpolation='nearest')
            array_to_image(R[j], 'pop_env_%02d.png'%(j+1), cmap=mpl.cm.gray_r)
        plt.draw()
        plt.rcParams['figure.figsize'] = plt.rcParamsDefault['figure.figsize']
        self.out.outfd.close()
| [
"matplotlib.pylab.draw",
"matplotlib.pylab.figure",
"os.path.join",
"numpy.asarray",
"os.chdir",
"numpy.array",
"matplotlib.pylab.subplot"
] | [((2301, 2317), 'numpy.array', 'np.array', (['sortix'], {}), '(sortix)\n', (2309, 2317), True, 'import numpy as np\n'), ((2858, 2880), 'os.chdir', 'os.chdir', (['self.datadir'], {}), '(self.datadir)\n', (2866, 2880), False, 'import os\n'), ((3118, 3145), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3128, 3145), True, 'import matplotlib.pylab as plt\n'), ((4564, 4574), 'matplotlib.pylab.draw', 'plt.draw', ([], {}), '()\n', (4572, 4574), True, 'import matplotlib.pylab as plt\n'), ((2051, 2088), 'os.path.join', 'os.path.join', (['self.datadir', '"""samples"""'], {}), "(self.datadir, 'samples')\n", (2063, 2088), False, 'import os\n'), ((2556, 2591), 'os.path.join', 'os.path.join', (['self.datadir', '"""R.npy"""'], {}), "(self.datadir, 'R.npy')\n", (2568, 2591), False, 'import os\n'), ((2593, 2606), 'numpy.asarray', 'np.asarray', (['R'], {}), '(R)\n', (2603, 2606), True, 'import numpy as np\n'), ((3230, 3265), 'os.path.join', 'os.path.join', (['self.datadir', '"""R.npy"""'], {}), "(self.datadir, 'R.npy')\n", (3242, 3265), False, 'import os\n'), ((4359, 4387), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', 'N', '(j + 1 + N)'], {}), '(2, N, j + 1 + N)\n', (4370, 4387), True, 'import matplotlib.pylab as plt\n'), ((3703, 3719), 'numpy.array', 'np.array', (['active'], {}), '(active)\n', (3711, 3719), True, 'import numpy as np\n'), ((3965, 4010), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2 * N_examples)', 'N', '(N * i + j + 1)'], {}), '(2 * N_examples, N, N * i + j + 1)\n', (3976, 4010), True, 'import matplotlib.pylab as plt\n')] |
import os,glob,numpy
# Build the label vector y and the GIST feature matrix X from a directory tree
# where each sub-folder is one malware family containing PNG images.
os.chdir('/Desktop/malimg_dataset') # the parent folder with sub-folders
list_fams = os.listdir(os.getcwd()) # vector of strings with family names
no_imgs = [] # No. of samples per family
for i in range(len(list_fams)):
    os.chdir(list_fams[i])
    len1 = len(glob.glob('*.png')) # assuming the images are stored as 'png'
    no_imgs.append(len1)
    os.chdir('..')
total = sum(no_imgs) # total number of all samples
y = numpy.zeros(total) # label vector
# temp1 = [0, counts...] so cumulative offsets give each family's index range
temp1 = numpy.zeros(len(no_imgs)+1)
temp1[1:len(temp1)]=no_imgs
temp2 = int(temp1[0]) # now temp2 is [0 no_imgs]
for jj in range(len(no_imgs)):
    temp3 = temp2 +int(temp1[jj+1])
    for ii in range(temp2,temp3):
        y[ii] = jj
    temp2 = temp2+ int(temp1[jj+1])
# NOTE(review): `import Image` (PIL) and `leargist` are Python 2 era packages;
# modern equivalents are `from PIL import Image` and pyleargist forks.
import Image, leargist
X = numpy.zeros((sum(no_imgs), 320)) # Feature Matrix (320-dim GIST descriptor per image)
cnt = 0
for i in range(len(list_fams)):
    os.chdir(list_fams[i])
    img_list = glob.glob('*.png') # Getting only 'png' files in a folder
    for j in range(len(img_list)):
        im = Image.open(img_list[j])
        im1 = im.resize((64, 64), Image.ANTIALIAS); # for faster computation
        des = leargist.color_gist(im1)
        X[cnt] = des[0:320]
        cnt = cnt + 1
    os.chdir('..')
import random
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn
# (now sklearn.model_selection); these two imports are unused below anyway.
from sklearn.cross_validation import StratifiedKFold
from sklearn.utils import shuffle
n_samples, n_features = X.shape
# NOTE(review): random.shuffle on a range object only works in Python 2,
# where range() returns a list.
p = range(n_samples) # an index array, 0:n_samples
random.seed(random.random())
random.shuffle(p) # the index array is now shuffled
X, y = X[p], y[p] # both the arrays are now shuffled in the same order
import time
from sklearn.cluster import DBSCAN
from sklearn import metrics
import numpy as np
from sklearn.preprocessing import StandardScaler
conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) # Initializing the Confusion Matrix (unused below)
# Compute DBSCAN clustering on the GIST features
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
# DBSCAN marks noise points with the label -1.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
# External validation against the true family labels y
print("Homogeneity: %0.3f" % metrics.homogeneity_score(y, labels))
print("Completeness: %0.3f" % metrics.completeness_score(y, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(y, labels))
print("Adjusted Rand Index: %0.3f"
      % metrics.adjusted_rand_score(y, labels))
print("Adjusted Mutual Information: %0.3f"
      % metrics.adjusted_mutual_info_score(y, labels))
# Internal validation (no ground truth needed)
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, labels))
##############################################################################
# Plot result: scatter the first two feature dimensions, one color per cluster
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.get_cmap('Spectral')(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
    if k == -1:
        # Black used for noise.
        col = 'k'
    class_member_mask = (labels == k)
    # Core samples drawn with large markers...
    xy = X[class_member_mask & core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
    # ...border/noise samples with small markers.
    xy = X[class_member_mask & ~core_samples_mask]
    plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| [
"sklearn.metrics.homogeneity_score",
"sklearn.metrics.adjusted_rand_score",
"sklearn.metrics.completeness_score",
"sklearn.cluster.DBSCAN",
"matplotlib.pyplot.plot",
"glob.glob",
"leargist.color_gist",
"random.shuffle",
"Image.open",
"sklearn.metrics.v_measure_score",
"matplotlib.pyplot.title",
... | [((21, 56), 'os.chdir', 'os.chdir', (['"""/Desktop/malimg_dataset"""'], {}), "('/Desktop/malimg_dataset')\n", (29, 56), False, 'import os, glob, numpy\n'), ((436, 454), 'numpy.zeros', 'numpy.zeros', (['total'], {}), '(total)\n', (447, 454), False, 'import os, glob, numpy\n'), ((1434, 1451), 'random.shuffle', 'random.shuffle', (['p'], {}), '(p)\n', (1448, 1451), False, 'import random\n'), ((1861, 1898), 'numpy.zeros_like', 'np.zeros_like', (['db.labels_'], {'dtype': 'bool'}), '(db.labels_, dtype=bool)\n', (1874, 1898), True, 'import numpy as np\n'), ((3350, 3409), 'matplotlib.pyplot.title', 'plt.title', (["('Estimated number of clusters: %d' % n_clusters_)"], {}), "('Estimated number of clusters: %d' % n_clusters_)\n", (3359, 3409), True, 'import matplotlib.pyplot as plt\n'), ((3410, 3420), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3418, 3420), True, 'import matplotlib.pyplot as plt\n'), ((118, 129), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (127, 129), False, 'import os, glob, numpy\n'), ((245, 267), 'os.chdir', 'os.chdir', (['list_fams[i]'], {}), '(list_fams[i])\n', (253, 267), False, 'import os, glob, numpy\n'), ((365, 379), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (373, 379), False, 'import os, glob, numpy\n'), ((864, 886), 'os.chdir', 'os.chdir', (['list_fams[i]'], {}), '(list_fams[i])\n', (872, 886), False, 'import os, glob, numpy\n'), ((902, 920), 'glob.glob', 'glob.glob', (['"""*.png"""'], {}), "('*.png')\n", (911, 920), False, 'import os, glob, numpy\n'), ((1204, 1218), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (1212, 1218), False, 'import os, glob, numpy\n'), ((1417, 1432), 'random.random', 'random.random', ([], {}), '()\n', (1430, 1432), False, 'import random\n'), ((2820, 2844), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Spectral"""'], {}), "('Spectral')\n", (2832, 2844), True, 'import matplotlib.pyplot as plt\n'), ((3086, 3180), 'matplotlib.pyplot.plot', 'plt.plot', (['xy[:, 0]', 'xy[:, 1]', 
'"""o"""'], {'markerfacecolor': 'col', 'markeredgecolor': '"""k"""', 'markersize': '(14)'}), "(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k',\n markersize=14)\n", (3094, 3180), True, 'import matplotlib.pyplot as plt\n'), ((3246, 3339), 'matplotlib.pyplot.plot', 'plt.plot', (['xy[:, 0]', 'xy[:, 1]', '"""o"""'], {'markerfacecolor': 'col', 'markeredgecolor': '"""k"""', 'markersize': '(6)'}), "(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k',\n markersize=6)\n", (3254, 3339), True, 'import matplotlib.pyplot as plt\n'), ((280, 298), 'glob.glob', 'glob.glob', (['"""*.png"""'], {}), "('*.png')\n", (289, 298), False, 'import os, glob, numpy\n'), ((1009, 1032), 'Image.open', 'Image.open', (['img_list[j]'], {}), '(img_list[j])\n', (1019, 1032), False, 'import Image, leargist\n'), ((1125, 1149), 'leargist.color_gist', 'leargist.color_gist', (['im1'], {}), '(im1)\n', (1144, 1149), False, 'import Image, leargist\n'), ((1802, 1833), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.3)', 'min_samples': '(10)'}), '(eps=0.3, min_samples=10)\n', (1808, 1833), False, 'from sklearn.cluster import DBSCAN\n'), ((2175, 2211), 'sklearn.metrics.homogeneity_score', 'metrics.homogeneity_score', (['y', 'labels'], {}), '(y, labels)\n', (2200, 2211), False, 'from sklearn import metrics\n'), ((2243, 2280), 'sklearn.metrics.completeness_score', 'metrics.completeness_score', (['y', 'labels'], {}), '(y, labels)\n', (2269, 2280), False, 'from sklearn import metrics\n'), ((2309, 2343), 'sklearn.metrics.v_measure_score', 'metrics.v_measure_score', (['y', 'labels'], {}), '(y, labels)\n', (2332, 2343), False, 'from sklearn import metrics\n'), ((2388, 2426), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', (['y', 'labels'], {}), '(y, labels)\n', (2415, 2426), False, 'from sklearn import metrics\n'), ((2479, 2524), 'sklearn.metrics.adjusted_mutual_info_score', 'metrics.adjusted_mutual_info_score', (['y', 'labels'], {}), '(y, labels)\n', (2513, 
2524), False, 'from sklearn import metrics\n'), ((2572, 2607), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['X', 'labels'], {}), '(X, labels)\n', (2596, 2607), False, 'from sklearn import metrics\n')] |
import os
import sys
import copy
import tensorflow as tf
import tensorflow_addons as tfa
import tqdm
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from notmad.baselines import NOTEARS, ClusteredNOTEARS
from notmad.notmad import NOTMAD
from notmad.helpers import utils
from notmad.helpers import graph_utils
import dataloader
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ info/warning log spam
tf_dtype = tf.dtypes.float32  # float precision used by the models below
# Edge-weight thresholds at which recovered graphs are scored.
threshs = [0.1, 0.2] #[0.0, 0.001, 0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5]
def print_header(model_names, out_file):
    """Write the tab-separated header row of the results table to *out_file*.

    Emits the data-parameter columns, then for every model one
    ``<model>_<thresh>_recovery`` and one ``<model>_<thresh>_mse`` column
    per threshold in the module-level ``threshs`` list.
    """
    data_cols = ['n', 'd', 'n_edges', 'd_context', 'context_snr', 'k_true', 'k']
    print("\t".join(data_cols), end="\t", file=out_file)
    last = len(model_names) - 1
    for i, name in enumerate(model_names):
        recovery_cols = ["{}_{:.3f}_recovery".format(name, t) for t in threshs]
        print("\t".join(recovery_cols), end="\t", file=out_file)
        mse_cols = ["{}_{:.3f}_mse".format(name, t) for t in threshs]
        # Newline terminates the header only after the final model's columns.
        print("\t".join(mse_cols), end=('\t' if i < last else '\n'), file=out_file)
def print_results(model_names, data_params, k, results, out_file):
    """Append one tab-separated results row for a single experiment run.

    The row starts with the data-generation parameters and *k*, followed by
    each model's per-threshold 'recovery' and 'mse' scores from *results*.
    """
    prefix = "{}\t{}\t{}\t{}\t{:.3f}\t{}\t{}".format(
        data_params["n"], data_params['d'],
        data_params["n_edges"], data_params["n_c"],
        data_params["context_snr"], data_params["k_true"], k)
    print(prefix, end='\t', file=out_file)
    last = len(model_names) - 1
    for i, name in enumerate(model_names):
        recovery = "\t".join("{:.3f}".format(v) for v in results[name]['recovery'])
        print(recovery, end='\t', file=out_file)
        mse = "\t".join("{:.3f}".format(v) for v in results[name]['mse'])
        # Newline only after the last model; flush so partial runs are visible.
        print(mse, end=('\t' if i < last else '\n'), flush=True, file=out_file)
def fit_pop(loss_params, C_train, X_train):
    """Fit one population-level NOTEARS model shared across all contexts."""
    d = X_train.shape[-1]
    model = NOTEARS(loss_params, C_train.shape, (d, d), learning_rate=1e-2)
    model.fit(C_train, X_train, epochs=1000, batch_size=32, es_patience=1, verbose=1)
    return model
def fit_clustered(loss_params, C_train, X_train, k):
    """Fit the ClusteredNOTEARS baseline with *k* clusters."""
    d = X_train.shape[-1]
    model = ClusteredNOTEARS(k, loss_params, C_train.shape, (d, d),
                             learning_rate=1e-2, tf_dtype=tf.dtypes.float32)
    model.fit(C_train, X_train, epochs=1000, batch_size=32, es_patience=1,
              val_split=0.25, verbose=1)
    return model
def fit_notmad(sample_specific_loss_params, archetype_loss_params,
               C_train, X_train, k, project, notears_pop, base_predictor):
    """Build and train a NOTMAD model with *k* archetype matrices.

    Archetypes are initialised with small uniform noise.  ``notears_pop`` is
    currently unused; it is kept in the signature for compatibility with
    earlier initialisation schemes.
    """
    d = X_train.shape[-1]
    init_mat = np.random.uniform(-0.01, 0.01, size=(k, d, d))
    model = NOTMAD(
        C_train.shape, X_train.shape, k,
        sample_specific_loss_params, archetype_loss_params,
        n_encoder_layers=1, encoder_width=32,
        activation='linear', init_mat=init_mat,
        learning_rate=1e-3, project_archs_to_dag=project,
        project_distance=1.0,
        context_activity_regularizer=tf.keras.regularizers.l1(0),
        use_compatibility=False, update_compat_by_grad=False,
        pop_model=None, base_predictor=base_predictor)
    model.fit(C_train, X_train, batch_size=1, epochs=20, es_patience=2, verbose=1)
    return model
def run_experiment(data_params, k, threshs, model_names):
    """Run one synthetic-data comparison of all models in *model_names*.

    Generates data with dataloader.gen_data, fits each model on a 75/25
    train/test split, and returns a dict mapping model name to its
    'recovery' (spectral-norm error vs. the true W matrices) and 'mse'
    scores, one value per threshold in *threshs*.
    """
    # NOTE(review): the 'f1' key initialised here is never written again;
    # add_results stores under 'recovery' -- confirm 'f1' is intentionally dead.
    results = {name: {'f1': [], 'mse': []} for name in model_names}
    W, C, X, W_dict, C_dict = dataloader.gen_data(data_params)
    # pca = PCA(n_components=3)
    # X_small = pca.fit_transform(X.squeeze())
    # C = np.hstack((C, X_small))
    C_train, C_test, X_train, X_test, W_train, W_test = train_test_split(C, X, W,
        test_size=0.25)
    # Per-sample spectral-norm distance between predicted and true W matrices.
    def calc_recovery_errs(preds, W):
        return [np.linalg.norm(preds[i] - W[i], ord=2) for i in range(len(preds))]
    # NOTE(review): `thresh` is never used in the comprehension below, so the
    # same mean is repeated len(threshs) times -- presumably to keep the output
    # shape aligned with the per-threshold mse columns; confirm intent.
    def calc_recovery_err(preds, W):
        return [np.mean(calc_recovery_errs(preds, W)) for thresh in threshs]
    def add_results(name, preds):
        results[name]['recovery'] = calc_recovery_err(preds, W_test)
        # NOTE(review): `preds*np.abs(preds) > thresh` parses as
        # `(preds*np.abs(preds)) > thresh`, passing a boolean array to
        # mses_xw -- `preds*(np.abs(preds) > thresh)` (weights zeroed below
        # the threshold) may have been intended; verify before trusting mse.
        results[name]['mse'] = [np.mean(utils.mses_xw(X_test, preds*np.abs(preds) > thresh)) for thresh in threshs]
    # Baseline: all-ones adjacency matrices.
    add_results('base', np.ones_like(W_test))
    # print('Base', results['base']['recovery'])
    loss_params = {'l1': 1e-3,
                   'alpha': 1e-2,
                   'rho':1e-1}
    # Population model: one shared NOTEARS graph.
    notears = fit_pop(loss_params, C_train, X_train)
    notears_preds = notears.predict_w(C_test, project_to_dag=True)
    add_results('notears', notears_preds)
    # print('Pop', results['notears']['recovery'])
    # Clustered baseline, given the true number of clusters.
    clustered = fit_clustered(loss_params, C_train, X_train, data_params['k_true'])
    cluster_preds = clustered.predict_w(C_test, project_to_dag=True)
    add_results("cluster", cluster_preds)
    # print('Cluster', results['cluster']['recovery'])
    sample_specific_loss_params = {'l1': 0., 'alpha': 2e1, 'rho': 1e0}
    archetype_loss_params = {'l1': 0., 'alpha': 1e-1, 'rho': 1e-2}
    # NOTMAD warm-started from the clustered model.
    notmad = fit_notmad(
        sample_specific_loss_params, archetype_loss_params,
        C_train, X_train, k, project=True, notears_pop=None, base_predictor=clustered)
    preds = notmad.predict_w(C_test, project_to_dag=True).squeeze()
    add_results('notmad', preds)
    # print('NOTMAD', results['notmad']['recovery'])
    # Ablation: NOTMAD without a base predictor.
    notmad_nobase = fit_notmad(
        sample_specific_loss_params, archetype_loss_params,
        C_train, X_train, k, project=True, notears_pop=None, base_predictor=None)
    preds_nobase = notmad_nobase.predict_w(C_test, project_to_dag=True).squeeze()
    add_results('notmad_nobase', preds_nobase)
    # print('NOTMAD_nobase', results['notmad_nobase']['recovery'])
    # NOTE(review): the two train-set predictions below are unused outside the
    # commented-out diagnostics -- candidates for removal.
    train_preds_not_projected = notmad.predict_w(C_train, project_to_dag=False).squeeze()
    train_preds = notmad.predict_w(C_train, project_to_dag=True).squeeze()
    # print("pop train", calc_recovery_err(notears.predict_w(C_train), W_train))
    # print("pop train projected", calc_recovery_err(notears.predict_w(C_train, project_to_dag=True), W_train))
    # print("context train", calc_recovery_err(train_preds_not_projected, W_train))
    # print("context train projected", calc_recovery_err(train_preds, W_train))
    # print("pop test", calc_recovery_err(notears.predict_w(C_test), W_test))
    # print("context test", calc_recovery_err(preds, W_test))
    # fig = plt.figure()
    # plt.hist(calc_recovery_errs(notears_preds, W_test), label='NOTEARS')
    # plt.hist(calc_recovery_errs(cluster_preds, W_test), label='Cluster')
    # plt.hist(calc_recovery_errs(preds, W_test), label='NOTMAD')
    # plt.legend()
    # plt.show()
    """
    fig = plt.figure()
    plt.imshow(notears_preds[0])
    fig = plt.figure()
    plt.imshow(cluster_preds[0])
    fig = plt.figure()
    plt.imshow(preds[0])
    plt.show()
    """
    """
    sample_specific_loss_params = {'l1':1e-3, 'alpha': 1e0, 'rho': 1e-1}
    archetype_loss_params = {'l1':0, 'alpha': 0, 'rho': 0}
    lr_notmad = fit_lr_notmad(sample_specific_loss_params, archetype_loss_params,
        C_train, X_train, k, rank, project=False, notears_pop=notears.get_w())
    preds = lr_notmad.predict_w(C_test, project_to_dag=True).squeeze()
    add_results('lr_notmad', preds)
    """
    """
    notmad = fit_notmad(sample_specific_loss_params, archetype_loss_params,
        C_train, X_train, k, rank, project=True)
    preds = notmad.predict_w(C_test, project_to_dag=True).squeeze()
    add_results('notmad_project', preds)
    """
    """
    print("Fitting LIONESS...")
    lioness = LIONESS()
    lioness.fit(loss_params, C_train, X_train, init_model=notears, es_patience=1)
    W_lioness = lioness.Ws
    print("Finished fitting LIONESS.")
    f1s_lioness = utils.get_f1s(W_train, W_lioness, threshs) # TODO: what's the most fair way to compare?
    """
    return results
if __name__ == "__main__":
model_names = ['base', 'notears', 'cluster', 'notmad', 'notmad_nobase'] #'lioness'
# TODO: X Noise scale
# for graph_type in ["ER", "SF", "BP"]:
for graph_type in ["ER"]:
data_params = {
"n_i": 1, # number of samples per DAG
"n_c": 0, # number of contextual features
"simulation_type": 'clusters', # archetypes, clusters, random
"ensure_convex" : False, # should the archetype be generated such that they form a convex set of DAGs?
"graph_type": graph_type,
'sem_type' : 'gauss',
"n": 1000,
"d": 6,
"n_edges": 6,
"k_true": 8,
# "n_mix": 2,
"arch_min_radius": 100,
"cluster_max_radius": 0.2,
"context_snr": 0.75,
}
k = 8
filepath = "results/simulation_results_{}.tsv".format(graph_type.lower())
if not os.path.exists(filepath):
os.makedirs('results', exist_ok=True)
with open(filepath, 'w') as outfile:
print_header(model_names, outfile)
with open(filepath, 'a') as outfile:
results = run_experiment(data_params, k, threshs, model_names)
print_results(model_names, data_params, k, results, outfile)
print(f'Saved to {filepath}')
exit()
| [
"numpy.ones_like",
"os.path.exists",
"numpy.abs",
"notmad.baselines.NOTEARS",
"dataloader.gen_data",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"numpy.linalg.norm",
"tensorflow.keras.regularizers.l1",
"notmad.baselines.ClusteredNOTEARS",
"numpy.random.uniform"
] | [((1887, 1951), 'notmad.baselines.NOTEARS', 'NOTEARS', (['loss_params', 'C_train.shape', 'W_shape'], {'learning_rate': '(0.01)'}), '(loss_params, C_train.shape, W_shape, learning_rate=0.01)\n', (1894, 1951), False, 'from notmad.baselines import NOTEARS, ClusteredNOTEARS\n'), ((2184, 2292), 'notmad.baselines.ClusteredNOTEARS', 'ClusteredNOTEARS', (['k', 'loss_params', 'C_train.shape', 'W_shape'], {'learning_rate': '(0.01)', 'tf_dtype': 'tf.dtypes.float32'}), '(k, loss_params, C_train.shape, W_shape, learning_rate=0.01,\n tf_dtype=tf.dtypes.float32)\n', (2200, 2292), False, 'from notmad.baselines import NOTEARS, ClusteredNOTEARS\n'), ((2614, 2692), 'numpy.random.uniform', 'np.random.uniform', (['(-0.01)', '(0.01)'], {'size': '(k, X_train.shape[-1], X_train.shape[-1])'}), '(-0.01, 0.01, size=(k, X_train.shape[-1], X_train.shape[-1]))\n', (2631, 2692), True, 'import numpy as np\n'), ((3876, 3908), 'dataloader.gen_data', 'dataloader.gen_data', (['data_params'], {}), '(data_params)\n', (3895, 3908), False, 'import dataloader\n'), ((4078, 4119), 'sklearn.model_selection.train_test_split', 'train_test_split', (['C', 'X', 'W'], {'test_size': '(0.25)'}), '(C, X, W, test_size=0.25)\n', (4094, 4119), False, 'from sklearn.model_selection import train_test_split\n'), ((4687, 4707), 'numpy.ones_like', 'np.ones_like', (['W_test'], {}), '(W_test)\n', (4699, 4707), True, 'import numpy as np\n'), ((4252, 4290), 'numpy.linalg.norm', 'np.linalg.norm', (['(preds[i] - W[i])'], {'ord': '(2)'}), '(preds[i] - W[i], ord=2)\n', (4266, 4290), True, 'import numpy as np\n'), ((9374, 9398), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (9388, 9398), False, 'import os\n'), ((9412, 9449), 'os.makedirs', 'os.makedirs', (['"""results"""'], {'exist_ok': '(True)'}), "('results', exist_ok=True)\n", (9423, 9449), False, 'import os\n'), ((3434, 3461), 'tensorflow.keras.regularizers.l1', 'tf.keras.regularizers.l1', (['(0)'], {}), '(0)\n', (3458, 3461), True, 'import tensorflow 
as tf\n'), ((4614, 4627), 'numpy.abs', 'np.abs', (['preds'], {}), '(preds)\n', (4620, 4627), True, 'import numpy as np\n')] |
import numpy as np
import cantera as ct
import pyutils as pu
def counterflow_twin_extinction(
        chemistry = 'FFCM-1.cti',
        fuel = {'CH4':1.},
        oxidizer = {'O2':1., 'N2':3.76},
        T = 300.,
        p = 1.,
        phi = 1.,
        solution = None,
        **kwargs):
    """March a counterflow twin flame toward extinction by raising the strain rate.

    Starting from strain rate ``a_init`` with domain width ``L_init``, the
    twin flame is solved repeatedly; after each converged solution the strain
    rate is multiplied by ``exp(f0)`` (and the width shrunk by ``exp(f0/2)``)
    until the peak heat release rate falls below 1.0, which is taken as
    extinguished.  Each converged flame is saved to an ``.xml`` file and used
    as the restart solution for the next, more strained, case.

    Parameters
    ----------
    chemistry : str
        Cantera mechanism file.
    fuel, oxidizer : dict
        Species mole-fraction maps for fuel and oxidizer streams.
    T : float
        Unburnt temperature [K].
    p : float
        Pressure [atm].
    phi : float
        Equivalence ratio.
    solution : str or None
        Optional saved-solution file to restart the first case from.
    **kwargs
        ``a_init`` (default 100.), ``L_init`` (default 0.05) and ``f0``
        (default 0.2) control the march; remaining keys are forwarded to
        ``pu.ctutils.driver.counterflow_twin_flame``.
    """
    # Parameters of the approach to extinction: a_{n+1} = exp(f0) * a_n.
    a_init = kwargs.get('a_init', 100.)
    L_init = kwargs.get('L_init', 0.05)
    f0 = kwargs.get('f0', 0.2)

    params = {'T': T, 'p': p, 'phi': phi}
    pressure = p * ct.one_atm

    # Seed case: an unstrained free flame, saved for potential restarts.
    gas = pu.ctutils.gas.mixture(chemistry, fuel, oxidizer, T, pressure, phi)
    flame = pu.ctutils.driver.free_flame_(gas, width=L_init)
    case = pu.filename.params2name(params) + '.xml'
    flame.save(case)

    # Iterate to the extinction point.
    a = a_init
    L = L_init
    while True:
        gas = pu.ctutils.gas.mixture(chemistry, fuel, oxidizer, T, pressure, phi)
        flame = pu.ctutils.driver.counterflow_twin_flame(
            gas,
            a = a,
            solution = solution,
            width = L,
            **kwargs
            )
        # Negligible peak heat release: the flame has extinguished.
        if flame.heat_release_rate.max() < 1.0:
            break
        params['a'] = a
        case = pu.filename.params2name(params) + '.xml'
        flame.save(case)
        # Restart the next (higher-strain) case from this converged one.
        solution = case
        # Increase the strain rate and shrink the domain accordingly.
        f0_a = np.exp(f0)
        L /= np.power(f0_a, 0.5)
        a *= f0_a
    return
| [
"pyutils.ctutils.driver.counterflow_twin_flame",
"numpy.power",
"pyutils.ctutils.driver.free_flame_",
"numpy.exp",
"pyutils.ctutils.gas.mixture",
"pyutils.filename.params2name"
] | [((781, 848), 'pyutils.ctutils.gas.mixture', 'pu.ctutils.gas.mixture', (['chemistry', 'fuel', 'oxidizer', 'T', 'pressure', 'phi'], {}), '(chemistry, fuel, oxidizer, T, pressure, phi)\n', (803, 848), True, 'import pyutils as pu\n'), ((862, 910), 'pyutils.ctutils.driver.free_flame_', 'pu.ctutils.driver.free_flame_', (['gas'], {'width': 'L_init'}), '(gas, width=L_init)\n', (891, 910), True, 'import pyutils as pu\n'), ((923, 954), 'pyutils.filename.params2name', 'pu.filename.params2name', (['params'], {}), '(params)\n', (946, 954), True, 'import pyutils as pu\n'), ((1083, 1150), 'pyutils.ctutils.gas.mixture', 'pu.ctutils.gas.mixture', (['chemistry', 'fuel', 'oxidizer', 'T', 'pressure', 'phi'], {}), '(chemistry, fuel, oxidizer, T, pressure, phi)\n', (1105, 1150), True, 'import pyutils as pu\n'), ((1168, 1261), 'pyutils.ctutils.driver.counterflow_twin_flame', 'pu.ctutils.driver.counterflow_twin_flame', (['gas'], {'a': 'a', 'solution': 'solution', 'width': 'L'}), '(gas, a=a, solution=solution, width\n =L, **kwargs)\n', (1208, 1261), True, 'import pyutils as pu\n'), ((1621, 1631), 'numpy.exp', 'np.exp', (['f0'], {}), '(f0)\n', (1627, 1631), True, 'import numpy as np\n'), ((1645, 1664), 'numpy.power', 'np.power', (['f0_a', '(0.5)'], {}), '(f0_a, 0.5)\n', (1653, 1664), True, 'import numpy as np\n'), ((1458, 1489), 'pyutils.filename.params2name', 'pu.filename.params2name', (['params'], {}), '(params)\n', (1481, 1489), True, 'import pyutils as pu\n')] |
from __future__ import absolute_import, division, print_function
from os import path
import csv
import yaml
from dynd import nd, ndt
import datashape
from datashape.type_equation_solver import matches_datashape_pattern
import blaze
from .. import py2help
def load_blaze_array(conf, dir):
    """Loads a blaze array from the catalog configuration and catalog path.

    The entry is described by a sibling ``<path>.array`` YAML file whose
    ``type`` field selects the loader: 'csv', 'json', 'hdf5', 'npy' or 'py'.

    Raises
    ------
    RuntimeError
        If the description file is missing, or a loaded array does not
        match its declared datashape.
    ValueError
        If the description declares an unsupported array type.
    """
    # This is a temporary hack, need to transition to using the
    # deferred data descriptors for various formats.
    fsdir = conf.get_fsdir(dir)
    if not path.isfile(fsdir + '.array'):
        raise RuntimeError('Could not find blaze array description file %r'
                           % (fsdir + '.array'))
    with open(fsdir + '.array') as f:
        # NOTE(review): yaml.load can construct arbitrary Python objects;
        # use yaml.safe_load if catalog files may come from untrusted sources.
        arrmeta = yaml.load(f)
    tp = arrmeta['type']
    imp = arrmeta['import']
    ds_str = arrmeta.get('datashape')  # optional. HDF5 does not need that.

    if tp == 'csv':
        with open(fsdir + '.csv', 'r') as f:
            rd = csv.reader(f)
            if imp.get('headers', False):
                # Skip the header line
                next(rd)
            dat = list(rd)
        arr = nd.array(dat, ndt.type(ds_str))[:]
        return blaze.array(arr)
    elif tp == 'json':
        arr = nd.parse_json(ds_str, nd.memmap(fsdir + '.json'))
        return blaze.array(arr)
    elif tp == 'hdf5':
        import tables as tb
        from blaze.datadescriptor import HDF5_DDesc
        fname = fsdir + '.h5'  # XXX .h5 assumed for HDF5
        with tb.open_file(fname, 'r') as f:
            dp = imp.get('datapath')  # specifies a path in HDF5
            try:
                dparr = f.get_node(f.root, dp, 'Leaf')
            except tb.NoSuchNodeError:
                raise RuntimeError(
                    'HDF5 file does not have a dataset in %r' % dp)
            dd = HDF5_DDesc(fname, dp)
            return blaze.array(dd)
    elif tp == 'npy':
        import numpy as np
        use_memmap = imp.get('memmap', False)
        if use_memmap:
            arr = np.load(fsdir + '.npy', 'r')
        else:
            arr = np.load(fsdir + '.npy')
        arr = nd.array(arr)
        arr = blaze.array(arr)
        ds = datashape.dshape(ds_str)
        if not matches_datashape_pattern(arr.dshape, ds):
            # BUG FIX: three %r placeholders were formatted with only two
            # arguments, raising TypeError instead of the intended error;
            # include the catalog path as the first argument.
            raise RuntimeError(('NPY file for blaze catalog path %r ' +
                                'has the wrong datashape (%r instead of ' +
                                '%r)') % (dir, arr.dshape, ds))
        return arr
    elif tp == 'py':
        ds = datashape.dshape(ds_str)
        # The script is run with the following globals,
        # and should put the loaded array in a global
        # called 'result'.
        gbl = {'catconf': conf,  # Catalog configuration object
               'impdata': imp,   # Import data from the .array file
               'catpath': dir,   # Catalog path
               'fspath': fsdir,  # Equivalent filesystem path
               'dshape': ds      # Datashape the result should have
               }
        if py2help.PY2:
            execfile(fsdir + '.py', gbl, gbl)
        else:
            with open(fsdir + '.py') as f:
                code = compile(f.read(), fsdir + '.py', 'exec')
                exec(code, gbl, gbl)
        arr = gbl.get('result', None)
        if arr is None:
            raise RuntimeError(('Script for blaze catalog path %r did not ' +
                                'return anything in "result" variable')
                               % (dir))
        elif not isinstance(arr, blaze.Array):
            # BUG FIX: two placeholders but a single non-tuple argument;
            # supply both the catalog path and the offending type.
            raise RuntimeError(('Script for blaze catalog path %r returned ' +
                                'wrong type of object (%r instead of ' +
                                'blaze.Array)') % (dir, type(arr)))
        if not matches_datashape_pattern(arr.dshape, ds):
            # BUG FIX: three placeholders but only two arguments; include
            # the catalog path.
            raise RuntimeError(('Script for blaze catalog path %r returned ' +
                                'array with wrong datashape (%r instead of ' +
                                '%r)') % (dir, arr.dshape, ds))
        return arr
    else:
        raise ValueError(('Unsupported array type %r from ' +
                          'blaze catalog entry %r')
                         % (tp, dir))
def load_blaze_subcarray(conf, cdir, subcarray):
    """Load one dataset (*subcarray*) from an HDF5-backed catalog entry.

    Verifies that the HDF5 file behind *cdir* contains a leaf node named
    *subcarray*, then wraps it in an HDF5 data descriptor and returns the
    corresponding blaze array.
    """
    import tables as tb
    from blaze.datadescriptor import HDF5_DDesc
    with tb.open_file(cdir.fname, 'r') as f:
        try:
            # Existence check only; the node handle itself is not kept.
            f.get_node(f.root, subcarray, 'Leaf')
        except tb.NoSuchNodeError:
            # BUG FIX: previously referenced the undefined name `dp` here,
            # which raised NameError instead of the intended message.
            raise RuntimeError(
                'HDF5 file does not have a dataset in %r' % subcarray)
    dd = HDF5_DDesc(cdir.fname, subcarray)
    return blaze.array(dd)
| [
"dynd.nd.memmap",
"blaze.datadescriptor.HDF5_DDesc",
"datashape.type_equation_solver.matches_datashape_pattern",
"tables.open_file",
"yaml.load",
"datashape.dshape",
"os.path.isfile",
"dynd.nd.array",
"blaze.array",
"csv.reader",
"dynd.ndt.type",
"numpy.load"
] | [((4621, 4636), 'blaze.array', 'blaze.array', (['dd'], {}), '(dd)\n', (4632, 4636), False, 'import blaze\n'), ((531, 560), 'os.path.isfile', 'path.isfile', (["(fsdir + '.array')"], {}), "(fsdir + '.array')\n", (542, 560), False, 'from os import path\n'), ((743, 755), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (752, 755), False, 'import yaml\n'), ((1179, 1195), 'blaze.array', 'blaze.array', (['arr'], {}), '(arr)\n', (1190, 1195), False, 'import blaze\n'), ((4325, 4354), 'tables.open_file', 'tb.open_file', (['cdir.fname', '"""r"""'], {}), "(cdir.fname, 'r')\n", (4337, 4354), True, 'import tables as tb\n'), ((4576, 4609), 'blaze.datadescriptor.HDF5_DDesc', 'HDF5_DDesc', (['cdir.fname', 'subcarray'], {}), '(cdir.fname, subcarray)\n', (4586, 4609), False, 'from blaze.datadescriptor import HDF5_DDesc\n'), ((968, 981), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (978, 981), False, 'import csv\n'), ((1298, 1314), 'blaze.array', 'blaze.array', (['arr'], {}), '(arr)\n', (1309, 1314), False, 'import blaze\n'), ((1143, 1159), 'dynd.ndt.type', 'ndt.type', (['ds_str'], {}), '(ds_str)\n', (1151, 1159), False, 'from dynd import nd, ndt\n'), ((1255, 1281), 'dynd.nd.memmap', 'nd.memmap', (["(fsdir + '.json')"], {}), "(fsdir + '.json')\n", (1264, 1281), False, 'from dynd import nd, ndt\n'), ((1855, 1870), 'blaze.array', 'blaze.array', (['dd'], {}), '(dd)\n', (1866, 1870), False, 'import blaze\n'), ((1490, 1514), 'tables.open_file', 'tb.open_file', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (1502, 1514), True, 'import tables as tb\n'), ((1818, 1839), 'blaze.datadescriptor.HDF5_DDesc', 'HDF5_DDesc', (['fname', 'dp'], {}), '(fname, dp)\n', (1828, 1839), False, 'from blaze.datadescriptor import HDF5_DDesc\n'), ((2106, 2119), 'dynd.nd.array', 'nd.array', (['arr'], {}), '(arr)\n', (2114, 2119), False, 'from dynd import nd, ndt\n'), ((2134, 2150), 'blaze.array', 'blaze.array', (['arr'], {}), '(arr)\n', (2145, 2150), False, 'import blaze\n'), ((2164, 2188), 
'datashape.dshape', 'datashape.dshape', (['ds_str'], {}), '(ds_str)\n', (2180, 2188), False, 'import datashape\n'), ((2007, 2035), 'numpy.load', 'np.load', (["(fsdir + '.npy')", '"""r"""'], {}), "(fsdir + '.npy', 'r')\n", (2014, 2035), True, 'import numpy as np\n'), ((2068, 2091), 'numpy.load', 'np.load', (["(fsdir + '.npy')"], {}), "(fsdir + '.npy')\n", (2075, 2091), True, 'import numpy as np\n'), ((2204, 2245), 'datashape.type_equation_solver.matches_datashape_pattern', 'matches_datashape_pattern', (['arr.dshape', 'ds'], {}), '(arr.dshape, ds)\n', (2229, 2245), False, 'from datashape.type_equation_solver import matches_datashape_pattern\n'), ((2507, 2531), 'datashape.dshape', 'datashape.dshape', (['ds_str'], {}), '(ds_str)\n', (2523, 2531), False, 'import datashape\n'), ((3753, 3794), 'datashape.type_equation_solver.matches_datashape_pattern', 'matches_datashape_pattern', (['arr.dshape', 'ds'], {}), '(arr.dshape, ds)\n', (3778, 3794), False, 'from datashape.type_equation_solver import matches_datashape_pattern\n')] |
#encoding=utf8
'''
Detection with SSD
In this example, we will load a SSD model and use it to detect objects.
'''
import os
import sys
import argparse
import numpy as np
from PIL import Image, ImageDraw
# Make sure that caffe is on the python path:
caffe_root = './'
os.chdir(caffe_root)
sys.path.insert(0, os.path.join(caffe_root, 'python'))
import caffe
import h5py
from google.protobuf import text_format
from caffe.proto import caffe_pb2
def get_labelname(labelmap, labels):
    """Map numeric label id(s) to display names via a LabelMap proto.

    Parameters
    ----------
    labelmap : caffe_pb2.LabelMap
        Label map whose ``item`` entries carry ``label`` and ``display_name``.
    labels : int or list of int
        A single label id or a list of label ids.

    Returns
    -------
    list of str
        Display names in the order of *labels*.

    Raises
    ------
    AssertionError
        If a label id is not present in the label map.
    """
    if type(labels) is not list:
        labels = [labels]
    labelnames = []
    for label in labels:
        found = False
        # BUG FIX: iterate the items directly instead of `xrange(...)`,
        # which does not exist on Python 3.
        for item in labelmap.item:
            if label == item.label:
                found = True
                labelnames.append(item.display_name)
                break
        assert found, 'label %r not found in labelmap' % label
    return labelnames
class CaffeDetection:
    """Loads an SSD300-VGG16 Caffe network and exports its weights to HDF5.

    On construction the network and its input transformer are set up, the
    PASCAL VOC label map is parsed, and every trainable parameter is dumped
    to an HDF5 file under renamed dataset keys (CZModels naming).
    """
    def __init__(self, gpu_id, model_def, model_weights, image_resize, labelmap_file,
                 out_hdf='/Users/julian/CZModels/SSD300VGGReference20180920.hdf'):
        # caffe.set_device(gpu_id)
        # caffe.set_mode_gpu()
        self.image_resize = image_resize
        # Load the net in the test phase for inference, and configure input preprocessing.
        self.net = caffe.Net(model_def,    # defines the structure of the model
                             model_weights,  # contains the trained weights
                             caffe.TEST)     # use test mode (e.g., don't perform dropout)
        # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([104, 117, 123]))  # mean pixel
        # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_raw_scale('data', 255)
        # the reference model has channels in BGR order instead of RGB
        self.transformer.set_channel_swap('data', (2, 1, 0))

        # load PASCAL VOC labels (context manager fixes the leaked file handle)
        self.labelmap = caffe_pb2.LabelMap()
        with open(labelmap_file, 'r') as f:
            text_format.Merge(str(f.read()), self.labelmap)

        print( self.net.params['conv1_1'][0].data )
        self._export_weights(out_hdf)

    def _export_weights(self, out_hdf):
        """Write all network parameters to *out_hdf* as flat HDF5 datasets.

        Replaces ~80 repetitive create_dataset calls with data-driven loops;
        the `with` block fixes the previously leaked (never closed) HDF5 file.
        """
        params = self.net.params
        with h5py.File(out_hdf, 'w') as hdf:
            # VGG backbone layers: names carried over unchanged.
            vgg = ['conv1_1', 'conv1_2',
                   'conv2_1', 'conv2_2',
                   'conv3_1', 'conv3_2', 'conv3_3',
                   'conv4_1', 'conv4_2', 'conv4_3',
                   'conv5_1', 'conv5_2', 'conv5_3']
            for name in vgg:
                hdf.create_dataset('/%sW' % name, data=params[name][0].data)
                hdf.create_dataset('/%sB' % name, data=params[name][1].data)
            # Extra feature layers, renamed for the export
            # (e.g. Caffe 'fc6' -> '/conv6_W', 'conv6_1' -> '/conv8_1W').
            renamed = [('fc6', 'conv6_'), ('fc7', 'conv7_'),
                       ('conv6_1', 'conv8_1'), ('conv6_2', 'conv8_2'),
                       ('conv7_1', 'conv9_1'), ('conv7_2', 'conv9_2'),
                       ('conv8_1', 'conv10_1'), ('conv8_2', 'conv10_2'),
                       ('conv9_1', 'conv11_1'), ('conv9_2', 'conv11_2')]
            for src, dst in renamed:
                hdf.create_dataset('/%sW' % dst, data=params[src][0].data)
                hdf.create_dataset('/%sB' % dst, data=params[src][1].data)
            # SSD prediction heads: class-confidence and localization convs.
            heads = [('conv4_3_norm', 'block4'), ('fc7', 'block7'),
                     ('conv6_2', 'block8'), ('conv7_2', 'block9'),
                     ('conv8_2', 'block10'), ('conv9_2', 'block11')]
            for src, dst in heads:
                conf = params[src + '_mbox_conf']
                loc = params[src + '_mbox_loc']
                hdf.create_dataset('/%s_classes_W' % dst, data=conf[0].data)
                hdf.create_dataset('/%s_classes_B' % dst, data=conf[1].data)
                hdf.create_dataset('/%s_loc_W' % dst, data=loc[0].data)
                hdf.create_dataset('/%s_loc_B' % dst, data=loc[1].data)
            # L2-normalization scale vector for conv4_3.
            hdf.create_dataset('/conv4_3_norm', data=params['conv4_3_norm'][0].data)
def main(args):
    """Entry point: build a CaffeDetection (which also exports the weights)
    from the parsed command-line arguments."""
    CaffeDetection(args.gpu_id,
                   args.model_def, args.model_weights,
                   args.image_resize, args.labelmap_file)
def parse_args():
    """Define the command-line interface and parse sys.argv.

    Returns the argparse Namespace with gpu_id, labelmap_file, model_def,
    image_resize and model_weights.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--gpu_id', type=int, default=0, help='gpu id')
    p.add_argument('--labelmap_file',
                   default='data/VOC0712/labelmap_voc.prototxt')
    p.add_argument('--model_def',
                   default='models/VGGNet/VOC0712/SSD_300x300/deploy.prototxt')
    p.add_argument('--image_resize', default=300, type=int)
    p.add_argument('--model_weights',
                   default='models/VGGNet/VOC0712/SSD_300x300/'
                           'VGG_VOC0712_SSD_300x300_iter_120000.caffemodel')
    return p.parse_args()
if __name__ == '__main__':
    # Parse CLI options and run the model-load / weight-export pipeline.
    main(parse_args())
| [
"argparse.ArgumentParser",
"caffe.io.Transformer",
"os.path.join",
"h5py.File",
"os.chdir",
"numpy.array",
"caffe.Net",
"caffe.proto.caffe_pb2.LabelMap"
] | [((268, 288), 'os.chdir', 'os.chdir', (['caffe_root'], {}), '(caffe_root)\n', (276, 288), False, 'import os\n'), ((308, 342), 'os.path.join', 'os.path.join', (['caffe_root', '"""python"""'], {}), "(caffe_root, 'python')\n", (320, 342), False, 'import os\n'), ((9063, 9088), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9086, 9088), False, 'import argparse\n'), ((1226, 1273), 'caffe.Net', 'caffe.Net', (['model_def', 'model_weights', 'caffe.TEST'], {}), '(model_def, model_weights, caffe.TEST)\n', (1235, 1273), False, 'import caffe\n'), ((1569, 1634), 'caffe.io.Transformer', 'caffe.io.Transformer', (["{'data': self.net.blobs['data'].data.shape}"], {}), "({'data': self.net.blobs['data'].data.shape})\n", (1589, 1634), False, 'import caffe\n'), ((2140, 2160), 'caffe.proto.caffe_pb2.LabelMap', 'caffe_pb2.LabelMap', ([], {}), '()\n', (2158, 2160), False, 'from caffe.proto import caffe_pb2\n'), ((2289, 2360), 'h5py.File', 'h5py.File', (['"""/Users/julian/CZModels/SSD300VGGReference20180920.hdf"""', '"""w"""'], {}), "('/Users/julian/CZModels/SSD300VGGReference20180920.hdf', 'w')\n", (2298, 2360), False, 'import h5py\n'), ((1735, 1760), 'numpy.array', 'np.array', (['[104, 117, 123]'], {}), '([104, 117, 123])\n', (1743, 1760), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 18:27:43 2019
@author: smrak
"""
import os
import yaml
import h5py
import numpy as np
from datetime import datetime
from argparse import ArgumentParser
def makeGrid(ylim=(25, 50), xlim=(-110, -80), res=0.5):
    """Make a regular lon/lat image grid with the given boundaries and resolution.

    Parameters
    ----------
    ylim, xlim : sequence of two floats
        [min, max] latitude and longitude bounds.  Defaults are now tuples
        (fix for the mutable-default-argument anti-pattern); list arguments
        still work.
    res : float
        Grid spacing in degrees.

    Returns
    -------
    xgrid, ygrid : ndarray
        2-D coordinate arrays from np.mgrid (longitude varies along axis 0,
        latitude along axis 1).
    z : ndarray
        Same-shaped array of NaNs, ready to receive image values.
    """
    # A complex step in np.mgrid means "number of samples" rather than stride.
    xd = abs(xlim[0] - xlim[1]) / res * 1j
    yd = abs(ylim[0] - ylim[1]) / res * 1j
    xgrid, ygrid = np.mgrid[xlim[0]:xlim[1]:xd, ylim[0]:ylim[1]:yd]
    z = np.nan * np.zeros((xgrid.shape[0], xgrid.shape[1]))
    return xgrid, ygrid, z
def getImageIndex(x, y, xlim, ylim, xgrid, ygrid):
    """
    Return the (row, col) pixel indices of point (x, y) on the image grid.

    A point strictly inside both limits maps to the nearest grid node in
    each direction; anything on or outside the limits maps to (nan, nan).
    """
    inside = (xlim[0] < x < xlim[1]) and (ylim[0] < y < ylim[1])
    if not inside:
        return np.nan, np.nan
    idx = np.argmin(np.abs(xgrid[:, 0] - x))
    idy = np.argmin(np.abs(ygrid[0, :] - y))
    return idx, idy
def makeImage(dtec, xgrid, ygrid,
longitude = None, latitude = None,
azimuth = None, elevation = None, rxp = None, altkm = None,
im = np.nan):
imout = np.nan * np.ones(im.shape, dtype=np.float32)
lonlim = [np.min(xgrid), np.max(xgrid)]
latlim = [np.min(ygrid), np.max(ygrid)]
if azimuth is not None and elevation is not None and rxp is not None and altkm is not None:
r1 = (altkm * 1e3) / np.sin(np.radians(elevation))
h0 = rxp[:,2] #if rxp[2] >= 0 else 0
ipp_lla = aer2geodetic(az=azimuth, el=elevation, srange=r1,
lat0=rxp[:,0], lon0=rxp[:,1], h0=h0)
longitude = ipp_lla[1]
latitude = ipp_lla[0]
assert (longitude is not None) and (latitude is not None), "Lat/Lon coordinates invalid!"
for isv in range(dtec.shape[0]):
for irx in np.where(np.isfinite(dtec[isv]))[0]:
idx, idy = getImageIndex(x=longitude[isv,irx], y=latitude[isv,irx],
xlim=lonlim, ylim=latlim,
xgrid=xgrid, ygrid=ygrid)
# If image indexes are valid
if np.isfinite(idx) and np.isfinite(idy):
if im[idx,idy] is None:
im[idx,idy] = [dtec[isv,irx]]
else:
im[idx,idy].append(dtec[isv,irx])
for i in range(im.shape[0]):
for j in range(im.shape[1]):
if im[i,j] is not None:
imout[i,j] = np.nanmedian(im[i,j])
return imout
def makeTheHDF(t,x,y,im,filename):
f = h5py.File(filename, 'w')
d = f.create_group('data')
d.create_dataset('time',data=t)
d.create_dataset('xgrid',data=x)
d.create_dataset('ygrid',data=y)
d.create_dataset('im',data=im, compression='gzip', compression_opts=9)
return f
if __name__ == '__main__':
p = ArgumentParser()
p.add_argument('fname', type=str)
p.add_argument('--ofn', type=str, default=None)
p.add_argument('--cfg', type=str, default=None)
p.add_argument('--mode', type=str, help='Input coordinates: lla, or aer?', default='lla')
p.add_argument('--altkm', type=float, help='Projection altitude in km', default=350)
p.add_argument('-r', '--resolution', type=float, help='Maps resolution, default is from cfg file', default=None)
p.add_argument('-x', type=float, help='longitude limits. Default from cfg file', default=None, nargs=2)
p.add_argument('-y', type=float, help='latitude limits. Default from cfg file', default=None, nargs=2)
P = p.parse_args()
fname = P.fname
savefn = P.ofn
mode = P.mode
if P.cfg is not None:
cfg = P.cfg
stream = yaml.load(open(cfg, 'r'))
lonlim = stream.get('lonlim')
latlim = stream.get('latlim')
resolution = stream.get('resolution')
else:
resolution = P.resolution
lonlim = P.x
latlim = P.y
# Create an image grids
xgrid, ygrid, im = makeGrid(ylim=latlim, xlim=lonlim, res=resolution)
###################################################
images = []
###################################################
f = h5py.File(fname, 'r')
if mode == 'aer':
from pymap3d import aer2geodetic
for i in range(f['obstimes'][:].shape[0]):
print ("{}/{}".format(i+1, f['obstimes'][:].shape[0]))
try:
im0 = np.empty(np.shape(im), dtype=object)
imtemp = makeImage(dtec=f['res'][i], xgrid=xgrid, ygrid=ygrid,
latitude = None,
longitude = None,
azimuth = f['az'][i],
elevation = f['el'][i],
rxp = f['rx_positions'], altkm=P.altkm,
im=im0)
images.append(imtemp)
except Exception as e:
print (e)
else:
for i in range(f['obstimes'][:].shape[0]):
print ("{}/{}".format(i+1, f['obstimes'][:].shape[0]))
try:
imtemp = makeImage(dtec=f['res'][i], xgrid=xgrid, ygrid=ygrid,
latitude = f['lat'][i],
longitude = f['lon'][i],
im=np.nan*im)
images.append(imtemp)
except Exception as e:
print (e)
if savefn is None:
folder = os.path.split(fname)[0]
root = os.path.split(fname)[1].rstrip('.h5')
rr = str(resolution).replace('.', '')
filename = 'grid/grid_{}_altkm_{}_res_{}.h5'.format(root, int(P.altkm), rr)
savefn = folder + filename
elif not savefn.endswith('.h5'):
root = os.path.split(fname)[1].rstrip('.h5')
rr = str(resolution).replace('.', '')
addon = '{}_altkm_{}_res_{}.h5'.format(root, int(P.altkm), rr)
savefn += addon
if not os.path.exists(os.path.split(savefn)[0]):
import subprocess
subprocess.call('mkdir -p {}'.format(os.path.split(savefn)[0]), shell=True, timeout=2)
f = makeTheHDF(f['obstimes'][:], xgrid[:,0], ygrid[0,:], images, savefn)
timestamp = datetime.now()
f.attrs[u'converted'] = timestamp.strftime('%Y-%m-%d')
f.attrs[u'lonlim'] = '{} - {}'.format(lonlim[0],lonlim[1])
f.attrs[u'latlim'] = '{} - {}'.format(latlim[0],latlim[1])
f.attrs[u'resolution'] = resolution
f.attrs[u'altkm'] = P.altkm
f.close()
| [
"numpy.radians",
"numpy.ones",
"argparse.ArgumentParser",
"numpy.nanmedian",
"h5py.File",
"numpy.max",
"datetime.datetime.now",
"numpy.zeros",
"os.path.split",
"pymap3d.aer2geodetic",
"numpy.isfinite",
"numpy.min",
"numpy.shape"
] | [((2654, 2678), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (2663, 2678), False, 'import h5py\n'), ((2944, 2960), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2958, 2960), False, 'from argparse import ArgumentParser\n'), ((4254, 4275), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (4263, 4275), False, 'import h5py\n'), ((6331, 6345), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6343, 6345), False, 'from datetime import datetime\n'), ((532, 574), 'numpy.zeros', 'np.zeros', (['(xgrid.shape[0], xgrid.shape[1])'], {}), '((xgrid.shape[0], xgrid.shape[1]))\n', (540, 574), True, 'import numpy as np\n'), ((1250, 1285), 'numpy.ones', 'np.ones', (['im.shape'], {'dtype': 'np.float32'}), '(im.shape, dtype=np.float32)\n', (1257, 1285), True, 'import numpy as np\n'), ((1300, 1313), 'numpy.min', 'np.min', (['xgrid'], {}), '(xgrid)\n', (1306, 1313), True, 'import numpy as np\n'), ((1315, 1328), 'numpy.max', 'np.max', (['xgrid'], {}), '(xgrid)\n', (1321, 1328), True, 'import numpy as np\n'), ((1344, 1357), 'numpy.min', 'np.min', (['ygrid'], {}), '(ygrid)\n', (1350, 1357), True, 'import numpy as np\n'), ((1359, 1372), 'numpy.max', 'np.max', (['ygrid'], {}), '(ygrid)\n', (1365, 1372), True, 'import numpy as np\n'), ((1592, 1685), 'pymap3d.aer2geodetic', 'aer2geodetic', ([], {'az': 'azimuth', 'el': 'elevation', 'srange': 'r1', 'lat0': 'rxp[:, 0]', 'lon0': 'rxp[:, 1]', 'h0': 'h0'}), '(az=azimuth, el=elevation, srange=r1, lat0=rxp[:, 0], lon0=rxp[\n :, 1], h0=h0)\n', (1604, 1685), False, 'from pymap3d import aer2geodetic\n'), ((5591, 5611), 'os.path.split', 'os.path.split', (['fname'], {}), '(fname)\n', (5604, 5611), False, 'import os\n'), ((1506, 1527), 'numpy.radians', 'np.radians', (['elevation'], {}), '(elevation)\n', (1516, 1527), True, 'import numpy as np\n'), ((1936, 1958), 'numpy.isfinite', 'np.isfinite', (['dtec[isv]'], {}), '(dtec[isv])\n', (1947, 1958), True, 'import numpy as 
np\n'), ((2226, 2242), 'numpy.isfinite', 'np.isfinite', (['idx'], {}), '(idx)\n', (2237, 2242), True, 'import numpy as np\n'), ((2247, 2263), 'numpy.isfinite', 'np.isfinite', (['idy'], {}), '(idy)\n', (2258, 2263), True, 'import numpy as np\n'), ((2566, 2588), 'numpy.nanmedian', 'np.nanmedian', (['im[i, j]'], {}), '(im[i, j])\n', (2578, 2588), True, 'import numpy as np\n'), ((6090, 6111), 'os.path.split', 'os.path.split', (['savefn'], {}), '(savefn)\n', (6103, 6111), False, 'import os\n'), ((4506, 4518), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (4514, 4518), True, 'import numpy as np\n'), ((5630, 5650), 'os.path.split', 'os.path.split', (['fname'], {}), '(fname)\n', (5643, 5650), False, 'import os\n'), ((6188, 6209), 'os.path.split', 'os.path.split', (['savefn'], {}), '(savefn)\n', (6201, 6209), False, 'import os\n'), ((5885, 5905), 'os.path.split', 'os.path.split', (['fname'], {}), '(fname)\n', (5898, 5905), False, 'import os\n')] |
import numpy as np
from scipy.io.wavfile import write as wr
# from matplotlib import pyplot as plt
def sound_recovery(results, freq):
# Given data is rescaled in order to use with the function
min_value = min(results)
for i in range(len(results)):
results[i] -= min_value
max_value = max(results)
for i in range(len(results)):
results[i] = (results[i] * 65535)//max_value - 32767
data = np.int16(results)
wr('test.wav', freq, data)
| [
"numpy.int16",
"scipy.io.wavfile.write"
] | [((430, 447), 'numpy.int16', 'np.int16', (['results'], {}), '(results)\n', (438, 447), True, 'import numpy as np\n'), ((453, 479), 'scipy.io.wavfile.write', 'wr', (['"""test.wav"""', 'freq', 'data'], {}), "('test.wav', freq, data)\n", (455, 479), True, 'from scipy.io.wavfile import write as wr\n')] |
# NOTES:
# Sine is with even positions and Cosine is at odd positions
# Keep dim 0 for padding token position encoding zero vector
# Properties
# Check Relative Position property
# Check Relative Position property after linear transformation
# Plot position embeddings to show that only initial a few dimensions matters
import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy.spatial.distance import cosine
from torch import nn
class CheckLinerProperty(nn.Module):
def __init__(self, in_features, out_features):
super(CheckLinerProperty, self).__init__()
self.model = nn.Linear(in_features, out_features)
def forward(self, inp):
return self.model(torch.tensor(inp, dtype=torch.float32)).detach()
d_model = 512
max_positions = 100
position_enc = np.array([
[pos / np.power(10000, 2 * i / d_model) for i in range(d_model)]
if (pos != 0) else np.zeros(d_model) for pos in range(0, max_positions)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # 2i + 1
x = np.arange(0, max_positions)
emb = position_enc[x]
# Check Relative Position property
pos1 = position_enc[1]
pos2 = position_enc[2]
pos10 = position_enc[10]
print(f'Cosine distance with raw embeddings')
print('p1->p2')
print(cosine(pos1, pos2))
print('p1->p10')
print(cosine(pos1, pos10))
# Check Relative Position property after linear transformation
lm = CheckLinerProperty(in_features=512, out_features=512)
p1 = lm(pos1)
p2 = lm(pos2)
p10 = lm(pos10)
print(f'Cosine distance with linear transformed embeddings')
print('p1->p2')
print(cosine(p1, p2))
print('p1->p10')
print(cosine(p1, p10))
# Plot position embeddings to show that only initial a few dimensions matters
inp_seq = np.arange(0, max_positions)
emb_seq = position_enc[inp_seq]
plt.figure(figsize=(10, 8))
plt.pcolormesh(emb_seq)
plt.xlabel('Position Embeddings')
plt.ylabel('Token Position')
plt.xlim(0, d_model)
plt.ylim(max_positions, 0)
plt.colorbar()
plt.show()
| [
"scipy.spatial.distance.cosine",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.pcolormesh",
"torch.tensor",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.cos",
"torch.nn.Linear",
"numpy.sin",
"matplotlib.pyplot.yli... | [((988, 1018), 'numpy.sin', 'np.sin', (['position_enc[1:, 0::2]'], {}), '(position_enc[1:, 0::2])\n', (994, 1018), True, 'import numpy as np\n'), ((1050, 1080), 'numpy.cos', 'np.cos', (['position_enc[1:, 1::2]'], {}), '(position_enc[1:, 1::2])\n', (1056, 1080), True, 'import numpy as np\n'), ((1096, 1123), 'numpy.arange', 'np.arange', (['(0)', 'max_positions'], {}), '(0, max_positions)\n', (1105, 1123), True, 'import numpy as np\n'), ((1780, 1807), 'numpy.arange', 'np.arange', (['(0)', 'max_positions'], {}), '(0, max_positions)\n', (1789, 1807), True, 'import numpy as np\n'), ((1841, 1868), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1851, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1869, 1892), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['emb_seq'], {}), '(emb_seq)\n', (1883, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1893, 1926), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position Embeddings"""'], {}), "('Position Embeddings')\n", (1903, 1926), True, 'import matplotlib.pyplot as plt\n'), ((1927, 1955), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Token Position"""'], {}), "('Token Position')\n", (1937, 1955), True, 'import matplotlib.pyplot as plt\n'), ((1956, 1976), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'd_model'], {}), '(0, d_model)\n', (1964, 1976), True, 'import matplotlib.pyplot as plt\n'), ((1977, 2003), 'matplotlib.pyplot.ylim', 'plt.ylim', (['max_positions', '(0)'], {}), '(max_positions, 0)\n', (1985, 2003), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2018), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2016, 2018), True, 'import matplotlib.pyplot as plt\n'), ((2019, 2029), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2027, 2029), True, 'import matplotlib.pyplot as plt\n'), ((1321, 1339), 'scipy.spatial.distance.cosine', 'cosine', (['pos1', 'pos2'], {}), '(pos1, pos2)\n', (1327, 1339), False, 
'from scipy.spatial.distance import cosine\n'), ((1364, 1383), 'scipy.spatial.distance.cosine', 'cosine', (['pos1', 'pos10'], {}), '(pos1, pos10)\n', (1370, 1383), False, 'from scipy.spatial.distance import cosine\n'), ((1635, 1649), 'scipy.spatial.distance.cosine', 'cosine', (['p1', 'p2'], {}), '(p1, p2)\n', (1641, 1649), False, 'from scipy.spatial.distance import cosine\n'), ((1674, 1689), 'scipy.spatial.distance.cosine', 'cosine', (['p1', 'p10'], {}), '(p1, p10)\n', (1680, 1689), False, 'from scipy.spatial.distance import cosine\n'), ((611, 647), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (620, 647), False, 'from torch import nn\n'), ((907, 924), 'numpy.zeros', 'np.zeros', (['d_model'], {}), '(d_model)\n', (915, 924), True, 'import numpy as np\n'), ((703, 741), 'torch.tensor', 'torch.tensor', (['inp'], {'dtype': 'torch.float32'}), '(inp, dtype=torch.float32)\n', (715, 741), False, 'import torch\n'), ((826, 858), 'numpy.power', 'np.power', (['(10000)', '(2 * i / d_model)'], {}), '(10000, 2 * i / d_model)\n', (834, 858), True, 'import numpy as np\n')] |
"""
Simulation for problem 01 of SP1.
"""
from typing import Tuple
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import factorial
def R(N: int) -> Tuple[np.ndarray, np.ndarray]:
# Generate the x array
x = np.arange(0, N + 1, dtype=np.float64)
# Probability
p = 0.5
# Calculate R
y = factorial(N - 1) * np.power(p, N - 1) / (factorial(x) * factorial(N - x))
return x, y
if __name__ == '__main__':
for N in [6, 12, 24, 170]:
x, y = R(N)
plt.plot(x, y, label=f'N={N}')
plt.legend()
plt.xlabel(r'$0 \leq x \leq N$')
plt.ylabel(r'$R(x, N)$')
plt.savefig('export/problem01.svg')
plt.show()
| [
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"scipy.special.factorial",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.power",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((240, 277), 'numpy.arange', 'np.arange', (['(0)', '(N + 1)'], {'dtype': 'np.float64'}), '(0, N + 1, dtype=np.float64)\n', (249, 277), True, 'import numpy as np\n'), ((549, 561), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (559, 561), True, 'import matplotlib.pyplot as plt\n'), ((566, 599), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$0 \\\\leq x \\\\leq N$"""'], {}), "('$0 \\\\leq x \\\\leq N$')\n", (576, 599), True, 'import matplotlib.pyplot as plt\n'), ((603, 626), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$R(x, N)$"""'], {}), "('$R(x, N)$')\n", (613, 626), True, 'import matplotlib.pyplot as plt\n'), ((632, 667), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""export/problem01.svg"""'], {}), "('export/problem01.svg')\n", (643, 667), True, 'import matplotlib.pyplot as plt\n'), ((672, 682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (680, 682), True, 'import matplotlib.pyplot as plt\n'), ((513, 543), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 'f"""N={N}"""'}), "(x, y, label=f'N={N}')\n", (521, 543), True, 'import matplotlib.pyplot as plt\n'), ((334, 350), 'scipy.special.factorial', 'factorial', (['(N - 1)'], {}), '(N - 1)\n', (343, 350), False, 'from scipy.special import factorial\n'), ((353, 371), 'numpy.power', 'np.power', (['p', '(N - 1)'], {}), '(p, N - 1)\n', (361, 371), True, 'import numpy as np\n'), ((375, 387), 'scipy.special.factorial', 'factorial', (['x'], {}), '(x)\n', (384, 387), False, 'from scipy.special import factorial\n'), ((390, 406), 'scipy.special.factorial', 'factorial', (['(N - x)'], {}), '(N - x)\n', (399, 406), False, 'from scipy.special import factorial\n')] |
import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
def mnist():
# exchange with the corrupted mnist dataset
path = '/mnt/c/Users/Laurine/Documents/DTU Python/Machine Learning Operations/dtu_mlops/data/corruptmnist/'
### get TRAIN dataloader
all_torch_images, all_torch_labels = [], []
for i in range (5):
file_path = path+'train_{}.npz'.format(i)
np_array = np.load(file_path)
all_torch_images.append(torch.from_numpy(np_array['images']))
all_torch_labels.append(torch.from_numpy(np_array['labels']))
torch_images= torch.cat(all_torch_images, 0)
torch_labels = torch.cat(all_torch_labels, 0)
torch_images, torch_labels = torch_images.type(torch.FloatTensor), torch_labels.type(torch.LongTensor)
train_dataset = TensorDataset(torch_images, torch_labels)
train_loader = DataLoader(train_dataset, batch_size=1000, shuffle=True)
### get TEST dataloader
file_path = path+'test.npz'
np_array = np.load(file_path)
torch_test_images = torch.from_numpy(np_array['images'])
torch_test_labels = torch.from_numpy(np_array['labels'])
torch_test_images, torch_test_labels = torch_test_images.type(torch.FloatTensor), torch_test_labels.type(torch.LongTensor)
test_dataset= TensorDataset(torch_test_images, torch_test_labels)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=True)
return train_loader, test_loader
| [
"torch.from_numpy",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"numpy.load",
"torch.cat"
] | [((619, 649), 'torch.cat', 'torch.cat', (['all_torch_images', '(0)'], {}), '(all_torch_images, 0)\n', (628, 649), False, 'import torch\n'), ((669, 699), 'torch.cat', 'torch.cat', (['all_torch_labels', '(0)'], {}), '(all_torch_labels, 0)\n', (678, 699), False, 'import torch\n'), ((827, 868), 'torch.utils.data.TensorDataset', 'TensorDataset', (['torch_images', 'torch_labels'], {}), '(torch_images, torch_labels)\n', (840, 868), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((888, 944), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': '(1000)', 'shuffle': '(True)'}), '(train_dataset, batch_size=1000, shuffle=True)\n', (898, 944), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((1025, 1043), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (1032, 1043), True, 'import numpy as np\n'), ((1068, 1104), 'torch.from_numpy', 'torch.from_numpy', (["np_array['images']"], {}), "(np_array['images'])\n", (1084, 1104), False, 'import torch\n'), ((1129, 1165), 'torch.from_numpy', 'torch.from_numpy', (["np_array['labels']"], {}), "(np_array['labels'])\n", (1145, 1165), False, 'import torch\n'), ((1311, 1362), 'torch.utils.data.TensorDataset', 'TensorDataset', (['torch_test_images', 'torch_test_labels'], {}), '(torch_test_images, torch_test_labels)\n', (1324, 1362), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((1381, 1434), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(64)', 'shuffle': '(True)'}), '(test_dataset, batch_size=64, shuffle=True)\n', (1391, 1434), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((442, 460), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (449, 460), True, 'import numpy as np\n'), ((493, 529), 'torch.from_numpy', 'torch.from_numpy', (["np_array['images']"], {}), "(np_array['images'])\n", (509, 529), False, 'import torch\n'), ((563, 599), 'torch.from_numpy', 
'torch.from_numpy', (["np_array['labels']"], {}), "(np_array['labels'])\n", (579, 599), False, 'import torch\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.