code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
# Parse the rules section of "input": each line is "name: a-b or c-d"
# until the first blank line terminates the section.
with open("input", "r") as f:
    lines = f.readlines()
rules = {}
for i, line in enumerate(lines):
    if line == "\n":
        # Blank separator ends the rules; `i` keeps its last value for the
        # offset arithmetic below.
        break
    name, rest = line.strip().split(":")
    range1, range2 = rest.split(" or ")
    min1, max1 = range1.split("-")
    min2, max2 = range2.split("-")
    # rules[name] = [[lo1, hi1], [lo2, hi2]]; both ranges are inclusive.
    rules[name] = [[int(min1), int(max1)], [int(min2), int(max2)]]
# Our own ticket's values sit two lines after the blank separator.
mytick = [int(x) for x in lines[i + 2].strip().split(",")]
def is_in_range(val, ranges):
    """Return True if `val` falls inside either inclusive interval of `ranges`.

    `ranges` is a pair of [low, high] lists, e.g. [[1, 3], [5, 7]].
    """
    low_a, high_a = ranges[0]
    low_b, high_b = ranges[1]
    return low_a <= val <= high_a or low_b <= val <= high_b
invalid_vals = []
valid_ticks = []
# Nearby tickets start 5 lines past the last rule line (skipping the blank
# line and the "your ticket" block). NOTE(review): the offset assumes that
# exact input layout -- confirm against the input file. `i2` is unused.
for i2, line in enumerate(lines[i + 5 :]):
    is_valid_tick = True
    vals = [int(x) for x in line.strip().split(",")]
    for val in vals:
        is_valid = False
        for name, ranges in rules.items():
            if is_in_range(val, ranges):
                is_valid = True
        if not is_valid:
            # Value satisfies no rule at all -> counts toward the error rate
            # and invalidates the whole ticket.
            invalid_vals.append(val)
            is_valid_tick = False
    if is_valid_tick:
        valid_ticks.append(vals)
# Part 1: ticket scanning error rate.
print(sum(invalid_vals))
valid_ticks = np.array(valid_ticks)
field_pos = {}
# For each field, collect every column whose values all satisfy its ranges.
for field, ranges in rules.items():
    for pos in range(len(valid_ticks[0])):
        if all(is_in_range(x, ranges) for x in valid_ticks[:, pos]):
            field_pos[field] = field_pos.get(field, []) + [pos]
# Constraint propagation: whenever a field is pinned to a single column,
# remove that column from every other field's candidate list; repeat until
# all fields are pinned.
while not all(len(x) == 1 for x in field_pos.values()):
    for k, v in field_pos.items():
        if len(v) == 1:
            for k2, v2 in field_pos.items():
                if k == k2:
                    continue
                try:
                    v2.remove(v[0])
                except ValueError:
                    # Column already removed from this candidate list.
                    pass
p = 1
for k, v in field_pos.items():
    if k.startswith("departure"):
        p *= mytick[v[0]]
# Part 2: product of the "departure*" fields on our own ticket.
print(p)
| [
"numpy.array"
] | [((1066, 1087), 'numpy.array', 'np.array', (['valid_ticks'], {}), '(valid_ticks)\n', (1074, 1087), True, 'import numpy as np\n')] |
import random
import numpy as np
from collections import deque
class SumTree(object):
    """Binary sum-tree for proportional (priority) sampling.

    Leaves hold priorities, every internal node stores the sum of its two
    children, so ``total()`` is O(1) and ``get(s)`` finds the leaf whose
    cumulative-priority interval contains ``s`` in O(log capacity).
    The tree array has ``2 * capacity - 1`` nodes; leaf ``k`` lives at
    tree index ``k + capacity - 1``.
    """

    write = 0  # class-level default; shadowed per instance in __init__

    def __init__(self, capacity):
        # BUG FIX: this method was misspelled ``__init_`` so it never ran as
        # the constructor and ``SumTree(capacity)`` raised TypeError.
        self.capacity = capacity
        self.write = 0  # next leaf slot to (over)write -- ring-buffer cursor
        self.tree = np.zeros(2 * capacity - 1)        # sums + leaf priorities
        self.data = np.zeros(capacity, dtype=object)  # payloads, parallel to leaves

    def _propatate(self, idx, change):
        """Add ``change`` to every ancestor of tree node ``idx``."""
        parent = (idx - 1) // 2
        self.tree[parent] += change
        if parent != 0:
            self._propatate(parent, change)

    def _retrive(self, idx, s):
        """Descend from node ``idx`` to the leaf whose interval contains ``s``."""
        left = 2 * idx + 1
        right = left + 1
        if left >= len(self.tree):
            return idx  # reached a leaf
        if s <= self.tree[left]:
            return self._retrive(left, s)
        else:
            return self._retrive(right, s - self.tree[left])

    def total(self):
        """Sum of all stored priorities (the root node)."""
        return self.tree[0]

    def add(self, p, data):
        """Store ``data`` with priority ``p``, overwriting the oldest slot when full."""
        idx = self.write + self.capacity - 1  # leaf position in the tree array
        self.data[self.write] = data
        self.update(idx, p)
        self.write += 1
        if self.write >= self.capacity:
            self.write = 0  # wrap around: ring-buffer overwrite

    def update(self, idx, p):
        """Set the priority of tree node ``idx`` to ``p`` and repair ancestor sums."""
        change = p - self.tree[idx]
        self.tree[idx] = p
        self._propatate(idx, change)

    def get(self, s):
        """Return ``(tree_index, priority, data)`` for cumulative priority ``s``."""
        idx = self._retrive(0, s)
        dataidx = idx - self.capacity + 1
        return (idx, self.tree[idx], self.data[dataidx])
class MemoryBuffer(object):
    """Experience replay buffer using a double-ended deque (uniform sampling)
    or a sum-tree (prioritized experience replay, PER)."""

    def __init__(self, buffer_size, with_per=False):
        if with_per:
            # Prioritized Experience Replay hyper-parameters.
            self.alpha = 0.5
            self.epsilon = 0.01
            # BUG FIX: was ``self.bufer``, leaving ``self.buffer`` undefined
            # and breaking every later access in PER mode.
            self.buffer = SumTree(buffer_size)
        else:
            self.buffer = deque()
        self.count = 0
        self.with_per = with_per
        self.buffer_size = buffer_size

    def memorize(self, state, action, reward, done, new_state, error=None):
        """Save an experience to memory, optionally with its TD-error.

        In PER mode ``error`` must be a sequence whose first element is the
        TD-error used to derive the sampling priority.
        """
        experience = (state, action, reward, done, new_state)
        if self.with_per:
            priority = self.priority(error[0])
            self.buffer.add(priority, experience)
            self.count += 1
        else:
            if self.count < self.buffer_size:
                self.buffer.append(experience)
                self.count += 1
            else:
                # Buffer full: drop the oldest transition.
                self.buffer.popleft()
                self.buffer.append(experience)

    def priority(self, error):
        """PER priority: (error + epsilon) ** alpha."""
        return (error + self.epsilon) ** self.alpha

    def size(self):
        """Number of stored transitions (capped at buffer_size in deque mode)."""
        return self.count

    def sample_batch(self, batch_size):
        """Sample a batch and return (s, a, r, done, s') numpy arrays.

        PER mode stratifies: one draw from each of ``batch_size`` equal
        slices of the total priority mass. NOTE(review): the tree indices
        gathered into the local ``idx`` array are never returned, so callers
        cannot feed them back to ``update`` -- confirm intended.
        """
        batch = []
        if self.with_per:
            T = self.buffer.total() // batch_size
            for i in range(batch_size):
                a, b = T * i, T * (i + 1)
                s = random.uniform(a, b)
                idx, error, data = self.buffer.get(s)
                batch.append((*data, idx))
            idx = np.array([i[5] for i in batch])
        else:
            idx = None
            batch = random.sample(self.buffer, min(batch_size, self.count))
        s_batch = np.array([i[0] for i in batch])
        a_batch = np.array([i[1] for i in batch])
        r_batch = np.array([i[2] for i in batch])
        d_batch = np.array([i[3] for i in batch])
        ns_batch = np.array([i[4] for i in batch])
        return s_batch, a_batch, r_batch, d_batch, ns_batch

    def update(self, idx, new_error):
        """Refresh the priority of sum-tree entry ``idx`` after a new TD-error."""
        self.buffer.update(idx, self.priority(new_error))

    def clear(self):
        """Discard all stored transitions."""
        if self.with_per:
            self.buffer = SumTree(self.buffer_size)
        else:
            self.buffer = deque()
        self.count = 0
| [
"numpy.array",
"numpy.zeros",
"collections.deque",
"random.uniform"
] | [((191, 217), 'numpy.zeros', 'np.zeros', (['(2 * capacity - 1)'], {}), '(2 * capacity - 1)\n', (199, 217), True, 'import numpy as np\n'), ((236, 268), 'numpy.zeros', 'np.zeros', (['capacity'], {'dtype': 'object'}), '(capacity, dtype=object)\n', (244, 268), True, 'import numpy as np\n'), ((3047, 3078), 'numpy.array', 'np.array', (['[i[0] for i in batch]'], {}), '([i[0] for i in batch])\n', (3055, 3078), True, 'import numpy as np\n'), ((3097, 3128), 'numpy.array', 'np.array', (['[i[1] for i in batch]'], {}), '([i[1] for i in batch])\n', (3105, 3128), True, 'import numpy as np\n'), ((3147, 3178), 'numpy.array', 'np.array', (['[i[2] for i in batch]'], {}), '([i[2] for i in batch])\n', (3155, 3178), True, 'import numpy as np\n'), ((3197, 3228), 'numpy.array', 'np.array', (['[i[3] for i in batch]'], {}), '([i[3] for i in batch])\n', (3205, 3228), True, 'import numpy as np\n'), ((3248, 3279), 'numpy.array', 'np.array', (['[i[4] for i in batch]'], {}), '([i[4] for i in batch])\n', (3256, 3279), True, 'import numpy as np\n'), ((1677, 1684), 'collections.deque', 'deque', ([], {}), '()\n', (1682, 1684), False, 'from collections import deque\n'), ((2884, 2915), 'numpy.array', 'np.array', (['[i[5] for i in batch]'], {}), '([i[5] for i in batch])\n', (2892, 2915), True, 'import numpy as np\n'), ((3581, 3588), 'collections.deque', 'deque', ([], {}), '()\n', (3586, 3588), False, 'from collections import deque\n'), ((2750, 2770), 'random.uniform', 'random.uniform', (['a', 'b'], {}), '(a, b)\n', (2764, 2770), False, 'import random\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from drforest.datasets import make_simulation1
from drforest.ensemble import DimensionReductionForestRegressor
from drforest.ensemble import permutation_importance
# Serif font for all panels of the figure.
plt.rc('font', family='serif')
fontsize = 14
n_samples = 2000
n_features = 5
# Synthetic regression data (2000 x 5) from the drforest benchmark suite.
X, y = make_simulation1(
    n_samples=n_samples, noise=1, n_features=n_features, random_state=1234)
forest = DimensionReductionForestRegressor(
    n_estimators=500, store_X_y=True, n_jobs=-1,
    min_samples_leaf=3, max_features=None,
    random_state=42).fit(X, y)
# Local principal direction at x0 = (-1.5, 1.5, 0, 0, 0); directions are
# sign-ambiguous, so normalize the first component to be non-negative.
x0 = np.zeros(n_features)
x0[:2] = np.array([-1.5, 1.5])
local_direc_x0 = forest.local_principal_direction(x0)
local_direc_x0 *= np.sign(local_direc_x0[0])
# Same at x1 = (0.5, -0.5, 0, 0, 0).
x1 = np.zeros(n_features)
x1[:2] = [0.5, -0.5]
local_direc_x1 = forest.local_principal_direction(x1)
local_direc_x1 *= np.sign(local_direc_x1[0])
# Disabled alternative: sklearn random forest + permutation importances.
#forest = RandomForestRegressor(n_estimators=500,
#                               min_samples_leaf=3,
#                               n_jobs=-1, max_features=None,
#                               oob_score=True,
#                               random_state=42).fit(X, y)
#
#forest_imp = permutation_importance(
#    forest, X, y, random_state=forest.random_state)
#forest_imp /= np.sum(forest_imp)
# Global importances from the dimension reduction forest.
forest_imp = forest.feature_importances_
#order = np.argsort(forest_imp)
fig, ax = plt.subplots(figsize=(18, 5), ncols=4)
def f(x, y):
    """Test surface: 20 * max of two diagonal Gaussian ridges and a central bump.

    Works elementwise on scalars or numpy arrays.
    """
    ridge_a = np.exp(-2 * (x - y) ** 2)
    ridge_b = np.exp(-(x + y) ** 2)
    center = 2 * np.exp(-0.5 * (x ** 2 + y ** 2))
    return 20 * np.maximum(np.maximum(ridge_a, ridge_b), center)
# Evaluate the surface on a 100x100 grid over [-3, 3]^2.
x = np.linspace(-3, 3, 100)
y = np.linspace(-3, 3, 100)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
# Panel 0: target surface with the two query points annotated.
ax[0].contour(X, Y, Z, 3, colors='black', linestyles='--', levels=5, linewidths=1.5)
ax[0].imshow(Z, extent=[-3, 3, -3, 3], origin='lower', cmap='YlGnBu_r', alpha=0.5)
ax[0].scatter([-1.5, 0.5], [1.5, -0.5], color=None, edgecolor='black')
ax[0].annotate(r'(-1.5, 1.5)', (-1.5, 1.5), xytext=(-1.4, 1.6), fontname='Sans', weight='bold')
ax[0].annotate(r'(0.5, -0.5)', (0.5, -0.5), xytext=(0.6, -0.4), fontname='Sans', weight='bold')
ax[0].set_aspect('equal')
# Panel 1: global feature importances.
ax[1].bar(np.arange(1, n_features + 1), forest_imp, color='gray')
ax[1].set_ylabel('Importance', fontsize=fontsize)
#ax[1].set_title('Random Forest', fontsize=fontsize)
ax[1].set_xlabel(None)
ax[1].axhline(0, color='black', linestyle='-')
ax[1].set_ylim(-1, 1)
ax[1].set_xlabel('Variable', fontsize=fontsize)
ax[1].text(3.5, 0.8, 'Global', fontsize=16)
# Panel 2: local principal direction at x0 (red = positive, blue = negative).
color = ['tomato' if x > 0 else 'cornflowerblue' for x in local_direc_x0]
ax[2].bar(np.arange(1, n_features + 1), local_direc_x0, color=color)
#ax[2].set_title('Dimension Reduction Forest', fontsize=fontsize)
ax[2].axhline(0, color='black', linestyle='-', lw=1)
ax[2].set_ylim(-1, 1)
ax[2].set_xlabel('Variable', fontsize=fontsize)
# NOTE(review): '$\mathbf{x}_0 ...' uses the non-raw escape '\m'; should be a
# raw string (r'...') to avoid an invalid-escape DeprecationWarning.
ax[2].text(2.5, 0.8, '$\mathbf{x}_0 = (-1.5, 1.5, 0, 0, 0)$', fontsize=12)
# Panel 3: local principal direction at x1.
color = ['tomato' if x > 0 else 'cornflowerblue' for x in local_direc_x1]
ax[3].bar(np.arange(1, n_features + 1), local_direc_x1, color=color)
#ax[3].set_title('Dimension Reduction Forest', fontsize=fontsize)
ax[3].set_xlabel('Variable', fontsize=fontsize)
ax[3].invert_yaxis()
ax[3].axhline(0, color='black', linestyle='-', lw=1)
ax[3].text(2.5, 0.8, '$\mathbf{x}_0 = (0.5, -0.5, 0, 0, 0)$', fontsize=12)
ax[3].set_ylim(-1, 1)
plt.subplots_adjust(wspace=0.3, left=0.03, right=0.985)
fig.savefig('local_lpd.png', dpi=300, bbox_inches='tight')
| [
"matplotlib.pyplot.subplots_adjust",
"numpy.arange",
"drforest.datasets.make_simulation1",
"drforest.ensemble.DimensionReductionForestRegressor",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.sign",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc"
] | [((312, 342), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (318, 342), True, 'import matplotlib.pyplot as plt\n'), ((398, 490), 'drforest.datasets.make_simulation1', 'make_simulation1', ([], {'n_samples': 'n_samples', 'noise': '(1)', 'n_features': 'n_features', 'random_state': '(1234)'}), '(n_samples=n_samples, noise=1, n_features=n_features,\n random_state=1234)\n', (414, 490), False, 'from drforest.datasets import make_simulation1\n'), ((666, 686), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (674, 686), True, 'import numpy as np\n'), ((696, 717), 'numpy.array', 'np.array', (['[-1.5, 1.5]'], {}), '([-1.5, 1.5])\n', (704, 717), True, 'import numpy as np\n'), ((790, 816), 'numpy.sign', 'np.sign', (['local_direc_x0[0]'], {}), '(local_direc_x0[0])\n', (797, 816), True, 'import numpy as np\n'), ((823, 843), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (831, 843), True, 'import numpy as np\n'), ((937, 963), 'numpy.sign', 'np.sign', (['local_direc_x1[0]'], {}), '(local_direc_x1[0])\n', (944, 963), True, 'import numpy as np\n'), ((1447, 1485), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(18, 5)', 'ncols': '(4)'}), '(figsize=(18, 5), ncols=4)\n', (1459, 1485), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1693), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(100)'], {}), '(-3, 3, 100)\n', (1681, 1693), True, 'import numpy as np\n'), ((1698, 1721), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(100)'], {}), '(-3, 3, 100)\n', (1709, 1721), True, 'import numpy as np\n'), ((1729, 1746), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1740, 1746), True, 'import numpy as np\n'), ((3409, 3464), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.3)', 'left': '(0.03)', 'right': '(0.985)'}), '(wspace=0.3, left=0.03, right=0.985)\n', (3428, 3464), True, 'import matplotlib.pyplot as plt\n'), 
((2228, 2256), 'numpy.arange', 'np.arange', (['(1)', '(n_features + 1)'], {}), '(1, n_features + 1)\n', (2237, 2256), True, 'import numpy as np\n'), ((2656, 2684), 'numpy.arange', 'np.arange', (['(1)', '(n_features + 1)'], {}), '(1, n_features + 1)\n', (2665, 2684), True, 'import numpy as np\n'), ((3064, 3092), 'numpy.arange', 'np.arange', (['(1)', '(n_features + 1)'], {}), '(1, n_features + 1)\n', (3073, 3092), True, 'import numpy as np\n'), ((502, 641), 'drforest.ensemble.DimensionReductionForestRegressor', 'DimensionReductionForestRegressor', ([], {'n_estimators': '(500)', 'store_X_y': '(True)', 'n_jobs': '(-1)', 'min_samples_leaf': '(3)', 'max_features': 'None', 'random_state': '(42)'}), '(n_estimators=500, store_X_y=True, n_jobs=\n -1, min_samples_leaf=3, max_features=None, random_state=42)\n', (535, 641), False, 'from drforest.ensemble import DimensionReductionForestRegressor\n'), ((1578, 1598), 'numpy.exp', 'np.exp', (['(-2 * r1 ** 2)'], {}), '(-2 * r1 ** 2)\n', (1584, 1598), True, 'import numpy as np\n'), ((1600, 1616), 'numpy.exp', 'np.exp', (['(-r2 ** 2)'], {}), '(-r2 ** 2)\n', (1606, 1616), True, 'import numpy as np\n'), ((1631, 1663), 'numpy.exp', 'np.exp', (['(-0.5 * (x ** 2 + y ** 2))'], {}), '(-0.5 * (x ** 2 + y ** 2))\n', (1637, 1663), True, 'import numpy as np\n')] |
from collections import OrderedDict
import numpy as np
from multiworld.envs.mujoco.classic_mujoco.all_ant_environments.ant_goal import AntGoalEnv
from multiworld.envs.env_util import get_stat_in_paths, create_stats_ordered_dict, get_asset_full_path
class AntGoalDisabledJointsEnv(AntGoalEnv):
    """Ant goal-reaching environment that injects an external generalized
    force (a joint-disabling perturbation) into the simulation while the
    step counter lies in [timestep_start, timestep_end]."""

    def __init__(self, action_scale=1, frame_skip=5, goal_position=4., force=None, timestep_start=100000, timestep_end=100000):
        # force: value written into sim.data.qfrc_applied during the
        # perturbation window; None means broadcasting None -- presumably a
        # concrete array/scalar is expected. TODO confirm with callers.
        self.quick_init(locals())
        self.force = force
        self.timestep_start = timestep_start
        self.timestep_end = timestep_end
        AntGoalEnv.__init__(self, action_scale=action_scale, frame_skip=frame_skip, goal_position=goal_position)

    def step(self, action):
        """Advance one control step; returns (obs, reward, done, info).

        Reward = -||x - goal|| - control cost - contact cost; `done` is
        always False (no early termination).
        """
        action = action * self.action_scale
        self.step_count += 1
        # print(self.sim.model.actuator_acc0)
        # NOTE(review): xposbefore is computed but never used.
        xposbefore = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        if self.step_count >= self.timestep_start and self.step_count <= self.timestep_end:
            # Inject the disabling force into all degrees of freedom for
            # the duration of the perturbation window.
            # for i in range(9):
            self.sim.data.qfrc_applied[:] = self.force
            # import pdb; pdb.set_trace()
            # self.sim.model.actuator_acc0[i] = -1000.
            # self.sim.data.qacc[i] = -1000. #self.force
        xposafter = self.sim.data.qpos[0]
        goal_reward = -1.0 * np.linalg.norm(xposafter - self.goal_position)  # make it happy, not suicidal
        ctrl_cost = .1 * np.square(action).sum()
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
        survive_reward = 0
        # state = self.state_vector()
        # notdone = np.isfinite(state).all() \
        #     and state[2] >= 0.2 and state[2] <= 1.0
        reward = goal_reward - ctrl_cost - contact_cost + survive_reward
        state = self.state_vector()
        done = False
        ob = self._get_obs()
        return ob, reward, done, dict(
            goal_forward=goal_reward,
            reward_ctrl=-ctrl_cost,
            reward_contact=-contact_cost,
            reward_survive=survive_reward,
        )

    def reset(self):
        """Reset the env and the perturbation-window step counter."""
        self.step_count = 0
        return super().reset()
| [
"numpy.clip",
"multiworld.envs.mujoco.classic_mujoco.all_ant_environments.ant_goal.AntGoalEnv.__init__",
"numpy.square",
"numpy.linalg.norm"
] | [((577, 685), 'multiworld.envs.mujoco.classic_mujoco.all_ant_environments.ant_goal.AntGoalEnv.__init__', 'AntGoalEnv.__init__', (['self'], {'action_scale': 'action_scale', 'frame_skip': 'frame_skip', 'goal_position': 'goal_position'}), '(self, action_scale=action_scale, frame_skip=frame_skip,\n goal_position=goal_position)\n', (596, 685), False, 'from multiworld.envs.mujoco.classic_mujoco.all_ant_environments.ant_goal import AntGoalEnv\n'), ((1334, 1380), 'numpy.linalg.norm', 'np.linalg.norm', (['(xposafter - self.goal_position)'], {}), '(xposafter - self.goal_position)\n', (1348, 1380), True, 'import numpy as np\n'), ((1436, 1453), 'numpy.square', 'np.square', (['action'], {}), '(action)\n', (1445, 1453), True, 'import numpy as np\n'), ((1526, 1564), 'numpy.clip', 'np.clip', (['self.sim.data.cfrc_ext', '(-1)', '(1)'], {}), '(self.sim.data.cfrc_ext, -1, 1)\n', (1533, 1564), True, 'import numpy as np\n')] |
import numpy as np
import torch
import time
import gym
from a2c_ppo_acktr import utils
from a2c_ppo_acktr.envs import make_vec_envs
from common.common import *
import pyrobotdesign as rd
def evaluate(args, actor_critic, ob_rms, env_name, seed, num_processes, device):
    """Run the policy deterministically in vectorized eval envs until
    ``args.eval_num`` episode rewards are collected, then print the mean.

    ``ob_rms`` is the observation-normalization running stats captured at
    training time; they are frozen and reused for evaluation.
    """
    eval_envs = make_vec_envs(env_name, seed + num_processes, num_processes,
                              None, None, device, True)
    vec_norm = utils.get_vec_normalize(eval_envs)
    if vec_norm is not None:
        # Freeze normalization updates and substitute the training stats.
        vec_norm.eval()
        vec_norm.ob_rms = ob_rms
    eval_episode_rewards = []
    obs = eval_envs.reset()
    eval_recurrent_hidden_states = torch.zeros(
        num_processes, actor_critic.recurrent_hidden_state_size, device=device)
    eval_masks = torch.zeros(num_processes, 1, device=device)
    while len(eval_episode_rewards) < args.eval_num:
        with torch.no_grad():
            _, action, _, eval_recurrent_hidden_states = actor_critic.act(
                obs,
                eval_recurrent_hidden_states,
                eval_masks,
                deterministic=True)
        # Obser reward and next obs
        obs, _, done, infos = eval_envs.step(action)
        # Zero the recurrent-state mask for envs whose episode just ended.
        # NOTE(review): dtype is float64 here while the initial masks above
        # default to float32 -- confirm the mismatch is intentional.
        eval_masks = torch.tensor(
            [[0.0] if done_ else [1.0] for done_ in done],
            dtype=torch.float64,
            device=device)
        for info in infos:
            if 'episode' in info.keys():
                eval_episode_rewards.append(info['episode']['r'])
    eval_envs.close()
    print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
        len(eval_episode_rewards), np.mean(eval_episode_rewards)))
def render(render_env, actor_critic, ob_rms, deterministic = False, repeat = False):
    """Roll out the policy in ``render_env`` inside a GLFW viewer, pacing
    wall-clock time to simulated time; loops forever when ``repeat``."""
    # Get robot bounds
    lower = np.zeros(3)
    upper = np.zeros(3)
    render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
    viewer = rd.GLFWViewer()
    # Frame the camera on the robot's bounding box.
    viewer.camera_params.position = 0.5 * (lower + upper)
    viewer.camera_params.yaw = 0.0
    viewer.camera_params.pitch = -np.pi / 6
    viewer.camera_params.distance = 2.0 * np.linalg.norm(upper - lower)
    # One rendered frame per control step (frame_skip sim substeps).
    time_step = render_env.task.time_step * render_env.frame_skip
    while True:
        total_reward = 0.
        sim_time = 0.
        render_time_start = time.time()
        with torch.no_grad():
            ob = render_env.reset()
            done = False
            episode_length = 0
            while not done:
                # Normalize observations with the training-time running stats.
                ob = np.clip((ob - ob_rms.mean) / np.sqrt(ob_rms.var + 1e-8), -10.0, 10.0)
                _, u, _, _ = actor_critic.act(torch.tensor(ob).unsqueeze(0), None, None, deterministic = deterministic)
                u = u.detach().squeeze(dim = 0).numpy()
                ob, reward, done, _ = render_env.step(u)
                total_reward += reward
                episode_length += 1
                # render: smoothly track the robot with the camera.
                render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
                target_pos = 0.5 * (lower + upper)
                camera_pos = viewer.camera_params.position.copy()
                camera_pos += 5.0 * time_step * (target_pos - camera_pos)
                viewer.camera_params.position = camera_pos
                viewer.update(time_step)
                viewer.render(render_env.sim)
                # Sleep so wall-clock time keeps pace with simulated time.
                sim_time += time_step
                render_time_now = time.time()
                if render_time_now - render_time_start < sim_time:
                    time.sleep(sim_time - (render_time_now - render_time_start))
        print_info('rendering:')
        print_info('length = ', episode_length)
        print_info('total reward = ', total_reward)
        print_info('avg reward = ', total_reward / (episode_length * render_env.frame_skip))
        if not repeat:
            break
    del viewer
# render each sub-step
def render_full(render_env, actor_critic, ob_rms, deterministic = False, repeat = False):
    """Like ``render`` but steps the simulation one substep at a time
    (frame_skip forced to 1), recomputing the action only once every
    ``control_frequency`` substeps; runs for 128 control steps."""
    # Get robot bounds
    lower = np.zeros(3)
    upper = np.zeros(3)
    render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
    viewer = rd.GLFWViewer()
    # Frame the camera on the robot's bounding box.
    viewer.camera_params.position = 0.5 * (lower + upper)
    viewer.camera_params.yaw = 0.0
    viewer.camera_params.pitch = -np.pi / 6
    viewer.camera_params.distance = 2.0 * np.linalg.norm(upper - lower)
    time_step = render_env.task.time_step
    control_frequency = render_env.frame_skip
    # Step one sim substep at a time; the policy runs at control_frequency.
    render_env.set_frame_skip(1)
    while True:
        total_reward = 0.
        sim_time = 0.
        render_time_start = time.time()
        with torch.no_grad():
            ob = render_env.reset()
            done = False
            episode_length = 0
            while episode_length < 128 * control_frequency:
                if episode_length % control_frequency == 0:
                    # New control step: normalize obs and query the policy.
                    ob = np.clip((ob - ob_rms.mean) / np.sqrt(ob_rms.var + 1e-8), -10.0, 10.0)
                    _, u, _, _ = actor_critic.act(torch.tensor(ob).unsqueeze(0), None, None, deterministic = deterministic)
                    u = u.detach().squeeze(dim = 0).numpy()
                # Re-apply the latest action for every substep in between.
                ob, reward, done, _ = render_env.step(u)
                total_reward += reward
                episode_length += 1
                # render: smoothly track the robot with the camera.
                render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
                target_pos = 0.5 * (lower + upper)
                camera_pos = viewer.camera_params.position.copy()
                camera_pos += 20.0 * time_step * (target_pos - camera_pos)
                sim_time += time_step
                render_time_now = time.time()
                if render_time_now - render_time_start < sim_time:
                    time.sleep(sim_time - (render_time_now - render_time_start))
                # Only draw when rendering has not fallen behind real time.
                if sim_time + time_step > render_time_now - render_time_start:
                    viewer.camera_params.position = camera_pos
                    viewer.update(time_step)
                    viewer.render(render_env.sim)
        print_info('rendering:')
        print_info('length = ', episode_length)
        print_info('total reward = ', total_reward)
        print_info('avg reward = ', total_reward / (episode_length * render_env.frame_skip))
        if not repeat:
            break
    del viewer
"numpy.mean",
"numpy.sqrt",
"time.sleep",
"a2c_ppo_acktr.utils.get_vec_normalize",
"numpy.zeros",
"pyrobotdesign.GLFWViewer",
"torch.tensor",
"numpy.linalg.norm",
"torch.no_grad",
"a2c_ppo_acktr.envs.make_vec_envs",
"time.time",
"torch.zeros"
] | [((287, 377), 'a2c_ppo_acktr.envs.make_vec_envs', 'make_vec_envs', (['env_name', '(seed + num_processes)', 'num_processes', 'None', 'None', 'device', '(True)'], {}), '(env_name, seed + num_processes, num_processes, None, None,\n device, True)\n', (300, 377), False, 'from a2c_ppo_acktr.envs import make_vec_envs\n'), ((420, 454), 'a2c_ppo_acktr.utils.get_vec_normalize', 'utils.get_vec_normalize', (['eval_envs'], {}), '(eval_envs)\n', (443, 454), False, 'from a2c_ppo_acktr import utils\n'), ((636, 724), 'torch.zeros', 'torch.zeros', (['num_processes', 'actor_critic.recurrent_hidden_state_size'], {'device': 'device'}), '(num_processes, actor_critic.recurrent_hidden_state_size, device\n =device)\n', (647, 724), False, 'import torch\n'), ((746, 790), 'torch.zeros', 'torch.zeros', (['num_processes', '(1)'], {'device': 'device'}), '(num_processes, 1, device=device)\n', (757, 790), False, 'import torch\n'), ((1745, 1756), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1753, 1756), True, 'import numpy as np\n'), ((1769, 1780), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1777, 1780), True, 'import numpy as np\n'), ((1873, 1888), 'pyrobotdesign.GLFWViewer', 'rd.GLFWViewer', ([], {}), '()\n', (1886, 1888), True, 'import pyrobotdesign as rd\n'), ((3980, 3991), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3988, 3991), True, 'import numpy as np\n'), ((4004, 4015), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4012, 4015), True, 'import numpy as np\n'), ((4108, 4123), 'pyrobotdesign.GLFWViewer', 'rd.GLFWViewer', ([], {}), '()\n', (4121, 4123), True, 'import pyrobotdesign as rd\n'), ((1193, 1295), 'torch.tensor', 'torch.tensor', (['[([0.0] if done_ else [1.0]) for done_ in done]'], {'dtype': 'torch.float64', 'device': 'device'}), '([([0.0] if done_ else [1.0]) for done_ in done], dtype=torch.\n float64, device=device)\n', (1205, 1295), False, 'import torch\n'), ((2068, 2097), 'numpy.linalg.norm', 'np.linalg.norm', (['(upper - lower)'], {}), '(upper - 
lower)\n', (2082, 2097), True, 'import numpy as np\n'), ((2258, 2269), 'time.time', 'time.time', ([], {}), '()\n', (2267, 2269), False, 'import time\n'), ((4303, 4332), 'numpy.linalg.norm', 'np.linalg.norm', (['(upper - lower)'], {}), '(upper - lower)\n', (4317, 4332), True, 'import numpy as np\n'), ((4553, 4564), 'time.time', 'time.time', ([], {}), '()\n', (4562, 4564), False, 'import time\n'), ((858, 873), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (871, 873), False, 'import torch\n'), ((1592, 1621), 'numpy.mean', 'np.mean', (['eval_episode_rewards'], {}), '(eval_episode_rewards)\n', (1599, 1621), True, 'import numpy as np\n'), ((2283, 2298), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2296, 2298), False, 'import torch\n'), ((4578, 4593), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4591, 4593), False, 'import torch\n'), ((3361, 3372), 'time.time', 'time.time', ([], {}), '()\n', (3370, 3372), False, 'import time\n'), ((5608, 5619), 'time.time', 'time.time', ([], {}), '()\n', (5617, 5619), False, 'import time\n'), ((3460, 3520), 'time.sleep', 'time.sleep', (['(sim_time - (render_time_now - render_time_start))'], {}), '(sim_time - (render_time_now - render_time_start))\n', (3470, 3520), False, 'import time\n'), ((5724, 5784), 'time.sleep', 'time.sleep', (['(sim_time - (render_time_now - render_time_start))'], {}), '(sim_time - (render_time_now - render_time_start))\n', (5734, 5784), False, 'import time\n'), ((2470, 2497), 'numpy.sqrt', 'np.sqrt', (['(ob_rms.var + 1e-08)'], {}), '(ob_rms.var + 1e-08)\n', (2477, 2497), True, 'import numpy as np\n'), ((2557, 2573), 'torch.tensor', 'torch.tensor', (['ob'], {}), '(ob)\n', (2569, 2573), False, 'import torch\n'), ((4861, 4888), 'numpy.sqrt', 'np.sqrt', (['(ob_rms.var + 1e-08)'], {}), '(ob_rms.var + 1e-08)\n', (4868, 4888), True, 'import numpy as np\n'), ((4952, 4968), 'torch.tensor', 'torch.tensor', (['ob'], {}), '(ob)\n', (4964, 4968), False, 'import torch\n')] |
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import sys
import tensorflow as tf
import matplotlib
from PIL import Image
import matplotlib.patches as patches
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
import argparse
import glob
# import model
model_path = './trained_model/frozen_inference_graph.pb'
# import images
imgs = glob.glob("images/*.jpg")
imgs.sort()
# path idx: length of the "images/" prefix, used to strip it from filenames.
idx = len("images/")
# prepare dataframe: one detection-summary row per image.
df = pd.DataFrame(columns = ["image", "result", "score", "position"])
# Load the frozen TF1 inference graph into a fresh Graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.compat.v1.GraphDef()
    with tf.compat.v2.io.gfile.GFile(model_path, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
def load_image_into_numpy_array(image):
    """Convert a PIL-style image into an (H, W, 3) uint8 numpy array."""
    im_width, im_height = image.size  # PIL reports (width, height)
    flat_pixels = np.array(image.getdata())
    return flat_pixels.reshape((im_height, im_width, 3)).astype(np.uint8)
# Label map for the single "Wally" class.
label_map = label_map_util.load_labelmap('./trained_model/labels.txt')
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=1, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
for img in imgs:
    with detection_graph.as_default():
        # NOTE(review): a new Session per image is wasteful; hoisting it
        # (and the argparse setup below, which is unused) out of the loop
        # would speed this up considerably.
        with tf.compat.v1.Session(graph=detection_graph) as sess:
            parser = argparse.ArgumentParser()
            #parser.add_argument('image_path')
            #args = parser.parse_args()
            image_np = load_image_into_numpy_array(Image.open(img)) #args.image_path))
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: np.expand_dims(image_np, axis=0)})
            print('img =',img)
            print('scores =',scores)
            print('classes =',classes)
            print('boxes =',boxes)
            print('num_detections =',num_detections)
            bboxes = boxes[scores > 0.2] #min_score_thresh
            # NOTE(review): image_np.shape[:2] is (rows, cols) i.e.
            # (height, width) -- the names here look swapped; confirm the
            # intended axis order before trusting `position`.
            width, height = image_np.shape[:2]
            pos = []
            for box in bboxes:
                # Boxes are normalized [ymin, xmin, ymax, xmax]; pos keeps
                # only the centre of the last box above threshold.
                ymin, xmin, ymax, xmax = box
                pos = [np.mean([int(xmin * height), int(xmax * height)]), np.mean([int(ymin * width), int(ymax * width)])]#[int(xmin * height), int(ymin * width), int(xmax * height), int(ymax * width)]
            if scores[0][0] < 0.1:
                result = 'Wally not found'
            else:
                result = 'Wally found'
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
            # pd.concat is the modern replacement.
            df = df.append({
                "image": img[idx:-4],
                "result": result,
                "score": scores[0][0],
                "position": pos}, ignore_index = True)
            print(df)
            # Re-written every iteration so partial results survive a crash.
            df.to_csv('wally_positions.csv')
#            vis_util.visualize_boxes_and_labels_on_image_array(
#                image_np,
#                np.squeeze(boxes),
#                np.squeeze(classes).astype(np.int32),
#                np.squeeze(scores),
#                category_index,
#                use_normalized_coordinates=True,
#                line_thickness=8)
#            plt.figure(figsize=(12, 8))
#            plt.imshow(image_np)
#            plt.show()
| [
"tensorflow.Graph",
"PIL.Image.open",
"tensorflow.compat.v1.GraphDef",
"argparse.ArgumentParser",
"tensorflow.import_graph_def",
"numpy.expand_dims",
"pandas.DataFrame",
"tensorflow.compat.v2.io.gfile.GFile",
"object_detection.utils.label_map_util.load_labelmap",
"object_detection.utils.label_map_... | [((430, 455), 'glob.glob', 'glob.glob', (['"""images/*.jpg"""'], {}), "('images/*.jpg')\n", (439, 455), False, 'import glob\n'), ((527, 589), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['image', 'result', 'score', 'position']"}), "(columns=['image', 'result', 'score', 'position'])\n", (539, 589), True, 'import pandas as pd\n'), ((611, 621), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (619, 621), True, 'import tensorflow as tf\n'), ((1097, 1155), 'object_detection.utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['"""./trained_model/labels.txt"""'], {}), "('./trained_model/labels.txt')\n", (1125, 1155), False, 'from object_detection.utils import label_map_util\n'), ((1169, 1272), 'object_detection.utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': '(1)', 'use_display_name': '(True)'}), '(label_map, max_num_classes=1,\n use_display_name=True)\n', (1215, 1272), False, 'from object_detection.utils import label_map_util\n'), ((1286, 1334), 'object_detection.utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (1322, 1334), False, 'from object_detection.utils import label_map_util\n'), ((676, 699), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (697, 699), True, 'import tensorflow as tf\n'), ((709, 754), 'tensorflow.compat.v2.io.gfile.GFile', 'tf.compat.v2.io.gfile.GFile', (['model_path', '"""rb"""'], {}), "(model_path, 'rb')\n", (736, 754), True, 'import tensorflow as tf\n'), ((864, 906), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (883, 906), True, 'import tensorflow as tf\n'), ((1403, 1446), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'detection_graph'}), '(graph=detection_graph)\n', (1423, 1446), 
True, 'import tensorflow as tf\n'), ((1473, 1498), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1496, 1498), False, 'import argparse\n'), ((1625, 1640), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (1635, 1640), False, 'from PIL import Image\n'), ((2219, 2251), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (2233, 2251), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader, random_split
import cv2
import matplotlib.pyplot as plt
from torchvision.datasets import CIFAR10
from PIL import Image
def to_RGB(image: Image) -> Image:
    """Convert an RGBA PIL image to RGB by compositing onto a white
    background (re-encoded as JPEG at quality 80), returning a PIL image.

    Images already in RGB mode are returned unchanged.
    """
    if image.mode == 'RGB':
        return image
    image.load()  # required for png.split()
    background = Image.new("RGB", image.size, (255, 255, 255))
    background.paste(image, mask=image.split()[3])  # 3 is the alpha channel
    file_name = 'tmp.jpg'
    background.save(file_name, 'JPEG', quality=80)
    # BUG FIX: cv2 has no `open` attribute, so this line always raised
    # AttributeError; return a PIL image as the annotation promises.
    return Image.open(file_name)
class rgb2YCrCb(object):
    """Transform: tensor -> PIL -> YCrCb, keeping only the (Cr, Cb) chroma pair."""

    def __init__(self):
        # Converters kept as attributes; ts2 is unused by __call__ but
        # preserved for interface compatibility.
        self.ts = torchvision.transforms.ToPILImage()
        self.ts2 = transforms.ToTensor()

    def __call__(self, tensor):
        pil_image = self.ts(tensor)
        ycrcb = cv2.cvtColor(np.float32(pil_image), cv2.COLOR_BGR2YCR_CB)
        _luma, cr, cb = cv2.split(ycrcb)
        return cv2.merge((cr, cb))

    def __repr__(self):
        return self.__class__.__name__
class rgb2YCrCb_(object):
    """Transform: tensor -> PIL -> YCrCb, keeping only the luma (Y) plane."""

    def __init__(self):
        # Converters kept as attributes; ts2 is unused by __call__ but
        # preserved for interface compatibility.
        self.ts = torchvision.transforms.ToPILImage()
        self.ts2 = transforms.ToTensor()

    def __call__(self, tensor):
        pil_image = self.ts(tensor)
        ycrcb = cv2.cvtColor(np.float32(pil_image), cv2.COLOR_BGR2YCR_CB)
        luma, _cr, _cb = cv2.split(ycrcb)
        return luma

    def __repr__(self):
        return self.__class__.__name__
class MyAddGaussianNoise(object):
    """Transform that adds element-wise Gaussian noise to a tensor.

    Each call draws fresh noise as ``N(0, 1) * std`` and shifts the result
    by ``mean``.
    """

    def __init__(self, mean=0., std=0.1):
        self.std = std
        self.mean = mean

    def __call__(self, tensor):
        noise = torch.randn(tensor.size()) * self.std
        return tensor + noise + self.mean

    def __repr__(self):
        return f'{self.__class__.__name__}(mean={self.mean}, std={self.std})'
class ImageDataset(torch.utils.data.Dataset):
    """CIFAR-10 wrapper returning the raw image plus two transformed views.

    Each item is the 4-tuple ``(image, view1, view2, label)`` where ``view1``
    and ``view2`` are produced by ``transform1`` / ``transform2``. When a
    transform is not supplied, the untransformed image is returned in its
    place (the original code raised NameError in that case).
    """

    def __init__(self, data_num, train_=True, transform1=None, transform2=None, train=True):
        """
        :param data_num: number of samples this dataset reports via len().
        :param train_: selects the CIFAR-10 train (True) or test (False) split.
        :param transform1: optional transform producing the second tuple element.
        :param transform2: optional transform producing the third tuple element.
        :param train: unused; kept only for backward compatibility with callers.
        """
        self.transform1 = transform1
        self.transform2 = transform2
        self.ts = torchvision.transforms.ToPILImage()
        self.ts2 = transforms.ToTensor()
        self.train = train_
        self.data_dir = './'
        self.data_num = data_num
        self.data = []
        self.label = []
        # Download once (no-op if already cached); the second call loads the split.
        CIFAR10(self.data_dir, train=True, download=True)
        self.data = CIFAR10(self.data_dir, train=self.train, transform=self.ts2)

    def __len__(self):
        return self.data_num

    def __getitem__(self, idx):
        out_data = self.data[idx][0]
        out_label = torch.from_numpy(np.array(self.data[idx][1])).long()
        # Bug fix: out_data1/out_data2 were previously undefined (NameError)
        # whenever the corresponding transform was None, despite the None
        # defaults in __init__; fall back to the untransformed image instead.
        out_data1 = self.transform1(out_data) if self.transform1 else out_data
        out_data2 = self.transform2(out_data) if self.transform2 else out_data
        return out_data, out_data1, out_data2, out_label
# ---------------------------------------------------------------------------
# Demo script: pull CIFAR-10 samples, split them into luma (Y) and chroma
# (Cr, Cb) planes, and visualise several recombinations with matplotlib.
# NOTE: the trailing garbage on the original last line has been removed;
# the script ends by closing the matplotlib figure.
# ---------------------------------------------------------------------------
ts = torchvision.transforms.ToPILImage()
ts2 = transform = transforms.ToTensor()
# Kept for the commented-out Normalize/Resize experiments below.
dims = (32 * 4, 32 * 4)
mean, std = [0.5, 0.5, 0.5], [0.25, 0.25, 0.25]

# Identity pipeline (normalization/resize experiments left disabled).
trans2 = torchvision.transforms.Compose([
    # torchvision.transforms.Normalize(mean, std),
    # torchvision.transforms.Resize(dims)
])
# Grayscale view of the input image (currently unused downstream).
trans1 = torchvision.transforms.Compose([
    torchvision.transforms.Grayscale()
])
# Chroma-only (Cr, Cb) view.
trans3 = torchvision.transforms.Compose([
    rgb2YCrCb(),
])
# Luma-only (Y) view.
trans4 = torchvision.transforms.Compose([
    rgb2YCrCb_(),
])

dataset = ImageDataset(8, transform1=trans4, transform2=trans3)
testloader = DataLoader(dataset, batch_size=4,
                        shuffle=True, num_workers=0)

for out_data, out_data1, out_data2, out_label in testloader:
    for i in range(len(out_label)):
        image = out_data[i]
        Y = np.array(out_data1[i]).reshape(32, 32)  # luma plane
        CC_2 = np.array(out_data2[i])                # stacked (Cr, Cb) planes
        print(out_label[i])

        # 1) Luma channel alone.
        plt.imshow(Y, cmap="gray")
        plt.title('Y')
        plt.pause(1)

        # 2) Chroma channels recombined with a zero luma plane.
        X = np.zeros((32, 32)).astype(np.uint8)
        CC_ = CC_2.astype(np.uint8)
        XCC = cv2.merge((X, CC_))
        XCC_ = cv2.cvtColor(XCC, cv2.COLOR_YCR_CB2BGR)
        plt.imshow(XCC_ / 255.)
        plt.title('XCC')
        plt.pause(1)

        # 3) Full reconstruction from Y + Cr + Cb.
        print(Y.shape, CC_2.shape)
        YCC = cv2.merge((Y, CC_2))
        orgYCrCb_2 = cv2.cvtColor(YCC, cv2.COLOR_YCR_CB2BGR)
        plt.imshow(orgYCrCb_2 / 255.)
        plt.title('Y+Cr+Cb')
        plt.pause(1)

        # 4) Same reconstruction repeated (kept from the original script).
        YCC_ = cv2.merge((Y, CC_2))
        orgYCrCb_2 = cv2.cvtColor(YCC_, cv2.COLOR_YCR_CB2BGR)
        plt.imshow(orgYCrCb_2 / 255.)
        plt.title('Y+Cr+Cb_')
        plt.pause(1)
plt.close()
"matplotlib.pyplot.imshow",
"cv2.open",
"cv2.merge",
"torchvision.transforms.ToPILImage",
"numpy.float32",
"PIL.Image.new",
"torchvision.transforms.Grayscale",
"matplotlib.pyplot.close",
"numpy.array",
"torchvision.datasets.CIFAR10",
"numpy.zeros",
"cv2.split",
"torch.utils.data.DataLoader",... | [((3113, 3148), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (3146, 3148), False, 'import torchvision\n'), ((3165, 3186), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3184, 3186), False, 'from torchvision import transforms\n'), ((3260, 3294), 'torchvision.transforms.Compose', 'torchvision.transforms.Compose', (['[]'], {}), '([])\n', (3290, 3294), False, 'import torchvision\n'), ((4056, 4118), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(dataset, batch_size=4, shuffle=True, num_workers=0)\n', (4066, 4118), False, 'from torch.utils.data import DataLoader, random_split\n'), ((377, 422), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'image.size', '(255, 255, 255)'], {}), "('RGB', image.size, (255, 255, 255))\n", (386, 422), False, 'from PIL import Image\n'), ((582, 601), 'cv2.open', 'cv2.open', (['file_name'], {}), '(file_name)\n', (590, 601), False, 'import cv2\n'), ((702, 737), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (735, 737), False, 'import torchvision\n'), ((767, 788), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (786, 788), False, 'from torchvision import transforms\n'), ((965, 984), 'cv2.split', 'cv2.split', (['orgYCrCb'], {}), '(orgYCrCb)\n', (974, 984), False, 'import cv2\n'), ((998, 1017), 'cv2.merge', 'cv2.merge', (['(Cr, Cb)'], {}), '((Cr, Cb))\n', (1007, 1017), False, 'import cv2\n'), ((1176, 1211), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (1209, 1211), False, 'import torchvision\n'), ((1241, 1262), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1260, 1262), False, 'from torchvision import transforms\n'), ((1439, 1458), 'cv2.split', 'cv2.split', (['orgYCrCb'], {}), '(orgYCrCb)\n', (1448, 1458), False, 
'import cv2\n'), ((1472, 1491), 'cv2.merge', 'cv2.merge', (['(Cr, Cb)'], {}), '((Cr, Cb))\n', (1481, 1491), False, 'import cv2\n'), ((2200, 2235), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (2233, 2235), False, 'import torchvision\n'), ((2265, 2286), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2284, 2286), False, 'from torchvision import transforms\n'), ((2461, 2510), 'torchvision.datasets.CIFAR10', 'CIFAR10', (['self.data_dir'], {'train': '(True)', 'download': '(True)'}), '(self.data_dir, train=True, download=True)\n', (2468, 2510), False, 'from torchvision.datasets import CIFAR10\n'), ((2590, 2650), 'torchvision.datasets.CIFAR10', 'CIFAR10', (['self.data_dir'], {'train': 'self.train', 'transform': 'self.ts2'}), '(self.data_dir, train=self.train, transform=self.ts2)\n', (2597, 2650), False, 'from torchvision.datasets import CIFAR10\n'), ((3560, 3594), 'torchvision.transforms.Grayscale', 'torchvision.transforms.Grayscale', ([], {}), '()\n', (3592, 3594), False, 'import torchvision\n'), ((4379, 4393), 'numpy.array', 'np.array', (['CC_2'], {}), '(CC_2)\n', (4387, 4393), True, 'import numpy as np\n'), ((4587, 4613), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Y'], {'cmap': '"""gray"""'}), "(Y, cmap='gray')\n", (4597, 4613), True, 'import matplotlib.pyplot as plt\n'), ((4624, 4638), 'matplotlib.pyplot.title', 'plt.title', (['"""Y"""'], {}), "('Y')\n", (4633, 4638), True, 'import matplotlib.pyplot as plt\n'), ((4678, 4690), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (4687, 4690), True, 'import matplotlib.pyplot as plt\n'), ((4712, 4730), 'numpy.zeros', 'np.zeros', (['(32, 32)'], {}), '((32, 32))\n', (4720, 4730), True, 'import numpy as np\n'), ((4901, 4920), 'cv2.merge', 'cv2.merge', (['(X, CC_)'], {}), '((X, CC_))\n', (4910, 4920), False, 'import cv2\n'), ((4935, 4974), 'cv2.cvtColor', 'cv2.cvtColor', (['XCC', 'cv2.COLOR_YCR_CB2BGR'], {}), '(XCC, 
cv2.COLOR_YCR_CB2BGR)\n', (4947, 4974), False, 'import cv2\n'), ((4983, 5007), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(XCC_ / 255.0)'], {}), '(XCC_ / 255.0)\n', (4993, 5007), True, 'import matplotlib.pyplot as plt\n'), ((5013, 5029), 'matplotlib.pyplot.title', 'plt.title', (['"""XCC"""'], {}), "('XCC')\n", (5022, 5029), True, 'import matplotlib.pyplot as plt\n'), ((5038, 5050), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (5047, 5050), True, 'import matplotlib.pyplot as plt\n'), ((5109, 5129), 'cv2.merge', 'cv2.merge', (['(Y, CC_2)'], {}), '((Y, CC_2))\n', (5118, 5129), False, 'import cv2\n'), ((5150, 5189), 'cv2.cvtColor', 'cv2.cvtColor', (['YCC', 'cv2.COLOR_YCR_CB2BGR'], {}), '(YCC, cv2.COLOR_YCR_CB2BGR)\n', (5162, 5189), False, 'import cv2\n'), ((5198, 5228), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(orgYCrCb_2 / 255.0)'], {}), '(orgYCrCb_2 / 255.0)\n', (5208, 5228), True, 'import matplotlib.pyplot as plt\n'), ((5234, 5254), 'matplotlib.pyplot.title', 'plt.title', (['"""Y+Cr+Cb"""'], {}), "('Y+Cr+Cb')\n", (5243, 5254), True, 'import matplotlib.pyplot as plt\n'), ((5263, 5275), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (5272, 5275), True, 'import matplotlib.pyplot as plt\n'), ((5300, 5320), 'cv2.merge', 'cv2.merge', (['(Y, CC_2)'], {}), '((Y, CC_2))\n', (5309, 5320), False, 'import cv2\n'), ((5341, 5381), 'cv2.cvtColor', 'cv2.cvtColor', (['YCC_', 'cv2.COLOR_YCR_CB2BGR'], {}), '(YCC_, cv2.COLOR_YCR_CB2BGR)\n', (5353, 5381), False, 'import cv2\n'), ((5390, 5420), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(orgYCrCb_2 / 255.0)'], {}), '(orgYCrCb_2 / 255.0)\n', (5400, 5420), True, 'import matplotlib.pyplot as plt\n'), ((5426, 5447), 'matplotlib.pyplot.title', 'plt.title', (['"""Y+Cr+Cb_"""'], {}), "('Y+Cr+Cb_')\n", (5435, 5447), True, 'import matplotlib.pyplot as plt\n'), ((5456, 5468), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (5465, 5468), True, 'import matplotlib.pyplot as plt\n'), ((5486, 
5497), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5495, 5497), True, 'import matplotlib.pyplot as plt\n'), ((904, 922), 'numpy.float32', 'np.float32', (['tensor'], {}), '(tensor)\n', (914, 922), True, 'import numpy as np\n'), ((1378, 1396), 'numpy.float32', 'np.float32', (['tensor'], {}), '(tensor)\n', (1388, 1396), True, 'import numpy as np\n'), ((4309, 4320), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (4317, 4320), True, 'import numpy as np\n'), ((2851, 2871), 'numpy.array', 'np.array', (['out_label_'], {}), '(out_label_)\n', (2859, 2871), True, 'import numpy as np\n')] |
#Project: GBS Tool
# Author: Dr. <NAME>, <EMAIL>, denamics GmbH
# Date: January 16, 2018
# License: MIT License (see LICENSE file of this package for more information)
# Contains the main flow of the optimization as it is to be called from the GBSController.
import os
import time
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
from Analyzer.DataRetrievers.getBasecase import getBasecase
from Analyzer.DataRetrievers.getDataSubsets import getDataSubsets
from Analyzer.DataRetrievers.readNCFile import readNCFile
from Analyzer.PerformanceAnalyzers.getFuelUse import getFuelUse
from Analyzer.PerformanceAnalyzers.getPrimaryREContribution import getPrimaryREContribution
from Model.Operational.generateRuns import generateRuns
from Model.Operational.runSimulation import runSimulation
from Optimizer.FitnessFunctions.getFitness import getFitness
from Optimizer.OptimizationBoundaryCalculators.getOptimizationBoundaries import getOptimizationBoundaries
# DEV imports
class optimize:
'''
Main class of the optimization engine. This creates an object with all major pieces of optimization contained.
The constructor sets up much of the initial steps, such as selecting shorter data streams to work with, estimating
the interval for energy storage power and energy capacity to search for a solution in (if this bounding is desired).
The 'doOptimization' method executes the actual optimization routine (based on the selected configuration). The
results from each iteration are written to a separate folder for later analysis.
'''
    def __init__(self, projectName, inputArgs):
        '''
        Constructor: does all necessary setup for the optimization itself:
        1) it loads the configuration file optimizerConfig<ProjectName>.xml and distributes required information to
        pertinent variables.
        2) it reads the base case for reference values and to retrieve immutable data streams (firmLoadP)
        3) it determines the boundaries of the search space (for power and energy capacity, or as directed in the
        config) for the optimization algorithms.
        4) it finds shorter time-series representative of the data set to run faster simulations on. The method used is
        based on the configuration file.
        5) it calculates the base case's performance with respect to the optimization objective given in the
        configuration. [Note: this is not strictly required for optimization, except that some meta data may be
        retrieved in this step that later is used in the fitness calculations of actual simulation results.]
        :param projectName: [String] name of the project, used to locate project folder tree within the GBSProject
        folder structure
        :param inputArgs: [Array of strings] spare input, currently un-used.
        '''
        # Setup key parameters
        self.thisPath = os.path.dirname(os.path.realpath(__file__))
        self.projectName = projectName
        self.rootProjectPath = os.path.join(self.thisPath, '../../GBSProjects/', self.projectName) # root path to project files relative to this file location
        # Pull in inputArgs for potential later processing
        self.inputArgs = inputArgs
        # Load configuration from optimizerConfig<ProjectName>.xml
        configFileName = 'optimizerConfig' + self.projectName + '.xml'
        configPath = os.path.join(self.rootProjectPath, 'InputData/Setup/', configFileName)
        configFile = open(configPath, 'r')
        configFileXML = configFile.read()
        configFile.close()
        configSoup = bs(configFileXML, "xml")
        # Algorithm/objective selection as configured in the XML file.
        self.searchMethod = configSoup.optimizationMethod.get('value')
        self.optimizationObjective = configSoup.optimizationObjective.get('value')
        self.dataReductionMethod = configSoup.dataReductionMethod.get('value')  # should be 'RE-load-one-week'
        self.boundaryMethod = configSoup.optimizationEnvelopeEstimator.get('value')  # should be 'variableSRC'
        # Bins for reference parameters and current best performing
        # both will be written below with actual initial values, but are placed here so they don't get lost
        self.basePerformance = 0
        self.currentBestPerformance = 0
        # Retrieve data from base case (Input files)
        self.time, self.firmLoadP, self.varLoadP, self.firmGenP, self.varGenP, self.allGen, self.baseComponents = \
            getBasecase(self.projectName, self.rootProjectPath)
        # Calculate boundaries for optimization search
        # Get boundary constraints from config file, since this may vary from method to method, these are then lumped
        # into a list 'constraints' that is passed through to the appropriate algorithm
        opBndMethodConfig = configSoup.find(self.boundaryMethod + 'Config')
        opBndMethodConfigChildren = opBndMethodConfig.findChildren()
        constraints = list()
        for child in opBndMethodConfigChildren:
            constraints.append(child.get('value'))
        # Search-space envelope: min/max power and energy capacity for the ESS.
        self.minESSPPa, self.maxESSPPa, self.minESSEPa, self.maxESSEPa = \
            getOptimizationBoundaries(self.boundaryMethod, self.time, self.firmLoadP, self.varLoadP, self.firmGenP,
                                      self.varGenP, constraints)
        # Get the short test time-series (representative snippets used for fast iteration runs)
        reductionInput = \
            pd.DataFrame({'time':self.time, 'firmLoadP':self.firmLoadP, 'varGenP':self.varGenP})#, index=self.time)
        self.abbrevDatasets, self.abbrevDatasetWeights = getDataSubsets(reductionInput, self.dataReductionMethod, otherInputs=[])
        # Setup optimization runs
        # Branch based on input from 'configSoup'->optimizationObjective
        # Get base case KPI based on optimization objective
        # Any of the following if-branches needs to write to self.basePerformance with the reference KPI based on the
        # optimization objective
        # Futurefeature: retrieve basePerformance of abbreviated data sets instead of full base case for direct comparability with optimization iteration outputs.
        if self.optimizationObjective == 'maxREContribution':
            # Calculate base case RE contribution
            self.basePerformance = getPrimaryREContribution(self.time, self.firmLoadP, self.firmGenP, self.varGenP)
        elif self.optimizationObjective == 'minFuelUtilization':
            # Calculate base case fuel consumption
            # Need to load fuel curves for this
            genFleetList = list(self.allGen.columns.values)
            genFleetList.remove('time')
            genFleet = list()
            for gen in genFleetList:
                # Strip the trailing channel letter (e.g. 'gen0P' -> 'gen0') —
                # presumably the power-channel suffix; TODO confirm.
                genFleet.append(gen[:-1])
            self.fuelCurveDataPoints = pd.DataFrame(index = genFleet, columns = ['fuelCurve_pPu','fuelCurve_massFlow','POutMaxPa'])
            # Read the fuel curve data points from each generator's descriptor file.
            for genString in genFleet:
                genPath = os.path.join(self.rootProjectPath, 'InputData/Components/', genString + 'Descriptor.xml')
                genFile = open(genPath, 'r')
                genFileXML = genFile.read()
                genFile.close()
                genSoup = bs(genFileXML, "xml")
                self.fuelCurveDataPoints.loc[genString, 'fuelCurve_pPu'] = genSoup.fuelCurve.pPu.get('value')
                self.fuelCurveDataPoints.loc[genString, 'fuelCurve_massFlow'] = genSoup.fuelCurve.massFlow.get('value')
                self.fuelCurveDataPoints.loc[genString, 'POutMaxPa'] = genSoup.POutMaxPa.get('value')
            self.genAllFuelUsedBase, self.fuelStatsBase = getFuelUse(self.allGen, self.fuelCurveDataPoints)
            self.basePerformance = self.fuelStatsBase['total']
        else:
            raise ValueError('Unknown optimization objective, %s, selected.' % self.optimizationObjective)
        # Since we do not have a better performing set of sims yet, make basePerformance best performing.
        self.currentBestPerformance = self.basePerformance
        # Retrieve additional optimization config arguments for the selected algorithm
        self.optimizationConfig = dict()
        opAlgConfig = configSoup.find(self.searchMethod + 'Config')
        opAlgConfigChildren = opAlgConfig.findChildren()
        for child in opAlgConfigChildren:
            self.optimizationConfig[child.name] = child.get('value')
        #print(self.optimizationConfig)
def doOptimization(self):
'''
Interface to dispatch specified optimization algorithm. Returns a value error if the string in self.searchMethod
does not match a known optimization method.
Currently, only hillClimber is implemented.
:return:
'''
#print(self.searchMethod)
if self.searchMethod == 'hillClimber':
# call hillClimber function
self.hillClimber()
# FUTUREFEATURE: add further optimization methods here
else:
raise ValueError('Unknown optimization method, %s, selected.' % self.searchMethod)
    def hillClimber(self):
        '''
        Adaptive hill climber method for optimization of EES power and energy capacity.

        Each iteration: (1) dispatches one simulation per abbreviated data
        snippet with the current (essPPa, essEPa) candidate, (2) computes a
        fitness value from the collected results, and (3) either accepts the
        candidate as the new best and samples the next guess near it, or widens
        the random search around the previous best. Terminates after
        convergenceRepeatNum iterations without improvement, or after
        maxRunNumber iterations.
        :return: nothing - writes to object-wide variables (self.essPPaBest,
            self.essEPaBest, self.fitness, self.fl).
        '''
        maxIterNumber = int(float(self.optimizationConfig['maxRunNumber']))
        convergenceRepeatNum = int(float(self.optimizationConfig['convergenceRepeatNum']))
        convergenceFlag = False
        # Select starting configuration at random, within the given bounds.
        # The power level can be chose freely between the previously determined bounds.
        self.essPPa = int(self.minESSPPa + (self.maxESSPPa - self.minESSPPa)*np.random.random_sample())
        # The energy capacity must meet at least the minimum duration requirement, and cannot exceed the maximum.
        self.essEPa = float(self.essPPa * (self.minESSEPa/self.minESSPPa) +
                            (self.maxESSEPa - (self.essPPa * (self.minESSEPa/self.minESSPPa))) * np.random.random_sample())
        print(['Initial guess: ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(self.essEPa) + ' kWh.'])
        # We want to make set numbers congruent with iteration numbers, a unique additional identifier needs to be added to the 'SetX' folder names.
        # We'll use current unix time to the second as the unique identifier. Thus, if the 'Set0' directory already
        # exists, we will name all directories of this optimization run as 'Set[IterationNumber].[snippetIdx].[identifier]', where
        # 'identifier' is the current unix time rounded to the nearest second.
        identifier = str(int(time.time()))
        # Get the index for the ESS to be added to the system
        essIdx = self.essExists()
        # Create bins for fitness tracking
        self.fitness = None
        fitnessLog = pd.DataFrame(index = pd.Index(range(0, maxIterNumber)), columns = ['fitness', 'essPPa', 'essEPa', 'bestFitness', 'bestP', 'bestE'])
        for iterIdx in range(0, maxIterNumber):
            #time.sleep(1)
            # Once converged, remaining iterations are skipped (loop still runs to maxIterNumber).
            if not convergenceFlag:
                # Need the abbreviate data designators to retrieve start and stop time stamp indicies
                snippetIdx = self.abbrevDatasets.index.levels[0]
                setIdx = 0
                setPathList = list()
                setNameList = list()
                firmLoadsDF = pd.DataFrame()
                #Create setup file for each of the six simulations
                for sIdx in snippetIdx:
                    # Write the sets attributes, create the directory, etc.
                    startTimeIdx = self.abbrevDatasets.loc[sIdx].index[0]
                    endTimeIdx = self.abbrevDatasets.loc[sIdx].index[-1]
                    setPath, setName = self.setupSet(iterIdx, setIdx, identifier, essIdx, self.essPPa, self.essEPa, startTimeIdx, endTimeIdx)
                    setPathList.append(setPath)
                    setNameList.append(setName)
                    # Generate runs
                    print('Iteration ' + str(iterIdx) + ', Snippet ' + str(sIdx) + ' simulation dispatched.')
                    generateRuns(setPath)
                    # Dispatch simulations
                    runSimulation(setPath)
                    # Pass through firm load data
                    firmLoadsDF['firmLoadP.' + str(setIdx)] = self.abbrevDatasets.loc[sIdx]['firmLoadP'][:-1].values
                    firmLoadsDF['firmLoadTime.' + str(setIdx)] = self.abbrevDatasets.loc[sIdx]['time'][:-1].values
                    setIdx = setIdx + 1
                    print('Iteration '+ str(iterIdx) +', Snippet ' + str(sIdx) + ' completed.')
                # Get KPIs
                # Collect data: we need to pull all the pertinent data for KPI calculation together from the results
                # in the set folders.
                self.resultsDF, resultsMetaInfo = self.collectResults(setPathList, setNameList)
                self.resultsDF = pd.concat([self.resultsDF, firmLoadsDF], axis = 1)
                # Get the new fitness value (smaller is better, see comparison below)
                newFitness = getFitness(self.optimizationObjective, self.rootProjectPath, self.resultsDF, self.abbrevDatasetWeights, resultsMetaInfo)
                # Log progress
                fitnessLog['fitness'].loc[iterIdx] = newFitness
                fitnessLog['essPPa'].loc[iterIdx] = self.essPPa
                fitnessLog['essEPa'].loc[iterIdx] = self.essEPa
                # Get KPIs
                # TODO complete getRunMetaData(setPath, [0]) once getRunMetaData is completed. This might also be run then
                # prior to getFitness and save some effort.
                # Ascertain fitness
                # First iteration just write the values
                if not self.fitness:
                    self.fitness = newFitness
                    self.essPPaBest = self.essPPa
                    self.essEPaBest = self.essEPa
                    # Random next guess: The power level can be chose freely between the previously determined bounds.
                    self.essPPa = int(self.minESSPPa + (self.maxESSPPa - self.minESSPPa) * np.random.random_sample())
                    # The energy capacity must meet at least the minimum duration requirement, and cannot exceed the maximum.
                    self.essEPa = float(self.essPPa * (self.minESSEPa / self.minESSPPa) +
                                        (self.maxESSEPa - (self.essPPa * (
                                                self.minESSEPa / self.minESSPPa))) * np.random.random_sample())
                    print(['Iteration: '+ str(iterIdx) + ', ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(self.essEPa) + ' kWh, Fitness: ' + str(self.fitness)])
                    # Set the improvement tracker
                    lastImprovement = 1
                # Other iterations check if fitness has improved (that is, has gotten smaller!!!)
                elif newFitness < self.fitness:
                    self.fitness = newFitness
                    self.essPPaBest = self.essPPa
                    self.essEPaBest = self.essEPa
                    self.essPPa, self.essEPa = self.getNextGuess(fitnessLog, self.essPPaBest, self.essEPaBest, iterIdx)
                    print(['Iteration: ' + str(iterIdx) + ', ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(
                        self.essEPa) + ' kWh, Fitness: ' + str(self.fitness)])
                    # Reset the improvement tracker
                    lastImprovement = 1
                # Lastly if nothing has improved search again in the previously defined range.
                else:
                    # Widen the random number deviation: the smaller iterIdx/lastImprovement,
                    # the more random the next guess drawn by getNextGuess.
                    self.essPPa, self.essEPa = self.getNextGuess(fitnessLog, self.essPPaBest, self.essEPaBest, iterIdx/lastImprovement) #np.sqrt(lastImprovement + 1))
                    # Increment the improvement tracker
                    lastImprovement = lastImprovement + 1
                # If there's no improvement after X iterations in a row, terminate the algorithm.
                # NOTE this can mean two things, either that we have achieved convergence, or that we're stuck somewhere
                if lastImprovement > convergenceRepeatNum:
                    convergenceFlag = True
                    print('*********************************')
                    print('Terminated at Iteration: ' + str(iterIdx) + ' with fitness: ' + str(self.fitness))
                    print(['Iteration: ' + str(iterIdx) + ', ESS P: ' + str(self.essPPa) + ' kW , ESS E: ' + str(
                        self.essEPa) + ' kWh, Fitness: ' + str(self.fitness)])
                # Additional logging
                fitnessLog['bestFitness'].loc[iterIdx] = self.fitness
                fitnessLog['bestP'].loc[iterIdx] = self.essPPaBest
                fitnessLog['bestE'].loc[iterIdx] = self.essEPaBest
        # Expose the full fitness history for later analysis.
        self.fl = fitnessLog
    def getNextGuess(self, fl, pBest, eBest, iterNumParam):
        '''
        This method determines the next values for `essPPa` and `essEPa` that are to be tested in an iteration of the
        hill climber. It uses the historical fitness values from previous iterations and determines the direction of the
        steepest gradient away from the best fitness value. It then biases the random selection for new power and energy
        capacity values in the _opposite_ direction of the steepest gradient with the hope that this is the most likely
        direction to find a better value pair at. If new selections are outside of the constraints put on the search
        space, i.e., maximum and minimum power and energy capacities, and/or minimum duration (at the essPPa selected),
        it corrects selections back to the edges of the search envelope as set by the constraints.
        The more iterations in the past the best found fitness lies, the stronger the random element in picking new
        values. The idea being that the algorithm might be stuck and larger jumps might get it unstuck.
        **Note:** this approach to a hill climber was tested with several test functions
        (found in getFitness.py->getTestFitness). With these test functions the algorithm generally converges well.
        The caveat is, that recent results seem to suggest that the actual search space for the optimal GBS may not be
        smooth, while the test cases used smooth test functions. This should be investigated further.
        :param fl: fitnessLog
        :param pBest: essPPaBest: current best power guess for GBS
        :param eBest: essEPaBest: current best energy guess for GBS
        :param iterNumParam: [float] parameter describing the randomness of the next value pair selection, fraction of
        iteration number and count since the last improved fitness value was found.
        :return: newESSPPa, newESSEPa: [float] new pair of energy and power capacities to run the next iteration with
        '''
        # Reduce the data in fl to the necessary columns and usable values
        fl = fl[['fitness', 'essPPa', 'essEPa']]
        fl = fl.dropna()
        # Parameter used to adjust variability/randomization of next guess
        # TODO make adjustable parameter
        exponent = 0.5
        # Calculate distance from best point (Euclidean, in (P, E) space)
        fl['Dist'] = pd.Series(np.sqrt(list(np.asarray(fl['essPPa'] - pBest)**2 + np.asarray(fl['essEPa'] - eBest)**2)))
        fl = fl.sort_values('Dist')
        # After sorting, row 0 is the best point itself (distance 0) -> the origin.
        originFitness = fl['fitness'].iloc[0]
        originP = fl['essPPa'].iloc[0]
        originE = fl['essEPa'].iloc[0]
        print('Origin P: ' + str(originP) + ', Origin E: ' + str(originE))
        fl = fl[fl.Dist != 0]
        # Fitness slope from the origin to every other previously sampled point.
        fl['Slope'] = (fl['fitness'] - originFitness)/fl['Dist']
        # Get the difference in power-coordinate DOWN the steepest gradient of the four nearest neighbors
        if fl.shape[0] == 1:
            maxSlopeIdx = fl['Slope'].astype(float).index[0]
        elif fl.shape[0] < 3:
            maxSlopeIdx = fl['Slope'].astype(float).idxmax()
        else:
            # With three or more samples, only consider the two nearest neighbors.
            maxSlopeIdx = fl['Slope'][0:2].astype(float).idxmax()
        # Step AWAY from the steepest ascent in the power coordinate.
        dx = fl['essPPa'][maxSlopeIdx] - originP
        newCoord = originP - dx
        # Get random down and up variations from the power-coordinate
        rndDown = (newCoord - self.minESSPPa) * np.random.random_sample()/iterNumParam**exponent
        rndUp = (self.maxESSPPa - newCoord)*np.random.random_sample()/iterNumParam**exponent
        newESSPPa = float(newCoord - rndDown + rndUp)
        # Check constraints: clamp back into the allowed power envelope.
        if newESSPPa < self.minESSPPa:
            newESSPPa = self.minESSPPa
        elif newESSPPa > self.maxESSPPa:
            newESSPPa = self.maxESSPPa
        # Get a random new value of energy storage capacity
        # Get the difference in energy-coordinate DOWN the steepest gradient
        #maxSlopeIdx = fl.index[1]
        dy = fl['essEPa'][maxSlopeIdx] - originE
        newCoordY = originE - dy
        # Get random down and up variations from the energy-coordinate
        # Note that ess needs to meet minimum duration requirement, so the minimum size is constraint by the currently
        # selected power level.
        currentESSEMin = newESSPPa * (self.minESSEPa/self.minESSPPa)
        rndDown = (newCoordY - currentESSEMin) * np.random.random_sample() / iterNumParam**exponent
        rndUp = (self.maxESSEPa - newCoordY) * np.random.random_sample() / iterNumParam**exponent
        newESSEPa = float(newCoordY - rndDown + rndUp)
        # Check constraints: clamp back into the allowed energy envelope.
        if newESSEPa < currentESSEMin:
            newESSEPa = currentESSEMin
        elif newESSEPa > self.maxESSEPa:
            newESSEPa = self.maxESSEPa
        return newESSPPa, newESSEPa
    def setupSet(self, iterIdx, setIdx, identifier, eesIdx, eesPPa, eesEPa, startTimeIdx, endTimeIdx):
        '''
        Generates the specific projectSetAttributes.xml file, and the necessary folder in the project's output folder.
        Returns the name of the specific set and it's absolute path. Set naming follows the convention of
        'Set[iterationNumber].[snippetNumber].[currentUNIXEpoch]', where iterationNumber is the current iteration of the
        of the optimizer, snippetNumber is the numerical identifier of the abbreviated data snippet, and the
        currentUNIXEpoch is the current local machine unix time to the second in int format.
        :param iterIdx: [int] current iteration of optimization algorithm
        :param setIdx: [int] numerical identifier of the snippet of time-series to be run here.
        :param identifier: [int] current local machine UNIX time to the second, could be any other integer
        :param eesIdx: [int] index of the ees to be added to the system, e.g., ees0. This is necessary should the system
            already have an ees that is not part of the optimization.
        :param eesPPa: [float] nameplate power capacity of the ees, assumed to be symmetrical in and out.
        :param eesEPa: [float] nameplate energy capacity of the ees, necessary to calculate ratedDuration, which is the
            actual parameter used in the setup.
        :param startTimeIdx: [int] index of the time stamp in the master-time series where the snippet of data starts
            that is to be run here.
        :param endTimeIdx: [int] index of the time stamp in the master-time series where the snippet of data ends that
            is to be run here.
        :return setPath: [os.path] path to the set folder
        :return setName: [String] name of the set
        '''
        # Get the current path to avoid issues with mkdir
        here = os.path.dirname(os.path.realpath(__file__))
        # * Create the 'SetAttributes' file from the template and the specific information given
        # Load the template
        setAttributeTemplatePath = os.path.join(here, '../GBSModel/Resources/Setup/projectSetAttributes.xml')
        setAttributeTemplateFile = open(setAttributeTemplatePath, 'r')
        setAttributeTemplateFileXML = setAttributeTemplateFile.read()
        setAttributeTemplateFile.close()
        setAttributeSoup = bs(setAttributeTemplateFileXML, 'xml')
        # Write the project name
        setAttributeSoup.project['name'] = self.projectName
        # Write the power levels and duration: three attributes for the same ees component.
        compNameVal = 'ees' + str(eesIdx) + ' ees' + str(eesIdx) + ' ees' + str(eesIdx)
        compTagVal = 'PInMaxPa POutMaxPa ratedDuration'
        compAttrVal = 'value value value'
        # Rated duration in seconds at full rated power (E/P ratio).
        rtdDuration = int(3600*(eesEPa/eesPPa))
        # 'PInMaxPa.value' ties the discharge rating to the charge rating (symmetrical ees).
        compValueVal = str(eesPPa) + ' PInMaxPa.value ' + str(rtdDuration)
        setAttributeSoup.compAttributeValues.compName['value'] = compNameVal
        setAttributeSoup.compAttributeValues.find('compTag')['value'] = compTagVal # See issue 99 for explanation
        setAttributeSoup.compAttributeValues.compAttr['value'] = compAttrVal
        setAttributeSoup.compAttributeValues.compValue['value'] = compValueVal
        # Write additional information regarding run-time, time resolution, etc.
        setupTagVal = 'componentNames runTimeSteps timeStep'
        setupAttrVal = 'value value value'
        componentNamesStr = 'ees' + str(eesIdx) + ',' + ','.join(self.baseComponents)
        setupValueVal = componentNamesStr + ' ' + str(startTimeIdx) + ',' + str(endTimeIdx) + ' ' + str(1)
        setAttributeSoup.setupAttributeValues.find('setupTag')['value'] = setupTagVal
        setAttributeSoup.setupAttributeValues.setupAttr['value'] = setupAttrVal
        setAttributeSoup.setupAttributeValues.setupValue['value'] = setupValueVal
        # Make the directory for this set
        setName = 'Set' + str(iterIdx) + '.' + str(setIdx) + '.' + str(identifier)
        setPath = os.path.join(self.rootProjectPath, 'OutputData/' + setName)
        os.mkdir(setPath)
        filename = self.projectName + setName + 'Attributes.xml'
        setPathName = os.path.join(setPath, filename)
        with open(setPathName, 'w') as xmlfile:
            xmlfile.write(str(setAttributeSoup))
        # NOTE(review): redundant - the with-statement above already closed the file.
        xmlfile.close()
        return setPath, setName
def essExists(self):
'''
Checks if the system setup already contains one or more ESS components; looks for the largest index of those
components, and returns it as the index for the ESS used in optimization.
:return: essIdx
'''
# We also need to determine the unique name for the ess. Normally, this should be ess0. However, in the rare
# situation that ess0 (and essX for that matter) already exists, we need to make sure we pick an available
# numeric identifier
# Load configuration from optimizerConfig<ProjectName>.xml
setupFileName = self.projectName + 'Setup.xml'
setupPath = os.path.join(self.rootProjectPath, 'InputData/Setup/', setupFileName)
setupFile = open(setupPath, 'r')
setupFileXML = setupFile.read()
setupFile.close()
setupSoup = bs(setupFileXML, "xml")
components = setupSoup.componentNames.get('value').split()
essComps = [comp for comp in components if comp.startswith('ees')]
essNum = []
for num in essComps:
essNum.append(int(num[3:]))
if not essNum:
essNumMax = 0
else:
essNumMax = max(essNum)
essIdx = essNumMax
return essIdx
def collectResults(self, setPathList, setNameList):
'''
TODO document
:param setPathList:
:return resultsDF:
'''
# Get the current path to avoid issues with file locations
#here = os.path.dirname(os.path.realpath(__file__))
resultsDF = pd.DataFrame()
for setIdx in range(0, len(setPathList)):
# Get power channels for all components in the configuration
# Get the component list from Attributes.xml file
setAttrFile = open(os.path.join(setPathList[setIdx], self.projectName + setNameList[setIdx] + 'Attributes.xml'), 'r')
setAttrXML = setAttrFile.read()
setAttrFile.close()
setAttrSoup = bs(setAttrXML, 'xml')
setAttrVal = setAttrSoup.setupAttributeValues.setupValue.get('value')
components = setAttrVal.split(' ')[0].split(',')
for component in components:
try:
ncChannel = readNCFile(os.path.join(setPathList[setIdx], 'Run0/OutputData/', component + 'P' + setNameList[setIdx] + 'Run0.nc'))
resultsDF[component + 'Time' + '.' + str(setIdx)] = pd.Series(np.asarray(ncChannel.time))
resultsDF[component + 'P' + '.' + str(setIdx)] = pd.Series(np.asarray(ncChannel.value))
except Exception:
pass
# Well also extract the list of generators used from the component list (needed for fuel calcs)
genList = list()
for component in components:
if component[0:3] == 'gen':
genList.append(component)
resultsMetaInfo = pd.DataFrame()
resultsMetaInfo['setPathList'] = setPathList
resultsMetaInfo['setNameList'] = setNameList
resultsMetaInfo['genList'] = pd.Series(genList)
resultsMetaInfo['snippetNum'] = pd.Series(len(setPathList))
# The following parameters are added for test fitness function use.
resultsMetaInfo['minESSPPa'] = self.minESSPPa
resultsMetaInfo['maxESSPPa'] = self.maxESSPPa
resultsMetaInfo['minESSEPa'] = self.minESSEPa
resultsMetaInfo['maxESSEPa'] = self.maxESSEPa
resultsMetaInfo['ESSPPa'] = self.essPPa
resultsMetaInfo['ESSEPa'] = self.essEPa
return resultsDF, resultsMetaInfo
def plotProgress(self, fitnessLog, fitnessBest, essPPaBest, essEPaBest, otherInformation):
x = np.asarray(fitnessLog['essPPa'][0:fitnessLog.shape[0]])
x = x[np.isfinite(x)]
y = np.asarray(fitnessLog['essPPa'][0:fitnessLog.shape[0]])
y = y[np.isfinite(y)]
# Potential well plotting
xMin = otherInformation['minESSPPa'][0]
xMax = otherInformation['maxESSPPa'][0]
xMid = ((xMax - xMin) / 2) + xMin
xCoord = np.linspace(xMin, xMax, 100)
yMin = otherInformation['minESSEPa'][0]
yMax = otherInformation['maxESSEPa'][0]
yMid = ((yMax - yMin) / 2) + yMin
yCoord = np.linspace(yMin, yMax, 100)
XC, YC = np.meshgrid(xCoord, yCoord)
# Use a parabolic well as the fitness, with goal to minimize
fitnessWell = (XC - xMid) ** (2 * int(np.log10(YC) + 1)) + (YC - yMid) ** (2 * int(np.log10(XC) + 1))
| [
"numpy.log10",
"numpy.isfinite",
"Model.Operational.generateRuns.generateRuns",
"numpy.asarray",
"Analyzer.PerformanceAnalyzers.getFuelUse.getFuelUse",
"Analyzer.DataRetrievers.getDataSubsets.getDataSubsets",
"numpy.linspace",
"Optimizer.OptimizationBoundaryCalculators.getOptimizationBoundaries.getOpt... | [((3051, 3118), 'os.path.join', 'os.path.join', (['self.thisPath', '"""../../GBSProjects/"""', 'self.projectName'], {}), "(self.thisPath, '../../GBSProjects/', self.projectName)\n", (3063, 3118), False, 'import os\n'), ((3434, 3504), 'os.path.join', 'os.path.join', (['self.rootProjectPath', '"""InputData/Setup/"""', 'configFileName'], {}), "(self.rootProjectPath, 'InputData/Setup/', configFileName)\n", (3446, 3504), False, 'import os\n'), ((3638, 3662), 'bs4.BeautifulSoup', 'bs', (['configFileXML', '"""xml"""'], {}), "(configFileXML, 'xml')\n", (3640, 3662), True, 'from bs4 import BeautifulSoup as bs\n'), ((4475, 4526), 'Analyzer.DataRetrievers.getBasecase.getBasecase', 'getBasecase', (['self.projectName', 'self.rootProjectPath'], {}), '(self.projectName, self.rootProjectPath)\n', (4486, 4526), False, 'from Analyzer.DataRetrievers.getBasecase import getBasecase\n'), ((5151, 5285), 'Optimizer.OptimizationBoundaryCalculators.getOptimizationBoundaries.getOptimizationBoundaries', 'getOptimizationBoundaries', (['self.boundaryMethod', 'self.time', 'self.firmLoadP', 'self.varLoadP', 'self.firmGenP', 'self.varGenP', 'constraints'], {}), '(self.boundaryMethod, self.time, self.firmLoadP,\n self.varLoadP, self.firmGenP, self.varGenP, constraints)\n', (5176, 5285), False, 'from Optimizer.OptimizationBoundaryCalculators.getOptimizationBoundaries import getOptimizationBoundaries\n'), ((5401, 5492), 'pandas.DataFrame', 'pd.DataFrame', (["{'time': self.time, 'firmLoadP': self.firmLoadP, 'varGenP': self.varGenP}"], {}), "({'time': self.time, 'firmLoadP': self.firmLoadP, 'varGenP':\n self.varGenP})\n", (5413, 5492), True, 'import pandas as pd\n'), ((5563, 5635), 'Analyzer.DataRetrievers.getDataSubsets.getDataSubsets', 'getDataSubsets', (['reductionInput', 'self.dataReductionMethod'], {'otherInputs': '[]'}), '(reductionInput, self.dataReductionMethod, otherInputs=[])\n', (5577, 5635), False, 'from 
Analyzer.DataRetrievers.getDataSubsets import getDataSubsets\n'), ((23878, 23952), 'os.path.join', 'os.path.join', (['here', '"""../GBSModel/Resources/Setup/projectSetAttributes.xml"""'], {}), "(here, '../GBSModel/Resources/Setup/projectSetAttributes.xml')\n", (23890, 23952), False, 'import os\n'), ((24162, 24200), 'bs4.BeautifulSoup', 'bs', (['setAttributeTemplateFileXML', '"""xml"""'], {}), "(setAttributeTemplateFileXML, 'xml')\n", (24164, 24200), True, 'from bs4 import BeautifulSoup as bs\n'), ((25772, 25831), 'os.path.join', 'os.path.join', (['self.rootProjectPath', "('OutputData/' + setName)"], {}), "(self.rootProjectPath, 'OutputData/' + setName)\n", (25784, 25831), False, 'import os\n'), ((25840, 25857), 'os.mkdir', 'os.mkdir', (['setPath'], {}), '(setPath)\n', (25848, 25857), False, 'import os\n'), ((25945, 25976), 'os.path.join', 'os.path.join', (['setPath', 'filename'], {}), '(setPath, filename)\n', (25957, 25976), False, 'import os\n'), ((26808, 26877), 'os.path.join', 'os.path.join', (['self.rootProjectPath', '"""InputData/Setup/"""', 'setupFileName'], {}), "(self.rootProjectPath, 'InputData/Setup/', setupFileName)\n", (26820, 26877), False, 'import os\n'), ((27005, 27028), 'bs4.BeautifulSoup', 'bs', (['setupFileXML', '"""xml"""'], {}), "(setupFileXML, 'xml')\n", (27007, 27028), True, 'from bs4 import BeautifulSoup as bs\n'), ((27720, 27734), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (27732, 27734), True, 'import pandas as pd\n'), ((29105, 29119), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (29117, 29119), True, 'import pandas as pd\n'), ((29263, 29281), 'pandas.Series', 'pd.Series', (['genList'], {}), '(genList)\n', (29272, 29281), True, 'import pandas as pd\n'), ((29891, 29946), 'numpy.asarray', 'np.asarray', (["fitnessLog['essPPa'][0:fitnessLog.shape[0]]"], {}), "(fitnessLog['essPPa'][0:fitnessLog.shape[0]])\n", (29901, 29946), True, 'import numpy as np\n'), ((29989, 30044), 'numpy.asarray', 'np.asarray', 
(["fitnessLog['essPPa'][0:fitnessLog.shape[0]]"], {}), "(fitnessLog['essPPa'][0:fitnessLog.shape[0]])\n", (29999, 30044), True, 'import numpy as np\n'), ((30267, 30295), 'numpy.linspace', 'np.linspace', (['xMin', 'xMax', '(100)'], {}), '(xMin, xMax, 100)\n', (30278, 30295), True, 'import numpy as np\n'), ((30452, 30480), 'numpy.linspace', 'np.linspace', (['yMin', 'yMax', '(100)'], {}), '(yMin, yMax, 100)\n', (30463, 30480), True, 'import numpy as np\n'), ((30499, 30526), 'numpy.meshgrid', 'np.meshgrid', (['xCoord', 'yCoord'], {}), '(xCoord, yCoord)\n', (30510, 30526), True, 'import numpy as np\n'), ((2953, 2979), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2969, 2979), False, 'import os\n'), ((6266, 6351), 'Analyzer.PerformanceAnalyzers.getPrimaryREContribution.getPrimaryREContribution', 'getPrimaryREContribution', (['self.time', 'self.firmLoadP', 'self.firmGenP', 'self.varGenP'], {}), '(self.time, self.firmLoadP, self.firmGenP, self.varGenP\n )\n', (6290, 6351), False, 'from Analyzer.PerformanceAnalyzers.getPrimaryREContribution import getPrimaryREContribution\n'), ((23689, 23715), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (23705, 23715), False, 'import os\n'), ((28153, 28174), 'bs4.BeautifulSoup', 'bs', (['setAttrXML', '"""xml"""'], {}), "(setAttrXML, 'xml')\n", (28155, 28174), True, 'from bs4 import BeautifulSoup as bs\n'), ((29961, 29975), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (29972, 29975), True, 'import numpy as np\n'), ((30059, 30073), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (30070, 30073), True, 'import numpy as np\n'), ((6760, 6854), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'genFleet', 'columns': "['fuelCurve_pPu', 'fuelCurve_massFlow', 'POutMaxPa']"}), "(index=genFleet, columns=['fuelCurve_pPu', 'fuelCurve_massFlow',\n 'POutMaxPa'])\n", (6772, 6854), True, 'import pandas as pd\n'), ((7570, 7619), 
'Analyzer.PerformanceAnalyzers.getFuelUse.getFuelUse', 'getFuelUse', (['self.allGen', 'self.fuelCurveDataPoints'], {}), '(self.allGen, self.fuelCurveDataPoints)\n', (7580, 7619), False, 'from Analyzer.PerformanceAnalyzers.getFuelUse import getFuelUse\n'), ((10590, 10601), 'time.time', 'time.time', ([], {}), '()\n', (10599, 10601), False, 'import time\n'), ((11337, 11351), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11349, 11351), True, 'import pandas as pd\n'), ((12949, 12997), 'pandas.concat', 'pd.concat', (['[self.resultsDF, firmLoadsDF]'], {'axis': '(1)'}), '([self.resultsDF, firmLoadsDF], axis=1)\n', (12958, 12997), True, 'import pandas as pd\n'), ((13090, 13214), 'Optimizer.FitnessFunctions.getFitness.getFitness', 'getFitness', (['self.optimizationObjective', 'self.rootProjectPath', 'self.resultsDF', 'self.abbrevDatasetWeights', 'resultsMetaInfo'], {}), '(self.optimizationObjective, self.rootProjectPath, self.resultsDF,\n self.abbrevDatasetWeights, resultsMetaInfo)\n', (13100, 13214), False, 'from Optimizer.FitnessFunctions.getFitness import getFitness\n'), ((20361, 20386), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (20384, 20386), True, 'import numpy as np\n'), ((20454, 20479), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (20477, 20479), True, 'import numpy as np\n'), ((21338, 21363), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (21361, 21363), True, 'import numpy as np\n'), ((21436, 21461), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (21459, 21461), True, 'import numpy as np\n'), ((27952, 28048), 'os.path.join', 'os.path.join', (['setPathList[setIdx]', "(self.projectName + setNameList[setIdx] + 'Attributes.xml')"], {}), "(setPathList[setIdx], self.projectName + setNameList[setIdx] +\n 'Attributes.xml')\n", (27964, 28048), False, 'import os\n'), ((6919, 7012), 'os.path.join', 'os.path.join', (['self.rootProjectPath', 
'"""InputData/Components/"""', "(genString + 'Descriptor.xml')"], {}), "(self.rootProjectPath, 'InputData/Components/', genString +\n 'Descriptor.xml')\n", (6931, 7012), False, 'import os\n'), ((7156, 7177), 'bs4.BeautifulSoup', 'bs', (['genFileXML', '"""xml"""'], {}), "(genFileXML, 'xml')\n", (7158, 7177), True, 'from bs4 import BeautifulSoup as bs\n'), ((9646, 9671), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (9669, 9671), True, 'import numpy as np\n'), ((9949, 9974), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (9972, 9974), True, 'import numpy as np\n'), ((12089, 12110), 'Model.Operational.generateRuns.generateRuns', 'generateRuns', (['setPath'], {}), '(setPath)\n', (12101, 12110), False, 'from Model.Operational.generateRuns import generateRuns\n'), ((12175, 12197), 'Model.Operational.runSimulation.runSimulation', 'runSimulation', (['setPath'], {}), '(setPath)\n', (12188, 12197), False, 'from Model.Operational.runSimulation import runSimulation\n'), ((28425, 28533), 'os.path.join', 'os.path.join', (['setPathList[setIdx]', '"""Run0/OutputData/"""', "(component + 'P' + setNameList[setIdx] + 'Run0.nc')"], {}), "(setPathList[setIdx], 'Run0/OutputData/', component + 'P' +\n setNameList[setIdx] + 'Run0.nc')\n", (28437, 28533), False, 'import os\n'), ((28613, 28639), 'numpy.asarray', 'np.asarray', (['ncChannel.time'], {}), '(ncChannel.time)\n', (28623, 28639), True, 'import numpy as np\n'), ((28720, 28747), 'numpy.asarray', 'np.asarray', (['ncChannel.value'], {}), '(ncChannel.value)\n', (28730, 28747), True, 'import numpy as np\n'), ((19386, 19418), 'numpy.asarray', 'np.asarray', (["(fl['essPPa'] - pBest)"], {}), "(fl['essPPa'] - pBest)\n", (19396, 19418), True, 'import numpy as np\n'), ((19424, 19456), 'numpy.asarray', 'np.asarray', (["(fl['essEPa'] - eBest)"], {}), "(fl['essEPa'] - eBest)\n", (19434, 19456), True, 'import numpy as np\n'), ((30643, 30655), 'numpy.log10', 'np.log10', (['YC'], {}), '(YC)\n', 
(30651, 30655), True, 'import numpy as np\n'), ((30688, 30700), 'numpy.log10', 'np.log10', (['XC'], {}), '(XC)\n', (30696, 30700), True, 'import numpy as np\n'), ((14134, 14159), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (14157, 14159), True, 'import numpy as np\n'), ((14537, 14562), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (14560, 14562), True, 'import numpy as np\n')] |
'''
Source codes for Python Machine Learning By Example 3rd Edition (Packt Publishing)
Chapter 10 Discovering Underlying Topics in the Newsgroups Dataset with Clustering and Topic Modeling
Author: Yuxi (Hayden) Liu (<EMAIL>)
'''
from sklearn import datasets
from sklearn.cluster import KMeans
import numpy as np
from matplotlib import pyplot as plt
# Elbow-method demo: fit KMeans for k = 1..6 on the iris data and plot SSE vs k.
iris = datasets.load_iris()
X = iris.data
y = iris.target

k_list = list(range(1, 7))
sse_list = [0] * len(k_list)

for k_ind, k in enumerate(k_list):
    kmeans = KMeans(n_clusters=k, random_state=42)
    kmeans.fit(X)
    clusters = kmeans.labels_
    centroids = kmeans.cluster_centers_

    # SSE (inertia) is the sum of SQUARED distances of the samples to their closest
    # cluster centre; the original summed the unsquared per-cluster norms instead.
    sse = 0
    for i in range(k):
        cluster_i = np.where(clusters == i)
        sse += np.sum((X[cluster_i] - centroids[i]) ** 2)

    print('k={}, SSE={}'.format(k, sse))
    sse_list[k_ind] = sse

plt.plot(k_list, sse_list)
plt.show()
| [
"sklearn.datasets.load_iris",
"sklearn.cluster.KMeans",
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.linalg.norm",
"matplotlib.pyplot.show"
] | [((361, 381), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (379, 381), False, 'from sklearn import datasets\n'), ((856, 882), 'matplotlib.pyplot.plot', 'plt.plot', (['k_list', 'sse_list'], {}), '(k_list, sse_list)\n', (864, 882), True, 'from matplotlib import pyplot as plt\n'), ((883, 893), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (891, 893), True, 'from matplotlib import pyplot as plt\n'), ((519, 556), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'random_state': '(42)'}), '(n_clusters=k, random_state=42)\n', (525, 556), False, 'from sklearn.cluster import KMeans\n'), ((701, 724), 'numpy.where', 'np.where', (['(clusters == i)'], {}), '(clusters == i)\n', (709, 724), True, 'import numpy as np\n'), ((741, 784), 'numpy.linalg.norm', 'np.linalg.norm', (['(X[cluster_i] - centroids[i])'], {}), '(X[cluster_i] - centroids[i])\n', (755, 784), True, 'import numpy as np\n')] |
import numpy as np
import numba as nb
from scipy.stats import rankdata
from functools import partial
import os
import sys
from sklearn.base import BaseEstimator, TransformerMixin
from julia import Julia
jl = Julia(compiled_modules=False)
class ECRelieff(BaseEstimator, TransformerMixin):
    """sklearn compatible implementation of the Evaporative Cooling Relief algorithm.

    Reference: Evaporative cooling feature selection for genotypic data involving interactions.
    Author: <NAME>
    """

    def __init__(self, n_features_to_select=10, m=-1, k=5,
                 dist_func=lambda x1, x2: np.sum(np.abs(x1 - x2), 1), learned_metric_func=None):
        self.n_features_to_select = n_features_to_select  # number of features to select
        self.m = m  # example sample size (-1 means "use all examples")
        self.k = k  # the k parameter (neighbours per class)
        self.dist_func = dist_func  # distance function
        self.learned_metric_func = learned_metric_func  # learned distance function

        # Use functions written in the Julia programming language to update feature
        # weights and to perform the evaporative cooling ranking.
        script_path = os.path.abspath(__file__)
        self._update_weights = jl.include(script_path[:script_path.rfind('/')] + "/julia-utils/update_weights_relieff2.jl")
        self._perform_ec_ranking = jl.include(script_path[:script_path.rfind('/')] + "/julia-utils/ec_ranking.jl")

    def fit(self, data, target):
        """
        Rank features using the Evaporative Cooling ReliefF feature selection algorithm.

        Args:
            data : Array[np.float64] -- matrix of examples
            target : Array[np.int] -- vector of target values of examples

        Returns:
            self
        """
        # Run ECRelief feature selection algorithm.
        if self.learned_metric_func is not None:
            self.rank = self._ecrelieff(data, target, self.m, self.k, self.dist_func,
                                        learned_metric_func=self.learned_metric_func(data, target))
        else:
            self.rank = self._ecrelieff(data, target, self.m, self.k, self.dist_func)
        return self

    def transform(self, data):
        """
        Perform feature selection using computed feature ranks.

        Args:
            data : Array[np.float64] -- matrix of examples on which to perform feature selection

        Returns:
            Array[np.float64] -- result of performing feature selection
        """
        # Select the n_features_to_select best-ranked features and return them.
        msk = self.rank <= self.n_features_to_select  # Compute mask.
        return data[:, msk]  # Perform feature selection.

    def fit_transform(self, data, target):
        """
        Compute ranks of features and perform feature selection.

        Args:
            data : Array[np.float64] -- matrix of examples on which to perform feature selection
            target : Array[np.int] -- vector of target values of examples

        Returns:
            Array[np.float64] -- result of performing feature selection
        """
        self.fit(data, target)  # Fit data
        return self.transform(data)  # Perform feature selection

    def _entropy(self, distribution):
        """
        Compute the Shannon entropy (natural log) of a distribution.

        Args:
            distribution : Array[np.float] or Array[np.int]

        Returns:
            np.float -- entropy of the distribution
        """
        _, counts_classes = np.unique(distribution, return_counts=True)
        # NOTE: np.float/np.int aliases were removed in NumPy 1.24; use builtins.
        p_classes = counts_classes / float(distribution.size)
        # Shannon entropy is H = -sum(p*log(p)). The previous implementation returned the
        # negated value; the sign cancels between this method and _joint_entropy_pair inside
        # _scaled_mutual_information, so fixing both leaves the computed ranking unchanged.
        return -np.sum(p_classes * np.log(p_classes))

    def _joint_entropy_pair(self, distribution1, distribution2):
        """
        Compute the joint Shannon entropy of two distributions.

        Args:
            distribution1 : Array[np.float] or Array[np.int] -- first distribution
            distribution2 : Array[np.float] or Array[np.int] -- second distribution

        Returns:
            np.float -- joint entropy of the distributions
        """
        _, counts_pairs = np.unique(np.vstack((distribution1, distribution2)), axis=1, return_counts=True)
        p_pairs = counts_pairs / float(distribution1.size)
        # See the sign note in _entropy.
        return -np.sum(p_pairs * np.log(p_pairs))

    def _scaled_mutual_information(self, distribution1, distribution2):
        """
        Compute scaled mutual information between two distributions:
        (H(X) + H(Y) - H(X,Y)) / H(X).

        Args:
            distribution1 : Array[np.float] or Array[np.int] -- first distribution
            distribution2 : Array[np.float] or Array[np.int] -- second distribution

        Returns:
            np.float -- scaled mutual information of the distributions
        """
        return (self._entropy(distribution1) +
                self._entropy(distribution2) - self._joint_entropy_pair(distribution1, distribution2)) / self._entropy(distribution1)

    def _mu_vals(self, data, target):
        # Scaled mutual information of each feature column with the target vector.
        mu_vals = np.empty(data.shape[1], dtype=float)
        for idx, col in enumerate(data.T):
            mu_vals[idx] = self._scaled_mutual_information(col, target)
        return mu_vals

    def _ecrelieff(self, data, target, m, k, dist_func, **kwargs):
        """Compute feature ranks using the Evaporative Cooling ReliefF algorithm.

        Args:
            data : Array[np.float64] -- matrix containing examples' data as rows
            target : Array[np.int] -- matrix containing the example's target variable value
            m : int -- Sample size to use when evaluating the feature scores (-1 for all examples)
            k : int -- Number of closest examples from each class to use
            dist_func : Callable[[Array[np.float64], Array[np.float64]], Array[np.float64]] -- function for evaluating
                distances between examples. The function should accept two examples or two matrices of examples and
                return the distances.
            **kwargs: can contain argument with key 'learned_metric_func' that maps to a function that accepts a
                distance function and indices of two training examples and returns the distance between the examples
                in the learned metric space.

        Returns:
            Array[np.int] -- feature ranks produced by the evaporative cooling ranking
        """
        # Initialize feature weights.
        weights = np.zeros(data.shape[1], dtype=float)
        # Get indices of examples in sample.
        idx_sampled = np.random.choice(np.arange(data.shape[0]), data.shape[0] if m == -1 else m, replace=False)
        # Set m if currently set to signal value -1.
        m = data.shape[0] if m == -1 else m
        # Get maximum and minimum values of each feature.
        max_f_vals = np.amax(data, 0)
        min_f_vals = np.amin(data, 0)
        # Get all unique classes.
        classes = np.unique(target)
        # Get probabilities of classes in training set.
        p_classes = np.vstack(np.unique(target, return_counts=True)).T.astype(float)
        p_classes[:, 1] = p_classes[:, 1] / np.sum(p_classes[:, 1])
        # Compute mu values.
        mu_vals = self._mu_vals(data, target)
        # Go over sampled examples' indices.
        for idx in idx_sampled:
            # Get next example.
            e = data[idx, :]
            # Get index of next sampled example in group of examples with same class.
            idx_class = idx - np.sum(target[:idx] != target[idx])
            # If keyword argument with keyword 'learned_metric_func' exists...
            if 'learned_metric_func' in kwargs:
                # Partially apply distance function.
                dist = partial(kwargs['learned_metric_func'], dist_func, int(idx))
                # Compute distances to examples from same class in learned metric space.
                distances_same = dist(np.where(target == target[idx])[0])
                # Set distance of sampled example to itself to infinity.
                distances_same[idx_class] = np.inf
                # Find k closest examples from same class.
                idxs_closest_same = np.argpartition(distances_same, k)[:k]
                closest_same = (data[target == target[idx], :])[idxs_closest_same, :]
            else:
                # Find k nearest examples from same class.
                distances_same = dist_func(e, data[target == target[idx], :])
                # Set distance of sampled example to itself to infinity.
                distances_same[idx_class] = np.inf
                # Find closest examples from same class.
                idxs_closest_same = np.argpartition(distances_same, k)[:k]
                closest_same = (data[target == target[idx], :])[idxs_closest_same, :]
            # Allocate matrix template for getting nearest examples from other classes.
            closest_other = np.empty((k * (len(classes) - 1), data.shape[1]), dtype=float)
            # Initialize pointer for adding examples to template matrix.
            top_ptr = 0
            for cl in classes:  # Go over classes different than the one of current sampled example.
                if cl != target[idx]:
                    # If keyword argument with keyword 'learned_metric_func' exists...
                    if 'learned_metric_func' in kwargs:
                        # get closest k examples with class cl if using learned distance metric.
                        distances_cl = dist(np.where(target == cl)[0])
                    else:
                        # Get closest k examples with class cl
                        distances_cl = dist_func(e, data[target == cl, :])
                    # Get indices of closest examples from class cl
                    idx_closest_cl = np.argpartition(distances_cl, k)[:k]
                    # Add found closest examples to matrix.
                    closest_other[top_ptr:top_ptr + k, :] = (data[target == cl, :])[idx_closest_cl, :]
                    top_ptr = top_ptr + k
            # Get probabilities of classes not equal to class of sampled example.
            p_classes_other = p_classes[p_classes[:, 0] != target[idx], 1]
            # Compute diff sum weights for closest examples from different class.
            p_weights = p_classes_other / (1 - p_classes[p_classes[:, 0] == target[idx], 1])
            weights_mult = np.repeat(p_weights, k)  # Weights multiplier vector
            # ------ weights update (delegated to Julia for speed) ------
            weights = np.array(self._update_weights(data, e[np.newaxis], closest_same, closest_other, weights[np.newaxis],
                                          weights_mult[np.newaxis].T, m, k, max_f_vals[np.newaxis], min_f_vals[np.newaxis]))
        # Perform evaporative cooling feature selection.
        rank = self._perform_ec_ranking(data, target, weights, mu_vals)
        # Return feature ranks.
        return rank
| [
"numpy.abs",
"numpy.float",
"numpy.unique",
"numpy.amin",
"numpy.repeat",
"numpy.argpartition",
"numpy.where",
"numpy.log",
"julia.Julia",
"numpy.sum",
"numpy.zeros",
"numpy.int",
"numpy.empty",
"numpy.vstack",
"os.path.abspath",
"numpy.amax",
"numpy.arange"
] | [((211, 240), 'julia.Julia', 'Julia', ([], {'compiled_modules': '(False)'}), '(compiled_modules=False)\n', (216, 240), False, 'from julia import Julia\n'), ((1211, 1236), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1226, 1236), False, 'import os\n'), ((3497, 3540), 'numpy.unique', 'np.unique', (['distribution'], {'return_counts': '(True)'}), '(distribution, return_counts=True)\n', (3506, 3540), True, 'import numpy as np\n'), ((4952, 4991), 'numpy.empty', 'np.empty', (['data.shape[1]'], {'dtype': 'np.float'}), '(data.shape[1], dtype=np.float)\n', (4960, 4991), True, 'import numpy as np\n'), ((6347, 6386), 'numpy.zeros', 'np.zeros', (['data.shape[1]'], {'dtype': 'np.float'}), '(data.shape[1], dtype=np.float)\n', (6355, 6386), True, 'import numpy as np\n'), ((6732, 6748), 'numpy.amax', 'np.amax', (['data', '(0)'], {}), '(data, 0)\n', (6739, 6748), True, 'import numpy as np\n'), ((6770, 6786), 'numpy.amin', 'np.amin', (['data', '(0)'], {}), '(data, 0)\n', (6777, 6786), True, 'import numpy as np\n'), ((6840, 6857), 'numpy.unique', 'np.unique', (['target'], {}), '(target)\n', (6849, 6857), True, 'import numpy as np\n'), ((3576, 3603), 'numpy.float', 'np.float', (['distribution.size'], {}), '(distribution.size)\n', (3584, 3603), True, 'import numpy as np\n'), ((4098, 4139), 'numpy.vstack', 'np.vstack', (['(distribution1, distribution2)'], {}), '((distribution1, distribution2))\n', (4107, 4139), True, 'import numpy as np\n'), ((4200, 4228), 'numpy.float', 'np.float', (['distribution1.size'], {}), '(distribution1.size)\n', (4208, 4228), True, 'import numpy as np\n'), ((6472, 6496), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (6481, 6496), True, 'import numpy as np\n'), ((7049, 7072), 'numpy.sum', 'np.sum', (['p_classes[:, 1]'], {}), '(p_classes[:, 1])\n', (7055, 7072), True, 'import numpy as np\n'), ((10340, 10363), 'numpy.repeat', 'np.repeat', (['p_weights', 'k'], {}), '(p_weights, k)\n', (10349, 10363), 
True, 'import numpy as np\n'), ((645, 660), 'numpy.abs', 'np.abs', (['(x1 - x2)'], {}), '(x1 - x2)\n', (651, 660), True, 'import numpy as np\n'), ((3637, 3654), 'numpy.log', 'np.log', (['p_classes'], {}), '(p_classes)\n', (3643, 3654), True, 'import numpy as np\n'), ((4259, 4274), 'numpy.log', 'np.log', (['p_pairs'], {}), '(p_pairs)\n', (4265, 4274), True, 'import numpy as np\n'), ((7406, 7441), 'numpy.sum', 'np.sum', (['(target[:idx] != target[idx])'], {}), '(target[:idx] != target[idx])\n', (7412, 7441), True, 'import numpy as np\n'), ((7707, 7718), 'numpy.int', 'np.int', (['idx'], {}), '(idx)\n', (7713, 7718), True, 'import numpy as np\n'), ((8105, 8139), 'numpy.argpartition', 'np.argpartition', (['distances_same', 'k'], {}), '(distances_same, k)\n', (8120, 8139), True, 'import numpy as np\n'), ((8604, 8638), 'numpy.argpartition', 'np.argpartition', (['distances_same', 'k'], {}), '(distances_same, k)\n', (8619, 8638), True, 'import numpy as np\n'), ((6946, 6983), 'numpy.unique', 'np.unique', (['target'], {'return_counts': '(True)'}), '(target, return_counts=True)\n', (6955, 6983), True, 'import numpy as np\n'), ((7848, 7879), 'numpy.where', 'np.where', (['(target == target[idx])'], {}), '(target == target[idx])\n', (7856, 7879), True, 'import numpy as np\n'), ((9728, 9760), 'numpy.argpartition', 'np.argpartition', (['distances_cl', 'k'], {}), '(distances_cl, k)\n', (9743, 9760), True, 'import numpy as np\n'), ((9433, 9455), 'numpy.where', 'np.where', (['(target == cl)'], {}), '(target == cl)\n', (9441, 9455), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Export DPN
suggest run as python export.py --file_name [filename] --file_format [file format] --checkpoint_path [ckpt path]
"""
import argparse
import numpy as np
from mindspore import Tensor, context
from mindspore.train.serialization import export, load_checkpoint
from src.resnet import resnet152 as resnet
from src.config import config5 as config
# Command line interface for exporting a trained ResNet152 checkpoint.
parser = argparse.ArgumentParser(description="resnet152 export ")
parser.add_argument("--device_id", type=int, default=0, help="Device id")
parser.add_argument("--ckpt_file", type=str, required=True, help="checkpoint file path")
parser.add_argument("--dataset", type=str, default="imagenet2012", help="Dataset, either cifar10 or imagenet2012")
parser.add_argument("--width", type=int, default=224, help="input width")
parser.add_argument("--height", type=int, default=224, help="input height")
parser.add_argument("--file_name", type=str, default='resnet152', help="output file name")
# Help strings below were copy-pasted as "Device id"/"target" in the original; fixed.
parser.add_argument("--file_format", type=str, choices=['AIR', 'ONNX', 'MINDIR'], default='AIR',
                    help="output file format")
parser.add_argument("--device_target", type=str, choices=['Ascend', 'GPU', 'CPU'], default='Ascend',
                    help="device target")
args = parser.parse_args()

context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)

if __name__ == "__main__":
    target = args.device_target
    # device_id is only meaningful on Ascend/CPU targets.
    if target != "GPU":
        context.set_context(device_id=args.device_id)
    # Define the net and load the trained parameters into it.
    network = resnet(class_num=config.class_num)
    load_checkpoint(args.ckpt_file, net=network)
    network.set_train(False)
    # Dummy NCHW input used only to trace the graph for export.
    input_data = Tensor(np.zeros([1, 3, args.height, args.width]).astype(np.float32))
    export(network, input_data, file_name=args.file_name, file_format=args.file_format)
| [
"argparse.ArgumentParser",
"mindspore.context.set_context",
"mindspore.train.serialization.load_checkpoint",
"numpy.zeros",
"src.resnet.resnet152",
"mindspore.train.serialization.export"
] | [((1031, 1087), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""resnet152 export """'}), "(description='resnet152 export ')\n", (1054, 1087), False, 'import argparse\n'), ((1866, 1944), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'args.device_target'}), '(mode=context.GRAPH_MODE, device_target=args.device_target)\n', (1885, 1944), False, 'from mindspore import Tensor, context\n'), ((2114, 2148), 'src.resnet.resnet152', 'resnet', ([], {'class_num': 'config.class_num'}), '(class_num=config.class_num)\n', (2120, 2148), True, 'from src.resnet import resnet152 as resnet\n'), ((2166, 2210), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['args.ckpt_file'], {'net': 'network'}), '(args.ckpt_file, net=network)\n', (2181, 2210), False, 'from mindspore.train.serialization import export, load_checkpoint\n'), ((2330, 2418), 'mindspore.train.serialization.export', 'export', (['network', 'input_data'], {'file_name': 'args.file_name', 'file_format': 'args.file_format'}), '(network, input_data, file_name=args.file_name, file_format=args.\n file_format)\n', (2336, 2418), False, 'from mindspore.train.serialization import export, load_checkpoint\n'), ((2037, 2082), 'mindspore.context.set_context', 'context.set_context', ([], {'device_id': 'args.device_id'}), '(device_id=args.device_id)\n', (2056, 2082), False, 'from mindspore import Tensor, context\n'), ((2264, 2305), 'numpy.zeros', 'np.zeros', (['[1, 3, args.height, args.width]'], {}), '([1, 3, args.height, args.width])\n', (2272, 2305), True, 'import numpy as np\n')] |
#######################################################
#Reference: https://github.com/experiencor/keras-yolo3#
#######################################################
import numpy as np
import os
import cv2
from scipy.special import expit
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, c = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.c = c
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def get_box(self):
return (self.xmin, self.ymin, self.xmax, self.ymax)
def _sigmoid(x):
return expit(x)
def _softmax(x, axis=-1):
x = x - np.amax(x, axis, keepdims=True)
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
def preprocess_input(img, w, h):
ih, iw, _ = img.shape
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image_data = cv2.resize(img, (nw,nh))
new_image = np.full((h,w,3), (128,128,128), dtype='uint8')
new_image[(h-nh)//2 : (h+nh)//2, (w-nw)//2:(w+nw)//2] = image_data
image_data = new_image.astype('float')/255.0
image_data = image_data[np.newaxis, ...]
return image_data
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4] = _sigmoid(netout[..., 4])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i // grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[row, col, b, 4]
if(objectness <= obj_thresh): continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[row,col,b,:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[row,col,b,5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
boxes.append(box)
return boxes
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return []
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
return boxes
def get_yolo_boxes(model, images, net_h, net_w, anchors, obj_thresh, nms_thresh):
image_h, image_w, _ = images[0].shape
nb_images = len(images)
batch_input = np.zeros((nb_images, net_h, net_w, 3))
# preprocess the input
for i in range(nb_images):
batch_input[i] = preprocess_input(images[i], net_h, net_w)
# run the prediction
batch_output = model.predict_on_batch(batch_input)
batch_boxes = [None]*nb_images
for i in range(nb_images):
yolos = [batch_output[0][i], batch_output[1][i], batch_output[2][i]]
boxes = []
# decode the output of the network
for j in range(len(yolos)):
yolo_anchors = anchors[(2-j)*6:(3-j)*6] # config['model']['anchors']
boxes += decode_netout(yolos[j], yolo_anchors, obj_thresh, net_h, net_w)
# correct the sizes of the bounding boxes
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
batch_boxes[i] = boxes
return batch_boxes
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
new_h = net_w
new_w = (image_w*net_h)/image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
return boxes
def compute_overlap(a, b):
"""
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
Parameters
----------
a: (N, 4) ndarray of float
b: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])
ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])
iw = np.maximum(iw, 0)
ih = np.maximum(ih, 0)
ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih
ua = np.maximum(ua, np.finfo(float).eps)
intersection = iw * ih
return intersection / ua
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
| [
"numpy.argmax",
"scipy.special.expit",
"numpy.exp",
"numpy.zeros",
"numpy.argsort",
"numpy.expand_dims",
"numpy.finfo",
"numpy.maximum",
"numpy.full",
"cv2.resize",
"numpy.amax"
] | [((950, 958), 'scipy.special.expit', 'expit', (['x'], {}), '(x)\n', (955, 958), False, 'from scipy.special import expit\n'), ((1040, 1049), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1046, 1049), True, 'import numpy as np\n'), ((1243, 1268), 'cv2.resize', 'cv2.resize', (['img', '(nw, nh)'], {}), '(img, (nw, nh))\n', (1253, 1268), False, 'import cv2\n'), ((1282, 1332), 'numpy.full', 'np.full', (['(h, w, 3)', '(128, 128, 128)'], {'dtype': '"""uint8"""'}), "((h, w, 3), (128, 128, 128), dtype='uint8')\n", (1289, 1332), True, 'import numpy as np\n'), ((3718, 3756), 'numpy.zeros', 'np.zeros', (['(nb_images, net_h, net_w, 3)'], {}), '((nb_images, net_h, net_w, 3))\n', (3726, 3756), True, 'import numpy as np\n'), ((6006, 6023), 'numpy.maximum', 'np.maximum', (['iw', '(0)'], {}), '(iw, 0)\n', (6016, 6023), True, 'import numpy as np\n'), ((6033, 6050), 'numpy.maximum', 'np.maximum', (['ih', '(0)'], {}), '(ih, 0)\n', (6043, 6050), True, 'import numpy as np\n'), ((998, 1029), 'numpy.amax', 'np.amax', (['x', 'axis'], {'keepdims': '(True)'}), '(x, axis, keepdims=True)\n', (1005, 1029), True, 'import numpy as np\n'), ((3096, 3144), 'numpy.argsort', 'np.argsort', (['[(-box.classes[c]) for box in boxes]'], {}), '([(-box.classes[c]) for box in boxes])\n', (3106, 3144), True, 'import numpy as np\n'), ((626, 649), 'numpy.argmax', 'np.argmax', (['self.classes'], {}), '(self.classes)\n', (635, 649), True, 'import numpy as np\n'), ((5792, 5823), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 2]'], {'axis': '(1)'}), '(a[:, 2], axis=1)\n', (5806, 5823), True, 'import numpy as np\n'), ((5847, 5873), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 0]', '(1)'], {}), '(a[:, 0], 1)\n', (5861, 5873), True, 'import numpy as np\n'), ((5904, 5935), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 3]'], {'axis': '(1)'}), '(a[:, 3], axis=1)\n', (5918, 5935), True, 'import numpy as np\n'), ((5959, 5985), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 1]', '(1)'], {}), '(a[:, 1], 1)\n', (5973, 
5985), True, 'import numpy as np\n'), ((6061, 6126), 'numpy.expand_dims', 'np.expand_dims', (['((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]))'], {'axis': '(1)'}), '((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1)\n', (6075, 6126), True, 'import numpy as np\n'), ((6169, 6184), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (6177, 6184), True, 'import numpy as np\n'), ((2546, 2555), 'numpy.exp', 'np.exp', (['w'], {}), '(w)\n', (2552, 2555), True, 'import numpy as np\n'), ((2621, 2630), 'numpy.exp', 'np.exp', (['h'], {}), '(h)\n', (2627, 2630), True, 'import numpy as np\n')] |
import numpy as np
import cv2
img = cv2.imread('images/plane_noisy.png', cv2.IMREAD_GRAYSCALE)
img_out = img.copy()
height = img.shape[0]
width = img.shape[1]
for i in np.arange(3, height-3):
for j in np.arange(3, width-3):
neighbors = []
for k in np.arange(-3, 4):
for l in np.arange(-3, 4):
a = img.item(i+k, j+l)
neighbors.append(a)
neighbors.sort()
median = neighbors[24]
b = median
img_out.itemset((i,j), b)
cv2.imwrite('images/filter_median.jpg', img_out)
cv2.imshow('image',img_out)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"cv2.imwrite",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.waitKey",
"numpy.arange",
"cv2.imread"
] | [((40, 98), 'cv2.imread', 'cv2.imread', (['"""images/plane_noisy.png"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('images/plane_noisy.png', cv2.IMREAD_GRAYSCALE)\n", (50, 98), False, 'import cv2\n'), ((180, 204), 'numpy.arange', 'np.arange', (['(3)', '(height - 3)'], {}), '(3, height - 3)\n', (189, 204), True, 'import numpy as np\n'), ((534, 582), 'cv2.imwrite', 'cv2.imwrite', (['"""images/filter_median.jpg"""', 'img_out'], {}), "('images/filter_median.jpg', img_out)\n", (545, 582), False, 'import cv2\n'), ((586, 614), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img_out'], {}), "('image', img_out)\n", (596, 614), False, 'import cv2\n'), ((615, 629), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (626, 629), False, 'import cv2\n'), ((631, 654), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (652, 654), False, 'import cv2\n'), ((218, 241), 'numpy.arange', 'np.arange', (['(3)', '(width - 3)'], {}), '(3, width - 3)\n', (227, 241), True, 'import numpy as np\n'), ((283, 299), 'numpy.arange', 'np.arange', (['(-3)', '(4)'], {}), '(-3, 4)\n', (292, 299), True, 'import numpy as np\n'), ((323, 339), 'numpy.arange', 'np.arange', (['(-3)', '(4)'], {}), '(-3, 4)\n', (332, 339), True, 'import numpy as np\n')] |
# coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import itertools
import subprocess
import io
import re
import numpy as np
import pandas as pd
from monty.io import zopen
from monty.os.path import which
from monty.tempfile import ScratchDir
from pymatgen.core.periodic_table import get_el_sp
from veidt.abstract import Describer
from veidt.potential.processing import pool_from
class BispectrumCoefficients(Describer):
"""
Bispectrum coefficients to describe the local environment of each
atom in a quantitative way.
"""
def __init__(self, rcutfac, twojmax, element_profile, rfac0=0.99363,
rmin0=0, diagonalstyle=3, quadratic=False, pot_fit=False):
"""
Args:
rcutfac (float): Global cutoff distance.
twojmax (int): Band limit for bispectrum components.
element_profile (dict): Parameters (cutoff factor 'r' and
weight 'w') related to each element, e.g.,
{'Na': {'r': 0.3, 'w': 0.9},
'Cl': {'r': 0.7, 'w': 3.0}}
rfac0 (float): Parameter in distance to angle conversion.
Set between (0, 1), default to 0.99363.
rmin0 (float): Parameter in distance to angle conversion.
Default to 0.
diagonalstyle (int): Parameter defining which bispectrum
components are generated. Choose among 0, 1, 2 and 3,
default to 3.
quadratic (bool): Whether including quadratic terms.
Default to False.
pot_fit (bool): Whether to output in potential fitting
format. Default to False, i.e., returning the bispectrum
coefficients for each site.
"""
from veidt.potential.lammps.calcs import SpectralNeighborAnalysis
self.calculator = SpectralNeighborAnalysis(rcutfac, twojmax,
element_profile,
rfac0, rmin0,
diagonalstyle,
quadratic)
self.rcutfac = rcutfac
self.twojmax = twojmax
self.element_profile = element_profile
self.rfac0 = rfac0
self.rmin0 = rmin0
self.diagonalstyle = diagonalstyle
self.elements = sorted(element_profile.keys(),
key=lambda sym: get_el_sp(sym).X)
self.quadratic = quadratic
self.pot_fit = pot_fit
@property
def subscripts(self):
"""
The subscripts (2j1, 2j2, 2j) of all bispectrum components
involved.
"""
return self.calculator.get_bs_subscripts(self.twojmax,
self.diagonalstyle)
def describe(self, structure, include_stress=False):
"""
Returns data for one input structure.
Args:
structure (Structure): Input structure.
include_stress (bool): Whether to include stress descriptors.
Returns:
DataFrame.
In regular format, the columns are the subscripts of
bispectrum components, while indices are the site indices
in input structure.
In potential fitting format, to match the sequence of
[energy, f_x[0], f_y[0], ..., f_z[N], v_xx, ..., v_xy], the
bispectrum coefficients are summed up by each specie and
normalized by a factor of No. of atoms (in the 1st row),
while the derivatives in each direction are preserved, with
the columns being the subscripts of bispectrum components
with each specie and the indices being
[0, '0_x', '0_y', ..., 'N_z'], and the virial contributions
(in GPa) are summed up for all atoms for each component in
the sequence of ['xx', 'yy', 'zz', 'yz', 'xz', 'xy'].
"""
return self.describe_all([structure], include_stress).xs(0, level='input_index')
def describe_all(self, structures, include_stress=False):
"""
Returns data for all input structures in a single DataFrame.
Args:
structures (Structure): Input structures as a list.
include_stress (bool): Whether to include stress descriptors.
Returns:
DataFrame with indices of input list preserved. To retrieve
the data for structures[i], use
df.xs(i, level='input_index').
"""
columns = list(map(lambda s: '-'.join(['%d' % i for i in s]),
self.subscripts))
if self.quadratic:
columns += list(map(lambda s: '-'.join(['%d%d%d' % (i, j, k)
for i, j, k in s]),
itertools.combinations_with_replacement(self.subscripts, 2)))
raw_data = self.calculator.calculate(structures)
def process(output, combine, idx, include_stress):
b, db, vb, e = output
df = pd.DataFrame(b, columns=columns)
if combine:
df_add = pd.DataFrame({'element': e, 'n': np.ones(len(e))})
df_b = df_add.join(df)
n_atoms = df_b.shape[0]
b_by_el = [df_b[df_b['element'] == e] for e in self.elements]
sum_b = [df[df.columns[1:]].sum(axis=0) for df in b_by_el]
hstack_b = pd.concat(sum_b, keys=self.elements)
hstack_b = hstack_b.to_frame().T / n_atoms
hstack_b.fillna(0, inplace=True)
dbs = np.split(db, len(self.elements), axis=1)
dbs = np.hstack([np.insert(d.reshape(-1, len(columns)),
0, 0, axis=1) for d in dbs])
db_index = ['%d_%s' % (i, d)
for i in df_b.index for d in 'xyz']
df_db = pd.DataFrame(dbs, index=db_index,
columns=hstack_b.columns)
if include_stress:
vbs = np.split(vb.sum(axis=0), len(self.elements))
vbs = np.hstack([np.insert(v.reshape(-1, len(columns)),
0, 0, axis=1) for v in vbs])
volume = structures[idx].volume
vbs = vbs / volume * 160.21766208 # from eV to GPa
vb_index = ['xx', 'yy', 'zz', 'yz', 'xz', 'xy']
df_vb = pd.DataFrame(vbs, index=vb_index,
columns=hstack_b.columns)
df = pd.concat([hstack_b, df_db, df_vb])
else:
df = pd.concat([hstack_b, df_db])
return df
df = pd.concat([process(d, self.pot_fit, i, include_stress)
for i, d in enumerate(raw_data)],
keys=range(len(raw_data)), names=["input_index", None])
return df
class AGNIFingerprints(Describer):
"""
Fingerprints for AGNI (Adaptive, Generalizable and Neighborhood
Informed) force field. Elemental systems only.
"""
def __init__(self, r_cut, etas):
"""
Args:
r_cut (float): Cutoff distance.
etas (numpy.array): All eta parameters in 1D array.
"""
self.r_cut = r_cut
self.etas = etas
def describe(self, structure):
"""
Calculate fingerprints for all sites in a structure.
Args:
structure (Structure): Input structure.
Returns:
DataFrame.
"""
all_neighbors = structure.get_all_neighbors(self.r_cut)
fingerprints = []
for i, an in enumerate(all_neighbors):
center = structure[i].coords
coords, distances = zip(*[(site.coords, d) for (site, d) in an])
v = (np.array(coords) - center)[:, :, None]
d = np.array(distances)[:, None, None]
e = np.array(self.etas)[None, None, :]
cf = 0.5 * (np.cos(np.pi * d / self.r_cut) + 1)
fpi = np.sum(v / d * np.exp(-(d / e) ** 2) * cf, axis=0)
fingerprints.append(fpi)
index = ["%d_%s" % (i, d) for i in range(len(structure))
for d in "xyz"]
df = pd.DataFrame(np.vstack(fingerprints), index=index,
columns=self.etas)
return df
def describe_all(self, structures):
return pd.concat([self.describe(s) for s in structures],
keys=range(len(structures)),
names=['input_index', None])
class SOAPDescriptor(Describer):
"""
Smooth Overlap of Atomic Position (SOAP) descriptor.
"""
def __init__(self, cutoff, l_max=8, n_max=8, atom_sigma=0.5):
"""
Args:
cutoff (float): Cutoff radius.
l_max (int): The band limit of spherical harmonics basis function.
Default to 8.
n_max (int): The number of radial basis function. Default to 8.
atom_sigma (float): The width of gaussian atomic density.
Default to 0.5.
"""
from veidt.potential.soap import SOAPotential
self.cutoff = cutoff
self.l_max = l_max
self.n_max = n_max
self.atom_sigma = atom_sigma
self.operator = SOAPotential()
def describe(self, structure):
"""
Returns data for one input structure.
Args:
structure (Structure): Input structure.
"""
if not which('quip'):
raise RuntimeError("quip has not been found.\n",
"Please refer to https://github.com/libAtoms/QUIP for ",
"further detail.")
atoms_filename = 'structure.xyz'
exe_command = ['quip']
exe_command.append('atoms_filename={}'.format(atoms_filename))
descriptor_command = ['soap']
descriptor_command.append("cutoff" + '=' + '{}'.format(self.cutoff))
descriptor_command.append("l_max" + '=' + '{}'.format(self.l_max))
descriptor_command.append("n_max" + '=' + '{}'.format(self.n_max))
descriptor_command.append("atom_sigma" + '=' + '{}'.format(self.atom_sigma))
atomic_numbers = [str(num) for num in np.unique(structure.atomic_numbers)]
n_Z = len(atomic_numbers)
n_species = len(atomic_numbers)
Z = '{' + '{}'.format(' '.join(atomic_numbers)) + '}'
species_Z = '{' + '{}'.format(' '.join(atomic_numbers)) + '}'
descriptor_command.append("n_Z" + '=' + str(n_Z))
descriptor_command.append("Z" + '=' + Z)
descriptor_command.append("n_species" + '=' + str(n_species))
descriptor_command.append("species_Z" + '=' + species_Z)
exe_command.append("descriptor_str=" + "{" +
"{}".format(' '.join(descriptor_command)) + "}")
with ScratchDir('.'):
atoms_filename = self.operator.write_cfgs(filename=atoms_filename,
cfg_pool=pool_from([structure]))
descriptor_output = 'output'
p = subprocess.Popen(exe_command, stdout=open(descriptor_output, 'w'))
stdout = p.communicate()[0]
rc = p.returncode
if rc != 0:
error_msg = 'QUIP exited with return code %d' % rc
msg = stdout.decode("utf-8").split('\n')[:-1]
try:
error_line = [i for i, m in enumerate(msg)
if m.startswith('ERROR')][0]
error_msg += ', '.join([e for e in msg[error_line:]])
except Exception:
error_msg += msg[-1]
raise RuntimeError(error_msg)
with zopen(descriptor_output, 'rt') as f:
lines = f.read()
descriptor_pattern = re.compile('DESC(.*?)\n', re.S)
descriptors = pd.DataFrame([np.array(c.split(), dtype=np.float)
for c in descriptor_pattern.findall(lines)])
return descriptors
def describe_all(self, structures):
return pd.concat([self.describe(s) for s in structures],
keys=range(len(structures)),
names=['input_index', None])
class BPSymmetryFunctions(Describer):
"""
Behler-Parrinello symmetry function descriptor.
"""
def __init__(self, dmin, cutoff, num_symm2, a_etas):
"""
Args:
dmin (float): The minimum interatomic distance accepted.
cutoff (float): Cutoff radius.
num_symm2 (int): The number of radial symmetry functions.
a_etas (list): The choice of η' in angular symmetry functions.
"""
from veidt.potential.nnp import NNPotential
self.dmin = dmin
self.cutoff = cutoff
self.num_symm2 = num_symm2
self.a_etas = a_etas
self.operator = NNPotential()
def describe(self, structure):
"""
Returns data for one input structure.
Args:
structure (Structure): Input structure.
"""
if not which('RuNNer'):
raise RuntimeError("RuNNer has not been found.")
if not which("RuNNerMakesym"):
raise RuntimeError("RuNNerMakesym has not been found.")
def read_functions_data(filename):
"""
Read structure features from file.
Args:
filename (str): The functions file to be read.
"""
with zopen(filename, 'rt') as f:
lines = f.read()
block_pattern = re.compile(r'(\n\s+\d+\n|^\s+\d+\n)(.+?)(?=\n\s+\d+\n|$)', re.S)
points_features = []
for (num_neighbor, block) in block_pattern.findall(lines):
point_features = pd.DataFrame([feature.split()[1:]
for feature in block.split('\n')[:-1]],
dtype=np.float32)
points_features.append(point_features)
points_features = pd.concat(points_features,
keys=range(len(block_pattern.findall(lines))),
names=['point_index', None])
return points_features
dmin = sorted(set(structure.distance_matrix.ravel()))[1]
r_etas = self.operator.generate_eta(dmin=self.dmin,
r_cut=self.cutoff,
num_symm2=self.num_symm2)
atoms_filename = 'input.data'
mode_output = 'mode.out'
with ScratchDir('.'):
atoms_filename = self.operator.write_cfgs(filename=atoms_filename,
cfg_pool=pool_from([structure]))
input_filename = self.operator.write_input(mode=1, r_cut=self.cutoff,
r_etas=r_etas, a_etas=self.a_etas,
scale_feature=False)
p = subprocess.Popen(['RuNNer'], stdout=open(mode_output, 'w'))
stdout = p.communicate()[0]
descriptors = read_functions_data('function.data')
return pd.DataFrame(descriptors)
def describe_all(self, structures):
return pd.concat([self.describe(s) for s in structures],
keys=range(len(structures)),
names=['input_index', None])
| [
"veidt.potential.lammps.calcs.SpectralNeighborAnalysis",
"numpy.unique",
"veidt.potential.soap.SOAPotential",
"veidt.potential.nnp.NNPotential",
"monty.os.path.which",
"re.compile",
"monty.io.zopen",
"itertools.combinations_with_replacement",
"numpy.exp",
"numpy.array",
"veidt.potential.processi... | [((1991, 2094), 'veidt.potential.lammps.calcs.SpectralNeighborAnalysis', 'SpectralNeighborAnalysis', (['rcutfac', 'twojmax', 'element_profile', 'rfac0', 'rmin0', 'diagonalstyle', 'quadratic'], {}), '(rcutfac, twojmax, element_profile, rfac0, rmin0,\n diagonalstyle, quadratic)\n', (2015, 2094), False, 'from veidt.potential.lammps.calcs import SpectralNeighborAnalysis\n'), ((9591, 9605), 'veidt.potential.soap.SOAPotential', 'SOAPotential', ([], {}), '()\n', (9603, 9605), False, 'from veidt.potential.soap import SOAPotential\n'), ((13264, 13277), 'veidt.potential.nnp.NNPotential', 'NNPotential', ([], {}), '()\n', (13275, 13277), False, 'from veidt.potential.nnp import NNPotential\n'), ((15624, 15649), 'pandas.DataFrame', 'pd.DataFrame', (['descriptors'], {}), '(descriptors)\n', (15636, 15649), True, 'import pandas as pd\n'), ((5254, 5286), 'pandas.DataFrame', 'pd.DataFrame', (['b'], {'columns': 'columns'}), '(b, columns=columns)\n', (5266, 5286), True, 'import pandas as pd\n'), ((8533, 8556), 'numpy.vstack', 'np.vstack', (['fingerprints'], {}), '(fingerprints)\n', (8542, 8556), True, 'import numpy as np\n'), ((9794, 9807), 'monty.os.path.which', 'which', (['"""quip"""'], {}), "('quip')\n", (9799, 9807), False, 'from monty.os.path import which\n'), ((11178, 11193), 'monty.tempfile.ScratchDir', 'ScratchDir', (['"""."""'], {}), "('.')\n", (11188, 11193), False, 'from monty.tempfile import ScratchDir\n'), ((12172, 12203), 're.compile', 're.compile', (['"""DESC(.*?)\n"""', 're.S'], {}), "('DESC(.*?)\\n', re.S)\n", (12182, 12203), False, 'import re\n'), ((13466, 13481), 'monty.os.path.which', 'which', (['"""RuNNer"""'], {}), "('RuNNer')\n", (13471, 13481), False, 'from monty.os.path import which\n'), ((13559, 13581), 'monty.os.path.which', 'which', (['"""RuNNerMakesym"""'], {}), "('RuNNerMakesym')\n", (13564, 13581), False, 'from monty.os.path import which\n'), ((13963, 14037), 're.compile', 're.compile', 
(['"""(\\\\n\\\\s+\\\\d+\\\\n|^\\\\s+\\\\d+\\\\n)(.+?)(?=\\\\n\\\\s+\\\\d+\\\\n|$)"""', 're.S'], {}), "('(\\\\n\\\\s+\\\\d+\\\\n|^\\\\s+\\\\d+\\\\n)(.+?)(?=\\\\n\\\\s+\\\\d+\\\\n|$)', re.S)\n", (13973, 14037), False, 'import re\n'), ((14997, 15012), 'monty.tempfile.ScratchDir', 'ScratchDir', (['"""."""'], {}), "('.')\n", (15007, 15012), False, 'from monty.tempfile import ScratchDir\n'), ((5646, 5682), 'pandas.concat', 'pd.concat', (['sum_b'], {'keys': 'self.elements'}), '(sum_b, keys=self.elements)\n', (5655, 5682), True, 'import pandas as pd\n'), ((6131, 6190), 'pandas.DataFrame', 'pd.DataFrame', (['dbs'], {'index': 'db_index', 'columns': 'hstack_b.columns'}), '(dbs, index=db_index, columns=hstack_b.columns)\n', (6143, 6190), True, 'import pandas as pd\n'), ((8157, 8176), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (8165, 8176), True, 'import numpy as np\n'), ((8208, 8227), 'numpy.array', 'np.array', (['self.etas'], {}), '(self.etas)\n', (8216, 8227), True, 'import numpy as np\n'), ((10549, 10584), 'numpy.unique', 'np.unique', (['structure.atomic_numbers'], {}), '(structure.atomic_numbers)\n', (10558, 10584), True, 'import numpy as np\n'), ((12068, 12098), 'monty.io.zopen', 'zopen', (['descriptor_output', '"""rt"""'], {}), "(descriptor_output, 'rt')\n", (12073, 12098), False, 'from monty.io import zopen\n'), ((13873, 13894), 'monty.io.zopen', 'zopen', (['filename', '"""rt"""'], {}), "(filename, 'rt')\n", (13878, 13894), False, 'from monty.io import zopen\n'), ((5023, 5082), 'itertools.combinations_with_replacement', 'itertools.combinations_with_replacement', (['self.subscripts', '(2)'], {}), '(self.subscripts, 2)\n', (5062, 5082), False, 'import itertools\n'), ((6706, 6765), 'pandas.DataFrame', 'pd.DataFrame', (['vbs'], {'index': 'vb_index', 'columns': 'hstack_b.columns'}), '(vbs, index=vb_index, columns=hstack_b.columns)\n', (6718, 6765), True, 'import pandas as pd\n'), ((6832, 6867), 'pandas.concat', 'pd.concat', (['[hstack_b, df_db, 
df_vb]'], {}), '([hstack_b, df_db, df_vb])\n', (6841, 6867), True, 'import pandas as pd\n'), ((6915, 6943), 'pandas.concat', 'pd.concat', (['[hstack_b, df_db]'], {}), '([hstack_b, df_db])\n', (6924, 6943), True, 'import pandas as pd\n'), ((8102, 8118), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (8110, 8118), True, 'import numpy as np\n'), ((8267, 8297), 'numpy.cos', 'np.cos', (['(np.pi * d / self.r_cut)'], {}), '(np.pi * d / self.r_cut)\n', (8273, 8297), True, 'import numpy as np\n'), ((11337, 11359), 'veidt.potential.processing.pool_from', 'pool_from', (['[structure]'], {}), '([structure])\n', (11346, 11359), False, 'from veidt.potential.processing import pool_from\n'), ((15156, 15178), 'veidt.potential.processing.pool_from', 'pool_from', (['[structure]'], {}), '([structure])\n', (15165, 15178), False, 'from veidt.potential.processing import pool_from\n'), ((2603, 2617), 'pymatgen.core.periodic_table.get_el_sp', 'get_el_sp', (['sym'], {}), '(sym)\n', (2612, 2617), False, 'from pymatgen.core.periodic_table import get_el_sp\n'), ((8336, 8357), 'numpy.exp', 'np.exp', (['(-(d / e) ** 2)'], {}), '(-(d / e) ** 2)\n', (8342, 8357), True, 'import numpy as np\n')] |
import batoid
import numpy as np
from test_helpers import timer, do_pickle
@timer
def test_sag():
import random
random.seed(57)
for i in range(100):
plane = batoid.Plane()
for j in range(10):
x = random.gauss(0.0, 1.0)
y = random.gauss(0.0, 1.0)
result = plane.sag(x, y)
np.testing.assert_allclose(result, 0.0)
# Check that it returned a scalar float and not an array
assert isinstance(result, float)
# Check vectorization
x = np.random.normal(0.0, 1.0, size=(10, 10))
y = np.random.normal(0.0, 1.0, size=(10, 10))
np.testing.assert_allclose(plane.sag(x, y), 0.0)
# Make sure non-unit stride arrays also work
np.testing.assert_allclose(plane.sag(x[::5,::2], y[::5,::2]), 0.0)
do_pickle(plane)
@timer
def test_intersect():
import random
random.seed(577)
for i in range(100):
plane = batoid.Plane()
for j in range(10):
x = random.gauss(0.0, 1.0)
y = random.gauss(0.0, 1.0)
# If we shoot rays straight up, then it's easy to predict the
# intersection points.
r0 = batoid.Ray(x, y, -1000, 0, 0, 1, 0)
r = plane.intersect(r0)
np.testing.assert_allclose(r.r[0], x)
np.testing.assert_allclose(r.r[1], y)
np.testing.assert_allclose(r.r[2], plane.sag(x, y), rtol=0, atol=1e-9)
@timer
def test_intersect_vectorized():
import random
random.seed(5772)
r0s = [batoid.Ray([random.gauss(0.0, 0.1),
random.gauss(0.0, 0.1),
random.gauss(10.0, 0.1)],
[random.gauss(0.0, 0.1),
random.gauss(0.0, 0.1),
random.gauss(-1.0, 0.1)],
random.gauss(0.0, 0.1))
for i in range(1000)]
r0s = batoid.RayVector(r0s)
for i in range(100):
plane = batoid.Plane()
r1s = plane.intersect(r0s)
r2s = batoid.RayVector([plane.intersect(r0) for r0 in r0s])
assert r1s == r2s
@timer
def test_fail():
plane = batoid.Plane()
ray = batoid.Ray([0,0,-1], [0,0,-1])
ray = plane.intersect(ray)
assert ray.failed
ray = batoid.Ray([0,0,-1], [0,0,-1])
plane.intersectInPlace(ray)
assert ray.failed
if __name__ == '__main__':
test_sag()
test_intersect()
test_intersect_vectorized()
test_fail()
| [
"numpy.random.normal",
"batoid.Ray",
"batoid.Plane",
"test_helpers.do_pickle",
"numpy.testing.assert_allclose",
"batoid.RayVector",
"random.seed",
"random.gauss"
] | [((122, 137), 'random.seed', 'random.seed', (['(57)'], {}), '(57)\n', (133, 137), False, 'import random\n'), ((904, 920), 'random.seed', 'random.seed', (['(577)'], {}), '(577)\n', (915, 920), False, 'import random\n'), ((1529, 1546), 'random.seed', 'random.seed', (['(5772)'], {}), '(5772)\n', (1540, 1546), False, 'import random\n'), ((1922, 1943), 'batoid.RayVector', 'batoid.RayVector', (['r0s'], {}), '(r0s)\n', (1938, 1943), False, 'import batoid\n'), ((2168, 2182), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (2180, 2182), False, 'import batoid\n'), ((2193, 2227), 'batoid.Ray', 'batoid.Ray', (['[0, 0, -1]', '[0, 0, -1]'], {}), '([0, 0, -1], [0, 0, -1])\n', (2203, 2227), False, 'import batoid\n'), ((2288, 2322), 'batoid.Ray', 'batoid.Ray', (['[0, 0, -1]', '[0, 0, -1]'], {}), '([0, 0, -1], [0, 0, -1])\n', (2298, 2322), False, 'import batoid\n'), ((179, 193), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (191, 193), False, 'import batoid\n'), ((545, 586), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)'], {'size': '(10, 10)'}), '(0.0, 1.0, size=(10, 10))\n', (561, 586), True, 'import numpy as np\n'), ((599, 640), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)'], {'size': '(10, 10)'}), '(0.0, 1.0, size=(10, 10))\n', (615, 640), True, 'import numpy as np\n'), ((834, 850), 'test_helpers.do_pickle', 'do_pickle', (['plane'], {}), '(plane)\n', (843, 850), False, 'from test_helpers import timer, do_pickle\n'), ((962, 976), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (974, 976), False, 'import batoid\n'), ((1986, 2000), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (1998, 2000), False, 'import batoid\n'), ((238, 260), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (250, 260), False, 'import random\n'), ((277, 299), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (289, 299), False, 'import random\n'), ((349, 388), 'numpy.testing.assert_allclose', 
'np.testing.assert_allclose', (['result', '(0.0)'], {}), '(result, 0.0)\n', (375, 388), True, 'import numpy as np\n'), ((1021, 1043), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1033, 1043), False, 'import random\n'), ((1060, 1082), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1072, 1082), False, 'import random\n'), ((1210, 1245), 'batoid.Ray', 'batoid.Ray', (['x', 'y', '(-1000)', '(0)', '(0)', '(1)', '(0)'], {}), '(x, y, -1000, 0, 0, 1, 0)\n', (1220, 1245), False, 'import batoid\n'), ((1294, 1331), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r.r[0]', 'x'], {}), '(r.r[0], x)\n', (1320, 1331), True, 'import numpy as np\n'), ((1344, 1381), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r.r[1]', 'y'], {}), '(r.r[1], y)\n', (1370, 1381), True, 'import numpy as np\n'), ((1855, 1877), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (1867, 1877), False, 'import random\n'), ((1570, 1592), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (1582, 1592), False, 'import random\n'), ((1617, 1639), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (1629, 1639), False, 'import random\n'), ((1664, 1687), 'random.gauss', 'random.gauss', (['(10.0)', '(0.1)'], {}), '(10.0, 0.1)\n', (1676, 1687), False, 'import random\n'), ((1713, 1735), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (1725, 1735), False, 'import random\n'), ((1760, 1782), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (1772, 1782), False, 'import random\n'), ((1807, 1830), 'random.gauss', 'random.gauss', (['(-1.0)', '(0.1)'], {}), '(-1.0, 0.1)\n', (1819, 1830), False, 'import random\n')] |
import numpy as np
import pandas as pd
import dask.dataframe as dd
import dask.array as da
import matplotlib.pyplot as plt
import seaborn as sns
from dask.diagnostics import ProgressBar
ProgressBar().register()
dists = np.load('saved_tensors/java-huge-bpe-2000/test_proj_dist_cache.npy')
ranks = np.load('saved_tensors/java-huge-bpe-2000/test_proj_rank_cache.npy')
pkg_locality = np.load('saved_tensors/java-huge-bpe-2000/test_pkg_locality_cache.npy')
proj_locality = np.load('saved_tensors/java-huge-bpe-2000/test_proj_locality_cache.npy')
correctness = np.load('saved_tensors/java-huge-bpe-2000/test_proj_correctness_cache.npy')
dists = da.from_array(dists)
ranks = da.from_array(ranks)
pkg_locality = da.from_array(pkg_locality)
proj_locality = da.from_array(proj_locality)
correctness = da.from_array(correctness)
project_local_only = (proj_locality == 1) & (pkg_locality == 0).astype('int8')
locality = project_local_only + 2*pkg_locality
arr_all = da.stack([dists, ranks, locality, correctness], axis=1)
ddf = dd.from_array(arr_all, columns=['dist', 'rank', 'locality', 'correctness'])
ddf = ddf[ddf['dist'] >= -400]
# print(ddf.groupby('locality').count().compute())
# exit()
print('df build complete')
ddf = ddf.sort_values(['dist']).reset_index(drop=True)
ddf['overall_rank'] = ddf.groupby('locality').cumcount()
# dist - acc
# bins = [-10000] + list(range(-500, 0, 10)) + [0]
bins = list(np.arange(0, 2727431522, 100000))
ddf['rank_range'] = ddf['overall_rank'].map_partitions(pd.cut, bins)
dist_grouped = ddf.groupby(['locality', 'rank_range']).mean().reset_index().compute()
dist_grouped.to_csv('figures/java_dist_correctness.csv')
fig, ax = plt.subplots(figsize=(8, 4))
sns.scatterplot(x='dist_right', y='correctness', hue='locality', data=dist_grouped, s=5)
plt.savefig('figures/java_avg_correctness_by_dist_1024.pdf')
exit()
# rank - acc
grouped = ddf.groupby(['locality', 'rank']).mean().reset_index().compute()
grouped.to_csv('figures/java_rank.csv')
fig, ax = plt.subplots(figsize=(8, 4))
sns.scatterplot(x='rank', y='correctness', hue='locality', data=grouped, s=5)
plt.savefig('figures/java_avg_correctness_by_rank_1024.pdf')
# rank - dist
fig, ax = plt.subplots(figsize=(8, 4))
sns.scatterplot(x='rank', y='dist', hue='locality', data=grouped, s=5)
plt.savefig('figures/java_avg_dist_by_rank_1024.pdf')
| [
"dask.array.stack",
"dask.array.from_array",
"matplotlib.pyplot.savefig",
"seaborn.scatterplot",
"dask.diagnostics.ProgressBar",
"numpy.load",
"matplotlib.pyplot.subplots",
"numpy.arange",
"dask.dataframe.from_array"
] | [((220, 288), 'numpy.load', 'np.load', (['"""saved_tensors/java-huge-bpe-2000/test_proj_dist_cache.npy"""'], {}), "('saved_tensors/java-huge-bpe-2000/test_proj_dist_cache.npy')\n", (227, 288), True, 'import numpy as np\n'), ((297, 365), 'numpy.load', 'np.load', (['"""saved_tensors/java-huge-bpe-2000/test_proj_rank_cache.npy"""'], {}), "('saved_tensors/java-huge-bpe-2000/test_proj_rank_cache.npy')\n", (304, 365), True, 'import numpy as np\n'), ((381, 452), 'numpy.load', 'np.load', (['"""saved_tensors/java-huge-bpe-2000/test_pkg_locality_cache.npy"""'], {}), "('saved_tensors/java-huge-bpe-2000/test_pkg_locality_cache.npy')\n", (388, 452), True, 'import numpy as np\n'), ((469, 541), 'numpy.load', 'np.load', (['"""saved_tensors/java-huge-bpe-2000/test_proj_locality_cache.npy"""'], {}), "('saved_tensors/java-huge-bpe-2000/test_proj_locality_cache.npy')\n", (476, 541), True, 'import numpy as np\n'), ((556, 631), 'numpy.load', 'np.load', (['"""saved_tensors/java-huge-bpe-2000/test_proj_correctness_cache.npy"""'], {}), "('saved_tensors/java-huge-bpe-2000/test_proj_correctness_cache.npy')\n", (563, 631), True, 'import numpy as np\n'), ((641, 661), 'dask.array.from_array', 'da.from_array', (['dists'], {}), '(dists)\n', (654, 661), True, 'import dask.array as da\n'), ((670, 690), 'dask.array.from_array', 'da.from_array', (['ranks'], {}), '(ranks)\n', (683, 690), True, 'import dask.array as da\n'), ((706, 733), 'dask.array.from_array', 'da.from_array', (['pkg_locality'], {}), '(pkg_locality)\n', (719, 733), True, 'import dask.array as da\n'), ((750, 778), 'dask.array.from_array', 'da.from_array', (['proj_locality'], {}), '(proj_locality)\n', (763, 778), True, 'import dask.array as da\n'), ((793, 819), 'dask.array.from_array', 'da.from_array', (['correctness'], {}), '(correctness)\n', (806, 819), True, 'import dask.array as da\n'), ((957, 1012), 'dask.array.stack', 'da.stack', (['[dists, ranks, locality, correctness]'], {'axis': '(1)'}), '([dists, ranks, locality, 
correctness], axis=1)\n', (965, 1012), True, 'import dask.array as da\n'), ((1020, 1095), 'dask.dataframe.from_array', 'dd.from_array', (['arr_all'], {'columns': "['dist', 'rank', 'locality', 'correctness']"}), "(arr_all, columns=['dist', 'rank', 'locality', 'correctness'])\n", (1033, 1095), True, 'import dask.dataframe as dd\n'), ((1666, 1694), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (1678, 1694), True, 'import matplotlib.pyplot as plt\n'), ((1695, 1788), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""dist_right"""', 'y': '"""correctness"""', 'hue': '"""locality"""', 'data': 'dist_grouped', 's': '(5)'}), "(x='dist_right', y='correctness', hue='locality', data=\n dist_grouped, s=5)\n", (1710, 1788), True, 'import seaborn as sns\n'), ((1785, 1845), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/java_avg_correctness_by_dist_1024.pdf"""'], {}), "('figures/java_avg_correctness_by_dist_1024.pdf')\n", (1796, 1845), True, 'import matplotlib.pyplot as plt\n'), ((1994, 2022), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (2006, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2023, 2100), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""rank"""', 'y': '"""correctness"""', 'hue': '"""locality"""', 'data': 'grouped', 's': '(5)'}), "(x='rank', y='correctness', hue='locality', data=grouped, s=5)\n", (2038, 2100), True, 'import seaborn as sns\n'), ((2102, 2162), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/java_avg_correctness_by_rank_1024.pdf"""'], {}), "('figures/java_avg_correctness_by_rank_1024.pdf')\n", (2113, 2162), True, 'import matplotlib.pyplot as plt\n'), ((2189, 2217), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (2201, 2217), True, 'import matplotlib.pyplot as plt\n'), ((2218, 2288), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""rank"""', 'y': 
'"""dist"""', 'hue': '"""locality"""', 'data': 'grouped', 's': '(5)'}), "(x='rank', y='dist', hue='locality', data=grouped, s=5)\n", (2233, 2288), True, 'import seaborn as sns\n'), ((2290, 2343), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/java_avg_dist_by_rank_1024.pdf"""'], {}), "('figures/java_avg_dist_by_rank_1024.pdf')\n", (2301, 2343), True, 'import matplotlib.pyplot as plt\n'), ((1406, 1438), 'numpy.arange', 'np.arange', (['(0)', '(2727431522)', '(100000)'], {}), '(0, 2727431522, 100000)\n', (1415, 1438), True, 'import numpy as np\n'), ((186, 199), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (197, 199), False, 'from dask.diagnostics import ProgressBar\n')] |
import cv2
import numpy as np
from pathfinding.domain.coord import Coord
from vision.domain.iCameraCalibration import ICameraCalibration, table_width_mm, table_height_mm, obstacle_height_mm, \
robot_height_mm
from vision.domain.iPlayAreaFinder import IPlayAreaFinder
from vision.domain.image import Image
from vision.infrastructure.cvImageDisplay import CvImageDisplay
class CvCameraCalibration(ICameraCalibration):
def __init__(self, camera_matrix: np.ndarray, distortion_coefficients: np.ndarray,
play_area_finder: IPlayAreaFinder, image: Image) -> None:
self._play_area_finder = play_area_finder
self._image_display = CvImageDisplay()
self._camera_matrix = camera_matrix
self._distortion_coefficients = distortion_coefficients
self._optimized_camera_matrix, self._region_of_interest = \
cv2.getOptimalNewCameraMatrix(self._camera_matrix, self._distortion_coefficients,
image.size, 1, image.size)
self._camera_matrix_inverse = np.linalg.inv(self._optimized_camera_matrix)
self._focal_x: float = self._optimized_camera_matrix[0, 0]
self._focal_y: float = self._optimized_camera_matrix[1, 1]
self._compute_distances(image)
def rectify_image(self, image: Image) -> Image:
rectified_image = image.process(self._process_rectify)
self._image_display.display_debug_image('[OpenCvCameraCalibration] rectified_image', rectified_image.content)
return rectified_image
def _process_rectify(self, image: np.ndarray) -> np.ndarray:
rectified_image = cv2.undistort(image, self._camera_matrix, self._distortion_coefficients, None,
self._optimized_camera_matrix)
region_of_interest_x, region_of_interest_y, roi_width, roi_height = self._region_of_interest
return rectified_image[region_of_interest_y: region_of_interest_y + roi_height,
region_of_interest_x: region_of_interest_x + roi_width, :]
def _compute_distances(self, image: Image) -> None:
table_roi = self._play_area_finder.find(image)
table_distance_x = (table_width_mm * self._focal_x) / table_roi.width
table_distance_y = (table_height_mm * self._focal_y) / table_roi.height
self._table_distance = (table_distance_x + table_distance_y) / 2
self._obstacle_distance = self._table_distance - obstacle_height_mm
self._robot_distance = self._table_distance - robot_height_mm
self._table_real_origin = self._convert_pixel_to_real(table_roi.top_left_corner, self._table_distance)
def convert_table_pixel_to_real(self, pixel: Coord) -> Coord:
return self._convert_object_pixel_to_real(pixel, self._table_distance)
def convert_obstacle_pixel_to_real(self, pixel: Coord) -> Coord:
return self._convert_object_pixel_to_real(pixel, self._obstacle_distance)
def convert_robot_pixel_to_real(self, pixel: Coord) -> Coord:
return self._convert_object_pixel_to_real(pixel, self._robot_distance)
def _convert_pixel_to_real(self, pixel: Coord, distance: float) -> Coord:
pixel_vector = np.array([pixel.x, pixel.y, 1]).transpose()
real_vector = self._camera_matrix_inverse.dot(pixel_vector)
real_vector = np.multiply(real_vector, distance).transpose()
return Coord(int(real_vector[0]), int(real_vector[1]))
def _adjust_real_to_table(self, real: Coord) -> Coord:
return Coord(real.x - self._table_real_origin.x, real.y - self._table_real_origin.y)
def _convert_object_pixel_to_real(self, pixel: Coord, distance: float) -> Coord:
real = self._convert_pixel_to_real(pixel, distance)
return self._adjust_real_to_table(real)
| [
"numpy.multiply",
"vision.infrastructure.cvImageDisplay.CvImageDisplay",
"cv2.undistort",
"pathfinding.domain.coord.Coord",
"cv2.getOptimalNewCameraMatrix",
"numpy.array",
"numpy.linalg.inv"
] | [((665, 681), 'vision.infrastructure.cvImageDisplay.CvImageDisplay', 'CvImageDisplay', ([], {}), '()\n', (679, 681), False, 'from vision.infrastructure.cvImageDisplay import CvImageDisplay\n'), ((870, 983), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['self._camera_matrix', 'self._distortion_coefficients', 'image.size', '(1)', 'image.size'], {}), '(self._camera_matrix, self.\n _distortion_coefficients, image.size, 1, image.size)\n', (899, 983), False, 'import cv2\n'), ((1059, 1103), 'numpy.linalg.inv', 'np.linalg.inv', (['self._optimized_camera_matrix'], {}), '(self._optimized_camera_matrix)\n', (1072, 1103), True, 'import numpy as np\n'), ((1635, 1748), 'cv2.undistort', 'cv2.undistort', (['image', 'self._camera_matrix', 'self._distortion_coefficients', 'None', 'self._optimized_camera_matrix'], {}), '(image, self._camera_matrix, self._distortion_coefficients,\n None, self._optimized_camera_matrix)\n', (1648, 1748), False, 'import cv2\n'), ((3535, 3612), 'pathfinding.domain.coord.Coord', 'Coord', (['(real.x - self._table_real_origin.x)', '(real.y - self._table_real_origin.y)'], {}), '(real.x - self._table_real_origin.x, real.y - self._table_real_origin.y)\n', (3540, 3612), False, 'from pathfinding.domain.coord import Coord\n'), ((3214, 3245), 'numpy.array', 'np.array', (['[pixel.x, pixel.y, 1]'], {}), '([pixel.x, pixel.y, 1])\n', (3222, 3245), True, 'import numpy as np\n'), ((3349, 3383), 'numpy.multiply', 'np.multiply', (['real_vector', 'distance'], {}), '(real_vector, distance)\n', (3360, 3383), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys, os
import argparse
import numpy as np
#import atomsinmolecule as mol
import topology as topo
import math
import pandas as pd
class topologyDiff(object):
def __init__(self, molecule1, molecule2, covRadFactor=1.3):
errors = {}
requirements_for_comparison(molecule1, molecule2)
self.molecule1 = molecule1
self.molecule2 = molecule2
self.topology1 = topo.topology(molecule1, covRadFactor)
self.topology2 = topo.topology(molecule2, covRadFactor)
self.orderedBonds1 = self.topology1.order_convalentBondDistances()
self.orderedBonds2 = self.topology2.order_convalentBondDistances()
#print "\n".join([str(elem) for elem in self.orderedBonds2])
self.orderedAngles1 = self.topology1.order_angles()
self.orderedAngles2 = self.topology2.order_angles()
self.orderedDihedral1 = self.topology1.order_dihedralAngles()
self.orderedDihedral2 = self.topology2.order_dihedralAngles()
self.error_bonds = self.compare_bonds(percentLargest = 1.5)
self.error_angles = self.compare_angles()
self.error_dihedrals = self.compare_dihedralAngles()
# print "error_bonds", self.error_bonds
# print "error_angles", self.error_angles
# print "error_dihedrals", self.error_dihedrals
def compare_bonds(self, percentLargest = -1):
## Keep all data toghether and filter/sort on it
nameCol_i = "index_i"
nameCol_j = "index_j"
nameCol_IDs = "uniquePairID"
nameCol_dist = "distance [A]"
nameCol_dist1 = "Mol1 dist. [A]"
nameCol_dist2 = "Mol2 dist. [A]"
nameCol_errors = "Dist.error [A]"
nameCol_absError = "absError [A]"
# same nb. of bonds?
if len(self.orderedBonds1) != len(self.orderedBonds2):
msg = "Not as many covalents bonds detected in both structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
## error in distance (Angstrom) for each bond
## checking that the unique ID is the same, if not as many bonds, exit with an error
id1 = np.array(self.orderedBonds1[1:])[:,0]
id2 = np.array(self.orderedBonds2[1:])[:,0]
diffIDs = np.sum(np.absolute(np.subtract(id1, id2)))
if diffIDs > 0:
msg = "As many covalents bonds detected, but not between the same atoms comparing structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
## Pandas Dataframe
df1 = pd.DataFrame(self.orderedBonds1[1:], columns=self.orderedBonds1[0])
df2 = pd.DataFrame(self.orderedBonds2[1:], columns=self.orderedBonds2[0])
## convert string to float/int
for header in [nameCol_dist]:
df1[header] = df1[header].astype('float64')
df2[header] = df2[header].astype('float64')
for header in [nameCol_IDs, nameCol_i, nameCol_j]:
df1[header] = df1[header].astype('int')
df2[header] = df2[header].astype('int')
df1 = df1.rename(columns={nameCol_dist:nameCol_dist1})
df2 = df2.rename(columns={nameCol_dist:nameCol_dist2})
df = df1
df[nameCol_dist2] = df2[nameCol_dist2]
df[nameCol_errors] = df[nameCol_dist1] - df[nameCol_dist2]
###df = df.sort([nameCol_errors, nameCol_IDs], ascending=[False,True])
df[nameCol_absError] = df[nameCol_errors].abs()
df = df.sort([nameCol_absError], ascending=[False])
# print df
## STATISTICS
return get_statistics(df, nameCol_errors, unit="angstrom")
def compare_angles(self):
## Keep all data toghether and filter/sort on it
nameCol_IDs = "uniqueID"
nameCol_i = "index_i"
nameCol_j = "index_j"
nameCol_k = "index_k"
nameCol_anglDeg = 'Angle IJK [deg]'
nameCol_anglDeg1 = 'Angle1 IJK [deg]'
nameCol_anglDeg2 = 'Angle2 IJK [deg]'
nameCol_errors = "Angle error [deg]"
nameCol_absError = "absError [deg]"
nameCol_relError = "relError [deg]"
# same nb. of angles?
if len(self.orderedAngles1) != len(self.orderedAngles2):
msg = "Not as many covalents angles detected in both structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
## Pandas Dataframe
df1 = pd.DataFrame(self.orderedAngles1[1:], columns=self.orderedAngles1[0])
df2 = pd.DataFrame(self.orderedAngles2[1:], columns=self.orderedAngles2[0])
## convert string to float/int
for header in [nameCol_IDs, nameCol_i, nameCol_j, nameCol_k]:
df1[header] = df1[header].astype('int')
df2[header] = df2[header].astype('int')
for header in [nameCol_anglDeg]:
df1[header] = df1[header].astype('float64')
df2[header] = df2[header].astype('float64')
df1 = df1.rename(columns={nameCol_anglDeg:nameCol_anglDeg1})
df2 = df2.rename(columns={nameCol_anglDeg:nameCol_anglDeg2})
df = df1
df[nameCol_anglDeg2] = df2[nameCol_anglDeg2]
df[nameCol_errors] = df[nameCol_anglDeg1] - df[nameCol_anglDeg2]
## checking that the unique ID is the same, if not as many angles, exit with an error
diffIDs = pd.DataFrame(df1[nameCol_IDs].values - df2[nameCol_IDs].values).abs().sum()
if diffIDs.values[0] > 0:
msg = "As many covalents angles detected, but not between the same atoms comparing structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
###df = df.sort([nameCol_errors, nameCol_IDs], ascending=[False,True])
df[nameCol_absError] = df[nameCol_errors].abs()
df[nameCol_relError] = df[nameCol_errors].map( lambda x: x if abs(x) < 180. else np.sign(x)*(abs(x)-360.))
df = df.sort([nameCol_relError, nameCol_IDs], ascending=[False,True])
#print pd.DataFrame(d8.values-d7.values)
## STATISTICS
return get_statistics(df, nameCol_relError, unit="degrees")
def compare_dihedralAngles(self):
## Keep all data toghether and filter/sort on it
nameCol_IDs = "uniqueID"
nameCol_i = "index_i"
nameCol_j = "index_j"
nameCol_k = "index_k"
nameCol_l = "index_l"
nameCol_dihedDeg = "Dihedral IJ-KL [deg]"
nameCol_dihedDeg1 = "Dihedral1 IJ-KL [deg]"
nameCol_dihedDeg2 = "Dihedral2 IJ-KL [deg]"
nameCol_errors = "Dihedral angle error [deg]"
nameCol_absError = "absError [deg]"
nameCol_relError = "relError [deg]"
# same nb. of dihedral angles?
if len(self.orderedDihedral1) != len(self.orderedDihedral2):
msg = "Not as many covalents dihedral angles detected in both structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
## Pandas Dataframe
df1 = pd.DataFrame(self.orderedDihedral1[1:], columns=self.orderedDihedral1[0])
df2 = pd.DataFrame(self.orderedDihedral2[1:], columns=self.orderedDihedral2[0])
## convert string to float/int
for header in [nameCol_IDs, nameCol_i, nameCol_j, nameCol_k, nameCol_l]:
df1[header] = df1[header].astype('int')
df2[header] = df2[header].astype('int')
for header in [nameCol_dihedDeg]:
df1[header] = df1[header].astype('float64')
df2[header] = df2[header].astype('float64')
df1 = df1.rename(columns={nameCol_dihedDeg:nameCol_dihedDeg1})
df2 = df2.rename(columns={nameCol_dihedDeg:nameCol_dihedDeg2})
df = df1
df[nameCol_dihedDeg2] = df2[nameCol_dihedDeg2]
df[nameCol_errors] = df[nameCol_dihedDeg1] - df[nameCol_dihedDeg2]
## checking that the unique ID is the same, if not as many angles, exit with an error
diffIDs = pd.DataFrame(df1[nameCol_IDs].values - df2[nameCol_IDs].values).abs().sum()
if diffIDs.values[0] > 0:
msg = "As many covalents dihedral angles detected, but not between the same atoms comparin structures:\n - {}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
df[nameCol_absError] = df[nameCol_errors].abs()
df[nameCol_relError] = df[nameCol_errors].map( lambda x: x if abs(x) < 180. else np.sign(x)*(abs(x)-360.))
df = df.sort([nameCol_relError, nameCol_IDs], ascending=[False,True])
#print pd.DataFrame(d8.values-d7.values)
## STATISTICS
return get_statistics(df, nameCol_relError, unit="degrees")
def get_object(self):
obj = {}
obj["molecule1"] = self.molecule1.get_object()
obj["molecule2"] = self.molecule2.get_object()
# obj["atomEntities"] = [e.get_object() for e in self.atomEntities]
# obj["atomicPairs"] = [p.get_object() for p in self.atomicPairs]
# obj["covalentBonds"] = [b.get_object() for b in self.covalentBonds]
# obj["covalentBondAngles"] = [b.get_object() for b in self.covalentBondAngles]
# obj["covalentDihedralAngles"] = [b.get_object() for b in self.covalentDihedralAngles]
return obj
def __str__(self):
return "COMPARISON OF TOPOLOGIES (summary):\
\n\tmolecules compared:\
\n\t\t- {} ({} atoms)\
\n\t\t- {} ({} atoms)\
\n\tCovalent radius factor: {}\
\n\tCovalents bonds errors:\
\n\t\t- mean: {:-.1e} {}\
\n\t\t- std: {:-.1e} {}\
\n\tCovalents angles errors:\
\n\t\t- mean: {:-.1e} {}\
\n\t\t- std: {:-.1e} {}\
\n\tDihedral angles errors:\
\n\t\t- mean: {:-.1e} {}\
\n\t\t- std: {:-.1e} {}\
".format(self.molecule1.shortname,
self.molecule1.nbAtomsInMolecule,
self.molecule2.shortname,
self.molecule2.nbAtomsInMolecule,
self.topology1.covRadFactor,
self.error_bonds['mean'],self.error_bonds['unit'],
self.error_bonds['stdDev'],self.error_bonds['unit'],
self.error_angles['mean'],self.error_angles['unit'],
self.error_angles['stdDev'],self.error_angles['unit'],
self.error_dihedrals['mean'],self.error_dihedrals['unit'],
self.error_dihedrals['stdDev'],self.error_dihedrals['unit'])
def get_as_JSON(self):
topoComparison = self.get_object()
import json
return json.dumps(topo, sort_keys=True, indent=4)
def requirements_for_comparison(molecule1, molecule2):
msg = ""
## the molecules should have the same atoms, provided in the same order
if molecule1.nbAtomsInMolecule != molecule2.nbAtomsInMolecule:
msg = "Not the same number of atoms comparing:\n-{} and\n-{}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
if molecule1.charge != molecule2.charge:
msg = "Not the same molecular charge comparing:\n-{} and\n-{}".format(molecule1.shortname, molecule2.shortname)
sys.exit(msg)
for atom1, atom2 in zip(molecule1.listAtoms, molecule2.listAtoms):
if atom1.atomSymbol != atom2.atomSymbol:
msg = "Not the same atom symbols: comparing:\n-{} and\n-{}".format(str(atom1), str(atom2))
sys.exit(msg)
if atom1.atomCharge != atom2.atomCharge:
msg = "Not the same atom charge: comparing:\n-{} and\n-{}".format(str(atom1), str(atom2))
sys.exit(msg)
if atom1.unitDistance != atom2.unitDistance:
msg = "Not the same atom unitDistance: comparing:\n-{} and\n-{}".format(str(atom1), str(atom2))
sys.exit(msg)
def read_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("file_mol1",
help="First molecular geometry in .XYZ format.")
parser.add_argument("file_mol2",
help="Second molecular geometry in .XYZ format.")
parser.add_argument('-out', nargs='?', type=argparse.FileType('w'),
default=sys.stdout,
help="optional output filename,\
if not, default is mol1_vs_mol2.top")
parser.add_argument("-crf", "--covRadFactor", type=float,
help="optional covalent radius factor,\
equal to 1 by default")
parser.add_argument("-v", "--verbose", action="store_true",
help="increase output verbosity")
args = parser.parse_args()
return args
def get_statistics(dataFrame, nameData, unit=""):
mean = dataFrame[nameData].mean()
variance = dataFrame[nameData].var()
stdDev = dataFrame[nameData].std()
mad = dataFrame[nameData].mad()
maxAbs = dataFrame[nameData].abs().max()
return {
"unit":unit,
"mean":mean,
"variance":variance,
"stdDev":stdDev,
"mad":mad, ## mean/average absolute deviation
"maxAbs":maxAbs}
def example_valinomycin_pureLinK_vs_LinKwithDF():
# read inputs
# args = read_arguments()
# path_to_file1 = os.path.abspath(args.file_mol1)
# path_to_file2 = os.path.abspath(args.file_mol2)
path_to_file1 = "/home/ctcc2/Documents/CODE-DEV/xyz2top/xyz2top/tests/files/valinomycin_geomOpt_DFT-b3lyp_cc-pVTZ.xyz"
path_to_file2 = "/home/ctcc2/Documents/CODE-DEV/xyz2top/xyz2top/tests/files/valinomycin_geomOpt_DFT-b3lyp-noDF_cc-pVTZ.xyz"
import xyz2molecule as xyz
molecule1 = xyz.parse_XYZ(path_to_file1)
molecule2 = xyz.parse_XYZ(path_to_file2)
diff = topologyDiff(molecule1, molecule2, covRadFactor=1.3)
if __name__ == "__main__":
example_valinomycin_pureLinK_vs_LinKwithDF()
| [
"argparse.FileType",
"topology.topology",
"argparse.ArgumentParser",
"json.dumps",
"xyz2molecule.parse_XYZ",
"numpy.subtract",
"numpy.array",
"numpy.sign",
"sys.exit",
"pandas.DataFrame"
] | [((11784, 11809), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11807, 11809), False, 'import argparse\n'), ((13567, 13595), 'xyz2molecule.parse_XYZ', 'xyz.parse_XYZ', (['path_to_file1'], {}), '(path_to_file1)\n', (13580, 13595), True, 'import xyz2molecule as xyz\n'), ((13612, 13640), 'xyz2molecule.parse_XYZ', 'xyz.parse_XYZ', (['path_to_file2'], {}), '(path_to_file2)\n', (13625, 13640), True, 'import xyz2molecule as xyz\n'), ((426, 464), 'topology.topology', 'topo.topology', (['molecule1', 'covRadFactor'], {}), '(molecule1, covRadFactor)\n', (439, 464), True, 'import topology as topo\n'), ((490, 528), 'topology.topology', 'topo.topology', (['molecule2', 'covRadFactor'], {}), '(molecule2, covRadFactor)\n', (503, 528), True, 'import topology as topo\n'), ((2593, 2660), 'pandas.DataFrame', 'pd.DataFrame', (['self.orderedBonds1[1:]'], {'columns': 'self.orderedBonds1[0]'}), '(self.orderedBonds1[1:], columns=self.orderedBonds1[0])\n', (2605, 2660), True, 'import pandas as pd\n'), ((2675, 2742), 'pandas.DataFrame', 'pd.DataFrame', (['self.orderedBonds2[1:]'], {'columns': 'self.orderedBonds2[0]'}), '(self.orderedBonds2[1:], columns=self.orderedBonds2[0])\n', (2687, 2742), True, 'import pandas as pd\n'), ((4453, 4522), 'pandas.DataFrame', 'pd.DataFrame', (['self.orderedAngles1[1:]'], {'columns': 'self.orderedAngles1[0]'}), '(self.orderedAngles1[1:], columns=self.orderedAngles1[0])\n', (4465, 4522), True, 'import pandas as pd\n'), ((4537, 4606), 'pandas.DataFrame', 'pd.DataFrame', (['self.orderedAngles2[1:]'], {'columns': 'self.orderedAngles2[0]'}), '(self.orderedAngles2[1:], columns=self.orderedAngles2[0])\n', (4549, 4606), True, 'import pandas as pd\n'), ((7025, 7098), 'pandas.DataFrame', 'pd.DataFrame', (['self.orderedDihedral1[1:]'], {'columns': 'self.orderedDihedral1[0]'}), '(self.orderedDihedral1[1:], columns=self.orderedDihedral1[0])\n', (7037, 7098), True, 'import pandas as pd\n'), ((7113, 7186), 'pandas.DataFrame', 'pd.DataFrame', 
(['self.orderedDihedral2[1:]'], {'columns': 'self.orderedDihedral2[0]'}), '(self.orderedDihedral2[1:], columns=self.orderedDihedral2[0])\n', (7125, 7186), True, 'import pandas as pd\n'), ((10550, 10592), 'json.dumps', 'json.dumps', (['topo'], {'sort_keys': '(True)', 'indent': '(4)'}), '(topo, sort_keys=True, indent=4)\n', (10560, 10592), False, 'import json\n'), ((10933, 10946), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (10941, 10946), False, 'import sys, os\n'), ((11120, 11133), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (11128, 11133), False, 'import sys, os\n'), ((2010, 2023), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (2018, 2023), False, 'import sys, os\n'), ((2186, 2218), 'numpy.array', 'np.array', (['self.orderedBonds1[1:]'], {}), '(self.orderedBonds1[1:])\n', (2194, 2218), True, 'import numpy as np\n'), ((2238, 2270), 'numpy.array', 'np.array', (['self.orderedBonds2[1:]'], {}), '(self.orderedBonds2[1:])\n', (2246, 2270), True, 'import numpy as np\n'), ((2537, 2550), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (2545, 2550), False, 'import sys, os\n'), ((4397, 4410), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (4405, 4410), False, 'import sys, os\n'), ((5653, 5666), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (5661, 5666), False, 'import sys, os\n'), ((6969, 6982), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (6977, 6982), False, 'import sys, os\n'), ((8261, 8274), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (8269, 8274), False, 'import sys, os\n'), ((11369, 11382), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (11377, 11382), False, 'import sys, os\n'), ((11546, 11559), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (11554, 11559), False, 'import sys, os\n'), ((11733, 11746), 'sys.exit', 'sys.exit', (['msg'], {}), '(msg)\n', (11741, 11746), False, 'import sys, os\n'), ((12079, 12101), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (12096, 12101), False, 'import argparse\n'), 
((2313, 2334), 'numpy.subtract', 'np.subtract', (['id1', 'id2'], {}), '(id1, id2)\n', (2324, 2334), True, 'import numpy as np\n'), ((5366, 5429), 'pandas.DataFrame', 'pd.DataFrame', (['(df1[nameCol_IDs].values - df2[nameCol_IDs].values)'], {}), '(df1[nameCol_IDs].values - df2[nameCol_IDs].values)\n', (5378, 5429), True, 'import pandas as pd\n'), ((5891, 5901), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (5898, 5901), True, 'import numpy as np\n'), ((7966, 8029), 'pandas.DataFrame', 'pd.DataFrame', (['(df1[nameCol_IDs].values - df2[nameCol_IDs].values)'], {}), '(df1[nameCol_IDs].values - df2[nameCol_IDs].values)\n', (7978, 8029), True, 'import pandas as pd\n'), ((8420, 8430), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (8427, 8430), True, 'import numpy as np\n')] |
import numpy as np
import cv2 as cv
def visualizador(complexo_img):
magnitude = np.log(np.abs(complexo_img) + 10**-10)
magnitude = magnitude / np.max(magnitude)
fase = (np.angle(complexo_img) + np.pi) / (np.pi * 2)
return magnitude, fase
def dft_np(img, vis=False, shift=False):
complexo = np.fft.fft2(img)
if shift:
complexo_img = np.fft.fftshift(complexo)
else:
complexo_img = complexo.copy()
if vis:
magnitude, fase = visualizador(complexo_img)
cv.imshow('Magnitude', magnitude)
cv.imshow('Fase', fase)
return complexo
def dft_inv_np(complexo):
img_comp = np.fft.ifft2(complexo)
img = np.real(img_comp)
return img/255
def gerar_filtro(img, x_0, x_min, x_max, c):
xx, yy = np.mgrid[:img.shape[0], :img.shape[1]]
circle = np.sqrt((xx - img.shape[0] / 2) ** 2 + (yy - img.shape[1] / 2) ** 2)
if c == 0:
c = 10**-10
return x_min + (np.tanh((circle - x_0)/c) + 1) / 2 * (x_max - x_min)
def normalizacao(x):
min_v = np.min(x)
ran_v = np.max(x) - min_v
return (x - min_v) / ran_v
def filtro_homomorfico(img, x_0, x_min, x_max, c, logs=True):
if logs:
img_return = img + 1.
img_return = np.log(img_return)
else:
img_return = img
img_return = dft_np(img_return)
filtro = gerar_filtro(img_return, x_0, x_min, x_max, c)
img_return = img_return * np.fft.fftshift(filtro)
filtro_return, _ = visualizador(img_return)
filtro_return = np.fft.fftshift(filtro_return)
img_return = dft_inv_np(img_return)
filtro_return[:,:filtro_return.shape[1]//2] = filtro[:,:filtro_return.shape[1]//2]
return normalizacao(np.exp(img_return)), filtro_return
# Obrigatoriedade da funcao! (Desnecessario!)
def faz_nada(*args, **kwargs):
pass
def main():
cv.getBuildInformation()
# cap = cv.VideoCapture('Bridge.mp4')
# cap = cv.VideoCapture('Night_Scene.mp4')
cap = cv.VideoCapture('Highway.mp4')
if not cap.isOpened():
print('Falha ao abrir o video.')
exit(-1)
cv.namedWindow('Filtro')
cv.createTrackbar('log', 'Filtro', 1, 1, faz_nada)
cv.createTrackbar('c', 'Filtro', 10, 100, faz_nada)
cv.createTrackbar('raio', 'Filtro', 20, 1000, faz_nada)
cv.createTrackbar('min', 'Filtro', 0, 100, faz_nada)
cv.createTrackbar('max', 'Filtro', 100, 100, faz_nada)
speed = 5
descarte_frame = 0
while True:
ret, frame = cap.read()
if ret:
if descarte_frame == 0:
logs = cv.getTrackbarPos('log', 'Filtro')
c = cv.getTrackbarPos('c', 'Filtro')
r = cv.getTrackbarPos('raio', 'Filtro')
v_min = cv.getTrackbarPos('min', 'Filtro')
v_max = cv.getTrackbarPos('max', 'Filtro')
v_min = v_min / 100
v_max = v_max / 100
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
cv.imshow('Frame', frame)
img, filtro = filtro_homomorfico(frame, r, v_min, v_max, c, logs==1)
cv.imshow('Homomorfico', img)
cv.imshow('Filtro', filtro)
descarte_frame = (descarte_frame + 1) % speed
key = cv.waitKey(15)
if key == 27:
break
else:
# break
cap = cv.VideoCapture('Highway.mp4')
cap.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
# Delete antes de postar:
# import cProfile
# cProfile.run('main()', 'output.dat') | [
"numpy.sqrt",
"numpy.log",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.getBuildInformation",
"numpy.tanh",
"numpy.fft.fft2",
"numpy.max",
"numpy.real",
"numpy.exp",
"numpy.min",
"cv2.waitKey",
"numpy.abs",
"cv2.cvtColor",
"cv2.createTrackbar",
"cv2.namedWindow",
"numpy.fft.ifft2",
... | [((316, 332), 'numpy.fft.fft2', 'np.fft.fft2', (['img'], {}), '(img)\n', (327, 332), True, 'import numpy as np\n'), ((650, 672), 'numpy.fft.ifft2', 'np.fft.ifft2', (['complexo'], {}), '(complexo)\n', (662, 672), True, 'import numpy as np\n'), ((683, 700), 'numpy.real', 'np.real', (['img_comp'], {}), '(img_comp)\n', (690, 700), True, 'import numpy as np\n'), ((832, 900), 'numpy.sqrt', 'np.sqrt', (['((xx - img.shape[0] / 2) ** 2 + (yy - img.shape[1] / 2) ** 2)'], {}), '((xx - img.shape[0] / 2) ** 2 + (yy - img.shape[1] / 2) ** 2)\n', (839, 900), True, 'import numpy as np\n'), ((1046, 1055), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1052, 1055), True, 'import numpy as np\n'), ((1517, 1547), 'numpy.fft.fftshift', 'np.fft.fftshift', (['filtro_return'], {}), '(filtro_return)\n', (1532, 1547), True, 'import numpy as np\n'), ((1841, 1865), 'cv2.getBuildInformation', 'cv.getBuildInformation', ([], {}), '()\n', (1863, 1865), True, 'import cv2 as cv\n'), ((1965, 1995), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""Highway.mp4"""'], {}), "('Highway.mp4')\n", (1980, 1995), True, 'import cv2 as cv\n'), ((2088, 2112), 'cv2.namedWindow', 'cv.namedWindow', (['"""Filtro"""'], {}), "('Filtro')\n", (2102, 2112), True, 'import cv2 as cv\n'), ((2118, 2168), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""log"""', '"""Filtro"""', '(1)', '(1)', 'faz_nada'], {}), "('log', 'Filtro', 1, 1, faz_nada)\n", (2135, 2168), True, 'import cv2 as cv\n'), ((2173, 2224), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""c"""', '"""Filtro"""', '(10)', '(100)', 'faz_nada'], {}), "('c', 'Filtro', 10, 100, faz_nada)\n", (2190, 2224), True, 'import cv2 as cv\n'), ((2229, 2284), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""raio"""', '"""Filtro"""', '(20)', '(1000)', 'faz_nada'], {}), "('raio', 'Filtro', 20, 1000, faz_nada)\n", (2246, 2284), True, 'import cv2 as cv\n'), ((2289, 2341), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""min"""', '"""Filtro"""', '(0)', '(100)', 'faz_nada'], {}), 
"('min', 'Filtro', 0, 100, faz_nada)\n", (2306, 2341), True, 'import cv2 as cv\n'), ((2346, 2400), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""max"""', '"""Filtro"""', '(100)', '(100)', 'faz_nada'], {}), "('max', 'Filtro', 100, 100, faz_nada)\n", (2363, 2400), True, 'import cv2 as cv\n'), ((3424, 3446), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (3444, 3446), True, 'import cv2 as cv\n'), ((153, 170), 'numpy.max', 'np.max', (['magnitude'], {}), '(magnitude)\n', (159, 170), True, 'import numpy as np\n'), ((370, 395), 'numpy.fft.fftshift', 'np.fft.fftshift', (['complexo'], {}), '(complexo)\n', (385, 395), True, 'import numpy as np\n'), ((520, 553), 'cv2.imshow', 'cv.imshow', (['"""Magnitude"""', 'magnitude'], {}), "('Magnitude', magnitude)\n", (529, 553), True, 'import cv2 as cv\n'), ((562, 585), 'cv2.imshow', 'cv.imshow', (['"""Fase"""', 'fase'], {}), "('Fase', fase)\n", (571, 585), True, 'import cv2 as cv\n'), ((1068, 1077), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1074, 1077), True, 'import numpy as np\n'), ((1245, 1263), 'numpy.log', 'np.log', (['img_return'], {}), '(img_return)\n', (1251, 1263), True, 'import numpy as np\n'), ((1425, 1448), 'numpy.fft.fftshift', 'np.fft.fftshift', (['filtro'], {}), '(filtro)\n', (1440, 1448), True, 'import numpy as np\n'), ((93, 113), 'numpy.abs', 'np.abs', (['complexo_img'], {}), '(complexo_img)\n', (99, 113), True, 'import numpy as np\n'), ((184, 206), 'numpy.angle', 'np.angle', (['complexo_img'], {}), '(complexo_img)\n', (192, 206), True, 'import numpy as np\n'), ((1700, 1718), 'numpy.exp', 'np.exp', (['img_return'], {}), '(img_return)\n', (1706, 1718), True, 'import numpy as np\n'), ((3255, 3269), 'cv2.waitKey', 'cv.waitKey', (['(15)'], {}), '(15)\n', (3265, 3269), True, 'import cv2 as cv\n'), ((3370, 3400), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""Highway.mp4"""'], {}), "('Highway.mp4')\n", (3385, 3400), True, 'import cv2 as cv\n'), ((2562, 2596), 'cv2.getTrackbarPos', 
'cv.getTrackbarPos', (['"""log"""', '"""Filtro"""'], {}), "('log', 'Filtro')\n", (2579, 2596), True, 'import cv2 as cv\n'), ((2617, 2649), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""c"""', '"""Filtro"""'], {}), "('c', 'Filtro')\n", (2634, 2649), True, 'import cv2 as cv\n'), ((2670, 2705), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""raio"""', '"""Filtro"""'], {}), "('raio', 'Filtro')\n", (2687, 2705), True, 'import cv2 as cv\n'), ((2730, 2764), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""min"""', '"""Filtro"""'], {}), "('min', 'Filtro')\n", (2747, 2764), True, 'import cv2 as cv\n'), ((2789, 2823), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""max"""', '"""Filtro"""'], {}), "('max', 'Filtro')\n", (2806, 2823), True, 'import cv2 as cv\n'), ((2922, 2959), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2GRAY'], {}), '(frame, cv.COLOR_BGR2GRAY)\n', (2933, 2959), True, 'import cv2 as cv\n'), ((2976, 3001), 'cv2.imshow', 'cv.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (2985, 3001), True, 'import cv2 as cv\n'), ((3103, 3132), 'cv2.imshow', 'cv.imshow', (['"""Homomorfico"""', 'img'], {}), "('Homomorfico', img)\n", (3112, 3132), True, 'import cv2 as cv\n'), ((3149, 3176), 'cv2.imshow', 'cv.imshow', (['"""Filtro"""', 'filtro'], {}), "('Filtro', filtro)\n", (3158, 3176), True, 'import cv2 as cv\n'), ((958, 985), 'numpy.tanh', 'np.tanh', (['((circle - x_0) / c)'], {}), '((circle - x_0) / c)\n', (965, 985), True, 'import numpy as np\n')] |
"""
Created on Mon Apr 23 16:35:00 2018
@author: jercas
"""
import numpy as np
def stepBased_decay(epoch, initial_alpha=0.01, factor=0.5, drop_every=5):
    """Step-based learning-rate decay schedule.

    The learning rate starts at ``initial_alpha`` and is multiplied by
    ``factor`` once every ``drop_every`` epochs.  (The original in-code
    comment claimed a factor of 0.25; the code has always used 0.5 —
    the default here keeps that behavior.)

    Args:
        epoch: Zero-based index of the current training epoch.
        initial_alpha: Base learning rate before any drop (default 0.01).
        factor: Multiplicative decay applied at each drop (default 0.5).
        drop_every: Number of epochs between consecutive drops (default 5).

    Returns:
        float: The learning rate to use for ``epoch``.
    """
    # Floor division is equivalent to np.floor((1 + epoch) / drop_every)
    # for the non-negative epoch numbers a training loop supplies.
    drops = (1 + epoch) // drop_every
    return float(initial_alpha * (factor ** drops))
"numpy.floor"
] | [((416, 449), 'numpy.floor', 'np.floor', (['((1 + epoch) / dropEvery)'], {}), '((1 + epoch) / dropEvery)\n', (424, 449), True, 'import numpy as np\n')] |
"""개미집단 최적화
"""
import matplotlib.pyplot as plt
import numpy as np
area = np.ones([20, 20]) # 지역 생성
start = (1, 1) # 개미 출발지점
goal = (19, 14) # 도착해야 하는 지점
path_count = 40 # 경로를 만들 개미 수
path_max_len = 20 * 20 # 최대 경로 길이
pheromone = 1.0 # 페로몬 가산치
volatility = 0.3 # 스탭 당 페로몬 휘발율
def get_neighbors(x, y):
    """Return the list of grid coordinates adjacent to (x, y).

    The cell itself is excluded, and coordinates outside the ``area``
    grid bounds are filtered out.
    """
    max_x, max_y = area.shape
    neighbors = []
    for nx in range(x - 1, x + 2):
        for ny in range(y - 1, y + 2):
            if nx == x and ny == y:
                continue  # skip the center cell
            if 0 <= nx < max_x and 0 <= ny < max_y:
                neighbors.append((nx, ny))
    return neighbors
def ant_path_finding():
    """Generate one ant's path from ``start`` toward ``goal``.

    At each step the ant moves to a random neighbor, weighted by the
    pheromone level stored in ``area``.  Returns the list of visited
    coordinates, or None if the path exceeds ``path_max_len`` steps.
    """
    path = [start]
    x, y = start
    count = 0
    while x != goal[0] or y != goal[1]:
        count += 1
        if count > path_max_len:
            # Give up: the ant wandered too long without reaching the goal.
            return None
        neighbors = get_neighbors(x, y)
        # Pheromone-weighted selection probabilities over the neighbors.
        values = np.array([area[i, j] for i, j in neighbors])
        p = values / np.sum(values)
        x, y = neighbors[np.random.choice(len(neighbors), p=p)]
        # NOTE(review): get_neighbors excludes the current cell, so the
        # chosen (x, y) can never equal path[-1]; this re-sample loop
        # appears to be dead code — confirm before removing.
        while (x, y) == path[-1]:
            x, y = neighbors[np.random.choice(len(neighbors), p=p)]
        path.append((x, y))
    return path
def step_end(path):
    """Deposit pheromone along ``path``, then evaporate pheromone everywhere.

    Each distinct cell on the path receives an equal share of the
    ``pheromone`` budget; afterwards the whole grid decays by
    ``volatility``.  A ``None`` path (failed ant) is ignored.
    """
    global area
    if path is None:
        return
    visited = set(path)
    share = pheromone / len(visited)
    for cell in visited:
        area[cell] += share
    # One evaporation step over the entire grid.
    area[:, :] = area * (1 - volatility)
    return
if __name__ == "__main__":
# 계산 및 그래프 작성
count = 0
while count < path_count:
path = ant_path_finding()
if path is None:
continue
count += 1
print(f"Ant Pathfinding: {count} / {path_count}")
step_end(path)
# 최종 경로와 페로몬 맵 그리기
x, y = [], []
for _x, _y in path:
x.append(_x)
y.append(_y)
plt.plot(x, y, "b", alpha=0.3)
plt.imshow(area.T, cmap="Greens")
plt.xlim(0, 20)
plt.ylim(0, 20)
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.ones",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"
] | [((75, 92), 'numpy.ones', 'np.ones', (['[20, 20]'], {}), '([20, 20])\n', (82, 92), True, 'import numpy as np\n'), ((1752, 1782), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""b"""'], {'alpha': '(0.3)'}), "(x, y, 'b', alpha=0.3)\n", (1760, 1782), True, 'import matplotlib.pyplot as plt\n'), ((1787, 1820), 'matplotlib.pyplot.imshow', 'plt.imshow', (['area.T'], {'cmap': '"""Greens"""'}), "(area.T, cmap='Greens')\n", (1797, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1840), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(20)'], {}), '(0, 20)\n', (1833, 1840), True, 'import matplotlib.pyplot as plt\n'), ((1845, 1860), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(20)'], {}), '(0, 20)\n', (1853, 1860), True, 'import matplotlib.pyplot as plt\n'), ((1865, 1875), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1873, 1875), True, 'import matplotlib.pyplot as plt\n'), ((831, 875), 'numpy.array', 'np.array', (['[area[i, j] for i, j in neighbors]'], {}), '([area[i, j] for i, j in neighbors])\n', (839, 875), True, 'import numpy as np\n'), ((897, 911), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (903, 911), True, 'import numpy as np\n')] |
import pytest
import numpy as np
import multiprocess as mp
from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker
from ecogdata.parallel.mproc import parallel_context
from . import with_start_methods
@with_start_methods
def test_process_types():
    """Workers should be spawn/fork Process objects matching the context."""
    jr = JobRunner(np.var)
    # create 3 workers and check type
    jr._renew_workers(3)
    if parallel_context.context_name == 'spawn':
        assert isinstance(jr.workers[0], mp.context.SpawnProcess)
    elif parallel_context.context_name == 'fork':
        assert isinstance(jr.workers[0], mp.context.ForkProcess)
@with_start_methods
def test_simple_method():
    """JobRunner should map np.sum over a list of arrays and return numeric output."""
    inputs = [np.arange(10) for _ in range(20)]
    runner = JobRunner(np.sum, n_workers=4)
    sums = runner.run_jobs(inputs=inputs, progress=False)
    assert sums.dtype not in np.sctypes['others'], 'simple results dtype is not numeric'
    assert (sums == 9 * 5).all(), 'wrong sums'
@with_start_methods
def test_submitting_context():
    """Jobs submitted inside the submitting_jobs context should all complete."""
    jr = JobRunner(np.sum, n_workers=4)
    with jr.submitting_jobs(progress=False):
        for _ in range(20):
            jr.submit(np.arange(10))
    # Results of submitted jobs are collected on this attribute.
    # NOTE(review): np.sctypes was removed in NumPy 2.0 — verify the
    # installed NumPy version supports this check.
    sums = jr.output_from_submitted
    assert sums.dtype not in np.sctypes['others'], 'simple results dtype is not numeric'
    assert (sums == 9 * 5).all(), 'wrong sums'
def nonnumeric_input_output(a: int, b: str, c: list):
    """Return the three arguments as a tuple in reversed order (c, b, a)."""
    return tuple(reversed((a, b, c)))
@with_start_methods
def test_nonnumeric():
    """Non-numeric job inputs/outputs should round-trip as an object array."""
    from string import ascii_letters
    n = 20
    # inputs types are (int, str, list)
    inputs = list(zip(range(n),
                      ascii_letters,
                      [list(range(np.random.randint(low=1, high=6))) for _ in range(n)]))
    runner = JobRunner(nonnumeric_input_output, n_workers=4)
    outputs = runner.run_jobs(inputs=inputs, progress=False)
    assert outputs.dtype == object, 'did not return object array'
    assert isinstance(outputs[0], tuple), 'individual ouputs have wrong type'
    # The worker reverses its arguments, so output[::-1] == input.
    assert all([o[::-1] == i for o, i in zip(outputs, inputs)]), 'wrong output values'
class ArraySum(ParallelWorker):
    """
    ParallelWorker whose parallel method is plain ``np.sum``.
    """
    # staticmethod so np.sum is not bound to the instance when invoked.
    para_method = staticmethod(np.sum)
    def map_job(self, job):
        """
        Create arguments and keywords to call self.para_method(*args, **kwargs).
        "job" is of the form (i, job_spec) where i is a place keeper
        (the job index used to order results).
        """
        i, arr = job
        return i, (arr,), dict()
@with_start_methods
def test_custom_worker():
    """A custom ParallelWorker subclass should sum each input array."""
    arrays = [np.arange(100) for _ in range(25)]
    runner = JobRunner(ArraySum, n_workers=4)
    totals = runner.run_jobs(inputs=arrays, progress=False)
    # Each array sums to 0 + 1 + ... + 99 == 99 * 50.
    assert (totals == 99 * 50).all(), 'wrong sums'
class SharedarraySum(ArraySum):
    """
    ArraySum variant that reads its input arrays from shared memory.
    """
    def __init__(self, shm_managers):
        # list of SharedmemManager objects (one shared array per job)
        self.shm_managers = shm_managers
    def map_job(self, job):
        # job is only the job number
        i = job
        # use the get_ndarray() context manager to simply get the array
        # NOTE(review): `arr` is returned after the context manager has
        # exited — confirm get_ndarray() keeps the array valid beyond
        # the `with` block.
        with self.shm_managers[i].get_ndarray() as arr:
            pass
        return i, (arr,), dict()
@with_start_methods
def test_shm_worker():
    """Workers should sum arrays handed over via shared-memory managers."""
    mem_man = parallel_context.SharedmemManager
    shared_arrays = [mem_man(np.arange(100), use_lock=False) for _ in range(25)]
    # worker constructor now takes shared mem pointers
    jobs = JobRunner(SharedarraySum, n_workers=4, w_args=(shared_arrays,))
    # and run-mode just creates indices to the pointers
    sums = jobs.run_jobs(n_jobs=len(shared_arrays), progress=False)
    assert (sums == 99 * 50).all(), 'wrong sums'
def hates_eights(n):
    """Identity function that refuses the number 8 with a ValueError.

    Used by the exception-handling tests below as a worker that fails
    on exactly one input.
    """
    if n != 8:
        return n
    raise ValueError("n == 8, what did you think would happen?!")
@with_start_methods
def test_skipped_excpetions():
    """With reraise_exceptions=False, failed jobs yield NaN plus the exception.

    NOTE(review): the function name has a typo ("excpetions"); kept as-is
    since renaming would change the public test name.
    """
    jobs = JobRunner(hates_eights, n_workers=4)
    r, e = jobs.run_jobs(np.arange(10), reraise_exceptions=False, progress=False, return_exceptions=True)
    assert len(e) == 1, 'exception not returned'
    assert np.isnan(r[8]), 'exception not interpolated'
    assert all([r[i] == i for i in range(10) if i != 8]), 'non-exceptions not returned correctly'
@with_start_methods
def test_raised_exceptions():
    """With reraise_exceptions=True, worker errors propagate to the caller."""
    # testing raising after the fact
    with pytest.raises(ValueError):
        jobs = JobRunner(hates_eights, n_workers=4)
        r = jobs.run_jobs(np.arange(10), reraise_exceptions=True, progress=False)
    # testing raise-immediately
    with pytest.raises(ValueError):
        jobs = JobRunner(hates_eights, n_workers=1, single_job_in_thread=False)
        r = jobs.run_jobs(np.arange(10), reraise_exceptions=True, progress=False)
| [
"numpy.random.randint",
"pytest.raises",
"numpy.isnan",
"ecogdata.parallel.jobrunner.JobRunner",
"numpy.arange"
] | [((268, 285), 'ecogdata.parallel.jobrunner.JobRunner', 'JobRunner', (['np.var'], {}), '(np.var)\n', (277, 285), False, 'from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker\n'), ((955, 985), 'ecogdata.parallel.jobrunner.JobRunner', 'JobRunner', (['np.sum'], {'n_workers': '(4)'}), '(np.sum, n_workers=4)\n', (964, 985), False, 'from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker\n'), ((1648, 1695), 'ecogdata.parallel.jobrunner.JobRunner', 'JobRunner', (['nonnumeric_input_output'], {'n_workers': '(4)'}), '(nonnumeric_input_output, n_workers=4)\n', (1657, 1695), False, 'from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker\n'), ((2480, 2512), 'ecogdata.parallel.jobrunner.JobRunner', 'JobRunner', (['ArraySum'], {'n_workers': '(4)'}), '(ArraySum, n_workers=4)\n', (2489, 2512), False, 'from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker\n'), ((3351, 3414), 'ecogdata.parallel.jobrunner.JobRunner', 'JobRunner', (['SharedarraySum'], {'n_workers': '(4)', 'w_args': '(shared_arrays,)'}), '(SharedarraySum, n_workers=4, w_args=(shared_arrays,))\n', (3360, 3414), False, 'from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker\n'), ((3773, 3809), 'ecogdata.parallel.jobrunner.JobRunner', 'JobRunner', (['hates_eights'], {'n_workers': '(4)'}), '(hates_eights, n_workers=4)\n', (3782, 3809), False, 'from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker\n'), ((3976, 3990), 'numpy.isnan', 'np.isnan', (['r[8]'], {}), '(r[8])\n', (3984, 3990), True, 'import numpy as np\n'), ((641, 654), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (650, 654), True, 'import numpy as np\n'), ((2434, 2448), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (2443, 2448), True, 'import numpy as np\n'), ((3835, 3848), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3844, 3848), True, 'import numpy as np\n'), ((4217, 4242), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4230, 4242), 
False, 'import pytest\n'), ((4259, 4295), 'ecogdata.parallel.jobrunner.JobRunner', 'JobRunner', (['hates_eights'], {'n_workers': '(4)'}), '(hates_eights, n_workers=4)\n', (4268, 4295), False, 'from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker\n'), ((4419, 4444), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4432, 4444), False, 'import pytest\n'), ((4461, 4525), 'ecogdata.parallel.jobrunner.JobRunner', 'JobRunner', (['hates_eights'], {'n_workers': '(1)', 'single_job_in_thread': '(False)'}), '(hates_eights, n_workers=1, single_job_in_thread=False)\n', (4470, 4525), False, 'from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker\n'), ((686, 716), 'ecogdata.parallel.jobrunner.JobRunner', 'JobRunner', (['np.sum'], {'n_workers': '(4)'}), '(np.sum, n_workers=4)\n', (695, 716), False, 'from ecogdata.parallel.jobrunner import JobRunner, ParallelWorker\n'), ((3233, 3247), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (3242, 3247), True, 'import numpy as np\n'), ((4322, 4335), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4331, 4335), True, 'import numpy as np\n'), ((4552, 4565), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4561, 4565), True, 'import numpy as np\n'), ((1081, 1094), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1090, 1094), True, 'import numpy as np\n'), ((1579, 1611), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(6)'}), '(low=1, high=6)\n', (1596, 1611), True, 'import numpy as np\n')] |
#!/usr/bin/python
import os
import matplotlib.pyplot as plt
import numpy as np
from post_process import load
# from scipy.stats import iqr
class InformationCapacity(object):
    """Estimate the information capacity of a binary discrimination channel.

    Loads simulation outputs for "foreign" and "self" ligand conditions
    from two directories and computes the mutual-information capacity C
    between the input identity and the observed output distribution.
    """
    def __init__(self, foreign_directory="./", self_directory="./", estimator='fd', limiting='foreign'):
        """Load output/ligand files from both directories and compute C.

        estimator: np.histogram bin-selection rule (e.g. 'fd', 'auto').
        NOTE(review): `limiting` is accepted but never used — confirm intent.
        """
        self.num_steps = 1
        self.foreign_directory = foreign_directory
        self.self_directory = self_directory
        self.foreign_output = np.loadtxt(foreign_directory + "output")
        self.foreign_ligand = np.loadtxt(foreign_directory + "Ligand_concentrations")
        self.self_output = np.loadtxt(self_directory + "output")
        self.self_ligand = np.loadtxt(self_directory + "Ligand_concentrations")
        # Column-name files may live in a sample_0/ subdirectory or at the top level.
        if os.path.exists(foreign_directory + "sample_0/column_names"):
            print("Loaded foreign column names")
            self.foreign_column = load(foreign_directory + "sample_0/column_names")
            self.foreign_column_names = self.foreign_column[0].split()
        elif os.path.exists(foreign_directory + "column_names"):
            print("Loaded foreign column names")
            self.foreign_column = load(foreign_directory + "column_names")
            self.foreign_column_names = self.foreign_column[0].split()
        if os.path.exists(self_directory + "sample_0/column_names"):
            self.self_column = load(self_directory + "sample_0/column_names")
            self.self_column_names = self.self_column[0].split()
        elif os.path.exists(self_directory + "column_names"):
            self.self_column = load(self_directory + "column_names")
            self.self_column_names = self.self_column[0].split()
        self.estimator = estimator
        # calculate_ic also determines how many bins were needed for p(O)
        # to integrate to ~1; reuse that bin count for self.bins.
        self.capacity, self.number_of_bins, self.p0_integral = self.calculate_ic()
        # self.capacity = self.calculate_ic()
        self.bins = self.calculate_bins(num_bins=self.number_of_bins)
    def calculate_bins(self, num_bins=100):
        """Return `num_bins` evenly spaced bin edges spanning both output
        distributions (from the lowest self edge to the highest foreign edge)."""
        count, self_bins = np.histogram(self.self_output, bins=self.estimator, density=True)
        count, foreign_bins = np.histogram(self.foreign_output, bins=self.estimator, density=True)
        bins = np.linspace(min(self_bins), max(foreign_bins), num=num_bins)
        return bins
    # def kde_plot(self):
    #     sns.distplot(self.foreign_output, hist=True, kde=True,
    #                  bins=self.bins, label="Lf Ls")
    #     sns.distplot(self.self_output, hist=True, kde=True,
    #                  bins=self.bins, label="Ls")
    #     plt.legend()
    #     sns.distplot(self.foreign_output, hist=True, kde=True,
    #                  bins=self.bins, color='darkblue',
    #                  hist_kws={'edgecolor': 'black'},
    #                  kde_kws={'linewidth': 4})
    def count_cn(self, bin_locations):
        """Normalized histogram (density) of the foreign output over the given bins."""
        count_cn, bins = np.histogram(self.foreign_output, bins=bin_locations, density=True)
        return count_cn
    def cn_mean_iqr(self):
        """Mean of the foreign output (IQR computation is disabled)."""
        mean = np.mean(self.foreign_output)
        # cn_iqr = iqr(self.foreign_output)
        return mean  # , cn_iqr
    def plot_cn(self, plot_label=None):
        """Histogram the foreign output, with a label built from column names
        when available; `plot_label` overrides the derived label."""
        if os.path.exists(self.foreign_directory + "sample_0/column_names") or \
                os.path.exists(self.foreign_directory + "column_names"):
            if len(self.foreign_column_names) > 1:
                if ("Lf" in self.foreign_column_names[-2] or "Lf" in self.foreign_column_names[-1]) and \
                        ("Ls" in self.foreign_column_names[-2] or "Ls" in self.foreign_column_names[-1]):
                    label = 'P(' + self.foreign_column_names[-2] + " + " + self.foreign_column_names[-1] + ')'
                else:
                    label = 'P(' + self.foreign_column_names[-1] + ')'
            else:
                label = 'P(' + self.foreign_column_names[-1] + ')'
        else:
            label = 'P(C{0} + D{0})'.format(self.num_steps - 1)
        if plot_label:
            label = plot_label
        # NOTE(review): 'normed' was removed in Matplotlib 3.x — may need
        # to migrate to 'density=True'.
        count, bins, _ = plt.hist(self.foreign_output, bins=self.bins, align='mid', normed=True,
                                 label=label)
    def plot_lf(self, bins):
        """Histogram the foreign ligand concentrations over the given bins."""
        count, bins_lf, _ = plt.hist(self.foreign_ligand, bins=bins, align='mid', normed=True,
                                    label='P(Lf)')
    def count_dn(self, bin_locations):
        """Normalized histogram (density) of the self output over the given bins."""
        count_dn, bins = np.histogram(self.self_output, bins=bin_locations, density=True)
        return count_dn
    def dn_mean_iqr(self):
        """Mean of the self output (IQR computation is disabled)."""
        mean = np.mean(self.self_output)
        # dn_iqr = iqr(self.self_output)
        return mean  # , dn_iqr
    def plot_dn(self, plot_label=None):
        """Histogram the self output; `plot_label` overrides the derived label."""
        if os.path.exists(self.self_directory + "sample_0/column_names") or \
                os.path.exists(self.self_directory + "column_names"):
            label = 'P(' + self.self_column_names[-1] + ')'
        else:
            label = 'P(D{0})'.format(self.num_steps - 1)
        if plot_label:
            label = plot_label
        count, bins, _ = plt.hist(self.self_output, bins=self.bins, align='mid', normed=True,
                                 label=label)
    def plot_ls(self, bins):
        """Histogram the self ligand concentrations over the given bins."""
        count, bins_ls, _ = plt.hist(self.self_ligand, bins=bins, align='mid', normed=True,
                                    label='P(Ls)')
    # def kullback_leibler(self):
    #     count_cn = self.count_cn()
    #     count_dn = self.count_dn()
    #
    #     bin_width = self.bins[1] - self.bins[0]
    #     kl_cn_dn = np.nan_to_num(np.log2(count_cn/count_dn))
    #     kl_cn_dn = np.trapz(count_cn * np.nan_to_num(np.log2(count_cn/count_dn)), dx=bin_width)
    #     return kl_cn_dn
    def compute_bins(self):
        """Grow the bin count in steps of 50 until the mixture density p(O)
        integrates to at least 0.99; return the resulting bin edges."""
        number_of_bins = 50
        bins = 0
        p_0_integral = 0
        while p_0_integral < 0.99:
            bins = self.calculate_bins(num_bins=number_of_bins)
            count_cn = self.count_cn(bins)
            count_dn = self.count_dn(bins)
            bin_width = bins[1] - bins[0]
            # Equal-prior mixture of the two output densities.
            p_O = 0.5 * (count_cn + count_dn)
            p_0_integral = np.trapz(p_O, dx=bin_width)
            # print("p(O) integral = " + str(p_0_integral))
            number_of_bins += 50
        return bins
    def calculate_ic(self):
        """Compute the channel capacity C (in bits) by numerically integrating
        the mutual information between input identity and output.

        Returns (C, number_of_bins, p0_integral) where number_of_bins is the
        bin count at which p(O) integrated to >= 0.99.
        """
        number_of_bins = 50
        C = 0
        p_0_integral = 0
        while p_0_integral < 0.99:
            bins = self.calculate_bins(num_bins=number_of_bins)
            count_cn = self.count_cn(bins)
            count_dn = self.count_dn(bins)
            bin_width = bins[1] - bins[0]
            p_O = 0.5 * (count_cn + count_dn)
            p_0_integral = np.trapz(p_O, dx=bin_width)
            print("p(O) integral = " + str(p_0_integral))
            # Mutual information integrand: 0.5 * p(O|i) * log2(p(O|i)/p(O)).
            term_1_c0 = 0.5 * count_cn * np.nan_to_num(np.log2(count_cn / p_O))
            term_2_d0 = 0.5 * count_dn * np.nan_to_num(np.log2(count_dn / p_O))
            C = np.trapz(term_1_c0 + term_2_d0, dx=bin_width)
            print("C = " + str(C))
            # NOTE(review): exact float equality between the integral and C
            # is a fragile condition — confirm the intended saturation test.
            if p_0_integral == C:
                print("C == P(O) integral: " + str(p_0_integral == C))
                C = 1.00
                print("New C " + str(C))
                break
            number_of_bins += 50
        return C, number_of_bins, p_0_integral
    def alternate_calculate_ic(self):
        """Alternative capacity estimate via entropies: C = H(O) - H(O|I).

        NOTE(review): histograms use fresh 500-point bins but bin_width is
        taken from self.bins — confirm these are meant to differ.
        """
        bins = self.calculate_bins(num_bins=500)
        count_cn = self.count_cn(bins)
        count_dn = self.count_dn(bins)
        bin_width = self.bins[1] - self.bins[0]
        p_O = 0.5 * (count_cn + count_dn)
        h_o = -p_O * np.nan_to_num(np.log2(p_O))
        h_o_i_term_1 = 0.5 * count_cn * np.nan_to_num(np.log2(count_cn))
        h_o_i_term_2 = 0.5 * count_dn * np.nan_to_num(np.log2(count_dn))
        C = np.trapz(h_o + h_o_i_term_1 + h_o_i_term_2, dx=bin_width)
        print("C2 = " + str(C))
        return C
    # def plot_histograms(self):
    #     count_C0, bins_C0, _ = plt.hist(self.foreign_output, bins='auto', align='mid', normed=True,
    #                                     label='P(C{0})'.format(self.num_steps - 1))
    #     print("C bins = " + str(bins_C0))
    #     print("p(CN) integral = " + str(np.trapz(count_C0, dx=bins_C0[1]-bins_C0[0])))
    #
    #     count_D0, bins_D0, _ = plt.hist(self.self_output, bins_C0, align='mid', normed=True,
    #                                     label='P(D{0})'.format(self.num_steps - 1))
    #     print("D bins = " + str(bins_D0))
    #     print("p(DN) integral = " + str(np.trapz(count_D0, dx=bins_D0[1]-bins_D0[0])))
    #
    #     binwidth_C0 = [bins_C0[i+1] - bins_C0[i] for i in range(100)]
    #     print("binwidth = " + str(binwidth_C0))
    #
    #     # Calculating Information capacity
    #
    #     p_O = 0.5 * (count_C0 + count_D0)
    #
    #     print("p(0) integral = " + str(np.trapz(p_O, dx=binwidth_C0[0])))
    #
    #     term_1_C0 = 0.5 * count_C0 * np.nan_to_num(np.log2(count_C0/p_O))
    #     term_2_D0 = 0.5 * count_D0 * np.nan_to_num(np.log2(count_D0/p_O))
    #     C = np.trapz(term_1_C0 + term_2_D0, dx=binwidth_C0[0])
    #     print("C = " + str(C))
    #
    #     np.savetxt("IC", [C], fmt="%f")
    #     np.savetxt("num_bins", [self.num_bins], fmt="%f")
    #     np.savetxt("binwidth", [binwidth_C0[0]], fmt="%f")
# np.savetxt("binwidth", [binwidth_C0[0]], fmt="%f")
def check_binning():
    """Overlay output histograms from the base run and the end-step run
    to visually compare their binning."""
    base_output = np.loadtxt("L_self/output")
    end_step_output = np.loadtxt("3_step_end_step/L_self/output")
    _counts_a, _edges_a, _ = plt.hist(base_output, bins=100, normed=True, label="P(f)")
    _counts_b, _edges_b, _ = plt.hist(end_step_output, bins=100, normed=True, label="P(f) end step")
    plt.legend()
# if __name__ == "__main__":
# ic = InformationCapacity()
# ic.calculate_ic()
# ic.alternate_calculate_ic()
# check_binning()
# plt.savefig("output_histograms_test.pdf", format='pdf')
# ic.plot_histograms()
# plt.xlim(0,600)
# plt.legend()
# plt.savefig("output_histograms.pdf", format='pdf')
| [
"os.path.exists",
"numpy.histogram",
"numpy.mean",
"matplotlib.pyplot.hist",
"numpy.trapz",
"numpy.log2",
"post_process.load",
"numpy.loadtxt",
"matplotlib.pyplot.legend"
] | [((9159, 9186), 'numpy.loadtxt', 'np.loadtxt', (['"""L_self/output"""'], {}), "('L_self/output')\n", (9169, 9186), True, 'import numpy as np\n'), ((9217, 9260), 'numpy.loadtxt', 'np.loadtxt', (['"""3_step_end_step/L_self/output"""'], {}), "('3_step_end_step/L_self/output')\n", (9227, 9260), True, 'import numpy as np\n'), ((9284, 9345), 'matplotlib.pyplot.hist', 'plt.hist', (['foreign_output'], {'bins': '(100)', 'normed': '(True)', 'label': '"""P(f)"""'}), "(foreign_output, bins=100, normed=True, label='P(f)')\n", (9292, 9345), True, 'import matplotlib.pyplot as plt\n'), ((9378, 9457), 'matplotlib.pyplot.hist', 'plt.hist', (['foreign_output_end_step'], {'bins': '(100)', 'normed': '(True)', 'label': '"""P(f) end step"""'}), "(foreign_output_end_step, bins=100, normed=True, label='P(f) end step')\n", (9386, 9457), True, 'import matplotlib.pyplot as plt\n'), ((9462, 9474), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9472, 9474), True, 'import matplotlib.pyplot as plt\n'), ((440, 480), 'numpy.loadtxt', 'np.loadtxt', (["(foreign_directory + 'output')"], {}), "(foreign_directory + 'output')\n", (450, 480), True, 'import numpy as np\n'), ((511, 566), 'numpy.loadtxt', 'np.loadtxt', (["(foreign_directory + 'Ligand_concentrations')"], {}), "(foreign_directory + 'Ligand_concentrations')\n", (521, 566), True, 'import numpy as np\n'), ((594, 631), 'numpy.loadtxt', 'np.loadtxt', (["(self_directory + 'output')"], {}), "(self_directory + 'output')\n", (604, 631), True, 'import numpy as np\n'), ((659, 711), 'numpy.loadtxt', 'np.loadtxt', (["(self_directory + 'Ligand_concentrations')"], {}), "(self_directory + 'Ligand_concentrations')\n", (669, 711), True, 'import numpy as np\n'), ((724, 783), 'os.path.exists', 'os.path.exists', (["(foreign_directory + 'sample_0/column_names')"], {}), "(foreign_directory + 'sample_0/column_names')\n", (738, 783), False, 'import os\n'), ((1261, 1317), 'os.path.exists', 'os.path.exists', (["(self_directory + 
'sample_0/column_names')"], {}), "(self_directory + 'sample_0/column_names')\n", (1275, 1317), False, 'import os\n'), ((1965, 2030), 'numpy.histogram', 'np.histogram', (['self.self_output'], {'bins': 'self.estimator', 'density': '(True)'}), '(self.self_output, bins=self.estimator, density=True)\n', (1977, 2030), True, 'import numpy as np\n'), ((2061, 2129), 'numpy.histogram', 'np.histogram', (['self.foreign_output'], {'bins': 'self.estimator', 'density': '(True)'}), '(self.foreign_output, bins=self.estimator, density=True)\n', (2073, 2129), True, 'import numpy as np\n'), ((2786, 2853), 'numpy.histogram', 'np.histogram', (['self.foreign_output'], {'bins': 'bin_locations', 'density': '(True)'}), '(self.foreign_output, bins=bin_locations, density=True)\n', (2798, 2853), True, 'import numpy as np\n'), ((2922, 2950), 'numpy.mean', 'np.mean', (['self.foreign_output'], {}), '(self.foreign_output)\n', (2929, 2950), True, 'import numpy as np\n'), ((3934, 4022), 'matplotlib.pyplot.hist', 'plt.hist', (['self.foreign_output'], {'bins': 'self.bins', 'align': '"""mid"""', 'normed': '(True)', 'label': 'label'}), "(self.foreign_output, bins=self.bins, align='mid', normed=True,\n label=label)\n", (3942, 4022), True, 'import matplotlib.pyplot as plt\n'), ((4111, 4197), 'matplotlib.pyplot.hist', 'plt.hist', (['self.foreign_ligand'], {'bins': 'bins', 'align': '"""mid"""', 'normed': '(True)', 'label': '"""P(Lf)"""'}), "(self.foreign_ligand, bins=bins, align='mid', normed=True, label=\n 'P(Lf)')\n", (4119, 4197), True, 'import matplotlib.pyplot as plt\n'), ((4295, 4359), 'numpy.histogram', 'np.histogram', (['self.self_output'], {'bins': 'bin_locations', 'density': '(True)'}), '(self.self_output, bins=bin_locations, density=True)\n', (4307, 4359), True, 'import numpy as np\n'), ((4427, 4452), 'numpy.mean', 'np.mean', (['self.self_output'], {}), '(self.self_output)\n', (4434, 4452), True, 'import numpy as np\n'), ((4927, 5013), 'matplotlib.pyplot.hist', 'plt.hist', (['self.self_output'], 
{'bins': 'self.bins', 'align': '"""mid"""', 'normed': '(True)', 'label': 'label'}), "(self.self_output, bins=self.bins, align='mid', normed=True, label=\n label)\n", (4935, 5013), True, 'import matplotlib.pyplot as plt\n'), ((5101, 5179), 'matplotlib.pyplot.hist', 'plt.hist', (['self.self_ligand'], {'bins': 'bins', 'align': '"""mid"""', 'normed': '(True)', 'label': '"""P(Ls)"""'}), "(self.self_ligand, bins=bins, align='mid', normed=True, label='P(Ls)')\n", (5109, 5179), True, 'import matplotlib.pyplot as plt\n'), ((7597, 7654), 'numpy.trapz', 'np.trapz', (['(h_o + h_o_i_term_1 + h_o_i_term_2)'], {'dx': 'bin_width'}), '(h_o + h_o_i_term_1 + h_o_i_term_2, dx=bin_width)\n', (7605, 7654), True, 'import numpy as np\n'), ((868, 917), 'post_process.load', 'load', (["(foreign_directory + 'sample_0/column_names')"], {}), "(foreign_directory + 'sample_0/column_names')\n", (872, 917), False, 'from post_process import load\n'), ((1002, 1052), 'os.path.exists', 'os.path.exists', (["(foreign_directory + 'column_names')"], {}), "(foreign_directory + 'column_names')\n", (1016, 1052), False, 'import os\n'), ((1350, 1396), 'post_process.load', 'load', (["(self_directory + 'sample_0/column_names')"], {}), "(self_directory + 'sample_0/column_names')\n", (1354, 1396), False, 'from post_process import load\n'), ((1475, 1522), 'os.path.exists', 'os.path.exists', (["(self_directory + 'column_names')"], {}), "(self_directory + 'column_names')\n", (1489, 1522), False, 'import os\n'), ((3080, 3144), 'os.path.exists', 'os.path.exists', (["(self.foreign_directory + 'sample_0/column_names')"], {}), "(self.foreign_directory + 'sample_0/column_names')\n", (3094, 3144), False, 'import os\n'), ((3166, 3221), 'os.path.exists', 'os.path.exists', (["(self.foreign_directory + 'column_names')"], {}), "(self.foreign_directory + 'column_names')\n", (3180, 3221), False, 'import os\n'), ((4578, 4639), 'os.path.exists', 'os.path.exists', (["(self.self_directory + 'sample_0/column_names')"], {}), 
"(self.self_directory + 'sample_0/column_names')\n", (4592, 4639), False, 'import os\n'), ((4661, 4713), 'os.path.exists', 'os.path.exists', (["(self.self_directory + 'column_names')"], {}), "(self.self_directory + 'column_names')\n", (4675, 4713), False, 'import os\n'), ((5970, 5997), 'numpy.trapz', 'np.trapz', (['p_O'], {'dx': 'bin_width'}), '(p_O, dx=bin_width)\n', (5978, 5997), True, 'import numpy as np\n'), ((6511, 6538), 'numpy.trapz', 'np.trapz', (['p_O'], {'dx': 'bin_width'}), '(p_O, dx=bin_width)\n', (6519, 6538), True, 'import numpy as np\n'), ((6774, 6819), 'numpy.trapz', 'np.trapz', (['(term_1_c0 + term_2_d0)'], {'dx': 'bin_width'}), '(term_1_c0 + term_2_d0, dx=bin_width)\n', (6782, 6819), True, 'import numpy as np\n'), ((1137, 1177), 'post_process.load', 'load', (["(foreign_directory + 'column_names')"], {}), "(foreign_directory + 'column_names')\n", (1141, 1177), False, 'from post_process import load\n'), ((1555, 1592), 'post_process.load', 'load', (["(self_directory + 'column_names')"], {}), "(self_directory + 'column_names')\n", (1559, 1592), False, 'from post_process import load\n'), ((7424, 7436), 'numpy.log2', 'np.log2', (['p_O'], {}), '(p_O)\n', (7431, 7436), True, 'import numpy as np\n'), ((7492, 7509), 'numpy.log2', 'np.log2', (['count_cn'], {}), '(count_cn)\n', (7499, 7509), True, 'import numpy as np\n'), ((7565, 7582), 'numpy.log2', 'np.log2', (['count_dn'], {}), '(count_dn)\n', (7572, 7582), True, 'import numpy as np\n'), ((6653, 6676), 'numpy.log2', 'np.log2', (['(count_cn / p_O)'], {}), '(count_cn / p_O)\n', (6660, 6676), True, 'import numpy as np\n'), ((6733, 6756), 'numpy.log2', 'np.log2', (['(count_dn / p_O)'], {}), '(count_dn / p_O)\n', (6740, 6756), True, 'import numpy as np\n')] |
from __future__ import print_function
import matplotlib as plt
import numpy as np
from skimage.io import imread
from skimage import exposure, color
from skimage.transform import resize
import keras
from keras import backend as K
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def img_gen(img, zca=False, rotation=0., w_shift=0., h_shift=0., shear=0., zoom=0., h_flip=False, v_flip=False, preprocess_fcn=None, batch_size=9):
    """Generate augmented variants of *img* and save a 3x3 preview grid.

    Parameters mirror keras.preprocessing.image.ImageDataGenerator options;
    *preprocess_fcn* is applied to each sample before augmentation.
    *batch_size* controls how many augmented images are drawn.  Note the
    preview grid has 9 cells (``plt.subplot(33x)``), so values above 9 will
    overflow the grid.  The figure is written to 'img/trans/cats.png'
    (hard-coded output path).
    """
    datagen = ImageDataGenerator(zca_whitening=zca,
                                 rotation_range=rotation,
                                 width_shift_range=w_shift,
                                 height_shift_range=h_shift,
                                 shear_range=shear,
                                 zoom_range=zoom,
                                 fill_mode='nearest',
                                 horizontal_flip=h_flip,
                                 vertical_flip=v_flip,
                                 preprocessing_function=preprocess_fcn,
                                 data_format=K.image_data_format())
    datagen.fit(img)
    i = 0
    # BUG FIX: the original passed batch_size=9 here, silently ignoring the
    # caller-supplied batch_size parameter.
    for img_batch in datagen.flow(img, batch_size=batch_size, shuffle=False):
        for img in img_batch:
            plt.subplot(330 + 1 + i)
            plt.imshow(img)
            i += 1
        if i >= batch_size:
            break
    # plt.show()
    plt.savefig('img/trans/cats.png')
# Load the demo image and show it.
img = imread('img/cat.jpg')
plt.imshow(img)
# plt.show()
# reshape it to prepare for data generator
img = img.astype('float32')
img /= 255
h_dim = np.shape(img)[0]
w_dim = np.shape(img)[1]
num_channel = np.shape(img)[2]
# ImageDataGenerator expects a 4-D batch: (samples, height, width, channels).
img = img.reshape(1, h_dim, w_dim, num_channel)
print(img.shape)
# generate images using function imgGen
img_gen(img, rotation=30, h_shift=0.5)
def contrast_stretching(img):
    """Stretch image contrast to the 2nd-98th percentile intensity range."""
    lo, hi = np.percentile(img, (2, 98))
    return exposure.rescale_intensity(img, in_range=(lo, hi))
def HE(img):
    """Apply global histogram equalization to *img*."""
    return exposure.equalize_hist(img)
def AHE(img):
    """Apply contrast-limited adaptive histogram equalization to *img*."""
    return exposure.equalize_adapthist(img, clip_limit=0.03)
# Preview the three preprocessing functions on the demo image.
img_gen(img, rotation=30, h_shift=0.5, preprocess_fcn = contrast_stretching)
img_gen(img, rotation=30, h_shift=0.5, preprocess_fcn = HE)
img_gen(img, rotation=30, h_shift=0.5, preprocess_fcn = AHE)
# Training hyper-parameters for the binary cat-vs-dog CIFAR-10 subset.
batch_size = 64
num_classes = 2
epochs = 10
img_rows, img_cols = 32, 32
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('X_train shape: ', x_train.shape)
# Only look at cats [=3] and dogs [=5]
train_picks = np.ravel(np.logical_or(y_train==3,y_train==5))
test_picks = np.ravel(np.logical_or(y_test==3,y_test==5))
# Relabel: dog (class 5) -> 1, cat (class 3) -> 0.
y_train = np.array(y_train[train_picks]==5,dtype=int)
y_test = np.array(y_test[test_picks]==5,dtype=int)
x_train = x_train[train_picks]
x_test = x_test[test_picks]
# Arrange the channel axis to match the backend's convention.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
    input_shape = (3,img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
    input_shape = (img_rows, img_cols, 3)
# Normalise pixel values to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(np.ravel(y_train), num_classes)
y_test = keras.utils.to_categorical(np.ravel(y_test), num_classes)
# Small CNN: two conv layers, max-pool, then a dense head with dropout.
model = Sequential()
model.add(Conv2D(4, kernel_size=(3, 3),activation='relu',input_shape=input_shape))
model.add(Conv2D(8, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
# NOTE(review): keras.categorical_crossentropy is likely a typo for
# keras.losses.categorical_crossentropy -- confirm against the keras version in use.
model.compile(loss=keras.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
augmentation=True
if augmentation:
    # Augment only with horizontal flips + AHE preprocessing.
    datagen = ImageDataGenerator(
        rotation_range=0,
        width_shift_range=0,
        height_shift_range=0,
        shear_range=0,
        zoom_range=0,
        horizontal_flip=True,
        fill_mode='nearest',
        # preprocessing_function = contrast_adjusment,
        # preprocessing_function = HE,
        preprocessing_function = AHE)
    datagen.fit(x_train)
    print("Running augmented training now, with augmentation")
    # NOTE(review): steps_per_epoch is usually x_train.shape[0] // batch_size;
    # the division appears to have been commented out -- confirm intent.
    history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),steps_per_epoch=x_train.shape[0], # batch_size,
                        epochs=epochs,
                        validation_data=(x_test, y_test))
else:
    print("Running regular training, no augmentation")
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,verbose=1, validation_data=(x_test, y_test))
# Plot training vs validation accuracy per epoch.
plt.plot(history.epoch,history.history['val_acc'],'-o',label='validation')
plt.plot(history.epoch,history.history['acc'],'-o',label='training')
plt.legend(loc=0)
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.grid(True)
| [
"keras.layers.Conv2D",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"keras.preprocessing.image.ImageDataGenerator",
"skimage.exposure.equalize_adapthist",
"numpy.array",
"keras.layers.Dense",
"keras.optimizers.Adadelta",
"matplotlib.pyplot.imshow",
"keras.backend.image_data_format",
"ma... | [((472, 493), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (486, 493), False, 'import matplotlib\n'), ((1701, 1722), 'skimage.io.imread', 'imread', (['"""img/cat.jpg"""'], {}), "('img/cat.jpg')\n", (1707, 1722), False, 'from skimage.io import imread\n'), ((1723, 1738), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1733, 1738), True, 'import matplotlib.pyplot as plt\n'), ((2823, 2842), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (2840, 2842), False, 'from keras.datasets import cifar10\n'), ((3055, 3101), 'numpy.array', 'np.array', (['(y_train[train_picks] == 5)'], {'dtype': 'int'}), '(y_train[train_picks] == 5, dtype=int)\n', (3063, 3101), True, 'import numpy as np\n'), ((3108, 3152), 'numpy.array', 'np.array', (['(y_test[test_picks] == 5)'], {'dtype': 'int'}), '(y_test[test_picks] == 5, dtype=int)\n', (3116, 3152), True, 'import numpy as np\n'), ((4031, 4043), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4041, 4043), False, 'from keras.models import Sequential\n'), ((5360, 5437), 'matplotlib.pyplot.plot', 'plt.plot', (['history.epoch', "history.history['val_acc']", '"""-o"""'], {'label': '"""validation"""'}), "(history.epoch, history.history['val_acc'], '-o', label='validation')\n", (5368, 5437), True, 'import matplotlib.pyplot as plt\n'), ((5435, 5506), 'matplotlib.pyplot.plot', 'plt.plot', (['history.epoch', "history.history['acc']", '"""-o"""'], {'label': '"""training"""'}), "(history.epoch, history.history['acc'], '-o', label='training')\n", (5443, 5506), True, 'import matplotlib.pyplot as plt\n'), ((5504, 5521), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (5514, 5521), True, 'import matplotlib.pyplot as plt\n'), ((5522, 5542), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (5532, 5542), True, 'import matplotlib.pyplot as plt\n'), ((5543, 5565), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""accuracy"""'], {}), "('accuracy')\n", (5553, 5565), True, 'import matplotlib.pyplot as plt\n'), ((5566, 5580), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5574, 5580), True, 'import matplotlib.pyplot as plt\n'), ((1842, 1855), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1850, 1855), True, 'import numpy as np\n'), ((1867, 1880), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1875, 1880), True, 'import numpy as np\n'), ((1899, 1912), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1907, 1912), True, 'import numpy as np\n'), ((2136, 2163), 'numpy.percentile', 'np.percentile', (['img', '(2, 98)'], {}), '(img, (2, 98))\n', (2149, 2163), True, 'import numpy as np\n'), ((2178, 2229), 'skimage.exposure.rescale_intensity', 'exposure.rescale_intensity', (['img'], {'in_range': '(p2, p98)'}), '(img, in_range=(p2, p98))\n', (2204, 2229), False, 'from skimage import exposure, color\n'), ((2307, 2334), 'skimage.exposure.equalize_hist', 'exposure.equalize_hist', (['img'], {}), '(img)\n', (2329, 2334), False, 'from skimage import exposure, color\n'), ((2437, 2486), 'skimage.exposure.equalize_adapthist', 'exposure.equalize_adapthist', (['img'], {'clip_limit': '(0.03)'}), '(img, clip_limit=0.03)\n', (2464, 2486), False, 'from skimage import exposure, color\n'), ((2946, 2987), 'numpy.logical_or', 'np.logical_or', (['(y_train == 3)', '(y_train == 5)'], {}), '(y_train == 3, y_train == 5)\n', (2959, 2987), True, 'import numpy as np\n'), ((3006, 3045), 'numpy.logical_or', 'np.logical_or', (['(y_test == 3)', '(y_test == 5)'], {}), '(y_test == 3, y_test == 5)\n', (3019, 3045), True, 'import numpy as np\n'), ((3214, 3235), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (3233, 3235), True, 'from keras import backend as K\n'), ((3923, 3940), 'numpy.ravel', 'np.ravel', (['y_train'], {}), '(y_train)\n', (3931, 3940), True, 'import numpy as np\n'), ((3991, 4007), 'numpy.ravel', 'np.ravel', (['y_test'], {}), 
'(y_test)\n', (3999, 4007), True, 'import numpy as np\n'), ((4054, 4127), 'keras.layers.Conv2D', 'Conv2D', (['(4)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(4, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (4060, 4127), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((4137, 4173), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(3, 3)'], {'activation': '"""relu"""'}), "(8, (3, 3), activation='relu')\n", (4143, 4173), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((4185, 4215), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4197, 4215), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((4227, 4240), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (4234, 4240), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((4252, 4261), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4259, 4261), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((4273, 4301), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (4278, 4301), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((4313, 4325), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4320, 4325), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((4337, 4367), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (4342, 4367), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((4529, 4716), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(0)', 'width_shift_range': '(0)', 'height_shift_range': '(0)', 'shear_range': '(0)', 'zoom_range': '(0)', 'horizontal_flip': '(True)', 'fill_mode': '"""nearest"""', 'preprocessing_function': 'AHE'}), "(rotation_range=0, width_shift_range=0,\n height_shift_range=0, shear_range=0, zoom_range=0, 
horizontal_flip=True,\n fill_mode='nearest', preprocessing_function=AHE)\n", (4547, 4716), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1660, 1693), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""img/trans/cats.png"""'], {}), "('img/trans/cats.png')\n", (1671, 1693), True, 'import matplotlib.pyplot as plt\n'), ((4431, 4458), 'keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (4456, 4458), False, 'import keras\n'), ((1348, 1369), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (1367, 1369), True, 'from keras import backend as K\n'), ((1513, 1537), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(330 + 1 + i)'], {}), '(330 + 1 + i)\n', (1524, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1565), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1560, 1565), True, 'import matplotlib.pyplot as plt\n')] |
import numpy
def scale_array(arr, scale):
    """Return *arr* magnified by an integer factor along its first two axes.

    Works for 2-D greyscale images and 3-D (H, W, C) color images; each
    source pixel becomes a ``scale`` x ``scale`` block in the result.
    ``scale == 1`` returns *arr* itself (no copy).

    Raises:
        RuntimeError: if ``scale`` is not a positive integer value.
        NotImplementedError: for arrays that are neither 2-D nor 3-D.
    """
    if (scale != int(scale)) or (scale < 1):
        raise RuntimeError("scale={!r} must be a positive integer".format(scale))
    elif scale == 1:
        return arr
    if len(arr.shape) not in (2, 3):
        raise NotImplementedError("Only 2D greyscale or 2D color image "
                                  "scaling is implemented")
    # numpy.repeat replicates rows then columns in native code -- equivalent
    # to the original per-pixel copy loops but far faster; dtype is preserved.
    # int() also accepts integral floats (e.g. 2.0), which the original loops
    # crashed on inside range().
    factor = int(scale)
    return numpy.repeat(numpy.repeat(arr, factor, axis=0), factor, axis=1)
def colorize_greyscale(arr, color, bg_color=None):
    """Map a greyscale array (values in [0, 1]) onto a color gradient.

    Each output value interpolates between *bg_color* (at intensity 0) and
    *color* (at intensity 1), rounded to uint8.  A scalar *color* yields a
    2-D result; a color tuple yields an (H, W, components) result.
    """
    pixel_type = numpy.uint8
    n_rows, n_cols = arr.shape[0], arr.shape[1]
    try:
        n_components = len(color)
    except TypeError:
        # Scalar color (can't be enumerated): single-channel output.
        if bg_color is None:
            bg_color = 0
        result = numpy.zeros((n_rows, n_cols), dtype=pixel_type)
        for r in range(n_rows):
            for c in range(n_cols):
                shade = arr[r, c] * (color - bg_color) + bg_color
                result[r, c] = pixel_type(round(shade))
        return result
    # Tuple color: one channel per component.
    if bg_color is None:
        bg_color = (0, 0, 0)
    result = numpy.zeros((n_rows, n_cols, n_components), dtype=pixel_type)
    for r in range(n_rows):
        for c in range(n_cols):
            for comp, (fg, bg) in enumerate(zip(color, bg_color)):
                result[r, c, comp] = pixel_type(round(arr[r, c] * (fg - bg) + bg))
    return result
def render_array_on_img(arr, img, x, y):
    """Blit *arr* onto *img* at pixel position (x, y), clipping at the edges.

    Both arrays must be of the same kind (2-D greyscale or 3-D color) and,
    for color, share the same channel count.  *img* is modified in place.

    Raises:
        RuntimeError: on mismatched dimensionality or color depth.
    """
    if len(arr.shape) != len(img.shape):
        raise RuntimeError("arr and img must have same color type")
    elif (len(arr.shape) == 3) and (arr.shape[2] != img.shape[2]):
        raise RuntimeError("arr and img must have same color depth")
    for row in range(arr.shape[0]):
        for col in range(arr.shape[1]):
            if (0 <= (col + x) < img.shape[1]) and (
                    0 <= (row + y) < img.shape[0]):
                if len(arr.shape) == 3:
                    # BUG FIX: the original iterated range(len(arr.shape)),
                    # which is always range(3) for a 3-D array -- wrong (or an
                    # IndexError) for any channel count other than 3.  Iterate
                    # the actual channel axis instead.
                    for component in range(arr.shape[2]):
                        img[row + y, col + x, component] = arr[row, col, component]
                else:
                    img[row + y, col + x] = arr[row, col]
class Glyph(object):
    """A bitmap character: a 2-D numpy array plus a cache of scaled copies."""

    def __init__(self, array):
        # Scale 1 is the source bitmap; larger scales are computed lazily.
        self.data = {1: array}
        self.height = array.shape[0]
        self.width = array.shape[1]

    def scaled(self, scale):
        """Return the glyph bitmap magnified by *scale*, caching the result."""
        if scale not in self.data:
            self.data[scale] = scale_array(self.data[1], scale)
        return self.data[scale]

    def render(self, img, x, y, scale=1, color=None, bg_color=None):
        """Draw the glyph onto *img* at (x, y), optionally scaled/colorized."""
        bitmap = self.scaled(scale)
        if color is not None:
            bitmap = colorize_greyscale(bitmap, color, bg_color)
        return render_array_on_img(bitmap, img, x, y)
# 4 pixels below base line + 7 pixels for lower case + 3 pixels for ascenders
# Bitmap font table: maps a character to a 14-row Glyph whose 1-cells are
# "ink".  Only the characters needed by the demo are defined.
FONT = {
    "a": Glyph(numpy.array((
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 1, 1, 1, 0, ),
        (0, 0, 0, 0, 1, ),
        (0, 0, 0, 0, 1, ),
        (0, 1, 1, 1, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (0, 1, 1, 1, 1, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),))),
    "b": Glyph(numpy.array((
        (1, 0, 0, 0, 0, ),
        (1, 0, 0, 0, 0, ),
        (1, 0, 0, 0, 0, ),
        (1, 1, 1, 1, 0, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 1, 1, 1, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),))),
    "c": Glyph(numpy.array((
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 1, 1, 1, 0, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 0, ),
        (1, 0, 0, 0, 0, ),
        (1, 0, 0, 0, 0, ),
        (1, 0, 0, 0, 1, ),
        (0, 1, 1, 1, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),))),
    "d": Glyph(numpy.array((
        (0, 0, 0, 0, 1, ),
        (0, 0, 0, 0, 1, ),
        (0, 0, 0, 0, 1, ),
        (0, 1, 1, 1, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (0, 1, 1, 1, 1, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),))),
    "e": Glyph(numpy.array((
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 1, 1, 1, 0, ),
        (1, 0, 0, 0, 1, ),
        (1, 0, 0, 0, 1, ),
        (1, 1, 1, 1, 1, ),
        (1, 0, 0, 0, 0, ),
        (1, 0, 0, 0, 0, ),
        (0, 1, 1, 1, 1, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),
        (0, 0, 0, 0, 0, ),))),
    "i": Glyph(numpy.array((
        (0, 0, 0, ),
        (0, 1, 0, ),
        (0, 0, 0, ),
        (1, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (1, 1, 1, ),
        (0, 0, 0, ),
        (0, 0, 0, ),
        (0, 0, 0, ),
        (0, 0, 0, ),))),
    "t": Glyph(numpy.array((
        (0, 0, 0, ),
        (0, 1, 0, ),
        (1, 1, 1, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 0, 1, ),
        (0, 0, 0, ),
        (0, 0, 0, ),
        (0, 0, 0, ),
        (0, 0, 0, ),))),
    "l": Glyph(numpy.array((
        (1, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (0, 1, 0, ),
        (1, 1, 1, ),
        (0, 0, 0, ),
        (0, 0, 0, ),
        (0, 0, 0, ),
        (0, 0, 0, ),))),
    " ": Glyph(numpy.array((
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),
        (0, 0, 0, 0, ),))),
}
def render_string_on_img(str_, img, x, y, scale=1, color=None, font=FONT, line_spacing=1, glyph_spacing=1, bg_color=None):
    """Render *str_* onto *img* starting at (x, y) using *font* glyphs.

    A newline moves the pen back to the starting column and down by the
    tallest glyph drawn so far plus *line_spacing*.  Returns *img*.
    """
    left_margin = x
    tallest = 0
    for ch in str_:
        if ch == '\n':
            # Carriage return + line feed.
            y += tallest + line_spacing
            x = left_margin
            continue
        glyph = font[ch]
        glyph.render(img, x, y, scale, color, bg_color=bg_color)
        tallest = max(tallest, glyph.height * scale)
        x += glyph.width * scale + glyph_spacing
    return img
| [
"numpy.array",
"numpy.zeros"
] | [((255, 329), 'numpy.zeros', 'numpy.zeros', (['(arr.shape[0] * scale, arr.shape[1] * scale)'], {'dtype': 'arr.dtype'}), '((arr.shape[0] * scale, arr.shape[1] * scale), dtype=arr.dtype)\n', (266, 329), False, 'import numpy\n'), ((3926, 4189), 'numpy.array', 'numpy.array', (['((0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 1, 1, 1, 0), (0, 0,\n 0, 0, 1), (0, 0, 0, 0, 1), (0, 1, 1, 1, 1), (1, 0, 0, 0, 1), (1, 0, 0, \n 0, 1), (0, 1, 1, 1, 1), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, \n 0), (0, 0, 0, 0, 0))'], {}), '(((0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 1, 1, 1,\n 0), (0, 0, 0, 0, 1), (0, 0, 0, 0, 1), (0, 1, 1, 1, 1), (1, 0, 0, 0, 1),\n (1, 0, 0, 0, 1), (0, 1, 1, 1, 1), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0,\n 0, 0, 0, 0), (0, 0, 0, 0, 0)))\n', (3937, 4189), False, 'import numpy\n'), ((4337, 4600), 'numpy.array', 'numpy.array', (['((1, 0, 0, 0, 0), (1, 0, 0, 0, 0), (1, 0, 0, 0, 0), (1, 1, 1, 1, 0), (1, 0,\n 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, \n 0, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, \n 0), (0, 0, 0, 0, 0))'], {}), '(((1, 0, 0, 0, 0), (1, 0, 0, 0, 0), (1, 0, 0, 0, 0), (1, 1, 1, 1,\n 0), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1),\n (1, 0, 0, 0, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0,\n 0, 0, 0, 0), (0, 0, 0, 0, 0)))\n', (4348, 4600), False, 'import numpy\n'), ((4748, 5011), 'numpy.array', 'numpy.array', (['((0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 1, 1, 1, 0), (1, 0,\n 0, 0, 1), (1, 0, 0, 0, 0), (1, 0, 0, 0, 0), (1, 0, 0, 0, 0), (1, 0, 0, \n 0, 1), (0, 1, 1, 1, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, \n 0), (0, 0, 0, 0, 0))'], {}), '(((0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 1, 1, 1,\n 0), (1, 0, 0, 0, 1), (1, 0, 0, 0, 0), (1, 0, 0, 0, 0), (1, 0, 0, 0, 0),\n (1, 0, 0, 0, 1), (0, 1, 1, 1, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0,\n 0, 0, 0, 0), (0, 0, 0, 0, 0)))\n', (4759, 5011), False, 
'import numpy\n'), ((5159, 5422), 'numpy.array', 'numpy.array', (['((0, 0, 0, 0, 1), (0, 0, 0, 0, 1), (0, 0, 0, 0, 1), (0, 1, 1, 1, 1), (1, 0,\n 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, \n 0, 1), (0, 1, 1, 1, 1), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, \n 0), (0, 0, 0, 0, 0))'], {}), '(((0, 0, 0, 0, 1), (0, 0, 0, 0, 1), (0, 0, 0, 0, 1), (0, 1, 1, 1,\n 1), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1),\n (1, 0, 0, 0, 1), (0, 1, 1, 1, 1), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0,\n 0, 0, 0, 0), (0, 0, 0, 0, 0)))\n', (5170, 5422), False, 'import numpy\n'), ((5570, 5833), 'numpy.array', 'numpy.array', (['((0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 1, 1, 1, 0), (1, 0,\n 0, 0, 1), (1, 0, 0, 0, 1), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0), (1, 0, 0, \n 0, 0), (0, 1, 1, 1, 1), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, \n 0), (0, 0, 0, 0, 0))'], {}), '(((0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0, 1, 1, 1,\n 0), (1, 0, 0, 0, 1), (1, 0, 0, 0, 1), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 0, 0, 0, 0), (0, 1, 1, 1, 1), (0, 0, 0, 0, 0), (0, 0, 0, 0, 0), (0,\n 0, 0, 0, 0), (0, 0, 0, 0, 0)))\n', (5581, 5833), False, 'import numpy\n'), ((5981, 6158), 'numpy.array', 'numpy.array', (['((0, 0, 0), (0, 1, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0\n ), (0, 1, 0), (0, 1, 0), (1, 1, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0,\n 0, 0))'], {}), '(((0, 0, 0), (0, 1, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (0, 1, \n 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (1, 1, 1), (0, 0, 0), (0, 0, 0), (\n 0, 0, 0), (0, 0, 0)))\n', (5992, 6158), False, 'import numpy\n'), ((6308, 6485), 'numpy.array', 'numpy.array', (['((0, 0, 0), (0, 1, 0), (1, 1, 1), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0\n ), (0, 1, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0,\n 0, 0))'], {}), '(((0, 0, 0), (0, 1, 0), (1, 1, 1), (0, 1, 0), (0, 1, 0), (0, 1, \n 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0), (0, 0, 0), (\n 0, 0, 
0), (0, 0, 0)))\n', (6319, 6485), False, 'import numpy\n'), ((6635, 6812), 'numpy.array', 'numpy.array', (['((1, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0\n ), (0, 1, 0), (0, 1, 0), (1, 1, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0,\n 0, 0))'], {}), '(((1, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, \n 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (1, 1, 1), (0, 0, 0), (0, 0, 0), (\n 0, 0, 0), (0, 0, 0)))\n', (6646, 6812), False, 'import numpy\n'), ((6962, 7179), 'numpy.array', 'numpy.array', (['((0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, \n 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0,\n 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0))'], {}), '(((0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0,\n 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0,\n 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0)))\n', (6973, 7179), False, 'import numpy\n'), ((673, 765), 'numpy.zeros', 'numpy.zeros', (['(arr.shape[0] * scale, arr.shape[1] * scale, arr.shape[2])'], {'dtype': 'arr.dtype'}), '((arr.shape[0] * scale, arr.shape[1] * scale, arr.shape[2]),\n dtype=arr.dtype)\n', (684, 765), False, 'import numpy\n'), ((1724, 1781), 'numpy.zeros', 'numpy.zeros', (['(arr.shape[0], arr.shape[1])'], {'dtype': 'basetype'}), '((arr.shape[0], arr.shape[1]), dtype=basetype)\n', (1735, 1781), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request
import os,shutil
import numpy as np
import json
import collections
import time
# import tensorflow as tf
import argparse
import sys
import face_model
from flask_cors import *
import cv2
from annoy import AnnoyIndex
import datetime
import random
import io
# 特征入库操作 数据库
from flaskext.mysql import MySQL
# Directories scanned when mapping DB image names back to file-server URLs.
SERVER_DIR_KEYS = ['./images/test/test/']
# Temporary directory for uploaded search images.
SERARCH_TMP_DIR = './images/searchImg'
mysql = MySQL()
app = Flask(__name__)
CORS(app, resources=r'/*')
# Default no-cache / CORS headers (defined here; attach to responses as needed).
headers = {
    'Cache-Control' : 'no-cache, no-store, must-revalidate',
    'Pragma' : 'no-cache' ,
    'Expires': '0' ,
    'Access-Control-Allow-Origin' : '*',
    'Access-Control-Allow-Methods': 'GET, POST, PATCH, PUT, DELETE, OPTIONS',
    'Access-Control-Allow-Headers': 'Origin, Content-Type, X-Auth-Token'
}
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = '1'
app.config['MYSQL_DATABASE_DB'] = 'AIMEET'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
# Shared connection/cursor used by every route; remember to call
# connect.commit() after INSERT/UPDATE/DELETE.
# NOTE(review): a single module-level cursor is not safe under concurrent
# requests -- consider per-request connections.
connect = mysql.connect()
cursor = connect.cursor()
class JsonEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars and arrays to plain Python."""

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        return super(JsonEncoder, self).default(obj)
class DateEncoder(json.JSONEncoder):
    """JSON encoder that renders datetimes as 'YYYY-MM-DD HH:MM:SS' strings."""

    def default(self, obj):
        if not isinstance(obj, datetime.datetime):
            return json.JSONEncoder.default(self, obj)
        return obj.strftime("%Y-%m-%d %H:%M:%S")
# Compute similarity between a source feature and each target feature.
def dot(source_feature, target_feature):
    """Return dot-product similarities of *source_feature* against each
    target embedding, clamping negative scores to 0.

    Args:
        source_feature: embedding of shape (1, D); a 1-D (D,) vector also works.
        target_feature: sequence of (D,) embeddings.

    Returns:
        list of float scores, in the same order as *target_feature*.
    """
    # One vectorized matrix product replaces the original per-row Python loop.
    sims = np.dot(source_feature, np.asarray(target_feature).T).ravel()
    return np.clip(sims, 0, None).tolist()
# @app.route("/")
# def index():
# return render_template('index.html')
# Get the number of meeting rooms.
@app.route("/getRoomNum")
def getRoomNum():
    """Return {"success": 0, "roomNumber": <count>} as JSON."""
    payload = collections.OrderedDict()
    cursor.execute("SELECT * from roomInfo ")
    rows = cursor.fetchall()
    payload["success"] = 0
    payload["roomNumber"] = len(rows)
    return json.dumps(payload)
# Get information for all meeting rooms.
@app.route("/getRoomInfo")
def getRoomInfo():
    """Return every roomInfo record as JSON under the "room number" key."""
    payload = collections.OrderedDict()
    cursor.execute("SELECT * from roomInfo ")
    rooms = cursor.fetchall()
    payload["success"] = 0
    payload["room number"] = rooms
    return json.dumps(payload)
# Filter candidate rooms by constraints and refresh the user's one-off
# booking info (attendee count + time window) in userinfo.
@app.route("/previewRoom",methods=["POST"])
def previewRoom():
    """Update the caller's booking constraints, then list matching rooms.

    Expects JSON: user_id, att_nums, s_time, e_time.
    Returns {"success": 0, "data": <room rows>}.
    """
    success = 0
    json_result = collections.OrderedDict()
    data = request.get_json() # parse the JSON request body
    # account= request.json['account'] # dict-style parameter access
    print(data)
    user_id = request.json['user_id']
    att_nums = request.json['att_nums']
    s_time = request.json['s_time']
    e_time = request.json['e_time']
    # print(user_id,att_nums,s_time,e_time)
    # First update the user's userinfo row: attendee count and time window
    # differ per request, so historical booking info is not kept.
    sql = "update userinfo set att_nums=%s,s_time=%s,e_time=%s where userid = %s"
    cursor.execute(sql, (att_nums,s_time,e_time,user_id))
    connect.commit()
    # sql = "select room_id,s_time,e_time from used_room where s_time>%s or e_time<%s and room_id in (select room_id from roomInfo where att_nums >= %s)" #%int(att_nums)
    # Rooms that are large enough, or booked only outside the requested window.
    sql2 = "select * from roomInfo where room_id in (select room_id from roomInfo where att_nums >= %s or room_id in (select room_id from used_room where (s_time>%s or e_time<%s) and att_nums>=%s) order by roomInfo.att_nums)" #%int(att_nums)
    cursor.execute(sql2,(att_nums,e_time,s_time,att_nums))
    data = cursor.fetchall()
    json_result["success"] = success
    json_result["data"] = data
    # return json.dumps(json_result,cls=DateEncoder)
    return json.dumps(json_result)
# Book a meeting room.
@app.route("/bookRoom",methods=["POST"])
def bookRoom():
    """Insert a used_room row for the given user/room/time window."""
    payload = collections.OrderedDict()
    user_id = request.form.get('user_id')
    room_id = request.form.get('room_id')
    s_time = request.form.get('s_time')
    e_time = request.form.get('e_time')
    # NOTE(review): 'flag' is read but never used -- confirm whether the
    # literal 1 in the insert below should be this value.
    flag = request.form.get('flag')
    cursor.execute('insert into used_room values(%s,%s,%s,%s,%s,%s)',([0,user_id,room_id,s_time,e_time,1]))
    connect.commit()
    payload["success"] = 200
    return json.dumps(payload)
# Get the size of the face database.
@app.route("/getDbSize")
def getDbSize():
    """Return the FaceFeature row count as JSON.

    Response: {"success": 200, "length": <fetchall result>} on success,
    {"success": 1} on failure.
    """
    success = 200
    json_result = collections.OrderedDict()
    try:
        cursor.execute("select count(*) from FaceFeature ")
        _len = cursor.fetchall()
        # BUG FIX: the original called connect.close() here, closing the
        # module-wide MySQL connection shared by every route and breaking
        # all subsequent requests.
        json_result["success"] = success
        print('The number of face in DB is: ',_len)
        json_result["length"] = _len
    except Exception as e:
        # BUG FIX: on failure the original returned an empty "{}" (and its
        # local 'status' variable was never surfaced); report the failure.
        print("catch error : ",str(e))
        json_result.clear()
        json_result["success"] = 1
    return json.dumps(json_result)
# Face enrollment endpoint.
# Response contract: "success" is the request status (0 success / 1 failure);
# "status " is the enrollment state: "added" on success, "addFailed" on failure.
# {
#     "success": 0,
#     "status ": "added"
# }
@app.route("/addFace", methods=['POST'])
def addFace():
    """Enroll a face: save the upload, store its embedding in MySQL and
    rebuild the Annoy index on disk.

    Form fields: 'image' (file), 'name' (person name).
    """
    success = 200
    status = "added"
    json_result = collections.OrderedDict()
    file = request.files.get('image')
    name = request.form.get('name')
    print(name)
    if file is None:
        status = "badrRequest"
        json_result["success"] = success
        json_result["status "] = status
        return json.dumps(json_result)
    # Timestamp + random suffix to avoid filename collisions.
    filename = str(datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"))+str(random.randint(0,100))+'.jpg'
    # Save directory is hard-coded; should be made configurable.
    filepath = os.path.join('./images/test','test',filename)
    print('addFace file save path: ',filepath)
    file.save(filepath)
    try:
        # Compute the face embedding (model expects RGB input).
        image = cv2.imread(filepath,cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
        face_img = model.get_aligned_face(image)
        if face_img is None:
            status = "noFace"
        else:
            feature = model.get_feature(face_img)
            bytes_feature = feature.tostring()
            cursor.execute('insert into FaceFeature values(%s,%s,%s,%s)',([0,name,bytes_feature,filename]))
            # Must commit after the INSERT or the change is not persisted.
            connect.commit()
            # In-memory feature cache, superseded by the database:
            # feature_list.append(feature)
            # print('feature_list length: ',len(feature_list))
            # Incremental index insert, superseded by the full rebuild below:
            # feature = feature.tolist()[0]
            # index.add_item(get_i(),feature)
            # add_i()
            # In-memory image path cache, superseded by the database:
            # image_list.append(filepath)
            # print('image_list length: ',len(image_list))
            # Rebuild and persist the Annoy model from the full table.
            # NOTE(review): this re-reads and re-indexes every row on each
            # enrollment (O(N) per add) -- consider incremental indexing.
            cursor.execute('select * from FaceFeature')
            data = cursor.fetchall()
            # Embeddings are 512-dimensional.
            index = AnnoyIndex(512)
            for faceId, faceName, feature, imgPath in data:
                dBFeature = np.frombuffer(feature,dtype=np.float32)
                # Add this embedding to the index under a sequential id.
                _dBFeature=dBFeature.tolist()
                index.add_item(get_i(),_dBFeature)
                add_i()
            index.build(512)
            index.save('faceModel.ann')
    except Exception as e:
        print("catch error : ",str(e))
        status = "addFailed"
        success = 1
    json_result["success"] = success
    json_result["status "] = status
    return json.dumps(json_result,cls=JsonEncoder)
# Face search endpoint.
@app.route("/faceIdentity", methods=['POST'])
def identity():
    """Identify the face in an uploaded image against the FaceFeature table.

    Saves the upload, extracts its embedding, fetches the 3 nearest stored
    embeddings from the Annoy index, keeps matches scoring above 0.7 and
    returns a JSON list of {name, score, imgPath} records.
    """
    success = 200
    status = "identited"
    json_result = collections.OrderedDict()
    # Matched image URLs, similarity scores and matched person names.
    sim_image = []
    sim = []
    sim_name = []
    result=[]
    file = request.files.get('image')
    if file is None:
        status = "badrRequest"
        json_result["success"] = success
        json_result["status "] = status
        return json.dumps(json_result)
    # Save the upload under a timestamp+random name to avoid collisions.
    filename = str(datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"))+str(random.randint(0,100))+'.jpg'
    filepath = os.path.join(SERARCH_TMP_DIR,filename)
    file.save(filepath)
    # Build the query embedding (model expects RGB input).
    image = cv2.imread(filepath, cv2.IMREAD_COLOR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    try:
        face_img = model.get_aligned_face(image)
        if face_img is None:
            status = "noFace"
        else:
            # Enrolled faces: faceId (unique), faceName, feature blob, image name.
            cursor.execute('select * from FaceFeature')
            data = cursor.fetchall()
            feature_list = []
            image_list = []
            faceInfo=[]
            for faceId, faceName, feature, imgPath in data:
                dBFeature = np.frombuffer(feature,dtype=np.float32)
                feature_list.append(dBFeature)
                image_list.append(imgPath)
                faceInfo.append({"faceId":faceId,"faceName":faceName,"imgPath":imgPath})
            print(faceInfo[0])
            print('image_list length: ',len(image_list))
            print('feature_list length: ',len(feature_list))
            feature = model.get_feature(face_img)
            source_feature = feature
            feature = feature.tolist()[0]
            # Query the persisted Annoy index for the 3 nearest embeddings.
            u = AnnoyIndex(512)
            u.load('faceModel.ann')
            I = u.get_nns_by_vector(feature,3)
            print(I)
            # Re-score the Annoy candidates with an exact dot product.
            target_feature = np.array(feature_list)[I]
            sim = dot(source_feature,target_feature)
            # BUG FIX: the original deleted from I and sim while enumerating
            # them; each deletion shifts the remaining indices, so the wrong
            # entries were dropped. Rebuild the filtered lists instead,
            # keeping only matches scoring above the 0.7 threshold.
            kept = [(nbr, score) for nbr, score in zip(I, sim) if score > 0.7]
            I = [nbr for nbr, _ in kept]
            sim = [score for _, score in kept]
            _sim_image = np.array(image_list)[I].tolist()
            _faceInfos = np.array(faceInfo)[I].tolist()
            # Map matched DB image names to public file-server URLs.
            for key in SERVER_DIR_KEYS:
                print(key)
                for _,_,files in os.walk(key):
                    for db_image in _sim_image:
                        if str(db_image) in files:
                            sim_image.append(args.file_server +'/'+key.split('images/')[1] + db_image)
            for idx, info in enumerate(_faceInfos):
                result.append({'name':info['faceName'],'score':sim[idx],'imgPath':sim_image[idx]})
                sim_name.append(info['faceName'])
    except Exception as e:
        print(str(e))
        success = 1
    # NOTE(review): json_result/success/status are populated above but the
    # response only carries the match list -- confirm clients don't expect
    # a success flag here.
    json_result["success"] = success
    return json.dumps(result,cls=JsonEncoder)
@app.route("/faceMeet", methods=['POST'])
def faceMeet():
success = 200
status = "identited"
json_result = collections.OrderedDict()
# 用于保存相似分数以及相似度,以及人员姓名
sim_image = []
sim = []
sim_name = []
file = request.json['image']
print(len(file))
# if file is None:
# status = "badrRequest"
# json_result["success"] = success
# json_result["status "] = status
# return json.dumps(json_result)
# # 写死保存目录,需修改
# filename = str(datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"))+str(random.randint(0,100))+'.jpg'
# filepath = os.path.join(SERARCH_TMP_DIR,filename)
# file.save(filepath)
# # 生成上传图片的特征编码
# image = cv2.imread(filepath, cv2.IMREAD_COLOR)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# try:
# face_img = model.get_aligned_face(image)
# if face_img is None:
# status = "noFace"
# else:
# cursor.execute('select * from FaceFeature')
# data = cursor.fetchall()
# # 从数据库读取数据的地方,获取入库的人脸信息
# # faceId 人员ID(系统生成,唯一)
# # faceName 人员名称
# # feature 提取的特征 blob
# # imgPath 入库图片的名称,用于返回识别结果的库的人脸
# feature_list = []
# image_list = []
# faceInfo=[]
# for faceId, faceName, feature, imgPath in data:
# dBFeature = np.frombuffer(feature,dtype=np.float32)
# feature_list.append(dBFeature)
# image_list.append(imgPath)
# faceInfo.append({"faceId":faceId,"faceName":faceName,"imgPath":imgPath})
# # 特征编码索引
# # _dBFeature=dBFeature.tolist()
# # index.add_item(faceId,_dBFeature)
# # add_i()
# print(faceInfo[0])
# print('image_list length: ',len(image_list))
# print('feature_list length: ',len(feature_list))
# feature = model.get_feature(face_img)
# source_feature = feature
# feature = feature.tolist()[0]
# # 找出最近的6个特征
# # 10个查找树
# # index.build(512)
# # index.save('faceModel.ann')
# u = AnnoyIndex(512)
# u.load('faceModel.ann')
# I = u.get_nns_by_vector(feature,3)
# print(I)
# # 根据annoy分析后得到的近似向量的下标,取出相应的向量计算他们之间的相似度
# target_feature = np.array(feature_list)[I]
# sim = dot(source_feature,target_feature)
# for id, value in enumerate(sim):
# # 判断这个分数,如果小于设定的阈值,将它丢弃
# if value <= 0.7:
# I.__delitem__(id)
# sim.__delitem__(id)
# _sim_image = np.array(image_list)[I].tolist()
# _faceInfos = np.array(faceInfo)[I].tolist()
# for key in SERVER_DIR_KEYS:
# print(key)
# for _,_,files in os.walk(key):
# for image in _sim_image:
# if str(image) in files:
# sim_image.append(args.file_server +'/'+key.split('images/')[1] + image)
# result=[]
# for idx, info in enumerate(_faceInfos):
# result.append({'name':info['faceName'],'score':sim[idx],'imgPath':sim_image[idx]})
# sim_name.append(info['faceName'])
# except Exception as e:
# print(str(e))
# success = 1
json_result["success"] = success
return json.dumps(json_result,cls=JsonEncoder)
#人脸验证请求
# 上传两张人脸图片进行人脸比对
@app.route("/faceVerify", methods=['POST'])
def faceVerify():
success = 0
json_result = collections.OrderedDict()
file1 = request.files.get('image1')
file2 = request.files.get('image2')
if file1 is None or file2 is None:
status = "badrRequest"
json_result["success"] = success
json_result["status "] = status
return json.dumps(json_result)
# 写死保存目录,需修改
filename1 = str(datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"))+str(random.randint(0,100))+'_1_'+'.jpg'
filepath1 = os.path.join(SERARCH_TMP_DIR,filename1)
file1.save(filepath1)
filename2 = str(datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"))+str(random.randint(0,100))+'_2_.'+'jpg'
filepath2 = os.path.join(SERARCH_TMP_DIR,filename2)
file2.save(filepath2)
# 生成两张上传图片的特征编码
image1 = cv2.imread(filepath1, cv2.IMREAD_COLOR)
image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
image2 = cv2.imread(filepath2, cv2.IMREAD_COLOR)
image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
try:
face_img1 = model.get_aligned_face(image1)
face_img2 = model.get_aligned_face(image2)
if face_img1 is None or face_img2 is None :
status = "img1 or img2 noFace"
else:
feature1 = model.get_feature(face_img1)
feature2 = model.get_feature(face_img2)
# 计算两个人脸图的相似度
sim = dot(feature1,feature2)
print(sim[0]*100)
except Exception as e:
print(str(e))
success = 1
json_result["confidence"] = sim[0]*100
return json.dumps(json_result,cls=JsonEncoder)
def add_i():
global i
i += 1
def get_i():
global i
return i
# def paresulte_arguments(argv):
# parser = argparse.ArgumentParser()
# parser.add_argument('--image-size', default='112,112', help='')
# parser.add_argument('--model', default='/home/shawnliu/workPlace/insightface/models/model,00', help='path to load model.')
# parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
# parser.add_argument('--file_server_image_dir', type=str,help='Base dir to the face image.', default='/home/shawnliu/workPlace/Face_server/images')
# parser.add_argument('--file_server', type=str,help='the file server address', default='http://192.168.1.157:8082')
# parser.add_argument('--port', default=5000, type=int, help='api port')
# parser.add_argument('--gpu', default=0, type=int, help='gpu devices')
# return parser.parse_args(argv)
def paresulte_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='../../models/model,00', help='path to load model.')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
parser.add_argument('--file_server_image_dir', type=str,help='Base dir to the face image.', default='/opt/images')
parser.add_argument('--file_server', type=str,help='the file server address', default='http://localhost:8082')
parser.add_argument('--port', default=5001, type=int, help='api port')
return parser.parse_args(argv)
args = paresulte_arguments('')
model = face_model.FaceModel(args)
i = 0
if __name__ == '__main__':
app.run(host='0.0.0.0',port = args.port, threaded=True)
| [
"json.JSONEncoder.default",
"flask.Flask",
"numpy.array",
"flaskext.mysql.MySQL",
"os.walk",
"argparse.ArgumentParser",
"json.dumps",
"flask.request.form.get",
"numpy.frombuffer",
"random.randint",
"annoy.AnnoyIndex",
"collections.OrderedDict",
"flask.request.get_json",
"cv2.cvtColor",
"... | [((470, 477), 'flaskext.mysql.MySQL', 'MySQL', ([], {}), '()\n', (475, 477), False, 'from flaskext.mysql import MySQL\n'), ((484, 499), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (489, 499), False, 'from flask import Flask, render_template, request\n'), ((18039, 18065), 'face_model.FaceModel', 'face_model.FaceModel', (['args'], {}), '(args)\n', (18059, 18065), False, 'import face_model\n'), ((2200, 2225), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2223, 2225), False, 'import collections\n'), ((2392, 2415), 'json.dumps', 'json.dumps', (['json_result'], {}), '(json_result)\n', (2402, 2415), False, 'import json\n'), ((2511, 2536), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2534, 2536), False, 'import collections\n'), ((2699, 2722), 'json.dumps', 'json.dumps', (['json_result'], {}), '(json_result)\n', (2709, 2722), False, 'import json\n'), ((2854, 2879), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2877, 2879), False, 'import collections\n'), ((2891, 2909), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2907, 2909), False, 'from flask import Flask, render_template, request\n'), ((4059, 4082), 'json.dumps', 'json.dumps', (['json_result'], {}), '(json_result)\n', (4069, 4082), False, 'import json\n'), ((4189, 4214), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4212, 4214), False, 'import collections\n'), ((4230, 4257), 'flask.request.form.get', 'request.form.get', (['"""user_id"""'], {}), "('user_id')\n", (4246, 4257), False, 'from flask import Flask, render_template, request\n'), ((4272, 4299), 'flask.request.form.get', 'request.form.get', (['"""room_id"""'], {}), "('room_id')\n", (4288, 4299), False, 'from flask import Flask, render_template, request\n'), ((4313, 4339), 'flask.request.form.get', 'request.form.get', (['"""s_time"""'], {}), "('s_time')\n", (4329, 4339), False, 'from flask import Flask, 
render_template, request\n'), ((4353, 4379), 'flask.request.form.get', 'request.form.get', (['"""e_time"""'], {}), "('e_time')\n", (4369, 4379), False, 'from flask import Flask, render_template, request\n'), ((4391, 4415), 'flask.request.form.get', 'request.form.get', (['"""flag"""'], {}), "('flag')\n", (4407, 4415), False, 'from flask import Flask, render_template, request\n'), ((4434, 4459), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4457, 4459), False, 'import collections\n'), ((4638, 4661), 'json.dumps', 'json.dumps', (['json_result'], {}), '(json_result)\n', (4648, 4661), False, 'import json\n'), ((4755, 4780), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4778, 4780), False, 'import collections\n'), ((5163, 5186), 'json.dumps', 'json.dumps', (['json_result'], {}), '(json_result)\n', (5173, 5186), False, 'import json\n'), ((5447, 5472), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5470, 5472), False, 'import collections\n'), ((5485, 5511), 'flask.request.files.get', 'request.files.get', (['"""image"""'], {}), "('image')\n", (5502, 5511), False, 'from flask import Flask, render_template, request\n'), ((5523, 5547), 'flask.request.form.get', 'request.form.get', (['"""name"""'], {}), "('name')\n", (5539, 5547), False, 'from flask import Flask, render_template, request\n'), ((5875, 5922), 'os.path.join', 'os.path.join', (['"""./images/test"""', '"""test"""', 'filename'], {}), "('./images/test', 'test', filename)\n", (5887, 5922), False, 'import os, shutil\n'), ((7675, 7715), 'json.dumps', 'json.dumps', (['json_result'], {'cls': 'JsonEncoder'}), '(json_result, cls=JsonEncoder)\n', (7685, 7715), False, 'import json\n'), ((7849, 7874), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (7872, 7874), False, 'import collections\n'), ((7978, 8004), 'flask.request.files.get', 'request.files.get', (['"""image"""'], {}), "('image')\n", (7995, 8004), False, 'from flask 
import Flask, render_template, request\n'), ((8320, 8359), 'os.path.join', 'os.path.join', (['SERARCH_TMP_DIR', 'filename'], {}), '(SERARCH_TMP_DIR, filename)\n', (8332, 8359), False, 'import os, shutil\n'), ((8415, 8453), 'cv2.imread', 'cv2.imread', (['filepath', 'cv2.IMREAD_COLOR'], {}), '(filepath, cv2.IMREAD_COLOR)\n', (8425, 8453), False, 'import cv2\n'), ((8466, 8504), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (8478, 8504), False, 'import cv2\n'), ((11135, 11170), 'json.dumps', 'json.dumps', (['result'], {'cls': 'JsonEncoder'}), '(result, cls=JsonEncoder)\n', (11145, 11170), False, 'import json\n'), ((11292, 11317), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (11315, 11317), False, 'import collections\n'), ((14722, 14762), 'json.dumps', 'json.dumps', (['json_result'], {'cls': 'JsonEncoder'}), '(json_result, cls=JsonEncoder)\n', (14732, 14762), False, 'import json\n'), ((14884, 14909), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (14907, 14909), False, 'import collections\n'), ((14923, 14950), 'flask.request.files.get', 'request.files.get', (['"""image1"""'], {}), "('image1')\n", (14940, 14950), False, 'from flask import Flask, render_template, request\n'), ((14963, 14990), 'flask.request.files.get', 'request.files.get', (['"""image2"""'], {}), "('image2')\n", (14980, 14990), False, 'from flask import Flask, render_template, request\n'), ((15332, 15372), 'os.path.join', 'os.path.join', (['SERARCH_TMP_DIR', 'filename1'], {}), '(SERARCH_TMP_DIR, filename1)\n', (15344, 15372), False, 'import os, shutil\n'), ((15527, 15567), 'os.path.join', 'os.path.join', (['SERARCH_TMP_DIR', 'filename2'], {}), '(SERARCH_TMP_DIR, filename2)\n', (15539, 15567), False, 'import os, shutil\n'), ((15627, 15666), 'cv2.imread', 'cv2.imread', (['filepath1', 'cv2.IMREAD_COLOR'], {}), '(filepath1, cv2.IMREAD_COLOR)\n', (15637, 15666), False, 'import cv2\n'), ((15680, 15719), 
'cv2.cvtColor', 'cv2.cvtColor', (['image1', 'cv2.COLOR_BGR2RGB'], {}), '(image1, cv2.COLOR_BGR2RGB)\n', (15692, 15719), False, 'import cv2\n'), ((15734, 15773), 'cv2.imread', 'cv2.imread', (['filepath2', 'cv2.IMREAD_COLOR'], {}), '(filepath2, cv2.IMREAD_COLOR)\n', (15744, 15773), False, 'import cv2\n'), ((15787, 15826), 'cv2.cvtColor', 'cv2.cvtColor', (['image2', 'cv2.COLOR_BGR2RGB'], {}), '(image2, cv2.COLOR_BGR2RGB)\n', (15799, 15826), False, 'import cv2\n'), ((16375, 16415), 'json.dumps', 'json.dumps', (['json_result'], {'cls': 'JsonEncoder'}), '(json_result, cls=JsonEncoder)\n', (16385, 16415), False, 'import json\n'), ((17372, 17397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17395, 17397), False, 'import argparse\n'), ((5712, 5735), 'json.dumps', 'json.dumps', (['json_result'], {}), '(json_result)\n', (5722, 5735), False, 'import json\n'), ((6034, 6072), 'cv2.imread', 'cv2.imread', (['filepath', 'cv2.IMREAD_COLOR'], {}), '(filepath, cv2.IMREAD_COLOR)\n', (6044, 6072), False, 'import cv2\n'), ((6088, 6126), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (6100, 6126), False, 'import cv2\n'), ((8153, 8176), 'json.dumps', 'json.dumps', (['json_result'], {}), '(json_result)\n', (8163, 8176), False, 'import json\n'), ((15157, 15180), 'json.dumps', 'json.dumps', (['json_result'], {}), '(json_result)\n', (15167, 15180), False, 'import json\n'), ((1682, 1717), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (1706, 1717), False, 'import json\n'), ((7113, 7128), 'annoy.AnnoyIndex', 'AnnoyIndex', (['(512)'], {}), '(512)\n', (7123, 7128), False, 'from annoy import AnnoyIndex\n'), ((9882, 9897), 'annoy.AnnoyIndex', 'AnnoyIndex', (['(512)'], {}), '(512)\n', (9892, 9897), False, 'from annoy import AnnoyIndex\n'), ((1888, 1929), 'numpy.expand_dims', 'np.expand_dims', (['target_feature[i]'], {'axis': '(0)'}), '(target_feature[i], axis=0)\n', 
(1902, 1929), True, 'import numpy as np\n'), ((5813, 5835), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (5827, 5835), False, 'import random\n'), ((7217, 7257), 'numpy.frombuffer', 'np.frombuffer', (['feature'], {'dtype': 'np.float32'}), '(feature, dtype=np.float32)\n', (7230, 7257), True, 'import numpy as np\n'), ((8275, 8297), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (8289, 8297), False, 'import random\n'), ((9095, 9135), 'numpy.frombuffer', 'np.frombuffer', (['feature'], {'dtype': 'np.float32'}), '(feature, dtype=np.float32)\n', (9108, 9135), True, 'import numpy as np\n'), ((10085, 10107), 'numpy.array', 'np.array', (['feature_list'], {}), '(feature_list)\n', (10093, 10107), True, 'import numpy as np\n'), ((10578, 10590), 'os.walk', 'os.walk', (['key'], {}), '(key)\n', (10585, 10590), False, 'import os, shutil\n'), ((15280, 15302), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (15294, 15302), False, 'import random\n'), ((15475, 15497), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (15489, 15497), False, 'import random\n'), ((5757, 5780), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5778, 5780), False, 'import datetime\n'), ((8219, 8242), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8240, 8242), False, 'import datetime\n'), ((10388, 10408), 'numpy.array', 'np.array', (['image_list'], {}), '(image_list)\n', (10396, 10408), True, 'import numpy as np\n'), ((10446, 10464), 'numpy.array', 'np.array', (['faceInfo'], {}), '(faceInfo)\n', (10454, 10464), True, 'import numpy as np\n'), ((15224, 15247), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15245, 15247), False, 'import datetime\n'), ((15419, 15442), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15440, 15442), False, 'import datetime\n')] |
import numpy as np
import os
import math
import dtdata as dt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.layers import Dense, Activation, Dropout, Input
from keras.models import load_model
import matplotlib.pyplot as plt
from build_model_basic import *
## Parameters
learning_rate = 0.01
lambda_l2_reg = 0.003
encoding_dim = 484
original_seq_len = 2420
## Network Parameters
# length of input signals
input_seq_len = 480
# length of output signals
output_seq_len = 4
# size of LSTM Cell
hidden_dim = 128
# num of input signals
input_dim = 1
# num of output signals
output_dim = 1
# num of stacked lstm layers
num_stacked_layers = 2
# gradient clipping - to avoid gradient exploding
GRADIENT_CLIPPING = 2.5
scaler = StandardScaler()
autoencoder_path = "/home/suroot/Documents/train/daytrader/models/autoencoder-"+str(encoding_dim)+".hdf5"
cache = "/home/suroot/Documents/train/daytrader/autoencoded-"+str(encoding_dim)+".npy"
savePath = r'/home/suroot/Documents/train/daytrader/'
path =r'/home/suroot/Documents/train/daytrader/ema-crossover' # path to data
# load auto encoder.. and encode the data..
autoencoder = load_model(autoencoder_path)
input = Input(shape=(original_seq_len,))
encoder_layer = autoencoder.layers[-2]
encoder = Model(input, encoder_layer(input))
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))
use_cache = False
if( not use_cache or not os.path.isfile(cache) ):
data = dt.loadData(path)
(data, labels) = dt.centerAroundEntry(data, 0)
# scale data .. don't forget to stor the scaler weights as we will need them after.
data = scaler.fit_transform(data)
print(data.shape)
# encode all of the data .. should now be of length 480
encoded_ts = encoder.predict(data)
print(encoded_ts.shape)
# cache this data and the scaler weights.
np.save(cache, encoded_ts)
# TODO: cache the scaler weights
print("loading cached data")
data = np.load(cache)
print(data.shape)
def generate_train_samples(x, batch, batch_size = 10, input_seq_len = input_seq_len, output_seq_len = output_seq_len):
input_seq = x[batch*batch_size:(batch*batch_size)+batch_size, 0:input_seq_len]
output_seq = x[batch*batch_size:(batch*batch_size)+batch_size, input_seq_len:input_seq_len+output_seq_len]
return np.array(input_seq), np.array(output_seq)
# TRAINING PROCESS
x_train, x_test, _, _ = train_test_split(data, data[:,-1], test_size=0.1)
epochs = 250
batch_size = 64
total_iteractions = int(math.floor(x_train.shape[0] / batch_size))
KEEP_RATE = 0.5
train_losses = []
val_losses = []
if( not os.path.isfile( os.path.join(savePath, 'univariate_ts_model0.meta') ) ):
print("building model..")
rnn_model = build_graph(input_seq_len = input_seq_len, output_seq_len = output_seq_len, hidden_dim=hidden_dim, feed_previous=False)
saver = tf.train.Saver()
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for epoch in range(epochs):
print("EPOCH: " + str(epoch))
for i in range(total_iteractions):
batch_input, batch_output = generate_train_samples(x = x_train, batch=i, batch_size=batch_size)
feed_dict = {rnn_model['enc_inp'][t]: batch_input[:,t].reshape(-1,input_dim) for t in range(input_seq_len)}
feed_dict.update({rnn_model['target_seq'][t]: batch_output[:,t].reshape(-1,output_dim) for t in range(output_seq_len)})
_, loss_t = sess.run([rnn_model['train_op'], rnn_model['loss']], feed_dict)
print(loss_t)
temp_saver = rnn_model['saver']()
save_path = temp_saver.save(sess, os.path.join(savePath, 'univariate_ts_model0'))
print("Checkpoint saved at: ", save_path)
else:
print("using cached model...")
print("x_test: " + str(x_test.shape))
decoded_ts = decoder.predict(x_test)
print("decoded_ts: "+ str(decoded_ts.shape))
rnn_model = build_graph(input_seq_len = input_seq_len, output_seq_len = output_seq_len, hidden_dim=hidden_dim, feed_previous=True)
predictions = []
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
saver = rnn_model['saver']().restore(sess, os.path.join(savePath, 'univariate_ts_model0'))
for i in range(len(x_test)):
test_seq_input = x_test[i,0:input_seq_len]
feed_dict = {rnn_model['enc_inp'][t]: test_seq_input[t].reshape(1,1) for t in range(input_seq_len)}
feed_dict.update({rnn_model['target_seq'][t]: np.zeros([1, output_dim]) for t in range(output_seq_len)})
final_preds = sess.run(rnn_model['reshaped_outputs'], feed_dict)
final_preds = np.concatenate(final_preds, axis = 1)
print(final_preds)
predicted_ts = np.append(x_test[i,0:input_seq_len], final_preds.reshape(-1))
predicted_ts = np.reshape(predicted_ts, (1, encoding_dim))
predictions.append(predicted_ts)
for i in range(len(x_test)):
predicted_ts = predictions[i]
print(predicted_ts.shape)
predicted_decoded_ts = decoder.predict( predicted_ts )
#predicted_decoded_ts = scaler.inverse_transform(decoded_ts)
#decoded_ts = scaler.inverse_transform(decoded_ts)
print(predicted_decoded_ts.shape)
l1, = plt.plot(range(2400), decoded_ts[i,0:2400], label = 'Training truth')
l2, = plt.plot(range(2400, 2420), decoded_ts[i,2400:], 'y', label = 'Test truth')
l3, = plt.plot(range(2400, 2420), predicted_decoded_ts[0,2400:], 'r', label = 'Test predictions')
plt.legend(handles = [l1, l2, l3], loc = 'lower left')
plt.show()
| [
"keras.models.load_model",
"numpy.reshape",
"dtdata.centerAroundEntry",
"sklearn.model_selection.train_test_split",
"math.floor",
"matplotlib.pyplot.legend",
"os.path.join",
"sklearn.preprocessing.StandardScaler",
"os.path.isfile",
"keras.layers.Input",
"numpy.array",
"dtdata.loadData",
"num... | [((827, 843), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (841, 843), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1228, 1256), 'keras.models.load_model', 'load_model', (['autoencoder_path'], {}), '(autoencoder_path)\n', (1238, 1256), False, 'from keras.models import load_model\n'), ((1265, 1297), 'keras.layers.Input', 'Input', ([], {'shape': '(original_seq_len,)'}), '(shape=(original_seq_len,))\n', (1270, 1297), False, 'from keras.layers import Dense, Activation, Dropout, Input\n'), ((1398, 1426), 'keras.layers.Input', 'Input', ([], {'shape': '(encoding_dim,)'}), '(shape=(encoding_dim,))\n', (1403, 1426), False, 'from keras.layers import Dense, Activation, Dropout, Input\n'), ((2117, 2131), 'numpy.load', 'np.load', (['cache'], {}), '(cache)\n', (2124, 2131), True, 'import numpy as np\n'), ((2569, 2619), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'data[:, -1]'], {'test_size': '(0.1)'}), '(data, data[:, -1], test_size=0.1)\n', (2585, 2619), False, 'from sklearn.model_selection import train_test_split\n'), ((1608, 1625), 'dtdata.loadData', 'dt.loadData', (['path'], {}), '(path)\n', (1619, 1625), True, 'import dtdata as dt\n'), ((1647, 1676), 'dtdata.centerAroundEntry', 'dt.centerAroundEntry', (['data', '(0)'], {}), '(data, 0)\n', (1667, 1676), True, 'import dtdata as dt\n'), ((2016, 2042), 'numpy.save', 'np.save', (['cache', 'encoded_ts'], {}), '(cache, encoded_ts)\n', (2023, 2042), True, 'import numpy as np\n'), ((2673, 2714), 'math.floor', 'math.floor', (['(x_train.shape[0] / batch_size)'], {}), '(x_train.shape[0] / batch_size)\n', (2683, 2714), False, 'import math\n'), ((5754, 5804), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[l1, l2, l3]', 'loc': '"""lower left"""'}), "(handles=[l1, l2, l3], loc='lower left')\n", (5764, 5804), True, 'import matplotlib.pyplot as plt\n'), ((5813, 5823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5821, 5823), 
True, 'import matplotlib.pyplot as plt\n'), ((1572, 1593), 'os.path.isfile', 'os.path.isfile', (['cache'], {}), '(cache)\n', (1586, 1593), False, 'import os\n'), ((2483, 2502), 'numpy.array', 'np.array', (['input_seq'], {}), '(input_seq)\n', (2491, 2502), True, 'import numpy as np\n'), ((2504, 2524), 'numpy.array', 'np.array', (['output_seq'], {}), '(output_seq)\n', (2512, 2524), True, 'import numpy as np\n'), ((2794, 2845), 'os.path.join', 'os.path.join', (['savePath', '"""univariate_ts_model0.meta"""'], {}), "(savePath, 'univariate_ts_model0.meta')\n", (2806, 2845), False, 'import os\n'), ((4443, 4489), 'os.path.join', 'os.path.join', (['savePath', '"""univariate_ts_model0"""'], {}), "(savePath, 'univariate_ts_model0')\n", (4455, 4489), False, 'import os\n'), ((4905, 4940), 'numpy.concatenate', 'np.concatenate', (['final_preds'], {'axis': '(1)'}), '(final_preds, axis=1)\n', (4919, 4940), True, 'import numpy as np\n'), ((5079, 5122), 'numpy.reshape', 'np.reshape', (['predicted_ts', '(1, encoding_dim)'], {}), '(predicted_ts, (1, encoding_dim))\n', (5089, 5122), True, 'import numpy as np\n'), ((3889, 3935), 'os.path.join', 'os.path.join', (['savePath', '"""univariate_ts_model0"""'], {}), "(savePath, 'univariate_ts_model0')\n", (3901, 3935), False, 'import os\n'), ((4742, 4767), 'numpy.zeros', 'np.zeros', (['[1, output_dim]'], {}), '([1, output_dim])\n', (4750, 4767), True, 'import numpy as np\n')] |
import os
import json
import numpy as np
import scipy.sparse as sp
from src.model.linear_svm import LinearSVM
from src.model.random_forest import RandomForest
from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR
from src.utils.io import load_proc_baseline_feature, save_UAR_results
from src.utils.io import save_post_probability, load_post_probability
from src.utils.io import save_cv_results
from src.utils.preprocess import upsample
from src.utils.preprocess import k_fold_cv
'''
BASELINE CLASSIFICATION (py) PROVIDED BY AVEC2018
features | computed level
-------- | --------------
MFCCs | frame level
eGeMAPS | turn level
DeepSpectrum | activations in ALEXNET
BoAW | window size (2s)
FAUs | session level
BoVW | window size (11s)
'''
class BaseLine():
    """Baseline system for BD classification (AVEC2018).

    Wraps the provided SVM / Random Forest baselines over the challenge
    LLD feature sets and their late fusion.

    Attributes
    ----------
    model_name : str
        Classifier to use: 'SVM', 'RF' or 'RF_cv' (k-fold cross validation).
    feature_name : str
        Feature set to use: 'MFCC' / 'eGeMAPS' / 'Deep' / 'BoAW' / 'AU' /
        'BoVW', or 'FUSE' for late fusion of a feature pair.
    test : bool
        When True the underlying models run in test mode.
    """
    def __init__(self, model_name, feature_name, test=False):
        """Store the model/feature configuration for this instance.

        :param model_name: classifier identifier (e.g. 'SVM', 'RF', 'RF_cv')
        :param feature_name: feature identifier (e.g. 'MFCC', 'FUSE')
        :param test: run the underlying models in test mode when True
        """
        self.test = test
        self.feature_name = feature_name
        self.model_name = model_name
        print("\nbaseline system initialized, model %s feature %s" % (self.model_name, self.feature_name))
def run(self):
"""main function of BaseLine() instance
"""
if self.feature_name == 'FUSE':
feature_name_1 = ''
feature_name_2 = ''
self.run_fusion(feature_name_1, feature_name_2)
elif self.feature_name == 'MFCC':
self.run_MFCC()
elif self.feature_name == 'eGeMAPS':
self.run_eGeMAPS()
elif self.feature_name == 'Deep':
self.run_DeepSpectrum()
elif self.feature_name == 'BoAW':
self.run_BoAW()
elif self.feature_name == 'AU':
self.run_AU()
elif self.feature_name == 'BoVW':
self.run_BoVW()
def run_MFCC(self):
"""run classifier on MFCC feature (single modality)
"""
print("\nbuilding a classifier on MFCC features (both frame-level and session-level)")
X_train, y_train, train_inst, X_dev, y_dev, dev_inst = load_proc_baseline_feature('MFCC', verbose=True)
if self.model_name == 'RF_cv':
y_train, y_dev = np.ravel(y_train), np.ravel(y_dev)
train_inst, dev_inst = np.ravel(train_inst), np.ravel(dev_inst)
X = np.vstack((X_train, X_dev))
y = np.hstack((y_train, y_dev))
inst = np.hstack((train_inst, dev_inst))
assert len(X) == len(y) == len(inst)
cv_ids = k_fold_cv(len(X))
cv_res = []
for (ids_train, ids_dev) in cv_ids:
X_train = X[ids_train]
y_train = y[ids_train]
X_dev = X[ids_dev]
y_dev = y[ids_dev]
dev_inst = inst[ids_dev]
RF_MFCC = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_MFCC.run()
y_pred_train, y_pred_dev = RF_MFCC.evaluate()
_, session_res = get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=True)
cv_res.append(session_res)
save_cv_results(cv_res, self.model_name, self.feature_name, 'baseline')
print("\nupsampling training data to address class imbalance")
X_train, y_train, train_inst = upsample(X_train, y_train, train_inst)
print("\nobtaining sparse matrix for better classification")
# X_train = sp.csr_matrix(np.vstack((X_train, X_dev)))
# X_dev = sp.csr_matrix(X_dev)
# y_train = np.hstack((y_train, y_dev))
X_train, X_dev = sp.csr_matrix(X_train), sp.csr_matrix(X_dev)
if self.model_name == 'SVM':
SVM_MFCC = LinearSVM(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
SVM_MFCC.run()
y_pred_train, y_pred_dev = SVM_MFCC.evaluate()
elif self.model_name == 'RF':
RF_MFCC = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_MFCC.run()
y_pred_train, y_pred_dev = RF_MFCC.evaluate()
get_UAR(y_pred_train, y_train, train_inst, self.model_name, self.feature_name, 'baseline', baseline=True, train_set=True, test=self.test)
get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=self.test)
if not self.test:
get_post_probability(y_pred_dev, y_dev, dev_inst, np.array([]), self.model_name, self.feature_name)
def run_eGeMAPS(self):
"""run classifier on eGeMAPS feature (single modality)
"""
print("\nbuilding a classifier on eGeMAPS features (both frame-level and session-level)")
X_train, y_train, train_inst, X_dev, y_dev, dev_inst = load_proc_baseline_feature('eGeMAPS', verbose=True)
if self.model_name == 'RF_cv':
X = np.vstack((X_train, X_dev))
y = np.hstack((y_train, y_dev))
assert len(X) == len(y)
cv_ids = k_fold_cv(len(X))
cv_res = []
for (ids_train, ids_dev) in cv_ids:
X_train = X[ids_train]
y_train = y[ids_train]
X_dev = X[ids_dev]
y_dev = y[ids_dev]
RF_MFCC = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_MFCC.run()
y_pred_train, y_pred_dev = RF_MFCC.evaluate()
_, session_res = get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=True)
cv_res.append(session_res)
save_cv_results(cv_res, self.model_name, self.feature_name, 'baseline')
print("\nupsampling training data to address class imbalance")
X_train, y_train, train_inst = upsample(X_train, y_train, train_inst)
print("\nobtaining sparse matrix for better classification")
# X_train = sp.csr_matrix(np.vstack((X_train, X_dev)))
# X_dev = sp.csr_matrix(X_dev)
# y_train = np.hstack((y_train, y_dev))
X_train, X_dev = sp.csr_matrix(X_train), sp.csr_matrix(X_dev)
if self.model_name == 'SVM':
SVM_eGeMAPS = LinearSVM(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
SVM_eGeMAPS.run()
y_pred_train, y_pred_dev = SVM_eGeMAPS.evaluate()
elif self.model_name == 'RF':
RF_eGeMAPS = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_eGeMAPS.run()
y_pred_train, y_pred_dev = RF_eGeMAPS.evaluate()
get_UAR(y_pred_train, y_train, train_inst, self.model_name, self.feature_name, 'baseline', baseline=True, train_set=True, test=self.test)
get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=self.test)
if not self.test:
get_post_probability(y_pred_dev, y_dev, dev_inst, np.array([]), self.model_name, self.feature_name)
def run_DeepSpectrum(self):
"""run classifier on DeepSpectrum feature (single modality)
"""
print("\nbuilding a classifier on Deep features (both frame-level and session-level)")
X_train, y_train, train_inst, X_dev, y_dev, dev_inst = load_proc_baseline_feature('Deep', verbose=True)
if self.model_name == 'RF_cv':
X = np.vstack((X_train, X_dev))
y = np.hstack((y_train, y_dev))
assert len(X) == len(y)
cv_ids = k_fold_cv(len(X))
cv_res = []
for (ids_train, ids_dev) in cv_ids:
X_train = X[ids_train]
y_train = y[ids_train]
X_dev = X[ids_dev]
y_dev = y[ids_dev]
RF_MFCC = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_MFCC.run()
y_pred_train, y_pred_dev = RF_MFCC.evaluate()
_, session_res = get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=True)
cv_res.append(session_res)
save_cv_results(cv_res, self.model_name, self.feature_name, 'baseline')
print("\nupsampling training data to address class imbalance")
X_train, y_train, train_inst = upsample(X_train, y_train, train_inst)
print("\nobtaining sparse matrix for better classification")
# X_train = sp.csr_matrix(np.vstack((X_train, X_dev)))
# X_dev = sp.csr_matrix(X_dev)
# y_train = np.hstack((y_train, y_dev))
X_train, X_dev = sp.csr_matrix(X_train), sp.csr_matrix(X_dev)
if self.model_name == 'SVM':
SVM_Deep = LinearSVM(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
SVM_Deep.run()
y_pred_train, y_pred_dev = SVM_Deep.evaluate()
elif self.model_name == 'RF':
RF_Deep = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_Deep.run()
y_pred_train, y_pred_dev = RF_Deep.evaluate()
get_UAR(y_pred_train, y_train, train_inst, self.model_name, self.feature_name, 'baseline', baseline=True, train_set=True, test=self.test)
get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=self.test)
if not self.test:
get_post_probability(y_pred_dev, y_dev, dev_inst, np.array([]), self.model_name, self.feature_name)
def run_BoAW(self):
"""run classifier on BoAW feature (single modality)
"""
print("\nbuilding a classifier on BoAW features (both frame-level and session-level)")
X_train, y_train, train_inst, X_dev, y_dev, dev_inst = load_proc_baseline_feature('BoAW', verbose=True)
if self.model_name == 'RF_cv':
y_train, y_dev = np.ravel(y_train), np.ravel(y_dev)
X = np.vstack((X_train, X_dev))
y = np.hstack((y_train, y_dev))
assert len(X) == len(y)
cv_ids = k_fold_cv(len(X))
cv_res = []
for (ids_train, ids_dev) in cv_ids:
X_train = X[ids_train]
y_train = y[ids_train]
X_dev = X[ids_dev]
y_dev = y[ids_dev]
RF_MFCC = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_MFCC.run()
y_pred_train, y_pred_dev = RF_MFCC.evaluate()
_, session_res = get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=True)
cv_res.append(session_res)
save_cv_results(cv_res, self.model_name, self.feature_name, 'baseline')
print("\nupsampling training data to address class imbalance")
X_train, y_train, train_inst = upsample(X_train, y_train, train_inst)
print("\nobtaining sparse matrix for better classification")
# X_train = sp.csr_matrix(np.vstack((X_train, X_dev)))
# X_dev = sp.csr_matrix(X_dev)
# y_train = np.hstack((y_train, y_dev))
X_train, X_dev = sp.csr_matrix(X_train), sp.csr_matrix(X_dev)
if self.model_name == 'SVM':
SVM_BoAW = LinearSVM(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
SVM_BoAW.run()
y_pred_train, y_pred_dev = SVM_BoAW.evaluate()
elif self.model_name == 'RF':
RF_BoAW = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_BoAW.run()
y_pred_train, y_pred_dev = RF_BoAW.evaluate()
get_UAR(y_pred_train, y_train, train_inst, self.model_name, self.feature_name, 'baseline', baseline=True, train_set=True, test=self.test)
get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=self.test)
if not self.test:
get_post_probability(y_pred_dev, y_dev, dev_inst, np.array([]), self.model_name, self.feature_name)
def run_AU(self):
"""run classifier on AU feature (single modality)
"""
print("\nbuilding a classifier on AU features (already session-level)")
X_train, y_train, _, X_dev, y_dev, _ = load_proc_baseline_feature('AU', verbose=True)
if self.model_name == 'RF_cv':
X = np.vstack((X_train, X_dev))
y = np.hstack((y_train, y_dev))
assert len(X) == len(y)
cv_ids = k_fold_cv(len(X))
cv_res = []
for (ids_train, ids_dev) in cv_ids:
X_train = X[ids_train]
y_train = y[ids_train]
X_dev = X[ids_dev]
y_dev = y[ids_dev]
RF_MFCC = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_MFCC.run()
y_pred_train, y_pred_dev = RF_MFCC.evaluate()
_, session_res = get_UAR(y_pred_dev, y_dev, np.array([]), self.model_name, self.feature_name, 'baseline', baseline=True, test=True)
cv_res.append(session_res)
save_cv_results(cv_res, self.model_name, self.feature_name, 'baseline')
print("\nupsampling training data to address class imbalance")
X_train, y_train, _ = upsample(X_train, y_train, np.array([]))
print("\nobtaining sparse matrix for better classification")
# X_train = sp.csr_matrix(np.vstack((X_train, X_dev)))
# X_dev = sp.csr_matrix(X_dev)
# y_train = np.hstack((y_train, y_dev))
X_train, X_dev = sp.csr_matrix(X_train), sp.csr_matrix(X_dev)
if self.model_name == 'SVM':
SVM_AU = LinearSVM(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
SVM_AU.run()
y_pred_train, y_pred_dev = SVM_AU.evaluate()
session_prob = SVM_AU.get_session_probability()
elif self.model_name == 'RF':
RF_AU = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_AU.run()
y_pred_train, y_pred_dev = RF_AU.evaluate()
session_prob = RF_AU.get_session_probability()
get_UAR(y_pred_train, y_train, np.array([]), self.model_name, self.feature_name, 'baseline', baseline=True, train_set=True, test=self.test)
get_UAR(y_pred_dev, y_dev, np.array([]), self.model_name, self.feature_name, 'baseline', baseline=True, test=self.test)
def run_BoVW(self):
"""run classifier on BoVW feature (single modality)
"""
print("\nbuilding a classifier on BoVW features (both frame-level and session-level)")
X_train, y_train, train_inst, X_dev, y_dev, dev_inst = load_proc_baseline_feature('BoVW', verbose=True)
if self.model_name == 'RF_cv':
X = np.vstack((X_train, X_dev))
y = np.hstack((y_train, y_dev))
assert len(X) == len(y)
cv_ids = k_fold_cv(len(X))
cv_res = []
for (ids_train, ids_dev) in cv_ids:
X_train = X[ids_train]
y_train = y[ids_train]
X_dev = X[ids_dev]
y_dev = y[ids_dev]
RF_MFCC = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_MFCC.run()
y_pred_train, y_pred_dev = RF_MFCC.evaluate()
_, session_res = get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=True)
cv_res.append(session_res)
save_cv_results(cv_res, self.model_name, self.feature_name, 'baseline')
print("\nupsampling training data to address class imbalance")
X_train, y_train, train_inst = upsample(X_train, y_train, train_inst)
print("\nobtaining sparse matrix for better classification")
# X_train = sp.csr_matrix(np.vstack((X_train, X_dev)))
# X_dev = sp.csr_matrix(X_dev)
# y_train = np.hstack((y_train, y_dev))
X_train, X_dev = sp.csr_matrix(X_train), sp.csr_matrix(X_dev)
if self.model_name == 'SVM':
SVM_BoVW = LinearSVM(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
SVM_BoVW.run()
y_pred_train, y_pred_dev = SVM_BoVW.evaluate()
elif self.model_name == 'RF':
RF_BoVW = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True, test=self.test)
RF_BoVW.run()
y_pred_train, y_pred_dev = RF_BoVW.evaluate()
get_UAR(y_pred_train, y_train, train_inst, self.model_name, self.feature_name, 'baseline', baseline=True, train_set=True, test=self.test)
get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name, 'baseline', baseline=True, test=self.test)
if not self.test:
get_post_probability(y_pred_dev, y_dev, dev_inst, np.array([]), self.model_name, self.feature_name)
def run_fusion(self, feature_name_1, feature_name_2):
"""run late fusion on a pair of specified features
"""
get_late_fusion_UAR(self.model_name, feature_name_1, feature_name_2, baseline=True) | [
"src.metric.uar.get_UAR",
"numpy.hstack",
"src.utils.io.load_proc_baseline_feature",
"src.model.random_forest.RandomForest",
"src.utils.preprocess.upsample",
"numpy.array",
"numpy.vstack",
"numpy.ravel",
"src.utils.io.save_cv_results",
"scipy.sparse.csr_matrix",
"src.model.linear_svm.LinearSVM",... | [((2882, 2930), 'src.utils.io.load_proc_baseline_feature', 'load_proc_baseline_feature', (['"""MFCC"""'], {'verbose': '(True)'}), "('MFCC', verbose=True)\n", (2908, 2930), False, 'from src.utils.io import load_proc_baseline_feature, save_UAR_results\n'), ((4209, 4247), 'src.utils.preprocess.upsample', 'upsample', (['X_train', 'y_train', 'train_inst'], {}), '(X_train, y_train, train_inst)\n', (4217, 4247), False, 'from src.utils.preprocess import upsample\n'), ((5032, 5174), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_train', 'y_train', 'train_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'train_set': '(True)', 'test': 'self.test'}), "(y_pred_train, y_train, train_inst, self.model_name, self.\n feature_name, 'baseline', baseline=True, train_set=True, test=self.test)\n", (5039, 5174), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((5178, 5297), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': 'self.test'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,\n 'baseline', baseline=True, test=self.test)\n", (5185, 5297), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((5696, 5747), 'src.utils.io.load_proc_baseline_feature', 'load_proc_baseline_feature', (['"""eGeMAPS"""'], {'verbose': '(True)'}), "('eGeMAPS', verbose=True)\n", (5722, 5747), False, 'from src.utils.io import load_proc_baseline_feature, save_UAR_results\n'), ((6774, 6812), 'src.utils.preprocess.upsample', 'upsample', (['X_train', 'y_train', 'train_inst'], {}), '(X_train, y_train, train_inst)\n', (6782, 6812), False, 'from src.utils.preprocess import upsample\n'), ((7615, 7757), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_train', 'y_train', 'train_inst', 'self.model_name', 'self.feature_name', 
'"""baseline"""'], {'baseline': '(True)', 'train_set': '(True)', 'test': 'self.test'}), "(y_pred_train, y_train, train_inst, self.model_name, self.\n feature_name, 'baseline', baseline=True, train_set=True, test=self.test)\n", (7622, 7757), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((7761, 7880), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': 'self.test'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,\n 'baseline', baseline=True, test=self.test)\n", (7768, 7880), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((8286, 8334), 'src.utils.io.load_proc_baseline_feature', 'load_proc_baseline_feature', (['"""Deep"""'], {'verbose': '(True)'}), "('Deep', verbose=True)\n", (8312, 8334), False, 'from src.utils.io import load_proc_baseline_feature, save_UAR_results\n'), ((9353, 9391), 'src.utils.preprocess.upsample', 'upsample', (['X_train', 'y_train', 'train_inst'], {}), '(X_train, y_train, train_inst)\n', (9361, 9391), False, 'from src.utils.preprocess import upsample\n'), ((10176, 10318), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_train', 'y_train', 'train_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'train_set': '(True)', 'test': 'self.test'}), "(y_pred_train, y_train, train_inst, self.model_name, self.\n feature_name, 'baseline', baseline=True, train_set=True, test=self.test)\n", (10183, 10318), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((10322, 10441), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': 'self.test'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,\n 'baseline', baseline=True, test=self.test)\n", 
(10329, 10441), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((10831, 10879), 'src.utils.io.load_proc_baseline_feature', 'load_proc_baseline_feature', (['"""BoAW"""'], {'verbose': '(True)'}), "('BoAW', verbose=True)\n", (10857, 10879), False, 'from src.utils.io import load_proc_baseline_feature, save_UAR_results\n'), ((11963, 12001), 'src.utils.preprocess.upsample', 'upsample', (['X_train', 'y_train', 'train_inst'], {}), '(X_train, y_train, train_inst)\n', (11971, 12001), False, 'from src.utils.preprocess import upsample\n'), ((12786, 12928), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_train', 'y_train', 'train_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'train_set': '(True)', 'test': 'self.test'}), "(y_pred_train, y_train, train_inst, self.model_name, self.\n feature_name, 'baseline', baseline=True, train_set=True, test=self.test)\n", (12793, 12928), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((12932, 13051), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': 'self.test'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,\n 'baseline', baseline=True, test=self.test)\n", (12939, 13051), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((13406, 13452), 'src.utils.io.load_proc_baseline_feature', 'load_proc_baseline_feature', (['"""AU"""'], {'verbose': '(True)'}), "('AU', verbose=True)\n", (13432, 13452), False, 'from src.utils.io import load_proc_baseline_feature, save_UAR_results\n'), ((15929, 15977), 'src.utils.io.load_proc_baseline_feature', 'load_proc_baseline_feature', (['"""BoVW"""'], {'verbose': '(True)'}), "('BoVW', verbose=True)\n", (15955, 15977), False, 'from src.utils.io import load_proc_baseline_feature, save_UAR_results\n'), ((16996, 17034), 
'src.utils.preprocess.upsample', 'upsample', (['X_train', 'y_train', 'train_inst'], {}), '(X_train, y_train, train_inst)\n', (17004, 17034), False, 'from src.utils.preprocess import upsample\n'), ((17827, 17969), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_train', 'y_train', 'train_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'train_set': '(True)', 'test': 'self.test'}), "(y_pred_train, y_train, train_inst, self.model_name, self.\n feature_name, 'baseline', baseline=True, train_set=True, test=self.test)\n", (17834, 17969), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((17973, 18092), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': 'self.test'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,\n 'baseline', baseline=True, test=self.test)\n", (17980, 18092), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((18365, 18452), 'src.metric.uar.get_late_fusion_UAR', 'get_late_fusion_UAR', (['self.model_name', 'feature_name_1', 'feature_name_2'], {'baseline': '(True)'}), '(self.model_name, feature_name_1, feature_name_2,\n baseline=True)\n', (18384, 18452), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((3140, 3167), 'numpy.vstack', 'np.vstack', (['(X_train, X_dev)'], {}), '((X_train, X_dev))\n', (3149, 3167), True, 'import numpy as np\n'), ((3184, 3211), 'numpy.hstack', 'np.hstack', (['(y_train, y_dev)'], {}), '((y_train, y_dev))\n', (3193, 3211), True, 'import numpy as np\n'), ((3231, 3264), 'numpy.hstack', 'np.hstack', (['(train_inst, dev_inst)'], {}), '((train_inst, dev_inst))\n', (3240, 3264), True, 'import numpy as np\n'), ((4026, 4097), 'src.utils.io.save_cv_results', 'save_cv_results', (['cv_res', 'self.model_name', 'self.feature_name', '"""baseline"""'], 
{}), "(cv_res, self.model_name, self.feature_name, 'baseline')\n", (4041, 4097), False, 'from src.utils.io import save_cv_results\n'), ((4492, 4514), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_train'], {}), '(X_train)\n', (4505, 4514), True, 'import scipy.sparse as sp\n'), ((4516, 4536), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_dev'], {}), '(X_dev)\n', (4529, 4536), True, 'import scipy.sparse as sp\n'), ((4598, 4693), 'src.model.linear_svm.LinearSVM', 'LinearSVM', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True,\n test=self.test)\n', (4607, 4693), False, 'from src.model.linear_svm import LinearSVM\n'), ((5804, 5831), 'numpy.vstack', 'np.vstack', (['(X_train, X_dev)'], {}), '((X_train, X_dev))\n', (5813, 5831), True, 'import numpy as np\n'), ((5848, 5875), 'numpy.hstack', 'np.hstack', (['(y_train, y_dev)'], {}), '((y_train, y_dev))\n', (5857, 5875), True, 'import numpy as np\n'), ((6583, 6654), 'src.utils.io.save_cv_results', 'save_cv_results', (['cv_res', 'self.model_name', 'self.feature_name', '"""baseline"""'], {}), "(cv_res, self.model_name, self.feature_name, 'baseline')\n", (6598, 6654), False, 'from src.utils.io import save_cv_results\n'), ((7057, 7079), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_train'], {}), '(X_train)\n', (7070, 7079), True, 'import scipy.sparse as sp\n'), ((7081, 7101), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_dev'], {}), '(X_dev)\n', (7094, 7101), True, 'import scipy.sparse as sp\n'), ((7166, 7261), 'src.model.linear_svm.LinearSVM', 'LinearSVM', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True,\n test=self.test)\n', (7175, 7261), False, 'from src.model.linear_svm import LinearSVM\n'), ((8391, 8418), 'numpy.vstack', 'np.vstack', (['(X_train, X_dev)'], {}), 
'((X_train, X_dev))\n', (8400, 8418), True, 'import numpy as np\n'), ((8435, 8462), 'numpy.hstack', 'np.hstack', (['(y_train, y_dev)'], {}), '((y_train, y_dev))\n', (8444, 8462), True, 'import numpy as np\n'), ((9170, 9241), 'src.utils.io.save_cv_results', 'save_cv_results', (['cv_res', 'self.model_name', 'self.feature_name', '"""baseline"""'], {}), "(cv_res, self.model_name, self.feature_name, 'baseline')\n", (9185, 9241), False, 'from src.utils.io import save_cv_results\n'), ((9636, 9658), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_train'], {}), '(X_train)\n', (9649, 9658), True, 'import scipy.sparse as sp\n'), ((9660, 9680), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_dev'], {}), '(X_dev)\n', (9673, 9680), True, 'import scipy.sparse as sp\n'), ((9742, 9837), 'src.model.linear_svm.LinearSVM', 'LinearSVM', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True,\n test=self.test)\n', (9751, 9837), False, 'from src.model.linear_svm import LinearSVM\n'), ((11001, 11028), 'numpy.vstack', 'np.vstack', (['(X_train, X_dev)'], {}), '((X_train, X_dev))\n', (11010, 11028), True, 'import numpy as np\n'), ((11045, 11072), 'numpy.hstack', 'np.hstack', (['(y_train, y_dev)'], {}), '((y_train, y_dev))\n', (11054, 11072), True, 'import numpy as np\n'), ((11780, 11851), 'src.utils.io.save_cv_results', 'save_cv_results', (['cv_res', 'self.model_name', 'self.feature_name', '"""baseline"""'], {}), "(cv_res, self.model_name, self.feature_name, 'baseline')\n", (11795, 11851), False, 'from src.utils.io import save_cv_results\n'), ((12246, 12268), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_train'], {}), '(X_train)\n', (12259, 12268), True, 'import scipy.sparse as sp\n'), ((12270, 12290), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_dev'], {}), '(X_dev)\n', (12283, 12290), True, 'import scipy.sparse as sp\n'), ((12352, 12447), 
'src.model.linear_svm.LinearSVM', 'LinearSVM', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True,\n test=self.test)\n', (12361, 12447), False, 'from src.model.linear_svm import LinearSVM\n'), ((13509, 13536), 'numpy.vstack', 'np.vstack', (['(X_train, X_dev)'], {}), '((X_train, X_dev))\n', (13518, 13536), True, 'import numpy as np\n'), ((13553, 13580), 'numpy.hstack', 'np.hstack', (['(y_train, y_dev)'], {}), '((y_train, y_dev))\n', (13562, 13580), True, 'import numpy as np\n'), ((14292, 14363), 'src.utils.io.save_cv_results', 'save_cv_results', (['cv_res', 'self.model_name', 'self.feature_name', '"""baseline"""'], {}), "(cv_res, self.model_name, self.feature_name, 'baseline')\n", (14307, 14363), False, 'from src.utils.io import save_cv_results\n'), ((14493, 14505), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14501, 14505), True, 'import numpy as np\n'), ((14751, 14773), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_train'], {}), '(X_train)\n', (14764, 14773), True, 'import scipy.sparse as sp\n'), ((14775, 14795), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_dev'], {}), '(X_dev)\n', (14788, 14795), True, 'import scipy.sparse as sp\n'), ((14863, 14958), 'src.model.linear_svm.LinearSVM', 'LinearSVM', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True,\n test=self.test)\n', (14872, 14958), False, 'from src.model.linear_svm import LinearSVM\n'), ((15437, 15449), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15445, 15449), True, 'import numpy as np\n'), ((15581, 15593), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15589, 15593), True, 'import numpy as np\n'), ((16034, 16061), 'numpy.vstack', 'np.vstack', (['(X_train, X_dev)'], {}), '((X_train, X_dev))\n', (16043, 16061), True, 'import numpy as 
np\n'), ((16078, 16105), 'numpy.hstack', 'np.hstack', (['(y_train, y_dev)'], {}), '((y_train, y_dev))\n', (16087, 16105), True, 'import numpy as np\n'), ((16813, 16884), 'src.utils.io.save_cv_results', 'save_cv_results', (['cv_res', 'self.model_name', 'self.feature_name', '"""baseline"""'], {}), "(cv_res, self.model_name, self.feature_name, 'baseline')\n", (16828, 16884), False, 'from src.utils.io import save_cv_results\n'), ((17279, 17301), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_train'], {}), '(X_train)\n', (17292, 17301), True, 'import scipy.sparse as sp\n'), ((17303, 17323), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['X_dev'], {}), '(X_dev)\n', (17316, 17323), True, 'import scipy.sparse as sp\n'), ((17393, 17488), 'src.model.linear_svm.LinearSVM', 'LinearSVM', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=True,\n test=self.test)\n', (17402, 17488), False, 'from src.model.linear_svm import LinearSVM\n'), ((3000, 3017), 'numpy.ravel', 'np.ravel', (['y_train'], {}), '(y_train)\n', (3008, 3017), True, 'import numpy as np\n'), ((3019, 3034), 'numpy.ravel', 'np.ravel', (['y_dev'], {}), '(y_dev)\n', (3027, 3034), True, 'import numpy as np\n'), ((3070, 3090), 'numpy.ravel', 'np.ravel', (['train_inst'], {}), '(train_inst)\n', (3078, 3090), True, 'import numpy as np\n'), ((3092, 3110), 'numpy.ravel', 'np.ravel', (['dev_inst'], {}), '(dev_inst)\n', (3100, 3110), True, 'import numpy as np\n'), ((3640, 3739), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (3652, 3739), False, 'from src.model.random_forest import RandomForest\n'), ((3860, 3974), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 
'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': '(True)'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,\n 'baseline', baseline=True, test=True)\n", (3867, 3974), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((4836, 4935), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (4848, 4935), False, 'from src.model.random_forest import RandomForest\n'), ((5382, 5394), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5390, 5394), True, 'import numpy as np\n'), ((6197, 6296), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (6209, 6296), False, 'from src.model.random_forest import RandomForest\n'), ((6417, 6531), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': '(True)'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,\n 'baseline', baseline=True, test=True)\n", (6424, 6531), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((7413, 7512), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (7425, 7512), False, 'from src.model.random_forest import RandomForest\n'), ((7965, 7977), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7973, 7977), True, 'import numpy as np\n'), ((8784, 
8883), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (8796, 8883), False, 'from src.model.random_forest import RandomForest\n'), ((9004, 9118), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': '(True)'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,\n 'baseline', baseline=True, test=True)\n", (9011, 9118), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((9980, 10079), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (9992, 10079), False, 'from src.model.random_forest import RandomForest\n'), ((10526, 10538), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10534, 10538), True, 'import numpy as np\n'), ((10949, 10966), 'numpy.ravel', 'np.ravel', (['y_train'], {}), '(y_train)\n', (10957, 10966), True, 'import numpy as np\n'), ((10968, 10983), 'numpy.ravel', 'np.ravel', (['y_dev'], {}), '(y_dev)\n', (10976, 10983), True, 'import numpy as np\n'), ((11394, 11493), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (11406, 11493), False, 'from src.model.random_forest import RandomForest\n'), ((11614, 11728), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': 
'(True)'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,\n 'baseline', baseline=True, test=True)\n", (11621, 11728), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((12590, 12689), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (12602, 12689), False, 'from src.model.random_forest import RandomForest\n'), ((13136, 13148), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13144, 13148), True, 'import numpy as np\n'), ((13902, 14001), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (13914, 14001), False, 'from src.model.random_forest import RandomForest\n'), ((15155, 15254), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (15167, 15254), False, 'from src.model.random_forest import RandomForest\n'), ((16427, 16526), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (16439, 16526), False, 'from src.model.random_forest import RandomForest\n'), ((16647, 16761), 'src.metric.uar.get_UAR', 'get_UAR', (['y_pred_dev', 'y_dev', 'dev_inst', 'self.model_name', 'self.feature_name', '"""baseline"""'], {'baseline': '(True)', 'test': '(True)'}), "(y_pred_dev, y_dev, dev_inst, self.model_name, 
self.feature_name,\n 'baseline', baseline=True, test=True)\n", (16654, 16761), False, 'from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR\n'), ((17631, 17730), 'src.model.random_forest.RandomForest', 'RandomForest', (['self.feature_name', 'X_train', 'y_train', 'X_dev', 'y_dev'], {'baseline': '(True)', 'test': 'self.test'}), '(self.feature_name, X_train, y_train, X_dev, y_dev, baseline=\n True, test=self.test)\n', (17643, 17730), False, 'from src.model.random_forest import RandomForest\n'), ((18177, 18189), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (18185, 18189), True, 'import numpy as np\n'), ((14149, 14161), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14157, 14161), True, 'import numpy as np\n')] |
import logging
from pathlib import Path
from typing import List, Optional
import jax
import numpy as np
import wandb
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.loggers.base import DummyLogger
from tqdm import tqdm
from fourierflow.callbacks import Callback
from .jax_callback_hook import TrainerCallbackHookMixin
logger = logging.getLogger(__name__)
class JAXTrainer(TrainerCallbackHookMixin):
    """A minimal PyTorch-Lightning-style trainer for JAX routines.

    Drives the train/validation/test loops of a ``routine`` (which owns the
    params, optimizer and step functions) over a datamodule, firing the
    callback hooks supplied by ``TrainerCallbackHookMixin`` and logging
    scalar metrics to the given (W&B) logger.
    """

    def __init__(
            self,
            max_epochs,
            weights_save_path: Optional[str] = None,
            enable_model_summary: bool = False,
            resume_from_checkpoint=None,
            limit_train_batches=None,
            limit_val_batches=None,
            callbacks: Optional[List[Callback]] = None,
            logger: Optional[WandbLogger] = None,
            seed: Optional[int] = None,
            enable_checkpointing: bool = True,
            plugins=None,
    ):
        """Store the loop configuration.

        Several parameters (``enable_model_summary``,
        ``resume_from_checkpoint``, ``enable_checkpointing``, ``plugins``)
        are accepted for Lightning API compatibility but are not used by
        the visible loops.
        """
        self.max_epochs = max_epochs
        self.weights_save_path = weights_save_path
        if weights_save_path:
            self.weights_save_path = Path(weights_save_path)
        self.limit_train_batches = limit_train_batches
        self.limit_val_batches = limit_val_batches
        self.callbacks = callbacks or []
        self.current_epoch = -1
        self.routine = None
        self.seed = seed
        # Fall back to a no-op logger so log_metrics calls are always safe.
        self.logger = logger or DummyLogger()
        self.global_step = -1
        self.logs = {}

    def tune(self, routine, datamodule):
        """No-op: tuning is not implemented for JAX routines."""
        pass

    def fit(self, routine, datamodule):
        """Train ``routine`` for ``max_epochs`` epochs, validating after each."""
        self.routine = routine
        params = routine.init(self.seed, datamodule)
        opt_state = routine.optimizer.init(params)
        step = jax.jit(routine.step)  # compile the update step once

        self.on_train_start()
        for epoch in range(self.max_epochs):
            self.current_epoch += 1
            self.on_train_epoch_start()
            train_batches = iter(datamodule.train_dataloader())
            with tqdm(train_batches, total=self.limit_train_batches, unit="batch") as tepoch:
                for i, batch in enumerate(tepoch):
                    self.global_step += 1
                    self.on_train_batch_start(batch, i)
                    tepoch.set_description(f"Epoch {epoch}")
                    outputs = step(params, opt_state, batch)
                    params, opt_state, loss_value = outputs
                    routine.params = params
                    tepoch.set_postfix(loss=loss_value.item())
                    self.on_train_batch_end(outputs, batch, i)
                    if self.global_step % 100 == 0:
                        logs = {'loss': loss_value.item()}
                        self.logger.log_metrics(logs, step=self.global_step)
                    if self.limit_train_batches and i >= self.limit_train_batches:
                        break
            # BUG FIX: this previously called on_train_epoch_start() a second
            # time, so the epoch-end callbacks were never fired.
            self.on_train_epoch_end()

            self.on_validation_epoch_start()
            validate_batches = iter(datamodule.val_dataloader())
            valid_outs = []
            for i, batch in tqdm(enumerate(validate_batches), total=self.limit_val_batches):
                self.on_validation_batch_start(batch, i, 0)
                outputs = routine.valid_step(params, batch)
                valid_outs.append(outputs)
                self.on_validation_batch_end(outputs, batch, i, 0)
                if self.limit_val_batches and i >= self.limit_val_batches:
                    break
            valid_logs = routine.validation_epoch_end(valid_outs)
            # Only scalar values can be sent to the metrics logger.
            valid_scalars = {k: v for k, v in valid_logs.items()
                             if np.isscalar(v)}
            self.logger.log_metrics(valid_scalars, step=self.global_step)
            self.logs = valid_logs
            self.on_validation_epoch_end()
        self.on_train_end()

    def test(self, routine, datamodule):
        """Run one pass over the test set, log scalar metrics and, if present,
        upload a correlations-over-time table to W&B."""
        self.on_test_epoch_start()
        test_batches = iter(datamodule.test_dataloader())
        test_outs = []
        for i, batch in tqdm(enumerate(test_batches)):
            self.on_test_batch_start(batch, i, 0)
            # NOTE(review): here the batch is unpacked as keyword arguments,
            # while fit() passes the batch whole — confirm both conventions
            # are intended by routine.valid_step.
            outputs = routine.valid_step(routine.params, **batch)
            test_outs.append(outputs)
            self.on_test_batch_end(outputs, batch, i, 0)
        test_logs = routine.test_epoch_end(test_outs)
        test_scalars = {k: v for k, v in test_logs.items() if np.isscalar(v)}
        self.logger.log_metrics(test_scalars)
        if 'test_correlations' in test_logs:
            corr_rows = list(zip(test_logs['test_times'],
                                 test_logs['test_correlations']))
            self.logger.experiment.log({
                'test_correlations': wandb.Table(['time', 'corr'], corr_rows)
            })
        self.on_test_epoch_end()
| [
"logging.getLogger",
"wandb.Table",
"numpy.isscalar",
"pathlib.Path",
"tqdm.tqdm",
"jax.jit",
"pytorch_lightning.loggers.base.DummyLogger"
] | [((357, 384), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (374, 384), False, 'import logging\n'), ((1643, 1664), 'jax.jit', 'jax.jit', (['routine.step'], {}), '(routine.step)\n', (1650, 1664), False, 'import jax\n'), ((1040, 1063), 'pathlib.Path', 'Path', (['weights_save_path'], {}), '(weights_save_path)\n', (1044, 1063), False, 'from pathlib import Path\n'), ((1328, 1341), 'pytorch_lightning.loggers.base.DummyLogger', 'DummyLogger', ([], {}), '()\n', (1339, 1341), False, 'from pytorch_lightning.loggers.base import DummyLogger\n'), ((1897, 1962), 'tqdm.tqdm', 'tqdm', (['train_batches'], {'total': 'self.limit_train_batches', 'unit': '"""batch"""'}), "(train_batches, total=self.limit_train_batches, unit='batch')\n", (1901, 1962), False, 'from tqdm import tqdm\n'), ((4287, 4301), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (4298, 4301), True, 'import numpy as np\n'), ((3548, 3562), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (3559, 3562), True, 'import numpy as np\n'), ((4597, 4637), 'wandb.Table', 'wandb.Table', (["['time', 'corr']", 'corr_rows'], {}), "(['time', 'corr'], corr_rows)\n", (4608, 4637), False, 'import wandb\n')] |
#!/usr/bin/env python3
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from conban_spanet.dataset_driver import DatasetDriver
from .utils import test_oracle
from bite_selection_package.config import spanet_config as config
# Feature dimensionality; fall back to 2048 when the config does not set one.
# (idiom fix: compare against None with `is`, not `==`)
N_FEATURES = 2048 if config.n_features is None else config.n_features
class Environment(object):
    """Contextual-bandit environment backed by the SPANet dataset driver.

    Holds an (N, d+1) feature matrix whose first column is a constant bias
    term and whose remaining columns come from the dataset driver.
    """

    def __init__(self, N, d=N_FEATURES):
        self.N = N
        # First column is a bias of ones; the rest are driver features.
        self.features = np.ones((N, d+1))
        self.driver = DatasetDriver(N)
        self.features[:, 1:] = self.driver.get_features()

    def run(self, algo, T, time, time_prev):
        """Run bandit algorithm ``algo`` against the environment for up to T rounds.

        :param algo: algorithm exposing explore()/learn() and attributes N, K
        :param T: horizon (maximum number of rounds)
        :param time: the ``time`` module, passed in by the caller
        :param time_prev: timestamp of the previous progress report
        :returns: (costs_algo, costs_spanet, pi_star_choice_hist,
                   pi_choice_hist, expected_srs, loss_list, pi_star_loss)
        """
        costs_algo = []
        costs_spanet = []
        pi_star_choice_hist = []
        pi_choice_hist = []
        N = algo.N  # note: overrides self.N, preserved from original logic
        K = algo.K
        # Retry bookkeeping, only used by the commented-out retry logic below.
        num_failures = 0
        MAX_FAILURES = 1
        expected_srs = []
        loss_list = []
        # Run algorithm for T time steps
        for t in range(T):
            # Feb 22: We do not care about mean expected loss anymore
            # exp_loss = algo.expected_loss(self.driver)
            # loss_list.append(exp_loss)
            # expected_srs.append(1.0 - exp_loss)
            if t % 10 == 0:
                time_now = time.time()
                print("Now at horzion", t, " Time taken is ", time_now - time_prev)
                time_prev = time_now

            # Exploration / Exploitation
            p_t = algo.explore(self.features)

            # Sample Action: flatten the (N, K) probability matrix and draw one
            # joint (item, arm) index.
            _, K = p_t.shape
            p_t_flat = p_t.reshape((-1,))
            sample_idx = np.random.choice(N*K, p = p_t_flat)
            n_t, a_t = sample_idx // K, sample_idx % K

            # Get Costs for the sampled item under SPANet's choice and ours.
            costs = self.driver.sample_loss_vector()
            pi_star = int(self.driver.get_pi_star()[n_t][0])
            cost_SPANet = costs[n_t, pi_star]
            cost_algo = costs[n_t, a_t]
            pi_star_choice_hist.append(pi_star)
            pi_choice_hist.append(a_t)
            if t % 10 == 0:
                print("cumulative loss is :"+str(np.sum(cost_algo)))
                time_now = time.time()
                print("Time Taken: ", time_now - time_prev)
                time_prev = time_now

            # Learning
            algo.learn(self.features, n_t, a_t, cost_algo, p_t)

            # Record costs for future use
            costs_algo.append(cost_algo)
            costs_spanet.append(cost_SPANet)

            # Replace successfully acquired food item
            # Or give up after some amount of time.
            """
            if (cost_algo == 1):
                num_failures += 1
                if num_failures >= MAX_FAILURES:
                    cost_algo = 0
            if (cost_algo == 0):
                num_failures = 0
                if not self.driver.resample(n_t):
                    print("Exhausted all food items!")
                    break
                self.features[:, 1:] = self.driver.get_features()
            """
            if not self.driver.resample(n_t):
                print("Exhausted all food items!")
                break
            self.features[:, 1:] = self.driver.get_features()

        # BUG FIX: the expected-loss computation below is commented out, so the
        # original `print("Expected Loss: " + str(exp_loss))` raised a
        # NameError on every completed run; the print is disabled as well.
        print("Calculating expected loss of algo...")
        # exp_loss = algo.expected_loss(self.driver)
        # print("Expected Loss: " + str(exp_loss))
        # expected_srs.append(1.0 - exp_loss)
        time_now = time.time()
        print("Time Taken: ", time_now - time_prev)
        time_prev = time_now
        pi_star_loss = self.driver.pi_star_loss
        return (costs_algo, costs_spanet, pi_star_choice_hist, pi_choice_hist,
                expected_srs, loss_list, pi_star_loss)
| [
"numpy.random.choice",
"numpy.sum",
"numpy.ones",
"conban_spanet.dataset_driver.DatasetDriver"
] | [((504, 523), 'numpy.ones', 'np.ones', (['(N, d + 1)'], {}), '((N, d + 1))\n', (511, 523), True, 'import numpy as np\n'), ((545, 561), 'conban_spanet.dataset_driver.DatasetDriver', 'DatasetDriver', (['N'], {}), '(N)\n', (558, 561), False, 'from conban_spanet.dataset_driver import DatasetDriver\n'), ((1776, 1811), 'numpy.random.choice', 'np.random.choice', (['(N * K)'], {'p': 'p_t_flat'}), '(N * K, p=p_t_flat)\n', (1792, 1811), True, 'import numpy as np\n'), ((2410, 2427), 'numpy.sum', 'np.sum', (['cost_algo'], {}), '(cost_algo)\n', (2416, 2427), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import os
def read_data():
    """Load the raw train/test CSVs and return them as one combined frame.

    Test rows are tagged with a sentinel ``Survived`` value of -888 so
    ``write_data`` can split the frame back apart later.
    """
    raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
    train_df = pd.read_csv(os.path.join(raw_data_path, 'train.csv'),
                           index_col='PassengerId')
    test_df = pd.read_csv(os.path.join(raw_data_path, 'test.csv'),
                          index_col='PassengerId')
    # Sentinel marks rows that came from the unlabeled test file.
    test_df['Survived'] = -888
    return pd.concat((train_df, test_df), axis=0)
def process_data(df):
    """Feature-engineer the combined Titanic frame.

    Adds Title, Fare_category, AgeState, FamilySize, IsMother, Deck and
    IsMale; imputes missing values; one-hot encodes the categoricals;
    drops raw text columns and moves 'Survived' to the front.
    The input frame is not mutated (the first assign makes a copy).
    """
    out = df.assign(Title=lambda x: x.Name.map(getTitle))
    out = fill_missing_values(out)
    out = out.assign(Fare_category=lambda x: pd.qcut(
        x.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']))
    out = out.assign(AgeState=lambda x: np.where(x.Age >= 18, 'Adult', 'Child'))
    out = out.assign(FamilySize=lambda x: x.Parch + x.SibSp + 1)
    out = out.assign(IsMother=lambda x: np.where(
        (x.Sex == 'female') & (x.Parch > 0) & (x.Age > 18) & (x.Title != 'Miss'), 1, 0))
    # Cabin 'T' is treated as unknown before the deck letter is extracted.
    out = out.assign(Cabin=lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin))
    out = out.assign(Deck=lambda x: x.Cabin.map(get_deck))
    out = out.assign(IsMale=lambda x: np.where(x.Sex == 'male', 1, 0))
    out = pd.get_dummies(out, columns=['Deck', 'Pclass', 'Title',
                                      'Fare_category', 'Embarked', 'AgeState'])
    out = out.drop(['Cabin', 'Name', 'Parch', 'Sex', 'SibSp', 'Ticket'], axis=1)
    return reorder_columns(out)
def getTitle(name):
    """Extract and canonicalise the honorific from a raw passenger name.

    Names look like ``"Last, Title. First ..."``: the token between the
    first comma and the following period is looked up in a small mapping
    of lowercase honorifics (KeyError for unknown titles).
    """
    title_group = {
        'mr': 'Mr',
        'mrs': 'Mrs',
        'miss': 'Miss',
        'master': 'Master',
        'don': 'Sir',
        'rev': 'Sir',
        'dr': 'Officer',
        'mme': 'Mrs',
        'ms': 'Mrs',
        'major': 'Officer',
        'lady': 'Lady',
        'sir': 'Sir',
        'mlle': 'Miss',
        'col': 'Officer',
        'capt': 'Officer',
        'the countess': 'Lady',
        'jonkheer': 'Sir',
        'dona': 'Lady',
    }
    after_comma = name.split(',')[1]        # e.g. " Mr. Owen Harris"
    raw_title = after_comma.split('.')[0]   # e.g. " Mr"
    return title_group[raw_title.strip().lower()]
def fill_missing_values(df):
    """Impute missing Embarked, Fare and Age values and return the frame.

    Mutates ``df`` in place (and also returns it for pipeline use).
    """
    # Embarked: missing ports default to Cherbourg ('C').
    df.fillna({'Embarked': 'C'}, inplace=True)
    # Fare: median fare among 3rd-class passengers who embarked at 'S'.
    third_class_s = (df.Pclass == 3) & (df.Embarked == 'S')
    median_fare = df.loc[third_class_s, 'Fare'].median()
    df.fillna({'Fare': median_fare}, inplace=True)
    # Age: median age of passengers sharing the same honorific Title.
    age_by_title = df.groupby('Title').Age.transform('median')
    df['Age'] = df['Age'].fillna(age_by_title)
    return df
def get_deck(cabin):
    """Return the deck letter (first char of the cabin code, uppercased),
    or 'z' when the cabin value is missing."""
    has_cabin = pd.notnull(cabin)
    first_char = str(cabin)[0].upper()
    return np.where(has_cabin, first_char, 'z')
def reorder_columns(df):
    """Return ``df`` with the 'Survived' column moved to the front,
    leaving the relative order of the other columns unchanged."""
    remaining = [column for column in df.columns if column != 'Survived']
    return df[['Survived'] + remaining]
def write_data(df):
    """Split the combined frame back into train/test and write processed CSVs.

    Rows whose 'Survived' is the -888 sentinel came from the test file and
    are written without the 'Survived' column.
    """
    processed_data_path = os.path.join(os.path.pardir, 'data', 'processed')
    df.loc[df.Survived != -888].to_csv(
        os.path.join(processed_data_path, 'train.csv'))
    feature_cols = [column for column in df.columns if column != 'Survived']
    df.loc[df.Survived == -888, feature_cols].to_csv(
        os.path.join(processed_data_path, 'test.csv'))
if __name__ == '__main__':
    # Entry point: read raw CSVs, engineer features, write processed CSVs.
    df = read_data()
    df = process_data(df)
    write_data(df)
"pandas.read_csv",
"pandas.qcut",
"numpy.where",
"os.path.join",
"pandas.notnull",
"pandas.concat"
] | [((114, 157), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', '"""raw"""'], {}), "(os.path.pardir, 'data', 'raw')\n", (126, 157), False, 'import os\n'), ((178, 218), 'os.path.join', 'os.path.join', (['raw_data_path', '"""train.csv"""'], {}), "(raw_data_path, 'train.csv')\n", (190, 218), False, 'import os\n'), ((239, 278), 'os.path.join', 'os.path.join', (['raw_data_path', '"""test.csv"""'], {}), "(raw_data_path, 'test.csv')\n", (251, 278), False, 'import os\n'), ((333, 386), 'pandas.read_csv', 'pd.read_csv', (['train_file_path'], {'index_col': '"""PassengerId"""'}), "(train_file_path, index_col='PassengerId')\n", (344, 386), True, 'import pandas as pd\n'), ((400, 452), 'pandas.read_csv', 'pd.read_csv', (['test_file_path'], {'index_col': '"""PassengerId"""'}), "(test_file_path, index_col='PassengerId')\n", (411, 452), True, 'import pandas as pd\n'), ((516, 554), 'pandas.concat', 'pd.concat', (['(train_df, test_df)'], {'axis': '(0)'}), '((train_df, test_df), axis=0)\n', (525, 554), True, 'import pandas as pd\n'), ((2923, 2972), 'os.path.join', 'os.path.join', (['os.path.pardir', '"""data"""', '"""processed"""'], {}), "(os.path.pardir, 'data', 'processed')\n", (2935, 2972), False, 'import os\n'), ((2994, 3040), 'os.path.join', 'os.path.join', (['processed_data_path', '"""train.csv"""'], {}), "(processed_data_path, 'train.csv')\n", (3006, 3040), False, 'import os\n'), ((3062, 3107), 'os.path.join', 'os.path.join', (['processed_data_path', '"""test.csv"""'], {}), "(processed_data_path, 'test.csv')\n", (3074, 3107), False, 'import os\n'), ((2661, 2678), 'pandas.notnull', 'pd.notnull', (['cabin'], {}), '(cabin)\n', (2671, 2678), True, 'import pandas as pd\n'), ((1297, 1328), 'numpy.where', 'np.where', (["(x.Sex == 'male')", '(1)', '(0)'], {}), "(x.Sex == 'male', 1, 0)\n", (1305, 1328), True, 'import numpy as np\n'), ((1157, 1198), 'numpy.where', 'np.where', (["(x.Cabin == 'T')", 'np.nan', 'x.Cabin'], {}), "(x.Cabin == 'T', np.nan, x.Cabin)\n", (1165, 
1198), True, 'import numpy as np\n'), ((1033, 1125), 'numpy.where', 'np.where', (["((x.Sex == 'female') & (x.Parch > 0) & (x.Age > 18) & (x.Title != 'Miss'))", '(1)', '(0)'], {}), "((x.Sex == 'female') & (x.Parch > 0) & (x.Age > 18) & (x.Title !=\n 'Miss'), 1, 0)\n", (1041, 1125), True, 'import numpy as np\n'), ((889, 928), 'numpy.where', 'np.where', (['(x.Age >= 18)', '"""Adult"""', '"""Child"""'], {}), "(x.Age >= 18, 'Adult', 'Child')\n", (897, 928), True, 'import numpy as np\n'), ((784, 851), 'pandas.qcut', 'pd.qcut', (['x.Fare', '(4)'], {'labels': "['very_low', 'low', 'high', 'very_high']"}), "(x.Fare, 4, labels=['very_low', 'low', 'high', 'very_high'])\n", (791, 851), True, 'import pandas as pd\n')] |
from functools import wraps
from typing import List, Callable
import numpy as np
# Attribute keys stored on decorated functions to mark them as letters.
LETTER_SIGNATURE = "fruits_letter"
LETTER_NAME = "fruits_name"
# Type aliases for letter callables used throughout this module.
BOUND_LETTER_TYPE = Callable[[np.ndarray, int], np.ndarray]
FREE_LETTER_TYPE = Callable[[int], BOUND_LETTER_TYPE]
class ExtendedLetter:
    """Class for an extended letter used in words.

    A :class:`~fruits.words.word.Word` consists of a number of
    extended letters.
    An extended letter is a container that only allows appending
    functions that were decorated with
    :meth:`~fruits.words.letters.letter`.

    :param letter_string: A string like ``f1(i)f2(j)f3(k)``, where
        ``f1,f2,f3`` are the names of decorated letters and ``i,j,k``
        are integers representing dimensions. For available letters call
        :meth:`fruits.words.letters.get_available`.
    :type letter_string: str
    """
    def __init__(self, letter_string: str = ""):
        # Parallel lists: the registered letter functions and the dimension
        # each one is bound to when iterated/indexed.
        self._letters: List[FREE_LETTER_TYPE] = []
        self._dimensions: List[int] = []
        self._string_repr = ""
        self.append_from_string(letter_string)
    def append(self, letter: FREE_LETTER_TYPE, dim: int = 0):
        """Appends a letter to the ExtendedLetter object.

        :param letter: Function that was decorated with
            :meth:`~fruits.words.letters.letter`.
        :type letter: callable
        :param dim: Dimension of the letter that is going to be used as
            its second argument, if it has one, defaults to 0
        :type dim: int, optional
        """
        if not callable(letter):
            raise TypeError("Argument letter has to be a callable function")
        elif not _letter_configured(letter):
            raise TypeError("Letter has the wrong signature. Perhaps it " +
                            "wasn't decorated correctly?")
        else:
            self._letters.append(letter)
            self._dimensions.append(dim)
            # String representation uses 1-based dimensions, e.g. "SIMPLE(1)".
            self._string_repr += letter.__dict__[LETTER_NAME]
            self._string_repr += "(" + str(dim+1) + ")"
    def append_from_string(self, letter_string: str):
        """Parse a string like ``NAME(1)NAME(2)`` and append each letter,
        converting the 1-based dimension back to a 0-based index."""
        letters = letter_string.split(")")[:-1]
        for letter in letters:
            l, d = letter.split("(")
            self.append(_get(l), int(d)-1)
    def copy(self) -> "ExtendedLetter":
        """Returns a copy of this extended letter.

        :rtype: ExtendedLetter
        """
        el = ExtendedLetter()
        el._letters = self._letters.copy()
        el._dimensions = self._dimensions.copy()
        el._string_repr = self._string_repr
        return el
    def __iter__(self):
        # Iteration yields each letter bound to its stored dimension.
        self._iter_index = -1
        return self
    def __next__(self):
        if self._iter_index < len(self._letters)-1:
            self._iter_index += 1
            return self._letters[self._iter_index](
                        self._dimensions[self._iter_index])
        raise StopIteration()
    def __len__(self) -> int:
        return len(self._letters)
    def __getitem__(self, i: int) -> BOUND_LETTER_TYPE:
        # Indexing binds letter i to its dimension and returns the callable.
        return self._letters[i](self._dimensions[i])
    def __str__(self) -> str:
        return "["+self._string_repr+"]"
    def __repr__(self):
        return "fruits.words.letters.ExtendedLetter"
def letter(*args, name: str = None):
    """Decorator for the implementation of a letter appendable to an
    :class:`~fruits.words.letters.ExtendedLetter` object.

    It is possible to implement a new letter by using this decorator.
    This callable (e.g. called ``myletter``) has to have two arguments:
    ``X: np.ndarray`` and ``i: int``, where ``X`` is a multidimensional
    time series and ``i`` is the dimension index that can
    (but doesn't need to) be used in the decorated function.
    The function has to return a numpy array. ``X`` has exactly two
    dimensions and the returned array has one dimension.

    .. code-block:: python

        @fruits.words.letter(name="ReLU")
        def myletter(X: np.ndarray, i: int) -> np.ndarray:
            return X[i, :] * (X[i, :]>0)

    It is also possible to use this decorator without any arguments:

    .. code-block:: python

        @fruits.words.letter

    Available predefined letters are:

    - ``simple``: Extracts a single dimension
    - ``absolute``: Extracts the absolute value of a single dim.

    :param name: You can supply a name to the function. This name will
        be used for documentation in an ``ExtendedLetter`` object. If
        no name is supplied, then the name of the function is used.
        Each letter has to have a unique name., defaults to None
    :type name: str, optional
    """
    if name is not None and not isinstance(name, str):
        raise TypeError("Unknown argument type for name")
    if len(args) > 1:
        raise RuntimeError("Too many arguments")
    # Case 1: used bare as ``@letter`` — args[0] is the decorated function
    # itself and its __name__ becomes the registry name.
    if name is None and len(args) == 1 and callable(args[0]):
        _configure_letter(args[0], args[0].__name__)
        @wraps(args[0])
        def wrapper(i: int):
            def index_manipulation(X: np.ndarray):
                return args[0](X, i)
            return index_manipulation
        _log(args[0].__name__, wrapper)
        return wrapper
    # Case 2: used as a decorator factory, ``@letter(name=...)`` or
    # ``@letter("NAME")`` — return the actual decorator.
    else:
        if name is None and len(args) > 0:
            if not isinstance(args[0], str):
                raise TypeError("Unknown argument type")
            name = args[0]
        def letter_decorator(func):
            _configure_letter(func, name=name)
            @wraps(func)
            def wrapper(i: int):
                def index_manipulation(X: np.ndarray):
                    return func(X, i)
                return index_manipulation
            _log(name, wrapper)
            return wrapper
        return letter_decorator
# Global registry mapping letter name -> free letter function.
_AVAILABLE = dict()


def _log(name: str, func: FREE_LETTER_TYPE):
    """Register *func* in the global letter registry under *name*,
    rejecting duplicate names."""
    if name not in _AVAILABLE:
        _AVAILABLE[name] = func
        return
    raise RuntimeError(f"Letter with name '{name}' already exists")
def _get(name: str) -> FREE_LETTER_TYPE:
    """Look up a registered letter by name; raise when it is unknown."""
    if name in _AVAILABLE:
        return _AVAILABLE[name]
    raise RuntimeError(f"Letter with name '{name}' does not exist")
def get_available() -> List[str]:
    """Returns a list of all available letter names to use in a
    :class:`~fruits.words.letters.ExtendedLetter`.

    :rtype: List[str]
    """
    return [name for name in _AVAILABLE]
def _configure_letter(func: BOUND_LETTER_TYPE, name: str):
    """Mark *func* as a letter and attach its display name.

    Raises RuntimeError when the callable does not take exactly two
    positional arguments (the series ``X`` and the dimension ``i``).
    """
    arg_count = func.__code__.co_argcount
    if arg_count != 2:
        raise RuntimeError("Wrong number of arguments at decorated function " +
                           str(func.__name__) + ". Should be 2.")
    func.__dict__[LETTER_SIGNATURE] = "letter"
    func.__dict__[LETTER_NAME] = name
def _letter_configured(func: FREE_LETTER_TYPE) -> bool:
    """Check whether *func* carries the letter marker attributes and is
    registered in the global registry."""
    attrs = func.__dict__
    return (LETTER_SIGNATURE in attrs
            and LETTER_NAME in attrs
            and attrs[LETTER_NAME] in _AVAILABLE)
@letter(name="SIMPLE")
def simple(X: np.ndarray, i: int) -> np.ndarray:
    """Extract dimension ``i`` of the series unchanged."""
    return X[i, :]
@letter(name="ABS")
def absolute(X: np.ndarray, i: int) -> np.ndarray:
    """Extract the absolute value of dimension ``i`` of the series."""
    return np.abs(X[i, :])
| [
"numpy.abs",
"functools.wraps"
] | [((7192, 7207), 'numpy.abs', 'np.abs', (['X[i, :]'], {}), '(X[i, :])\n', (7198, 7207), True, 'import numpy as np\n'), ((4922, 4936), 'functools.wraps', 'wraps', (['args[0]'], {}), '(args[0])\n', (4927, 4936), False, 'from functools import wraps\n'), ((5436, 5447), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (5441, 5447), False, 'from functools import wraps\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
# In[2]:
## read data:
import pandas as pd
# List the ImageNet class folders (synset ids like "n01440764"), keeping
# only names containing "n", sorted alphabetically.
folder_names = sorted(os.listdir("../Data/ILSVRC/Data/CLS-LOC/train/"))
folder_names =sorted([i for i in folder_names if "n" in i])
print(len(folder_names))
# match class based on alphabet:
# class index = alphabetical position of the synset folder name
label_to_index = dict((name, index) for index, name in enumerate(folder_names))
# In[150]:
### We can select the number of samples we train
# There are 1 million images and we can take 5% percent, which is 50k
# Number of training samples to use (full set is ~1M images).
N_train = 20000
train_csv = pd.read_csv("../Data/LOC_train_solution.csv")
# In[7]:
## name and synset match
# Build a synset-id -> name table. Collect the rows first and construct the
# DataFrame once: pd.concat inside the loop is quadratic in the number of
# rows. The file handle is closed via the context manager (the original
# handle was never closed).
# NOTE(review): temp.pop() keeps only the LAST word of each class name —
# behavior preserved from the original; confirm that is intended.
rows = []
with open("../Data/LOC_synset_mapping.txt") as file_synset:
    for f in file_synset:
        temp = f.replace("\n", "").replace(",", "").split(" ")
        rows.append([temp[0], temp.pop()])
synset_match = pd.DataFrame(rows, columns=["Id", "names"])
# In[8]:
# read data and box:
# For each of the first N_train rows, build the image path, record the
# class (synset id prefix of the ImageId) and collect its bounding boxes.
data_path = "../Data/ILSVRC/Data/CLS-LOC/train/"
data = pd.DataFrame()
image_path = []
class_all = []
boxes = []
for i in range(N_train):
    temp = data_path+train_csv["ImageId"][i].split("_")[0]+"/"
    image_path.append(temp+train_csv["ImageId"][i]+".JPEG")
    class_all.append(train_csv["ImageId"][i].split("_")[0])
    # box from training csv
    # Keep only the purely numeric tokens (box coordinates); the
    # comprehension variable shadows the outer `i` but the outer index is
    # read before the comprehension's own scope takes over.
    temp = [i for i in train_csv["PredictionString"][i].split(" ") if i.isdigit()]
    boxes.append(temp)
class_name_array =class_all
class_id = np.array([label_to_index[i] for i in class_all])
image_path = np.array(image_path)
# In[9]:
# save txt: one line per image — "path x1,y1,x2,y2,class_id [x1,y1,...] ".
# Use a context manager so the file is flushed and closed; the original
# handle was left open.
with open("dataset_20k.txt", "w+") as f_script:
    for i in range(N_train):
        line = image_path[i]
        line += " "
        temp_i = boxes[i]
        # Boxes come as flat groups of four coordinates.
        for j in range(int(len(boxes[i]) / 4)):
            line = line + "{},{},{},{},{}".format(
                temp_i[4 * j], temp_i[4 * j + 1],
                temp_i[4 * j + 2], temp_i[4 * j + 3], class_id[i])
            line += " "
        line += "\n"
        f_script.write(line)
# In[10]:
# In[84]:
# NOTE: bare expression left over from a notebook cell; it only echoes the
# value in Jupyter and has no effect when run as a script.
class_all
# In[25]:
# Persist the ordered class folder names, one per line. Use a context
# manager so the file is flushed and closed (the original handle was
# never closed).
with open("classes.txt", "w+") as f_script:
    for folder_name in folder_names:
        f_script.write(folder_name + "\n")
# In[26]:
# Sanity-check cells: the counts printed below should all agree.
print(len(class_name_array))
# In[18]:
print(len(folder_names))
# In[29]:
# Re-read the class list just written to classes.txt and strip newlines.
with open("classes.txt") as f:
    class_names = f.readlines()
class_names = [c.strip() for c in class_names]
# In[31]:
print(len(class_names))
# In[28]:
print(len(folder_names))
# In[6]:
# In[ ]:
| [
"os.listdir",
"pandas.read_csv",
"numpy.array",
"pandas.DataFrame",
"pandas.concat"
] | [((823, 868), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/LOC_train_solution.csv"""'], {}), "('../Data/LOC_train_solution.csv')\n", (834, 868), True, 'import pandas as pd\n'), ((976, 1013), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Id', 'names']"}), "(columns=['Id', 'names'])\n", (988, 1013), True, 'import pandas as pd\n'), ((1355, 1369), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1367, 1369), True, 'import pandas as pd\n'), ((1807, 1855), 'numpy.array', 'np.array', (['[label_to_index[i] for i in class_all]'], {}), '([label_to_index[i] for i in class_all])\n', (1815, 1855), True, 'import numpy as np\n'), ((1871, 1891), 'numpy.array', 'np.array', (['image_path'], {}), '(image_path)\n', (1879, 1891), True, 'import numpy as np\n'), ((410, 458), 'os.listdir', 'os.listdir', (['"""../Data/ILSVRC/Data/CLS-LOC/train/"""'], {}), "('../Data/ILSVRC/Data/CLS-LOC/train/')\n", (420, 458), False, 'import os\n'), ((1210, 1258), 'pandas.concat', 'pd.concat', (['[synset_match, s2]'], {'ignore_index': '(True)'}), '([synset_match, s2], ignore_index=True)\n', (1219, 1258), True, 'import pandas as pd\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, <NAME>
Date created: 12/8/2016 (original)
Date last modified: 05/25/2018
"""
__version__ = "1.3"
from time import sleep,time
from logging import debug,info,warn,error
import logging
from thread import start_new_thread
import traceback
import psutil, os
import platform #https://stackoverflow.com/questions/110362/how-can-i-find-the-current-os-in-python
# Raise this process's scheduling priority so the server stays responsive.
p = psutil.Process(os.getpid()) #source: https://psutil.readthedocs.io/en/release-2.2.1/
# Available Windows priority classes in psutil:
# psutil.ABOVE_NORMAL_PRIORITY_CLASS
# psutil.BELOW_NORMAL_PRIORITY_CLASS
# psutil.HIGH_PRIORITY_CLASS
# psutil.IDLE_PRIORITY_CLASS
# psutil.NORMAL_PRIORITY_CLASS
# psutil.REALTIME_PRIORITY_CLASS
if platform.system() == 'Windows':
    p.nice(psutil.ABOVE_NORMAL_PRIORITY_CLASS)
elif platform.system() == 'Linux': #linux FIXIT
    p.nice(-10) # nice runs from -20 to +12, where -20 the most not nice code(highest priority)
from numpy import nan, mean, std, nanstd, asfarray, asarray, hstack, array, concatenate, delete, round, vstack, hstack, zeros, transpose, split, unique, nonzero, take, savetxt, min, max
from time import time, sleep, clock
import sys
import os.path
import struct
from pdb import pm
from time import gmtime, strftime, time
from logging import debug,info,warn,error
###These are Friedrich's libraries.
###The number3 in the end shows that it is competable with the Python version 3.
###However, some of them were never well tested.
if sys.version_info[0] ==3:
from persistent_property3 import persistent_property
from DB3 import dbput, dbget
from module_dir3 import module_dir
from normpath3 import normpath
else:
from persistent_property import persistent_property
from DB import dbput, dbget
from module_dir import module_dir
from normpath import normpath
from struct import pack, unpack
from timeit import Timer, timeit
import sys
###In Python 3 the thread library was renamed to _thread
if sys.version_info[0] ==3:
from _thread import start_new_thread
else:
from thread import start_new_thread
from datetime import datetime
from precision_sleep import precision_sleep #home-built module for accurate sleep
import msgpack
import msgpack_numpy as m
import socket
import platform
server_name = platform.node()  # hostname of the machine running the server
class server_LL(object):
def __init__(self, name = ''):
"""
to initialize an instance and create main variables
"""
if len(name) == 0:
self.name = 'test_communication_LL'
else:
self.name = name
self.running = False
self.network_speed = 12**6 # bytes per second
self.client_lst = []
def init_server(self):
'''
Proper sequence of socket server initialization
'''
self._set_commands()
self.sock = self.init_socket()
if self.sock is not None:
self.running = True
else:
self.running = False
self._start()
    def stop(self):
        """Stop the accept loop and close the listening socket."""
        self.running = False
        self.sock.close()
def init_socket(self):
'''
initializes socket for listening, creates sock and bind to '' with a port somewhere between 2030 and 2050
'''
import socket
ports = range(2030,2050)
for port in ports:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', port))
self.port = port
sock.listen(100)
flag = True
except:
error(traceback.format_exc())
flag = False
if flag:
break
else:
sock = None
return sock
def _set_commands(self):
"""
Set type definition, the dictionary of command excepted by the server
Standard supported commands:
- "help"
- "init"
- "close"
- "abort"
- "snapshot"
- "subscribe"
- "task"
"""
self.commands = {}
self.commands['help'] = 'help'
self.commands['init'] = 'init'
self.commands['close'] = 'close'
self.commands['abort'] = 'abort'
self.commands['snapshot'] = 'snapshot'
self.commands['subscribe'] = 'subscribe'
self.commands['task'] = 'task'
    def _get_commands(self):
        """
        Return the dictionary with all supported commands.
        """
        return self.commands
    def _start(self):
        '''
        Create a separate thread running the accept loop (_run).
        '''
        start_new_thread(self._run,())
    def _run(self):
        """
        Run _run_once in a loop until self.running is cleared (see stop()).
        """
        self.running = True
        while self.running:
            self._run_once()
        self.running = False
    def _run_once(self):
        """
        Accept one client connection, receive a request, dispatch it and
        send the reply back.
        """
        client, addr = self.sock.accept()
        debug('Client has connected: %r,%r' %(client,addr))
        self._log_last_call(client, addr)
        try:
            msg_in = self._receive(client)
        except:
            error(traceback.format_exc())
            # Fall back to a placeholder request so a reply is still sent.
            msg_in = {b'command':b'unknown',b'message':b'unknown'}
        msg_out = self._receive_handler(msg_in,client)
        self._send(client,msg_out)
def _transmit_handler(self,command = '', message = ''):
from time import time
res_dic = {}
res_dic[b'command'] = command
res_dic[b'time'] = time()
res_dic[b'message'] = message
return res_dic
def _receive_handler(self,msg_in,client):
"""
the incoming msg_in has N mandatory fields: command, message and time
"""
from time import time
res_dic = {}
#the input msg has to be a dictionary. If not, ignore. FIXIT. I don't know how to do it in Python3
debug('command received: %r' % msg_in)
try:
keys = msg_in.keys()
command = msg_in[b'command']
res_dic['command'] = command
flag = True
if command == b'help':
res_dic['message'] = self.help()
elif command == b'init':
res_dic['message'] = self.dev_init()
elif command == b'close':
res_dic['message'] = self.dev_close()
elif command == b'abort':
res_dic['message'] = self.dev_abort()
elif command == b'snapshot':
res_dic['message'] = self.dev_snapshot()
elif command == b'subscribe':
try:
port = message['port']
err = ''
except:
err = traceback.format_exc()
if len(err) ==0:
res_dic['message'] = self.subscribe(client,port)
else:
res_dic['message'] = 'server needs port number to subscribe'
elif command == b'task':
print(msg_in)
if b'message' in msg_in.keys():
res_dic['message'] = self.task(msg_in[b'message'])
else:
flag = False
err = 'task command does not have message key'
else:
flag = False
err = 'the command %r is not supporte by the server' % command
if not flag:
debug('command is not recognized')
res_dic['command'] = 'unknown'
res_dic['message'] = 'The quote of the day: ... . I hope you enjoyed it.'
res_dic['flag'] = flag
res_dic['error'] = err
else:
res_dic['flag'] = flag
res_dic['error'] = ''
except:
error(traceback.format_exc())
res_dic['command'] = 'unknown'
res_dic['message'] = 'The quote of the day: ... . I hope you enjoyed it.'
res_dic['flag'] = True
res_dic['error'] = ''
res_dic[b'time'] = time()
return res_dic
    def _receive(self,client):
        """
        descritpion:
            client sends 20 bytes with a number of expected package size.
            20 bytes will encode a number up to 10**20 bytes.
            This will be enough for any possible size of the transfer
        input:
            client - socket client object
        output:
            unpacked data

        NOTE(review): when length == 0, msg_in stays a str '' and is handed
        to msgpack.unpackb, which expects bytes — confirm the zero-length
        case can actually occur.
        """
        import msgpack
        import msgpack_numpy as msg_m
        a = client.recv(20)
        length = int(a)
        debug('initial length: %r' % length)
        sleep(0.01)
        if length != 0:
            msg_in = ''.encode()
            # Keep reading until the announced number of bytes has arrived;
            # recv may return partial chunks.
            while len(msg_in) < length:
                debug('length left (before): %r' % length)
                msg_in += client.recv(length - len(msg_in))
                debug('length left (after): %r' % length)
                sleep(0.01)
        else:
            msg_in = ''
        return msgpack.unpackb(msg_in, object_hook=msg_m.decode)
def _send(self,client,msg_out):
"""
descrition:
uses msgpack to serialize data and sends it to the client
"""
debug('command send %r' % msg_out)
msg = msgpack.packb(msg_out, default=m.encode)
length = str(len(msg))
if len(length)!=20:
length = '0'*(20-len(length)) + length
try:
client.sendall(length.encode())
client.sendall(msg)
flag = True
except:
error(traceback.format_exc())
flag = False
return flag
    def _connect(self,ip_address,port):
        """Open a TCP connection to (ip_address, port) with a 10 s connect
        timeout; return the connected socket, or None on failure."""
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            server.settimeout(10)
            server.connect((ip_address , port))
            # Back to blocking mode for the subsequent send/receive.
            server.settimeout(None)
            debug("Connection success!")
        except:
            error('%r' %(traceback.format_exc()))
            server = None
        return server
    def _transmit(self,command = '', message = '' ,ip_address = '127.0.0.1',port = 2031):
        """
        One request/response round trip: build the request dict, connect,
        send it, receive the reply, close the connection, and return the
        decoded reply (also stored on self.response_arg).
        """
        msg_out = self._transmit_handler(command = command, message = message)
        server = self._connect(ip_address = ip_address,port = port)
        # NOTE(review): if _connect failed, server is None and _send /
        # _receive will raise -- there is no guard here; confirm intended.
        flag = self._send(server,msg_out)
        self.response_arg = self._receive(server)
        self._server_close(server)
        return self.response_arg
    def _server_close(self,server):
        # Close the peer socket and return it (now unusable) to the caller.
        server.close()
        return server
    def _log_last_call(self,client,addr):
        # Remember the most recent caller: [accept() address, peer address].
        self.last_call = [addr,client.getpeername()]
    #***************************************************
    #*** wrappers for basic response functions *********
    #***************************************************
    def subscribe(self,port,client):
        """
        Register the calling client for updates: remember its IP address
        (taken from the connected socket) and the port it asked to use.
        Returns a confirmation string.
        """
        self.subscribe_lst = [client.getpeername()[0],port]
        msg = 'subscribe command received' + str(self.subscribe_lst)
        debug(msg)
        return msg
    def help(self):
        # NOTE(review): dead code -- this definition is shadowed by the
        # second help() defined below in the same class.
        debug('help command received')
    #***************************************************
    #*** wrappers for basic response functions *********
    #***************************************************
    def help(self):
        """
        Build the help payload: server name, supported commands, and the
        names of the module-level device's indicators and controls.
        """
        msg = {}
        msg['server name']= self.name
        msg['commands'] = self.commands
        # 'device' is the module-level device instance; these are dict
        # key views, not lists.
        msg['dev_indicators'] = device.indicators.keys()
        msg['dev_controls'] = device.controls.keys()
        debug(msg)
        return msg
def dev_init(self):
msg = 'init command received'
device.init()
debug(msg)
return msg
def dev_close(self):
msg = 'close command received'
debug(msg)
return msg
def dev_abort(self):
msg = 'abort command received'
debug(msg)
return msg
def dev_snapshot(self):
msg = 'snapshot command received'
debug(msg)
msg = device.snapshot()
return msg
def dev_task(self,msg):
msg = 'task command received: %r' % msg
debug(msg)
return msg
def dev_get_device_indicators(self, indicator = {}):
response = {}
if 'all' in indicator.keys():
response = device.indicators
else:
for key in indicator.keys():
if key in device.indicators.keys():
response[key] = device.indicators[key]
return response
def dev_set_device_indicators(self, control = {}):
for key in controll.keys():
if key in device.controlls.keys():
device.controlls[key] = controll[key]
def dev_get_device_controls(self, control = {}):
response = {}
if 'all' in control.keys():
response = device.controls
else:
for key in controll.keys():
if key in device.controls.keys():
response[key] = device.controls[key]
return response
    def dev_set_device_controls(self, control = {}):
        """Write the given values into device.controls, for known keys only."""
        for key in control.keys():
            if key in device.controls.keys():
                device.controls[key] = control[key]
class client_LL(object):
    """
    Low-level msgpack-over-TCP client.

    Mirrors server_LL's transport helpers: every message is
    msgpack-encoded and prefixed with a 20-byte zero-padded ASCII
    length header.
    """
    def __init__(self, name = ''):
        """
        Initialize the instance and create the main state variables.
        """
        if len(name) == 0:
            self.name = 'test_client_LL'
        else:
            self.name = name
        self.running = False
        self.network_speed = 12**6 # bytes per second
        self.client_lst = []
    def init_server(self):
        '''
        Proper sequence of socket server initialization.

        NOTE(review): reads self.sock, which __init__ never sets --
        presumably assigned elsewhere; confirm before use.
        '''
        self._set_commands()
        if self.sock is not None:
            self.running = True
        else:
            self.running = False
    def stop(self):
        # Stop the client and close its socket.
        self.running = False
        self.sock.close()
    def _set_commands(self):
        """
        Set the dictionary of commands expected by the server.
        Standard supported commands:
            - "help"
            - "init"
            - "close"
            - "abort"
            - "snapshot"
            - "subscribe"
            - "task"
        """
        self.commands = {}
        self.commands['help'] = 'help'
        self.commands['init'] = 'init'
        self.commands['close'] = 'close'
        self.commands['abort'] = 'abort'
        self.commands['snapshot'] = 'snapshot'
        self.commands['subscribe'] = 'subscribe'
        self.commands['task'] = 'task'
    def _get_commands(self):
        """
        returns the dictionary with all supported commands
        """
        return self.commands
    def _transmit_handler(self,command = '', message = ''):
        # Build the request dict; keys are bytes to match the wire format.
        from time import time
        res_dic = {}
        res_dic[b'command'] = command
        res_dic[b'time'] = time()
        res_dic[b'message'] = message
        return res_dic
    def _receive(self,client):
        """
        Receive and deserialize one msgpack message from the socket.

        Protocol: the peer first sends a 20-byte ASCII length header
        (enough to encode sizes up to 10**20 bytes), then the payload.
        input:
            client - socket client object
        output:
            unpacked data
        """
        import msgpack
        import msgpack_numpy as msg_m
        a = client.recv(20)
        length = int(a)
        sleep(0.01)
        if length != 0:
            msg_in = ''.encode()
            # recv() may return fewer bytes than asked for; loop until
            # the announced payload length has arrived.
            while len(msg_in) < length:
                msg_in += client.recv(length - len(msg_in))
                sleep(0.01)
        else:
            msg_in = ''
        return msgpack.unpackb(msg_in, object_hook=msg_m.decode)
    def _send(self,client,msg_out):
        """
        Serialize msg_out with msgpack and send it, preceded by a
        20-byte zero-padded ASCII length header. Returns True on
        success, False when the transfer failed.
        """
        debug('command send %r' % msg_out)
        # NOTE(review): 'msgpack' and 'm' are assumed to be module-level
        # imports (msgpack / msgpack_numpy) -- confirm.
        msg = msgpack.packb(msg_out, default=m.encode)
        length = str(len(msg))
        if len(length)!=20:
            length = '0'*(20-len(length)) + length
        try:
            client.sendall(length.encode())
            client.sendall(msg)
            flag = True
        except:
            error(traceback.format_exc())
            flag = False
        return flag
    def _connect(self,ip_address,port):
        # Open a TCP connection with a 10 s connect timeout; returns the
        # socket, or None when the connection failed.
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            server.settimeout(10)
            server.connect((ip_address , port))
            server.settimeout(None)
            debug("Connection success!")
        except:
            error('%r' %(traceback.format_exc()))
            server = None
        return server
    def _transmit(self,command = '', message = '' ,ip_address = '127.0.0.1',port = 2031):
        # One round trip: build request, connect, send, receive, close.
        msg_out = self._transmit_handler(command = command, message = message)
        server = self._connect(ip_address = ip_address,port = port)
        flag = self._send(server,msg_out)
        self.response_arg = self._receive(server)
        self._server_close(server)
        return self.response_arg
    def _server_close(self,server):
        # Close the peer socket and return it to the caller.
        server.close()
        return server
    def _log_last_call(self,client,addr):
        # Remember the most recent caller: [accept() address, peer address].
        self.last_call = [addr,client.getpeername()]
    #***************************************************
    #*** wrappers for basic response functions *********
    #***************************************************
    def subscribe(self,port,client):
        """
        Register this client for updates: remember the peer IP address
        and the requested port. Returns a confirmation string.
        """
        self.subscribe_lst = [client.getpeername()[0],port]
        msg = 'subscribe command received' + str(self.subscribe_lst)
        debug(msg)
        return msg
class syringe_pump_DL(object):
    """
    Device-level wrapper around the Cavro Centris syringe pump driver.

    Exposes the indicators/controls dictionaries that the server's
    dev_* handlers read and write.
    """
    def __init__(self):
        # Only the name is set here; call init() to talk to the hardware.
        self.name = 'syringe_pump_DL'
    def init(self):
        # Discover pumps and create empty indicator/control dictionaries.
        from cavro_centris_syringe_pump_LL import driver
        driver.discover()
        self.indicators = {}
        self.controls = {}
        self.indicators['positions'] = {}
        self.indicators['valves'] = {}
    def help(self):
        debug('help command received')
    def snapshot(self):
        """
        Return the current indicators/controls plus a 'data' payload.

        NOTE(review): 'data' is random test data (2 x 100000 floats) --
        presumably a placeholder for real measurements; confirm.
        """
        response = {}
        response['indicators'] = self.indicators
        response['controls'] = self.controls
        from numpy import random
        response['data'] = random.rand(2,100000)
        return response
    def update_indictors(self):
        # (sic: 'indictors') Refresh positions and valve states of pumps 1-4.
        from cavro_centris_syringe_pump_LL import driver
        self.indicators['positions'] = driver.positions(pids = [1,2,3,4])
        self.indicators['valves'] = driver.valve_get(pids = [1,2,3,4])
    def run_once(self):
        # Despite the name, this polls the pump indicators forever (once
        # per second); it is meant to run in a background thread.
        from time import sleep
        while True:
            self.update_indictors()
            sleep(1)
    def run(self):
        # Start the polling loop in a background thread.
        start_new_thread(self.run_once,())
# Module-level wiring: create the server/client pair and the device that
# the server's dev_* handlers operate on.
# NOTE(review): 'suringe' is a typo for 'syringe' in both name strings --
# left unchanged because the strings may be used for identification.
server = server_LL(name = 'suringe_pump_server_DL')
client = client_LL(name = 'suringe_pump_client_DL')
from cavro_centris_syringe_pump_LL import driver
device = syringe_pump_DL()
server.init_server()
if __name__ == "__main__":
    from tempfile import gettempdir
    logging.basicConfig(#filename=gettempdir()+'/suringe_pump_DL.log',
        level=logging.DEBUG, format="%(asctime)s %(levelname)s: %(message)s")
    # Reminder of useful interactive driver commands:
    print('driver.discover()')
    print('driver.prime(1)')
    print('driver.prime(3)')
| [
"logging.basicConfig",
"traceback.format_exc",
"platform.node",
"logging.debug",
"socket.socket",
"numpy.random.rand",
"cavro_centris_syringe_pump_LL.driver.valve_get",
"msgpack.packb",
"time.sleep",
"cavro_centris_syringe_pump_LL.driver.discover",
"msgpack.unpackb",
"platform.system",
"os.g... | [((2291, 2306), 'platform.node', 'platform.node', ([], {}), '()\n', (2304, 2306), False, 'import platform\n'), ((455, 466), 'os.getpid', 'os.getpid', ([], {}), '()\n', (464, 466), False, 'import psutil, os\n'), ((727, 744), 'platform.system', 'platform.system', ([], {}), '()\n', (742, 744), False, 'import platform\n'), ((19081, 19175), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)s: %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s %(levelname)s: %(message)s')\n", (19100, 19175), False, 'import logging\n'), ((811, 828), 'platform.system', 'platform.system', ([], {}), '()\n', (826, 828), False, 'import platform\n'), ((4650, 4681), 'thread.start_new_thread', 'start_new_thread', (['self._run', '()'], {}), '(self._run, ())\n', (4666, 4681), False, 'from thread import start_new_thread\n'), ((5033, 5086), 'logging.debug', 'debug', (["('Client has connected: %r,%r' % (client, addr))"], {}), "('Client has connected: %r,%r' % (client, addr))\n", (5038, 5086), False, 'from logging import debug, info, warn, error\n'), ((5592, 5598), 'time.time', 'time', ([], {}), '()\n', (5596, 5598), False, 'from time import time\n'), ((5984, 6022), 'logging.debug', 'debug', (["('command received: %r' % msg_in)"], {}), "('command received: %r' % msg_in)\n", (5989, 6022), False, 'from logging import debug, info, warn, error\n'), ((8147, 8153), 'time.time', 'time', ([], {}), '()\n', (8151, 8153), False, 'from time import time\n'), ((8683, 8719), 'logging.debug', 'debug', (["('initial length: %r' % length)"], {}), "('initial length: %r' % length)\n", (8688, 8719), False, 'from logging import debug, info, warn, error\n'), ((8728, 8739), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (8733, 8739), False, 'from time import sleep\n'), ((9095, 9144), 'msgpack.unpackb', 'msgpack.unpackb', (['msg_in'], {'object_hook': 'msg_m.decode'}), '(msg_in, object_hook=msg_m.decode)\n', (9110, 9144), False, 
'import msgpack\n'), ((9304, 9338), 'logging.debug', 'debug', (["('command send %r' % msg_out)"], {}), "('command send %r' % msg_out)\n", (9309, 9338), False, 'from logging import debug, info, warn, error\n'), ((9353, 9393), 'msgpack.packb', 'msgpack.packb', (['msg_out'], {'default': 'm.encode'}), '(msg_out, default=m.encode)\n', (9366, 9393), False, 'import msgpack\n'), ((9778, 9827), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (9791, 9827), False, 'import socket\n'), ((11034, 11044), 'logging.debug', 'debug', (['msg'], {}), '(msg)\n', (11039, 11044), False, 'from logging import debug, info, warn, error\n'), ((11093, 11123), 'logging.debug', 'debug', (['"""help command received"""'], {}), "('help command received')\n", (11098, 11123), False, 'from logging import debug, info, warn, error\n'), ((11518, 11528), 'logging.debug', 'debug', (['msg'], {}), '(msg)\n', (11523, 11528), False, 'from logging import debug, info, warn, error\n'), ((11645, 11655), 'logging.debug', 'debug', (['msg'], {}), '(msg)\n', (11650, 11655), False, 'from logging import debug, info, warn, error\n'), ((11752, 11762), 'logging.debug', 'debug', (['msg'], {}), '(msg)\n', (11757, 11762), False, 'from logging import debug, info, warn, error\n'), ((11855, 11865), 'logging.debug', 'debug', (['msg'], {}), '(msg)\n', (11860, 11865), False, 'from logging import debug, info, warn, error\n'), ((11964, 11974), 'logging.debug', 'debug', (['msg'], {}), '(msg)\n', (11969, 11974), False, 'from logging import debug, info, warn, error\n'), ((12117, 12127), 'logging.debug', 'debug', (['msg'], {}), '(msg)\n', (12122, 12127), False, 'from logging import debug, info, warn, error\n'), ((14866, 14872), 'time.time', 'time', ([], {}), '()\n', (14870, 14872), False, 'from time import time\n'), ((15453, 15464), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (15458, 15464), False, 'from time import sleep\n'), ((15703, 15752), 
'msgpack.unpackb', 'msgpack.unpackb', (['msg_in'], {'object_hook': 'msg_m.decode'}), '(msg_in, object_hook=msg_m.decode)\n', (15718, 15752), False, 'import msgpack\n'), ((15912, 15946), 'logging.debug', 'debug', (["('command send %r' % msg_out)"], {}), "('command send %r' % msg_out)\n", (15917, 15946), False, 'from logging import debug, info, warn, error\n'), ((15961, 16001), 'msgpack.packb', 'msgpack.packb', (['msg_out'], {'default': 'm.encode'}), '(msg_out, default=m.encode)\n', (15974, 16001), False, 'import msgpack\n'), ((16386, 16435), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (16399, 16435), False, 'import socket\n'), ((17642, 17652), 'logging.debug', 'debug', (['msg'], {}), '(msg)\n', (17647, 17652), False, 'from logging import debug, info, warn, error\n'), ((17858, 17875), 'cavro_centris_syringe_pump_LL.driver.discover', 'driver.discover', ([], {}), '()\n', (17873, 17875), False, 'from cavro_centris_syringe_pump_LL import driver\n'), ((18043, 18073), 'logging.debug', 'debug', (['"""help command received"""'], {}), "('help command received')\n", (18048, 18073), False, 'from logging import debug, info, warn, error\n'), ((18275, 18297), 'numpy.random.rand', 'random.rand', (['(2)', '(100000)'], {}), '(2, 100000)\n', (18286, 18297), False, 'from numpy import random\n'), ((18458, 18493), 'cavro_centris_syringe_pump_LL.driver.positions', 'driver.positions', ([], {'pids': '[1, 2, 3, 4]'}), '(pids=[1, 2, 3, 4])\n', (18474, 18493), False, 'from cavro_centris_syringe_pump_LL import driver\n'), ((18529, 18564), 'cavro_centris_syringe_pump_LL.driver.valve_get', 'driver.valve_get', ([], {'pids': '[1, 2, 3, 4]'}), '(pids=[1, 2, 3, 4])\n', (18545, 18564), False, 'from cavro_centris_syringe_pump_LL import driver\n'), ((18726, 18761), 'thread.start_new_thread', 'start_new_thread', (['self.run_once', '()'], {}), '(self.run_once, ())\n', (18742, 18761), False, 'from thread import 
start_new_thread\n'), ((9971, 9999), 'logging.debug', 'debug', (['"""Connection success!"""'], {}), "('Connection success!')\n", (9976, 9999), False, 'from logging import debug, info, warn, error\n'), ((16579, 16607), 'logging.debug', 'debug', (['"""Connection success!"""'], {}), "('Connection success!')\n", (16584, 16607), False, 'from logging import debug, info, warn, error\n'), ((18689, 18697), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (18694, 18697), False, 'from time import sleep\n'), ((3372, 3421), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (3385, 3421), False, 'import socket\n'), ((7519, 7553), 'logging.debug', 'debug', (['"""command is not recognized"""'], {}), "('command is not recognized')\n", (7524, 7553), False, 'from logging import debug, info, warn, error\n'), ((8853, 8895), 'logging.debug', 'debug', (["('length left (before): %r' % length)"], {}), "('length left (before): %r' % length)\n", (8858, 8895), False, 'from logging import debug, info, warn, error\n'), ((8972, 9013), 'logging.debug', 'debug', (["('length left (after): %r' % length)"], {}), "('length left (after): %r' % length)\n", (8977, 9013), False, 'from logging import debug, info, warn, error\n'), ((9030, 9041), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (9035, 9041), False, 'from time import sleep\n'), ((15638, 15649), 'time.sleep', 'sleep', (['(0.01)'], {}), '(0.01)\n', (15643, 15649), False, 'from time import sleep\n'), ((5233, 5255), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5253, 5255), False, 'import traceback\n'), ((7898, 7920), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7918, 7920), False, 'import traceback\n'), ((9651, 9673), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9671, 9673), False, 'import traceback\n'), ((16259, 16281), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (16279, 16281), False, 
'import traceback\n'), ((3596, 3618), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3616, 3618), False, 'import traceback\n'), ((10041, 10063), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10061, 10063), False, 'import traceback\n'), ((16649, 16671), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (16669, 16671), False, 'import traceback\n'), ((6816, 6838), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6836, 6838), False, 'import traceback\n')] |
import mock
import numpy as np
import pytest
import pypylon.pylon
import pypylon.genicam
import piescope.data
import piescope.lm.volume
from piescope.lm.detector import Basler
from piescope.lm.objective import StageController
pytest.importorskip('pypylon', reason="The pypylon library is not available.")
def test_mock_basler(monkeypatch):
    """Emulated-camera smoke test: with PYLON_CAMEMU=1 and pypylon
    patched, a Basler detector can grab a frame and be configured."""
    # This is how you make a mock Basler detector for the volume function
    monkeypatch.setenv("PYLON_CAMEMU", "1")
    with mock.patch("pypylon.pylon"):
        with mock.patch.object(pypylon.genicam.INodeMap, "GetNode"):
            detector = Basler()
            detector.camera_grab()
            detector.camera.Open()
            detector.camera.ExposureTime.SetValue(10)
@mock.patch.object(StageController, 'connect')
@mock.patch.object(StageController, 'recv')
@mock.patch.object(StageController, 'sendall')
def test_mock_objective_stage(mock_sendall, mock_recv, mock_connect):
    """Mocked SMARACT stage: current_position() must send the 'GP0' query."""
    # This is how you mock the SMARACT objective lens stage
    # By mocking the StageConroller connect() method we don't need testing=True
    mock_sendall.return_value = None
    mock_recv.return_value = None
    stage = StageController()  # completely mocked
    stage.current_position()
    # '\012' is the linefeed terminator expected by the controller protocol.
    mock_sendall.assert_called_with(bytes(':' + 'GP0' + '\012', 'utf-8'))
@mock.patch.object(StageController, 'current_position')
@mock.patch.object(StageController, 'connect')
@mock.patch.object(StageController, 'recv')
@mock.patch.object(StageController, 'sendall')
def test_volume_acquisition(mock_sendall, mock_recv, mock_connect,
                           mock_current_position, monkeypatch):
    """Volume acquisition against the emulated camera with a fully
    mocked objective stage; checks the output dtype and stack shape."""
    mock_current_position.return_value = 5
    monkeypatch.setenv("PYLON_CAMEMU", "1")
    power = 0.01  # as a percentage
    exposure = 200  # in microseconds
    laser_dict = {
        "laser640": (power, exposure),  # 640 nm (far-red)
        "laser561": (power, exposure),  # 561 nm (RFP)
        "laser488": (power, exposure),  # 488 nm (GFP)
        "laser405": (power, exposure),  # 405 nm (DAPI)
    }
    num_z_slices = 3
    z_slice_distance = 10
    output = piescope.lm.volume.volume_acquisition(
        laser_dict, num_z_slices, z_slice_distance,
        time_delay=0.01, count_max=0, threshold=np.Inf)
    assert output.dtype == np.uint8  # 8-bit output expected
    # Basler emulated mode produces images with shape (1040, 1024)
    # The real Basler detector in the lab produces images with shape (1200, 1920)
    # shape has format: (pln, row, col, ch)
    assert output.shape == (3, 1040, 1024, 4) or output.shape == (3, 1200, 1920, 4)
    if output.shape == (3, 1040, 1024, 4):
        emulated_image = piescope.data.basler_image()
        # Each z-plane should be the emulated frame replicated over 4 channels.
        expected = np.stack([emulated_image for _ in range(4)], axis=-1)
        expected = np.stack([expected, expected, expected], axis=0)
        assert np.allclose(output, expected)
| [
"piescope.lm.objective.StageController",
"mock.patch",
"numpy.allclose",
"mock.patch.object",
"numpy.stack",
"piescope.lm.detector.Basler",
"pytest.importorskip"
] | [((230, 308), 'pytest.importorskip', 'pytest.importorskip', (['"""pypylon"""'], {'reason': '"""The pypylon library is not available."""'}), "('pypylon', reason='The pypylon library is not available.')\n", (249, 308), False, 'import pytest\n'), ((730, 775), 'mock.patch.object', 'mock.patch.object', (['StageController', '"""connect"""'], {}), "(StageController, 'connect')\n", (747, 775), False, 'import mock\n'), ((777, 819), 'mock.patch.object', 'mock.patch.object', (['StageController', '"""recv"""'], {}), "(StageController, 'recv')\n", (794, 819), False, 'import mock\n'), ((821, 866), 'mock.patch.object', 'mock.patch.object', (['StageController', '"""sendall"""'], {}), "(StageController, 'sendall')\n", (838, 866), False, 'import mock\n'), ((1305, 1359), 'mock.patch.object', 'mock.patch.object', (['StageController', '"""current_position"""'], {}), "(StageController, 'current_position')\n", (1322, 1359), False, 'import mock\n'), ((1361, 1406), 'mock.patch.object', 'mock.patch.object', (['StageController', '"""connect"""'], {}), "(StageController, 'connect')\n", (1378, 1406), False, 'import mock\n'), ((1408, 1450), 'mock.patch.object', 'mock.patch.object', (['StageController', '"""recv"""'], {}), "(StageController, 'recv')\n", (1425, 1450), False, 'import mock\n'), ((1452, 1497), 'mock.patch.object', 'mock.patch.object', (['StageController', '"""sendall"""'], {}), "(StageController, 'sendall')\n", (1469, 1497), False, 'import mock\n'), ((1160, 1177), 'piescope.lm.objective.StageController', 'StageController', ([], {}), '()\n', (1175, 1177), False, 'from piescope.lm.objective import StageController\n'), ((473, 500), 'mock.patch', 'mock.patch', (['"""pypylon.pylon"""'], {}), "('pypylon.pylon')\n", (483, 500), False, 'import mock\n'), ((2775, 2823), 'numpy.stack', 'np.stack', (['[expected, expected, expected]'], {'axis': '(0)'}), '([expected, expected, expected], axis=0)\n', (2783, 2823), True, 'import numpy as np\n'), ((2839, 2868), 'numpy.allclose', 'np.allclose', 
(['output', 'expected'], {}), '(output, expected)\n', (2850, 2868), True, 'import numpy as np\n'), ((515, 569), 'mock.patch.object', 'mock.patch.object', (['pypylon.genicam.INodeMap', '"""GetNode"""'], {}), "(pypylon.genicam.INodeMap, 'GetNode')\n", (532, 569), False, 'import mock\n'), ((594, 602), 'piescope.lm.detector.Basler', 'Basler', ([], {}), '()\n', (600, 602), False, 'from piescope.lm.detector import Basler\n')] |
import numpy as np
from os import chdir
#wd="/Users/moudiki/Documents/Python_Packages/teller"
#
#chdir(wd)
import teller as tr
import pandas as pd
from sklearn import datasets
import numpy as np
from sklearn import datasets
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
# import data
boston = datasets.load_boston()
X = np.delete(boston.data, 11, 1)
y = boston.target
col_names = np.append(np.delete(boston.feature_names, 11), 'MEDV')
# split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=123)
print(X_train.shape)
print(X_test.shape)
# fit a linear regression model
regr = RandomForestRegressor(n_estimators=1000, random_state=123)
regr.fit(X_train, y_train)
# creating the explainer
expr = tr.Explainer(obj=regr)
# print(expr.get_params())
# heterogeneity of effects -----
# fitting the explainer
expr.fit(X_test, y_test, X_names=col_names[:-1],
method="avg")
print(expr.summary())
# confidence int. and tests on effects -----
expr.fit(X_test, y_test, X_names=col_names[:-1],
method="ci")
print(expr.summary())
# interactions -----
varx = "RAD"
expr.fit(X_test, y_test, X_names=col_names[:-1],
col_inters = varx, method="inters")
print(expr.summary())
varx = "RM"
expr.fit(X_test, y_test, X_names=col_names[:-1],
col_inters = varx, method="inters")
print(expr.summary())
| [
"sklearn.ensemble.RandomForestRegressor",
"sklearn.model_selection.train_test_split",
"numpy.delete",
"sklearn.datasets.load_boston",
"teller.Explainer"
] | [((362, 384), 'sklearn.datasets.load_boston', 'datasets.load_boston', ([], {}), '()\n', (382, 384), False, 'from sklearn import datasets\n'), ((389, 418), 'numpy.delete', 'np.delete', (['boston.data', '(11)', '(1)'], {}), '(boston.data, 11, 1)\n', (398, 418), True, 'import numpy as np\n'), ((587, 642), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(123)'}), '(X, y, test_size=0.2, random_state=123)\n', (603, 642), False, 'from sklearn.model_selection import train_test_split\n'), ((779, 837), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(1000)', 'random_state': '(123)'}), '(n_estimators=1000, random_state=123)\n', (800, 837), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((899, 921), 'teller.Explainer', 'tr.Explainer', ([], {'obj': 'regr'}), '(obj=regr)\n', (911, 921), True, 'import teller as tr\n'), ((459, 494), 'numpy.delete', 'np.delete', (['boston.feature_names', '(11)'], {}), '(boston.feature_names, 11)\n', (468, 494), True, 'import numpy as np\n')] |
##############################################################################
#
# <NAME>
# <EMAIL>
#
# References:
# SuperDataScience,
# Official Documentation
#
#
##############################################################################
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def plot_map(X_set, y_set, classifier, text):
    """
    Plot the classifier's decision regions over a dense grid covering
    the two features of X_set; no data points are drawn.
    (y_set is accepted for signature symmetry with plot_model but unused.)
    """
    # Grid with 0.01 spacing, padded one unit beyond the data range.
    X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                         np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
    # Predict every grid point and colour the regions by predicted class.
    plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
                 alpha = 0.75, cmap = ListedColormap(('pink', 'cyan','cornflowerblue')))
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    plt.title(text)
    plt.xlabel('X1')
    plt.ylabel('X2')
    # NOTE(review): no labelled artists exist here, so legend() shows nothing.
    plt.legend()
    plt.show()
    plt.clf()
def plot_model(X_set, y_set, classifier, text):
    """
    Plot the classifier's decision regions plus the (X_set, y_set)
    points coloured by their true class.
    """
    X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                         np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
    plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
                 alpha = 0.75, cmap = ListedColormap(('pink', 'cyan','cornflowerblue')))
    plt.xlim(X1.min(), X1.max())
    plt.ylim(X2.min(), X2.max())
    # One scatter call per class so that each gets its own colour/label.
    for i, j in enumerate(np.unique(y_set)):
        plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                    c = ListedColormap(('red', 'blue','midnightblue'))(i), label = j)
    plt.title(text)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.legend()
    plt.show()
    plt.clf()
def evaluate(y_test,y_pred):
    """Print the accuracy score and the full classification report,
    separated by divider lines."""
    from sklearn.metrics import accuracy_score
    accuracy = accuracy_score(y_test, y_pred)
    separator = "============================================================"
    print(separator)
    print("Accuracy Score: " + str(accuracy))
    # Full precision/recall/f1 breakdown per class.
    from sklearn.metrics import classification_report
    print(separator)
    print(classification_report(y_test, y_pred))
    print(separator)
# Importing the dataset
# iloc gets data via numerical indexes
# .values converts from python dataframe to numpy object
dataset = pd.read_csv('Circles.csv')
X = dataset.iloc[:, 1:3].values
y = dataset.iloc[:, 3].values
from matplotlib.colors import ListedColormap
# Scatter the raw data, one colour per class label.
for i, j in enumerate(np.unique(y)):
    plt.scatter(X[y == j, 0], X[y == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Dataset')
plt.xlabel('X1')
plt.ylabel('X2')
plt.legend()
plt.show()
plt.clf()
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
#============================== NAIVE BAYES ===================================
# NAIVE BAYES CLASSIFICATION
#
# Naive Bayes methods are a set of supervised learning algorithms based on
# applying Bayes’ theorem with the “naive” assumption of independence
# between every pair of features
#
# Naive Bayes learners and classifiers can be extremely fast compared to
# more sophisticated methods. The decoupling of the class conditional
# feature distributions means that each distribution can be independently
# estimated as a one dimensional distribution. This in turn helps to alleviate
# problems stemming from the curse of dimensionality.
#
# Gaussian Naive Bayes
# In the Gaussian Naive Bayes algorithm for classification.
# The likelihood of the features is assumed to be Gaussian:
#
# Other Options:
# Multinomial Naive Bayes:
#   MultinomialNB implements the naive Bayes algorithm for
#   multinomially distributed data
# Bernoulli Naive Bayes:
#   BernoulliNB implements the naive Bayes training and classification
#   algorithms for data that is distributed according to multivariate
#   Bernoulli distributions; i.e., there may be multiple features but
#   each one is assumed to be a binary-valued (Bernoulli, boolean) variable.
#
#
# priors : array-like, shape (n_classes,)
#   Prior probabilities of the classes.
#   If specified the priors are not adjusted according to the data.
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
evaluate(y_test, y_pred)
# Visualise the decision boundary alone, then with train/test points.
plot_map(X_train, y_train, classifier, 'GaussianNB Boundary')
plot_model(X_train, y_train, classifier, 'GaussianNB Train')
plot_model(X_test, y_test, classifier, 'GaussianNB Test')
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(classifier, X_test, y_test,cmap=plt.cm.Blues)
| [
"numpy.unique",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"sklearn.naive_bayes.GaussianNB",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"sklearn.metrics.classification_report",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.title",
... | [((2492, 2518), 'pandas.read_csv', 'pd.read_csv', (['"""Circles.csv"""'], {}), "('Circles.csv')\n", (2503, 2518), True, 'import pandas as pd\n'), ((2776, 2796), 'matplotlib.pyplot.title', 'plt.title', (['"""Dataset"""'], {}), "('Dataset')\n", (2785, 2796), True, 'import matplotlib.pyplot as plt\n'), ((2797, 2813), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X1"""'], {}), "('X1')\n", (2807, 2813), True, 'import matplotlib.pyplot as plt\n'), ((2814, 2830), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X2"""'], {}), "('X2')\n", (2824, 2830), True, 'import matplotlib.pyplot as plt\n'), ((2831, 2843), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2844, 2854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2852, 2854), True, 'import matplotlib.pyplot as plt\n'), ((2855, 2864), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2862, 2864), True, 'import matplotlib.pyplot as plt\n'), ((3013, 3050), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (3029, 3050), False, 'from sklearn.model_selection import train_test_split\n'), ((4699, 4711), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (4709, 4711), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((5077, 5145), 'sklearn.metrics.plot_confusion_matrix', 'plot_confusion_matrix', (['classifier', 'X_test', 'y_test'], {'cmap': 'plt.cm.Blues'}), '(classifier, X_test, y_test, cmap=plt.cm.Blues)\n', (5098, 5145), False, 'from sklearn.metrics import plot_confusion_matrix\n'), ((894, 909), 'matplotlib.pyplot.title', 'plt.title', (['text'], {}), '(text)\n', (903, 909), True, 'import matplotlib.pyplot as plt\n'), ((914, 930), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X1"""'], {}), "('X1')\n", (924, 930), True, 'import matplotlib.pyplot as plt\n'), ((935, 951), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X2"""'], {}), 
"('X2')\n", (945, 951), True, 'import matplotlib.pyplot as plt\n'), ((956, 968), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (966, 968), True, 'import matplotlib.pyplot as plt\n'), ((973, 983), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (981, 983), True, 'import matplotlib.pyplot as plt\n'), ((988, 997), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (995, 997), True, 'import matplotlib.pyplot as plt\n'), ((1726, 1741), 'matplotlib.pyplot.title', 'plt.title', (['text'], {}), '(text)\n', (1735, 1741), True, 'import matplotlib.pyplot as plt\n'), ((1746, 1762), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X1"""'], {}), "('X1')\n", (1756, 1762), True, 'import matplotlib.pyplot as plt\n'), ((1767, 1783), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X2"""'], {}), "('X2')\n", (1777, 1783), True, 'import matplotlib.pyplot as plt\n'), ((1788, 1800), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1798, 1800), True, 'import matplotlib.pyplot as plt\n'), ((1805, 1815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1813, 1815), True, 'import matplotlib.pyplot as plt\n'), ((1820, 1829), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1827, 1829), True, 'import matplotlib.pyplot as plt\n'), ((1926, 1956), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1940, 1956), False, 'from sklearn.metrics import accuracy_score\n'), ((2649, 2661), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2658, 2661), True, 'import numpy as np\n'), ((1553, 1569), 'numpy.unique', 'np.unique', (['y_set'], {}), '(y_set)\n', (1562, 1569), True, 'import numpy as np\n'), ((2247, 2284), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2268, 2284), False, 'from sklearn.metrics import classification_report\n'), ((773, 823), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('pink', 'cyan', 
'cornflowerblue')"], {}), "(('pink', 'cyan', 'cornflowerblue'))\n", (787, 823), False, 'from matplotlib.colors import ListedColormap\n'), ((1410, 1460), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('pink', 'cyan', 'cornflowerblue')"], {}), "(('pink', 'cyan', 'cornflowerblue'))\n", (1424, 1460), False, 'from matplotlib.colors import ListedColormap\n'), ((2728, 2760), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('red', 'green')"], {}), "(('red', 'green'))\n", (2742, 2760), False, 'from matplotlib.colors import ListedColormap\n'), ((1660, 1707), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('red', 'blue', 'midnightblue')"], {}), "(('red', 'blue', 'midnightblue'))\n", (1674, 1707), False, 'from matplotlib.colors import ListedColormap\n')] |
#!/usr/bin/python3
import numpy as np
import PIL.Image
# Convert a detector flatfield stored as .npy into a black/white PNG
# mask: pixels below 0.1 become 0, everything else 255.
#infile = 'CHX_Eiger1M_blemish2-mask.npy'
infile = 'CHX_Eiger1M_flatfield.npy'
outfile = infile[:-4] + '.png'
pixmask = np.load(infile)
#pixmask = pixmask < 1
#img = np.where( pixmask<1, 255, 0)
#img = np.where( pixmask<1, 0, 255)
img = np.where( pixmask<0.1, 0, 255)
img = PIL.Image.fromarray(np.uint8(img))
img.save(outfile) | [
"numpy.where",
"numpy.uint8",
"numpy.load"
] | [((179, 194), 'numpy.load', 'np.load', (['infile'], {}), '(infile)\n', (186, 194), True, 'import numpy as np\n'), ((297, 328), 'numpy.where', 'np.where', (['(pixmask < 0.1)', '(0)', '(255)'], {}), '(pixmask < 0.1, 0, 255)\n', (305, 328), True, 'import numpy as np\n'), ((356, 369), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (364, 369), True, 'import numpy as np\n')] |
#-*-coding:utf-8-*-
# date:2020-03-02
# Author: X.li
# function: inference & eval CenterNet only support resnet backbone
import os
import glob
import cv2
import numpy as np
import time
import shutil
import torch
import json
import matplotlib.pyplot as plt
from data_iterator import LoadImagesAndLabels
from models.decode import ctdet_decode
from utils.model_utils import load_model
from utils.post_process import ctdet_post_process
from msra_resnet import get_pose_net as resnet
from xml_writer import PascalVocWriter
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
# reference https://zhuanlan.zhihu.com/p/60707912
def draw_pr(coco_eval, label="192_288"):
pr_array1 = coco_eval.eval["precision"][0, :, 0, 0, 2]
score_array1 = coco_eval.eval['scores'][0, :, 0, 0, 2]
x = np.arange(0.0, 1.01, 0.01)
plt.xlabel("recall")
plt.ylabel("precision")
plt.xlim(0, 1.0)
plt.ylim(0, 1.0)
plt.grid(True)
plt.plot(x, pr_array1, "b-", label=label)
for i in range(len(pr_array1)):
print("Confidence: {:.2f}, Precision: {:.2f}, Recall: {:.2f}".format(score_array1[i], pr_array1[i], x[i]))
plt.legend(loc="lower left")
plt.savefig("one_p_r.png")
def write_bbox_label(writer_x,img_shape,bbox,label):
h,w = img_shape
x1,y1,x2,y2 = bbox
x1 = min(w, max(0, x1))
x2 = min(w, max(0, x2))
y1 = min(h, max(0, y1))
y2 = min(h, max(0, y2))
writer_x.addBndBox(int(x1), int(y1), int(x2), int(y2), label, 0)
def letterbox(img, height=512, color=(31, 31, 31)):
# Resize a rectangular image to a padded square
shape = img.shape[:2] # shape = [height, width]
ratio = float(height) / max(shape) # ratio = old / new
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio))
dw = (height - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_LINEAR)
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded square
return img
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
#x1、y1、x2、y2、以及score赋值
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
#每一个检测框的面积
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
#按照score置信度降序排序
order = scores.argsort()[::-1]
keep = [] #保留的结果框集合
while order.size > 0:
i = order[0]
keep.append(i) #保留该类剩余box中得分最高的一个
#得到相交区域,左上及右下
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
#计算相交的面积,不重叠时面积为0
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
#计算IoU:重叠面积 /(面积1+面积2-重叠面积)
ovr = inter / (areas[i] + areas[order[1:]] - inter)
#保留IoU小于阈值的box
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1] #因为ovr数组的长度比order数组少一个,所以这里要将所有下标后移一位
return keep
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * max(img.shape[0:2])) + 1 # line thickness
color = color or [random.randint(0, 255) for _ in range(3)]# color
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl)
if label:
tf = max(tl - 2, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] # label size
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 # 字体的bbox
cv2.rectangle(img, c1, c2, color, -1) # filled rectangle
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 4, [225, 255, 255],\
thickness=tf, lineType=cv2.LINE_AA)
class CtdetDetector(object):
def __init__(self,model_arch,model_path):
if torch.cuda.is_available():
self.device = torch.device("cuda:0")
else:
self.device = torch.device('cpu')
self.num_classes = LoadImagesAndLabels.num_classes
print('Creating model...')
head_conv_ =64
if "resnet_" in model_arch:
num_layer = int(model_arch.split("_")[1])
self.model = resnet(num_layers=num_layer, heads={'hm': self.num_classes, 'wh': 2, 'reg': 2}, head_conv=head_conv_, pretrained=False) # res_18
else:
print("model_arch error:", model_arch)
self.model = load_model(self.model, model_path)
self.model = self.model.to(self.device)
self.model.eval()
self.mean = np.array([[[0.40789655, 0.44719303, 0.47026116]]], dtype=np.float32).reshape(1, 1, 3)
self.std = np.array([[[0.2886383, 0.27408165, 0.27809834]]], dtype=np.float32).reshape(1, 1, 3)
self.class_name = LoadImagesAndLabels.class_name
self.down_ratio = 4
self.K = 100
self.vis_thresh = 0.3
self.show = True
def pre_process(self, image):
height, width = image.shape[0:2]
inp_height, inp_width = LoadImagesAndLabels.default_resolution#获取分辨率
torch.cuda.synchronize()
s1 = time.time()
inp_image = letterbox(image, height=inp_height)# 非形变图像pad
c = np.array([width / 2., height / 2.], dtype=np.float32)
s = max(height, width) * 1.0
inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)
images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
images = torch.from_numpy(images)
torch.cuda.synchronize()
s2 = time.time()
# print("pre_process:".format(s2 -s1))
meta = {'c': c, 's': s, 'out_height': inp_height // self.down_ratio, 'out_width': inp_width // self.down_ratio}
return images, meta
def predict(self, images):
images = images.to(self.device)
with torch.no_grad():
torch.cuda.synchronize()
s1 = time.time()
output = self.model(images)[-1]
torch.cuda.synchronize()
s2 = time.time()
# for k, v in output.items():
# print("output:", k, v.size())
# print("inference time:", s2 - s1)
hm = output['hm'].sigmoid_()
wh = output['wh']
reg = output['reg'] if "reg" in output else None
dets = ctdet_decode(hm, wh, reg=reg, K=self.K)
torch.cuda.synchronize()
return output, dets
def post_process(self, dets, meta, scale=1):
torch.cuda.synchronize()
s1 = time.time()
dets = dets.cpu().numpy()
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(dets, [meta['c']], [meta['s']], meta['out_height'], meta['out_width'], self.num_classes)
for j in range(1, self.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
torch.cuda.synchronize()
s2 = time.time()
# print("post_process:", s2-s1)
return dets[0]
def work(self, image):
img_h, img_w = image.shape[0], image.shape[1]
torch.cuda.synchronize()
s1 = time.time()
detections = []
images, meta = self.pre_process(image)
output, dets = self.predict(images)
hm = output['hm']
dets = self.post_process(dets, meta)
detections.append(dets)
results = {}
for j in range(1, self.num_classes + 1):
results[j] = np.concatenate([detection[j] for detection in detections], axis=0).astype(np.float32)
final_result = []
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] >= self.vis_thresh:
x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
x1 = min(img_w, max(0, x1))
x2 = min(img_w, max(0, x2))
y1 = min(img_h, max(0, y1))
y2 = min(img_h, max(0, y2))
conf = bbox[4]
cls = self.class_name[j]
final_result.append((cls, conf, [x1, y1, x2, y2]))
# print("cost time: ", time.time() - s1)
return final_result,hm
def eval(model_arch,model_path,img_dir,gt_annot_path):
output = "output"
if os.path.exists(output):
shutil.rmtree(output)
os.mkdir(output)
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
if LoadImagesAndLabels.num_classes <= 5:
colors = [(55,55,250), (255,155,50), (128,0,0), (255,0,255), (128,255,128), (255,0,0)]
else:
colors = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) for v in range(1, LoadImagesAndLabels.num_classes + 1)][::-1]
detector = CtdetDetector(model_arch,model_path)
print('\n/****************** Eval ****************/\n')
import tqdm
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
print("gt path: {}".format(gt_annot_path))
result_file = '../evaluation/instances_det.json'
coco = coco.COCO(gt_annot_path)
images = coco.getImgIds()
num_samples = len(images)
print('find {} samples in {}'.format(num_samples, gt_annot_path))
#------------------------------------------------
coco_res = []
for index in tqdm.tqdm(range(num_samples)):
img_id = images[index]
file_name = coco.loadImgs(ids=[img_id])[0]['file_name']
image_path = os.path.join(img_dir, file_name)
img = cv2.imread(image_path)
results,hm = detector.work(img)# 返回检测结果和置信度图
class_num = {}
for res in results:
cls, conf, bbox = res[0], res[1], res[2]
coco_res.append({'bbox': [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]], 'category_id':
LoadImagesAndLabels.class_name.index(cls), 'image_id': img_id, 'score': conf})
if cls in class_num:
class_num[cls] += 1
else:
class_num[cls] = 1
color = colors[LoadImagesAndLabels.class_name.index(cls)]
# 绘制标签&置信度
label_ = '{}:{:.1f}'.format(cls, conf)
plot_one_box(bbox, img, color=color, label=label_, line_thickness=2)
cv2.imwrite(output + "/" + os.path.basename(image_path), img)
cv2.namedWindow("heatmap", 0)
cv2.imshow("heatmap", np.hstack(hm[0].cpu().numpy()))
cv2.namedWindow("img", 0)
cv2.imshow("img", img)
key = cv2.waitKey(1)
#-------------------------------------------------
with open(result_file, 'w') as f_dump:
json.dump(coco_res, f_dump, cls=NpEncoder)
cocoDt = coco.loadRes(result_file)
cocoEval = COCOeval(coco, cocoDt, 'bbox')
# cocoEval.params.imgIds = imgIds
cocoEval.params.catIds = [1] # 1代表’Hand’类,你可以根据需要增减类别
cocoEval.evaluate()
print('\n/***************************/\n')
cocoEval.accumulate()
print('\n/***************************/\n')
cocoEval.summarize()
draw_pr(cocoEval)
def inference(model_arch,nms_flag,model_path,img_dir):
print('\n/****************** Demo ****************/\n')
flag_write_xml = False
path_det_ = './det_xml/'
if os.path.exists(path_det_):
shutil.rmtree(path_det_)
print('remove detect document ~')
if not os.path.exists(path_det_):
os.mkdir(path_det_)
output = "output"
if os.path.exists(output):
shutil.rmtree(output)
os.mkdir(output)
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
if LoadImagesAndLabels.num_classes <= 5:
colors = [(55,55,250), (255,155,50), (128,0,0), (255,0,255), (128,255,128), (255,0,0)]
else:
colors = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) for v in range(1, LoadImagesAndLabels.num_classes + 1)][::-1]
detector = CtdetDetector(model_arch,model_path)
for file_ in os.listdir(img_dir):
if '.xml' in file_:
continue
print("--------------------")
img = cv2.imread(img_dir + file_)
if flag_write_xml:
shutil.copyfile(img_dir + file_,path_det_+file_)
if flag_write_xml:
img_h, img_w = img.shape[0],img.shape[1]
writer = PascalVocWriter("./",file_, (img_h, img_w, 3), localImgPath="./", usrname="RGB_HandPose_EVAL")
results,hm = detector.work(img)# 返回检测结果和置信度图
print('model_arch - {} : {}'.format(model_arch,results))
class_num = {}
nms_dets_ = []
for res in results:
cls, conf, bbox = res[0], res[1], res[2]
if flag_write_xml:
write_bbox_label(writer,(img_h,img_w),bbox,cls)
if cls in class_num:
class_num[cls] += 1
else:
class_num[cls] = 1
color = colors[LoadImagesAndLabels.class_name.index(cls)]
nms_dets_.append((bbox[0], bbox[1],bbox[2], bbox[3],conf))
# 绘制标签&置信度
if nms_flag == False:
label_ = '{}:{:.1f}'.format(cls, conf)
plot_one_box(bbox, img, color=color, label=label_, line_thickness=2)
if flag_write_xml:
writer.save(targetFile = path_det_+file_.replace('.jpg','.xml'))
if nms_flag and len(nms_dets_)>0:
#nms
keep_ = py_cpu_nms(np.array(nms_dets_), thresh=0.8)
print('keep_ : {}'.format(keep_))
for i in range(len(nms_dets_)):
if i in keep_:
bbox_conf = nms_dets_[i]
bbox_ = int(bbox_conf[0]),int(bbox_conf[1]),int(bbox_conf[2]),int(bbox_conf[3])
label_ = 'nms_Hand:{:.2f}'.format(bbox_conf[4])
plot_one_box(bbox_, img, color=(55,125,255), label=label_, line_thickness=2)
cv2.namedWindow("heatmap", 0)
cv2.imshow("heatmap", np.hstack(hm[0].cpu().numpy()))
cv2.namedWindow("img", 0)
cv2.imshow("img", img)
key = cv2.waitKey(1)
if key == 27:
break
if __name__ == '__main__':
model_arch = 'resnet_18'
model_path = './model_save/model_hand_last_'+model_arch+'.pth'# 模型路径
gt_annot_path = './hand_detect_gt.json'
img_dir = '../done/'# 测试集
nms_flag = True
Eval = True
if Eval:
eval(model_arch,model_path,img_dir,gt_annot_path)
else:
inference(model_arch,nms_flag,model_path,img_dir)
| [
"cv2.rectangle",
"matplotlib.pyplot.grid",
"pycocotools.cocoeval.COCOeval",
"matplotlib.pyplot.ylabel",
"torch.from_numpy",
"cv2.imshow",
"torch.cuda.synchronize",
"numpy.array",
"torch.cuda.is_available",
"numpy.arange",
"os.path.exists",
"os.listdir",
"pycocotools.coco.getImgIds",
"numpy... | [((1023, 1049), 'numpy.arange', 'np.arange', (['(0.0)', '(1.01)', '(0.01)'], {}), '(0.0, 1.01, 0.01)\n', (1032, 1049), True, 'import numpy as np\n'), ((1054, 1074), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""recall"""'], {}), "('recall')\n", (1064, 1074), True, 'import matplotlib.pyplot as plt\n'), ((1079, 1102), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (1089, 1102), True, 'import matplotlib.pyplot as plt\n'), ((1107, 1123), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (1115, 1123), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1144), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (1136, 1144), True, 'import matplotlib.pyplot as plt\n'), ((1149, 1163), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1157, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1169, 1210), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'pr_array1', '"""b-"""'], {'label': 'label'}), "(x, pr_array1, 'b-', label=label)\n", (1177, 1210), True, 'import matplotlib.pyplot as plt\n'), ((1366, 1394), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (1376, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1425), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""one_p_r.png"""'], {}), "('one_p_r.png')\n", (1410, 1425), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2249), 'cv2.resize', 'cv2.resize', (['img', 'new_shape'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, new_shape, interpolation=cv2.INTER_LINEAR)\n', (2201, 2249), False, 'import cv2\n'), ((2260, 2347), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 'top', 'bottom', 'left', 'right', 'cv2.BORDER_CONSTANT'], {'value': 'color'}), '(img, top, bottom, left, right, cv2.BORDER_CONSTANT,\n value=color)\n', (2278, 2347), False, 'import cv2\n'), ((3726, 3773), 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', 'color'], 
{'thickness': 'tl'}), '(img, c1, c2, color, thickness=tl)\n', (3739, 3773), False, 'import cv2\n'), ((8752, 8774), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (8766, 8774), False, 'import os\n'), ((8801, 8817), 'os.mkdir', 'os.mkdir', (['output'], {}), '(output)\n', (8809, 8817), False, 'import os\n'), ((9422, 9446), 'pycocotools.coco.COCO', 'coco.COCO', (['gt_annot_path'], {}), '(gt_annot_path)\n', (9431, 9446), True, 'import pycocotools.coco as coco\n'), ((9457, 9473), 'pycocotools.coco.getImgIds', 'coco.getImgIds', ([], {}), '()\n', (9471, 9473), True, 'import pycocotools.coco as coco\n'), ((10803, 10828), 'pycocotools.coco.loadRes', 'coco.loadRes', (['result_file'], {}), '(result_file)\n', (10815, 10828), True, 'import pycocotools.coco as coco\n'), ((10841, 10871), 'pycocotools.cocoeval.COCOeval', 'COCOeval', (['coco', 'cocoDt', '"""bbox"""'], {}), "(coco, cocoDt, 'bbox')\n", (10849, 10871), False, 'from pycocotools.cocoeval import COCOeval\n'), ((11303, 11328), 'os.path.exists', 'os.path.exists', (['path_det_'], {}), '(path_det_)\n', (11317, 11328), False, 'import os\n'), ((11473, 11495), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (11487, 11495), False, 'import os\n'), ((11522, 11538), 'os.mkdir', 'os.mkdir', (['output'], {}), '(output)\n', (11530, 11538), False, 'import os\n'), ((11907, 11926), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (11917, 11926), False, 'import os\n'), ((2837, 2869), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (2847, 2869), True, 'import numpy as np\n'), ((2884, 2916), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (2894, 2916), True, 'import numpy as np\n'), ((2931, 2963), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (2941, 2963), True, 'import numpy as np\n'), ((2978, 3010), 'numpy.minimum', 'np.minimum', (['y2[i]', 
'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (2988, 3010), True, 'import numpy as np\n'), ((3050, 3080), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (3060, 3080), True, 'import numpy as np\n'), ((3093, 3123), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (3103, 3123), True, 'import numpy as np\n'), ((3997, 4034), 'cv2.rectangle', 'cv2.rectangle', (['img', 'c1', 'c2', 'color', '(-1)'], {}), '(img, c1, c2, color, -1)\n', (4010, 4034), False, 'import cv2\n'), ((4063, 4174), 'cv2.putText', 'cv2.putText', (['img', 'label', '(c1[0], c1[1] - 2)', '(0)', '(tl / 4)', '[225, 255, 255]'], {'thickness': 'tf', 'lineType': 'cv2.LINE_AA'}), '(img, label, (c1[0], c1[1] - 2), 0, tl / 4, [225, 255, 255],\n thickness=tf, lineType=cv2.LINE_AA)\n', (4074, 4174), False, 'import cv2\n'), ((4266, 4291), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4289, 4291), False, 'import torch\n'), ((4852, 4886), 'utils.model_utils.load_model', 'load_model', (['self.model', 'model_path'], {}), '(self.model, model_path)\n', (4862, 4886), False, 'from utils.model_utils import load_model\n'), ((5496, 5520), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (5518, 5520), False, 'import torch\n'), ((5534, 5545), 'time.time', 'time.time', ([], {}), '()\n', (5543, 5545), False, 'import time\n'), ((5625, 5680), 'numpy.array', 'np.array', (['[width / 2.0, height / 2.0]'], {'dtype': 'np.float32'}), '([width / 2.0, height / 2.0], dtype=np.float32)\n', (5633, 5680), True, 'import numpy as np\n'), ((5900, 5924), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (5916, 5924), False, 'import torch\n'), ((5933, 5957), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (5955, 5957), False, 'import torch\n'), ((5971, 5982), 'time.time', 'time.time', ([], {}), '()\n', (5980, 5982), False, 'import time\n'), ((6913, 6937), 
'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (6935, 6937), False, 'import torch\n'), ((6951, 6962), 'time.time', 'time.time', ([], {}), '()\n', (6960, 6962), False, 'import time\n'), ((7062, 7174), 'utils.post_process.ctdet_post_process', 'ctdet_post_process', (['dets', "[meta['c']]", "[meta['s']]", "meta['out_height']", "meta['out_width']", 'self.num_classes'], {}), "(dets, [meta['c']], [meta['s']], meta['out_height'], meta\n ['out_width'], self.num_classes)\n", (7080, 7174), False, 'from utils.post_process import ctdet_post_process\n'), ((7345, 7369), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (7367, 7369), False, 'import torch\n'), ((7383, 7394), 'time.time', 'time.time', ([], {}), '()\n', (7392, 7394), False, 'import time\n'), ((7550, 7574), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (7572, 7574), False, 'import torch\n'), ((7588, 7599), 'time.time', 'time.time', ([], {}), '()\n', (7597, 7599), False, 'import time\n'), ((8778, 8799), 'shutil.rmtree', 'shutil.rmtree', (['output'], {}), '(output)\n', (8791, 8799), False, 'import shutil\n'), ((9777, 9809), 'os.path.join', 'os.path.join', (['img_dir', 'file_name'], {}), '(img_dir, file_name)\n', (9789, 9809), False, 'import os\n'), ((9818, 9840), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (9828, 9840), False, 'import cv2\n'), ((10493, 10522), 'cv2.namedWindow', 'cv2.namedWindow', (['"""heatmap"""', '(0)'], {}), "('heatmap', 0)\n", (10508, 10522), False, 'import cv2\n'), ((10581, 10606), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img"""', '(0)'], {}), "('img', 0)\n", (10596, 10606), False, 'import cv2\n'), ((10609, 10631), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (10619, 10631), False, 'import cv2\n'), ((10640, 10654), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (10651, 10654), False, 'import cv2\n'), ((10749, 10791), 'json.dump', 'json.dump', (['coco_res', 'f_dump'], 
{'cls': 'NpEncoder'}), '(coco_res, f_dump, cls=NpEncoder)\n', (10758, 10791), False, 'import json\n'), ((11332, 11356), 'shutil.rmtree', 'shutil.rmtree', (['path_det_'], {}), '(path_det_)\n', (11345, 11356), False, 'import shutil\n'), ((11400, 11425), 'os.path.exists', 'os.path.exists', (['path_det_'], {}), '(path_det_)\n', (11414, 11425), False, 'import os\n'), ((11429, 11448), 'os.mkdir', 'os.mkdir', (['path_det_'], {}), '(path_det_)\n', (11437, 11448), False, 'import os\n'), ((11499, 11520), 'shutil.rmtree', 'shutil.rmtree', (['output'], {}), '(output)\n', (11512, 11520), False, 'import shutil\n'), ((12002, 12029), 'cv2.imread', 'cv2.imread', (['(img_dir + file_)'], {}), '(img_dir + file_)\n', (12012, 12029), False, 'import cv2\n'), ((13447, 13476), 'cv2.namedWindow', 'cv2.namedWindow', (['"""heatmap"""', '(0)'], {}), "('heatmap', 0)\n", (13462, 13476), False, 'import cv2\n'), ((13535, 13560), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img"""', '(0)'], {}), "('img', 0)\n", (13550, 13560), False, 'import cv2\n'), ((13563, 13585), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (13573, 13585), False, 'import cv2\n'), ((13594, 13608), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (13605, 13608), False, 'import cv2\n'), ((3280, 3303), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (3288, 3303), True, 'import numpy as np\n'), ((3851, 3908), 'cv2.getTextSize', 'cv2.getTextSize', (['label', '(0)'], {'fontScale': '(tl / 3)', 'thickness': 'tf'}), '(label, 0, fontScale=tl / 3, thickness=tf)\n', (3866, 3908), False, 'import cv2\n'), ((4319, 4341), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (4331, 4341), False, 'import torch\n'), ((4382, 4401), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4394, 4401), False, 'import torch\n'), ((4636, 4759), 'msra_resnet.get_pose_net', 'resnet', ([], {'num_layers': 'num_layer', 'heads': "{'hm': self.num_classes, 'wh': 2, 'reg': 
2}", 'head_conv': 'head_conv_', 'pretrained': '(False)'}), "(num_layers=num_layer, heads={'hm': self.num_classes, 'wh': 2, 'reg':\n 2}, head_conv=head_conv_, pretrained=False)\n", (4642, 4759), True, 'from msra_resnet import get_pose_net as resnet\n'), ((6263, 6278), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6276, 6278), False, 'import torch\n'), ((6292, 6316), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (6314, 6316), False, 'import torch\n'), ((6334, 6345), 'time.time', 'time.time', ([], {}), '()\n', (6343, 6345), False, 'import time\n'), ((6402, 6426), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (6424, 6426), False, 'import torch\n'), ((6444, 6455), 'time.time', 'time.time', ([], {}), '()\n', (6453, 6455), False, 'import time\n'), ((6746, 6785), 'models.decode.ctdet_decode', 'ctdet_decode', (['hm', 'wh'], {'reg': 'reg', 'K': 'self.K'}), '(hm, wh, reg=reg, K=self.K)\n', (6758, 6785), False, 'from models.decode import ctdet_decode\n'), ((6798, 6822), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (6820, 6822), False, 'import torch\n'), ((12054, 12105), 'shutil.copyfile', 'shutil.copyfile', (['(img_dir + file_)', '(path_det_ + file_)'], {}), '(img_dir + file_, path_det_ + file_)\n', (12069, 12105), False, 'import shutil\n'), ((12180, 12280), 'xml_writer.PascalVocWriter', 'PascalVocWriter', (['"""./"""', 'file_', '(img_h, img_w, 3)'], {'localImgPath': '"""./"""', 'usrname': '"""RGB_HandPose_EVAL"""'}), "('./', file_, (img_h, img_w, 3), localImgPath='./', usrname=\n 'RGB_HandPose_EVAL')\n", (12195, 12280), False, 'from xml_writer import PascalVocWriter\n'), ((4982, 5050), 'numpy.array', 'np.array', (['[[[0.40789655, 0.44719303, 0.47026116]]]'], {'dtype': 'np.float32'}), '([[[0.40789655, 0.44719303, 0.47026116]]], dtype=np.float32)\n', (4990, 5050), True, 'import numpy as np\n'), ((5087, 5154), 'numpy.array', 'np.array', (['[[[0.2886383, 0.27408165, 0.27809834]]]'], {'dtype': 
'np.float32'}), '([[[0.2886383, 0.27408165, 0.27809834]]], dtype=np.float32)\n', (5095, 5154), True, 'import numpy as np\n'), ((9718, 9745), 'pycocotools.coco.loadImgs', 'coco.loadImgs', ([], {'ids': '[img_id]'}), '(ids=[img_id])\n', (9731, 9745), True, 'import pycocotools.coco as coco\n'), ((10254, 10295), 'data_iterator.LoadImagesAndLabels.class_name.index', 'LoadImagesAndLabels.class_name.index', (['cls'], {}), '(cls)\n', (10290, 10295), False, 'from data_iterator import LoadImagesAndLabels\n'), ((10456, 10484), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (10472, 10484), False, 'import os\n'), ((12653, 12694), 'data_iterator.LoadImagesAndLabels.class_name.index', 'LoadImagesAndLabels.class_name.index', (['cls'], {}), '(cls)\n', (12689, 12694), False, 'from data_iterator import LoadImagesAndLabels\n'), ((13069, 13088), 'numpy.array', 'np.array', (['nms_dets_'], {}), '(nms_dets_)\n', (13077, 13088), True, 'import numpy as np\n'), ((7244, 7282), 'numpy.array', 'np.array', (['dets[0][j]'], {'dtype': 'np.float32'}), '(dets[0][j], dtype=np.float32)\n', (7252, 7282), True, 'import numpy as np\n'), ((7914, 7980), 'numpy.concatenate', 'np.concatenate', (['[detection[j] for detection in detections]'], {'axis': '(0)'}), '([detection[j] for detection in detections], axis=0)\n', (7928, 7980), True, 'import numpy as np\n'), ((10077, 10118), 'data_iterator.LoadImagesAndLabels.class_name.index', 'LoadImagesAndLabels.class_name.index', (['cls'], {}), '(cls)\n', (10113, 10118), False, 'from data_iterator import LoadImagesAndLabels\n')] |
import naturalize.crossover.core as c
import naturalize.crossover.strategies as st
from naturalize.solutionClass import Individual
import numpy as np
import pytest
# from naturalize.crossover.strategies import crossGene, basicCrossover
from naturalize.solutionClass import Individual
import numpy as np
np.random.seed(25)
# df.defaultFitness(individual, env)
genea = np.empty(5)
geneb = np.empty(5)
individualA = Individual([genea])
individualB = Individual([geneb])
# genea = np.array([1,1,1,1,1,0])
# geneb = np.array([1,2,2,2,2,1])
basicCrossover = c.getCrossover()
def test_crossGene():
out1, out2 = st.crossGeneSingleCut(genea, geneb)
check1 = np.all(genea[:4] == out1[:4]) and (geneb[4] == out2[4])
check2 = np.all(geneb[:4] == out2[:4]) and (genea[4] == out1[4])
assert np.all(check1 == check2)
def test_default_crossGene():
Indout1, Indout2 = basicCrossover(individualA, individualB)
genea = individualA.genotype[0]
geneb = individualB.genotype[0]
out1 = Indout1.genotype[0]
out2 = Indout2.genotype[0]
check1 = np.all(genea[:2] == out1[:2]) and (geneb[2:] == out2[2:])
check2 = np.all(geneb[:2] == out2[:2]) and (genea[2:] == out1[2:])
assert np.all(check1 == check2)
# test_crossGene()
# test_default_crossGene()
| [
"naturalize.crossover.core.getCrossover",
"numpy.empty",
"numpy.random.seed",
"naturalize.solutionClass.Individual",
"numpy.all",
"naturalize.crossover.strategies.crossGeneSingleCut"
] | [((309, 327), 'numpy.random.seed', 'np.random.seed', (['(25)'], {}), '(25)\n', (323, 327), True, 'import numpy as np\n'), ((374, 385), 'numpy.empty', 'np.empty', (['(5)'], {}), '(5)\n', (382, 385), True, 'import numpy as np\n'), ((394, 405), 'numpy.empty', 'np.empty', (['(5)'], {}), '(5)\n', (402, 405), True, 'import numpy as np\n'), ((421, 440), 'naturalize.solutionClass.Individual', 'Individual', (['[genea]'], {}), '([genea])\n', (431, 440), False, 'from naturalize.solutionClass import Individual\n'), ((455, 474), 'naturalize.solutionClass.Individual', 'Individual', (['[geneb]'], {}), '([geneb])\n', (465, 474), False, 'from naturalize.solutionClass import Individual\n'), ((561, 577), 'naturalize.crossover.core.getCrossover', 'c.getCrossover', ([], {}), '()\n', (575, 577), True, 'import naturalize.crossover.core as c\n'), ((624, 659), 'naturalize.crossover.strategies.crossGeneSingleCut', 'st.crossGeneSingleCut', (['genea', 'geneb'], {}), '(genea, geneb)\n', (645, 659), True, 'import naturalize.crossover.strategies as st\n'), ((819, 843), 'numpy.all', 'np.all', (['(check1 == check2)'], {}), '(check1 == check2)\n', (825, 843), True, 'import numpy as np\n'), ((1253, 1277), 'numpy.all', 'np.all', (['(check1 == check2)'], {}), '(check1 == check2)\n', (1259, 1277), True, 'import numpy as np\n'), ((678, 707), 'numpy.all', 'np.all', (['(genea[:4] == out1[:4])'], {}), '(genea[:4] == out1[:4])\n', (684, 707), True, 'import numpy as np\n'), ((747, 776), 'numpy.all', 'np.all', (['(geneb[:4] == out2[:4])'], {}), '(geneb[:4] == out2[:4])\n', (753, 776), True, 'import numpy as np\n'), ((1108, 1137), 'numpy.all', 'np.all', (['(genea[:2] == out1[:2])'], {}), '(genea[:2] == out1[:2])\n', (1114, 1137), True, 'import numpy as np\n'), ((1179, 1208), 'numpy.all', 'np.all', (['(geneb[:2] == out2[:2])'], {}), '(geneb[:2] == out2[:2])\n', (1185, 1208), True, 'import numpy as np\n')] |
import numpy as np
def one_of_k_encoding(x, allowable_set):
if x not in allowable_set:
raise Exception("input {0} not in allowable set{1}:".format(
x, allowable_set))
return list(map(lambda s: x == s, allowable_set))
def one_of_k_encoding_unk(x, allowable_set):
"""Maps inputs not in the allowable set to the last element."""
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
def get_len_matrix(len_list):
len_list = np.array(len_list)
max_nodes = np.sum(len_list)
curr_sum = 0
len_matrix = []
for l in len_list:
curr = np.zeros(max_nodes)
curr[curr_sum:curr_sum + l] = 1
len_matrix.append(curr)
curr_sum += l
return np.array(len_matrix)
| [
"numpy.array",
"numpy.zeros",
"numpy.sum"
] | [((524, 542), 'numpy.array', 'np.array', (['len_list'], {}), '(len_list)\n', (532, 542), True, 'import numpy as np\n'), ((559, 575), 'numpy.sum', 'np.sum', (['len_list'], {}), '(len_list)\n', (565, 575), True, 'import numpy as np\n'), ((776, 796), 'numpy.array', 'np.array', (['len_matrix'], {}), '(len_matrix)\n', (784, 796), True, 'import numpy as np\n'), ((651, 670), 'numpy.zeros', 'np.zeros', (['max_nodes'], {}), '(max_nodes)\n', (659, 670), True, 'import numpy as np\n')] |
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
from IMLearn.learners.classifiers import GaussianNaiveBayes, LDA, Perceptron
import numpy as np
# Ex 1 (earlier exercise kept for reference, currently disabled)
# x = np.array([1, 5, 2, 3, 8, -4, -2, 5, 1, 10, -10, 4, 5, 2, 7, 1, 1, 3, 2, -1, -3, 1, -4, 1, 2, 1,
#               -4, -4, 1, 3, 2, 6, -6, 8, 3, -6, 4, 1, -2, 3, 1, 4, 1, 4, -2, 3, -1, 0, 3, 5, 0, -2])
# model = UnivariateGaussian()
# model.fit(x)
# print(model.log_likelihood(1, 1, x))
# print(model.log_likelihood(10, 1, x))
# print(np.var(np.array([0.917, 0.166, -0.03, 0.463])))
# Fit a Gaussian naive Bayes on 1-D samples 0..7 with labels 0/1/2
# and print the fitted class priors and means.
train_y = np.array([0, 0, 1, 1, 1, 1, 2, 2])
train_X = np.array([0, 1, 2, 3, 4, 5, 6, 7])
naive = GaussianNaiveBayes()
naive.fit(train_X, train_y)
print(f"Naive pi : {naive.pi_}")
print(f"Naive mu_ : {naive.mu_}")
# 2-D samples: first two columns are features, last column is the label.
S = np.array([[1, 1, 0], [1, 2, 0], [2, 3, 1],
              [2, 4, 1], [3, 3, 1], [3, 4, 1]])
x = S[:, :2]
y = S[:, 2]
print(f"X= {x}")
print(f"y= {y}")
naive = GaussianNaiveBayes()
naive.fit(x, y)
print(f"Var: {naive.vars_}")
# Refit on the 1-D data and print the per-class means again.
x = np.array([0, 1, 2, 3, 4, 5, 6, 7])
y = np.array([0, 0, 1, 1, 1, 1, 2, 2])
naive.fit(x, y)
print(f"Pois mu: { naive.mu_}")
| [
"numpy.array",
"IMLearn.learners.classifiers.GaussianNaiveBayes"
] | [((588, 622), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 1, 1, 2, 2])\n', (596, 622), True, 'import numpy as np\n'), ((634, 668), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7])\n', (642, 668), True, 'import numpy as np\n'), ((680, 700), 'IMLearn.learners.classifiers.GaussianNaiveBayes', 'GaussianNaiveBayes', ([], {}), '()\n', (698, 700), False, 'from IMLearn.learners.classifiers import GaussianNaiveBayes, LDA, Perceptron\n'), ((806, 882), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 2, 0], [2, 3, 1], [2, 4, 1], [3, 3, 1], [3, 4, 1]]'], {}), '([[1, 1, 0], [1, 2, 0], [2, 3, 1], [2, 4, 1], [3, 3, 1], [3, 4, 1]])\n', (814, 882), True, 'import numpy as np\n'), ((969, 989), 'IMLearn.learners.classifiers.GaussianNaiveBayes', 'GaussianNaiveBayes', ([], {}), '()\n', (987, 989), False, 'from IMLearn.learners.classifiers import GaussianNaiveBayes, LDA, Perceptron\n'), ((1044, 1078), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7])\n', (1052, 1078), True, 'import numpy as np\n'), ((1084, 1118), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 1, 1, 2, 2])\n', (1092, 1118), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 25 17:04:02 2019
@author: jessi
Example of use:--input-file ./prueba1/teclado_ISSA_63_2019-04-25T08_33_54.975Z_sp.wav --xCoor 0.6052 --yCoor -0.5411 --zCoor 0.5828
"""
import os
import argparse
import numpy as np
from scipy.io import wavfile
from hmmlearn import hmm
from python_speech_features import mfcc
from sklearn.externals import joblib
# Function to parse input arguments
def build_arg_parser():
    """Create the command-line parser for the HMM evaluation script.

    All four options are mandatory: the input WAV file plus the
    x/y/z coordinates of the sound location.
    """
    parser = argparse.ArgumentParser(description='Trains the HMM classifier')
    required_options = [
        ('--input-file', 'input_file', 'Input file to evaluate '),
        ('--xCoor', 'xCoor', 'X Coordinate '),
        ('--yCoor', 'yCoor', 'Y Coordinate '),
        ('--zCoor', 'zCoor', 'Z Coordinate '),
    ]
    for flag, dest, help_text in required_options:
        parser.add_argument(flag, dest=dest, required=True, help=help_text)
    return parser
# Class to handle all HMM related processing
class HMMTrainer(object):
    """Thin wrapper around ``hmmlearn``'s GaussianHMM.

    Only the 'GaussianHMM' model type is supported; any other
    *model_name* raises ``TypeError``.
    """

    def __init__(self, model_name='GaussianHMM', n_components=4, cov_type='diag', n_iter=1000):
        self.model_name = model_name
        self.n_components = n_components
        self.cov_type = cov_type
        self.n_iter = n_iter
        self.models = []
        if self.model_name != 'GaussianHMM':
            raise TypeError('Invalid model type')
        self.model = hmm.GaussianHMM(n_components=self.n_components,
                                    covariance_type=self.cov_type,
                                    n_iter=self.n_iter)

    def train(self, X):
        """Fit the model on X, a 2-D array with one feature frame per row."""
        np.seterr(all='ignore')  # silence numerical warnings raised during EM
        self.models.append(self.model.fit(X))

    def get_score(self, input_data):
        """Return the model's log-likelihood score for *input_data*."""
        return self.model.score(input_data)
if __name__=='__main__':
    # Classify one WAV recording with pre-trained per-sound HMMs, then
    # gate the prediction by how close the reported 3-D location is to
    # the known position of that sound source.
    args = build_arg_parser().parse_args()
    input_file = args.input_file
    xCoor = args.xCoor
    yCoor = args.yCoor
    zCoor = args.zCoor
    # List of (HMMTrainer, label) pairs serialized at training time.
    hmm_models = joblib.load("./classifier/2019-Sonidos-Marcela-v1.pkl")
    # Known location of each sound class: [3-D position, distance threshold].
    # NOTE(review): positions look like unit-norm direction vectors -- confirm.
    dictLocations = {
            "AGITAR_MEDICAMENTO": [np.array([0.151, 0.546, 0.816]),0.25],
            "ASPIRAR": [np.array([-0.449,0.150,0.872]),0.25],
            "CAMPANA_ESTUFA": [np.array([0.626,0.250,0.738]),0.25],
            "LAVAR_TRASTES": [np.array([0.505,0.490,0.711]),0.25],
            "LICUADORA": [np.array([0.599,0.482,0.638]),0.25],
            }
    LocCoorThisSound = np.array([float(xCoor),float(yCoor),float(zCoor)])
    sampling_freq, audio = wavfile.read(input_file)
    # Extract MFCC features
    mfcc_features = mfcc(audio, sampling_freq)
    # Pick the model with the highest log-likelihood score.
    max_score = -99999  # sentinel lower than any expected score
    output_label = None
    for item in hmm_models:
        hmm_model, label = item
        score = hmm_model.get_score(mfcc_features)
        if score > max_score:
            max_score = score
            output_label = label
    # Labels are stored as Windows paths; the class name is the last component.
    predictedClass = output_label.split('\\')
    # Print the output: accept only if the location is within the class
    # threshold, then grade confidence by score bands.
    # NOTE(review): the -30000/-32000 cutoffs are empirical -- confirm source.
    if np.linalg.norm((dictLocations[predictedClass[-1]][0])-LocCoorThisSound)<=dictLocations[predictedClass[-1]][1]:
        if max_score>=-30000:
            print ("Predicted:", predictedClass[-1] )
        elif max_score<-30000 and max_score>-32000:
            print ("Was that a", predictedClass[-1] )
        else:
            print ("What was that?")
    else:
        print ("What was that?")
| [
"hmmlearn.hmm.GaussianHMM",
"argparse.ArgumentParser",
"sklearn.externals.joblib.load",
"python_speech_features.mfcc",
"numpy.array",
"scipy.io.wavfile.read",
"numpy.linalg.norm",
"numpy.seterr"
] | [((489, 553), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Trains the HMM classifier"""'}), "(description='Trains the HMM classifier')\n", (512, 553), False, 'import argparse\n'), ((2076, 2131), 'sklearn.externals.joblib.load', 'joblib.load', (['"""./classifier/2019-Sonidos-Marcela-v1.pkl"""'], {}), "('./classifier/2019-Sonidos-Marcela-v1.pkl')\n", (2087, 2131), False, 'from sklearn.externals import joblib\n'), ((2609, 2633), 'scipy.io.wavfile.read', 'wavfile.read', (['input_file'], {}), '(input_file)\n', (2621, 2633), False, 'from scipy.io import wavfile\n'), ((2684, 2710), 'python_speech_features.mfcc', 'mfcc', (['audio', 'sampling_freq'], {}), '(audio, sampling_freq)\n', (2688, 2710), False, 'from python_speech_features import mfcc\n'), ((1683, 1706), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (1692, 1706), True, 'import numpy as np\n'), ((3091, 3162), 'numpy.linalg.norm', 'np.linalg.norm', (['(dictLocations[predictedClass[-1]][0] - LocCoorThisSound)'], {}), '(dictLocations[predictedClass[-1]][0] - LocCoorThisSound)\n', (3105, 3162), True, 'import numpy as np\n'), ((1409, 1512), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', ([], {'n_components': 'self.n_components', 'covariance_type': 'self.cov_type', 'n_iter': 'self.n_iter'}), '(n_components=self.n_components, covariance_type=self.\n cov_type, n_iter=self.n_iter)\n', (1424, 1512), False, 'from hmmlearn import hmm\n'), ((2194, 2225), 'numpy.array', 'np.array', (['[0.151, 0.546, 0.816]'], {}), '([0.151, 0.546, 0.816])\n', (2202, 2225), True, 'import numpy as np\n'), ((2254, 2285), 'numpy.array', 'np.array', (['[-0.449, 0.15, 0.872]'], {}), '([-0.449, 0.15, 0.872])\n', (2262, 2285), True, 'import numpy as np\n'), ((2320, 2350), 'numpy.array', 'np.array', (['[0.626, 0.25, 0.738]'], {}), '([0.626, 0.25, 0.738])\n', (2328, 2350), True, 'import numpy as np\n'), ((2384, 2414), 'numpy.array', 'np.array', (['[0.505, 0.49, 0.711]'], {}), '([0.505, 
0.49, 0.711])\n', (2392, 2414), True, 'import numpy as np\n'), ((2444, 2475), 'numpy.array', 'np.array', (['[0.599, 0.482, 0.638]'], {}), '([0.599, 0.482, 0.638])\n', (2452, 2475), True, 'import numpy as np\n')] |
from .plotter import Plotter
from matplotlib import pyplot as plt
import pandas as pd
import logging
import numpy as np
import seaborn as sns
class LinePlotter(Plotter):
    """Plot smoothed learning curves from experiment logs with seaborn."""

    def plot_group(self, data, x, y, label, ax, xpid=None, read_every=1, smooth_window=10, align_x=1, alpha=0.4, linewidth=3):
        """Draw one curve group on *ax*.

        Smooths *y* with a rolling mean of *smooth_window* rows.  When
        *xpid* names a column identifying individual runs, each run is
        drawn as a dashed line and the aggregate seaborn curve is drawn
        over all runs.  Mutates the passed DataFrame (adds a
        ``y + '_smooth'`` column; quantizes *x* by *align_x*).
        """
        # NOTE(review): _get_lines.prop_cycler is private matplotlib API.
        color = next(ax._get_lines.prop_cycler)['color']
        if xpid is not None:
            all_data = []
            unique_xpids = sorted(data[xpid].unique())
            for xid in unique_xpids:
                # Subsample every read_every-th row of this run.
                xp_data = data[data[xpid] == xid][::read_every]
                xp_data[y+'_smooth'] = xp_data[y].rolling(smooth_window, min_periods=smooth_window//2).mean()
                all_data.append(xp_data)
                xs = xp_data[x].to_numpy()
                ys = xp_data[y+'_smooth'].to_numpy()
                # Sort by x so the dashed per-run line does not zig-zag.
                order = np.argsort(xs)
                xs = np.take_along_axis(xs, order, axis=0)
                ys = np.take_along_axis(ys, order, axis=0)
                ax.plot(xs, ys, linestyle='dashed', color=color, label='_nolegend_', alpha=alpha)
            data = pd.concat(all_data)
        else:
            data = data[::read_every]
            data[y+'_smooth'] = data[y].rolling(smooth_window, min_periods=smooth_window//2).mean()
        if label is not None:
            # Quantize x so seaborn can aggregate runs at shared x values.
            data[x] = data[x] // align_x * align_x
            sns.lineplot(data=data, x=x, y=y+'_smooth', label=label, ax=ax, color=color, linewidth=linewidth)

    def plot(self, exps_and_logs, x, y, group=None, xpid=None, read_every=1, smooth_window=10, align_x=1, ax=None, linewidth=3, alpha=0.4, force_label=None):
        """Flatten (experiment, log-records) pairs into one DataFrame and
        plot them, one curve per unique value of *group* (or a single
        curve when *group* is None).  Returns the axes used.
        """
        if ax is None:
            fig, ax = plt.subplots(figsize=(10, 10))
        # Merge each log record with its experiment's config columns.
        data = []
        for exp, logs in exps_and_logs:
            for r in logs:
                r = r.copy()
                r.update(exp.config)
                data.append(r)
        if not data:
            logging.critical('Nothing to plot!')
            return ax
        data = pd.DataFrame(data)
        if group is None:
            self.plot_group(data=data, x=x, y=y, label=force_label, ax=ax, xpid=xpid, read_every=read_every, smooth_window=smooth_window, align_x=align_x, alpha=alpha, linewidth=linewidth)
        else:
            unique_groups = sorted(data[group].unique())
            for g in unique_groups:
                group_data = data[data[group] == g]
                label = force_label or g
                # NOTE(review): alpha/linewidth are not forwarded here,
                # unlike the group=None call above -- confirm intended.
                self.plot_group(data=group_data, x=x, y=y, label=label, ax=ax, xpid=xpid, read_every=read_every, smooth_window=smooth_window, align_x=align_x)
        return ax
| [
"seaborn.lineplot",
"numpy.argsort",
"logging.critical",
"pandas.DataFrame",
"numpy.take_along_axis",
"pandas.concat",
"matplotlib.pyplot.subplots"
] | [((1980, 1998), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1992, 1998), True, 'import pandas as pd\n'), ((1091, 1110), 'pandas.concat', 'pd.concat', (['all_data'], {}), '(all_data)\n', (1100, 1110), True, 'import pandas as pd\n'), ((1356, 1460), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'data', 'x': 'x', 'y': "(y + '_smooth')", 'label': 'label', 'ax': 'ax', 'color': 'color', 'linewidth': 'linewidth'}), "(data=data, x=x, y=y + '_smooth', label=label, ax=ax, color=\n color, linewidth=linewidth)\n", (1368, 1460), True, 'import seaborn as sns\n'), ((1658, 1688), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1670, 1688), True, 'from matplotlib import pyplot as plt\n'), ((1905, 1941), 'logging.critical', 'logging.critical', (['"""Nothing to plot!"""'], {}), "('Nothing to plot!')\n", (1921, 1941), False, 'import logging\n'), ((840, 854), 'numpy.argsort', 'np.argsort', (['xs'], {}), '(xs)\n', (850, 854), True, 'import numpy as np\n'), ((876, 913), 'numpy.take_along_axis', 'np.take_along_axis', (['xs', 'order'], {'axis': '(0)'}), '(xs, order, axis=0)\n', (894, 913), True, 'import numpy as np\n'), ((935, 972), 'numpy.take_along_axis', 'np.take_along_axis', (['ys', 'order'], {'axis': '(0)'}), '(ys, order, axis=0)\n', (953, 972), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
class utilsClass:
    """Small demo helpers for sampling and plotting Gaussian curves."""

    # Fix: these methods take no ``self``; without @staticmethod calling
    # them on an instance would raise TypeError.  Class-level calls
    # (utilsClass.generateGaussians()) keep working unchanged.
    @staticmethod
    def pdfFunction():
        """Sample 1000 standard-normal points, show a histogram and
        print the sample mean and standard deviation."""
        sample = np.random.normal(size=1000)
        plt.hist(sample, bins=20)
        plt.show()
        sample_mean = np.mean(sample)
        sample_std = np.std(sample)
        print(sample_mean)
        print(sample_std)

    @staticmethod
    def generateGaussians():
        """Plot three Gaussian-shaped curves with different (mu, sigma)
        on a shared x grid in [-3, 3]."""
        x = np.linspace(-3, 3, 120)
        for mu, sigma in [(-1, 1), (0, 2), (2, 3)]:
            # NOTE(review): no 1/(sigma*sqrt(2*pi)) normalisation -- the
            # curves are shape-only; presumably intentional for the demo.
            plt.plot(x, np.exp(-np.power(x - mu, 2.) / (2 * np.power(sigma, 2))))
        plt.show()


if __name__ == "__main__":
    utilsClass.generateGaussians()
| [
"numpy.random.normal",
"numpy.mean",
"matplotlib.pyplot.hist",
"numpy.power",
"numpy.linspace",
"numpy.std",
"matplotlib.pyplot.show"
] | [((111, 138), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000)'}), '(size=1000)\n', (127, 138), True, 'import numpy as np\n'), ((148, 173), 'matplotlib.pyplot.hist', 'plt.hist', (['sample'], {'bins': '(20)'}), '(sample, bins=20)\n', (156, 173), True, 'import matplotlib.pyplot as plt\n'), ((182, 192), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (190, 192), True, 'import matplotlib.pyplot as plt\n'), ((216, 231), 'numpy.mean', 'np.mean', (['sample'], {}), '(sample)\n', (223, 231), True, 'import numpy as np\n'), ((253, 267), 'numpy.std', 'np.std', (['sample'], {}), '(sample)\n', (259, 267), True, 'import numpy as np\n'), ((372, 395), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(120)'], {}), '(-3, 3, 120)\n', (383, 395), True, 'import numpy as np\n'), ((530, 540), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (538, 540), True, 'import matplotlib.pyplot as plt\n'), ((473, 494), 'numpy.power', 'np.power', (['(x - mu)', '(2.0)'], {}), '(x - mu, 2.0)\n', (481, 494), True, 'import numpy as np\n'), ((499, 517), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (507, 517), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 19 16:52:59 2018
@author: <NAME>
"""
def ann_module(input_data,output_data,reckon,num_hidden,coef,nr_epochs,val_samples,test_samples,directory,columnst,col,row,flag_train):
import numpy as np
import math
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import grass.script as gscript
os.mkdir(directory) #Create directory
#Scaling the data
max_in = np.zeros((1,input_data.shape[1]))
min_in = np.zeros((1,input_data.shape[1]))
max_out = 1
min_out = 0
alldata = np.concatenate((input_data,reckon))
for i in range(0,input_data.shape[1]):
max_in[0,i] = np.nanmax(alldata[:,i])
min_in[0,i] = np.nanmin(alldata[:,i])
os.chdir(directory)
np.savetxt('max_in.txt', (max_in), delimiter=',')
np.savetxt('min_in.txt', (min_in), delimiter=',')
os.chdir('../')
for j in range(0,input_data.shape[1]):
if max_in[0,j] != 0:
input_data[:,j] = (input_data[:,j] - min_in[0,j])/(max_in[0,j]-min_in[0,j])
for j in range(0,reckon.shape[1]):
if max_in[0,j] != 0:
reckon[:,j] = (reckon[:,j] - min_in[0,j])/(max_in[0,j]-min_in[0,j])
#Resample data
m = 1
n = input_data.shape[0]
n_test = int(math.ceil(test_samples*n)) #TORNAR GENERICA ESSAS %%
n_val = int(math.ceil(val_samples*n))
n_train = int(n - n_test - n_val)
reg_set = np.arange(0,n)
T = np.zeros((n_train,1))
V = np.zeros((n_val,1))
flag_test = 0
B_ind = ((n-m)*np.random.rand(2*n_test,1)+m)
B_ind = [math.floor(x) for x in B_ind]
while flag_test == 0:
if len(np.unique(B_ind)) == n_test:
flag_test = 1
else:
del B_ind[0]
B_ind = np.unique(B_ind)
B_ind = B_ind.astype(int)
B = reg_set[B_ind]-1 #Test
reg_rest = np.setdiff1d(reg_set,B) #Dataset for TRAINING and VALIDATIONS
flag_val = 0
V_buff = ((n-n_test-m)*np.random.rand(2*n_val,1)+m)
V_buff = [math.floor(x) for x in V_buff]
while flag_val == 0:
if len(np.unique(V_buff)) == n_val:
flag_val = 1
elif len(np.unique(V_buff)) > n_val:
del V_buff[0]
else:
V_buff = ((n-n_test-m)*np.random.rand(2*n_val,1)+m)
V_buff = [math.floor(x) for x in V_buff]
V_buff = [int(x) for x in V_buff] #Transform to integer
V[:,0] = (reg_rest[np.unique(V_buff)])
T[:,0] = np.setdiff1d(reg_rest,V[:,0])
T = T.astype(np.int64) #Training
V = V.astype(np.int64) #Validations
B = B.astype(np.int64) #Test
p = input_data.shape[1]
input_test = np.zeros((B.shape[0],p))
output_test = np.zeros((B.shape[0],1))
input_train = np.zeros((T.shape[0],p))
output_train = np.zeros((T.shape[0],1))
input_val = np.zeros((B.shape[0],p))
output_val = np.zeros((T.shape[0],1))
n1 = T.shape[0]
n2 = B.shape[0]
n3 = V.shape[0]
for i in range(0,n1-1):
input_train[i,:] = input_data[T[i],:]
output_train[i,:] = output_data[T[i]]
for i in range(0,n2-1):
input_test[i,:] = input_data[B[i],:]
output_test[i,:] = output_data[B[i]]
for i in range(0,n3-1):
input_val[i,:] = input_data[V[i],:]
output_val[i,:] = output_data[V[i]]
output_data = output_data.reshape((output_data.size,1))
#Validations
    def ann_validate(input_data,output_data,weights,peights,biasH,biasO,max_in,max_out,min_in,min_out):
        """Forward-propagate the validation set through the 1-hidden-layer MLP.

        Returns (output, erro): network responses of shape
        (reg_size, num_output) and the signed per-record error
        output - target of shape (1, reg_size).
        NOTE(review): max_in/max_out/min_in/min_out are accepted but
        never used in this function.
        """
        #Collect dimensions
        num_input = input_data.shape[1] #Number of parameters
        reg_size = input_data.shape[0] #Number of records
        num_output = output_data.shape[1]
        num_hidden = peights.shape[0] - 1 #Number of lines of weights matrix of the output layer - 1
        # Scratch accumulators reused across records.
        S = np.zeros((num_hidden,1))
        H = np.zeros((num_hidden,1))
        R = np.zeros((num_output,1))
        output = np.zeros((reg_size,num_output))
        erro = np.zeros((1,reg_size))
        def activation(x): #Sigmoid as activation function
            fx = 1/(1+math.exp(-x))
            return fx
        for k in range(0,reg_size):
            # Hidden layer: weighted sum of inputs plus bias, then sigmoid.
            for i in range(0,num_hidden):
                for j in range(0,num_input):
                    S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
                S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
                H[i,0] = activation(S[i,0])
                S[i,0] = 0
            # Output layer: weighted sum of hidden activations plus bias.
            for i in range(0,num_output):
                for j in range(0,num_hidden):
                    R[i,0] = R[i,0] + H[j,0]*peights[j,i]
                R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
                output[k,i] = activation(R[i,0])
                R[i,0] = 0
            erro[0,k] = output[k,:] - output_data[k,:]
        return output,erro
#Test
    def ann_test(input_data,output_data,weights,peights,biasH,biasO):
        """Forward-propagate the test set and return
        (output, erro_dec, erro_round): network responses, signed
        per-record errors, and those errors rounded to the nearest
        integer (used to count misclassifications downstream).
        """
        #Collect dimensions
        num_input = input_data.shape[1]
        reg_size = input_data.shape[0]
        num_output = output_data.shape[1]
        num_hidden = peights.shape[0] - 1
        def activation(x): #activation function
            fx = 1/(1+math.exp(-x))
            return fx
        # Scratch accumulators reused across records.
        S = np.zeros((num_hidden,1))
        H = np.zeros((num_hidden,1))
        R = np.zeros((num_output,1))
        output = np.zeros((reg_size,num_output))
        erro_dec = np.zeros((1,reg_size))
        erro_round = np.zeros((1,reg_size))
        for k in range(0,reg_size):
            # Hidden layer: weighted input sum plus bias, then sigmoid.
            for i in range(0,num_hidden):
                for j in range(0,num_input):
                    S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
                S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
                H[i,0] = activation(S[i,0])
                S[i,0] = 0
            # Output layer: weighted hidden sum plus bias, then sigmoid.
            for i in range(0,num_output):
                for j in range(0,num_hidden):
                    R[i,0] = R[i,0] + H[j,0]*peights[j,i]
                R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
                output[k,i] = activation(R[i,0])
                R[i,0] = 0
            erro_dec[0,k] = output[k,:] - output_data[k,:]
            erro_round[0,k] = np.around(erro_dec[0,k])
        return output,erro_dec,erro_round
#Reckon
    def ann_reckon(input_data,weights,peights,biasH,biasO):
        """Forward-propagate *input_data* through the trained MLP and
        return the (reg_size, 1) array of sigmoid outputs.  Identical
        network evaluation to ann_validate/ann_test, without targets.
        """
        #Collect dimensions
        num_input = input_data.shape[1]
        reg_size = input_data.shape[0]
        num_output = 1
        num_hidden = peights.shape[0] - 1
        def activation(x):
            fx = 1/(1+math.exp(-x))
            return fx
        # Scratch accumulators reused across records.
        S = np.zeros((num_hidden,1))
        H = np.zeros((num_hidden,1))
        R = np.zeros((num_output,1))
        output = np.zeros((reg_size,num_output))
        for k in range(0,reg_size):
            # Hidden layer: weighted input sum plus bias, then sigmoid.
            for i in range(0,num_hidden):
                for j in range(0,num_input):
                    S[i,0] = S[i,0] + input_data[k,j]*weights[j,i]
                S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
                H[i,0] = activation(S[i,0])
                S[i,0] = 0
            # Output layer: weighted hidden sum plus bias, then sigmoid.
            for i in range(0,num_output):
                for j in range(0,num_hidden):
                    R[i,0] = R[i,0] + H[j,0]*peights[j,i]
                R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
                output[k,i] = activation(R[i,0])
                R[i,0] = 0
        output_reckon = output
        return output_reckon
#Training
num_input = input_train.shape[1] #Number of parameters
reg_size = input_train.shape[0] #Number of examples
num_output = output_train.shape[1] #Number of outputs (in this case, just 1 - susceptibility)
weights = np.random.rand(num_input+1,num_hidden)
peights = np.random.rand(num_hidden+1,num_output)
biasH = np.ones((num_hidden,1))
biasO = np.ones((num_output,1))
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
erro = np.zeros((1,nr_epochs))
erro_train = np.zeros((1,nr_epochs))
erro_validate = np.zeros((1,nr_epochs))
def activation(x):
fx = 1/(1+math.exp(-x))
return fx
for epoch in range(0,nr_epochs):
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_train[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
#Backpropagation
#Uptade the weights in the output layer
for i in range(0,num_hidden):
for j in range(0,num_output):
if i < (num_hidden+1):
peights[i,j] = peights[i,j] + coef*(output_train[k,j] - output[k,j])*output[k,j]*(1-output[k,j])*H[i,0]
elif i == (num_hidden+1):
peights[i,j] = peights[i,j] + coef*(output_train[k,j] - output[k,j])*output[k,j]*(1-output[k,j])*biasO[j,0]
#Uptade the weights in the hidden layer
buff = 0
for j in range(0,num_hidden):
for i in range(0,num_input):
for k1 in range(0,num_output):
buff = buff + (output_train[k,k1] - output[k,k1])*output[k,k1]*(1-output[k,k1])*peights[j,k1]
if i < (num_input+1):
weights[i,j] = weights[i,j] + coef*buff*H[j,0]*(1-H[j,0])*input_train[k,i]
elif i == num_input+1:
weights[i,j] = weights[i,j] + coef*buff*H[j,i]*(1-H[j,0])*biasH[j,0]
buff = 0 #Zeroes the buffer variable
erro_train[0,epoch] = np.linalg.norm((output - output_train)/reg_size)
output_val,erro_val = ann_validate(input_val,output_val,weights,peights,biasH,biasO,max_in,max_out,min_in,min_out)
erro_validate[0,epoch] = np.linalg.norm(erro_val)
#Collects the weights in the minimum validation error
if epoch == 0:
W = weights
P = peights
epoch_min = epoch
erro_min = 1000
elif erro_min > np.linalg.norm(erro_validate[0,epoch]):
W = weights
P = peights
epoch_min = epoch
erro_min = np.linalg.norm(erro_validate[0,epoch])
[output,erro_test,erro_round] = ann_test(input_test,output_test,weights,peights,biasH,biasO)
x = np.arange(1,nr_epochs+1).reshape((nr_epochs,1))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=None)
plt.subplot(1,2,1)
plt.plot(x,erro_train.T,x,erro_validate.T,'g-', epoch_min, erro_min,'g*')
plt.xlabel('Epochs')
plt.ylabel('Root mean square output error')
plt.legend(('Training','Validation','Early stop'))
plt.subplot(1,2,2)
plt.bar(np.arange(1,(erro_test.shape[1])+1),erro_test.reshape(erro_test.shape[1]))
plt.xlabel('Instances')
plt.ylabel('Error (ANN output - real output)')
os.chdir(directory)
plt.savefig('ANN_train_val', dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches='tight', pad_inches=0.2,
frameon=None)
os.chdir('../')
rounded = np.asarray(erro_round).reshape((B.shape[0],1))
error = sum(abs(erro_round.T))
gscript.message(_("Test set error: "))
gscript.message(error)
#Assign the minimum weights to their original names
weights = W
peights = P
os.chdir(directory)
np.savetxt('weights.txt', (weights), delimiter=',')
np.savetxt('peights.txt', (peights), delimiter=',')
np.savetxt('biasH.txt', (biasH), delimiter=',')
np.savetxt('biasO.txt', (biasO), delimiter=',')
os.mkdir('Inputs and outputs')
os.chdir('Inputs and outputs')
np.savetxt('Input_test.txt', (input_test), delimiter=',')
np.savetxt('Output_test.txt', (output_test), delimiter=',')
np.savetxt('Input_val.txt', (input_val), delimiter=',')
np.savetxt('Output_val.txt', (output_val), delimiter=',')
np.savetxt('Input_train.txt', (input_train), delimiter=',')
np.savetxt('Output_train.txt', (output_train), delimiter=',')
np.savetxt('Error_train.txt', (erro_train), delimiter=',')
np.savetxt('Error_val.txt', (erro_validate), delimiter=',')
np.savetxt('Epoch_and_error_min.txt', (epoch_min,erro_min), delimiter=',')
np.savetxt('Test_set_TOTAL_error.txt', (error), delimiter=',')
np.savetxt('Error_test.txt', (erro_test), delimiter=',')
param = open('ANN_Parameters.txt','w')
param.write('Hidden neurons: '+str(num_hidden))
param.write('\n Learning rate: '+str(coef))
param.write('\n Epochs: '+str(nr_epochs))
param.close()
os.chdir('../')
os.chdir('../')
#Sensitivity analysis
    def sensitivity(input_data,output_size,weights,peights,biasH,biasO):
        """One-at-a-time sensitivity analysis of the trained network.

        For each input parameter, sweeps that parameter over 200 random
        values in [0, 1) while holding every other parameter at its mean,
        and records the network response.
        Returns (sens_set, fixed_par_value, input_sens, output_sens).
        NOTE(review): *output_size* is accepted but never used.
        """
        input_size = input_data.shape[1] #Number of columns (parameters)
        npts = 200 #Number of samples in the sensitivity evaluation set
        sens_set = np.random.random_sample((npts,1)) #Return random floats in the half-open interval [0.0, 1.0)
        fixed_par_value = np.empty([input_size,1])
        ones_sens = np.ones((200,1))
        #Calculation of the normalized mean value of each entry
        for k in range(0,input_size):
            fixed_par_value[k] = (np.mean(input_data[:,k]))
        #Pre-allocation
        input_sens = [[([1] * npts) for j in range(input_size)] for i in range(input_size)]
        output_sens = [[([0] * npts) for j in range(input_size)] for i in range(input_size)]
        input_sens = np.asarray(input_sens,dtype=float)
        # Row k1 of input_sens varies parameter k1 and fixes the rest.
        for k1 in range(0,input_size):
            for k2 in range(0,input_size):
                if k1 == k2:
                    input_sens[k1,k2,:] = sens_set.reshape(sens_set.shape[0]).T
                else:
                    input_sens[k1,k2,:] = (ones_sens*fixed_par_value[k2]).reshape(ones_sens.shape[0])
        # Evaluate the network on each parameter sweep.
        for k1 in range(0,input_size):
            input_sens2 = np.asarray(input_sens[k1]).T
            output = ann_reckon(input_sens2,weights,peights,biasH,biasO)
            output_sens[k1] = output
        return sens_set,fixed_par_value,input_sens,output_sens
[sens_set,fixed_par_value,input_sens,output_sens] = sensitivity(input_data,output_data.shape[1],weights,peights,biasH,biasO)
for k in range(0,input_data.shape[1]):
plt.figure()
plt.plot(sens_set,output_sens[k],'.')
plt.title('Sensitivity analysis. Parameter: '+columnst[k])
plt.ylabel('Output response')
plt.xlabel('Parameter: '+columnst[k])
os.chdir(directory)
plt.savefig('SensitivityAnalysisVar_'+columnst[k], dpi=300, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches='tight', pad_inches=0.2,
frameon=None)
os.chdir('../')
#Reckon
if not flag_train:
output_reckon = ann_reckon(reckon,weights,peights,biasH,biasO)
a = np.reshape(output_reckon,(col,row),order='F')
a = np.transpose(a)
else:
a = 0
return a
#Reckon
def ann_reckon(input_data, weights, peights, biasH, biasO):
    """Forward-propagate *input_data* through a one-hidden-layer MLP.

    Parameters
    ----------
    input_data : (n_records, n_inputs) array of already-scaled features.
    weights    : (n_inputs + 1, n_hidden) input->hidden weights; the last
                 row multiplies the hidden-layer bias.
    peights    : (n_hidden + 1, 1) hidden->output weights; the last row
                 multiplies the output bias.
    biasH, biasO : (n_hidden, 1) and (1, 1) bias activation values.

    Returns
    -------
    (n_records, 1) array of sigmoid network outputs.
    """
    import math
    import numpy as np

    def sigmoid(value):
        return 1 / (1 + math.exp(-value))

    n_records = input_data.shape[0]
    n_inputs = input_data.shape[1]
    n_outputs = 1
    n_hidden = peights.shape[0] - 1
    predictions = np.zeros((n_records, n_outputs))
    for rec in range(n_records):
        # Hidden layer: weighted input sum plus bias term, then sigmoid.
        hidden = np.zeros((n_hidden, 1))
        for h in range(n_hidden):
            net = 0.0
            for col in range(n_inputs):
                net = net + input_data[rec, col] * weights[col, h]
            net = net + biasH[h, 0] * weights[n_inputs, h]
            hidden[h, 0] = sigmoid(net)
        # Output layer: weighted hidden sum plus bias term, then sigmoid.
        for out in range(n_outputs):
            net = 0.0
            for h in range(n_hidden):
                net = net + hidden[h, 0] * peights[h, out]
            net = net + biasO[out, 0] * peights[n_hidden, out]
            predictions[rec, out] = sigmoid(net)
    return predictions
def ANN_batch(input_data,output_data,reckon,hidden,trials,coef,nr_epochs,val_samples,test_samples,directory,columnst,col,row,flag_train):
#hidden is a vector now
#trials is the number of initial conditions
#Train a set of neural networks and select the best one
#Different number of hidden neurons
#Different number of initial conditions
import numpy as np
import math
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import os
import grass.script as gscript
os.mkdir(directory) #Create directory -> REVER HERE
#Scaling the data
max_in = np.zeros((1,input_data.shape[1]))
min_in = np.zeros((1,input_data.shape[1]))
max_out = 1
min_out = 0
alldata = np.concatenate((input_data,reckon))
for i in range(0,input_data.shape[1]):
max_in[0,i] = np.nanmax(alldata[:,i])
min_in[0,i] = np.nanmin(alldata[:,i])
os.chdir(directory)
np.savetxt('max_in.txt', (max_in), delimiter=',')
np.savetxt('min_in.txt', (min_in), delimiter=',')
os.chdir('../')
for j in range(0,input_data.shape[1]):
if max_in[0,j] != 0:
input_data[:,j] = (input_data[:,j] - min_in[0,j])/(max_in[0,j]-min_in[0,j])
for j in range(0,reckon.shape[1]):
if max_in[0,j] != 0:
reckon[:,j] = (reckon[:,j] - min_in[0,j])/(max_in[0,j]-min_in[0,j])
#Resample data
m = 1
n = input_data.shape[0]
n_test = int(math.ceil(test_samples*n)) #TORNAR GENERICA ESSAS %%
n_val = int(math.ceil(val_samples*n))
n_train = int(n - n_test - n_val)
reg_set = np.arange(0,n)
T = np.zeros((n_train,1))
V = np.zeros((n_val,1))
flag_test = 0
B_ind = ((n-m)*np.random.rand(2*n_test,1)+m)
B_ind = [math.floor(x) for x in B_ind]
while flag_test == 0:
if len(np.unique(B_ind)) == n_test:
flag_test = 1
else:
del B_ind[0]
B_ind = np.unique(B_ind)
B_ind = B_ind.astype(int)
B = reg_set[B_ind]-1 #Test
reg_rest = np.setdiff1d(reg_set,B) #Dataset for TRAINING and VALIDATIONS
flag_val = 0
V_buff = ((n-n_test-m)*np.random.rand(2*n_val,1)+m)
V_buff = [math.floor(x) for x in V_buff]
while flag_val == 0:
if len(np.unique(V_buff)) == n_val:
flag_val = 1
elif len(np.unique(V_buff)) > n_val:
del V_buff[0]
else:
V_buff = ((n-n_test-m)*np.random.rand(2*n_val,1)+m)
V_buff = [math.floor(x) for x in V_buff]
V_buff = [int(x) for x in V_buff] #Transform to integer
V[:,0] = (reg_rest[np.unique(V_buff)])
T[:,0] = np.setdiff1d(reg_rest,V[:,0])
T = T.astype(np.int64) #Training
V = V.astype(np.int64) #Validations
B = B.astype(np.int64) #Test
p = input_data.shape[1]
input_test = np.zeros((B.shape[0],p))
output_test = np.zeros((B.shape[0],1))
input_train = np.zeros((T.shape[0],p))
output_train = np.zeros((T.shape[0],1))
input_val = np.zeros((B.shape[0],p))
output_val = np.zeros((T.shape[0],1))
n1 = T.shape[0]
n2 = B.shape[0]
n3 = V.shape[0]
for i in range(0,n1-1):
input_train[i,:] = input_data[T[i],:]
output_train[i,:] = output_data[T[i]]
for i in range(0,n2-1):
input_test[i,:] = input_data[B[i],:]
output_test[i,:] = output_data[B[i]]
for i in range(0,n3-1):
input_val[i,:] = input_data[V[i],:]
output_val[i,:] = output_data[V[i]]
output_data = output_data.reshape((output_data.size,1))
def ann_train(input_train,output_train,input_val,output_val,input_test,output_test,reckon,hidden,trials,coef,nr_epochs,val_samples,test_samples,directory,columnst,col,row,flag_train):
#Training
num_input = input_train.shape[1] #Number of parameters
reg_size = input_train.shape[0] #Number of examples
num_output = output_train.shape[1] #Number of outputs (in this case, just 1 - susceptibility)
weights = np.random.rand(num_input+1,num_hidden)
peights = np.random.rand(num_hidden+1,num_output)
biasH = np.ones((num_hidden,1))
biasO = np.ones((num_output,1))
S = np.zeros((num_hidden,1))
H = np.zeros((num_hidden,1))
R = np.zeros((num_output,1))
output = np.zeros((reg_size,num_output))
erro = np.zeros((1,nr_epochs))
erro_train = np.zeros((1,nr_epochs))
erro_validate = np.zeros((1,nr_epochs))
def activation(x):
fx = 1/(1+math.exp(-x))
return fx
for epoch in range(0,nr_epochs):
for k in range(0,reg_size):
for i in range(0,num_hidden):
for j in range(0,num_input):
S[i,0] = S[i,0] + input_train[k,j]*weights[j,i]
S[i,0] = S[i,0] + biasH[i,0]*weights[num_input,i]
H[i,0] = activation(S[i,0])
S[i,0] = 0
for i in range(0,num_output):
for j in range(0,num_hidden):
R[i,0] = R[i,0] + H[j,0]*peights[j,i]
R[i,0] = R[i,0] + biasO[i,0]*peights[num_hidden,i]
output[k,i] = activation(R[i,0])
R[i,0] = 0
#Backpropagation
#Uptade the weights in the output layer
for i in range(0,num_hidden):
for j in range(0,num_output):
if i < (num_hidden+1):
peights[i,j] = peights[i,j] + coef*(output_train[k,j] - output[k,j])*output[k,j]*(1-output[k,j])*H[i,0]
elif i == (num_hidden+1):
peights[i,j] = peights[i,j] + coef*(output_train[k,j] - output[k,j])*output[k,j]*(1-output[k,j])*biasO[j,0]
#Uptade the weights in the hidden layer
buff = 0
for j in range(0,num_hidden):
for i in range(0,num_input):
for k1 in range(0,num_output):
buff = buff + (output_train[k,k1] - output[k,k1])*output[k,k1]*(1-output[k,k1])*peights[j,k1]
if i < (num_input+1):
weights[i,j] = weights[i,j] + coef*buff*H[j,0]*(1-H[j,0])*input_train[k,i]
elif i == num_input+1:
weights[i,j] = weights[i,j] + coef*buff*H[j,i]*(1-H[j,0])*biasH[j,0]
buff = 0 #Zeroes the buffer variable
erro_train[0,epoch] = np.linalg.norm((output - output_train)/reg_size)
output_val,erro_val = ann_validate(input_val,output_val,weights,peights,biasH,biasO,max_in,max_out,min_in,min_out)
erro_validate[0,epoch] = np.linalg.norm(erro_val)
#Collects the weights in the minimum validation error
if epoch == 0:
W = weights
P = peights
epoch_min = epoch
erro_min = 1000
elif erro_min > np.linalg.norm(erro_validate[0,epoch]):
W = weights
P = peights
epoch_min = epoch
erro_min = np.linalg.norm(erro_validate[0,epoch])
[output,erro_test,erro_round] = ann_test(input_test,output_test,weights,peights,biasH,biasO)
error = np.linalg.norm(erro_test)
rounded = np.asarray(erro_round).reshape((B.shape[0],1))
error_total = sum(abs(erro_round.T))
#Assign the minimum weights to their original names
weights = W
peights = P
return output,weights,peights,biasH,biasO,erro_train,erro_validate,epoch_min,erro_min,error,erro_test,error_total
#Validations
def ann_validate(input_data,output_data,weights,peights,biasH,biasO,max_in,max_out,min_in,min_out):
    """Forward-propagate the validation set through the trained network.

    input_data  : (records, inputs) normalized validation inputs.
    output_data : (records, outputs) normalized validation targets.
    weights     : (inputs+1, hidden) input->hidden weights; the last row
                  holds the hidden-bias weights.
    peights     : (hidden+1, outputs) hidden->output weights; the last row
                  holds the output-bias weights.
    biasH, biasO: (hidden, 1) and (outputs, 1) bias activation values.
    max_in, max_out, min_in, min_out are accepted for interface
    compatibility but are not used here.

    Returns (output, erro): network outputs of shape (records, outputs)
    and the per-record error (output - target) of shape (1, records).
    """
    n_par = input_data.shape[1]   # inputs per record
    n_rec = input_data.shape[0]   # number of records
    n_out = output_data.shape[1]  # network outputs
    n_hid = peights.shape[0] - 1  # hidden neurons (peights carries one bias row)

    def sigmoid(z):
        # Logistic activation, identical to the one used during training.
        return 1/(1+math.exp(-z))

    hidden_act = np.zeros((n_hid,1))
    output = np.zeros((n_rec,n_out))
    erro = np.zeros((1,n_rec))
    for rec in range(n_rec):
        # Input layer -> hidden layer.
        for h in range(n_hid):
            net = sum(input_data[rec,j]*weights[j,h] for j in range(n_par))
            net = net + biasH[h,0]*weights[n_par,h]
            hidden_act[h,0] = sigmoid(net)
        # Hidden layer -> output layer.
        for o in range(n_out):
            net = sum(hidden_act[j,0]*peights[j,o] for j in range(n_hid))
            net = net + biasO[o,0]*peights[n_hid,o]
            output[rec,o] = sigmoid(net)
        erro[0,rec] = output[rec,:] - output_data[rec,:]
    return output,erro
#Test
def ann_test(input_data,output_data,weights,peights,biasH,biasO):
    """Evaluate the trained network on the test set.

    input_data  : (records, inputs) normalized test inputs.
    output_data : (records, outputs) normalized test targets.
    weights     : (inputs+1, hidden) input->hidden weights (last row = bias).
    peights     : (hidden+1, outputs) hidden->output weights (last row = bias).
    biasH, biasO: bias activation column vectors.

    Returns (output, erro_dec, erro_round):
      output     : (records, outputs) sigmoid network outputs.
      erro_dec   : (1, records) raw per-record error (output - target).
      erro_round : (1, records) erro_dec passed through np.around
                   (round half to even).
    """
    n_par = input_data.shape[1]
    n_rec = input_data.shape[0]
    n_out = output_data.shape[1]
    n_hid = peights.shape[0] - 1

    def sigmoid(z):
        # Logistic activation used throughout the network.
        return 1/(1+math.exp(-z))

    hidden_act = np.zeros((n_hid,1))
    output = np.zeros((n_rec,n_out))
    erro_dec = np.zeros((1,n_rec))
    erro_round = np.zeros((1,n_rec))
    for rec in range(n_rec):
        # Input layer -> hidden layer.
        for h in range(n_hid):
            net = sum(input_data[rec,j]*weights[j,h] for j in range(n_par))
            net = net + biasH[h,0]*weights[n_par,h]
            hidden_act[h,0] = sigmoid(net)
        # Hidden layer -> output layer.
        for o in range(n_out):
            net = sum(hidden_act[j,0]*peights[j,o] for j in range(n_hid))
            net = net + biasO[o,0]*peights[n_hid,o]
            output[rec,o] = sigmoid(net)
        erro_dec[0,rec] = output[rec,:] - output_data[rec,:]
        erro_round[0,rec] = np.around(erro_dec[0,rec])
    return output,erro_dec,erro_round
#Reckon
def ann_reckon(input_data,weights,peights,biasH,biasO):
    """Forward pass used in reckon (prediction) mode.

    Only a single network output is evaluated (num_output is fixed to 1,
    matching how the caller reshapes the result into one raster map).

    input_data  : (records, inputs) normalized inputs to predict on.
    weights     : (inputs+1, hidden) input->hidden weights (last row = bias).
    peights     : (hidden+1, outputs) hidden->output weights (last row = bias).
    biasH, biasO: bias activation column vectors.

    Returns output_reckon with shape (records, 1).
    """
    n_par = input_data.shape[1]
    n_rec = input_data.shape[0]
    n_out = 1  # prediction mode evaluates a single output
    n_hid = peights.shape[0] - 1

    def sigmoid(z):
        # Logistic activation used throughout the network.
        return 1/(1+math.exp(-z))

    hidden_act = np.zeros((n_hid,1))
    output_reckon = np.zeros((n_rec,n_out))
    for rec in range(n_rec):
        # Input layer -> hidden layer.
        for h in range(n_hid):
            net = sum(input_data[rec,j]*weights[j,h] for j in range(n_par))
            net = net + biasH[h,0]*weights[n_par,h]
            hidden_act[h,0] = sigmoid(net)
        # Hidden layer -> (single) output.
        for o in range(n_out):
            net = sum(hidden_act[j,0]*peights[j,o] for j in range(n_hid))
            net = net + biasO[o,0]*peights[n_hid,o]
            output_reckon[rec,o] = sigmoid(net)
    return output_reckon
# Model selection: train one ANN per (hidden-layer size, random initial
# condition) pair and keep the weights with the lowest test-set error.
num_hidden = len(hidden)
erro_buff = 9999999  # best (lowest) test-set error norm seen so far
for k1 in range(0,num_hidden): #hidden neurons tested
    for k2 in range(0,trials): #initial conditions
        # NOTE(review): ann_train is called with the whole `hidden` list and
        # identical arguments on every iteration; confirm that ann_train
        # itself selects the k1-th architecture, otherwise every pass trains
        # the same network and only the random initialization differs.
        [output,Weights,Peights,BiasH,BiasO,erro_train,erro_validate,epoch_min,erro_min,error,erro_test,error_total] = ann_train(input_train,output_train,input_val,output_val,input_test,output_test,reckon,hidden,trials,coef,nr_epochs,val_samples,test_samples,directory,columnst,col,row,flag_train)
        # Progress report for this (architecture, initial condition) pair.
        gscript.message(_("Hidden neuron: "))
        neuron_tested = hidden[k1]
        gscript.message(neuron_tested)
        gscript.message(_("Initial condition: "))
        gscript.message(k2+1)
        gscript.message(_("---------------------------------"))
        if error < erro_buff:
            erro_buff = error
            # New best ANN: snapshot its weights, biases and diagnostics.
            weights = Weights
            peights = Peights
            biasH = BiasH
            biasO = BiasO
            Erro_train = erro_train
            Erro_val = erro_validate
            Early_stop = np.array([epoch_min,erro_min])
            Erro_test = erro_test
            Erro_test_norm = erro_buff
            neurons = hidden[k1]
            Epoch_min = epoch_min
            Erro_min = erro_min
            Total_erro = error_total
    # Report the best ANN found so far and plot its learning curves.
    gscript.message(_("Test set error from the best ANN: "))
    gscript.message(Total_erro)
    gscript.message(_("Best ANN hidden neurons: "))
    gscript.message(neurons)
    # Left panel: training/validation error per epoch plus the early-stop
    # point; right panel: per-instance test-set error.
    x = np.arange(1,nr_epochs+1).reshape((nr_epochs,1))
    plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=None)
    plt.subplot(1,2,1)
    plt.plot(x,Erro_train.T,x,Erro_val.T,'g-', Epoch_min, Erro_min,'g*')
    plt.xlabel('Epochs')
    plt.ylabel('Root mean square output error')
    plt.legend(('Training','Validation','Early stop'))
    plt.subplot(1,2,2)
    plt.bar(np.arange(1,(Erro_test.shape[1])+1),Erro_test.reshape(Erro_test.shape[1]))
    plt.xlabel('Instances')
    plt.ylabel('Error (ANN output - real output)')
    # Save the figure inside the output directory, then step back out.
    os.chdir(directory)
    plt.savefig('ANN_train_val', dpi=300, facecolor='w', edgecolor='w',
            orientation='portrait', papertype=None, format=None,
            transparent=False, bbox_inches='tight', pad_inches=0.2,
            frameon=None)
    os.chdir('../')
#Sensitivity analysis
def sensitivity(input_data,output_size,weights,peights,biasH,biasO,npts=200):
    """One-at-a-time sensitivity analysis of the trained network.

    For each input parameter, build an evaluation set in which that
    parameter sweeps ``npts`` random values in [0.0, 1.0) while every other
    parameter is held at its mean over ``input_data``, then run the network
    (via ann_reckon) on each set.

    input_data  : (records, parameters) normalized inputs.
    output_size : accepted for interface compatibility; not used here.
    weights, peights, biasH, biasO : trained network parameters.
    npts        : number of samples per sweep (default 200; previously a
                  hard-coded constant duplicated in two places).

    Returns (sens_set, fixed_par_value, input_sens, output_sens):
      sens_set        : (npts, 1) random sweep values.
      fixed_par_value : (parameters, 1) mean of each input column.
      input_sens      : (parameters, parameters, npts) evaluation sets.
      output_sens     : list with one network-response array per parameter.
    """
    input_size = input_data.shape[1] #Number of columns (parameters)
    sens_set = np.random.random_sample((npts,1)) #Random floats in the half-open interval [0.0, 1.0)
    fixed_par_value = np.empty([input_size,1])
    # Single-sourced on npts (was a hard-coded np.ones((200,1))).
    ones_sens = np.ones((npts,1))
    #Calculation of the mean value of each entry
    for k in range(0,input_size):
        fixed_par_value[k] = (np.mean(input_data[:,k]))
    #Pre-allocation
    input_sens = [[([1] * npts) for j in range(input_size)] for i in range(input_size)]
    output_sens = [[([0] * npts) for j in range(input_size)] for i in range(input_size)]
    input_sens = np.asarray(input_sens,dtype=float)
    for k1 in range(0,input_size):
        for k2 in range(0,input_size):
            if k1 == k2:
                # The diagonal entry carries the sweep values themselves...
                input_sens[k1,k2,:] = sens_set.reshape(sens_set.shape[0]).T
            else:
                # ...every other parameter is frozen at its mean.
                input_sens[k1,k2,:] = (ones_sens*fixed_par_value[k2]).reshape(ones_sens.shape[0])
    for k1 in range(0,input_size):
        input_sens2 = np.asarray(input_sens[k1]).T
        output = ann_reckon(input_sens2,weights,peights,biasH,biasO)
        output_sens[k1] = output
    return sens_set,fixed_par_value,input_sens,output_sens
# One-at-a-time sensitivity analysis of the best ANN: sweep each input while
# holding the others at their mean value, then plot each response.
[sens_set,fixed_par_value,input_sens,output_sens] = sensitivity(input_data,output_data.shape[1],weights,peights,biasH,biasO)
for k in range(0,input_data.shape[1]):
    # One scatter plot of network response vs. the swept parameter.
    plt.figure()
    plt.plot(sens_set,output_sens[k],'.')
    plt.title('Sensitivity analysis. Parameter: '+columnst[k])
    plt.ylabel('Output response')
    plt.xlabel('Parameter: '+columnst[k])
    os.chdir(directory)
    plt.savefig('SensitivityAnalysisVar_'+columnst[k], dpi=300, facecolor='w', edgecolor='w',
            orientation='portrait', papertype=None, format=None,
            transparent=False, bbox_inches='tight', pad_inches=0.2,
            frameon=None)
    os.chdir('../')
# Persist the best ANN (weights and biases) so it can be reloaded later.
os.chdir(directory)
np.savetxt('weights.txt', (weights), delimiter=',')
np.savetxt('peights.txt', (peights), delimiter=',')
np.savetxt('biasH.txt', (biasH), delimiter=',')
np.savetxt('biasO.txt', (biasO), delimiter=',')
# Dump the data splits and training diagnostics alongside the model.
os.mkdir('Inputs and outputs')
os.chdir('Inputs and outputs')
np.savetxt('Input_test.txt', (input_test), delimiter=',')
np.savetxt('Output_test.txt', (output_test), delimiter=',')
np.savetxt('Input_val.txt', (input_val), delimiter=',')
np.savetxt('Output_val.txt', (output_val), delimiter=',')
np.savetxt('Input_train.txt', (input_train), delimiter=',')
np.savetxt('Output_train.txt', (output_train), delimiter=',')
np.savetxt('Error_train.txt', (Erro_train), delimiter=',')
np.savetxt('Error_val.txt', (Erro_val), delimiter=',')
np.savetxt('Epoch_and_error_min.txt', (Early_stop), delimiter=',')
np.savetxt('Test_set_TOTAL_error.txt', (Total_erro), delimiter=',')
np.savetxt('Error_test.txt', (Erro_test), delimiter=',')
# Human-readable summary of the selected architecture and training setup.
# NOTE(review): `param` is opened without a `with` block; close() is reached
# only if none of the writes raises.
param = open('ANN_Parameters.txt','w')
param.write('Hidden neurons of best ANN: '+str(neurons))
param.write('\n Learning rate: '+str(coef))
param.write('\n Epochs: '+str(nr_epochs))
param.write('\n Hidden neurons tested: '+str(hidden))
param.write('\n Number of initial conditions tested: '+str(trials))
param.close()
os.chdir('../')
os.chdir('../')
#Reckon: when not in training mode, run the network over the reckon set and
# reshape the prediction into a raster-shaped (row, col) array.
if not flag_train:
    output_reckon = ann_reckon(reckon,weights,peights,biasH,biasO)
    a = np.reshape(output_reckon,(col,row),order='F')
    a = np.transpose(a)
else:
    a = 0
return a | [
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"math.floor",
"numpy.array",
"numpy.linalg.norm",
"numpy.nanmin",
"math.exp",
"numpy.arange",
"numpy.mean",
"numpy.reshape",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.empty",
"os.mkdir",
"numpy.concate... | [((292, 313), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (306, 313), False, 'import matplotlib\n'), ((404, 423), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (412, 423), False, 'import os\n'), ((478, 512), 'numpy.zeros', 'np.zeros', (['(1, input_data.shape[1])'], {}), '((1, input_data.shape[1]))\n', (486, 512), True, 'import numpy as np\n'), ((525, 559), 'numpy.zeros', 'np.zeros', (['(1, input_data.shape[1])'], {}), '((1, input_data.shape[1]))\n', (533, 559), True, 'import numpy as np\n'), ((613, 649), 'numpy.concatenate', 'np.concatenate', (['(input_data, reckon)'], {}), '((input_data, reckon))\n', (627, 649), True, 'import numpy as np\n'), ((802, 821), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (810, 821), False, 'import os\n'), ((826, 873), 'numpy.savetxt', 'np.savetxt', (['"""max_in.txt"""', 'max_in'], {'delimiter': '""","""'}), "('max_in.txt', max_in, delimiter=',')\n", (836, 873), True, 'import numpy as np\n'), ((880, 927), 'numpy.savetxt', 'np.savetxt', (['"""min_in.txt"""', 'min_in'], {'delimiter': '""","""'}), "('min_in.txt', min_in, delimiter=',')\n", (890, 927), True, 'import numpy as np\n'), ((934, 949), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (942, 949), False, 'import os\n'), ((1487, 1502), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (1496, 1502), True, 'import numpy as np\n'), ((1511, 1533), 'numpy.zeros', 'np.zeros', (['(n_train, 1)'], {}), '((n_train, 1))\n', (1519, 1533), True, 'import numpy as np\n'), ((1541, 1561), 'numpy.zeros', 'np.zeros', (['(n_val, 1)'], {}), '((n_val, 1))\n', (1549, 1561), True, 'import numpy as np\n'), ((1830, 1846), 'numpy.unique', 'np.unique', (['B_ind'], {}), '(B_ind)\n', (1839, 1846), True, 'import numpy as np\n'), ((1928, 1952), 'numpy.setdiff1d', 'np.setdiff1d', (['reg_set', 'B'], {}), '(reg_set, B)\n', (1940, 1952), True, 'import numpy as np\n'), ((2551, 2582), 'numpy.setdiff1d', 'np.setdiff1d', 
(['reg_rest', 'V[:, 0]'], {}), '(reg_rest, V[:, 0])\n', (2563, 2582), True, 'import numpy as np\n'), ((2741, 2766), 'numpy.zeros', 'np.zeros', (['(B.shape[0], p)'], {}), '((B.shape[0], p))\n', (2749, 2766), True, 'import numpy as np\n'), ((2785, 2810), 'numpy.zeros', 'np.zeros', (['(B.shape[0], 1)'], {}), '((B.shape[0], 1))\n', (2793, 2810), True, 'import numpy as np\n'), ((2828, 2853), 'numpy.zeros', 'np.zeros', (['(T.shape[0], p)'], {}), '((T.shape[0], p))\n', (2836, 2853), True, 'import numpy as np\n'), ((2873, 2898), 'numpy.zeros', 'np.zeros', (['(T.shape[0], 1)'], {}), '((T.shape[0], 1))\n', (2881, 2898), True, 'import numpy as np\n'), ((2914, 2939), 'numpy.zeros', 'np.zeros', (['(B.shape[0], p)'], {}), '((B.shape[0], p))\n', (2922, 2939), True, 'import numpy as np\n'), ((2957, 2982), 'numpy.zeros', 'np.zeros', (['(T.shape[0], 1)'], {}), '((T.shape[0], 1))\n', (2965, 2982), True, 'import numpy as np\n'), ((7967, 8008), 'numpy.random.rand', 'np.random.rand', (['(num_input + 1)', 'num_hidden'], {}), '(num_input + 1, num_hidden)\n', (7981, 8008), True, 'import numpy as np\n'), ((8021, 8063), 'numpy.random.rand', 'np.random.rand', (['(num_hidden + 1)', 'num_output'], {}), '(num_hidden + 1, num_output)\n', (8035, 8063), True, 'import numpy as np\n'), ((8078, 8102), 'numpy.ones', 'np.ones', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (8085, 8102), True, 'import numpy as np\n'), ((8114, 8138), 'numpy.ones', 'np.ones', (['(num_output, 1)'], {}), '((num_output, 1))\n', (8121, 8138), True, 'import numpy as np\n'), ((8146, 8171), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (8154, 8171), True, 'import numpy as np\n'), ((8179, 8204), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (8187, 8204), True, 'import numpy as np\n'), ((8212, 8237), 'numpy.zeros', 'np.zeros', (['(num_output, 1)'], {}), '((num_output, 1))\n', (8220, 8237), True, 'import numpy as np\n'), ((8250, 8282), 'numpy.zeros', 'np.zeros', 
(['(reg_size, num_output)'], {}), '((reg_size, num_output))\n', (8258, 8282), True, 'import numpy as np\n'), ((8293, 8317), 'numpy.zeros', 'np.zeros', (['(1, nr_epochs)'], {}), '((1, nr_epochs))\n', (8301, 8317), True, 'import numpy as np\n'), ((8335, 8359), 'numpy.zeros', 'np.zeros', (['(1, nr_epochs)'], {}), '((1, nr_epochs))\n', (8343, 8359), True, 'import numpy as np\n'), ((8379, 8403), 'numpy.zeros', 'np.zeros', (['(1, nr_epochs)'], {}), '((1, nr_epochs))\n', (8387, 8403), True, 'import numpy as np\n'), ((11278, 11373), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': 'None', 'bottom': 'None', 'right': 'None', 'top': 'None', 'wspace': '(0.5)', 'hspace': 'None'}), '(left=None, bottom=None, right=None, top=None, wspace=\n 0.5, hspace=None)\n', (11297, 11373), True, 'import matplotlib.pyplot as plt\n'), ((11373, 11393), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (11384, 11393), True, 'import matplotlib.pyplot as plt\n'), ((11396, 11474), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'erro_train.T', 'x', 'erro_validate.T', '"""g-"""', 'epoch_min', 'erro_min', '"""g*"""'], {}), "(x, erro_train.T, x, erro_validate.T, 'g-', epoch_min, erro_min, 'g*')\n", (11404, 11474), True, 'import matplotlib.pyplot as plt\n'), ((11474, 11494), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (11484, 11494), True, 'import matplotlib.pyplot as plt\n'), ((11499, 11542), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Root mean square output error"""'], {}), "('Root mean square output error')\n", (11509, 11542), True, 'import matplotlib.pyplot as plt\n'), ((11547, 11599), 'matplotlib.pyplot.legend', 'plt.legend', (["('Training', 'Validation', 'Early stop')"], {}), "(('Training', 'Validation', 'Early stop'))\n", (11557, 11599), True, 'import matplotlib.pyplot as plt\n'), ((11602, 11622), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (11613, 
11622), True, 'import matplotlib.pyplot as plt\n'), ((11712, 11735), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Instances"""'], {}), "('Instances')\n", (11722, 11735), True, 'import matplotlib.pyplot as plt\n'), ((11740, 11786), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error (ANN output - real output)"""'], {}), "('Error (ANN output - real output)')\n", (11750, 11786), True, 'import matplotlib.pyplot as plt\n'), ((11792, 11811), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (11800, 11811), False, 'import os\n'), ((11816, 12014), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ANN_train_val"""'], {'dpi': '(300)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0.2)', 'frameon': 'None'}), "('ANN_train_val', dpi=300, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format=None, transparent=False,\n bbox_inches='tight', pad_inches=0.2, frameon=None)\n", (11827, 12014), True, 'import matplotlib.pyplot as plt\n'), ((12035, 12050), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (12043, 12050), False, 'import os\n'), ((12203, 12225), 'grass.script.message', 'gscript.message', (['error'], {}), '(error)\n', (12218, 12225), True, 'import grass.script as gscript\n'), ((12324, 12343), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (12332, 12343), False, 'import os\n'), ((12348, 12397), 'numpy.savetxt', 'np.savetxt', (['"""weights.txt"""', 'weights'], {'delimiter': '""","""'}), "('weights.txt', weights, delimiter=',')\n", (12358, 12397), True, 'import numpy as np\n'), ((12404, 12453), 'numpy.savetxt', 'np.savetxt', (['"""peights.txt"""', 'peights'], {'delimiter': '""","""'}), "('peights.txt', peights, delimiter=',')\n", (12414, 12453), True, 'import numpy as np\n'), ((12460, 12505), 'numpy.savetxt', 'np.savetxt', (['"""biasH.txt"""', 'biasH'], {'delimiter': 
'""","""'}), "('biasH.txt', biasH, delimiter=',')\n", (12470, 12505), True, 'import numpy as np\n'), ((12512, 12557), 'numpy.savetxt', 'np.savetxt', (['"""biasO.txt"""', 'biasO'], {'delimiter': '""","""'}), "('biasO.txt', biasO, delimiter=',')\n", (12522, 12557), True, 'import numpy as np\n'), ((12564, 12594), 'os.mkdir', 'os.mkdir', (['"""Inputs and outputs"""'], {}), "('Inputs and outputs')\n", (12572, 12594), False, 'import os\n'), ((12599, 12629), 'os.chdir', 'os.chdir', (['"""Inputs and outputs"""'], {}), "('Inputs and outputs')\n", (12607, 12629), False, 'import os\n'), ((12634, 12689), 'numpy.savetxt', 'np.savetxt', (['"""Input_test.txt"""', 'input_test'], {'delimiter': '""","""'}), "('Input_test.txt', input_test, delimiter=',')\n", (12644, 12689), True, 'import numpy as np\n'), ((12696, 12753), 'numpy.savetxt', 'np.savetxt', (['"""Output_test.txt"""', 'output_test'], {'delimiter': '""","""'}), "('Output_test.txt', output_test, delimiter=',')\n", (12706, 12753), True, 'import numpy as np\n'), ((12760, 12813), 'numpy.savetxt', 'np.savetxt', (['"""Input_val.txt"""', 'input_val'], {'delimiter': '""","""'}), "('Input_val.txt', input_val, delimiter=',')\n", (12770, 12813), True, 'import numpy as np\n'), ((12820, 12875), 'numpy.savetxt', 'np.savetxt', (['"""Output_val.txt"""', 'output_val'], {'delimiter': '""","""'}), "('Output_val.txt', output_val, delimiter=',')\n", (12830, 12875), True, 'import numpy as np\n'), ((12882, 12939), 'numpy.savetxt', 'np.savetxt', (['"""Input_train.txt"""', 'input_train'], {'delimiter': '""","""'}), "('Input_train.txt', input_train, delimiter=',')\n", (12892, 12939), True, 'import numpy as np\n'), ((12946, 13005), 'numpy.savetxt', 'np.savetxt', (['"""Output_train.txt"""', 'output_train'], {'delimiter': '""","""'}), "('Output_train.txt', output_train, delimiter=',')\n", (12956, 13005), True, 'import numpy as np\n'), ((13012, 13068), 'numpy.savetxt', 'np.savetxt', (['"""Error_train.txt"""', 'erro_train'], {'delimiter': '""","""'}), 
"('Error_train.txt', erro_train, delimiter=',')\n", (13022, 13068), True, 'import numpy as np\n'), ((13075, 13132), 'numpy.savetxt', 'np.savetxt', (['"""Error_val.txt"""', 'erro_validate'], {'delimiter': '""","""'}), "('Error_val.txt', erro_validate, delimiter=',')\n", (13085, 13132), True, 'import numpy as np\n'), ((13139, 13214), 'numpy.savetxt', 'np.savetxt', (['"""Epoch_and_error_min.txt"""', '(epoch_min, erro_min)'], {'delimiter': '""","""'}), "('Epoch_and_error_min.txt', (epoch_min, erro_min), delimiter=',')\n", (13149, 13214), True, 'import numpy as np\n'), ((13218, 13278), 'numpy.savetxt', 'np.savetxt', (['"""Test_set_TOTAL_error.txt"""', 'error'], {'delimiter': '""","""'}), "('Test_set_TOTAL_error.txt', error, delimiter=',')\n", (13228, 13278), True, 'import numpy as np\n'), ((13285, 13339), 'numpy.savetxt', 'np.savetxt', (['"""Error_test.txt"""', 'erro_test'], {'delimiter': '""","""'}), "('Error_test.txt', erro_test, delimiter=',')\n", (13295, 13339), True, 'import numpy as np\n'), ((13553, 13568), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (13561, 13568), False, 'import os\n'), ((13573, 13588), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (13581, 13588), False, 'import os\n'), ((16402, 16427), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (16410, 16427), True, 'import numpy as np\n'), ((16435, 16460), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (16443, 16460), True, 'import numpy as np\n'), ((16468, 16493), 'numpy.zeros', 'np.zeros', (['(num_output, 1)'], {}), '((num_output, 1))\n', (16476, 16493), True, 'import numpy as np\n'), ((16506, 16538), 'numpy.zeros', 'np.zeros', (['(reg_size, num_output)'], {}), '((reg_size, num_output))\n', (16514, 16538), True, 'import numpy as np\n'), ((17625, 17646), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (17639, 17646), False, 'import matplotlib\n'), ((17737, 17756), 'os.mkdir', 'os.mkdir', 
(['directory'], {}), '(directory)\n', (17745, 17756), False, 'import os\n'), ((17825, 17859), 'numpy.zeros', 'np.zeros', (['(1, input_data.shape[1])'], {}), '((1, input_data.shape[1]))\n', (17833, 17859), True, 'import numpy as np\n'), ((17872, 17906), 'numpy.zeros', 'np.zeros', (['(1, input_data.shape[1])'], {}), '((1, input_data.shape[1]))\n', (17880, 17906), True, 'import numpy as np\n'), ((17960, 17996), 'numpy.concatenate', 'np.concatenate', (['(input_data, reckon)'], {}), '((input_data, reckon))\n', (17974, 17996), True, 'import numpy as np\n'), ((18149, 18168), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (18157, 18168), False, 'import os\n'), ((18173, 18220), 'numpy.savetxt', 'np.savetxt', (['"""max_in.txt"""', 'max_in'], {'delimiter': '""","""'}), "('max_in.txt', max_in, delimiter=',')\n", (18183, 18220), True, 'import numpy as np\n'), ((18227, 18274), 'numpy.savetxt', 'np.savetxt', (['"""min_in.txt"""', 'min_in'], {'delimiter': '""","""'}), "('min_in.txt', min_in, delimiter=',')\n", (18237, 18274), True, 'import numpy as np\n'), ((18281, 18296), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (18289, 18296), False, 'import os\n'), ((18834, 18849), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (18843, 18849), True, 'import numpy as np\n'), ((18858, 18880), 'numpy.zeros', 'np.zeros', (['(n_train, 1)'], {}), '((n_train, 1))\n', (18866, 18880), True, 'import numpy as np\n'), ((18888, 18908), 'numpy.zeros', 'np.zeros', (['(n_val, 1)'], {}), '((n_val, 1))\n', (18896, 18908), True, 'import numpy as np\n'), ((19177, 19193), 'numpy.unique', 'np.unique', (['B_ind'], {}), '(B_ind)\n', (19186, 19193), True, 'import numpy as np\n'), ((19275, 19299), 'numpy.setdiff1d', 'np.setdiff1d', (['reg_set', 'B'], {}), '(reg_set, B)\n', (19287, 19299), True, 'import numpy as np\n'), ((19898, 19929), 'numpy.setdiff1d', 'np.setdiff1d', (['reg_rest', 'V[:, 0]'], {}), '(reg_rest, V[:, 0])\n', (19910, 19929), True, 'import numpy as np\n'), 
((20088, 20113), 'numpy.zeros', 'np.zeros', (['(B.shape[0], p)'], {}), '((B.shape[0], p))\n', (20096, 20113), True, 'import numpy as np\n'), ((20132, 20157), 'numpy.zeros', 'np.zeros', (['(B.shape[0], 1)'], {}), '((B.shape[0], 1))\n', (20140, 20157), True, 'import numpy as np\n'), ((20175, 20200), 'numpy.zeros', 'np.zeros', (['(T.shape[0], p)'], {}), '((T.shape[0], p))\n', (20183, 20200), True, 'import numpy as np\n'), ((20220, 20245), 'numpy.zeros', 'np.zeros', (['(T.shape[0], 1)'], {}), '((T.shape[0], 1))\n', (20228, 20245), True, 'import numpy as np\n'), ((20261, 20286), 'numpy.zeros', 'np.zeros', (['(B.shape[0], p)'], {}), '((B.shape[0], p))\n', (20269, 20286), True, 'import numpy as np\n'), ((20304, 20329), 'numpy.zeros', 'np.zeros', (['(T.shape[0], 1)'], {}), '((T.shape[0], 1))\n', (20312, 20329), True, 'import numpy as np\n'), ((30921, 30948), 'grass.script.message', 'gscript.message', (['Total_erro'], {}), '(Total_erro)\n', (30936, 30948), True, 'import grass.script as gscript\n'), ((31005, 31029), 'grass.script.message', 'gscript.message', (['neurons'], {}), '(neurons)\n', (31020, 31029), True, 'import grass.script as gscript\n'), ((31091, 31186), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': 'None', 'bottom': 'None', 'right': 'None', 'top': 'None', 'wspace': '(0.5)', 'hspace': 'None'}), '(left=None, bottom=None, right=None, top=None, wspace=\n 0.5, hspace=None)\n', (31110, 31186), True, 'import matplotlib.pyplot as plt\n'), ((31186, 31206), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (31197, 31206), True, 'import matplotlib.pyplot as plt\n'), ((31209, 31282), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'Erro_train.T', 'x', 'Erro_val.T', '"""g-"""', 'Epoch_min', 'Erro_min', '"""g*"""'], {}), "(x, Erro_train.T, x, Erro_val.T, 'g-', Epoch_min, Erro_min, 'g*')\n", (31217, 31282), True, 'import matplotlib.pyplot as plt\n'), ((31282, 31302), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Epochs"""'], {}), "('Epochs')\n", (31292, 31302), True, 'import matplotlib.pyplot as plt\n'), ((31307, 31350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Root mean square output error"""'], {}), "('Root mean square output error')\n", (31317, 31350), True, 'import matplotlib.pyplot as plt\n'), ((31355, 31407), 'matplotlib.pyplot.legend', 'plt.legend', (["('Training', 'Validation', 'Early stop')"], {}), "(('Training', 'Validation', 'Early stop'))\n", (31365, 31407), True, 'import matplotlib.pyplot as plt\n'), ((31410, 31430), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (31421, 31430), True, 'import matplotlib.pyplot as plt\n'), ((31520, 31543), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Instances"""'], {}), "('Instances')\n", (31530, 31543), True, 'import matplotlib.pyplot as plt\n'), ((31548, 31594), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error (ANN output - real output)"""'], {}), "('Error (ANN output - real output)')\n", (31558, 31594), True, 'import matplotlib.pyplot as plt\n'), ((31600, 31619), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (31608, 31619), False, 'import os\n'), ((31624, 31822), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ANN_train_val"""'], {'dpi': '(300)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0.2)', 'frameon': 'None'}), "('ANN_train_val', dpi=300, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format=None, transparent=False,\n bbox_inches='tight', pad_inches=0.2, frameon=None)\n", (31635, 31822), True, 'import matplotlib.pyplot as plt\n'), ((31843, 31858), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (31851, 31858), False, 'import os\n'), ((34076, 34095), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (34084, 34095), False, 'import os\n'), ((34100, 
34149), 'numpy.savetxt', 'np.savetxt', (['"""weights.txt"""', 'weights'], {'delimiter': '""","""'}), "('weights.txt', weights, delimiter=',')\n", (34110, 34149), True, 'import numpy as np\n'), ((34156, 34205), 'numpy.savetxt', 'np.savetxt', (['"""peights.txt"""', 'peights'], {'delimiter': '""","""'}), "('peights.txt', peights, delimiter=',')\n", (34166, 34205), True, 'import numpy as np\n'), ((34212, 34257), 'numpy.savetxt', 'np.savetxt', (['"""biasH.txt"""', 'biasH'], {'delimiter': '""","""'}), "('biasH.txt', biasH, delimiter=',')\n", (34222, 34257), True, 'import numpy as np\n'), ((34264, 34309), 'numpy.savetxt', 'np.savetxt', (['"""biasO.txt"""', 'biasO'], {'delimiter': '""","""'}), "('biasO.txt', biasO, delimiter=',')\n", (34274, 34309), True, 'import numpy as np\n'), ((34316, 34346), 'os.mkdir', 'os.mkdir', (['"""Inputs and outputs"""'], {}), "('Inputs and outputs')\n", (34324, 34346), False, 'import os\n'), ((34351, 34381), 'os.chdir', 'os.chdir', (['"""Inputs and outputs"""'], {}), "('Inputs and outputs')\n", (34359, 34381), False, 'import os\n'), ((34386, 34441), 'numpy.savetxt', 'np.savetxt', (['"""Input_test.txt"""', 'input_test'], {'delimiter': '""","""'}), "('Input_test.txt', input_test, delimiter=',')\n", (34396, 34441), True, 'import numpy as np\n'), ((34448, 34505), 'numpy.savetxt', 'np.savetxt', (['"""Output_test.txt"""', 'output_test'], {'delimiter': '""","""'}), "('Output_test.txt', output_test, delimiter=',')\n", (34458, 34505), True, 'import numpy as np\n'), ((34512, 34565), 'numpy.savetxt', 'np.savetxt', (['"""Input_val.txt"""', 'input_val'], {'delimiter': '""","""'}), "('Input_val.txt', input_val, delimiter=',')\n", (34522, 34565), True, 'import numpy as np\n'), ((34572, 34627), 'numpy.savetxt', 'np.savetxt', (['"""Output_val.txt"""', 'output_val'], {'delimiter': '""","""'}), "('Output_val.txt', output_val, delimiter=',')\n", (34582, 34627), True, 'import numpy as np\n'), ((34634, 34691), 'numpy.savetxt', 'np.savetxt', 
(['"""Input_train.txt"""', 'input_train'], {'delimiter': '""","""'}), "('Input_train.txt', input_train, delimiter=',')\n", (34644, 34691), True, 'import numpy as np\n'), ((34698, 34757), 'numpy.savetxt', 'np.savetxt', (['"""Output_train.txt"""', 'output_train'], {'delimiter': '""","""'}), "('Output_train.txt', output_train, delimiter=',')\n", (34708, 34757), True, 'import numpy as np\n'), ((34764, 34820), 'numpy.savetxt', 'np.savetxt', (['"""Error_train.txt"""', 'Erro_train'], {'delimiter': '""","""'}), "('Error_train.txt', Erro_train, delimiter=',')\n", (34774, 34820), True, 'import numpy as np\n'), ((34827, 34879), 'numpy.savetxt', 'np.savetxt', (['"""Error_val.txt"""', 'Erro_val'], {'delimiter': '""","""'}), "('Error_val.txt', Erro_val, delimiter=',')\n", (34837, 34879), True, 'import numpy as np\n'), ((34886, 34950), 'numpy.savetxt', 'np.savetxt', (['"""Epoch_and_error_min.txt"""', 'Early_stop'], {'delimiter': '""","""'}), "('Epoch_and_error_min.txt', Early_stop, delimiter=',')\n", (34896, 34950), True, 'import numpy as np\n'), ((34957, 35022), 'numpy.savetxt', 'np.savetxt', (['"""Test_set_TOTAL_error.txt"""', 'Total_erro'], {'delimiter': '""","""'}), "('Test_set_TOTAL_error.txt', Total_erro, delimiter=',')\n", (34967, 35022), True, 'import numpy as np\n'), ((35029, 35083), 'numpy.savetxt', 'np.savetxt', (['"""Error_test.txt"""', 'Erro_test'], {'delimiter': '""","""'}), "('Error_test.txt', Erro_test, delimiter=',')\n", (35039, 35083), True, 'import numpy as np\n'), ((35436, 35451), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (35444, 35451), False, 'import os\n'), ((35456, 35471), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (35464, 35471), False, 'import os\n'), ((719, 743), 'numpy.nanmax', 'np.nanmax', (['alldata[:, i]'], {}), '(alldata[:, i])\n', (728, 743), True, 'import numpy as np\n'), ((765, 789), 'numpy.nanmin', 'np.nanmin', (['alldata[:, i]'], {}), '(alldata[:, i])\n', (774, 789), True, 'import numpy as np\n'), ((1340, 1367), 
'math.ceil', 'math.ceil', (['(test_samples * n)'], {}), '(test_samples * n)\n', (1349, 1367), False, 'import math\n'), ((1409, 1435), 'math.ceil', 'math.ceil', (['(val_samples * n)'], {}), '(val_samples * n)\n', (1418, 1435), False, 'import math\n'), ((1642, 1655), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (1652, 1655), False, 'import math\n'), ((2083, 2096), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (2093, 2096), False, 'import math\n'), ((2518, 2535), 'numpy.unique', 'np.unique', (['V_buff'], {}), '(V_buff)\n', (2527, 2535), True, 'import numpy as np\n'), ((3931, 3956), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (3939, 3956), True, 'import numpy as np\n'), ((3968, 3993), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (3976, 3993), True, 'import numpy as np\n'), ((4005, 4030), 'numpy.zeros', 'np.zeros', (['(num_output, 1)'], {}), '((num_output, 1))\n', (4013, 4030), True, 'import numpy as np\n'), ((4047, 4079), 'numpy.zeros', 'np.zeros', (['(reg_size, num_output)'], {}), '((reg_size, num_output))\n', (4055, 4079), True, 'import numpy as np\n'), ((4094, 4117), 'numpy.zeros', 'np.zeros', (['(1, reg_size)'], {}), '((1, reg_size))\n', (4102, 4117), True, 'import numpy as np\n'), ((5392, 5417), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (5400, 5417), True, 'import numpy as np\n'), ((5429, 5454), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (5437, 5454), True, 'import numpy as np\n'), ((5466, 5491), 'numpy.zeros', 'np.zeros', (['(num_output, 1)'], {}), '((num_output, 1))\n', (5474, 5491), True, 'import numpy as np\n'), ((5508, 5540), 'numpy.zeros', 'np.zeros', (['(reg_size, num_output)'], {}), '((reg_size, num_output))\n', (5516, 5540), True, 'import numpy as np\n'), ((5559, 5582), 'numpy.zeros', 'np.zeros', (['(1, reg_size)'], {}), '((1, reg_size))\n', (5567, 5582), True, 'import numpy as np\n'), ((5603, 5626), 'numpy.zeros', 
'np.zeros', (['(1, reg_size)'], {}), '((1, reg_size))\n', (5611, 5626), True, 'import numpy as np\n'), ((6836, 6861), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (6844, 6861), True, 'import numpy as np\n'), ((6873, 6898), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (6881, 6898), True, 'import numpy as np\n'), ((6910, 6935), 'numpy.zeros', 'np.zeros', (['(num_output, 1)'], {}), '((num_output, 1))\n', (6918, 6935), True, 'import numpy as np\n'), ((6952, 6984), 'numpy.zeros', 'np.zeros', (['(reg_size, num_output)'], {}), '((reg_size, num_output))\n', (6960, 6984), True, 'import numpy as np\n'), ((10472, 10522), 'numpy.linalg.norm', 'np.linalg.norm', (['((output - output_train) / reg_size)'], {}), '((output - output_train) / reg_size)\n', (10486, 10522), True, 'import numpy as np\n'), ((10678, 10702), 'numpy.linalg.norm', 'np.linalg.norm', (['erro_val'], {}), '(erro_val)\n', (10692, 10702), True, 'import numpy as np\n'), ((11633, 11669), 'numpy.arange', 'np.arange', (['(1)', '(erro_test.shape[1] + 1)'], {}), '(1, erro_test.shape[1] + 1)\n', (11642, 11669), True, 'import numpy as np\n'), ((13866, 13900), 'numpy.random.random_sample', 'np.random.random_sample', (['(npts, 1)'], {}), '((npts, 1))\n', (13889, 13900), True, 'import numpy as np\n'), ((13985, 14010), 'numpy.empty', 'np.empty', (['[input_size, 1]'], {}), '([input_size, 1])\n', (13993, 14010), True, 'import numpy as np\n'), ((14030, 14047), 'numpy.ones', 'np.ones', (['(200, 1)'], {}), '((200, 1))\n', (14037, 14047), True, 'import numpy as np\n'), ((14454, 14489), 'numpy.asarray', 'np.asarray', (['input_sens'], {'dtype': 'float'}), '(input_sens, dtype=float)\n', (14464, 14489), True, 'import numpy as np\n'), ((15275, 15287), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15285, 15287), True, 'import matplotlib.pyplot as plt\n'), ((15296, 15335), 'matplotlib.pyplot.plot', 'plt.plot', (['sens_set', 'output_sens[k]', '"""."""'], {}), 
"(sens_set, output_sens[k], '.')\n", (15304, 15335), True, 'import matplotlib.pyplot as plt\n'), ((15342, 15402), 'matplotlib.pyplot.title', 'plt.title', (["('Sensitivity analysis. Parameter: ' + columnst[k])"], {}), "('Sensitivity analysis. Parameter: ' + columnst[k])\n", (15351, 15402), True, 'import matplotlib.pyplot as plt\n'), ((15410, 15439), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Output response"""'], {}), "('Output response')\n", (15420, 15439), True, 'import matplotlib.pyplot as plt\n'), ((15448, 15487), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Parameter: ' + columnst[k])"], {}), "('Parameter: ' + columnst[k])\n", (15458, 15487), True, 'import matplotlib.pyplot as plt\n'), ((15495, 15514), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (15503, 15514), False, 'import os\n'), ((15523, 15745), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('SensitivityAnalysisVar_' + columnst[k])"], {'dpi': '(300)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0.2)', 'frameon': 'None'}), "('SensitivityAnalysisVar_' + columnst[k], dpi=300, facecolor='w',\n edgecolor='w', orientation='portrait', papertype=None, format=None,\n transparent=False, bbox_inches='tight', pad_inches=0.2, frameon=None)\n", (15534, 15745), True, 'import matplotlib.pyplot as plt\n'), ((15780, 15795), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (15788, 15795), False, 'import os\n'), ((15927, 15975), 'numpy.reshape', 'np.reshape', (['output_reckon', '(col, row)'], {'order': '"""F"""'}), "(output_reckon, (col, row), order='F')\n", (15937, 15975), True, 'import numpy as np\n'), ((15986, 16001), 'numpy.transpose', 'np.transpose', (['a'], {}), '(a)\n', (15998, 16001), True, 'import numpy as np\n'), ((18066, 18090), 'numpy.nanmax', 'np.nanmax', (['alldata[:, i]'], {}), '(alldata[:, i])\n', (18075, 18090), True, 'import 
numpy as np\n'), ((18112, 18136), 'numpy.nanmin', 'np.nanmin', (['alldata[:, i]'], {}), '(alldata[:, i])\n', (18121, 18136), True, 'import numpy as np\n'), ((18687, 18714), 'math.ceil', 'math.ceil', (['(test_samples * n)'], {}), '(test_samples * n)\n', (18696, 18714), False, 'import math\n'), ((18756, 18782), 'math.ceil', 'math.ceil', (['(val_samples * n)'], {}), '(val_samples * n)\n', (18765, 18782), False, 'import math\n'), ((18989, 19002), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (18999, 19002), False, 'import math\n'), ((19430, 19443), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (19440, 19443), False, 'import math\n'), ((19865, 19882), 'numpy.unique', 'np.unique', (['V_buff'], {}), '(V_buff)\n', (19874, 19882), True, 'import numpy as np\n'), ((21298, 21339), 'numpy.random.rand', 'np.random.rand', (['(num_input + 1)', 'num_hidden'], {}), '(num_input + 1, num_hidden)\n', (21312, 21339), True, 'import numpy as np\n'), ((21356, 21398), 'numpy.random.rand', 'np.random.rand', (['(num_hidden + 1)', 'num_output'], {}), '(num_hidden + 1, num_output)\n', (21370, 21398), True, 'import numpy as np\n'), ((21421, 21445), 'numpy.ones', 'np.ones', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (21428, 21445), True, 'import numpy as np\n'), ((21461, 21485), 'numpy.ones', 'np.ones', (['(num_output, 1)'], {}), '((num_output, 1))\n', (21468, 21485), True, 'import numpy as np\n'), ((21497, 21522), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (21505, 21522), True, 'import numpy as np\n'), ((21534, 21559), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (21542, 21559), True, 'import numpy as np\n'), ((21571, 21596), 'numpy.zeros', 'np.zeros', (['(num_output, 1)'], {}), '((num_output, 1))\n', (21579, 21596), True, 'import numpy as np\n'), ((21613, 21645), 'numpy.zeros', 'np.zeros', (['(reg_size, num_output)'], {}), '((reg_size, num_output))\n', (21621, 21645), True, 'import numpy as np\n'), ((21660, 21684), 
'numpy.zeros', 'np.zeros', (['(1, nr_epochs)'], {}), '((1, nr_epochs))\n', (21668, 21684), True, 'import numpy as np\n'), ((21706, 21730), 'numpy.zeros', 'np.zeros', (['(1, nr_epochs)'], {}), '((1, nr_epochs))\n', (21714, 21730), True, 'import numpy as np\n'), ((21754, 21778), 'numpy.zeros', 'np.zeros', (['(1, nr_epochs)'], {}), '((1, nr_epochs))\n', (21762, 21778), True, 'import numpy as np\n'), ((24857, 24882), 'numpy.linalg.norm', 'np.linalg.norm', (['erro_test'], {}), '(erro_test)\n', (24871, 24882), True, 'import numpy as np\n'), ((25668, 25693), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (25676, 25693), True, 'import numpy as np\n'), ((25705, 25730), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (25713, 25730), True, 'import numpy as np\n'), ((25742, 25767), 'numpy.zeros', 'np.zeros', (['(num_output, 1)'], {}), '((num_output, 1))\n', (25750, 25767), True, 'import numpy as np\n'), ((25784, 25816), 'numpy.zeros', 'np.zeros', (['(reg_size, num_output)'], {}), '((reg_size, num_output))\n', (25792, 25816), True, 'import numpy as np\n'), ((25831, 25854), 'numpy.zeros', 'np.zeros', (['(1, reg_size)'], {}), '((1, reg_size))\n', (25839, 25854), True, 'import numpy as np\n'), ((27129, 27154), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (27137, 27154), True, 'import numpy as np\n'), ((27166, 27191), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (27174, 27191), True, 'import numpy as np\n'), ((27203, 27228), 'numpy.zeros', 'np.zeros', (['(num_output, 1)'], {}), '((num_output, 1))\n', (27211, 27228), True, 'import numpy as np\n'), ((27245, 27277), 'numpy.zeros', 'np.zeros', (['(reg_size, num_output)'], {}), '((reg_size, num_output))\n', (27253, 27277), True, 'import numpy as np\n'), ((27296, 27319), 'numpy.zeros', 'np.zeros', (['(1, reg_size)'], {}), '((1, reg_size))\n', (27304, 27319), True, 'import numpy as np\n'), ((27340, 27363), 
'numpy.zeros', 'np.zeros', (['(1, reg_size)'], {}), '((1, reg_size))\n', (27348, 27363), True, 'import numpy as np\n'), ((28573, 28598), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (28581, 28598), True, 'import numpy as np\n'), ((28610, 28635), 'numpy.zeros', 'np.zeros', (['(num_hidden, 1)'], {}), '((num_hidden, 1))\n', (28618, 28635), True, 'import numpy as np\n'), ((28647, 28672), 'numpy.zeros', 'np.zeros', (['(num_output, 1)'], {}), '((num_output, 1))\n', (28655, 28672), True, 'import numpy as np\n'), ((28689, 28721), 'numpy.zeros', 'np.zeros', (['(reg_size, num_output)'], {}), '((reg_size, num_output))\n', (28697, 28721), True, 'import numpy as np\n'), ((31441, 31477), 'numpy.arange', 'np.arange', (['(1)', '(Erro_test.shape[1] + 1)'], {}), '(1, Erro_test.shape[1] + 1)\n', (31450, 31477), True, 'import numpy as np\n'), ((32136, 32170), 'numpy.random.random_sample', 'np.random.random_sample', (['(npts, 1)'], {}), '((npts, 1))\n', (32159, 32170), True, 'import numpy as np\n'), ((32255, 32280), 'numpy.empty', 'np.empty', (['[input_size, 1]'], {}), '([input_size, 1])\n', (32263, 32280), True, 'import numpy as np\n'), ((32300, 32317), 'numpy.ones', 'np.ones', (['(200, 1)'], {}), '((200, 1))\n', (32307, 32317), True, 'import numpy as np\n'), ((32724, 32759), 'numpy.asarray', 'np.asarray', (['input_sens'], {'dtype': 'float'}), '(input_sens, dtype=float)\n', (32734, 32759), True, 'import numpy as np\n'), ((33545, 33557), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (33555, 33557), True, 'import matplotlib.pyplot as plt\n'), ((33566, 33605), 'matplotlib.pyplot.plot', 'plt.plot', (['sens_set', 'output_sens[k]', '"""."""'], {}), "(sens_set, output_sens[k], '.')\n", (33574, 33605), True, 'import matplotlib.pyplot as plt\n'), ((33612, 33672), 'matplotlib.pyplot.title', 'plt.title', (["('Sensitivity analysis. Parameter: ' + columnst[k])"], {}), "('Sensitivity analysis. 
Parameter: ' + columnst[k])\n", (33621, 33672), True, 'import matplotlib.pyplot as plt\n'), ((33680, 33709), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Output response"""'], {}), "('Output response')\n", (33690, 33709), True, 'import matplotlib.pyplot as plt\n'), ((33718, 33757), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Parameter: ' + columnst[k])"], {}), "('Parameter: ' + columnst[k])\n", (33728, 33757), True, 'import matplotlib.pyplot as plt\n'), ((33765, 33784), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (33773, 33784), False, 'import os\n'), ((33793, 34015), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('SensitivityAnalysisVar_' + columnst[k])"], {'dpi': '(300)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0.2)', 'frameon': 'None'}), "('SensitivityAnalysisVar_' + columnst[k], dpi=300, facecolor='w',\n edgecolor='w', orientation='portrait', papertype=None, format=None,\n transparent=False, bbox_inches='tight', pad_inches=0.2, frameon=None)\n", (33804, 34015), True, 'import matplotlib.pyplot as plt\n'), ((34050, 34065), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (34058, 34065), False, 'import os\n'), ((35598, 35646), 'numpy.reshape', 'np.reshape', (['output_reckon', '(col, row)'], {'order': '"""F"""'}), "(output_reckon, (col, row), order='F')\n", (35608, 35646), True, 'import numpy as np\n'), ((35657, 35672), 'numpy.transpose', 'np.transpose', (['a'], {}), '(a)\n', (35669, 35672), True, 'import numpy as np\n'), ((1599, 1628), 'numpy.random.rand', 'np.random.rand', (['(2 * n_test)', '(1)'], {}), '(2 * n_test, 1)\n', (1613, 1628), True, 'import numpy as np\n'), ((2040, 2068), 'numpy.random.rand', 'np.random.rand', (['(2 * n_val)', '(1)'], {}), '(2 * n_val, 1)\n', (2054, 2068), True, 'import numpy as np\n'), ((6387, 6412), 'numpy.around', 'np.around', (['erro_dec[0, k]'], 
{}), '(erro_dec[0, k])\n', (6396, 6412), True, 'import numpy as np\n'), ((11226, 11253), 'numpy.arange', 'np.arange', (['(1)', '(nr_epochs + 1)'], {}), '(1, nr_epochs + 1)\n', (11235, 11253), True, 'import numpy as np\n'), ((12074, 12096), 'numpy.asarray', 'np.asarray', (['erro_round'], {}), '(erro_round)\n', (12084, 12096), True, 'import numpy as np\n'), ((14193, 14218), 'numpy.mean', 'np.mean', (['input_data[:, k]'], {}), '(input_data[:, k])\n', (14200, 14218), True, 'import numpy as np\n'), ((18946, 18975), 'numpy.random.rand', 'np.random.rand', (['(2 * n_test)', '(1)'], {}), '(2 * n_test, 1)\n', (18960, 18975), True, 'import numpy as np\n'), ((19387, 19415), 'numpy.random.rand', 'np.random.rand', (['(2 * n_val)', '(1)'], {}), '(2 * n_val, 1)\n', (19401, 19415), True, 'import numpy as np\n'), ((24027, 24077), 'numpy.linalg.norm', 'np.linalg.norm', (['((output - output_train) / reg_size)'], {}), '((output - output_train) / reg_size)\n', (24041, 24077), True, 'import numpy as np\n'), ((24241, 24265), 'numpy.linalg.norm', 'np.linalg.norm', (['erro_val'], {}), '(erro_val)\n', (24255, 24265), True, 'import numpy as np\n'), ((28124, 28149), 'numpy.around', 'np.around', (['erro_dec[0, k]'], {}), '(erro_dec[0, k])\n', (28133, 28149), True, 'import numpy as np\n'), ((30047, 30077), 'grass.script.message', 'gscript.message', (['neuron_tested'], {}), '(neuron_tested)\n', (30062, 30077), True, 'import grass.script as gscript\n'), ((30144, 30167), 'grass.script.message', 'gscript.message', (['(k2 + 1)'], {}), '(k2 + 1)\n', (30159, 30167), True, 'import grass.script as gscript\n'), ((31039, 31066), 'numpy.arange', 'np.arange', (['(1)', '(nr_epochs + 1)'], {}), '(1, nr_epochs + 1)\n', (31048, 31066), True, 'import numpy as np\n'), ((32463, 32488), 'numpy.mean', 'np.mean', (['input_data[:, k]'], {}), '(input_data[:, k])\n', (32470, 32488), True, 'import numpy as np\n'), ((1723, 1739), 'numpy.unique', 'np.unique', (['B_ind'], {}), '(B_ind)\n', (1732, 1739), True, 'import numpy 
as np\n'), ((2154, 2171), 'numpy.unique', 'np.unique', (['V_buff'], {}), '(V_buff)\n', (2163, 2171), True, 'import numpy as np\n'), ((8451, 8463), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (8459, 8463), False, 'import math\n'), ((10927, 10966), 'numpy.linalg.norm', 'np.linalg.norm', (['erro_validate[0, epoch]'], {}), '(erro_validate[0, epoch])\n', (10941, 10966), True, 'import numpy as np\n'), ((11068, 11107), 'numpy.linalg.norm', 'np.linalg.norm', (['erro_validate[0, epoch]'], {}), '(erro_validate[0, epoch])\n', (11082, 11107), True, 'import numpy as np\n'), ((14883, 14909), 'numpy.asarray', 'np.asarray', (['input_sens[k1]'], {}), '(input_sens[k1])\n', (14893, 14909), True, 'import numpy as np\n'), ((16353, 16365), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (16361, 16365), False, 'import math\n'), ((19070, 19086), 'numpy.unique', 'np.unique', (['B_ind'], {}), '(B_ind)\n', (19079, 19086), True, 'import numpy as np\n'), ((19501, 19518), 'numpy.unique', 'np.unique', (['V_buff'], {}), '(V_buff)\n', (19510, 19518), True, 'import numpy as np\n'), ((24901, 24923), 'numpy.asarray', 'np.asarray', (['erro_round'], {}), '(erro_round)\n', (24911, 24923), True, 'import numpy as np\n'), ((30591, 30622), 'numpy.array', 'np.array', (['[epoch_min, erro_min]'], {}), '([epoch_min, erro_min])\n', (30599, 30622), True, 'import numpy as np\n'), ((33153, 33179), 'numpy.asarray', 'np.asarray', (['input_sens[k1]'], {}), '(input_sens[k1])\n', (33163, 33179), True, 'import numpy as np\n'), ((2225, 2242), 'numpy.unique', 'np.unique', (['V_buff'], {}), '(V_buff)\n', (2234, 2242), True, 'import numpy as np\n'), ((2379, 2392), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (2389, 2392), False, 'import math\n'), ((4203, 4215), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (4211, 4215), False, 'import math\n'), ((5331, 5343), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (5339, 5343), False, 'import math\n'), ((6775, 6787), 'math.exp', 'math.exp', (['(-x)'], {}), 
'(-x)\n', (6783, 6787), False, 'import math\n'), ((19572, 19589), 'numpy.unique', 'np.unique', (['V_buff'], {}), '(V_buff)\n', (19581, 19589), True, 'import numpy as np\n'), ((19726, 19739), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (19736, 19739), False, 'import math\n'), ((21838, 21850), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (21846, 21850), False, 'import math\n'), ((24522, 24561), 'numpy.linalg.norm', 'np.linalg.norm', (['erro_validate[0, epoch]'], {}), '(erro_validate[0, epoch])\n', (24536, 24561), True, 'import numpy as np\n'), ((24679, 24718), 'numpy.linalg.norm', 'np.linalg.norm', (['erro_validate[0, epoch]'], {}), '(erro_validate[0, epoch])\n', (24693, 24718), True, 'import numpy as np\n'), ((25940, 25952), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (25948, 25952), False, 'import math\n'), ((27068, 27080), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (27076, 27080), False, 'import math\n'), ((28512, 28524), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (28520, 28524), False, 'import math\n'), ((2328, 2356), 'numpy.random.rand', 'np.random.rand', (['(2 * n_val)', '(1)'], {}), '(2 * n_val, 1)\n', (2342, 2356), True, 'import numpy as np\n'), ((19675, 19703), 'numpy.random.rand', 'np.random.rand', (['(2 * n_val)', '(1)'], {}), '(2 * n_val, 1)\n', (19689, 19703), True, 'import numpy as np\n')] |
"""Tests for wrapper submodule."""
import numpy as np
from pspca import PSPCA
def test_interface():
"""Test scikit-learn interface."""
points = np.array(
[
[np.sqrt(0.5), np.sqrt(0.5), 0],
[-np.sqrt(0.5), np.sqrt(0.5), 0],
]
)
dim = 3
reducer = PSPCA(dim)
reducer.fit(points)
np.testing.assert_allclose(reducer.v, np.eye(3))
t_points = reducer.transform(points)
assert t_points.shape[0] == points.shape[0]
assert t_points.shape[1] == dim
np.testing.assert_allclose(reducer.fit_transform(points), t_points)
| [
"numpy.sqrt",
"numpy.eye",
"pspca.PSPCA"
] | [((309, 319), 'pspca.PSPCA', 'PSPCA', (['dim'], {}), '(dim)\n', (314, 319), False, 'from pspca import PSPCA\n'), ((387, 396), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (393, 396), True, 'import numpy as np\n'), ((188, 200), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (195, 200), True, 'import numpy as np\n'), ((202, 214), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (209, 214), True, 'import numpy as np\n'), ((248, 260), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (255, 260), True, 'import numpy as np\n'), ((234, 246), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (241, 246), True, 'import numpy as np\n')] |
import numpy as np
lst = [[1, 3, 4], [2, 8, 9]]
a = np.size(lst)
a_ = np.array(lst).size
b = np.shape(lst)
b_ = np.array(lst).shape
c = np.ndim(lst)
c_ = np.array(lst).ndim
d = np.dtype(lst)
d_ = np.array(lst).dtype
# show_store()
| [
"numpy.shape",
"numpy.size",
"numpy.ndim",
"numpy.array",
"numpy.dtype"
] | [((53, 65), 'numpy.size', 'np.size', (['lst'], {}), '(lst)\n', (60, 65), True, 'import numpy as np\n'), ((94, 107), 'numpy.shape', 'np.shape', (['lst'], {}), '(lst)\n', (102, 107), True, 'import numpy as np\n'), ((137, 149), 'numpy.ndim', 'np.ndim', (['lst'], {}), '(lst)\n', (144, 149), True, 'import numpy as np\n'), ((178, 191), 'numpy.dtype', 'np.dtype', (['lst'], {}), '(lst)\n', (186, 191), True, 'import numpy as np\n'), ((71, 84), 'numpy.array', 'np.array', (['lst'], {}), '(lst)\n', (79, 84), True, 'import numpy as np\n'), ((113, 126), 'numpy.array', 'np.array', (['lst'], {}), '(lst)\n', (121, 126), True, 'import numpy as np\n'), ((155, 168), 'numpy.array', 'np.array', (['lst'], {}), '(lst)\n', (163, 168), True, 'import numpy as np\n'), ((197, 210), 'numpy.array', 'np.array', (['lst'], {}), '(lst)\n', (205, 210), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This file is a part of the CIRN Interaction Language.
from __future__ import print_function, division
import logging
import numpy as np
import pandas as pd
import pyflux as pf
def predict_all_freqs(data, training_lag, steps):
"""Takes a numpy matrix containing duty cycles with time in the first
coordinate and frequency in the second, and calculates another numpy
matrix containing the VAR predictions with steps many new time indices
and the same number of frequencies. We use a single model across
frequencies (this is NOT how spec-val is implemented)."""
columns = [str(i) for i in range(data.shape[1])]
data = pd.DataFrame(data, columns=columns)
model = pf.VAR(data=data, lags=training_lag)
model.fit()
return model.predict(h=steps)
def predict_freq_by_freq(data, training_lag, steps):
"""Takes a numpy matrix containing duty cycles with time in the first
coordinate and frequency in the second, and calculates another numpy
matrix containing the VAR predictions with steps many new time indices
and the same number of frequencies. We use separate models for each
frequencies (this is how spec-val is implemented)."""
output = np.zeros((steps, data.shape[1]), dtype=np.float32)
for i in range(data.shape[1]):
data2 = pd.DataFrame(data[:, i:i+1], columns=["x"])
model = pf.VAR(data=data2, lags=training_lag)
model.fit()
result = model.predict(h=steps)
output[:, i:i+1] = result
return output
def predict_the_future(data, training_len, training_lag, training_rate, training_noise=1e-5):
"""Takes a numpy matrix containing duty cycles with time in the first
coordinate and frequency in the second, and calculates another numpy
matrix with the same shape containing the VAR predictions. The first
training_len many output values will be set to zero. All parameters
are integers and represent time steps."""
assert 0 < training_lag < training_len and 0 < training_rate
output = np.zeros(data.shape, dtype=np.float32)
# add noise
data = np.array(data, dtype=np.double, copy=True)
data += np.random.normal(size=data.shape, scale=training_noise)
# last reported percentage
report = -1
for start in range(training_len, data.shape[0], training_rate):
steps = min(start+training_rate, data.shape[0]) - start
output[start:start + steps] = predict_freq_by_freq(
data[start - training_len: start, :], training_lag, steps)
current = start / data.shape[0]
if current - report > 0.05:
logging.info("Training at %s", "{:.0%}".format(current))
report = current
return output
if __name__ == '__main__':
data = np.zeros((20, 3))
for i in range(data.shape[0]):
data[i][0] = i
data[i][1] = np.random.randint(-20, 20)
data[i][2] = data[i][0] + data[i][1]
pred = predict_the_future(data, 10, 1, 1, training_noise=1)
print(data)
print(pred)
| [
"numpy.random.normal",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"pandas.DataFrame",
"pyflux.VAR"
] | [((1774, 1809), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (1786, 1809), True, 'import pandas as pd\n'), ((1822, 1858), 'pyflux.VAR', 'pf.VAR', ([], {'data': 'data', 'lags': 'training_lag'}), '(data=data, lags=training_lag)\n', (1828, 1858), True, 'import pyflux as pf\n'), ((2331, 2381), 'numpy.zeros', 'np.zeros', (['(steps, data.shape[1])'], {'dtype': 'np.float32'}), '((steps, data.shape[1]), dtype=np.float32)\n', (2339, 2381), True, 'import numpy as np\n'), ((3161, 3199), 'numpy.zeros', 'np.zeros', (['data.shape'], {'dtype': 'np.float32'}), '(data.shape, dtype=np.float32)\n', (3169, 3199), True, 'import numpy as np\n'), ((3228, 3270), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.double', 'copy': '(True)'}), '(data, dtype=np.double, copy=True)\n', (3236, 3270), True, 'import numpy as np\n'), ((3283, 3338), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'data.shape', 'scale': 'training_noise'}), '(size=data.shape, scale=training_noise)\n', (3299, 3338), True, 'import numpy as np\n'), ((3886, 3903), 'numpy.zeros', 'np.zeros', (['(20, 3)'], {}), '((20, 3))\n', (3894, 3903), True, 'import numpy as np\n'), ((2434, 2479), 'pandas.DataFrame', 'pd.DataFrame', (['data[:, i:i + 1]'], {'columns': "['x']"}), "(data[:, i:i + 1], columns=['x'])\n", (2446, 2479), True, 'import pandas as pd\n'), ((2494, 2531), 'pyflux.VAR', 'pf.VAR', ([], {'data': 'data2', 'lags': 'training_lag'}), '(data=data2, lags=training_lag)\n', (2500, 2531), True, 'import pyflux as pf\n'), ((3983, 4009), 'numpy.random.randint', 'np.random.randint', (['(-20)', '(20)'], {}), '(-20, 20)\n', (4000, 4009), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
import toolkit.metrics as metrics
class TestMotion(TestCase):
def test_true_positives(self):
y_true = np.array([[1, 1, 1, 1],
[1, 0, 1, 1],
[0, 0, 0, 1]])
y_pred = np.array([[0, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 0, 1]])
tp, _, _, _ = metrics.multilabel_tp_fp_tn_fn_scores(y_true, y_pred)
self.assertSequenceEqual(tp.tolist(), [0, 1, 2, 3])
def test_false_positives(self):
y_true = np.array([[1, 1, 1, 0],
[1, 1, 0, 0],
[1, 0, 0, 0]])
y_pred = np.array([[0, 0, 1, 1],
[1, 0, 1, 1],
[1, 1, 1, 1]])
_, fp, _, _ = metrics.multilabel_tp_fp_tn_fn_scores(y_true, y_pred)
self.assertSequenceEqual(fp.tolist(), [0, 1, 2, 3])
def test_true_negatives(self):
y_true = np.array([[1, 1, 1, 0],
[1, 1, 0, 0],
[1, 0, 0, 0]])
y_pred = np.array([[1, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]])
_, _, tn, _ = metrics.multilabel_tp_fp_tn_fn_scores(y_true, y_pred)
self.assertSequenceEqual(tn.tolist(), [0, 1, 2, 3])
def test_false_negatives(self):
y_true = np.array([[1, 0, 0, 1],
[1, 1, 1, 1],
[1, 0, 1, 1]])
y_pred = np.array([[1, 0, 1, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]])
_, _, _, fn = metrics.multilabel_tp_fp_tn_fn_scores(y_true, y_pred)
self.assertSequenceEqual(fn.tolist(), [0, 1, 2, 3]) | [
"numpy.array",
"toolkit.metrics.multilabel_tp_fp_tn_fn_scores"
] | [((167, 219), 'numpy.array', 'np.array', (['[[1, 1, 1, 1], [1, 0, 1, 1], [0, 0, 0, 1]]'], {}), '([[1, 1, 1, 1], [1, 0, 1, 1], [0, 0, 0, 1]])\n', (175, 219), True, 'import numpy as np\n'), ((291, 343), 'numpy.array', 'np.array', (['[[0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 0, 1]]'], {}), '([[0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 0, 1]])\n', (299, 343), True, 'import numpy as np\n'), ((420, 473), 'toolkit.metrics.multilabel_tp_fp_tn_fn_scores', 'metrics.multilabel_tp_fp_tn_fn_scores', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (457, 473), True, 'import toolkit.metrics as metrics\n'), ((588, 640), 'numpy.array', 'np.array', (['[[1, 1, 1, 0], [1, 1, 0, 0], [1, 0, 0, 0]]'], {}), '([[1, 1, 1, 0], [1, 1, 0, 0], [1, 0, 0, 0]])\n', (596, 640), True, 'import numpy as np\n'), ((712, 764), 'numpy.array', 'np.array', (['[[0, 0, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]'], {}), '([[0, 0, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]])\n', (720, 764), True, 'import numpy as np\n'), ((841, 894), 'toolkit.metrics.multilabel_tp_fp_tn_fn_scores', 'metrics.multilabel_tp_fp_tn_fn_scores', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (878, 894), True, 'import toolkit.metrics as metrics\n'), ((1008, 1060), 'numpy.array', 'np.array', (['[[1, 1, 1, 0], [1, 1, 0, 0], [1, 0, 0, 0]]'], {}), '([[1, 1, 1, 0], [1, 1, 0, 0], [1, 0, 0, 0]])\n', (1016, 1060), True, 'import numpy as np\n'), ((1132, 1184), 'numpy.array', 'np.array', (['[[1, 0, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0]]'], {}), '([[1, 0, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0]])\n', (1140, 1184), True, 'import numpy as np\n'), ((1261, 1314), 'toolkit.metrics.multilabel_tp_fp_tn_fn_scores', 'metrics.multilabel_tp_fp_tn_fn_scores', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1298, 1314), True, 'import toolkit.metrics as metrics\n'), ((1429, 1481), 'numpy.array', 'np.array', (['[[1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 1, 1]]'], {}), '([[1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 1, 1]])\n', (1437, 1481), True, 'import numpy as np\n'), ((1553, 1605), 'numpy.array', 
'np.array', (['[[1, 0, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0]]'], {}), '([[1, 0, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0]])\n', (1561, 1605), True, 'import numpy as np\n'), ((1682, 1735), 'toolkit.metrics.multilabel_tp_fp_tn_fn_scores', 'metrics.multilabel_tp_fp_tn_fn_scores', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1719, 1735), True, 'import toolkit.metrics as metrics\n')] |
#!/usr/bin/env python
"""
A service like daemon for calculating components of the gradient
"""
# pylint: disable=invalid-name, unused-import, multiple-imports
import platform
import numpy as np
import uuid
import sys, time, os, atexit, json
from SparseSC.cli.daemon import Daemon
from yaml import load, dump
import tempfile
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
# pluck = lambda d, *args: (d[arg] for arg in args)
def pluck(d, *args):
"""
pluckr
"""
# print("hello pluckr"); sys.stdout.flush()
out = [None] * len(args)
for i, key in enumerate(args):
# print("key: " + key); sys.stdout.flush()
try:
out[i] = d[key]
except KeyError:
raise RuntimeError("no such key '{}'".format(key))
return out
def grad_part(common, part, k):
"""
Calculate a single component of the gradient
"""
N0, N1, in_controls, splits, w_pen, treated_units, Y_treated, Y_control, dA_dV_ki, dB_dV_ki = pluck(
common,
"N0",
"N1",
"in_controls",
"splits",
"w_pen",
"treated_units",
"Y_treated",
"Y_control",
"dA_dV_ki",
"dB_dV_ki",
)
in_controls2 = [np.ix_(i, i) for i in in_controls]
A, weights, b_i = pluck(part, "A", "weights", "b_i")
dA_dV_ki_k, dB_dV_ki_k = dA_dV_ki[k], dB_dV_ki[k]
dPI_dV = np.zeros((N0, N1)) # stupid notation: PI = W.T
try:
for i, (_, (_, test)) in enumerate(zip(in_controls, splits)):
dA = dA_dV_ki_k[i]
dB = dB_dV_ki_k[i]
try:
b = np.linalg.solve(A[in_controls2[i]], dB - dA.dot(b_i[i]))
except np.linalg.LinAlgError as exc:
print("Unique weights not possible.")
if w_pen == 0:
print("Try specifying a very small w_pen rather than 0.")
raise exc
dPI_dV[np.ix_(in_controls[i], treated_units[test])] = b
# einsum is faster than the equivalent (Ey * Y_control.T.dot(dPI_dV).T.getA()).sum()
return 2 * np.einsum(
"ij,kj,ki->", (weights.T.dot(Y_control) - Y_treated), Y_control, dPI_dV
)
except Exception as err:
print("{}: {}".format(err.__class__.__name__, getattr(err, "message", "<>")))
raise RuntimeError("bye from scgrad")
DAEMON_FIFO = "/tmp/sc-daemon.fifo"
DAEMON_PID = "/tmp/sc-gradient-daemon.pid"
#-- print("DAEMON_FIFO: " + DAEMON_FIFO)
#-- print("DAEMON_PID: " + DAEMON_PID)
_CONTAINER_OUTPUT_FILE = "output.yaml" # Standard Output file
_GRAD_COMMON_FILE = "common.yaml"
_GRAD_PART_FILE = "part.yaml"
_BASENAMES = [_GRAD_COMMON_FILE, _GRAD_PART_FILE, _CONTAINER_OUTPUT_FILE]
class TestDaemon(Daemon):
"""
A daemon which calculates Sparse SC gradient components
"""
def run(self):
print("run says hi: ")
sys.stdout.flush()
# pylint: disable=no-self-use
while True:
with open(DAEMON_FIFO, "r") as fifo:
try:
params = fifo.read()
print("run says hi: " + params)
sys.stdout.flush()
tmpdirname, return_fifo, k = json.loads(params)
common_file, part_file, out_file = [
os.join(tmpdirname, name) for name in _BASENAMES
]
print([common_file, part_file, out_file, return_fifo, k])
sys.stdout.flush()
except: # pylint: disable=bare-except
# SOMETHING WENT WRONG, RESPOND WITH A NON-ZERO
try:
with open(return_fifo, "w") as rf:
rf.write("1")
except: # pylint: disable=bare-except
pass
else:
print("daemon something went wrong: ")
sys.stdout.flush()
else:
# SEND THE SUCCESS RESPONSE
print("daemon all done: ")
sys.stdout.flush()
with open(return_fifo, "w") as rf:
rf.write("0")
class GradientDaemon(Daemon):
    """
    A daemon which calculates Sparse SC gradient components.

    Each request read from DAEMON_FIFO is a JSON list
    [tmpdirname, return_fifo, k]. Inputs are loaded from common.yaml and
    part.yaml under tmpdirname, grad_part() is run for component k, the
    result is dumped to output.yaml, and "0" (success) or "1" (failure)
    is written to the caller's return FIFO.
    """
    def run(self):
        # pylint: disable=no-self-use
        while True:
            # Opening the FIFO blocks until a client writes a request.
            with open(DAEMON_FIFO, "r") as fifo:
                try:
                    params = fifo.read()
                    print("params: " + params)
                    sys.stdout.flush()
                    tmpdirname, return_fifo, k = json.loads(params)
                    print(_BASENAMES)
                    # Debug: show the job directory contents.
                    for file in os.listdir(tmpdirname):
                        print(file)
                    common_file, part_file, out_file = [
                        os.path.join(tmpdirname, name) for name in _BASENAMES
                    ]
                    print([common_file, part_file, out_file, return_fifo, k])
                    sys.stdout.flush()
                    # LOAD IN THE INPUT FILES
                    with open(common_file, "r") as fp:
                        common = load(fp, Loader=Loader)
                    with open(part_file, "r") as fp:
                        part = load(fp, Loader=Loader)
                    # DO THE WORK
                    print("about to do work: ")
                    sys.stdout.flush()
                    grad = grad_part(common, part, int(k))
                    print("did work: ")
                    sys.stdout.flush()
                    # DUMP THE RESULT TO THE OUTPUT FILE
                    with open(out_file, "w") as fp:
                        fp.write(dump(grad, Dumper=Dumper))
                except Exception as err:  # pylint: disable=bare-except
                    # SOMETHING WENT WRONG, RESPOND WITH A NON-ZERO
                    try:
                        with open(return_fifo, "w") as rf:
                            rf.write("1")
                    except:  # pylint: disable=bare-except
                        # return_fifo may be unbound/unopenable; nothing to report to.
                        print("double failed...: ")
                        sys.stdout.flush()
                    else:
                        print(
                            "failed with {}: {}",
                            err.__class__.__name__,
                            getattr(err, "message", "<>"),
                        )
                        sys.stdout.flush()
                else:
                    # SEND THE SUCCESS RESPONSE
                    print("success...: ")
                    sys.stdout.flush()
                    with open(return_fifo, "w") as rf:
                        rf.write("0")
                    print("and wrote about it...: ")
                    sys.stdout.flush()
def main(test=False):  # pylint: disable=inconsistent-return-statements
    """
    CLI entry point: control the gradient daemon (start/stop/status/restart)
    or, given (commonfile, partfile, outfile, k), submit one gradient job to
    the running daemon and return its response string ("0" on success).
    """
    try:
        # Daemonization needs fork(). On platforms without it, os.fork is an
        # *absent attribute* (AttributeError), not an undefined name -- the
        # previous `except NameError` could never fire.
        os.fork  # pylint: disable=no-member
    except AttributeError:
        raise RuntimeError(
            "scgrad.py depends on os.fork, which is not available on this system."
        )
    ARGS = sys.argv[1:]
    if ARGS[0] == "scgrad.py":
        ARGS.pop(0)
    WorkerDaemon = TestDaemon if test else GradientDaemon
    # --------------------------------------------------
    # Daemon controllers
    # --------------------------------------------------
    if ARGS[0] == "start":
        print("starting daemon")
        daemon = WorkerDaemon(DAEMON_PID, DAEMON_FIFO)
        daemon.start()
        return "0"
    if ARGS[0] == "status":
        if not os.path.exists(DAEMON_PID):
            print("Daemon not running")
            return '0'
        # BUG FIX: the PID file must be opened for *reading*; mode 'w'
        # truncated the file and always read back an empty string. Also
        # strip the trailing newline so the /proc comparison can match.
        with open(DAEMON_PID, 'r') as fh:
            _pid = fh.read().strip()
        pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
        if _pid in pids:
            print("daemon process (pid {}) is running".format(_pid))
        else:
            print("daemon process (pid {}) NOT is running".format(_pid))
        return '0'
    if ARGS[0] == "stop":
        print("stopping daemon")
        daemon = WorkerDaemon(DAEMON_PID, DAEMON_FIFO)
        daemon.stop()
        return "0"
    if ARGS[0] == "restart":
        print("restarting daemon")
        daemon = WorkerDaemon(DAEMON_PID, DAEMON_FIFO)
        daemon.restart()
        return "0"
    # --------------------------------------------------
    # Gradient job
    # --------------------------------------------------
    try:
        with open(DAEMON_PID, "r") as pf:
            pid = int(pf.read().strip())
    except IOError:
        pid = None
    if not pid:
        raise RuntimeError("please start the daemon")
    assert (
        len(ARGS) == 4
    ), "ssc.py expects 4 parameters, including a commonfile, partfile, outfile and the component index"
    try:
        int(ARGS[3])
    except ValueError:
        raise ValueError("The args[3] must be an integer")
    # CREATE THE RESPONSE FIFO (unique per client so replies cannot collide)
    RETURN_FIFO = os.path.join("/tmp/sc-" + str(uuid.uuid4()) + ".fifo")
    os.mkfifo(RETURN_FIFO)  # pylint: disable=no-member
    def cleanup():
        # Remove the response FIFO at interpreter exit.
        os.remove(RETURN_FIFO)
    atexit.register(cleanup)
    # SEND THE ARGS TO THE DAEMON
    with open(DAEMON_FIFO, "w") as d_send:
        d_send.write(json.dumps(ARGS + [RETURN_FIFO]))
    # LISTEN FOR THE RESPONSE (blocks until the daemon answers)
    with open(RETURN_FIFO, "r") as d_return:
        response = d_return.read()
    print("daemon sent response: {}".format(response))
    return response
if __name__ == "__main__":
    # Script entry point: forward argv to main() and report the daemon's reply.
    print("hi from scgrad", sys.argv)
    condition_flag = main(test=False)
    if condition_flag == "0":
        print("Test Daemon worked!")
    else:
        print("Something went wrong: {}".format(condition_flag))
| [
"os.path.exists",
"json.loads",
"os.listdir",
"os.join",
"yaml.dump",
"json.dumps",
"os.path.join",
"yaml.load",
"numpy.ix_",
"uuid.uuid4",
"numpy.zeros",
"os.mkfifo",
"sys.stdout.flush",
"atexit.register",
"os.remove"
] | [((1455, 1473), 'numpy.zeros', 'np.zeros', (['(N0, N1)'], {}), '((N0, N1))\n', (1463, 1473), True, 'import numpy as np\n'), ((9152, 9174), 'os.mkfifo', 'os.mkfifo', (['RETURN_FIFO'], {}), '(RETURN_FIFO)\n', (9161, 9174), False, 'import sys, time, os, atexit, json\n'), ((9260, 9284), 'atexit.register', 'atexit.register', (['cleanup'], {}), '(cleanup)\n', (9275, 9284), False, 'import sys, time, os, atexit, json\n'), ((1294, 1306), 'numpy.ix_', 'np.ix_', (['i', 'i'], {}), '(i, i)\n', (1300, 1306), True, 'import numpy as np\n'), ((2951, 2969), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2967, 2969), False, 'import sys, time, os, atexit, json\n'), ((9232, 9254), 'os.remove', 'os.remove', (['RETURN_FIFO'], {}), '(RETURN_FIFO)\n', (9241, 9254), False, 'import sys, time, os, atexit, json\n'), ((7702, 7728), 'os.path.exists', 'os.path.exists', (['DAEMON_PID'], {}), '(DAEMON_PID)\n', (7716, 7728), False, 'import sys, time, os, atexit, json\n'), ((9384, 9416), 'json.dumps', 'json.dumps', (['(ARGS + [RETURN_FIFO])'], {}), '(ARGS + [RETURN_FIFO])\n', (9394, 9416), False, 'import sys, time, os, atexit, json\n'), ((1997, 2040), 'numpy.ix_', 'np.ix_', (['in_controls[i]', 'treated_units[test]'], {}), '(in_controls[i], treated_units[test])\n', (2003, 2040), True, 'import numpy as np\n'), ((7895, 7914), 'os.listdir', 'os.listdir', (['"""/proc"""'], {}), "('/proc')\n", (7905, 7914), False, 'import sys, time, os, atexit, json\n'), ((3211, 3229), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3227, 3229), False, 'import sys, time, os, atexit, json\n'), ((3279, 3297), 'json.loads', 'json.loads', (['params'], {}), '(params)\n', (3289, 3297), False, 'import sys, time, os, atexit, json\n'), ((3548, 3566), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3564, 3566), False, 'import sys, time, os, atexit, json\n'), ((4177, 4195), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4193, 4195), False, 'import sys, time, os, atexit, json\n'), 
((4653, 4671), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4669, 4671), False, 'import sys, time, os, atexit, json\n'), ((4721, 4739), 'json.loads', 'json.loads', (['params'], {}), '(params)\n', (4731, 4739), False, 'import sys, time, os, atexit, json\n'), ((4810, 4832), 'os.listdir', 'os.listdir', (['tmpdirname'], {}), '(tmpdirname)\n', (4820, 4832), False, 'import sys, time, os, atexit, json\n'), ((5125, 5143), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5141, 5143), False, 'import sys, time, os, atexit, json\n'), ((5514, 5532), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5530, 5532), False, 'import sys, time, os, atexit, json\n'), ((5652, 5670), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5668, 5670), False, 'import sys, time, os, atexit, json\n'), ((6684, 6702), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6700, 6702), False, 'import sys, time, os, atexit, json\n'), ((6870, 6888), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6886, 6888), False, 'import sys, time, os, atexit, json\n'), ((9123, 9135), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9133, 9135), False, 'import uuid\n'), ((3379, 3404), 'os.join', 'os.join', (['tmpdirname', 'name'], {}), '(tmpdirname, name)\n', (3386, 3404), False, 'import sys, time, os, atexit, json\n'), ((4951, 4981), 'os.path.join', 'os.path.join', (['tmpdirname', 'name'], {}), '(tmpdirname, name)\n', (4963, 4981), False, 'import sys, time, os, atexit, json\n'), ((5279, 5302), 'yaml.load', 'load', (['fp'], {'Loader': 'Loader'}), '(fp, Loader=Loader)\n', (5283, 5302), False, 'from yaml import load, dump\n'), ((5387, 5410), 'yaml.load', 'load', (['fp'], {'Loader': 'Loader'}), '(fp, Loader=Loader)\n', (5391, 5410), False, 'from yaml import load, dump\n'), ((4019, 4037), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4035, 4037), False, 'import sys, time, os, atexit, json\n'), ((5814, 5839), 'yaml.dump', 'dump', (['grad'], {'Dumper': 
'Dumper'}), '(grad, Dumper=Dumper)\n', (5818, 5839), False, 'from yaml import load, dump\n'), ((6531, 6549), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6547, 6549), False, 'import sys, time, os, atexit, json\n'), ((6244, 6262), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6260, 6262), False, 'import sys, time, os, atexit, json\n')] |
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import pytest
import numpy as np
import nilearn
from mne import Covariance
from mne.simulation import add_noise
from mne_nirs.experimental_design import make_first_level_design_matrix
from mne_nirs.statistics import run_GLM
from mne_nirs.simulation import simulate_nirs_raw
# IIR filter coefficients passed to mne.simulation.add_noise in the tests
# below to colour the simulated noise.
iir_filter = [1., -0.58853134, -0.29575669, -0.52246482, 0.38735476, 0.024286]
@pytest.mark.filterwarnings('ignore:.*nilearn.glm module is experimental.*:')
def test_run_GLM():
    """run_GLM recovers the simulated amplitude and matches nilearn's types."""
    raw = simulate_nirs_raw(sig_dur=200, stim_dur=5.)
    dm = make_first_level_design_matrix(raw, stim_dur=5., drift_order=1,
                                        drift_model='polynomial')
    estimates = run_GLM(raw, dm)
    # One estimate per channel.
    assert len(estimates) == len(raw.ch_names)
    # The recovered amplitude must be within 10% of the simulated 1e-6.
    assert abs(estimates["Simulated"].theta[0] - 1.e-6) < 0.1e-6
    # ensure we return the same type as nilearn to encourage compatibility
    _, nilearn_est = nilearn.glm.first_level.run_glm(
        raw.get_data(0).T, dm.values)
    assert type(nilearn_est) == type(estimates)
def test_run_GLM_order():
    """The noise_model string controls the fitted AR model order."""
    raw = simulate_nirs_raw(sig_dur=200, stim_dur=5., sfreq=3)
    dm = make_first_level_design_matrix(raw, stim_dur=5., drift_order=1,
                                        drift_model='polynomial')
    # The default noise model is a first-order AR process.
    assert run_GLM(raw, dm)['Simulated'].model.order == 1
    # 'arN' requests an AR model of order N.
    assert run_GLM(raw, dm, noise_model='ar2')['Simulated'].model.order == 2
    assert run_GLM(raw, dm, noise_model='ar7')['Simulated'].model.order == 7
    # 'auto' should pick an order of four times the sampling rate.
    cov = Covariance(np.ones(1) * 1e-11, raw.ch_names,
                     raw.info['bads'], raw.info['projs'], nfree=0)
    raw = add_noise(raw, cov, iir_filter=iir_filter)
    assert run_GLM(raw, dm, noise_model='auto')['Simulated'].model.order == 3 * 4
    raw = simulate_nirs_raw(sig_dur=10, stim_dur=5., sfreq=2)
    cov = Covariance(np.ones(1) * 1e-11, raw.ch_names,
                     raw.info['bads'], raw.info['projs'], nfree=0)
    raw = add_noise(raw, cov, iir_filter=iir_filter)
    dm = make_first_level_design_matrix(raw, stim_dur=5., drift_order=1,
                                        drift_model='polynomial')
    assert run_GLM(raw, dm, noise_model='auto')['Simulated'].model.order == 2 * 4
| [
"mne_nirs.simulation.simulate_nirs_raw",
"pytest.mark.filterwarnings",
"numpy.ones",
"mne.simulation.add_noise",
"mne_nirs.statistics.run_GLM",
"mne_nirs.experimental_design.make_first_level_design_matrix"
] | [((416, 492), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:.*nilearn.glm module is experimental.*:"""'], {}), "('ignore:.*nilearn.glm module is experimental.*:')\n", (442, 492), False, 'import pytest\n'), ((523, 567), 'mne_nirs.simulation.simulate_nirs_raw', 'simulate_nirs_raw', ([], {'sig_dur': '(200)', 'stim_dur': '(5.0)'}), '(sig_dur=200, stim_dur=5.0)\n', (540, 567), False, 'from mne_nirs.simulation import simulate_nirs_raw\n'), ((587, 681), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw'], {'stim_dur': '(5.0)', 'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw, stim_dur=5.0, drift_order=1,\n drift_model='polynomial')\n", (617, 681), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((799, 826), 'mne_nirs.statistics.run_GLM', 'run_GLM', (['raw', 'design_matrix'], {}), '(raw, design_matrix)\n', (806, 826), False, 'from mne_nirs.statistics import run_GLM\n'), ((1261, 1314), 'mne_nirs.simulation.simulate_nirs_raw', 'simulate_nirs_raw', ([], {'sig_dur': '(200)', 'stim_dur': '(5.0)', 'sfreq': '(3)'}), '(sig_dur=200, stim_dur=5.0, sfreq=3)\n', (1278, 1314), False, 'from mne_nirs.simulation import simulate_nirs_raw\n'), ((1334, 1428), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw'], {'stim_dur': '(5.0)', 'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw, stim_dur=5.0, drift_order=1,\n drift_model='polynomial')\n", (1364, 1428), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((1586, 1613), 'mne_nirs.statistics.run_GLM', 'run_GLM', (['raw', 'design_matrix'], {}), '(raw, design_matrix)\n', (1593, 1613), False, 'from mne_nirs.statistics import run_GLM\n'), ((1729, 1775), 'mne_nirs.statistics.run_GLM', 'run_GLM', (['raw', 'design_matrix'], {'noise_model': '"""ar2"""'}), "(raw, design_matrix, noise_model='ar2')\n", (1736, 1775), 
False, 'from mne_nirs.statistics import run_GLM\n'), ((1852, 1898), 'mne_nirs.statistics.run_GLM', 'run_GLM', (['raw', 'design_matrix'], {'noise_model': '"""ar7"""'}), "(raw, design_matrix, noise_model='ar7')\n", (1859, 1898), False, 'from mne_nirs.statistics import run_GLM\n'), ((2128, 2170), 'mne.simulation.add_noise', 'add_noise', (['raw', 'cov'], {'iir_filter': 'iir_filter'}), '(raw, cov, iir_filter=iir_filter)\n', (2137, 2170), False, 'from mne.simulation import add_noise\n'), ((2191, 2238), 'mne_nirs.statistics.run_GLM', 'run_GLM', (['raw', 'design_matrix'], {'noise_model': '"""auto"""'}), "(raw, design_matrix, noise_model='auto')\n", (2198, 2238), False, 'from mne_nirs.statistics import run_GLM\n'), ((2309, 2361), 'mne_nirs.simulation.simulate_nirs_raw', 'simulate_nirs_raw', ([], {'sig_dur': '(10)', 'stim_dur': '(5.0)', 'sfreq': '(2)'}), '(sig_dur=10, stim_dur=5.0, sfreq=2)\n', (2326, 2361), False, 'from mne_nirs.simulation import simulate_nirs_raw\n'), ((2493, 2535), 'mne.simulation.add_noise', 'add_noise', (['raw', 'cov'], {'iir_filter': 'iir_filter'}), '(raw, cov, iir_filter=iir_filter)\n', (2502, 2535), False, 'from mne.simulation import add_noise\n'), ((2556, 2650), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw'], {'stim_dur': '(5.0)', 'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw, stim_dur=5.0, drift_order=1,\n drift_model='polynomial')\n", (2586, 2650), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((2809, 2856), 'mne_nirs.statistics.run_GLM', 'run_GLM', (['raw', 'design_matrix'], {'noise_model': '"""auto"""'}), "(raw, design_matrix, noise_model='auto')\n", (2816, 2856), False, 'from mne_nirs.statistics import run_GLM\n'), ((2017, 2027), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (2024, 2027), True, 'import numpy as np\n'), ((2382, 2392), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (2389, 2392), True, 'import numpy as np\n')] |
import pytesseract
import cv2
from pytesseract import Output
import numpy as np
import os
from tqdm import tqdm
# Chargrid colour coding (BGR): punctuation -> green, digits -> red,
# lowercase letters -> cyan. Unknown characters fall back to white via
# CHAR_MAP.get(..., (255, 255, 255)) at the call site.
_PUNCT_COLOR = (0, 255, 0)
_DIGIT_COLOR = (0, 0, 255)
_LETTER_COLOR = (0, 255, 255)
CHAR_MAP = {c: _PUNCT_COLOR for c in '"#%&,.:'}
CHAR_MAP.update((c, _DIGIT_COLOR) for c in "0123456789")
CHAR_MAP.update((c, _LETTER_COLOR) for c in "abcdefghijklmnopqrstuvwxyz")
def char_grid(
    input_dir,
    output_dir
):
    """Render a "chargrid" image: every character Tesseract detects in the
    input image is painted as a filled rectangle colour-coded by CHAR_MAP.

    Args:
        input_dir: path of the input image file.
        output_dir: directory where the colour-coded grid image is written
            (same basename as the input file).
    """
    img = cv2.imread(input_dir)
    height = img.shape[0]
    width = img.shape[1]
    blank_image = np.zeros((height, width, 3), np.uint8)
    tess_img = pytesseract.image_to_boxes(
        img,
        output_type=Output.DICT
    )
    n_boxes = len(tess_img['char'])
    for i in range(n_boxes):
        try:
            text = tess_img['char'][i]
            x1, y2, x2, y1 = (
                tess_img['left'][i],
                tess_img['top'][i],
                tess_img['right'][i],
                tess_img['bottom'][i],
            )
        except (KeyError, IndexError):
            # Tesseract occasionally returns ragged box data; skip the entry.
            continue
        char_color = CHAR_MAP.get(
            text.strip().lower(),
            (255, 255, 255)
        )
        # Tesseract box origin is bottom-left, image origin is top-left,
        # hence the `height - y` flip.
        cv2.rectangle(
            blank_image,
            (x1, height - y1),
            (x2, height - y2),
            char_color,
            cv2.FILLED
        )
    # os.path.basename is portable, unlike split('/').
    filename = os.path.basename(input_dir)
    cv2.imwrite(os.path.join(output_dir, filename), blank_image)
# if __name__=='__main__':
# list_images = [
# os.path.join('/content/drive/My Drive/image', i)
# for i in
# os.listdir('/content/drive/My Drive/image')
# if i.split('.')[-1] in ['png']
# ]
# for input_image in tqdm(list_images):
# # print(input_image)
# char_grid(
# input_image,
# "/content/drive/My Drive/KPMG_datasets/chargrid_image"
# )
| [
"pytesseract.image_to_boxes",
"cv2.rectangle",
"os.path.join",
"numpy.zeros",
"cv2.imread"
] | [((1076, 1097), 'cv2.imread', 'cv2.imread', (['input_dir'], {}), '(input_dir)\n', (1086, 1097), False, 'import cv2\n'), ((1167, 1205), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (1175, 1205), True, 'import numpy as np\n'), ((1222, 1278), 'pytesseract.image_to_boxes', 'pytesseract.image_to_boxes', (['img'], {'output_type': 'Output.DICT'}), '(img, output_type=Output.DICT)\n', (1248, 1278), False, 'import pytesseract\n'), ((1929, 2021), 'cv2.rectangle', 'cv2.rectangle', (['blank_image', '(x1, height - y1)', '(x2, height - y2)', 'char_color', 'cv2.FILLED'], {}), '(blank_image, (x1, height - y1), (x2, height - y2), char_color,\n cv2.FILLED)\n', (1942, 2021), False, 'import cv2\n'), ((2184, 2218), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (2196, 2218), False, 'import os\n')] |
# Script that reads a table from an Excel spreadsheet and generates the
# assembly code that draws the corresponding image on the LCD.
import pandas as pd
import numpy as np
# LCD geometry: 320x240 pixels packed into 16-pixel words.
WIDTH = 320
HEIGHT = 240
SIZE = 16
# Base address of the screen buffer -- presumably memory-mapped; confirm
# against the target machine's memory map.
START = 16_384
sheet = pd.read_excel("SW_LCD.xlsx")
# Cells equal to 1 mark lit pixels.
ones = sheet == 1
pixels = np.where(ones)
# Pack lit pixels into words: memory maps word address -> 16-bit word value.
memory = {}
for y, x in zip(*pixels):
    # Word address: SIZE pixels per word, WIDTH/SIZE words per row.
    # Note np.floor yields a float; formatting below uses :.0f for that reason.
    address = START + np.floor(x / SIZE) + y * WIDTH / SIZE
    # Bit index within the word: leftmost pixel (x % SIZE == 0) is bit 15.
    bit = SIZE - x % SIZE - 1
    if not address in memory:
        memory[address] = 0
    memory[address] += 2 ** bit
# Emit one load/store pair per non-empty word.
nasm = ""
for address, value in memory.items():
    nasm += f"leaw ${value:.0f}, %A\n"
    nasm += f"movw %A, %D\n"
    nasm += f"leaw ${address:.0f}, %A\n"
    nasm += f"movw %D, (%A)\n"
    nasm += "\n"
with open("output.nasm", "w") as file:
    file.write(nasm)
| [
"numpy.where",
"numpy.floor",
"pandas.read_excel"
] | [((223, 251), 'pandas.read_excel', 'pd.read_excel', (['"""SW_LCD.xlsx"""'], {}), "('SW_LCD.xlsx')\n", (236, 251), True, 'import pandas as pd\n'), ((279, 293), 'numpy.where', 'np.where', (['ones'], {}), '(ones)\n', (287, 293), True, 'import numpy as np\n'), ((354, 372), 'numpy.floor', 'np.floor', (['(x / SIZE)'], {}), '(x / SIZE)\n', (362, 372), True, 'import numpy as np\n')] |
import os
from glob import glob
import numpy as np
import dlib
import cv2
from PIL import Image
def remove_undetected(directory, detector='hog'):
    '''
    Removes images in which no face can be detected.
    Args:
    -----------------------------------------
    directory: path to the data folder (one sub-folder per person/class)
    detector: type of detector (Hog or Cnn) to detect faces
    Returns:
    ------------------------------------------
    None. Undetectable images are deleted from disk (paths are printed).
    '''
    all_imgs = glob(f'{directory}*/*')
    for img_path in all_imgs:
        arr_img = np.asarray(Image.open(img_path))
        # face_detector raises (e.g. IndexError on faceRects[0]) when dlib
        # finds no face, and returns None for boxes with a non-positive edge.
        try:
            faces_detected = face_detector(arr_img, detector)
        except Exception:
            print(img_path)
            os.remove(img_path)
            continue
        # `is None` instead of `== None`; the old `== []` branch was dead
        # because face_detector never returns a list.
        if faces_detected is None:
            print(img_path)
            os.remove(img_path)
def face_detector(img, detector='hog'):
    '''
    Locates the first face in an image with dlib.
    Args:
    -----------------------------------------
    img: numpy image array
    detector: type of detector (Hog or Cnn) to detect faces
    Returns:
    ------------------------------------------
    Co-ordinates of the rectangle bounding the face, in the order
    (top, bottom, left, right), or None if any edge is non-positive.
    '''
    kind = detector.lower()
    if kind == 'hog':
        rect = dlib.get_frontal_face_detector()(img, 1)[0]
        box = (rect.top(), rect.bottom(), rect.left(), rect.right())
    elif kind == 'cnn':
        cnn_model = dlib.cnn_face_detection_model_v1(
            './database/dlib-models/mmod_human_face_detector.dat')
        rect = cnn_model(img, 1)[0].rect
        box = (rect.top(), rect.bottom(), rect.left(), rect.right())
    else:
        # Unknown detector name: mirror the implicit None of the original.
        return None
    # Reject boxes that run off the image (any non-positive coordinate).
    return box if all(edge > 0 for edge in box) else None
def make_data_array(directory, paths, img_size, imgs_per_folder, total_imgs, check_detect, detector='hog'):
    '''
    Loads the data from disk to an array to speed up during training.
    Args:
    -----------------------------------------
    directory: path to data folder
    paths: paths to persons/classes in data
    img_size: size of image to be resized to
    imgs_per_folder: no of images to be taken from each class
    total_imgs: total number of images in data folder
    check_detect: bool to check all imgs in data are detectable
    detector: type of detector (Hog or Cnn) to detect faces
    Returns:
    ------------------------------------------
    Tuple (data, y, imgs_per_folder): loaded inputs, labels, and the
    (possibly recomputed) number of images taken per class.
    '''
    # np.int was deprecated and removed in modern NumPy; plain int is the
    # equivalent platform integer dtype.
    data = np.zeros((total_imgs, img_size, img_size, 3), dtype=int)
    y = np.zeros(total_imgs)
    if check_detect:
        print("Removing undetected Images..")
        remove_undetected(directory, detector)
        # Re-compute imgs_per_folder: remove_undetected may have shrunk
        # some class folders.
        counts = [len(glob(f'{p}/*')) for p in paths]
        if counts:
            imgs_per_folder = min(counts)
        print("Removed undetected Images")
        print('-----------------------------------------\n')
    else:
        print("Skipping Detection Check")
        print('-----------------------------------------')
    print("Detecting Faces")
    # Store imgs_per_folder cropped faces per class plus their labels.
    for class_idx, individual in enumerate(paths):
        for img_idx, picture in enumerate(glob(f'{individual}/*')[:imgs_per_folder]):
            img = np.asarray(Image.open(picture))
            y1, y2, x1, x2 = face_detector(img, detector)
            resized_img = cv2.resize(img[y1:y2, x1:x2], (img_size, img_size))
            data[class_idx * imgs_per_folder + img_idx] = resized_img
            y[class_idx * imgs_per_folder + img_idx] = class_idx
    print("Faces Detected and Loaded Successfully")
    return data, y, imgs_per_folder
| [
"PIL.Image.open",
"dlib.get_frontal_face_detector",
"numpy.zeros",
"dlib.cnn_face_detection_model_v1",
"cv2.resize",
"glob.glob",
"os.remove"
] | [((560, 583), 'glob.glob', 'glob', (['f"""{directory}*/*"""'], {}), "(f'{directory}*/*')\n", (564, 583), False, 'from glob import glob\n'), ((2901, 2960), 'numpy.zeros', 'np.zeros', (['(total_imgs, img_size, img_size, 3)'], {'dtype': 'np.int'}), '((total_imgs, img_size, img_size, 3), dtype=np.int)\n', (2909, 2960), True, 'import numpy as np\n'), ((2966, 2986), 'numpy.zeros', 'np.zeros', (['total_imgs'], {}), '(total_imgs)\n', (2974, 2986), True, 'import numpy as np\n'), ((1376, 1408), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1406, 1408), False, 'import dlib\n'), ((636, 651), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (646, 651), False, 'from PIL import Image\n'), ((927, 941), 'os.remove', 'os.remove', (['img'], {}), '(img)\n', (936, 941), False, 'import os\n'), ((1757, 1849), 'dlib.cnn_face_detection_model_v1', 'dlib.cnn_face_detection_model_v1', (['"""./database/dlib-models/mmod_human_face_detector.dat"""'], {}), "(\n './database/dlib-models/mmod_human_face_detector.dat')\n", (1789, 1849), False, 'import dlib\n'), ((3902, 3953), 'cv2.resize', 'cv2.resize', (['img[y1:y2, x1:x2]', '(img_size, img_size)'], {}), '(img[y1:y2, x1:x2], (img_size, img_size))\n', (3912, 3953), False, 'import cv2\n'), ((809, 823), 'os.remove', 'os.remove', (['img'], {}), '(img)\n', (818, 823), False, 'import os\n'), ((3242, 3256), 'glob.glob', 'glob', (['f"""{i}/*"""'], {}), "(f'{i}/*')\n", (3246, 3256), False, 'from glob import glob\n'), ((3741, 3764), 'glob.glob', 'glob', (['f"""{individual}/*"""'], {}), "(f'{individual}/*')\n", (3745, 3764), False, 'from glob import glob\n'), ((3809, 3828), 'PIL.Image.open', 'Image.open', (['picture'], {}), '(picture)\n', (3819, 3828), False, 'from PIL import Image\n')] |
import numpy as np
from copy import copy
from complex_performance_metrics.models.plugin import MulticlassPluginClassifier
from complex_performance_metrics.utils import nae
"""
This module implements the COCO algorithm for:
minimizing the (multiclass) Q-mean loss subject to (multiclass) Normalized Absolute Error <= epsilon
"""
def frank_wolfe(x, y, classifier, cpe_model, alpha, num_inner_iter):
    """
    Inner Frank-Wolfe optimization in COCO
    Perform Lagrangian optimization over confusion matrices for Lagrange multiplier alpha
    Args:
      x (array-like, dtype = float, shape = (m,d)): Features
      y (array-like, dtype = int, shape = (m,)): Labels {0,...,m-1}
      classifier (RandomizedClassifier):
        A randomized classifier to which additional base classifiers are to be added
      cpe_model (sklearn estimator): A model with a predict_proba() function (default: None)
      alpha (float): Lagrange multiplier
      num_inner_iter (int): Number of solver iterations
    Returns:
      classifier (RandomizedClassifier): Solution classifier for inner maximization
    """
    num_class = classifier.num_class
    # Empirical class prior: p[i] = fraction of examples labelled i.
    p = np.zeros((num_class,))
    for i in range(num_class):
        p[i] = (y == i).mean()
    # Initialize gain matrix (diagonal 1/p[i] weighting)
    W = np.eye(num_class, num_class)
    for i in range(num_class):
        W[i, i] = 1.0 / p[i]
    # Create binary plug-in with separate thresholds for protected attribute values
    plugin = MulticlassPluginClassifier(cpe_model, num_class=num_class)
    plugin.set_cost_matrix(W, is_cost=False)
    # C is the confusion-matrix iterate updated by Frank-Wolfe below.
    C = plugin.evaluate_conf(x, y, use_stored_prob=True)
    # Initialize constants/normalization terms
    norm_costs_const = 0.5 / (1 - np.min(p))
    norm_weights_const = 1.0
    # Frank-Wolfe iterations
    for i in range(num_inner_iter):
        # Compute gain matrix from objective gradient,
        # and construct optimal classifier for cost-sensitive learning step
        W = np.zeros((num_class, num_class))
        for j in range(num_class):
            for k in range(num_class):
                if j == k:
                    W[j, j] = 2 * (1 - C[j, j] / p[j]) / p[j] / num_class
                # Constraint (NAE) contribution of the Lagrangian gradient.
                W[j, k] -= alpha * 2 * (C[:,k].sum() - p[k]) * norm_costs_const
        plugin.set_cost_matrix(W, is_cost=False)
        # Update confusion matrix iterate with the standard 2/(i+2) step size.
        C_hat = plugin.evaluate_conf(x, y, use_stored_prob=True)
        C = (1 - 2.0 / (i + 2)) * C + 2.0 / (i + 2) * C_hat
        # Append weight and copy of plug-in classifier to randomized classifier
        if i == 0:
            classifier.append(1.0, copy(plugin))
        else:
            norm_weights_const *= 1 - 2.0 / (i + 2)
            classifier.append(2.0 / (i + 2) / norm_weights_const, copy(plugin))
    # Normalize classifier weights to sum up to 1
    # NOTE(review): the comprehension variable `x` shadows the `x` parameter.
    classifier.weights[-num_inner_iter:-1] = [x * norm_weights_const for x in classifier.weights[-num_inner_iter:-1]]
    classifier.weights[-1] *= norm_weights_const
    return C, classifier
def fit(x, y, classifier, cpe_model, eps, eta, num_outer_iter, num_inner_iter):
    """
    Outer optimization in COCO: projected gradient ascent on the Lagrange
    multiplier alpha, with the inner Frank-Wolfe solver called at each step.
    Args:
        x (array-like, dtype = float, shape= (m,d)): Features
        y (array-like, dtype = int, shape = (m,)): Labels {0,...,m-1}
        classifier (RandomizedClassifier):
            A randomized classifier to which additional base classifiers are to be added
        cpe_model (sklearn estimator): A model with a predict_proba() function (default: None)
        eps (float): Constraint function tolerance
        eta (float): Step-size for gradient-ascent solver
        num_outer_iter (int): Number of outer iterations in solver (gradient ascent)
        num_inner_iter (int): Number of inner iterations in solver (Frank-Wolfe)
    Returns:
        classifier (RandomizedClassifier): Final classifier
    """
    if eps == 1:
        # Unconstrained problem: keep alpha at zero, one outer pass suffices.
        alpha = 0.0
        num_outer_iter = 1
    else:
        alpha = 0.01
    for t in range(num_outer_iter):
        # Inner maximization of the Lagrangian at the current multiplier.
        C, _ = frank_wolfe(x, y, classifier, cpe_model, alpha, num_inner_iter)
        # Ascent step on alpha with 1/sqrt(t+1) decay, projected onto [0, inf).
        alpha = max(alpha + eta * 1.0 / np.sqrt(t + 1) * (nae(C) - eps), 0)
    # Normalize classifier weights to sum up to 1
    classifier.normalize_weights()
    return classifier
| [
"numpy.eye",
"numpy.sqrt",
"complex_performance_metrics.models.plugin.MulticlassPluginClassifier",
"numpy.zeros",
"numpy.min",
"copy.copy",
"complex_performance_metrics.utils.nae"
] | [((1155, 1177), 'numpy.zeros', 'np.zeros', (['(num_class,)'], {}), '((num_class,))\n', (1163, 1177), True, 'import numpy as np\n'), ((1277, 1305), 'numpy.eye', 'np.eye', (['num_class', 'num_class'], {}), '(num_class, num_class)\n', (1283, 1305), True, 'import numpy as np\n'), ((1465, 1523), 'complex_performance_metrics.models.plugin.MulticlassPluginClassifier', 'MulticlassPluginClassifier', (['cpe_model'], {'num_class': 'num_class'}), '(cpe_model, num_class=num_class)\n', (1491, 1523), False, 'from complex_performance_metrics.models.plugin import MulticlassPluginClassifier\n'), ((1960, 1992), 'numpy.zeros', 'np.zeros', (['(num_class, num_class)'], {}), '((num_class, num_class))\n', (1968, 1992), True, 'import numpy as np\n'), ((4393, 4399), 'complex_performance_metrics.utils.nae', 'nae', (['C'], {}), '(C)\n', (4396, 4399), False, 'from complex_performance_metrics.utils import nae\n'), ((1708, 1717), 'numpy.min', 'np.min', (['p'], {}), '(p)\n', (1714, 1717), True, 'import numpy as np\n'), ((2600, 2612), 'copy.copy', 'copy', (['plugin'], {}), '(plugin)\n', (2604, 2612), False, 'from copy import copy\n'), ((2746, 2758), 'copy.copy', 'copy', (['plugin'], {}), '(plugin)\n', (2750, 2758), False, 'from copy import copy\n'), ((4465, 4479), 'numpy.sqrt', 'np.sqrt', (['(t + 1)'], {}), '(t + 1)\n', (4472, 4479), True, 'import numpy as np\n')] |
import numpy as np
class MSE:
    """Half mean-squared-error loss."""

    def __call__(self, y_pred, y):
        residual = y_pred - y
        return 0.5 * np.mean(residual ** 2)

    def derivative(self, y_pred, y):
        """Gradient of the loss with respect to the predictions."""
        return y_pred - y
class CrossEntropy:
    """Cross-entropy loss for probabilistic predictions."""

    def __call__(self, y_pred, y):
        return -(y * np.log(y_pred)).sum()

    def derivative(self, y_pred, y):
        """Gradient of the loss with respect to the predictions."""
        return (y_pred - y) / (y_pred * (1 - y_pred))
class NoCost:
    """Identity 'cost': passes predictions through unchanged."""

    def __call__(self, y_pred, y):
        return y_pred

    def derivative(self, y_pred, y):
        # The gradient of the identity is one per (sample, target) entry.
        return np.ones(y_pred.shape)
| [
"numpy.mean",
"numpy.log",
"numpy.ones"
] | [((539, 570), 'numpy.ones', 'np.ones', (['(n_samples, n_targets)'], {}), '((n_samples, n_targets))\n', (546, 570), True, 'import numpy as np\n'), ((88, 114), 'numpy.mean', 'np.mean', (['((y_pred - y) ** 2)'], {}), '((y_pred - y) ** 2)\n', (95, 114), True, 'import numpy as np\n'), ((261, 275), 'numpy.log', 'np.log', (['y_pred'], {}), '(y_pred)\n', (267, 275), True, 'import numpy as np\n')] |
import sys
import argparse
import numpy as np
from .. import zemax, trains, sdb, _utility, agf
from otk.rt2 import rt2_scalar_qt as rt2
def view_zmx():
    """Command-line entry point: load a Zemax lens file and open a 3D viewer."""
    arg_parser = argparse.ArgumentParser(
        description='Load and view a Zemax lens. Glass catalogs are obtained the directory zemax_glass_catalog_dir'
        'defined in the otk configuration file.')
    arg_parser.add_argument('filename', help='file to view')
    arg_parser.add_argument('-n', help='accept N- prefix as substitute if named glass not found', action='store_true')
    args = arg_parser.parse_args()

    # Prefer the user-configured glass catalog directory; fall back to the
    # catalogs shipped with otk.
    catalog_dir = _utility.load_config().get('zemax_glass_catalog_dir')
    if catalog_dir is None:
        glass_catalog_paths = zemax.SUPPLIED_GLASS_CATALOG_PATHS
    else:
        glass_catalog_paths = zemax.read_glass_catalog_dir(catalog_dir)

    try:
        train0 = zemax.read_train(args.filename, glass_catalog_paths=glass_catalog_paths, try_n_prefix=args.n)
    except (agf.ParseError, zemax.GlassNotFoundError, zemax.NoCatalogError) as e:
        # Report the parser/catalog problem and exit with a failure status.
        print(e.args[0])
        sys.exit(1)

    train1 = train0.crop_to_finite()
    # Convert to a sequence of axisymmetric singlet lenses.
    singlet_sequence = trains.SingletSequence.from_train2(train1, 'max')
    # Convert to rt2 Elements.
    elements = rt2.make_elements(singlet_sequence, 'circle')
    # Create assembly object for ray tracing.
    assembly = rt2.Assembly.make(elements, singlet_sequence.n_external)
    scale_factor = abs(assembly.surface.get_aabb(np.eye(4)).size[:3]).prod()**(-1/3)
    view_surface = sdb.IntersectionOp((assembly.surface, sdb.Plane((-1, 0, 0), 0)), assembly.surface).scale(scale_factor)
    with rt2.application():
        viewer = rt2.view_assembly(assembly, surface=view_surface)
| [
"otk.rt2.rt2_scalar_qt.Assembly.make",
"numpy.eye",
"otk.rt2.rt2_scalar_qt.view_assembly",
"otk.rt2.rt2_scalar_qt.application",
"argparse.ArgumentParser",
"otk.rt2.rt2_scalar_qt.make_elements",
"sys.exit"
] | [((167, 347), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Load and view a Zemax lens. Glass catalogs are obtained the directory zemax_glass_catalog_dirdefined in the otk configuration file."""'}), "(description=\n 'Load and view a Zemax lens. Glass catalogs are obtained the directory zemax_glass_catalog_dirdefined in the otk configuration file.'\n )\n", (190, 347), False, 'import argparse\n'), ((1286, 1331), 'otk.rt2.rt2_scalar_qt.make_elements', 'rt2.make_elements', (['singlet_sequence', '"""circle"""'], {}), "(singlet_sequence, 'circle')\n", (1303, 1331), True, 'from otk.rt2 import rt2_scalar_qt as rt2\n'), ((1393, 1449), 'otk.rt2.rt2_scalar_qt.Assembly.make', 'rt2.Assembly.make', (['elements', 'singlet_sequence.n_external'], {}), '(elements, singlet_sequence.n_external)\n', (1410, 1449), True, 'from otk.rt2 import rt2_scalar_qt as rt2\n'), ((1668, 1685), 'otk.rt2.rt2_scalar_qt.application', 'rt2.application', ([], {}), '()\n', (1683, 1685), True, 'from otk.rt2 import rt2_scalar_qt as rt2\n'), ((1704, 1753), 'otk.rt2.rt2_scalar_qt.view_assembly', 'rt2.view_assembly', (['assembly'], {'surface': 'view_surface'}), '(assembly, surface=view_surface)\n', (1721, 1753), True, 'from otk.rt2 import rt2_scalar_qt as rt2\n'), ((1057, 1068), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1065, 1068), False, 'import sys\n'), ((1500, 1509), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1506, 1509), True, 'import numpy as np\n')] |
# Imports
from maze_generator import breadth_first_generation
from PIL import Image
import numpy
def main():
    """
    Generate five fresh maze images on disk
    :return: None
    """
    for index in range(5):
        breadth_first_generation(name=f'image{index}.jpg')
def astar_algorithm(name):
    """
    Load an image and print its pixel array (the A* search itself is
    not implemented yet)
    :param name: str
    :return: None
    """
    pixels = numpy.asarray(Image.open(name))
    print(pixels)
# Script entry point: regenerate a batch of mazes, then load one image
# back and dump its pixel array.
if __name__ == '__main__':
    main()
    astar_algorithm('image.jpg')
| [
"PIL.Image.open",
"numpy.asarray",
"maze_generator.breadth_first_generation"
] | [((414, 430), 'PIL.Image.open', 'Image.open', (['name'], {}), '(name)\n', (424, 430), False, 'from PIL import Image\n'), ((449, 469), 'numpy.asarray', 'numpy.asarray', (['image'], {}), '(image)\n', (462, 469), False, 'import numpy\n'), ((207, 253), 'maze_generator.breadth_first_generation', 'breadth_first_generation', ([], {'name': 'f"""image{i}.jpg"""'}), "(name=f'image{i}.jpg')\n", (231, 253), False, 'from maze_generator import breadth_first_generation\n')] |
import numpy as np
from methods_project_old import MaximumIterationError
from robot_arm import RobotArm
def test_point_outside_outer_circle(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Run the analytical test with a destination beyond the arm's total reach."""
    print('---- Test with destination outside configuration space ----')
    analytical_tests('ooc', lengths, n, plot_initial, plot_minimizer, animate)
def test_point_innside_inner_circle(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Run the analytical test with a destination inside the annulus hole.

    Skips with a message when the configuration space is not an annulus.
    """
    longest = np.argmax(lengths)
    rest_reach = np.sum(np.delete(lengths, longest))
    if rest_reach > lengths[longest]:
        print("---- Configuration space not an annulus. Can't run test ----")
        return
    print("---- Test with destination innside inner circle of configuration space ----")
    analytical_tests('iic', lengths, n, plot_initial, plot_minimizer, animate)
def analytical_tests(test, lengths, n, plot_initial, plot_minimizer, animate):
    """Place a destination point analytically and run a single test.

    :param test: 'iic' (inside inner circle) or 'ooc' (outside outer circle)
    :param lengths: link lengths of the robot arm
    :param n: number of joints
    :raises NotImplementedError: for any other value of ``test``
    """
    if test == 'iic':
        longest = np.argmax(lengths)
        # Inner radius of the annulus = longest link minus the reach of the
        # remaining links (same formula as in bfgs_tests). The original
        # subtracted the *total* reach, which is always negative.
        inner_radius = lengths[longest] - np.sum(np.delete(lengths, longest))
        p_distance_from_origin = inner_radius / 2
    elif test == 'ooc':
        # Just beyond the arm's maximum reach.
        reach = np.sum(lengths)
        p_distance_from_origin = reach + 1
    else:
        raise NotImplementedError("Test not implemented: %r" % (test,))
    # Random direction for the destination and a random initial configuration.
    angle = 2 * np.pi * np.random.random()
    p = p_distance_from_origin * np.array([np.cos(angle), np.sin(angle)])
    theta0 = 2 * np.pi * np.random.random(n)
    run_test(lengths, theta0, p, plot_initial, plot_minimizer, animate)
def test_bfgs_local_max(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Start BFGS from a boundary configuration that is a local maximum."""
    print('---- Test with a boundary point that is a \nlocal (not global) maximum as starting point ----')
    bfgs_tests('lm', lengths, n, plot_initial, plot_minimizer, animate)
def test_bfgs_global_max(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Start BFGS from the global maximum configuration."""
    banner = '---- Test with global maximum as starting point ----'
    print(banner)
    bfgs_tests('gm', lengths, n, plot_initial, plot_minimizer, animate)
def test_bfgs_saddle_point(lengths, n, plot_initial=True, plot_minimizer=True, animate=False):
    """Start BFGS from an interior stationary point (saddle or local max)."""
    print('---- Test with an interior point that is either a\n saddle point or local maximum as starting point ----')
    bfgs_tests('sp', lengths, n, plot_initial, plot_minimizer, animate)
def bfgs_tests(test, lengths, n, plot_initial, plot_minimizer, animate):
    """Build a starting configuration that is a known stationary point and run one test.

    :param test: 'lm' (local max), 'gm' (global max) or 'sp' (saddle point)
    :param lengths: link lengths of the robot arm
    :param n: number of joints
    :raises NotImplementedError: for any other value of ``test``
    """
    # Random direction in which the arm is stretched out (all angles 0
    # except the first joint).
    first_angle = 2 * np.pi * np.random.random()
    if test == 'sp': last_angle = np.pi
    theta0 = np.zeros(n)
    theta0[0] = first_angle
    longest = np.argmax(lengths)
    # The configuration space is an annulus when the longest link
    # out-reaches all the others combined.
    annulus = np.sum(np.delete(lengths, longest)) < lengths[longest]
    if annulus:
        if test == 'sp':
            # For 'sp', point the last (or second-to-last) link backwards
            # by an angle of pi.
            if longest != len(lengths) - 1: theta0[-1] = last_angle
            else: theta0[-2] = last_angle
        inner_radius = lengths[longest] - np.sum(np.delete(lengths, longest))
        outer_radius = np.sum(lengths)
        # Destination distance drawn uniformly between the two radii.
        p_distance_from_origin = inner_radius + (outer_radius - inner_radius) * np.random.random()
    else:
        p_distance_from_origin = np.sum(lengths) * np.random.random()
    if test == 'lm' or test == 'sp':
        # Destination along the direction of the stretched arm.
        p = p_distance_from_origin * np.array([np.cos(first_angle), np.sin(first_angle)])
    elif test == 'gm':
        # Destination in the opposite direction of the stretched arm.
        p = p_distance_from_origin * np.array([-np.cos(first_angle), -np.sin(first_angle)])
    else:
        print ("Test not implemented.")
        raise NotImplementedError
    run_test(lengths, theta0, p, plot_initial, plot_minimizer, animate)
def test_random(m, plot_initial=False, plot_minimizer=False, animate=False):
    """Run m tests with randomly drawn arm geometries and destinations."""
    print('---- Test with m random configurations ----')
    max_joints = 100
    max_length = 1000
    for _ in range(m):
        n = int(max_joints * np.random.random()) + 1
        lengths = max_length * np.random.random(n)
        theta0 = 2 * np.pi * np.random.random(n)
        # The distance may be negative, which flips the direction of p.
        p_distance_to_origin = 2 * np.sum(lengths) * np.random.uniform(low=-1, high=1)
        p_angle = 2 * np.pi * np.random.random()
        direction = np.array([np.cos(p_angle), np.sin(p_angle)])
        p = p_distance_to_origin * direction
        run_test(lengths, theta0, p, plot_initial, plot_minimizer, animate)
def run_test(lengths, theta0, p, plot_initial, plot_minimizer, animate):
    """Build a RobotArm for destination p and drive it there.

    On failure (iteration limit or internal assertion) the offending
    initial values are saved to disk and an animation is written before
    the exception is re-raised, so the case can be reproduced later.

    Relies on the module-level ``epsilon`` precision constant.
    """
    arm = RobotArm(lengths, p, theta0, precision=epsilon)
    if plot_initial:
        arm.plot()
    try:
        arm.move_to_destination()
    except (MaximumIterationError, AssertionError):
        # Both failure modes got byte-identical handlers before; merge them.
        # Persist the failing configuration for offline debugging, then
        # re-raise so the failure is still visible to the caller.
        np.save('initial_values_bug', (lengths, theta0, p))
        arm.save_animation()
        raise
    if plot_minimizer:
        arm.plot()
    if animate:
        arm.save_animation()
# Script entry point: exercise every test on one disk-shaped and one
# annulus-shaped arm, then run a batch of random configurations.
if __name__ == '__main__':
    arms = []
    arms.append(np.array([3, 2, 2]))  # disk-shaped configuration space
    arms.append(np.array([1, 4, 1]))  # annulus-shaped configuration space
    # Precision handed to RobotArm by run_test.
    epsilon = 1e-3
    k = len(arms)  # NOTE(review): k is never used below
    for lengths in arms:
        n = len(lengths)
        test_point_outside_outer_circle(lengths, n)
        test_point_innside_inner_circle(lengths, n)
        test_bfgs_local_max(lengths, n)
        test_bfgs_global_max(lengths, n)
        test_bfgs_saddle_point(lengths, n)
    test_random(100)
| [
"numpy.random.random",
"numpy.delete",
"numpy.argmax",
"robot_arm.RobotArm",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.cos",
"numpy.random.uniform",
"numpy.sin",
"numpy.save"
] | [((535, 553), 'numpy.argmax', 'np.argmax', (['lengths'], {}), '(lengths)\n', (544, 553), True, 'import numpy as np\n'), ((2543, 2554), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2551, 2554), True, 'import numpy as np\n'), ((2598, 2616), 'numpy.argmax', 'np.argmax', (['lengths'], {}), '(lengths)\n', (2607, 2616), True, 'import numpy as np\n'), ((4256, 4303), 'robot_arm.RobotArm', 'RobotArm', (['lengths', 'p', 'theta0'], {'precision': 'epsilon'}), '(lengths, p, theta0, precision=epsilon)\n', (4264, 4303), False, 'from robot_arm import RobotArm\n'), ((1000, 1018), 'numpy.argmax', 'np.argmax', (['lengths'], {}), '(lengths)\n', (1009, 1018), True, 'import numpy as np\n'), ((1335, 1353), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1351, 1353), True, 'import numpy as np\n'), ((1454, 1473), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (1470, 1473), True, 'import numpy as np\n'), ((2471, 2489), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2487, 2489), True, 'import numpy as np\n'), ((2939, 2954), 'numpy.sum', 'np.sum', (['lengths'], {}), '(lengths)\n', (2945, 2954), True, 'import numpy as np\n'), ((4795, 4814), 'numpy.array', 'np.array', (['[3, 2, 2]'], {}), '([3, 2, 2])\n', (4803, 4814), True, 'import numpy as np\n'), ((4867, 4886), 'numpy.array', 'np.array', (['[1, 4, 1]'], {}), '([1, 4, 1])\n', (4875, 4886), True, 'import numpy as np\n'), ((568, 595), 'numpy.delete', 'np.delete', (['lengths', 'longest'], {}), '(lengths, longest)\n', (577, 595), True, 'import numpy as np\n'), ((1061, 1076), 'numpy.sum', 'np.sum', (['lengths'], {}), '(lengths)\n', (1067, 1076), True, 'import numpy as np\n'), ((1167, 1182), 'numpy.sum', 'np.sum', (['lengths'], {}), '(lengths)\n', (1173, 1182), True, 'import numpy as np\n'), ((2638, 2665), 'numpy.delete', 'np.delete', (['lengths', 'longest'], {}), '(lengths, longest)\n', (2647, 2665), True, 'import numpy as np\n'), ((3097, 3112), 'numpy.sum', 'np.sum', (['lengths'], {}), 
'(lengths)\n', (3103, 3112), True, 'import numpy as np\n'), ((3115, 3133), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3131, 3133), True, 'import numpy as np\n'), ((3805, 3824), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (3821, 3824), True, 'import numpy as np\n'), ((3854, 3873), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (3870, 3873), True, 'import numpy as np\n'), ((3928, 3961), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)'}), '(low=-1, high=1)\n', (3945, 3961), True, 'import numpy as np\n'), ((3992, 4010), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4008, 4010), True, 'import numpy as np\n'), ((4428, 4479), 'numpy.save', 'np.save', (['"""initial_values_bug"""', '(lengths, theta0, p)'], {}), "('initial_values_bug', (lengths, theta0, p))\n", (4435, 4479), True, 'import numpy as np\n'), ((4561, 4612), 'numpy.save', 'np.save', (['"""initial_values_bug"""', '(lengths, theta0, p)'], {}), "('initial_values_bug', (lengths, theta0, p))\n", (4568, 4612), True, 'import numpy as np\n'), ((1397, 1410), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1403, 1410), True, 'import numpy as np\n'), ((1412, 1425), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1418, 1425), True, 'import numpy as np\n'), ((2887, 2914), 'numpy.delete', 'np.delete', (['lengths', 'longest'], {}), '(lengths, longest)\n', (2896, 2914), True, 'import numpy as np\n'), ((3035, 3053), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3051, 3053), True, 'import numpy as np\n'), ((3910, 3925), 'numpy.sum', 'np.sum', (['lengths'], {}), '(lengths)\n', (3916, 3925), True, 'import numpy as np\n'), ((3219, 3238), 'numpy.cos', 'np.cos', (['first_angle'], {}), '(first_angle)\n', (3225, 3238), True, 'import numpy as np\n'), ((3240, 3259), 'numpy.sin', 'np.sin', (['first_angle'], {}), '(first_angle)\n', (3246, 3259), True, 'import numpy as np\n'), ((3755, 3773), 
'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3771, 3773), True, 'import numpy as np\n'), ((4056, 4071), 'numpy.cos', 'np.cos', (['p_angle'], {}), '(p_angle)\n', (4062, 4071), True, 'import numpy as np\n'), ((4073, 4088), 'numpy.sin', 'np.sin', (['p_angle'], {}), '(p_angle)\n', (4079, 4088), True, 'import numpy as np\n'), ((3333, 3352), 'numpy.cos', 'np.cos', (['first_angle'], {}), '(first_angle)\n', (3339, 3352), True, 'import numpy as np\n'), ((3355, 3374), 'numpy.sin', 'np.sin', (['first_angle'], {}), '(first_angle)\n', (3361, 3374), True, 'import numpy as np\n')] |
"""Implements a simple two layer mlp network."""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class MlpNet(nn.Module):
    """Fully connected two-hidden-layer value network over joint agent inputs.

    The forward pass expects a ``(batch, sa_dim, n_agents)`` tensor, optionally
    shuffles the agent axis per sample, flattens, and outputs one value per
    sample.
    """

    def __init__(self, sa_dim, n_agents, hidden_size,
                 agent_id=0, agent_shuffle='none'):
        super(MlpNet, self).__init__()
        self.linear1 = nn.Linear(sa_dim * n_agents, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.V = nn.Linear(hidden_size, 1)
        # Shrink the output layer so early value estimates start near zero.
        self.V.weight.data.mul_(0.1)
        self.V.bias.data.mul_(0.1)
        self.n_agents = n_agents
        self.agent_id = agent_id
        self.agent_shuffle = agent_shuffle

    def _shuffled(self, x):
        """Return x with the agent axis permuted according to agent_shuffle."""
        bz = x.shape[0]
        if self.agent_shuffle == 'all':
            x_out = []
            for k in range(bz):
                rand_idx = np.random.permutation(self.n_agents)
                # NOTE(review): x[k, :, rand_idx] puts the advanced (agent)
                # axis first, so agent/feature axes come out swapped before
                # flattening — confirm this is intended.
                x_out.append(x[k, :, rand_idx].unsqueeze(0))
            return torch.cat(x_out, 0)
        if self.agent_shuffle == 'others':
            x_out = []
            for k in range(bz):
                # Permute every agent column except this agent's own.
                rand_idx = np.random.permutation(self.n_agents - 1)
                index_except = np.concatenate([np.arange(0, self.agent_id),
                                                np.arange(self.agent_id + 1, self.n_agents)])
                except_shuffle = index_except[rand_idx]
                x_tmp = x[k, :, :]
                x_tmp[:, index_except] = x_tmp[:, except_shuffle]
                x_out.append(x_tmp.unsqueeze(0))
            return torch.cat(x_out, 0)
        if self.agent_shuffle == 'none':
            return x
        # The original raised the non-callable NotImplemented constant here,
        # which surfaced as a TypeError; raise the proper exception type.
        raise NotImplementedError(
            'Unsupported agent_shuffle opt: %s' % self.agent_shuffle)

    def forward(self, x):
        """Return a (batch, 1) value estimate for the joint observation x."""
        x = self._shuffled(x)
        # Flatten (batch, sa_dim, n_agents) -> (batch, sa_dim * n_agents).
        x = x.view(x.shape[0], -1)
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        return self.V(x)
class MlpNetM(nn.Module):
    """Value network that embeds each of three agents separately, then merges.

    Each agent slice ``x[:, :, i]`` passes through a shared first layer; the
    three embeddings are concatenated before two further hidden layers. The
    forward pass hard-codes exactly three agents along the last axis.
    """

    def __init__(self, sa_dim, n_agents, hidden_size,
                 agent_id=0, agent_shuffle='none'):
        super(MlpNetM, self).__init__()
        self.linear1 = nn.Linear(sa_dim, hidden_size)
        # The second layer consumes the concatenation of three embeddings.
        self.linear2 = nn.Linear(hidden_size * 3, hidden_size)
        self.linear3 = nn.Linear(hidden_size, hidden_size)
        self.V = nn.Linear(hidden_size, 1)
        # Shrink the output layer so early value estimates start near zero.
        self.V.weight.data.mul_(0.1)
        self.V.bias.data.mul_(0.1)
        self.n_agents = n_agents
        self.agent_id = agent_id
        self.agent_shuffle = agent_shuffle

    def _shuffled(self, x):
        """Return x with the agent axis permuted according to agent_shuffle."""
        bz = x.shape[0]
        if self.agent_shuffle == 'all':
            x_out = []
            for k in range(bz):
                rand_idx = np.random.permutation(self.n_agents)
                # NOTE(review): x[k, :, rand_idx] puts the advanced (agent)
                # axis first, swapping agent/feature axes — confirm intended.
                x_out.append(x[k, :, rand_idx].unsqueeze(0))
            return torch.cat(x_out, 0)
        if self.agent_shuffle == 'others':
            x_out = []
            for k in range(bz):
                # Permute every agent column except this agent's own.
                rand_idx = np.random.permutation(self.n_agents - 1)
                index_except = np.concatenate([np.arange(0, self.agent_id),
                                                np.arange(self.agent_id + 1, self.n_agents)])
                except_shuffle = index_except[rand_idx]
                x_tmp = x[k, :, :]
                x_tmp[:, index_except] = x_tmp[:, except_shuffle]
                x_out.append(x_tmp.unsqueeze(0))
            return torch.cat(x_out, 0)
        if self.agent_shuffle == 'none':
            return x
        # The original raised the non-callable NotImplemented constant here,
        # which surfaced as a TypeError; raise the proper exception type.
        raise NotImplementedError(
            'Unsupported agent_shuffle opt: %s' % self.agent_shuffle)

    def forward(self, x):
        """Return a (batch, 1) value estimate for the joint observation x."""
        x = self._shuffled(x)
        # Shared first layer applied to each agent's slice (assumes exactly
        # three agents along the last axis).
        x1 = self.linear1(x[:, :, 0])
        x2 = self.linear1(x[:, :, 1])
        x3 = self.linear1(x[:, :, 2])
        x = F.relu(torch.cat((x1, x2, x3), 1))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        return self.V(x)
| [
"numpy.arange",
"torch.nn.functional.relu",
"torch.nn.Linear",
"torch.cat",
"numpy.random.permutation"
] | [((377, 418), 'torch.nn.Linear', 'nn.Linear', (['(sa_dim * n_agents)', 'hidden_size'], {}), '(sa_dim * n_agents, hidden_size)\n', (386, 418), True, 'import torch.nn as nn\n'), ((438, 473), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (447, 473), True, 'import torch.nn as nn\n'), ((487, 512), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (496, 512), True, 'import torch.nn as nn\n'), ((1705, 1714), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1711, 1714), True, 'import torch.nn.functional as F\n'), ((1747, 1756), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1753, 1756), True, 'import torch.nn.functional as F\n'), ((2031, 2061), 'torch.nn.Linear', 'nn.Linear', (['sa_dim', 'hidden_size'], {}), '(sa_dim, hidden_size)\n', (2040, 2061), True, 'import torch.nn as nn\n'), ((2081, 2120), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * 3)', 'hidden_size'], {}), '(hidden_size * 3, hidden_size)\n', (2090, 2120), True, 'import torch.nn as nn\n'), ((2140, 2175), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (2149, 2175), True, 'import torch.nn as nn\n'), ((2189, 2214), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (2198, 2214), True, 'import torch.nn as nn\n'), ((3453, 3479), 'torch.cat', 'torch.cat', (['(x1, x2, x3)', '(1)'], {}), '((x1, x2, x3), 1)\n', (3462, 3479), False, 'import torch\n'), ((3488, 3497), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3494, 3497), True, 'import torch.nn.functional as F\n'), ((3530, 3539), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3536, 3539), True, 'import torch.nn.functional as F\n'), ((3572, 3581), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3578, 3581), True, 'import torch.nn.functional as F\n'), ((943, 962), 'torch.cat', 'torch.cat', (['x_out', '(0)'], {}), '(x_out, 
0)\n', (952, 962), False, 'import torch\n'), ((2645, 2664), 'torch.cat', 'torch.cat', (['x_out', '(0)'], {}), '(x_out, 0)\n', (2654, 2664), False, 'import torch\n'), ((843, 879), 'numpy.random.permutation', 'np.random.permutation', (['self.n_agents'], {}), '(self.n_agents)\n', (864, 879), True, 'import numpy as np\n'), ((1441, 1460), 'torch.cat', 'torch.cat', (['x_out', '(0)'], {}), '(x_out, 0)\n', (1450, 1460), False, 'import torch\n'), ((2545, 2581), 'numpy.random.permutation', 'np.random.permutation', (['self.n_agents'], {}), '(self.n_agents)\n', (2566, 2581), True, 'import numpy as np\n'), ((3143, 3162), 'torch.cat', 'torch.cat', (['x_out', '(0)'], {}), '(x_out, 0)\n', (3152, 3162), False, 'import torch\n'), ((1066, 1106), 'numpy.random.permutation', 'np.random.permutation', (['(self.n_agents - 1)'], {}), '(self.n_agents - 1)\n', (1087, 1106), True, 'import numpy as np\n'), ((2768, 2808), 'numpy.random.permutation', 'np.random.permutation', (['(self.n_agents - 1)'], {}), '(self.n_agents - 1)\n', (2789, 2808), True, 'import numpy as np\n'), ((1144, 1171), 'numpy.arange', 'np.arange', (['(0)', 'self.agent_id'], {}), '(0, self.agent_id)\n', (1153, 1171), True, 'import numpy as np\n'), ((1212, 1255), 'numpy.arange', 'np.arange', (['(self.agent_id + 1)', 'self.n_agents'], {}), '(self.agent_id + 1, self.n_agents)\n', (1221, 1255), True, 'import numpy as np\n'), ((2846, 2873), 'numpy.arange', 'np.arange', (['(0)', 'self.agent_id'], {}), '(0, self.agent_id)\n', (2855, 2873), True, 'import numpy as np\n'), ((2914, 2957), 'numpy.arange', 'np.arange', (['(self.agent_id + 1)', 'self.n_agents'], {}), '(self.agent_id + 1, self.n_agents)\n', (2923, 2957), True, 'import numpy as np\n')] |
# Importing tensorflow
import tensorflow as tf
# importing the data
from tensorflow.examples.tutorials.mnist import input_data
# Importing some more libraries
import matplotlib.pyplot as plt
from numpy import loadtxt
import numpy as np
from pylab import rcParams
from sklearn import preprocessing
import cv2
import scipy
import skimage.measure
import os
from random import randint
import math
'''
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)
X_train = mnist.train.images
X_test = mnist.test.images
'''
'''
matdata = scipy.io.loadmat("./mnist/train_32x32")
temp = matdata['X']
X_train = []
X_train_temp = []
for i in range(temp.shape[3]):
X_train_temp.append(temp[:,:,:,i])
for i in range(temp.shape[3]):
img_gray = (skimage.measure.block_reduce(cv2.cvtColor(X_train_temp[i], cv2.COLOR_BGR2GRAY), (2,2), np.max)).reshape(256)
X_train.append(img_gray)
X_train = np.asarray(X_train)
print(X_train.shape)
'''
# Training hyper-parameters and PSNR history buffers.
patch_size = 17  # side length of the square training patches
mean = 0         # Gaussian noise mean
stddev = 15      # Gaussian noise standard deviation
PSNR_0 = []      # PSNR history for the 'Lena.png' test image (filled by test())
PSNR_1 = []      # PSNR history for the 'Man.png' test image (filled by test())
def psnr(img1, img2):
    """Peak signal-to-noise ratio (dB) between two 8-bit-range images.

    Identical images are reported as 100 dB to avoid log(0).
    """
    mse = np.mean((img1 - img2) ** 2)
    if mse == 0:
        return 100
    peak = 255.0
    return 20 * math.log10(peak / math.sqrt(mse))
def make_patch(img1, patch=None):
    """Cut a random square patch from img1, flipped horizontally half the time.

    :param img1: 2-D image array
    :param patch: side length of the patch; defaults to the module-level
        ``patch_size`` (kept for backward compatibility)
    :return: a (patch, patch) array
    """
    if patch is None:
        patch = patch_size
    rows = img1.shape[0]
    cols = img1.shape[1]
    i = randint(0, rows - patch)
    j = randint(0, cols - patch)
    window = img1[i:i + patch, j:j + patch]
    # Random horizontal flip augments the training data.
    if randint(0, 1) == 1:
        return window
    return np.fliplr(window)
def choose_patch(img1, img2, patch=None):
    """Cut the same random square patch from a pair of aligned images.

    :param img1: first 2-D image array (defines the sampling bounds)
    :param img2: second 2-D image array, assumed at least as large as img1
    :param patch: side length of the patch; defaults to the module-level
        ``patch_size`` (kept for backward compatibility)
    :return: tuple of two (patch, patch) arrays cut at the same position
    """
    if patch is None:
        patch = patch_size
    rows = img1.shape[0]
    cols = img1.shape[1]
    i = randint(0, rows - patch)
    j = randint(0, cols - patch)
    return (img1[i:i + patch, j:j + patch], img2[i:i + patch, j:j + patch])
# Load the BSDS300 training images as grayscale arrays; portrait images are
# transposed so every array is 321x481. Pixels stay in raw 0-255 range here;
# patches are scaled to [0, 1] later, in the training loop.
X_train = []
X_train_noisy = []
file_dir = './BSDS300-images/BSDS300/images/train'
file_names = os.listdir(file_dir)
for name in file_names:
    BGR_img = cv2.imread(file_dir + '/' + name)
    img_gray = cv2.cvtColor(BGR_img, cv2.COLOR_BGR2GRAY)
    if img_gray.shape[0] > img_gray.shape[1]:
        img_gray = np.transpose(img_gray)
    img_gray = img_gray.reshape(321,481)
    #img_gray_noisy = img_gray + noise
    X_train.append(img_gray)
    #X_train_noisy.append(img_gray_noisy)
X_train = np.asarray(X_train)
#X_train_noisy = np.asarray(X_train_noisy)
X_train = X_train
#X_train_noisy = X_train_noisy/255.
# Deciding how many nodes each layer should have
n_nodes_inpl = patch_size*patch_size
n_nodes_hl1 = 711
n_nodes_hl2 = 711
n_nodes_hl3 = 711
n_nodes_outl = patch_size*patch_size
# hidden layers: weight matrices drawn from a standard normal, biases zero
hidden_1_layer_vals = {'weights':tf.Variable(tf.random_normal([n_nodes_inpl,n_nodes_hl1])),'biases':tf.Variable(tf.zeros([n_nodes_hl1])) }
hidden_2_layer_vals = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),'biases':tf.Variable(tf.zeros([n_nodes_hl2])) }
hidden_3_layer_vals = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),'biases':tf.Variable(tf.zeros([n_nodes_hl3])) }
output_layer_vals = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3,n_nodes_outl])),'biases':tf.Variable(tf.zeros([n_nodes_outl])) }
# Placeholder for flattened noisy input patches (patch_size**2 values each).
input_layer = tf.placeholder('float', [None, patch_size*patch_size])
# Three sigmoid hidden layers; the output layer is linear (no activation).
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(input_layer,hidden_1_layer_vals['weights']),hidden_1_layer_vals['biases']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1,hidden_2_layer_vals['weights']), hidden_2_layer_vals['biases']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2,hidden_3_layer_vals['weights']), hidden_3_layer_vals['biases']))
output_layer = (tf.add(tf.matmul(layer_3,output_layer_vals['weights']),output_layer_vals['biases']))
# Placeholder for the corresponding clean target patches.
output_true = tf.placeholder('float', [None, patch_size*patch_size])
# Cost Function: mean squared error between denoised and clean patches.
meansq = tf.reduce_mean(tf.square(output_layer - output_true))
# Optimizer
learn_rate = 0.001
optimizer = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(meansq)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=learn_rate).minimize(meansq)
def test():
    """Denoise the two fixed test images with the current network weights.

    Reads 'Lena.png' and 'Man.png', adds Gaussian noise, denoises them
    patch-by-patch through the module-level TF session/graph, shows
    original/noisy/filtered triptychs, and appends the resulting PSNRs to
    the module-level PSNR_0 / PSNR_1 lists.

    Assumes both images are 512x512 grayscale after conversion — TODO confirm.
    """
    BGR_img = cv2.imread('Lena.png')
    BGR_img1 = cv2.imread('Man.png')
    img_gray = cv2.cvtColor(BGR_img, cv2.COLOR_BGR2GRAY)
    img_gray1 = cv2.cvtColor(BGR_img1, cv2.COLOR_BGR2GRAY)
    mean = 0
    sigma_noise = 15
    # The same Gaussian noise field is applied to both images.
    gaussian = np.random.normal(mean, sigma_noise, img_gray.shape)
    noisy_img = np.zeros(img_gray.shape, np.float32)
    noisy_img[:,:] = img_gray[:,:] + gaussian
    noisy_img1 = np.zeros(img_gray1.shape, np.float32)
    noisy_img1[:,:] = img_gray1[:,:] + gaussian
    # NOTE(review): zero-width padding — np.pad with ((0,0),(0,0)) is just a copy.
    npad_gray = [(0,0),(0,0)]
    img_gray_padded = np.pad(noisy_img, npad_gray, mode='constant')
    img_gray_padded = img_gray_padded/255.
    img_gray_padded1 = np.pad(noisy_img1, npad_gray, mode='constant')
    img_gray_padded1 = img_gray_padded1/255.
    # STRIDE = 3
    # Denoise overlapping patches on a 3-pixel stride: 166 steps of 3 plus a
    # 17-pixel window exactly cover 3*165 + 17 = 512 pixels in each axis.
    avg_cnt = np.zeros((512,512))
    counter = np.ones((patch_size,patch_size))
    output_full = np.zeros((512,512))
    output_full1 = np.zeros((512,512))
    for i in range(166):
        for j in range(166):
            output_denoised_patch = sess.run(output_layer, feed_dict={input_layer:[img_gray_padded[3*i:3*i+patch_size,3*j:3*j+patch_size].reshape(patch_size*patch_size)]}).reshape(patch_size,patch_size)
            output_denoised_patch1 = sess.run(output_layer, feed_dict={input_layer:[img_gray_padded1[3*i:3*i+patch_size,3*j:3*j+patch_size].reshape(patch_size*patch_size)]}).reshape(patch_size,patch_size)
            output_full[3*i:3*i+patch_size,3*j:3*j+patch_size] += output_denoised_patch
            output_full1[3*i:3*i+patch_size,3*j:3*j+patch_size] += output_denoised_patch1
            avg_cnt[3*i:3*i+patch_size,3*j:3*j+patch_size] += counter
    # Average the overlapping patch outputs and rescale back to [0, 255].
    output_full_avg = (np.divide(output_full,avg_cnt))*255.
    output_full_avg1 = (np.divide(output_full1,avg_cnt))*255.
    rcParams['figure.figsize'] = 20,20
    # Triptych for the first test image: original / noisy / filtered.
    plt.subplot(1, 3, 1)
    plt.imshow(img_gray, cmap=plt.cm.gray),plt.title('Original')
    plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 2)
    plt.imshow(noisy_img, cmap=plt.cm.gray),plt.title('Noise Added')
    plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 3)
    plt.imshow(output_full_avg, cmap=plt.cm.gray),plt.title('Filtered')
    plt.xticks([]), plt.yticks([])
    plt.show()
    # Same triptych for the second test image.
    plt.subplot(1, 3, 1)
    plt.imshow(img_gray1, cmap=plt.cm.gray),plt.title('Original')
    plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 2)
    plt.imshow(noisy_img1, cmap=plt.cm.gray),plt.title('Noise Added')
    plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 3)
    plt.imshow(output_full_avg1, cmap=plt.cm.gray),plt.title('Filtered')
    plt.xticks([]), plt.yticks([])
    plt.show()
    # Record PSNR of the filtered result against each clean original.
    PSNR0 = psnr(img_gray,output_full_avg)
    PSNR1 = psnr(img_gray1,output_full_avg1)
    print(PSNR0)
    print(PSNR1)
    PSNR_0.append(PSNR0)
    PSNR_1.append(PSNR1)
# TF1-style setup: create the session and initialise all graph variables.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
batch_size = 300
hm_epochs = 250000
tot_images = X_train.shape[0]
for epoch in range(1,hm_epochs+1):
    epoch_loss = 0
    for i in range(int(tot_images/batch_size)):
        lepoch_x = []
        lepoch_x_n = []
        epoch_x1 = X_train[ i*batch_size : (i+1)*batch_size ]
        # NOTE(review): this inner loop reuses the name `i`, shadowing the
        # batch index above; harmless (the slice is taken first) but confusing.
        for i in range(batch_size):
            # One random (possibly flipped) patch per image: clean target
            # plus its noisy counterpart, both scaled to [0, 1].
            patched_img = make_patch(epoch_x1[i])
            noise = np.random.normal(mean, stddev, patched_img.shape)
            noised = patched_img + noise
            patched_img = patched_img/255
            noised = noised/255.
            lepoch_x.append(patched_img.reshape(patch_size*patch_size))
            lepoch_x_n.append(noised.reshape(patch_size*patch_size))
        epoch_x = np.asarray(lepoch_x)
        epoch_x_n = np.asarray(lepoch_x_n)
        # One optimizer step: noisy patches in, clean patches as targets.
        _, c = sess.run([optimizer, meansq],\
               feed_dict={input_layer: epoch_x_n, \
               output_true: epoch_x})
        epoch_loss += c
    # Every 5000 epochs, evaluate on the two held-out test images.
    if epoch%5000 == 0:
        print('EPOCH ' + str(epoch))
        test()
    '''
    patched_img = make_patch(X_train[8])
    noised = patched_img + np.random.normal(mean, stddev, patched_img.shape)
    max1 = np.max(np.max(patched_img))
    max2 = np.max(np.max(noised))
    patched_img = patched_img/255.
    noised = noised/255.
    a_i = patched_img.reshape(patch_size*patch_size)
    any_image = noised.reshape(patch_size*patch_size)
    output_any_image = sess.run(output_layer,\
                   feed_dict={input_layer:[any_image]})
    rcParams['figure.figsize'] = 5,5
    # Noisy Image
    plt.subplot(1, 3, 1)
    plt.imshow(any_image.reshape(patch_size,patch_size), cmap='Greys')
    plt.axis('off')
    # Ground Truth
    plt.subplot(1, 3, 2)
    plt.imshow(a_i.reshape(patch_size,patch_size), cmap='Greys')
    plt.axis('off')
    # Denoised Image
    plt.subplot(1, 3, 3)
    plt.imshow(output_any_image.reshape(patch_size,patch_size), cmap='Greys')
    plt.axis('off')
    plt.show()
    '''
    if epoch%100 == 0 :
        print('Epoch', epoch, '/', hm_epochs, 'loss:',epoch_loss)
# Plot the PSNR histories collected by test() for both test images.
x = range(5000,5000*(len(PSNR_0)+1),5000)
y0 = PSNR_0
y1 = PSNR_1
rcParams['figure.figsize'] = 8,8
rcParams.update({'font.size': 12})
plt.plot(x,y0,'b',label='Lena',marker='^')
plt.plot(x,y1,'r',label='Man',marker='D')
plt.xlabel('Epochs')
plt.ylabel('PSNR(dB)')
plt.title('MLP(multilayer perceptron)')
plt.legend(loc='upper right')
plt.show()
| [
"matplotlib.pyplot.ylabel",
"math.sqrt",
"numpy.divide",
"matplotlib.pyplot.imshow",
"numpy.mean",
"os.listdir",
"tensorflow.random_normal",
"tensorflow.Session",
"tensorflow.placeholder",
"numpy.asarray",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"t... | [((1925, 1945), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (1935, 1945), False, 'import os\n'), ((2342, 2361), 'numpy.asarray', 'np.asarray', (['X_train'], {}), '(X_train)\n', (2352, 2361), True, 'import numpy as np\n'), ((3257, 3313), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, patch_size * patch_size]'], {}), "('float', [None, patch_size * patch_size])\n", (3271, 3313), True, 'import tensorflow as tf\n'), ((3779, 3835), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, patch_size * patch_size]'], {}), "('float', [None, patch_size * patch_size])\n", (3793, 3835), True, 'import tensorflow as tf\n'), ((7104, 7137), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7135, 7137), True, 'import tensorflow as tf\n'), ((7146, 7158), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7156, 7158), True, 'import tensorflow as tf\n'), ((9622, 9656), 'pylab.rcParams.update', 'rcParams.update', (["{'font.size': 12}"], {}), "({'font.size': 12})\n", (9637, 9656), False, 'from pylab import rcParams\n'), ((9660, 9706), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y0', '"""b"""'], {'label': '"""Lena"""', 'marker': '"""^"""'}), "(x, y0, 'b', label='Lena', marker='^')\n", (9668, 9706), True, 'import matplotlib.pyplot as plt\n'), ((9704, 9749), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1', '"""r"""'], {'label': '"""Man"""', 'marker': '"""D"""'}), "(x, y1, 'r', label='Man', marker='D')\n", (9712, 9749), True, 'import matplotlib.pyplot as plt\n'), ((9747, 9767), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (9757, 9767), True, 'import matplotlib.pyplot as plt\n'), ((9769, 9791), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PSNR(dB)"""'], {}), "('PSNR(dB)')\n", (9779, 9791), True, 'import matplotlib.pyplot as plt\n'), ((9793, 9832), 'matplotlib.pyplot.title', 'plt.title', (['"""MLP(multilayer perceptron)"""'], {}), 
"('MLP(multilayer perceptron)')\n", (9802, 9832), True, 'import matplotlib.pyplot as plt\n'), ((9834, 9863), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (9844, 9863), True, 'import matplotlib.pyplot as plt\n'), ((9865, 9875), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9873, 9875), True, 'import matplotlib.pyplot as plt\n'), ((1090, 1117), 'numpy.mean', 'np.mean', (['((img1 - img2) ** 2)'], {}), '((img1 - img2) ** 2)\n', (1097, 1117), True, 'import numpy as np\n'), ((1348, 1372), 'random.randint', 'randint', (['(0)', '(sero - patch)'], {}), '(0, sero - patch)\n', (1355, 1372), False, 'from random import randint\n'), ((1379, 1403), 'random.randint', 'randint', (['(0)', '(garo - patch)'], {}), '(0, garo - patch)\n', (1386, 1403), False, 'from random import randint\n'), ((1421, 1434), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1428, 1434), False, 'from random import randint\n'), ((1691, 1715), 'random.randint', 'randint', (['(0)', '(sero - patch)'], {}), '(0, sero - patch)\n', (1698, 1715), False, 'from random import randint\n'), ((1722, 1746), 'random.randint', 'randint', (['(0)', '(garo - patch)'], {}), '(0, garo - patch)\n', (1729, 1746), False, 'from random import randint\n'), ((1986, 2019), 'cv2.imread', 'cv2.imread', (["(file_dir + '/' + name)"], {}), "(file_dir + '/' + name)\n", (1996, 2019), False, 'import cv2\n'), ((2036, 2077), 'cv2.cvtColor', 'cv2.cvtColor', (['BGR_img', 'cv2.COLOR_BGR2GRAY'], {}), '(BGR_img, cv2.COLOR_BGR2GRAY)\n', (2048, 2077), False, 'import cv2\n'), ((3684, 3732), 'tensorflow.matmul', 'tf.matmul', (['layer_3', "output_layer_vals['weights']"], {}), "(layer_3, output_layer_vals['weights'])\n", (3693, 3732), True, 'import tensorflow as tf\n'), ((3876, 3913), 'tensorflow.square', 'tf.square', (['(output_layer - output_true)'], {}), '(output_layer - output_true)\n', (3885, 3913), True, 'import tensorflow as tf\n'), ((4148, 4170), 'cv2.imread', 
'cv2.imread', (['"""Lena.png"""'], {}), "('Lena.png')\n", (4158, 4170), False, 'import cv2\n'), ((4187, 4208), 'cv2.imread', 'cv2.imread', (['"""Man.png"""'], {}), "('Man.png')\n", (4197, 4208), False, 'import cv2\n'), ((4231, 4272), 'cv2.cvtColor', 'cv2.cvtColor', (['BGR_img', 'cv2.COLOR_BGR2GRAY'], {}), '(BGR_img, cv2.COLOR_BGR2GRAY)\n', (4243, 4272), False, 'import cv2\n'), ((4290, 4332), 'cv2.cvtColor', 'cv2.cvtColor', (['BGR_img1', 'cv2.COLOR_BGR2GRAY'], {}), '(BGR_img1, cv2.COLOR_BGR2GRAY)\n', (4302, 4332), False, 'import cv2\n'), ((4392, 4443), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma_noise', 'img_gray.shape'], {}), '(mean, sigma_noise, img_gray.shape)\n', (4408, 4443), True, 'import numpy as np\n'), ((4468, 4504), 'numpy.zeros', 'np.zeros', (['img_gray.shape', 'np.float32'], {}), '(img_gray.shape, np.float32)\n', (4476, 4504), True, 'import numpy as np\n'), ((4570, 4607), 'numpy.zeros', 'np.zeros', (['img_gray1.shape', 'np.float32'], {}), '(img_gray1.shape, np.float32)\n', (4578, 4607), True, 'import numpy as np\n'), ((4717, 4762), 'numpy.pad', 'np.pad', (['noisy_img', 'npad_gray'], {'mode': '"""constant"""'}), "(noisy_img, npad_gray, mode='constant')\n", (4723, 4762), True, 'import numpy as np\n'), ((4835, 4881), 'numpy.pad', 'np.pad', (['noisy_img1', 'npad_gray'], {'mode': '"""constant"""'}), "(noisy_img1, npad_gray, mode='constant')\n", (4841, 4881), True, 'import numpy as np\n'), ((4977, 4997), 'numpy.zeros', 'np.zeros', (['(512, 512)'], {}), '((512, 512))\n', (4985, 4997), True, 'import numpy as np\n'), ((5012, 5045), 'numpy.ones', 'np.ones', (['(patch_size, patch_size)'], {}), '((patch_size, patch_size))\n', (5019, 5045), True, 'import numpy as np\n'), ((5064, 5084), 'numpy.zeros', 'np.zeros', (['(512, 512)'], {}), '((512, 512))\n', (5072, 5084), True, 'import numpy as np\n'), ((5104, 5124), 'numpy.zeros', 'np.zeros', (['(512, 512)'], {}), '((512, 512))\n', (5112, 5124), True, 'import numpy as np\n'), ((6036, 6056), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (6047, 6056), True, 'import matplotlib.pyplot as plt\n'), ((6170, 6190), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (6181, 6190), True, 'import matplotlib.pyplot as plt\n'), ((6308, 6328), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (6319, 6328), True, 'import matplotlib.pyplot as plt\n'), ((6449, 6459), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6457, 6459), True, 'import matplotlib.pyplot as plt\n'), ((6471, 6491), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (6482, 6491), True, 'import matplotlib.pyplot as plt\n'), ((6606, 6626), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (6617, 6626), True, 'import matplotlib.pyplot as plt\n'), ((6745, 6765), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (6756, 6765), True, 'import matplotlib.pyplot as plt\n'), ((6887, 6897), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6895, 6897), True, 'import matplotlib.pyplot as plt\n'), ((1532, 1573), 'numpy.fliplr', 'np.fliplr', (['img1[i:i + patch, j:j + patch]'], {}), '(img1[i:i + patch, j:j + patch])\n', (1541, 1573), True, 'import numpy as np\n'), ((2145, 2167), 'numpy.transpose', 'np.transpose', (['img_gray'], {}), '(img_gray)\n', (2157, 2167), True, 'import numpy as np\n'), ((2713, 2758), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_inpl, n_nodes_hl1]'], {}), '([n_nodes_inpl, n_nodes_hl1])\n', (2729, 2758), True, 'import tensorflow as tf\n'), ((2780, 2803), 'tensorflow.zeros', 'tf.zeros', (['[n_nodes_hl1]'], {}), '([n_nodes_hl1])\n', (2788, 2803), True, 'import tensorflow as tf\n'), ((2854, 2898), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_hl1, n_nodes_hl2]'], {}), '([n_nodes_hl1, n_nodes_hl2])\n', (2870, 2898), True, 
'import tensorflow as tf\n'), ((2921, 2944), 'tensorflow.zeros', 'tf.zeros', (['[n_nodes_hl2]'], {}), '([n_nodes_hl2])\n', (2929, 2944), True, 'import tensorflow as tf\n'), ((2995, 3039), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_hl2, n_nodes_hl3]'], {}), '([n_nodes_hl2, n_nodes_hl3])\n', (3011, 3039), True, 'import tensorflow as tf\n'), ((3062, 3085), 'tensorflow.zeros', 'tf.zeros', (['[n_nodes_hl3]'], {}), '([n_nodes_hl3])\n', (3070, 3085), True, 'import tensorflow as tf\n'), ((3134, 3179), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_hl3, n_nodes_outl]'], {}), '([n_nodes_hl3, n_nodes_outl])\n', (3150, 3179), True, 'import tensorflow as tf\n'), ((3201, 3225), 'tensorflow.zeros', 'tf.zeros', (['[n_nodes_outl]'], {}), '([n_nodes_outl])\n', (3209, 3225), True, 'import tensorflow as tf\n'), ((3344, 3398), 'tensorflow.matmul', 'tf.matmul', (['input_layer', "hidden_1_layer_vals['weights']"], {}), "(input_layer, hidden_1_layer_vals['weights'])\n", (3353, 3398), True, 'import tensorflow as tf\n'), ((3462, 3512), 'tensorflow.matmul', 'tf.matmul', (['layer_1', "hidden_2_layer_vals['weights']"], {}), "(layer_1, hidden_2_layer_vals['weights'])\n", (3471, 3512), True, 'import tensorflow as tf\n'), ((3577, 3627), 'tensorflow.matmul', 'tf.matmul', (['layer_2', "hidden_3_layer_vals['weights']"], {}), "(layer_2, hidden_3_layer_vals['weights'])\n", (3586, 3627), True, 'import tensorflow as tf\n'), ((3961, 4009), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learn_rate'}), '(learning_rate=learn_rate)\n', (3983, 4009), True, 'import tensorflow as tf\n'), ((5879, 5910), 'numpy.divide', 'np.divide', (['output_full', 'avg_cnt'], {}), '(output_full, avg_cnt)\n', (5888, 5910), True, 'import numpy as np\n'), ((5941, 5973), 'numpy.divide', 'np.divide', (['output_full1', 'avg_cnt'], {}), '(output_full1, avg_cnt)\n', (5950, 5973), True, 'import numpy as np\n'), ((6062, 6100), 'matplotlib.pyplot.imshow', 'plt.imshow', 
(['img_gray'], {'cmap': 'plt.cm.gray'}), '(img_gray, cmap=plt.cm.gray)\n', (6072, 6100), True, 'import matplotlib.pyplot as plt\n'), ((6101, 6122), 'matplotlib.pyplot.title', 'plt.title', (['"""Original"""'], {}), "('Original')\n", (6110, 6122), True, 'import matplotlib.pyplot as plt\n'), ((6128, 6142), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6138, 6142), True, 'import matplotlib.pyplot as plt\n'), ((6144, 6158), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6154, 6158), True, 'import matplotlib.pyplot as plt\n'), ((6196, 6235), 'matplotlib.pyplot.imshow', 'plt.imshow', (['noisy_img'], {'cmap': 'plt.cm.gray'}), '(noisy_img, cmap=plt.cm.gray)\n', (6206, 6235), True, 'import matplotlib.pyplot as plt\n'), ((6236, 6260), 'matplotlib.pyplot.title', 'plt.title', (['"""Noise Added"""'], {}), "('Noise Added')\n", (6245, 6260), True, 'import matplotlib.pyplot as plt\n'), ((6266, 6280), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6276, 6280), True, 'import matplotlib.pyplot as plt\n'), ((6282, 6296), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6292, 6296), True, 'import matplotlib.pyplot as plt\n'), ((6334, 6379), 'matplotlib.pyplot.imshow', 'plt.imshow', (['output_full_avg'], {'cmap': 'plt.cm.gray'}), '(output_full_avg, cmap=plt.cm.gray)\n', (6344, 6379), True, 'import matplotlib.pyplot as plt\n'), ((6380, 6401), 'matplotlib.pyplot.title', 'plt.title', (['"""Filtered"""'], {}), "('Filtered')\n", (6389, 6401), True, 'import matplotlib.pyplot as plt\n'), ((6407, 6421), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6417, 6421), True, 'import matplotlib.pyplot as plt\n'), ((6423, 6437), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6433, 6437), True, 'import matplotlib.pyplot as plt\n'), ((6497, 6536), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_gray1'], {'cmap': 'plt.cm.gray'}), '(img_gray1, cmap=plt.cm.gray)\n', (6507, 6536), True, 
'import matplotlib.pyplot as plt\n'), ((6537, 6558), 'matplotlib.pyplot.title', 'plt.title', (['"""Original"""'], {}), "('Original')\n", (6546, 6558), True, 'import matplotlib.pyplot as plt\n'), ((6564, 6578), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6574, 6578), True, 'import matplotlib.pyplot as plt\n'), ((6580, 6594), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6590, 6594), True, 'import matplotlib.pyplot as plt\n'), ((6632, 6672), 'matplotlib.pyplot.imshow', 'plt.imshow', (['noisy_img1'], {'cmap': 'plt.cm.gray'}), '(noisy_img1, cmap=plt.cm.gray)\n', (6642, 6672), True, 'import matplotlib.pyplot as plt\n'), ((6673, 6697), 'matplotlib.pyplot.title', 'plt.title', (['"""Noise Added"""'], {}), "('Noise Added')\n", (6682, 6697), True, 'import matplotlib.pyplot as plt\n'), ((6703, 6717), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6713, 6717), True, 'import matplotlib.pyplot as plt\n'), ((6719, 6733), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6729, 6733), True, 'import matplotlib.pyplot as plt\n'), ((6771, 6817), 'matplotlib.pyplot.imshow', 'plt.imshow', (['output_full_avg1'], {'cmap': 'plt.cm.gray'}), '(output_full_avg1, cmap=plt.cm.gray)\n', (6781, 6817), True, 'import matplotlib.pyplot as plt\n'), ((6818, 6839), 'matplotlib.pyplot.title', 'plt.title', (['"""Filtered"""'], {}), "('Filtered')\n", (6827, 6839), True, 'import matplotlib.pyplot as plt\n'), ((6845, 6859), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6855, 6859), True, 'import matplotlib.pyplot as plt\n'), ((6861, 6875), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6871, 6875), True, 'import matplotlib.pyplot as plt\n'), ((7961, 7981), 'numpy.asarray', 'np.asarray', (['lepoch_x'], {}), '(lepoch_x)\n', (7971, 7981), True, 'import numpy as np\n'), ((8003, 8025), 'numpy.asarray', 'np.asarray', (['lepoch_x_n'], {}), '(lepoch_x_n)\n', (8013, 8025), True, 'import numpy as 
np\n'), ((7596, 7645), 'numpy.random.normal', 'np.random.normal', (['mean', 'stddev', 'patched_img.shape'], {}), '(mean, stddev, patched_img.shape)\n', (7612, 7645), True, 'import numpy as np\n'), ((1221, 1235), 'math.sqrt', 'math.sqrt', (['mse'], {}), '(mse)\n', (1230, 1235), False, 'import math\n')] |
# PyVot Python Variational Optimal Transportation
# Author: <NAME> <<EMAIL>>
# Date: April 28th 2020
# Licence: MIT
"""
===============================================================
Area Preserving Map through Optimal Transportation
===============================================================
This demo shows R^n -> R^n area preserving mapping through optimal transportation.
The total area is assumed to be one. We randomly sample a square and
count the samples to approximate the area. In this way, we avoid computing
convex hulls.
For now, PyVot assumes that the range in each dimension is (-1,1).
"""
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from vot_numpy import VOTAP
import utils
np.random.seed(0)
mean = [0, 0]
cov = [[.08, 0], [0, .08]]
N = 50
data_backup = np.random.multivariate_normal(mean, cov, N).clip(-0.99, 0.99)
# ----------------------------------- #
# ------------ Example 1 ------------ #
# ----------------------------------- #
# ----- set up vot ------ #
data = data_backup.copy()
vot = VOTAP(data, sampling='square', ratio=200, verbose=True)
# ----- map ------ #
tick = time.time()
# vot.map(plot_filename='area.gif', max_iter=300)
idx, _ = vot.map(max_iter=3000)
tock = time.time()
print('total time: {0:.4f}'.format(tock-tick))
# Note: Area preserving usually requires a pre-defined boundary.
# That is beyond the scope of the demo. Missing the boundary condition,
# this area-preserving demo might not produce accurate maps near the boundary.
# This can be visualized by drawing the Voronoi diagram or Delaunay triangulation
# and one may see slight intersection near the boundary centroids.
# ----- plot before ----- #
plt.figure(figsize=(12, 8))
plt.subplot(231)
utils.scatter_otsamples(vot.data_p_original, vot.x, title='before', )
# ------ plot map ------- #
fig232 = plt.subplot(232)
utils.plot_otmap(vot.data_p_original, vot.y, fig232, title='vot map', facecolor_after='none')
# ------ plot after ----- #
ce = np.array(plt.get_cmap('viridis')(idx / (N - 1)))
plt.subplot(233)
utils.scatter_otsamples(vot.y, vot.x, color_x=ce, title='after')
# ----------------------------------- #
# ------------ Example 2 ------------ #
# ----------------------------------- #
# ----- set up vot ------ #
data = data_backup.copy()
vot2 = VOTAP(data, sampling='circle', ratio=200, verbose=True)
# ----- map ------ #
tick = time.time()
# vot.map(plot_filename='area.gif', max_iter=300)
idx, _ = vot2.map(max_iter=3000)
tock = time.time()
print('total time: {0:.4f}'.format(tock-tick))
# ----- plot before ----- #
plt.subplot(234)
utils.scatter_otsamples(vot2.data_p_original, vot2.x, title='before')
# ------ plot map ------- #
fig235 = plt.subplot(235)
utils.plot_otmap(vot2.data_p_original, vot2.y, fig235, title='vot map', facecolor_after='none')
# ------ plot after ----- #
cx = np.array(plt.get_cmap('viridis')(idx / (N - 1)))
plt.subplot(236)
utils.scatter_otsamples(vot2.y, vot2.x, color_x=cx, title='after')
# ---- plot and save ---- #
plt.tight_layout(pad=1.0, w_pad=1.5, h_pad=0.5)
plt.savefig("area_numpy.png")
# plt.show()
| [
"vot_numpy.VOTAP",
"matplotlib.pyplot.savefig",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"matplotlib.pyplot.tight_layout",
"utils.scatter_otsamples",
"os.path.abspath",
"time.time",
"utils.plot_otmap",
"matplotlib.pyplot.... | [((826, 843), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (840, 843), True, 'import numpy as np\n'), ((1151, 1206), 'vot_numpy.VOTAP', 'VOTAP', (['data'], {'sampling': '"""square"""', 'ratio': '(200)', 'verbose': '(True)'}), "(data, sampling='square', ratio=200, verbose=True)\n", (1156, 1206), False, 'from vot_numpy import VOTAP\n'), ((1236, 1247), 'time.time', 'time.time', ([], {}), '()\n', (1245, 1247), False, 'import time\n'), ((1337, 1348), 'time.time', 'time.time', ([], {}), '()\n', (1346, 1348), False, 'import time\n'), ((1795, 1822), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (1805, 1822), True, 'import matplotlib.pyplot as plt\n'), ((1823, 1839), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (1834, 1839), True, 'import matplotlib.pyplot as plt\n'), ((1840, 1907), 'utils.scatter_otsamples', 'utils.scatter_otsamples', (['vot.data_p_original', 'vot.x'], {'title': '"""before"""'}), "(vot.data_p_original, vot.x, title='before')\n", (1863, 1907), False, 'import utils\n'), ((1948, 1964), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (1959, 1964), True, 'import matplotlib.pyplot as plt\n'), ((1965, 2062), 'utils.plot_otmap', 'utils.plot_otmap', (['vot.data_p_original', 'vot.y', 'fig232'], {'title': '"""vot map"""', 'facecolor_after': '"""none"""'}), "(vot.data_p_original, vot.y, fig232, title='vot map',\n facecolor_after='none')\n", (1981, 2062), False, 'import utils\n'), ((2142, 2158), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (2153, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2159, 2223), 'utils.scatter_otsamples', 'utils.scatter_otsamples', (['vot.y', 'vot.x'], {'color_x': 'ce', 'title': '"""after"""'}), "(vot.y, vot.x, color_x=ce, title='after')\n", (2182, 2223), False, 'import utils\n'), ((2408, 2463), 'vot_numpy.VOTAP', 'VOTAP', (['data'], {'sampling': '"""circle"""', 
'ratio': '(200)', 'verbose': '(True)'}), "(data, sampling='circle', ratio=200, verbose=True)\n", (2413, 2463), False, 'from vot_numpy import VOTAP\n'), ((2493, 2504), 'time.time', 'time.time', ([], {}), '()\n', (2502, 2504), False, 'import time\n'), ((2595, 2606), 'time.time', 'time.time', ([], {}), '()\n', (2604, 2606), False, 'import time\n'), ((2683, 2699), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (2694, 2699), True, 'import matplotlib.pyplot as plt\n'), ((2700, 2769), 'utils.scatter_otsamples', 'utils.scatter_otsamples', (['vot2.data_p_original', 'vot2.x'], {'title': '"""before"""'}), "(vot2.data_p_original, vot2.x, title='before')\n", (2723, 2769), False, 'import utils\n'), ((2808, 2824), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (2819, 2824), True, 'import matplotlib.pyplot as plt\n'), ((2825, 2924), 'utils.plot_otmap', 'utils.plot_otmap', (['vot2.data_p_original', 'vot2.y', 'fig235'], {'title': '"""vot map"""', 'facecolor_after': '"""none"""'}), "(vot2.data_p_original, vot2.y, fig235, title='vot map',\n facecolor_after='none')\n", (2841, 2924), False, 'import utils\n'), ((3004, 3020), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (3015, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3021, 3087), 'utils.scatter_otsamples', 'utils.scatter_otsamples', (['vot2.y', 'vot2.x'], {'color_x': 'cx', 'title': '"""after"""'}), "(vot2.y, vot2.x, color_x=cx, title='after')\n", (3044, 3087), False, 'import utils\n'), ((3117, 3164), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(1.0)', 'w_pad': '(1.5)', 'h_pad': '(0.5)'}), '(pad=1.0, w_pad=1.5, h_pad=0.5)\n', (3133, 3164), True, 'import matplotlib.pyplot as plt\n'), ((3165, 3194), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""area_numpy.png"""'], {}), "('area_numpy.png')\n", (3176, 3194), True, 'import matplotlib.pyplot as plt\n'), ((907, 950), 'numpy.random.multivariate_normal', 
'np.random.multivariate_normal', (['mean', 'cov', 'N'], {}), '(mean, cov, N)\n', (936, 950), True, 'import numpy as np\n'), ((2102, 2125), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (2114, 2125), True, 'import matplotlib.pyplot as plt\n'), ((2964, 2987), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (2976, 2987), True, 'import matplotlib.pyplot as plt\n'), ((754, 779), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (769, 779), False, 'import os\n')] |
"""
"""
from cddm.map import k_select
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
#diffusion constant
from examples.paper.one_component.conf import D, DATA_PATH
import os.path as path
# If True, each k-resolved correlation curve is plotted together with its fit.
SHOW_FITS = True
# default matplotlib color-cycle names; reused cyclically across k values
colors = ["C{}".format(i) for i in range(10)]
def _g1(x,f,a):
"""g1: exponential decay"""
return a * np.exp(-f*x)
def fit_data(x, data, title = "", ax = None):
    """Fit the exponential model ``_g1`` to cross-correlation data.

    Parameters
    ----------
    x : array
        Lag times.
    data : iterable of (k, y)
        Wavenumber / correlation-data pairs (e.g. output of ``k_select``).
    title : str
        Title for the optional diagnostic plot.
    ax : matplotlib axes or None
        If given, data points and fitted curves are drawn on these axes.

    Yields
    ------
    (k, popt, pcov)
        For every k where the fit converged.
    """
    # the previous fit result is reused as the initial guess for the next k
    popt = [0.01, 1]
    for i, (k, y) in enumerate(data):
        try:
            popt, pcov = curve_fit(_g1, x, y, p0 = popt)
        except (RuntimeError, ValueError):
            # curve_fit did not converge (or the data contained invalid
            # values) for this k -- skip it instead of aborting.
            # (Previously a bare ``except`` silently swallowed *all*
            # exceptions, including KeyboardInterrupt and plotting errors.)
            continue
        if ax is not None:
            color = colors[i % len(colors)]
            ax.semilogx(x, y, "o", color = color, fillstyle = 'none')
            ax.semilogx(x, _g1(x, *popt), color = color)
        yield k, popt, pcov
    if ax is not None:
        ax.set_title(title)
def _lin(x,k):
return k*x
def fit(x, y, title = "data"):
    """Fit exponential decays to all k-selected correlation curves.

    Parameters
    ----------
    x : array
        Lag times.
    y : array
        Correlation data with time in the last axis (as passed to
        ``k_select``).
    title : str
        Title used for the optional diagnostic plot (shown if SHOW_FITS).

    Returns
    -------
    (k_out, p_out, c_out) : tuple of ndarrays
        Wavenumbers, fitted (rate, amplitude) parameters and the 2x2
        covariance matrices, for every k where the fit converged.
    """
    k_data = k_select(y, angle = 0, sector = 180, kstep = 1)
    if SHOW_FITS:
        fig = plt.figure()
        ax = fig.subplots()
    else:
        ax = None
    results = list(fit_data(x, k_data, title = title, ax = ax))
    n = len(results)
    k_out = np.empty(shape = (n,), dtype = float)
    p_out = np.empty(shape = (n, 2), dtype = float)
    c_out = np.empty(shape = (n, 2, 2), dtype = float)
    # Fix: the original converted ``results`` with np.array() first; building
    # an object array from ragged (scalar, (2,), (2,2)) tuples is rejected by
    # NumPy >= 1.24 and was never needed to iterate the tuples below.
    for i, (k, p, c) in enumerate(results):
        k_out[i] = k
        p_out[i, :] = p
        c_out[i, :, :] = c
    return k_out, p_out, c_out
# Overlay the fitted decay rates 1/tau against q^2 for every normalization
# scheme and compare the implied diffusion constant with the true value D.
fig = plt.figure()
ax = fig.subplots()
norm = 6
#for i,label in enumerate(("standard", "random", "dual")):
for i,label in enumerate(("standard","fast","dual","random")):
    # load lag times and normalized correlation data for this scheme
    x = np.load(path.join(DATA_PATH, "corr_{}_t.npy".format(label)))
    y = np.load(path.join(DATA_PATH, "corr_{}_data_norm{}.npy".format(label, norm)))
    # keep only lag times where at least one k has non-NaN data
    mask = np.isnan(y)
    mask = np.logical_not(np.all(mask, axis = tuple(range(mask.ndim-1))))
    x,y = x[mask], y[...,mask]
    # fit exponential decays per k; the zero-lag point is skipped
    k,p,c = fit(x[1:],y[...,1:], title = label)
    f = p[:,0]
    ax.plot((k**2),f,"o", color = colors[i],fillstyle='none', label = "{}".format(label))
    # linear fit f = D*q^2, weighted with the std. error of the fitted rate
    x = k**2
    popt,pcov = curve_fit(_lin, x, f, sigma = c[:,0,0]**0.5)
    ax.plot(x,_lin(x,*popt), "--", color = colors[i], label = "fit {}".format(label))
    # element-wise relative one-sigma errors; only err[0] (the slope) is used
    err = np.sqrt(np.diag(pcov))/popt
    print("Measured D (norm = {}): {:.3e} (1 +- {:.4f})".format(label, popt[0], err[0]))
# reference line using the true diffusion constant D; note that x still
# holds the q^2 values of the *last* scheme from the loop above
ax.plot(x,_lin(x,D), "k-", label = "true")
print("True D: {:.3e}".format(D))
ax.set_xlabel("$q^2$")
ax.set_ylabel(r"$1/\tau$")
ax.legend()
plt.show()
| [
"scipy.optimize.curve_fit",
"numpy.diag",
"cddm.map.k_select",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.isnan",
"matplotlib.pyplot.show"
] | [((1613, 1625), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1623, 1625), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2664, 2666), True, 'import matplotlib.pyplot as plt\n'), ((1024, 1065), 'cddm.map.k_select', 'k_select', (['y'], {'angle': '(0)', 'sector': '(180)', 'kstep': '(1)'}), '(y, angle=0, sector=180, kstep=1)\n', (1032, 1065), False, 'from cddm.map import k_select\n'), ((1438, 1455), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (1446, 1455), True, 'import numpy as np\n'), ((1950, 1961), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (1958, 1961), True, 'import numpy as np\n'), ((2257, 2303), 'scipy.optimize.curve_fit', 'curve_fit', (['_lin', 'x', 'f'], {'sigma': '(c[:, 0, 0] ** 0.5)'}), '(_lin, x, f, sigma=c[:, 0, 0] ** 0.5)\n', (2266, 2303), False, 'from scipy.optimize import curve_fit\n'), ((359, 373), 'numpy.exp', 'np.exp', (['(-f * x)'], {}), '(-f * x)\n', (365, 373), True, 'import numpy as np\n'), ((1104, 1116), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1114, 1116), True, 'import matplotlib.pyplot as plt\n'), ((603, 632), 'scipy.optimize.curve_fit', 'curve_fit', (['_g1', 'x', 'y'], {'p0': 'popt'}), '(_g1, x, y, p0=popt)\n', (612, 632), False, 'from scipy.optimize import curve_fit\n'), ((2406, 2419), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (2413, 2419), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
import pandas as pd
from functions.sub_surrender_profiles import get_risk_drivers, get_age_coeff, get_duration_coeff, get_duration_elapsed_coeff, get_duration_remain_coeff, \
get_prem_freq_coeff, get_prem_annual_coeff, get_time_coeff, get_risk_drivers
from functions.sub_actuarial_functions import get_premiums_portfolio
def simulate_contracts(N = 30000, option_new_business = False):
    '''
    Simulate a portfolio of endowment insurance contracts.
    Inputs:
    --------
        N: number of contracts
        option_new_business: True: simulate only new business (elapsed duration 0);
            False: simulate new and existing business. (Default: False)
    Outputs:
    --------
        DataFrame with one row per contract and columns Time, Age, Age_init,
        Face_amount, Duration, Duration_elapsed, Duration_remain,
        Premium_freq, Premium, Premium_annual, Lapsed, Death
    '''
    # Parameters
    N_contracts = N
    # Gompertz-Makeham mortality parameters
    mortality_params = {'A': 0.00022, 'B': 2.7*10**(-6), 'c': 1.124, 'age_max': 125} # SUSM Dickson, Hardy, Waters
    expenses_params = {'alpha': 0.025,'beta': 0.03, 'gamma': 0.005}
    # alpha: percentage of sum of ALL premiums (acquisition)
    # beta: percentage of annual premium amount (administrative)
    # gamma: percentage of sum insured, annual fee (administrative)
    # expenses chosen in line with Aleandri, Modelling Dynamic PH Behaviour through ML (p.20)
    # note legal requirements, e.g. alpha \leq 0.25 or recommended 'Hoechstrechnungszins'
    # (maximum technical interest rate) of 0.005 by aktuar.de
    management_params = {'int_rate': 0.005, 'prem_loading': 0.15,# 'profit_sharing': 0.9, 'return_guaranteed': 0.01,
                         'age_retire': 67}
    ######################### Underwriting age x #################################
    # Simulate underwriting age
    age_max = 85
    np.random.seed(0)
    ages = np.random.gamma(shape = 5.5, scale = 6.8, size = N_contracts)
    # ages above age_max are redrawn uniformly on (0, age_max); the left-hand
    # mask (ages>age_max) and the size expression (ages<0)|(ages>age_max)
    # agree because gamma draws are nonnegative
    ages[(ages>age_max)] = age_max*np.random.uniform(low = 0, high = 1, size = sum((ages<0)|(ages>age_max)))
    ###################### Premium Payments (Frequency m) #######################
    # Simulate Premium Frequency: 0 - lump sum (m=0), 1 - annual (m=1), 12 - monthly (m=12)
    # Compare Milhaud, 'Lapse Tables [..]': supra-annual: 15.19%, annual: 23.44%, infra-annual: 61.37%
    np.random.seed(1)
    premium_freq = np.random.uniform(size=N_contracts)
    premium_freq[premium_freq <= 0.15] = 0
    premium_freq[premium_freq > 0.4] = 12 # Note: Order important (assign level 12 before level 1 (>0.4))
    premium_freq[(premium_freq>0.15)&(premium_freq <= 0.4)] = 1
    premium_freq = premium_freq.astype('int')
    # For all contracts where PH at start of contract > 67 premiums we assume single premiums
    premium_freq[premium_freq>0] = premium_freq[premium_freq>0]*(ages[premium_freq>0] + 1/(premium_freq[premium_freq>0]) < 67)
    ############################# Death times #####################################
    # exact (non-integer) death ages; uses the helper's default seed
    death_times = get_noninteger_death_times(ages_PH= ages, mortality_params= mortality_params)
    ###################### Durations (of Endowments) ####################################
    # Mean: 20 years
    np.random.seed(3)
    # assume right-skewed distr. of durations; rounded to whole months
    duration = 5+(12*np.random.gamma(shape = 5, scale = 1.5, size = N_contracts)).astype('int')/12
    # ## Elapsed duration
    if option_new_business == False:
        # existing business: elapsed duration uniform over the contract term
        duration_elapsed = duration - duration*(np.random.rand(N_contracts))
        # Note: Make sure that the resulting Age_init is not <0 -> condition
        duration_elapsed = duration_elapsed*(duration_elapsed<ages)
    else:
        duration_elapsed = duration*0
    ################# Face Amounts (S) ######################
    # Face Amount
    # Choice arbitrary -> Backtest by looking at resulting premiums and compare range and variance to Milhaud's paper
    np.random.seed(2)
    face_amounts = 5000+ np.random.gamma(shape = 4, scale = 2000, size = N_contracts)#/10#np.random.normal(loc = 800000, scale = 200000, size = N_contracts)
    # Combine Data
    Portfolio = pd.DataFrame(data = {'Time': 0,
                            'Age': ages, 'Age_init': ages-duration_elapsed,
                            'Face_amount': face_amounts,
                            'Duration': duration,
                            'Duration_elapsed': duration_elapsed,
                            'Duration_remain': duration - duration_elapsed,
                            'Premium_freq': premium_freq, 'Premium': [None]*N_contracts,
                            'Premium_annual': [None]*N_contracts,
                            'Lapsed': [0]*N_contracts,
                            'Death': death_times})
    # ## Compute Premiums (P)
    Portfolio.Premium = get_premiums_portfolio(portfolio_df= Portfolio, mortality_params= mortality_params,
                                               expenses_params= expenses_params, management_params = management_params)
    ###### Annualize Premiums (P_ann) #####################
    # Use ceil since payments are made up to age of retirement
    # meaning: a monthly (m=12) premium payment for an individual with underwriting age 66.95 is effectively a single premium
    # since we assume no premium payments after the age of retirement (67)
    # Similarly, monthly premium payments for ind. with underwriting age 66.8 means that there will be 3 premium payments
    # Note: For contracts set up at underwriting age > 67, the premium time is indicated as negative
    # premium_time = min(years to retirement, duration), zeroed when negative
    premium_time = pd.DataFrame(data = [(management_params['age_retire']-Portfolio.Age),
                                        Portfolio.Duration]).apply(lambda x: np.min(x)**(np.min(x)>0), axis = 0)
    Portfolio.Premium_annual = (Portfolio.Premium_freq>0)*Portfolio.Premium*np.ceil(premium_time*Portfolio.Premium_freq)/\
                               np.ceil(premium_time)+ (Portfolio.Premium_freq==0)*Portfolio.Premium/np.ceil(premium_time**(premium_time>0))
    return Portfolio
def get_integer_death_times(ages_PH, mortality_params, seed = 42):
'''
Get death times (integer age) based on integer valued ages of policy holders
We can either work with these integer valued ages or use them as refined starting values for a non-integer solution
by Newton's method (-> Avoid problem of approx. zero-valued derivative for poor initial value)
'''
# Extract info about Number of contracts
N_individuals = ages_PH.shape[0]
death_times = np.zeros(shape = (N_individuals,))
# Simulate Death Times, i.e. potentially censoring times
np.random.seed(seed)
death_times_prob = np.random.uniform(low=0, high=1, size= N_individuals)
for i in range(N_individuals):
for t in range(mortality_params['age_max']-int(ages_PH[i])+1):
# if survival prob < simulated death prob -> death
if np.exp(-mortality_params['A']*t-mortality_params['B']/np.log(mortality_params['c'])*mortality_params['c']**int(ages_PH[i])*(mortality_params['c']**t-1)) < death_times_prob[i]:
death_times[i] = int(ages_PH[i])+t
break
return death_times
def get_noninteger_death_times(ages_PH, mortality_params, seed=42):
    """Simulate exact (non-integer) death ages.

    Newton's method can fail for poor starting values (near-zero
    derivative), so the integer-approximated death times are used as
    starting points.  The same seed is reused so the uniform draws here
    match those in get_integer_death_times.
    """
    A = mortality_params['A']
    B = mortality_params['B']
    c = mortality_params['c']
    n = ages_PH.shape[0]
    death_times = np.zeros(shape=(n,))
    # integer approximations serve as Newton starting points
    start_ages = get_integer_death_times(ages_PH, mortality_params, seed=seed)
    # Note: seed is identical to the one used for the uniforms in
    # get_integer_death_times (!!!)
    np.random.seed(seed)
    levels = np.random.uniform(low=0, high=1, size=n)
    for i in range(n):
        x = ages_PH[i]
        # root of: t-year survival prob from exact age x minus the level u
        def survival_gap(t, x=x, u=levels[i]):
            return np.exp(-A * t - B / np.log(c) * c ** x * (c ** t - 1)) - u
        death_times[i] = x + sc.optimize.newton(survival_gap, x0=start_ages[i] - x)
    return death_times
def get_surrender_coeff(df, profile =0, bool_errors = True):
    '''
    Combine all individual features' coefficients to obtain the linear
    predictor beta^T * x of the logit surrender model.

    Parameter
    ----------
    df: DataFrame with columns that match names in the surrender profile
    profile: integer, representing some lapse profile
            0: Fitted Logit Model in Milhaud 2011
            1: Empirical effects in Data in Milhaud 2011
    bool_errors: Boolean, print the profile's risk drivers

    Returns
    -------
    1d numpy array with the linear predictor per contract
    '''
    coeff = np.zeros(shape=(df.shape[0],))
    risk_drivers = get_risk_drivers(profile)
    if bool_errors:
        print('Risk drivers in profile {} are restricted to {}'.format(profile, risk_drivers))
    for feature in risk_drivers:
        if feature == 'Age':
            coeff += get_age_coeff(df[feature], profile=profile)
        elif feature == 'Duration':
            coeff += get_duration_coeff(df[feature], profile=profile)
        elif feature == 'Duration_elapsed':
            coeff += get_duration_elapsed_coeff(dur= df[feature], profile = profile)
        elif feature == 'Duration_remain':
            # Bug fix: this branch previously called get_duration_elapsed_coeff,
            # scoring the remaining duration with the elapsed-duration
            # coefficients (get_duration_remain_coeff was imported but unused).
            coeff += get_duration_remain_coeff(df[feature], profile=profile)
        elif feature == 'Premium_freq':
            coeff += get_prem_freq_coeff(df[feature], profile=profile)
        elif feature == 'Premium_annual':
            coeff += get_prem_annual_coeff(df[feature], profile=profile)
        elif feature == 'Time':
            coeff += get_time_coeff(df[feature],profile=profile)
        else:
            # Bug fix: the message previously printed the accumulator array
            # ``coeff`` instead of the unsupported feature's name.
            print('Note that in sub_simulate_events, l 231: coefficient "', feature, '" of surrender profile ignored!')
            print('Abording computation!')
            exit()
    return coeff
def get_surrender_prob(df, profile_nr = 0, adjust_baseline = False, target_surrender = None, beta0 = 0,
                       rand_noise_var = 0, bool_errors = True):
    '''
    Turn the combined feature coefficients (age, duration, premium
    frequency, annual premium, ...) into surrender probabilities via a
    logit model.  Optionally calibrate a baseline intercept beta0 so the
    portfolio-average surrender probability matches a target rate.

    Parameter:
    ----------
    df: DataFrame with columns that match names in surrender profiles
    profile_nr: integer value indicating a predefined surrender profile
            0: Fitted Logit Model in Milhaud 2011
            1: Empirical effects in Data in Milhaud 2011
            2 & 3: Eling and Cerchiari
    adjust_baseline: Boolean, whether beta0 is calibrated
    target_surrender: target mean surrender rate (used if adjust_baseline)
    beta0: baseline surrender factor (optional user input)
    rand_noise_var: spread of the centered gaussian noise added to the logit
            NOTE(review): this value is passed as the *scale* (std. dev.)
            of np.random.normal despite the name suggesting a variance --
            confirm intended semantics
    bool_errors: Boolean, display error messages

    Outputs
    -------
    array of surrender probabilities;
    if adjust_baseline: tuple (probabilities, calibrated beta0)
    '''
    odd_ratio = np.exp(get_surrender_coeff(df=df, profile=profile_nr, bool_errors=bool_errors))
    if adjust_baseline:
        # calibrate the intercept so that mean( p ) == target_surrender
        def mean_gap(x):
            probs = np.exp(x) * odd_ratio / (1 + np.exp(x) * odd_ratio)
            return target_surrender - probs.mean()
        beta0 = sc.optimize.newton(func=mean_gap, x0=-1)
    # apply the baseline factor (calibrated or user supplied)
    odd_ratio = odd_ratio * np.exp(beta0)
    # multiplicative noise: log(p/(1-p)) = beta*X + eps
    noise = np.random.normal(loc=0, scale=rand_noise_var, size=odd_ratio.size)
    odd_ratio = odd_ratio * np.exp(noise)
    probs = odd_ratio / (1 + odd_ratio)
    if adjust_baseline:
        # also return the calibrated beta0 (to replicate results)
        return (probs, beta0)
    return probs
def simulate_surrender(df, profile_nr, adjust_baseline,
                       target_rate = None, beta0 = 0, simulation_noise_var = 0,
                       bool_errors = True):
    '''
    Simulate one period of surrender decisions for the portfolio under a given
    surrender profile: draw a uniform number per contract and compare it against
    the contract's model-implied surrender probability.
    Parameter
    ---------
    df: DataFrame with columns that match the profile's feature names & 'Lapsed' column
    profile_nr: integer value selecting a predefined surrender profile
        0: Fitted Logit Model in Milhaud 2011
        1: Empirical effects in Data in Milhaud 2011
    adjust_baseline: Boolean whether the baseline factor beta0 should be calibrated
        to match target_rate
    target_rate: target mean surrender rate of the portfolio (used when adjust_baseline)
    beta0: baseline surrender intercept. Default = 0.
    simulation_noise_var: variance of a centered gaussian noise added on the logit scale
    bool_errors: Boolean, display error messages when a feature is no risk driver
    Returns
    -------
    int array with 1 = surrendered within the period, 0 = still active afterwards;
    if adjust_baseline is True, a tuple (array, calibrated beta0).
    '''
    n_contracts = df['Age'].size
    # Fixed seed so repeated runs are reproducible
    np.random.seed(8) # set seed in simulate_surrender_time_series()
    # Uniform draws, compared below against the model's surrender probabilities
    uniform_draws = np.random.uniform(size=n_contracts)
    # If adjust_baseline: result is (surrender_probs, calibrated beta0),
    # otherwise just the surrender probabilities.
    result = get_surrender_prob(df=df, profile_nr=profile_nr,
                                adjust_baseline=adjust_baseline, beta0=beta0,
                                target_surrender=target_rate,
                                rand_noise_var=simulation_noise_var,
                                bool_errors=bool_errors)
    if adjust_baseline:
        surrender_probs, calibrated_beta0 = result
        # also return beta0 (for later comparison)
        return ((uniform_draws < surrender_probs).astype('int'), calibrated_beta0)
    return (uniform_draws < result).astype('int')
def simulate_surrender_time_series(df, target_rate, profile_nr, modeling_time_step = 1/12, time_period_max = 10,
                                   option_new_business = True, rate_new_business = 0.06,
                                   simulation_noise_var = 0):
    '''
    Simulate the portfolio decomposition over time, i.e. iteratively apply simulate_surrender()
    Important: Fix the baseline hazard beta0 determined at initial time 0 and apply it for consecutive points in time
    Parameter
    ---------
    df: DataFrame with columns that match the surrender profile's feature names & 'Lapsed' column
    target_rate: target value for mean surrender of portfolio. Used to adjust beta0 at time 0
    profile_nr: integer value indicating a predefined surrender profile for the simulation
        0: Fitted Logit Model in Milhaud 2011
        1: Empirical effects in Data in Milhaud 2011
        2 & 3: effects from Eling 2012 - Figure 4c and Cerchiari
    modeling_time_step: frequency in which we iteratively apply the modelling of surrender, i.e. 1/12=monthly, 1=annually
    time_period_max: length of observation in years
    option_new_business: Boolean, add new business to the portfolio at every time step
    rate_new_business: share of new contracts added per step, relative to the active portfolio
    simulation_noise_var: variance of a centered, gaussian noise that is added to the logit-model for surrender probs
    Returns
    -------
    (TS_portfolio, beta0): list of portfolio DataFrames, one per time step ('Lapsed'
    encoding: 0 active, 1 surrender, 2 maturity, 3 death), and the baseline coefficient
    beta0 calibrated at time 0. Returns early once no active contracts remain.
    '''
    N_contracts = df.shape[0]
    # set seed
    np.random.seed(0)
    # Number of time steps: capped by time_period_max and the longest contract duration
    TS_length = min(time_period_max, int(df['Duration'].max()/modeling_time_step) +1)
    # Initialize time series
    TS_portfolio = [None]*TS_length
    #Initial value
    TS_portfolio[0] = pd.DataFrame.copy(df)
    ####### Indicate lapse due to maturity (level 2) ###########
    # This can be overwritten by lapse due to death or surrender
    TS_portfolio[0].loc[(TS_portfolio[0]['Duration_remain']-modeling_time_step)<0, 'Lapsed'] = 2
    ######## Indicate lapse due to death (level 3) ###########
    # Can censor simulated surrender
    TS_portfolio[0].loc[(TS_portfolio[0]['Age']+modeling_time_step>TS_portfolio[0]['Death']) & (TS_portfolio[0]['Death']<TS_portfolio[0]['Age']+TS_portfolio[0]['Duration']),'Lapsed']= 3
    ######## Indicate surrender (level 1) ##############
    # Calibrate beta0 once (adjust_baseline = True) so the target rate is met at time 0
    surrender, beta0 = simulate_surrender(df = TS_portfolio[0], #features_lst= features_lst,
                                          profile_nr= profile_nr, adjust_baseline = True,
                                          target_rate= target_rate, simulation_noise_var= simulation_noise_var)
    # select contracts with multiple events
    conflict_death = (surrender == 1) & (TS_portfolio[0]['Lapsed'] == 3)
    conflict_maturity = (surrender == 1) & (TS_portfolio[0]['Lapsed'] == 2)
    # BUGFIX: was 'sum(...)>0 | sum(...)>0'. '|' binds tighter than '>', so that
    # parsed as the chained comparison sum(cd) > (0|sum(cm)) > 0 and frequently
    # suppressed this diagnostic. Use boolean 'or' as intended.
    if sum(conflict_death)>0 or sum(conflict_maturity)>0:
        print('\t ', 'Conflicting events: ', str(sum(conflict_death)+sum(conflict_maturity)))
    time_to_death = TS_portfolio[0].loc[conflict_death,'Death']-TS_portfolio[0].loc[conflict_death,'Age']
    time_to_maturity = TS_portfolio[0].loc[conflict_maturity,'Duration_remain']
    # conflict: surrender (1) - death (3)
    # Resolve by drawing a uniform surrender time; if death occurs first, death prevails
    np.random.seed(42)
    sim_death = np.random.uniform(size=sum(conflict_death))
    surrender[conflict_death] += (sim_death<time_to_death)*2
    # conflict: surrender (1) - maturity (2)
    np.random.seed(42)
    sim_maturity = np.random.uniform(size=sum(conflict_maturity))
    surrender[conflict_maturity] += (sim_maturity<time_to_maturity)*2
    TS_portfolio[0].loc[surrender==1,'Lapsed'] = 1
    #beta0 = surrender[1]
    # iterate over time
    for i in range(1,TS_length):
        if sum(TS_portfolio[i-1]['Lapsed']==0)>0: # still active contracts in portfolio
            # Advance time for TS, drop contracts that lapsed at the previous time step
            TS_portfolio[i] = pd.DataFrame(data = TS_portfolio[i-1][TS_portfolio[i-1]['Lapsed']==0])
            # Adjust Features by advance of time
            TS_portfolio[i]['Age'] += modeling_time_step
            TS_portfolio[i]['Time'] += modeling_time_step
            TS_portfolio[i]['Duration_elapsed'] += modeling_time_step
            TS_portfolio[i]['Duration_remain'] -= modeling_time_step
            # add new business
            if option_new_business:
                N_contracts_new = int(len(TS_portfolio[i])*rate_new_business)
                new_business = simulate_contracts(N=N_contracts_new, option_new_business=True)
                # Give new contracts fresh, non-overlapping index labels
                new_business.index = range(N_contracts, N_contracts+N_contracts_new)
                N_contracts += N_contracts_new
                TS_portfolio[i] = TS_portfolio[i].append(new_business)
            ########### Indicate lapse due to maturity (level 2) ############
            # Note: Don't use 0 but 10**(-10) as threshold to compensate for rounding errors of e.g. 1/12 steps
            TS_portfolio[i].loc[((TS_portfolio[i]['Duration_remain']-modeling_time_step+10**(-10))<0), 'Lapsed'] = 2
            ############# Indicate lapse due to death (level 3) ###############
            TS_portfolio[i].loc[((TS_portfolio[i]['Age']+modeling_time_step)>TS_portfolio[i]['Death'])& (TS_portfolio[i]['Death']<TS_portfolio[i]['Age']+TS_portfolio[i]['Duration_remain']), 'Lapsed'] = 3
            ########### Indicate surrender (level 1) #############
            # Note: Keep beta0 fixed (determined at initial time 0 to match some empirical target surrender rate)
            surrender = simulate_surrender(df = TS_portfolio[i], #features_lst= features_lst,
                                        profile_nr= profile_nr,
                                        adjust_baseline = False, target_rate= target_rate,
                                        beta0 = beta0,
                                        simulation_noise_var= simulation_noise_var,
                                        bool_errors=False) # Dont display messages for feature that are no risk driver
            # select contracts with multiple events
            conflict_death = (surrender == 1) & (TS_portfolio[i]['Lapsed'] == 3)
            conflict_maturity = (surrender == 1) & (TS_portfolio[i]['Lapsed'] == 2)
            # BUGFIX: boolean 'or' instead of the mis-parsing '|' (same issue as above)
            if sum(conflict_death)>0 or sum(conflict_maturity)>0:
                print('\t ', 'Conflicting events: ', str(sum(conflict_death)+sum(conflict_maturity)))
            time_to_death = TS_portfolio[i].loc[conflict_death,'Death']-TS_portfolio[i].loc[conflict_death,'Age']
            time_to_maturity = TS_portfolio[i].loc[conflict_maturity,'Duration_remain']
            # conflict: surrender (1) - death (3)
            np.random.seed(42)
            sim_death = np.random.uniform(size=sum(conflict_death))
            surrender[conflict_death] += (sim_death<time_to_death)*2
            # conflict: surrender (1) - maturity (2)
            np.random.seed(42)
            sim_maturity = np.random.uniform(size=sum(conflict_maturity))
            surrender[conflict_maturity] += (sim_maturity<time_to_maturity)*2
            TS_portfolio[i].loc[surrender==1,'Lapsed'] = 1
            # index of non-surrendered contracts
            # Implicit assumption: surrender happens before maturity or death
            #index_bool_lapse = (TS_portfolio[i].loc[:,'Lapsed']==1)
        else:
            # return TS_portfolio prematurely (due to the lack of active contracts)
            return (TS_portfolio[0:i], beta0)#, df_events)
    return (TS_portfolio, beta0)#), df_events
def visualize_time_series_lapse(lst_of_df, modeling_time_step = 1/12, zoom_factor = 1, fig_size = (12,6),
                                option_view= 'lapse_decomp', option_maturity = True,
                                option_annualized_rates = True, title = ""):
    '''
    Visualize lapse (i.e. surrender, death and maturation of contracts)
    Type of lapse is recorded in lst_of_df for each time t in column 'Lapsed'
    Encoding 0: active (at the end of period), 1: surrender (during period), 2: maturity, 3: death (during period)
    Relate this to the respective at-risk-sets at time t
    Also: Illustrate the at-risk-set as a countplot
    Parameter
    ---------
    lst_of_df: time series (list of DataFrames) created by simulate_surrender_time_series()
    modeling_time_step: frequency in which we iteratively apply the modelling of surrender, i.e. 1/12=monthly, 1=annually
    zoom_factor: when plotting Surrender, Death and Maturity rates zoom in to ignore e.g. the 100% maturity rate at the end
    fig_size: (width, height) passed to matplotlib
    option_view: indicate type of 2nd plot. Either 'lapse_decomp', i.e. decomposition of lapse activity over time,
        or any other value for absolute numbers of lapse components
    option_maturity: Boolean, include the maturity rate in the rates plot
    option_annualized_rates: Boolean, annualize the per-step rates via (1+rate)**(1/modeling_time_step)-1
    title: figure title
    Returns
    -------
    stats: DataFrame with per-step columns 'Surrender', 'Maturity', 'Death', 'AtRisk'
    '''
    N_ts = len(lst_of_df)
    ### Step 1: Compute Statistics
    # One row per time step; counts of each lapse type plus the exposure ('AtRisk')
    stats = pd.DataFrame(data = np.zeros(shape= (N_ts,4)), columns = ['Surrender', 'Maturity','Death', 'AtRisk'])
    for i in range(N_ts):
        stats.loc[i, 'AtRisk'] = lst_of_df[i].shape[0]
        stats.loc[i, 'Surrender'] = sum(lst_of_df[i]['Lapsed']==1)
        stats.loc[i, 'Maturity'] = sum(lst_of_df[i]['Lapsed']==2)
        stats.loc[i, 'Death'] = sum(lst_of_df[i]['Lapsed']==3)
    ### Step 2: Plot exposure and lapse rates
    # Set up x-values for plot (calendar time in years)
    x_grid = np.linspace(0,(stats.shape[0]-1)*modeling_time_step,stats.shape[0])
    # Right edge of the zoomed-in x-range (index, not time)
    x_cut = int(zoom_factor*len(x_grid))
    # create canvas for plots
    fig, ax = plt.subplots(1,2, figsize = fig_size)
    # Secondary y-axis: rates share the x-axis with the exposure bars
    ax2 = ax[0].twinx()
    ax2.set_ylabel('Rate' +option_annualized_rates*' (p.a.)')#, fontsize = 'large')
    ax2.tick_params(axis='y')
    # Boolean*string trick: exactly one of the labels is selected by the step size
    ax[0].set_xlabel((modeling_time_step==1)*'Year'+(modeling_time_step ==1/12)*'Month'+ (modeling_time_step ==1/4)*'Quarter')
    ax[0].set_ylabel('Active Contracts')
    # Plot number of active contracts
    ax[0].bar(x = x_grid[0:x_cut], height = (stats['AtRisk'][0:x_cut]), color = 'grey',
              alpha = .8, width = modeling_time_step)
    # Plot Surrender, Death and Maturity Rates
    if option_annualized_rates:
        # Annualize the per-step rate: (1 + step_rate)**(1/step) - 1
        ax2.step(x = x_grid[0:x_cut],
                 y = (1+stats['Surrender'][0:x_cut]/stats['AtRisk'][0:x_cut])**(1/modeling_time_step)-1,
                 color = 'blue', label = 'Surrender')
        ax2.step(x = x_grid[0:x_cut],
                 y = (1+stats['Death'][0:x_cut]/stats['AtRisk'][0:x_cut])**(1/modeling_time_step)-1,
                 color = 'orange', label = 'Death')
        if option_maturity:
            # NOTE(review): exponent is hard-coded 1/12 here while the other rates
            # use 1/modeling_time_step — likely a bug when modeling_time_step != 1/12; confirm.
            ax2.step(x = x_grid[0:x_cut],
                     y = (1+stats['Maturity'][0:x_cut]/stats['AtRisk'][0:x_cut])**(1/12)-1,
                     color = 'green', label = 'Maturity')
    else:
        # Raw per-step rates (no annualization)
        ax2.step(x = x_grid[0:x_cut], y = stats['Surrender'][0:x_cut]/stats['AtRisk'][0:x_cut],
                 color = 'blue', label = 'Surrender')
        ax2.step(x = x_grid[0:x_cut], y = stats['Death'][0:x_cut]/stats['AtRisk'][0:x_cut],
                 color = 'orange', label = 'Death')
        if option_maturity:
            ax2.step(x = x_grid[0:x_cut], y = stats['Maturity'][0:x_cut]/stats['AtRisk'][0:x_cut],
                     color = 'green', label = 'Maturity')
    ax2.legend(loc = 'upper center')#loc = (0.2,0.8-0.1*(title != "")))
    fig.suptitle(title, fontsize = 'large')
    # Step 3: Plot decomposition (%-wise) of surrender over time
    if option_view == 'lapse_decomp':
        stats_lapses = stats['Surrender']+stats['Maturity'] +stats['Death']
        # Avoid NaN from dividing by 0: steps without any lapse get denominator 1
        stats_lapses[stats_lapses==0] = 1
        # initialize dataframe
        stats_lapsed_decomp = pd.DataFrame(data = None, columns = ['Surrender', 'Maturity','Death', 'Active'])
        # fill in new values: each lapse type's share of all lapses at that step
        stats_lapsed_decomp['Surrender'] = stats['Surrender']/stats_lapses
        stats_lapsed_decomp['Maturity'] = stats['Maturity']/stats_lapses
        stats_lapsed_decomp['Death'] = stats['Death']/stats_lapses
        # 'Active' is 1 only at steps with no lapses at all (denominator was forced to 1)
        stats_lapsed_decomp['Active'] = 1-(stats['Surrender']+stats['Maturity']+stats['Death'])/stats_lapses
        # plot data: stacked shares, bottom-up Surrender -> Maturity -> Death
        ax[1].fill_between(x=x_grid,
                           y1=stats_lapsed_decomp['Surrender']+stats_lapsed_decomp['Maturity']+stats_lapsed_decomp['Death'],
                           y2=stats_lapsed_decomp['Surrender']+stats_lapsed_decomp['Maturity'],
                           color = 'orange', label = 'Death')
        ax[1].fill_between(x=x_grid, y1 = stats_lapsed_decomp['Surrender']+stats_lapsed_decomp['Maturity'],
                           y2=stats_lapsed_decomp['Surrender'], alpha =.6, color = 'green', label = 'Maturity')
        ax[1].fill_between(x=x_grid, y1 = 0, y2=stats_lapsed_decomp['Surrender'], color = 'blue', label = 'Surrender')
        # create white label for 'No Lapse', i.e. times when we obsere no lapses at all
        # (drawn off-canvas at y=-1 so only the legend entry is visible)
        ax[1].fill_between(x=x_grid,y1=-1, y2=-1,color = 'white', label= 'No Lapse')
        #plt.fill_between(x=x_grid, y1 = 0, y2=1, color = 'blue', interpolate= True)
        ax[1].set_ylim((-0.05,1.05))
        ax[1].set_ylabel('Decomposition of Lapse Activity')
        ax[1].set_xlabel((modeling_time_step==1)*'Year'+(modeling_time_step ==1/12)*'Month'+ (modeling_time_step ==1/4)*'Quarter')
        ax[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), frameon=True, shadow = False, edgecolor = 'black')
    else: # plot absolute numbers
        ax[1].plot(x_grid, stats.Surrender, label = 'Surrender', color = 'blue')
        ax[1].plot(x_grid, stats.Maturity, label = 'Maturity', color = 'green')
        ax[1].plot(x_grid, stats.Death, label = 'Death', color = 'orange')
        ax[1].legend()
        ax[1].set_xlabel((modeling_time_step==1)*'Year'+(modeling_time_step ==1/12)*'Month'+ (modeling_time_step ==1/4)*'Quarter')
        ax[1].set_ylabel('No. of observations')
    plt.tight_layout(rect = (0,0,.95,.95))
    plt.show()
    return stats#, stats_lapsed_decomp
def create_summary_for_lapse_events(events_df, profile_name = ''):
    '''
    Print summary of events including surrender, maturity and death activity
    Parameter
    ---------
    events_df: DataFrame with columns 'Surrender', 'Maturity', 'Death', 'AtRisk'
        rows relate to points in time
        events type dataframe is output of visualize_time_series_lapse()
    profile_name: optional label of the surrender profile, shown in the header
    '''
    # Initial portfolio size = exposure at the first time step
    N_contracts = int(events_df.AtRisk[0])
    # Hoist the column totals: the original recomputed every .sum() twice per line
    n_surrender = events_df.Surrender.sum()
    n_maturity = events_df.Maturity.sum()
    n_death = events_df.Death.sum()
    n_datapoints = events_df.AtRisk.sum()
    print('Overview for Surrender Profile '+ profile_name)
    print('---------------------------------------------------')
    print(events_df.head())
    print('... \t\t ... \t\t ...')
    print('---------------------------------------------------')
    print('\n Overall number of contracts: {} \n'.format(N_contracts))
    # Lapse counts with their share of the initial portfolio (in %)
    print('\t \t contracts lapsed due to surrender: ' + str(int(n_surrender)) + ' (' + str(np.round(n_surrender/N_contracts*100, decimals=2)) + '%)')
    print('\t \t contracts lapsed due to maturity: ' + str(int(n_maturity)) + ' (' + str(np.round(n_maturity/N_contracts*100, decimals=2)) + '%)')
    print('\t \t contracts lapsed due to death: ' + str(int(n_death)) + ' (' + str(np.round(n_death/N_contracts*100, decimals=2)) + '%)')
    print('\n Overall number of datapoints: ' + str(int(n_datapoints)))
    # Event shares relative to all recorded (contract, time) datapoints
    print('\t\t share of surrender events: ' + str(np.round(n_surrender/n_datapoints*100, decimals=2)) + '%')
    print('\t\t share of maturity events: ' + str(np.round(n_maturity/n_datapoints*100, decimals=2)) + '%')
    print('\t\t share of death events: ' + str(np.round(n_death/n_datapoints*100, decimals=2)) + '%')
| [
"numpy.random.rand",
"functions.sub_surrender_profiles.get_time_coeff",
"numpy.log",
"functions.sub_surrender_profiles.get_prem_annual_coeff",
"functions.sub_surrender_profiles.get_age_coeff",
"functions.sub_surrender_profiles.get_risk_drivers",
"numpy.exp",
"numpy.linspace",
"numpy.random.gamma",
... | [((1729, 1746), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1743, 1746), True, 'import numpy as np\n'), ((1758, 1813), 'numpy.random.gamma', 'np.random.gamma', ([], {'shape': '(5.5)', 'scale': '(6.8)', 'size': 'N_contracts'}), '(shape=5.5, scale=6.8, size=N_contracts)\n', (1773, 1813), True, 'import numpy as np\n'), ((2213, 2230), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2227, 2230), True, 'import numpy as np\n'), ((2250, 2285), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'N_contracts'}), '(size=N_contracts)\n', (2267, 2285), True, 'import numpy as np\n'), ((3067, 3084), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (3081, 3084), True, 'import numpy as np\n'), ((3772, 3789), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (3786, 3789), True, 'import numpy as np\n'), ((3983, 4374), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Time': 0, 'Age': ages, 'Age_init': ages - duration_elapsed, 'Face_amount':\n face_amounts, 'Duration': duration, 'Duration_elapsed':\n duration_elapsed, 'Duration_remain': duration - duration_elapsed,\n 'Premium_freq': premium_freq, 'Premium': [None] * N_contracts,\n 'Premium_annual': [None] * N_contracts, 'Lapsed': [0] * N_contracts,\n 'Death': death_times}"}), "(data={'Time': 0, 'Age': ages, 'Age_init': ages -\n duration_elapsed, 'Face_amount': face_amounts, 'Duration': duration,\n 'Duration_elapsed': duration_elapsed, 'Duration_remain': duration -\n duration_elapsed, 'Premium_freq': premium_freq, 'Premium': [None] *\n N_contracts, 'Premium_annual': [None] * N_contracts, 'Lapsed': [0] *\n N_contracts, 'Death': death_times})\n", (3995, 4374), True, 'import pandas as pd\n'), ((4731, 4892), 'functions.sub_actuarial_functions.get_premiums_portfolio', 'get_premiums_portfolio', ([], {'portfolio_df': 'Portfolio', 'mortality_params': 'mortality_params', 'expenses_params': 'expenses_params', 'management_params': 'management_params'}), 
'(portfolio_df=Portfolio, mortality_params=\n mortality_params, expenses_params=expenses_params, management_params=\n management_params)\n', (4753, 4892), False, 'from functions.sub_actuarial_functions import get_premiums_portfolio\n'), ((6459, 6491), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N_individuals,)'}), '(shape=(N_individuals,))\n', (6467, 6491), True, 'import numpy as np\n'), ((6564, 6584), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6578, 6584), True, 'import numpy as np\n'), ((6608, 6660), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': 'N_individuals'}), '(low=0, high=1, size=N_individuals)\n', (6625, 6660), True, 'import numpy as np\n'), ((7498, 7530), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N_individuals,)'}), '(shape=(N_individuals,))\n', (7506, 7530), True, 'import numpy as np\n'), ((7785, 7805), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7799, 7805), True, 'import numpy as np\n'), ((7829, 7881), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': 'N_individuals'}), '(low=0, high=1, size=N_individuals)\n', (7846, 7881), True, 'import numpy as np\n'), ((8909, 8939), 'numpy.zeros', 'np.zeros', ([], {'shape': '(df.shape[0],)'}), '(shape=(df.shape[0],))\n', (8917, 8939), True, 'import numpy as np\n'), ((8964, 8989), 'functions.sub_surrender_profiles.get_risk_drivers', 'get_risk_drivers', (['profile'], {}), '(profile)\n', (8980, 8989), False, 'from functions.sub_surrender_profiles import get_risk_drivers, get_age_coeff, get_duration_coeff, get_duration_elapsed_coeff, get_duration_remain_coeff, get_prem_freq_coeff, get_prem_annual_coeff, get_time_coeff, get_risk_drivers\n'), ((13767, 13784), 'numpy.random.seed', 'np.random.seed', (['(8)'], {}), '(8)\n', (13781, 13784), True, 'import numpy as np\n'), ((13941, 13966), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'N'}), '(size=N)\n', (13958, 13966), True, 'import 
numpy as np\n'), ((16175, 16192), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (16189, 16192), True, 'import numpy as np\n'), ((16394, 16415), 'pandas.DataFrame.copy', 'pd.DataFrame.copy', (['df'], {}), '(df)\n', (16411, 16415), True, 'import pandas as pd\n'), ((24167, 24240), 'numpy.linspace', 'np.linspace', (['(0)', '((stats.shape[0] - 1) * modeling_time_step)', 'stats.shape[0]'], {}), '(0, (stats.shape[0] - 1) * modeling_time_step, stats.shape[0])\n', (24178, 24240), True, 'import numpy as np\n'), ((24325, 24361), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': 'fig_size'}), '(1, 2, figsize=fig_size)\n', (24337, 24361), True, 'import matplotlib.pyplot as plt\n'), ((28724, 28765), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '(0, 0, 0.95, 0.95)'}), '(rect=(0, 0, 0.95, 0.95))\n', (28740, 28765), True, 'import matplotlib.pyplot as plt\n'), ((28767, 28777), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28775, 28777), True, 'import matplotlib.pyplot as plt\n'), ((3815, 3869), 'numpy.random.gamma', 'np.random.gamma', ([], {'shape': '(4)', 'scale': '(2000)', 'size': 'N_contracts'}), '(shape=4, scale=2000, size=N_contracts)\n', (3830, 3869), True, 'import numpy as np\n'), ((11935, 11948), 'numpy.exp', 'np.exp', (['beta0'], {}), '(beta0)\n', (11941, 11948), True, 'import numpy as np\n'), ((17882, 17900), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (17896, 17900), True, 'import numpy as np\n'), ((18087, 18105), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (18101, 18105), True, 'import numpy as np\n'), ((26492, 26569), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'None', 'columns': "['Surrender', 'Maturity', 'Death', 'Active']"}), "(data=None, columns=['Surrender', 'Maturity', 'Death', 'Active'])\n", (26504, 26569), True, 'import pandas as pd\n'), ((5490, 5582), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[management_params['age_retire'] - 
Portfolio.Age, Portfolio.Duration]"}), "(data=[management_params['age_retire'] - Portfolio.Age,\n Portfolio.Duration])\n", (5502, 5582), True, 'import pandas as pd\n'), ((5829, 5850), 'numpy.ceil', 'np.ceil', (['premium_time'], {}), '(premium_time)\n', (5836, 5850), True, 'import numpy as np\n'), ((5898, 5941), 'numpy.ceil', 'np.ceil', (['(premium_time ** (premium_time > 0))'], {}), '(premium_time ** (premium_time > 0))\n', (5905, 5941), True, 'import numpy as np\n'), ((9193, 9236), 'functions.sub_surrender_profiles.get_age_coeff', 'get_age_coeff', (['df[feature]'], {'profile': 'profile'}), '(df[feature], profile=profile)\n', (9206, 9236), False, 'from functions.sub_surrender_profiles import get_risk_drivers, get_age_coeff, get_duration_coeff, get_duration_elapsed_coeff, get_duration_remain_coeff, get_prem_freq_coeff, get_prem_annual_coeff, get_time_coeff, get_risk_drivers\n'), ((12085, 12151), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'rand_noise_var', 'size': 'odd_ratio.size'}), '(loc=0, scale=rand_noise_var, size=odd_ratio.size)\n', (12101, 12151), True, 'import numpy as np\n'), ((18604, 18678), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "TS_portfolio[i - 1][TS_portfolio[i - 1]['Lapsed'] == 0]"}), "(data=TS_portfolio[i - 1][TS_portfolio[i - 1]['Lapsed'] == 0])\n", (18616, 18678), True, 'import pandas as pd\n'), ((23702, 23727), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N_ts, 4)'}), '(shape=(N_ts, 4))\n', (23710, 23727), True, 'import numpy as np\n'), ((3343, 3370), 'numpy.random.rand', 'np.random.rand', (['N_contracts'], {}), '(N_contracts)\n', (3357, 3370), True, 'import numpy as np\n'), ((5637, 5646), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (5643, 5646), True, 'import numpy as np\n'), ((5750, 5796), 'numpy.ceil', 'np.ceil', (['(premium_time * Portfolio.Premium_freq)'], {}), '(premium_time * Portfolio.Premium_freq)\n', (5757, 5796), True, 'import numpy as np\n'), ((9294, 9342), 
'functions.sub_surrender_profiles.get_duration_coeff', 'get_duration_coeff', (['df[feature]'], {'profile': 'profile'}), '(df[feature], profile=profile)\n', (9312, 9342), False, 'from functions.sub_surrender_profiles import get_risk_drivers, get_age_coeff, get_duration_coeff, get_duration_elapsed_coeff, get_duration_remain_coeff, get_prem_freq_coeff, get_prem_annual_coeff, get_time_coeff, get_risk_drivers\n'), ((21429, 21447), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (21443, 21447), True, 'import numpy as np\n'), ((21666, 21684), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (21680, 21684), True, 'import numpy as np\n'), ((5649, 5658), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (5655, 5658), True, 'import numpy as np\n'), ((9408, 9468), 'functions.sub_surrender_profiles.get_duration_elapsed_coeff', 'get_duration_elapsed_coeff', ([], {'dur': 'df[feature]', 'profile': 'profile'}), '(dur=df[feature], profile=profile)\n', (9434, 9468), False, 'from functions.sub_surrender_profiles import get_risk_drivers, get_age_coeff, get_duration_coeff, get_duration_elapsed_coeff, get_duration_remain_coeff, get_prem_freq_coeff, get_prem_annual_coeff, get_time_coeff, get_risk_drivers\n'), ((3152, 3205), 'numpy.random.gamma', 'np.random.gamma', ([], {'shape': '(5)', 'scale': '(1.5)', 'size': 'N_contracts'}), '(shape=5, scale=1.5, size=N_contracts)\n', (3167, 3205), True, 'import numpy as np\n'), ((9536, 9592), 'functions.sub_surrender_profiles.get_duration_elapsed_coeff', 'get_duration_elapsed_coeff', (['df[feature]'], {'profile': 'profile'}), '(df[feature], profile=profile)\n', (9562, 9592), False, 'from functions.sub_surrender_profiles import get_risk_drivers, get_age_coeff, get_duration_coeff, get_duration_elapsed_coeff, get_duration_remain_coeff, get_prem_freq_coeff, get_prem_annual_coeff, get_time_coeff, get_risk_drivers\n'), ((9654, 9703), 'functions.sub_surrender_profiles.get_prem_freq_coeff', 'get_prem_freq_coeff', (['df[feature]'], 
{'profile': 'profile'}), '(df[feature], profile=profile)\n', (9673, 9703), False, 'from functions.sub_surrender_profiles import get_risk_drivers, get_age_coeff, get_duration_coeff, get_duration_elapsed_coeff, get_duration_remain_coeff, get_prem_freq_coeff, get_prem_annual_coeff, get_time_coeff, get_risk_drivers\n'), ((9767, 9818), 'functions.sub_surrender_profiles.get_prem_annual_coeff', 'get_prem_annual_coeff', (['df[feature]'], {'profile': 'profile'}), '(df[feature], profile=profile)\n', (9788, 9818), False, 'from functions.sub_surrender_profiles import get_risk_drivers, get_age_coeff, get_duration_coeff, get_duration_elapsed_coeff, get_duration_remain_coeff, get_prem_freq_coeff, get_prem_annual_coeff, get_time_coeff, get_risk_drivers\n'), ((6905, 6934), 'numpy.log', 'np.log', (["mortality_params['c']"], {}), "(mortality_params['c'])\n", (6911, 6934), True, 'import numpy as np\n'), ((9872, 9916), 'functions.sub_surrender_profiles.get_time_coeff', 'get_time_coeff', (['df[feature]'], {'profile': 'profile'}), '(df[feature], profile=profile)\n', (9886, 9916), False, 'from functions.sub_surrender_profiles import get_risk_drivers, get_age_coeff, get_duration_coeff, get_duration_elapsed_coeff, get_duration_remain_coeff, get_prem_freq_coeff, get_prem_annual_coeff, get_time_coeff, get_risk_drivers\n'), ((11723, 11732), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (11729, 11732), True, 'import numpy as np\n'), ((8096, 8125), 'numpy.log', 'np.log', (["mortality_params['c']"], {}), "(mortality_params['c'])\n", (8102, 8125), True, 'import numpy as np\n'), ((11746, 11755), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (11752, 11755), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
class TFPositionalEncoding2D(tf.keras.layers.Layer):
    """Keras layer producing (or adding) a 2D sinusoidal positional encoding.

    For a 4d input (batch, x, y, ch) it builds sin/cos encodings along both
    spatial axes and either returns the encoding itself ("pos") or the input
    plus the encoding ("sum").
    """
    def __init__(self, channels:int, return_format:str="pos", dtype=tf.float32):
        """
        Args:
            channels int: The last dimension of the tensor you want to apply pos emb to.
        Keyword Args:
            return_format str: Return either the position encoding "pos" or the sum
                of the inputs with the position encoding "sum". Default is "pos".
            dtype: output type of the encodings. Default is "tf.float32".
                NOTE(review): this argument is accepted but never used — the
                encodings are always built in float32; confirm intent.
        """
        super().__init__()
        if return_format not in ["pos", "sum"]:
            # BUGFIX: message previously read 'unkown' and was missing the
            # closing quote after "sum".
            raise ValueError(f'"{return_format}" is an unknown return format. Value must be "pos" or "sum"')
        self.return_format = return_format
        # Round the channel count up to the next multiple of 4: each spatial
        # axis (x, y) contributes sin and cos components.
        self.channels = int(2 * np.ceil(channels/4))
        # Inverse frequencies 1/10000^(2k/channels) (Transformer-style encoding)
        self.inv_freq = np.float32(1 / np.power(10000, np.arange(0, self.channels, 2) / np.float32(self.channels)))

    @tf.function
    def call(self, inputs):
        """
        :param inputs: A 4d tensor of size (batch_size, x, y, ch)
        :return: Positional Encoding Matrix of size (batch_size, x, y, ch),
            or inputs + encoding when return_format == "sum"
        :raises RuntimeError: if the input is not 4d
        """
        if len(inputs.shape) != 4:
            raise RuntimeError("The input tensor has to be 4d!")
        _, x, y, org_channels = inputs.shape
        dtype = self.inv_freq.dtype
        # Per-axis position indices
        pos_x = tf.range(x, dtype=dtype)
        pos_y = tf.range(y, dtype=dtype)
        # Outer product position x frequency -> phase arguments
        sin_inp_x = tf.einsum("i,j->ij", pos_x, self.inv_freq)
        sin_inp_y = tf.einsum("i,j->ij", pos_y, self.inv_freq)
        # (x, 1, ch) and (1, y, ch) sin/cos embeddings, then broadcast by tiling
        emb_x = tf.expand_dims(tf.concat((tf.sin(sin_inp_x), tf.cos(sin_inp_x)), -1), 1)
        emb_y = tf.expand_dims(tf.concat((tf.sin(sin_inp_y), tf.cos(sin_inp_y)), -1), 0)
        emb_x = tf.tile(emb_x, (1, y, 1))
        emb_y = tf.tile(emb_y, (x, 1, 1))
        emb = tf.concat((emb_x, emb_y), -1)
        # Trim the (possibly padded) channels back to the input's channel count
        # and repeat along the (dynamic) batch dimension.
        pos_enc = tf.repeat(emb[None, :, :, :org_channels], tf.shape(inputs)[0], axis=0)
        if self.return_format == "pos":
            return pos_enc
        elif self.return_format == "sum":
            return inputs + pos_enc
return inputs + pos_enc | [
"tensorflow.tile",
"numpy.ceil",
"tensorflow.shape",
"numpy.float32",
"tensorflow.range",
"tensorflow.einsum",
"tensorflow.concat",
"tensorflow.sin",
"tensorflow.cos",
"numpy.arange"
] | [((1436, 1460), 'tensorflow.range', 'tf.range', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (1444, 1460), True, 'import tensorflow as tf\n'), ((1477, 1501), 'tensorflow.range', 'tf.range', (['y'], {'dtype': 'dtype'}), '(y, dtype=dtype)\n', (1485, 1501), True, 'import tensorflow as tf\n'), ((1531, 1573), 'tensorflow.einsum', 'tf.einsum', (['"""i,j->ij"""', 'pos_x', 'self.inv_freq'], {}), "('i,j->ij', pos_x, self.inv_freq)\n", (1540, 1573), True, 'import tensorflow as tf\n'), ((1594, 1636), 'tensorflow.einsum', 'tf.einsum', (['"""i,j->ij"""', 'pos_y', 'self.inv_freq'], {}), "('i,j->ij', pos_y, self.inv_freq)\n", (1603, 1636), True, 'import tensorflow as tf\n'), ((1838, 1863), 'tensorflow.tile', 'tf.tile', (['emb_x', '(1, y, 1)'], {}), '(emb_x, (1, y, 1))\n', (1845, 1863), True, 'import tensorflow as tf\n'), ((1878, 1903), 'tensorflow.tile', 'tf.tile', (['emb_y', '(x, 1, 1)'], {}), '(emb_y, (x, 1, 1))\n', (1885, 1903), True, 'import tensorflow as tf\n'), ((1916, 1945), 'tensorflow.concat', 'tf.concat', (['(emb_x, emb_y)', '(-1)'], {}), '((emb_x, emb_y), -1)\n', (1925, 1945), True, 'import tensorflow as tf\n'), ((867, 888), 'numpy.ceil', 'np.ceil', (['(channels / 4)'], {}), '(channels / 4)\n', (874, 888), True, 'import numpy as np\n'), ((2005, 2021), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (2013, 2021), True, 'import tensorflow as tf\n'), ((1688, 1705), 'tensorflow.sin', 'tf.sin', (['sin_inp_x'], {}), '(sin_inp_x)\n', (1694, 1705), True, 'import tensorflow as tf\n'), ((1707, 1724), 'tensorflow.cos', 'tf.cos', (['sin_inp_x'], {}), '(sin_inp_x)\n', (1713, 1724), True, 'import tensorflow as tf\n'), ((1776, 1793), 'tensorflow.sin', 'tf.sin', (['sin_inp_y'], {}), '(sin_inp_y)\n', (1782, 1793), True, 'import tensorflow as tf\n'), ((1795, 1812), 'tensorflow.cos', 'tf.cos', (['sin_inp_y'], {}), '(sin_inp_y)\n', (1801, 1812), True, 'import tensorflow as tf\n'), ((943, 973), 'numpy.arange', 'np.arange', (['(0)', 'self.channels', '(2)'], {}), 
'(0, self.channels, 2)\n', (952, 973), True, 'import numpy as np\n'), ((976, 1001), 'numpy.float32', 'np.float32', (['self.channels'], {}), '(self.channels)\n', (986, 1001), True, 'import numpy as np\n')] |
import numpy as np
import argparse
from MultinomialNBClassifier import multinomial_nb_classifier
from NBClassifier import nb_classifier
from LogisticRegressionClassifer import lr_classifer
from SGDClassifier import sgd_classifier
from Parser import get_vocabulary, bag_of_words, bernoulli
def main():
    """Build the command-line interface and hand the parsed flags to parse().

    Four boolean flags select classifiers to run; -train and -test give the
    mandatory data-set paths.
    """
    arg_parser = argparse.ArgumentParser(description="Instructions:")
    # Classifier flags share the same shape, so register them from a table.
    classifier_flags = [
        ("-nb", "nb", "Discrete Naive Bayes Classifier"),
        ("-mnb", "mnb", "Multinomial Naive Bayes Classifier"),
        ("-lr", "lr", "Logistic Regression Classifier"),
        ("-sgd", "sgd", "Stochastic Gradient Descent Classifier"),
    ]
    for flag, dest, help_text in classifier_flags:
        arg_parser.add_argument(flag, dest=dest, help=help_text, action="store_true")
    arg_parser.add_argument(
        "-train", dest="train_data_path", help="train_data_path", required=True
    )
    arg_parser.add_argument(
        "-test", dest="test_data_path", help="test_data_path", required=True
    )
    parse(arg_parser.parse_args())
def print_result(arr):
    """Print the four evaluation metrics from a 4-element sequence.

    Output format matches an f-string with '=' specifiers, i.e.
    "accuracy=<repr>, precision=<repr>, recall=<repr>, f1=<repr>".
    """
    accuracy, precision, recall, f1 = arr
    print(
        "accuracy={!r}, precision={!r}, recall={!r}, f1={!r}".format(
            accuracy, precision, recall, f1
        )
    )
def parse(args):
    """Run the classifiers selected in args on the given train/test data.

    Parameters
    ----------
    args: argparse.Namespace produced by main(), carrying the four
        classifier flags plus train_data_path and test_data_path.
    """
    vocabulary = get_vocabulary(args.train_data_path)
    # NOTE(review): the discrete NB is fed bag-of-words features while the
    # multinomial NB gets the bernoulli (binary) features below — confirm
    # this pairing is intended and not swapped.
    bow_train_data, bow_train_classes = bag_of_words(args.train_data_path, vocabulary)
    bow_test_data, bow_test_classes = bag_of_words(args.test_data_path, vocabulary)
    bnl_train_data, bnl_train_classes = bernoulli(args.train_data_path, vocabulary)
    bnl_test_data, bnl_test_classes = bernoulli(args.test_data_path, vocabulary)
    if args.nb:
        nb = nb_classifier()
        nb.train(bow_train_data, bow_train_classes)
        print("Discrete Naive Bayes Classifier:")
        print_result(nb.test(bow_test_data, bow_test_classes))
    if args.mnb:
        mnb = multinomial_nb_classifier()
        mnb.train(bnl_train_data, bnl_train_classes)
        print("Multinomial Naive Bayes Classifier:")
        print_result(mnb.test(bnl_test_data, bnl_test_classes))
    if args.lr:
        # np.warnings was an accidental NumPy re-export removed in NumPy 1.25;
        # use the standard-library warnings module directly instead.
        warnings.filterwarnings("ignore", "overflow")
        lr = lr_classifer()
        print("Logistic Regression Classifier:")
        print("bag_of_words:")
        print("lambda:", lr.train(bow_train_data, bow_train_classes))
        print_result(lr.test(bow_test_data, bow_test_classes))
        print("bernoulli:")
        print("lambda:", lr.train(bnl_train_data, bnl_train_classes))
        print_result(lr.test(bnl_test_data, bnl_test_classes))
    if args.sgd:
        sgd = sgd_classifier()
        print("Stochastic Gradient Descent Classifier:")
        print("bag_of_words:")
        print(sgd.train(bow_train_data, bow_train_classes))
        print_result(sgd.test(bow_test_data, bow_test_classes))
        print("bernoulli:")
        print(sgd.train(bnl_train_data, bnl_train_classes))
        print_result(sgd.test(bnl_test_data, bnl_test_classes))
if __name__ == "__main__":
    # Run the CLI entry point only when executed as a script.
    main()
| [
"Parser.bag_of_words",
"NBClassifier.nb_classifier",
"Parser.bernoulli",
"argparse.ArgumentParser",
"MultinomialNBClassifier.multinomial_nb_classifier",
"numpy.warnings.filterwarnings",
"Parser.get_vocabulary",
"LogisticRegressionClassifer.lr_classifer",
"SGDClassifier.sgd_classifier"
] | [((316, 368), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Instructions:"""'}), "(description='Instructions:')\n", (339, 368), False, 'import argparse\n'), ((1311, 1347), 'Parser.get_vocabulary', 'get_vocabulary', (['args.train_data_path'], {}), '(args.train_data_path)\n', (1325, 1347), False, 'from Parser import get_vocabulary, bag_of_words, bernoulli\n'), ((1388, 1434), 'Parser.bag_of_words', 'bag_of_words', (['args.train_data_path', 'vocabulary'], {}), '(args.train_data_path, vocabulary)\n', (1400, 1434), False, 'from Parser import get_vocabulary, bag_of_words, bernoulli\n'), ((1473, 1518), 'Parser.bag_of_words', 'bag_of_words', (['args.test_data_path', 'vocabulary'], {}), '(args.test_data_path, vocabulary)\n', (1485, 1518), False, 'from Parser import get_vocabulary, bag_of_words, bernoulli\n'), ((1559, 1602), 'Parser.bernoulli', 'bernoulli', (['args.train_data_path', 'vocabulary'], {}), '(args.train_data_path, vocabulary)\n', (1568, 1602), False, 'from Parser import get_vocabulary, bag_of_words, bernoulli\n'), ((1641, 1683), 'Parser.bernoulli', 'bernoulli', (['args.test_data_path', 'vocabulary'], {}), '(args.test_data_path, vocabulary)\n', (1650, 1683), False, 'from Parser import get_vocabulary, bag_of_words, bernoulli\n'), ((1714, 1729), 'NBClassifier.nb_classifier', 'nb_classifier', ([], {}), '()\n', (1727, 1729), False, 'from NBClassifier import nb_classifier\n'), ((1926, 1953), 'MultinomialNBClassifier.multinomial_nb_classifier', 'multinomial_nb_classifier', ([], {}), '()\n', (1951, 1953), False, 'from MultinomialNBClassifier import multinomial_nb_classifier\n'), ((2148, 2196), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""', '"""overflow"""'], {}), "('ignore', 'overflow')\n", (2174, 2196), True, 'import numpy as np\n'), ((2210, 2224), 'LogisticRegressionClassifer.lr_classifer', 'lr_classifer', ([], {}), '()\n', (2222, 2224), False, 'from LogisticRegressionClassifer import 
lr_classifer\n'), ((2630, 2646), 'SGDClassifier.sgd_classifier', 'sgd_classifier', ([], {}), '()\n', (2644, 2646), False, 'from SGDClassifier import sgd_classifier\n')] |
# -*- coding: utf-8 -*-
################################################
###### Copyright (c) 2016, <NAME>
###
import numpy as np
import itertools
import time
from .categoryaction import CatObject
class MultQ(object):
    """Element of the multiplicative quantale: the unit interval [0, 1]
    with ordinary multiplication as monoid operation and max as supremum."""

    def __init__(self, x):
        """Wrap a float from [0, 1]; raise if the value lies outside."""
        if x < 0 or x > 1:
            raise Exception("Real number should be comprised between 0 and 1")
        self.x = x

    @staticmethod
    def Unit():
        """Return the unit of the monoid operation (the number 1)."""
        return MultQ(1.0)

    @staticmethod
    def Zero():
        """Return the bottom element of the quantale (the number 0)."""
        return MultQ(0.0)

    def _check(self, rhs):
        # Shared guard: every binary operation requires another MultQ.
        if not isinstance(rhs, self.__class__):
            raise Exception("RHS is not a valid MultQ")

    def __mul__(self, rhs):
        """Monoid product, overloading '*': the ordinary float product."""
        self._check(rhs)
        return self.__class__(self.x * rhs.x)

    def __add__(self, rhs):
        """Supremum, overloading '+': the maximum of the two values."""
        self._check(rhs)
        return self.__class__(max(self.x, rhs.x))

    def __eq__(self, rhs):
        """Equality of the underlying float values, overloading '=='."""
        self._check(rhs)
        return self.x == rhs.x

    def __lt__(self, rhs):
        """Strict order on the underlying values, overloading '<'."""
        self._check(rhs)
        return self.x < rhs.x

    def __le__(self, rhs):
        """Non-strict order on the underlying values, overloading '<='."""
        self._check(rhs)
        return self.x <= rhs.x

    def __str__(self):
        """Human-readable rendering of the wrapped value."""
        return str(self.x)

    def __repr__(self):
        return "MultQ({})".format(self.x)
class IntvQ(object):
    """Element of the interval quantale: the unit interval [0, 1] with
    min as monoid operation and max as supremum."""

    def __init__(self, x):
        """Wrap a float from [0, 1]; raise if the value lies outside."""
        if x < 0 or x > 1:
            raise Exception("Real number should be comprised between 0 and 1")
        self.x = x

    @staticmethod
    def Unit():
        """Return the unit of the monoid operation (the number 1)."""
        return IntvQ(1.0)

    @staticmethod
    def Zero():
        """Return the bottom element of the quantale (the number 0)."""
        return IntvQ(0.0)

    def _check(self, rhs):
        # Shared guard: every binary operation requires another IntvQ.
        if not isinstance(rhs, self.__class__):
            raise Exception("RHS is not a valid IntvQ")

    def __mul__(self, rhs):
        """Monoid product, overloading '*': the minimum of the two values."""
        self._check(rhs)
        return self.__class__(min(self.x, rhs.x))

    def __add__(self, rhs):
        """Supremum, overloading '+': the maximum of the two values."""
        self._check(rhs)
        return self.__class__(max(self.x, rhs.x))

    def __eq__(self, rhs):
        """Equality of the underlying float values, overloading '=='."""
        self._check(rhs)
        return self.x == rhs.x

    def __lt__(self, rhs):
        """Strict order on the underlying values, overloading '<'."""
        self._check(rhs)
        return self.x < rhs.x

    def __le__(self, rhs):
        """Non-strict order on the underlying values, overloading '<='."""
        self._check(rhs)
        return self.x <= rhs.x

    def __str__(self):
        """Human-readable rendering of the wrapped value."""
        return str(self.x)

    def __repr__(self):
        return "IntvQ({})".format(self.x)
class Lin3Q(IntvQ):
    """Linear-order quantale on the three values {0, 1/2, 1}; a sub-quantale
    of the interval quantale IntvQ, from which all operations are inherited."""

    def __init__(self, x):
        """Wrap one of the values 0, 1/2, or 1; raise for any other value."""
        if x not in (0, 0.5, 1):
            raise Exception("The possibles values are 0, 1/2, and 1")
        super().__init__(x)

    @staticmethod
    def Unit():
        """Return the unit of the monoid operation (the number 1)."""
        return Lin3Q(1.0)

    @staticmethod
    def Zero():
        """Return the bottom element of the quantale (the number 0)."""
        return Lin3Q(0.0)

    def __str__(self):
        """Human-readable rendering of the wrapped value."""
        return str(self.x)

    def __repr__(self):
        return "Lin3Q({})".format(self.x)
########################################################
class QMorphism(object):
    def __init__(self, name, source, target, qtype=None, mapping=None):
        """Initializes a quantale-valued morphism between two finite sets.

        Parameters
        ----------
        name: a string representing the name of the morphism
        source: an instance of CatObject representing the domain
        target: an instance of CatObject representing the codomain
        qtype: the quantale class used for the coefficients
        mapping: optional mapping between domain and codomain, given
                 either as a dictionary (see set_mapping) or as a NumPy
                 matrix (see set_mapping_matrix)

        Raises an exception if source or target is not a CatObject, or if
        the quantale type is not specified.
        """
        if not isinstance(source, CatObject):
            raise Exception("Source is not a valid CatObject class\n")
        if not isinstance(target, CatObject):
            raise Exception("Target is not a valid CatObject class\n")
        if qtype is None:
            raise Exception("Type of quantale should be specified")
        self.name = name
        self.source = source
        self.target = target
        self.qtype = qtype
        if mapping is not None:
            # A dict is translated entry by entry; a NumPy array is taken
            # as the morphism matrix directly.
            if not isinstance(mapping, np.ndarray):
                self.set_mapping(mapping)
            else:
                self.set_mapping_matrix(mapping)

    def set_name(self, name):
        """Sets the name of the morphism; raises if the name is empty."""
        if not len(name):
            raise Exception("The specified morphism name is empty")
        self.name = name

    def set_to_identity(self):
        """Sets the morphism to the identity.

        Raises an exception unless domain and codomain are identical.
        """
        if not (self.source == self.target):
            raise Exception("Source and target should be identical")
        card_source = self.source.get_cardinality()
        M = np.empty((card_source, card_source), dtype=self.qtype)
        for i in range(card_source):
            for j in range(card_source):
                # Quantale unit on the diagonal, zero everywhere else.
                M[i, j] = self.qtype.Unit() if i == j else self.qtype.Zero()
        self.matrix = M

    def set_mapping(self, mapping):
        """Sets the mapping of elements between the domain and the codomain.

        Parameters
        ----------
        mapping: a dictionary with
            - keys: element names of the domain
            - values: lists of (codomain element name, quantale value) pairs
        The mapping can be one-to-many since we work in the category Rel(Q)
        of finite sets and quantale-valued relations.
        """
        card_source = self.source.get_cardinality()
        card_target = self.target.get_cardinality()
        # Start from the all-zero matrix, then fill in the given images.
        self.matrix = np.empty((card_target, card_source), dtype=self.qtype)
        for i in range(card_source):
            for j in range(card_target):
                self.matrix[j, i] = self.qtype.Zero()
        for elem, images in sorted(mapping.items()):
            id_elem = self.source.get_idx_by_name(elem)
            for image, value in images:
                id_image = self.target.get_idx_by_name(image)
                self.matrix[id_image, id_elem] = self.qtype(value)

    def set_mapping_matrix(self, matrix):
        """Sets the mapping directly from a quantale-valued (m, n) matrix,
        where m is the codomain cardinality and n the domain cardinality."""
        self.matrix = matrix

    def get_mapping(self):
        """Returns the mapping as a dictionary from domain element names to
        lists of (codomain element name, quantale value) pairs."""
        dest_cardinality, source_cardinality = self.matrix.shape
        d = {}
        for i in range(source_cardinality):
            l = []
            for j in range(dest_cardinality):
                v = self.matrix[j, i]
                l.append((self.target.get_name_by_idx(j), v.x))
            d[self.source.get_name_by_idx(i)] = l
        return d

    def get_mapping_matrix(self):
        """Returns the quantale-valued matrix representing the morphism."""
        return self.matrix

    def copy(self):
        """Returns a new QMorphism with the same name, domain, codomain,
        quantale type, and mapping matrix."""
        U = QMorphism(self.name, self.source, self.target, qtype=self.qtype)
        U.set_mapping_matrix(self.get_mapping_matrix())
        return U

    def _is_lefttotal(self):
        """Returns True if the morphism is left total, i.e. every domain
        element has at least one nonzero image."""
        # np.sum on the object array folds columns with the quantale
        # supremum (the overloaded '+').
        return np.all(np.sum(self.matrix, axis=0) > self.qtype.Zero())

    def __str__(self):
        """Returns a verbose description of the morphism via its source,
        target, and mapping; overloads 'str'."""
        descr = self.name + ":" + self.source.name + "->" + self.target.name + "\n\n"
        for s, t in sorted(self.get_mapping().items()):
            descr += " " * (len(self.name) + 1)
            # Render each image as name(value). The previous code passed raw
            # tuples to str.join, which always raised a TypeError.
            descr += s + "->" + ",".join("{}({})".format(x[0], x[1]) for x in t) + "\n"
        return descr

    def __call__(self, elem):
        """Applies the morphism to a domain element given by name.

        Returns the list of (codomain element name, quantale value) pairs
        whose value is nonzero.
        """
        idx_elem = self.source.get_idx_by_name(elem)
        return [(self.target.get_name_by_idx(j), v.x)
                for j, v in enumerate(self.matrix[:, idx_elem])
                if v != self.qtype.Zero()]

    def __pow__(self, int_power):
        """Raises the morphism to the power int_power; overloads '**'.

        Raises an exception if the morphism is not an endomorphism.
        """
        if not self.target == self.source:
            raise Exception("Morphism should be an endomorphism")
        U = self.copy()
        U.set_to_identity()
        for i in range(int_power):
            U = self * U
        U.set_name(self.name + "^" + str(int_power))
        return U

    def __mul__(self, morphism):
        """Composes two morphisms; overloads '*'.

        Returns self * morphism, or None if the two morphisms are not
        composable. Raises an exception if the rhs is not a QMorphism or
        uses a different quantale.
        """
        if not isinstance(morphism, QMorphism):
            raise Exception("RHS is not a valid QMorphism class\n")
        if not self.qtype == morphism.qtype:
            raise Exception("QMorphisms use different quantales")
        if not morphism.target == self.source:
            return None
        new_morphism = QMorphism(self.name + morphism.name, morphism.source,
                                 self.target, qtype=self.qtype)
        new_morphism.set_mapping_matrix(self.matrix.dot(morphism.matrix))
        return new_morphism

    def __eq__(self, morphism):
        """Checks equality of the two morphisms (same domain, codomain, and
        matrix); overloads '=='."""
        if not isinstance(morphism, QMorphism):
            raise Exception("RHS is not a valid QMorphism class\n")
        if not self.qtype == morphism.qtype:
            raise Exception("QMorphisms use different quantales")
        if self is None or morphism is None:
            return False
        return (self.source == morphism.source) and \
               (self.target == morphism.target) and \
               (np.array_equal(self.matrix, morphism.matrix))

    def __le__(self, morphism):
        """Checks if self is included in morphism, i.e. if there is a
        2-morphism in Rel from self to morphism; overloads '<='.

        Raises an exception if the rhs is not a QMorphism, if the quantale
        types differ, or if domain/codomain differ.
        """
        if not isinstance(morphism, QMorphism):
            # Message previously misnamed the class as CatMorphism.
            raise Exception("RHS is not a valid QMorphism class\n")
        if not self.qtype == morphism.qtype:
            raise Exception("QMorphisms use different quantales")
        if self is None or morphism is None:
            return False
        # Bug fix: the old check read `not A and B`, which only raised when
        # the sources differed AND the targets matched; the intended
        # condition is `not (A and B)`.
        if not (self.source == morphism.source and self.target == morphism.target):
            raise Exception("Morphisms should have the same domain and codomain")
        return np.all(self.matrix <= morphism.matrix)

    def __lt__(self, morphism):
        """Checks if self is strictly included in morphism, i.e. if there is
        a non-identity 2-morphism in Rel from self to morphism; overloads '<'.

        Raises an exception if the rhs is not a QMorphism, if the quantale
        types differ, or if domain/codomain differ.
        """
        if not isinstance(morphism, QMorphism):
            # Message previously misnamed the class as CatMorphism.
            raise Exception("RHS is not a valid QMorphism class\n")
        if not self.qtype == morphism.qtype:
            raise Exception("QMorphisms use different quantales")
        # Bug fix: same operator-precedence problem as in __le__.
        if not (self.source == morphism.source and self.target == morphism.target):
            raise Exception("Morphisms should have the same domain and codomain")
        if self is None or morphism is None:
            return False
        return np.all(self.matrix < morphism.matrix)
########################################"""
class CategoryQAction(object):
    def __init__(self, qtype=None, objects=None, generators=None, generate=True):
        """Instantiates a category action with morphisms valued in a quantale.

        Parameters
        ----------
        qtype: the quantale class used by all morphisms (required)
        objects: optional list of CatObject instances for the category
        generators: optional list of QMorphism instances generating it
        generate: if True, generate the whole category on instantiation

        Raises an exception if the quantale type is not specified.
        """
        if qtype is None:
            raise Exception("Type of quantale should be specified")
        self.qtype = qtype
        self.objects = {}
        self.generators = {}
        self.morphisms = {}
        self.equivalences = []
        if objects is not None:
            self.set_objects(objects)
        if generators is not None:
            self.set_generators(generators)
        if generate == True:
            self.generate_category()

    def set_objects(self, list_objects):
        """Sets the objects of the category action; erases all previously
        stored objects, generators, and morphisms.

        Raises an exception if object names are not distinct.
        """
        self.objects = {}
        self.generators = {}
        self.morphisms = {}
        self.equivalences = []
        ob_names = [catobject.name for catobject in list_objects]
        if not len(ob_names) == len(np.unique(ob_names)):
            raise Exception("Objects should have distinct names")
        for catobject in list_objects:
            self.objects[catobject.name] = catobject

    def get_objects(self):
        """Returns the sorted list of (name, CatObject) pairs."""
        return list(sorted(self.objects.items()))

    def get_morphisms(self):
        """Returns the sorted list of (name, QMorphism) pairs."""
        return list(sorted(self.morphisms.items()))

    def get_generators(self):
        """Returns the sorted list of (name, generator QMorphism) pairs."""
        return list(sorted(self.generators.items()))

    def set_generators(self, list_morphisms):
        """Sets the generators of the category action; erases all previously
        stored generators and morphisms.

        Raises an exception if generator names are not distinct, if a
        generator is not a QMorphism, or if its domain/codomain is not an
        object of the category.
        """
        self.generators = {}
        self.morphisms = {}
        self.equivalences = []
        all_gennames = [m.name for m in list_morphisms]
        if not len(all_gennames) == len(np.unique(all_gennames)):
            raise Exception("Generators must have distinct names")
        cat_obj_names = [x[0] for x in self.get_objects()]
        for m in list_morphisms:
            if not isinstance(m, QMorphism):
                raise Exception("Generator is not a valid QMorphism class\n")
            if not m.source.name in cat_obj_names:
                raise Exception("Domain or codomain of a generator is not present in the category")
            if not m.target.name in cat_obj_names:
                raise Exception("Domain or codomain of a generator is not present in the category")
            self.generators[m.name] = m

    def _add_morphisms(self, list_morphisms):
        """Adds morphisms to the category action.

        Raises an exception if a morphism's domain/codomain is not an object
        of the category or if its name collides with an existing morphism.
        """
        cat_obj_names = [x[0] for x in self.get_objects()]
        cat_mor_names = [x[0] for x in self.get_morphisms()]
        for m in list_morphisms:
            if not m.source.name in cat_obj_names:
                raise Exception("Domain or codomain of a generator is not present in the category")
            if not m.target.name in cat_obj_names:
                raise Exception("Domain or codomain of a generator is not present in the category")
            if m.name in cat_mor_names:
                raise Exception("Morphisms should have distinct names")
            self.morphisms[m.name] = m

    def _add_identities(self):
        """Adds an identity morphism id_<name> on each object."""
        for name, catobject in sorted(self.objects.items()):
            identity_morphism = QMorphism("id_" + name, catobject, catobject, qtype=self.qtype)
            identity_morphism.set_to_identity()
            self._add_morphisms([identity_morphism])

    def generate_category(self):
        """Generates all morphisms by repeatedly composing generators with
        the morphisms found so far until no new morphism appears.

        Suited to small category actions; performance would be prohibitive
        for very large categories.
        """
        self.morphisms = self.generators.copy()
        self._add_identities()
        new_liste = self.generators.copy()
        added_liste = self.generators.copy()
        while len(added_liste) > 0:
            added_liste = {}
            for name_x, morphism_x in sorted(new_liste.items()):
                for name_g, morphism_g in self.get_generators():
                    new_morphism = morphism_g * morphism_x
                    if not new_morphism is None:
                        c = 0
                        for name_y, morphism_y in self.get_morphisms():
                            if new_morphism == morphism_y:
                                c = 1
                                # Remember that this word equals a known one.
                                self.equivalences.append([new_morphism.name, morphism_y.name])
                        if c == 0:
                            added_liste[new_morphism.name] = new_morphism
                            self.morphisms[new_morphism.name] = new_morphism
            new_liste = added_liste

    def mult(self, name_g, name_f):
        """Returns the name of the morphism name_g * name_f, or None when
        the two morphisms are not composable."""
        new_morphism = self.morphisms[name_g] * self.morphisms[name_f]
        if new_morphism is None:
            return new_morphism
        else:
            return [name_x for name_x, x in self.get_morphisms() if x == new_morphism][0]

    def apply_operation(self, name_f, element):
        """Applies the morphism name_f to an element and returns the list of
        (image name, quantale value) pairs."""
        return self.morphisms[name_f](element)

    def get_operation(self, element_1, element_2):
        """Returns the names of all morphisms f such that element_2 is an
        image of element_1 by f."""
        res = []
        for name_f, f in self.get_morphisms():
            # Morphisms whose domain does not contain element_1 raise when
            # applied; skip them. A bare `except:` previously swallowed
            # KeyboardInterrupt/SystemExit as well.
            try:
                if element_2 in [x[0] for x in f(element_1)]:
                    res.append(name_f)
            except Exception:
                pass
        return res

    def rename_operation(self, name_f, new_name):
        """Renames the morphism name_f to new_name.

        Raises an exception if name_f is not a known morphism.
        """
        if not name_f in self.morphisms:
            raise Exception("The specified operation cannot be found")
        new_op = self.morphisms[name_f].copy()
        new_op.set_name(new_name)
        del self.morphisms[name_f]
        self.morphisms[new_name] = new_op

    def rewrite_operations(self):
        """Rewrites all morphism names by compressing repeated generator
        substrings (see _rewrite); updates the equivalence list too."""
        operation_names = sorted(self.morphisms.keys())
        for op_name in operation_names:
            self.rename_operation(op_name, self._rewrite(op_name))
        equivalences_new = []
        for x, y in self.equivalences:
            equivalences_new.append([self._rewrite(x), self._rewrite(y)])
        self.equivalences = equivalences_new

    def _rewrite(self, the_string):
        """Rewrites a morphism name by compressing runs of repeated generator
        names into the form (name^count).

        Raises an exception when the string is not a concatenation of
        generator names. Identity names are returned unchanged.
        """
        if "id" in the_string:
            return the_string
        generator_names = sorted(self.generators.keys())
        count_list = [["", 0]]
        while len(the_string):
            flag = 0
            for name_g in generator_names:
                if the_string[:len(name_g)] == name_g:
                    flag = 1
                    if count_list[-1][0] == name_g:
                        count_list[-1][1] += 1
                    else:
                        count_list.append([name_g, 1])
                    the_string = the_string[len(name_g):]
            if not flag:
                raise Exception("Operation name cannot be rewritten")
        new_string = ""
        for name, count in count_list:
            if count > 1:
                new_string += "(" + name + "^" + str(count) + ")"
            else:
                new_string += name
        return new_string

    def get_description(self, name_f):
        """Returns the string description of the morphism name_f."""
        return str(self.morphisms[name_f])
| [
"numpy.unique",
"numpy.sum",
"numpy.array_equal",
"numpy.empty",
"numpy.all"
] | [((11851, 11905), 'numpy.empty', 'np.empty', (['(card_source, card_source)'], {'dtype': 'self.qtype'}), '((card_source, card_source), dtype=self.qtype)\n', (11859, 11905), True, 'import numpy as np\n'), ((12884, 12938), 'numpy.empty', 'np.empty', (['(card_target, card_source)'], {'dtype': 'self.qtype'}), '((card_target, card_source), dtype=self.qtype)\n', (12892, 12938), True, 'import numpy as np\n'), ((20162, 20200), 'numpy.all', 'np.all', (['(self.matrix <= morphism.matrix)'], {}), '(self.matrix <= morphism.matrix)\n', (20168, 20200), True, 'import numpy as np\n'), ((21277, 21314), 'numpy.all', 'np.all', (['(self.matrix < morphism.matrix)'], {}), '(self.matrix < morphism.matrix)\n', (21283, 21314), True, 'import numpy as np\n'), ((19071, 19115), 'numpy.array_equal', 'np.array_equal', (['self.matrix', 'morphism.matrix'], {}), '(self.matrix, morphism.matrix)\n', (19085, 19115), True, 'import numpy as np\n'), ((15481, 15508), 'numpy.sum', 'np.sum', (['self.matrix'], {'axis': '(0)'}), '(self.matrix, axis=0)\n', (15487, 15508), True, 'import numpy as np\n'), ((23201, 23220), 'numpy.unique', 'np.unique', (['ob_names'], {}), '(ob_names)\n', (23210, 23220), True, 'import numpy as np\n'), ((25287, 25310), 'numpy.unique', 'np.unique', (['all_gennames'], {}), '(all_gennames)\n', (25296, 25310), True, 'import numpy as np\n')] |
import numpy as np
def euclidean_dist(x, y):
    """Return the matrix of pairwise, squared Euclidean distances.

    Entry (i, j) is ||x[i] - y[j]||^2, computed via the expansion
    ||a||^2 + ||b||^2 - 2 a.b; np.abs clamps tiny negative values caused
    by floating-point cancellation.
    """
    sq_norms_x = (x ** 2).sum(axis=1).reshape(-1, 1)
    sq_norms_y = (y ** 2).sum(axis=1)
    cross_terms = 2 * np.dot(x, np.transpose(y))
    return np.abs(sq_norms_x + sq_norms_y - cross_terms)
def prbf_kernel(x, y, gamma=0.05):
    """Constant-offset + RBF kernel between two sample sets.

    Parameters
    ----------
    x, y : 2-D arrays whose rows are sample points.
    gamma : float, optional
        RBF bandwidth.  Defaults to 0.05, the value that was previously
        hard-coded, so existing callers are unaffected.

    Returns
    -------
    ndarray with entries 10 + exp(-gamma * ||x_i - y_j||^2).
    """
    dists_sq = euclidean_dist(x, y)
    return 10 + np.exp(-gamma * dists_sq)
| [
"numpy.exp",
"numpy.abs",
"numpy.dot",
"numpy.transpose"
] | [((200, 215), 'numpy.transpose', 'np.transpose', (['y'], {}), '(y)\n', (212, 215), True, 'import numpy as np\n'), ((227, 239), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (233, 239), True, 'import numpy as np\n'), ((358, 369), 'numpy.abs', 'np.abs', (['sum'], {}), '(sum)\n', (364, 369), True, 'import numpy as np\n'), ((536, 561), 'numpy.exp', 'np.exp', (['(-gamma * dists_sq)'], {}), '(-gamma * dists_sq)\n', (542, 561), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import numpy
MOD = 2 ** 16
class RandomGenerator:
    """Deterministic 16-bit linear congruential generator (LCG)."""

    def __init__(self, seed):
        # Internal state; advanced on every call to next().
        self.seed = seed

    def next(self):
        """Advance the LCG one step and return the new 16-bit state."""
        self.seed = (self.seed * 25173 + 13849) % 65536
        return self.seed
def get_random_vector(generator, size):
    """Return a (size, 1) column vector of successive generator outputs."""
    values = []
    for _ in range(size):
        values.append([generator.next()])
    return numpy.array(values)
def convert_matrix(matrix, amount_of_cycles, seed):
    """Apply ``amount_of_cycles`` mixing rounds to ``matrix`` modulo MOD.

    Each round multiplies the matrix by a pseudo-random column vector,
    drops the first column and appends the product as the new last
    column, reducing every entry modulo MOD (2**16).
    """
    generator = RandomGenerator(seed)
    for _ in range(amount_of_cycles):
        column = get_random_vector(generator, len(matrix))
        product = numpy.dot(matrix, column)
        matrix = numpy.hstack((matrix[:, 1:], product)) % MOD
    return matrix
def get_initial_matrix(filename):
    """Parse ``filename`` into a square integer matrix.

    Each line holds n tokens of 4 hex digits; token (y, x) becomes the
    integer entry matrix[y][x].  The matrix size n is inferred from the
    length of the first line.
    """
    with open(filename, 'r') as f:
        lines = f.read().splitlines()
    n = len(lines[0]) // 4
    matrix = numpy.zeros((n, n), dtype=int)
    for y, line in enumerate(lines):
        # Walk the line in 4-character hex chunks.
        for offset in range(0, len(line), 4):
            matrix[y][offset // 4] = int(line[offset:offset + 4], 16)
    return matrix
def get_secret_from_matrix(matrix, msg_len):
    """XOR-fold the matrix entries into a string of length ``msg_len``.

    Each 16-bit entry contributes both its low byte (val % 256) and its
    high byte (val // 256), XORed into the output slot selected by the
    flattened index modulo ``msg_len``.
    """
    codes = [0] * msg_len
    for i, val in enumerate(matrix.flatten()):
        slot = i % msg_len
        codes[slot] ^= val % 256
        codes[slot] ^= val // 256
    return ''.join(chr(c) for c in codes)
def main():
    """Solve the challenge: mix the matrix read from 'encryption' for
    2**38 rounds, fold it into the 35-character secret, and print it.

    Note: the unused local ``n = 256`` from the original was removed;
    the matrix size is inferred by get_initial_matrix from the file.
    """
    amount_of_cycles = 2 ** 38
    seed = 35812
    msg_len = 35
    matrix = get_initial_matrix('encryption')
    converted_matrix = convert_matrix(matrix, amount_of_cycles, seed)
    secret = get_secret_from_matrix(converted_matrix, msg_len)
    print(secret)


if __name__ == '__main__':
    main()
| [
"numpy.dot",
"numpy.zeros",
"numpy.hstack"
] | [((821, 851), 'numpy.zeros', 'numpy.zeros', (['(n, n)'], {'dtype': 'int'}), '((n, n), dtype=int)\n', (832, 851), False, 'import numpy\n'), ((557, 587), 'numpy.dot', 'numpy.dot', (['matrix', 'rand_vector'], {}), '(matrix, rand_vector)\n', (566, 587), False, 'import numpy\n'), ((605, 646), 'numpy.hstack', 'numpy.hstack', (['(matrix[:, 1:], new_vector)'], {}), '((matrix[:, 1:], new_vector))\n', (617, 646), False, 'import numpy\n')] |
import json
import numpy as np
def to_ndarray(obj, dtype=np.float64):
    """
    Greedily and recursively convert the given object to a dtype ndarray.

    Dict values are converted in place; lists become ndarrays when they
    are rectangular and numeric, otherwise each element is converted
    recursively.  Anything else is returned unchanged.
    """
    if isinstance(obj, dict):
        for k in obj:
            obj[k] = to_ndarray(obj[k])
        return obj
    elif isinstance(obj, list):
        try:
            return np.array(obj, dtype=dtype)
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit and hid unrelated bugs.  np.array
        # signals an inconvertible (ragged or non-numeric) list with
        # ValueError/TypeError only.
        except (ValueError, TypeError):
            return [to_ndarray(o) for o in obj]
    else:
        return obj
class HelperNumpyJSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes Numpy arrays as plain lists."""

    def default(self, o):
        # Only ndarrays get special treatment; everything else is
        # deferred to the base class (which raises TypeError for
        # unserializable types).
        if isinstance(o, np.ndarray):
            return o.tolist()
        return super().default(o)
class NumpyJSONEncoder(json.JSONEncoder):
    """
    This encoder will print an entire collection onto a single line if it fits.
    Otherwise the individual elements are printed on separate lines. Numpy
    arrays are encoded as lists.
    This class is derived from contributions by <NAME> and <NAME> to a stackoverflow discussion:
    https://stackoverflow.com/questions/16264515/json-dumps-custom-formatting
    """
    MAX_WIDTH = 80  # Maximum length of a single line list.
    MAX_ITEMS = 80  # Maximum number of items in a single line list.

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Current nesting depth while encoding; drives indent_str.
        self.indentation_level = 0

    def encode(self, o):
        """Encode *o*, keeping any collection on one line when it fits."""
        # If o fits on a single line, do so.
        line = json.dumps(o, cls=HelperNumpyJSONEncoder)
        if len(line) <= self.MAX_WIDTH:
            return line
        # Otherwise, break o into pieces.
        else:
            # If a list, split each entry into a separate line.
            if isinstance(o, (list, tuple)):
                self.indentation_level += 1
                output = [self.indent_str + self.encode(el) for el in o]
                self.indentation_level -= 1
                return "[\n" + ",\n".join(output) + "\n" + self.indent_str + "]"
            # If a dict, each key/value pair into a separate line.
            if isinstance(o, dict):
                self.indentation_level += 1
                output = [self.indent_str + f"{json.dumps(k)}: {self.encode(v)}" for k, v in o.items()]
                self.indentation_level -= 1
                return "{\n" + ",\n".join(output) + "\n" + self.indent_str + "}"
            # Otherwise use default encoding.
            return json.dumps(o)

    @property
    def indent_str(self) -> str:
        # `indent` is None when the caller did not request pretty-printing;
        # treat that as zero.  BUG FIX: compare with `is None` rather than
        # `== None` (PEP 8; `==` invokes arbitrary __eq__ overloads).
        if self.indent is None:
            indent = 0
        else:
            indent = self.indent
        return " " * self.indentation_level * indent
if __name__ == '__main__':
    import copy

    # Example data exercising nesting, repeated entries and Numpy arrays.
    data = {
        'bounds': {'extents': [0, 5.0, 0, 2.0, 0, 13.0]},
        'blocks': [
            {'extents': [2, 3, 0.0, 2, 0.0, 10.0], 'color': [1, 0, 0]},
            {'extents': [2, 3, 0.0, 2, 0.0, 10.0], 'color': [1, 0, 0]},
            {'extents': [2, 3, 0.0, 2, 0.0, 10.0], 'color': [1, 0, 0]},
            {'extents': [2, 3, 0.0, 2, 0.0, 10.0], 'color': [1, 0, 0]},
            {'extents': [2, 3, 0.0, 2, 0.0, 10.0]},
            {'extents': [2, 3, 0.0, 2, 0.0, 10.0]},
            {'extents': [2, 3, 0.0, 2, 0.0, 10.0]}],
        'start': np.array([0, 0, 1]),
        'goal': np.array([4, 0, 2]),
        'resolution': np.array([0.25, 0.25, 0.5]),
        'margin': 0.1,
        'expected_path_length': 20.52
    }
    data['more'] = copy.deepcopy(data)

    # Encode once; print to the terminal and write the same string to disk.
    # Using 'dump' not yet supported.
    encoded = json.dumps(data, cls=NumpyJSONEncoder, indent=4)
    print(encoded)
    with open('example.json', 'w') as file:
        file.write(encoded)

    # Round-trip: reload and greedily restore ndarrays.
    with open('example.json') as file:
        data_out = to_ndarray(json.load(file))
    print(data_out)
| [
"json.JSONEncoder.default",
"json.dumps",
"numpy.array",
"copy.deepcopy",
"json.load"
] | [((3452, 3471), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (3465, 3471), False, 'import copy\n'), ((678, 711), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'o'], {}), '(self, o)\n', (702, 711), False, 'import json\n'), ((1468, 1509), 'json.dumps', 'json.dumps', (['o'], {'cls': 'HelperNumpyJSONEncoder'}), '(o, cls=HelperNumpyJSONEncoder)\n', (1478, 1509), False, 'import json\n'), ((3257, 3276), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (3265, 3276), True, 'import numpy as np\n'), ((3294, 3313), 'numpy.array', 'np.array', (['[4, 0, 2]'], {}), '([4, 0, 2])\n', (3302, 3313), True, 'import numpy as np\n'), ((3337, 3364), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.5]'], {}), '([0.25, 0.25, 0.5])\n', (3345, 3364), True, 'import numpy as np\n'), ((3520, 3568), 'json.dumps', 'json.dumps', (['data'], {'cls': 'NumpyJSONEncoder', 'indent': '(4)'}), '(data, cls=NumpyJSONEncoder, indent=4)\n', (3530, 3568), False, 'import json\n'), ((3781, 3796), 'json.load', 'json.load', (['file'], {}), '(file)\n', (3790, 3796), False, 'import json\n'), ((2422, 2435), 'json.dumps', 'json.dumps', (['o'], {}), '(o)\n', (2432, 2435), False, 'import json\n'), ((3672, 3720), 'json.dumps', 'json.dumps', (['data'], {'cls': 'NumpyJSONEncoder', 'indent': '(4)'}), '(data, cls=NumpyJSONEncoder, indent=4)\n', (3682, 3720), False, 'import json\n'), ((336, 362), 'numpy.array', 'np.array', (['obj'], {'dtype': 'dtype'}), '(obj, dtype=dtype)\n', (344, 362), True, 'import numpy as np\n'), ((2175, 2188), 'json.dumps', 'json.dumps', (['k'], {}), '(k)\n', (2185, 2188), False, 'import json\n')] |
import torch
from Utils import *
import torchvision
import math
import numpy as np
import faiss
from Utils import LogText
import clustering
from scipy.optimize import linear_sum_assignment
import imgaug.augmenters as iaa
import imgaug.augmentables.kps
class SuperPoint():
    """Wrapper around a pretrained SuperPoint network.

    Runs keypoint detection and description on a dataset, filters
    outlier keypoints, clusters descriptors with faiss k-means and
    builds the initial pseudo-groundtruth (keypoint -> cluster id per
    image) used by the rest of the pipeline.
    """

    def __init__(self, number_of_clusters, confidence_thres_superpoint,nms_thres_superpoint,path_to_pretrained_superpoint,experiment_name,log_path,remove_superpoint_outliers_percentage,use_box=False,UseScales=False,RemoveBackgroundClusters=False):
        """Load the pretrained SuperPoint weights and store configuration.

        Parameters
        ----------
        number_of_clusters : number of k-means clusters (candidate landmarks).
        confidence_thres_superpoint : detector confidence threshold.
        nms_thres_superpoint : non-maximum-suppression window size (pixels).
        path_to_pretrained_superpoint : checkpoint file for SuperPointNet.
        experiment_name, log_path : used for logging and checkpoint paths.
        remove_superpoint_outliers_percentage : fraction of keypoints to
            discard as outliers (0 disables outlier removal).
        use_box : keep only keypoints inside the sample's bounding box.
        UseScales : additionally detect at two down-scaled resolutions and
            merge the results.
        RemoveBackgroundClusters : tag/remove constant-background keypoints
            (see Indexes_of_BackgroundPoints).
        """
        self.path_to_pretrained_superpoint=path_to_pretrained_superpoint
        self.use_box=use_box
        self.confidence_thres_superpoint=confidence_thres_superpoint
        self.nms_thres_superpoint=nms_thres_superpoint
        self.log_path=log_path
        self.remove_superpoint_outliers_percentage=remove_superpoint_outliers_percentage
        self.experiment_name=experiment_name
        self.number_of_clusters=number_of_clusters
        self.model = Cuda(SuperPointNet())
        self.UseScales=UseScales
        self.RemoveBackgroundClusters=RemoveBackgroundClusters
        if(self.UseScales):
            # Inverse affine transforms that map keypoints detected on the
            # 1.3x / 1.6x scaled inputs back to the original resolution.
            self.SuperpointUndoScaleDistill1 = iaa.Affine(scale={"x": 1 / 1.3, "y": 1 / 1.3})
            self.SuperpointUndoScaleDistill2 = iaa.Affine(scale={"x": 1 / 1.6, "y": 1 / 1.6})
        try:
            checkpoint = torch.load(path_to_pretrained_superpoint, map_location='cpu')
            self.model.load_state_dict(checkpoint)
            LogText(f"Superpoint Network from checkpoint {path_to_pretrained_superpoint}", self.experiment_name, self.log_path)
        # NOTE(review): bare except — any failure (including interrupts) is
        # reported as a weight-loading error; consider `except Exception`.
        except:
            raise Exception(f"Superpoint weights from {path_to_pretrained_superpoint} failed to load.")
        self.softmax = torch.nn.Softmax(dim=1)
        self.pixelSuffle = torch.nn.PixelShuffle(8)
        self.model.eval()

    def CreateInitialPseudoGroundtruth(self, dataloader):
        """Detect, filter and cluster keypoints over the whole dataset.

        Returns a dict mapping image filename -> array of keypoints
        (x, y, cluster id) in heatmap (64x64) coordinates, and also saves
        it as "SuperPointKeypoints.pickle" in the checkpoint directory.
        """
        LogText(f"Extraction of initial Superpoint pseudo groundtruth", self.experiment_name,self.log_path)
        # assumes dataset images are 256x256 and FAN heatmaps 64x64 — TODO confirm
        imagesize=256
        heatmapsize=64
        numberoffeatures=256
        buffersize=500000
        #allocation of 2 buffers for temporal storing of keypoints and descriptors.
        Keypoint_buffer = torch.zeros(buffersize, 3)
        Descriptor__buffer = torch.zeros(buffersize, numberoffeatures)
        #arrays on which we save buffer content periodically. Corresponding files are temporal and
        #will be deleted after the completion of the process
        CreateFileArray(str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'),3)
        CreateFileArray(str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'), numberoffeatures)
        #intermediate variables
        first_index = 0
        last_index = 0
        buffer_first_index = 0
        buffer_last_index = 0
        # filename -> [first, last) range of that image's rows in the files
        keypoint_indexes = {}
        LogText(f"Inference of Keypoints begins", self.experiment_name, self.log_path)
        for i_batch, sample in enumerate(dataloader):
            input = Cuda(sample['image_gray'])
            names = sample['filename']
            bsize=input.size(0)
            if(self.UseScales):
                # fold the per-scale channel dimension into the batch axis
                input=input.view(-1,1,input.shape[2],input.shape[3])
            with torch.no_grad():
                detectorOutput,descriptorOutput=self.GetSuperpointOutput(input)
            if(self.UseScales):
                # unfold scales back; keep only scale 0 descriptors
                detectorOutput=detectorOutput.view(bsize,-1,detectorOutput.shape[2],detectorOutput.shape[3])
                input=input.view(bsize,-1,input.shape[2],input.shape[3])
                descriptorOutput=descriptorOutput.view(bsize,-1,descriptorOutput.size(1),descriptorOutput.size(2),descriptorOutput.size(3))[:,0]
            for i in range(0, bsize):
                keypoints = self.GetPoints(detectorOutput[i].unsqueeze(0), self.confidence_thres_superpoint, self.nms_thres_superpoint)
                if (self.RemoveBackgroundClusters):
                    # tag (but keep) keypoints outside the bounding box with -1
                    bounding_box=sample['bounding_box'][i]
                    pointsinbox = torch.ones(len(keypoints))
                    pointsinbox[(keypoints[:, 0] < int(bounding_box[0]))] = -1
                    pointsinbox[(keypoints[:, 1] < int(bounding_box[1]))] = -1
                    pointsinbox[(keypoints[:, 0] > int(bounding_box[2]))] = -1
                    pointsinbox[(keypoints[:, 1] > int(bounding_box[3]))] = -1
                elif (self.use_box):
                    # discard keypoints outside the bounding box
                    bounding_box=sample['bounding_box'][i]
                    pointsinbox = torch.ones(len(keypoints))
                    pointsinbox[(keypoints[:, 0] < int(bounding_box[0]))] = -1
                    pointsinbox[(keypoints[:, 1] < int(bounding_box[1]))] = -1
                    pointsinbox[(keypoints[:, 0] > int(bounding_box[2]))] = -1
                    pointsinbox[(keypoints[:, 1] > int(bounding_box[3]))] = -1
                    keypoints=keypoints[pointsinbox==1]
                descriptors = GetDescriptors(descriptorOutput[i], keypoints, input.shape[3], input.shape[2])
                #scale image keypoints to FAN resolution
                keypoints=dataloader.dataset.keypointsToFANResolution(dataloader.dataset,names[i],keypoints)
                keypoints = ((heatmapsize/imagesize)*keypoints).round()
                last_index += len(keypoints)
                buffer_last_index += len(keypoints)
                Keypoint_buffer[buffer_first_index:buffer_last_index, :2] = keypoints
                Descriptor__buffer[buffer_first_index:buffer_last_index] = descriptors
                if (self.RemoveBackgroundClusters):
                    # third column carries the in-box (+1) / background (-1) tag
                    Keypoint_buffer[buffer_first_index:buffer_last_index, 2] = pointsinbox
                keypoint_indexes[names[i]] = [first_index, last_index]
                first_index += len(keypoints)
                buffer_first_index += len(keypoints)
                #periodically we store the buffer in file
                if buffer_last_index>int(buffersize*0.8):
                    AppendFileArray(np.array(Keypoint_buffer[:buffer_last_index]),str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'))
                    AppendFileArray(np.array(Descriptor__buffer[:buffer_last_index]), str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'))
                    Keypoint_buffer = torch.zeros(buffersize, 3)
                    Descriptor__buffer = torch.zeros(buffersize, numberoffeatures)
                    buffer_first_index = 0
                    buffer_last_index = 0
        LogText(f"Inference of Keypoints completed", self.experiment_name, self.log_path)
        #store any keypoints left on the buffers
        AppendFileArray(np.array(Keypoint_buffer[:buffer_last_index]), str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'))
        AppendFileArray(np.array(Descriptor__buffer[:buffer_last_index]), str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'))
        #load handlers to the Keypoints and Descriptor files
        Descriptors,fileHandler1=OpenreadFileArray(str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'))
        Keypoints, fileHandler2 = OpenreadFileArray( str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'))
        Keypoints = Keypoints[:, :]
        LogText(f"Keypoints Detected per image {len(Keypoints)/len(keypoint_indexes)}", self.experiment_name, self.log_path)
        #perform outlier detection
        inliersindexes=np.ones(len(Keypoints))==1
        if(self.remove_superpoint_outliers_percentage>0):
            inliersindexes=self.Indexes_of_inliers(Keypoints,Descriptors,buffersize)
        #extend outliers with background points for constant background datasets
        if (self.RemoveBackgroundClusters):
            foregroundpointindex=self.Indexes_of_BackgroundPoints(Keypoints,Descriptors,keypoint_indexes)
            inliersindexes = np.logical_and(inliersindexes, foregroundpointindex)
        LogText(f"Keypoints Detected per image(filtering) {sum(inliersindexes) / len(keypoint_indexes)}", self.experiment_name,self.log_path)
        #we use a subset of all the descriptors for clustering based on the recomendation of the Faiss repository
        numberOfPointsForClustering=500000
        LogText(f"Clustering of keypoints", self.experiment_name, self.log_path)
        #clustering of superpoint features
        KmeansClustering = clustering.Kmeans(self.number_of_clusters, centroids=None)
        descriptors = clustering.preprocess_features( Descriptors[:numberOfPointsForClustering][inliersindexes[:numberOfPointsForClustering]])
        KmeansClustering.cluster(descriptors, verbose=False)
        # per-cluster acceptance radius (mean + std of member distances)
        thresholds=self.GetThresholdsPerCluster( inliersindexes,Descriptors,KmeansClustering)
        Image_Keypoints = {}
        averagepointsperimage=0
        for image in keypoint_indexes:
            start,end=keypoint_indexes[image]
            inliersinimage=inliersindexes[start:end]
            keypoints=Keypoints[start:end,:]
            # drop keypoints outside the 64x64 heatmap
            inliersinimage[np.sum(keypoints[:,:2]<0 ,1)>0]=False
            inliersinimage[np.sum(keypoints[:,:2]>64 ,1)>0]=False
            keypoints=keypoints[inliersinimage]
            image_descriptors=clustering.preprocess_features(Descriptors[start:end])
            image_descriptors=image_descriptors[inliersinimage]
            #calculate distance of each keypoints to each centroid
            distanceMatrix, clustering_assignments = KmeansClustering.index.search(image_descriptors, self.number_of_clusters)
            # reorder columns so that column j holds the distance to centroid j
            distanceMatrix=np.take_along_axis(distanceMatrix, np.argsort(clustering_assignments), axis=-1)
            #assign keypoints to centroids using the Hungarian algorithm. This ensures that each
            #image has at most one instance of each cluster
            keypointIndex,clusterAssignment= linear_sum_assignment(distanceMatrix)
            tempKeypoints=keypoints[keypointIndex]
            clusterAssignmentDistance = distanceMatrix[keypointIndex, clusterAssignment]
            clusterstokeep = np.zeros(len(clusterAssignmentDistance))
            clusterstokeep = clusterstokeep == 1
            # keep only points that lie below a cluster-specific threshold
            clusterstokeep[clusterAssignmentDistance < thresholds[clusterAssignment]] = True
            tempKeypoints[:,2]=clusterAssignment
            Image_Keypoints[image]=tempKeypoints[clusterstokeep]
            averagepointsperimage+=sum(clusterstokeep)
        LogText(f"Keypoints Detected per image(clusteringAssignment) {averagepointsperimage / len(Image_Keypoints)}",self.experiment_name, self.log_path)
        ClosereadFileArray(fileHandler1,str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'))
        ClosereadFileArray(fileHandler2,str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'))
        self.save_keypoints(Image_Keypoints,"SuperPointKeypoints.pickle")
        LogText(f"Extraction of Initial pseudoGroundtruth completed", self.experiment_name, self.log_path)
        return Image_Keypoints

    def Indexes_of_inliers(self,Keypoints,Descriptors,buffersize):
        """Return a boolean mask marking keypoints kept after outlier removal.

        A keypoint's outlier score is the median squared distance of its
        descriptor to its 100 nearest neighbours (faiss IVF index on GPU);
        the highest-scoring `remove_superpoint_outliers_percentage`
        fraction is discarded.
        """
        res = faiss.StandardGpuResources()
        nlist = 100
        quantizer = faiss.IndexFlatL2(256)
        index = faiss.IndexIVFFlat(quantizer, 256, nlist)
        gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index)
        gpu_index_flat.train(clustering.preprocess_features(Descriptors[:buffersize]))
        gpu_index_flat.add(clustering.preprocess_features(Descriptors[:buffersize]))
        #we process the descriptors in batches of 10000 vectors
        rg = np.linspace(0, len(Descriptors), math.ceil(len(Descriptors) / 10000) + 1, dtype=int)
        keypoints_outlier_score=np.zeros(len(Keypoints))
        for i in range(len(rg) - 1):
            descr = clustering.preprocess_features(Descriptors[rg[i]:rg[i + 1], :])
            distance_to_closest_points, _ = gpu_index_flat.search(descr, 100)
            outlierscore = np.median(distance_to_closest_points, axis=1)
            keypoints_outlier_score[rg[i]:rg[i + 1]] = outlierscore
        inliers = keypoints_outlier_score.copy()
        inliers = np.sort(inliers)
        threshold = inliers[int((1-self.remove_superpoint_outliers_percentage) * (len(inliers) - 1))]
        inliers = keypoints_outlier_score < threshold
        return inliers

    # For constant background datasets like Human3.6 we use this method to discard background keypoints inside the object's bounding box.
    # We cluster foreground and background keypoints separately. Then remove keypoints whose descriptors are closer to background centroids.
    def Indexes_of_BackgroundPoints(self,Keypoints,Descriptors,keypoint_indexes):
        """Return a boolean mask of keypoints judged to be foreground.

        Relies on the in-box tag stored in Keypoints[:, 2] by
        CreateInitialPseudoGroundtruth (+1 inside box, -1 outside).
        """
        backgroundpoitnsIndex = Keypoints[:, 2] == -1
        insideboxPoitnsIndex = Keypoints[:, 2] == 1
        backgroundDescriptors = clustering.preprocess_features(
            Descriptors[:500000 ][ [backgroundpoitnsIndex[:500000 ]]])
        insideboxDescriptors = clustering.preprocess_features(
            Descriptors[:500000][ [insideboxPoitnsIndex[:500000 ]]])
        number_of_insideClusters=100
        number_of_outsideClusters=250
        backgroundclustering= clustering.Kmeans(number_of_outsideClusters, centroids=None)
        insideboxclustering = clustering.Kmeans(number_of_insideClusters, centroids=None)
        backgroundclustering.cluster(backgroundDescriptors, verbose=False)
        insideboxclustering.cluster(insideboxDescriptors, verbose=False)
        foregroundpointindex=np.zeros(len(Keypoints))==-1
        for imagename in keypoint_indexes:
            start,end=keypoint_indexes[imagename]
            keypoints = Keypoints[start:end, :]
            descriptors=Descriptors[start:end,:]
            # distance to the nearest inside-box / background centroid
            distanceinside, Iinside = insideboxclustering.index.search(clustering.preprocess_features(descriptors), 1)
            distanceoutside, Ioutside = backgroundclustering.index.search(clustering.preprocess_features(descriptors), 1)
            points_to_keep = (distanceinside < distanceoutside).reshape(-1)
            # keep only in-box points that are descriptor-wise foreground
            points_to_keep = np.logical_and(points_to_keep,keypoints[:,2]==1)
            foregroundpointindex[start:end] = points_to_keep
        return foregroundpointindex

    def GetPoints(self,confidenceMap, threshold, NMSthes):
        """Extract keypoint coordinates from the detector confidence map.

        With a single-channel map this is plain thresholding + NMS.  With
        three channels (UseScales) the extra channels hold detections at
        the 1.3x / 1.6x scales; those are mapped back to the original
        resolution with the inverse affine transforms and merged.
        """
        if(confidenceMap.size(1)==1):
            points,_=self.GetPointsFromHeatmap(confidenceMap, threshold, NMSthes)
            return points
        keypoints,keypointprob = self.GetPointsFromHeatmap(confidenceMap[:,0:1], threshold, NMSthes)
        keypoints1,keypoint1prob = self.GetPointsFromHeatmap(confidenceMap[:,1:2], threshold, NMSthes)
        keypoints2,keypoint2prob = self.GetPointsFromHeatmap(confidenceMap[:,2:3], threshold, NMSthes)
        # undo the 1.3x scaling for the second channel's keypoints
        keys = keypoints1
        imgaug_keypoints = []
        for j in range(len(keys)):
            imgaug_keypoints.append(imgaug.augmentables.kps.Keypoint(x=keys[j, 0], y=keys[j, 1]))
        kpsoi = imgaug.augmentables.kps.KeypointsOnImage(imgaug_keypoints, shape=confidenceMap.shape[2:])
        keypoitns_aug = self.SuperpointUndoScaleDistill1(keypoints=kpsoi)
        keys = keypoitns_aug.to_xy_array()
        keypoints1 = keys
        # undo the 1.6x scaling for the third channel's keypoints
        keys = keypoints2
        imgaug_keypoints = []
        for j in range(len(keys)):
            imgaug_keypoints.append(imgaug.augmentables.kps.Keypoint(x=keys[j, 0], y=keys[j, 1]))
        kpsoi = imgaug.augmentables.kps.KeypointsOnImage(imgaug_keypoints, shape=confidenceMap.shape[2:])
        keypoitns_aug = self.SuperpointUndoScaleDistill2(keypoints=kpsoi)
        keys = keypoitns_aug.to_xy_array()
        keypoints2 = keys
        # stack all scales with their confidences and merge nearby duplicates
        newkeypoints = Cuda(torch.from_numpy(np.row_stack((keypoints.cpu().detach().numpy(),keypoints1,keypoints2))))
        newkeypointsprob = torch.cat((keypointprob,keypoint1prob,keypoint2prob))
        newkeypoints=torch.cat((newkeypoints,newkeypointsprob.unsqueeze(1)),1)
        newkeypoints = MergeScales(newkeypoints, int(NMSthes/2))
        return newkeypoints[:,:2]

    def GetPointsFromHeatmap(self,confidenceMap, threshold, NMSthes):
        """Threshold a 1-channel confidence map and apply box-based NMS.

        Returns (points, prob): (x, y) coordinates and their confidences,
        sorted by descending confidence.  NMS is implemented by turning
        each point into an NMSthes-sized box and calling torchvision's
        box NMS with a near-zero IoU threshold.
        """
        mask = confidenceMap > threshold
        prob = confidenceMap[mask]
        value, indices = prob.sort(descending=True)
        pred = torch.nonzero(mask)
        prob = prob[indices]
        pred = pred[indices]
        # nonzero rows are (batch, channel, y, x); keep (y, x) and flip to (x, y)
        points = pred[:, 2:4]
        points = points.flip(1)
        nmsPoints = torch.cat((points.float(), prob.unsqueeze(1)), 1).transpose(0, 1)
        thres = math.ceil(NMSthes / 2)
        newpoints = torch.cat((nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[0:1, :] + thres,
                                nmsPoints[1:2, :] + thres, nmsPoints[2:3, :]), 0).transpose(0, 1)
        res = torchvision.ops.nms(newpoints[:, 0:4], newpoints[:, 4], 0.01)
        points = nmsPoints[:, res].transpose(0, 1)
        returnPoints = points[:, 0:2]
        prob = points[:, 2]
        return returnPoints,prob

    def GetSuperpointOutput(self,input):
        """Run the network; return (per-pixel confidence map, descriptor volume).

        The 65-channel detector output is softmaxed, the dustbin channel
        dropped, and the remaining 64 channels pixel-shuffled back to full
        image resolution.
        """
        keypoints_volume, descriptors_volume = self.model(input)
        keypoints_volume = keypoints_volume.detach()
        keypoints_volume = self.softmax(keypoints_volume)
        volumeNoDustbin = keypoints_volume[:, :-1, :, :]
        spaceTensor = self.pixelSuffle(volumeNoDustbin)
        return spaceTensor,descriptors_volume

    def GetThresholdsPerCluster(self,inliersindexes,Descriptors,deepcluster):
        """Return per-cluster distance thresholds (mean + std of member
        distances to their centroid), computed over inlier descriptors in
        batches of 10000."""
        rg = np.linspace(0, sum(inliersindexes), math.ceil(sum(inliersindexes) / 10000) + 1, dtype=int)
        distance_to_centroid_per_cluster = list([[] for i in range(self.number_of_clusters)])
        for i in range(len(rg) - 1):
            descriptors = clustering.preprocess_features(Descriptors[rg[i]:rg[i + 1], :][inliersindexes[rg[i]:rg[i + 1]]])
            distancesFromCenter, clustering_assingments = deepcluster.index.search(descriptors, 1)
            for point in range(len(clustering_assingments)):
                distance_to_centroid_per_cluster[int(clustering_assingments[point])].append(
                    distancesFromCenter[point][0])
        thresholds = np.zeros(self.number_of_clusters)
        for i in range(self.number_of_clusters):
            # empty clusters get threshold 0 (nothing will be assigned to them)
            if (len(distance_to_centroid_per_cluster[i]) == 0):
                thresholds[i] = 0
            else:
                thresholds[i]=np.average(np.array(distance_to_centroid_per_cluster[i]))+np.std(distance_to_centroid_per_cluster[i])
        return thresholds

    def save_keypoints(self,Image_Keypoints,filename):
        """Pickle the image -> keypoints dict into the checkpoint directory."""
        checkPointdir = GetCheckPointsPath(self.experiment_name,self.log_path)
        checkPointFile=checkPointdir /filename
        with open(checkPointFile, 'wb') as handle:
            pickle.dump(Image_Keypoints, handle, protocol=pickle.HIGHEST_PROTOCOL)
# ----------------------------------------------------------------------
# https://github.com/magicleap/SuperPointPretrainedNetwork/
#
# --------------------------------------------------------------------*/
#
class SuperPointNet(torch.nn.Module):
    """ Pytorch definition of SuperPoint Network. """

    def __init__(self):
        super(SuperPointNet, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.numberOfClasses = 1
        # Channel widths: four encoder stages, the head width, and the
        # descriptor dimensionality.
        enc1, enc2, enc3, enc4, head, desc_dim = 64, 64, 128, 128, 256, 256
        # Shared Encoder (attribute names must match the pretrained
        # checkpoint's state_dict keys).
        self.conv1a = torch.nn.Conv2d(1, enc1, kernel_size=3, stride=1, padding=1)
        self.conv1b = torch.nn.Conv2d(enc1, enc1, kernel_size=3, stride=1, padding=1)
        self.conv2a = torch.nn.Conv2d(enc1, enc2, kernel_size=3, stride=1, padding=1)
        self.conv2b = torch.nn.Conv2d(enc2, enc2, kernel_size=3, stride=1, padding=1)
        self.conv3a = torch.nn.Conv2d(enc2, enc3, kernel_size=3, stride=1, padding=1)
        self.conv3b = torch.nn.Conv2d(enc3, enc3, kernel_size=3, stride=1, padding=1)
        self.conv4a = torch.nn.Conv2d(enc3, enc4, kernel_size=3, stride=1, padding=1)
        self.conv4b = torch.nn.Conv2d(enc4, enc4, kernel_size=3, stride=1, padding=1)
        # Detector Head (65 output channels).
        self.convPa = torch.nn.Conv2d(enc4, head, kernel_size=3, stride=1, padding=1)
        self.convPb = torch.nn.Conv2d(head, 65, kernel_size=1, stride=1, padding=0)
        # Descriptor Head.
        self.convDa = torch.nn.Conv2d(enc4, head, kernel_size=3, stride=1, padding=1)
        self.convDb = torch.nn.Conv2d(head, desc_dim, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        """ Forward pass that jointly computes unprocessed point and descriptor
        tensors.
        Input
        x: Image pytorch tensor shaped N x 1 x H x W.
        Output
        semi: Output point pytorch tensor shaped N x 65 x H/8 x W/8.
        desc: Output descriptor pytorch tensor shaped N x 256 x H/8 x W/8
        (L2-normalized along the channel dimension).
        """
        # Shared encoder: four conv pairs; the first three are followed by
        # 2x2 max-pooling, so spatial resolution drops by 8 overall.
        stages = (
            (self.conv1a, self.conv1b, True),
            (self.conv2a, self.conv2b, True),
            (self.conv3a, self.conv3b, True),
            (self.conv4a, self.conv4b, False),
        )
        for first, second, downsample in stages:
            x = self.relu(first(x))
            x = self.relu(second(x))
            if downsample:
                x = self.pool(x)
        # Detector head.
        semi = self.convPb(self.relu(self.convPa(x)))
        # Descriptor head, L2-normalized per spatial location.
        raw_desc = self.convDb(self.relu(self.convDa(x)))
        norms = torch.norm(raw_desc, p=2, dim=1)
        desc = raw_desc.div(torch.unsqueeze(norms, 1))
        return semi, desc
| [
"torch.nn.ReLU",
"clustering.preprocess_features",
"numpy.argsort",
"numpy.array",
"faiss.index_cpu_to_gpu",
"scipy.optimize.linear_sum_assignment",
"torch.unsqueeze",
"numpy.sort",
"torchvision.ops.nms",
"faiss.StandardGpuResources",
"clustering.Kmeans",
"torch.nn.PixelShuffle",
"torch.norm... | [((1744, 1767), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1760, 1767), False, 'import torch\n'), ((1795, 1819), 'torch.nn.PixelShuffle', 'torch.nn.PixelShuffle', (['(8)'], {}), '(8)\n', (1816, 1819), False, 'import torch\n'), ((1915, 2020), 'Utils.LogText', 'LogText', (['f"""Extraction of initial Superpoint pseudo groundtruth"""', 'self.experiment_name', 'self.log_path'], {}), "(f'Extraction of initial Superpoint pseudo groundtruth', self.\n experiment_name, self.log_path)\n", (1922, 2020), False, 'from Utils import LogText\n'), ((2236, 2262), 'torch.zeros', 'torch.zeros', (['buffersize', '(3)'], {}), '(buffersize, 3)\n', (2247, 2262), False, 'import torch\n'), ((2292, 2333), 'torch.zeros', 'torch.zeros', (['buffersize', 'numberoffeatures'], {}), '(buffersize, numberoffeatures)\n', (2303, 2333), False, 'import torch\n'), ((2895, 2973), 'Utils.LogText', 'LogText', (['f"""Inference of Keypoints begins"""', 'self.experiment_name', 'self.log_path'], {}), "(f'Inference of Keypoints begins', self.experiment_name, self.log_path)\n", (2902, 2973), False, 'from Utils import LogText\n'), ((6569, 6655), 'Utils.LogText', 'LogText', (['f"""Inference of Keypoints completed"""', 'self.experiment_name', 'self.log_path'], {}), "(f'Inference of Keypoints completed', self.experiment_name, self.\n log_path)\n", (6576, 6655), False, 'from Utils import LogText\n'), ((8328, 8400), 'Utils.LogText', 'LogText', (['f"""Clustering of keypoints"""', 'self.experiment_name', 'self.log_path'], {}), "(f'Clustering of keypoints', self.experiment_name, self.log_path)\n", (8335, 8400), False, 'from Utils import LogText\n'), ((8471, 8529), 'clustering.Kmeans', 'clustering.Kmeans', (['self.number_of_clusters'], {'centroids': 'None'}), '(self.number_of_clusters, centroids=None)\n', (8488, 8529), False, 'import clustering\n'), ((8552, 8676), 'clustering.preprocess_features', 'clustering.preprocess_features', 
(['Descriptors[:numberOfPointsForClustering][inliersindexes[:\n numberOfPointsForClustering]]'], {}), '(Descriptors[:numberOfPointsForClustering][\n inliersindexes[:numberOfPointsForClustering]])\n', (8582, 8676), False, 'import clustering\n'), ((11033, 11136), 'Utils.LogText', 'LogText', (['f"""Extraction of Initial pseudoGroundtruth completed"""', 'self.experiment_name', 'self.log_path'], {}), "(f'Extraction of Initial pseudoGroundtruth completed', self.\n experiment_name, self.log_path)\n", (11040, 11136), False, 'from Utils import LogText\n'), ((11246, 11274), 'faiss.StandardGpuResources', 'faiss.StandardGpuResources', ([], {}), '()\n', (11272, 11274), False, 'import faiss\n'), ((11315, 11337), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['(256)'], {}), '(256)\n', (11332, 11337), False, 'import faiss\n'), ((11354, 11395), 'faiss.IndexIVFFlat', 'faiss.IndexIVFFlat', (['quantizer', '(256)', 'nlist'], {}), '(quantizer, 256, nlist)\n', (11372, 11395), False, 'import faiss\n'), ((11422, 11459), 'faiss.index_cpu_to_gpu', 'faiss.index_cpu_to_gpu', (['res', '(0)', 'index'], {}), '(res, 0, index)\n', (11444, 11459), False, 'import faiss\n'), ((12261, 12277), 'numpy.sort', 'np.sort', (['inliers'], {}), '(inliers)\n', (12268, 12277), True, 'import numpy as np\n'), ((12966, 13057), 'clustering.preprocess_features', 'clustering.preprocess_features', (['Descriptors[:500000][[backgroundpoitnsIndex[:500000]]]'], {}), '(Descriptors[:500000][[backgroundpoitnsIndex[\n :500000]]])\n', (12996, 13057), False, 'import clustering\n'), ((13101, 13191), 'clustering.preprocess_features', 'clustering.preprocess_features', (['Descriptors[:500000][[insideboxPoitnsIndex[:500000]]]'], {}), '(Descriptors[:500000][[insideboxPoitnsIndex[:\n 500000]]])\n', (13131, 13191), False, 'import clustering\n'), ((13308, 13368), 'clustering.Kmeans', 'clustering.Kmeans', (['number_of_outsideClusters'], {'centroids': 'None'}), '(number_of_outsideClusters, centroids=None)\n', (13325, 13368), False, 'import 
clustering\n'), ((13399, 13458), 'clustering.Kmeans', 'clustering.Kmeans', (['number_of_insideClusters'], {'centroids': 'None'}), '(number_of_insideClusters, centroids=None)\n', (13416, 13458), False, 'import clustering\n'), ((15910, 15965), 'torch.cat', 'torch.cat', (['(keypointprob, keypoint1prob, keypoint2prob)'], {}), '((keypointprob, keypoint1prob, keypoint2prob))\n', (15919, 15965), False, 'import torch\n'), ((16491, 16510), 'torch.nonzero', 'torch.nonzero', (['mask'], {}), '(mask)\n', (16504, 16510), False, 'import torch\n'), ((16733, 16755), 'math.ceil', 'math.ceil', (['(NMSthes / 2)'], {}), '(NMSthes / 2)\n', (16742, 16755), False, 'import math\n'), ((16981, 17042), 'torchvision.ops.nms', 'torchvision.ops.nms', (['newpoints[:, 0:4]', 'newpoints[:, 4]', '(0.01)'], {}), '(newpoints[:, 0:4], newpoints[:, 4], 0.01)\n', (17000, 17042), False, 'import torchvision\n'), ((18339, 18372), 'numpy.zeros', 'np.zeros', (['self.number_of_clusters'], {}), '(self.number_of_clusters)\n', (18347, 18372), True, 'import numpy as np\n'), ((19406, 19433), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (19419, 19433), False, 'import torch\n'), ((19450, 19493), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (19468, 19493), False, 'import torch\n'), ((19617, 19675), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', 'c1'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(1, c1, kernel_size=3, stride=1, padding=1)\n', (19632, 19675), False, 'import torch\n'), ((19694, 19753), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c1', 'c1'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(c1, c1, kernel_size=3, stride=1, padding=1)\n', (19709, 19753), False, 'import torch\n'), ((19772, 19831), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c1', 'c2'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(c1, c2, kernel_size=3, stride=1, padding=1)\n', (19787, 
19831), False, 'import torch\n'), ((19850, 19909), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c2', 'c2'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(c2, c2, kernel_size=3, stride=1, padding=1)\n', (19865, 19909), False, 'import torch\n'), ((19928, 19987), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c2', 'c3'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(c2, c3, kernel_size=3, stride=1, padding=1)\n', (19943, 19987), False, 'import torch\n'), ((20006, 20065), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c3', 'c3'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(c3, c3, kernel_size=3, stride=1, padding=1)\n', (20021, 20065), False, 'import torch\n'), ((20084, 20143), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c3', 'c4'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(c3, c4, kernel_size=3, stride=1, padding=1)\n', (20099, 20143), False, 'import torch\n'), ((20162, 20221), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c4', 'c4'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(c4, c4, kernel_size=3, stride=1, padding=1)\n', (20177, 20221), False, 'import torch\n'), ((20261, 20320), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c4', 'c5'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(c4, c5, kernel_size=3, stride=1, padding=1)\n', (20276, 20320), False, 'import torch\n'), ((20339, 20398), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c5', '(65)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(c5, 65, kernel_size=1, stride=1, padding=0)\n', (20354, 20398), False, 'import torch\n'), ((20440, 20499), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c4', 'c5'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(c4, c5, kernel_size=3, stride=1, padding=1)\n', (20455, 20499), False, 'import torch\n'), ((20518, 20577), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['c5', 'd1'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(c5, d1, kernel_size=1, stride=1, padding=0)\n', (20533, 
20577), False, 'import torch\n'), ((21456, 21484), 'torch.norm', 'torch.norm', (['desc'], {'p': '(2)', 'dim': '(1)'}), '(desc, p=2, dim=1)\n', (21466, 21484), False, 'import torch\n'), ((1179, 1225), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': 1 / 1.3, 'y': 1 / 1.3}"}), "(scale={'x': 1 / 1.3, 'y': 1 / 1.3})\n", (1189, 1225), True, 'import imgaug.augmenters as iaa\n'), ((1273, 1319), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': 1 / 1.6, 'y': 1 / 1.6}"}), "(scale={'x': 1 / 1.6, 'y': 1 / 1.6})\n", (1283, 1319), True, 'import imgaug.augmenters as iaa\n'), ((1359, 1420), 'torch.load', 'torch.load', (['path_to_pretrained_superpoint'], {'map_location': '"""cpu"""'}), "(path_to_pretrained_superpoint, map_location='cpu')\n", (1369, 1420), False, 'import torch\n'), ((1484, 1603), 'Utils.LogText', 'LogText', (['f"""Superpoint Network from checkpoint {path_to_pretrained_superpoint}"""', 'self.experiment_name', 'self.log_path'], {}), "(f'Superpoint Network from checkpoint {path_to_pretrained_superpoint}',\n self.experiment_name, self.log_path)\n", (1491, 1603), False, 'from Utils import LogText\n'), ((6724, 6769), 'numpy.array', 'np.array', (['Keypoint_buffer[:buffer_last_index]'], {}), '(Keypoint_buffer[:buffer_last_index])\n', (6732, 6769), True, 'import numpy as np\n'), ((6870, 6918), 'numpy.array', 'np.array', (['Descriptor__buffer[:buffer_last_index]'], {}), '(Descriptor__buffer[:buffer_last_index])\n', (6878, 6918), True, 'import numpy as np\n'), ((7966, 8018), 'numpy.logical_and', 'np.logical_and', (['inliersindexes', 'foregroundpointindex'], {}), '(inliersindexes, foregroundpointindex)\n', (7980, 8018), True, 'import numpy as np\n'), ((9291, 9345), 'clustering.preprocess_features', 'clustering.preprocess_features', (['Descriptors[start:end]'], {}), '(Descriptors[start:end])\n', (9321, 9345), False, 'import clustering\n'), ((9916, 9953), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['distanceMatrix'], {}), 
'(distanceMatrix)\n', (9937, 9953), False, 'from scipy.optimize import linear_sum_assignment\n'), ((11490, 11546), 'clustering.preprocess_features', 'clustering.preprocess_features', (['Descriptors[:buffersize]'], {}), '(Descriptors[:buffersize])\n', (11520, 11546), False, 'import clustering\n'), ((11575, 11631), 'clustering.preprocess_features', 'clustering.preprocess_features', (['Descriptors[:buffersize]'], {}), '(Descriptors[:buffersize])\n', (11605, 11631), False, 'import clustering\n'), ((11910, 11973), 'clustering.preprocess_features', 'clustering.preprocess_features', (['Descriptors[rg[i]:rg[i + 1], :]'], {}), '(Descriptors[rg[i]:rg[i + 1], :])\n', (11940, 11973), False, 'import clustering\n'), ((12079, 12124), 'numpy.median', 'np.median', (['distance_to_closest_points'], {'axis': '(1)'}), '(distance_to_closest_points, axis=1)\n', (12088, 12124), True, 'import numpy as np\n'), ((14206, 14258), 'numpy.logical_and', 'np.logical_and', (['points_to_keep', '(keypoints[:, 2] == 1)'], {}), '(points_to_keep, keypoints[:, 2] == 1)\n', (14220, 14258), True, 'import numpy as np\n'), ((17916, 18017), 'clustering.preprocess_features', 'clustering.preprocess_features', (['Descriptors[rg[i]:rg[i + 1], :][inliersindexes[rg[i]:rg[i + 1]]]'], {}), '(Descriptors[rg[i]:rg[i + 1], :][\n inliersindexes[rg[i]:rg[i + 1]]])\n', (17946, 18017), False, 'import clustering\n'), ((21525, 21547), 'torch.unsqueeze', 'torch.unsqueeze', (['dn', '(1)'], {}), '(dn, 1)\n', (21540, 21547), False, 'import torch\n'), ((3278, 3293), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3291, 3293), False, 'import torch\n'), ((6377, 6403), 'torch.zeros', 'torch.zeros', (['buffersize', '(3)'], {}), '(buffersize, 3)\n', (6388, 6403), False, 'import torch\n'), ((6441, 6482), 'torch.zeros', 'torch.zeros', (['buffersize', 'numberoffeatures'], {}), '(buffersize, numberoffeatures)\n', (6452, 6482), False, 'import torch\n'), ((9668, 9702), 'numpy.argsort', 'np.argsort', (['clustering_assignments'], {}), 
'(clustering_assignments)\n', (9678, 9702), True, 'import numpy as np\n'), ((13930, 13973), 'clustering.preprocess_features', 'clustering.preprocess_features', (['descriptors'], {}), '(descriptors)\n', (13960, 13973), False, 'import clustering\n'), ((14052, 14095), 'clustering.preprocess_features', 'clustering.preprocess_features', (['descriptors'], {}), '(descriptors)\n', (14082, 14095), False, 'import clustering\n'), ((16778, 16924), 'torch.cat', 'torch.cat', (['(nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[0:1, :] +\n thres, nmsPoints[1:2, :] + thres, nmsPoints[2:3, :])', '(0)'], {}), '((nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[\n 0:1, :] + thres, nmsPoints[1:2, :] + thres, nmsPoints[2:3, :]), 0)\n', (16787, 16924), False, 'import torch\n'), ((6062, 6107), 'numpy.array', 'np.array', (['Keypoint_buffer[:buffer_last_index]'], {}), '(Keypoint_buffer[:buffer_last_index])\n', (6070, 6107), True, 'import numpy as np\n'), ((6215, 6263), 'numpy.array', 'np.array', (['Descriptor__buffer[:buffer_last_index]'], {}), '(Descriptor__buffer[:buffer_last_index])\n', (6223, 6263), True, 'import numpy as np\n'), ((9106, 9137), 'numpy.sum', 'np.sum', (['(keypoints[:, :2] < 0)', '(1)'], {}), '(keypoints[:, :2] < 0, 1)\n', (9112, 9137), True, 'import numpy as np\n'), ((9171, 9203), 'numpy.sum', 'np.sum', (['(keypoints[:, :2] > 64)', '(1)'], {}), '(keypoints[:, :2] > 64, 1)\n', (9177, 9203), True, 'import numpy as np\n'), ((18627, 18670), 'numpy.std', 'np.std', (['distance_to_centroid_per_cluster[i]'], {}), '(distance_to_centroid_per_cluster[i])\n', (18633, 18670), True, 'import numpy as np\n'), ((18580, 18625), 'numpy.array', 'np.array', (['distance_to_centroid_per_cluster[i]'], {}), '(distance_to_centroid_per_cluster[i])\n', (18588, 18625), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Fit an ordinary least-squares model on random data and report its fit.

With purely random, independent targets the learned coefficients should be
close to zero and the MAE close to that of a constant predictor.
"""
# 3rd party modules
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
import numpy as np

# config
n = 10**6
feature_dim = 3

# create data: np.random.rand accepts the target shape directly, so the
# rand(n * feature_dim).reshape(...) round-trip is unnecessary (same values,
# same row-major fill order).
x = np.random.rand(n, feature_dim)
y_true = np.random.rand(n)
# x[:, 1] = x[:, 0]  # uncomment to make two features perfectly collinear

# fixed typo: "Frist" -> "First"
print("First 3 points of {} with dimension {}:".format(n, feature_dim))
print(x[:3])

# create and fit
regressor = LinearRegression()
regressor.fit(x, y_true)

# Show what it learned
print("coef_: {}".format(regressor.coef_))
print("intercept: {:.4f}".format(regressor.intercept_))
y_pred = regressor.predict(x)
print("MAE: {:.4f}".format(mean_absolute_error(y_true, y_pred)))
| [
"sklearn.metrics.mean_absolute_error",
"sklearn.linear_model.LinearRegression",
"numpy.random.rand"
] | [((280, 297), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (294, 297), True, 'import numpy as np\n'), ((433, 451), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (449, 451), False, 'from sklearn.linear_model import LinearRegression\n'), ((215, 246), 'numpy.random.rand', 'np.random.rand', (['(n * feature_dim)'], {}), '(n * feature_dim)\n', (229, 246), True, 'import numpy as np\n'), ((670, 705), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (689, 705), False, 'from sklearn.metrics import mean_absolute_error\n')] |
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.nn as nn
import time
import sys, os
import numpy as np
import cv2
import scipy
import matplotlib.pyplot as plt
from fcn.config import cfg
from fcn.test_common import refine_pose
from transforms3d.quaternions import mat2quat, quat2mat, qmult
from utils.se3 import *
from utils.nms import nms
from utils.pose_error import re, te
class AverageMeter(object):
    """Keeps the most recent value and the running mean of a series of numbers."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Forget everything recorded so far."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and recompute the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __repr__(self):
        # Same "current (average)" formatting as used in the progress printout.
        return '{:.3f} ({:.3f})'.format(self.val, self.avg)
def test(test_loader, background_loader, network, output_dir):
    """Run `network` over `test_loader` and save per-batch results to `output_dir`.

    For each batch: synthetic images (flagged in im_info) are composited onto a
    random background, the forward pass is run, detections are post-processed
    (per-class NMS and, when cfg.TEST.POSE_REFINE is set, depth-based pose
    refinement), and one .mat result file per batch is written via scipy.io —
    unless cfg.TEST.VISUALIZE is set, in which case results are plotted instead.

    NOTE(review): all tensors are moved with .cuda(), so a CUDA device is
    assumed to be available.
    """
    batch_time = AverageMeter()
    epoch_size = len(test_loader)
    enum_background = enumerate(background_loader)
    # switch to test mode
    network.eval()
    for i, sample in enumerate(test_loader):
        # if 'is_testing' in sample and sample['is_testing'] == 0:
        #     continue
        end = time.time()
        inputs = sample['image_color']
        im_info = sample['im_info']
        # add background
        mask = sample['mask']
        # restart the background iterator once it is exhausted (bare except is
        # intentionally broad here; it catches StopIteration from next())
        try:
            _, background = next(enum_background)
        except:
            enum_background = enumerate(background_loader)
            _, background = next(enum_background)
        # background batch must match the input batch size (e.g. last partial batch)
        if inputs.size(0) != background['background_color'].size(0):
            enum_background = enumerate(background_loader)
            _, background = next(enum_background)
        background_color = background['background_color'].cuda()
        for j in range(inputs.size(0)):
            is_syn = im_info[j, -1]  # last im_info entry flags synthetic images
            if is_syn:
                # alpha-composite the synthetic foreground over a real background
                inputs[j] = mask[j] * inputs[j] + (1 - mask[j]) * background_color[j]
        labels = sample['label'].cuda()
        meta_data = sample['meta_data'].cuda()
        extents = sample['extents'][0, :, :].repeat(cfg.TRAIN.GPUNUM, 1, 1).cuda()
        gt_boxes = sample['gt_boxes'].cuda()
        poses = sample['poses'].cuda()
        points = sample['points'][0, :, :, :].repeat(cfg.TRAIN.GPUNUM, 1, 1, 1).cuda()
        symmetry = sample['symmetry'][0, :].repeat(cfg.TRAIN.GPUNUM, 1).cuda()
        # compute output
        if cfg.TRAIN.VERTEX_REG:
            if cfg.TRAIN.POSE_REG:
                out_label, out_vertex, rois, out_pose, out_quaternion = network(inputs, labels, meta_data, extents, gt_boxes, poses, points, symmetry)
                # combine poses
                rois = rois.detach().cpu().numpy()
                out_pose = out_pose.detach().cpu().numpy()
                out_quaternion = out_quaternion.detach().cpu().numpy()
                num = rois.shape[0]
                poses = out_pose.copy()
                for j in range(num):
                    cls = int(rois[j, 1])
                    if cls >= 0:
                        # per-class quaternion slice, normalized to unit length
                        qt = out_quaternion[j, 4*cls:4*cls+4]
                        qt = qt / np.linalg.norm(qt)
                        # allocentric to egocentric
                        poses[j, 4] *= poses[j, 6]
                        poses[j, 5] *= poses[j, 6]
                        T = poses[j, 4:]
                        poses[j, :4] = allocentric2egocentric(qt, T)
                # non-maximum suppression within class
                index = nms(rois, 0.5)
                rois = rois[index, :]
                poses = poses[index, :]
                # refine pose
                if cfg.TEST.POSE_REFINE:
                    im_depth = sample['im_depth'].numpy()[0]
                    depth_tensor = torch.from_numpy(im_depth).cuda().float()
                    labels_out = out_label[0]
                    poses_refined = refine_pose(labels_out, depth_tensor, rois, poses, sample['meta_data'], test_loader.dataset)
                else:
                    poses_refined = []
            else:
                out_label, out_vertex, rois, out_pose = network(inputs, labels, meta_data, extents, gt_boxes, poses, points, symmetry)
                rois = rois.detach().cpu().numpy()
                out_pose = out_pose.detach().cpu().numpy()
                poses = out_pose.copy()
                poses_refined = []
                # non-maximum suppression within class
                index = nms(rois, 0.5)
                rois = rois[index, :]
                poses = poses[index, :]
        else:
            # semantic labeling only: no boxes or poses are produced
            out_label = network(inputs, labels, meta_data, extents, gt_boxes, poses, points, symmetry)
            out_vertex = []
            rois = []
            poses = []
            poses_refined = []
        if cfg.TEST.VISUALIZE:
            _vis_test(inputs, labels, out_label, out_vertex, rois, poses, poses_refined, sample, \
                test_loader.dataset._points_all, test_loader.dataset.classes, test_loader.dataset.class_colors)
        # measure elapsed time
        batch_time.update(time.time() - end)
        if not cfg.TEST.VISUALIZE:
            result = {'labels': out_label[0].detach().cpu().numpy(), 'rois': rois, 'poses': poses, 'poses_refined': poses_refined}
            if 'video_id' in sample and 'image_id' in sample:
                filename = os.path.join(output_dir, sample['video_id'][0] + '_' + sample['image_id'][0] + '.mat')
            else:
                result['meta_data_path'] = sample['meta_data_path']
                print(result['meta_data_path'])
                filename = os.path.join(output_dir, '%06d.mat' % i)
            print(filename)
            scipy.io.savemat(filename, result, do_compression=True)
        print('[%d/%d], batch time %.2f' % (i, epoch_size, batch_time.val))
    # remove any stale aggregate result file left over from a previous run
    filename = os.path.join(output_dir, 'results_posecnn.mat')
    if os.path.exists(filename):
        os.remove(filename)
def _vis_test(inputs, labels, out_label, out_vertex, rois, poses, poses_refined, sample, points, classes, class_colors):
    """Visualize a mini-batch for debugging.

    Draws one matplotlib figure per image in the batch on an m x n subplot
    grid: the input image, ground-truth and predicted labels, detected boxes,
    projected poses (raw and, when cfg.TEST.POSE_REFINE is set, refined) and
    the center/vertex regression maps. Blocks on plt.show() per image.
    """
    import matplotlib.pyplot as plt
    im_blob = inputs.cpu().numpy()
    label_blob = labels.cpu().numpy()
    label_pred = out_label.cpu().numpy()
    gt_poses = sample['poses'].numpy()
    meta_data_blob = sample['meta_data'].numpy()
    metadata = meta_data_blob[0, :]
    # first 9 meta-data entries hold the 3x3 camera intrinsic matrix
    intrinsic_matrix = metadata[:9].reshape((3,3))
    gt_boxes = sample['gt_boxes'].numpy()
    extents = sample['extents'][0, :, :].numpy()
    if cfg.TRAIN.VERTEX_REG or cfg.TRAIN.VERTEX_REG_DELTA:
        vertex_targets = sample['vertex_targets'].numpy()
        vertex_pred = out_vertex.detach().cpu().numpy()
    # subplot grid dimensions
    m = 4
    n = 4
    for i in range(im_blob.shape[0]):
        fig = plt.figure()
        start = 1
        # show image (undo preprocessing: CHW->HWC, de-normalize, BGR->RGB)
        im = im_blob[i, :, :, :].copy()
        im = im.transpose((1, 2, 0)) * 255.0
        im += cfg.PIXEL_MEANS
        im = im[:, :, (2, 1, 0)]
        im = im.astype(np.uint8)
        ax = fig.add_subplot(m, n, 1)
        plt.imshow(im)
        ax.set_title('color')
        start += 1
        # show gt boxes
        boxes = gt_boxes[i]
        for j in range(boxes.shape[0]):
            if boxes[j, 4] == 0:
                continue
            x1 = boxes[j, 0]
            y1 = boxes[j, 1]
            x2 = boxes[j, 2]
            y2 = boxes[j, 3]
            plt.gca().add_patch(
                plt.Rectangle((x1, y1), x2-x1, y2-y1, fill=False, edgecolor='g', linewidth=3))
        # show gt label (one color per class channel)
        label_gt = label_blob[i, :, :, :]
        label_gt = label_gt.transpose((1, 2, 0))
        height = label_gt.shape[0]
        width = label_gt.shape[1]
        num_classes = label_gt.shape[2]
        im_label_gt = np.zeros((height, width, 3), dtype=np.uint8)
        for j in range(num_classes):
            I = np.where(label_gt[:, :, j] > 0)
            im_label_gt[I[0], I[1], :] = class_colors[j]
        ax = fig.add_subplot(m, n, start)
        start += 1
        plt.imshow(im_label_gt)
        ax.set_title('gt labels')
        # show predicted label
        label = label_pred[i, :, :]
        height = label.shape[0]
        width = label.shape[1]
        im_label = np.zeros((height, width, 3), dtype=np.uint8)
        for j in range(num_classes):
            I = np.where(label == j)
            im_label[I[0], I[1], :] = class_colors[j]
        ax = fig.add_subplot(m, n, start)
        start += 1
        plt.imshow(im_label)
        ax.set_title('predicted labels')
        if cfg.TRAIN.VERTEX_REG or cfg.TRAIN.VERTEX_REG_DELTA:
            # show predicted boxes
            ax = fig.add_subplot(m, n, start)
            start += 1
            plt.imshow(im)
            ax.set_title('predicted boxes')
            for j in range(rois.shape[0]):
                if rois[j, 0] != i or rois[j, -1] < cfg.TEST.DET_THRESHOLD:
                    continue
                cls = rois[j, 1]
                x1 = rois[j, 2]
                y1 = rois[j, 3]
                x2 = rois[j, 4]
                y2 = rois[j, 5]
                plt.gca().add_patch(
                    plt.Rectangle((x1, y1), x2-x1, y2-y1, fill=False, edgecolor=np.array(class_colors[int(cls)])/255.0, linewidth=3))
                # mark the box center
                cx = (x1 + x2) / 2
                cy = (y1 + y2) / 2
                plt.plot(cx, cy, 'yo')
            # show gt poses (project the model points with the gt pose)
            ax = fig.add_subplot(m, n, start)
            start += 1
            ax.set_title('gt poses')
            plt.imshow(im)
            pose_blob = gt_poses[i]
            for j in range(pose_blob.shape[0]):
                if pose_blob[j, 0] == 0:
                    continue
                cls = int(pose_blob[j, 1])
                # extract 3D points (homogeneous coordinates)
                x3d = np.ones((4, points.shape[1]), dtype=np.float32)
                x3d[0, :] = points[cls,:,0]
                x3d[1, :] = points[cls,:,1]
                x3d[2, :] = points[cls,:,2]
                # projection
                RT = np.zeros((3, 4), dtype=np.float32)
                qt = pose_blob[j, 2:6]
                T = pose_blob[j, 6:]
                qt_new = allocentric2egocentric(qt, T)
                RT[:3, :3] = quat2mat(qt_new)
                RT[:, 3] = T
                x2d = np.matmul(intrinsic_matrix, np.matmul(RT, x3d))
                x2d[0, :] = np.divide(x2d[0, :], x2d[2, :])
                x2d[1, :] = np.divide(x2d[1, :], x2d[2, :])
                plt.plot(x2d[0, :], x2d[1, :], '.', color=np.divide(class_colors[cls], 255.0), alpha=0.1)
            # show predicted poses
            ax = fig.add_subplot(m, n, start)
            start += 1
            ax.set_title('predicted poses')
            plt.imshow(im)
            for j in range(rois.shape[0]):
                if rois[j, 0] != i:
                    continue
                cls = int(rois[j, 1])
                if cls > 0:
                    print('%s: detection score %s' % (classes[cls], rois[j, -1]))
                    if rois[j, -1] > cfg.TEST.DET_THRESHOLD:
                        # extract 3D points
                        x3d = np.ones((4, points.shape[1]), dtype=np.float32)
                        x3d[0, :] = points[cls,:,0]
                        x3d[1, :] = points[cls,:,1]
                        x3d[2, :] = points[cls,:,2]
                        # projection
                        RT = np.zeros((3, 4), dtype=np.float32)
                        RT[:3, :3] = quat2mat(poses[j, :4])
                        RT[:, 3] = poses[j, 4:7]
                        x2d = np.matmul(intrinsic_matrix, np.matmul(RT, x3d))
                        x2d[0, :] = np.divide(x2d[0, :], x2d[2, :])
                        x2d[1, :] = np.divide(x2d[1, :], x2d[2, :])
                        plt.plot(x2d[0, :], x2d[1, :], '.', color=np.divide(class_colors[cls], 255.0), alpha=0.1)
            # show predicted refined poses
            if cfg.TEST.POSE_REFINE:
                ax = fig.add_subplot(m, n, start)
                start += 1
                ax.set_title('predicted refined poses')
                plt.imshow(im)
                for j in range(rois.shape[0]):
                    if rois[j, 0] != i:
                        continue
                    cls = int(rois[j, 1])
                    if rois[j, -1] > cfg.TEST.DET_THRESHOLD:
                        # extract 3D points
                        x3d = np.ones((4, points.shape[1]), dtype=np.float32)
                        x3d[0, :] = points[cls,:,0]
                        x3d[1, :] = points[cls,:,1]
                        x3d[2, :] = points[cls,:,2]
                        # projection
                        RT = np.zeros((3, 4), dtype=np.float32)
                        RT[:3, :3] = quat2mat(poses_refined[j, :4])
                        RT[:, 3] = poses_refined[j, 4:7]
                        x2d = np.matmul(intrinsic_matrix, np.matmul(RT, x3d))
                        x2d[0, :] = np.divide(x2d[0, :], x2d[2, :])
                        x2d[1, :] = np.divide(x2d[1, :], x2d[2, :])
                        plt.plot(x2d[0, :], x2d[1, :], '.', color=np.divide(class_colors[cls], 255.0), alpha=0.1)
            # show gt vertex targets (per-class center regression channels)
            vertex_target = vertex_targets[i, :, :, :]
            center = np.zeros((3, height, width), dtype=np.float32)
            for j in range(1, num_classes):
                index = np.where(label_gt[:, :, j] > 0)
                if len(index[0]) > 0:
                    center[:, index[0], index[1]] = vertex_target[3*j:3*j+3, index[0], index[1]]
            ax = fig.add_subplot(m, n, start)
            start += 1
            plt.imshow(center[0,:,:])
            ax.set_title('gt center x')
            ax = fig.add_subplot(m, n, start)
            start += 1
            plt.imshow(center[1,:,:])
            ax.set_title('gt center y')
            ax = fig.add_subplot(m, n, start)
            start += 1
            # third channel stores log depth, hence np.exp for display
            plt.imshow(np.exp(center[2,:,:]))
            ax.set_title('gt z')
            # show predicted vertex targets
            vertex_target = vertex_pred[i, :, :, :]
            center = np.zeros((3, height, width), dtype=np.float32)
            for j in range(1, num_classes):
                index = np.where(label == j)
                if len(index[0]) > 0:
                    center[:, index[0], index[1]] = vertex_target[3*j:3*j+3, index[0], index[1]]
            ax = fig.add_subplot(m, n, start)
            start += 1
            plt.imshow(center[0,:,:])
            ax.set_title('predicted center x')
            ax = fig.add_subplot(m, n, start)
            start += 1
            plt.imshow(center[1,:,:])
            ax.set_title('predicted center y')
            ax = fig.add_subplot(m, n, start)
            start += 1
            plt.imshow(np.exp(center[2,:,:]))
            ax.set_title('predicted z')
        plt.show()
| [
"scipy.io.savemat",
"torch.from_numpy",
"numpy.linalg.norm",
"numpy.divide",
"os.remove",
"matplotlib.pyplot.imshow",
"os.path.exists",
"utils.nms.nms",
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.matmul",
"matplotlib.pyplot.Rectangle",
"numpy.ones",
"matplotlib.pyplot.g... | [((6030, 6077), 'os.path.join', 'os.path.join', (['output_dir', '"""results_posecnn.mat"""'], {}), "(output_dir, 'results_posecnn.mat')\n", (6042, 6077), False, 'import sys, os\n'), ((6085, 6109), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (6099, 6109), False, 'import sys, os\n'), ((1392, 1403), 'time.time', 'time.time', ([], {}), '()\n', (1401, 1403), False, 'import time\n'), ((6119, 6138), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (6128, 6138), False, 'import sys, os\n'), ((6975, 6987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6985, 6987), True, 'import matplotlib.pyplot as plt\n'), ((7255, 7269), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (7265, 7269), True, 'import matplotlib.pyplot as plt\n'), ((7961, 8005), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {'dtype': 'np.uint8'}), '((height, width, 3), dtype=np.uint8)\n', (7969, 8005), True, 'import numpy as np\n'), ((8218, 8241), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_label_gt'], {}), '(im_label_gt)\n', (8228, 8241), True, 'import matplotlib.pyplot as plt\n'), ((8427, 8471), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {'dtype': 'np.uint8'}), '((height, width, 3), dtype=np.uint8)\n', (8435, 8471), True, 'import numpy as np\n'), ((8670, 8690), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im_label'], {}), '(im_label)\n', (8680, 8690), True, 'import matplotlib.pyplot as plt\n'), ((15049, 15059), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15057, 15059), True, 'import matplotlib.pyplot as plt\n'), ((5881, 5936), 'scipy.io.savemat', 'scipy.io.savemat', (['filename', 'result'], {'do_compression': '(True)'}), '(filename, result, do_compression=True)\n', (5897, 5936), False, 'import scipy\n'), ((8059, 8090), 'numpy.where', 'np.where', (['(label_gt[:, :, j] > 0)'], {}), '(label_gt[:, :, j] > 0)\n', (8067, 8090), True, 'import numpy as np\n'), ((8525, 8545), 'numpy.where', 
'np.where', (['(label == j)'], {}), '(label == j)\n', (8533, 8545), True, 'import numpy as np\n'), ((8913, 8927), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (8923, 8927), True, 'import matplotlib.pyplot as plt\n'), ((9710, 9724), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (9720, 9724), True, 'import matplotlib.pyplot as plt\n'), ((10946, 10960), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (10956, 10960), True, 'import matplotlib.pyplot as plt\n'), ((13454, 13500), 'numpy.zeros', 'np.zeros', (['(3, height, width)'], {'dtype': 'np.float32'}), '((3, height, width), dtype=np.float32)\n', (13462, 13500), True, 'import numpy as np\n'), ((13819, 13846), 'matplotlib.pyplot.imshow', 'plt.imshow', (['center[0, :, :]'], {}), '(center[0, :, :])\n', (13829, 13846), True, 'import matplotlib.pyplot as plt\n'), ((13968, 13995), 'matplotlib.pyplot.imshow', 'plt.imshow', (['center[1, :, :]'], {}), '(center[1, :, :])\n', (13978, 13995), True, 'import matplotlib.pyplot as plt\n'), ((14301, 14347), 'numpy.zeros', 'np.zeros', (['(3, height, width)'], {'dtype': 'np.float32'}), '((3, height, width), dtype=np.float32)\n', (14309, 14347), True, 'import numpy as np\n'), ((14655, 14682), 'matplotlib.pyplot.imshow', 'plt.imshow', (['center[0, :, :]'], {}), '(center[0, :, :])\n', (14665, 14682), True, 'import matplotlib.pyplot as plt\n'), ((14811, 14838), 'matplotlib.pyplot.imshow', 'plt.imshow', (['center[1, :, :]'], {}), '(center[1, :, :])\n', (14821, 14838), True, 'import matplotlib.pyplot as plt\n'), ((3705, 3719), 'utils.nms.nms', 'nms', (['rois', '(0.5)'], {}), '(rois, 0.5)\n', (3708, 3719), False, 'from utils.nms import nms\n'), ((4662, 4676), 'utils.nms.nms', 'nms', (['rois', '(0.5)'], {}), '(rois, 0.5)\n', (4665, 4676), False, 'from utils.nms import nms\n'), ((5277, 5288), 'time.time', 'time.time', ([], {}), '()\n', (5286, 5288), False, 'import time\n'), ((5552, 5643), 'os.path.join', 'os.path.join', (['output_dir', 
"(sample['video_id'][0] + '_' + sample['image_id'][0] + '.mat')"], {}), "(output_dir, sample['video_id'][0] + '_' + sample['image_id'][0\n ] + '.mat')\n", (5564, 5643), False, 'import sys, os\n'), ((5800, 5840), 'os.path.join', 'os.path.join', (['output_dir', "('%06d.mat' % i)"], {}), "(output_dir, '%06d.mat' % i)\n", (5812, 5840), False, 'import sys, os\n'), ((7635, 7720), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(x1, y1)', '(x2 - x1)', '(y2 - y1)'], {'fill': '(False)', 'edgecolor': '"""g"""', 'linewidth': '(3)'}), "((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='g',\n linewidth=3)\n", (7648, 7720), True, 'import matplotlib.pyplot as plt\n'), ((9540, 9562), 'matplotlib.pyplot.plot', 'plt.plot', (['cx', 'cy', '"""yo"""'], {}), "(cx, cy, 'yo')\n", (9548, 9562), True, 'import matplotlib.pyplot as plt\n'), ((9982, 10029), 'numpy.ones', 'np.ones', (['(4, points.shape[1])'], {'dtype': 'np.float32'}), '((4, points.shape[1]), dtype=np.float32)\n', (9989, 10029), True, 'import numpy as np\n'), ((10228, 10262), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {'dtype': 'np.float32'}), '((3, 4), dtype=np.float32)\n', (10236, 10262), True, 'import numpy as np\n'), ((10423, 10439), 'transforms3d.quaternions.quat2mat', 'quat2mat', (['qt_new'], {}), '(qt_new)\n', (10431, 10439), False, 'from transforms3d.quaternions import mat2quat, quat2mat, qmult\n'), ((10567, 10598), 'numpy.divide', 'np.divide', (['x2d[0, :]', 'x2d[2, :]'], {}), '(x2d[0, :], x2d[2, :])\n', (10576, 10598), True, 'import numpy as np\n'), ((10627, 10658), 'numpy.divide', 'np.divide', (['x2d[1, :]', 'x2d[2, :]'], {}), '(x2d[1, :], x2d[2, :])\n', (10636, 10658), True, 'import numpy as np\n'), ((12269, 12283), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (12279, 12283), True, 'import matplotlib.pyplot as plt\n'), ((13570, 13601), 'numpy.where', 'np.where', (['(label_gt[:, :, j] > 0)'], {}), '(label_gt[:, :, j] > 0)\n', (13578, 13601), True, 'import numpy as np\n'), ((14127, 14150), 
'numpy.exp', 'np.exp', (['center[2, :, :]'], {}), '(center[2, :, :])\n', (14133, 14150), True, 'import numpy as np\n'), ((14417, 14437), 'numpy.where', 'np.where', (['(label == j)'], {}), '(label == j)\n', (14425, 14437), True, 'import numpy as np\n'), ((14977, 15000), 'numpy.exp', 'np.exp', (['center[2, :, :]'], {}), '(center[2, :, :])\n', (14983, 15000), True, 'import numpy as np\n'), ((4090, 4186), 'fcn.test_common.refine_pose', 'refine_pose', (['labels_out', 'depth_tensor', 'rois', 'poses', "sample['meta_data']", 'test_loader.dataset'], {}), "(labels_out, depth_tensor, rois, poses, sample['meta_data'],\n test_loader.dataset)\n", (4101, 4186), False, 'from fcn.test_common import refine_pose\n'), ((7598, 7607), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7605, 7607), True, 'import matplotlib.pyplot as plt\n'), ((10519, 10537), 'numpy.matmul', 'np.matmul', (['RT', 'x3d'], {}), '(RT, x3d)\n', (10528, 10537), True, 'import numpy as np\n'), ((11340, 11387), 'numpy.ones', 'np.ones', (['(4, points.shape[1])'], {'dtype': 'np.float32'}), '((4, points.shape[1]), dtype=np.float32)\n', (11347, 11387), True, 'import numpy as np\n'), ((11591, 11625), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {'dtype': 'np.float32'}), '((3, 4), dtype=np.float32)\n', (11599, 11625), True, 'import numpy as np\n'), ((11659, 11681), 'transforms3d.quaternions.quat2mat', 'quat2mat', (['poses[j, :4]'], {}), '(poses[j, :4])\n', (11667, 11681), False, 'from transforms3d.quaternions import mat2quat, quat2mat, qmult\n'), ((11833, 11864), 'numpy.divide', 'np.divide', (['x2d[0, :]', 'x2d[2, :]'], {}), '(x2d[0, :], x2d[2, :])\n', (11842, 11864), True, 'import numpy as np\n'), ((11897, 11928), 'numpy.divide', 'np.divide', (['x2d[1, :]', 'x2d[2, :]'], {}), '(x2d[1, :], x2d[2, :])\n', (11906, 11928), True, 'import numpy as np\n'), ((9298, 9307), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9305, 9307), True, 'import matplotlib.pyplot as plt\n'), ((10717, 10752), 'numpy.divide', 'np.divide', 
(['class_colors[cls]', '(255.0)'], {}), '(class_colors[cls], 255.0)\n', (10726, 10752), True, 'import numpy as np\n'), ((11781, 11799), 'numpy.matmul', 'np.matmul', (['RT', 'x3d'], {}), '(RT, x3d)\n', (11790, 11799), True, 'import numpy as np\n'), ((12581, 12628), 'numpy.ones', 'np.ones', (['(4, points.shape[1])'], {'dtype': 'np.float32'}), '((4, points.shape[1]), dtype=np.float32)\n', (12588, 12628), True, 'import numpy as np\n'), ((12852, 12886), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {'dtype': 'np.float32'}), '((3, 4), dtype=np.float32)\n', (12860, 12886), True, 'import numpy as np\n'), ((12924, 12954), 'transforms3d.quaternions.quat2mat', 'quat2mat', (['poses_refined[j, :4]'], {}), '(poses_refined[j, :4])\n', (12932, 12954), False, 'from transforms3d.quaternions import mat2quat, quat2mat, qmult\n'), ((13126, 13157), 'numpy.divide', 'np.divide', (['x2d[0, :]', 'x2d[2, :]'], {}), '(x2d[0, :], x2d[2, :])\n', (13135, 13157), True, 'import numpy as np\n'), ((13194, 13225), 'numpy.divide', 'np.divide', (['x2d[1, :]', 'x2d[2, :]'], {}), '(x2d[1, :], x2d[2, :])\n', (13203, 13225), True, 'import numpy as np\n'), ((3342, 3360), 'numpy.linalg.norm', 'np.linalg.norm', (['qt'], {}), '(qt)\n', (3356, 3360), True, 'import numpy as np\n'), ((11991, 12026), 'numpy.divide', 'np.divide', (['class_colors[cls]', '(255.0)'], {}), '(class_colors[cls], 255.0)\n', (12000, 12026), True, 'import numpy as np\n'), ((13070, 13088), 'numpy.matmul', 'np.matmul', (['RT', 'x3d'], {}), '(RT, x3d)\n', (13079, 13088), True, 'import numpy as np\n'), ((13292, 13327), 'numpy.divide', 'np.divide', (['class_colors[cls]', '(255.0)'], {}), '(class_colors[cls], 255.0)\n', (13301, 13327), True, 'import numpy as np\n'), ((3966, 3992), 'torch.from_numpy', 'torch.from_numpy', (['im_depth'], {}), '(im_depth)\n', (3982, 3992), False, 'import torch\n')] |
import unittest
import numpy as np
import tensorflow as tf
from lib import tf_utils
class TensorDotTest(unittest.TestCase):
    """Unit test for tf_utils.adj_tensor_dot."""

    def test_adj_tensor_dot(self):
        """Multiplying by a sparse identity matrix must leave the operand unchanged."""
        # Build a 2x2 identity matrix as a tf.SparseTensor.
        identity_indices = [[0, 0], [1, 1]]
        identity_values = np.array([1, 1], dtype=np.float32)
        sparse_identity = tf.SparseTensor(identity_indices, identity_values, [2, 2])
        # Dense operand of shape (2, 2, 2).
        operand_np = np.array([[[1, 0], [0, 1]], [[1, 1], [1, 1]]], dtype=np.float32)
        operand = tf.constant(operand_np)
        # identity @ operand == operand, element for element.
        expected = np.array([[[1, 0], [0, 1]], [[1, 1], [1, 1]]], dtype=np.float32)
        product = tf_utils.adj_tensor_dot(sparse_identity, operand)
        with tf.Session() as sess:
            actual = sess.run(product)
        self.assertTrue(np.array_equal(expected, actual))


if __name__ == '__main__':
    unittest.main()
| [
"tensorflow.SparseTensor",
"lib.tf_utils.adj_tensor_dot",
"tensorflow.Session",
"numpy.array",
"tensorflow.constant",
"numpy.array_equal",
"unittest.main"
] | [((949, 964), 'unittest.main', 'unittest.main', ([], {}), '()\n', (962, 964), False, 'import unittest\n'), ((339, 373), 'numpy.array', 'np.array', (['[1, 1]'], {'dtype': 'np.float32'}), '([1, 1], dtype=np.float32)\n', (347, 373), True, 'import numpy as np\n'), ((415, 466), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['adj_indices', 'adj_values', 'adj_shape'], {}), '(adj_indices, adj_values, adj_shape)\n', (430, 466), True, 'import tensorflow as tf\n'), ((540, 604), 'numpy.array', 'np.array', (['[[[1, 0], [0, 1]], [[1, 1], [1, 1]]]'], {'dtype': 'np.float32'}), '([[[1, 0], [0, 1]], [[1, 1], [1, 1]]], dtype=np.float32)\n', (548, 604), True, 'import numpy as np\n'), ((617, 631), 'tensorflow.constant', 'tf.constant', (['y'], {}), '(y)\n', (628, 631), True, 'import tensorflow as tf\n'), ((658, 722), 'numpy.array', 'np.array', (['[[[1, 0], [0, 1]], [[1, 1], [1, 1]]]'], {'dtype': 'np.float32'}), '([[[1, 0], [0, 1]], [[1, 1], [1, 1]]], dtype=np.float32)\n', (666, 722), True, 'import numpy as np\n'), ((740, 771), 'lib.tf_utils.adj_tensor_dot', 'tf_utils.adj_tensor_dot', (['adj', 'y'], {}), '(adj, y)\n', (763, 771), False, 'from lib import tf_utils\n'), ((785, 797), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (795, 797), True, 'import tensorflow as tf\n'), ((874, 914), 'numpy.array_equal', 'np.array_equal', (['expected_result', 'result_'], {}), '(expected_result, result_)\n', (888, 914), True, 'import numpy as np\n')] |
from __future__ import print_function
import argparse
import torch
import torch.utils.data
from torch import nn, optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.nn import functional as F
import numpy as np
import collections
from collections import OrderedDict
import datetime
import os
import vae_conv_model_mnist
# Script: load a trained convolutional VAE checkpoint and render a grid of
# decoder outputs obtained by sweeping one latent dimension per grid row.
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--dataroot', help='path to dataset')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--model', default='model.pth', help='saved model file')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
print("cuda", args.cuda, args.no_cuda, torch.cuda.is_available())
# latent dimensionality; must match the saved checkpoint -- TODO confirm
params = 20
model = vae_conv_model_mnist.VAE(params)
model.have_cuda = args.cuda
if args.cuda:
    model.cuda()
# Load weights; on CPU, remap tensors that were saved from GPU memory.
if args.cuda:
    model.load_state_dict(torch.load(args.model))
else:
    model.load_state_dict(torch.load(args.model, map_location={'cuda:0': 'cpu'}))
np.set_printoptions(threshold=500000,linewidth=1000)
print(model)
# Summarize Model
from pytorch_summary import Summary
s = Summary(model.encoder, input_size=(1, 1, 28, 28))
s = Summary(model.decoder, input_size=(1, 1024, 1, 1))
# Grid of side_x * side_y latent vectors, all zeros to start.
side_x = 40
side_y = 20
z_input = np.full((side_x*side_y,params), 0.0)
# print(z_input.shape)
# Row i sweeps latent dimension i over (j - side_x/2) * 0.1 across its columns.
for i in range(side_y):
    for j in range(side_x):
        z_input[i*side_x+j][i] = (j-side_x/2.0) * 0.1
#        z_input[i*side+j][1] = (j-side/2.0) * 0.1
# for i in range(side):
#     for j in range(side):
#         z_input[i*side+j][0] = (i-side/2.0) * 0.1
#         z_input[i*side+j][1] = (j-side/2.0) * 0.1
# print(z_input)
if args.cuda:
    z_batch = torch.cuda.FloatTensor(z_input)
else:
    z_batch = torch.FloatTensor(z_input)
z_batch = Variable(z_batch)
# Decode the latent grid and save it as one tiled image, side_x per row.
vis_batch = model.decode(z_batch)
outf = args.outf
save_image(vis_batch.data.cpu(), outf + '/test.png', nrow=side_x)
| [
"torch.manual_seed",
"torch.cuda.FloatTensor",
"vae_conv_model_mnist.VAE",
"pytorch_summary.Summary",
"argparse.ArgumentParser",
"torch.load",
"torch.cuda.is_available",
"numpy.full",
"torch.cuda.manual_seed",
"torch.autograd.Variable",
"torch.FloatTensor",
"numpy.set_printoptions"
] | [((418, 478), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (441, 478), False, 'import argparse\n'), ((1034, 1062), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1051, 1062), False, 'import torch\n'), ((1204, 1236), 'vae_conv_model_mnist.VAE', 'vae_conv_model_mnist.VAE', (['params'], {}), '(params)\n', (1228, 1236), False, 'import vae_conv_model_mnist\n'), ((1450, 1503), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '(500000)', 'linewidth': '(1000)'}), '(threshold=500000, linewidth=1000)\n', (1469, 1503), True, 'import numpy as np\n'), ((1575, 1624), 'pytorch_summary.Summary', 'Summary', (['model.encoder'], {'input_size': '(1, 1, 28, 28)'}), '(model.encoder, input_size=(1, 1, 28, 28))\n', (1582, 1624), False, 'from pytorch_summary import Summary\n'), ((1629, 1679), 'pytorch_summary.Summary', 'Summary', (['model.decoder'], {'input_size': '(1, 1024, 1, 1)'}), '(model.decoder, input_size=(1, 1024, 1, 1))\n', (1636, 1679), False, 'from pytorch_summary import Summary\n'), ((1715, 1754), 'numpy.full', 'np.full', (['(side_x * side_y, params)', '(0.0)'], {}), '((side_x * side_y, params), 0.0)\n', (1722, 1754), True, 'import numpy as np\n'), ((2227, 2244), 'torch.autograd.Variable', 'Variable', (['z_batch'], {}), '(z_batch)\n', (2235, 2244), False, 'from torch.autograd import Variable\n'), ((1007, 1032), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1030, 1032), False, 'import torch\n'), ((1081, 1114), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1103, 1114), False, 'import torch\n'), ((1155, 1180), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1178, 1180), False, 'import torch\n'), ((2138, 2169), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['z_input'], {}), '(z_input)\n', (2160, 2169), False, 'import 
torch\n'), ((2190, 2216), 'torch.FloatTensor', 'torch.FloatTensor', (['z_input'], {}), '(z_input)\n', (2207, 2216), False, 'import torch\n'), ((1337, 1359), 'torch.load', 'torch.load', (['args.model'], {}), '(args.model)\n', (1347, 1359), False, 'import torch\n'), ((1393, 1447), 'torch.load', 'torch.load', (['args.model'], {'map_location': "{'cuda:0': 'cpu'}"}), "(args.model, map_location={'cuda:0': 'cpu'})\n", (1403, 1447), False, 'import torch\n')] |
r"""
*****
Array
*****
.. autofunction:: is_all_equal
.. autofunction:: is_all_finite
.. autofunction:: is_crescent
"""
from numpy import asarray, isfinite, mgrid, prod, rollaxis
from numpy import sum as _sum
from numpy import unique as _unique
try:
from numba import boolean, char, float64, int32, int64, jit
_NUMBA = True
except ImportError:
_NUMBA = False
def is_crescent(arr):
r"""Check if the array values are in non-decreasing order.
Args:
arr (array_like): sequence of values.
Returns:
bool: ``True`` for non-decreasing order.
"""
arr = asarray(arr)
return _is_crescent(arr)
def is_all_equal(arr):
r"""Check if the array values are all equal.
Args:
arr (array_like): sequence of values.
Returns:
bool: ``True`` if values are all equal.
"""
arr = asarray(arr)
return _is_all_equal(arr)
def is_all_finite(arr):
r"""Check if the array values are all finite.
Args:
arr (array_like): sequence of values.
Returns:
bool: ``True`` if values are all finite.
"""
return isfinite(_sum(asarray(arr)))
def _is_crescent(arr):
i = 0
while i < arr.shape[0] - 1:
if arr[i] > arr[i + 1]:
return False
i += 1
return True
if _NUMBA:
signature = jit(
[boolean(float64[:]), boolean(int64[:]), boolean(char[:]), boolean(int32[:])],
nogil=True,
nopython=True,
cache=True,
)
_is_crescent = signature(_is_crescent)
def _is_all_equal(arr):
arr = arr.ravel()
v = arr[0]
i = 1
while i < arr.shape[0]:
if arr[i] != v:
return False
i += 1
return True
if _NUMBA:
_is_all_equal = signature(_is_all_equal)
def cartesian(shape):
r"""Cartesian indexing.
Returns a sequence of n-tuples indexing each element of a hypothetical
matrix of the given shape.
Args:
shape (tuple): tuple of dimensions.
Returns:
array_like: indices.
Example
-------
.. doctest::
>>> from numpy_sugar import cartesian
>>> print(cartesian((2, 3)))
[[0 0]
[0 1]
[0 2]
[1 0]
[1 1]
[1 2]]
Reference:
[1] http://stackoverflow.com/a/27286794
"""
n = len(shape)
idx = [slice(0, s) for s in shape]
g = rollaxis(mgrid[idx], 0, n + 1)
return g.reshape((prod(shape), n))
def unique(ar):
r"""Find the unique elements of an array.
It uses ``dask.array.unique`` if necessary.
Args:
ar (array_like): Input array.
Returns:
array_like: the sorted unique elements.
"""
import dask.array as da
if isinstance(ar, da.core.Array):
return da.unique(ar)
return _unique(ar)
| [
"numpy.prod",
"numpy.unique",
"numba.boolean",
"dask.array.unique",
"numpy.asarray",
"numpy.rollaxis"
] | [((601, 613), 'numpy.asarray', 'asarray', (['arr'], {}), '(arr)\n', (608, 613), False, 'from numpy import asarray, isfinite, mgrid, prod, rollaxis\n'), ((854, 866), 'numpy.asarray', 'asarray', (['arr'], {}), '(arr)\n', (861, 866), False, 'from numpy import asarray, isfinite, mgrid, prod, rollaxis\n'), ((2378, 2408), 'numpy.rollaxis', 'rollaxis', (['mgrid[idx]', '(0)', '(n + 1)'], {}), '(mgrid[idx], 0, n + 1)\n', (2386, 2408), False, 'from numpy import asarray, isfinite, mgrid, prod, rollaxis\n'), ((2789, 2800), 'numpy.unique', '_unique', (['ar'], {}), '(ar)\n', (2796, 2800), True, 'from numpy import unique as _unique\n'), ((2763, 2776), 'dask.array.unique', 'da.unique', (['ar'], {}), '(ar)\n', (2772, 2776), True, 'import dask.array as da\n'), ((1126, 1138), 'numpy.asarray', 'asarray', (['arr'], {}), '(arr)\n', (1133, 1138), False, 'from numpy import asarray, isfinite, mgrid, prod, rollaxis\n'), ((1339, 1358), 'numba.boolean', 'boolean', (['float64[:]'], {}), '(float64[:])\n', (1346, 1358), False, 'from numba import boolean, char, float64, int32, int64, jit\n'), ((1360, 1377), 'numba.boolean', 'boolean', (['int64[:]'], {}), '(int64[:])\n', (1367, 1377), False, 'from numba import boolean, char, float64, int32, int64, jit\n'), ((1379, 1395), 'numba.boolean', 'boolean', (['char[:]'], {}), '(char[:])\n', (1386, 1395), False, 'from numba import boolean, char, float64, int32, int64, jit\n'), ((1397, 1414), 'numba.boolean', 'boolean', (['int32[:]'], {}), '(int32[:])\n', (1404, 1414), False, 'from numba import boolean, char, float64, int32, int64, jit\n'), ((2431, 2442), 'numpy.prod', 'prod', (['shape'], {}), '(shape)\n', (2435, 2442), False, 'from numpy import asarray, isfinite, mgrid, prod, rollaxis\n')] |
'''
Configure:
python setup.py build
StructuredModels
<NAME>
2013
'''
from distutils.core import setup
# from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
# ext_modules = [
# Extension("pyKinectTools_algs_Dijkstras", ["pyKinectTools/algs/dijkstras.pyx"],language='c++'),
# Extension("pyKinectTools_algs_local_occupancy_pattern", ["pyKinectTools/algs/LocalOccupancyPattern.pyx"],language='c++'),
# ]
#
# for e in ext_modules:
# e.pyrex_directives = {
# "boundscheck": False,
# "wraparound": False,
# "infer_types": True
# }
# e.extra_compile_args = ["-w"]
setup(
author = '<NAME>',
author_email = '<EMAIL>',
description = '',
license = "FreeBSD",
version= "0.1",
name = 'StructuredModels',
cmdclass = {'build_ext': build_ext},
include_dirs = [np.get_include()],
packages= [ "StructuredModels",
"StructuredModels.models",
],
# package_data={'':['*.xml', '*.png', '*.yml', '*.txt']},
# ext_modules = ext_modules
)
| [
"numpy.get_include"
] | [((872, 888), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (886, 888), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 16:02:16 2018
@author: ning
"""
import os
working_dir = ''
import pandas as pd
pd.options.mode.chained_assignment = None
import statsmodels.formula.api as sm
import numpy as np
from sklearn.preprocessing import StandardScaler
result_dir = '../results/'
# Exp 1
experiment = 'pos'
df = pd.read_csv(os.path.join(working_dir,'../data/PoSdata.csv'))
df = df[df.columns[1:]]
df.columns = ['participant',
'blocks',
'trials',
'firstgabor',
'success',
'tilted',
'correct',
'RT_correct',
'awareness',
'RT_awareness',
'confidence',
'RT_confidence']
np.random.seed(12345)
results = dict(sub = [],
model = [],
score = [],
window = [],
correct = [],
awareness = [],
confidence = [],
RT_correct = [],
RT_awareness = [],
RT_confidence = [],
r2 = [],
intercept = [],
)
# use all 6 possible features
for n_back in range(11): # loop through the number of trials looking back
for participant,df_sub in df.groupby('participant'):# for each subject
# make sure all the attributes are either 0 or 1
df_sub.loc[:,'success' ] = df_sub.loc[:,'success' ].values - 1
df_sub.loc[:,'awareness' ] = df_sub.loc[:,'awareness' ].values - 1
df_sub.loc[:,'confidence'] = df_sub.loc[:,'confidence'].values - 1
df_sub['intercept'] = 1
feature_names = ['intercept',
'correct',
'awareness',
'confidence',
'RT_correct',
'RT_awareness',
'RT_confidence']
target_name = 'success'
features, targets = [],[]
for block, df_block in df_sub.groupby('blocks'):
# preparing the features and target by shifting the feature columns up
# and shifting the target column down
feature = (df_block[feature_names].shift(n_back) # shift downward so that the first n_back rows are gone
.dropna() # since some of rows are gone, so they are nans
.values # I only need the matrix not the data frame
)
target = (df_block[target_name].shift(-n_back) # same thing for the target, but shifting upward, and the last n_back rows are gone
.dropna()
.values
)
features.append(feature)
targets.append(target)
features = np.concatenate(features)
targets = np.concatenate(targets)
df_ = pd.DataFrame(features,columns = feature_names)
df_[target_name] = targets
model = sm.Logit(df_[target_name],df_[feature_names])
temp = model.fit(method='lbfgs')
results['sub'].append(participant)
results['model'].append('logistic')
results['score'].append([temp.bic,temp.aic])
results['window'].append(n_back)
for name in feature_names:
results[name].append([temp.params[name],
temp.bse[name],
temp.tvalues[name],
temp.pvalues[name],
temp.conf_int().loc[name][0],
temp.conf_int().loc[name][1],
])
results['r2'].append(temp.prsquared)
c = pd.DataFrame(results) # tansform a dictionary object to a data frame
for name in feature_names:
temp = c[name].to_frame()
temp[name+'_coef'] = np.vstack(temp[name].values)[:,0]
temp[name+'_se'] = np.vstack(temp[name].values)[:,1]
temp[name+'_tval'] = np.vstack(temp[name].values)[:,2]
temp[name+'_pval'] = np.vstack(temp[name].values)[:,3]
temp[name+'_lower'] = np.vstack(temp[name].values)[:,4]
temp[name+'_upper'] = np.vstack(temp[name].values)[:,5]
for k_name in temp.columns[1:]:
c[k_name] = temp[k_name].values
c = c.drop(name,axis=1)
c.to_csv(os.path.join(result_dir,'pos_logistic_statsmodel_6_features.csv'),index=False)
c = pd.read_csv(os.path.join(result_dir,'pos_logistic_statsmodel_6_features.csv'))
j = c.groupby('window').mean().reset_index()
j.to_csv(os.path.join(result_dir,'pos_logistic_statsmodel_mean_6_features.csv'),index=False)
##############################################################################################################
# 3 judgement features
experiment = 'pos'
df = pd.read_csv(os.path.join(working_dir,'../data/PoSdata.csv'))
df = df[df.columns[1:]]
df.columns = ['participant',
'blocks',
'trials',
'firstgabor',
'success',
'tilted',
'correct',
'RT_correct',
'awareness',
'RT_awareness',
'confidence',
'RT_confidence']
np.random.seed(12345)
results = dict(sub = [],
model = [],
score = [],
window = [],
correct = [],
awareness = [],
confidence = [],
r2 = [],
intercept = [],
)
# use all 6 possible features
for n_back in range(11): # loop through the number of trials looking back
for participant,df_sub in df.groupby('participant'):# for each subject
# make sure all the attributes are either 0 or 1
df_sub.loc[:,'success' ] = df_sub.loc[:,'success' ].values - 1
df_sub.loc[:,'awareness' ] = df_sub.loc[:,'awareness' ].values - 1
df_sub.loc[:,'confidence'] = df_sub.loc[:,'confidence'].values - 1
df_sub['intercept'] = 1
feature_names = ['intercept',
'correct',
'awareness',
'confidence',]
target_name = 'success'
features, targets = [],[]
for block, df_block in df_sub.groupby('blocks'):
# preparing the features and target by shifting the feature columns up
# and shifting the target column down
feature = (df_block[feature_names].shift(n_back) # shift downward so that the first n_back rows are gone
.dropna() # since some of rows are gone, so they are nans
.values # I only need the matrix not the data frame
)
target = (df_block[target_name].shift(-n_back) # same thing for the target, but shifting upward, and the last n_back rows are gone
.dropna()
.values
)
features.append(feature)
targets.append(target)
features = np.concatenate(features)
targets = np.concatenate(targets)
df_ = pd.DataFrame(features,columns = feature_names)
df_[target_name] = targets
model = sm.Logit(df_[target_name],df_[feature_names])
temp = model.fit(method='lbfgs')
results['sub'].append(participant)
results['model'].append('logistic')
results['score'].append([temp.bic,temp.aic])
results['window'].append(n_back)
for name in feature_names:
results[name].append([temp.params[name],
temp.bse[name],
temp.tvalues[name],
temp.pvalues[name],
temp.conf_int().loc[name][0],
temp.conf_int().loc[name][1],
])
results['r2'].append(temp.prsquared)
c = pd.DataFrame(results) # tansform a dictionary object to a data frame
for name in feature_names:
temp = c[name].to_frame()
temp[name+'_coef'] = np.vstack(temp[name].values)[:,0]
temp[name+'_se'] = np.vstack(temp[name].values)[:,1]
temp[name+'_tval'] = np.vstack(temp[name].values)[:,2]
temp[name+'_pval'] = np.vstack(temp[name].values)[:,3]
temp[name+'_lower'] = np.vstack(temp[name].values)[:,4]
temp[name+'_upper'] = np.vstack(temp[name].values)[:,5]
for k_name in temp.columns[1:]:
c[k_name] = temp[k_name].values
c = c.drop(name,axis=1)
c.to_csv(os.path.join(result_dir,'pos_logistic_statsmodel_3_1_features.csv'),index=False)
c = pd.read_csv(os.path.join(result_dir,'pos_logistic_statsmodel_3_1_features.csv'))
j = c.groupby('window').mean().reset_index()
j.to_csv(os.path.join(result_dir,'pos_logistic_statsmodel_mean_3_1_features.csv'),index=False)
#####################################################################################################################################
# RT features
experiment = 'pos'
df = pd.read_csv(os.path.join(working_dir,'../data/PoSdata.csv'))
df = df[df.columns[1:]]
df.columns = ['participant',
'blocks',
'trials',
'firstgabor',
'success',
'tilted',
'correct',
'RT_correct',
'awareness',
'RT_awareness',
'confidence',
'RT_confidence']
np.random.seed(12345)
results = dict(sub = [],
model = [],
score = [],
window = [],
RT_correct = [],
RT_awareness = [],
RT_confidence = [],
r2 = [],
intercept = [],
)
# use all 6 possible features
for n_back in range(11): # loop through the number of trials looking back
for participant,df_sub in df.groupby('participant'):# for each subject
# make sure all the attributes are either 0 or 1
df_sub.loc[:,'success' ] = df_sub.loc[:,'success' ].values - 1
df_sub.loc[:,'awareness' ] = df_sub.loc[:,'awareness' ].values - 1
df_sub.loc[:,'confidence'] = df_sub.loc[:,'confidence'].values - 1
df_sub['intercept'] = 1
feature_names = ['intercept',
'RT_correct',
'RT_awareness',
'RT_confidence']
target_name = 'success'
features, targets = [],[]
for block, df_block in df_sub.groupby('blocks'):
# preparing the features and target by shifting the feature columns up
# and shifting the target column down
feature = (df_block[feature_names].shift(n_back) # shift downward so that the first n_back rows are gone
.dropna() # since some of rows are gone, so they are nans
.values # I only need the matrix not the data frame
)
target = (df_block[target_name].shift(-n_back) # same thing for the target, but shifting upward, and the last n_back rows are gone
.dropna()
.values
)
features.append(feature)
targets.append(target)
features = np.concatenate(features)
targets = np.concatenate(targets)
df_ = pd.DataFrame(features,columns = feature_names)
df_[target_name] = targets
model = sm.Logit(df_[target_name],df_[feature_names])
temp = model.fit(method='lbfgs')
results['sub'].append(participant)
results['model'].append('logistic')
results['score'].append([temp.bic,temp.aic])
results['window'].append(n_back)
for name in feature_names:
results[name].append([temp.params[name],
temp.bse[name],
temp.tvalues[name],
temp.pvalues[name],
temp.conf_int().loc[name][0],
temp.conf_int().loc[name][1],
])
results['r2'].append(temp.prsquared)
c = pd.DataFrame(results) # tansform a dictionary object to a data frame
for name in feature_names:
temp = c[name].to_frame()
temp[name+'_coef'] = np.vstack(temp[name].values)[:,0]
temp[name+'_se'] = np.vstack(temp[name].values)[:,1]
temp[name+'_tval'] = np.vstack(temp[name].values)[:,2]
temp[name+'_pval'] = np.vstack(temp[name].values)[:,3]
temp[name+'_lower'] = np.vstack(temp[name].values)[:,4]
temp[name+'_upper'] = np.vstack(temp[name].values)[:,5]
for k_name in temp.columns[1:]:
c[k_name] = temp[k_name].values
c = c.drop(name,axis=1)
c.to_csv(os.path.join(result_dir,'pos_logistic_statsmodel_RT_features.csv'),index=False)
c = pd.read_csv(os.path.join(result_dir,'pos_logistic_statsmodel_RT_features.csv'))
j = c.groupby('window').mean().reset_index()
j.to_csv(os.path.join(result_dir,'pos_logistic_statsmodel_mean_RT_features.csv'),index=False)
#####################################################################################################
#####################################################################################################
#####################################################################################################
################################ ATT #####################################
#####################################################################################################
experiment = 'att'
df = pd.read_csv(os.path.join(working_dir,'../data/ATTfoc.csv'))
df = df[df.columns[1:]]
df.columns = ['participant',
'blocks',
'trials',
'firstgabor',
'attention',
'tilted',
'correct',
'RT_correct',
'awareness',
'RT_awareness',
'confidence',
'RT_confidence']
np.random.seed(12345)
results = dict(sub = [],
model = [],
score = [],
window = [],
correct = [],
awareness = [],
confidence = [],
RT_correct = [],
RT_awareness = [],
RT_confidence = [],
r2 = [],
intercept = [],
)
# use all 6 features
for n_back in range(11):# loop through the number of trials you want to look back
for participant,df_sub in df.groupby('participant'):# loop through each subject
# make sure all the attributes are either 0 or 1
df_sub.loc[:,'attention' ] = df_sub.loc[:,'attention' ].values - 1
df_sub.loc[:,'awareness' ] = df_sub.loc[:,'awareness' ].values - 1
df_sub.loc[:,'confidence'] = df_sub.loc[:,'confidence'].values - 1
df_sub['intercept'] = 1
feature_names = ['intercept',
'correct',
'awareness',
'confidence',
'RT_correct',
'RT_awareness',
'RT_confidence']
target_name = 'attention'
features, targets = [],[]
for block, df_block in df_sub.groupby('blocks'):
# preparing the features and target by shifting the feature columns up
# and shifting the target column down
feature = (df_block[feature_names].shift(n_back) # shift downward so that the first n_back rows are gone
.dropna() # since some of rows are gone, so they are nans
.values # I only need the matrix not the data frame
)
target = (df_block[target_name].shift(-n_back) # same thing for the target, but shifting upward, and the last n_back rows are gone
.dropna()
.values
)
features.append(feature)
targets.append(target)
features = np.concatenate(features)
targets = np.concatenate(targets)
df_ = pd.DataFrame(features,columns = feature_names)
df_[target_name] = targets
model = sm.Logit(df_[target_name],df_[feature_names])
temp = model.fit(method='lbfgs')
results['sub'].append(participant)
results['model'].append('logistic')
results['score'].append([temp.bic,temp.aic])
results['window'].append(n_back)
for name in feature_names:
results[name].append([temp.params[name],
temp.bse[name],
temp.tvalues[name],
temp.pvalues[name],
temp.conf_int().loc[name][0],
temp.conf_int().loc[name][1],
])
results['r2'].append(temp.prsquared)
c = pd.DataFrame(results) # tansform a dictionary object to a data frame
for name in feature_names:
temp = c[name].to_frame()
temp[name+'_coef'] = np.vstack(temp[name].values)[:,0]
temp[name+'_se'] = np.vstack(temp[name].values)[:,1]
temp[name+'_tval'] = np.vstack(temp[name].values)[:,2]
temp[name+'_pval'] = np.vstack(temp[name].values)[:,3]
temp[name+'_lower'] = np.vstack(temp[name].values)[:,4]
temp[name+'_upper'] = np.vstack(temp[name].values)[:,5]
for k_name in temp.columns[1:]:
c[k_name] = temp[k_name].values
c = c.drop(name,axis=1)
c.to_csv(os.path.join(result_dir,'att_logistic_statsmodel_6_features.csv'),index=False)
c = pd.read_csv(os.path.join(result_dir,'att_logistic_statsmodel_6_features.csv'))
j = c.groupby('window').mean().reset_index()
j.to_csv(os.path.join(result_dir,'att_logistic_statsmodel_mean_6_features.csv'),index=False)
#######################################################################################################################################
# 3 judgement features
experiment = 'att'
df = pd.read_csv(os.path.join(working_dir,'../data/ATTfoc.csv'))
df = df[df.columns[1:]]
df.columns = ['participant',
'blocks',
'trials',
'firstgabor',
'attention',
'tilted',
'correct',
'RT_correct',
'awareness',
'RT_awareness',
'confidence',
'RT_confidence']
np.random.seed(12345)
results = dict(sub = [],
model = [],
score = [],
window = [],
correct = [],
awareness = [],
confidence = [],
r2 = [],
intercept = [],
)
# use all 6 features
for n_back in range(11):# loop through the number of trials you want to look back
for participant,df_sub in df.groupby('participant'):# loop through each subject
# make sure all the attributes are either 0 or 1
df_sub.loc[:,'attention' ] = df_sub.loc[:,'attention' ].values - 1
df_sub.loc[:,'awareness' ] = df_sub.loc[:,'awareness' ].values - 1
df_sub.loc[:,'confidence'] = df_sub.loc[:,'confidence'].values - 1
df_sub['intercept'] = 1
feature_names = ['intercept',
'correct',
'awareness',
'confidence',
]
target_name = 'attention'
features, targets = [],[]
for block, df_block in df_sub.groupby('blocks'):
# preparing the features and target by shifting the feature columns up
# and shifting the target column down
feature = (df_block[feature_names].shift(n_back) # shift downward so that the first n_back rows are gone
.dropna() # since some of rows are gone, so they are nans
.values # I only need the matrix not the data frame
)
target = (df_block[target_name].shift(-n_back) # same thing for the target, but shifting upward, and the last n_back rows are gone
.dropna()
.values
)
features.append(feature)
targets.append(target)
features = np.concatenate(features)
targets = np.concatenate(targets)
df_ = pd.DataFrame(features,columns = feature_names)
df_[target_name] = targets
model = sm.Logit(df_[target_name],df_[feature_names])
temp = model.fit(method='lbfgs')
results['sub'].append(participant)
results['model'].append('logistic')
results['score'].append([temp.bic,temp.aic])
results['window'].append(n_back)
for name in feature_names:
results[name].append([temp.params[name],
temp.bse[name],
temp.tvalues[name],
temp.pvalues[name],
temp.conf_int().loc[name][0],
temp.conf_int().loc[name][1],
])
results['r2'].append(temp.prsquared)
c = pd.DataFrame(results) # tansform a dictionary object to a data frame
for name in feature_names:
temp = c[name].to_frame()
temp[name+'_coef'] = np.vstack(temp[name].values)[:,0]
temp[name+'_se'] = np.vstack(temp[name].values)[:,1]
temp[name+'_tval'] = np.vstack(temp[name].values)[:,2]
temp[name+'_pval'] = np.vstack(temp[name].values)[:,3]
temp[name+'_lower'] = np.vstack(temp[name].values)[:,4]
temp[name+'_upper'] = np.vstack(temp[name].values)[:,5]
for k_name in temp.columns[1:]:
c[k_name] = temp[k_name].values
c = c.drop(name,axis=1)
c.to_csv(os.path.join(result_dir,'att_logistic_statsmodel_3_1_features.csv'),index=False)
c = pd.read_csv(os.path.join(result_dir,'att_logistic_statsmodel_3_1_features.csv'))
j = c.groupby('window').mean().reset_index()
j.to_csv(os.path.join(result_dir,'att_logistic_statsmodel_mean_3_1_features.csv'),index=False)
####################################################################################################################################
# RT features
experiment = 'att'
df = pd.read_csv(os.path.join(working_dir,'../data/ATTfoc.csv'))
df = df[df.columns[1:]]
df.columns = ['participant',
'blocks',
'trials',
'firstgabor',
'attention',
'tilted',
'correct',
'RT_correct',
'awareness',
'RT_awareness',
'confidence',
'RT_confidence']
np.random.seed(12345)
results = dict(sub = [],
model = [],
score = [],
window = [],
RT_correct = [],
RT_awareness = [],
RT_confidence = [],
r2 = [],
intercept = [],
)
# use all 6 features
for n_back in range(11):# loop through the number of trials you want to look back
for participant,df_sub in df.groupby('participant'):# loop through each subject
# make sure all the attributes are either 0 or 1
df_sub.loc[:,'attention' ] = df_sub.loc[:,'attention' ].values - 1
df_sub.loc[:,'awareness' ] = df_sub.loc[:,'awareness' ].values - 1
df_sub.loc[:,'confidence'] = df_sub.loc[:,'confidence'].values - 1
df_sub['intercept'] = 1
feature_names = ['intercept',
'RT_correct',
'RT_awareness',
'RT_confidence']
target_name = 'attention'
features, targets = [],[]
for block, df_block in df_sub.groupby('blocks'):
# preparing the features and target by shifting the feature columns up
# and shifting the target column down
feature = (df_block[feature_names].shift(n_back) # shift downward so that the first n_back rows are gone
.dropna() # since some of rows are gone, so they are nans
.values # I only need the matrix not the data frame
)
target = (df_block[target_name].shift(-n_back) # same thing for the target, but shifting upward, and the last n_back rows are gone
.dropna()
.values
)
features.append(feature)
targets.append(target)
features = np.concatenate(features)
targets = np.concatenate(targets)
df_ = pd.DataFrame(features,columns = feature_names)
df_[target_name] = targets
model = sm.Logit(df_[target_name],df_[feature_names])
temp = model.fit(method='lbfgs')
results['sub'].append(participant)
results['model'].append('logistic')
results['score'].append([temp.bic,temp.aic])
results['window'].append(n_back)
for name in feature_names:
results[name].append([temp.params[name],
temp.bse[name],
temp.tvalues[name],
temp.pvalues[name],
temp.conf_int().loc[name][0],
temp.conf_int().loc[name][1],
])
results['r2'].append(temp.prsquared)
c = pd.DataFrame(results) # tansform a dictionary object to a data frame
for name in feature_names:
temp = c[name].to_frame()
temp[name+'_coef'] = np.vstack(temp[name].values)[:,0]
temp[name+'_se'] = np.vstack(temp[name].values)[:,1]
temp[name+'_tval'] = np.vstack(temp[name].values)[:,2]
temp[name+'_pval'] = np.vstack(temp[name].values)[:,3]
temp[name+'_lower'] = np.vstack(temp[name].values)[:,4]
temp[name+'_upper'] = np.vstack(temp[name].values)[:,5]
for k_name in temp.columns[1:]:
c[k_name] = temp[k_name].values
c = c.drop(name,axis=1)
c.to_csv(os.path.join(result_dir,'att_logistic_statsmodel_RT_features.csv'),index=False)
c = pd.read_csv(os.path.join(result_dir,'att_logistic_statsmodel_RT_features.csv'))
j = c.groupby('window').mean().reset_index()
j.to_csv(os.path.join(result_dir,'att_logistic_statsmodel_mean_RT_features.csv'),index=False)
| [
"os.path.join",
"statsmodels.formula.api.Logit",
"numpy.random.seed",
"numpy.concatenate",
"numpy.vstack",
"pandas.DataFrame"
] | [((764, 785), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (778, 785), True, 'import numpy as np\n'), ((3952, 3973), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (3964, 3973), True, 'import pandas as pd\n'), ((5428, 5449), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (5442, 5449), True, 'import numpy as np\n'), ((8381, 8402), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (8393, 8402), True, 'import pandas as pd\n'), ((9877, 9898), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (9891, 9898), True, 'import numpy as np\n'), ((12838, 12859), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (12850, 12859), True, 'import pandas as pd\n'), ((14694, 14715), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (14708, 14715), True, 'import numpy as np\n'), ((17892, 17913), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (17904, 17913), True, 'import pandas as pd\n'), ((19394, 19415), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (19408, 19415), True, 'import numpy as np\n'), ((22383, 22404), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (22395, 22404), True, 'import pandas as pd\n'), ((23879, 23900), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (23893, 23900), True, 'import numpy as np\n'), ((26850, 26871), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (26862, 26871), True, 'import pandas as pd\n'), ((359, 407), 'os.path.join', 'os.path.join', (['working_dir', '"""../data/PoSdata.csv"""'], {}), "(working_dir, '../data/PoSdata.csv')\n", (371, 407), False, 'import os\n'), ((4545, 4611), 'os.path.join', 'os.path.join', (['result_dir', '"""pos_logistic_statsmodel_6_features.csv"""'], {}), "(result_dir, 'pos_logistic_statsmodel_6_features.csv')\n", (4557, 4611), False, 'import os\n'), ((4640, 
4706), 'os.path.join', 'os.path.join', (['result_dir', '"""pos_logistic_statsmodel_6_features.csv"""'], {}), "(result_dir, 'pos_logistic_statsmodel_6_features.csv')\n", (4652, 4706), False, 'import os\n'), ((4761, 4832), 'os.path.join', 'os.path.join', (['result_dir', '"""pos_logistic_statsmodel_mean_6_features.csv"""'], {}), "(result_dir, 'pos_logistic_statsmodel_mean_6_features.csv')\n", (4773, 4832), False, 'import os\n'), ((5023, 5071), 'os.path.join', 'os.path.join', (['working_dir', '"""../data/PoSdata.csv"""'], {}), "(working_dir, '../data/PoSdata.csv')\n", (5035, 5071), False, 'import os\n'), ((8974, 9042), 'os.path.join', 'os.path.join', (['result_dir', '"""pos_logistic_statsmodel_3_1_features.csv"""'], {}), "(result_dir, 'pos_logistic_statsmodel_3_1_features.csv')\n", (8986, 9042), False, 'import os\n'), ((9071, 9139), 'os.path.join', 'os.path.join', (['result_dir', '"""pos_logistic_statsmodel_3_1_features.csv"""'], {}), "(result_dir, 'pos_logistic_statsmodel_3_1_features.csv')\n", (9083, 9139), False, 'import os\n'), ((9194, 9267), 'os.path.join', 'os.path.join', (['result_dir', '"""pos_logistic_statsmodel_mean_3_1_features.csv"""'], {}), "(result_dir, 'pos_logistic_statsmodel_mean_3_1_features.csv')\n", (9206, 9267), False, 'import os\n'), ((9472, 9520), 'os.path.join', 'os.path.join', (['working_dir', '"""../data/PoSdata.csv"""'], {}), "(working_dir, '../data/PoSdata.csv')\n", (9484, 9520), False, 'import os\n'), ((13431, 13498), 'os.path.join', 'os.path.join', (['result_dir', '"""pos_logistic_statsmodel_RT_features.csv"""'], {}), "(result_dir, 'pos_logistic_statsmodel_RT_features.csv')\n", (13443, 13498), False, 'import os\n'), ((13527, 13594), 'os.path.join', 'os.path.join', (['result_dir', '"""pos_logistic_statsmodel_RT_features.csv"""'], {}), "(result_dir, 'pos_logistic_statsmodel_RT_features.csv')\n", (13539, 13594), False, 'import os\n'), ((13649, 13721), 'os.path.join', 'os.path.join', (['result_dir', 
'"""pos_logistic_statsmodel_mean_RT_features.csv"""'], {}), "(result_dir, 'pos_logistic_statsmodel_mean_RT_features.csv')\n", (13661, 13721), False, 'import os\n'), ((14288, 14335), 'os.path.join', 'os.path.join', (['working_dir', '"""../data/ATTfoc.csv"""'], {}), "(working_dir, '../data/ATTfoc.csv')\n", (14300, 14335), False, 'import os\n'), ((18485, 18551), 'os.path.join', 'os.path.join', (['result_dir', '"""att_logistic_statsmodel_6_features.csv"""'], {}), "(result_dir, 'att_logistic_statsmodel_6_features.csv')\n", (18497, 18551), False, 'import os\n'), ((18580, 18646), 'os.path.join', 'os.path.join', (['result_dir', '"""att_logistic_statsmodel_6_features.csv"""'], {}), "(result_dir, 'att_logistic_statsmodel_6_features.csv')\n", (18592, 18646), False, 'import os\n'), ((18701, 18772), 'os.path.join', 'os.path.join', (['result_dir', '"""att_logistic_statsmodel_mean_6_features.csv"""'], {}), "(result_dir, 'att_logistic_statsmodel_mean_6_features.csv')\n", (18713, 18772), False, 'import os\n'), ((18988, 19035), 'os.path.join', 'os.path.join', (['working_dir', '"""../data/ATTfoc.csv"""'], {}), "(working_dir, '../data/ATTfoc.csv')\n", (19000, 19035), False, 'import os\n'), ((22976, 23044), 'os.path.join', 'os.path.join', (['result_dir', '"""att_logistic_statsmodel_3_1_features.csv"""'], {}), "(result_dir, 'att_logistic_statsmodel_3_1_features.csv')\n", (22988, 23044), False, 'import os\n'), ((23073, 23141), 'os.path.join', 'os.path.join', (['result_dir', '"""att_logistic_statsmodel_3_1_features.csv"""'], {}), "(result_dir, 'att_logistic_statsmodel_3_1_features.csv')\n", (23085, 23141), False, 'import os\n'), ((23196, 23269), 'os.path.join', 'os.path.join', (['result_dir', '"""att_logistic_statsmodel_mean_3_1_features.csv"""'], {}), "(result_dir, 'att_logistic_statsmodel_mean_3_1_features.csv')\n", (23208, 23269), False, 'import os\n'), ((23473, 23520), 'os.path.join', 'os.path.join', (['working_dir', '"""../data/ATTfoc.csv"""'], {}), "(working_dir, 
'../data/ATTfoc.csv')\n", (23485, 23520), False, 'import os\n'), ((27443, 27510), 'os.path.join', 'os.path.join', (['result_dir', '"""att_logistic_statsmodel_RT_features.csv"""'], {}), "(result_dir, 'att_logistic_statsmodel_RT_features.csv')\n", (27455, 27510), False, 'import os\n'), ((27539, 27606), 'os.path.join', 'os.path.join', (['result_dir', '"""att_logistic_statsmodel_RT_features.csv"""'], {}), "(result_dir, 'att_logistic_statsmodel_RT_features.csv')\n", (27551, 27606), False, 'import os\n'), ((27661, 27733), 'os.path.join', 'os.path.join', (['result_dir', '"""att_logistic_statsmodel_mean_RT_features.csv"""'], {}), "(result_dir, 'att_logistic_statsmodel_mean_RT_features.csv')\n", (27673, 27733), False, 'import os\n'), ((3035, 3059), 'numpy.concatenate', 'np.concatenate', (['features'], {}), '(features)\n', (3049, 3059), True, 'import numpy as np\n'), ((3088, 3111), 'numpy.concatenate', 'np.concatenate', (['targets'], {}), '(targets)\n', (3102, 3111), True, 'import numpy as np\n'), ((3127, 3172), 'pandas.DataFrame', 'pd.DataFrame', (['features'], {'columns': 'feature_names'}), '(features, columns=feature_names)\n', (3139, 3172), True, 'import pandas as pd\n'), ((3225, 3271), 'statsmodels.formula.api.Logit', 'sm.Logit', (['df_[target_name]', 'df_[feature_names]'], {}), '(df_[target_name], df_[feature_names])\n', (3233, 3271), True, 'import statsmodels.formula.api as sm\n'), ((4103, 4131), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (4112, 4131), True, 'import numpy as np\n'), ((4160, 4188), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (4169, 4188), True, 'import numpy as np\n'), ((4219, 4247), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (4228, 4247), True, 'import numpy as np\n'), ((4278, 4306), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (4287, 4306), True, 'import numpy as np\n'), ((4338, 4366), 
'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (4347, 4366), True, 'import numpy as np\n'), ((4398, 4426), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (4407, 4426), True, 'import numpy as np\n'), ((7464, 7488), 'numpy.concatenate', 'np.concatenate', (['features'], {}), '(features)\n', (7478, 7488), True, 'import numpy as np\n'), ((7517, 7540), 'numpy.concatenate', 'np.concatenate', (['targets'], {}), '(targets)\n', (7531, 7540), True, 'import numpy as np\n'), ((7556, 7601), 'pandas.DataFrame', 'pd.DataFrame', (['features'], {'columns': 'feature_names'}), '(features, columns=feature_names)\n', (7568, 7601), True, 'import pandas as pd\n'), ((7654, 7700), 'statsmodels.formula.api.Logit', 'sm.Logit', (['df_[target_name]', 'df_[feature_names]'], {}), '(df_[target_name], df_[feature_names])\n', (7662, 7700), True, 'import statsmodels.formula.api as sm\n'), ((8532, 8560), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (8541, 8560), True, 'import numpy as np\n'), ((8589, 8617), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (8598, 8617), True, 'import numpy as np\n'), ((8648, 8676), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (8657, 8676), True, 'import numpy as np\n'), ((8707, 8735), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (8716, 8735), True, 'import numpy as np\n'), ((8767, 8795), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (8776, 8795), True, 'import numpy as np\n'), ((8827, 8855), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (8836, 8855), True, 'import numpy as np\n'), ((11921, 11945), 'numpy.concatenate', 'np.concatenate', (['features'], {}), '(features)\n', (11935, 11945), True, 'import numpy as np\n'), ((11974, 11997), 'numpy.concatenate', 'np.concatenate', 
(['targets'], {}), '(targets)\n', (11988, 11997), True, 'import numpy as np\n'), ((12013, 12058), 'pandas.DataFrame', 'pd.DataFrame', (['features'], {'columns': 'feature_names'}), '(features, columns=feature_names)\n', (12025, 12058), True, 'import pandas as pd\n'), ((12111, 12157), 'statsmodels.formula.api.Logit', 'sm.Logit', (['df_[target_name]', 'df_[feature_names]'], {}), '(df_[target_name], df_[feature_names])\n', (12119, 12157), True, 'import statsmodels.formula.api as sm\n'), ((12989, 13017), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (12998, 13017), True, 'import numpy as np\n'), ((13046, 13074), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (13055, 13074), True, 'import numpy as np\n'), ((13105, 13133), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (13114, 13133), True, 'import numpy as np\n'), ((13164, 13192), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (13173, 13192), True, 'import numpy as np\n'), ((13224, 13252), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (13233, 13252), True, 'import numpy as np\n'), ((13284, 13312), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (13293, 13312), True, 'import numpy as np\n'), ((16975, 16999), 'numpy.concatenate', 'np.concatenate', (['features'], {}), '(features)\n', (16989, 16999), True, 'import numpy as np\n'), ((17028, 17051), 'numpy.concatenate', 'np.concatenate', (['targets'], {}), '(targets)\n', (17042, 17051), True, 'import numpy as np\n'), ((17067, 17112), 'pandas.DataFrame', 'pd.DataFrame', (['features'], {'columns': 'feature_names'}), '(features, columns=feature_names)\n', (17079, 17112), True, 'import pandas as pd\n'), ((17165, 17211), 'statsmodels.formula.api.Logit', 'sm.Logit', (['df_[target_name]', 'df_[feature_names]'], {}), '(df_[target_name], df_[feature_names])\n', (17173, 
17211), True, 'import statsmodels.formula.api as sm\n'), ((18043, 18071), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (18052, 18071), True, 'import numpy as np\n'), ((18100, 18128), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (18109, 18128), True, 'import numpy as np\n'), ((18159, 18187), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (18168, 18187), True, 'import numpy as np\n'), ((18218, 18246), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (18227, 18246), True, 'import numpy as np\n'), ((18278, 18306), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (18287, 18306), True, 'import numpy as np\n'), ((18338, 18366), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (18347, 18366), True, 'import numpy as np\n'), ((21466, 21490), 'numpy.concatenate', 'np.concatenate', (['features'], {}), '(features)\n', (21480, 21490), True, 'import numpy as np\n'), ((21519, 21542), 'numpy.concatenate', 'np.concatenate', (['targets'], {}), '(targets)\n', (21533, 21542), True, 'import numpy as np\n'), ((21558, 21603), 'pandas.DataFrame', 'pd.DataFrame', (['features'], {'columns': 'feature_names'}), '(features, columns=feature_names)\n', (21570, 21603), True, 'import pandas as pd\n'), ((21656, 21702), 'statsmodels.formula.api.Logit', 'sm.Logit', (['df_[target_name]', 'df_[feature_names]'], {}), '(df_[target_name], df_[feature_names])\n', (21664, 21702), True, 'import statsmodels.formula.api as sm\n'), ((22534, 22562), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (22543, 22562), True, 'import numpy as np\n'), ((22591, 22619), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (22600, 22619), True, 'import numpy as np\n'), ((22650, 22678), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), 
'(temp[name].values)\n', (22659, 22678), True, 'import numpy as np\n'), ((22709, 22737), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (22718, 22737), True, 'import numpy as np\n'), ((22769, 22797), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (22778, 22797), True, 'import numpy as np\n'), ((22829, 22857), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (22838, 22857), True, 'import numpy as np\n'), ((25933, 25957), 'numpy.concatenate', 'np.concatenate', (['features'], {}), '(features)\n', (25947, 25957), True, 'import numpy as np\n'), ((25986, 26009), 'numpy.concatenate', 'np.concatenate', (['targets'], {}), '(targets)\n', (26000, 26009), True, 'import numpy as np\n'), ((26025, 26070), 'pandas.DataFrame', 'pd.DataFrame', (['features'], {'columns': 'feature_names'}), '(features, columns=feature_names)\n', (26037, 26070), True, 'import pandas as pd\n'), ((26123, 26169), 'statsmodels.formula.api.Logit', 'sm.Logit', (['df_[target_name]', 'df_[feature_names]'], {}), '(df_[target_name], df_[feature_names])\n', (26131, 26169), True, 'import statsmodels.formula.api as sm\n'), ((27001, 27029), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (27010, 27029), True, 'import numpy as np\n'), ((27058, 27086), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (27067, 27086), True, 'import numpy as np\n'), ((27117, 27145), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (27126, 27145), True, 'import numpy as np\n'), ((27176, 27204), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (27185, 27204), True, 'import numpy as np\n'), ((27236, 27264), 'numpy.vstack', 'np.vstack', (['temp[name].values'], {}), '(temp[name].values)\n', (27245, 27264), True, 'import numpy as np\n'), ((27296, 27324), 'numpy.vstack', 'np.vstack', 
(['temp[name].values'], {}), '(temp[name].values)\n', (27305, 27324), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: LeeZChuan
"""
import pandas as pd
import numpy as np
import requests
import os
from pandas.core.frame import DataFrame
import json
import datetime
import time
pd.set_option('display.max_columns',1000)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth',1000)
def addressProcess(address):
result = address
if '镇' in address:
item = address.split('镇')
result = item[0]+'镇'
elif '农场' in address:
item = address.split('农场')
result = item[0]+'农场'
elif '街道' in address:
item = address.split('街道')
result = item[0]+'街道'
elif '路' in address:
item = address.split('路')
result = item[0]+'路'
elif '大道' in address:
item = address.split('大道')
result = item[0]+'大道'
elif '街' in address:
item = address.split('街')
result = item[0]+'街'
elif '村' in address:
item = address.split('村')
result = item[0]+'村'
return result
def processJson(filePath):
orderNum = 0 #订单数
with open(filepath, 'r', encoding="utf-8") as f:
# 读取所有行 每行会是一个字符串
i = 0
for jsonstr in f.readlines():
list_address = []
list_name = []
jsonstr = jsonstr[1:-1]
# listValue = jsonstr.split(']];,')
listValue = jsonstr.split(']],')
for listitem in listValue:
listitem = listitem[1:]
listCon = listitem.split(',[')
listAddr = listCon[3][:-1].split(',')
if len(listAddr) == 2 and '海南省海口市' in listAddr[0] and '海南省海口市' in listAddr[1]:
list_address_each = []
startAdd = addressProcess(listAddr[0][6:])
endAdd = addressProcess(listAddr[1][6:])
if startAdd != endAdd:
list_address_each.append(startAdd)
list_address_each.append(endAdd)
list_address.append(list_address_each)
list_name.append(startAdd)
list_name.append(endAdd)
pd_list_address = pd.DataFrame(list_name)
# print (pd_list_address)
name_list_count = pd.value_counts(pd_list_address[0], sort=False)
name_df = pd_list_address[0].unique()
name_list = name_df.tolist()
name_list_all = [[name, name_list_count[name]] for name in name_list if name_list_count[name] > 300]
name_list_new = []
for item in name_list_all:
name_list_new.append(item[0])
print (name_list_new)
new_list_address = []
for item in list_address:
if item[0] in name_list_new and item[1] in name_list_new:
new_list = []
new_list.append(item[0])
new_list.append(item[1])
new_list_address.append(new_list)
orderNum += 1
return orderNum, list_address
def save(filename, contents):
fh = open(filename, 'w', encoding='utf-8')
fh.write(contents)
fh.close()
def dataSta(list_address, txtname):
raw_file_df = pd.DataFrame(list_address)
raw_file_df.dropna(axis=0, how='any', inplace=True) #删除含有空值的行
result = raw_file_df.groupby([raw_file_df[0],raw_file_df[1]])
all_result = []
name_result = []
for name, item in result:
each_result = []
each_result.append(name[0])
each_result.append(name[1])
each_result.append(len(item))
all_result.append(each_result)
name_result.append(name[0])
name_result.append(name[1])
name_df = DataFrame(name_result)
name_list_count = pd.value_counts(name_df[0], sort=False)
name_df = name_df[0].unique()
name_list = name_df.tolist()
name_list_all = [[name, name_list_count[name]] for name in name_list]
print (name_list_all)
strValue = "{\"nodes\": [\n"
for item in name_list_all:
strValue = strValue+" {\"name\":\""+item[0] +"\",\n \"value\":"+str(item[1])+" \n },\n"
strValue = strValue[:-2]
strValue = strValue + "\n ],\n"
strValue = strValue + "\"links\": [\n"
for item in all_result:
strValue = strValue+" {\"source\":\""+item[0]+"\", \"target\":\""+item[1]+"\", \"value\":"+str(item[2])+"\n },\n"
strValue = strValue[:-2]
strValue = strValue + "\n ]\n}"
name_path = os.getcwd()+'\dataForMulberryFigure\\'+txtname+'_nodes_links.json'
save(name_path, strValue)
def hexiantu(list_address, txtname):
raw_file_df = pd.DataFrame(list_address)
raw_file_df.dropna(axis=0, how='any', inplace=True) #删除含有空值的行
result = raw_file_df.groupby([raw_file_df[0],raw_file_df[1]])
all_result = []
for name, item in result:
each_result = []
each_result.append(name[0])
each_result.append(name[1])
each_result.append(len(item))
all_result.append(each_result)
strValue = ''
strValue = strValue + "{\"value\": [\n"
for item in all_result:
strValue = strValue+" [\""+item[0]+"\", \""+item[1]+"\", "+str(item[2])+"],\n"
strValue = strValue[:-2]
strValue = strValue + "\n ]}"
name_path = os.getcwd()+'\dataForMulberryFigure\\'+txtname+'_hexiantu.json'
save(name_path, strValue)
def read_csv(filepath):
# raw_train_df = pd.read_csv(fileInfo, sep='\s+', engine='python').loc[:,[name_title+'arrive_time',name_title+'starting_lng',name_title+'starting_lat',name_title+'dest_lng',name_title+'dest_lat']]
raw_train_df = pd.read_csv(filepath, sep=',', engine='python').loc[:,['order_id','product_id','type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'year', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']]
return raw_train_df
def orderNumByHour(filepath, txtname):
raw_train_df = read_csv(filepath)
raw_train_df['hour'] = [pd.to_datetime(item).hour for item in raw_train_df['departure_time']]
result = ''
result_distance = '[\n'
groupedByHour = raw_train_df.groupby(['hour'])
for group_name, group_data in groupedByHour:
result = result+str(group_name)+','+str(group_data.shape[0])+'\n'
result_distance = result_distance +' [\n \"'+str(group_name)+'\",\n '+str(group_data.shape[0])+',\n '+str(int(group_data['passenger_count'].mean())/1000)+'\n ],\n'
result_order = result_distance[:-2] + '\n]'
name_path = os.getcwd()+'\lineChart\\'+txtname+'_lineChart.json'
save(name_path, result_order)
def save2(filepath, filename, contents):
if not os.path.exists(filepath):
os.mkdir(filepath)
path = filepath + '\\' + filename
fh = open(path, 'w', encoding='utf-8')
fh.write(contents)
fh.close()
def averagenum(num):
nsum = 0
for i in range(len(num)):
nsum += num[i]
return nsum / len(num)
def grade_mode(list):
'''
计算众数
参数:
list:列表类型,待分析数据
返回值:
grade_mode: 列表类型,待分析数据的众数
'''
# TODO
# 定义计算众数的函数
# grade_mode返回为一个列表,可记录一个或者多个众数
list_set=set(list)#取list的集合,去除重复元素
frequency_dict={}
for i in list_set:#遍历每一个list的元素,得到该元素何其对应的个数.count(i)
frequency_dict[i]=list.count(i)#创建dict; new_dict[key]=value
grade_mode=[]
for key,value in frequency_dict.items():#遍历dict的key and value。key:value
if value==max(frequency_dict.values()):
grade_mode.append(key)
def thermodynamicByHour(filepath, txtname):
raw_train_df = read_csv(filepath)
raw_train_df['hour'] = [pd.to_datetime(item).hour for item in raw_train_df['departure_time']]
list_count_start = []
list_count_end = []
groupedByHour = raw_train_df.groupby(['hour'])
for group_name, group_data in groupedByHour:
print ('处理数据的时间段:', group_name)
result = '[\n'
groupByLocation = group_data.groupby([group_data['starting_lng'],group_data['starting_lat']])
for group_name2, group_data2 in groupByLocation:
list_count_start.append(len(group_data2))
if group_name2[0] > 100 and group_name2[1] < 40:
result = result + ' {\n \"lng\": ' + str(group_name2[0]) + ',\n \"lat\": ' + str(group_name2[1]) + ',\n \"count\": ' + str(len(group_data2)) + '\n },\n'
result = result[:-2] + '\n]'
result2 = '[\n'
groupByLocation2 = group_data.groupby([group_data['dest_lng'],group_data['dest_lat']])
for group_name3, group_data3 in groupByLocation2:
list_count_end.append(len(group_data3))
if group_name3[0] > 100 and group_name3[1] < 40:
result2 = result2 + ' {\n \"lng\": ' + str(group_name3[0]) + ',\n \"lat\": ' + str(group_name3[1]) + ',\n \"count\": ' + str(len(group_data3)) + '\n },\n'
result2 = result2[:-2] + '\n]'
txt_start = txtname+'_start'
txt_dest = txtname+'_dest'
path_start = os.getcwd()+'\dataForMulberryFigure\\'+txt_start
path_dest = os.getcwd()+'\dataForMulberryFigure\\'+txt_dest
name = str(group_name)+'.json'
save2(path_start, name, result)
save2(path_dest, name, result2)
def get_week_day(date):
week_day_dict = {
0 : '星期一',
1 : '星期二',
2 : '星期三',
3 : '星期四',
4 : '星期五',
5 : '星期六',
6 : '星期天',
}
day = date.weekday()
return week_day_dict[day]
def strGetAve(str1, str2):
return ((int(str1)+int(str2))/2)
def calendarHeatMap(foldername):
weatherPath = 'weather_05.xlsx'
weather_df = pd.DataFrame(pd.read_excel(weatherPath))
weather_df = weather_df.loc[:,['日期','天气状况','气温','holiday']]
weather_df['最高温度'] = [item[:2] for item in weather_df['气温']]
weather_df['最低温度'] = [item[-3:-1] for item in weather_df['气温']]
weather_df['平均温度'] = [strGetAve(item[:2],item[-3:-1]) for item in weather_df['气温']]
weather_df['周几'] = [get_week_day(st) for st in weather_df['日期']]
filelist=os.listdir('datasets')
dayLists = []
i = 0
for item in filelist:
dayList = []
dayList.append(item[:-4])
filename = 'datasets/' + item
raw_train_df = read_csv(filename)
dayList.append(raw_train_df.shape[0])
dayList.append(weather_df['天气状况'][i])
dayList.append(weather_df['周几'][i])
dayList.append(weather_df['最高温度'][i])
dayList.append(weather_df['最低温度'][i])
dayList.append(weather_df['平均温度'][i])
dayList.append(weather_df['holiday'][i])
i += 1
dayLists.append(dayList)
result = '[\n'
for item in dayLists:
print ('dealing--------:' + str(item[0]))
if str(item[7]) == '0':
result = result + ' [\n \"' + str(item[0]) +'\",\n ' + str(item[1]) + ',\n \"' + str(item[2]) + '\",\n \"' + str(item[3]) + '\",\n \"' + str(item[4]) + '\",\n \"' + str(item[5]) + '\",\n \"' + str(item[6]) + '\",\n \"' + '\"\n ],\n'
else:
result = result + ' [\n \"' + str(item[0]) +'\",\n ' + str(item[1]) + ',\n \"' + str(item[2]) + '\",\n \"' + str(item[3]) + '\",\n \"' + str(item[4]) + '\",\n \"' + str(item[5]) + '\",\n \"' + str(item[6]) + '\",\n \"' + str(item[7]) + '\"\n ],\n'
file = open('calendarHeatMap.json','w', encoding="utf-8")
file.write(result[:-2]+'\n]')
file.close()
def readTxt(filename):
pos = []
with open(filename, 'r', encoding='utf-8') as file_to_read:
while True:
lines = file_to_read.readline() # 整行读取数据
if not lines:
break
pass
p_tmp = [i for i in lines.split(',')] # 将整行数据分割处理,如果分割符是空格,括号里就不用传入参数,如果是逗号, 则传入‘,'字符。
pos.append(p_tmp) # 添加新读取的数据
pass
return pos
def RealtimeStatistics(foldername):
filelist=os.listdir('datasets')
realtimeStati = []
for item in filelist:
print ('dealing>>>>>', item)
dayList = []
dayList.append(item[:-4])
filename = 'datasets/' + item
pos = readTxt(filename)
pos = pos[1:]
pos = DataFrame(pos)
pos = pos.drop([1], axis=1)
pos.columns = ['order_id','product_id','type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'day', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']
pos['passenger_count'] = [float(item)/1000 for item in pos['passenger_count']]
pos['normal_time'] = ['0' if str(item) == '' else item for item in pos['normal_time']]
pos['changtu'] = [1 if item > 30 or item == 30 else 0 for item in pos['passenger_count']]
result1 = np.round(pos['changtu'].sum()/(pos['passenger_count'].shape[0])*100,3)
pos['kuaiche'] = [1 if str(item) == '3.0' else 0 for item in pos['product_1level']]
result2 = np.round(pos['kuaiche'].sum()/(pos['kuaiche'].shape[0])*100,3)
pos['gaojia'] = [1 if int(float(item)) > 60 or int(float(item)) == 60 else 0 for item in pos['pre_total_fee']]
result3 = np.round(pos['gaojia'].sum()/(pos['pre_total_fee'].shape[0])*100,3)
pos['changshi'] = [1 if int(float(item)) > 60 or int(float(item)) == 60 else 0 for item in pos['normal_time']]
result4 = np.round(pos['changshi'].sum()/(pos['normal_time'].shape[0])*100,3)
print (item[:-4], str(result1)+'%', str(result2)+'%', str(result3)+'%', str(result4)+'%')
dayList.append(str(result1)+'%')
dayList.append(str(result2)+'%')
dayList.append(str(result3)+'%')
dayList.append(str(result4)+'%')
realtimeStati.append(dayList)
file = open('RealtimeStatistics.json','w', encoding="utf-8")
file.write(str(realtimeStati))
file.close()
def normalization2(data):
_range = np.max(abs(data))
return np.round(data / _range, 4)
def normalization(data):
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range
def standardization(data):
mu = np.mean(data, axis=0)
sigma = np.std(data, axis=0)
return (data - mu) / sigma
def Histogrammap(foldername):
filelist=os.listdir('datasets')
for item in filelist:
print ('dealing>>>>>', item)
dayList = []
dayList.append(item[:-4])
savefile = item[:-4]
filename = 'datasets/' + item
pos = readTxt(filename)
pos = pos[1:]
pos = DataFrame(pos)
pos = pos.drop([1], axis=1)
pos.columns = ['order_id','product_id','type','combo_type','traffic_type','passenger_count', 'driver_product_id', 'start_dest_distance', 'arrive_time', 'departure_time', 'pre_total_fee', 'normal_time', 'bubble_trace_id', 'product_1level', 'year', 'month', 'day', 'starting_lng', 'starting_lat', 'dest_lng', 'dest_lat']
pos['hour'] = [pd.to_datetime(item).hour for item in pos['departure_time']]
num_hour = []
groupedByHour = pos.groupby(['hour'])
for group_name, group_data in groupedByHour:
num_hour.append(group_data.shape[0])
if len(num_hour) == 24:
value_hour = []
for i in range(len(num_hour)-1):
value_hour.append(num_hour[i+1]-num_hour[i])
value_hour = np.array(value_hour)
value_hour = normalization2(value_hour)
result = '[\n'
i = 1
for item in value_hour:
result = result + ' [\"' +str(i)+ '\",' + str(item) +'],\n'
i += 1
result = result[:-2] + '\n]'
else:
result = '[\n'
i = 1
for item in value_hour:
result = result + ' [\"' +str(i)+ '\",' + str(0) +'],\n'
i += 1
result = result[:-2] + '\n]'
filePath = 'Histogrammap/'+savefile+'.json'
file = open(filePath,'w', encoding="utf-8")
file.write(result)
file.close()
def dayOrder(foldername):
filelist=os.listdir(foldername)
dayLists = []
for item in filelist:
print ('dealing---:', item)
dayList = []
dayList.append(item[:-4])
filename = 'datasets/' + item
raw_train_df = read_csv(filename)
dayList.append(raw_train_df.shape[0])
dayLists.append(dayList)
fileSave = 'dayOrder.csv'
df = pd.DataFrame(dayLists, columns=['date','order'])
df.to_csv(fileSave, encoding='utf_8_sig')
def IsPtInPoly(aLon, aLat, pointList):
'''
:param aLon: double 经度
:param aLat: double 纬度
:param pointList: list [(lon, lat)...] 多边形点的顺序需根据顺时针或逆时针,不能乱
'''
iSum = 0
iCount = len(pointList)
if(iCount < 3):
return False
for i in range(iCount):
pLon1 = pointList[i][0]
pLat1 = pointList[i][1]
if(i == iCount - 1):
pLon2 = pointList[0][0]
pLat2 = pointList[0][1]
else:
pLon2 = pointList[i + 1][0]
pLat2 = pointList[i + 1][1]
if ((aLat >= pLat1) and (aLat < pLat2)) or ((aLat>=pLat2) and (aLat < pLat1)):
if (abs(pLat1 - pLat2) > 0):
pLon = pLon1 - ((pLon1 - pLon2) * (pLat1 - aLat)) / (pLat1 - pLat2);
if(pLon < aLon):
iSum += 1
if(iSum % 2 != 0):
# print ('in it!')
return True
else:
# print (' not in it!')
return False
#def dayAreaOrder(foldername):
# filelist=os.listdir(foldername)
# pointList = [(110.321053,20.022773), (110.325291,20.02369), (110.325999,20.020626), (110.321825,20.019255)]
# for item in filelist:
# print (item)
# filename = item[:-4]
# filepath = 'datasets/'+item
#
# raw_train_df = read_csv(filepath)
# raw_train_df['hour'] = [pd.to_datetime(item).hour for item in raw_train_df['departure_time']]
#
# list_all_order = []
#
# all_hour = raw_train_df['hour'].unique()
# if len(all_hour) == 24:
#
# groupedByHour = raw_train_df.groupby(['hour'])
#
# hour_order = []
# hour_order.append(filename)
# for group_name, group_data in groupedByHour:
# print ('处理数据的时间段:', filename, group_name)
# print (group_data.shape)
#
#
#
# for item in group_data:
# print (item)
## pandingbiao = [1 if IsPtInPoly(aLon, aLat, pointList) else 0 for aLon in group_data['starting_lng'] for aLat in group_data['starting_lat']]
#
## print (group_data['starting_lng'], group_data['starting_lat'])
## print (sum(pandingbiao), len(pandingbiao))
if __name__ == '__main__':
#桑葚图
filelist=os.listdir('dataset')
for item in filelist:
print (item)
filepath = 'dataset/'+item
orderNum, list_address = processJson(filepath)
print (orderNum)
print (len(list_address))
print (list_address[:10])
print ('-----------------------')
# dataSta(list_address, item[:-5])
hexiantu(list_address, item[:-5])
##每天 24小时的订单数
# filename = '2017-05-13'
# filepath = 'datasets/'+filename+'.txt'
# orderNumByHour(filepath, filename)
##3D热力图
# filelist=os.listdir('datasets')
# for item in filelist:
# print (item)
# filename = item[:-4]
# filepath = 'datasets/'+filename+'.txt'
# thermodynamicByHour(filepath, filename)
#日历热力图
# foldername = 'datasets'
# calendarHeatMap(foldername)
##实时统计
# foldername = 'datasets'
# RealtimeStatistics(foldername)
#柱状图
# foldername = 'datasets'
# Histogrammap(foldername)
##day order
# foldername = 'datasets'
# dayOrder(foldername)
###每天 24小时的订单数
# filelist=os.listdir('datasets')
# for item in filelist:
# print (item)
# filename = item[:-4]
# filepath = 'datasets/'+item
# orderNumByHour(filepath, filename)
##订单量预测处理
##110.321053,20.022773 (左上)
##110.325291,20.02369 (右上)
##110.321825,20.019255 (左下)
##110.325999,20.020626 (右下)
#
# foldername = 'datasets'
# dayAreaOrder(foldername)
| [
"numpy.mean",
"os.path.exists",
"os.listdir",
"pandas.read_csv",
"pandas.to_datetime",
"numpy.min",
"pandas.value_counts",
"pandas.set_option",
"numpy.max",
"numpy.array",
"os.getcwd",
"pandas.read_excel",
"os.mkdir",
"numpy.std",
"pandas.DataFrame",
"pandas.core.frame.DataFrame",
"n... | [((201, 243), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(1000)'], {}), "('display.max_columns', 1000)\n", (214, 243), True, 'import pandas as pd\n'), ((243, 279), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(1000)'], {}), "('display.width', 1000)\n", (256, 279), True, 'import pandas as pd\n'), ((280, 323), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', '(1000)'], {}), "('display.max_colwidth', 1000)\n", (293, 323), True, 'import pandas as pd\n'), ((3672, 3698), 'pandas.DataFrame', 'pd.DataFrame', (['list_address'], {}), '(list_address)\n', (3684, 3698), True, 'import pandas as pd\n'), ((4191, 4213), 'pandas.core.frame.DataFrame', 'DataFrame', (['name_result'], {}), '(name_result)\n', (4200, 4213), False, 'from pandas.core.frame import DataFrame\n'), ((4236, 4275), 'pandas.value_counts', 'pd.value_counts', (['name_df[0]'], {'sort': '(False)'}), '(name_df[0], sort=False)\n', (4251, 4275), True, 'import pandas as pd\n'), ((5172, 5198), 'pandas.DataFrame', 'pd.DataFrame', (['list_address'], {}), '(list_address)\n', (5184, 5198), True, 'import pandas as pd\n'), ((10818, 10840), 'os.listdir', 'os.listdir', (['"""datasets"""'], {}), "('datasets')\n", (10828, 10840), False, 'import os\n'), ((12775, 12797), 'os.listdir', 'os.listdir', (['"""datasets"""'], {}), "('datasets')\n", (12785, 12797), False, 'import os\n'), ((14898, 14924), 'numpy.round', 'np.round', (['(data / _range)', '(4)'], {}), '(data / _range, 4)\n', (14906, 14924), True, 'import numpy as np\n'), ((15075, 15096), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (15082, 15096), True, 'import numpy as np\n'), ((15109, 15129), 'numpy.std', 'np.std', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (15115, 15129), True, 'import numpy as np\n'), ((15213, 15235), 'os.listdir', 'os.listdir', (['"""datasets"""'], {}), "('datasets')\n", (15223, 15235), False, 'import os\n'), ((17119, 17141), 'os.listdir', 
'os.listdir', (['foldername'], {}), '(foldername)\n', (17129, 17141), False, 'import os\n'), ((17478, 17527), 'pandas.DataFrame', 'pd.DataFrame', (['dayLists'], {'columns': "['date', 'order']"}), "(dayLists, columns=['date', 'order'])\n", (17490, 17527), True, 'import pandas as pd\n'), ((20036, 20057), 'os.listdir', 'os.listdir', (['"""dataset"""'], {}), "('dataset')\n", (20046, 20057), False, 'import os\n'), ((2550, 2573), 'pandas.DataFrame', 'pd.DataFrame', (['list_name'], {}), '(list_name)\n', (2562, 2573), True, 'import pandas as pd\n'), ((2641, 2688), 'pandas.value_counts', 'pd.value_counts', (['pd_list_address[0]'], {'sort': '(False)'}), '(pd_list_address[0], sort=False)\n', (2656, 2688), True, 'import pandas as pd\n'), ((7378, 7402), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (7392, 7402), False, 'import os\n'), ((7412, 7430), 'os.mkdir', 'os.mkdir', (['filepath'], {}), '(filepath)\n', (7420, 7430), False, 'import os\n'), ((10423, 10449), 'pandas.read_excel', 'pd.read_excel', (['weatherPath'], {}), '(weatherPath)\n', (10436, 10449), True, 'import pandas as pd\n'), ((13054, 13068), 'pandas.core.frame.DataFrame', 'DataFrame', (['pos'], {}), '(pos)\n', (13063, 13068), False, 'from pandas.core.frame import DataFrame\n'), ((14964, 14976), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (14970, 14976), True, 'import numpy as np\n'), ((14979, 14991), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (14985, 14991), True, 'import numpy as np\n'), ((15491, 15505), 'pandas.core.frame.DataFrame', 'DataFrame', (['pos'], {}), '(pos)\n', (15500, 15505), False, 'from pandas.core.frame import DataFrame\n'), ((6188, 6235), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'sep': '""","""', 'engine': '"""python"""'}), "(filepath, sep=',', engine='python')\n", (6199, 6235), True, 'import pandas as pd\n'), ((6684, 6704), 'pandas.to_datetime', 'pd.to_datetime', (['item'], {}), '(item)\n', (6698, 6704), True, 'import pandas as pd\n'), ((8356, 
8376), 'pandas.to_datetime', 'pd.to_datetime', (['item'], {}), '(item)\n', (8370, 8376), True, 'import pandas as pd\n'), ((15011, 15023), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (15017, 15023), True, 'import numpy as np\n'), ((16347, 16367), 'numpy.array', 'np.array', (['value_hour'], {}), '(value_hour)\n', (16355, 16367), True, 'import numpy as np\n'), ((4997, 5008), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5006, 5008), False, 'import os\n'), ((5846, 5857), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5855, 5857), False, 'import os\n'), ((7237, 7248), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7246, 7248), False, 'import os\n'), ((9806, 9817), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9815, 9817), False, 'import os\n'), ((9875, 9886), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9884, 9886), False, 'import os\n'), ((15893, 15913), 'pandas.to_datetime', 'pd.to_datetime', (['item'], {}), '(item)\n', (15907, 15913), True, 'import pandas as pd\n')] |
import unittest
from unittest_data_provider import data_provider
import sys
import numpy as np
from board import *
from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions
from tests.test_utils.print_board import print_board_if_verbosity_is_set
class BoardTest (unittest.TestCase):
def test_if_construct_board_return_correctly_initialized_array(self):
board = construct_board()
print_board_if_verbosity_is_set(board)
expected_board = np.array([[0] * 6] * 6)
np.testing.assert_array_equal(board, expected_board)
def test_is_board_not_full_should_return_false(self):
board = [[1]*6 for _ in range(6)]
board[0][0] = 0
print_board_if_verbosity_is_set(board)
result = is_board_full(board)
self.assertFalse(result)
def test_is_board_full_should_return_true(self):
board = [[1]*6 for _ in range(6)]
print_board_if_verbosity_is_set(board)
result = is_board_full(board)
self.assertTrue(result)
positions_values = lambda: (
( "A1", (0, 0)),
( "a1", (0, 0)),
( "C4", (3, 2)),
( "F6", (5, 5)),
( "A0", None),
( "A7", None),
( "G1", None),
( "anything", None)
)
@data_provider(positions_values)
def test_get_position_if_valid(self, position, expected_result):
board = generate_empty_board()
print_board_if_verbosity_is_set(board)
result = get_position_if_valid(board, position)
self.assertEqual(result, expected_result)
def test_get_position_if_valid_should_return_None_due_to_cell_already_filled(self):
board = [[0]*6 for _ in range(6)]
board[0][0] = 1
print_board_if_verbosity_is_set(board)
result = get_position_if_valid(board, "A1")
expected_result = None
self.assertEqual(result, expected_result)
good_positions_values = lambda: (
( "A1", generate_board_and_add_position((0, 0), 1), 1),
( "A1", generate_board_and_add_position((0, 0), 2), 2),
( "a1", generate_board_and_add_position((0, 0), 1), 1),
( "C4", generate_board_and_add_position((3, 2), 1), 1),
( "F6", generate_board_and_add_position((5, 5), 1), 1)
)
@data_provider(good_positions_values)
def test_add_marble_to_board_return_board(self, position, expected_board, current_player_id):
board = generate_empty_board()
board = add_marble_to_board(board, current_player_id, position)
print_board_if_verbosity_is_set(board)
np.testing.assert_array_equal(board, expected_board)
bad_positions_values = lambda: (
( "A0", generate_empty_board()),
( "A7", generate_empty_board()),
( "G1", generate_empty_board()),
( "anything", generate_empty_board()),
( "A1", generate_full_board())
)
@data_provider(bad_positions_values)
def test_add_marble_to_board_raise_exception(self, position, board):
print_board_if_verbosity_is_set(board)
with self.assertRaises(ValueError) as context:
board = add_marble_to_board(board, 1, position)
self.assertEqual("Position given is not correct", str(context.exception))
rotation_keys = lambda: (
(0, (slice(0, 3), slice(0, 3))),
(1, (slice(0, 3), slice(0, 3))),
(2, (slice(0, 3), slice(3, 6))),
(3, (slice(0, 3), slice(3, 6))),
(4, (slice(3, 6), slice(3, 6))),
(5, (slice(3, 6), slice(3, 6))),
(6, (slice(3, 6), slice(0, 3))),
(7, (slice(3, 6), slice(0, 3))),
)
@data_provider(rotation_keys)
def test_get_quarter_boundaries_from_rotation_key(self, rotation_key, expected_boundaries):
result_boundaries = get_quarter_boundaries_from_rotation_key(rotation_key)
self.assertTupleEqual(expected_boundaries, result_boundaries)
good_rotation_values = lambda: (
( "", generate_board_and_add_position((0, 0)), True, generate_board_and_add_position((0, 0)) ),
( "1", generate_board_and_add_position((0, 0)), False, generate_board_and_add_position((2, 0)) ),
( "1", generate_board_and_add_position((0, 0)), True, generate_board_and_add_position((2, 0)) ),
( "2", generate_board_and_add_position((0, 0)), False, generate_board_and_add_position((0, 2)) ),
( "3", generate_board_and_add_position((0, 3)), False, generate_board_and_add_position((2, 3)) ),
( "4", generate_board_and_add_position((0, 3)), False, generate_board_and_add_position((0, 5)) ),
( "5", generate_board_and_add_position((3, 3)), False, generate_board_and_add_position((5, 3)) ),
( "6", generate_board_and_add_position((3, 3)), False, generate_board_and_add_position((3, 5)) ),
( "7", generate_board_and_add_position((3, 0)), False, generate_board_and_add_position((5, 0)) ),
( "8", generate_board_and_add_position((3, 0)), False, generate_board_and_add_position((3, 2)) ),
)
@data_provider(good_rotation_values)
def test_rotate_quarter_of_board_should_return_board(self, player_input_value, board, one_quarter_is_symetric, expected_board):
print_board_if_verbosity_is_set(board)
print_board_if_verbosity_is_set(expected_board)
result = rotate_quarter_of_board(board, player_input_value, one_quarter_is_symetric)
np.testing.assert_array_equal(result, expected_board)
bad_rotation_values = lambda: (
( "0", generate_empty_board()),
( "A1", generate_empty_board()),
( "something", generate_empty_board()),
( "9", generate_empty_board()),
)
@data_provider(bad_rotation_values)
def test_rotate_quarter_of_board_raise_exception(self, player_input_value, board):
print_board_if_verbosity_is_set(board)
with self.assertRaises(ValueError) as context:
board = rotate_quarter_of_board(board, player_input_value, False)
self.assertEqual("Rotation given is not correct", str(context.exception))
is_quarter_symetric_values = lambda: (
([[0, 0, 0], [0, 0, 0], [0, 0, 0]], True),
([[0, 1, 0], [0, 0, 0], [0, 0, 0]], False),
([[1, 0, 0], [0, 1, 0], [0, 0, 1]], False),
([[1, 0, 1], [0, 0, 0], [1, 0, 1]], True),
([[1, 0, 1], [0, 0, 0], [1, 0, 1]], True),
([[1, 2, 1], [2, 0, 2], [1, 2, 1]], True),
)
@data_provider(is_quarter_symetric_values)
def test_is_quarter_symetric(self, quarter, expected_result):
print_board_if_verbosity_is_set(quarter)
result = is_quarter_symetric(quarter)
self.assertEqual(result, expected_result)
is_at_least_one_quarter_symetric_values = lambda: (
(generate_board_and_add_positions(((0, 0), (3, 0), (3, 3), (0, 3))), False),
(generate_empty_board(), True),
)
@data_provider(is_at_least_one_quarter_symetric_values)
def test_is_at_least_one_quarter_symetric(self, board, expected_result):
print_board_if_verbosity_is_set(board)
result = is_at_least_one_quarter_symetric(board)
self.assertEqual(result, expected_result)
| [
"tests.test_utils.generate_board.generate_full_board",
"tests.test_utils.generate_board.generate_board_and_add_positions",
"tests.test_utils.generate_board.generate_board_and_add_position",
"tests.test_utils.print_board.print_board_if_verbosity_is_set",
"numpy.array",
"tests.test_utils.generate_board.gene... | [((1388, 1419), 'unittest_data_provider.data_provider', 'data_provider', (['positions_values'], {}), '(positions_values)\n', (1401, 1419), False, 'from unittest_data_provider import data_provider\n'), ((2410, 2446), 'unittest_data_provider.data_provider', 'data_provider', (['good_positions_values'], {}), '(good_positions_values)\n', (2423, 2446), False, 'from unittest_data_provider import data_provider\n'), ((3034, 3069), 'unittest_data_provider.data_provider', 'data_provider', (['bad_positions_values'], {}), '(bad_positions_values)\n', (3047, 3069), False, 'from unittest_data_provider import data_provider\n'), ((3770, 3798), 'unittest_data_provider.data_provider', 'data_provider', (['rotation_keys'], {}), '(rotation_keys)\n', (3783, 3798), False, 'from unittest_data_provider import data_provider\n'), ((5169, 5204), 'unittest_data_provider.data_provider', 'data_provider', (['good_rotation_values'], {}), '(good_rotation_values)\n', (5182, 5204), False, 'from unittest_data_provider import data_provider\n'), ((5814, 5848), 'unittest_data_provider.data_provider', 'data_provider', (['bad_rotation_values'], {}), '(bad_rotation_values)\n', (5827, 5848), False, 'from unittest_data_provider import data_provider\n'), ((6573, 6614), 'unittest_data_provider.data_provider', 'data_provider', (['is_quarter_symetric_values'], {}), '(is_quarter_symetric_values)\n', (6586, 6614), False, 'from unittest_data_provider import data_provider\n'), ((7021, 7075), 'unittest_data_provider.data_provider', 'data_provider', (['is_at_least_one_quarter_symetric_values'], {}), '(is_at_least_one_quarter_symetric_values)\n', (7034, 7075), False, 'from unittest_data_provider import data_provider\n'), ((497, 535), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), '(board)\n', (528, 535), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((563, 586), 
'numpy.array', 'np.array', (['([[0] * 6] * 6)'], {}), '([[0] * 6] * 6)\n', (571, 586), True, 'import numpy as np\n'), ((604, 656), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['board', 'expected_board'], {}), '(board, expected_board)\n', (633, 656), True, 'import numpy as np\n'), ((794, 832), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), '(board)\n', (825, 832), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((1022, 1060), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), '(board)\n', (1053, 1060), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((1505, 1527), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (1525, 1527), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((1536, 1574), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), '(board)\n', (1567, 1574), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((1858, 1896), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), '(board)\n', (1889, 1896), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((2562, 2584), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (2582, 2584), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((2675, 2713), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), 
'(board)\n', (2706, 2713), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((2723, 2775), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['board', 'expected_board'], {}), '(board, expected_board)\n', (2752, 2775), True, 'import numpy as np\n'), ((3152, 3190), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), '(board)\n', (3183, 3190), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((5345, 5383), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), '(board)\n', (5376, 5383), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((5392, 5439), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['expected_board'], {}), '(expected_board)\n', (5423, 5439), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((5542, 5595), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'expected_board'], {}), '(result, expected_board)\n', (5571, 5595), True, 'import numpy as np\n'), ((5945, 5983), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), '(board)\n', (5976, 5983), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((6689, 6729), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['quarter'], {}), '(quarter)\n', (6720, 6729), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((7161, 7199), 'tests.test_utils.print_board.print_board_if_verbosity_is_set', 'print_board_if_verbosity_is_set', (['board'], {}), '(board)\n', (7192, 7199), False, 'from tests.test_utils.print_board import print_board_if_verbosity_is_set\n'), ((2096, 
2138), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 0)', '(1)'], {}), '((0, 0), 1)\n', (2127, 2138), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((2160, 2202), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 0)', '(2)'], {}), '((0, 0), 2)\n', (2191, 2202), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((2224, 2266), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 0)', '(1)'], {}), '((0, 0), 1)\n', (2255, 2266), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((2288, 2330), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(3, 2)', '(1)'], {}), '((3, 2), 1)\n', (2319, 2330), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((2352, 2394), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(5, 5)', '(1)'], {}), '((5, 5), 1)\n', (2383, 2394), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((2830, 2852), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (2850, 2852), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((2871, 2893), 
'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (2891, 2893), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((2912, 2934), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (2932, 2934), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((2959, 2981), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (2979, 2981), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((3000, 3021), 'tests.test_utils.generate_board.generate_full_board', 'generate_full_board', ([], {}), '()\n', (3019, 3021), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4114, 4153), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 0)'], {}), '((0, 0))\n', (4145, 4153), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4161, 4200), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 0)'], {}), '((0, 0))\n', (4192, 4200), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4219, 4258), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 0)'], {}), '((0, 0))\n', (4250, 4258), False, 'from 
tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4267, 4306), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(2, 0)'], {}), '((2, 0))\n', (4298, 4306), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4325, 4364), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 0)'], {}), '((0, 0))\n', (4356, 4364), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4372, 4411), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(2, 0)'], {}), '((2, 0))\n', (4403, 4411), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4430, 4469), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 0)'], {}), '((0, 0))\n', (4461, 4469), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4478, 4517), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 2)'], {}), '((0, 2))\n', (4509, 4517), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4536, 4575), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 3)'], {}), '((0, 3))\n', (4567, 4575), False, 'from 
tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4584, 4623), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(2, 3)'], {}), '((2, 3))\n', (4615, 4623), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4642, 4681), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 3)'], {}), '((0, 3))\n', (4673, 4681), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4690, 4729), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(0, 5)'], {}), '((0, 5))\n', (4721, 4729), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4748, 4787), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(3, 3)'], {}), '((3, 3))\n', (4779, 4787), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4796, 4835), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(5, 3)'], {}), '((5, 3))\n', (4827, 4835), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4854, 4893), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(3, 3)'], {}), '((3, 3))\n', (4885, 4893), False, 'from 
tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4902, 4941), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(3, 5)'], {}), '((3, 5))\n', (4933, 4941), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((4960, 4999), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(3, 0)'], {}), '((3, 0))\n', (4991, 4999), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((5008, 5047), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(5, 0)'], {}), '((5, 0))\n', (5039, 5047), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((5066, 5105), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(3, 0)'], {}), '((3, 0))\n', (5097, 5105), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((5114, 5153), 'tests.test_utils.generate_board.generate_board_and_add_position', 'generate_board_and_add_position', (['(3, 2)'], {}), '((3, 2))\n', (5145, 5153), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((5649, 5671), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (5669, 5671), False, 'from tests.test_utils.generate_board import 
generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((5690, 5712), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (5710, 5712), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((5738, 5760), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (5758, 5760), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((5778, 5800), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (5798, 5800), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((6894, 6960), 'tests.test_utils.generate_board.generate_board_and_add_positions', 'generate_board_and_add_positions', (['((0, 0), (3, 0), (3, 3), (0, 3))'], {}), '(((0, 0), (3, 0), (3, 3), (0, 3)))\n', (6926, 6960), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n'), ((6979, 7001), 'tests.test_utils.generate_board.generate_empty_board', 'generate_empty_board', ([], {}), '()\n', (6999, 7001), False, 'from tests.test_utils.generate_board import generate_board_and_add_position, generate_empty_board, generate_full_board, generate_board_and_add_positions\n')] |
import numpy as np
import keras
model = keras.Sequential(layers=[keras.layers.Dense(
units=1,
input_shape=[1],
)])
model.compile(
optimizer='sgd',
loss='mean_squared_error',
)
Xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
Ys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)
model.fit(x=Xs, y=Ys,epochs=30)
print(model.predict([5])) | [
"keras.layers.Dense",
"numpy.array"
] | [((205, 259), 'numpy.array', 'np.array', (['[-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]'], {'dtype': 'float'}), '([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)\n', (213, 259), True, 'import numpy as np\n'), ((266, 321), 'numpy.array', 'np.array', (['[-3.0, -1.0, 1.0, 3.0, 5.0, 7.0]'], {'dtype': 'float'}), '([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0], dtype=float)\n', (274, 321), True, 'import numpy as np\n'), ((68, 112), 'keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '[1]'}), '(units=1, input_shape=[1])\n', (86, 112), False, 'import keras\n')] |
# plot 2d 2rd-order regression
import matplotlib.pyplot as plt
import numpy as np
I = np.arange(4.40990640657, 58.51715740979401, 0.1) # (min, max, step)
I_coef_list = [
[-0.13927776461608068, 0.002038919337462459, 2.3484253283211487], # doubles
[-0.09850865867608666, 0.001429693439506745, 1.673346834786426], # triples
[-0.04062711141502941, 0.0005561811330575912, 0.7243531132252763], # quadruples
[-0.09931019535824288, 0.0014388069537618795, 1.6896976115572775] # all
]
# coef for [order-1, order-2, constant]
OvI = np.arange(0.3978092294245647, 7.325677230721877, 0.01)
OvI_coef_list = [
[0.08149445158061253, 0.19664308140877118, -0.27567136158525557],
[0.1691745820752472, 0.07835210254462913, -0.23984672383683908],
[0.055910443720261264, 0.05469892798492301, -0.10648949308046807],
[0.15332557744227973, 0.09344338104295906, -0.24194502634113763]
]
# now set for fig. 4
x = I
coef_list = I_coef_list
name = "I"
type_list = ["Doubles", "Triples", "Quadruples", "All"]
pattern_list = ['--', '-.', ':', '']
fig, ax = plt.subplots()
for i in range(len(coef_list)):
y = coef_list[i][0] * x + coef_list[i][1] * np.power(x, 2) + coef_list[i][2] * 1 # prediction
ax.plot(x, y, pattern_list[i], label=type_list[i])
ax.set_xlabel(name, size=15)
ax.set_ylabel('Δ', size=15)
ax.tick_params(axis='both', direction='in')
plt.yticks(rotation=90)
ax.legend()
plt.savefig(name.replace('/', 'v') + ".png", dpi=300)
plt.show()
| [
"numpy.power",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((88, 136), 'numpy.arange', 'np.arange', (['(4.40990640657)', '(58.51715740979401)', '(0.1)'], {}), '(4.40990640657, 58.51715740979401, 0.1)\n', (97, 136), True, 'import numpy as np\n'), ((540, 594), 'numpy.arange', 'np.arange', (['(0.3978092294245647)', '(7.325677230721877)', '(0.01)'], {}), '(0.3978092294245647, 7.325677230721877, 0.01)\n', (549, 594), True, 'import numpy as np\n'), ((1063, 1077), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1075, 1077), True, 'import matplotlib.pyplot as plt\n'), ((1365, 1388), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (1375, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1467), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1465, 1467), True, 'import matplotlib.pyplot as plt\n'), ((1158, 1172), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (1166, 1172), True, 'import numpy as np\n')] |
from numpy import random # only used to simulate data loss
from decoder import Decoder # necessary for the functionality
from encoder import Encoder # necessary for the functionality
from utils import NUMBER_OF_ENCODED_BITS # only used for statistic
# showcase steering elements:
STOP_ENCODER_ON_DECODING = True # this variable sets whether the encoder stops generating packages once the decoding
# was successful. (This can in IRL only occur, when the sender can be informed)
HAS_PACKAGE_LOSS = False # sets whether the loss of packages is simulated
PROBABILITY_OF_LOSS = 0.5 # Probability that a package is lost (when package loss is activated)
PRINT_STATISTICS = True # this variable sets, whether the statistic will be printed or not
# necessary variables
encoder = Encoder(500) # the Number set how many packages the encoder maximally generates (optional)
decoder = Decoder()
# the following is an example of transmitted data. (Since it takes bytes the string has to be encoded).
# the input has to have a multiple length of 32 bits (4 bytes) or it will not be processed
exampleTransmissionData = "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed diam nonumy.".encode('utf8')
# variables for the statistic (not relevant)
numberOfPackages = 0 # counts how many packages were sent for every decoded Data
temp_numberOfPackages = 0 # help variable for same task
numberOfDecodedInformation = 0 # counts number of successfully decoding data
# demo code
for package in encoder.encode(exampleTransmissionData): # the encode function of the decoder acts as a generator
# which yields utils.TransmissionPackage.
temp_numberOfPackages += 1 # counter for statistic (not relevant)
# simulation of package loss
if random.random() < PROBABILITY_OF_LOSS:
continue
txt = decoder.decode(package) # decoder.decode(TransmissionPackage) tries to decode the information. If there is
# not enough information it returns None, else it returns the decoded bytes
if txt is not None: # check whether the decoder was successful
numberOfDecodedInformation += 1 # counter for statistics (not relevant)
numberOfPackages += temp_numberOfPackages # counter for statistics (not relevant)
temp_numberOfPackages = 0 # counter for statistics (not relevant)
print(numberOfDecodedInformation, txt.decode('utf8')) # the decoded data gets printed (has to be decoded,
# since its bytes and we want a string.
if STOP_ENCODER_ON_DECODING: # steering structure for demo (not relevant)
break
# statistics
if numberOfDecodedInformation == 0: # check if there was a successful decoding
print("Ran out of packages before first successful decoding!") # if not print that it wasn't successful
elif PRINT_STATISTICS: # also check if printing of the statistic is activated
# calculate how many chunks there are for the data
numberOfChunks = int(len(exampleTransmissionData) / (NUMBER_OF_ENCODED_BITS / 8))
print("Number of Chunks:\t\t" + str(numberOfChunks)) # print that number
# number of encoded packages for sending
print("avg. Number of Packages Needed:\t" + str(numberOfPackages / numberOfDecodedInformation))
# number of encoded packages for sending per chunk
print("avg. per chunk:\t\t\t" + str(int(numberOfPackages / numberOfChunks) / numberOfDecodedInformation))
| [
"numpy.random.random",
"encoder.Encoder",
"decoder.Decoder"
] | [((781, 793), 'encoder.Encoder', 'Encoder', (['(500)'], {}), '(500)\n', (788, 793), False, 'from encoder import Encoder\n'), ((883, 892), 'decoder.Decoder', 'Decoder', ([], {}), '()\n', (890, 892), False, 'from decoder import Decoder\n'), ((1754, 1769), 'numpy.random.random', 'random.random', ([], {}), '()\n', (1767, 1769), False, 'from numpy import random\n')] |
# https:github.com/timestocome
# take xor neat-python example and convert it to predict tomorrow's stock
# market change using last 5 days data
# uses Python NEAT library
# https://github.com/CodeReclaimers/neat-python
from __future__ import print_function
import os
import neat
import visualize
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
##############################################################################
# stock data previously input, loaded and log leveled
# using LoadAndMatchDates.py, LevelData.py
# reading output LeveledLogStockData.csv
# pick one, any one and use it as input/output
###############################################################################
# read data in
data = pd.read_csv('LeveledLogStockData.csv')
#print(data.columns)
#print(data)
# select an index to use to train network
index = data['leveled log Nasdaq'].values
n_samples = len(index)
# split into inputs and outputs
n_inputs = 5 # number of days to use as input
n_outputs = 1 # predict next day
x = []
y = []
for i in range(n_samples - n_inputs - 1):
x.append(index[i:i+n_inputs] )
y.append([index[i+1]])
x = np.asarray(x)
y = np.asarray(y)
#print(x.shape, y.shape)
# hold out last samples for testing
n_train = int(n_samples * .9)
n_test = n_samples - n_train
print('train, test', n_train, n_test)
train_x = x[0:n_train]
test_x = x[n_train:-1]
train_y = y[0:n_train]
test_y = y[n_train:-1]
print('data split', train_x.shape, train_y.shape)
print('data split', test_x.shape, test_y.shape)
# shuffle training data?
z = np.arange(0, n_train-1)
np.random.shuffle(z)
tx = train_x[z[::-1]]
ty = train_y[z[::-1]]
train_x = tx
train_y = ty
###############################################################################
# some of these need to be updated in the config-feedforward file
# fitness_threshold = n_train - 1
# num_inputs => n_inputs
# num_hidden => ? how many hidden nodes do you want?
# num_outputs => n_outputs
#
# optional changes
# population size, activation function, .... others as needed
###############################################################################
n_generations = 10
n_evaluate = 1
clip_error = 4.
lr = 0.1
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = n_train
net = neat.nn.FeedForwardNetwork.create(genome, config)
for xi, xo in zip(train_x, train_y):
output = net.activate(xi)
error = (output[0] - xo[0]) **2
# clipping the error keeps more species in play
#genome.fitness -= lr * error
if error < clip_error:
genome.fitness -= error
else:
genome.fitness -= clip_error
def run(config_file):
# Load configuration.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)
# Add a stdout reporter to show progress in the terminal.
# True == show all species, False == don't show species
p.add_reporter(neat.StdOutReporter(False))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
p.add_reporter(neat.Checkpointer(5))
# Stop running after n=n_generations
# if n=None runs until solution is found
winner = p.run(eval_genomes, n=n_generations)
# Display the winning genome.
print('\nBest genome:\n{!s}'.format(winner))
# Show output of the most fit genome against testing data.
print('\nTest Output, Actual, Diff:')
winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
predicted = []
for xi, xo in zip(test_x, test_y):
output = winner_net.activate(xi)
predicted.append(output)
node_names = {-1:'4', -2: '3', -3: '2', -4: '1', -5: '0', 0:'Predict Change'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.plot_stats(stats, ylog=False, view=True)
visualize.plot_species(stats, view=True)
# ? save?
#p = neat.Checkpointer.restore_checkpoint('neat-checkpoint')
#p.run(eval_genomes, n_evaluate)
# plot predictions vs actual
plt.plot(test_y, 'g', label='Actual')
plt.plot(predicted, 'r-', label='Predicted')
plt.title('Test Data')
plt.legend()
plt.show()
if __name__ == '__main__':
# find and load configuation file
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward')
run(config_path)
| [
"pandas.read_csv",
"neat.Config",
"visualize.draw_net",
"neat.StatisticsReporter",
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.asarray",
"neat.nn.FeedForwardNetwork.create",
"neat.StdOutReporter",
"visualize.plot_stats",
"os.path.dirname",
"matplotlib.pyplot.title",
"matplotlib.pyplot.l... | [((750, 788), 'pandas.read_csv', 'pd.read_csv', (['"""LeveledLogStockData.csv"""'], {}), "('LeveledLogStockData.csv')\n", (761, 788), True, 'import pandas as pd\n'), ((1179, 1192), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1189, 1192), True, 'import numpy as np\n'), ((1197, 1210), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1207, 1210), True, 'import numpy as np\n'), ((1603, 1628), 'numpy.arange', 'np.arange', (['(0)', '(n_train - 1)'], {}), '(0, n_train - 1)\n', (1612, 1628), True, 'import numpy as np\n'), ((1627, 1647), 'numpy.random.shuffle', 'np.random.shuffle', (['z'], {}), '(z)\n', (1644, 1647), True, 'import numpy as np\n'), ((2930, 3053), 'neat.Config', 'neat.Config', (['neat.DefaultGenome', 'neat.DefaultReproduction', 'neat.DefaultSpeciesSet', 'neat.DefaultStagnation', 'config_file'], {}), '(neat.DefaultGenome, neat.DefaultReproduction, neat.\n DefaultSpeciesSet, neat.DefaultStagnation, config_file)\n', (2941, 3053), False, 'import neat\n'), ((3184, 3207), 'neat.Population', 'neat.Population', (['config'], {}), '(config)\n', (3199, 3207), False, 'import neat\n'), ((3396, 3421), 'neat.StatisticsReporter', 'neat.StatisticsReporter', ([], {}), '()\n', (3419, 3421), False, 'import neat\n'), ((3848, 3897), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['winner', 'config'], {}), '(winner, config)\n', (3881, 3897), False, 'import neat\n'), ((4131, 4194), 'visualize.draw_net', 'visualize.draw_net', (['config', 'winner', '(True)'], {'node_names': 'node_names'}), '(config, winner, True, node_names=node_names)\n', (4149, 4194), False, 'import visualize\n'), ((4199, 4249), 'visualize.plot_stats', 'visualize.plot_stats', (['stats'], {'ylog': '(False)', 'view': '(True)'}), '(stats, ylog=False, view=True)\n', (4219, 4249), False, 'import visualize\n'), ((4254, 4294), 'visualize.plot_species', 'visualize.plot_species', (['stats'], {'view': '(True)'}), '(stats, view=True)\n', (4276, 4294), False, 
'import visualize\n'), ((4452, 4489), 'matplotlib.pyplot.plot', 'plt.plot', (['test_y', '"""g"""'], {'label': '"""Actual"""'}), "(test_y, 'g', label='Actual')\n", (4460, 4489), True, 'import matplotlib.pyplot as plt\n'), ((4494, 4538), 'matplotlib.pyplot.plot', 'plt.plot', (['predicted', '"""r-"""'], {'label': '"""Predicted"""'}), "(predicted, 'r-', label='Predicted')\n", (4502, 4538), True, 'import matplotlib.pyplot as plt\n'), ((4544, 4566), 'matplotlib.pyplot.title', 'plt.title', (['"""Test Data"""'], {}), "('Test Data')\n", (4553, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4571, 4583), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4581, 4583), True, 'import matplotlib.pyplot as plt\n'), ((4588, 4598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4596, 4598), True, 'import matplotlib.pyplot as plt\n'), ((4687, 4712), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4702, 4712), False, 'import os\n'), ((4731, 4776), 'os.path.join', 'os.path.join', (['local_dir', '"""config-feedforward"""'], {}), "(local_dir, 'config-feedforward')\n", (4743, 4776), False, 'import os\n'), ((2368, 2417), 'neat.nn.FeedForwardNetwork.create', 'neat.nn.FeedForwardNetwork.create', (['genome', 'config'], {}), '(genome, config)\n', (2401, 2417), False, 'import neat\n'), ((3356, 3382), 'neat.StdOutReporter', 'neat.StdOutReporter', (['(False)'], {}), '(False)\n', (3375, 3382), False, 'import neat\n'), ((3467, 3487), 'neat.Checkpointer', 'neat.Checkpointer', (['(5)'], {}), '(5)\n', (3484, 3487), False, 'import neat\n')] |
import torch
import numpy as np
from elf.io import open_file
from elf.wrapper import RoiWrapper
from ..util import ensure_tensor_with_channels
class RawDataset(torch.utils.data.Dataset):
"""
"""
max_sampling_attempts = 500
@staticmethod
def compute_len(path, key, patch_shape, with_channels):
with open_file(path, mode="r") as f:
shape = f[key].shape[1:] if with_channels else f[key].shape
n_samples = int(np.prod(
[float(sh / csh) for sh, csh in zip(shape, patch_shape)]
))
return n_samples
def __init__(
self,
raw_path,
raw_key,
patch_shape,
raw_transform=None,
transform=None,
roi=None,
dtype=torch.float32,
n_samples=None,
sampler=None,
ndim=None,
with_channels=False,
):
self.raw_path = raw_path
self.raw_key = raw_key
self.raw = open_file(raw_path, mode="r")[raw_key]
self._with_channels = with_channels
if ndim is None:
self._ndim = self.raw.ndim - 1 if with_channels else self.raw.ndim
else:
self._ndim = ndim
assert self._ndim in (2, 3), "Invalid data dimensionality: {self._ndim}. Only 2d or 3d data is supported"
if self._with_channels:
assert self.raw.ndim == self._ndim + 1, f"{self.raw.ndim}, {self._ndim}"
raw_ndim = self.raw.ndim - 1 if self._with_channels else self.raw.ndim
if roi is not None:
assert len(roi) == raw_ndim, f"{roi}, {raw_ndim}"
self.raw = RoiWrapper(self.raw, (slice(None),) + roi) if self._with_channels else RoiWrapper(self.raw, roi)
self.roi = roi
self.shape = self.raw.shape[1:] if self._with_channels else self.raw.shape
assert len(patch_shape) == raw_ndim, f"{patch_shape}, {raw_ndim}"
self.patch_shape = patch_shape
self.raw_transform = raw_transform
self.transform = transform
self.sampler = sampler
self.dtype = dtype
if n_samples is None:
self._len = n_samples
else:
self._len = self.compute_len(raw_path, raw_key, self.patch_shape, with_channels)
# TODO
self.trafo_halo = None
# self.trafo_halo = None if self.transform is None\
# else self.transform.halo(self.patch_shape)
if self.trafo_halo is None:
self.sample_shape = self.patch_shape
else:
if len(self.trafo_halo) == 2 and self._ndim == 3:
self.trafo_halo = (0,) + self.trafo_halo
assert len(self.trafo_halo) == self._ndim
self.sample_shape = tuple(sh + ha for sh, ha in zip(self.patch_shape, self.trafo_halo))
self.inner_bb = tuple(slice(ha, sh - ha) for sh, ha in zip(self.patch_shape, self.trafo_halo))
def __len__(self):
return self._len
@property
def ndim(self):
return self._ndim
def _sample_bounding_box(self):
bb_start = [
np.random.randint(0, sh - psh) if sh - psh > 0 else 0
for sh, psh in zip(self.shape, self.sample_shape)
]
return tuple(slice(start, start + psh) for start, psh in zip(bb_start, self.sample_shape))
def _get_sample(self, index):
bb = self._sample_bounding_box()
if self._with_channels:
raw = self.raw[(slice(None),) + bb]
else:
raw = self.raw[bb]
if self.sampler is not None:
sample_id = 0
while not self.sampler(raw):
bb = self._sample_bounding_box()
raw = self.raw[(slice(None),) + bb] if self._with_channels else self.raw[bb]
sample_id += 1
if sample_id > self.max_sampling_attempts:
raise RuntimeError(f"Could not sample a valid batch in {self.max_sampling_attempts} attempts")
return raw
def crop(self, tensor):
bb = self.inner_bb
if tensor.ndim > len(bb):
bb = (tensor.ndim - len(bb)) * (slice(None),) + bb
return tensor[bb]
def __getitem__(self, index):
raw = self._get_sample(index)
if self.raw_transform is not None:
raw = self.raw_transform(raw)
if self.transform is not None:
raw = self.transform(raw)
if self.trafo_halo is not None:
raw = self.crop(raw)
raw = ensure_tensor_with_channels(raw, ndim=self._ndim, dtype=self.dtype)
return raw
# need to overwrite pickle to support h5py
def __getstate__(self):
state = self.__dict__.copy()
del state["raw"]
return state
def __setstate__(self, state):
state["raw"] = open_file(state["raw_path"], mode="r")[state["raw_key"]]
self.__dict__.update(state)
| [
"numpy.random.randint",
"elf.io.open_file",
"elf.wrapper.RoiWrapper"
] | [((330, 355), 'elf.io.open_file', 'open_file', (['path'], {'mode': '"""r"""'}), "(path, mode='r')\n", (339, 355), False, 'from elf.io import open_file\n'), ((944, 973), 'elf.io.open_file', 'open_file', (['raw_path'], {'mode': '"""r"""'}), "(raw_path, mode='r')\n", (953, 973), False, 'from elf.io import open_file\n'), ((4765, 4803), 'elf.io.open_file', 'open_file', (["state['raw_path']"], {'mode': '"""r"""'}), "(state['raw_path'], mode='r')\n", (4774, 4803), False, 'from elf.io import open_file\n'), ((1672, 1697), 'elf.wrapper.RoiWrapper', 'RoiWrapper', (['self.raw', 'roi'], {}), '(self.raw, roi)\n', (1682, 1697), False, 'from elf.wrapper import RoiWrapper\n'), ((3050, 3080), 'numpy.random.randint', 'np.random.randint', (['(0)', '(sh - psh)'], {}), '(0, sh - psh)\n', (3067, 3080), True, 'import numpy as np\n')] |
from . import Regression
from ..tests import random_plane,scattered_plane
import numpy as N
def test_coordinates():
"""Tests coordinate length"""
plane,coefficients = random_plane()
fit = Regression(plane)
assert N.column_stack(plane).shape[0] == fit.C.shape[0]
def test_regression():
"""Make sure we can fit a simple plane"""
plane,coefficients = random_plane()
fit = Regression(plane)
assert N.allclose(fit.coefficients(), coefficients)
def test_covariance():
"""Make sure we don't get empty covariance matrices"""
plane,coefficients = scattered_plane()
fit = Regression(plane)
for i in fit.covariance_matrix().flatten():
assert i != 0
| [
"numpy.column_stack"
] | [((230, 251), 'numpy.column_stack', 'N.column_stack', (['plane'], {}), '(plane)\n', (244, 251), True, 'import numpy as N\n')] |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining a color-based blob detector for camera images."""
from typing import Mapping, Optional, Tuple
from absl import logging
import cv2
from dmr_vision import detector
from dmr_vision import types
import numpy as np
class BlobDetector(detector.ImageDetector):
"""Color-based blob detector."""
def __init__(self,
color_ranges: Mapping[str, types.ColorRange],
scale: float = (1. / 6.),
min_area: int = 230,
mask_points: Optional[types.MaskPoints] = None,
visualize: bool = False,
toolkit: bool = False):
"""Constructs a `BlobDetector` instance.
Args:
color_ranges: A mapping between a given blob name and the range of YUV
color used to segment it from an image.
scale: Rescaling image factor. Used for increasing the frame rate, at the
cost of reducing the precision of the blob barycenter and controur.
min_area: The minimum area the detected blob must have.
mask_points: (u, v) coordinates defining a closed regions of interest in
the image where the blob detector will not look for blobs.
visualize: Whether to output a visualization of the detected blob or not.
toolkit: Whether to display a YUV GUI toolkit for parameter tuning.
Enabling this implcitly sets `visualize = True`.
"""
self._color_ranges = color_ranges
self._scale = np.array(scale)
self._min_area = min_area
self._mask_points = mask_points if mask_points is not None else ()
self._visualize = visualize
self._mask = None
self._toolkit = toolkit
if self._toolkit:
self._visualize = True
self._window_name = "UV toolkit"
self._window_size = (800, 1000)
cv2.namedWindow(
self._window_name,
cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)
cv2.resizeWindow(self._window_name, self._window_size)
self._trackbar_scale = 1000
num_colors = len(self._color_ranges.keys())
if num_colors > 1:
cv2.createTrackbar("Color selector", self._window_name, 0,
len(self._color_ranges.keys()) - 1,
self._callback_change_color)
cv2.createTrackbar("Subsampling", self._window_name, 5, 10,
lambda x: None)
cv2.setTrackbarMin("Subsampling", self._window_name, 1)
self._u_range_trackbar = CreateRangeTrackbar(self._window_name, "U min",
"U max", self._color_ranges,
"U", self._trackbar_scale)
self._v_range_trackbar = CreateRangeTrackbar(self._window_name, "V min",
"V max", self._color_ranges,
"V", self._trackbar_scale)
self._callback_change_color(0)
def __del__(self):
if self._toolkit:
cv2.destroyAllWindows()
def __call__(self,
image: np.ndarray) -> Tuple[types.Centers, types.Detections]:
"""Finds color blobs in the image.
Args:
image: the input image.
Returns:
A dictionary mapping a blob name with
- the (u, v) coordinate of its barycenter, if found;
- `None`, otherwise;
and a dictionary mapping a blob name with
- its contour superimposed on the input image;
- `None`, if `BlobDetector` is run with `visualize == False`.
"""
# Preprocess the image.
image = self._preprocess(image)
# Convert the image to YUV.
yuv_image = cv2.cvtColor(image.astype(np.float32) / 255., cv2.COLOR_RGB2YUV)
# Find blobs.
blob_centers = {}
blob_visualizations = {}
for name, color_range in self._color_ranges.items():
blob = self._find_blob(yuv_image, color_range)
blob_centers[name] = blob.center * (1. / self._scale) if blob else None
blob_visualizations[name] = (
self._draw_blobs(image, blob) if self._visualize else None)
if self._toolkit:
self._update_gui_toolkit(yuv_image, image)
return blob_centers, blob_visualizations
def _preprocess(self, image: np.ndarray) -> np.ndarray:
"""Preprocesses an image for color-based blob detection."""
# Resize the image to make all other operations faster.
size = np.round(image.shape[:2] * self._scale).astype(np.int32)
resized = cv2.resize(image, (size[1], size[0]))
if self._mask is None:
self._setup_mask(resized)
# Denoise the image.
denoised = cv2.fastNlMeansDenoisingColored(
src=resized, h=7, hColor=7, templateWindowSize=3, searchWindowSize=5)
return cv2.multiply(denoised, self._mask)
def _setup_mask(self, image: np.ndarray) -> None:
"""Initialises an image mask to explude pixels from blob detection."""
self._mask = np.ones(image.shape, image.dtype)
for mask_points in self._mask_points:
cv2.fillPoly(self._mask, np.int32([mask_points * self._scale]), 0)
def _find_blob(self, yuv_image: np.ndarray,
color_range: types.ColorRange) -> Optional[types.Blob]:
"""Find the largest blob matching the YUV color range.
Args:
yuv_image: An image in YUV color space.
color_range: The YUV color range used for segmentation.
Returns:
If found, the (u, v) coordinate of the barycenter and the contour of the
segmented blob. Otherwise returns `None`.
"""
# Threshold the image in YUV color space.
lower = color_range.lower
upper = color_range.upper
mask = cv2.inRange(yuv_image.copy(), lower, upper)
# Find contours.
_, contours, _ = cv2.findContours(
image=mask, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
if not contours:
return None
# Find the largest contour.
max_area_contour = max(contours, key=cv2.contourArea)
# If the blob's area is too small, ignore it.
correction_factor = np.square(1. / self._scale)
normalized_area = cv2.contourArea(max_area_contour) * correction_factor
if normalized_area < self._min_area:
return None
# Compute the centroid.
moments = cv2.moments(max_area_contour)
if moments["m00"] == 0:
return None
cx, cy = moments["m10"] / moments["m00"], moments["m01"] / moments["m00"]
return types.Blob(center=np.array([cx, cy]), contour=max_area_contour)
def _draw_blobs(self, image: np.ndarray, blob: types.Blob) -> np.ndarray:
"""Draws the controuer of the detected blobs."""
frame = image.copy()
if blob:
# Draw center.
cv2.drawMarker(
img=frame,
position=(int(blob.center[0]), int(blob.center[1])),
color=(255, 0, 0),
markerType=cv2.MARKER_CROSS,
markerSize=7,
thickness=1,
line_type=cv2.LINE_AA)
# Draw contours.
cv2.drawContours(
image=frame,
contours=[blob.contour],
contourIdx=0,
color=(0, 0, 255),
thickness=1)
return frame
def _callback_change_color(self, color_index: int) -> None:
"""Callback for YUV GUI toolkit trackbar.
Reads current trackbar value and selects the associated color.
The association between index and color is implementation dependent, i.e.
in the insertion order into a dictionary.
Args:
color_index: The current value of the trackbar. Passed automatically.
"""
colors = list(self._color_ranges.keys())
selected_color = colors[color_index]
min_upper = self._color_ranges[selected_color]
lower = min_upper.lower
upper = min_upper.upper
self._u_range_trackbar.set_trackbar_pos(lower[1], upper[1])
self._v_range_trackbar.set_trackbar_pos(lower[2], upper[2])
cv2.setWindowTitle(self._window_name,
self._window_name + " - Color: " + selected_color)
def _update_gui_toolkit(self, image_yuv: np.ndarray,
image_rgb: np.ndarray) -> None:
"""Updates the YUV GUI toolkit.
Creates and shows the UV representation of the current image.
Args:
image_yuv: The current image in YUV color space.
image_rgb: The current image in RGB color space.
"""
subsample = cv2.getTrackbarPos("Subsampling", self._window_name)
img_u = image_yuv[0::subsample, 0::subsample, 1]
img_v = 1.0 - image_yuv[0::subsample, 0::subsample, 2]
pixel_color = image_rgb[0::subsample, 0::subsample, :]
pixel_color = pixel_color.reshape(np.prod(img_u.shape[0:2]), -1)
img_u = img_u.ravel()
img_v = img_v.ravel()
fig_size = 300
fig = np.full(shape=(fig_size, fig_size, 3), fill_value=255, dtype=np.uint8)
cv2.arrowedLine(
img=fig,
pt1=(0, fig_size),
pt2=(fig_size, fig_size),
color=(0, 0, 0),
thickness=2,
tipLength=0.03)
cv2.arrowedLine(
img=fig,
pt1=(0, fig_size),
pt2=(0, 0),
color=(0, 0, 0),
thickness=2,
tipLength=0.03)
cv2.putText(
img=fig,
text="U",
org=(int(0.94 * fig_size), int(0.97 * fig_size)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(0, 0, 0),
thickness=2)
cv2.putText(
img=fig,
text="V",
org=(int(0.03 * fig_size), int(0.06 * fig_size)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(0, 0, 0),
thickness=2)
for i in range(img_u.size):
color = tuple(int(p) for p in pixel_color[i, ::-1])
position = (int(img_u[i] * fig_size), int(img_v[i] * fig_size))
cv2.drawMarker(
img=fig,
position=position,
color=color,
markerType=cv2.MARKER_SQUARE,
markerSize=3,
thickness=2)
u_min, u_max = self._u_range_trackbar.get_trackbar_pos()
u_min = int(u_min * fig_size)
u_max = int(u_max * fig_size)
v_min, v_max = self._v_range_trackbar.get_trackbar_pos()
v_min = int((1.0 - v_min) * fig_size)
v_max = int((1.0 - v_max) * fig_size)
cv2.line(
img=fig,
pt1=(u_min, v_max),
pt2=(u_min, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_max, v_max),
pt2=(u_max, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_min, v_min),
pt2=(u_max, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_min, v_max),
pt2=(u_max, v_max),
color=(0, 0, 0),
thickness=2)
cv2.imshow(self._window_name, fig)
cv2.waitKey(1)
class CreateRangeTrackbar:
"""Class to create and control, on an OpenCV GUI, two trackbars representing a range of values."""
def __init__(self,
window_name: str,
trackbar_name_lower: str,
trackbar_name_upper: str,
color_ranges: Mapping[str, types.ColorRange],
color_code: str,
trackbar_scale: int = 1000):
"""Initializes the class.
Args:
window_name: Name of the window that will be used as a parent of the
created trackbar.
trackbar_name_lower: The name of the trackbar implementing the lower bound
of the range.
trackbar_name_upper: The name of the trackbar implementing the upper bound
of the range.
color_ranges: A mapping between a given blob name and the range of YUV
color used to segment it from an image.
color_code: The color code to change in `color_ranges`. Shall be "U" or
"V".
trackbar_scale: The trackbar scale to recover the real value from the
current trackbar position.
"""
self._window_name = window_name
self._trackbar_name_lower = trackbar_name_lower
self._trackbar_name_upper = trackbar_name_upper
self._color_ranges = color_ranges
self._color_code = color_code
self._trackbar_scale = trackbar_scale
self._trackbar_reset = False
# pylint: disable=g-long-lambda
cv2.createTrackbar(
self._trackbar_name_lower, self._window_name, 0,
self._trackbar_scale, lambda x: self._callback_update_threshold(
"lower", "lower", self._color_code, x))
cv2.createTrackbar(
self._trackbar_name_upper, self._window_name, 0,
self._trackbar_scale, lambda x: self._callback_update_threshold(
"upper", "upper", self._color_code, x))
# pylint: enable=g-long-lambda
def set_trackbar_pos(self, lower_value: float, upper_value: float) -> None:
"""Sets the trackbars to specific values."""
if lower_value > upper_value:
logging.error(
"Wrong values for setting range trackbars. Lower value "
"must be less than upper value. Provided lower: %d. "
"Provided upper: %d.", lower_value, upper_value)
return
# To change the trackbar values avoiding the consistency check enforced by
# the callback to implement a range of values with two sliders, we set the
# variable self._trackbar_reset to `True` and then bring it back to
# `False`.
self._trackbar_reset = True
cv2.setTrackbarPos(self._trackbar_name_lower, self._window_name,
int(lower_value * self._trackbar_scale))
cv2.setTrackbarPos(self._trackbar_name_upper, self._window_name,
int(upper_value * self._trackbar_scale))
self._trackbar_reset = False
def get_trackbar_pos(self, normalized: bool = True) -> Tuple[float, float]:
"""Gets the trackbars lower and upper values."""
lower = cv2.getTrackbarPos(self._trackbar_name_lower, self._window_name)
upper = cv2.getTrackbarPos(self._trackbar_name_upper, self._window_name)
if normalized:
return lower / self._trackbar_scale, upper / self._trackbar_scale
else:
return lower, upper
def _callback_update_threshold(self, lower_or_upper: str, attribute: str,
color_code: str, value: int) -> None:
"""Callback for YUV GUI toolkit trackbar.
Reads current trackbar value and updates the associated U or V threshold.
This callback assumes that two trackbars, `trackbar_name_lower` and
`trackbar_name_upper`, form a range of values. As a consequence, when one
of the two trackbar is moved, there is a consistency check that the range
is valid (i.e. lower value less than max value and vice versa).
Typical usage example:
To pass it to an OpenCV/Qt trackbar, use this function in a lambda
as follows:
cv2.createTrackbar("Trackbar lower", ..., lambda x:
class_variable._callback_update_threshold("lower", "lower", "U", x))
Args:
lower_or_upper: The behaviour of this callback for the range. Shall be
`lower` or `upper`.
attribute: The name of the threshold in `self._color_ranges` for the
current selected color.
color_code: The color code to change. Shall be "U" or "V".
value: The current value of the trackbar.
"""
if not self._trackbar_reset:
if lower_or_upper == "lower":
limiting_value = cv2.getTrackbarPos(self._trackbar_name_upper,
self._window_name)
if value > limiting_value:
cv2.setTrackbarPos(self._trackbar_name_lower, self._window_name,
limiting_value)
return
elif lower_or_upper == "upper":
limiting_value = cv2.getTrackbarPos(self._trackbar_name_lower,
self._window_name)
if value < limiting_value:
cv2.setTrackbarPos(self._trackbar_name_upper, self._window_name,
limiting_value)
return
selected_color_index = cv2.getTrackbarPos("Color selector",
self._window_name)
colors = list(self._color_ranges.keys())
selected_color = colors[selected_color_index]
updated_value = value / self._trackbar_scale
color_threshold = getattr(self._color_ranges[selected_color], attribute)
if color_code == "U":
color_threshold[1] = updated_value
elif color_code == "V":
color_threshold[2] = updated_value
else:
logging.error(
"Wrong trackbar name. No U/V color code correspondence."
"Provided: `%s`.", color_code)
return
| [
"numpy.prod",
"numpy.int32",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.drawMarker",
"cv2.resizeWindow",
"cv2.multiply",
"cv2.line",
"cv2.contourArea",
"cv2.setWindowTitle",
"cv2.arrowedLine",
"cv2.waitKey",
"numpy.round",
"cv2.drawContours",
"numpy.ones",
"cv2.setTra... | [((2029, 2044), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (2037, 2044), True, 'import numpy as np\n'), ((5028, 5065), 'cv2.resize', 'cv2.resize', (['image', '(size[1], size[0])'], {}), '(image, (size[1], size[0]))\n', (5038, 5065), False, 'import cv2\n'), ((5165, 5270), 'cv2.fastNlMeansDenoisingColored', 'cv2.fastNlMeansDenoisingColored', ([], {'src': 'resized', 'h': '(7)', 'hColor': '(7)', 'templateWindowSize': '(3)', 'searchWindowSize': '(5)'}), '(src=resized, h=7, hColor=7,\n templateWindowSize=3, searchWindowSize=5)\n', (5196, 5270), False, 'import cv2\n'), ((5287, 5321), 'cv2.multiply', 'cv2.multiply', (['denoised', 'self._mask'], {}), '(denoised, self._mask)\n', (5299, 5321), False, 'import cv2\n'), ((5467, 5500), 'numpy.ones', 'np.ones', (['image.shape', 'image.dtype'], {}), '(image.shape, image.dtype)\n', (5474, 5500), True, 'import numpy as np\n'), ((6266, 6355), 'cv2.findContours', 'cv2.findContours', ([], {'image': 'mask', 'mode': 'cv2.RETR_EXTERNAL', 'method': 'cv2.CHAIN_APPROX_SIMPLE'}), '(image=mask, mode=cv2.RETR_EXTERNAL, method=cv2.\n CHAIN_APPROX_SIMPLE)\n', (6282, 6355), False, 'import cv2\n'), ((6563, 6591), 'numpy.square', 'np.square', (['(1.0 / self._scale)'], {}), '(1.0 / self._scale)\n', (6572, 6591), True, 'import numpy as np\n'), ((6768, 6797), 'cv2.moments', 'cv2.moments', (['max_area_contour'], {}), '(max_area_contour)\n', (6779, 6797), False, 'import cv2\n'), ((8361, 8453), 'cv2.setWindowTitle', 'cv2.setWindowTitle', (['self._window_name', "(self._window_name + ' - Color: ' + selected_color)"], {}), "(self._window_name, self._window_name + ' - Color: ' +\n selected_color)\n", (8379, 8453), False, 'import cv2\n'), ((8835, 8887), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Subsampling"""', 'self._window_name'], {}), "('Subsampling', self._window_name)\n", (8853, 8887), False, 'import cv2\n'), ((9211, 9281), 'numpy.full', 'np.full', ([], {'shape': '(fig_size, fig_size, 3)', 'fill_value': '(255)', 
'dtype': 'np.uint8'}), '(shape=(fig_size, fig_size, 3), fill_value=255, dtype=np.uint8)\n', (9218, 9281), True, 'import numpy as np\n'), ((9286, 9406), 'cv2.arrowedLine', 'cv2.arrowedLine', ([], {'img': 'fig', 'pt1': '(0, fig_size)', 'pt2': '(fig_size, fig_size)', 'color': '(0, 0, 0)', 'thickness': '(2)', 'tipLength': '(0.03)'}), '(img=fig, pt1=(0, fig_size), pt2=(fig_size, fig_size), color\n =(0, 0, 0), thickness=2, tipLength=0.03)\n', (9301, 9406), False, 'import cv2\n'), ((9455, 9560), 'cv2.arrowedLine', 'cv2.arrowedLine', ([], {'img': 'fig', 'pt1': '(0, fig_size)', 'pt2': '(0, 0)', 'color': '(0, 0, 0)', 'thickness': '(2)', 'tipLength': '(0.03)'}), '(img=fig, pt1=(0, fig_size), pt2=(0, 0), color=(0, 0, 0),\n thickness=2, tipLength=0.03)\n', (9470, 9560), False, 'import cv2\n'), ((10672, 10763), 'cv2.line', 'cv2.line', ([], {'img': 'fig', 'pt1': '(u_min, v_max)', 'pt2': '(u_min, v_min)', 'color': '(0, 0, 0)', 'thickness': '(2)'}), '(img=fig, pt1=(u_min, v_max), pt2=(u_min, v_min), color=(0, 0, 0),\n thickness=2)\n', (10680, 10763), False, 'import cv2\n'), ((10805, 10896), 'cv2.line', 'cv2.line', ([], {'img': 'fig', 'pt1': '(u_max, v_max)', 'pt2': '(u_max, v_min)', 'color': '(0, 0, 0)', 'thickness': '(2)'}), '(img=fig, pt1=(u_max, v_max), pt2=(u_max, v_min), color=(0, 0, 0),\n thickness=2)\n', (10813, 10896), False, 'import cv2\n'), ((10938, 11029), 'cv2.line', 'cv2.line', ([], {'img': 'fig', 'pt1': '(u_min, v_min)', 'pt2': '(u_max, v_min)', 'color': '(0, 0, 0)', 'thickness': '(2)'}), '(img=fig, pt1=(u_min, v_min), pt2=(u_max, v_min), color=(0, 0, 0),\n thickness=2)\n', (10946, 11029), False, 'import cv2\n'), ((11071, 11162), 'cv2.line', 'cv2.line', ([], {'img': 'fig', 'pt1': '(u_min, v_max)', 'pt2': '(u_max, v_max)', 'color': '(0, 0, 0)', 'thickness': '(2)'}), '(img=fig, pt1=(u_min, v_max), pt2=(u_max, v_max), color=(0, 0, 0),\n thickness=2)\n', (11079, 11162), False, 'import cv2\n'), ((11205, 11239), 'cv2.imshow', 'cv2.imshow', (['self._window_name', 'fig'], 
{}), '(self._window_name, fig)\n', (11215, 11239), False, 'import cv2\n'), ((11244, 11258), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (11255, 11258), False, 'import cv2\n'), ((14225, 14289), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self._trackbar_name_lower', 'self._window_name'], {}), '(self._trackbar_name_lower, self._window_name)\n', (14243, 14289), False, 'import cv2\n'), ((14302, 14366), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self._trackbar_name_upper', 'self._window_name'], {}), '(self._trackbar_name_upper, self._window_name)\n', (14320, 14366), False, 'import cv2\n'), ((16406, 16461), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Color selector"""', 'self._window_name'], {}), "('Color selector', self._window_name)\n", (16424, 16461), False, 'import cv2\n'), ((2364, 2470), 'cv2.namedWindow', 'cv2.namedWindow', (['self._window_name', '(cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)'], {}), '(self._window_name, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO |\n cv2.WINDOW_GUI_EXPANDED)\n', (2379, 2470), False, 'import cv2\n'), ((2494, 2548), 'cv2.resizeWindow', 'cv2.resizeWindow', (['self._window_name', 'self._window_size'], {}), '(self._window_name, self._window_size)\n', (2510, 2548), False, 'import cv2\n'), ((2852, 2927), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Subsampling"""', 'self._window_name', '(5)', '(10)', '(lambda x: None)'], {}), "('Subsampling', self._window_name, 5, 10, lambda x: None)\n", (2870, 2927), False, 'import cv2\n'), ((2959, 3014), 'cv2.setTrackbarMin', 'cv2.setTrackbarMin', (['"""Subsampling"""', 'self._window_name', '(1)'], {}), "('Subsampling', self._window_name, 1)\n", (2977, 3014), False, 'import cv2\n'), ((3579, 3602), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3600, 3602), False, 'import cv2\n'), ((6613, 6646), 'cv2.contourArea', 'cv2.contourArea', (['max_area_contour'], {}), '(max_area_contour)\n', (6628, 6646), False, 'import cv2\n'), ((7469, 7574), 
'cv2.drawContours', 'cv2.drawContours', ([], {'image': 'frame', 'contours': '[blob.contour]', 'contourIdx': '(0)', 'color': '(0, 0, 255)', 'thickness': '(1)'}), '(image=frame, contours=[blob.contour], contourIdx=0, color=\n (0, 0, 255), thickness=1)\n', (7485, 7574), False, 'import cv2\n'), ((9098, 9123), 'numpy.prod', 'np.prod', (['img_u.shape[0:2]'], {}), '(img_u.shape[0:2])\n', (9105, 9123), True, 'import numpy as np\n'), ((10218, 10335), 'cv2.drawMarker', 'cv2.drawMarker', ([], {'img': 'fig', 'position': 'position', 'color': 'color', 'markerType': 'cv2.MARKER_SQUARE', 'markerSize': '(3)', 'thickness': '(2)'}), '(img=fig, position=position, color=color, markerType=cv2.\n MARKER_SQUARE, markerSize=3, thickness=2)\n', (10232, 10335), False, 'import cv2\n'), ((13285, 13462), 'absl.logging.error', 'logging.error', (['"""Wrong values for setting range trackbars. Lower value must be less than upper value. Provided lower: %d. Provided upper: %d."""', 'lower_value', 'upper_value'], {}), "(\n 'Wrong values for setting range trackbars. Lower value must be less than upper value. Provided lower: %d. Provided upper: %d.'\n , lower_value, upper_value)\n", (13298, 13462), False, 'from absl import logging\n'), ((4957, 4996), 'numpy.round', 'np.round', (['(image.shape[:2] * self._scale)'], {}), '(image.shape[:2] * self._scale)\n', (4965, 4996), True, 'import numpy as np\n'), ((5574, 5611), 'numpy.int32', 'np.int32', (['[mask_points * self._scale]'], {}), '([mask_points * self._scale])\n', (5582, 5611), True, 'import numpy as np\n'), ((6951, 6969), 'numpy.array', 'np.array', (['[cx, cy]'], {}), '([cx, cy])\n', (6959, 6969), True, 'import numpy as np\n'), ((15753, 15817), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self._trackbar_name_upper', 'self._window_name'], {}), '(self._trackbar_name_upper, self._window_name)\n', (15771, 15817), False, 'import cv2\n'), ((16882, 16989), 'absl.logging.error', 'logging.error', (['"""Wrong trackbar name. 
No U/V color code correspondence.Provided: `%s`."""', 'color_code'], {}), "(\n 'Wrong trackbar name. No U/V color code correspondence.Provided: `%s`.',\n color_code)\n", (16895, 16989), False, 'from absl import logging\n'), ((15907, 15992), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['self._trackbar_name_lower', 'self._window_name', 'limiting_value'], {}), '(self._trackbar_name_lower, self._window_name, limiting_value\n )\n', (15925, 15992), False, 'import cv2\n'), ((16097, 16161), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['self._trackbar_name_lower', 'self._window_name'], {}), '(self._trackbar_name_lower, self._window_name)\n', (16115, 16161), False, 'import cv2\n'), ((16251, 16336), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['self._trackbar_name_upper', 'self._window_name', 'limiting_value'], {}), '(self._trackbar_name_upper, self._window_name, limiting_value\n )\n', (16269, 16336), False, 'import cv2\n')] |
import pandas as pd
import numpy as np
class PayoffMatrix():
    """Pairwise payoff matrix over the subclones of a simulation.

    Entry (i, j) holds the fitness advantage of subclone j over subclone i
    under the treatment vector that is active at the simulation's current
    time step ``sim.t``.
    """

    def __init__(self, sim):
        # Keep a handle on the simulation and fill the matrix for its
        # current time step.
        self.sim = sim
        n_clones = len(sim.subclones)
        self.matrix = np.zeros(shape=(n_clones, n_clones))
        self.populate_matrix(sim.t)

    def populate_matrix(self, t):
        """Fill ``self.matrix`` with payoffs f(j) - f(i) for time step ``t``."""
        treatments = self.sim.treatments[t, :]
        # A subclone's fitness is the dot product of its treatment
        # sensitivities (alpha) with the active treatment vector.
        fitness = [np.dot(clone.alpha, treatments) for clone in self.sim.subclones]
        for row, f_row in enumerate(fitness):
            for col, f_col in enumerate(fitness):
                self.matrix[row, col] = f_col - f_row

    def print_matrix(self):
        """Pretty-print the matrix as a DataFrame labelled by subclone."""
        labels = [clone.label for clone in self.sim.subclones]
        self.pretty_matrix = pd.DataFrame(self.matrix, index=labels, columns=labels)
        print(self.pretty_matrix)
| [
"pandas.DataFrame",
"numpy.dot"
] | [((813, 864), 'pandas.DataFrame', 'pd.DataFrame', (['self.matrix'], {'index': 'labs', 'columns': 'labs'}), '(self.matrix, index=labs, columns=labs)\n', (825, 864), True, 'import pandas as pd\n'), ((450, 497), 'numpy.dot', 'np.dot', (['self.sim.subclones[j].alpha', 'treatments'], {}), '(self.sim.subclones[j].alpha, treatments)\n', (456, 497), True, 'import numpy as np\n'), ((519, 566), 'numpy.dot', 'np.dot', (['self.sim.subclones[i].alpha', 'treatments'], {}), '(self.sim.subclones[i].alpha, treatments)\n', (525, 566), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import itertools
from matplotlib import cm
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix, make_scorer, roc_auc_score, average_precision_score, classification_report, roc_curve, precision_recall_curve
from sklearn.linear_model import LogisticRegression, SGDClassifier
class Classification:
    '''
    Class with methods for fitting, evaluating, and interpreting
    useful classification models.
    Note:
        Consider features you may want to engineer,
        including polynomial and interaction terms
        (based on EDA, initial regression results, etc.)
        before each instantiation of this class.
    Args:
        df: Pandas dataframe.
        X_cols (list of str): Feature column names.
        y_col (str): Target column name.
    Attributes:
        df: Pandas dataframe.
        X_cols (list of str): Feature column names.
        y_col (str): Target column name.
        X (np matrix): Features.
        y (np array): Target.
        X_scaled (np matrix): Standardized features.
    Todo:
        * RF and GB
        * Profit curve
        * Normalization option for histograms
    '''
    def __init__(self, df, X_cols, y_col):
        self.X_cols = X_cols
        self.y_col = y_col
        self.df = df #pandas dataframe
        self.X = df[X_cols].values #getting features into numpy matrix for use with sklearn
        self.y = df[y_col].values.ravel() #getting target into numpy array for use with sklearn
        self.X_scaled = StandardScaler().fit(self.X).transform(self.X) #standardized feature matrix
    def elastic_net(self, alphas, l1_ratios, folds, imbalanced=False): #slow
        # NOTE TO SELF: Should I consider class_weight? My
        # understanding is that class_weight rescales the
        # predicted probabilities, does not change result,
        # and implies assumptions about cost/benefit anyway,
        # because it usually trades off recall for precision.
        '''
        Runs parallelized GridSearchCV for elastic net logistic
        regression over the specified alphas and l1_ratios.
        Notes:
            Instead of alphas for regularization strength,
            sklearn logistic reg uses parameter 'C' which is inverse alphas.
        Args:
            alphas (list of floats > 0): +infinity approaches no penalty, max penalty is rarely < 0.1
            l1_ratios (list of floats 0 to 1): 0 is pure ridge (l2 penalty), 1 is pure lasso (l1 penalty)
            folds (int): Number of folds to use in cross validation.
            imbalanced (bool): Default False, set to True to score on PR instead of ROC.
        Returns:
            model (obj): GridSearchCV optimized elastic net model.
            preds (np array): Predicted values of target.
        Raises:
            ~in progress~
        Example:
            alphas = np.logspace(1, -2, 10)
            l1_ratios = [0, .5, 1]
            folds = 5
            model, preds = clf.elastic_net(alphas, l1_ratios, folds)
        '''
        # The two branches differ only in the CV scorer: ROC AUC for
        # balanced classes, average precision (PR AUC) for imbalanced ones.
        if imbalanced == False:
            parameters = {'l1_ratio': l1_ratios, 'C': alphas}
            elastic_net = LogisticRegression(penalty='elasticnet', solver='saga', max_iter=1000, n_jobs=-1)
            scoring = make_scorer(roc_auc_score, needs_threshold=True)
            model = GridSearchCV(elastic_net, parameters, scoring=scoring, cv=folds, n_jobs=-1)
            model.fit(self.X_scaled, self.y)
            preds = model.predict(self.X_scaled)
            print("-------- BEST MODEL --------")
            print(model.best_estimator_)
            print("-------- ---------- --------")
            return model, preds
        else:
            parameters = {'l1_ratio': l1_ratios, 'C': alphas}
            elastic_net = LogisticRegression(penalty='elasticnet', solver='saga', max_iter=1000, n_jobs=-1)
            scoring = make_scorer(average_precision_score, needs_threshold=True)
            model = GridSearchCV(elastic_net, parameters, scoring=scoring, cv=folds, n_jobs=-1)
            model.fit(self.X_scaled, self.y)
            preds = model.predict(self.X_scaled)
            print("-------- BEST MODEL --------")
            print(model.best_estimator_)
            print("-------- ---------- --------")
            return model, preds
    def elastic_net_sgd(self, alphas, l1_ratios, folds): #faster, more robust
        '''
        Runs parallelized GridSearchCV for SGD optimized elastic net
        logistic regression over the specified alphas and l1_ratios.
        Also optimizes over class_weight 'balanced' vs None.
        Notes:
            Predictions are more robust to outliers than plain elastic net,
            but appear to have more bias as well.
            Does not fit inliers as closely as RF, GB,
            so they may be preferable for robust predictions.
            Instead of alphas for regularization strength,
            sklearn logistic reg uses parameter 'C' which is inverse alphas.
        Args:
            alphas (list of floats >= 0): 0 is no penalty, max penalty is rarely > 10.
            l1_ratios (list of floats 0 to 1): 0 is pure ridge (l2 penalty), 1 is pure lasso (l1 penalty)
            folds (int): Number of folds to use in cross validation.
        Returns:
            model (obj): GridSearchCV optimized elastic net model.
            preds (np array): Predicted values of target.
        Raises:
            ~in progress~
        Example:
            alphas = np.logspace(-3, 1, 25)
            l1_ratios = [0, .5, 1]
            folds = 5
            model, preds = clf.elastic_net_sgd(alphas, l1_ratios, folds)
        '''
        # SGDClassifier takes 'alpha' directly (not inverse-C like LogisticRegression).
        parameters = {'l1_ratio': l1_ratios, 'alpha': alphas, 'class_weight': [None, 'balanced']}
        elastic_net = SGDClassifier(loss='log', penalty='elasticnet', n_jobs=-1)
        model = GridSearchCV(elastic_net, parameters, cv=folds, n_jobs=-1)
        model.fit(self.X_scaled, self.y)
        preds = model.predict(self.X_scaled)
        print("-------- BEST MODEL --------")
        print(model.best_estimator_)
        print("-------- ---------- --------")
        return model, preds
    def coefficient_plot(self, model):
        '''Plots model coefficients.
        Args:
            model (obj): GridSearchCV model object.
        '''
        if len(model.best_estimator_.coef_) == 1: #this happens with Logistic Reg coefs
            coef = pd.Series(model.best_estimator_.coef_[0], index=self.X_cols)
            sorted_coef = coef.sort_values()
            sorted_coef.plot(kind = "barh", figsize=(12, 9))
            plt.title("Coefficients in the model")
            print(sorted_coef[::-1])
            print("Intercept ", model.best_estimator_.intercept_)
        else:
            # NOTE(review): in this branch coef_ appears to be 2-D (multiclass);
            # pd.Series over a 2-D array would raise — confirm this path is exercised.
            coef = pd.Series(model.best_estimator_.coef_, index=self.X_cols)
            sorted_coef = coef.sort_values()
            sorted_coef.plot(kind = "barh", figsize=(12, 9))
            plt.title("Coefficients in the model")
            print(sorted_coef[::-1])
            print("Intercept ", model.best_estimator_.intercept_)
    def lasso_plot(self, alphas):
        '''Visualizes robust coefficients (feature importances).
        Args:
            alphas (list of floats > 0): +infinity approaches no penalty, max penalty is rarely < 0.1
        Example:
            clf.lasso_plot(np.logspace(2, -3, 25))
        '''
        # Refit an L1 logistic regression at each C value (C = inverse
        # regularization strength, so larger a = weaker penalty).
        coefs = []
        for a in alphas:
            lasso = LogisticRegression(penalty='l1', C=a, solver='saga', max_iter=1000)
            lasso.fit(self.X_scaled, self.y)
            if len(lasso.coef_) == 1:
                coefs.append(lasso.coef_[0])
            else:
                coefs.append(lasso.coef_)
        fig, ax = plt.subplots(1, 1, figsize=(12, 9))
        ax.plot(alphas, coefs)
        ax.set_xscale('log')
        # ax.set_xlim(ax.get_xlim()[::-1]) #reverse axis
        plt.xlabel('C (inverse λ)')
        plt.ylabel('coefficients')
        plt.title('LASSO coefficients as a function of regularization')
        plt.legend(labels=self.X_cols, loc='center left', bbox_to_anchor=(1, 0.5))
        plt.show()
    def roc_curve(self, model):
        '''Plots FPR against TPR across decision thresholds.
        Args:
            model (obj): Sklearn classifier.
        '''
        probs = model.predict_proba(self.X_scaled) #predict probabilities
        probs = probs[:, 1] #keep probabilities for the positive outcome only
        # The bare name below resolves to sklearn.metrics.roc_curve at module
        # scope, not to this method, despite the name collision.
        fpr, tpr, thresholds = roc_curve(self.y, probs) #calculate roc curve
        plt.figure(figsize=(8, 6))
        plt.plot([0, 1], [0, 1], linestyle='--') #plot no skill
        plt.plot(fpr, tpr) #plot the roc curve for the model
        plt.xlabel('FPR')
        plt.ylabel('TPR')
        plt.show()
        # Sweep thresholds for the largest TPR/FPR ratio among points with TPR > .5.
        # NOTE(review): fp can be 0 at the strictest threshold; numpy float
        # division then yields inf/nan with a RuntimeWarning — confirm intended.
        best_ratio = 0
        best_thresh = 0
        corresp_tp = 0
        corresp_fp = 0
        for fp, tp, t in zip(fpr, tpr, thresholds):
            if tp/fp > best_ratio and tp > 0.5:
                best_ratio = tp/fp
                best_thresh = t
                corresp_tp = tp
                corresp_fp = fp
        print("Best ratio w/ TPR > .5:", best_ratio)
        print("    Decision threshold:", best_thresh)
        print("                    TPR:", corresp_tp)
        print("                    FPR:", corresp_fp)
    def pr_curve(self, model):
        '''Plots Recall (TPR) against Precision across decision thresholds.
        Args:
            model (obj): Sklearn classifier.
        '''
        probs = model.predict_proba(self.X_scaled) #predict probabilities
        probs = probs[:, 1] #keep probabilities for the positive outcome only
        precision, recall, thresholds = precision_recall_curve(self.y, probs) #calculate p-r curve
        plt.figure(figsize=(8, 6))
        plt.plot([0, 1], [0.5, 0.5], linestyle='--') #plot no skill
        plt.plot(recall, precision) #plot the p-r curve for the model
        plt.xlabel('Recall (TPR)')
        plt.ylabel('Precision')
        plt.show()
        # Sweep thresholds for the precision/recall ratio closest to 1 from below.
        best_ratio = 0
        best_thresh = 0
        corresp_p = 0
        corresp_r = 0
        for p, r, t in zip(precision, recall, thresholds):
            if p/r < 1 and p/r > best_ratio:
                best_ratio = p/r
                best_thresh = t
                corresp_p = p
                corresp_r = r
        print("P/R ratio closest to 1:", best_ratio)
        print("    Decision threshold:", best_thresh)
        print("            Precision:", corresp_p)
        print("               Recall:", corresp_r)
    def profit_curve(self, thresholds, model, cost_bene):
        '''
        Given assumptions about cost/benefit of TPs, FPs TNs, FNs,
        plots profit of classifier across decision thresholds
        and prints threshold for max profit.
        Args:
            thresholds (arr of floats): List of decision thresholds to plot.
            model (obj): Sklearn classifier.
            cost_bene (dict): Dict mapping costs/benefits to TP, FP, TN, FN.
        Example:
            cost_bene = {'tp': 2, 'fp': -2, 'tn': 0, 'fn': -1}
            model = SGDClassifier()
            thresholds = np.linspace(.01, .99, 25)
            clf.profit_curve(thresholds, model, cost_bene)
        '''
        # Expected profit per prediction at each threshold: weighted sum of
        # confusion-matrix counts divided by the total number of predictions.
        profits = []
        for t in thresholds:
            preds = (model.predict_proba(self.X_scaled)[:,1] >= t).astype(bool)
            y = self.y
            tn, fp, fn, tp = confusion_matrix(y, preds).ravel()
            avg_profit = (tn*cost_bene['tn'] + fp*cost_bene['fp'] + fn*cost_bene['fn'] + tp*cost_bene['tp'])/(tn+fp+fn+tp)
            profits.append(avg_profit)
        plt.figure(figsize=(8, 6))
        plt.plot(thresholds, profits)
        plt.xlabel('Decision threshold')
        plt.ylabel('Expected profit')
        plt.show()
        print("Estimated max avg profit:", max(profits))
        print("      Decision threshold:", thresholds[profits.index(max(profits))])
class ConfusionMatrix:
    '''
    Class for plotting confusion matrix and
    printing accuracy, precision, recall, and fallout.
    Args:
        y (list or np array): Actual target values.
        y_pred (list or np array): Predicted target values.
        model (obj): Sklearn classifier (used only for its ``classes_`` order).
    Example:
        cm = ConfusionMatrix(y_test, y_pred, model)
        cm.plot_matrix()
    '''
    def __init__(self, y, y_pred, model):
        self.y = y #e.g. df[y_col]
        self.y_pred = y_pred
        self.model = model
        self.cm = confusion_matrix(self.y, self.y_pred)
        # If the classifier lists the positive class first, sklearn's matrix is
        # flipped relative to the usual [[TN, FP], [FN, TP]] layout; swap it
        # back so plotting and the derived metrics below stay correct.
        if model.classes_[0] == 1:
            self.cm = np.array([[self.cm[1,1], self.cm[1,0]], [self.cm[0,1], self.cm[0,0]]])
    def plot_matrix(self, classes=None, title_on=False, title='Confusion Matrix', cmap=plt.cm.Blues, **kwargs):
        '''Plots confusion matrix w/ accuracy, precision, recall, fallout.
        Args:
            classes (list of two str, optional): Axis labels; defaults to ['0', '1'].
            title_on (bool, optional): Default False, set to True to print title.
            title (str): Title shown if title_on arg set to True.
            cmap: Plot color palette.
            **kwargs: Passed through to sklearn classification_report below.
        '''
        # Default resolved inside the method to avoid a shared mutable default.
        if classes is None:
            classes = ['0', '1']
        fig, ax = plt.subplots()
        plt.imshow(self.cm, interpolation='nearest', cmap=cmap)
        if title_on == True:
            plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)
        # Annotate each cell with its count; flip the text color on dark cells.
        thresh = self.cm.max() / 2.
        for i, j in itertools.product(range(self.cm.shape[0]), range(self.cm.shape[1])):
            plt.text(j, i, self.cm[i, j],
                     horizontalalignment="center",
                     color="white" if self.cm[i, j] > thresh else "black")
        plt.tight_layout()
        plt.ylabel('True label', fontsize=14)
        ax.xaxis.tick_top()
        plt.xlabel('Predicted label', fontsize=14)
        ax.xaxis.set_label_position('top')
        plt.show()
        #for comparison to the classification report
        tp = self.cm[1,1]
        fn = self.cm[1,0]
        fp = self.cm[0,1]
        tn = self.cm[0,0]
        print('Accuracy = {:.3f}'.format((tp+tn)/(tp+fp+tn+fn)))
        print('Precision = {:.3f}'.format(tp/(tp+fp)))
        print('Recall (TPR) = {:.3f}'.format(tp/(tp+fn)))
        print('Fallout (FPR) = {:.3f}'.format(fp/(fp+tn)))
        #classification report
        print("")
        print('---- Classification Report ----')
        print(classification_report(self.y, self.y_pred, **kwargs))
| [
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"numpy.array",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.imshow",
"sklearn.linear_model.SGDClassifier",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yticks... | [((5901, 5959), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""log"""', 'penalty': '"""elasticnet"""', 'n_jobs': '(-1)'}), "(loss='log', penalty='elasticnet', n_jobs=-1)\n", (5914, 5959), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((5976, 6034), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['elastic_net', 'parameters'], {'cv': 'folds', 'n_jobs': '(-1)'}), '(elastic_net, parameters, cv=folds, n_jobs=-1)\n', (5988, 6034), False, 'from sklearn.model_selection import GridSearchCV\n'), ((7854, 7889), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 9)'}), '(1, 1, figsize=(12, 9))\n', (7866, 7889), True, 'import matplotlib.pyplot as plt\n'), ((8015, 8042), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""C (inverse λ)"""'], {}), "('C (inverse λ)')\n", (8025, 8042), True, 'import matplotlib.pyplot as plt\n'), ((8051, 8077), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""coefficients"""'], {}), "('coefficients')\n", (8061, 8077), True, 'import matplotlib.pyplot as plt\n'), ((8086, 8149), 'matplotlib.pyplot.title', 'plt.title', (['"""LASSO coefficients as a function of regularization"""'], {}), "('LASSO coefficients as a function of regularization')\n", (8095, 8149), True, 'import matplotlib.pyplot as plt\n'), ((8158, 8232), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'labels': 'self.X_cols', 'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)'}), "(labels=self.X_cols, loc='center left', bbox_to_anchor=(1, 0.5))\n", (8168, 8232), True, 'import matplotlib.pyplot as plt\n'), ((8241, 8251), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8249, 8251), True, 'import matplotlib.pyplot as plt\n'), ((8611, 8635), 'sklearn.metrics.roc_curve', 'roc_curve', (['self.y', 'probs'], {}), '(self.y, probs)\n', (8620, 8635), False, 'from sklearn.metrics import confusion_matrix, make_scorer, roc_auc_score, average_precision_score, 
classification_report, roc_curve, precision_recall_curve\n'), ((8665, 8691), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (8675, 8691), True, 'import matplotlib.pyplot as plt\n'), ((8700, 8740), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'linestyle': '"""--"""'}), "([0, 1], [0, 1], linestyle='--')\n", (8708, 8740), True, 'import matplotlib.pyplot as plt\n'), ((8764, 8782), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (8772, 8782), True, 'import matplotlib.pyplot as plt\n'), ((8825, 8842), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FPR"""'], {}), "('FPR')\n", (8835, 8842), True, 'import matplotlib.pyplot as plt\n'), ((8851, 8868), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (8861, 8868), True, 'import matplotlib.pyplot as plt\n'), ((8877, 8887), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8885, 8887), True, 'import matplotlib.pyplot as plt\n'), ((9808, 9845), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['self.y', 'probs'], {}), '(self.y, probs)\n', (9830, 9845), False, 'from sklearn.metrics import confusion_matrix, make_scorer, roc_auc_score, average_precision_score, classification_report, roc_curve, precision_recall_curve\n'), ((9875, 9901), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (9885, 9901), True, 'import matplotlib.pyplot as plt\n'), ((9910, 9954), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0.5, 0.5]'], {'linestyle': '"""--"""'}), "([0, 1], [0.5, 0.5], linestyle='--')\n", (9918, 9954), True, 'import matplotlib.pyplot as plt\n'), ((9978, 10005), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision'], {}), '(recall, precision)\n', (9986, 10005), True, 'import matplotlib.pyplot as plt\n'), ((10048, 10074), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall (TPR)"""'], {}), "('Recall (TPR)')\n", (10058, 10074), True, 
'import matplotlib.pyplot as plt\n'), ((10083, 10106), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (10093, 10106), True, 'import matplotlib.pyplot as plt\n'), ((10115, 10125), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10123, 10125), True, 'import matplotlib.pyplot as plt\n'), ((11749, 11775), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (11759, 11775), True, 'import matplotlib.pyplot as plt\n'), ((11784, 11813), 'matplotlib.pyplot.plot', 'plt.plot', (['thresholds', 'profits'], {}), '(thresholds, profits)\n', (11792, 11813), True, 'import matplotlib.pyplot as plt\n'), ((11822, 11854), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Decision threshold"""'], {}), "('Decision threshold')\n", (11832, 11854), True, 'import matplotlib.pyplot as plt\n'), ((11863, 11892), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected profit"""'], {}), "('Expected profit')\n", (11873, 11892), True, 'import matplotlib.pyplot as plt\n'), ((11901, 11911), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11909, 11911), True, 'import matplotlib.pyplot as plt\n'), ((12616, 12653), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['self.y', 'self.y_pred'], {}), '(self.y, self.y_pred)\n', (12632, 12653), False, 'from sklearn.metrics import confusion_matrix, make_scorer, roc_auc_score, average_precision_score, classification_report, roc_curve, precision_recall_curve\n'), ((13433, 13447), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (13445, 13447), True, 'import matplotlib.pyplot as plt\n'), ((13465, 13520), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(self.cm, interpolation='nearest', cmap=cmap)\n", (13475, 13520), True, 'import matplotlib.pyplot as plt\n'), ((13587, 13601), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13599, 13601), True, 'import matplotlib.pyplot 
as plt\n'), ((13655, 13699), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (13665, 13699), True, 'import matplotlib.pyplot as plt\n'), ((13708, 13739), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (13718, 13739), True, 'import matplotlib.pyplot as plt\n'), ((14041, 14059), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14057, 14059), True, 'import matplotlib.pyplot as plt\n'), ((14068, 14105), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {'fontsize': '(14)'}), "('True label', fontsize=14)\n", (14078, 14105), True, 'import matplotlib.pyplot as plt\n'), ((14142, 14184), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {'fontsize': '(14)'}), "('Predicted label', fontsize=14)\n", (14152, 14184), True, 'import matplotlib.pyplot as plt\n'), ((14236, 14246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14244, 14246), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3357), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""elasticnet"""', 'solver': '"""saga"""', 'max_iter': '(1000)', 'n_jobs': '(-1)'}), "(penalty='elasticnet', solver='saga', max_iter=1000,\n n_jobs=-1)\n", (3290, 3357), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((3376, 3424), 'sklearn.metrics.make_scorer', 'make_scorer', (['roc_auc_score'], {'needs_threshold': '(True)'}), '(roc_auc_score, needs_threshold=True)\n', (3387, 3424), False, 'from sklearn.metrics import confusion_matrix, make_scorer, roc_auc_score, average_precision_score, classification_report, roc_curve, precision_recall_curve\n'), ((3445, 3520), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['elastic_net', 'parameters'], {'scoring': 'scoring', 'cv': 'folds', 'n_jobs': '(-1)'}), '(elastic_net, parameters, scoring=scoring, cv=folds, 
n_jobs=-1)\n', (3457, 3520), False, 'from sklearn.model_selection import GridSearchCV\n'), ((3890, 3975), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""elasticnet"""', 'solver': '"""saga"""', 'max_iter': '(1000)', 'n_jobs': '(-1)'}), "(penalty='elasticnet', solver='saga', max_iter=1000,\n n_jobs=-1)\n", (3908, 3975), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((3994, 4052), 'sklearn.metrics.make_scorer', 'make_scorer', (['average_precision_score'], {'needs_threshold': '(True)'}), '(average_precision_score, needs_threshold=True)\n', (4005, 4052), False, 'from sklearn.metrics import confusion_matrix, make_scorer, roc_auc_score, average_precision_score, classification_report, roc_curve, precision_recall_curve\n'), ((4073, 4148), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['elastic_net', 'parameters'], {'scoring': 'scoring', 'cv': 'folds', 'n_jobs': '(-1)'}), '(elastic_net, parameters, scoring=scoring, cv=folds, n_jobs=-1)\n', (4085, 4148), False, 'from sklearn.model_selection import GridSearchCV\n'), ((6543, 6603), 'pandas.Series', 'pd.Series', (['model.best_estimator_.coef_[0]'], {'index': 'self.X_cols'}), '(model.best_estimator_.coef_[0], index=self.X_cols)\n', (6552, 6603), True, 'import pandas as pd\n'), ((6722, 6760), 'matplotlib.pyplot.title', 'plt.title', (['"""Coefficients in the model"""'], {}), "('Coefficients in the model')\n", (6731, 6760), True, 'import matplotlib.pyplot as plt\n'), ((6898, 6955), 'pandas.Series', 'pd.Series', (['model.best_estimator_.coef_'], {'index': 'self.X_cols'}), '(model.best_estimator_.coef_, index=self.X_cols)\n', (6907, 6955), True, 'import pandas as pd\n'), ((7074, 7112), 'matplotlib.pyplot.title', 'plt.title', (['"""Coefficients in the model"""'], {}), "('Coefficients in the model')\n", (7083, 7112), True, 'import matplotlib.pyplot as plt\n'), ((7580, 7647), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], 
{'penalty': '"""l1"""', 'C': 'a', 'solver': '"""saga"""', 'max_iter': '(1000)'}), "(penalty='l1', C=a, solver='saga', max_iter=1000)\n", (7598, 7647), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n'), ((12766, 12840), 'numpy.array', 'np.array', (['[[self.cm[1, 1], self.cm[1, 0]], [self.cm[0, 1], self.cm[0, 0]]]'], {}), '([[self.cm[1, 1], self.cm[1, 0]], [self.cm[0, 1], self.cm[0, 0]]])\n', (12774, 12840), True, 'import numpy as np\n'), ((13562, 13578), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (13571, 13578), True, 'import matplotlib.pyplot as plt\n'), ((13878, 13995), 'matplotlib.pyplot.text', 'plt.text', (['j', 'i', 'self.cm[i, j]'], {'horizontalalignment': '"""center"""', 'color': "('white' if self.cm[i, j] > thresh else 'black')"}), "(j, i, self.cm[i, j], horizontalalignment='center', color='white' if\n self.cm[i, j] > thresh else 'black')\n", (13886, 13995), True, 'import matplotlib.pyplot as plt\n'), ((14765, 14817), 'sklearn.metrics.classification_report', 'classification_report', (['self.y', 'self.y_pred'], {}), '(self.y, self.y_pred, **kwargs)\n', (14786, 14817), False, 'from sklearn.metrics import confusion_matrix, make_scorer, roc_auc_score, average_precision_score, classification_report, roc_curve, precision_recall_curve\n'), ((11543, 11569), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'preds'], {}), '(y, preds)\n', (11559, 11569), False, 'from sklearn.metrics import confusion_matrix, make_scorer, roc_auc_score, average_precision_score, classification_report, roc_curve, precision_recall_curve\n'), ((1640, 1656), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1654, 1656), False, 'from sklearn.preprocessing import StandardScaler\n')] |
#!/usr/bin/env python3
import os
import re
import cv2
import keras
import numpy as np
import pandas as pd
# Directory holding the raw '<l><r>_*.png' cage images to be split in half.
DATA_PATH = 'cage/images/'
# Saved Keras models for the left and right digit classifiers.
LEFT_PATH = 'data/left.h5'
RIGHT_PATH = 'data/right.h5'
# CSV persisting the training-set normalization statistics (means / stds).
NUM_PATH = 'data/numbers.csv'
# Documentation-only annotation: a (train, valid, test) triple of arrays.
# Note this is a plain tuple of types, not a real typing alias.
DataSet = (np.ndarray, np.ndarray, np.ndarray)
def extract() -> (list, list):
    """Collect labelled left/right image halves from DATA_PATH.

    Every file named ``<l><r>_*.png`` is read as grayscale and split
    vertically: the left half is labelled with digit ``l`` and the right
    half with digit ``r``. Returns two parallel lists of
    ``{'image': ndarray, 'number': str}`` dicts.
    """
    name_re = re.compile(r'(\d)(\d)_\w.*\.png')
    left, right = [], []
    for _, _, filenames in os.walk(DATA_PATH, topdown=False):
        for name in filenames:
            if 'png' not in name:
                continue
            match = name_re.match(name)
            if match is None or len(match.groups()) != 2:
                continue
            left_digit, right_digit = match.groups()
            # NOTE: path is built from DATA_PATH + bare filename, so images in
            # sub-directories would not be found — matches original behaviour.
            img: np.ndarray = cv2.imread(DATA_PATH + name, cv2.IMREAD_GRAYSCALE)
            _, width = img.shape
            half = int(width / 2)
            left.append({'image': img[..., :half], 'number': left_digit})
            right.append({'image': img[..., half:], 'number': right_digit})
    return left, right
def load() -> (DataSet, DataSet):
    """Build standardized (train, valid, test) splits for both image halves.

    Returns a pair of triples: one ((x, y), (x, y), (x, y)) per side.
    Also persists the training-set normalization statistics to NUM_PATH
    the first time it runs.
    """
    l_full, r_full = extract()
    # The two sides are shuffled independently; each side is a self-contained
    # dataset trained separately, so pairing need not be preserved.
    np.random.shuffle(l_full)
    np.random.shuffle(r_full)
    xl_full, yl_full = np.array([x['image'] for x in l_full]), np.array([int(x['number']) for x in l_full])
    xr_full, yr_full = np.array([x['image'] for x in r_full]), np.array([int(x['number']) for x in r_full])
    # Even three-way split: first third train, second third valid, last third test.
    # NOTE(review): only a third of the data is used for training — confirm intended.
    xl_train, yl_train = xl_full[:int(len(xl_full) / 3)], yl_full[:int(len(yl_full) / 3)]
    xr_train, yr_train = xr_full[:int(len(xr_full) / 3)], yr_full[:int(len(yr_full) / 3)]
    xl_valid, yl_valid = \
        xl_full[int(len(xl_full) / 3):int(len(xl_full) / 3 * 2)], \
        yl_full[int(len(yl_full) / 3):int(len(yl_full) / 3 * 2)]
    xr_valid, yr_valid = \
        xr_full[int(len(xr_full) / 3):int(len(xr_full) / 3 * 2)], \
        yr_full[int(len(yr_full) / 3):int(len(yr_full) / 3 * 2)]
    xl_test, yl_test = \
        xl_full[int(len(xl_full) / 3 * 2):], \
        yl_full[int(len(yl_full) / 3 * 2):]
    xr_test, yr_test = \
        xr_full[int(len(xr_full) / 3 * 2):], \
        yr_full[int(len(yr_full) / 3 * 2):]
    # Per-pixel mean/std computed on the TRAINING split only (keepdims keeps the
    # arrays broadcastable); the epsilon guards against division by zero.
    xl_mean, xr_mean = \
        xl_train.mean(axis=0, keepdims=True), \
        xr_train.mean(axis=0, keepdims=True)
    xl_std, xr_std = \
        xl_train.std(axis=0, keepdims=True) + 1e-7, \
        xr_train.std(axis=0, keepdims=True) + 1e-7
    # Standardize all three splits with the training statistics.
    xl_train, xr_train = (xl_train - xl_mean) / xl_std, (xr_train - xr_mean) / xr_std
    xl_valid, xr_valid = (xl_valid - xl_mean) / xl_std, (xr_valid - xr_mean) / xr_std
    xl_test, xr_test = (xl_test - xl_mean) / xl_std, (xr_test - xr_mean) / xr_std
    # Add a trailing channel axis for the Conv2D input.
    xl_train, xr_train = xl_train[..., np.newaxis], xr_train[..., np.newaxis]
    xl_valid, xr_valid = xl_valid[..., np.newaxis], xr_valid[..., np.newaxis]
    xl_test, xr_test = xl_test[..., np.newaxis], xr_test[..., np.newaxis]
    # Persist the normalization stats once, for use at inference time.
    # NOTE(review): written only if the file is absent, so the stats go stale
    # after a re-shuffle/retrain unless the CSV is deleted — confirm intended.
    if not os.path.exists(NUM_PATH):
        nums = {
            'l_mean': xl_mean.tolist(),
            'l_std': xl_std.tolist(),
            'r_mean': xr_mean.tolist(),
            'r_std': xr_std.tolist(),
        }
        with open(NUM_PATH, 'xt', encoding='utf-8', newline='\n') as f:
            pd.DataFrame([nums]).to_csv(f, index=False, line_terminator='\n')
    return ((xl_train, yl_train), (xl_valid, yl_valid), (xl_test, yl_test)), \
        ((xr_train, yr_train), (xr_valid, yr_valid), (xr_test, yr_test))
def train():
    """Train (or just evaluate) the left- and right-image classifiers.

    If both saved models already exist, they are loaded and evaluated on
    the test split; otherwise two CNNs with identical architecture are
    trained from scratch, saved, and evaluated.
    """
    left_data, right_data = load()
    (xl_train, yl_train), (xl_valid, yl_valid), (xl_test, yl_test) = left_data
    (xr_train, yr_train), (xr_valid, yr_valid), (xr_test, yr_test) = right_data
    if os.path.exists(LEFT_PATH) and os.path.exists(RIGHT_PATH):
        # Both models were trained before: only report test performance.
        l_model = keras.models.load_model(LEFT_PATH)
        r_model = keras.models.load_model(RIGHT_PATH)
        print(l_model.evaluate(xl_test, yl_test))
        print(r_model.evaluate(xr_test, yr_test))
        return
    # Build the architecture once and clone it so both sides train
    # independent weights of the same network.
    conv_kwargs = dict(activation='relu', padding='same')
    l_model = keras.models.Sequential([
        keras.layers.Conv2D(128, 24, input_shape=[70, 100, 1], **conv_kwargs),
        keras.layers.MaxPooling2D(2),
        keras.layers.Conv2D(256, 12, **conv_kwargs),
        keras.layers.Conv2D(256, 12, **conv_kwargs),
        keras.layers.MaxPooling2D(2),
        keras.layers.Conv2D(512, 12, **conv_kwargs),
        keras.layers.Conv2D(512, 12, **conv_kwargs),
        keras.layers.MaxPooling2D(2),
        keras.layers.Flatten(),
        keras.layers.Dense(256, activation='relu'),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(10, activation='softmax')
    ])
    r_model = keras.models.clone_model(l_model)
    for model, train_xy, valid_xy, path in (
            (l_model, (xl_train, yl_train), (xl_valid, yl_valid), LEFT_PATH),
            (r_model, (xr_train, yr_train), (xr_valid, yr_valid), RIGHT_PATH)):
        model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
                      metrics=['accuracy'])
        model.fit(*train_xy, epochs=10, validation_data=valid_xy)
        model.save(path)
    print(l_model.evaluate(xl_test, yl_test))
    print(r_model.evaluate(xr_test, yr_test))
if __name__ == '__main__':
    # Script entry point: trains the models, or only evaluates them when
    # both saved model files already exist (see train()).
    train()
| [
"os.path.exists",
"keras.layers.Conv2D",
"keras.models.load_model",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"pandas.DataFrame",
"re.match",
"numpy.array",
"keras.layers.Dropout",
"keras.layers.Dense",
"keras.models.clone_model",
"cv2.imread",
"os.walk",
"numpy.random.shuffle"
... | [((389, 422), 'os.walk', 'os.walk', (['DATA_PATH'], {'topdown': '(False)'}), '(DATA_PATH, topdown=False)\n', (396, 422), False, 'import os\n'), ((1154, 1179), 'numpy.random.shuffle', 'np.random.shuffle', (['l_full'], {}), '(l_full)\n', (1171, 1179), True, 'import numpy as np\n'), ((1184, 1209), 'numpy.random.shuffle', 'np.random.shuffle', (['r_full'], {}), '(r_full)\n', (1201, 1209), True, 'import numpy as np\n'), ((4776, 4809), 'keras.models.clone_model', 'keras.models.clone_model', (['l_model'], {}), '(l_model)\n', (4800, 4809), False, 'import keras\n'), ((1233, 1271), 'numpy.array', 'np.array', (["[x['image'] for x in l_full]"], {}), "([x['image'] for x in l_full])\n", (1241, 1271), True, 'import numpy as np\n'), ((1341, 1379), 'numpy.array', 'np.array', (["[x['image'] for x in r_full]"], {}), "([x['image'] for x in r_full])\n", (1349, 1379), True, 'import numpy as np\n'), ((2902, 2926), 'os.path.exists', 'os.path.exists', (['NUM_PATH'], {}), '(NUM_PATH)\n', (2916, 2926), False, 'import os\n'), ((3670, 3695), 'os.path.exists', 'os.path.exists', (['LEFT_PATH'], {}), '(LEFT_PATH)\n', (3684, 3695), False, 'import os\n'), ((3700, 3726), 'os.path.exists', 'os.path.exists', (['RIGHT_PATH'], {}), '(RIGHT_PATH)\n', (3714, 3726), False, 'import os\n'), ((3746, 3780), 'keras.models.load_model', 'keras.models.load_model', (['LEFT_PATH'], {}), '(LEFT_PATH)\n', (3769, 3780), False, 'import keras\n'), ((3799, 3834), 'keras.models.load_model', 'keras.models.load_model', (['RIGHT_PATH'], {}), '(RIGHT_PATH)\n', (3822, 3834), False, 'import keras\n'), ((530, 553), 're.match', 're.match', (['pattern', 'file'], {}), '(pattern, file)\n', (538, 553), False, 'import re\n'), ((742, 780), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (752, 780), False, 'import cv2\n'), ((3999, 4093), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(128)', '(24)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'input_shape': '[70, 
100, 1]'}), "(128, 24, activation='relu', padding='same', input_shape\n =[70, 100, 1])\n", (4018, 4093), False, 'import keras\n'), ((4098, 4126), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', (['(2)'], {}), '(2)\n', (4123, 4126), False, 'import keras\n'), ((4136, 4199), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(256)', '(12)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256, 12, activation='relu', padding='same')\n", (4155, 4199), False, 'import keras\n'), ((4209, 4272), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(256)', '(12)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256, 12, activation='relu', padding='same')\n", (4228, 4272), False, 'import keras\n'), ((4282, 4310), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', (['(2)'], {}), '(2)\n', (4307, 4310), False, 'import keras\n'), ((4320, 4383), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(512)', '(12)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(512, 12, activation='relu', padding='same')\n", (4339, 4383), False, 'import keras\n'), ((4393, 4456), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(512)', '(12)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(512, 12, activation='relu', padding='same')\n", (4412, 4456), False, 'import keras\n'), ((4466, 4494), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', (['(2)'], {}), '(2)\n', (4491, 4494), False, 'import keras\n'), ((4504, 4526), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (4524, 4526), False, 'import keras\n'), ((4536, 4578), 'keras.layers.Dense', 'keras.layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (4554, 4578), False, 'import keras\n'), ((4588, 4613), 'keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (4608, 4613), False, 'import keras\n'), ((4623, 4665), 'keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, 
activation='relu')\n", (4641, 4665), False, 'import keras\n'), ((4675, 4700), 'keras.layers.Dropout', 'keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (4695, 4700), False, 'import keras\n'), ((4710, 4754), 'keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (4728, 4754), False, 'import keras\n'), ((3195, 3215), 'pandas.DataFrame', 'pd.DataFrame', (['[nums]'], {}), '([nums])\n', (3207, 3215), True, 'import pandas as pd\n')] |
import numpy as np
import termcolor
import cnc_structs
def string_array_to_char_array(m):
    """Render an array of byte strings as one character per cell.

    Each cell contributes the first ASCII character of its byte string
    (a space for empty strings); rows are joined with newlines.
    """
    chars = []
    for cell in m.flat:
        chars.append(cell.decode('ascii')[0] if len(cell) > 0 else ' ')
    grid = np.array(chars).reshape(m.shape)
    return '\n'.join(''.join(row) for row in grid)
def staticmap_array(map: cnc_structs.CNCMapDataStruct) -> np.ndarray:
    """Collect every static cell's template-type name.

    Returns a (MapCellHeight, MapCellWidth) array of byte strings
    (truncated to 32 bytes by the 'S32' dtype).
    """
    cell_count = map.MapCellHeight * map.MapCellWidth
    names = [map.StaticCells[index].TemplateTypeName for index in range(cell_count)]
    return np.array(names, dtype='S32').reshape((map.MapCellHeight, map.MapCellWidth))
def tiberium_array(map: cnc_structs.CNCDynamicMapStruct, static_map):
    """Boolean (H, W) mask of the cells whose dynamic entry is a resource."""
    height = static_map.MapCellHeight
    width = static_map.MapCellWidth
    mask = np.zeros((height, width), dtype=bool)
    for entry in map.Entries:
        row = entry.CellY - static_map.MapCellY
        col = entry.CellX - static_map.MapCellX
        mask[row, col] = entry.IsResource
    return mask
def f(dynamic_map, layers, occupiers, shroud_array, map_shape, House, AllyFlags):
    """Flatten the game state into per-cell map arrays plus a list of actors.

    Fixed-position content (terrain objects and most dynamic-map overlays)
    is written into two flat per-cell arrays of asset names and shape
    indices; every other visible object becomes an 'actor' dict.

    Args:
        dynamic_map: dynamic map struct whose Entries hold overlays.
        layers: object list struct (units, buildings, terrain, ...).
        occupiers: per-cell occupier struct, indexed row-major.
        shroud_array: currently unused here -- NOTE(review): shroud is not
            obeyed yet (see TODO below).
        map_shape: (MapCellHeight, MapCellWidth) of the map.
        House: index of the observing player's house.
        AllyFlags: bit mask of the houses allied with the observer.

    Returns:
        (assets, shapes, actors): two arrays reshaped to map_shape and the
        list of actor dicts.
    """
    # TODO Owner should be Color, not House
    MapCellHeight, MapCellWidth = map_shape
    fixed_pos_map_assets = np.zeros(MapCellHeight * MapCellWidth, dtype='S32')
    fixed_pos_map_shapes = np.zeros(MapCellHeight * MapCellWidth, dtype='uint8')
    actors = []
    terrains = {}
    for o in layers.Objects:
        if o.Type == 5:  # terrain: remember asset/shape, placed per cell below
            terrains[o.ID] = (o.AssetName, o.ShapeIndex)
        # exclude ANIM and BULLET?
        else:
            # Visible if owned by an ally or not fully cloaked
            # (Cloak == 2 apparently means CLOAKED).
            if (ord(o.Owner) & AllyFlags) or o.Cloak != 2:  # CLOAKED
                # TODO obey shroud
                # buildings have multiple cells, shroud is a bit tricky there
                actors.append(
                    {
                        'Asset': o.AssetName.decode('ascii'),
                        'Shape': o.ShapeIndex,
                        'Position': (o.PositionY, o.PositionX),
                        'Owner': ord(o.Owner),
                        'Strength': o.Strength,
                        # Selection mask carries one bit per house.
                        'IsSelected': (o.IsSelectedMask & (1 << House)) != 0,
                        'ControlGroup': o.ControlGroup,
                        'IsRepairing': o.IsRepairing,
                        'IsPrimaryFactory': o.IsPrimaryFactory,
                        'Cloak': o.Cloak,
                        # Pad the pip list to MAX_OBJECT_PIPS with -1.
                        'Pips': list(o.Pips[: o.MaxPips])
                        + [-1] * (cnc_structs.MAX_OBJECT_PIPS - o.MaxPips),
                    }
                )
    for entry in dynamic_map.Entries:
        if entry.IsOverlay and entry.Type >= 1 and entry.Type <= 5:  # walls
            # Walls become actors; walls not owned by us get owner 255.
            actors.append(
                {
                    'Asset': entry.AssetName.decode('ascii'),
                    'Shape': entry.ShapeIndex,
                    'Position': (entry.PositionY, entry.PositionX),
                    'Owner': ord(entry.Owner) if ord(entry.Owner) == House else 255,
                    'Strength': 0,
                    'IsSelected': False,
                    'ControlGroup': -1,
                    'IsRepairing': False,
                    'IsPrimaryFactory': False,
                    'Cloak': 0,
                    'Pips': [-1] * cnc_structs.MAX_OBJECT_PIPS,
                }
            )
        else:
            # Any other dynamic-map entry is fixed-position content.
            fixed_pos_map_assets[entry.CellY * MapCellWidth + entry.CellX] = entry.AssetName
            fixed_pos_map_shapes[entry.CellY * MapCellWidth + entry.CellX] = entry.ShapeIndex
    for i, o in enumerate(occupiers.Entries):
        if len(o.Objects) >= 1 and o.Objects[0].Type == 5:  # terrain
            # Terrain is expected to occupy exactly one cell.
            assert len(o.Objects) == 1
            fixed_pos_map_assets[i], fixed_pos_map_shapes[i] = terrains[o.Objects[0].ID]
    return (
        fixed_pos_map_assets.reshape(map_shape),
        fixed_pos_map_shapes.reshape(map_shape),
        actors,
    )
def layers_array(objects: cnc_structs.CNCObjectListStruct, static_map):
    """Rasterize object types onto the map grid (0 where no object sits)."""
    grid = np.zeros((static_map.MapCellHeight, static_map.MapCellWidth), dtype=int)
    for obj in objects.Objects:
        row = obj.CellY - static_map.MapCellY
        col = obj.CellX - static_map.MapCellX
        grid[row, col] = obj.Type
    return grid
def layers_list(layers, static_map):
    """Describe every real object (Type > 0) as a plain dict.

    Cell coordinates are translated to map-local indices and the occupancy
    footprint is trimmed to its declared length.
    """
    described = []
    for obj in layers.Objects:
        if obj.Type <= 0:
            continue
        described.append({
            'Owner': ord(obj.Owner),
            'Asset': obj.AssetName.decode('ascii'),
            'Type': obj.Type,
            'ID': obj.ID,
            'X': obj.CellX - static_map.MapCellX,
            'Y': obj.CellY - static_map.MapCellY,
            'OccupyList': obj.OccupyList[: obj.OccupyListLength],
        })
    return described
def units_and_buildings_dict(layers):
    """Index the objects with Type in 1..4 by their (Type, ID) pair."""
    lookup = {}
    for obj in layers.Objects:
        if 1 <= obj.Type <= 4:
            lookup[(obj.Type, obj.ID)] = obj
    return lookup
def layers_term(layers, dynamic_map, static_map, occupiers):
    """Render the map as one colored terminal character per cell.

    The background encodes the cell: green for tiberium, blue for water or
    river templates, white for slopes, grey otherwise.  An occupied cell
    shows the first letter of the occupier's asset name, colored by owner
    (lowercase for object types 1-3, uppercase for type 4).  The first and
    last map rows are skipped; each remaining row is framed with '|'.
    """
    units_and_buildings = units_and_buildings_dict(layers)
    tiberium = tiberium_array(dynamic_map, static_map)
    text = ''
    for i, (occupier, is_tiberium, static_cell) in enumerate(
        zip(occupiers.Entries, tiberium.flat, static_map.StaticCells)
    ):
        # Skip the first and the last row of the map.
        if i < static_map.MapCellWidth or i >= static_map.MapCellWidth * (
            static_map.MapCellHeight - 1
        ):
            continue
        cell_text = ' '
        color = 'white'
        background = 'on_green' if is_tiberium else 'on_grey'
        if i % static_map.MapCellWidth == 0:
            cell_text = '|'  # left border of the row
        elif i % static_map.MapCellWidth == static_map.MapCellWidth - 1:
            cell_text = '|\n'  # right border terminates the row
        elif static_cell.TemplateTypeName.startswith(
            b'W'
        ) or static_cell.TemplateTypeName.startswith(
            b'RV'
        ):  # river or water
            background = 'on_blue'
        elif static_cell.TemplateTypeName.startswith(
            b'S'
        ) and not static_cell.TemplateTypeName.startswith(
            b'SH'
        ):  # slope but not shore
            background = 'on_white'
        if len(occupier.Objects) > 0:
            occupier = occupier.Objects[0]
            if (occupier.Type, occupier.ID) in units_and_buildings:
                occupier = units_and_buildings[(occupier.Type, occupier.ID)]
                # Owner index offset by 4 into the palette
                # (house indices presumably start at 4 -- verify).
                color = ['yellow', 'blue', 'red', 'white', 'magenta', 'cyan'][
                    ord(occupier.Owner) - 4
                ]
                cell_text = occupier.AssetName.decode('ascii')[0]
                # Types 1-3 lowercase, type 4 uppercase.
                if occupier.Type >= 1 and occupier.Type <= 3:
                    cell_text = cell_text.lower()
                elif occupier.Type == 4:
                    cell_text = cell_text.upper()
        text += termcolor.colored(cell_text, color, background)
    return text.rstrip('\n')
def sidebar_term(sidebar: cnc_structs.CNCSidebarStruct):
    """Summarize the sidebar as (status line + first build column, '|', second build column)."""
    tib_pct = (100 * sidebar.Tiberium) // sidebar.MaxTiberium if sidebar.MaxTiberium > 0 else 0
    pow_pct = (100 * sidebar.PowerDrained) // sidebar.PowerProduced if sidebar.PowerProduced > 0 else 0
    first_count = sidebar.EntryCount[0]
    second_count = sidebar.EntryCount[1]
    column_one = ', '.join(
        sidebar.Entries[i].AssetName.decode('ascii') for i in range(first_count)
    )
    column_two = ', '.join(
        sidebar.Entries[i].AssetName.decode('ascii')
        for i in range(first_count, first_count + second_count)
    )
    status = (
        f'Tiberium: {tib_pct:3d}% '
        f'Power: {pow_pct:3d}%'
        f' Credits: {sidebar.Credits} | ' + column_one
    )
    return status, '|', column_two
def players_units(layers, house):
    """All selectable objects owned by *house*."""
    owned = []
    for obj in layers.Objects:
        if ord(obj.Owner) == house and obj.IsSelectable:
            owned.append(obj)
    return owned
def shroud_array(shrouds: cnc_structs.CNCShroudStruct, static_map_shape):
    """Boolean visibility mask reshaped to the static map's (H, W)."""
    visible = [entry.IsVisible for entry in shrouds.Entries]
    return np.array(visible, dtype=bool).reshape(static_map_shape)
def occupiers_list(occupiers_struct, static_map):
    """List the occupied cells with map-local coordinates and their contents."""
    width = static_map.MapCellWidth
    occupied = []
    for index, entry in enumerate(occupiers_struct.Entries):
        if entry.Count > 0:
            y, x = divmod(index, width)
            occupied.append({'X': x, 'Y': y, 'Objects': entry.Objects})
    return occupied
def occupiers_array(occupiers_struct, static_map):
    """Pack each cell's first occupier as (Type << 32) + ID.

    Empty cells yield (-1 << 32) + (-1); the result is reshaped to (H, W).
    """
    packed = []
    for entry in occupiers_struct.Entries:
        if len(entry.Objects) > 0:
            head = entry.Objects[0]
            packed.append((head.Type << 32) + head.ID)
        else:
            packed.append((-1 << 32) + -1)
    shape = (static_map.MapCellHeight, static_map.MapCellWidth)
    return np.array(packed).reshape(shape)
| [
"numpy.array",
"numpy.zeros",
"termcolor.colored"
] | [((317, 376), 'numpy.zeros', 'np.zeros', (['(map.MapCellHeight * map.MapCellWidth)'], {'dtype': '"""S32"""'}), "(map.MapCellHeight * map.MapCellWidth, dtype='S32')\n", (325, 376), True, 'import numpy as np\n'), ((628, 701), 'numpy.zeros', 'np.zeros', (['(static_map.MapCellHeight, static_map.MapCellWidth)'], {'dtype': 'bool'}), '((static_map.MapCellHeight, static_map.MapCellWidth), dtype=bool)\n', (636, 701), True, 'import numpy as np\n'), ((1074, 1125), 'numpy.zeros', 'np.zeros', (['(MapCellHeight * MapCellWidth)'], {'dtype': '"""S32"""'}), "(MapCellHeight * MapCellWidth, dtype='S32')\n", (1082, 1125), True, 'import numpy as np\n'), ((1153, 1206), 'numpy.zeros', 'np.zeros', (['(MapCellHeight * MapCellWidth)'], {'dtype': '"""uint8"""'}), "(MapCellHeight * MapCellWidth, dtype='uint8')\n", (1161, 1206), True, 'import numpy as np\n'), ((3806, 3878), 'numpy.zeros', 'np.zeros', (['(static_map.MapCellHeight, static_map.MapCellWidth)'], {'dtype': 'int'}), '((static_map.MapCellHeight, static_map.MapCellWidth), dtype=int)\n', (3814, 3878), True, 'import numpy as np\n'), ((6471, 6518), 'termcolor.colored', 'termcolor.colored', (['cell_text', 'color', 'background'], {}), '(cell_text, color, background)\n', (6488, 6518), False, 'import termcolor\n'), ((7423, 7491), 'numpy.array', 'np.array', (['[entry.IsVisible for entry in shrouds.Entries]'], {'dtype': 'bool'}), '([entry.IsVisible for entry in shrouds.Entries], dtype=bool)\n', (7431, 7491), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# *****************************************************************************
# ufit, a universal scattering fitting suite
#
# Copyright (c) 2013-2019, <NAME> and contributors. All rights reserved.
# Licensed under a 2-clause BSD license, see LICENSE.
# *****************************************************************************
"""Fit result class."""
import copy
from numpy import array, linspace, ravel
from matplotlib import pyplot as pl
from ufit.utils import cached_property
from ufit.plotting import DataPlotter
from ufit.pycompat import iteritems
__all__ = ['Result']
class Result(object):
    """Outcome of a single fit.

    Bundles the fitted data, the model, the final parameter set, the fit
    message and the reduced chi-square, and offers convenient accessors
    plus printing/plotting helpers.
    """

    def __init__(self, success, data, model, params, message, chisqr):
        self.success = success
        self.data = data
        self.model = model
        self.params = params
        self.message = message
        self.chisqr = chisqr

    def __getitem__(self, key):
        # Allow result['parname'] as a shortcut for the parameter object.
        return self.paramdict[key]

    def copy(self):
        """Return an independent deep copy of this result."""
        return copy.deepcopy(self)

    @cached_property
    def paramdict(self):
        """Dictionary mapping parameter names to parameter objects."""
        return {param.name: param for param in self.params}

    @cached_property
    def paramvalues(self):
        """Dictionary mapping parameter names to fitted values."""
        return {param.name: param.value for param in self.params}

    @cached_property
    def paramerrors(self):
        """Dictionary mapping parameter names to errors."""
        return {param.name: param.error for param in self.params}

    @cached_property
    def values(self):
        """List of all parameter values."""
        return [param.value for param in self.params]

    @cached_property
    def errors(self):
        """List of all parameter errors."""
        return [param.error for param in self.params]

    @cached_property
    def results(self):
        """Parameter values, then parameter errors, then the chi-square."""
        return self.values + self.errors + [self.chisqr]

    @cached_property
    def residuals(self):
        """Array of residuals (model evaluated at data X minus data Y)."""
        return self.model.fcn(self.paramvalues, self.data.x) - self.data.y

    @cached_property
    def xx(self):
        """1000 evenly spaced X values spanning the data's X range."""
        return linspace(self.data.x.min(), self.data.x.max(), 1000)

    @cached_property
    def yy(self):
        """The model evaluated at self.xx."""
        return self.model.fcn(self.paramvalues, self.xx)

    def printout(self):
        """Print a table with the fit status, every parameter (one per
        line, including fixed ones) and the chi^2/NDF value.
        """
        print('Fit results for %s' % self.data.name)
        if not self.success:
            print('FIT FAILED: ' + self.message)
        elif self.message:
            print('> %s' % self.message)
        print('-' * 80)
        for param in self.params:
            print(param)
        print('%-15s = %10.4g' % ('chi^2/NDF', self.chisqr))
        print('=' * 80)

    def plot(self, axes=None, params=True, multi=False):
        """Plot the data and model together in the current figure or *axes*.

        If *params* is true (and *multi* is not), parameter values are
        drawn as text as well.
        """
        plotter = DataPlotter(axes=axes)
        color = plotter.plot_data(self.data, multi=multi)
        plotter.plot_model(self.model, self.data, paramvalues=self.paramvalues,
                           labels=not multi, color=color)
        if params and not multi:
            plotter.plot_params(self.params, self.chisqr)

    def plotfull(self, axes=None, params=True):
        """Plot the data and model including all model subcomponents in the
        current figure or the given *axes*.

        If *params* is true, parameter values are drawn as text as well.
        """
        plotter = DataPlotter(axes=axes)
        plotter.plot_data(self.data)
        plotter.plot_model_full(self.model, self.data,
                                paramvalues=self.paramvalues)
        if params:
            plotter.plot_params(self.params, self.chisqr)
def calc_panel_size(num):
    """Return a (columns, rows) grid that can hold *num* panels.

    Chooses the first near-square layout from a precomputed table that is
    large enough; beyond the table (100 panels), falls back to 10 rows.
    """
    layouts = ((1, 1), (2, 1), (2, 2), (3, 2), (3, 3), (4, 3), (5, 3), (4, 4),
               (5, 4), (6, 4), (5, 5), (6, 5), (7, 5), (6, 6), (8, 5), (7, 6),
               (9, 5), (8, 6), (7, 7), (9, 6), (8, 7), (9, 7), (8, 8), (10, 7),
               (9, 8), (11, 7), (9, 9), (11, 8), (10, 9), (12, 8), (11, 9),
               (10, 10))
    for cols, rows in layouts:
        if cols * rows >= num:
            return cols, rows
    return num // 10 + 1, 10
class MultiResult(list):
    """A list of Result objects coming from fitting several data sets."""

    def plot(self):
        """Plot every individual result into its own panel of one figure."""
        ncols, nrows = calc_panel_size(len(self))
        fig, axarray = pl.subplots(nrows, ncols)
        for result, axes in zip(self, ravel(axarray)):
            result.plotfull(axes=axes)
        # pl.tight_layout()

    @cached_property
    def datavalues(self):
        """Map each data metadata key to the list of its per-result values.

        Assumes every result's data carries the same metadata keys as the
        first one (unknown keys in later results raise KeyError).
        """
        collected = {key: [value] for key, value in iteritems(self[0].data.meta)}
        for result in self[1:]:
            for key, value in iteritems(result.data.meta):
                collected[key].append(value)
        return collected

    @cached_property
    def paramvalues(self):
        """Map parameter names to arrays of per-result values."""
        collected = {param.name: [param.value] for param in self[0].params}
        for result in self[1:]:
            for param in result.params:
                collected[param.name].append(param.value)
        return {name: array(values) for name, values in collected.items()}

    @cached_property
    def paramerrors(self):
        """Map parameter names to arrays of per-result errors."""
        collected = {param.name: [param.error] for param in self[0].params}
        for result in self[1:]:
            for param in result.params:
                collected[param.name].append(param.error)
        return {name: array(errors) for name, errors in collected.items()}

    def printout(self):
        """Print the parameters marked as global ("overall") in the fit."""
        print('OVERALL fit results')
        print('-' * 80)
        for param in self[0].params:
            if param.overall:
                print(param)
        print('=' * 80)

    def plot_param(self, xname, pname):
        """Errorbar plot of parameter *pname* against metadata value *xname*."""
        xvalues = self.datavalues[xname]
        yvalues = self.paramvalues[pname]
        yerrors = self.paramerrors[pname]
        pl.errorbar(xvalues, yvalues, yerrors, fmt='o-')
        pl.xlabel(xname)
        pl.ylabel(pname)
| [
"copy.deepcopy",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.errorbar",
"numpy.ravel",
"ufit.plotting.DataPlotter",
"matplotlib.pyplot.subplots",
"ufit.pycompat.iteritems"
] | [((983, 1002), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (996, 1002), False, 'import copy\n'), ((4222, 4244), 'ufit.plotting.DataPlotter', 'DataPlotter', ([], {'axes': 'axes'}), '(axes=axes)\n', (4233, 4244), False, 'from ufit.plotting import DataPlotter\n'), ((4790, 4812), 'ufit.plotting.DataPlotter', 'DataPlotter', ([], {'axes': 'axes'}), '(axes=axes)\n', (4801, 4812), False, 'from ufit.plotting import DataPlotter\n'), ((5603, 5632), 'matplotlib.pyplot.subplots', 'pl.subplots', (['dims[1]', 'dims[0]'], {}), '(dims[1], dims[0])\n', (5614, 5632), True, 'from matplotlib import pyplot as pl\n'), ((7107, 7207), 'matplotlib.pyplot.errorbar', 'pl.errorbar', (['self.datavalues[xname]', 'self.paramvalues[pname]', 'self.paramerrors[pname]'], {'fmt': '"""o-"""'}), "(self.datavalues[xname], self.paramvalues[pname], self.\n paramerrors[pname], fmt='o-')\n", (7118, 7207), True, 'from matplotlib import pyplot as pl\n'), ((7231, 7247), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['xname'], {}), '(xname)\n', (7240, 7247), True, 'from matplotlib import pyplot as pl\n'), ((7256, 7272), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['pname'], {}), '(pname)\n', (7265, 7272), True, 'from matplotlib import pyplot as pl\n'), ((5668, 5682), 'numpy.ravel', 'ravel', (['axarray'], {}), '(axarray)\n', (5673, 5682), False, 'from numpy import array, linspace, ravel\n'), ((5920, 5944), 'ufit.pycompat.iteritems', 'iteritems', (['res.data.meta'], {}), '(res.data.meta)\n', (5929, 5944), False, 'from ufit.pycompat import iteritems\n'), ((6375, 6386), 'numpy.array', 'array', (['d[k]'], {}), '(d[k])\n', (6380, 6386), False, 'from numpy import array, linspace, ravel\n'), ((6785, 6796), 'numpy.array', 'array', (['d[k]'], {}), '(d[k])\n', (6790, 6796), False, 'from numpy import array, linspace, ravel\n'), ((5837, 5865), 'ufit.pycompat.iteritems', 'iteritems', (['self[0].data.meta'], {}), '(self[0].data.meta)\n', (5846, 5865), False, 'from ufit.pycompat import iteritems\n')] |
'''
training for Navier Stokes with Reynolds number 500, 0.5 second time period
'''
import csv
import random
from timeit import default_timer
import deepxde as dde
from deepxde.optimizers.config import set_LBFGS_options
import numpy as np
from baselines.data import NSdata
import tensorflow as tf
# Reynolds number of the flow; 1/Re is the viscosity factor in the PDE.
Re = 500
def forcing(x):
    """External forcing of the vorticity equation: -4*cos(4*y).

    The input columns are (x, y, t); x[:, 1:2] selects the y coordinate.
    """
    return - 4 * tf.math.cos(4 * x[:, 1:2])
def pde(x, u):
    '''
    Residuals of the 2-D incompressible Navier-Stokes equations in
    velocity-vorticity form (module-level Reynolds number ``Re``).

    Args:
        x: (x, y, t)
        u: (u, v, w), where (u,v) is the velocity, w is the vorticity
    Returns: list of pde loss:
        eqn1: vorticity transport,
              w_t + u w_x + v w_y - (1/Re)(w_xx + w_yy) - forcing
        eqn2: continuity (divergence-free velocity), u_x + v_y
        eqn3/eqn4: consistency of velocity with vorticity,
              laplace(u) + w_y  and  laplace(v) - w_x
    '''
    u_vel, v_vel, w = u[:, 0:1], u[:, 1:2], u[:, 2:3]
    # Automatic-differentiation derivatives; per the variable names,
    # jacobian(u, x, i=a, j=b) is d u[a] / d x[b] with x = (x, y, t).
    u_vel_x = dde.grad.jacobian(u, x, i=0, j=0)
    u_vel_xx = dde.grad.hessian(u, x, component=0, i=0, j=0)
    u_vel_yy = dde.grad.hessian(u, x, component=0, i=1, j=1)
    v_vel_y = dde.grad.jacobian(u, x, i=1, j=1)
    v_vel_xx = dde.grad.hessian(u, x, component=1, i=0, j=0)
    v_vel_yy = dde.grad.hessian(u, x, component=1, i=1, j=1)
    w_vor_x = dde.grad.jacobian(u, x, i=2, j=0)
    w_vor_y = dde.grad.jacobian(u, x, i=2, j=1)
    w_vor_t = dde.grad.jacobian(u, x, i=2, j=2)
    w_vor_xx = dde.grad.hessian(u, x, component=2, i=0, j=0)
    w_vor_yy = dde.grad.hessian(u, x, component=2, i=1, j=1)
    eqn1 = w_vor_t + u_vel * w_vor_x + v_vel * w_vor_y - \
           1 / Re * (w_vor_xx + w_vor_yy) - forcing(x)
    eqn2 = u_vel_x + v_vel_y
    eqn3 = u_vel_xx + u_vel_yy + w_vor_y
    eqn4 = v_vel_xx + v_vel_yy - w_vor_x
    return [eqn1, eqn2, eqn3, eqn4]
def eval(model, dataset,
         step, time_cost,
         offset, config):
    '''
    Evaluate test error for the model over the dataset and log it.

    Computes the relative L2 error of both velocity components and the
    vorticity on the test points, prints them, and appends one CSV row
    (offset, u_err, v_err, vor_err, step, time_cost) to the log file.

    Args:
        model: trained dde.Model used for prediction.
        dataset: NSdata instance providing get_test_xyt().
        step: current training step (logged).
        time_cost: wall time of the last training segment (logged).
        offset: index of the data instance being fitted (logged).
        config: run config; config['log']['logfile'] is the CSV path.
    '''
    test_points, test_vals = dataset.get_test_xyt()
    pred = model.predict(test_points)
    vel_u_truth = test_vals[:, 0]
    vel_v_truth = test_vals[:, 1]
    vor_truth = test_vals[:, 2]

    vel_u_pred = pred[:, 0]
    vel_v_pred = pred[:, 1]
    vor_pred = pred[:, 2]

    u_err = dde.metrics.l2_relative_error(vel_u_truth, vel_u_pred)
    v_err = dde.metrics.l2_relative_error(vel_v_truth, vel_v_pred)
    vor_err = dde.metrics.l2_relative_error(vor_truth, vor_pred)

    print(f'Instance index : {offset}')
    print(f'L2 relative error in u: {u_err}')
    print(f'L2 relative error in v: {v_err}')
    print(f'L2 relative error in vorticity: {vor_err}')
    # newline='' is required when handing a file object to csv.writer;
    # without it, extra blank rows appear on Windows.
    with open(config['log']['logfile'], 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow([offset, u_err, v_err, vor_err, step, time_cost])
def train(offset, config, args):
    """Fit a PINN to one Navier-Stokes instance, logging test errors.

    Args:
        offset: index of the data instance to load from the dataset file(s).
        config: dict with 'data', 'model', 'train' and 'log' sections.
        args: command-line arguments (not used in this function).
    """
    seed = random.randint(1, 10000)
    print(f'Random seed :{seed}')
    np.random.seed(seed)
    # construct dataloader; a second data file is optional
    data_config = config['data']
    dataset_kwargs = dict(datapath1=data_config['datapath'],
                          offset=offset, num=1,
                          nx=data_config['nx'], nt=data_config['nt'],
                          sub=data_config['sub'], sub_t=data_config['sub_t'],
                          vel=True,
                          t_interval=data_config['time_interval'])
    if 'datapath2' in data_config:
        dataset_kwargs['datapath2'] = data_config['datapath2']
    dataset = NSdata(**dataset_kwargs)

    # Space-time domain: [0, 2pi]^2 x [0, T].
    spatial_domain = dde.geometry.Rectangle(xmin=[0, 0], xmax=[2 * np.pi, 2 * np.pi])
    temporal_domain = dde.geometry.TimeDomain(0, data_config['time_interval'])
    st_domain = dde.geometry.GeometryXTime(spatial_domain, temporal_domain)

    # Supervise u, v (velocity) and w (vorticity) on the boundary points.
    num_boundary_points = dataset.S
    points = dataset.get_boundary_points(num_x=num_boundary_points,
                                         num_y=num_boundary_points,
                                         num_t=dataset.T)
    boundary_conditions = [
        dde.PointSetBC(points=points,
                       values=dataset.get_boundary_value(component=component),
                       component=component)
        for component in range(3)
    ]

    data = dde.data.TimePDE(
        st_domain,
        pde,
        boundary_conditions,
        num_domain=config['train']['num_domain'],
        num_boundary=config['train']['num_boundary'],
        num_test=config['train']['num_test'],
    )
    net = dde.maps.FNN(config['model']['layers'],
                       config['model']['activation'],
                       'Glorot normal')
    model = dde.Model(data, net)
    model.compile('adam', lr=config['train']['base_lr'],
                  loss_weights=[1, 1, 1, 1, 100, 100, 100])

    # Train in segments of step_size epochs, evaluating after each segment.
    step_size = config['train'].get('log_step', 100)
    num_segments = config['train']['epochs'] // step_size
    for segment in range(num_segments):
        time_start = default_timer()
        model.train(epochs=step_size, display_every=step_size)
        time_end = default_timer()
        eval(model, dataset, segment * step_size,
             time_cost=time_end - time_start,
             offset=offset,
             config=config)
    print('Done!')
| [
"deepxde.PointSetBC",
"deepxde.geometry.Rectangle",
"deepxde.data.TimePDE",
"deepxde.Model",
"deepxde.geometry.GeometryXTime",
"deepxde.grad.jacobian",
"tensorflow.math.cos",
"csv.writer",
"timeit.default_timer",
"deepxde.grad.hessian",
"deepxde.metrics.l2_relative_error",
"baselines.data.NSda... | [((604, 637), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['u', 'x'], {'i': '(0)', 'j': '(0)'}), '(u, x, i=0, j=0)\n', (621, 637), True, 'import deepxde as dde\n'), ((653, 698), 'deepxde.grad.hessian', 'dde.grad.hessian', (['u', 'x'], {'component': '(0)', 'i': '(0)', 'j': '(0)'}), '(u, x, component=0, i=0, j=0)\n', (669, 698), True, 'import deepxde as dde\n'), ((714, 759), 'deepxde.grad.hessian', 'dde.grad.hessian', (['u', 'x'], {'component': '(0)', 'i': '(1)', 'j': '(1)'}), '(u, x, component=0, i=1, j=1)\n', (730, 759), True, 'import deepxde as dde\n'), ((775, 808), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['u', 'x'], {'i': '(1)', 'j': '(1)'}), '(u, x, i=1, j=1)\n', (792, 808), True, 'import deepxde as dde\n'), ((824, 869), 'deepxde.grad.hessian', 'dde.grad.hessian', (['u', 'x'], {'component': '(1)', 'i': '(0)', 'j': '(0)'}), '(u, x, component=1, i=0, j=0)\n', (840, 869), True, 'import deepxde as dde\n'), ((885, 930), 'deepxde.grad.hessian', 'dde.grad.hessian', (['u', 'x'], {'component': '(1)', 'i': '(1)', 'j': '(1)'}), '(u, x, component=1, i=1, j=1)\n', (901, 930), True, 'import deepxde as dde\n'), ((946, 979), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['u', 'x'], {'i': '(2)', 'j': '(0)'}), '(u, x, i=2, j=0)\n', (963, 979), True, 'import deepxde as dde\n'), ((994, 1027), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['u', 'x'], {'i': '(2)', 'j': '(1)'}), '(u, x, i=2, j=1)\n', (1011, 1027), True, 'import deepxde as dde\n'), ((1042, 1075), 'deepxde.grad.jacobian', 'dde.grad.jacobian', (['u', 'x'], {'i': '(2)', 'j': '(2)'}), '(u, x, i=2, j=2)\n', (1059, 1075), True, 'import deepxde as dde\n'), ((1092, 1137), 'deepxde.grad.hessian', 'dde.grad.hessian', (['u', 'x'], {'component': '(2)', 'i': '(0)', 'j': '(0)'}), '(u, x, component=2, i=0, j=0)\n', (1108, 1137), True, 'import deepxde as dde\n'), ((1153, 1198), 'deepxde.grad.hessian', 'dde.grad.hessian', (['u', 'x'], {'component': '(2)', 'i': '(1)', 'j': '(1)'}), '(u, x, 
component=2, i=1, j=1)\n', (1169, 1198), True, 'import deepxde as dde\n'), ((1894, 1948), 'deepxde.metrics.l2_relative_error', 'dde.metrics.l2_relative_error', (['vel_u_truth', 'vel_u_pred'], {}), '(vel_u_truth, vel_u_pred)\n', (1923, 1948), True, 'import deepxde as dde\n'), ((1961, 2015), 'deepxde.metrics.l2_relative_error', 'dde.metrics.l2_relative_error', (['vel_v_truth', 'vel_v_pred'], {}), '(vel_v_truth, vel_v_pred)\n', (1990, 2015), True, 'import deepxde as dde\n'), ((2030, 2080), 'deepxde.metrics.l2_relative_error', 'dde.metrics.l2_relative_error', (['vor_truth', 'vor_pred'], {}), '(vor_truth, vor_pred)\n', (2059, 2080), True, 'import deepxde as dde\n'), ((2471, 2495), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2485, 2495), False, 'import random\n'), ((2534, 2554), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2548, 2554), True, 'import numpy as np\n'), ((3450, 3514), 'deepxde.geometry.Rectangle', 'dde.geometry.Rectangle', ([], {'xmin': '[0, 0]', 'xmax': '[2 * np.pi, 2 * np.pi]'}), '(xmin=[0, 0], xmax=[2 * np.pi, 2 * np.pi])\n', (3472, 3514), True, 'import deepxde as dde\n'), ((3537, 3593), 'deepxde.geometry.TimeDomain', 'dde.geometry.TimeDomain', (['(0)', "data_config['time_interval']"], {}), "(0, data_config['time_interval'])\n", (3560, 3593), True, 'import deepxde as dde\n'), ((3610, 3669), 'deepxde.geometry.GeometryXTime', 'dde.geometry.GeometryXTime', (['spatial_domain', 'temporal_domain'], {}), '(spatial_domain, temporal_domain)\n', (3636, 3669), True, 'import deepxde as dde\n'), ((4078, 4136), 'deepxde.PointSetBC', 'dde.PointSetBC', ([], {'points': 'points', 'values': 'u_value', 'component': '(0)'}), '(points=points, values=u_value, component=0)\n', (4092, 4136), True, 'import deepxde as dde\n'), ((4154, 4212), 'deepxde.PointSetBC', 'dde.PointSetBC', ([], {'points': 'points', 'values': 'v_value', 'component': '(1)'}), '(points=points, values=v_value, component=1)\n', (4168, 4212), True, 'import 
deepxde as dde\n'), ((4230, 4288), 'deepxde.PointSetBC', 'dde.PointSetBC', ([], {'points': 'points', 'values': 'w_value', 'component': '(2)'}), '(points=points, values=w_value, component=2)\n', (4244, 4288), True, 'import deepxde as dde\n'), ((4301, 4506), 'deepxde.data.TimePDE', 'dde.data.TimePDE', (['st_domain', 'pde', '[boundary_u, boundary_v, boundary_w]'], {'num_domain': "config['train']['num_domain']", 'num_boundary': "config['train']['num_boundary']", 'num_test': "config['train']['num_test']"}), "(st_domain, pde, [boundary_u, boundary_v, boundary_w],\n num_domain=config['train']['num_domain'], num_boundary=config['train'][\n 'num_boundary'], num_test=config['train']['num_test'])\n", (4317, 4506), True, 'import deepxde as dde\n'), ((4610, 4701), 'deepxde.maps.FNN', 'dde.maps.FNN', (["config['model']['layers']", "config['model']['activation']", '"""Glorot normal"""'], {}), "(config['model']['layers'], config['model']['activation'],\n 'Glorot normal')\n", (4622, 4701), True, 'import deepxde as dde\n'), ((4844, 4864), 'deepxde.Model', 'dde.Model', (['data', 'net'], {}), '(data, net)\n', (4853, 4864), True, 'import deepxde as dde\n'), ((343, 369), 'tensorflow.math.cos', 'tf.math.cos', (['(4 * x[:, 1:2])'], {}), '(4 * x[:, 1:2])\n', (354, 369), True, 'import tensorflow as tf\n'), ((2337, 2350), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2347, 2350), False, 'import csv\n'), ((2668, 2927), 'baselines.data.NSdata', 'NSdata', ([], {'datapath1': "data_config['datapath']", 'datapath2': "data_config['datapath2']", 'offset': 'offset', 'num': '(1)', 'nx': "data_config['nx']", 'nt': "data_config['nt']", 'sub': "data_config['sub']", 'sub_t': "data_config['sub_t']", 'vel': '(True)', 't_interval': "data_config['time_interval']"}), "(datapath1=data_config['datapath'], datapath2=data_config['datapath2'\n ], offset=offset, num=1, nx=data_config['nx'], nt=data_config['nt'],\n sub=data_config['sub'], sub_t=data_config['sub_t'], vel=True,\n 
t_interval=data_config['time_interval'])\n", (2674, 2927), False, 'from baselines.data import NSdata\n'), ((3093, 3313), 'baselines.data.NSdata', 'NSdata', ([], {'datapath1': "data_config['datapath']", 'offset': 'offset', 'num': '(1)', 'nx': "data_config['nx']", 'nt': "data_config['nt']", 'sub': "data_config['sub']", 'sub_t': "data_config['sub_t']", 'vel': '(True)', 't_interval': "data_config['time_interval']"}), "(datapath1=data_config['datapath'], offset=offset, num=1, nx=\n data_config['nx'], nt=data_config['nt'], sub=data_config['sub'], sub_t=\n data_config['sub_t'], vel=True, t_interval=data_config['time_interval'])\n", (3099, 3313), False, 'from baselines.data import NSdata\n'), ((5187, 5202), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (5200, 5202), False, 'from timeit import default_timer\n'), ((5285, 5300), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (5298, 5300), False, 'from timeit import default_timer\n')] |
import partitura
import pandas as pd
import numpy as np
import os
def save_csv_for_parangonada(outdir, part, ppart, align, zalign=None, feature = None):
part = partitura.utils.music.ensure_notearray(part)
ppart = partitura.utils.music.ensure_notearray(ppart)
# ___ create np array for features
ffields = [('velocity', '<f4'),
('timing', '<f4'),
('articulation', '<f4'),
('id', 'U256')]
""" for all dicts create an appropriate entry in an array: match = 0, deletion = 1, insertion = 2 """
farray = []
notes = list(part["id"])
if feature is not None:
# veloctiy, timing, articulation, note
for no, i in enumerate(list(feature['id'])):
farray.append((feature['velocity'][no],feature['timing'][no],feature['articulation'][no], i))
else:
for no, i in enumerate(notes):
farray.append((0,0,0, i))
featurearray = np.array(farray, dtype=ffields)
#___ create np array for alignment
fields = [('idx', 'i4'),
('matchtype', 'U256'),
('partid', 'U256'),
('ppartid', 'U256')]
""" for all dicts create an appropriate entry in an array: match = 0, deletion = 1, insertion = 2 """
array = []
for no, i in enumerate(align):
#print(no, i)
if i["label"]=="match":
array.append((no, "0", i["score_id"], str(i["performance_id"])))
elif i["label"]=="insertion":
array.append((no, "2", "undefined", str(i["performance_id"])))
elif i["label"]=="deletion":
array.append((no, "1", i["score_id"], "undefined"))
alignarray = np.array(array, dtype=fields)
""" for all dicts create an appropriate entry in an array: match = 0, deletion = 1, insertion = 2 """
zarray = []
if zalign is not None:
for no, i in enumerate(zalign):
#print(no, i)
if i["label"]=="match":
zarray.append((no, "0", i["score_id"], str(i["performance_id"])))
elif i["label"]=="insertion":
zarray.append((no, "2", "undefined", str(i["performance_id"])))
elif i["label"]=="deletion":
zarray.append((no, "1", i["score_id"], "undefined"))
else: # if no zalign is available, save the same alignment twice
for no, i in enumerate(align):
#print(no, i)
if i["label"]=="match":
zarray.append((no, "0", i["score_id"], str(i["performance_id"])))
elif i["label"]=="insertion":
zarray.append((no, "2", "undefined", str(i["performance_id"])))
elif i["label"]=="deletion":
zarray.append((no, "1", i["score_id"], "undefined"))
zalignarray = np.array(zarray, dtype=fields)
pd.DataFrame(ppart).to_csv(outdir + os.path.sep+"ppart.csv", index= 0)
pd.DataFrame(part).to_csv(outdir + os.path.sep+"part.csv", index= 0)
pd.DataFrame(alignarray).to_csv(outdir + os.path.sep+"align.csv", index= 0)
pd.DataFrame(zalignarray).to_csv(outdir + os.path.sep+"zalign.csv", index= 0)
pd.DataFrame(featurearray).to_csv(outdir + os.path.sep+"feature.csv", index= 0)
# np.savetxt(outdir + os.path.sep+"ppart.csv", ppart, delimiter= ",")
# np.savetxt(outdir + os.path.sep+"part.csv", part, delimiter= ",")
# np.savetxt(outdir + os.path.sep+"align.csv", alignarray, delimiter= ",")
# np.savetxt(outdir + os.path.sep+"zalign.csv",zalignarray, delimiter= ",")
# np.savetxt(featurearray).to_csv(outdir + os.path.sep+"feature.csv",featurearray, delimiter= ",") | [
"pandas.DataFrame",
"numpy.array",
"partitura.utils.music.ensure_notearray"
] | [((167, 211), 'partitura.utils.music.ensure_notearray', 'partitura.utils.music.ensure_notearray', (['part'], {}), '(part)\n', (205, 211), False, 'import partitura\n'), ((224, 269), 'partitura.utils.music.ensure_notearray', 'partitura.utils.music.ensure_notearray', (['ppart'], {}), '(ppart)\n', (262, 269), False, 'import partitura\n'), ((957, 988), 'numpy.array', 'np.array', (['farray'], {'dtype': 'ffields'}), '(farray, dtype=ffields)\n', (965, 988), True, 'import numpy as np\n'), ((1691, 1720), 'numpy.array', 'np.array', (['array'], {'dtype': 'fields'}), '(array, dtype=fields)\n', (1699, 1720), True, 'import numpy as np\n'), ((2806, 2836), 'numpy.array', 'np.array', (['zarray'], {'dtype': 'fields'}), '(zarray, dtype=fields)\n', (2814, 2836), True, 'import numpy as np\n'), ((2842, 2861), 'pandas.DataFrame', 'pd.DataFrame', (['ppart'], {}), '(ppart)\n', (2854, 2861), True, 'import pandas as pd\n'), ((2917, 2935), 'pandas.DataFrame', 'pd.DataFrame', (['part'], {}), '(part)\n', (2929, 2935), True, 'import pandas as pd\n'), ((2992, 3016), 'pandas.DataFrame', 'pd.DataFrame', (['alignarray'], {}), '(alignarray)\n', (3004, 3016), True, 'import pandas as pd\n'), ((3074, 3099), 'pandas.DataFrame', 'pd.DataFrame', (['zalignarray'], {}), '(zalignarray)\n', (3086, 3099), True, 'import pandas as pd\n'), ((3158, 3184), 'pandas.DataFrame', 'pd.DataFrame', (['featurearray'], {}), '(featurearray)\n', (3170, 3184), True, 'import pandas as pd\n')] |
"""Incorporate ETH Zurich's BIWI (EWAP) dataset into simple gridworld."""
import os
from pathlib import Path
from matplotlib.image import imread
import matplotlib.pyplot as plt
import numpy as np
from .simple_gw import SimpleGridworld
# Grid constants
OBSTACLE = 2
GOAL = 6
PERSON = 9
ROBOT = 15
class EwapDataset:
"""
Contains all relevant information for EWAP dataset.
"""
def __init__(
self,
dataset_root='datasets/ewap_dataset',
sequence='seq_hotel'
):
dataset_path = Path(os.path.abspath(__file__)).parents[0]
self.sequence_path = dataset_path / dataset_root / sequence
# get obstacle map
obs_map_path = self.sequence_path / 'hmap.png'
self.obstacle_map = imread(str(obs_map_path.resolve()))
# get pedestrian position and velocity data (obsmat.txt)
self.pedestrian_data = np.loadtxt(self.sequence_path / 'obsmat.txt')
# get shift and scale amount for this sequence
self.pos_shift = np.loadtxt(self.sequence_path / 'shift.txt')
self.scale_factor = np.loadtxt(self.sequence_path / 'scale.txt')
self.frame_id = 0
self.processed_data = self.process_data()
def scale(self, raw_data):
scaled_data = raw_data.copy()
scaled_data[:, 2:] *= self.scale_factor
return scaled_data
def shift(self, scaled_ped_data):
shifted_ped_data = scaled_ped_data.copy()
shifted_ped_data[:, 2] = shifted_ped_data[:, 2] - self.pos_shift[0]
shifted_ped_data[:, 4] = shifted_ped_data[:, 4] - self.pos_shift[1]
return shifted_ped_data
def discretize(self, scaled_shifted_data):
discrete_data = np.round(scaled_shifted_data).astype(np.int64)
return discrete_data
def normalize_frames(self, unnormalied_data):
"""
Normalizers the data frames, so first frame is integer 0.
"""
normalized_data = unnormalied_data.copy()
normalized_data[:, 0] -= np.min(normalized_data[:, 0])
normalized_data[:, 0] = normalized_data[:, 0] / 10
return normalized_data
def process_data(self):
"""
scales, shifts, and discretizes input data according to parameters
generated by map2world.py script. normalizes frame numbers.
"""
scaled_data = self.scale(self.pedestrian_data)
shifted_data = self.shift(scaled_data)
discrete_data = self.discretize(shifted_data)
# normalize frames
processed_data = self.normalize_frames(discrete_data)
return processed_data
def pad_dataset(self, pad_amount):
"""
Pad dataset with obstacle pixels to ensure 2*vision_radius+1 squares
are possible for feature extractor.
"""
self.obstacle_map = np.pad(
self.obstacle_map,
pad_amount + 1, # for when agent runs into edges
mode='constant',
constant_values=1.0
)
# shift dataset to accomodate padding
self.processed_data[:, 2] += pad_amount
self.processed_data[:, 4] += pad_amount
def pedestrian_goal(self, pedestrian_id):
"""Return goal of pedestrian with the given ID.
:param pedestrian_id: ID of pedestrian.
"""
pedestrian_mask = (self.processed_data[:, 1] == pedestrian_id)
pedestrian_traj = self.processed_data[pedestrian_mask]
return (pedestrian_traj[-1, 2], pedestrian_traj[-1, 4])
def get_max_frame(self):
return np.max(self.processed_data[:, 0])
class EwapGridworld(SimpleGridworld):
"""
Env that incorporates ETHZ BIWI (EWAP) dataset.
make sure the correct directory sturcture exists in order to run, i.e.
/envs/datasets/ewap_dataset/seq_eth
/envs/datasets/ewap_dataset/seq_hotel
folders exist. Additionally, run script map2world.py for both sequences,
instruction found in the script at /envs/datasets/ewap_dataset/map2world.py
"""
def __init__(
self,
ped_id,
sequence='seq_hotel',
dataset_root='datasets/ewap_dataset',
person_thickness=2,
vision_radius=40,
render=False,
):
"""
Initialize EWAP gridworld. Make sure all files in the correct
directories as specified above!
:param ped_id: ID of pedestrian whose goal we adopt.
:param sequence: Sequence to base world on. example: 'seq_hotel'
:param dataset_root: path to root of dataset directory.
:param person_thickness: The dimensions of the nxn boxes that will
represent people.
"""
self.dataset = EwapDataset(
sequence=sequence,
dataset_root=dataset_root
)
self.vision_radius = vision_radius
self.speed = 4
self.dataset.pad_dataset(vision_radius + 2 * self.speed)
self.person_thickness = person_thickness
obstacle_array = np.where(self.dataset.obstacle_map == 1.0)
obstacle_array = np.array(obstacle_array).T
goals = self.dataset.pedestrian_goal(ped_id)
super().__init__(
self.dataset.obstacle_map.shape,
obstacle_array,
goals
)
# agent velociy
self.player_vel = np.zeros(2)
# construct goal map
self.adopt_goal(ped_id)
# seperate for people position incase we want to do processing.
self.person_map = self.obstacle_grid.copy()
self.person_map.fill(0)
# Play only until video exists
self.max_steps = self.dataset.get_max_frame()
# counters for debuggging data
self.png_number = 0
# rendering
self.render = render
if self.render:
self.enable_rendering()
def enable_rendering(self):
self.render = True
self.fig = plt.figure()
self.gridspec = self.fig.add_gridspec(4, 2)
# setup axis
ax_obs = self.fig.add_subplot(self.gridspec[0, 0])
ax_persons = self.fig.add_subplot(self.gridspec[0, 1])
ax_goal = self.fig.add_subplot(self.gridspec[1, 0])
ax_gridworld = self.fig.add_subplot(self.gridspec[2:, :])
dummy_surrounding = np.eye(2, 2)
self.im_obs = ax_obs.imshow(dummy_surrounding)
self.im_persons = ax_persons.imshow(dummy_surrounding)
self.im_goal = ax_goal.imshow(dummy_surrounding)
self.im_gridworld = ax_gridworld.imshow(self.obstacle_grid)
self.im_gridworld.set_clim(vmin=0, vmax=ROBOT)
self.fig.canvas.draw()
plt.pause(0.000001)
def disable_rendering(self):
self.render = False
def thicken(self, grid, target_thickness):
"""
thicken pixel by specified target_thickness parameter in supplied grid.
:param target_thickness: thickness amount.
:param grid: grid in which pixels reside.
"""
row_ind, col_ind = np.where(grid != 0)
thick = grid.copy()
assert row_ind.size == col_ind.size
for i in range(row_ind.size):
row = row_ind[i]
col = col_ind[i]
thick[
row - target_thickness:row + target_thickness + 1,
col - target_thickness:col + target_thickness + 1
] = thick[row, col]
return thick
def populate_person_map(self, frame_num):
"""Populates the person map based on input frame_num, which is the
frame id of EWAP database's footage.
:param frame_num: frame to get positions from.
"""
# clear person map
self.person_map.fill(0)
# Get data for current frame of simulation.
frame_pedestrians = self.dataset.processed_data[
self.dataset.processed_data[:, 0] == frame_num
]
self.person_map[frame_pedestrians[:, 2],
frame_pedestrians[:, 4]] = 1.0
self.person_map = self.thicken(self.person_map, self.person_thickness)
def adopt_goal(self, pedestrian_id):
"""Change the goal to the one used by pedestrian.
:param pedestrian_id: ID of pedestrian whose goal we adopt.
"""
self.goal_pos = self.dataset.pedestrian_goal(pedestrian_id)
self.goal_grid[self.goal_pos] = 1.0
# thicken the goal, making it area instead of pixel
goal_thickness = self.person_thickness * 5
self.goal_grid = self.thicken(self.goal_grid, goal_thickness)
def reset(self):
"""
reset gridworld to initial positon, with all trajectories starting
again at first frame.
"""
super().reset()
assert self.step_number == 0, 'Step number non-zero after reset!'
self.player_vel = np.zeros(2)
self.populate_person_map(self.step_number)
return self.state_extractor().astype('float32')
def reward_function(self, state, action, next_state):
reward = np.array(0.0)
if self.goal_grid[tuple(self.player_pos)] == 1.0:
reward += 1.0
if self.obstacle_grid[tuple(self.player_pos)] == 1.0:
reward += -1.0
if self.person_map[tuple(self.player_pos)] == 1.0:
reward += -2.0
dist_to_goal = np.sum(np.abs(state[-2:] - state[-4:-2]))
next_dist_to_goal = np.sum(np.abs(next_state[-2:] - next_state[-4:-2]))
if next_dist_to_goal > dist_to_goal:
reward -= 0.001
elif next_dist_to_goal < dist_to_goal:
reward += 0.001
return reward
def state_extractor(self):
"""
Extract state for CURRENT internal state of gridworld.
"""
row_low = self.player_pos[0] - self.vision_radius
row_high = self.player_pos[0] + self.vision_radius + 1
col_low = self.player_pos[1] - self.vision_radius
col_high = self.player_pos[1] + self.vision_radius + 1
obstacles = self.obstacle_grid[row_low: row_high, col_low: col_high]
people = self.person_map[row_low: row_high, col_low: col_high]
goals = self.goal_grid[row_low: row_high, col_low: col_high]
if self.render:
self.im_obs.set_data(obstacles)
self.im_persons.set_data(people)
self.im_goal.set_data(goals)
expected_shape = (
2 * self.vision_radius + 1,
2 * self.vision_radius + 1
)
assert obstacles.shape == expected_shape
assert people.shape == expected_shape
assert goals.shape == expected_shape
# state vector is surroundings concatenated with player pos and goal
# pos, hence the +4 size
state = np.zeros(obstacles.size + people.size + goals.size + 6)
local_map = np.concatenate(
(
obstacles.flatten(),
people.flatten(),
goals.flatten()
)
)
# populate state vector
state[:-6] = local_map
state[-6:-4] = self.player_pos
state[-4:-2] = self.player_vel
state[-2:] = np.array(self.goal_pos)
return state
def step(self, action):
"""
Advance the gridworld player based on action.
'done' is set when environment reaches goal, hits obstacle, or exceeds
max step number, which is heuristically set to length*height of
gridworld.
:param action: Action peformed by player.
:return (next_state, reward, done, False)
"""
assert self.action_space.contains(action), "Invalid action!"
# extract current
state = self.state_extractor().astype('float32')
# advance player based on action
action_vector = self.speed * self.action_dict[action]
self.player_vel = action_vector
next_pos = self.player_pos + action_vector
self.step_number += 1
# update pedestrian positions
self.populate_person_map(self.step_number)
# extract next state
self.player_pos = next_pos
next_state = self.state_extractor().astype('float32')
# reward function r(s_t, a_t, s_t+1)
reward = self.reward_function(state, action, next_state)
# position reset condition
goal_reached = (self.goal_grid[tuple(self.player_pos)] == 1.0)
obstacle_hit = (self.obstacle_grid[tuple(self.player_pos)] == 1.0)
person_hit = (self.person_map[tuple(self.player_pos)] == 1.0)
if obstacle_hit or person_hit:
self.reset_player_pos()
# temrination conditions
max_steps_elapsed = self.step_number > self.max_steps
done = max_steps_elapsed
if self.render:
self.render_gridworld()
return next_state, reward, done, max_steps_elapsed
def overlay(self, overlay, overlaid_on, const):
"""Overlays value 'const' on numpy array 'overlaid_on' in indices where
'overlay' is non-zero.
:param overlay: numpy to overlay on another array.
:param overlaid_on: overlay is applied on this array.
:param const: constant to overlay with, e.g. const=2 will put 2s where
overlay is nonzero.
"""
output = np.copy(overlaid_on)
output[overlay != 0.0] = const
return output
def render_gridworld(self):
to_render = self.obstacle_grid.copy() * OBSTACLE
to_render = self.overlay(self.person_map, to_render, PERSON)
to_render = self.overlay(self.goal_grid, to_render, GOAL)
to_render[tuple(self.player_pos)] = ROBOT
self.im_gridworld.set_data(to_render)
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def dump_png(self, path='./png_dumps/'):
"""Saves a PNG image of current state.
:param path: path to save image to.
"""
impath = Path(path)
image_name = str(self.png_number) + '.png'
to_save = self.obstacles_map + self.goal_grid + self.obstacle_grid
to_save[tuple(self.player_pos)] = ROBOT
plt.imsave(str((impath / image_name).resolve()), to_save)
self.png_number += 1
| [
"numpy.copy",
"numpy.eye",
"numpy.abs",
"pathlib.Path",
"numpy.where",
"numpy.max",
"numpy.array",
"numpy.pad",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.pause",
"numpy.loadtxt",
"os.path.abspath",
"numpy.round"
] | [((897, 942), 'numpy.loadtxt', 'np.loadtxt', (["(self.sequence_path / 'obsmat.txt')"], {}), "(self.sequence_path / 'obsmat.txt')\n", (907, 942), True, 'import numpy as np\n'), ((1024, 1068), 'numpy.loadtxt', 'np.loadtxt', (["(self.sequence_path / 'shift.txt')"], {}), "(self.sequence_path / 'shift.txt')\n", (1034, 1068), True, 'import numpy as np\n'), ((1097, 1141), 'numpy.loadtxt', 'np.loadtxt', (["(self.sequence_path / 'scale.txt')"], {}), "(self.sequence_path / 'scale.txt')\n", (1107, 1141), True, 'import numpy as np\n'), ((2012, 2041), 'numpy.min', 'np.min', (['normalized_data[:, 0]'], {}), '(normalized_data[:, 0])\n', (2018, 2041), True, 'import numpy as np\n'), ((2820, 2899), 'numpy.pad', 'np.pad', (['self.obstacle_map', '(pad_amount + 1)'], {'mode': '"""constant"""', 'constant_values': '(1.0)'}), "(self.obstacle_map, pad_amount + 1, mode='constant', constant_values=1.0)\n", (2826, 2899), True, 'import numpy as np\n'), ((3543, 3576), 'numpy.max', 'np.max', (['self.processed_data[:, 0]'], {}), '(self.processed_data[:, 0])\n', (3549, 3576), True, 'import numpy as np\n'), ((4995, 5037), 'numpy.where', 'np.where', (['(self.dataset.obstacle_map == 1.0)'], {}), '(self.dataset.obstacle_map == 1.0)\n', (5003, 5037), True, 'import numpy as np\n'), ((5323, 5334), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (5331, 5334), True, 'import numpy as np\n'), ((5907, 5919), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5917, 5919), True, 'import matplotlib.pyplot as plt\n'), ((6271, 6283), 'numpy.eye', 'np.eye', (['(2)', '(2)'], {}), '(2, 2)\n', (6277, 6283), True, 'import numpy as np\n'), ((6622, 6638), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-06)'], {}), '(1e-06)\n', (6631, 6638), True, 'import matplotlib.pyplot as plt\n'), ((6986, 7005), 'numpy.where', 'np.where', (['(grid != 0)'], {}), '(grid != 0)\n', (6994, 7005), True, 'import numpy as np\n'), ((8791, 8802), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (8799, 8802), True, 'import 
numpy as np\n'), ((8988, 9001), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (8996, 9001), True, 'import numpy as np\n'), ((10701, 10756), 'numpy.zeros', 'np.zeros', (['(obstacles.size + people.size + goals.size + 6)'], {}), '(obstacles.size + people.size + goals.size + 6)\n', (10709, 10756), True, 'import numpy as np\n'), ((11097, 11120), 'numpy.array', 'np.array', (['self.goal_pos'], {}), '(self.goal_pos)\n', (11105, 11120), True, 'import numpy as np\n'), ((13228, 13248), 'numpy.copy', 'np.copy', (['overlaid_on'], {}), '(overlaid_on)\n', (13235, 13248), True, 'import numpy as np\n'), ((13870, 13880), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (13874, 13880), False, 'from pathlib import Path\n'), ((5063, 5087), 'numpy.array', 'np.array', (['obstacle_array'], {}), '(obstacle_array)\n', (5071, 5087), True, 'import numpy as np\n'), ((9295, 9328), 'numpy.abs', 'np.abs', (['(state[-2:] - state[-4:-2])'], {}), '(state[-2:] - state[-4:-2])\n', (9301, 9328), True, 'import numpy as np\n'), ((9365, 9408), 'numpy.abs', 'np.abs', (['(next_state[-2:] - next_state[-4:-2])'], {}), '(next_state[-2:] - next_state[-4:-2])\n', (9371, 9408), True, 'import numpy as np\n'), ((1711, 1740), 'numpy.round', 'np.round', (['scaled_shifted_data'], {}), '(scaled_shifted_data)\n', (1719, 1740), True, 'import numpy as np\n'), ((547, 572), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (562, 572), False, 'import os\n')] |
import random
import h5py
import numpy as np
import torch
import torch.utils.data as udata
import glob
import os
from PIL import Image
import torchvision.transforms as transforms
# import torch.nn.functional as F
def normalize():
return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
def norm(data, max_val, min_val):
return (data-min_val)/(max_val-min_val)
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
def __scale_width(img, target_width, method=Image.BICUBIC):
ow, oh = img.size
if (ow == target_width):
return img
w = target_width
h = int(target_width * oh / ow)
return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
class HyperDatasetValid(udata.Dataset):
def __init__(self, mode='valid', opt=None):
if mode != 'valid':
raise Exception("Invalid mode!", mode)
data_path = './Dataset/Valid'
data_names = glob.glob(os.path.join(data_path, '*.mat'))
self.keys = data_names
self.keys.sort()
self.opt = opt
def __len__(self):
return len(self.keys)
def __getitem__(self, index):
mat = h5py.File(self.keys[index], 'r')
hyper = np.float32(np.array(mat['cube']))
# hyper = np.float32(np.array(mat['rad']))
hyper = np.transpose(hyper, [2, 1, 0])
transform_hyper = self.get_transform(cur=1, opt=self.opt)
hyper = transform_hyper(hyper)
rgb = np.float32(np.array(mat['rgb']))
rgb = np.transpose(rgb, [2, 1, 0])
transform_rgb = self.get_transform(cur=2, opt=self.opt)
rgb = transform_rgb(rgb)
mat.close()
return rgb, hyper
def get_transform(self, cur, opt, params=None, method=Image.BICUBIC, normalize=False):
transform_list = []
transform_list += [transforms.ToTensor()]
if normalize:
mt = [0.5] * opt.output_nc if cur == 1 else [0.5] * opt.input_nc
mtp = tuple(mt)
# print("mtp: ", mtp)
transform_list += [transforms.Normalize(mtp, mtp)]
return transforms.Compose(transform_list)
class HyperDatasetTrain(udata.Dataset):
def __init__(self, mode='train', opt=None):
if mode != 'train':
raise Exception("Invalid mode!", mode)
# data_path = './Dataset/Train1'
data_path = './Dataset/Train'
data_names = glob.glob(os.path.join(data_path, '*.mat'))
self.keys = data_names
random.shuffle(self.keys)
# self.keys.sort()
self.opt = opt
def __len__(self):
# print("length")
return len(self.keys)
def __getitem__(self, index):
mat = h5py.File(self.keys[index], 'r')
# hyper = np.float32(np.array(mat['rad']))
hyper = np.float32(np.array(mat['cube'])) # (482, 512, 31) CWH
hyper = np.transpose(hyper, [2, 1, 0])
# print("hyper shape: {}".format(hyper.shape))
# print("hyper max {} hyper min {}".format(hyper.max(), hyper.min()))
# hyper max 1.0 hyper min 0.004312408156692982
transform_hyper = self.get_transform(cur=1, opt=self.opt)
hyper = transform_hyper(hyper)
rgb = np.float32(np.array(mat['rgb'])) # (482, 512, 3) CWH
rgb = np.transpose(rgb, [2, 1, 0])
# print("RGB shape: {}".format(rgb.shape))
# print("rgb max {} rgb min {}".format(rgb.max(), rgb.min()))
# rgb max 227.0 rgb min 1.0
transform_rgb = self.get_transform(cur=2, opt=self.opt)
rgb = transform_rgb(rgb)
mat.close()
# print("RGB shape: {} hyper shape: {}".format(rgb.shape, hyper.shape))
# RGB shape: torch.Size([3, 482, 512]) hyper shape: torch.Size([31, 482, 512])
return rgb, hyper
def get_transform(self, cur, opt, params=None, method=Image.BICUBIC, normalize=False):
transform_list = []
# if 'resize' in opt.resize_or_crop:
# osize = [opt.loadSize, opt.loadSize]
# transform_list.append(transforms.Scale(osize, method))
# elif 'scale_width' in opt.resize_or_crop:
# transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize, method)))
# if 'crop' in opt.resize_or_crop:
# transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.fineSize)))
# if opt.resize_or_crop == 'none':
# base = float(2 ** opt.n_downsample_global)
# if opt.netG == 'local':
# base *= (2 ** opt.n_local_enhancers)
# transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
# if opt.isTrain and not opt.no_flip:
# transform_list.append(transforms.RandomHorizontalFlip())
transform_list += [transforms.ToTensor()]
if normalize:
mt = []
if cur == 1:
mt = [0.5] * opt.output_nc
elif cur == 2:
mt = [0.5] * opt.input_nc
mtp = tuple(mt)
# print("mtp: ", mtp)
transform_list += [transforms.Normalize(mtp, mtp)]
return transforms.Compose(transform_list)
| [
"random.shuffle",
"os.path.join",
"h5py.File",
"numpy.array",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor",
"numpy.transpose",
"torchvision.transforms.Compose"
] | [((243, 297), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (263, 297), True, 'import torchvision.transforms as transforms\n'), ((1589, 1621), 'h5py.File', 'h5py.File', (['self.keys[index]', '"""r"""'], {}), "(self.keys[index], 'r')\n", (1598, 1621), False, 'import h5py\n'), ((1739, 1769), 'numpy.transpose', 'np.transpose', (['hyper', '[2, 1, 0]'], {}), '(hyper, [2, 1, 0])\n', (1751, 1769), True, 'import numpy as np\n'), ((1937, 1965), 'numpy.transpose', 'np.transpose', (['rgb', '[2, 1, 0]'], {}), '(rgb, [2, 1, 0])\n', (1949, 1965), True, 'import numpy as np\n'), ((2521, 2555), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (2539, 2555), True, 'import torchvision.transforms as transforms\n'), ((2909, 2934), 'random.shuffle', 'random.shuffle', (['self.keys'], {}), '(self.keys)\n', (2923, 2934), False, 'import random\n'), ((3114, 3146), 'h5py.File', 'h5py.File', (['self.keys[index]', '"""r"""'], {}), "(self.keys[index], 'r')\n", (3123, 3146), False, 'import h5py\n'), ((3287, 3317), 'numpy.transpose', 'np.transpose', (['hyper', '[2, 1, 0]'], {}), '(hyper, [2, 1, 0])\n', (3299, 3317), True, 'import numpy as np\n'), ((3699, 3727), 'numpy.transpose', 'np.transpose', (['rgb', '[2, 1, 0]'], {}), '(rgb, [2, 1, 0])\n', (3711, 3727), True, 'import numpy as np\n'), ((5587, 5621), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (5605, 5621), True, 'import torchvision.transforms as transforms\n'), ((1372, 1404), 'os.path.join', 'os.path.join', (['data_path', '"""*.mat"""'], {}), "(data_path, '*.mat')\n", (1384, 1404), False, 'import os\n'), ((1649, 1670), 'numpy.array', 'np.array', (["mat['cube']"], {}), "(mat['cube'])\n", (1657, 1670), True, 'import numpy as np\n'), ((1901, 1921), 'numpy.array', 'np.array', (["mat['rgb']"], {}), "(mat['rgb'])\n", (1909, 1921), 
True, 'import numpy as np\n'), ((2257, 2278), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2276, 2278), True, 'import torchvision.transforms as transforms\n'), ((2835, 2867), 'os.path.join', 'os.path.join', (['data_path', '"""*.mat"""'], {}), "(data_path, '*.mat')\n", (2847, 2867), False, 'import os\n'), ((3225, 3246), 'numpy.array', 'np.array', (["mat['cube']"], {}), "(mat['cube'])\n", (3233, 3246), True, 'import numpy as np\n'), ((3637, 3657), 'numpy.array', 'np.array', (["mat['rgb']"], {}), "(mat['rgb'])\n", (3645, 3657), True, 'import numpy as np\n'), ((5243, 5264), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5262, 5264), True, 'import torchvision.transforms as transforms\n'), ((2473, 2503), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mtp', 'mtp'], {}), '(mtp, mtp)\n', (2493, 2503), True, 'import torchvision.transforms as transforms\n'), ((5539, 5569), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['mtp', 'mtp'], {}), '(mtp, mtp)\n', (5559, 5569), True, 'import torchvision.transforms as transforms\n')] |
import copy
import numpy as np
from math import sqrt, pi, e
from bisection import bisection
class LegendrePolynomial:
def __init__(self, n):
self.degree = n
self.coef = [1]
while n and len(self.coef) < n + 1:
self.coef.append(0)
def get(self, x):
res = 0
for i in range(self.degree + 1):
res += self.coef[i] * x ** (self.degree - i)
return res
def __imul__(self, other):
for i in range(len(self.coef)):
self.coef[i] *= other
return self
def __isub__(self, other):
i = other.degree
j = self.degree
while i != -1:
self.coef[j] -= other.coef[i]
i -= 1
j -= 1
return self
def promote(self):
self.degree += 1
self.coef.append(0)
return self
def form_legendre(n):
if n == 0:
return LegendrePolynomial(0)
if n == 1:
return LegendrePolynomial(1)
zero = LegendrePolynomial(0)
one = LegendrePolynomial(1)
for i in range(1, n):
m = i + 1
buf_one = copy.deepcopy(one)
one.promote()
one *= ((2 * m - 1) / m)
zero *= ((m - 1) / m)
one -= zero
zero = buf_one
return one
def roots_legendre(n):
roots = []
roots_inter = []
h = 2 / n
a = -1
b = a + h
legendre = form_legendre(n)
while len(roots_inter) != n:
roots_inter = []
while b <= 1:
if legendre.get(a) * legendre.get(b) < 0:
roots_inter.append([a, b])
a = b
b += h
h /= 2
a = -1
b = a + h
for i in roots_inter:
roots.append(bisection(i[0], i[1], 0.000001, legendre.get))
return roots
def quadrature(k):
if k % 2:
return 0
else:
return 2 / (k + 1)
def integrate(a, b, n, f, tau, m, h_y):
t = roots_legendre(n)
A = np.zeros((n, n))
B = np.zeros((n, 1))
for k in range(n):
for i in range(n):
A[k, i] = t[i] ** k
B[k] = quadrature(k)
D = np.linalg.inv(A)
C = D * B
Ai = np.array(C.ravel())
res = 0
for i in range(n):
print(f((b + a) / 2 + (b - a) / 2 * t[i], tau, m, h_y))
res += Ai[i] * f((b + a) / 2 + (b - a) / 2 * t[i], tau, m, h_y)
res *= (b - a) / 2
return res
def nparray_to_list(a):
a = list(a)
for i in range(len(a)):
a[i] = list(a[i])
return a
| [
"numpy.zeros",
"numpy.linalg.inv",
"bisection.bisection",
"copy.deepcopy"
] | [((1949, 1965), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1957, 1965), True, 'import numpy as np\n'), ((1974, 1990), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (1982, 1990), True, 'import numpy as np\n'), ((2112, 2128), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (2125, 2128), True, 'import numpy as np\n'), ((1110, 1128), 'copy.deepcopy', 'copy.deepcopy', (['one'], {}), '(one)\n', (1123, 1128), False, 'import copy\n'), ((1720, 1762), 'bisection.bisection', 'bisection', (['i[0]', 'i[1]', '(1e-06)', 'legendre.get'], {}), '(i[0], i[1], 1e-06, legendre.get)\n', (1729, 1762), False, 'from bisection import bisection\n')] |
# -*- coding: utf-8 -*-
"""
This is a script for satellite image classification
Last updated on Aug 6 2019
@author: <NAME>
@Email: <EMAIL>
@functions
1. generate samples from satellite images
2. grid search SVM/random forest parameters
3. object-based post-classification refinement
superpixel-based regularization for classification maps
4. confusion matrix: OA, kappa, PA, UA, AA
5. save maps as images
@sample codes
c = rscls.rscls(image,ground_truth,cls=number_of_classes)
c.padding(patch)
c.normalize(style='-11') # optional
x_train,y_train = c.train_sample(num_per_cls)
x_train,y_train = rscls.make_sample(x_train,y_train)
x_test,y_test = c.test_sample()
# for superpixel refinement
c.locate_obj(seg)
pcmap = rscls.obpc(c.seg,predicted,c.obj)
@Notes
Ground truth file should be in uint8 format with class labels beginning at 1
Background = 0
"""
import numpy as np
import copy
import scipy.stats as stats
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import matplotlib.pyplot as plt
class rscls:
    """Patch-based sampling helper for satellite image classification.

    Wraps an image cube ``im`` of shape (rows, cols, bands) and a ground
    truth map ``gt`` and provides border padding, per-band normalization,
    patch extraction, train/test sampling and superpixel (object)
    bookkeeping for post-classification refinement.

    Ground truth is expected to be uint8 with class labels starting at 1
    (0 = background); labels are shifted internally to start at 0.
    """

    def __init__(self, im, gt, cls):
        """im: image cube (H, W, B); gt: ground truth map; cls: number of classes."""
        if cls == 0:
            print('num of class not specified !!')
        self.im = copy.deepcopy(im)  # deep copy: keep the caller's array intact
        # Shift labels so classes start at 0. (The original if/else branches
        # were byte-identical, so the redundant gt.max() check was dropped.)
        self.gt = copy.deepcopy(gt - 1)
        self.gt_b = copy.deepcopy(gt)  # untouched backup of the raw labels
        self.cls = cls
        self.patch = 1  # patch size; updated by padding()
        self.imx, self.imy, self.imz = self.im.shape
        self.record = []
        self.sample = {}

    def padding(self, patch):
        """Replicate-pad the image borders by patch//2 on every side.

        Must be called before patch extraction so that patches centred on
        border pixels exist. Casts the image to float32.
        """
        self.patch = patch
        pad = self.patch // 2
        r1 = np.repeat([self.im[0, :, :]], pad, axis=0)   # replicate top row
        r2 = np.repeat([self.im[-1, :, :]], pad, axis=0)  # replicate bottom row
        self.im = np.concatenate((r1, self.im, r2))
        r1 = np.reshape(self.im[:, 0, :], [self.imx + 2 * pad, 1, self.imz])   # left column
        r2 = np.reshape(self.im[:, -1, :], [self.imx + 2 * pad, 1, self.imz])  # right column
        r1 = np.repeat(r1, pad, axis=1)
        r2 = np.repeat(r2, pad, axis=1)
        self.im = np.concatenate((r1, self.im, r2), axis=1)
        self.im = self.im.astype('float32')

    def normalize(self, style='01'):
        """Normalize each band in place to [0, 1]; style='-11' rescales to [-1, 1].

        Bug fix: the original wrote ``im = im * 2 - 1``, which rebinds the
        local name only, so the '-11' style silently had no effect on
        ``self.im``.
        """
        im = self.im
        for i in range(im.shape[-1]):
            im[:, :, i] = (im[:, :, i] - im[:, :, i].min()) / (im[:, :, i].max() - im[:, :, i].min())
        if style == '-11':
            self.im = im * 2 - 1

    def locate_sample(self):
        """Collect every labelled pixel as a row [row, col, class] in self.sample."""
        parts = []
        for i in range(self.cls):
            _xy = np.array(np.where(self.gt == i)).T  # coordinates of class i
            parts.append(np.concatenate([_xy, i * np.ones([_xy.shape[0], 1])], axis=-1))
        self.sample = np.concatenate(parts, axis=0).astype(int)

    def get_patch(self, xy):
        """Return the patch centred at (row, col) of the unpadded image.

        Returns [] when the centre index is out of range. NOTE(review):
        negative indices are not rejected (numpy wraps them), matching the
        original behaviour.
        """
        d = self.patch // 2
        x = xy[0]
        y = xy[1]
        try:
            self.im[x][y]  # cheap bounds check against the padded image
        except IndexError:
            return []
        x += d
        y += d
        sam = self.im[(x - d):(x + d + 1), (y - d):(y + d + 1)]
        return np.array(sam)

    def train_sample(self, pn):
        """Randomly draw up to ``pn`` training patches per class.

        Drawn pixels are marked 255 in self.gt so that test_sample() will
        not reuse them. Returns shuffled (patches, integer labels).
        """
        x_train, y_train = [], []
        self.locate_sample()
        _samp = self.sample
        for _cls in range(self.cls):
            _xy = _samp[_samp[:, 2] == _cls]
            np.random.shuffle(_xy)
            _xy = _xy[:pn, :]
            for xy in _xy:
                self.gt[xy[0], xy[1]] = 255  # mark as consumed by training
                x_train.append(self.get_patch(xy[:-1]))
                y_train.append(xy[-1])
        x_train, y_train = np.array(x_train), np.array(y_train)
        idx = np.random.permutation(x_train.shape[0])
        x_train = x_train[idx]
        y_train = y_train[idx]
        return x_train, y_train.astype(int)

    def test_sample(self):
        """Return all remaining labelled samples (training pixels excluded)."""
        x_test, y_test = [], []
        self.locate_sample()
        _samp = self.sample
        for _cls in range(self.cls):
            _xy = _samp[_samp[:, 2] == _cls]
            np.random.shuffle(_xy)
            for xy in _xy:
                x_test.append(self.get_patch(xy[:-1]))
                y_test.append(xy[-1])
        return np.array(x_test), np.array(y_test)

    def all_sample(self):
        """Return one patch per pixel, row-major order."""
        imx, imy = self.gt.shape
        sample = []
        for i in range(imx):
            for j in range(imy):
                sample.append(self.get_patch(np.array([i, j])))
        return np.array(sample)

    def all_sample_light(self, clip=0, bs=10):
        """Patches for the row band [imx*clip, imx*(clip+1)) in a zero array.

        NOTE(review): the buffer is always allocated with imx*imy rows and
        for clip > 0 the requested rows exceed the image — presumably only
        clip=0 is used in practice; confirm against callers.
        """
        imx, imy = self.gt.shape
        imz = self.im.shape[-1]
        patch = self.patch
        fp = np.zeros([imx * imy, patch, patch, imz])
        countnum = 0
        for i in range(imx * clip, imx * (clip + 1)):
            for j in range(imy):
                xy = np.array([i, j])
                fp[countnum, :, :, :] = self.get_patch(xy)
                countnum += 1
        return fp

    def all_sample_row_hd(self, sub=0):
        """Patches for the first ``sub`` rows, stored in a zero-filled buffer."""
        imx, imy = self.gt.shape
        imz = self.im.shape[-1]
        patch = self.patch
        fp = np.zeros([imx * imy, patch, patch, imz])
        countnum = 0
        for i in range(sub):
            for j in range(imy):
                xy = np.array([i, j])
                fp[countnum, :, :, :] = self.get_patch(xy)
                countnum += 1
        return fp

    def all_sample_row(self, sub=0):
        """Return the patches of the single image row ``sub``."""
        imx, imy = self.gt.shape
        fp = []
        for j in range(imy):
            xy = np.array([sub, j])
            fp.append(self.get_patch(xy))
        return np.array(fp)

    def all_sample_heavy(self, name, clip=0, bs=10):
        """Like all_sample_light() but backed by an on-disk memmap ``name``."""
        imx, imy = self.gt.shape
        imz = self.im.shape[-1]
        patch = self.patch
        try:
            fp = np.memmap(name, dtype='float32', mode='w+', shape=(imx * imy, patch, patch, imz))
        except:  # noqa: E722 - fall back to read-only when the file cannot be (re)created
            fp = np.memmap(name, dtype='float32', mode='r', shape=(imx * imy, patch, patch, imz))
        countnum = 0
        for i in range(imx * clip, imx * (clip + 1)):
            for j in range(imy):
                xy = np.array([i, j])
                fp[countnum, :, :, :] = self.get_patch(xy)
                countnum += 1
        return fp

    def read_all_sample(self, name, clip=0, bs=10):
        """Re-open a memmap previously written by all_sample_heavy()."""
        imx, imy = self.gt.shape
        imz = self.im.shape[-1]
        patch = self.patch
        fp = np.memmap(name, dtype='float32', mode='r', shape=(imx * imy, patch, patch, imz))
        return fp

    def locate_obj(self, seg):
        """Record the pixel coordinates of every segment id in ``seg``."""
        obj = {}
        for i in range(seg.min(), seg.max() + 1):
            obj[str(i)] = np.where(seg == i)  # index tuple of pixels in segment i
        self.obj = obj
        self.seg = seg
def obpc(seg, cmap, obj):
    """Object-based post-classification refinement.

    For every superpixel region in ``obj`` (a dict of index tuples as
    produced by rscls.locate_obj), replace all of its pixels in a copy of
    the classification map ``cmap`` with the region's majority label.
    Returns the refined map; ``cmap`` itself is left untouched.
    """
    refined = copy.deepcopy(cmap)
    for region in obj.values():
        majority = stats.mode(cmap[region])[0]
        refined[region] = majority
    return refined
def cfm(pre, ref, ncl=9):
    """Confusion matrix with accuracy summary for 1-D label vectors.

    ``pre``/``ref`` are predicted and reference labels starting at 0.
    Returns an (ncl+2, ncl+1) array: raw counts in the top-left ncl x ncl
    block, per-class PA in the last column, per-class UA in row ncl, and
    [OA, kappa, AA] at the start of the last row. Returns None (with a
    warning) when the reference labels do not start at 0.
    """
    if ref.min() != 0:
        print('warning: label should begin with 0 !!')
        return
    n = ref.shape[0]
    counts = np.zeros((ncl, ncl))
    for p, r in zip(pre, ref):
        counts[p, r] += 1
    # Chance agreement term for Cohen's kappa.
    chance = sum((counts[j, :].sum() / n) * (counts[:, j].sum() / n) for j in range(ncl))
    out = np.zeros((ncl + 2, ncl + 1))
    out[:-2, :-1] = counts
    oa = 0
    for i in range(ncl):
        if counts[i, :].sum():
            out[i, ncl] = counts[i, i] / counts[i, :].sum()   # producer's accuracy
        if counts[:, i].sum():
            out[ncl, i] = counts[i, i] / counts[:, i].sum()   # user's accuracy
            oa += counts[i, i]
    out[-1, 0] = oa / n
    out[-1, 1] = (out[-1, 0] - chance) / (1 - chance)
    out[-1, 2] = out[ncl, :-1].mean()
    print('oa: ', format(out[-1, 0], '.5'), ' kappa: ', format(out[-1, 1], '.5'),
          ' mean: ', format(out[-1, 2], '.5'))
    return out
def gtcfm(pre, gt, ncl):
    """Confusion matrix for 2-D maps with 1-based labels (0 = background).

    Background pixels (gt == 0) are ignored. Returns an (ncl+2, ncl+1)
    array laid out like cfm(): counts, PA column, UA row and
    [OA, kappa, AA] in the last row.
    """
    if gt.max() == 255:
        print('warning: max 255 !!')
    counts = np.zeros([ncl, ncl])
    for i, j in np.ndindex(gt.shape):
        if gt[i, j]:  # skip background
            counts[pre[i, j] - 1, gt[i, j] - 1] += 1
    n = np.sum(gt != 0)
    # Chance agreement term for Cohen's kappa.
    chance = sum((counts[j, :].sum() / n) * (counts[:, j].sum() / n) for j in range(ncl))
    out = np.zeros((ncl + 2, ncl + 1))
    out[:-2, :-1] = counts
    oa = 0
    for i in range(ncl):
        if counts[i, :].sum():
            out[i, ncl] = counts[i, i] / counts[i, :].sum()   # producer's accuracy
        if counts[:, i].sum():
            out[ncl, i] = counts[i, i] / counts[:, i].sum()   # user's accuracy
            oa += counts[i, i]
    out[-1, 0] = oa / n
    out[-1, 1] = (out[-1, 0] - chance) / (1 - chance)
    out[-1, 2] = out[ncl, :-1].mean()
    print(out[ncl, :-1])
    print('oa: ', format(out[-1, 0], '.5'), ' kappa: ', format(out[-1, 1], '.5'),
          ' mean: ', format(out[-1, 2], '.5'))
    return out
def svm(trainx, trainy):
    """Two-stage (coarse then fine) grid search for an RBF-kernel SVM.

    First searches powers of two for C and gamma, then refines around the
    best pair with quarter-power-of-two steps. Prints the best parameters
    of each stage and returns the best fitted estimator.
    """
    coarse_c = [np.power(2.0, e) for e in range(-5, 16, 2)]
    coarse_g = [np.power(2.0, e) for e in range(-15, 4, 2)]
    search = GridSearchCV(SVC(verbose=0, kernel='rbf'),
                          {'C': coarse_c, 'gamma': coarse_g}, cv=3)
    search.fit(trainx, trainy)
    print(search.best_params_)
    bestc = search.best_params_['C']
    bestg = search.best_params_['gamma']
    steps = [-1.75, -1.5, -1.25, -1, -0.75, -0.5, -0.25, 0.0,
             0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
    fine_c = [bestc * np.power(2.0, s) for s in steps]
    fine_g = [bestg * np.power(2.0, s) for s in steps]
    search = GridSearchCV(SVC(verbose=0, kernel='rbf'),
                          {'C': fine_c, 'gamma': fine_g}, cv=3)
    search.fit(trainx, trainy)
    print(search.best_params_)
    return search.best_estimator_
def svm_rbf(trainx, trainy):
    """Two-stage grid search for an RBF-kernel SVM (quiet variant of svm()).

    Coarse search over powers of two for C and gamma, then a fine search
    around the best pair. Returns the best fitted estimator.
    """
    coarse_c = [np.power(2.0, e) for e in range(-3, 10, 2)]
    coarse_g = [np.power(2.0, e) for e in range(-5, 4, 2)]
    search = GridSearchCV(SVC(verbose=0, kernel='rbf'),
                          {'C': coarse_c, 'gamma': coarse_g}, cv=3)
    search.fit(trainx, trainy)
    bestc = search.best_params_['C']
    bestg = search.best_params_['gamma']
    steps = [-1.75, -1.5, -1.25, -1, -0.75, -0.5, -0.25, 0.0,
             0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
    fine_c = [bestc * np.power(2.0, s) for s in steps]
    fine_g = [bestg * np.power(2.0, s) for s in steps]
    search = GridSearchCV(SVC(verbose=0, kernel='rbf'),
                          {'C': fine_c, 'gamma': fine_g}, cv=3)
    search.fit(trainx, trainy)
    return search.best_estimator_
def rf(trainx, trainy, sim=1, nj=1):
    """Grid-search a random forest and return the best fitted estimator.

    Searches n_estimators in 20..200; when ``sim`` is truthy, also searches
    max_features over 1..(n_bands - 1). ``nj`` is the number of parallel
    jobs for each forest.
    """
    trees = list(range(20, 201, 20))
    if sim:
        params = {'n_estimators': trees,
                  'max_features': list(range(1, int(trainx.shape[-1])))}
    else:
        params = {'n_estimators': trees}
    search = GridSearchCV(RandomForestClassifier(n_jobs=nj, verbose=0, oob_score=False),
                          params, cv=3)
    search.fit(trainx, trainy)
    return search.best_estimator_
def GNB(trainx, trainy):
    """Fit and return a Gaussian naive-Bayes classifier on the given samples."""
    return GaussianNB().fit(trainx, trainy)
def svm_linear(trainx, trainy):
    """Two-stage grid search over C for a linear-kernel SVM.

    Coarse search over powers of two, then a fine search around the best C
    with quarter-power-of-two steps. Returns the best fitted estimator.
    """
    coarse_c = [np.power(2.0, e) for e in range(-3, 10, 2)]
    search = GridSearchCV(SVC(verbose=0, kernel='linear'), {'C': coarse_c}, cv=3)
    search.fit(trainx, trainy)
    bestc = search.best_params_['C']
    steps = [-1.75, -1.5, -1.25, -1, -0.75, -0.5, -0.25, 0.0,
             0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
    fine_c = [bestc * np.power(2.0, s) for s in steps]
    search = GridSearchCV(SVC(verbose=0, kernel='linear'), {'C': fine_c}, cv=3)
    search.fit(trainx, trainy)
    return search.best_estimator_
def make_sample(sample, label):
    """Four-fold augmentation of a patch batch by flipping.

    Returns the vertical flip, the horizontal flip, the 180-degree rotation
    (both flips) and the original patches stacked along axis 0, together
    with the correspondingly repeated labels.
    """
    flip_v = np.flip(sample, 1)
    flip_h = np.flip(sample, 2)
    flip_vh = np.flip(flip_h, 1)
    augmented = np.concatenate((flip_v, flip_h, flip_vh, sample), axis=0)
    labels = np.concatenate((label,) * 4, axis=0)
    return augmented, labels
def save_cmap(img, cmap, fname):
    """Write ``img`` to ``fname`` as a borderless image with colormap ``cmap``.

    The figure is sized so one image pixel maps to one output pixel
    (dpi = image height, figure one inch tall) with no axes or margins.
    """
    rows, cols = np.shape(img)[0], np.shape(img)[1]
    height = float(rows)
    width = float(cols)
    fig = plt.figure()
    fig.set_size_inches(width / height, 1, forward=False)
    ax = plt.Axes(fig, [0., 0., 1., 1.])  # axes fill the whole figure
    ax.set_axis_off()
    fig.add_axes(ax)
    ax.imshow(img, cmap=cmap)
    plt.savefig(fname, dpi=height)
    plt.close()
| [
"sklearn.model_selection.GridSearchCV",
"numpy.array",
"copy.deepcopy",
"numpy.flip",
"numpy.repeat",
"numpy.reshape",
"numpy.where",
"numpy.memmap",
"matplotlib.pyplot.close",
"numpy.concatenate",
"numpy.random.permutation",
"matplotlib.pyplot.savefig",
"numpy.ones",
"sklearn.ensemble.Ran... | [((7045, 7064), 'copy.deepcopy', 'copy.deepcopy', (['cmap'], {}), '(cmap)\n', (7058, 7064), False, 'import copy\n'), ((7397, 7417), 'numpy.zeros', 'np.zeros', (['(ncl, ncl)'], {}), '((ncl, ncl))\n', (7405, 7417), True, 'import numpy as np\n'), ((7600, 7628), 'numpy.zeros', 'np.zeros', (['(ncl + 2, ncl + 1)'], {}), '((ncl + 2, ncl + 1))\n', (7608, 7628), True, 'import numpy as np\n'), ((8226, 8246), 'numpy.zeros', 'np.zeros', (['[ncl, ncl]'], {}), '([ncl, ncl])\n', (8234, 8246), True, 'import numpy as np\n'), ((8420, 8435), 'numpy.sum', 'np.sum', (['(gt != 0)'], {}), '(gt != 0)\n', (8426, 8435), True, 'import numpy as np\n'), ((8545, 8573), 'numpy.zeros', 'np.zeros', (['(ncl + 2, ncl + 1)'], {}), '((ncl + 2, ncl + 1))\n', (8553, 8573), True, 'import numpy as np\n'), ((9366, 9394), 'sklearn.svm.SVC', 'SVC', ([], {'verbose': '(0)', 'kernel': '"""rbf"""'}), "(verbose=0, kernel='rbf')\n", (9369, 9394), False, 'from sklearn.svm import SVC\n'), ((9405, 9440), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svm', 'parameters'], {'cv': '(3)'}), '(svm, parameters, cv=3)\n', (9417, 9440), False, 'from sklearn.model_selection import GridSearchCV\n'), ((9882, 9910), 'sklearn.svm.SVC', 'SVC', ([], {'verbose': '(0)', 'kernel': '"""rbf"""'}), "(verbose=0, kernel='rbf')\n", (9885, 9910), False, 'from sklearn.svm import SVC\n'), ((9921, 9956), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svm', 'parameters'], {'cv': '(3)'}), '(svm, parameters, cv=3)\n', (9933, 9956), False, 'from sklearn.model_selection import GridSearchCV\n'), ((10314, 10342), 'sklearn.svm.SVC', 'SVC', ([], {'verbose': '(0)', 'kernel': '"""rbf"""'}), "(verbose=0, kernel='rbf')\n", (10317, 10342), False, 'from sklearn.svm import SVC\n'), ((10353, 10388), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svm', 'parameters'], {'cv': '(3)'}), '(svm, parameters, cv=3)\n', (10365, 10388), False, 'from sklearn.model_selection import GridSearchCV\n'), ((10828, 
10856), 'sklearn.svm.SVC', 'SVC', ([], {'verbose': '(0)', 'kernel': '"""rbf"""'}), "(verbose=0, kernel='rbf')\n", (10831, 10856), False, 'from sklearn.svm import SVC\n'), ((10867, 10902), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svm', 'parameters'], {'cv': '(3)'}), '(svm, parameters, cv=3)\n', (10879, 10902), False, 'from sklearn.model_selection import GridSearchCV\n'), ((11343, 11404), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': 'nj', 'verbose': '(0)', 'oob_score': '(False)'}), '(n_jobs=nj, verbose=0, oob_score=False)\n', (11365, 11404), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((11415, 11449), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['rf', 'parameters'], {'cv': '(3)'}), '(rf, parameters, cv=3)\n', (11427, 11449), False, 'from sklearn.model_selection import GridSearchCV\n'), ((11562, 11574), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (11572, 11574), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((11777, 11808), 'sklearn.svm.SVC', 'SVC', ([], {'verbose': '(0)', 'kernel': '"""linear"""'}), "(verbose=0, kernel='linear')\n", (11780, 11808), False, 'from sklearn.svm import SVC\n'), ((11819, 11854), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svm', 'parameters'], {'cv': '(3)'}), '(svm, parameters, cv=3)\n', (11831, 11854), False, 'from sklearn.model_selection import GridSearchCV\n'), ((12178, 12209), 'sklearn.svm.SVC', 'SVC', ([], {'verbose': '(0)', 'kernel': '"""linear"""'}), "(verbose=0, kernel='linear')\n", (12181, 12209), False, 'from sklearn.svm import SVC\n'), ((12220, 12255), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['svm', 'parameters'], {'cv': '(3)'}), '(svm, parameters, cv=3)\n', (12232, 12255), False, 'from sklearn.model_selection import GridSearchCV\n'), ((12367, 12385), 'numpy.flip', 'np.flip', (['sample', '(1)'], {}), '(sample, 1)\n', (12374, 12385), True, 'import numpy as np\n'), ((12394, 
12412), 'numpy.flip', 'np.flip', (['sample', '(2)'], {}), '(sample, 2)\n', (12401, 12412), True, 'import numpy as np\n'), ((12421, 12434), 'numpy.flip', 'np.flip', (['b', '(1)'], {}), '(b, 1)\n', (12428, 12434), True, 'import numpy as np\n'), ((12451, 12492), 'numpy.concatenate', 'np.concatenate', (['(a, b, c, sample)'], {'axis': '(0)'}), '((a, b, c, sample), axis=0)\n', (12465, 12492), True, 'import numpy as np\n'), ((12508, 12560), 'numpy.concatenate', 'np.concatenate', (['(label, label, label, label)'], {'axis': '(0)'}), '((label, label, label, label), axis=0)\n', (12522, 12560), True, 'import numpy as np\n'), ((12639, 12652), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (12647, 12652), True, 'import numpy as np\n'), ((12721, 12733), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12731, 12733), True, 'import matplotlib.pyplot as plt\n'), ((12801, 12836), 'matplotlib.pyplot.Axes', 'plt.Axes', (['fig', '[0.0, 0.0, 1.0, 1.0]'], {}), '(fig, [0.0, 0.0, 1.0, 1.0])\n', (12809, 12836), True, 'import matplotlib.pyplot as plt\n'), ((12911, 12941), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'dpi': 'height'}), '(fname, dpi=height)\n', (12922, 12941), True, 'import matplotlib.pyplot as plt\n'), ((12946, 12957), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12955, 12957), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1295), 'copy.deepcopy', 'copy.deepcopy', (['im'], {}), '(im)\n', (1291, 1295), False, 'import copy\n'), ((1453, 1470), 'copy.deepcopy', 'copy.deepcopy', (['gt'], {}), '(gt)\n', (1466, 1470), False, 'import copy\n'), ((1721, 1763), 'numpy.repeat', 'np.repeat', (['[self.im[0, :, :]]', 'pad'], {'axis': '(0)'}), '([self.im[0, :, :]], pad, axis=0)\n', (1730, 1763), True, 'import numpy as np\n'), ((1792, 1835), 'numpy.repeat', 'np.repeat', (['[self.im[-1, :, :]]', 'pad'], {'axis': '(0)'}), '([self.im[-1, :, :]], pad, axis=0)\n', (1801, 1835), True, 'import numpy as np\n'), ((1854, 1887), 'numpy.concatenate', 
'np.concatenate', (['(r1, self.im, r2)'], {}), '((r1, self.im, r2))\n', (1868, 1887), True, 'import numpy as np\n'), ((1909, 1972), 'numpy.reshape', 'np.reshape', (['self.im[:, 0, :]', '[self.imx + 2 * pad, 1, self.imz]'], {}), '(self.im[:, 0, :], [self.imx + 2 * pad, 1, self.imz])\n', (1919, 1972), True, 'import numpy as np\n'), ((2005, 2069), 'numpy.reshape', 'np.reshape', (['self.im[:, -1, :]', '[self.imx + 2 * pad, 1, self.imz]'], {}), '(self.im[:, -1, :], [self.imx + 2 * pad, 1, self.imz])\n', (2015, 2069), True, 'import numpy as np\n'), ((2083, 2109), 'numpy.repeat', 'np.repeat', (['r1', 'pad'], {'axis': '(1)'}), '(r1, pad, axis=1)\n', (2092, 2109), True, 'import numpy as np\n'), ((2123, 2149), 'numpy.repeat', 'np.repeat', (['r2', 'pad'], {'axis': '(1)'}), '(r2, pad, axis=1)\n', (2132, 2149), True, 'import numpy as np\n'), ((2168, 2209), 'numpy.concatenate', 'np.concatenate', (['(r1, self.im, r2)'], {'axis': '(1)'}), '((r1, self.im, r2), axis=1)\n', (2182, 2209), True, 'import numpy as np\n'), ((3175, 3188), 'numpy.array', 'np.array', (['sam'], {}), '(sam)\n', (3183, 3188), True, 'import numpy as np\n'), ((3753, 3792), 'numpy.random.permutation', 'np.random.permutation', (['x_train.shape[0]'], {}), '(x_train.shape[0])\n', (3774, 3792), True, 'import numpy as np\n'), ((4524, 4540), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (4532, 4540), True, 'import numpy as np\n'), ((4811, 4851), 'numpy.zeros', 'np.zeros', (['[imx * imy, patch, patch, imz]'], {}), '([imx * imy, patch, patch, imz])\n', (4819, 4851), True, 'import numpy as np\n'), ((5368, 5408), 'numpy.zeros', 'np.zeros', (['[imx * imy, patch, patch, imz]'], {}), '([imx * imy, patch, patch, imz])\n', (5376, 5408), True, 'import numpy as np\n'), ((5846, 5858), 'numpy.array', 'np.array', (['fp'], {}), '(fp)\n', (5854, 5858), True, 'import numpy as np\n'), ((6693, 6778), 'numpy.memmap', 'np.memmap', (['name'], {'dtype': '"""float32"""', 'mode': '"""r"""', 'shape': '(imx * imy, patch, patch, 
imz)'}), "(name, dtype='float32', mode='r', shape=(imx * imy, patch, patch, imz)\n )\n", (6702, 6778), True, 'import numpy as np\n'), ((1353, 1374), 'copy.deepcopy', 'copy.deepcopy', (['(gt - 1)'], {}), '(gt - 1)\n', (1366, 1374), False, 'import copy\n'), ((1411, 1432), 'copy.deepcopy', 'copy.deepcopy', (['(gt - 1)'], {}), '(gt - 1)\n', (1424, 1432), False, 'import copy\n'), ((3407, 3429), 'numpy.random.shuffle', 'np.random.shuffle', (['_xy'], {}), '(_xy)\n', (3424, 3429), True, 'import numpy as np\n'), ((3702, 3719), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (3710, 3719), True, 'import numpy as np\n'), ((3721, 3738), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (3729, 3738), True, 'import numpy as np\n'), ((4110, 4132), 'numpy.random.shuffle', 'np.random.shuffle', (['_xy'], {}), '(_xy)\n', (4127, 4132), True, 'import numpy as np\n'), ((4268, 4284), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (4276, 4284), True, 'import numpy as np\n'), ((4286, 4302), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4294, 4302), True, 'import numpy as np\n'), ((5770, 5788), 'numpy.array', 'np.array', (['[sub, j]'], {}), '([sub, j])\n', (5778, 5788), True, 'import numpy as np\n'), ((6035, 6120), 'numpy.memmap', 'np.memmap', (['name'], {'dtype': '"""float32"""', 'mode': '"""w+"""', 'shape': '(imx * imy, patch, patch, imz)'}), "(name, dtype='float32', mode='w+', shape=(imx * imy, patch, patch,\n imz))\n", (6044, 6120), True, 'import numpy as np\n'), ((6917, 6935), 'numpy.where', 'np.where', (['(seg == i)'], {}), '(seg == i)\n', (6925, 6935), True, 'import numpy as np\n'), ((7173, 7192), 'scipy.stats.mode', 'stats.mode', (['cmap[v]'], {}), '(cmap[v])\n', (7183, 7192), True, 'import scipy.stats as stats\n'), ((9222, 9238), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (9230, 9238), True, 'import numpy as np\n'), ((9292, 9308), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (9300, 9308), True, 
'import numpy as np\n'), ((10171, 10187), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (10179, 10187), True, 'import numpy as np\n'), ((10240, 10256), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (10248, 10256), True, 'import numpy as np\n'), ((11719, 11735), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (11727, 11735), True, 'import numpy as np\n'), ((2763, 2798), 'numpy.concatenate', 'np.concatenate', (['[sam, _sam]'], {'axis': '(0)'}), '([sam, _sam], axis=0)\n', (2777, 2798), True, 'import numpy as np\n'), ((4981, 4997), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (4989, 4997), True, 'import numpy as np\n'), ((5513, 5529), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (5521, 5529), True, 'import numpy as np\n'), ((6150, 6235), 'numpy.memmap', 'np.memmap', (['name'], {'dtype': '"""float32"""', 'mode': '"""r"""', 'shape': '(imx * imy, patch, patch, imz)'}), "(name, dtype='float32', mode='r', shape=(imx * imy, patch, patch, imz)\n )\n", (6159, 6235), True, 'import numpy as np\n'), ((6411, 6427), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (6419, 6427), True, 'import numpy as np\n'), ((9762, 9778), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (9770, 9778), True, 'import numpy as np\n'), ((9809, 9825), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (9817, 9825), True, 'import numpy as np\n'), ((10708, 10724), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (10716, 10724), True, 'import numpy as np\n'), ((10755, 10771), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (10763, 10771), True, 'import numpy as np\n'), ((12121, 12137), 'numpy.power', 'np.power', (['(2.0)', 'i'], {}), '(2.0, i)\n', (12129, 12137), True, 'import numpy as np\n'), ((2616, 2638), 'numpy.where', 'np.where', (['(self.gt == i)'], {}), '(self.gt == i)\n', (2624, 2638), True, 'import numpy as np\n'), ((2686, 2712), 'numpy.ones', 'np.ones', 
(['[_xy.shape[0], 1]'], {}), '([_xy.shape[0], 1])\n', (2693, 2712), True, 'import numpy as np\n'), ((4490, 4506), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (4498, 4506), True, 'import numpy as np\n')] |
"""ImpulseDict class for manipulating impulse responses."""
import numpy as np
from .result_dict import ResultDict
from ..utilities.ordered_set import OrderedSet
from ..utilities.bijection import Bijection
from .steady_state_dict import SteadyStateDict
class ImpulseDict(ResultDict):
    """Nested dict of impulse responses supporting elementwise arithmetic.

    Arithmetic (+, -, *, /) against other ImpulseDicts, SteadyStateDicts or
    plain numbers is applied to every top-level and internal entry. ``T``
    is the common time dimension, inferred from the data when not supplied.
    """

    def __init__(self, data, internals=None, T=None):
        """data: dict of top-level impulses, or an ImpulseDict to copy."""
        if isinstance(data, ImpulseDict):
            if internals is not None or T is not None:
                raise ValueError('Supplying ImpulseDict and also internal or T to constructor not allowed')
            super().__init__(data)
            self.T = data.T
        else:
            if not isinstance(data, dict):
                raise ValueError('ImpulseDicts are initialized with a `dict` of top-level impulse responses.')
            super().__init__(data, internals)
            self.T = (T if T is not None else self.infer_length())

    def __getitem__(self, k):
        return super().__getitem__(k, T=self.T)

    # Elementwise arithmetic: every dunder delegates to binary_operation /
    # unary_operation below.
    def __add__(self, other):
        return self.binary_operation(other, lambda a, b: a + b)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        return self.binary_operation(other, lambda a, b: a - b)

    def __rsub__(self, other):
        return self.binary_operation(other, lambda a, b: b - a)

    def __mul__(self, other):
        return self.binary_operation(other, lambda a, b: a * b)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        return self.binary_operation(other, lambda a, b: a / b)

    def __rtruediv__(self, other):
        return self.binary_operation(other, lambda a, b: b / a)

    def __neg__(self):
        return self.unary_operation(lambda a: -a)

    def __pos__(self):
        return self

    def __abs__(self):
        return self.unary_operation(lambda a: abs(a))

    def binary_operation(self, other, op):
        """Apply ``op(self_entry, other_entry)`` over all entries.

        ``other`` may be another ImpulseDict/SteadyStateDict (matched key
        by key) or a plain number (broadcast to every entry).

        Raises:
            NotImplementedError: for unsupported operand types. (Bug fix:
            the original *returned* the exception instance instead of
            raising it, so unsupported operands silently produced an
            exception object as the result.)
        """
        if isinstance(other, (SteadyStateDict, ImpulseDict)):
            toplevel = {k: op(v, other[k]) for k, v in self.toplevel.items()}
            internals = {}
            for b in self.internals:
                other_internals = other.internals[b]
                internals[b] = {k: op(v, other_internals[k]) for k, v in self.internals[b].items()}
            return ImpulseDict(toplevel, internals, self.T)
        elif isinstance(other, (float, int)):
            toplevel = {k: op(v, other) for k, v in self.toplevel.items()}
            internals = {}
            for b in self.internals:
                internals[b] = {k: op(v, other) for k, v in self.internals[b].items()}
            return ImpulseDict(toplevel, internals, self.T)
        else:
            raise NotImplementedError(f'Can only perform operations with ImpulseDicts and other ImpulseDicts, SteadyStateDicts, or numbers, not {type(other).__name__}')

    def unary_operation(self, op):
        """Apply ``op`` to every top-level and internal entry; return a new ImpulseDict."""
        toplevel = {k: op(v) for k, v in self.toplevel.items()}
        internals = {}
        for b in self.internals:
            internals[b] = {k: op(v) for k, v in self.internals[b].items()}
        return ImpulseDict(toplevel, internals, self.T)

    def pack(self):
        """Stack all top-level impulses into one vector of length T * num_outputs."""
        T = self.T
        bigv = np.empty(T * len(self.toplevel))
        for i, v in enumerate(self.toplevel.values()):
            bigv[i * T:(i + 1) * T] = v
        return bigv

    @staticmethod
    def unpack(bigv, outputs, T):
        """Inverse of pack(): split ``bigv`` into length-T impulses named by ``outputs``."""
        impulse = {}
        for i, o in enumerate(outputs):
            impulse[o] = bigv[i * T:(i + 1) * T]
        return ImpulseDict(impulse, T=T)

    def infer_length(self):
        """Return the common length of the top-level impulses; raise if inconsistent."""
        lengths = [len(v) for v in self.toplevel.values()]
        length = max(lengths)
        if length != min(lengths):
            raise ValueError(f'Building ImpulseDict with inconsistent lengths {max(lengths)} and {min(lengths)}')
        return length

    def get(self, k):
        """Like __getitem__ but with a default of a zero impulse for missing keys."""
        if isinstance(k, str):
            return self.toplevel.get(k, np.zeros(self.T))
        elif isinstance(k, tuple):
            raise TypeError(f'Key {k} to {type(self).__name__} cannot be tuple')
        else:
            try:
                return type(self)({ki: self.toplevel.get(ki, np.zeros(self.T)) for ki in k}, T=self.T)
            except TypeError:
                raise TypeError(f'Key {k} to {type(self).__name__} needs to be a string or an iterable (list, set, etc) of strings')
| [
"numpy.zeros"
] | [((3996, 4012), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (4004, 4012), True, 'import numpy as np\n'), ((4222, 4238), 'numpy.zeros', 'np.zeros', (['self.T'], {}), '(self.T)\n', (4230, 4238), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.